repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
preconditioned_GPs
preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_POWER/averagePicNML.py
import numpy as np # names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD'] # files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt'] # names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon'] # files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt'] files = ['PIC_POWER_FOLD_1.txt','PIC_POWER_FOLD_2.txt', 'PIC_POWER_FOLD_3.txt', 'PIC_POWER_FOLD_4.txt','PIC_POWER_FOLD_5.txt'] data = np.zeros(5, dtype=object) iterations = 5 for d in xrange(len(files)): data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4)) lastT = np.zeros(len(files)) lastE = np.zeros(len(files)) count = 0 while(True): occur = 0 totalT = 0 totalE = 0 iterations = 0 count = count + 1 for d in xrange(len(files)): fold = data[d] if len(fold) >= count: occur = occur+1 iterations = fold[count-1][0] totalT = totalT + fold[count-1][1] totalE = totalE + fold[count-1][2] lastT[d] = fold[count-1][1] lastE[d] = fold[count-1][2] else: totalE = totalE + lastE[d] if (occur == 0): break avgT = totalT / occur avgE = totalE / len(files) if (count < 10 or count % 10 == 0): print '%s %s %s' % (iterations, np.log10(avgT), avgE)
1,445
32.627907
168
py
preconditioned_GPs
preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_POWER/plots.py
import numpy as np from matplotlib.backends.backend_pdf import PdfPages import matplotlib.pyplot as plt # names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD'] # files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt'] # names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon'] # files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt'] pp = PdfPages('PowerPlant - GpStuff Results.pdf') files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt'] names = ['FIC','VAR','PIC'] data = np.zeros(5, dtype=object) for d in xrange(len(files)): data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2)) plt.plot(data[:,0], data[:,1], label=names[d]) plt.legend(loc='upper left') plt.title('Power Plant') plt.ylabel('Negative Marginal Log Likelihood') plt.xlabel("Time taken (seconds - log)") pp.savefig() plt.clf() files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt'] names = ['FIC','VAR','PIC'] data = np.zeros(5, dtype=object) for d in xrange(len(files)): data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2)) plt.plot(data[:,0], data[:,1], label=names[d]) plt.legend(loc='upper left') plt.title('Power Plant') plt.ylabel('Mean Squared Error') plt.xlabel("Time taken (seconds - log)") pp.savefig() pp.close()
1,514
33.431818
168
py
preconditioned_GPs
preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_POWER/averageVarNML.py
import numpy as np # names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD'] # files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt'] # names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon'] # files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt'] files = ['VAR_POWER_FOLD_1.txt','VAR_POWER_FOLD_2.txt', 'VAR_POWER_FOLD_3.txt', 'VAR_POWER_FOLD_4.txt','VAR_POWER_FOLD_5.txt'] data = np.zeros(5, dtype=object) iterations = 5 for d in xrange(len(files)): data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 1, 2)) lastT = np.zeros(len(files)) lastE = np.zeros(len(files)) count = 0 while(True): occur = 0 totalT = 0 totalE = 0 iterations = 0 count = count + 1 for d in xrange(len(files)): fold = data[d] if len(fold) >= count: occur = occur+1 iterations = fold[count-1][0] totalT = totalT + fold[count-1][1] totalE = totalE + fold[count-1][2] lastT[d] = fold[count-1][1] lastE[d] = fold[count-1][2] else: totalE = totalE + lastE[d] if (occur == 0): break avgT = totalT / occur avgE = totalE / len(files) if (count < 10 or count % 10 == 0): print '%s %s %s' % (iterations, np.log10(avgT), avgE)
1,445
32.627907
168
py
preconditioned_GPs
preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_PROTEIN/averageFicNML.py
import numpy as np # names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD'] # files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt'] # names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon'] # files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt'] files = ['FIC_PROTEIN_FOLD_1.txt','FIC_PROTEIN_FOLD_2.txt']#, 'FIC_PROTEIN_FOLD_3.txt' ] data = np.zeros(2, dtype=object) iterations = 5 for d in xrange(len(files)): data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4)) lastT = np.zeros(len(files)) lastE = np.zeros(len(files)) count = 0 while(True): occur = 0 totalT = 0 totalE = 0 iterations = 0 count = count + 1 for d in xrange(len(files)): fold = data[d] if len(fold) >= count: occur = occur+1 iterations = fold[count-1][0] totalT = totalT + fold[count-1][1] totalE = totalE + fold[count-1][2] lastT[d] = fold[count-1][1] lastE[d] = fold[count-1][2] else: totalE = totalE + lastE[d] if (occur == 0): break avgT = totalT / occur avgE = totalE / len(files) if (count < 10 or count % 10 == 0): print '%s %s %s' % (iterations, np.log10(avgT), avgE)
1,407
31.744186
168
py
preconditioned_GPs
preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_PROTEIN/averagePicNML.py
import numpy as np # names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD'] # files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt'] # names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon'] # files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt'] files = ['PIC_PROTEIN_FOLD_1.txt','PIC_PROTEIN_FOLD_2.txt']#, 'PIC_PROTEIN_FOLD_3.txt' ] data = np.zeros(2, dtype=object) iterations = 5 for d in xrange(len(files)): data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4)) lastT = np.zeros(len(files)) lastE = np.zeros(len(files)) count = 0 while(True): occur = 0 totalT = 0 totalE = 0 iterations = 0 count = count + 1 for d in xrange(len(files)): fold = data[d] if len(fold) >= count: occur = occur+1 iterations = fold[count-1][0] totalT = totalT + fold[count-1][1] totalE = totalE + fold[count-1][2] lastT[d] = fold[count-1][1] lastE[d] = fold[count-1][2] else: totalE = totalE + lastE[d] if (occur == 0): break avgT = totalT / occur avgE = totalE / len(files) if (count < 10 or count % 10 == 0): print '%s %s %s' % (iterations, np.log10(avgT), avgE)
1,407
31.744186
168
py
preconditioned_GPs
preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_PROTEIN/plots.py
import numpy as np from matplotlib.backends.backend_pdf import PdfPages import matplotlib.pyplot as plt # names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD'] # files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt'] # names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon'] # files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt'] pp = PdfPages('Protein - GpStuff Results.pdf') files = ['FIC_NMLL.txt','VAR_NMLL.txt','PIC_NMLL.txt'] names = ['FIC','VAR','PIC'] data = np.zeros(3, dtype=object) for d in xrange(len(files)): data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2)) plt.plot(data[:,0], data[:,1], label=names[d]) plt.legend(loc='upper left') plt.title('Protein') plt.ylabel('Negative Marginal Log Likelihood') plt.xlabel("Time taken (seconds - log)") pp.savefig() plt.clf() files = ['FIC_MSE.txt','VAR_MSE.txt','PIC_MSE.txt'] names = ['FIC','VAR','PIC'] data = np.zeros(3, dtype=object) for d in xrange(len(files)): data = np.loadtxt(files[d], delimiter=' ',usecols=(1, 2)) plt.plot(data[:,0], data[:,1], label=names[d]) plt.legend(loc='upper left') plt.title('Protein') plt.ylabel('Mean Squared Error') plt.xlabel("Time taken (seconds - log)") pp.savefig() pp.close()
1,503
33.181818
168
py
preconditioned_GPs
preconditioned_GPs-master/code/pcgComparison/GpStuff Comparison/RBF/RBF_RESULTS_PROTEIN/averageVarNML.py
import numpy as np # names = ['Concrete - Block Precon','Concrete - Pitc Precon', 'Concrete - Fitc Precon','Concrete - Nyst Precon', 'Concrete - Spec Precon', 'Concrete - Randomized SVD'] # files = ['ConcBlock.txt','ConcPitc.txt', 'ConcFitc.txt','ConcNyst.txt','ConcSpec.txt','ConcSvd.txt'] # names = ['Protein - Block Precon','Protein - Pitc Precon','Protein - Nyst Precon']#, 'Protein - Spec Precon','Protein - Reg Precon'] # files = ['ProtBlock_output.txt','ProtPitc_output.txt','ProtNyst_output.txt']#,'ProtSpec_output.txt','ProtReg_output.txt'] files = ['VAR_PROTEIN_FOLD_1.txt','VAR_PROTEIN_FOLD_2.txt']#, 'VAR_PROTEIN_FOLD_3.txt' ] data = np.zeros(2, dtype=object) iterations = 5 for d in xrange(len(files)): data[d] = np.loadtxt(files[d],delimiter=' ',usecols=(0, 3, 4)) lastT = np.zeros(len(files)) lastE = np.zeros(len(files)) count = 0 while(True): occur = 0 totalT = 0 totalE = 0 iterations = 0 count = count + 1 for d in xrange(len(files)): fold = data[d] if len(fold) >= count: occur = occur+1 iterations = fold[count-1][0] totalT = totalT + fold[count-1][1] totalE = totalE + fold[count-1][2] lastT[d] = fold[count-1][1] lastE[d] = fold[count-1][2] else: totalE = totalE + lastE[d] if (occur == 0): break avgT = totalT / occur avgE = totalE / len(files) if (count < 10 or count % 10 == 0): print '%s %s %s' % (iterations, np.log10(avgT), avgE)
1,407
31.744186
168
py
HIWL
HIWL-main/select cleandata.py
import shutil import os import pandas as pd import numpy as np gzimg_path = r'F:\dataSet\galaxy-zoo-the-galaxy-challenge\images_training_rev1' #原始星系图片存放路径 gzlabel_path = r'F:\dataSet\galaxy-zoo-the-galaxy-challenge\training_solutions_rev1.csv' #原始星系图片对应csv标签存放路径 gzclean_path = r'F:\dataSet\clean gzdata5' #干净样本存放路径 labels=pd.read_csv(gzlabel_path) norepeat_df = labels #阈值筛选 rou = norepeat_df[((norepeat_df['Class1.1']>0.469)&(norepeat_df['Class7.1']>0.50))] bet = norepeat_df[((norepeat_df['Class1.1']>0.469)&(norepeat_df['Class7.2']>0.50))] cig = norepeat_df[((norepeat_df['Class1.1']>0.469)&(norepeat_df['Class7.3']>0.50))] edg = norepeat_df[((norepeat_df['Class1.2']>0.430)&(norepeat_df['Class2.1']>0.602))] spi = norepeat_df[((norepeat_df['Class1.2']>0.430)&(norepeat_df['Class2.2']>0.715)&(norepeat_df['Class4.1']>0.619))] #结合类别放入文件夹 origin_path = gzimg_path root = gzclean_path class5=['rou', 'bet', 'cig', 'edg', 'spi'] classdict={'rou':rou, 'bet':bet, 'cig':cig, 'edg':edg, 'spi':spi} for classi in class5: #按不同类创建文件夹 if not os.path.exists(os.path.join(root, classi)): os.makedirs(os.path.join(root, classi)) #对源文件夹类别中的每个图片进行复制导入 for i in (classdict[classi])['GalaxyID']: shutil.copy(os.path.join(origin_path,'%s.jpg'%i),os.path.join(root,classi,'%s.jpg'%i))
1,305
42.533333
116
py
HIWL
HIWL-main/scheme/model_vit.py
""" original code from rwightman: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py """ from functools import partial from collections import OrderedDict import torch import torch.nn as nn def drop_path(x, drop_prob: float = 0., training: bool = False): if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) random_tensor.floor_() # binarize output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None): super().__init__() img_size = (img_size, img_size) patch_size = (patch_size, patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): B, C, H, W = x.shape assert H == self.img_size[0] and W == self.img_size[1], \ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
# flatten: [B, C, H, W] -> [B, C, HW] # transpose: [B, C, HW] -> [B, HW, C] x = self.proj(x).flatten(2).transpose(1, 2) x = self.norm(x) return x class Attention(nn.Module): def __init__(self, dim, # 输入token的dim num_heads=8, qkv_bias=False, qk_scale=None, attn_drop_ratio=0., proj_drop_ratio=0.): super(Attention, self).__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop_ratio) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop_ratio) def forward(self, x): # [batch_size, num_patches + 1, total_embed_dim] B, N, C = x.shape # qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim] # reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head] # permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head] qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size, num_heads, num_patches + 1, embed_dim_per_head] q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) # transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1] # @: multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1] attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) # @: multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head] # transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head] # reshape: -> [batch_size, num_patches + 1, total_embed_dim] x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = 
hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_ratio=0., attn_drop_ratio=0., drop_path_ratio=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super(Block, self).__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class VisionTransformer(nn.Module): def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, representation_size=None, distilled=False, drop_ratio=0., attn_drop_ratio=0., drop_path_ratio=0., embed_layer=PatchEmbed, norm_layer=None, act_layer=None): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_c (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True qk_scale (float): override default qk scale of head_dim ** -0.5 if 
set representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set distilled (bool): model includes a distillation token and head as in DeiT models drop_ratio (float): dropout rate attn_drop_ratio (float): attention dropout rate drop_path_ratio (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer """ super(VisionTransformer, self).__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_tokens = 2 if distilled else 1 norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) self.pos_drop = nn.Dropout(p=drop_ratio) dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=dpr[i], norm_layer=norm_layer, act_layer=act_layer) for i in range(depth) ]) self.norm = norm_layer(embed_dim) # Representation layer if representation_size and not distilled: self.has_logits = True self.num_features = representation_size self.pre_logits = nn.Sequential(OrderedDict([ ("fc", nn.Linear(embed_dim, representation_size)), ("act", nn.Tanh()) ])) else: self.has_logits = False self.pre_logits = nn.Identity() # Classifier head(s) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = None if 
distilled: self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() # Weight init nn.init.trunc_normal_(self.pos_embed, std=0.02) if self.dist_token is not None: nn.init.trunc_normal_(self.dist_token, std=0.02) nn.init.trunc_normal_(self.cls_token, std=0.02) self.apply(_init_vit_weights) def forward_features(self, x): # [B, C, H, W] -> [B, num_patches, embed_dim] x = self.patch_embed(x) # [B, 196, 768] # [1, 1, 768] -> [B, 1, 768] cls_token = self.cls_token.expand(x.shape[0], -1, -1) if self.dist_token is None: x = torch.cat((cls_token, x), dim=1) # [B, 197, 768] else: x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) x = self.pos_drop(x + self.pos_embed) x = self.blocks(x) x = self.norm(x) if self.dist_token is None: return self.pre_logits(x[:, 0]) else: return x[:, 0], x[:, 1] def forward(self, x): x = self.forward_features(x) if self.head_dist is not None: x, x_dist = self.head(x[0]), self.head_dist(x[1]) if self.training and not torch.jit.is_scripting(): # during inference, return the average of both classifier predictions return x, x_dist else: return (x + x_dist) / 2 else: x = self.head(x) return x def _init_vit_weights(m): """ ViT weight initialization :param m: module """ if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.01) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode="fan_out") if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.LayerNorm): nn.init.zeros_(m.bias) nn.init.ones_(m.weight) def vit_base_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=768 if has_logits else None, num_classes=num_classes) return model def vit_base_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=32, 
embed_dim=768, depth=12, num_heads=12, representation_size=768 if has_logits else None, num_classes=num_classes) return model def vit_large_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=16, embed_dim=1024, depth=24, num_heads=16, representation_size=1024 if has_logits else None, num_classes=num_classes) return model def vit_large_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024 if has_logits else None, num_classes=num_classes) return model def vit_huge_patch14_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280 if has_logits else None, num_classes=num_classes) return model
13,700
39.178886
118
py
HIWL
HIWL-main/scheme/model_googlenet.py
import torch.nn as nn import torch import torch.nn.functional as F class GoogLeNet(nn.Module): def __init__(self, num_classes=1000, aux_logits=True, init_weights=False): super(GoogLeNet, self).__init__() self.aux_logits = aux_logits self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3) self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.conv2 = BasicConv2d(64, 64, kernel_size=1) self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1) self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32) self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64) self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64) self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64) self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64) self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64) self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128) self.maxpool4 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128) self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128) if self.aux_logits: self.aux1 = InceptionAux(512, num_classes) self.aux2 = InceptionAux(528, num_classes) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.dropout = nn.Dropout(0.4) self.fc = nn.Linear(1024, num_classes) if init_weights: self._initialize_weights() def forward(self, x): # N x 3 x 224 x 224 x = self.conv1(x) # N x 64 x 112 x 112 x = self.maxpool1(x) # N x 64 x 56 x 56 x = self.conv2(x) # N x 64 x 56 x 56 x = self.conv3(x) # N x 192 x 56 x 56 x = self.maxpool2(x) # N x 192 x 28 x 28 x = self.inception3a(x) # N x 256 x 28 x 28 x = self.inception3b(x) # N x 480 x 28 x 28 x = self.maxpool3(x) # N x 480 x 14 x 14 x = self.inception4a(x) # N x 512 x 14 x 14 if self.training and self.aux_logits: # eval model lose this layer aux1 = self.aux1(x) x = self.inception4b(x) # N x 512 x 14 x 14 x 
= self.inception4c(x) # N x 512 x 14 x 14 x = self.inception4d(x) # N x 528 x 14 x 14 if self.training and self.aux_logits: # eval model lose this layer aux2 = self.aux2(x) x = self.inception4e(x) # N x 832 x 14 x 14 x = self.maxpool4(x) # N x 832 x 7 x 7 x = self.inception5a(x) # N x 832 x 7 x 7 x = self.inception5b(x) # N x 1024 x 7 x 7 x = self.avgpool(x) # N x 1024 x 1 x 1 x = torch.flatten(x, 1) # N x 1024 x = self.dropout(x) x = self.fc(x) # N x 1000 (num_classes) if self.training and self.aux_logits: # eval model lose this layer return x, aux2, aux1 return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) class Inception(nn.Module): def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj): super(Inception, self).__init__() self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1) self.branch2 = nn.Sequential( BasicConv2d(in_channels, ch3x3red, kernel_size=1), BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1) # 保证输出大小等于输入大小 ) self.branch3 = nn.Sequential( BasicConv2d(in_channels, ch5x5red, kernel_size=1), BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2) # 保证输出大小等于输入大小 ) self.branch4 = nn.Sequential( nn.MaxPool2d(kernel_size=3, stride=1, padding=1), BasicConv2d(in_channels, pool_proj, kernel_size=1) ) def forward(self, x): branch1 = self.branch1(x) branch2 = self.branch2(x) branch3 = self.branch3(x) branch4 = self.branch4(x) outputs = [branch1, branch2, branch3, branch4] return torch.cat(outputs, 1) class InceptionAux(nn.Module): def __init__(self, in_channels, num_classes): super(InceptionAux, self).__init__() self.averagePool = nn.AvgPool2d(kernel_size=5, stride=3) self.conv = BasicConv2d(in_channels, 128, kernel_size=1) # output[batch, 128, 4, 4] self.fc1 = 
nn.Linear(2048, 1024) self.fc2 = nn.Linear(1024, num_classes) def forward(self, x): # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14 x = self.averagePool(x) # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4 x = self.conv(x) # N x 128 x 4 x 4 x = torch.flatten(x, 1) x = F.dropout(x, 0.5, training=self.training) # N x 2048 x = F.relu(self.fc1(x), inplace=True) x = F.dropout(x, 0.5, training=self.training) # N x 1024 x = self.fc2(x) # N x num_classes return x class BasicConv2d(nn.Module): def __init__(self, in_channels, out_channels, **kwargs): super(BasicConv2d, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, **kwargs) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.relu(x) return x
5,919
33.219653
92
py
HIWL
HIWL-main/scheme/model_vgg.py
import torch.nn as nn import torch # official pretrain weights model_urls = { 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth', 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth', 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth' } class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=False): super(VGG, self).__init__() self.features = features self.classifier = nn.Sequential( nn.Linear(512*7*7, 4096), nn.ReLU(True), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(p=0.5), nn.Linear(4096, num_classes) ) if init_weights: self._initialize_weights() def forward(self, x): # N x 3 x 224 x 224 x = self.features(x) # N x 512 x 7 x 7 x = torch.flatten(x, start_dim=1) # N x 512*7*7 x = self.classifier(x) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) # nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def make_features(cfg: list): layers = [] in_channels = 3 for v in cfg: if v == "M": layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) layers += [conv2d, nn.ReLU(True)] in_channels = v return nn.Sequential(*layers) cfgs = { 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], } def vgg(model_name="vgg16", **kwargs): assert model_name in cfgs, "Warning: model 
number {} not in cfgs dict!".format(model_name) cfg = cfgs[model_name] model = VGG(make_features(cfg), **kwargs) return model
2,616
32.551282
117
py
HIWL
HIWL-main/scheme/model_resnet.py
import torch.nn as nn import torch class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(out_channel) self.relu = nn.ReLU() self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(out_channel) self.downsample = downsample def forward(self, x): identity = x if self.downsample is not None: identity = self.downsample(x) out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out += identity out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_channel, out_channel, stride=1, downsample=None, groups=1, width_per_group=64): super(Bottleneck, self).__init__() width = int(out_channel * (width_per_group / 64.)) * groups self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width, kernel_size=1, stride=1, bias=False) # squeeze channels self.bn1 = nn.BatchNorm2d(width) # ----------------------------------------- self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, groups=groups, kernel_size=3, stride=stride, bias=False, padding=1) self.bn2 = nn.BatchNorm2d(width) # ----------------------------------------- self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel*self.expansion, kernel_size=1, stride=1, bias=False) # unsqueeze channels self.bn3 = nn.BatchNorm2d(out_channel*self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample def forward(self, x): identity = x if self.downsample is not None: identity = self.downsample(x) out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out += 
identity out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, blocks_num, num_classes=1000, include_top=True, groups=1, width_per_group=64): super(ResNet, self).__init__() self.include_top = include_top self.in_channel = 64 self.groups = groups self.width_per_group = width_per_group self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(self.in_channel) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, blocks_num[0]) self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2) self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2) self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2) if self.include_top: self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # output size = (1, 1) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') def _make_layer(self, block, channel, block_num, stride=1): downsample = None if stride != 1 or self.in_channel != channel * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(channel * block.expansion)) layers = [] layers.append(block(self.in_channel, channel, downsample=downsample, stride=stride, groups=self.groups, width_per_group=self.width_per_group)) self.in_channel = channel * block.expansion for _ in range(1, block_num): layers.append(block(self.in_channel, channel, groups=self.groups, width_per_group=self.width_per_group)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) if self.include_top: x = self.avgpool(x) x = torch.flatten(x, 1) x = self.fc(x) 
return x def resnet34(num_classes=1000, include_top=True): return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top) from torchsummary import summary summary(resnet34().cuda(),(3,64,64)) def resnet50(num_classes=1000, include_top=True): return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top) def resnet101(num_classes=1000, include_top=True): return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top) def resnext50_32x4d(num_classes=1000, include_top=True): groups = 32 width_per_group = 4 return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top, groups=groups, width_per_group=width_per_group) def resnext101_32x8d(num_classes=1000, include_top=True): groups = 32 width_per_group = 8 return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top, groups=groups, width_per_group=width_per_group)
6,513
33.465608
112
py
HIWL
HIWL-main/scheme/train_resnet26.py
import os import math import argparse import sys import copy import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_resnet26 import resnet26 as create_model from my_dataset import MyDataSet, returnDataset from utils import read_split_data, train_one_epoch, evaluateall, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/resnet26', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'a') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") num_model='resnet26' print(args) print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/') tb_writer2 = SummaryWriter('classes2/{}'.format(num_model)) tb_writer4 = SummaryWriter('classes4/{}'.format(num_model)) if os.path.exists("./weights/resnet26") is False: os.makedirs("./weights/resnet26") if os.path.exists("./log") is False: os.makedirs("./log") if os.path.exists("./predicts/resnet26") is False: os.makedirs("./predicts/resnet26") sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data( args.data_path5) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.Resize((64, 64)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.Resize((64, 64)), transforms.ToTensor(), transforms.Normalize([0.046, 
0.041, 0.030], [0.090, 0.075, 0.065]) ])} val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset( data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5) # 权重采样,定义每个类别采样的权重 target = train_dataset2.images_class class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)]) weight = 1. / class_sample_count samples_weight = np.array([weight[t] for t in target]) samples_weight = torch.from_numpy(samples_weight) samples_weight = samples_weight.double() sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight)) # 在DataLoader的时候传入采样器即可 batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) val_loader5 = torch.utils.data.DataLoader(val_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) test_loader5 = torch.utils.data.DataLoader(test_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader4 = torch.utils.data.DataLoader(train_dataset4, batch_size=batch_size, pin_memory=True, shuffle=True, num_workers=nw) val_loader4 = torch.utils.data.DataLoader(val_dataset4, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader2 = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, pin_memory=False, shuffle=False, sampler=sampler, num_workers=nw) val_loader2 = torch.utils.data.DataLoader(val_dataset2, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=nw) # 如果存在预训练权重则载入 model4 = create_model(num_classes=args.num_classes4).to(device) model2 = create_model(num_classes=args.num_classes2).to(device) tags = ["loss", "accuracy", "learning_rate"] # 需要画图的指标 # 加载权重4 if args.weights4 != "": if os.path.exists(args.weights4): weights4_dict = 
torch.load(args.weights4, map_location=device) load_weights4_dict = {k: v for k, v in weights4_dict.items() if model4.state_dict()[k].numel() == v.numel()} print(model4.load_state_dict(load_weights4_dict, strict=False)) else: raise FileNotFoundError("not found weights4 file: {}".format(args.weights4)) pg4 = [p for p in model4.parameters() if p.requires_grad] # optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4) optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4) scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4, T_max=150, eta_min=0) # lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine # scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4) # 加载权重2 if args.weights2 != "": if os.path.exists(args.weights2): weights2_dict = torch.load(args.weights2, map_location=device) load_weights2_dict = {k: v for k, v in weights2_dict.items() if model2.state_dict()[k].numel() == v.numel()} print(model2.load_state_dict(load_weights2_dict, strict=False)) else: raise FileNotFoundError("not found weights2 file: {}".format(args.weights2)) pg2 = [p for p in model2.parameters() if p.requires_grad] # optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4) optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4) scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0) # lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine # scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2) best_acc_4 = [0, 0, 0] # 精度前三 best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)] best_acc_2 = [0, 0, 0] best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)] acc_combine_best = 0 acc_combine_best_index = 0 patience = 25 # 20个epoch内验证精度不下降 early_stopping4 = EarlyStopping(patience, verbose=True) early_stopping2 = EarlyStopping(patience, 
verbose=True) for epoch in range(1000): mean_loss4 = train_one_epoch('model4', model=model4, optimizer=optimizer4, data_loader=train_loader4, device=device, epoch=epoch) scheduler4.step() # validate acc_4, val_loss_4 = evaluate(model=model4, data_loader=val_loader4, device=device) if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]: best_acc_4[2] = acc_4 best_model4[2] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model)) elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = acc_4 best_model4[1] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model)) elif acc_4 > best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = best_acc_4[0] best_model4[1] = best_model4[0] best_acc_4[0] = acc_4 best_model4[0] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model)) tb_writer4.add_scalar(tags[0], mean_loss4, epoch) tb_writer4.add_scalar(tags[1], acc_4, epoch) tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch) print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0])) early_stopping4(val_loss_4, acc_4, model4) if early_stopping4.early_stop: print("epoch = {}".format(epoch)) break for epoch in range(1000): mean_loss2 = train_one_epoch('model2', model=model2, optimizer=optimizer2, data_loader=train_loader2, device=device, epoch=epoch) scheduler2.step() acc_2, val_loss_2 = evaluate(model=model2, data_loader=val_loader2, device=device) if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]: best_acc_2[2] = acc_2 best_model2[2] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model)) elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = acc_2 
best_model2[1] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model)) elif acc_2 > best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = best_acc_2[0] best_model2[1] = best_model2[0] best_acc_2[0] = acc_2 best_model2[0] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model)) tb_writer2.add_scalar(tags[0], mean_loss2, epoch) tb_writer2.add_scalar(tags[1], acc_2, epoch) tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch) print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0])) early_stopping2(val_loss_2, acc_2, model2) if early_stopping2.early_stop: print("epoch = {}".format(epoch)) break # 验证总的 for i in range(len(best_model2)): for j in range(len(best_model4)): acc_combine, pred_all = evaluateall( model2=best_model2[i], model4=best_model4[j], test_loader5=val_loader5, device=device) torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j)) if acc_combine_best < acc_combine: acc_combine_best = acc_combine acc_combine_best_index = (i, j) test_acc, test_pred_all = evaluateall( model2=best_model2[acc_combine_best_index[0]], model4=best_model4[acc_combine_best_index[1]], test_loader5=test_loader5, device=device) torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model)) print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path5', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--batch-size', type=int, default=48) parser.add_argument('--num_classes4', type=int, default=4) parser.add_argument('--epochs4', type=int, default=100) parser.add_argument('--lr4', type=float, default=0.01) parser.add_argument('--lrf4', type=float, default=0.01) 
parser.add_argument('--weights4', type=str, default='', help='initial weights4 path') parser.add_argument('--num_classes2', type=int, default=2) parser.add_argument('--epochs2', type=int, default=150) parser.add_argument('--lr2', type=float, default=0.01) parser.add_argument('--lrf2', type=float, default=0.01) parser.add_argument('--weights2', type=str, default='', help='initial weights2 path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
14,240
47.770548
138
py
HIWL
HIWL-main/scheme/train_resnet.py
import os import math import argparse import sys import copy import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_resnet import resnet34, resnet50, resnet101 from my_dataset import MyDataSet, returnDataset from utils import read_split_data, train_one_epoch, evaluateall, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/resnet', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") print(args) print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/') num_model = args.num_model if num_model == 'resnet34': create_model = resnet34 if num_model == 'resnet50': create_model = resnet50 if num_model == 'resnet101': create_model = resnet101 tb_writer2 = SummaryWriter('classes2/{}'.format(num_model)) tb_writer4 = SummaryWriter('classes4/{}'.format(num_model)) if os.path.exists("./weights/{}".format(num_model)) is False: os.makedirs("./weights/{}".format(num_model)) if os.path.exists("./log") is False: os.makedirs("./log") if os.path.exists("./predicts/{}".format(num_model)) is False: os.makedirs("./predicts/{}".format(num_model)) sys.stdout = Logger(filename='./log/{}'.format(num_model), stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data( args.data_path5) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), 
transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset( data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5) # 权重采样,定义每个类别采样的权重 target = train_dataset2.images_class class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)]) weight = 1. / class_sample_count samples_weight = np.array([weight[t] for t in target]) samples_weight = torch.from_numpy(samples_weight) samples_weight = samples_weight.double() sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight)) # 在DataLoader的时候传入采样器即可 batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) val_loader5 = torch.utils.data.DataLoader(val_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) test_loader5 = torch.utils.data.DataLoader(test_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader4 = torch.utils.data.DataLoader(train_dataset4, batch_size=batch_size, pin_memory=True, shuffle=True, num_workers=nw) val_loader4 = torch.utils.data.DataLoader(val_dataset4, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader2 = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, pin_memory=False, shuffle=False, sampler=sampler, num_workers=nw) val_loader2 = torch.utils.data.DataLoader(val_dataset2, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=nw) # 如果存在预训练权重则载入 model4 = create_model(num_classes=args.num_classes4).to(device) 
model2 = create_model(num_classes=args.num_classes2).to(device) tags = ["loss", "accuracy", "learning_rate"]#需要画图的指标 #加载权重4 if args.weights4 != "": if os.path.exists(args.weights4): weights4_dict = torch.load(args.weights4, map_location=device) load_weights4_dict = {k: v for k, v in weights4_dict.items() if model4.state_dict()[k].numel() == v.numel()} print(model4.load_state_dict(load_weights4_dict, strict=False)) else: raise FileNotFoundError("not found weights4 file: {}".format(args.weights4)) pg4 = [p for p in model4.parameters() if p.requires_grad] optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4) scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0) # 加载权重2 if args.weights2 != "": if os.path.exists(args.weights2): weights2_dict = torch.load(args.weights2, map_location=device) load_weights2_dict = {k: v for k, v in weights2_dict.items() if model2.state_dict()[k].numel() == v.numel()} print(model2.load_state_dict(load_weights2_dict, strict=False)) else: raise FileNotFoundError("not found weights2 file: {}".format(args.weights2)) pg2 = [p for p in model2.parameters() if p.requires_grad] optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4) scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0) best_acc_4 = [0, 0, 0] # 精度前三 best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)] best_acc_2 = [0, 0, 0] best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)] acc_combine_best = 0 acc_combine_best_index = 0 patience = 25 #25个epoch内验证精度不下降 early_stopping4 = EarlyStopping(patience , verbose=True) early_stopping2 = EarlyStopping(patience , verbose=True) for epoch in range(1000): mean_loss4 = train_one_epoch('model4', model=model4, optimizer=optimizer4, data_loader=train_loader4, device=device, epoch=epoch) scheduler4.step() # validate acc_4, val_loss_4 = evaluate(model=model4, data_loader=val_loader4, 
device=device) if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]: best_acc_4[2] = acc_4 best_model4[2] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model)) elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = acc_4 best_model4[1] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model)) elif acc_4 > best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = best_acc_4[0] best_model4[1] = best_model4[0] best_acc_4[0] = acc_4 best_model4[0] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model)) tb_writer4.add_scalar(tags[0], mean_loss4, epoch) tb_writer4.add_scalar(tags[1], acc_4, epoch) tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch) print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0])) early_stopping4(val_loss_4, acc_4, model4) if early_stopping4.early_stop: print("epoch = {}".format(epoch)) break for epoch in range(1000): mean_loss2 = train_one_epoch('model2', model=model2, optimizer=optimizer2, data_loader=train_loader2, device=device, epoch=epoch) scheduler2.step() acc_2, val_loss_2 = evaluate(model=model2, data_loader=val_loader2, device=device) if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]: best_acc_2[2] = acc_2 best_model2[2] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model)) elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = acc_2 best_model2[1] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model)) elif acc_2 > best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = best_acc_2[0] best_model2[1] = best_model2[0] 
best_acc_2[0] = acc_2 best_model2[0] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model)) tb_writer2.add_scalar(tags[0], mean_loss2, epoch) tb_writer2.add_scalar(tags[1], acc_2, epoch) tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch) print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0])) early_stopping2(val_loss_2, acc_2, model2) if early_stopping2.early_stop: print("epoch = {}".format(epoch)) break #验证总的 for i in range(len(best_model2)): for j in range(len(best_model4)): acc_combine, pred_all = evaluateall( model2=best_model2[i], model4=best_model4[j], test_loader5=val_loader5, device=device) torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j)) if acc_combine_best < acc_combine: acc_combine_best = acc_combine acc_combine_best_index = (i, j) test_acc, test_pred_all = evaluateall( model2=best_model2[acc_combine_best_index[0]], model4=best_model4[acc_combine_best_index[1]], test_loader5=test_loader5, device=device) torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model)) print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path5', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--batch-size', type=int, default=16) parser.add_argument('--num_classes4', type=int, default=4) parser.add_argument('--epochs4', type=int, default=100) parser.add_argument('--lr4', type=float, default=0.01) parser.add_argument('--lrf4', type=float, default=0.01) parser.add_argument('--weights4', type=str, default=r"", help='initial weights4 path') parser.add_argument('--num_classes2', type=int, default=2) parser.add_argument('--epochs2', type=int, default=150) parser.add_argument('--lr2', type=float, default=0.01) parser.add_argument('--lrf2', type=float, 
default=0.01) parser.add_argument('--weights2', type=str, default=r"", help='initial weights2 path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') parser.add_argument('--num-model', default='resnet101', help='resnet34-101') opt = parser.parse_args() main(opt)
13,921
45.875421
138
py
HIWL
HIWL-main/scheme/utils.py
import os import sys import json import pickle import random import numpy as np import torch from tqdm import tqdm import copy import matplotlib.pyplot as plt import torch.nn as nn import torch.nn.functional as F # 标签平滑嵌入到loss函数 class SMLoss(nn.Module): ''' Cross Entropy Loss with label smoothing ''' def __init__(self, label_smooth=None, class_num=137): super().__init__() self.label_smooth = label_smooth self.class_num = class_num def forward(self, pred, target): ''' Args: pred: prediction of model output [N, M] target: ground truth of sampler [N] ''' eps = 1e-12 if self.label_smooth is not None: # cross entropy loss with label smoothing logprobs = F.log_softmax(pred, dim=1) # softmax + log target = F.one_hot(target, self.class_num) # 转换成one-hot # 实现 target = torch.clamp(target.float(), min=self.label_smooth / (self.class_num - 1), max=1.0 - self.label_smooth) loss = -1 * torch.sum(target * logprobs, 1) else: # standard cross entropy loss loss = -1. * pred.gather(1, target.unsqueeze(-1)) + torch.log(torch.exp(pred + eps).sum(dim=1)) return loss.mean() def read_split_data(root: str, val_rate: float = 0.1, test_rate: float = 0.1): split_rate = val_rate + test_rate random.seed(0) # 保证随机结果可复现 assert os.path.exists(root), "dataset root: {} does not exist.".format(root) # 遍历文件夹,一个文件夹对应一个类别 galaxy_class = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))] split_galaxy_class = [] # 存储切分后类别 for i in galaxy_class: split_galaxy_class.append(i + '_train') split_galaxy_class.append(i + '_test') # 排序,保证顺序一致 galaxy_class.sort() # 生成类别名称以及对应的数字索引 class_indices = dict((k, v) for v, k in enumerate(galaxy_class)) json_str = json.dumps(dict((val, key) for key, val in class_indices.items()), indent=4) with open('class_indices.json', 'w') as json_file: json_file.write(json_str) train_images_path = [] # 存储训练集的所有图片路径 train_images_label = [] # 存储训练集图片对应索引信息 val_images_path = [] # 存储验证集的所有图片路径 val_images_label = [] # 存储验证集图片对应索引信息 test_images_path = [] # 
存储测试集的所有图片路径 test_images_label = [] # 存储测试集图片对应索引信息 every_class_num = [] # 存储每个类别的样本总数 split_every_class_num = [] # 存储每个类别的切分后样本总数 supported = [".jpg", ".JPG", ".png", ".PNG"] # 支持的文件后缀类型 # 遍历每个文件夹下的文件 for cla in galaxy_class: cla_path = os.path.join(root, cla) sample_count = 0 # 遍历获取supported支持的所有文件路径 images = [os.path.join(root, cla, i) for i in os.listdir(cla_path) if os.path.splitext(i)[-1] in supported] # 获取该类别对应的索引 image_class = class_indices[cla] # 记录该类别的样本数量 every_class_num.append(len(images)) split_path = random.sample(images, round(len(images) * split_rate)) for img_path in images: if img_path in split_path: # 如果该路径在采样的集合样本中则存入划分集 sample_count += 1 if sample_count <= len(split_path)*(val_rate/split_rate): val_images_path.append(img_path) val_images_label.append(image_class) else: test_images_path.append(img_path) test_images_label.append(image_class) else: # 否则存入训练集 train_images_path.append(img_path) train_images_label.append(image_class) print("{} images were found in the dataset.".format(sum(every_class_num))) print("{} images for training.".format(len(train_images_path))) print("{} images for val.".format(len(val_images_path))) print("{} images for test.".format(len(test_images_path))) plot_image = False if plot_image: # 绘制每种类别个数柱状图 plt.bar(range(len(split_every_class_num)), split_every_class_num, align='center') # 将横坐标0,1,2,3,4替换为相应的类别名称 plt.xticks(range(len(split_every_class_num)),split_galaxy_class) # 在柱状图上添加数值标签 for i, v in enumerate(split_every_class_num): plt.text(x=i, y=v + 5, s=str(v), ha='center') # 设置x坐标 plt.xlabel('image class') # 设置y坐标 plt.ylabel('number of images') # 设置柱状图的标题 plt.title('galaxy class distribution') plt.show() return train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label def plot_data_loader_image(data_loader): batch_size = data_loader.batch_size plot_num = min(batch_size, 4) json_path = './class_indices.json' assert os.path.exists(json_path), json_path + " does not 
exist." json_file = open(json_path, 'r') class_indices = json.load(json_file) for data in data_loader: images, labels = data for i in range(plot_num): # [C, H, W] -> [H, W, C] img = images[i].numpy().transpose(1, 2, 0) # 反Normalize操作 img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255 label = labels[i].item() plt.subplot(1, plot_num, i+1) plt.xlabel(class_indices[str(label)]) plt.xticks([]) # 去掉x轴的刻度 plt.yticks([]) # 去掉y轴的刻度 plt.imshow(img.astype('uint8')) plt.show() def write_pickle(list_info: list, file_name: str): with open(file_name, 'wb') as f: pickle.dump(list_info, f) def read_pickle(file_name: str) -> list: with open(file_name, 'rb') as f: info_list = pickle.load(f) return info_list def train_one_epoch(str,model, optimizer, data_loader, device, epoch): model.train() loss_function = torch.nn.CrossEntropyLoss() mean_loss = torch.zeros(1).to(device) optimizer.zero_grad() data_loader = tqdm(data_loader) for step, data in enumerate(data_loader): images, targets = data pred = model(images.to(device)) loss = loss_function(pred, targets.to(device)) loss.backward() mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses data_loader.desc = "[epoch {}] {}-meanloss: {}".format(epoch, str, round(mean_loss.item(), 3)) if not torch.isfinite(loss): print('WARNING: non-finite loss, ending training ', loss) sys.exit(1) optimizer.step() optimizer.zero_grad() print('traloss: {}'.format(mean_loss.item())) return mean_loss.item() def train_googlenet_one_epoch(str, model, optimizer, data_loader, device, epoch): model.train() # loss_function = torch.nn.CrossEntropyLoss() # 标签平滑 loss_function = SMLoss(label_smooth=0.05, class_num=int(str.split('l')[1])) mean_loss = torch.zeros(1).to(device) optimizer.zero_grad() data_loader = tqdm(data_loader) for step, data in enumerate(data_loader): images, labels = data logits, aux_logits2, aux_logits1 = model(images.to(device)) loss0 = loss_function(logits, labels.to(device)) loss1 = 
loss_function(aux_logits1, labels.to(device)) loss2 = loss_function(aux_logits2, labels.to(device)) loss = loss0 + loss1 * 0.3 + loss2 * 0.3 loss.backward() mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses data_loader.desc = "[epoch {}] mean loss {}".format(epoch, round(mean_loss.item(), 3)) if not torch.isfinite(loss): print('WARNING: non-finite loss, ending training ', loss) sys.exit(1) optimizer.step() optimizer.zero_grad() print('traloss: {}'.format(mean_loss.item())) return mean_loss.item() @torch.no_grad() def evaluateall(model2, model4,test_loader5, device): model2.eval() model4.eval() classes2=[1, 2] classes4=[0, 1, 3, 4] # 验证样本总个数 total_num = len(test_loader5.dataset) correct_num=torch.tensor([0]).to(device)#统计预测正确个数 pred_all=torch.tensor([]).to(device)#不断拼接最终输出为预测列表 for step, data in enumerate(tqdm(test_loader5)): images5, labels5= data pred4 = model4(images5.to(device)) pred4 = torch.max(pred4, dim=1)[1]#3类的预测 preddata=copy.deepcopy(pred4)#每个batch的预测结果 preddata=torch.tensor([classes4[i] for i in preddata.tolist()])#将索引转为对应类别 select_index2=torch.where(preddata==1)#选出为2类的索引 if len(select_index2[0])!=0: select_images2=images5[select_index2]#选出为2类的图片 pred2=model2(select_images2.to(device)) pred2=torch.max(pred2,dim=1)[1] pred2 = torch.tensor([classes2[i] for i in pred2.tolist()]) for i,j in enumerate(select_index2[0].tolist()): preddata[j]=pred2[i] correct_num += torch.eq(preddata.to(device), labels5.to(device)).sum() pred_all = torch.cat([pred_all, preddata.to(device)], dim=0) acc_combine = correct_num.item() / total_num return acc_combine,pred_all @torch.no_grad() def evaluate(model, data_loader, device): model.eval() # 验证样本总个数 total_num = len(data_loader.dataset) # 用于存储预测正确的样本个数 loss_function = torch.nn.CrossEntropyLoss() sum_num = torch.zeros(1).to(device) data_loader = tqdm(data_loader) mean_loss = torch.zeros(1).to(device) for step, data in enumerate(data_loader): images, labels = data pred = model(images.to(device)) 
loss = loss_function(pred, labels.to(device)) pred_label = torch.max(pred, dim=1)[1] sum_num += torch.eq(pred_label, labels.to(device)).sum() mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses acc = sum_num.item() / total_num return acc, mean_loss.item()
9,877
34.789855
120
py
HIWL
HIWL-main/scheme/B1_nol.py
import os import math import argparse import sys import copy import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_efficientnet import efficientnet_b1 from my_dataset import MyDataSet, returnDataset from utils import read_split_data, train_one_epoch, evaluateall, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/b1', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") img_size = {"B0": 224, "B1-nol": 240, "B2": 260, "B3": 300, "B4": 380, "B5": 456, "B6": 528, "B7": 600} num_model = args.num_model print(args) print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/') tb_writer2 = SummaryWriter('classes2/{}'.format(num_model)) tb_writer4 = SummaryWriter('classes4/{}'.format(num_model)) if num_model == 'B0': create_model = efficientnet_b0 if num_model == 'B1-nol': create_model = efficientnet_b1 if num_model == 'B2': create_model = efficientnet_b2 if num_model == 'B3': create_model = efficientnet_b3 if os.path.exists("./weights/{}".format(num_model)) is False: os.makedirs("./weights/{}".format(num_model)) if os.path.exists("./log") is False: os.makedirs("./log") if os.path.exists("./predicts/{}".format(num_model)) is False: os.makedirs("./predicts/{}".format(num_model)) sys.stdout = Logger(filename='./log/efficientnet-{}'.format(num_model), stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), 
transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(img_size[num_model], scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(img_size[num_model]), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5) # 权重采样,定义每个类别采样的权重 target = train_dataset2.images_class class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)]) weight = 1. / class_sample_count samples_weight = np.array([weight[t] for t in target]) samples_weight = torch.from_numpy(samples_weight) samples_weight = samples_weight.double() sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight)) # 在DataLoader的时候传入采样器即可 batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) val_loader5 = torch.utils.data.DataLoader(val_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) test_loader5 = torch.utils.data.DataLoader(test_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader4 = torch.utils.data.DataLoader(train_dataset4, batch_size=batch_size, pin_memory=True, shuffle=True, num_workers=nw) val_loader4 = torch.utils.data.DataLoader(val_dataset4, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader2 = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, pin_memory=False, shuffle=False, sampler=sampler, num_workers=nw) val_loader2 = 
torch.utils.data.DataLoader(val_dataset2, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=nw) # 如果存在预训练权重则载入 model4 = create_model(num_classes=args.num_classes4).to(device) model2 = create_model(num_classes=args.num_classes2).to(device) tags = ["loss", "accuracy", "learning_rate"]#需要画图的指标 #加载权重4 if args.weights4 != "": if os.path.exists(args.weights4): weights4_dict = torch.load(args.weights4, map_location=device) load_weights4_dict = {k: v for k, v in weights4_dict.items() if model4.state_dict()[k].numel() == v.numel()} print(model4.load_state_dict(load_weights4_dict, strict=False)) else: raise FileNotFoundError("not found weights4 file: {}".format(args.weights4)) pg4 = [p for p in model4.parameters() if p.requires_grad] # optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4) optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4) scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0) # lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine # scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4) # 加载权重2 if args.weights2 != "": if os.path.exists(args.weights2): weights2_dict = torch.load(args.weights2, map_location=device) load_weights2_dict = {k: v for k, v in weights2_dict.items() if model2.state_dict()[k].numel() == v.numel()} print(model2.load_state_dict(load_weights2_dict, strict=False)) else: raise FileNotFoundError("not found weights2 file: {}".format(args.weights2)) pg2 = [p for p in model2.parameters() if p.requires_grad] # optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4) optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4) scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0) # lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine # scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2) 
best_acc_4 = [0, 0, 0] # 精度前三 best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)] best_acc_2 = [0, 0, 0] best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)] acc_combine_best = 0 acc_combine_best_index = 0 patience = 25 #20个epoch内验证精度不下降 early_stopping4 = EarlyStopping(patience, verbose=True) early_stopping2 = EarlyStopping(patience, verbose=True) for epoch in range(1000): mean_loss4 = train_one_epoch('model4', model=model4, optimizer=optimizer4, data_loader=train_loader4, device=device, epoch=epoch) scheduler4.step() # validate acc_4, val_loss_4 = evaluate(model=model4, data_loader=val_loader4, device=device) if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]: best_acc_4[2] = acc_4 best_model4[2] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model)) elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = acc_4 best_model4[1] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model)) elif acc_4 > best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = best_acc_4[0] best_model4[1] = best_model4[0] best_acc_4[0] = acc_4 best_model4[0] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model)) tb_writer4.add_scalar(tags[0], mean_loss4, epoch) tb_writer4.add_scalar(tags[1], acc_4, epoch) tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch) print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0])) early_stopping4(val_loss_4, acc_4, model4) if early_stopping4.early_stop: break for epoch in range(1000): mean_loss2 = train_one_epoch('model2', model=model2, optimizer=optimizer2, data_loader=train_loader2, device=device, epoch=epoch) scheduler2.step() acc_2, val_loss_2 = evaluate(model=model2, 
data_loader=val_loader2, device=device) if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]: best_acc_2[2] = acc_2 best_model2[2] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model)) elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = acc_2 best_model2[1] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model)) elif acc_2 > best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = best_acc_2[0] best_model2[1] = best_model2[0] best_acc_2[0] = acc_2 best_model2[0] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model)) tb_writer2.add_scalar(tags[0], mean_loss2, epoch) tb_writer2.add_scalar(tags[1], acc_2, epoch) tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch) print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0])) early_stopping2(val_loss_2, acc_2, model2) if early_stopping2.early_stop: print("epoch = {}".format(epoch)) break #验证总的 for i in range(len(best_model2)): for j in range(len(best_model4)): acc_combine, pred_all = evaluateall( model2=best_model2[i], model4=best_model4[j], test_loader5=val_loader5, device=device) torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j)) if acc_combine_best < acc_combine: acc_combine_best = acc_combine acc_combine_best_index = (i, j) test_acc, test_pred_all = evaluateall( model2=best_model2[acc_combine_best_index[0]], model4=best_model4[acc_combine_best_index[1]], test_loader5=test_loader5, device=device) torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model)) print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() 
parser.add_argument('--data-path5', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--batch-size', type=int, default=16) parser.add_argument('--num_classes4', type=int, default=4) parser.add_argument('--epochs4', type=int, default=100) parser.add_argument('--lr4', type=float, default=0.005) parser.add_argument('--lrf4', type=float, default=0.05) parser.add_argument('--weights4', type=str, default='F:/pretrain pth/efficientnetb1.pth', help='initial weights4 path') parser.add_argument('--num_classes2', type=int, default=2) parser.add_argument('--epochs2', type=int, default=150) parser.add_argument('--lr2', type=float, default=0.005) parser.add_argument('--lrf2', type=float, default=0.01) parser.add_argument('--weights2', type=str, default='F:/pretrain pth/efficientnetb1.pth', help='initial weights2 path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') parser.add_argument('--num-model', default='B1-nol', help='B0-B7') opt = parser.parse_args() main(opt)
14,720
46.640777
240
py
HIWL
HIWL-main/scheme/train_googlenet.py
import os import math import argparse import sys import copy import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_googlenet import GoogLeNet as create_model from my_dataset import MyDataSet, returnDataset from utils import read_split_data, train_googlenet_one_epoch, evaluateall, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/googlenet', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") num_model='googlenet' print(args) print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/') tb_writer2 = SummaryWriter('classes2/{}'.format(num_model)) tb_writer4 = SummaryWriter('classes4/{}'.format(num_model)) if os.path.exists("./weights/googlenet") is False: os.makedirs("./weights/googlenet") if os.path.exists("./log") is False: os.makedirs("./log") if os.path.exists("./predicts/googlenet") is False: os.makedirs("./predicts/googlenet") sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) 
])} val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5) # 权重采样,定义每个类别采样的权重 target = train_dataset2.images_class class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)]) weight = 1. / class_sample_count samples_weight = np.array([weight[t] for t in target]) samples_weight = torch.from_numpy(samples_weight) samples_weight = samples_weight.double() sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight)) # 在DataLoader的时候传入采样器即可 batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) val_loader5 = torch.utils.data.DataLoader(val_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) test_loader5 = torch.utils.data.DataLoader(test_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader4 = torch.utils.data.DataLoader(train_dataset4, batch_size=batch_size, pin_memory=True, shuffle=True, num_workers=nw) val_loader4 = torch.utils.data.DataLoader(val_dataset4, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader2 = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, pin_memory=False, shuffle=False, sampler=sampler, num_workers=nw) val_loader2 = torch.utils.data.DataLoader(val_dataset2, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=nw) # 如果存在预训练权重则载入 model4 = create_model(num_classes=args.num_classes4).to(device) model2 = create_model(num_classes=args.num_classes2).to(device) tags = ["loss", "accuracy", "learning_rate"]#需要画图的指标 #加载权重4 if args.weights4 != "": if os.path.exists(args.weights4): weights4_dict = torch.load(args.weights4, map_location=device) 
load_weights4_dict = {k: v for k, v in weights4_dict.items() if model4.state_dict()[k].numel() == v.numel()} print(model4.load_state_dict(load_weights4_dict, strict=False)) else: raise FileNotFoundError("not found weights4 file: {}".format(args.weights4)) pg4 = [p for p in model4.parameters() if p.requires_grad] # optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4) optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4) scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0) # lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine # scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4) # 加载权重2 if args.weights2 != "": if os.path.exists(args.weights2): weights2_dict = torch.load(args.weights2, map_location=device) load_weights2_dict = {k: v for k, v in weights2_dict.items() if model2.state_dict()[k].numel() == v.numel()} print(model2.load_state_dict(load_weights2_dict, strict=False)) else: raise FileNotFoundError("not found weights2 file: {}".format(args.weights2)) pg2 = [p for p in model2.parameters() if p.requires_grad] # optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4) optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4) scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0) # lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine # scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2) best_acc_4 = [0, 0, 0] # 精度前三 best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)] best_acc_2 = [0, 0, 0] best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)] acc_combine_best = 0 acc_combine_best_index = 0 patience = 25 #25个epoch内验证精度不下降 early_stopping4 = EarlyStopping(patience , verbose=True) early_stopping2 = EarlyStopping(patience , verbose=True) for epoch in range(1000): 
mean_loss4 = train_googlenet_one_epoch('model4', model=model4, optimizer=optimizer4, data_loader=train_loader4, device=device, epoch=epoch) scheduler4.step() # validate acc_4, val_loss_4 = evaluate(model=model4, data_loader=val_loader4, device=device) if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]: best_acc_4[2] = acc_4 best_model4[2] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model)) elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = acc_4 best_model4[1] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model)) elif acc_4 > best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = best_acc_4[0] best_model4[1] = best_model4[0] best_acc_4[0] = acc_4 best_model4[0] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model)) tb_writer4.add_scalar(tags[0], mean_loss4, epoch) tb_writer4.add_scalar(tags[1], acc_4, epoch) tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch) print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0])) early_stopping4(val_loss_4, acc_4, model4) if early_stopping4.early_stop: print("epoch = {}".format(epoch)) break for epoch in range(1000): mean_loss2 = train_googlenet_one_epoch('model2', model=model2, optimizer=optimizer2, data_loader=train_loader2, device=device, epoch=epoch) scheduler2.step() acc_2, val_loss_2 = evaluate(model=model2, data_loader=val_loader2, device=device) if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]: best_acc_2[2] = acc_2 best_model2[2] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model)) elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = acc_2 best_model2[1] = 
copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model)) elif acc_2 > best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = best_acc_2[0] best_model2[1] = best_model2[0] best_acc_2[0] = acc_2 best_model2[0] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model)) tb_writer2.add_scalar(tags[0], mean_loss2, epoch) tb_writer2.add_scalar(tags[1], acc_2, epoch) tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch) print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0])) early_stopping2(val_loss_2, acc_2, model2) if early_stopping2.early_stop: print("epoch = {}".format(epoch)) break #验证总的 for i in range(len(best_model2)): for j in range(len(best_model4)): acc_combine, pred_all = evaluateall( model2=best_model2[i], model4=best_model4[j], test_loader5=val_loader5, device=device) torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j)) if acc_combine_best < acc_combine: acc_combine_best = acc_combine acc_combine_best_index = (i, j) test_acc, test_pred_all = evaluateall( model2=best_model2[acc_combine_best_index[0]], model4=best_model4[acc_combine_best_index[1]], test_loader5=test_loader5, device=device) torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model)) print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path5', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--batch-size', type=int, default=24) parser.add_argument('--num_classes4', type=int, default=4) parser.add_argument('--epochs4', type=int, default=100) parser.add_argument('--lr4', type=float, default=0.01) parser.add_argument('--lrf4', type=float, default=0.01) parser.add_argument('--weights4', type=str, 
default='', help='initial weights4 path') parser.add_argument('--num_classes2', type=int, default=2) parser.add_argument('--epochs2', type=int, default=150) parser.add_argument('--lr2', type=float, default=0.01) parser.add_argument('--lrf2', type=float, default=0.01) parser.add_argument('--weights2', type=str, default='', help='initial weights2 path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
14,050
47.619377
240
py
HIWL
HIWL-main/scheme/train_vgg.py
# coding=UTF-8 import os import math import argparse import sys import copy import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_vgg import vgg as create_model from my_dataset import MyDataSet, returnDataset from utils import read_split_data, train_one_epoch, evaluateall, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/vgg', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") num_model='vgg' print(args) print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/') tb_writer2 = SummaryWriter('classes2/{}'.format(num_model)) tb_writer4 = SummaryWriter('classes4/{}'.format(num_model)) if os.path.exists("./weights/vgg") is False: os.makedirs("./weights/vgg") if os.path.exists("./log") is False: os.makedirs("./log") if os.path.exists("./predicts/vgg") is False: os.makedirs("./predicts/vgg") sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} val_dataset5, test_dataset5, 
train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5) # 权重采样,定义每个类别采样的权重 target = train_dataset2.images_class class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)]) weight = 1. / class_sample_count samples_weight = np.array([weight[t] for t in target]) samples_weight = torch.from_numpy(samples_weight) samples_weight = samples_weight.double() sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight)) # 在DataLoader的时候传入采样器即可 batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) val_loader5 = torch.utils.data.DataLoader(val_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) test_loader5 = torch.utils.data.DataLoader(test_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader4 = torch.utils.data.DataLoader(train_dataset4, batch_size=batch_size, pin_memory=True, shuffle=True, num_workers=nw) val_loader4 = torch.utils.data.DataLoader(val_dataset4, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader2 = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, pin_memory=False, shuffle=False, sampler=sampler, num_workers=nw) val_loader2 = torch.utils.data.DataLoader(val_dataset2, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=nw) # 如果存在预训练权重则载入 model4 = create_model(num_classes=args.num_classes4).to(device) model2 = create_model(num_classes=args.num_classes2).to(device) tags = ["loss", "accuracy", "learning_rate"]#需要画图的指标 #加载权重4 if args.weights4 != "": if os.path.exists(args.weights4): weights4_dict = torch.load(args.weights4, map_location=device) load_weights4_dict = {k: v for 
k, v in weights4_dict.items() if model4.state_dict()[k].numel() == v.numel()} print(model4.load_state_dict(load_weights4_dict, strict=False)) else: raise FileNotFoundError("not found weights4 file: {}".format(args.weights4)) pg4 = [p for p in model4.parameters() if p.requires_grad] # optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4) optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4) scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0) # lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine # scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4) # 加载权重2 if args.weights2 != "": if os.path.exists(args.weights2): weights2_dict = torch.load(args.weights2, map_location=device) load_weights2_dict = {k: v for k, v in weights2_dict.items() if model2.state_dict()[k].numel() == v.numel()} print(model2.load_state_dict(load_weights2_dict, strict=False)) else: raise FileNotFoundError("not found weights2 file: {}".format(args.weights2)) pg2 = [p for p in model2.parameters() if p.requires_grad] # optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4) optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4) scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0) # lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine # scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2) best_acc_4 = [0, 0, 0] # 精度前三 best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)] best_acc_2 = [0, 0, 0] best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)] acc_combine_best = 0 acc_combine_best_index = 0 patience = 25 #25个epoch内验证精度不下降 early_stopping4 = EarlyStopping(patience , verbose=True) early_stopping2 = EarlyStopping(patience , verbose=True) for epoch in range(1000): mean_loss4 = 
train_one_epoch('model4', model=model4, optimizer=optimizer4, data_loader=train_loader4, device=device, epoch=epoch) scheduler4.step() # validate acc_4, val_loss_4 = evaluate(model=model4, data_loader=val_loader4, device=device) if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]: best_acc_4[2] = acc_4 best_model4[2] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model)) elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = acc_4 best_model4[1] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model)) elif acc_4 > best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = best_acc_4[0] best_model4[1] = best_model4[0] best_acc_4[0] = acc_4 best_model4[0] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model)) tb_writer4.add_scalar(tags[0], mean_loss4, epoch) tb_writer4.add_scalar(tags[1], acc_4, epoch) tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch) print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0])) early_stopping4(val_loss_4, acc_4, model4) if early_stopping4.early_stop: print("epoch = {}".format(epoch)) break for epoch in range(1000): mean_loss2 = train_one_epoch('model2', model=model2, optimizer=optimizer2, data_loader=train_loader2, device=device, epoch=epoch) scheduler2.step() acc_2, val_loss_2 = evaluate(model=model2, data_loader=val_loader2, device=device) if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]: best_acc_2[2] = acc_2 best_model2[2] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model)) elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = acc_2 best_model2[1] = copy.deepcopy(model2) 
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model)) elif acc_2 > best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = best_acc_2[0] best_model2[1] = best_model2[0] best_acc_2[0] = acc_2 best_model2[0] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model)) tb_writer2.add_scalar(tags[0], mean_loss2, epoch) tb_writer2.add_scalar(tags[1], acc_2, epoch) tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch) print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0])) early_stopping2(val_loss_2, acc_2, model2) if early_stopping2.early_stop: print("epoch = {}".format(epoch)) break #验证总的 for i in range(len(best_model2)): for j in range(len(best_model4)): acc_combine, pred_all = evaluateall( model2=best_model2[i], model4=best_model4[j], test_loader5=val_loader5, device=device) torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j)) if acc_combine_best < acc_combine: acc_combine_best = acc_combine acc_combine_best_index = (i, j) test_acc, test_pred_all = evaluateall( model2=best_model2[acc_combine_best_index[0]], model4=best_model4[acc_combine_best_index[1]], test_loader5=test_loader5, device=device) torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model)) print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path5', type=str, default="F:/dataSet/clean gzdata") parser.add_argument('--batch-size', type=int, default=2) parser.add_argument('--num_classes4', type=int, default=4) parser.add_argument('--epochs4', type=int, default=100) parser.add_argument('--lr4', type=float, default=0.01) parser.add_argument('--lrf4', type=float, default=0.01) parser.add_argument('--weights4', type=str, default=r'F:\pretrain 
pth\vgg16-397923af.pth', help='initial weights4 path') parser.add_argument('--num_classes2', type=int, default=2) parser.add_argument('--epochs2', type=int, default=150) parser.add_argument('--lr2', type=float, default=0.01) parser.add_argument('--lrf2', type=float, default=0.01) parser.add_argument('--weights2', type=str, default=r'F:\pretrain pth\vgg16-397923af.pth', help='initial weights2 path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
14,055
47.468966
240
py
HIWL
HIWL-main/scheme/model_resnet26.py
import torch.nn as nn import torch #源tf码中全局池化前有bn,不同深度的先对输入进行bn-relu再变成shortcut,同深度shortcut直接对输入下采样(maxpooling k=1*1 strid=s) class BasicBlock(nn.Module): def __init__(self, m, k=2, dropoutrate=0.2, istop : bool = False,isbottom : bool = False): super(BasicBlock, self).__init__() self.in_channel = m*k * 2 self.out_channel = m*k * 4 self.istop=istop self.isbottom=isbottom if self.istop: self.in_channel = 64 self.conv1 = nn.Conv2d(in_channels=self.in_channel, out_channels=m*k, kernel_size=1, stride=1, padding=0) self.conv2 = nn.Conv2d(in_channels=m*k, out_channels=m*k, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(in_channels=m*k, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0) self.conv4 = nn.Conv2d(in_channels=self.out_channel, out_channels=m*k, kernel_size=1, stride=1, padding=0) self.conv5 = nn.Conv2d(in_channels=m*k, out_channels=m*k, kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(in_channels=m*k, out_channels=self.out_channel, kernel_size=1, stride=2, padding=0) self.convshortcut1= nn.Conv2d(in_channels=self.in_channel, out_channels=self.out_channel, kernel_size=1,#raise dimension padding=0, stride=1) self.convshortcut2 = nn.MaxPool2d(kernel_size=2,stride=2)#downsample self.bninc = nn.BatchNorm2d(self.in_channel) self.bnmk = nn.BatchNorm2d(m*k) self.bnoutc = nn.BatchNorm2d(self.out_channel) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=dropoutrate) if self.isbottom: self.conv6 = nn.Conv2d(in_channels=m * k, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0) def forward(self, x): #第一个块 # identity1 = self.bninc(x) out = self.bninc(x) out = self.relu(out) identity1 = out out = self.conv1(out) out = self.bnmk(out) out = self.relu(out) out = self.conv2(out) out = self.dropout(out) out = self.bnmk(out) out = self.relu(out) out = self.conv3(out) out += self.convshortcut1(identity1) #第二个块 # identity2 = self.bnoutc(out) identity2 = out out = self.bnoutc(out) out = self.relu(out) out = self.conv4(out) out = 
self.bnmk(out) out = self.relu(out) out = self.conv5(out) out = self.dropout(out) out = self.bnmk(out) out = self.relu(out) out = self.conv6(out) if self.isbottom: out += identity2 out = self.bnoutc(out) else: out += self.convshortcut2(identity2) return out class ResNet26(nn.Module): def __init__(self, block, mlist, # mlist=[32, 64, 128, 256], k, dropoutrate, num_classes ): super(ResNet26, self).__init__() self.pad=nn.ZeroPad2d(padding=(2, 3, 2, 3)) self.conv1x = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=6, stride=1) self.maxpool = nn.MaxPool2d(kernel_size=1, stride=2) self.conv2to5x = self._make_layer(block, mlist, k, dropoutrate) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # self.fc = nn.Linear(mlist[-1]*k*4, num_classes) self.fc = nn.Sequential( # nn.Dropout(p=0.3), nn.Conv2d(in_channels=mlist[-1]*k*4,out_channels=num_classes, kernel_size=1) ) def forward(self, x): out = self.pad(x) out = self.conv1x(out) out = self.maxpool(out) out = self.conv2to5x(out) out = self.avgpool(out) # out = torch.flatten(out, start_dim=1) out = self.fc(out) out = torch.flatten(out, start_dim=1, end_dim=3) return out def _make_layer(self, block, mlist, k, dropoutrate): layers = [] for i in range(len(mlist)): if i == 0: layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop= True, isbottom=False)) elif (i == len(mlist)-1): layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop=False, isbottom=True)) else: layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop=False, isbottom=False)) return nn.Sequential(*layers) def resnet26(block=BasicBlock, mlist=[64, 128, 256, 512], k=2, dropoutrate=0.33, num_classes=5): return ResNet26(block=block, mlist=mlist, k=k, dropoutrate=dropoutrate, num_classes=num_classes) # from torchsummary import summary # summary(resnet26().cuda(),(3,64,64))
4,981
37.921875
128
py
HIWL
HIWL-main/scheme/model_efficientnet.py
import math import copy from functools import partial from collections import OrderedDict from typing import Optional, Callable import torch import torch.nn as nn from torch import Tensor from torch.nn import functional as F def _make_divisible(ch, divisor=8, min_ch=None): if min_ch is None: min_ch = divisor new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_ch < 0.9 * ch: new_ch += divisor return new_ch class ConvBNActivation(nn.Sequential): def __init__(self, in_planes: int, out_planes: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, activation_layer: Optional[Callable[..., nn.Module]] = None): padding = (kernel_size - 1) // 2 if norm_layer is None: norm_layer = nn.BatchNorm2d if activation_layer is None: activation_layer = nn.SiLU # alias Swish (torch>=1.7) super(ConvBNActivation, self).__init__(nn.Conv2d(in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False), norm_layer(out_planes), activation_layer()) class SqueezeExcitation(nn.Module): def __init__(self, input_c: int, # block input channel expand_c: int, # block expand channel squeeze_factor: int = 4): super(SqueezeExcitation, self).__init__() squeeze_c = input_c // squeeze_factor self.fc1 = nn.Conv2d(expand_c, squeeze_c, 1) self.ac1 = nn.SiLU() # alias Swish self.fc2 = nn.Conv2d(squeeze_c, expand_c, 1) self.ac2 = nn.Sigmoid() def forward(self, x: Tensor) -> Tensor: scale = F.adaptive_avg_pool2d(x, output_size=(1, 1)) scale = self.fc1(scale) scale = self.ac1(scale) scale = self.fc2(scale) scale = self.ac2(scale) return scale * x class InvertedResidualConfig: # kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate def __init__(self, kernel: int, # 3 or 5 input_c: int, out_c: int, expanded_ratio: int, # 1 or 6 stride: int, # 1 or 2 use_se: bool, # True 
drop_rate: float, index: str, # 1a, 2a, 2b, ... width_coefficient: float): self.input_c = self.adjust_channels(input_c, width_coefficient) self.kernel = kernel self.expanded_c = self.input_c * expanded_ratio self.out_c = self.adjust_channels(out_c, width_coefficient) self.use_se = use_se self.stride = stride self.drop_rate = drop_rate self.index = index @staticmethod def adjust_channels(channels: int, width_coefficient: float): return _make_divisible(channels * width_coefficient, 8) class InvertedResidual(nn.Module): def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Module]): super(InvertedResidual, self).__init__() if cnf.stride not in [1, 2]: raise ValueError("illegal stride value.") self.use_res_connect = (cnf.stride == 1 and cnf.input_c == cnf.out_c) layers = OrderedDict() activation_layer = nn.SiLU # alias Swish # expand if cnf.expanded_c != cnf.input_c: layers.update({"expand_conv": ConvBNActivation(cnf.input_c, cnf.expanded_c, kernel_size=1, norm_layer=norm_layer, activation_layer=activation_layer)}) # depthwise layers.update({"dwconv": ConvBNActivation(cnf.expanded_c, cnf.expanded_c, kernel_size=cnf.kernel, stride=cnf.stride, groups=cnf.expanded_c, norm_layer=norm_layer, activation_layer=activation_layer)}) if cnf.use_se: layers.update({"se": SqueezeExcitation(cnf.input_c, cnf.expanded_c)}) # project layers.update({"project_conv": ConvBNActivation(cnf.expanded_c, cnf.out_c, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.Identity)}) self.block = nn.Sequential(layers) self.out_channels = cnf.out_c self.is_strided = cnf.stride > 1 # 只有在使用shortcut连接时才使用dropout层 if self.use_res_connect and cnf.drop_rate > 0: self.dropout = nn.Dropout2d(p=cnf.drop_rate, inplace=True) else: self.dropout = nn.Identity() def forward(self, x: Tensor) -> Tensor: result = self.block(x) result = self.dropout(result) if self.use_res_connect: result += x return result class EfficientNet(nn.Module): def __init__(self, width_coefficient: float, 
depth_coefficient: float, num_classes: int = 1000, dropout_rate: float = 0.2, drop_connect_rate: float = 0.2, block: Optional[Callable[..., nn.Module]] = None, norm_layer: Optional[Callable[..., nn.Module]] = None ): super(EfficientNet, self).__init__() # kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate, repeats default_cnf = [[3, 32, 16, 1, 1, True, drop_connect_rate, 1], [3, 16, 24, 6, 2, True, drop_connect_rate, 2], [5, 24, 40, 6, 2, True, drop_connect_rate, 2], [3, 40, 80, 6, 2, True, drop_connect_rate, 3], [5, 80, 112, 6, 1, True, drop_connect_rate, 3], [5, 112, 192, 6, 2, True, drop_connect_rate, 4], [3, 192, 320, 6, 1, True, drop_connect_rate, 1]] def round_repeats(repeats): """Round number of repeats based on depth multiplier.""" return int(math.ceil(depth_coefficient * repeats)) if block is None: block = InvertedResidual if norm_layer is None: norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.1) adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_coefficient=width_coefficient) # build inverted_residual_setting bneck_conf = partial(InvertedResidualConfig, width_coefficient=width_coefficient) b = 0 num_blocks = float(sum(round_repeats(i[-1]) for i in default_cnf)) inverted_residual_setting = [] for stage, args in enumerate(default_cnf): cnf = copy.copy(args) for i in range(round_repeats(cnf.pop(-1))): if i > 0: # strides equal 1 except first cnf cnf[-3] = 1 # strides cnf[1] = cnf[2] # input_channel equal output_channel cnf[-1] *= b / num_blocks # update dropout ratio index = str(stage + 1) + chr(i + 97) # 1a, 2a, 2b, ... 
inverted_residual_setting.append(bneck_conf(*cnf, index)) b += 1 # create layers layers = OrderedDict() # first conv layers.update({"stem_conv": ConvBNActivation(in_planes=3, out_planes=adjust_channels(32), kernel_size=3, stride=2, norm_layer=norm_layer)}) # building inverted residual blocks for cnf in inverted_residual_setting: layers.update({cnf.index: block(cnf, norm_layer)}) # build top last_conv_input_c = inverted_residual_setting[-1].out_c last_conv_output_c = adjust_channels(1280) layers.update({"top": ConvBNActivation(in_planes=last_conv_input_c, out_planes=last_conv_output_c, kernel_size=1, norm_layer=norm_layer)}) self.features = nn.Sequential(layers) self.avgpool = nn.AdaptiveAvgPool2d(1) classifier = [] if dropout_rate > 0: classifier.append(nn.Dropout(p=dropout_rate, inplace=True)) classifier.append(nn.Linear(last_conv_output_c, num_classes)) self.classifier = nn.Sequential(*classifier) # initial weights for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode="fan_out") if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.zeros_(m.bias) def _forward_impl(self, x: Tensor) -> Tensor: x = self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return x def forward(self, x: Tensor) -> Tensor: return self._forward_impl(x) def efficientnet_b0(num_classes=1000): # input image size 224x224 return EfficientNet(width_coefficient=1.0, depth_coefficient=1.0, dropout_rate=0.2, num_classes=num_classes) def efficientnet_b1(num_classes=1000): # input image size 240x240 return EfficientNet(width_coefficient=1.0, depth_coefficient=1.1, dropout_rate=0.2, num_classes=num_classes) def efficientnet_b2(num_classes=1000): # input image size 260x260 return EfficientNet(width_coefficient=1.1, depth_coefficient=1.2, dropout_rate=0.3, num_classes=num_classes) def 
efficientnet_b3(num_classes=1000): # input image size 300x300 return EfficientNet(width_coefficient=1.2, depth_coefficient=1.4, dropout_rate=0.3, num_classes=num_classes) def efficientnet_b4(num_classes=1000): # input image size 380x380 return EfficientNet(width_coefficient=1.4, depth_coefficient=1.8, dropout_rate=0.4, num_classes=num_classes) def efficientnet_b5(num_classes=1000): # input image size 456x456 return EfficientNet(width_coefficient=1.6, depth_coefficient=2.2, dropout_rate=0.4, num_classes=num_classes) def efficientnet_b6(num_classes=1000): # input image size 528x528 return EfficientNet(width_coefficient=1.8, depth_coefficient=2.6, dropout_rate=0.5, num_classes=num_classes) def efficientnet_b7(num_classes=1000): # input image size 600x600 return EfficientNet(width_coefficient=2.0, depth_coefficient=3.1, dropout_rate=0.5, num_classes=num_classes)
12,752
37.645455
102
py
HIWL
HIWL-main/scheme/model_dieleman.py
import torch.nn as nn import torch class Dieleman(nn.Module): def __init__(self, num_classes=1000): super(Dieleman, self).__init__() self.features = nn.Sequential( nn.Conv2d(3, 32, kernel_size=6, bias=True), # input[3, 224, 224] output[48, 55, 55] nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # output[48, 27, 27] nn.Conv2d(32, 64, kernel_size=5, bias=True), # output[128, 27, 27] nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # output[128, 13, 13] nn.Conv2d(64, 128, kernel_size=3, bias=True), # output[192, 13, 13] nn.ReLU(inplace=True), nn.Conv2d(128, 128, kernel_size=3, bias=True), # output[192, 13, 13] nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # output[128, 6, 6] ) self.classifier = nn.Sequential( nn.Dropout(p=0.5), nn.Linear(2 * 2 * 128, 2048, bias=True), nn.ReLU(inplace=True), nn.Dropout(p=0.5), nn.Linear(2048, 2048, bias=True), nn.ReLU(inplace=True), nn.Linear(2048, num_classes ,bias=True), ) # if init_weights: # self._initialize_weights() def forward(self, x): x = self.features(x) x = torch.flatten(x, start_dim=1) x = self.classifier(x) return x
1,519
37.974359
97
py
HIWL
HIWL-main/scheme/train_efficientnet.py
import os import argparse import sys import copy import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_efficientnet import efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3 from my_dataset import MyDataSet, returnDataset from utils import read_split_data, train_one_epoch, evaluateall, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/b1', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") img_size = {"B0": 224, "B1": 240, "B2": 260, "B3": 300, "B4": 380, "B5": 456, "B6": 528, "B7": 600} num_model = args.num_model print(args) print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/') tb_writer2 = SummaryWriter('classes2/{}'.format(num_model)) tb_writer4 = SummaryWriter('classes4/{}'.format(num_model)) if num_model == 'B0': create_model = efficientnet_b0 if num_model == 'B1': create_model = efficientnet_b1 if num_model == 'B2': create_model = efficientnet_b2 if num_model == 'B3': create_model = efficientnet_b3 if os.path.exists("./weights/{}".format(num_model)) is False: os.makedirs("./weights/{}".format(num_model)) if os.path.exists("./log") is False: os.makedirs("./log") if os.path.exists("./predicts/{}".format(num_model)) is False: os.makedirs("./predicts/{}".format(num_model)) sys.stdout = Logger(filename='./log/efficientnet-{}'.format(num_model), stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5) data_transform = { "train": 
transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(img_size[num_model], scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(img_size[num_model]), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5) # 权重采样,定义每个类别采样的权重 target = train_dataset2.images_class class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)]) weight = 1. / class_sample_count samples_weight = np.array([weight[t] for t in target]) samples_weight = torch.from_numpy(samples_weight) samples_weight = samples_weight.double() sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight)) # 在DataLoader的时候传入采样器即可 batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) val_loader5 = torch.utils.data.DataLoader(val_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) test_loader5 = torch.utils.data.DataLoader(test_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader4 = torch.utils.data.DataLoader(train_dataset4, batch_size=batch_size, pin_memory=True, shuffle=True, num_workers=nw) val_loader4 = torch.utils.data.DataLoader(val_dataset4, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader2 = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, pin_memory=False, shuffle=False, 
sampler=sampler, num_workers=nw) val_loader2 = torch.utils.data.DataLoader(val_dataset2, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=nw) # 如果存在预训练权重则载入 model4 = create_model(num_classes=args.num_classes4).to(device) model2 = create_model(num_classes=args.num_classes2).to(device) tags = ["train_loss", "val_loss", "accuracy", "learning_rate"]#需要画图的指标 #加载权重4 if args.weights4 != "": if os.path.exists(args.weights4): weights4_dict = torch.load(args.weights4, map_location=device) load_weights4_dict = {k: v for k, v in weights4_dict.items() if model4.state_dict()[k].numel() == v.numel()} print(model4.load_state_dict(load_weights4_dict, strict=False)) else: raise FileNotFoundError("not found weights4 file: {}".format(args.weights4)) pg4 = [p for p in model4.parameters() if p.requires_grad] # optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4) optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4) scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0) # lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine # scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4) # 加载权重2 if args.weights2 != "": if os.path.exists(args.weights2): weights2_dict = torch.load(args.weights2, map_location=device) load_weights2_dict = {k: v for k, v in weights2_dict.items() if model2.state_dict()[k].numel() == v.numel()} print(model2.load_state_dict(load_weights2_dict, strict=False)) else: raise FileNotFoundError("not found weights2 file: {}".format(args.weights2)) pg2 = [p for p in model2.parameters() if p.requires_grad] # optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4) optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4) scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0) # lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine # 
scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2) best_acc_4 = [0, 0, 0] # 精度前三 best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)] best_acc_2 = [0, 0, 0] best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)] acc_combine_best = 0 acc_combine_best_index = 0 patience = 25 #20个epoch内验证精度不下降 early_stopping4 = EarlyStopping(patience, verbose=True) early_stopping2 = EarlyStopping(patience, verbose=True) for epoch in range(1000): mean_loss4 = train_one_epoch('model4', model=model4, optimizer=optimizer4, data_loader=train_loader4, device=device, epoch=epoch) scheduler4.step() # validate acc_4, val_loss_4 = evaluate(model=model4, data_loader=val_loader4, device=device) if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]: best_acc_4[2] = acc_4 best_model4[2] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model)) elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = acc_4 best_model4[1] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model)) elif acc_4 > best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = best_acc_4[0] best_model4[1] = best_model4[0] best_acc_4[0] = acc_4 best_model4[0] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model)) tb_writer4.add_scalar(tags[0], mean_loss4, epoch) tb_writer4.add_scalar(tags[1], val_loss_4, epoch) tb_writer4.add_scalar(tags[2], acc_4, epoch) tb_writer4.add_scalar(tags[3], optimizer4.param_groups[0]["lr"], epoch) print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0])) early_stopping4(mean_loss4, acc_4, model4)#or va_lloss_4 if early_stopping4.early_stop: print("epoch = {}".format(epoch)) break for epoch in range(1000): mean_loss2 = train_one_epoch('model2', 
model=model2, optimizer=optimizer2, data_loader=train_loader2, device=device, epoch=epoch) scheduler2.step() acc_2, val_loss_2 = evaluate(model=model2, data_loader=val_loader2, device=device) if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]: best_acc_2[2] = acc_2 best_model2[2] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model)) elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = acc_2 best_model2[1] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model)) elif acc_2 > best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = best_acc_2[0] best_model2[1] = best_model2[0] best_acc_2[0] = acc_2 best_model2[0] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model)) tb_writer2.add_scalar(tags[0], mean_loss2, epoch) tb_writer2.add_scalar(tags[1], val_loss_2, epoch) tb_writer2.add_scalar(tags[2], acc_2, epoch) tb_writer2.add_scalar(tags[3], optimizer2.param_groups[0]["lr"], epoch) print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0])) early_stopping2(mean_loss2, acc_2, model2)#or va_lloss_2 if early_stopping2.early_stop: print("epoch = {}".format(epoch)) break #验证总的 for i in range(len(best_model2)): for j in range(len(best_model4)): acc_combine, pred_all = evaluateall( model2=best_model2[i], model4=best_model4[j], test_loader5=val_loader5, device=device) torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j)) if acc_combine_best < acc_combine: acc_combine_best = acc_combine acc_combine_best_index = (i, j) test_acc, test_pred_all = evaluateall( model2=best_model2[acc_combine_best_index[0]], model4=best_model4[acc_combine_best_index[1]], test_loader5=test_loader5, device=device) torch.save(test_pred_all, 
'./predicts/{}/test_pred_all.pth'.format(num_model)) print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path5', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--batch-size', type=int, default=16) parser.add_argument('--num_classes4', type=int, default=4) parser.add_argument('--epochs4', type=int, default=100) parser.add_argument('--lr4', type=float, default=0.005) parser.add_argument('--lrf4', type=float, default=0.05) parser.add_argument('--weights4', type=str, default='F:/pretrain pth/efficientnetb1.pth', help='initial weights4 path') parser.add_argument('--num_classes2', type=int, default=2) parser.add_argument('--epochs2', type=int, default=150) parser.add_argument('--lr2', type=float, default=0.005) parser.add_argument('--lrf2', type=float, default=0.01) parser.add_argument('--weights2', type=str, default='F:/pretrain pth/efficientnetb1.pth', help='initial weights2 path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') parser.add_argument('--num-model', default='B1', help='B0-B7') opt = parser.parse_args() main(opt)
14,958
46.640127
240
py
HIWL
HIWL-main/scheme/train_dieleman.py
import os import math import argparse import sys import copy import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_dieleman import Dieleman as create_model from my_dataset import MyDataSet, returnDataset from utils import read_split_data, train_one_epoch, evaluateall, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/dieleman', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") num_model = 'dieleman' print(args) print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/') tb_writer2 = SummaryWriter('classes2/{}'.format(num_model)) tb_writer4 = SummaryWriter('classes4/{}'.format(num_model)) if os.path.exists('./weights/{}'.format(num_model)) is False: os.makedirs('./weights/{}'.format(num_model)) if os.path.exists('./log') is False: os.makedirs('./log') if os.path.exists('./predicts/{}'.format(num_model)) is False: os.makedirs('./predicts/{}'.format(num_model)) sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data( args.data_path5) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.Resize((45, 45)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.Resize((45, 45)), 
transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset( data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5) # 权重采样,定义每个类别采样的权重 target = train_dataset2.images_class class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)]) weight = 1. / class_sample_count samples_weight = np.array([weight[t] for t in target]) samples_weight = torch.from_numpy(samples_weight) samples_weight = samples_weight.double() sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight)) # 在DataLoader的时候传入采样器即可 batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) val_loader5 = torch.utils.data.DataLoader(val_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) test_loader5 = torch.utils.data.DataLoader(test_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader4 = torch.utils.data.DataLoader(train_dataset4, batch_size=batch_size, pin_memory=True, shuffle=True, num_workers=nw) val_loader4 = torch.utils.data.DataLoader(val_dataset4, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader2 = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, pin_memory=False, shuffle=False, sampler=sampler, num_workers=nw) val_loader2 = torch.utils.data.DataLoader(val_dataset2, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=nw) # 如果存在预训练权重则载入 model4 = create_model(num_classes=args.num_classes4).to(device) model2 = create_model(num_classes=args.num_classes2).to(device) tags = ["loss", "accuracy", "learning_rate"] # 需要画图的指标 # 加载权重4 if args.weights4 != "": if 
os.path.exists(args.weights4): weights4_dict = torch.load(args.weights4, map_location=device) load_weights4_dict = {k: v for k, v in weights4_dict.items() if model4.state_dict()[k].numel() == v.numel()} print(model4.load_state_dict(load_weights4_dict, strict=False)) else: raise FileNotFoundError("not found weights4 file: {}".format(args.weights4)) pg4 = [p for p in model4.parameters() if p.requires_grad] optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4) scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4, T_max=150, eta_min=0) # 加载权重2 if args.weights2 != "": if os.path.exists(args.weights2): weights2_dict = torch.load(args.weights2, map_location=device) load_weights2_dict = {k: v for k, v in weights2_dict.items() if model2.state_dict()[k].numel() == v.numel()} print(model2.load_state_dict(load_weights2_dict, strict=False)) else: raise FileNotFoundError("not found weights2 file: {}".format(args.weights2)) pg2 = [p for p in model2.parameters() if p.requires_grad] optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4) scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0) best_acc_4 = [0, 0, 0] # 精度前三 best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)] best_acc_2 = [0, 0, 0] best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)] acc_combine_best = 0 acc_combine_best_index = 0 patience = 25 # 25个epoch内验证精度不下降 early_stopping4 = EarlyStopping(patience, verbose=True) early_stopping2 = EarlyStopping(patience, verbose=True) for epoch in range(1000): mean_loss4 = train_one_epoch('model4', model=model4, optimizer=optimizer4, data_loader=train_loader4, device=device, epoch=epoch) scheduler4.step() # validate acc_4, val_loss_4 = evaluate(model=model4, data_loader=val_loader4, device=device) if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]: best_acc_4[2] = acc_4 best_model4[2] = copy.deepcopy(model4) 
torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model)) elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = acc_4 best_model4[1] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model)) elif acc_4 > best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = best_acc_4[0] best_model4[1] = best_model4[0] best_acc_4[0] = acc_4 best_model4[0] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model)) tb_writer4.add_scalar(tags[0], mean_loss4, epoch) tb_writer4.add_scalar(tags[1], acc_4, epoch) tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch) print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0])) early_stopping4(val_loss_4, acc_4, model4) if early_stopping4.early_stop: print("epoch = {}".format(epoch)) break for epoch in range(1000): mean_loss2 = train_one_epoch('model2', model=model2, optimizer=optimizer2, data_loader=train_loader2, device=device, epoch=epoch) scheduler2.step() acc_2, val_loss_2 = evaluate(model=model2, data_loader=val_loader2, device=device) if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]: best_acc_2[2] = acc_2 best_model2[2] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model)) elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = acc_2 best_model2[1] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model)) elif acc_2 > best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = best_acc_2[0] best_model2[1] = best_model2[0] best_acc_2[0] = acc_2 best_model2[0] = copy.deepcopy(model2) torch.save(model2.state_dict(), 
"./weights/{}/best_model2_0.pth".format(num_model)) tb_writer2.add_scalar(tags[0], mean_loss2, epoch) tb_writer2.add_scalar(tags[1], acc_2, epoch) tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch) print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0])) early_stopping2(val_loss_2, acc_2, model2) if early_stopping2.early_stop: print("epoch = {}".format(epoch)) break # 验证总的 for i in range(len(best_model2)): for j in range(len(best_model4)): acc_combine, pred_all = evaluateall( model2=best_model2[i], model4=best_model4[j], test_loader5=val_loader5, device=device) torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j)) if acc_combine_best < acc_combine: acc_combine_best = acc_combine acc_combine_best_index = (i, j) test_acc, test_pred_all = evaluateall( model2=best_model2[acc_combine_best_index[0]], model4=best_model4[acc_combine_best_index[1]], test_loader5=test_loader5, device=device) torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model)) print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path5', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--batch-size', type=int, default=64) parser.add_argument('--num_classes4', type=int, default=4) parser.add_argument('--epochs4', type=int, default=100) parser.add_argument('--lr4', type=float, default=0.005) parser.add_argument('--lrf4', type=float, default=0.05) parser.add_argument('--weights4', type=str, default='', help='initial weights4 path') parser.add_argument('--num_classes2', type=int, default=2) parser.add_argument('--epochs2', type=int, default=150) parser.add_argument('--lr2', type=float, default=0.005) parser.add_argument('--lrf2', type=float, default=0.01) parser.add_argument('--weights2', type=str, default='', help='initial weights2 
path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
13,800
47.424561
138
py
HIWL
HIWL-main/scheme/train_vit.py
import os import math import argparse import sys import copy import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_vit import vit_base_patch16_224_in21k as create_model from my_dataset import MyDataSet, returnDataset from utils import read_split_data, train_one_epoch, evaluateall, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/vit', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") num_model='vit' print(args) print('Start Tensorboard with "tensorboard --logdir=.", view at http://localhost:6006/') tb_writer2 = SummaryWriter('classes2/{}'.format(num_model)) tb_writer4 = SummaryWriter('classes4/{}'.format(num_model)) if os.path.exists("./weights/vit") is False: os.makedirs("./weights/vit") if os.path.exists("./log") is False: os.makedirs("./log") if os.path.exists("./predicts/vit") is False: os.makedirs("./predicts/vit") sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5 = read_split_data(args.data_path5) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(256), transforms.Resize(224), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} 
val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2 = returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5) # 权重采样,定义每个类别采样的权重 target = train_dataset2.images_class class_sample_count = np.array([len(np.where(target == t)[0]) for t in np.unique(target)]) weight = 1. / class_sample_count samples_weight = np.array([weight[t] for t in target]) samples_weight = torch.from_numpy(samples_weight) samples_weight = samples_weight.double() sampler = torch.utils.data.WeightedRandomSampler(samples_weight, len(samples_weight)) # 在DataLoader的时候传入采样器即可 batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) val_loader5 = torch.utils.data.DataLoader(val_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) test_loader5 = torch.utils.data.DataLoader(test_dataset5, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader4 = torch.utils.data.DataLoader(train_dataset4, batch_size=batch_size, pin_memory=True, shuffle=True, num_workers=nw) val_loader4 = torch.utils.data.DataLoader(val_dataset4, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw) train_loader2 = torch.utils.data.DataLoader(train_dataset2, batch_size=batch_size, pin_memory=False, shuffle=False, sampler=sampler, num_workers=nw) val_loader2 = torch.utils.data.DataLoader(val_dataset2, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=nw) # 如果存在预训练权重则载入 model4 = create_model(num_classes=args.num_classes4).to(device) model2 = create_model(num_classes=args.num_classes2).to(device) tags = ["loss", "accuracy", "learning_rate"]#需要画图的指标 #加载权重4 if args.weights4 != "": if os.path.exists(args.weights4): weights4_dict = torch.load(args.weights4, map_location=device) 
load_weights4_dict = {k: v for k, v in weights4_dict.items() if model4.state_dict()[k].numel() == v.numel()} print(model4.load_state_dict(load_weights4_dict, strict=False)) else: raise FileNotFoundError("not found weights4 file: {}".format(args.weights4)) pg4 = [p for p in model4.parameters() if p.requires_grad] # optimizer4 = optim.Adam(pg4, lr=args.lr4, weight_decay=1E-4) optimizer4 = optim.SGD(pg4, lr=args.lr4, momentum=0.9, weight_decay=1E-4) scheduler4 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer4 , T_max=150, eta_min=0) # lf4 = lambda x: ((1 + math.cos(x * math.pi / args.epochs4)) / 2) * (1 - args.lrf4) + args.lrf4 # cosine # scheduler4 = lr_scheduler.LambdaLR(optimizer4, lr_lambda=lf4) # 加载权重2 if args.weights2 != "": if os.path.exists(args.weights2): weights2_dict = torch.load(args.weights2, map_location=device) load_weights2_dict = {k: v for k, v in weights2_dict.items() if model2.state_dict()[k].numel() == v.numel()} print(model2.load_state_dict(load_weights2_dict, strict=False)) else: raise FileNotFoundError("not found weights2 file: {}".format(args.weights2)) pg2 = [p for p in model2.parameters() if p.requires_grad] # optimizer2 = optim.Adam(pg2, lr=args.lr2, weight_decay=1E-4) optimizer2 = optim.SGD(pg2, lr=args.lr2, momentum=0.9, weight_decay=1E-4) scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer2, T_max=150, eta_min=0) # lf2 = lambda x: ((1 + math.cos(x * math.pi / args.epochs2)) / 2) * (1 - args.lrf2) + args.lrf2 # cosine # scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf2) best_acc_4 = [0, 0, 0] # 精度前三 best_model4 = [copy.deepcopy(model4), copy.deepcopy(model4), copy.deepcopy(model4)] best_acc_2 = [0, 0, 0] best_model2 = [copy.deepcopy(model2), copy.deepcopy(model2), copy.deepcopy(model2)] acc_combine_best = 0 acc_combine_best_index = 0 patience = 25 #25个epoch内验证精度不下降 early_stopping4 = EarlyStopping(patience, verbose=True) early_stopping2 = EarlyStopping(patience, verbose=True) for epoch in range(1000): 
mean_loss4 = train_one_epoch('model4', model=model4, optimizer=optimizer4, data_loader=train_loader4, device=device, epoch=epoch) scheduler4.step() # validate acc_4, val_loss_4 = evaluate(model=model4, data_loader=val_loader4, device=device) if acc_4 > best_acc_4[2] and acc_4 < best_acc_4[1]: best_acc_4[2] = acc_4 best_model4[2] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_2.pth".format(num_model)) elif acc_4 > best_acc_4[1] and acc_4 < best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = acc_4 best_model4[1] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_1.pth".format(num_model)) elif acc_4 > best_acc_4[0]: best_acc_4[2] = best_acc_4[1] best_model4[2] = best_model4[1] best_acc_4[1] = best_acc_4[0] best_model4[1] = best_model4[0] best_acc_4[0] = acc_4 best_model4[0] = copy.deepcopy(model4) torch.save(model4.state_dict(), "./weights/{}/best_model4_0.pth".format(num_model)) tb_writer4.add_scalar(tags[0], mean_loss4, epoch) tb_writer4.add_scalar(tags[1], acc_4, epoch) tb_writer4.add_scalar(tags[2], optimizer4.param_groups[0]["lr"], epoch) print("epoch {}, acc4: {}, best_acc_4: {}".format(epoch, acc_4, best_acc_4[0])) early_stopping4(val_loss_4, acc_4, model4) if early_stopping4.early_stop: print("epoch = {}".format(epoch)) break for epoch in range(1000): mean_loss2 = train_one_epoch('model2', model=model2, optimizer=optimizer2, data_loader=train_loader2, device=device, epoch=epoch) scheduler2.step() acc_2, val_loss_2 = evaluate(model=model2, data_loader=val_loader2, device=device) if acc_2 > best_acc_2[2] and acc_2 < best_acc_2[1]: best_acc_2[2] = acc_2 best_model2[2] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_2.pth".format(num_model)) elif acc_2 > best_acc_2[1] and acc_2 < best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = acc_2 best_model2[1] = copy.deepcopy(model2) 
torch.save(model2.state_dict(), "./weights/{}/best_model2_1.pth".format(num_model)) elif acc_2 > best_acc_2[0]: best_acc_2[2] = best_acc_2[1] best_model2[2] = best_model2[1] best_acc_2[1] = best_acc_2[0] best_model2[1] = best_model2[0] best_acc_2[0] = acc_2 best_model2[0] = copy.deepcopy(model2) torch.save(model2.state_dict(), "./weights/{}/best_model2_0.pth".format(num_model)) tb_writer2.add_scalar(tags[0], mean_loss2, epoch) tb_writer2.add_scalar(tags[1], acc_2, epoch) tb_writer2.add_scalar(tags[2], optimizer2.param_groups[0]["lr"], epoch) print("epoch {}, acc2: {}, best_acc_2: {}".format(epoch, acc_2, best_acc_2[0])) early_stopping2(val_loss_2, acc_2, model2) if early_stopping2.early_stop: print("epoch = {}".format(epoch)) break #验证总的 for i in range(len(best_model2)): for j in range(len(best_model4)): acc_combine, pred_all = evaluateall( model2=best_model2[i], model4=best_model4[j], test_loader5=val_loader5, device=device) torch.save(pred_all, './predicts/{}/pred_all-{}-{}.pth'.format(num_model, i, j)) if acc_combine_best < acc_combine: acc_combine_best = acc_combine acc_combine_best_index = (i, j) test_acc, test_pred_all = evaluateall( model2=best_model2[acc_combine_best_index[0]], model4=best_model4[acc_combine_best_index[1]], test_loader5=test_loader5, device=device) torch.save(test_pred_all, './predicts/{}/test_pred_all.pth'.format(num_model)) print("acc_combine_best: {}, acc_combine_best_index: {}, test_acc: {}".format(acc_combine_best, acc_combine_best_index, test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path5', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--batch-size', type=int, default=2) parser.add_argument('--num_classes4', type=int, default=4) parser.add_argument('--epochs4', type=int, default=100) parser.add_argument('--lr4', type=float, default=0.005) parser.add_argument('--lrf4', type=float, default=0.05) parser.add_argument('--weights4', type=str, default=r"F:\pretrain 
pth\jx_vit_base_patch16_224_in21k-e5005f0a.pth", help='initial weights4 path') parser.add_argument('--num_classes2', type=int, default=2) parser.add_argument('--epochs2', type=int, default=150) parser.add_argument('--lr2', type=float, default=0.005) parser.add_argument('--lrf2', type=float, default=0.01) parser.add_argument('--weights2', type=str, default=r"F:\pretrain pth\jx_vit_base_patch16_224_in21k-e5005f0a.pth", help='initial weights2 path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
14,169
48.372822
240
py
HIWL
HIWL-main/scheme/my_dataset.py
from PIL import Image import torch from torch.utils.data import Dataset import numpy as np import copy class MyDataSet(Dataset): """自定义数据集""" def __init__(self, images_path: list, images_class: list, transform=None): self.images_path = images_path self.images_class = images_class self.transform = transform def __len__(self): return len(self.images_path) def __getitem__(self, item): img = Image.open(self.images_path[item]) # RGB为彩色图片,L为灰度图片 if img.mode != 'RGB': raise ValueError("image: {} isn't RGB mode.".format(self.images_path[item])) label = self.images_class[item] if self.transform is not None: img = self.transform(img) return img, label @staticmethod def collate_fn(batch): images, labels = tuple(zip(*batch)) images = torch.stack(images, dim=0) labels = torch.as_tensor(labels) return images, labels def returnDataset(data_transform, train_images_path5, train_images_label5, val_images_path5, val_images_label5, test_images_path5, test_images_label5): #分层学习标签调整 # 两类合成一类后剩下4类[0,1,3,4],调整标签为[0,1,2,3] train_images_path4 = copy.deepcopy(train_images_path5) train_images_label4 = copy.deepcopy(train_images_label5) val_images_path4 = copy.deepcopy(val_images_path5) val_images_label4 = copy.deepcopy(val_images_label5) for i in range(len(train_images_label4)): # 标签2改1,3改2,4改3 if (train_images_label4[i] == 2): train_images_label4[i] = 1 if (train_images_label4[i] == 3): train_images_label4[i] = 2 if (train_images_label4[i] == 4): train_images_label4[i] = 3 for i in range(len(val_images_label4)): # 标签2改1,3改2,4改3 if (val_images_label4[i] == 2): val_images_label4[i] = 1 if (val_images_label4[i] == 3): val_images_label4[i] = 2 if (val_images_label4[i] == 4): val_images_label4[i] = 3 # 雪茄类和侧向类的训练测试集[1,2],调整标签为[0,1] train_images_label2 = ( np.array(copy.deepcopy(train_images_label5))[ (np.array(train_images_label5) == 1) | (np.array(train_images_label5) == 2)]-1).tolist() # 全部减1 val_images_label2 = ( np.array(copy.deepcopy(val_images_label5))[ (np.array(val_images_label5) == 1) | 
(np.array(val_images_label5) == 2)]-1).tolist() # 全部减1 train_images_path2 = ( np.array(copy.deepcopy(train_images_path5))[ (np.array(train_images_label5) == 1) | (np.array(train_images_label5) == 2)]).tolist() val_images_path2 = ( np.array(copy.deepcopy(val_images_path5))[ (np.array(val_images_label5) == 1) | (np.array(val_images_label5) == 2)]).tolist() # 实例化数据集 val_dataset5 = MyDataSet(images_path=val_images_path5, images_class=val_images_label5, transform=data_transform["test"]) test_dataset5 = MyDataSet(images_path=test_images_path5, images_class=test_images_label5, transform=data_transform["test"]) train_dataset4 = MyDataSet(images_path=train_images_path4, images_class=train_images_label4, transform=data_transform["train"]) val_dataset4 = MyDataSet(images_path=val_images_path4, images_class=val_images_label4, transform=data_transform["test"]) train_dataset2 = MyDataSet(images_path=train_images_path2, images_class=train_images_label2, transform=data_transform["train"]) val_dataset2 = MyDataSet(images_path=val_images_path2, images_class=val_images_label2, transform=data_transform["test"]) return val_dataset5, test_dataset5, train_dataset4, val_dataset4, train_dataset2, val_dataset2
4,059
40.010101
151
py
HIWL
HIWL-main/noscheme/model_vit.py
""" original code from rwightman: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py """ from functools import partial from collections import OrderedDict import torch import torch.nn as nn def drop_path(x, drop_prob: float = 0., training: bool = False): if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) random_tensor.floor_() # binarize output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None): super().__init__() img_size = (img_size, img_size) patch_size = (patch_size, patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): B, C, H, W = x.shape assert H == self.img_size[0] and W == self.img_size[1], \ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
# flatten: [B, C, H, W] -> [B, C, HW] # transpose: [B, C, HW] -> [B, HW, C] x = self.proj(x).flatten(2).transpose(1, 2) x = self.norm(x) return x class Attention(nn.Module): def __init__(self, dim, # 输入token的dim num_heads=8, qkv_bias=False, qk_scale=None, attn_drop_ratio=0., proj_drop_ratio=0.): super(Attention, self).__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop_ratio) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop_ratio) def forward(self, x): # [batch_size, num_patches + 1, total_embed_dim] B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # [batch_size, num_heads, num_patches + 1, embed_dim_per_head] q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_ratio=0., attn_drop_ratio=0., drop_path_ratio=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super(Block, self).__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, 
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class VisionTransformer(nn.Module): def __init__(self, img_size=224, patch_size=16, in_c=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, representation_size=None, distilled=False, drop_ratio=0., attn_drop_ratio=0., drop_path_ratio=0., embed_layer=PatchEmbed, norm_layer=None, act_layer=None): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_c (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True qk_scale (float): override default qk scale of head_dim ** -0.5 if set representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set distilled (bool): model includes a distillation token and head as in DeiT models drop_ratio (float): dropout rate attn_drop_ratio (float): attention dropout rate drop_path_ratio (float): stochastic depth rate embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer """ super(VisionTransformer, self).__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models 
self.num_tokens = 2 if distilled else 1 norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) self.pos_drop = nn.Dropout(p=drop_ratio) dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_ratio=drop_ratio, attn_drop_ratio=attn_drop_ratio, drop_path_ratio=dpr[i], norm_layer=norm_layer, act_layer=act_layer) for i in range(depth) ]) self.norm = norm_layer(embed_dim) # Representation layer if representation_size and not distilled: self.has_logits = True self.num_features = representation_size self.pre_logits = nn.Sequential(OrderedDict([ ("fc", nn.Linear(embed_dim, representation_size)), ("act", nn.Tanh()) ])) else: self.has_logits = False self.pre_logits = nn.Identity() # Classifier head(s) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = None if distilled: self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() # Weight init nn.init.trunc_normal_(self.pos_embed, std=0.02) if self.dist_token is not None: nn.init.trunc_normal_(self.dist_token, std=0.02) nn.init.trunc_normal_(self.cls_token, std=0.02) self.apply(_init_vit_weights) def forward_features(self, x): # [B, C, H, W] -> [B, num_patches, embed_dim] x = self.patch_embed(x) # [B, 196, 768] # [1, 1, 768] -> [B, 1, 768] cls_token = self.cls_token.expand(x.shape[0], -1, -1) if self.dist_token is None: x = torch.cat((cls_token, 
x), dim=1) # [B, 197, 768] else: x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) x = self.pos_drop(x + self.pos_embed) x = self.blocks(x) x = self.norm(x) if self.dist_token is None: return self.pre_logits(x[:, 0]) else: return x[:, 0], x[:, 1] def forward(self, x): x = self.forward_features(x) if self.head_dist is not None: x, x_dist = self.head(x[0]), self.head_dist(x[1]) if self.training and not torch.jit.is_scripting(): # during inference, return the average of both classifier predictions return x, x_dist else: return (x + x_dist) / 2 else: x = self.head(x) return x def _init_vit_weights(m): """ ViT weight initialization :param m: module """ if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.01) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode="fan_out") if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.LayerNorm): nn.init.zeros_(m.bias) nn.init.ones_(m.weight) def vit_base_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=768 if has_logits else None, num_classes=num_classes) return model def vit_base_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=768 if has_logits else None, num_classes=num_classes) return model def vit_large_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=16, embed_dim=1024, depth=24, num_heads=16, representation_size=1024 if has_logits else None, num_classes=num_classes) return model def vit_large_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=32, embed_dim=1024, depth=24, num_heads=16, 
representation_size=1024 if has_logits else None, num_classes=num_classes) return model def vit_huge_patch14_224_in21k(num_classes: int = 21843, has_logits: bool = True): model = VisionTransformer(img_size=224, patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280 if has_logits else None, num_classes=num_classes) return model
13,049
38.189189
118
py
HIWL
HIWL-main/noscheme/model_googlenet.py
import torch.nn as nn import torch import torch.nn.functional as F class GoogLeNet(nn.Module): def __init__(self, num_classes=1000, aux_logits=True, init_weights=False): super(GoogLeNet, self).__init__() self.aux_logits = aux_logits self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3) self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.conv2 = BasicConv2d(64, 64, kernel_size=1) self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1) self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32) self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64) self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64) self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64) self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64) self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64) self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128) self.maxpool4 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128) self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128) if self.aux_logits: self.aux1 = InceptionAux(512, num_classes) self.aux2 = InceptionAux(528, num_classes) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.dropout = nn.Dropout(0.4) self.fc = nn.Linear(1024, num_classes) if init_weights: self._initialize_weights() def forward(self, x): # N x 3 x 224 x 224 x = self.conv1(x) # N x 64 x 112 x 112 x = self.maxpool1(x) # N x 64 x 56 x 56 x = self.conv2(x) # N x 64 x 56 x 56 x = self.conv3(x) # N x 192 x 56 x 56 x = self.maxpool2(x) # N x 192 x 28 x 28 x = self.inception3a(x) # N x 256 x 28 x 28 x = self.inception3b(x) # N x 480 x 28 x 28 x = self.maxpool3(x) # N x 480 x 14 x 14 x = self.inception4a(x) # N x 512 x 14 x 14 if self.training and self.aux_logits: # eval model lose this layer aux1 = self.aux1(x) x = self.inception4b(x) # N x 512 x 14 x 14 x 
= self.inception4c(x) # N x 512 x 14 x 14 x = self.inception4d(x) # N x 528 x 14 x 14 if self.training and self.aux_logits: # eval model lose this layer aux2 = self.aux2(x) x = self.inception4e(x) # N x 832 x 14 x 14 x = self.maxpool4(x) # N x 832 x 7 x 7 x = self.inception5a(x) # N x 832 x 7 x 7 x = self.inception5b(x) # N x 1024 x 7 x 7 x = self.avgpool(x) # N x 1024 x 1 x 1 x = torch.flatten(x, 1) # N x 1024 x = self.dropout(x) x = self.fc(x) # N x 1000 (num_classes) if self.training and self.aux_logits: # eval model lose this layer return x, aux2, aux1 return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) class Inception(nn.Module): def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj): super(Inception, self).__init__() self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1) self.branch2 = nn.Sequential( BasicConv2d(in_channels, ch3x3red, kernel_size=1), BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1) # 保证输出大小等于输入大小 ) self.branch3 = nn.Sequential( BasicConv2d(in_channels, ch5x5red, kernel_size=1), BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2) # 保证输出大小等于输入大小 ) self.branch4 = nn.Sequential( nn.MaxPool2d(kernel_size=3, stride=1, padding=1), BasicConv2d(in_channels, pool_proj, kernel_size=1) ) def forward(self, x): branch1 = self.branch1(x) branch2 = self.branch2(x) branch3 = self.branch3(x) branch4 = self.branch4(x) outputs = [branch1, branch2, branch3, branch4] return torch.cat(outputs, 1) class InceptionAux(nn.Module): def __init__(self, in_channels, num_classes): super(InceptionAux, self).__init__() self.averagePool = nn.AvgPool2d(kernel_size=5, stride=3) self.conv = BasicConv2d(in_channels, 128, kernel_size=1) # output[batch, 128, 4, 4] self.fc1 = 
nn.Linear(2048, 1024) self.fc2 = nn.Linear(1024, num_classes) def forward(self, x): # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14 x = self.averagePool(x) # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4 x = self.conv(x) # N x 128 x 4 x 4 x = torch.flatten(x, 1) x = F.dropout(x, 0.5, training=self.training) # N x 2048 x = F.relu(self.fc1(x), inplace=True) x = F.dropout(x, 0.5, training=self.training) # N x 1024 x = self.fc2(x) # N x num_classes return x class BasicConv2d(nn.Module): def __init__(self, in_channels, out_channels, **kwargs): super(BasicConv2d, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, **kwargs) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.relu(x) return x
5,919
33.219653
92
py
HIWL
HIWL-main/noscheme/model_vgg.py
import torch.nn as nn import torch class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=False): super(VGG, self).__init__() self.features = features self.classifier = nn.Sequential( nn.Linear(512*7*7, 4096), nn.ReLU(True), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(p=0.5), nn.Linear(4096, num_classes) ) if init_weights: self._initialize_weights() def forward(self, x): # N x 3 x 224 x 224 x = self.features(x) # N x 512 x 7 x 7 x = torch.flatten(x, start_dim=1) # N x 512*7*7 x = self.classifier(x) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) # nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def make_features(cfg: list): layers = [] in_channels = 3 for v in cfg: if v == "M": layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) layers += [conv2d, nn.ReLU(True)] in_channels = v return nn.Sequential(*layers) cfgs = { 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], } def vgg(model_name="vgg16", **kwargs): assert model_name in cfgs, "Warning: model number {} not in cfgs dict!".format(model_name) cfg = cfgs[model_name] model = VGG(make_features(cfg), **kwargs) return model
2,287
31.685714
117
py
HIWL
HIWL-main/noscheme/model_resnet.py
import torch.nn as nn import torch class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(out_channel) self.relu = nn.ReLU() self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(out_channel) self.downsample = downsample def forward(self, x): identity = x if self.downsample is not None: identity = self.downsample(x) out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out += identity out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_channel, out_channel, stride=1, downsample=None, groups=1, width_per_group=64): super(Bottleneck, self).__init__() width = int(out_channel * (width_per_group / 64.)) * groups self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width, kernel_size=1, stride=1, bias=False) # squeeze channels self.bn1 = nn.BatchNorm2d(width) # ----------------------------------------- self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, groups=groups, kernel_size=3, stride=stride, bias=False, padding=1) self.bn2 = nn.BatchNorm2d(width) # ----------------------------------------- self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel*self.expansion, kernel_size=1, stride=1, bias=False) # unsqueeze channels self.bn3 = nn.BatchNorm2d(out_channel*self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample def forward(self, x): identity = x if self.downsample is not None: identity = self.downsample(x) out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out += 
identity out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, blocks_num, num_classes=1000, include_top=True, groups=1, width_per_group=64): super(ResNet, self).__init__() self.include_top = include_top self.in_channel = 64 self.groups = groups self.width_per_group = width_per_group self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(self.in_channel) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, blocks_num[0]) self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2) self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2) self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2) if self.include_top: self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # output size = (1, 1) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') def _make_layer(self, block, channel, block_num, stride=1): downsample = None if stride != 1 or self.in_channel != channel * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(channel * block.expansion)) layers = [] layers.append(block(self.in_channel, channel, downsample=downsample, stride=stride, groups=self.groups, width_per_group=self.width_per_group)) self.in_channel = channel * block.expansion for _ in range(1, block_num): layers.append(block(self.in_channel, channel, groups=self.groups, width_per_group=self.width_per_group)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) if self.include_top: x = self.avgpool(x) x = torch.flatten(x, 1) x = self.fc(x) 
return x def resnet34(num_classes=1000, include_top=True): return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top) def resnet50(num_classes=1000, include_top=True): return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top) def resnet101(num_classes=1000, include_top=True): return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top) def resnext50_32x4d(num_classes=1000, include_top=True): groups = 32 width_per_group = 4 return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top, groups=groups, width_per_group=width_per_group) def resnext101_32x8d(num_classes=1000, include_top=True): groups = 32 width_per_group = 8 return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top, groups=groups, width_per_group=width_per_group)
6,446
32.931579
112
py
HIWL
HIWL-main/noscheme/B1_noh.py
import os import math import argparse import sys import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler import numpy as np from model_efficientnet import efficientnet_b1 as create_model from my_dataset import MyDataSet from utils import read_split_data, train_one_epoch, evaluate_noh class Logger(object): def __init__(self, filename='./log/effnoh', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass sys.stdout = Logger(stream=sys.stdout) def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") print(args) print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/') tb_writer = SummaryWriter() img_size = {"B0": 224, "B1": 240, "B2": 260, "B3": 300, "B4": 380, "B5": 456, "B6": 528, "B7": 600} num_model = "B1_noh" if os.path.exists("./weights/{}".format(num_model)) is False: os.makedirs("./weights/{}".format(num_model)) if os.path.exists("../scheme/log") is False: os.makedirs("./log") train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(img_size['B1'], scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(img_size['B1']), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} # 实例化训练数据集 train_data_set = MyDataSet(images_path=train_images_path, images_class=train_images_label, transform=data_transform["train"]) # 实例化验证数据集 
val_data_set = MyDataSet(images_path=val_images_path, images_class=val_images_label, transform=data_transform["test"]) # 实例化测试数据集 test_data_set = MyDataSet(images_path=test_images_path, images_class=test_images_label, transform=data_transform["test"]) batch_size = args.batch_size nw = 0 print('Using {} dataloader workers every process'.format(nw)) train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=nw, collate_fn=train_data_set.collate_fn) val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) test_loader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) # 如果存在预训练权重则载入 model = create_model(num_classes=args.num_classes).to(device) # model.load_state_dict(torch.load(args.weights, map_location=device)) if os.path.exists(args.weights): weights_dict = torch.load(args.weights, map_location=device) load_weights_dict = {k: v for k, v in weights_dict.items() if model.state_dict()[k].numel() == v.numel()} print(model.load_state_dict(load_weights_dict, strict=False)) pg = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4) lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) best_val_acc =0.0 best_model=None for epoch in range(args.epochs): # train mean_loss = train_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader, device=device, epoch=epoch) scheduler.step() # validate sum_num, pred_all = evaluate_noh(model=model, data_loader=val_loader, device=device) val_acc = sum_num / len(val_data_set) torch.save(model.state_dict(), "./weights/B1_noh/model-{}.pth".format(epoch)) if val_acc > best_val_acc: best_model = 
model best_val_acc=val_acc torch.save(pred_all, 'b1_noh_val_811.pth') print("[epoch {}] accuracy: {}".format(epoch, round(val_acc, 3))) tags = ["loss", "accuracy", "learning_rate"] tb_writer.add_scalar(tags[0], mean_loss, epoch) tb_writer.add_scalar(tags[1], val_acc, epoch) tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch) # test sum_num, pred_all = evaluate_noh(model=model, data_loader=test_loader, device=device) test_acc = sum_num / len(test_data_set) torch.save(pred_all, 'b1_noh_test_811.pth') print('test_acc:{}'.format(test_acc)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path', type=str, default=r"F:/dataSet/clean gzdata") parser.add_argument('--num_classes', type=int, default=5) parser.add_argument('--epochs', type=int, default=150) parser.add_argument('--batch-size', type=int, default=16) parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--lrf', type=float, default=0.005) parser.add_argument('--weights', type=str, default=r'', help='initial weights path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') opt = parser.parse_args() main(opt)
7,347
42.47929
147
py
HIWL
HIWL-main/noscheme/train_resnet26.py
import os import math import argparse import sys import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler from model_resnet26 import resnet26 as create_model from my_dataset import MyDataSet from utils import read_split_data, train_one_epoch, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/resnet26', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") num_model='resnet26' print(args) print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/') tb_writer = SummaryWriter() if os.path.exists("./weights/resnet26") is False: os.makedirs("./weights/resnet26") if os.path.exists("./log") is False: os.makedirs("./log") sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data( args.data_path) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.Resize((64, 64)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.Resize((64, 64)), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} # 实例化训练数据集 train_data_set = MyDataSet(images_path=train_images_path, images_class=train_images_label, transform=data_transform["train"]) # 实例化验证数据集 val_data_set = 
MyDataSet(images_path=val_images_path, images_class=val_images_label, transform=data_transform["test"]) # 实例化测试数据集 test_data_set = MyDataSet(images_path=test_images_path, images_class=test_images_label, transform=data_transform["test"]) batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=nw, collate_fn=train_data_set.collate_fn) val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) test_loader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) # 如果存在预训练权重则载入 patience = 25 #25个epoch内验证精度不下降 early_stopping= EarlyStopping(patience, verbose=True) model = create_model(num_classes=args.num_classes).to(device) if os.path.exists(args.weights): weights_dict = torch.load(args.weights, map_location=device) load_weights_dict = {k: v for k, v in weights_dict.items() if model.state_dict()[k].numel() == v.numel()} print(model.load_state_dict(load_weights_dict, strict=False)) pg = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4) lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) best_val_acc =0.0 best_model=None for epoch in range(1000): # train mean_loss = train_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader, device=device, epoch=epoch) scheduler.step() # validate val_acc, val_loss= evaluate(model=model, data_loader=val_loader, device=device) tags = ["loss", "accuracy", "learning_rate"] 
tb_writer.add_scalar(tags[0], mean_loss, epoch) tb_writer.add_scalar(tags[1], val_acc, epoch) tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch) if val_acc > best_val_acc: best_model = model best_val_acc=val_acc torch.save(best_model.state_dict(), "./weights/resnet26/bestmodel-{}.pth".format(num_model, epoch)) print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3))) early_stopping(val_loss, val_acc, model) if early_stopping.early_stop: print("epoch = {}".format(epoch)) break # torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch)) # test test_acc, sum_num = evaluate(model=best_model, data_loader=test_loader, device=device) print("best test accuracy: {}".format(round(test_acc, 3))) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--num_classes', type=int, default=5) parser.add_argument('--epochs', type=int, default=150) parser.add_argument('--batch-size', type=int, default=64) parser.add_argument('--lr', type=float, default=0.005) parser.add_argument('--lrf', type=float, default=0.01) parser.add_argument('--weights', type=str, default='', help='initial weights path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
7,618
44.35119
132
py
HIWL
HIWL-main/noscheme/train_resnet.py
import os import math import argparse import sys import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler from model_resnet import resnet34, resnet50, resnet101 from my_dataset import MyDataSet from utils import read_split_data, train_one_epoch, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/resnet', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") print(args) print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/') tb_writer = SummaryWriter() num_model = args.num_model if num_model == 'resnet34': create_model = resnet34 if num_model == 'resnet50': create_model = resnet50 if num_model == 'resnet101': create_model = resnet101 if os.path.exists("./weights/{}".format(num_model)) is False: os.makedirs("./weights/{}".format(num_model)) if os.path.exists("./log") is False: os.makedirs("./log") sys.stdout = Logger(filename='./log/{}'.format(num_model), stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} # 实例化训练数据集 train_data_set = 
MyDataSet(images_path=train_images_path, images_class=train_images_label, transform=data_transform["train"]) # 实例化验证数据集 val_data_set = MyDataSet(images_path=val_images_path, images_class=val_images_label, transform=data_transform["test"]) # 实例化测试数据集 test_data_set = MyDataSet(images_path=test_images_path, images_class=test_images_label, transform=data_transform["test"]) batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=nw, collate_fn=train_data_set.collate_fn) val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) test_loader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) # 如果存在预训练权重则载入 patience = 25 #25个epoch内验证精度不下降 early_stopping= EarlyStopping(patience, verbose=True) model = create_model(num_classes=args.num_classes).to(device) if os.path.exists(args.weights): weights_dict = torch.load(args.weights, map_location=device) load_weights_dict = {k: v for k, v in weights_dict.items() if model.state_dict()[k].numel() == v.numel()} print(model.load_state_dict(load_weights_dict, strict=False)) pg = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4) lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) best_val_acc =0.0 best_model=None for epoch in range(1000): # train mean_loss = train_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader, device=device, epoch=epoch) scheduler.step() # validate 
val_acc, val_loss= evaluate(model=model, data_loader=val_loader, device=device) tags = ["loss", "accuracy", "learning_rate"] tb_writer.add_scalar(tags[0], mean_loss, epoch) tb_writer.add_scalar(tags[1], val_acc, epoch) tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch) if val_acc > best_val_acc: best_model = model best_val_acc=val_acc torch.save(best_model.state_dict(), "./weights/{}/bestmodel-{}.pth".format(num_model, epoch)) print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3))) early_stopping(val_loss, val_acc, model) if early_stopping.early_stop: print("epoch = {}".format(epoch)) break # torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch)) # test test_acc, sum_num = evaluate(model=best_model, data_loader=test_loader, device=device) print("best test accuracy: {}".format(round(test_acc, 3))) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--num_classes', type=int, default=5) parser.add_argument('--epochs', type=int, default=150) parser.add_argument('--batch-size', type=int, default=24) parser.add_argument('--lr', type=float, default=0.01) parser.add_argument('--lrf', type=float, default=0.01) parser.add_argument('--weights', type=str, default='', help='initial weights path') parser.add_argument('--freeze-layers', type=bool, default=False) parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') parser.add_argument('--num-model', default='resnet50', help='resnet34-101') opt = parser.parse_args() main(opt)
7,886
44.327586
147
py
HIWL
HIWL-main/noscheme/utils.py
import os import sys import json import pickle import random import numpy as np import torch from tqdm import tqdm import matplotlib.pyplot as plt import torch.nn as nn import torch.nn.functional as F # 标签平滑嵌入到loss函数 class SMLoss(nn.Module): ''' Cross Entropy Loss with label smoothing ''' def __init__(self, label_smooth=None, class_num=137): super().__init__() self.label_smooth = label_smooth self.class_num = class_num def forward(self, pred, target): ''' Args: pred: prediction of model output [N, M] target: ground truth of sampler [N] ''' eps = 1e-12 if self.label_smooth is not None: # cross entropy loss with label smoothing logprobs = F.log_softmax(pred, dim=1) # softmax + log target = F.one_hot(target, self.class_num) # 转换成one-hot # label smoothing # 实现 target = torch.clamp(target.float(), min=self.label_smooth / (self.class_num - 1), max=1.0 - self.label_smooth) loss = -1 * torch.sum(target * logprobs, 1) else: # standard cross entropy loss loss = -1. * pred.gather(1, target.unsqueeze(-1)) + torch.log(torch.exp(pred + eps).sum(dim=1)) return loss.mean() def read_split_data(root: str, val_rate: float = 0.1, test_rate: float = 0.1): split_rate = val_rate + test_rate random.seed(0) # 保证随机结果可复现 assert os.path.exists(root), "dataset root: {} does not exist.".format(root) # 遍历文件夹,一个文件夹对应一个类别 galaxy_class = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))] split_galaxy_class = [] # 存储切分后类别 for i in galaxy_class: split_galaxy_class.append(i + '_train') split_galaxy_class.append(i + '_test') # 排序,保证顺序一致 galaxy_class.sort() # 生成类别名称以及对应的数字索引 class_indices = dict((k, v) for v, k in enumerate(galaxy_class)) json_str = json.dumps(dict((val, key) for key, val in class_indices.items()), indent=4) with open('class_indices.json', 'w') as json_file: json_file.write(json_str) train_images_path = [] # 存储训练集的所有图片路径 train_images_label = [] # 存储训练集图片对应索引信息 val_images_path = [] # 存储验证集的所有图片路径 val_images_label = [] # 存储验证集图片对应索引信息 test_images_path = [] # 
存储测试集的所有图片路径 test_images_label = [] # 存储测试集图片对应索引信息 every_class_num = [] # 存储每个类别的样本总数 split_every_class_num = [] # 存储每个类别的切分后样本总数 supported = [".jpg", ".JPG", ".png", ".PNG"] # 支持的文件后缀类型 # 遍历每个文件夹下的文件 for cla in galaxy_class: cla_path = os.path.join(root, cla) sample_count = 0 # 遍历获取supported支持的所有文件路径 images = [os.path.join(root, cla, i) for i in os.listdir(cla_path) if os.path.splitext(i)[-1] in supported] # 获取该类别对应的索引 image_class = class_indices[cla] # 记录该类别的样本数量 every_class_num.append(len(images)) # 按比例随机采样验证样本 split_path = random.sample(images, round(len(images) * split_rate)) for img_path in images: if img_path in split_path: # 如果该路径在采样的集合样本中则存入划分集 sample_count += 1 if sample_count <= len(split_path)*(val_rate/split_rate): val_images_path.append(img_path) val_images_label.append(image_class) else: test_images_path.append(img_path) test_images_label.append(image_class) else: # 否则存入训练集 train_images_path.append(img_path) train_images_label.append(image_class) print("{} images were found in the dataset.".format(sum(every_class_num))) print("{} images for training.".format(len(train_images_path))) print("{} images for val.".format(len(val_images_path))) print("{} images for test.".format(len(test_images_path))) plot_image = False if plot_image: # 绘制每种类别个数柱状图 plt.bar(range(len(split_every_class_num)), split_every_class_num, align='center') # 将横坐标0,1,2,3,4替换为相应的类别名称 plt.xticks(range(len(split_every_class_num)),split_galaxy_class) # 在柱状图上添加数值标签 for i, v in enumerate(split_every_class_num): plt.text(x=i, y=v + 5, s=str(v), ha='center') # 设置x坐标 plt.xlabel('image class') # 设置y坐标 plt.ylabel('number of images') # 设置柱状图的标题 plt.title('galaxy class distribution') plt.show() return train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label def plot_data_loader_image(data_loader): batch_size = data_loader.batch_size plot_num = min(batch_size, 4) json_path = './class_indices.json' assert os.path.exists(json_path), json_path + " 
does not exist." json_file = open(json_path, 'r') class_indices = json.load(json_file) for data in data_loader: images, labels = data for i in range(plot_num): # [C, H, W] -> [H, W, C] img = images[i].numpy().transpose(1, 2, 0) # 反Normalize操作 img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255 label = labels[i].item() plt.subplot(1, plot_num, i+1) plt.xlabel(class_indices[str(label)]) plt.xticks([]) # 去掉x轴的刻度 plt.yticks([]) # 去掉y轴的刻度 plt.imshow(img.astype('uint8')) plt.show() def write_pickle(list_info: list, file_name: str): with open(file_name, 'wb') as f: pickle.dump(list_info, f) def read_pickle(file_name: str) -> list: with open(file_name, 'rb') as f: info_list = pickle.load(f) return info_list def train_one_epoch(model, optimizer, data_loader, device, epoch): model.train() # loss_function = torch.nn.CrossEntropyLoss() loss_function = SMLoss(label_smooth=0.05, class_num=5) mean_loss = torch.zeros(1).to(device) optimizer.zero_grad() data_loader = tqdm(data_loader) for step, data in enumerate(data_loader): images, labels = data pred = model(images.to(device)) loss = loss_function(pred, labels.to(device)) loss.backward() mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses data_loader.desc = "[epoch {}] mean loss {}".format(epoch, round(mean_loss.item(), 3)) if not torch.isfinite(loss): print('WARNING: non-finite loss, ending training ', loss) sys.exit(1) optimizer.step() optimizer.zero_grad() print('traloss: {}'.format(mean_loss.item())) return mean_loss.item() def train_googlenet_one_epoch(model, optimizer, data_loader, device, epoch): model.train() loss_function = torch.nn.CrossEntropyLoss() # 标签平滑 # loss_function = SMLoss(label_smooth=0.05, class_num=int(str.split('l')[1])) mean_loss = torch.zeros(1).to(device) optimizer.zero_grad() data_loader = tqdm(data_loader) for step, data in enumerate(data_loader): images, labels = data logits, aux_logits2, aux_logits1 = model(images.to(device)) loss0 = loss_function(logits, 
labels.to(device)) loss1 = loss_function(aux_logits1, labels.to(device)) loss2 = loss_function(aux_logits2, labels.to(device)) loss = loss0 + loss1 * 0.3 + loss2 * 0.3 loss.backward() mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses data_loader.desc = "[epoch {}] mean loss {}".format(epoch, round(mean_loss.item(), 3)) if not torch.isfinite(loss): print('WARNING: non-finite loss, ending training ', loss) sys.exit(1) optimizer.step() optimizer.zero_grad() print('traloss: {}'.format(mean_loss.item())) return mean_loss.item() @torch.no_grad() def evaluate(model, data_loader, device): model.eval() # 验证样本总个数 total_num = len(data_loader.dataset) # 用于存储预测正确的样本个数 loss_function = torch.nn.CrossEntropyLoss() sum_num = torch.zeros(1).to(device) data_loader = tqdm(data_loader) mean_loss = torch.zeros(1).to(device) for step, data in enumerate(data_loader): images, labels = data pred = model(images.to(device)) loss = loss_function(pred, labels.to(device)) pred_label = torch.max(pred, dim=1)[1] sum_num += torch.eq(pred_label, labels.to(device)).sum() mean_loss = (mean_loss * step + loss.detach()) / (step + 1) # update mean losses acc = sum_num.item() / total_num return acc, mean_loss.item() @torch.no_grad() def evaluate_noh(model, data_loader, device): model.eval() mean_loss = torch.zeros(1).to(device) loss_function = torch.nn.CrossEntropyLoss() # 用于存储预测正确的样本个数 sum_num = torch.zeros(1).to(device) pred_all = torch.tensor([]).to(device) data_loader = tqdm(data_loader) for step, data in enumerate(data_loader): images, labels = data pred = model(images.to(device)) loss = loss_function(pred, labels.to(device)) mean_loss = (mean_loss * step + loss.detach()) / (step + 1) pred = torch.max(pred, dim=1)[1] sum_num += torch.eq(pred, labels.to(device)).sum() pred_all = torch.cat([pred_all, pred.to(device)], dim=0) print('valloss: {}'.format(mean_loss.item())) return sum_num.item(), pred_all
9,482
34.384328
120
py
HIWL
HIWL-main/noscheme/train_googlenet.py
import os import math import argparse import sys import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler from model_googlenet import GoogLeNet as create_model from my_dataset import MyDataSet from utils import read_split_data, train_googlenet_one_epoch, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/googlenet', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") num_model = 'googlenet' print(args) print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/') tb_writer = SummaryWriter() if os.path.exists("./weights/googlenet") is False: os.makedirs("./weights/googlenet") if os.path.exists("./log") is False: os.makedirs("./log") sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} # 实例化训练数据集 train_data_set = MyDataSet(images_path=train_images_path, images_class=train_images_label, transform=data_transform["train"]) # 实例化验证数据集 val_data_set = MyDataSet(images_path=val_images_path, images_class=val_images_label, 
transform=data_transform["test"]) # 实例化测试数据集 test_data_set = MyDataSet(images_path=test_images_path, images_class=test_images_label, transform=data_transform["test"]) batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=nw, collate_fn=train_data_set.collate_fn) val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) test_loader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) # 如果存在预训练权重则载入 patience = 25 #25个epoch内验证精度不下降 early_stopping= EarlyStopping(patience, verbose=True) model = create_model(num_classes=args.num_classes).to(device) if os.path.exists(args.weights): weights_dict = torch.load(args.weights, map_location=device) load_weights_dict = {k: v for k, v in weights_dict.items() if model.state_dict()[k].numel() == v.numel()} print(model.load_state_dict(load_weights_dict, strict=False)) pg = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4) lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) best_val_acc =0.0 best_model=None for epoch in range(1000): # train mean_loss = train_googlenet_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader, device=device, epoch=epoch) scheduler.step() # validate val_acc, val_loss= evaluate(model=model, data_loader=val_loader, device=device) tags = ["loss", "accuracy", "learning_rate"] tb_writer.add_scalar(tags[0], mean_loss, epoch) 
tb_writer.add_scalar(tags[1], val_acc, epoch) tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch) if val_acc > best_val_acc: best_model = model best_val_acc=val_acc torch.save(best_model.state_dict(), "./weights/{}/bestmodel-{}.pth".format(num_model, epoch)) print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3))) early_stopping(val_loss, val_acc, model) if early_stopping.early_stop: print("epoch = {}".format(epoch)) break # torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch)) # test test_acc, sum_num = evaluate(model=best_model, data_loader=test_loader, device=device) print("best test accuracy: {}".format(round(test_acc, 3))) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--num_classes', type=int, default=5) parser.add_argument('--epochs', type=int, default=150) parser.add_argument('--batch-size', type=int, default=24) parser.add_argument('--lr', type=float, default=0.01) parser.add_argument('--lrf', type=float, default=0.01) parser.add_argument('--weights', type=str, default='', help='initial weights path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
7,500
44.460606
147
py
HIWL
HIWL-main/noscheme/train_vgg.py
import os import math import argparse import sys import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler from model_vgg import vgg as create_model from my_dataset import MyDataSet from utils import read_split_data, train_one_epoch, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/vgg', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") print(args) print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/') tb_writer = SummaryWriter() if os.path.exists("./weights/vgg") is False: os.makedirs("./weights/vgg") if os.path.exists("./log") is False: os.makedirs("./log") sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25)), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(256), transforms.Resize(224), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} # 实例化训练数据集 train_data_set = MyDataSet(images_path=train_images_path, images_class=train_images_label, transform=data_transform["train"]) # 实例化验证数据集 val_data_set = MyDataSet(images_path=val_images_path, images_class=val_images_label, transform=data_transform["test"]) # 实例化测试数据集 
test_data_set = MyDataSet(images_path=test_images_path, images_class=test_images_label, transform=data_transform["test"]) batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=nw, collate_fn=train_data_set.collate_fn) val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) test_loader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) # 如果存在预训练权重则载入 patience = 25 #25个epoch内验证精度不下降 early_stopping= EarlyStopping(patience, verbose=True) model = create_model(num_classes=args.num_classes).to(device) if os.path.exists(args.weights): weights_dict = torch.load(args.weights, map_location=device) load_weights_dict = {k: v for k, v in weights_dict.items() if model.state_dict()[k].numel() == v.numel()} print(model.load_state_dict(load_weights_dict, strict=False)) pg = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4) lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) best_val_acc =0.0 best_model=None for epoch in range(1000): # train mean_loss = train_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader, device=device, epoch=epoch) scheduler.step() # validate val_acc, val_loss= evaluate(model=model, data_loader=val_loader, device=device) tags = ["loss", "accuracy", "learning_rate"] tb_writer.add_scalar(tags[0], mean_loss, epoch) tb_writer.add_scalar(tags[1], val_acc, epoch) tb_writer.add_scalar(tags[2], 
optimizer.param_groups[0]["lr"], epoch) if val_acc > best_val_acc: best_model = model best_val_acc=val_acc torch.save(best_model.state_dict(), "./weights/vgg/bestmodel-{}.pth".format(epoch)) print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3))) early_stopping(val_loss, val_acc, model) if early_stopping.early_stop: print("epoch = {}".format(epoch)) break # torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch)) # test test_acc, sum_num = evaluate(model=best_model, data_loader=test_loader, device=device) print("best test accuracy: {}".format(round(test_acc, 3))) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--num_classes', type=int, default=5) parser.add_argument('--epochs', type=int, default=150) parser.add_argument('--batch-size', type=int, default=16) parser.add_argument('--lr', type=float, default=0.001) parser.add_argument('--lrf', type=float, default=0.01) parser.add_argument('--data-path', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--weights', type=str, default='', help='initial weights path') parser.add_argument('--freeze-layers', type=bool, default=False) parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
7,542
43.370588
147
py
HIWL
HIWL-main/noscheme/model_resnet26.py
import torch.nn as nn import torch #源tf码中全局池化前有bn,不同深度的先对输入进行bn-relu再变成shortcut,同深度shortcut直接对输入下采样(maxpooling k=1*1 strid=s) class BasicBlock(nn.Module): def __init__(self, m, k=2, dropoutrate=0.2, istop : bool = False,isbottom : bool = False): super(BasicBlock, self).__init__() self.in_channel = m*k * 2 self.out_channel = m*k * 4 self.istop=istop self.isbottom=isbottom if self.istop: self.in_channel = 64 self.conv1 = nn.Conv2d(in_channels=self.in_channel, out_channels=m*k, kernel_size=1, stride=1, padding=0) self.conv2 = nn.Conv2d(in_channels=m*k, out_channels=m*k, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(in_channels=m*k, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0) self.conv4 = nn.Conv2d(in_channels=self.out_channel, out_channels=m*k, kernel_size=1, stride=1, padding=0) self.conv5 = nn.Conv2d(in_channels=m*k, out_channels=m*k, kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(in_channels=m*k, out_channels=self.out_channel, kernel_size=1, stride=2, padding=0) self.convshortcut1= nn.Conv2d(in_channels=self.in_channel, out_channels=self.out_channel, kernel_size=1,#raise dimension padding=0, stride=1) self.convshortcut2 = nn.MaxPool2d(kernel_size=2,stride=2)#downsample self.bninc = nn.BatchNorm2d(self.in_channel) self.bnmk = nn.BatchNorm2d(m*k) self.bnoutc = nn.BatchNorm2d(self.out_channel) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=dropoutrate) if self.isbottom: self.conv6 = nn.Conv2d(in_channels=m * k, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0) def forward(self, x): #第一个块 # identity1 = self.bninc(x) out = self.bninc(x) out = self.relu(out) identity1 = out out = self.conv1(out) out = self.bnmk(out) out = self.relu(out) out = self.conv2(out) out = self.dropout(out) out = self.bnmk(out) out = self.relu(out) out = self.conv3(out) out += self.convshortcut1(identity1) #第二个块 # identity2 = self.bnoutc(out) identity2 = out out = self.bnoutc(out) out = self.relu(out) out = self.conv4(out) out = 
self.bnmk(out) out = self.relu(out) out = self.conv5(out) out = self.dropout(out) out = self.bnmk(out) out = self.relu(out) out = self.conv6(out) if self.isbottom: out += identity2 out = self.bnoutc(out) else: out += self.convshortcut2(identity2) return out class ResNet26(nn.Module): def __init__(self, block, mlist, # mlist=[32, 64, 128, 256], k, dropoutrate, num_classes ): super(ResNet26, self).__init__() self.pad=nn.ZeroPad2d(padding=(2, 3, 2, 3)) self.conv1x = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=6, stride=1) self.maxpool = nn.MaxPool2d(kernel_size=1, stride=2) self.conv2to5x = self._make_layer(block, mlist, k, dropoutrate) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # self.fc = nn.Linear(mlist[-1]*k*4, num_classes) self.fc = nn.Sequential( # nn.Dropout(p=0.3), nn.Conv2d(in_channels=mlist[-1]*k*4,out_channels=num_classes, kernel_size=1) ) def forward(self, x): out = self.pad(x) out = self.conv1x(out) out = self.maxpool(out) out = self.conv2to5x(out) out = self.avgpool(out) # out = torch.flatten(out, start_dim=1) out = self.fc(out) out = torch.flatten(out, start_dim=1, end_dim=3) return out def _make_layer(self, block, mlist, k, dropoutrate): layers = [] for i in range(len(mlist)): if i == 0: layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop= True, isbottom=False)) elif (i == len(mlist)-1): layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop=False, isbottom=True)) else: layers.append(block(m=mlist[i], k=k, dropoutrate=dropoutrate, istop=False, isbottom=False)) return nn.Sequential(*layers) def resnet26(block=BasicBlock, mlist=[64, 128, 256, 512], k=2, dropoutrate=0.35, num_classes=5): return ResNet26(block=block, mlist=mlist, k=k, dropoutrate=dropoutrate, num_classes=num_classes) #查看具体网络结构和参数 # from torchinfo import summary # summary(resnet26().cuda(),(16,3,64,64))
4,994
37.72093
128
py
HIWL
HIWL-main/noscheme/model_efficientnet.py
import math import copy from functools import partial from collections import OrderedDict from typing import Optional, Callable import torch import torch.nn as nn from torch import Tensor from torch.nn import functional as F def _make_divisible(ch, divisor=8, min_ch=None): if min_ch is None: min_ch = divisor new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_ch < 0.9 * ch: new_ch += divisor return new_ch class ConvBNActivation(nn.Sequential): def __init__(self, in_planes: int, out_planes: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, activation_layer: Optional[Callable[..., nn.Module]] = None): padding = (kernel_size - 1) // 2 if norm_layer is None: norm_layer = nn.BatchNorm2d if activation_layer is None: activation_layer = nn.SiLU # alias Swish (torch>=1.7) super(ConvBNActivation, self).__init__(nn.Conv2d(in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False), norm_layer(out_planes), activation_layer()) class SqueezeExcitation(nn.Module): def __init__(self, input_c: int, # block input channel expand_c: int, # block expand channel squeeze_factor: int = 4): super(SqueezeExcitation, self).__init__() squeeze_c = input_c // squeeze_factor self.fc1 = nn.Conv2d(expand_c, squeeze_c, 1) self.ac1 = nn.SiLU() # alias Swish self.fc2 = nn.Conv2d(squeeze_c, expand_c, 1) self.ac2 = nn.Sigmoid() def forward(self, x: Tensor) -> Tensor: scale = F.adaptive_avg_pool2d(x, output_size=(1, 1)) scale = self.fc1(scale) scale = self.ac1(scale) scale = self.fc2(scale) scale = self.ac2(scale) return scale * x class InvertedResidualConfig: # kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate def __init__(self, kernel: int, # 3 or 5 input_c: int, out_c: int, expanded_ratio: int, # 1 or 6 stride: int, # 1 or 2 use_se: bool, # True 
drop_rate: float, index: str, # 1a, 2a, 2b, ... width_coefficient: float): self.input_c = self.adjust_channels(input_c, width_coefficient) self.kernel = kernel self.expanded_c = self.input_c * expanded_ratio self.out_c = self.adjust_channels(out_c, width_coefficient) self.use_se = use_se self.stride = stride self.drop_rate = drop_rate self.index = index @staticmethod def adjust_channels(channels: int, width_coefficient: float): return _make_divisible(channels * width_coefficient, 8) class InvertedResidual(nn.Module): def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Module]): super(InvertedResidual, self).__init__() if cnf.stride not in [1, 2]: raise ValueError("illegal stride value.") self.use_res_connect = (cnf.stride == 1 and cnf.input_c == cnf.out_c) layers = OrderedDict() activation_layer = nn.SiLU # alias Swish # expand if cnf.expanded_c != cnf.input_c: layers.update({"expand_conv": ConvBNActivation(cnf.input_c, cnf.expanded_c, kernel_size=1, norm_layer=norm_layer, activation_layer=activation_layer)}) # depthwise layers.update({"dwconv": ConvBNActivation(cnf.expanded_c, cnf.expanded_c, kernel_size=cnf.kernel, stride=cnf.stride, groups=cnf.expanded_c, norm_layer=norm_layer, activation_layer=activation_layer)}) if cnf.use_se: layers.update({"se": SqueezeExcitation(cnf.input_c, cnf.expanded_c)}) # project layers.update({"project_conv": ConvBNActivation(cnf.expanded_c, cnf.out_c, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.Identity)}) self.block = nn.Sequential(layers) self.out_channels = cnf.out_c self.is_strided = cnf.stride > 1 # 只有在使用shortcut连接时才使用dropout层 if self.use_res_connect and cnf.drop_rate > 0: self.dropout = nn.Dropout2d(p=cnf.drop_rate, inplace=True) else: self.dropout = nn.Identity() def forward(self, x: Tensor) -> Tensor: result = self.block(x) result = self.dropout(result) if self.use_res_connect: result += x return result class EfficientNet(nn.Module): def __init__(self, width_coefficient: float, 
depth_coefficient: float, num_classes: int = 1000, dropout_rate: float = 0.2, drop_connect_rate: float = 0.2, block: Optional[Callable[..., nn.Module]] = None, norm_layer: Optional[Callable[..., nn.Module]] = None ): super(EfficientNet, self).__init__() # kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate, repeats default_cnf = [[3, 32, 16, 1, 1, True, drop_connect_rate, 1], [3, 16, 24, 6, 2, True, drop_connect_rate, 2], [5, 24, 40, 6, 2, True, drop_connect_rate, 2], [3, 40, 80, 6, 2, True, drop_connect_rate, 3], [5, 80, 112, 6, 1, True, drop_connect_rate, 3], [5, 112, 192, 6, 2, True, drop_connect_rate, 4], [3, 192, 320, 6, 1, True, drop_connect_rate, 1]] def round_repeats(repeats): """Round number of repeats based on depth multiplier.""" return int(math.ceil(depth_coefficient * repeats)) if block is None: block = InvertedResidual if norm_layer is None: norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.1) adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_coefficient=width_coefficient) # build inverted_residual_setting bneck_conf = partial(InvertedResidualConfig, width_coefficient=width_coefficient) b = 0 num_blocks = float(sum(round_repeats(i[-1]) for i in default_cnf)) inverted_residual_setting = [] for stage, args in enumerate(default_cnf): cnf = copy.copy(args) for i in range(round_repeats(cnf.pop(-1))): if i > 0: # strides equal 1 except first cnf cnf[-3] = 1 # strides cnf[1] = cnf[2] # input_channel equal output_channel cnf[-1] *= b / num_blocks # update dropout ratio index = str(stage + 1) + chr(i + 97) # 1a, 2a, 2b, ... 
inverted_residual_setting.append(bneck_conf(*cnf, index)) b += 1 # create layers layers = OrderedDict() # first conv layers.update({"stem_conv": ConvBNActivation(in_planes=3, out_planes=adjust_channels(32), kernel_size=3, stride=2, norm_layer=norm_layer)}) # building inverted residual blocks for cnf in inverted_residual_setting: layers.update({cnf.index: block(cnf, norm_layer)}) # build top last_conv_input_c = inverted_residual_setting[-1].out_c last_conv_output_c = adjust_channels(1280) layers.update({"top": ConvBNActivation(in_planes=last_conv_input_c, out_planes=last_conv_output_c, kernel_size=1, norm_layer=norm_layer)}) self.features = nn.Sequential(layers) self.avgpool = nn.AdaptiveAvgPool2d(1) classifier = [] if dropout_rate > 0: classifier.append(nn.Dropout(p=dropout_rate, inplace=True)) classifier.append(nn.Linear(last_conv_output_c, num_classes)) self.classifier = nn.Sequential(*classifier) # initial weights for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode="fan_out") if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.zeros_(m.bias) def _forward_impl(self, x: Tensor) -> Tensor: x = self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return x def forward(self, x: Tensor) -> Tensor: return self._forward_impl(x) def efficientnet_b0(num_classes=1000): # input image size 224x224 return EfficientNet(width_coefficient=1.0, depth_coefficient=1.0, dropout_rate=0.2, num_classes=num_classes) def efficientnet_b1(num_classes=1000): # input image size 240x240 return EfficientNet(width_coefficient=1.0, depth_coefficient=1.1, dropout_rate=0.2, num_classes=num_classes) def efficientnet_b2(num_classes=1000): # input image size 260x260 return EfficientNet(width_coefficient=1.1, depth_coefficient=1.2, dropout_rate=0.3, num_classes=num_classes) def 
efficientnet_b3(num_classes=1000): # input image size 300x300 return EfficientNet(width_coefficient=1.2, depth_coefficient=1.4, dropout_rate=0.3, num_classes=num_classes) def efficientnet_b4(num_classes=1000): # input image size 380x380 return EfficientNet(width_coefficient=1.4, depth_coefficient=1.8, dropout_rate=0.4, num_classes=num_classes) def efficientnet_b5(num_classes=1000): # input image size 456x456 return EfficientNet(width_coefficient=1.6, depth_coefficient=2.2, dropout_rate=0.4, num_classes=num_classes) def efficientnet_b6(num_classes=1000): # input image size 528x528 return EfficientNet(width_coefficient=1.8, depth_coefficient=2.6, dropout_rate=0.5, num_classes=num_classes) def efficientnet_b7(num_classes=1000): # input image size 600x600 return EfficientNet(width_coefficient=2.0, depth_coefficient=3.1, dropout_rate=0.5, num_classes=num_classes)
12,752
37.645455
102
py
HIWL
HIWL-main/noscheme/model_dieleman.py
import torch.nn as nn import torch class Dieleman(nn.Module): def __init__(self, num_classes=1000): super(Dieleman, self).__init__() self.features = nn.Sequential( nn.Conv2d(3, 32, kernel_size=6, bias=True), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(32, 64, kernel_size=5, bias=True), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(64, 128, kernel_size=3, bias=True), nn.ReLU(inplace=True), nn.Conv2d(128, 128, kernel_size=3, bias=True), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), ) self.classifier = nn.Sequential( nn.Dropout(p=0.5), nn.Linear(2 * 2 * 128, 2048, bias=True), nn.ReLU(inplace=True), nn.Dropout(p=0.5), nn.Linear(2048, 2048, bias=True), nn.ReLU(inplace=True), nn.Linear(2048, num_classes ,bias=True), ) # if init_weights: # self._initialize_weights() def forward(self, x): x = self.features(x) x = torch.flatten(x, start_dim=1) x = self.classifier(x) return x
1,269
31.564103
58
py
HIWL
HIWL-main/noscheme/train_efficientnet.py
import os import math import argparse import sys import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler from model_efficientnet import efficientnet_b0, efficientnet_b1, efficientnet_b2 from my_dataset import MyDataSet from utils import read_split_data, train_one_epoch, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/efficientnet', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") print(args) print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/') tb_writer = SummaryWriter() img_size = {"B0": 224, "B1": 240, "B2": 260, "B3": 300, "B4": 380, "B5": 456, "B6": 528, "B7": 600} num_model = args.num_model if num_model == 'B0': create_model = efficientnet_b0 if num_model == 'B1': create_model = efficientnet_b1 if num_model == 'B2': create_model = efficientnet_b2 if os.path.exists("./weights/{}".format(num_model)) is False: os.makedirs("./weights/{}".format(num_model)) if os.path.exists("./log") is False: os.makedirs("./log") sys.stdout = Logger(filename='./log/{}'.format(num_model), stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(img_size[num_model], scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": 
transforms.Compose([transforms.CenterCrop(img_size[num_model]), transforms.Resize(260), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} # 实例化训练数据集 train_data_set = MyDataSet(images_path=train_images_path, images_class=train_images_label, transform=data_transform["train"]) # 实例化验证数据集 val_data_set = MyDataSet(images_path=val_images_path, images_class=val_images_label, transform=data_transform["test"]) # 实例化测试数据集 test_data_set = MyDataSet(images_path=test_images_path, images_class=test_images_label, transform=data_transform["test"]) batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=nw, collate_fn=train_data_set.collate_fn) val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) test_loader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) # 如果存在预训练权重则载入 patience = 25 #25个epoch内验证精度不下降 early_stopping= EarlyStopping(patience, verbose=True) model = create_model(num_classes=args.num_classes).to(device) if os.path.exists(args.weights): weights_dict = torch.load(args.weights, map_location=device) load_weights_dict = {k: v for k, v in weights_dict.items() if model.state_dict()[k].numel() == v.numel()} print(model.load_state_dict(load_weights_dict, strict=False)) pg = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4) lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) 
best_val_acc =0.0 best_model=None for epoch in range(1000): # train mean_loss = train_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader, device=device, epoch=epoch) scheduler.step() # validate val_acc, val_loss= evaluate(model=model, data_loader=val_loader, device=device) tags = ["loss", "accuracy", "learning_rate"] tb_writer.add_scalar(tags[0], mean_loss, epoch) tb_writer.add_scalar(tags[1], val_acc, epoch) tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch) if val_acc > best_val_acc: best_model = model best_val_acc=val_acc torch.save(best_model.state_dict(), "./weights/{}/bestmodel-{}.pth".format(num_model, epoch)) print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3))) early_stopping(val_loss, val_acc, model) if early_stopping.early_stop: print("epoch = {}".format(epoch)) break # test test_acc, sum_num = evaluate(model=best_model, data_loader=test_loader, device=device) print("best test accuracy: {}".format(round(test_acc, 3))) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--num_classes', type=int, default=5) parser.add_argument('--epochs', type=int, default=150) parser.add_argument('--batch-size', type=int, default=16) parser.add_argument('--lr', type=float, default=0.01) parser.add_argument('--lrf', type=float, default=0.01) parser.add_argument('--weights', type=str, default=r'', help='initial weights path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') parser.add_argument('--num-model', default='B2', help='B0-B7') opt = parser.parse_args() main(opt)
8,040
42.939891
147
py
HIWL
HIWL-main/noscheme/train_dieleman.py
import os import math import argparse import sys import torch import torch.optim as optim from torch.utils.tensorboard import SummaryWriter from torchvision import transforms import torch.optim.lr_scheduler as lr_scheduler from model_dieleman import Dieleman as create_model from my_dataset import MyDataSet from utils import read_split_data, train_one_epoch, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/dieleman', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") num_model='dieleman' print(args) print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/') tb_writer = SummaryWriter() if os.path.exists("./weights/dieleman") is False: os.makedirs("./weights/dieleman") if os.path.exists("./log") is False: os.makedirs("./log") sys.stdout = Logger(stream=sys.stdout) # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25 )), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.Resize((45, 45)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.Resize((45, 45)), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} # 实例化训练数据集 train_data_set = MyDataSet(images_path=train_images_path, images_class=train_images_label, transform=data_transform["train"]) # 实例化验证数据集 val_data_set = 
MyDataSet(images_path=val_images_path, images_class=val_images_label, transform=data_transform["test"]) # 实例化测试数据集 test_data_set = MyDataSet(images_path=test_images_path, images_class=test_images_label, transform=data_transform["test"]) batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=nw, collate_fn=train_data_set.collate_fn) val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) test_loader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) # 如果存在预训练权重则载入 patience = 25 #25个epoch内验证精度不下降 early_stopping= EarlyStopping(patience, verbose=True) model = create_model(num_classes=args.num_classes).to(device) if os.path.exists(args.weights): weights_dict = torch.load(args.weights, map_location=device) load_weights_dict = {k: v for k, v in weights_dict.items() if model.state_dict()[k].numel() == v.numel()} print(model.load_state_dict(load_weights_dict, strict=False)) pg = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4) lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) best_val_acc =0.0 best_model=None for epoch in range(1000): # train mean_loss = train_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader, device=device, epoch=epoch) scheduler.step() # validate val_acc, val_loss= evaluate(model=model, data_loader=val_loader, device=device) tags = ["loss", "accuracy", "learning_rate"] 
tb_writer.add_scalar(tags[0], mean_loss, epoch) tb_writer.add_scalar(tags[1], val_acc, epoch) tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch) if val_acc > best_val_acc: best_model = model best_val_acc=val_acc torch.save(best_model.state_dict(), "./weights/{}/bestmodel-{}.pth".format(num_model, epoch)) print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3))) early_stopping(val_loss, val_acc, model) if early_stopping.early_stop: print("epoch = {}".format(epoch)) break # torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch)) # test test_acc, sum_num = evaluate(model=best_model, data_loader=test_loader, device=device) print("best test accuracy: {}".format(round(test_acc, 3))) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--num_classes', type=int, default=5) parser.add_argument('--epochs', type=int, default=150) parser.add_argument('--batch-size', type=int, default=64) parser.add_argument('--lr', type=float, default=0.01) parser.add_argument('--lrf', type=float, default=0.01) parser.add_argument('--weights', type=str, default='', help='initial weights path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
7,607
43.232558
147
py
HIWL
HIWL-main/noscheme/train_vit.py
import os import math import argparse import sys import torch import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler from torch.utils.tensorboard import SummaryWriter from torchvision import transforms from my_dataset import MyDataSet from model_vit import vit_base_patch16_224_in21k as create_model from utils import read_split_data, train_one_epoch, evaluate from pytorchtools import EarlyStopping class Logger(object): def __init__(self, filename='./log/vit', stream=sys.stdout): self.terminal = stream self.log = open(filename, 'w') def write(self, message): self.terminal.write(message) self.log.write(message) def flush(self): pass def main(args): device = torch.device(args.device if torch.cuda.is_available() else "cpu") if os.path.exists("./weights/vit") is False: os.makedirs("./weights/vit") if os.path.exists("./log") is False: os.makedirs("./log") sys.stdout = Logger(stream=sys.stdout) tb_writer = SummaryWriter() # 0,1,2,3,4 分别对应类别 中间星系,雪茄星系,侧向星系,圆形星系,漩涡星系 train_images_path, train_images_label, val_images_path, val_images_label, test_images_path, test_images_label = read_split_data(args.data_path) data_transform = { "train": transforms.Compose([transforms.CenterCrop(256), transforms.RandomRotation((-25, 25)), transforms.RandomResizedCrop(224, scale=(0.9, 1)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ]), "test": transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.046, 0.041, 0.030], [0.090, 0.075, 0.065]) ])} # 实例化训练数据集 train_data_set = MyDataSet(images_path=train_images_path, images_class=train_images_label, transform=data_transform["train"]) # 实例化验证数据集 val_data_set = MyDataSet(images_path=val_images_path, images_class=val_images_label, transform=data_transform["test"]) # 实例化测试数据集 test_data_set = MyDataSet(images_path=test_images_path, images_class=test_images_label, 
transform=data_transform["test"]) batch_size = args.batch_size nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers if args.os == 'windows': nw = 0 print('Using {} dataloader workers every process'.format(nw)) train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=nw, collate_fn=train_data_set.collate_fn) val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) test_loader = torch.utils.data.DataLoader(test_data_set, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=nw, collate_fn=val_data_set.collate_fn) # 如果存在预训练权重则载入 patience = 25 #25个epoch内验证精度不下降 early_stopping= EarlyStopping(patience, verbose=True) model = create_model(num_classes=args.num_classes).to(device) if os.path.exists(args.weights): weights_dict = torch.load(args.weights, map_location=device) load_weights_dict = {k: v for k, v in weights_dict.items() if model.state_dict()[k].numel() == v.numel()} print(model.load_state_dict(load_weights_dict, strict=False)) pg = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4) lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) best_val_acc =0.0 best_model=None for epoch in range(1000): # train mean_loss = train_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader, device=device, epoch=epoch) scheduler.step() # validate val_acc, val_loss= evaluate(model=model, data_loader=val_loader, device=device) tags = ["loss", "accuracy", "learning_rate"] tb_writer.add_scalar(tags[0], mean_loss, epoch) tb_writer.add_scalar(tags[1], val_acc, epoch) tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch) if val_acc > best_val_acc: best_model = model 
best_val_acc=val_acc torch.save(best_model.state_dict(), "./weights/vit/bestmodel-{}.pth".format(epoch)) print("[epoch {}] val_acc: {} best_acc:{}".format(epoch, round(val_acc, 3),round(best_val_acc, 3))) early_stopping(val_loss, val_acc, model) if early_stopping.early_stop: print("epoch = {}".format(epoch)) break # torch.save(model.state_dict(), "./weights/efficientnet-{}/model-{}.pth".format(num_model, epoch)) # test test_acc, sum_num = evaluate(model=best_model, data_loader=test_loader, device=device) print("best test accuracy: {}".format(round(test_acc, 3))) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data-path', type=str, default=r"F:\dataSet\clean gzdata") parser.add_argument('--num_classes', type=int, default=5) parser.add_argument('--epochs', type=int, default=150) parser.add_argument('--batch-size', type=int, default=16) parser.add_argument('--lr', type=float, default=0.001) parser.add_argument('--lrf', type=float, default=0.01) parser.add_argument('--weights', type=str, default='', help='initial weights path') parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') parser.add_argument('--os', default='windows', help='windows or linux') opt = parser.parse_args() main(opt)
7,325
42.868263
147
py
HIWL
HIWL-main/noscheme/my_dataset.py
from PIL import Image import torch from torch.utils.data import Dataset class MyDataSet(Dataset): """自定义数据集""" def __init__(self, images_path: list, images_class: list, transform=None): self.images_path = images_path self.images_class = images_class self.transform = transform def __len__(self): return len(self.images_path) def __getitem__(self, item): img = Image.open(self.images_path[item]) # RGB为彩色图片,L为灰度图片 if img.mode != 'RGB': raise ValueError("image: {} isn't RGB mode.".format(self.images_path[item])) label = self.images_class[item] if self.transform is not None: img = self.transform(img) return img, label @staticmethod def collate_fn(batch): images, labels = tuple(zip(*batch)) images = torch.stack(images, dim=0) labels = torch.as_tensor(labels) return images, labels
952
25.472222
88
py
HIWL
HIWL-main/noscheme/log/model_efficientnet.py
import math import copy from functools import partial from collections import OrderedDict from typing import Optional, Callable import torch import torch.nn as nn from torch import Tensor from torch.nn import functional as F def _make_divisible(ch, divisor=8, min_ch=None): if min_ch is None: min_ch = divisor new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_ch < 0.9 * ch: new_ch += divisor return new_ch class ConvBNActivation(nn.Sequential): def __init__(self, in_planes: int, out_planes: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None, activation_layer: Optional[Callable[..., nn.Module]] = None): padding = (kernel_size - 1) // 2 if norm_layer is None: norm_layer = nn.BatchNorm2d if activation_layer is None: activation_layer = nn.SiLU # alias Swish (torch>=1.7) super(ConvBNActivation, self).__init__(nn.Conv2d(in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False), norm_layer(out_planes), activation_layer()) class SqueezeExcitation(nn.Module): def __init__(self, input_c: int, # block input channel expand_c: int, # block expand channel squeeze_factor: int = 4): super(SqueezeExcitation, self).__init__() squeeze_c = input_c // squeeze_factor self.fc1 = nn.Conv2d(expand_c, squeeze_c, 1) self.ac1 = nn.SiLU() # alias Swish self.fc2 = nn.Conv2d(squeeze_c, expand_c, 1) self.ac2 = nn.Sigmoid() def forward(self, x: Tensor) -> Tensor: scale = F.adaptive_avg_pool2d(x, output_size=(1, 1)) scale = self.fc1(scale) scale = self.ac1(scale) scale = self.fc2(scale) scale = self.ac2(scale) return scale * x class InvertedResidualConfig: # kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate def __init__(self, kernel: int, # 3 or 5 input_c: int, out_c: int, expanded_ratio: int, # 1 or 6 stride: int, # 1 or 2 use_se: bool, # True 
drop_rate: float, index: str, # 1a, 2a, 2b, ... width_coefficient: float): self.input_c = self.adjust_channels(input_c, width_coefficient) self.kernel = kernel self.expanded_c = self.input_c * expanded_ratio self.out_c = self.adjust_channels(out_c, width_coefficient) self.use_se = use_se self.stride = stride self.drop_rate = drop_rate self.index = index @staticmethod def adjust_channels(channels: int, width_coefficient: float): return _make_divisible(channels * width_coefficient, 8) class InvertedResidual(nn.Module): def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Module]): super(InvertedResidual, self).__init__() if cnf.stride not in [1, 2]: raise ValueError("illegal stride value.") self.use_res_connect = (cnf.stride == 1 and cnf.input_c == cnf.out_c) layers = OrderedDict() activation_layer = nn.SiLU # alias Swish # expand if cnf.expanded_c != cnf.input_c: layers.update({"expand_conv": ConvBNActivation(cnf.input_c, cnf.expanded_c, kernel_size=1, norm_layer=norm_layer, activation_layer=activation_layer)}) # depthwise layers.update({"dwconv": ConvBNActivation(cnf.expanded_c, cnf.expanded_c, kernel_size=cnf.kernel, stride=cnf.stride, groups=cnf.expanded_c, norm_layer=norm_layer, activation_layer=activation_layer)}) if cnf.use_se: layers.update({"se": SqueezeExcitation(cnf.input_c, cnf.expanded_c)}) # project layers.update({"project_conv": ConvBNActivation(cnf.expanded_c, cnf.out_c, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.Identity)}) self.block = nn.Sequential(layers) self.out_channels = cnf.out_c self.is_strided = cnf.stride > 1 # 只有在使用shortcut连接时才使用dropout层 if self.use_res_connect and cnf.drop_rate > 0: self.dropout = nn.Dropout2d(p=cnf.drop_rate, inplace=True) else: self.dropout = nn.Identity() def forward(self, x: Tensor) -> Tensor: result = self.block(x) result = self.dropout(result) if self.use_res_connect: result += x return result class EfficientNet(nn.Module): def __init__(self, width_coefficient: float, 
depth_coefficient: float, num_classes: int = 1000, dropout_rate: float = 0.2, drop_connect_rate: float = 0.2, block: Optional[Callable[..., nn.Module]] = None, norm_layer: Optional[Callable[..., nn.Module]] = None ): super(EfficientNet, self).__init__() # kernel_size, in_channel, out_channel, exp_ratio, strides, use_SE, drop_connect_rate, repeats default_cnf = [[3, 32, 16, 1, 1, True, drop_connect_rate, 1], [3, 16, 24, 6, 2, True, drop_connect_rate, 2], [5, 24, 40, 6, 2, True, drop_connect_rate, 2], [3, 40, 80, 6, 2, True, drop_connect_rate, 3], [5, 80, 112, 6, 1, True, drop_connect_rate, 3], [5, 112, 192, 6, 2, True, drop_connect_rate, 4], [3, 192, 320, 6, 1, True, drop_connect_rate, 1]] def round_repeats(repeats): """Round number of repeats based on depth multiplier.""" return int(math.ceil(depth_coefficient * repeats)) if block is None: block = InvertedResidual if norm_layer is None: norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.1) adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_coefficient=width_coefficient) # build inverted_residual_setting bneck_conf = partial(InvertedResidualConfig, width_coefficient=width_coefficient) b = 0 num_blocks = float(sum(round_repeats(i[-1]) for i in default_cnf)) inverted_residual_setting = [] for stage, args in enumerate(default_cnf): cnf = copy.copy(args) for i in range(round_repeats(cnf.pop(-1))): if i > 0: # strides equal 1 except first cnf cnf[-3] = 1 # strides cnf[1] = cnf[2] # input_channel equal output_channel cnf[-1] *= b / num_blocks # update dropout ratio index = str(stage + 1) + chr(i + 97) # 1a, 2a, 2b, ... 
inverted_residual_setting.append(bneck_conf(*cnf, index)) b += 1 # create layers layers = OrderedDict() # first conv layers.update({"stem_conv": ConvBNActivation(in_planes=3, out_planes=adjust_channels(32), kernel_size=3, stride=2, norm_layer=norm_layer)}) # building inverted residual blocks for cnf in inverted_residual_setting: layers.update({cnf.index: block(cnf, norm_layer)}) # build top last_conv_input_c = inverted_residual_setting[-1].out_c last_conv_output_c = adjust_channels(1280) layers.update({"top": ConvBNActivation(in_planes=last_conv_input_c, out_planes=last_conv_output_c, kernel_size=1, norm_layer=norm_layer)}) self.features = nn.Sequential(layers) self.avgpool = nn.AdaptiveAvgPool2d(1) classifier = [] if dropout_rate > 0: classifier.append(nn.Dropout(p=dropout_rate, inplace=True)) classifier.append(nn.Linear(last_conv_output_c, num_classes)) self.classifier = nn.Sequential(*classifier) # initial weights for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode="fan_out") if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.zeros_(m.bias) def _forward_impl(self, x: Tensor) -> Tensor: x = self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return x def forward(self, x: Tensor) -> Tensor: return self._forward_impl(x) def efficientnet_b0(num_classes=1000): # input image size 224x224 return EfficientNet(width_coefficient=1.0, depth_coefficient=1.0, dropout_rate=0.2, num_classes=num_classes) def efficientnet_b1(num_classes=1000): # input image size 240x240 return EfficientNet(width_coefficient=1.0, depth_coefficient=1.1, dropout_rate=0.2, num_classes=num_classes) def efficientnet_b2(num_classes=1000): # input image size 260x260 return EfficientNet(width_coefficient=1.1, depth_coefficient=1.2, dropout_rate=0.3, num_classes=num_classes) def 
efficientnet_b3(num_classes=1000): # input image size 300x300 return EfficientNet(width_coefficient=1.2, depth_coefficient=1.4, dropout_rate=0.3, num_classes=num_classes) def efficientnet_b4(num_classes=1000): # input image size 380x380 return EfficientNet(width_coefficient=1.4, depth_coefficient=1.8, dropout_rate=0.4, num_classes=num_classes) def efficientnet_b5(num_classes=1000): # input image size 456x456 return EfficientNet(width_coefficient=1.6, depth_coefficient=2.2, dropout_rate=0.4, num_classes=num_classes) def efficientnet_b6(num_classes=1000): # input image size 528x528 return EfficientNet(width_coefficient=1.8, depth_coefficient=2.6, dropout_rate=0.5, num_classes=num_classes) def efficientnet_b7(num_classes=1000): # input image size 600x600 return EfficientNet(width_coefficient=2.0, depth_coefficient=3.1, dropout_rate=0.5, num_classes=num_classes)
12,752
37.645455
102
py
Nominatim
Nominatim-master/nominatim/errors.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Custom exception and error classes for Nominatim. """ class UsageError(Exception): """ An error raised because of bad user input. This error will usually not cause a stack trace to be printed unless debugging is enabled. """
453
29.266667
74
py
Nominatim
Nominatim-master/nominatim/typing.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Type definitions for typing annotations. Complex type definitions are moved here, to keep the source files readable. """ from typing import Any, Union, Mapping, TypeVar, Sequence, TYPE_CHECKING # Generics variable names do not confirm to naming styles, ignore globally here. # pylint: disable=invalid-name,abstract-method,multiple-statements # pylint: disable=missing-class-docstring,useless-import-alias if TYPE_CHECKING: import psycopg2.sql import psycopg2.extensions import psycopg2.extras import os StrPath = Union[str, 'os.PathLike[str]'] SysEnv = Mapping[str, str] # psycopg2-related types Query = Union[str, bytes, 'psycopg2.sql.Composable'] T_ResultKey = TypeVar('T_ResultKey', int, str) class DictCursorResult(Mapping[str, Any]): def __getitem__(self, x: Union[int, str]) -> Any: ... DictCursorResults = Sequence[DictCursorResult] T_cursor = TypeVar('T_cursor', bound='psycopg2.extensions.cursor') # The following typing features require typing_extensions to work # on all supported Python versions. # Only require this for type checking but not for normal operations. if TYPE_CHECKING: from typing_extensions import (Protocol as Protocol, Final as Final, TypedDict as TypedDict) else: Protocol = object Final = 'Final' TypedDict = dict # SQLAlchemy introduced generic types in version 2.0 making typing # incompatible with older versions. Add wrappers here so we don't have # to litter the code with bare-string types. 
if TYPE_CHECKING: import sqlalchemy as sa from typing_extensions import (TypeAlias as TypeAlias) else: TypeAlias = str SaLambdaSelect: TypeAlias = 'Union[sa.Select[Any], sa.StatementLambdaElement]' SaSelect: TypeAlias = 'sa.Select[Any]' SaScalarSelect: TypeAlias = 'sa.ScalarSelect[Any]' SaRow: TypeAlias = 'sa.Row[Any]' SaColumn: TypeAlias = 'sa.ColumnElement[Any]' SaExpression: TypeAlias = 'sa.ColumnElement[bool]' SaLabel: TypeAlias = 'sa.Label[Any]' SaFromClause: TypeAlias = 'sa.FromClause' SaSelectable: TypeAlias = 'sa.Selectable' SaBind: TypeAlias = 'sa.BindParameter[Any]'
2,358
30.453333
80
py
Nominatim
Nominatim-master/nominatim/cli.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Command-line interface to the Nominatim functions for import, update, database administration and querying. """ from typing import Optional, Any, List, Union import importlib import logging import os import sys import argparse from pathlib import Path from nominatim.config import Configuration from nominatim.tools.exec_utils import run_legacy_script, run_php_server from nominatim.errors import UsageError from nominatim import clicmd from nominatim import version from nominatim.clicmd.args import NominatimArgs, Subcommand LOG = logging.getLogger() class CommandlineParser: """ Wraps some of the common functions for parsing the command line and setting up subcommands. """ def __init__(self, prog: str, description: Optional[str]): self.parser = argparse.ArgumentParser( prog=prog, description=description, formatter_class=argparse.RawDescriptionHelpFormatter) self.subs = self.parser.add_subparsers(title='available commands', dest='subcommand') # Global arguments that only work if no sub-command given self.parser.add_argument('--version', action='store_true', help='Print Nominatim version and exit') # Arguments added to every sub-command self.default_args = argparse.ArgumentParser(add_help=False) group = self.default_args.add_argument_group('Default arguments') group.add_argument('-h', '--help', action='help', help='Show this help message and exit') group.add_argument('-q', '--quiet', action='store_const', const=0, dest='verbose', default=1, help='Print only error messages') group.add_argument('-v', '--verbose', action='count', default=1, help='Increase verboseness of output') group.add_argument('--project-dir', metavar='DIR', default='.', help='Base directory of the Nominatim installation (default:.)') group.add_argument('-j', '--threads', metavar='NUM', type=int, 
help='Number of parallel threads to use') def nominatim_version_text(self) -> str: """ Program name and version number as string """ text = f'Nominatim version {version.NOMINATIM_VERSION!s}' if version.GIT_COMMIT_HASH is not None: text += f' ({version.GIT_COMMIT_HASH})' return text def add_subcommand(self, name: str, cmd: Subcommand) -> None: """ Add a subcommand to the parser. The subcommand must be a class with a function add_args() that adds the parameters for the subcommand and a run() function that executes the command. """ assert cmd.__doc__ is not None parser = self.subs.add_parser(name, parents=[self.default_args], help=cmd.__doc__.split('\n', 1)[0], description=cmd.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False) parser.set_defaults(command=cmd) cmd.add_args(parser) def run(self, **kwargs: Any) -> int: """ Parse the command line arguments of the program and execute the appropriate subcommand. """ args = NominatimArgs() try: self.parser.parse_args(args=kwargs.get('cli_args'), namespace=args) except SystemExit: return 1 if args.version: print(self.nominatim_version_text()) return 0 if args.subcommand is None: self.parser.print_help() return 1 args.phpcgi_path = Path(kwargs['phpcgi_path']) args.project_dir = Path(args.project_dir).resolve() if 'cli_args' not in kwargs: logging.basicConfig(stream=sys.stderr, format='%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=max(4 - args.verbose, 1) * 10) args.config = Configuration(args.project_dir, environ=kwargs.get('environ', os.environ)) args.config.set_libdirs(module=kwargs['module_dir'], osm2pgsql=kwargs['osm2pgsql_path']) log = logging.getLogger() log.warning('Using project directory: %s', str(args.project_dir)) try: return args.command.run(args) except UsageError as exception: if log.isEnabledFor(logging.DEBUG): raise # use Python's exception printing log.fatal('FATAL: %s', exception) # If we get here, then execution has failed in some way. 
return 1 # Subcommand classes # # Each class needs to implement two functions: add_args() adds the CLI parameters # for the subfunction, run() executes the subcommand. # # The class documentation doubles as the help text for the command. The # first line is also used in the summary when calling the program without # a subcommand. # # No need to document the functions each time. # pylint: disable=C0111 class QueryExport: """\ Export addresses as CSV file from the database. """ def add_args(self, parser: argparse.ArgumentParser) -> None: group = parser.add_argument_group('Output arguments') group.add_argument('--output-type', default='street', choices=('continent', 'country', 'state', 'county', 'city', 'suburb', 'street', 'path'), help='Type of places to output (default: street)') group.add_argument('--output-format', default='street;suburb;city;county;state;country', help=("Semicolon-separated list of address types " "(see --output-type). Multiple ranks can be " "merged into one column by simply using a " "comma-separated list.")) group.add_argument('--output-all-postcodes', action='store_true', help=("List all postcodes for address instead of " "just the most likely one")) group.add_argument('--language', help=("Preferred language for output " "(use local name, if omitted)")) group = parser.add_argument_group('Filter arguments') group.add_argument('--restrict-to-country', metavar='COUNTRY_CODE', help='Export only objects within country') group.add_argument('--restrict-to-osm-node', metavar='ID', type=int, help='Export only children of this OSM node') group.add_argument('--restrict-to-osm-way', metavar='ID', type=int, help='Export only children of this OSM way') group.add_argument('--restrict-to-osm-relation', metavar='ID', type=int, help='Export only children of this OSM relation') def run(self, args: NominatimArgs) -> int: params: List[Union[int, str]] = [ '--output-type', args.output_type, '--output-format', args.output_format] if args.output_all_postcodes: 
params.append('--output-all-postcodes') if args.language: params.extend(('--language', args.language)) if args.restrict_to_country: params.extend(('--restrict-to-country', args.restrict_to_country)) if args.restrict_to_osm_node: params.extend(('--restrict-to-osm-node', args.restrict_to_osm_node)) if args.restrict_to_osm_way: params.extend(('--restrict-to-osm-way', args.restrict_to_osm_way)) if args.restrict_to_osm_relation: params.extend(('--restrict-to-osm-relation', args.restrict_to_osm_relation)) return run_legacy_script('export.php', *params, config=args.config) class AdminServe: """\ Start a simple web server for serving the API. This command starts a built-in webserver to serve the website from the current project directory. This webserver is only suitable for testing and development. Do not use it in production setups! There are different webservers available. The default 'php' engine runs the classic PHP frontend. The other engines are Python servers which run the new Python frontend code. This is highly experimental at the moment and may not include the full API. By the default, the webserver can be accessed at: http://127.0.0.1:8088 """ def add_args(self, parser: argparse.ArgumentParser) -> None: group = parser.add_argument_group('Server arguments') group.add_argument('--server', default='127.0.0.1:8088', help='The address the server will listen to.') group.add_argument('--engine', default='php', choices=('php', 'falcon', 'starlette'), help='Webserver framework to run. (default: php)') def run(self, args: NominatimArgs) -> int: if args.engine == 'php': run_php_server(args.server, args.project_dir / 'website') else: import uvicorn # pylint: disable=import-outside-toplevel server_info = args.server.split(':', 1) host = server_info[0] if len(server_info) > 1: if not server_info[1].isdigit(): raise UsageError('Invalid format for --server parameter. 
Use <host>:<port>') port = int(server_info[1]) else: port = 8088 server_module = importlib.import_module(f'nominatim.server.{args.engine}.server') app = server_module.get_application(args.project_dir) uvicorn.run(app, host=host, port=port) return 0 def get_set_parser() -> CommandlineParser: """\ Initializes the parser and adds various subcommands for nominatim cli. """ parser = CommandlineParser('nominatim', nominatim.__doc__) parser.add_subcommand('import', clicmd.SetupAll()) parser.add_subcommand('freeze', clicmd.SetupFreeze()) parser.add_subcommand('replication', clicmd.UpdateReplication()) parser.add_subcommand('special-phrases', clicmd.ImportSpecialPhrases()) parser.add_subcommand('add-data', clicmd.UpdateAddData()) parser.add_subcommand('index', clicmd.UpdateIndex()) parser.add_subcommand('refresh', clicmd.UpdateRefresh()) parser.add_subcommand('admin', clicmd.AdminFuncs()) parser.add_subcommand('export', QueryExport()) parser.add_subcommand('serve', AdminServe()) parser.add_subcommand('search', clicmd.APISearch()) parser.add_subcommand('reverse', clicmd.APIReverse()) parser.add_subcommand('lookup', clicmd.APILookup()) parser.add_subcommand('details', clicmd.APIDetails()) parser.add_subcommand('status', clicmd.APIStatus()) return parser def nominatim(**kwargs: Any) -> int: """\ Command-line tools for importing, updating, administrating and querying the Nominatim database. """ return get_set_parser().run(**kwargs)
11,707
40.66548
96
py
Nominatim
Nominatim-master/nominatim/config.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Nominatim configuration accessor. """ from typing import Dict, Any, List, Mapping, Optional import importlib.util import logging import os import sys from pathlib import Path import json import yaml from dotenv import dotenv_values from psycopg2.extensions import parse_dsn from nominatim.typing import StrPath from nominatim.errors import UsageError import nominatim.paths LOG = logging.getLogger() CONFIG_CACHE : Dict[str, Any] = {} def flatten_config_list(content: Any, section: str = '') -> List[Any]: """ Flatten YAML configuration lists that contain include sections which are lists themselves. """ if not content: return [] if not isinstance(content, list): raise UsageError(f"List expected in section '{section}'.") output = [] for ele in content: if isinstance(ele, list): output.extend(flatten_config_list(ele, section)) else: output.append(ele) return output class Configuration: """ Load and manage the project configuration. Nominatim uses dotenv to configure the software. Configuration options are resolved in the following order: * from the OS environment (or the dictionary given in `environ`) * from the .env file in the project directory of the installation * from the default installation in the configuration directory All Nominatim configuration options are prefixed with 'NOMINATIM_' to avoid conflicts with other environment variables. 
""" def __init__(self, project_dir: Optional[Path], environ: Optional[Mapping[str, str]] = None) -> None: self.environ = environ or os.environ self.project_dir = project_dir self.config_dir = nominatim.paths.CONFIG_DIR self._config = dotenv_values(str(self.config_dir / 'env.defaults')) if self.project_dir is not None and (self.project_dir / '.env').is_file(): self.project_dir = self.project_dir.resolve() self._config.update(dotenv_values(str(self.project_dir / '.env'))) class _LibDirs: module: Path osm2pgsql: Path php = nominatim.paths.PHPLIB_DIR sql = nominatim.paths.SQLLIB_DIR data = nominatim.paths.DATA_DIR self.lib_dir = _LibDirs() self._private_plugins: Dict[str, object] = {} def set_libdirs(self, **kwargs: StrPath) -> None: """ Set paths to library functions and data. """ for key, value in kwargs.items(): setattr(self.lib_dir, key, Path(value)) def __getattr__(self, name: str) -> str: name = 'NOMINATIM_' + name if name in self.environ: return self.environ[name] return self._config[name] or '' def get_bool(self, name: str) -> bool: """ Return the given configuration parameter as a boolean. Values of '1', 'yes' and 'true' are accepted as truthy values, everything else is interpreted as false. """ return getattr(self, name).lower() in ('1', 'yes', 'true') def get_int(self, name: str) -> int: """ Return the given configuration parameter as an int. """ try: return int(getattr(self, name)) except ValueError as exp: LOG.fatal("Invalid setting NOMINATIM_%s. Needs to be a number.", name) raise UsageError("Configuration error.") from exp def get_str_list(self, name: str) -> Optional[List[str]]: """ Return the given configuration parameter as a list of strings. The values are assumed to be given as a comma-sparated list and will be stripped before returning them. On empty values None is returned. 
""" raw = getattr(self, name) return [v.strip() for v in raw.split(',')] if raw else None def get_path(self, name: str) -> Optional[Path]: """ Return the given configuration parameter as a Path. If a relative path is configured, then the function converts this into an absolute path with the project directory as root path. If the configuration is unset, None is returned. """ value = getattr(self, name) if not value: return None cfgpath = Path(value) if not cfgpath.is_absolute(): assert self.project_dir is not None cfgpath = self.project_dir / cfgpath return cfgpath.resolve() def get_libpq_dsn(self) -> str: """ Get configured database DSN converted into the key/value format understood by libpq and psycopg. """ dsn = self.DATABASE_DSN def quote_param(param: str) -> str: key, val = param.split('=') val = val.replace('\\', '\\\\').replace("'", "\\'") if ' ' in val: val = "'" + val + "'" return key + '=' + val if dsn.startswith('pgsql:'): # Old PHP DSN format. Convert before returning. return ' '.join([quote_param(p) for p in dsn[6:].split(';')]) return dsn def get_database_params(self) -> Mapping[str, str]: """ Get the configured parameters for the database connection as a mapping. """ dsn = self.DATABASE_DSN if dsn.startswith('pgsql:'): return dict((p.split('=', 1) for p in dsn[6:].split(';'))) return parse_dsn(dsn) def get_import_style_file(self) -> Path: """ Return the import style file as a path object. Translates the name of the standard styles automatically into a file in the config style. """ style = getattr(self, 'IMPORT_STYLE') if style in ('admin', 'street', 'address', 'full', 'extratags'): return self.config_dir / f'import-{style}.lua' return self.find_config_file('', 'IMPORT_STYLE') def get_os_env(self) -> Dict[str, str]: """ Return a copy of the OS environment with the Nominatim configuration merged in. 
""" env = {k: v for k, v in self._config.items() if v is not None} env.update(self.environ) return env def load_sub_configuration(self, filename: StrPath, config: Optional[str] = None) -> Any: """ Load additional configuration from a file. `filename` is the name of the configuration file. The file is first searched in the project directory and then in the global settings directory. If `config` is set, then the name of the configuration file can be additionally given through a .env configuration option. When the option is set, then the file will be exclusively loaded as set: if the name is an absolute path, the file name is taken as is, if the name is relative, it is taken to be relative to the project directory. The format of the file is determined from the filename suffix. Currently only files with extension '.yaml' are supported. YAML files support a special '!include' construct. When the directive is given, the value is taken to be a filename, the file is loaded using this function and added at the position in the configuration tree. """ configfile = self.find_config_file(filename, config) if str(configfile) in CONFIG_CACHE: return CONFIG_CACHE[str(configfile)] if configfile.suffix in ('.yaml', '.yml'): result = self._load_from_yaml(configfile) elif configfile.suffix == '.json': with configfile.open('r', encoding='utf-8') as cfg: result = json.load(cfg) else: raise UsageError(f"Config file '{configfile}' has unknown format.") CONFIG_CACHE[str(configfile)] = result return result def load_plugin_module(self, module_name: str, internal_path: str) -> Any: """ Load a Python module as a plugin. The module_name may have three variants: * A name without any '.' is assumed to be an internal module and will be searched relative to `internal_path`. * If the name ends in `.py`, module_name is assumed to be a file name relative to the project directory. * Any other name is assumed to be an absolute module name. 
In either of the variants the module name must start with a letter. """ if not module_name or not module_name[0].isidentifier(): raise UsageError(f'Invalid module name {module_name}') if '.' not in module_name: module_name = module_name.replace('-', '_') full_module = f'{internal_path}.{module_name}' return sys.modules.get(full_module) or importlib.import_module(full_module) if module_name.endswith('.py'): if self.project_dir is None or not (self.project_dir / module_name).exists(): raise UsageError(f"Cannot find module '{module_name}' in project directory.") if module_name in self._private_plugins: return self._private_plugins[module_name] file_path = str(self.project_dir / module_name) spec = importlib.util.spec_from_file_location(module_name, file_path) if spec: module = importlib.util.module_from_spec(spec) # Do not add to global modules because there is no standard # module name that Python can resolve. self._private_plugins[module_name] = module assert spec.loader is not None spec.loader.exec_module(module) return module return sys.modules.get(module_name) or importlib.import_module(module_name) def find_config_file(self, filename: StrPath, config: Optional[str] = None) -> Path: """ Resolve the location of a configuration file given a filename and an optional configuration option with the file name. Raises a UsageError when the file cannot be found or is not a regular file. 
""" if config is not None: cfg_value = getattr(self, config) if cfg_value: cfg_filename = Path(cfg_value) if cfg_filename.is_absolute(): cfg_filename = cfg_filename.resolve() if not cfg_filename.is_file(): LOG.fatal("Cannot find config file '%s'.", cfg_filename) raise UsageError("Config file not found.") return cfg_filename filename = cfg_filename search_paths = [self.project_dir, self.config_dir] for path in search_paths: if path is not None and (path / filename).is_file(): return path / filename LOG.fatal("Configuration file '%s' not found.\nDirectories searched: %s", filename, search_paths) raise UsageError("Config file not found.") def _load_from_yaml(self, cfgfile: Path) -> Any: """ Load a YAML configuration file. This installs a special handler that allows to include other YAML files using the '!include' operator. """ yaml.add_constructor('!include', self._yaml_include_representer, Loader=yaml.SafeLoader) return yaml.safe_load(cfgfile.read_text(encoding='utf-8')) def _yaml_include_representer(self, loader: Any, node: yaml.Node) -> Any: """ Handler for the '!include' operator in YAML files. When the filename is relative, then the file is first searched in the project directory and then in the global settings directory. """ fname = loader.construct_scalar(node) if Path(fname).is_absolute(): configfile = Path(fname) else: configfile = self.find_config_file(loader.construct_scalar(node)) if configfile.suffix != '.yaml': LOG.fatal("Format error while reading '%s': only YAML format supported.", configfile) raise UsageError("Cannot handle config file format.") return yaml.safe_load(configfile.read_text(encoding='utf-8'))
12,709
35.734104
93
py
Nominatim
Nominatim-master/nominatim/version.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Version information for Nominatim. """ from typing import Optional, NamedTuple class NominatimVersion(NamedTuple): """ Version information for Nominatim. We follow semantic versioning. Major, minor and patch_level refer to the last released version. The database patch level tracks important changes between releases and must always be increased when there is a change to the database or code that requires a migration. When adding a migration on the development branch, raise the patch level to 99 to make sure that the migration is applied when updating from a patch release to the next minor version. Patch releases usually shouldn't have migrations in them. When they are needed, then make sure that the migration can be reapplied and set the migration version to the appropriate patch level when cherry-picking the commit with the migration. """ major: int minor: int patch_level: int db_patch_level: int def __str__(self) -> str: return f"{self.major}.{self.minor}.{self.patch_level}-{self.db_patch_level}" NOMINATIM_VERSION = NominatimVersion(4, 2, 99, 2) POSTGRESQL_REQUIRED_VERSION = (9, 6) POSTGIS_REQUIRED_VERSION = (2, 2) # Cmake sets a variable @GIT_HASH@ by executing 'git --log'. It is not run # on every execution of 'make'. # cmake/tool-installed.tmpl is used to build the binary 'nominatim'. Inside # there is a call to set the variable value below. GIT_COMMIT_HASH : Optional[str] = None def parse_version(version: str) -> NominatimVersion: """ Parse a version string into a version consisting of a tuple of four ints: major, minor, patch level, database patch level This is the reverse operation of `version_str()`. """ parts = version.split('.') return NominatimVersion(*[int(x) for x in parts[:2] + parts[2].split('-')])
2,114
36.105263
84
py
Nominatim
Nominatim-master/nominatim/__init__.py
0
0
0
py
Nominatim
Nominatim-master/nominatim/paths.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Path settings for extra data used by Nominatim. """ from pathlib import Path PHPLIB_DIR = (Path(__file__) / '..' / '..' / 'lib-php').resolve() SQLLIB_DIR = (Path(__file__) / '..' / '..' / 'lib-sql').resolve() DATA_DIR = (Path(__file__) / '..' / '..' / 'data').resolve() CONFIG_DIR = (Path(__file__) / '..' / '..' / 'settings').resolve()
549
33.375
66
py
Nominatim
Nominatim-master/nominatim/tools/database_import.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Functions for setting up and importing a new Nominatim database. """ from typing import Tuple, Optional, Union, Sequence, MutableMapping, Any import logging import os import selectors import subprocess from pathlib import Path import psutil from psycopg2 import sql as pysql from nominatim.config import Configuration from nominatim.db.connection import connect, get_pg_env, Connection from nominatim.db.async_connection import DBConnection from nominatim.db.sql_preprocessor import SQLPreprocessor from nominatim.tools.exec_utils import run_osm2pgsql from nominatim.errors import UsageError from nominatim.version import POSTGRESQL_REQUIRED_VERSION, POSTGIS_REQUIRED_VERSION LOG = logging.getLogger() def _require_version(module: str, actual: Tuple[int, int], expected: Tuple[int, int]) -> None: """ Compares the version for the given module and raises an exception if the actual version is too old. """ if actual < expected: LOG.fatal('Minimum supported version of %s is %d.%d. ' 'Found version %d.%d.', module, expected[0], expected[1], actual[0], actual[1]) raise UsageError(f'{module} is too old.') def setup_database_skeleton(dsn: str, rouser: Optional[str] = None) -> None: """ Create a new database for Nominatim and populate it with the essential extensions. The function fails when the database already exists or Postgresql or PostGIS versions are too old. Uses `createdb` to create the database. If 'rouser' is given, then the function also checks that the user with that given name exists. Requires superuser rights by the caller. 
""" proc = subprocess.run(['createdb'], env=get_pg_env(dsn), check=False) if proc.returncode != 0: raise UsageError('Creating new database failed.') with connect(dsn) as conn: _require_version('PostgreSQL server', conn.server_version_tuple(), POSTGRESQL_REQUIRED_VERSION) if rouser is not None: with conn.cursor() as cur: cnt = cur.scalar('SELECT count(*) FROM pg_user where usename = %s', (rouser, )) if cnt == 0: LOG.fatal("Web user '%s' does not exist. Create it with:\n" "\n createuser %s", rouser, rouser) raise UsageError('Missing read-only user.') # Create extensions. with conn.cursor() as cur: cur.execute('CREATE EXTENSION IF NOT EXISTS hstore') cur.execute('CREATE EXTENSION IF NOT EXISTS postgis') postgis_version = conn.postgis_version_tuple() if postgis_version[0] >= 3: cur.execute('CREATE EXTENSION IF NOT EXISTS postgis_raster') conn.commit() _require_version('PostGIS', conn.postgis_version_tuple(), POSTGIS_REQUIRED_VERSION) def import_osm_data(osm_files: Union[Path, Sequence[Path]], options: MutableMapping[str, Any], drop: bool = False, ignore_errors: bool = False) -> None: """ Import the given OSM files. 'options' contains the list of default settings for osm2pgsql. """ options['import_file'] = osm_files options['append'] = False options['threads'] = 1 if not options['flatnode_file'] and options['osm2pgsql_cache'] == 0: # Make some educated guesses about cache size based on the size # of the import file and the available memory. 
mem = psutil.virtual_memory() fsize = 0 if isinstance(osm_files, list): for fname in osm_files: fsize += os.stat(str(fname)).st_size else: fsize = os.stat(str(osm_files)).st_size options['osm2pgsql_cache'] = int(min((mem.available + mem.cached) * 0.75, fsize * 2) / 1024 / 1024) + 1 run_osm2pgsql(options) with connect(options['dsn']) as conn: if not ignore_errors: with conn.cursor() as cur: cur.execute('SELECT * FROM place LIMIT 1') if cur.rowcount == 0: raise UsageError('No data imported by osm2pgsql.') if drop: conn.drop_table('planet_osm_nodes') if drop and options['flatnode_file']: Path(options['flatnode_file']).unlink() def create_tables(conn: Connection, config: Configuration, reverse_only: bool = False) -> None: """ Create the set of basic tables. When `reverse_only` is True, then the main table for searching will be skipped and only reverse search is possible. """ sql = SQLPreprocessor(conn, config) sql.env.globals['db']['reverse_only'] = reverse_only sql.run_sql_file(conn, 'tables.sql') def create_table_triggers(conn: Connection, config: Configuration) -> None: """ Create the triggers for the tables. The trigger functions must already have been imported with refresh.create_functions(). """ sql = SQLPreprocessor(conn, config) sql.run_sql_file(conn, 'table-triggers.sql') def create_partition_tables(conn: Connection, config: Configuration) -> None: """ Create tables that have explicit partitioning. """ sql = SQLPreprocessor(conn, config) sql.run_sql_file(conn, 'partition-tables.src.sql') def truncate_data_tables(conn: Connection) -> None: """ Truncate all data tables to prepare for a fresh load. 
""" with conn.cursor() as cur: cur.execute('TRUNCATE placex') cur.execute('TRUNCATE place_addressline') cur.execute('TRUNCATE location_area') cur.execute('TRUNCATE location_area_country') cur.execute('TRUNCATE location_property_tiger') cur.execute('TRUNCATE location_property_osmline') cur.execute('TRUNCATE location_postcode') if conn.table_exists('search_name'): cur.execute('TRUNCATE search_name') cur.execute('DROP SEQUENCE IF EXISTS seq_place') cur.execute('CREATE SEQUENCE seq_place start 100000') cur.execute("""SELECT tablename FROM pg_tables WHERE tablename LIKE 'location_road_%'""") for table in [r[0] for r in list(cur)]: cur.execute('TRUNCATE ' + table) conn.commit() _COPY_COLUMNS = pysql.SQL(',').join(map(pysql.Identifier, ('osm_type', 'osm_id', 'class', 'type', 'name', 'admin_level', 'address', 'extratags', 'geometry'))) def load_data(dsn: str, threads: int) -> None: """ Copy data into the word and placex table. """ sel = selectors.DefaultSelector() # Then copy data from place to placex in <threads - 1> chunks. place_threads = max(1, threads - 1) for imod in range(place_threads): conn = DBConnection(dsn) conn.connect() conn.perform( pysql.SQL("""INSERT INTO placex ({columns}) SELECT {columns} FROM place WHERE osm_id % {total} = {mod} AND NOT (class='place' and (type='houses' or type='postcode')) AND ST_IsValid(geometry) """).format(columns=_COPY_COLUMNS, total=pysql.Literal(place_threads), mod=pysql.Literal(imod))) sel.register(conn, selectors.EVENT_READ, conn) # Address interpolations go into another table. conn = DBConnection(dsn) conn.connect() conn.perform("""INSERT INTO location_property_osmline (osm_id, address, linegeo) SELECT osm_id, address, geometry FROM place WHERE class='place' and type='houses' and osm_type='W' and ST_GeometryType(geometry) = 'ST_LineString' """) sel.register(conn, selectors.EVENT_READ, conn) # Now wait for all of them to finish. 
todo = place_threads + 1 while todo > 0: for key, _ in sel.select(1): conn = key.data sel.unregister(conn) conn.wait() conn.close() todo -= 1 print('.', end='', flush=True) print('\n') with connect(dsn) as syn_conn: with syn_conn.cursor() as cur: cur.execute('ANALYSE') def create_search_indices(conn: Connection, config: Configuration, drop: bool = False, threads: int = 1) -> None: """ Create tables that have explicit partitioning. """ # If index creation failed and left an index invalid, they need to be # cleaned out first, so that the script recreates them. with conn.cursor() as cur: cur.execute("""SELECT relname FROM pg_class, pg_index WHERE pg_index.indisvalid = false AND pg_index.indexrelid = pg_class.oid""") bad_indices = [row[0] for row in list(cur)] for idx in bad_indices: LOG.info("Drop invalid index %s.", idx) cur.execute(pysql.SQL('DROP INDEX {}').format(pysql.Identifier(idx))) conn.commit() sql = SQLPreprocessor(conn, config) sql.run_parallel_sql_file(config.get_libpq_dsn(), 'indices.sql', min(8, threads), drop=drop)
9,718
37.26378
95
py
Nominatim
Nominatim-master/nominatim/tools/refresh.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Functions for bringing auxiliary data in the database up-to-date. """ from typing import MutableSequence, Tuple, Any, Type, Mapping, Sequence, List, cast import logging from textwrap import dedent from pathlib import Path from psycopg2 import sql as pysql from nominatim.config import Configuration from nominatim.db.connection import Connection, connect from nominatim.db.utils import execute_file from nominatim.db.sql_preprocessor import SQLPreprocessor from nominatim.version import NOMINATIM_VERSION LOG = logging.getLogger() OSM_TYPE = {'N': 'node', 'W': 'way', 'R': 'relation'} def _add_address_level_rows_from_entry(rows: MutableSequence[Tuple[Any, ...]], entry: Mapping[str, Any]) -> None: """ Converts a single entry from the JSON format for address rank descriptions into a flat format suitable for inserting into a PostgreSQL table and adds these lines to `rows`. """ countries = entry.get('countries') or (None, ) for key, values in entry['tags'].items(): for value, ranks in values.items(): if isinstance(ranks, list): rank_search, rank_address = ranks else: rank_search = rank_address = ranks if not value: value = None for country in countries: rows.append((country, key, value, rank_search, rank_address)) def load_address_levels(conn: Connection, table: str, levels: Sequence[Mapping[str, Any]]) -> None: """ Replace the `address_levels` table with the contents of `levels'. A new table is created any previously existing table is dropped. 
The table has the following columns: country, class, type, rank_search, rank_address """ rows: List[Tuple[Any, ...]] = [] for entry in levels: _add_address_level_rows_from_entry(rows, entry) with conn.cursor() as cur: cur.drop_table(table) cur.execute(pysql.SQL("""CREATE TABLE {} ( country_code varchar(2), class TEXT, type TEXT, rank_search SMALLINT, rank_address SMALLINT) """).format(pysql.Identifier(table))) cur.execute_values(pysql.SQL("INSERT INTO {} VALUES %s") .format(pysql.Identifier(table)), rows) cur.execute(pysql.SQL('CREATE UNIQUE INDEX ON {} (country_code, class, type)') .format(pysql.Identifier(table))) conn.commit() def load_address_levels_from_config(conn: Connection, config: Configuration) -> None: """ Replace the `address_levels` table with the content as defined in the given configuration. Uses the parameter NOMINATIM_ADDRESS_LEVEL_CONFIG to determine the location of the configuration file. """ cfg = config.load_sub_configuration('', config='ADDRESS_LEVEL_CONFIG') load_address_levels(conn, 'address_levels', cfg) def create_functions(conn: Connection, config: Configuration, enable_diff_updates: bool = True, enable_debug: bool = False) -> None: """ (Re)create the PL/pgSQL functions. 
""" sql = SQLPreprocessor(conn, config) sql.run_sql_file(conn, 'functions.sql', disable_diff_updates=not enable_diff_updates, debug=enable_debug) WEBSITE_SCRIPTS = ( 'deletable.php', 'details.php', 'lookup.php', 'polygons.php', 'reverse.php', 'search.php', 'status.php' ) # constants needed by PHP scripts: PHP name, config name, type PHP_CONST_DEFS = ( ('Database_DSN', 'DATABASE_DSN', str), ('Default_Language', 'DEFAULT_LANGUAGE', str), ('Log_DB', 'LOG_DB', bool), ('Log_File', 'LOG_FILE', Path), ('NoAccessControl', 'CORS_NOACCESSCONTROL', bool), ('Places_Max_ID_count', 'LOOKUP_MAX_COUNT', int), ('PolygonOutput_MaximumTypes', 'POLYGON_OUTPUT_MAX_TYPES', int), ('Search_BatchMode', 'SEARCH_BATCH_MODE', bool), ('Search_NameOnlySearchFrequencyThreshold', 'SEARCH_NAME_ONLY_THRESHOLD', str), ('Use_US_Tiger_Data', 'USE_US_TIGER_DATA', bool), ('MapIcon_URL', 'MAPICON_URL', str), ('Search_WithinCountries', 'SEARCH_WITHIN_COUNTRIES', bool), ) def import_wikipedia_articles(dsn: str, data_path: Path, ignore_errors: bool = False) -> int: """ Replaces the wikipedia importance tables with new data. The import is run in a single transaction so that the new data is replace seamlessly. Returns 0 if all was well and 1 if the importance file could not be found. Throws an exception if there was an error reading the file. """ datafile = data_path / 'wikimedia-importance.sql.gz' if not datafile.exists(): return 1 pre_code = """BEGIN; DROP TABLE IF EXISTS "wikipedia_article"; DROP TABLE IF EXISTS "wikipedia_redirect" """ post_code = "COMMIT" execute_file(dsn, datafile, ignore_errors=ignore_errors, pre_code=pre_code, post_code=post_code) return 0 def import_secondary_importance(dsn: str, data_path: Path, ignore_errors: bool = False) -> int: """ Replaces the secondary importance raster data table with new data. Returns 0 if all was well and 1 if the raster SQL file could not be found. Throws an exception if there was an error reading the file. 
""" datafile = data_path / 'secondary_importance.sql.gz' if not datafile.exists(): return 1 with connect(dsn) as conn: postgis_version = conn.postgis_version_tuple() if postgis_version[0] < 3: LOG.error('PostGIS version is too old for using OSM raster data.') return 2 execute_file(dsn, datafile, ignore_errors=ignore_errors) return 0 def recompute_importance(conn: Connection) -> None: """ Recompute wikipedia links and importance for all entries in placex. This is a long-running operations that must not be executed in parallel with updates. """ with conn.cursor() as cur: cur.execute('ALTER TABLE placex DISABLE TRIGGER ALL') cur.execute(""" UPDATE placex SET (wikipedia, importance) = (SELECT wikipedia, importance FROM compute_importance(extratags, country_code, rank_search, centroid)) """) cur.execute(""" UPDATE placex s SET wikipedia = d.wikipedia, importance = d.importance FROM placex d WHERE s.place_id = d.linked_place_id and d.wikipedia is not null and (s.wikipedia is null or s.importance < d.importance); """) cur.execute('ALTER TABLE placex ENABLE TRIGGER ALL') conn.commit() def _quote_php_variable(var_type: Type[Any], config: Configuration, conf_name: str) -> str: if var_type == bool: return 'true' if config.get_bool(conf_name) else 'false' if var_type == int: return cast(str, getattr(config, conf_name)) if not getattr(config, conf_name): return 'false' if var_type == Path: value = str(config.get_path(conf_name) or '') else: value = getattr(config, conf_name) quoted = value.replace("'", "\\'") return f"'{quoted}'" def setup_website(basedir: Path, config: Configuration, conn: Connection) -> None: """ Create the website script stubs. """ if not basedir.exists(): LOG.info('Creating website directory.') basedir.mkdir() assert config.project_dir is not None basedata = dedent(f"""\ <?php @define('CONST_Debug', $_GET['debug'] ?? 
false); @define('CONST_LibDir', '{config.lib_dir.php}'); @define('CONST_TokenizerDir', '{config.project_dir / 'tokenizer'}'); @define('CONST_NominatimVersion', '{NOMINATIM_VERSION!s}'); """) for php_name, conf_name, var_type in PHP_CONST_DEFS: varout = _quote_php_variable(var_type, config, conf_name) basedata += f"@define('CONST_{php_name}', {varout});\n" template = "\nrequire_once(CONST_LibDir.'/website/{}');\n" search_name_table_exists = bool(conn and conn.table_exists('search_name')) for script in WEBSITE_SCRIPTS: if not search_name_table_exists and script == 'search.php': out = template.format('reverse-only-search.php') else: out = template.format(script) (basedir / script).write_text(basedata + out, 'utf-8') def invalidate_osm_object(osm_type: str, osm_id: int, conn: Connection, recursive: bool = True) -> None: """ Mark the given OSM object for reindexing. When 'recursive' is set to True (the default), then all dependent objects are marked for reindexing as well. 'osm_type' must be on of 'N' (node), 'W' (way) or 'R' (relation). If the given object does not exist, then nothing happens. """ assert osm_type in ('N', 'R', 'W') LOG.warning("Invalidating OSM %s %s%s.", OSM_TYPE[osm_type], osm_id, ' and its dependent places' if recursive else '') with conn.cursor() as cur: if recursive: sql = """SELECT place_force_update(place_id) FROM placex WHERE osm_type = %s and osm_id = %s""" else: sql = """UPDATE placex SET indexed_status = 2 WHERE osm_type = %s and osm_id = %s""" cur.execute(sql, (osm_type, osm_id))
10,019
35.703297
99
py
Nominatim
Nominatim-master/nominatim/tools/postcodes.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Functions for importing, updating and otherwise maintaining the table of artificial postcode centroids. """ from typing import Optional, Tuple, Dict, List, TextIO from collections import defaultdict from pathlib import Path import csv import gzip import logging from math import isfinite from psycopg2 import sql as pysql from nominatim.db.connection import connect, Connection from nominatim.utils.centroid import PointsCentroid from nominatim.data.postcode_format import PostcodeFormatter, CountryPostcodeMatcher from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer LOG = logging.getLogger() def _to_float(numstr: str, max_value: float) -> float: """ Convert the number in string into a float. The number is expected to be in the range of [-max_value, max_value]. Otherwise rises a ValueError. """ num = float(numstr) if not isfinite(num) or num <= -max_value or num >= max_value: raise ValueError() return num class _PostcodeCollector: """ Collector for postcodes of a single country. """ def __init__(self, country: str, matcher: Optional[CountryPostcodeMatcher]): self.country = country self.matcher = matcher self.collected: Dict[str, PointsCentroid] = defaultdict(PointsCentroid) self.normalization_cache: Optional[Tuple[str, Optional[str]]] = None def add(self, postcode: str, x: float, y: float) -> None: """ Add the given postcode to the collection cache. If the postcode already existed, it is overwritten with the new centroid. 
""" if self.matcher is not None: normalized: Optional[str] if self.normalization_cache and self.normalization_cache[0] == postcode: normalized = self.normalization_cache[1] else: match = self.matcher.match(postcode) normalized = self.matcher.normalize(match) if match else None self.normalization_cache = (postcode, normalized) if normalized: self.collected[normalized] += (x, y) def commit(self, conn: Connection, analyzer: AbstractAnalyzer, project_dir: Path) -> None: """ Update postcodes for the country from the postcodes selected so far as well as any externally supplied postcodes. """ self._update_from_external(analyzer, project_dir) to_add, to_delete, to_update = self._compute_changes(conn) LOG.info("Processing country '%s' (%s added, %s deleted, %s updated).", self.country, len(to_add), len(to_delete), len(to_update)) with conn.cursor() as cur: if to_add: cur.execute_values( """INSERT INTO location_postcode (place_id, indexed_status, country_code, postcode, geometry) VALUES %s""", to_add, template=pysql.SQL("""(nextval('seq_place'), 1, {}, %s, 'SRID=4326;POINT(%s %s)') """).format(pysql.Literal(self.country))) if to_delete: cur.execute("""DELETE FROM location_postcode WHERE country_code = %s and postcode = any(%s) """, (self.country, to_delete)) if to_update: cur.execute_values( pysql.SQL("""UPDATE location_postcode SET indexed_status = 2, geometry = ST_SetSRID(ST_Point(v.x, v.y), 4326) FROM (VALUES %s) AS v (pc, x, y) WHERE country_code = {} and postcode = pc """).format(pysql.Literal(self.country)), to_update) def _compute_changes(self, conn: Connection) \ -> Tuple[List[Tuple[str, float, float]], List[str], List[Tuple[str, float, float]]]: """ Compute which postcodes from the collected postcodes have to be added or modified and which from the location_postcode table have to be deleted. 
""" to_update = [] to_delete = [] with conn.cursor() as cur: cur.execute("""SELECT postcode, ST_X(geometry), ST_Y(geometry) FROM location_postcode WHERE country_code = %s""", (self.country, )) for postcode, x, y in cur: pcobj = self.collected.pop(postcode, None) if pcobj: newx, newy = pcobj.centroid() if (x - newx) > 0.0000001 or (y - newy) > 0.0000001: to_update.append((postcode, newx, newy)) else: to_delete.append(postcode) to_add = [(k, *v.centroid()) for k, v in self.collected.items()] self.collected = defaultdict(PointsCentroid) return to_add, to_delete, to_update def _update_from_external(self, analyzer: AbstractAnalyzer, project_dir: Path) -> None: """ Look for an external postcode file for the active country in the project directory and add missing postcodes when found. """ csvfile = self._open_external(project_dir) if csvfile is None: return try: reader = csv.DictReader(csvfile) for row in reader: if 'postcode' not in row or 'lat' not in row or 'lon' not in row: LOG.warning("Bad format for external postcode file for country '%s'." 
" Ignored.", self.country) return postcode = analyzer.normalize_postcode(row['postcode']) if postcode not in self.collected: try: # Do float conversation separately, it might throw centroid = (_to_float(row['lon'], 180), _to_float(row['lat'], 90)) self.collected[postcode] += centroid except ValueError: LOG.warning("Bad coordinates %s, %s in %s country postcode file.", row['lat'], row['lon'], self.country) finally: csvfile.close() def _open_external(self, project_dir: Path) -> Optional[TextIO]: fname = project_dir / f'{self.country}_postcodes.csv' if fname.is_file(): LOG.info("Using external postcode file '%s'.", fname) return open(fname, 'r', encoding='utf-8') fname = project_dir / f'{self.country}_postcodes.csv.gz' if fname.is_file(): LOG.info("Using external postcode file '%s'.", fname) return gzip.open(fname, 'rt') return None def update_postcodes(dsn: str, project_dir: Path, tokenizer: AbstractTokenizer) -> None: """ Update the table of artificial postcodes. Computes artificial postcode centroids from the placex table, potentially enhances it with external data and then updates the postcodes in the table 'location_postcode'. """ matcher = PostcodeFormatter() with tokenizer.name_analyzer() as analyzer: with connect(dsn) as conn: # First get the list of countries that currently have postcodes. # (Doing this before starting to insert, so it is fast on import.) with conn.cursor() as cur: cur.execute("SELECT DISTINCT country_code FROM location_postcode") todo_countries = set((row[0] for row in cur)) # Recompute the list of valid postcodes from placex. with conn.cursor(name="placex_postcodes") as cur: cur.execute(""" SELECT cc, pc, ST_X(centroid), ST_Y(centroid) FROM (SELECT COALESCE(plx.country_code, get_country_code(ST_Centroid(pl.geometry))) as cc, pl.address->'postcode' as pc, COALESCE(plx.centroid, ST_Centroid(pl.geometry)) as centroid FROM place AS pl LEFT OUTER JOIN placex AS plx ON pl.osm_id = plx.osm_id AND pl.osm_type = plx.osm_type WHERE pl.address ? 
'postcode' AND pl.geometry IS NOT null) xx WHERE pc IS NOT null AND cc IS NOT null ORDER BY cc, pc""") collector = None for country, postcode, x, y in cur: if collector is None or country != collector.country: if collector is not None: collector.commit(conn, analyzer, project_dir) collector = _PostcodeCollector(country, matcher.get_matcher(country)) todo_countries.discard(country) collector.add(postcode, x, y) if collector is not None: collector.commit(conn, analyzer, project_dir) # Now handle any countries that are only in the postcode table. for country in todo_countries: fmt = matcher.get_matcher(country) _PostcodeCollector(country, fmt).commit(conn, analyzer, project_dir) conn.commit() analyzer.update_postcodes_from_db() def can_compute(dsn: str) -> bool: """ Check that the place table exists so that postcodes can be computed. """ with connect(dsn) as conn: return conn.table_exists('place')
9,932
41.268085
94
py
Nominatim
Nominatim-master/nominatim/tools/tiger_data.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Functions for importing tiger data and handling tarbar and directory files """ from typing import Any, TextIO, List, Union, cast import csv import io import logging import os import tarfile from psycopg2.extras import Json from nominatim.config import Configuration from nominatim.db.connection import connect from nominatim.db.async_connection import WorkerPool from nominatim.db.sql_preprocessor import SQLPreprocessor from nominatim.errors import UsageError from nominatim.data.place_info import PlaceInfo from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer from nominatim.tools import freeze LOG = logging.getLogger() class TigerInput: """ Context manager that goes through Tiger input files which may either be in a directory or gzipped together in a tar file. """ def __init__(self, data_dir: str) -> None: self.tar_handle = None self.files: List[Union[str, tarfile.TarInfo]] = [] if data_dir.endswith('.tar.gz'): try: self.tar_handle = tarfile.open(data_dir) # pylint: disable=consider-using-with except tarfile.ReadError as err: LOG.fatal("Cannot open '%s'. 
Is this a tar file?", data_dir) raise UsageError("Cannot open Tiger data file.") from err self.files = [i for i in self.tar_handle.getmembers() if i.name.endswith('.csv')] LOG.warning("Found %d CSV files in tarfile with path %s", len(self.files), data_dir) else: files = os.listdir(data_dir) self.files = [os.path.join(data_dir, i) for i in files if i.endswith('.csv')] LOG.warning("Found %d CSV files in path %s", len(self.files), data_dir) if not self.files: LOG.warning("Tiger data import selected but no files found at %s", data_dir) def __enter__(self) -> 'TigerInput': return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: if self.tar_handle: self.tar_handle.close() self.tar_handle = None def next_file(self) -> TextIO: """ Return a file handle to the next file to be processed. Raises an IndexError if there is no file left. """ fname = self.files.pop(0) if self.tar_handle is not None: extracted = self.tar_handle.extractfile(fname) assert extracted is not None return io.TextIOWrapper(extracted) return open(cast(str, fname), encoding='utf-8') def __len__(self) -> int: return len(self.files) def handle_threaded_sql_statements(pool: WorkerPool, fd: TextIO, analyzer: AbstractAnalyzer) -> None: """ Handles sql statement with multiplexing """ lines = 0 # Using pool of database connections to execute sql statements sql = "SELECT tiger_line_import(%s, %s, %s, %s, %s, %s)" for row in csv.DictReader(fd, delimiter=';'): try: address = dict(street=row['street'], postcode=row['postcode']) args = ('SRID=4326;' + row['geometry'], int(row['from']), int(row['to']), row['interpolation'], Json(analyzer.process_place(PlaceInfo({'address': address}))), analyzer.normalize_postcode(row['postcode'])) except ValueError: continue pool.next_free_worker().perform(sql, args=args) lines += 1 if lines == 1000: print('.', end='', flush=True) lines = 0 def add_tiger_data(data_dir: str, config: Configuration, threads: int, tokenizer: AbstractTokenizer) -> int: """ Import 
tiger data from directory or tar file `data dir`. """ dsn = config.get_libpq_dsn() with connect(dsn) as conn: is_frozen = freeze.is_frozen(conn) conn.close() if is_frozen: raise UsageError("Tiger cannot be imported when database frozen (Github issue #3048)") with TigerInput(data_dir) as tar: if not tar: return 1 with connect(dsn) as conn: sql = SQLPreprocessor(conn, config) sql.run_sql_file(conn, 'tiger_import_start.sql') # Reading files and then for each file line handling # sql_query in <threads - 1> chunks. place_threads = max(1, threads - 1) with WorkerPool(dsn, place_threads, ignore_sql_errors=True) as pool: with tokenizer.name_analyzer() as analyzer: while tar: with tar.next_file() as fd: handle_threaded_sql_statements(pool, fd, analyzer) print('\n') LOG.warning("Creating indexes on Tiger data") with connect(dsn) as conn: sql = SQLPreprocessor(conn, config) sql.run_sql_file(conn, 'tiger_import_finish.sql') return 0
5,105
33.04
98
py
Nominatim
Nominatim-master/nominatim/tools/migration.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Functions for database migration to newer software versions. """ from typing import List, Tuple, Callable, Any import logging from psycopg2 import sql as pysql from nominatim.config import Configuration from nominatim.db import properties from nominatim.db.connection import connect, Connection from nominatim.version import NominatimVersion, NOMINATIM_VERSION, parse_version from nominatim.tools import refresh from nominatim.tokenizer import factory as tokenizer_factory from nominatim.errors import UsageError LOG = logging.getLogger() _MIGRATION_FUNCTIONS : List[Tuple[NominatimVersion, Callable[..., None]]] = [] def migrate(config: Configuration, paths: Any) -> int: """ Check for the current database version and execute migrations, if necesssary. """ with connect(config.get_libpq_dsn()) as conn: if conn.table_exists('nominatim_properties'): db_version_str = properties.get_property(conn, 'database_version') else: db_version_str = None if db_version_str is not None: db_version = parse_version(db_version_str) if db_version == NOMINATIM_VERSION: LOG.warning("Database already at latest version (%s)", db_version_str) return 0 LOG.info("Detected database version: %s", db_version_str) else: db_version = _guess_version(conn) for version, func in _MIGRATION_FUNCTIONS: if db_version < version or \ (db_version == (3, 5, 0, 99) and version == (3, 5, 0, 99)): title = func.__doc__ or '' LOG.warning("Running: %s (%s)", title.split('\n', 1)[0], version) kwargs = dict(conn=conn, config=config, paths=paths) func(**kwargs) conn.commit() LOG.warning('Updating SQL functions.') refresh.create_functions(conn, config) tokenizer = tokenizer_factory.get_tokenizer_for_db(config) tokenizer.update_sql_functions(config) properties.set_property(conn, 'database_version', 
str(NOMINATIM_VERSION)) conn.commit() return 0 def _guess_version(conn: Connection) -> NominatimVersion: """ Guess a database version when there is no property table yet. Only migrations for 3.6 and later are supported, so bail out when the version seems older. """ with conn.cursor() as cur: # In version 3.6, the country_name table was updated. Check for that. cnt = cur.scalar("""SELECT count(*) FROM (SELECT svals(name) FROM country_name WHERE country_code = 'gb')x; """) if cnt < 100: LOG.fatal('It looks like your database was imported with a version ' 'prior to 3.6.0. Automatic migration not possible.') raise UsageError('Migration not possible.') return NominatimVersion(3, 5, 0, 99) def _migration(major: int, minor: int, patch: int = 0, dbpatch: int = 0) -> Callable[[Callable[..., None]], Callable[..., None]]: """ Decorator for a single migration step. The parameters describe the version after which the migration is applicable, i.e before changing from the given version to the next, the migration is required. All migrations are run in the order in which they are defined in this file. Do not run global SQL scripts for migrations as you cannot be sure that these scripts do the same in later versions. Functions will always be reimported in full at the end of the migration process, so the migration functions may leave a temporary state behind there. """ def decorator(func: Callable[..., None]) -> Callable[..., None]: version = NominatimVersion(major, minor, patch, dbpatch) _MIGRATION_FUNCTIONS.append((version, func)) return func return decorator @_migration(3, 5, 0, 99) def import_status_timestamp_change(conn: Connection, **_: Any) -> None: """ Add timezone to timestamp in status table. The import_status table has been changed to include timezone information with the time stamp. 
""" with conn.cursor() as cur: cur.execute("""ALTER TABLE import_status ALTER COLUMN lastimportdate TYPE timestamp with time zone;""") @_migration(3, 5, 0, 99) def add_nominatim_property_table(conn: Connection, config: Configuration, **_: Any) -> None: """ Add nominatim_property table. """ if not conn.table_exists('nominatim_properties'): with conn.cursor() as cur: cur.execute(pysql.SQL("""CREATE TABLE nominatim_properties ( property TEXT, value TEXT); GRANT SELECT ON TABLE nominatim_properties TO {}; """).format(pysql.Identifier(config.DATABASE_WEBUSER))) @_migration(3, 6, 0, 0) def change_housenumber_transliteration(conn: Connection, **_: Any) -> None: """ Transliterate housenumbers. The database schema switched from saving raw housenumbers in placex.housenumber to saving transliterated ones. Note: the function create_housenumber_id() has been dropped in later versions. """ with conn.cursor() as cur: cur.execute("""CREATE OR REPLACE FUNCTION create_housenumber_id(housenumber TEXT) RETURNS TEXT AS $$ DECLARE normtext TEXT; BEGIN SELECT array_to_string(array_agg(trans), ';') INTO normtext FROM (SELECT lookup_word as trans, getorcreate_housenumber_id(lookup_word) FROM (SELECT make_standard_name(h) as lookup_word FROM regexp_split_to_table(housenumber, '[,;]') h) x) y; return normtext; END; $$ LANGUAGE plpgsql STABLE STRICT;""") cur.execute("DELETE FROM word WHERE class = 'place' and type = 'house'") cur.execute("""UPDATE placex SET housenumber = create_housenumber_id(housenumber) WHERE housenumber is not null""") @_migration(3, 7, 0, 0) def switch_placenode_geometry_index(conn: Connection, **_: Any) -> None: """ Replace idx_placex_geometry_reverse_placeNode index. Make the index slightly more permissive, so that it can also be used when matching up boundaries and place nodes. It makes the index idx_placex_adminname index unnecessary. 
""" with conn.cursor() as cur: cur.execute(""" CREATE INDEX IF NOT EXISTS idx_placex_geometry_placenode ON placex USING GIST (geometry) WHERE osm_type = 'N' and rank_search < 26 and class = 'place' and type != 'postcode' and linked_place_id is null""") cur.execute(""" DROP INDEX IF EXISTS idx_placex_adminname """) @_migration(3, 7, 0, 1) def install_legacy_tokenizer(conn: Connection, config: Configuration, **_: Any) -> None: """ Setup legacy tokenizer. If no other tokenizer has been configured yet, then create the configuration for the backwards-compatible legacy tokenizer """ if properties.get_property(conn, 'tokenizer') is None: with conn.cursor() as cur: for table in ('placex', 'location_property_osmline'): has_column = cur.scalar("""SELECT count(*) FROM information_schema.columns WHERE table_name = %s and column_name = 'token_info'""", (table, )) if has_column == 0: cur.execute(pysql.SQL('ALTER TABLE {} ADD COLUMN token_info JSONB') .format(pysql.Identifier(table))) tokenizer = tokenizer_factory.create_tokenizer(config, init_db=False, module_name='legacy') tokenizer.migrate_database(config) # type: ignore[attr-defined] @_migration(4, 0, 99, 0) def create_tiger_housenumber_index(conn: Connection, **_: Any) -> None: """ Create idx_location_property_tiger_parent_place_id with included house number. The inclusion is needed for efficient lookup of housenumbers in full address searches. """ if conn.server_version_tuple() >= (11, 0, 0): with conn.cursor() as cur: cur.execute(""" CREATE INDEX IF NOT EXISTS idx_location_property_tiger_housenumber_migrated ON location_property_tiger USING btree(parent_place_id) INCLUDE (startnumber, endnumber) """) @_migration(4, 0, 99, 1) def create_interpolation_index_on_place(conn: Connection, **_: Any) -> None: """ Create idx_place_interpolations for lookup of interpolation lines on updates. 
""" with conn.cursor() as cur: cur.execute("""CREATE INDEX IF NOT EXISTS idx_place_interpolations ON place USING gist(geometry) WHERE osm_type = 'W' and address ? 'interpolation'""") @_migration(4, 0, 99, 2) def add_step_column_for_interpolation(conn: Connection, **_: Any) -> None: """ Add a new column 'step' to the interpolations table. Also converts the data into the stricter format which requires that startnumbers comply with the odd/even requirements. """ if conn.table_has_column('location_property_osmline', 'step'): return with conn.cursor() as cur: # Mark invalid all interpolations with no intermediate numbers. cur.execute("""UPDATE location_property_osmline SET startnumber = null WHERE endnumber - startnumber <= 1 """) # Align the start numbers where odd/even does not match. cur.execute("""UPDATE location_property_osmline SET startnumber = startnumber + 1, linegeo = ST_LineSubString(linegeo, 1.0 / (endnumber - startnumber)::float, 1) WHERE (interpolationtype = 'odd' and startnumber % 2 = 0) or (interpolationtype = 'even' and startnumber % 2 = 1) """) # Mark invalid odd/even interpolations with no intermediate numbers. cur.execute("""UPDATE location_property_osmline SET startnumber = null WHERE interpolationtype in ('odd', 'even') and endnumber - startnumber = 2""") # Finally add the new column and populate it. cur.execute("ALTER TABLE location_property_osmline ADD COLUMN step SMALLINT") cur.execute("""UPDATE location_property_osmline SET step = CASE WHEN interpolationtype = 'all' THEN 1 ELSE 2 END """) @_migration(4, 0, 99, 3) def add_step_column_for_tiger(conn: Connection, **_: Any) -> None: """ Add a new column 'step' to the tiger data table. 
""" if conn.table_has_column('location_property_tiger', 'step'): return with conn.cursor() as cur: cur.execute("ALTER TABLE location_property_tiger ADD COLUMN step SMALLINT") cur.execute("""UPDATE location_property_tiger SET step = CASE WHEN interpolationtype = 'all' THEN 1 ELSE 2 END """) @_migration(4, 0, 99, 4) def add_derived_name_column_for_country_names(conn: Connection, **_: Any) -> None: """ Add a new column 'derived_name' which in the future takes the country names as imported from OSM data. """ if not conn.table_has_column('country_name', 'derived_name'): with conn.cursor() as cur: cur.execute("ALTER TABLE country_name ADD COLUMN derived_name public.HSTORE") @_migration(4, 0, 99, 5) def mark_internal_country_names(conn: Connection, config: Configuration, **_: Any) -> None: """ Names from the country table should be marked as internal to prevent them from being deleted. Only necessary for ICU tokenizer. """ import psycopg2.extras # pylint: disable=import-outside-toplevel tokenizer = tokenizer_factory.get_tokenizer_for_db(config) with tokenizer.name_analyzer() as analyzer: with conn.cursor() as cur: psycopg2.extras.register_hstore(cur) cur.execute("SELECT country_code, name FROM country_name") for country_code, names in cur: if not names: names = {} names['countrycode'] = country_code analyzer.add_country_names(country_code, names) @_migration(4, 1, 99, 0) def add_place_deletion_todo_table(conn: Connection, **_: Any) -> None: """ Add helper table for deleting data on updates. The table is only necessary when updates are possible, i.e. the database is not in freeze mode. """ if conn.table_exists('place'): with conn.cursor() as cur: cur.execute("""CREATE TABLE IF NOT EXISTS place_to_be_deleted ( osm_type CHAR(1), osm_id BIGINT, class TEXT, type TEXT, deferred BOOLEAN)""") @_migration(4, 1, 99, 1) def split_pending_index(conn: Connection, **_: Any) -> None: """ Reorganise indexes for pending updates. 
""" if conn.table_exists('place'): with conn.cursor() as cur: cur.execute("""CREATE INDEX IF NOT EXISTS idx_placex_rank_address_sector ON placex USING BTREE (rank_address, geometry_sector) WHERE indexed_status > 0""") cur.execute("""CREATE INDEX IF NOT EXISTS idx_placex_rank_boundaries_sector ON placex USING BTREE (rank_search, geometry_sector) WHERE class = 'boundary' and type = 'administrative' and indexed_status > 0""") cur.execute("DROP INDEX IF EXISTS idx_placex_pendingsector") @_migration(4, 2, 99, 0) def enable_forward_dependencies(conn: Connection, **_: Any) -> None: """ Create indexes for updates with forward dependency tracking (long-running). """ if conn.table_exists('planet_osm_ways'): with conn.cursor() as cur: cur.execute("""SELECT * FROM pg_indexes WHERE tablename = 'planet_osm_ways' and indexdef LIKE '%nodes%'""") if cur.rowcount == 0: cur.execute("""CREATE OR REPLACE FUNCTION public.planet_osm_index_bucket(bigint[]) RETURNS bigint[] LANGUAGE sql IMMUTABLE AS $function$ SELECT ARRAY(SELECT DISTINCT unnest($1) >> 5) $function$""") cur.execute("""CREATE INDEX planet_osm_ways_nodes_bucket_idx ON planet_osm_ways USING gin (planet_osm_index_bucket(nodes)) WITH (fastupdate=off)""") cur.execute("""CREATE INDEX planet_osm_rels_parts_idx ON planet_osm_rels USING gin (parts) WITH (fastupdate=off)""") cur.execute("ANALYZE planet_osm_ways") @_migration(4, 2, 99, 1) def add_improved_geometry_reverse_placenode_index(conn: Connection, **_: Any) -> None: """ Create improved index for reverse lookup of place nodes. """ with conn.cursor() as cur: cur.execute("""CREATE INDEX IF NOT EXISTS idx_placex_geometry_reverse_lookupPlaceNode ON placex USING gist (ST_Buffer(geometry, reverse_place_diameter(rank_search))) WHERE rank_address between 4 and 25 AND type != 'postcode' AND name is not null AND linked_place_id is null AND osm_type = 'N' """)
16,965
43.067532
98
py
Nominatim
Nominatim-master/nominatim/tools/add_osm_data.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Function to add additional OSM data from a file or the API into the database. """ from typing import Any, MutableMapping from pathlib import Path import logging import urllib from nominatim.db.connection import connect from nominatim.tools.exec_utils import run_osm2pgsql, get_url LOG = logging.getLogger() def _run_osm2pgsql(dsn: str, options: MutableMapping[str, Any]) -> None: run_osm2pgsql(options) # Handle deletions with connect(dsn) as conn: with conn.cursor() as cur: cur.execute('SELECT flush_deleted_places()') conn.commit() def add_data_from_file(dsn: str, fname: str, options: MutableMapping[str, Any]) -> int: """ Adds data from a OSM file to the database. The file may be a normal OSM file or a diff file in all formats supported by libosmium. """ options['import_file'] = Path(fname) options['append'] = True _run_osm2pgsql(dsn, options) # No status update. We don't know where the file came from. return 0 def add_osm_object(dsn: str, osm_type: str, osm_id: int, use_main_api: bool, options: MutableMapping[str, Any]) -> int: """ Add or update a single OSM object from the latest version of the API. """ if use_main_api: base_url = f'https://www.openstreetmap.org/api/0.6/{osm_type}/{osm_id}' if osm_type in ('way', 'relation'): base_url += '/full' else: # use Overpass API if osm_type == 'node': data = f'node({osm_id});out meta;' elif osm_type == 'way': data = f'(way({osm_id});>;);out meta;' else: data = f'(rel(id:{osm_id});>;);out meta;' base_url = 'https://overpass-api.de/api/interpreter?' \ + urllib.parse.urlencode({'data': data}) options['append'] = True options['import_data'] = get_url(base_url).encode('utf-8') _run_osm2pgsql(dsn, options) return 0
2,159
30.764706
87
py
Nominatim
Nominatim-master/nominatim/tools/admin.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Functions for database analysis and maintenance. """ from typing import Optional, Tuple, Any, cast import logging from psycopg2.extras import Json, register_hstore from nominatim.config import Configuration from nominatim.db.connection import connect, Cursor from nominatim.tokenizer import factory as tokenizer_factory from nominatim.errors import UsageError from nominatim.data.place_info import PlaceInfo from nominatim.typing import DictCursorResult LOG = logging.getLogger() def _get_place_info(cursor: Cursor, osm_id: Optional[str], place_id: Optional[int]) -> DictCursorResult: sql = """SELECT place_id, extra.* FROM placex, LATERAL placex_indexing_prepare(placex) as extra """ values: Tuple[Any, ...] if osm_id: osm_type = osm_id[0].upper() if osm_type not in 'NWR' or not osm_id[1:].isdigit(): LOG.fatal('OSM ID must be of form <N|W|R><id>. Got: %s', osm_id) raise UsageError("OSM ID parameter badly formatted") sql += ' WHERE placex.osm_type = %s AND placex.osm_id = %s' values = (osm_type, int(osm_id[1:])) elif place_id is not None: sql += ' WHERE placex.place_id = %s' values = (place_id, ) else: LOG.fatal("No OSM object given to index.") raise UsageError("OSM object not found") cursor.execute(sql + ' LIMIT 1', values) if cursor.rowcount < 1: LOG.fatal("OSM object %s not found in database.", osm_id) raise UsageError("OSM object not found") return cast(DictCursorResult, cursor.fetchone()) def analyse_indexing(config: Configuration, osm_id: Optional[str] = None, place_id: Optional[int] = None) -> None: """ Analyse indexing of a single Nominatim object. 
""" with connect(config.get_libpq_dsn()) as conn: register_hstore(conn) with conn.cursor() as cur: place = _get_place_info(cur, osm_id, place_id) cur.execute("update placex set indexed_status = 2 where place_id = %s", (place['place_id'], )) cur.execute("""SET auto_explain.log_min_duration = '0'; SET auto_explain.log_analyze = 'true'; SET auto_explain.log_nested_statements = 'true'; LOAD 'auto_explain'; SET client_min_messages = LOG; SET log_min_messages = FATAL""") tokenizer = tokenizer_factory.get_tokenizer_for_db(config) with tokenizer.name_analyzer() as analyzer: cur.execute("""UPDATE placex SET indexed_status = 0, address = %s, token_info = %s, name = %s, linked_place_id = %s WHERE place_id = %s""", (place['address'], Json(analyzer.process_place(PlaceInfo(place))), place['name'], place['linked_place_id'], place['place_id'])) # we do not want to keep the results conn.rollback() for msg in conn.notices: print(msg)
3,409
36.888889
89
py
Nominatim
Nominatim-master/nominatim/tools/check_database.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Collection of functions that check if the database is complete and functional. """ from typing import Callable, Optional, Any, Union, Tuple, Mapping, List from enum import Enum from textwrap import dedent from nominatim.config import Configuration from nominatim.db.connection import connect, Connection from nominatim.errors import UsageError from nominatim.tokenizer import factory as tokenizer_factory from nominatim.tools import freeze CHECKLIST = [] class CheckState(Enum): """ Possible states of a check. FATAL stops check execution entirely. """ OK = 0 FAIL = 1 FATAL = 2 NOT_APPLICABLE = 3 WARN = 4 CheckResult = Union[CheckState, Tuple[CheckState, Mapping[str, Any]]] CheckFunc = Callable[[Connection, Configuration], CheckResult] def _check(hint: Optional[str] = None) -> Callable[[CheckFunc], CheckFunc]: """ Decorator for checks. It adds the function to the list of checks to execute and adds the code for printing progress messages. """ def decorator(func: CheckFunc) -> CheckFunc: title = (func.__doc__ or '').split('\n', 1)[0].strip() def run_check(conn: Connection, config: Configuration) -> CheckState: print(title, end=' ... ') ret = func(conn, config) if isinstance(ret, tuple): ret, params = ret else: params = {} if ret == CheckState.OK: print('\033[92mOK\033[0m') elif ret == CheckState.WARN: print('\033[93mWARNING\033[0m') if hint: print('') print(dedent(hint.format(**params))) elif ret == CheckState.NOT_APPLICABLE: print('not applicable') else: print('\x1B[31mFailed\033[0m') if hint: print(dedent(hint.format(**params))) return ret CHECKLIST.append(run_check) return run_check return decorator class _BadConnection: def __init__(self, msg: str) -> None: self.msg = msg def close(self) -> None: """ Dummy function to provide the implementation. 
""" def check_database(config: Configuration) -> int: """ Run a number of checks on the database and return the status. """ try: conn = connect(config.get_libpq_dsn()).connection except UsageError as err: conn = _BadConnection(str(err)) # type: ignore[assignment] overall_result = 0 for check in CHECKLIST: ret = check(conn, config) if ret == CheckState.FATAL: conn.close() return 1 if ret in (CheckState.FATAL, CheckState.FAIL): overall_result = 1 conn.close() return overall_result def _get_indexes(conn: Connection) -> List[str]: indexes = ['idx_place_addressline_address_place_id', 'idx_placex_rank_search', 'idx_placex_rank_address', 'idx_placex_parent_place_id', 'idx_placex_geometry_reverse_lookuppolygon', 'idx_placex_geometry_placenode', 'idx_osmline_parent_place_id', 'idx_osmline_parent_osm_id', 'idx_postcode_id', 'idx_postcode_postcode' ] if conn.table_exists('search_name'): indexes.extend(('idx_search_name_nameaddress_vector', 'idx_search_name_name_vector', 'idx_search_name_centroid')) if conn.server_version_tuple() >= (11, 0, 0): indexes.extend(('idx_placex_housenumber', 'idx_osmline_parent_osm_id_with_hnr')) if conn.table_exists('place'): indexes.extend(('idx_location_area_country_place_id', 'idx_place_osm_unique', 'idx_placex_rank_address_sector', 'idx_placex_rank_boundaries_sector')) return indexes # CHECK FUNCTIONS # # Functions are exectured in the order they appear here. @_check(hint="""\ {error} Hints: * Is the database server started? 
* Check the NOMINATIM_DATABASE_DSN variable in your local .env * Try connecting to the database with the same settings Project directory: {config.project_dir} Current setting of NOMINATIM_DATABASE_DSN: {config.DATABASE_DSN} """) def check_connection(conn: Any, config: Configuration) -> CheckResult: """ Checking database connection """ if isinstance(conn, _BadConnection): return CheckState.FATAL, dict(error=conn.msg, config=config) return CheckState.OK @_check(hint="""\ placex table not found Hints: * Are you connecting to the right database? * Did the import process finish without errors? Project directory: {config.project_dir} Current setting of NOMINATIM_DATABASE_DSN: {config.DATABASE_DSN} """) def check_placex_table(conn: Connection, config: Configuration) -> CheckResult: """ Checking for placex table """ if conn.table_exists('placex'): return CheckState.OK return CheckState.FATAL, dict(config=config) @_check(hint="""placex table has no data. Did the import finish successfully?""") def check_placex_size(conn: Connection, _: Configuration) -> CheckResult: """ Checking for placex content """ with conn.cursor() as cur: cnt = cur.scalar('SELECT count(*) FROM (SELECT * FROM placex LIMIT 100) x') return CheckState.OK if cnt > 0 else CheckState.FATAL @_check(hint="""{msg}""") def check_tokenizer(_: Connection, config: Configuration) -> CheckResult: """ Checking that tokenizer works """ try: tokenizer = tokenizer_factory.get_tokenizer_for_db(config) except UsageError: return CheckState.FAIL, dict(msg="""\ Cannot load tokenizer. Did the import finish successfully?""") result = tokenizer.check_database(config) if result is None: return CheckState.OK return CheckState.FAIL, dict(msg=result) @_check(hint="""\ Wikipedia/Wikidata importance tables missing. Quality of search results may be degraded. Reverse geocoding is unaffected. 
See https://nominatim.org/release-docs/latest/admin/Import/#wikipediawikidata-rankings """) def check_existance_wikipedia(conn: Connection, _: Configuration) -> CheckResult: """ Checking for wikipedia/wikidata data """ if not conn.table_exists('search_name') or not conn.table_exists('place'): return CheckState.NOT_APPLICABLE with conn.cursor() as cur: cnt = cur.scalar('SELECT count(*) FROM wikipedia_article') return CheckState.WARN if cnt == 0 else CheckState.OK @_check(hint="""\ The indexing didn't finish. {count} entries are not yet indexed. To index the remaining entries, run: {index_cmd} """) def check_indexing(conn: Connection, _: Configuration) -> CheckResult: """ Checking indexing status """ with conn.cursor() as cur: cnt = cur.scalar('SELECT count(*) FROM placex WHERE indexed_status > 0') if cnt == 0: return CheckState.OK if freeze.is_frozen(conn): index_cmd="""\ Database is marked frozen, it cannot be updated. Low counts of unindexed places are fine.""" return CheckState.WARN, dict(count=cnt, index_cmd=index_cmd) if conn.index_exists('idx_placex_rank_search'): # Likely just an interrupted update. index_cmd = 'nominatim index' else: # Looks like the import process got interrupted. index_cmd = 'nominatim import --continue indexing' return CheckState.FAIL, dict(count=cnt, index_cmd=index_cmd) @_check(hint="""\ The following indexes are missing: {indexes} Rerun the index creation with: nominatim import --continue db-postprocess """) def check_database_indexes(conn: Connection, _: Configuration) -> CheckResult: """ Checking that database indexes are complete """ missing = [] for index in _get_indexes(conn): if not conn.index_exists(index): missing.append(index) if missing: return CheckState.FAIL, dict(indexes='\n '.join(missing)) return CheckState.OK @_check(hint="""\ At least one index is invalid. That can happen, e.g. when index creation was disrupted and later restarted. You should delete the affected indices and recreate them. 
Invalid indexes: {indexes} """) def check_database_index_valid(conn: Connection, _: Configuration) -> CheckResult: """ Checking that all database indexes are valid """ with conn.cursor() as cur: cur.execute(""" SELECT relname FROM pg_class, pg_index WHERE pg_index.indisvalid = false AND pg_index.indexrelid = pg_class.oid""") broken = [c[0] for c in cur] if broken: return CheckState.FAIL, dict(indexes='\n '.join(broken)) return CheckState.OK @_check(hint="""\ {error} Run TIGER import again: nominatim add-data --tiger-data <DIR> """) def check_tiger_table(conn: Connection, config: Configuration) -> CheckResult: """ Checking TIGER external data table. """ if not config.get_bool('USE_US_TIGER_DATA'): return CheckState.NOT_APPLICABLE if not conn.table_exists('location_property_tiger'): return CheckState.FAIL, dict(error='TIGER data table not found.') with conn.cursor() as cur: if cur.scalar('SELECT count(*) FROM location_property_tiger') == 0: return CheckState.FAIL, dict(error='TIGER data table is empty.') return CheckState.OK
10,211
32.481967
99
py
Nominatim
Nominatim-master/nominatim/tools/__init__.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Module with functions for importing, updating Nominatim databases as well as general maintenance helpers. """
321
28.272727
65
py
Nominatim
Nominatim-master/nominatim/tools/exec_utils.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Helper functions for executing external programs. """ from typing import Any, Union, Optional, Mapping, IO from pathlib import Path import logging import os import subprocess import urllib.request as urlrequest from urllib.parse import urlencode from nominatim.config import Configuration from nominatim.typing import StrPath from nominatim.version import NOMINATIM_VERSION from nominatim.db.connection import get_pg_env LOG = logging.getLogger() def run_legacy_script(script: StrPath, *args: Union[int, str], config: Configuration, throw_on_fail: bool = False) -> int: """ Run a Nominatim PHP script with the given arguments. Returns the exit code of the script. If `throw_on_fail` is True then throw a `CalledProcessError` on a non-zero exit. """ cmd = ['/usr/bin/env', 'php', '-Cq', str(config.lib_dir.php / 'admin' / script)] cmd.extend([str(a) for a in args]) env = config.get_os_env() env['NOMINATIM_DATADIR'] = str(config.lib_dir.data) env['NOMINATIM_SQLDIR'] = str(config.lib_dir.sql) env['NOMINATIM_CONFIGDIR'] = str(config.config_dir) env['NOMINATIM_DATABASE_MODULE_SRC_PATH'] = str(config.lib_dir.module) if not env['NOMINATIM_OSM2PGSQL_BINARY']: env['NOMINATIM_OSM2PGSQL_BINARY'] = str(config.lib_dir.osm2pgsql) proc = subprocess.run(cmd, cwd=str(config.project_dir), env=env, check=throw_on_fail) return proc.returncode def run_api_script(endpoint: str, project_dir: Path, extra_env: Optional[Mapping[str, str]] = None, phpcgi_bin: Optional[Path] = None, params: Optional[Mapping[str, Any]] = None) -> int: """ Execute a Nominatim API function. The function needs a project directory that contains the website directory with the scripts to be executed. The scripts will be run using php_cgi. Query parameters can be added as named arguments. 
Returns the exit code of the script. """ log = logging.getLogger() webdir = str(project_dir / 'website') query_string = urlencode(params or {}) env = dict(QUERY_STRING=query_string, SCRIPT_NAME=f'/{endpoint}.php', REQUEST_URI=f'/{endpoint}.php?{query_string}', CONTEXT_DOCUMENT_ROOT=webdir, SCRIPT_FILENAME=f'{webdir}/{endpoint}.php', HTTP_HOST='localhost', HTTP_USER_AGENT='nominatim-tool', REMOTE_ADDR='0.0.0.0', DOCUMENT_ROOT=webdir, REQUEST_METHOD='GET', SERVER_PROTOCOL='HTTP/1.1', GATEWAY_INTERFACE='CGI/1.1', REDIRECT_STATUS='CGI') if extra_env: env.update(extra_env) if phpcgi_bin is None: cmd = ['/usr/bin/env', 'php-cgi'] else: cmd = [str(phpcgi_bin)] proc = subprocess.run(cmd, cwd=str(project_dir), env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) if proc.returncode != 0 or proc.stderr: if proc.stderr: log.error(proc.stderr.decode('utf-8').replace('\\n', '\n')) else: log.error(proc.stdout.decode('utf-8').replace('\\n', '\n')) return proc.returncode or 1 result = proc.stdout.decode('utf-8') content_start = result.find('\r\n\r\n') print(result[content_start + 4:].replace('\\n', '\n')) return 0 def run_php_server(server_address: str, base_dir: StrPath) -> None: """ Run the built-in server from the given directory. """ subprocess.run(['/usr/bin/env', 'php', '-S', server_address], cwd=str(base_dir), check=True) def run_osm2pgsql(options: Mapping[str, Any]) -> None: """ Run osm2pgsql with the given options. 
""" env = get_pg_env(options['dsn']) cmd = [str(options['osm2pgsql']), '--hstore', '--latlon', '--slim', '--log-progress', 'true', '--number-processes', '1' if options['append'] else str(options['threads']), '--cache', str(options['osm2pgsql_cache']), '--style', str(options['osm2pgsql_style']) ] if str(options['osm2pgsql_style']).endswith('.lua'): env['LUA_PATH'] = ';'.join((str(options['osm2pgsql_style_path'] / '?.lua'), os.environ.get('LUAPATH', ';'))) cmd.extend(('--output', 'flex')) else: cmd.extend(('--output', 'gazetteer')) cmd.append('--append' if options['append'] else '--create') if options['flatnode_file']: cmd.extend(('--flat-nodes', options['flatnode_file'])) for key, param in (('slim_data', '--tablespace-slim-data'), ('slim_index', '--tablespace-slim-index'), ('main_data', '--tablespace-main-data'), ('main_index', '--tablespace-main-index')): if options['tablespaces'][key]: cmd.extend((param, options['tablespaces'][key])) if options.get('disable_jit', False): env['PGOPTIONS'] = '-c jit=off -c max_parallel_workers_per_gather=0' if 'import_data' in options: cmd.extend(('-r', 'xml', '-')) elif isinstance(options['import_file'], list): for fname in options['import_file']: cmd.append(str(fname)) else: cmd.append(str(options['import_file'])) subprocess.run(cmd, cwd=options.get('cwd', '.'), input=options.get('import_data'), env=env, check=True) def get_url(url: str) -> str: """ Get the contents from the given URL and return it as a UTF-8 string. """ headers = {"User-Agent": f"Nominatim/{NOMINATIM_VERSION!s}"} try: request = urlrequest.Request(url, headers=headers) with urlrequest.urlopen(request) as response: # type: IO[bytes] return response.read().decode('utf-8') except Exception: LOG.fatal('Failed to load URL: %s', url) raise
6,298
35.201149
87
py
Nominatim
Nominatim-master/nominatim/tools/collect_os_info.py
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of host system information including software versions, memory,
storage, and database configuration.
"""
import os
import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Tuple, Union, cast

import psutil
from psycopg2.extensions import make_dsn, parse_dsn

from nominatim.config import Configuration
from nominatim.db.connection import connect
from nominatim.typing import DictCursorResults
from nominatim.version import NOMINATIM_VERSION


def convert_version(ver_tup: Tuple[int, int]) -> str:
    """converts tuple version (ver_tup) to a string representation"""
    return ".".join(map(str, ver_tup))


def friendly_memory_string(mem: float) -> str:
    """Create a user friendly string for the amount of memory specified as mem"""
    mem_magnitude = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    mag = 0
    # determine order of magnitude, clamping to the largest known unit so
    # absurdly large inputs cannot index past the end of mem_magnitude
    while mem > 1000 and mag < len(mem_magnitude) - 1:
        mem /= 1000
        mag += 1

    return f"{mem:.1f} {mem_magnitude[mag]}"


def run_command(cmd: Union[str, List[str]]) -> str:
    """Runs a command (without a shell) and returns the output from stdout"""
    try:
        if sys.version_info < (3, 7):
            # capture_output was only added in Python 3.7
            cap_out = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
        else:
            cap_out = subprocess.run(cmd, capture_output=True, check=False)
        return cap_out.stdout.decode("utf-8")
    except FileNotFoundError:
        # non-Linux system should end up here
        return f"Unknown (unable to find the '{cmd}' command)"


def os_name_info() -> str:
    """Obtain Operating System Name (and possibly the version)"""
    os_info = None
    # man page os-release(5) details meaning of the fields
    if Path("/etc/os-release").is_file():
        os_info = from_file_find_line_portion(
            "/etc/os-release", "PRETTY_NAME", "=")
    # alternative location
    elif Path("/usr/lib/os-release").is_file():
        os_info = from_file_find_line_portion(
            "/usr/lib/os-release", "PRETTY_NAME", "="
        )

    # fallback on Python's os name
    if os_info is None or os_info == "":
        os_info = os.name

    # if the above is insufficient, take a look at neofetch's approach to OS detection
    return os_info


# Note: Intended to be used on informational files like /proc
def from_file_find_line_portion(
    filename: str, start: str, sep: str, fieldnum: int = 1
) -> Optional[str]:
    """open filename, finds the line starting with the 'start' string.
    Splits the line using seperator and returns a "fieldnum" from the split.
    The last matching line in the file wins; an empty string is returned
    when no line matches."""
    with open(filename, encoding='utf8') as file:
        result = ""
        for line in file:
            if line.startswith(start):
                result = line.split(sep)[fieldnum].strip()
    return result


def get_postgresql_config(version: int) -> str:
    """Retrieve postgres configuration file"""
    try:
        # the context manager closes the file; no explicit close() needed
        with open(f"/etc/postgresql/{version}/main/postgresql.conf", encoding='utf8') as file:
            return file.read()
    except IOError:
        return f"**Could not read '/etc/postgresql/{version}/main/postgresql.conf'**"


def report_system_information(config: Configuration) -> None:
    """Generate a report about the host system including software versions,
    memory, storage, and database configuration."""

    with connect(make_dsn(config.get_libpq_dsn(), dbname='postgres')) as conn:
        postgresql_ver: str = convert_version(conn.server_version_tuple())

        with conn.cursor() as cur:
            # Use a bound parameter rather than interpolating the database
            # name into the SQL text; robust against names containing quotes.
            cur.execute('SELECT datname FROM pg_catalog.pg_database WHERE datname = %s',
                        (parse_dsn(config.get_libpq_dsn())['dbname'], ))
            nominatim_db_exists = cast(Optional[DictCursorResults], cur.fetchall())

    if nominatim_db_exists:
        with connect(config.get_libpq_dsn()) as conn:
            postgis_ver: str = convert_version(conn.postgis_version_tuple())
    else:
        postgis_ver = "Unable to connect to database"

    postgresql_config: str = get_postgresql_config(int(float(postgresql_ver)))

    # Note: psutil.disk_partitions() is similar to run_command("lsblk")

    # Note: run_command("systemd-detect-virt") only works on Linux, on other OSes
    # should give a message: "Unknown (unable to find the 'systemd-detect-virt' command)"

    # Generates the Markdown report.
    report = f"""
    **Instructions**
    Use this information in your issue report at https://github.com/osm-search/Nominatim/issues
    Redirect the output to a file:
    $ ./collect_os_info.py > report.md


    **Software Environment:**
    - Python version: {sys.version}
    - Nominatim version: {NOMINATIM_VERSION!s}
    - PostgreSQL version: {postgresql_ver}
    - PostGIS version: {postgis_ver}
    - OS: {os_name_info()}


    **Hardware Configuration:**
    - RAM: {friendly_memory_string(psutil.virtual_memory().total)}
    - number of CPUs: {psutil.cpu_count(logical=False)}
    - bare metal/AWS/other cloud service (per systemd-detect-virt(1)):
    {run_command("systemd-detect-virt")}
    - type and size of disks:
    **`df -h` - df - report file system disk space usage: **
    ```
    {run_command(["df", "-h"])}
    ```
    **lsblk - list block devices: **
    ```
    {run_command("lsblk")}
    ```


    **Postgresql Configuration:**
    ```
    {postgresql_config}
    ```

    **Notes**
    Please add any notes about anything above that is incorrect.
    """

    print(report)
5,788
33.458333
108
py
Nominatim
Nominatim-master/nominatim/tools/freeze.py
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for removing unnecessary data from the database.
"""
from typing import Optional
from pathlib import Path

from psycopg2 import sql as pysql

from nominatim.db.connection import Connection

# Table-name patterns ('%' is the SQL LIKE wildcard) that are only needed
# for applying OSM replication updates, not for serving queries.
UPDATE_TABLES = [
    'address_levels',
    'gb_postcode',
    'import_osmosis_log',
    'import_polygon_%',
    'location_area%',
    'location_road%',
    'place',
    'planet_osm_%',
    'search_name_%',
    'us_postcode',
    'wikipedia_%'
]

def drop_update_tables(conn: Connection) -> None:
    """ Drop all tables only necessary for updating the database from
        OSM replication data.
    """
    like_clauses = [pysql.SQL("(tablename LIKE {})").format(pysql.Literal(pattern))
                    for pattern in UPDATE_TABLES]

    with conn.cursor() as cur:
        cur.execute(pysql.SQL("SELECT tablename FROM pg_tables WHERE ")
                    + pysql.SQL(' or ').join(like_clauses))
        doomed_tables = [row[0] for row in cur]

        for table_name in doomed_tables:
            cur.drop_table(table_name, cascade=True)

    conn.commit()


def drop_flatnode_file(fpath: Optional[Path]) -> None:
    """ Remove the flatnode file if it exists.
    """
    if fpath is None:
        return
    if fpath.exists():
        fpath.unlink()


def is_frozen(conn: Connection) -> bool:
    """ Returns true if database is in a frozen state
    """
    # A frozen database has had its 'place' table dropped.
    return conn.table_exists('place') is False
1,522
24.813559
94
py
Nominatim
Nominatim-master/nominatim/tools/replication.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Functions for updating a database from a replication source. """ from typing import ContextManager, MutableMapping, Any, Generator, cast, Iterator from contextlib import contextmanager import datetime as dt from enum import Enum import logging import time import types import urllib.request as urlrequest import requests from nominatim.db import status from nominatim.db.connection import Connection, connect from nominatim.tools.exec_utils import run_osm2pgsql from nominatim.errors import UsageError try: from osmium.replication.server import ReplicationServer from osmium import WriteHandler from osmium import version as pyo_version except ImportError as exc: logging.getLogger().critical("pyosmium not installed. Replication functions not available.\n" "To install pyosmium via pip: pip3 install osmium") raise UsageError("replication tools not available") from exc LOG = logging.getLogger() def init_replication(conn: Connection, base_url: str, socket_timeout: int = 60) -> None: """ Set up replication for the server at the given base URL. 
""" LOG.info("Using replication source: %s", base_url) date = status.compute_database_date(conn) # margin of error to make sure we get all data date -= dt.timedelta(hours=3) with _make_replication_server(base_url, socket_timeout) as repl: seq = repl.timestamp_to_sequence(date) if seq is None: LOG.fatal("Cannot reach the configured replication service '%s'.\n" "Does the URL point to a directory containing OSM update data?", base_url) raise UsageError("Failed to reach replication service") status.set_status(conn, date=date, seq=seq) LOG.warning("Updates initialised at sequence %s (%s)", seq, date) def check_for_updates(conn: Connection, base_url: str, socket_timeout: int = 60) -> int: """ Check if new data is available from the replication service at the given base URL. """ _, seq, _ = status.get_status(conn) if seq is None: LOG.error("Replication not set up. " "Please run 'nominatim replication --init' first.") return 254 with _make_replication_server(base_url, socket_timeout) as repl: state = repl.get_state_info() if state is None: LOG.error("Cannot get state for URL %s.", base_url) return 253 if state.sequence <= seq: LOG.warning("Database is up to date.") return 2 LOG.warning("New data available (%i => %i).", seq, state.sequence) return 0 class UpdateState(Enum): """ Possible states after an update has run. """ UP_TO_DATE = 0 MORE_PENDING = 2 NO_CHANGES = 3 def update(dsn: str, options: MutableMapping[str, Any], socket_timeout: int = 60) -> UpdateState: """ Update database from the next batch of data. Returns the state of updates according to `UpdateState`. """ with connect(dsn) as conn: startdate, startseq, indexed = status.get_status(conn) conn.commit() if startseq is None: LOG.error("Replication not set up. " "Please run 'nominatim replication --init' first.") raise UsageError("Replication not set up.") assert startdate is not None if not indexed and options['indexed_only']: LOG.info("Skipping update. 
There is data that needs indexing.") return UpdateState.MORE_PENDING last_since_update = dt.datetime.now(dt.timezone.utc) - startdate update_interval = dt.timedelta(seconds=options['update_interval']) if last_since_update < update_interval: duration = (update_interval - last_since_update).seconds LOG.warning("Sleeping for %s sec before next update.", duration) time.sleep(duration) if options['import_file'].exists(): options['import_file'].unlink() # Read updates into file. with _make_replication_server(options['base_url'], socket_timeout) as repl: outhandler = WriteHandler(str(options['import_file'])) endseq = repl.apply_diffs(outhandler, startseq + 1, max_size=options['max_diff_size'] * 1024) outhandler.close() if endseq is None: return UpdateState.NO_CHANGES with connect(dsn) as conn: run_osm2pgsql_updates(conn, options) # Write the current status to the file endstate = repl.get_state_info(endseq) status.set_status(conn, endstate.timestamp if endstate else None, seq=endseq, indexed=False) conn.commit() return UpdateState.UP_TO_DATE def run_osm2pgsql_updates(conn: Connection, options: MutableMapping[str, Any]) -> None: """ Run osm2pgsql in append mode. """ # Remove any stale deletion marks. with conn.cursor() as cur: cur.execute('TRUNCATE place_to_be_deleted') conn.commit() # Consume updates with osm2pgsql. options['append'] = True options['disable_jit'] = conn.server_version_tuple() >= (11, 0) run_osm2pgsql(options) # Handle deletions with conn.cursor() as cur: cur.execute('SELECT flush_deleted_places()') conn.commit() def _make_replication_server(url: str, timeout: int) -> ContextManager[ReplicationServer]: """ Returns a ReplicationServer in form of a context manager. Creates a light wrapper around older versions of pyosmium that did not support the context manager interface. """ if hasattr(ReplicationServer, '__enter__'): # Patches the open_url function for pyosmium >= 3.2 # where the socket timeout is no longer respected. 
def patched_open_url(self: ReplicationServer, url: urlrequest.Request) -> Any: """ Download a resource from the given URL and return a byte sequence of the content. """ headers = {"User-Agent" : f"Nominatim (pyosmium/{pyo_version.pyosmium_release})"} if self.session is not None: return self.session.get(url.get_full_url(), headers=headers, timeout=timeout or None, stream=True) @contextmanager def _get_url_with_session() -> Iterator[requests.Response]: with requests.Session() as session: request = session.get(url.get_full_url(), headers=headers, timeout=timeout or None, stream=True) yield request return _get_url_with_session() repl = ReplicationServer(url) setattr(repl, 'open_url', types.MethodType(patched_open_url, repl)) return cast(ContextManager[ReplicationServer], repl) @contextmanager def get_cm() -> Generator[ReplicationServer, None, None]: yield ReplicationServer(url) return get_cm()
7,279
34.339806
97
py
Nominatim
Nominatim-master/nominatim/tools/special_phrases/importer_statistics.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Contains the class which handles statistics for the import of special phrases. """ import logging LOG = logging.getLogger() class SpecialPhrasesImporterStatistics(): """ Class handling statistics of the import process of special phrases. """ def __init__(self) -> None: self._intialize_values() def _intialize_values(self) -> None: """ Set all counts for the global import to 0. """ self.tables_created = 0 self.tables_deleted = 0 self.tables_ignored = 0 self.invalids = 0 def notify_one_phrase_invalid(self) -> None: """ Add +1 to the count of invalid entries fetched from the wiki. """ self.invalids += 1 def notify_one_table_created(self) -> None: """ Add +1 to the count of created tables. """ self.tables_created += 1 def notify_one_table_deleted(self) -> None: """ Add +1 to the count of deleted tables. """ self.tables_deleted += 1 def notify_one_table_ignored(self) -> None: """ Add +1 to the count of ignored tables. """ self.tables_ignored += 1 def notify_import_done(self) -> None: """ Print stats for the whole import process and reset all values. """ LOG.info('====================================================================') LOG.info('Final statistics of the import:') LOG.info('- %s phrases were invalid.', self.invalids) if self.invalids > 0: LOG.info(' Those invalid phrases have been skipped.') LOG.info('- %s tables were ignored as they already exist on the database', self.tables_ignored) LOG.info('- %s tables were created', self.tables_created) LOG.info('- %s tables were deleted from the database', self.tables_deleted) if self.tables_deleted > 0: LOG.info(' They were deleted as they are not valid anymore.') if self.invalids > 0: LOG.warning('%s phrases were invalid and have been skipped during the whole process.', self.invalids) self._intialize_values()
2,503
30.696203
98
py
Nominatim
Nominatim-master/nominatim/tools/special_phrases/sp_wiki_loader.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Module containing the SPWikiLoader class. """ from typing import Iterable import re import logging from nominatim.config import Configuration from nominatim.tools.special_phrases.special_phrase import SpecialPhrase from nominatim.tools.exec_utils import get_url LOG = logging.getLogger() def _get_wiki_content(lang: str) -> str: """ Request and return the wiki page's content corresponding to special phrases for a given lang. Requested URL Example : https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/EN """ url = 'https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/' \ + lang.upper() return get_url(url) class SPWikiLoader: """ Handles loading of special phrases from the wiki. """ def __init__(self, config: Configuration) -> None: self.config = config # Compile the regex here to increase performances. self.occurence_pattern = re.compile( r'\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([\-YN])' ) # Hack around a bug where building=yes was imported with quotes into the wiki self.type_fix_pattern = re.compile(r'\"|&quot;') self.languages = self.config.get_str_list('LANGUAGES') or \ ['af', 'ar', 'br', 'ca', 'cs', 'de', 'en', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'gl', 'hr', 'hu', 'ia', 'is', 'it', 'ja', 'mk', 'nl', 'no', 'pl', 'ps', 'pt', 'ru', 'sk', 'sl', 'sv', 'uk', 'vi', 'lv', 'tr'] def generate_phrases(self) -> Iterable[SpecialPhrase]: """ Download the wiki pages for the configured languages and extract the phrases from the page. 
""" for lang in self.languages: LOG.warning('Importing phrases for lang: %s...', lang) loaded_xml = _get_wiki_content(lang) # One match will be of format [label, class, type, operator, plural] matches = self.occurence_pattern.findall(loaded_xml) for match in matches: yield SpecialPhrase(match[0], match[1], self.type_fix_pattern.sub('', match[2]), match[3])
2,608
36.811594
91
py
Nominatim
Nominatim-master/nominatim/tools/special_phrases/sp_importer.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Module containing the class handling the import of the special phrases. Phrases are analyzed and imported into the database. The phrases already present in the database which are not valids anymore are removed. """ from typing import Iterable, Tuple, Mapping, Sequence, Optional, Set import logging import re from psycopg2.sql import Identifier, SQL from nominatim.config import Configuration from nominatim.db.connection import Connection from nominatim.tools.special_phrases.importer_statistics import SpecialPhrasesImporterStatistics from nominatim.tools.special_phrases.special_phrase import SpecialPhrase from nominatim.tokenizer.base import AbstractTokenizer from nominatim.typing import Protocol LOG = logging.getLogger() def _classtype_table(phrase_class: str, phrase_type: str) -> str: """ Return the name of the table for the given class and type. """ return f'place_classtype_{phrase_class}_{phrase_type}' class SpecialPhraseLoader(Protocol): """ Protocol for classes implementing a loader for special phrases. """ def generate_phrases(self) -> Iterable[SpecialPhrase]: """ Generates all special phrase terms this loader can produce. """ class SPImporter(): # pylint: disable-msg=too-many-instance-attributes """ Class handling the process of special phrases importation into the database. Take a sp loader which load the phrases from an external source. """ def __init__(self, config: Configuration, conn: Connection, sp_loader: SpecialPhraseLoader) -> None: self.config = config self.db_connection = conn self.sp_loader = sp_loader self.statistics_handler = SpecialPhrasesImporterStatistics() self.black_list, self.white_list = self._load_white_and_black_lists() self.sanity_check_pattern = re.compile(r'^\w+$') # This set will contain all existing phrases to be added. 
# It contains tuples with the following format: (label, class, type, operator) self.word_phrases: Set[Tuple[str, str, str, str]] = set() # This set will contain all existing place_classtype tables which doesn't match any # special phrases class/type on the wiki. self.table_phrases_to_delete: Set[str] = set() def import_phrases(self, tokenizer: AbstractTokenizer, should_replace: bool) -> None: """ Iterate through all SpecialPhrases extracted from the loader and import them into the database. If should_replace is set to True only the loaded phrases will be kept into the database. All other phrases already in the database will be removed. """ LOG.warning('Special phrases importation starting') self._fetch_existing_place_classtype_tables() # Store pairs of class/type for further processing class_type_pairs = set() for phrase in self.sp_loader.generate_phrases(): result = self._process_phrase(phrase) if result: class_type_pairs.add(result) self._create_classtype_table_and_indexes(class_type_pairs) if should_replace: self._remove_non_existent_tables_from_db() self.db_connection.commit() with tokenizer.name_analyzer() as analyzer: analyzer.update_special_phrases(self.word_phrases, should_replace) LOG.warning('Import done.') self.statistics_handler.notify_import_done() def _fetch_existing_place_classtype_tables(self) -> None: """ Fetch existing place_classtype tables. Fill the table_phrases_to_delete set of the class. """ query = """ SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_name like 'place_classtype_%'; """ with self.db_connection.cursor() as db_cursor: db_cursor.execute(SQL(query)) for row in db_cursor: self.table_phrases_to_delete.add(row[0]) def _load_white_and_black_lists(self) \ -> Tuple[Mapping[str, Sequence[str]], Mapping[str, Sequence[str]]]: """ Load white and black lists from phrases-settings.json. 
""" settings = self.config.load_sub_configuration('phrase-settings.json') return settings['blackList'], settings['whiteList'] def _check_sanity(self, phrase: SpecialPhrase) -> bool: """ Check sanity of given inputs in case somebody added garbage in the wiki. If a bad class/type is detected the system will exit with an error. """ class_matchs = self.sanity_check_pattern.findall(phrase.p_class) type_matchs = self.sanity_check_pattern.findall(phrase.p_type) if not class_matchs or not type_matchs: LOG.warning("Bad class/type: %s=%s. It will not be imported", phrase.p_class, phrase.p_type) return False return True def _process_phrase(self, phrase: SpecialPhrase) -> Optional[Tuple[str, str]]: """ Processes the given phrase by checking black and white list and sanity. Return the class/type pair corresponding to the phrase. """ # blacklisting: disallow certain class/type combinations if phrase.p_class in self.black_list.keys() \ and phrase.p_type in self.black_list[phrase.p_class]: return None # whitelisting: if class is in whitelist, allow only tags in the list if phrase.p_class in self.white_list.keys() \ and phrase.p_type not in self.white_list[phrase.p_class]: return None # sanity check, in case somebody added garbage in the wiki if not self._check_sanity(phrase): self.statistics_handler.notify_one_phrase_invalid() return None self.word_phrases.add((phrase.p_label, phrase.p_class, phrase.p_type, phrase.p_operator)) return (phrase.p_class, phrase.p_type) def _create_classtype_table_and_indexes(self, class_type_pairs: Iterable[Tuple[str, str]]) -> None: """ Create table place_classtype for each given pair. Also create indexes on place_id and centroid. 
""" LOG.warning('Create tables and indexes...') sql_tablespace = self.config.TABLESPACE_AUX_DATA if sql_tablespace: sql_tablespace = ' TABLESPACE ' + sql_tablespace with self.db_connection.cursor() as db_cursor: db_cursor.execute("CREATE INDEX idx_placex_classtype ON placex (class, type)") for pair in class_type_pairs: phrase_class = pair[0] phrase_type = pair[1] table_name = _classtype_table(phrase_class, phrase_type) if table_name in self.table_phrases_to_delete: self.statistics_handler.notify_one_table_ignored() # Remove this table from the ones to delete as it match a # class/type still existing on the special phrases of the wiki. self.table_phrases_to_delete.remove(table_name) # So don't need to create the table and indexes. continue # Table creation self._create_place_classtype_table(sql_tablespace, phrase_class, phrase_type) # Indexes creation self._create_place_classtype_indexes(sql_tablespace, phrase_class, phrase_type) # Grant access on read to the web user. self._grant_access_to_webuser(phrase_class, phrase_type) self.statistics_handler.notify_one_table_created() with self.db_connection.cursor() as db_cursor: db_cursor.execute("DROP INDEX idx_placex_classtype") def _create_place_classtype_table(self, sql_tablespace: str, phrase_class: str, phrase_type: str) -> None: """ Create table place_classtype of the given phrase_class/phrase_type if doesn't exit. """ table_name = _classtype_table(phrase_class, phrase_type) with self.db_connection.cursor() as cur: cur.execute(SQL("""CREATE TABLE IF NOT EXISTS {} {} AS SELECT place_id AS place_id, st_centroid(geometry) AS centroid FROM placex WHERE class = %s AND type = %s """).format(Identifier(table_name), SQL(sql_tablespace)), (phrase_class, phrase_type)) def _create_place_classtype_indexes(self, sql_tablespace: str, phrase_class: str, phrase_type: str) -> None: """ Create indexes on centroid and place_id for the place_classtype table. 
""" index_prefix = f'idx_place_classtype_{phrase_class}_{phrase_type}_' base_table = _classtype_table(phrase_class, phrase_type) # Index on centroid if not self.db_connection.index_exists(index_prefix + 'centroid'): with self.db_connection.cursor() as db_cursor: db_cursor.execute(SQL("CREATE INDEX {} ON {} USING GIST (centroid) {}") .format(Identifier(index_prefix + 'centroid'), Identifier(base_table), SQL(sql_tablespace))) # Index on place_id if not self.db_connection.index_exists(index_prefix + 'place_id'): with self.db_connection.cursor() as db_cursor: db_cursor.execute(SQL("CREATE INDEX {} ON {} USING btree(place_id) {}") .format(Identifier(index_prefix + 'place_id'), Identifier(base_table), SQL(sql_tablespace))) def _grant_access_to_webuser(self, phrase_class: str, phrase_type: str) -> None: """ Grant access on read to the table place_classtype for the webuser. """ table_name = _classtype_table(phrase_class, phrase_type) with self.db_connection.cursor() as db_cursor: db_cursor.execute(SQL("""GRANT SELECT ON {} TO {}""") .format(Identifier(table_name), Identifier(self.config.DATABASE_WEBUSER))) def _remove_non_existent_tables_from_db(self) -> None: """ Remove special phrases which doesn't exist on the wiki anymore. Delete the place_classtype tables. """ LOG.warning('Cleaning database...') # Delete place_classtype tables corresponding to class/type which # are not on the wiki anymore. with self.db_connection.cursor() as db_cursor: for table in self.table_phrases_to_delete: self.statistics_handler.notify_one_table_deleted() db_cursor.drop_table(table)
11,457
40.665455
97
py
Nominatim
Nominatim-master/nominatim/tools/special_phrases/special_phrase.py
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
    Module containing the class SpecialPhrase.

    This class is a model used to transfer a special phrase through
    the process of load and importation.
"""
from typing import Any

class SpecialPhrase:
    """ Model representing a special phrase.

        All fields are stripped of surrounding whitespace on construction.
        The operator is normalised to 'near', 'in' or '-' (no operator).
        Instances are hashable and compare equal field-by-field, so they
        can be collected in sets without duplicates.
    """
    def __init__(self, p_label: str, p_class: str, p_type: str, p_operator: str) -> None:
        self.p_label = p_label.strip()
        self.p_class = p_class.strip()
        self.p_type = p_type.strip()
        # Needed if some operator in the wiki are not written in english
        p_operator = p_operator.strip().lower()
        self.p_operator = '-' if p_operator not in ('near', 'in') else p_operator

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, SpecialPhrase):
            # Return NotImplemented (rather than False) so Python can try
            # the reflected comparison of the other type. Comparison with
            # a foreign type still evaluates to False in the end.
            return NotImplemented

        return self.p_label == other.p_label \
               and self.p_class == other.p_class \
               and self.p_type == other.p_type \
               and self.p_operator == other.p_operator

    def __hash__(self) -> int:
        # Must stay consistent with __eq__: hash over exactly the
        # fields that take part in the equality check.
        return hash((self.p_label, self.p_class, self.p_type, self.p_operator))
1,318
33.710526
89
py
Nominatim
Nominatim-master/nominatim/tools/special_phrases/sp_csv_loader.py
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
    Module containing the SPCsvLoader class.

    The class allows to load phrases from a csv file.
"""
from typing import Iterable
import csv
import os

from nominatim.tools.special_phrases.special_phrase import SpecialPhrase
from nominatim.errors import UsageError

class SPCsvLoader:
    """ Handles loading of special phrases from external csv file.

        The file must have the columns 'phrase', 'class', 'type'
        and 'operator'.
    """
    def __init__(self, csv_path: str) -> None:
        self.csv_path = csv_path

    def generate_phrases(self) -> Iterable[SpecialPhrase]:
        """ Open and parse the given csv file.
            Create the corresponding SpecialPhrases.

            Raises a UsageError when the file does not have a
            '.csv' extension.
        """
        self._check_csv_validity()

        # newline='' is required by the csv module so that newlines
        # embedded inside quoted fields are parsed correctly.
        with open(self.csv_path, newline='', encoding='utf-8') as fd:
            reader = csv.DictReader(fd, delimiter=',')
            for row in reader:
                yield SpecialPhrase(row['phrase'], row['class'], row['type'], row['operator'])

    def _check_csv_validity(self) -> None:
        """ Check that the csv file has the right extension.
        """
        _, extension = os.path.splitext(self.csv_path)

        if extension != '.csv':
            raise UsageError(f'The file {self.csv_path} is not a csv file.')
1,403
29.521739
94
py
Nominatim
Nominatim-master/nominatim/tools/special_phrases/__init__.py
0
0
0
py
Nominatim
Nominatim-master/nominatim/db/status.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Access and helper functions for the status and status log table. """ from typing import Optional, Tuple, cast import datetime as dt import logging import re from nominatim.db.connection import Connection from nominatim.tools.exec_utils import get_url from nominatim.errors import UsageError from nominatim.typing import TypedDict LOG = logging.getLogger() ISODATE_FORMAT = '%Y-%m-%dT%H:%M:%S' class StatusRow(TypedDict): """ Dictionary of columns of the import_status table. """ lastimportdate: dt.datetime sequence_id: Optional[int] indexed: Optional[bool] def compute_database_date(conn: Connection) -> dt.datetime: """ Determine the date of the database from the newest object in the data base. """ # First, find the node with the highest ID in the database with conn.cursor() as cur: if conn.table_exists('place'): osmid = cur.scalar("SELECT max(osm_id) FROM place WHERE osm_type='N'") else: osmid = cur.scalar("SELECT max(osm_id) FROM placex WHERE osm_type='N'") if osmid is None: LOG.fatal("No data found in the database.") raise UsageError("No data found in the database.") LOG.info("Using node id %d for timestamp lookup", osmid) # Get the node from the API to find the timestamp when it was created. 
node_url = f'https://www.openstreetmap.org/api/0.6/node/{osmid}/1' data = get_url(node_url) match = re.search(r'timestamp="((\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}))Z"', data) if match is None: LOG.fatal("The node data downloaded from the API does not contain valid data.\n" "URL used: %s", node_url) raise UsageError("Bad API data.") LOG.debug("Found timestamp %s", match.group(1)) return dt.datetime.strptime(match.group(1), ISODATE_FORMAT).replace(tzinfo=dt.timezone.utc) def set_status(conn: Connection, date: Optional[dt.datetime], seq: Optional[int] = None, indexed: bool = True) -> None: """ Replace the current status with the given status. If date is `None` then only sequence and indexed will be updated as given. Otherwise the whole status is replaced. The change will be committed to the database. """ assert date is None or date.tzinfo == dt.timezone.utc with conn.cursor() as cur: if date is None: cur.execute("UPDATE import_status set sequence_id = %s, indexed = %s", (seq, indexed)) else: cur.execute("TRUNCATE TABLE import_status") cur.execute("""INSERT INTO import_status (lastimportdate, sequence_id, indexed) VALUES (%s, %s, %s)""", (date, seq, indexed)) conn.commit() def get_status(conn: Connection) -> Tuple[Optional[dt.datetime], Optional[int], Optional[bool]]: """ Return the current status as a triple of (date, sequence, indexed). If status has not been set up yet, a triple of None is returned. """ with conn.cursor() as cur: cur.execute("SELECT * FROM import_status LIMIT 1") if cur.rowcount < 1: return None, None, None row = cast(StatusRow, cur.fetchone()) return row['lastimportdate'], row['sequence_id'], row['indexed'] def set_indexed(conn: Connection, state: bool) -> None: """ Set the indexed flag in the status table to the given state. 
""" with conn.cursor() as cur: cur.execute("UPDATE import_status SET indexed = %s", (state, )) conn.commit() def log_status(conn: Connection, start: dt.datetime, event: str, batchsize: Optional[int] = None) -> None: """ Write a new status line to the `import_osmosis_log` table. """ with conn.cursor() as cur: cur.execute("""INSERT INTO import_osmosis_log (batchend, batchseq, batchsize, starttime, endtime, event) SELECT lastimportdate, sequence_id, %s, %s, now(), %s FROM import_status""", (batchsize, start, event)) conn.commit()
4,293
36.33913
99
py
Nominatim
Nominatim-master/nominatim/db/sqlalchemy_functions.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Custom functions and expressions for SQLAlchemy.
"""

import sqlalchemy as sa

def select_index_placex_geometry_reverse_lookuppolygon(table: str) -> 'sa.TextClause':
    """ Create an expression with the necessary conditions over a placex
        table that the index 'idx_placex_geometry_reverse_lookupPolygon'
        can be used.
    """
    # Assemble the conjunction from its individual conditions. The joined
    # SQL text is the same as writing the clause out in one literal.
    conditions = (
        f"ST_GeometryType({table}.geometry) in ('ST_Polygon', 'ST_MultiPolygon')",
        f"{table}.rank_address between 4 and 25",
        f"{table}.type != 'postcode'",
        f"{table}.name is not null",
        f"{table}.indexed_status = 0",
        f"{table}.linked_place_id is null",
    )

    return sa.text(" AND ".join(conditions))


def select_index_placex_geometry_reverse_lookupplacenode(table: str) -> 'sa.TextClause':
    """ Create an expression with the necessary conditions over a placex
        table that the index 'idx_placex_geometry_reverse_lookupPlaceNode'
        can be used.
    """
    conditions = (
        f"{table}.rank_address between 4 and 25",
        f"{table}.type != 'postcode'",
        f"{table}.name is not null",
        f"{table}.linked_place_id is null",
        f"{table}.osm_type = 'N'",
    )

    return sa.text(" AND ".join(conditions))
1,477
41.228571
92
py
Nominatim
Nominatim-master/nominatim/db/sql_preprocessor.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Preprocessing of SQL files. """ from typing import Set, Dict, Any import jinja2 from nominatim.db.connection import Connection from nominatim.db.async_connection import WorkerPool from nominatim.config import Configuration def _get_partitions(conn: Connection) -> Set[int]: """ Get the set of partitions currently in use. """ with conn.cursor() as cur: cur.execute('SELECT DISTINCT partition FROM country_name') partitions = set([0]) for row in cur: partitions.add(row[0]) return partitions def _get_tables(conn: Connection) -> Set[str]: """ Return the set of tables currently in use. Only includes non-partitioned """ with conn.cursor() as cur: cur.execute("SELECT tablename FROM pg_tables WHERE schemaname = 'public'") return set((row[0] for row in list(cur))) def _setup_tablespace_sql(config: Configuration) -> Dict[str, str]: """ Returns a dict with tablespace expressions for the different tablespace kinds depending on whether a tablespace is configured or not. """ out = {} for subset in ('ADDRESS', 'SEARCH', 'AUX'): for kind in ('DATA', 'INDEX'): tspace = getattr(config, f'TABLESPACE_{subset}_{kind}') if tspace: tspace = f'TABLESPACE "{tspace}"' out[f'{subset.lower()}_{kind.lower()}'] = tspace return out def _setup_postgresql_features(conn: Connection) -> Dict[str, Any]: """ Set up a dictionary with various optional Postgresql/Postgis features that depend on the database version. """ pg_version = conn.server_version_tuple() postgis_version = conn.postgis_version_tuple() pg11plus = pg_version >= (11, 0, 0) ps3 = postgis_version >= (3, 0) return { 'has_index_non_key_column': pg11plus, 'spgist_geom' : 'SPGIST' if pg11plus and ps3 else 'GIST' } class SQLPreprocessor: """ A environment for preprocessing SQL files from the lib-sql directory. 
The preprocessor provides a number of default filters and variables. The variables may be overwritten when rendering an SQL file. The preprocessing is currently based on the jinja2 templating library and follows its syntax. """ def __init__(self, conn: Connection, config: Configuration) -> None: self.env = jinja2.Environment(autoescape=False, loader=jinja2.FileSystemLoader(str(config.lib_dir.sql))) db_info: Dict[str, Any] = {} db_info['partitions'] = _get_partitions(conn) db_info['tables'] = _get_tables(conn) db_info['reverse_only'] = 'search_name' not in db_info['tables'] db_info['tablespace'] = _setup_tablespace_sql(config) self.env.globals['config'] = config self.env.globals['db'] = db_info self.env.globals['postgres'] = _setup_postgresql_features(conn) def run_sql_file(self, conn: Connection, name: str, **kwargs: Any) -> None: """ Execute the given SQL file on the connection. The keyword arguments may supply additional parameters for preprocessing. """ sql = self.env.get_template(name).render(**kwargs) with conn.cursor() as cur: cur.execute(sql) conn.commit() def run_parallel_sql_file(self, dsn: str, name: str, num_threads: int = 1, **kwargs: Any) -> None: """ Execure the given SQL files using parallel asynchronous connections. The keyword arguments may supply additional parameters for preprocessing. After preprocessing the SQL code is cut at lines containing only '---'. Each chunk is sent to one of the `num_threads` workers. """ sql = self.env.get_template(name).render(**kwargs) parts = sql.split('\n---\n') with WorkerPool(dsn, num_threads) as pool: for part in parts: pool.next_free_worker().perform(part)
4,246
34.391667
94
py
Nominatim
Nominatim-master/nominatim/db/connection.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Specialised connection and cursor functions. """ from typing import Optional, Any, Callable, ContextManager, Dict, cast, overload, Tuple, Iterable import contextlib import logging import os import psycopg2 import psycopg2.extensions import psycopg2.extras from psycopg2 import sql as pysql from nominatim.typing import SysEnv, Query, T_cursor from nominatim.errors import UsageError LOG = logging.getLogger() class Cursor(psycopg2.extras.DictCursor): """ A cursor returning dict-like objects and providing specialised execution functions. """ # pylint: disable=arguments-renamed,arguments-differ def execute(self, query: Query, args: Any = None) -> None: """ Query execution that logs the SQL query when debugging is enabled. """ if LOG.isEnabledFor(logging.DEBUG): LOG.debug(self.mogrify(query, args).decode('utf-8')) # type: ignore[no-untyped-call] super().execute(query, args) def execute_values(self, sql: Query, argslist: Iterable[Tuple[Any, ...]], template: Optional[Query] = None) -> None: """ Wrapper for the psycopg2 convenience function to execute SQL for a list of values. """ LOG.debug("SQL execute_values(%s, %s)", sql, argslist) psycopg2.extras.execute_values(self, sql, argslist, template=template) def scalar(self, sql: Query, args: Any = None) -> Any: """ Execute query that returns a single value. The value is returned. If the query yields more than one row, a ValueError is raised. """ self.execute(sql, args) if self.rowcount != 1: raise RuntimeError("Query did not return a single row.") result = self.fetchone() assert result is not None return result[0] def drop_table(self, name: str, if_exists: bool = True, cascade: bool = False) -> None: """ Drop the table with the given name. 
Set `if_exists` to False if a non-existent table should raise an exception instead of just being ignored. If 'cascade' is set to True then all dependent tables are deleted as well. """ sql = 'DROP TABLE ' if if_exists: sql += 'IF EXISTS ' sql += '{}' if cascade: sql += ' CASCADE' self.execute(pysql.SQL(sql).format(pysql.Identifier(name))) class Connection(psycopg2.extensions.connection): """ A connection that provides the specialised cursor by default and adds convenience functions for administrating the database. """ @overload # type: ignore[override] def cursor(self) -> Cursor: ... @overload def cursor(self, name: str) -> Cursor: ... @overload def cursor(self, cursor_factory: Callable[..., T_cursor]) -> T_cursor: ... def cursor(self, cursor_factory = Cursor, **kwargs): # type: ignore """ Return a new cursor. By default the specialised cursor is returned. """ return super().cursor(cursor_factory=cursor_factory, **kwargs) def table_exists(self, table: str) -> bool: """ Check that a table with the given name exists in the database. """ with self.cursor() as cur: num = cur.scalar("""SELECT count(*) FROM pg_tables WHERE tablename = %s and schemaname = 'public'""", (table, )) return num == 1 if isinstance(num, int) else False def table_has_column(self, table: str, column: str) -> bool: """ Check if the table 'table' exists and has a column with name 'column'. """ with self.cursor() as cur: has_column = cur.scalar("""SELECT count(*) FROM information_schema.columns WHERE table_name = %s and column_name = %s""", (table, column)) return has_column > 0 if isinstance(has_column, int) else False def index_exists(self, index: str, table: Optional[str] = None) -> bool: """ Check that an index with the given name exists in the database. If table is not None then the index must relate to the given table. 
""" with self.cursor() as cur: cur.execute("""SELECT tablename FROM pg_indexes WHERE indexname = %s and schemaname = 'public'""", (index, )) if cur.rowcount == 0: return False if table is not None: row = cur.fetchone() if row is None or not isinstance(row[0], str): return False return row[0] == table return True def drop_table(self, name: str, if_exists: bool = True, cascade: bool = False) -> None: """ Drop the table with the given name. Set `if_exists` to False if a non-existent table should raise an exception instead of just being ignored. """ with self.cursor() as cur: cur.drop_table(name, if_exists, cascade) self.commit() def server_version_tuple(self) -> Tuple[int, int]: """ Return the server version as a tuple of (major, minor). Converts correctly for pre-10 and post-10 PostgreSQL versions. """ version = self.server_version if version < 100000: return (int(version / 10000), int((version % 10000) / 100)) return (int(version / 10000), version % 10000) def postgis_version_tuple(self) -> Tuple[int, int]: """ Return the postgis version installed in the database as a tuple of (major, minor). Assumes that the PostGIS extension has been installed already. """ with self.cursor() as cur: version = cur.scalar('SELECT postgis_lib_version()') version_parts = version.split('.') if len(version_parts) < 2: raise UsageError(f"Error fetching Postgis version. Bad format: {version}") return (int(version_parts[0]), int(version_parts[1])) class ConnectionContext(ContextManager[Connection]): """ Context manager of the connection that also provides direct access to the underlying connection. """ connection: Connection def connect(dsn: str) -> ConnectionContext: """ Open a connection to the database using the specialised connection factory. The returned object may be used in conjunction with 'with'. When used outside a context manager, use the `connection` attribute to get the connection. 
""" try: conn = psycopg2.connect(dsn, connection_factory=Connection) ctxmgr = cast(ConnectionContext, contextlib.closing(conn)) ctxmgr.connection = conn return ctxmgr except psycopg2.OperationalError as err: raise UsageError(f"Cannot connect to database: {err}") from err # Translation from PG connection string parameters to PG environment variables. # Derived from https://www.postgresql.org/docs/current/libpq-envars.html. _PG_CONNECTION_STRINGS = { 'host': 'PGHOST', 'hostaddr': 'PGHOSTADDR', 'port': 'PGPORT', 'dbname': 'PGDATABASE', 'user': 'PGUSER', 'password': 'PGPASSWORD', 'passfile': 'PGPASSFILE', 'channel_binding': 'PGCHANNELBINDING', 'service': 'PGSERVICE', 'options': 'PGOPTIONS', 'application_name': 'PGAPPNAME', 'sslmode': 'PGSSLMODE', 'requiressl': 'PGREQUIRESSL', 'sslcompression': 'PGSSLCOMPRESSION', 'sslcert': 'PGSSLCERT', 'sslkey': 'PGSSLKEY', 'sslrootcert': 'PGSSLROOTCERT', 'sslcrl': 'PGSSLCRL', 'requirepeer': 'PGREQUIREPEER', 'ssl_min_protocol_version': 'PGSSLMINPROTOCOLVERSION', 'ssl_max_protocol_version': 'PGSSLMAXPROTOCOLVERSION', 'gssencmode': 'PGGSSENCMODE', 'krbsrvname': 'PGKRBSRVNAME', 'gsslib': 'PGGSSLIB', 'connect_timeout': 'PGCONNECT_TIMEOUT', 'target_session_attrs': 'PGTARGETSESSIONATTRS', } def get_pg_env(dsn: str, base_env: Optional[SysEnv] = None) -> Dict[str, str]: """ Return a copy of `base_env` with the environment variables for PostgresSQL set up from the given database connection string. If `base_env` is None, then the OS environment is used as a base environment. """ env = dict(base_env if base_env is not None else os.environ) for param, value in psycopg2.extensions.parse_dsn(dsn).items(): if param in _PG_CONNECTION_STRINGS: env[_PG_CONNECTION_STRINGS[param]] = value else: LOG.error("Unknown connection parameter '%s' ignored.", param) return env
8,938
35.337398
97
py
Nominatim
Nominatim-master/nominatim/db/utils.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Helper functions for handling DB accesses. """ from typing import IO, Optional, Union, Any, Iterable import subprocess import logging import gzip import io from pathlib import Path from nominatim.db.connection import get_pg_env, Cursor from nominatim.errors import UsageError LOG = logging.getLogger() def _pipe_to_proc(proc: 'subprocess.Popen[bytes]', fdesc: Union[IO[bytes], gzip.GzipFile]) -> int: assert proc.stdin is not None chunk = fdesc.read(2048) while chunk and proc.poll() is None: try: proc.stdin.write(chunk) except BrokenPipeError as exc: raise UsageError("Failed to execute SQL file.") from exc chunk = fdesc.read(2048) return len(chunk) def execute_file(dsn: str, fname: Path, ignore_errors: bool = False, pre_code: Optional[str] = None, post_code: Optional[str] = None) -> None: """ Read an SQL file and run its contents against the given database using psql. Use `pre_code` and `post_code` to run extra commands before or after executing the file. The commands are run within the same session, so they may be used to wrap the file execution in a transaction. 
""" cmd = ['psql'] if not ignore_errors: cmd.extend(('-v', 'ON_ERROR_STOP=1')) if not LOG.isEnabledFor(logging.INFO): cmd.append('--quiet') with subprocess.Popen(cmd, env=get_pg_env(dsn), stdin=subprocess.PIPE) as proc: assert proc.stdin is not None try: if not LOG.isEnabledFor(logging.INFO): proc.stdin.write('set client_min_messages to WARNING;'.encode('utf-8')) if pre_code: proc.stdin.write((pre_code + ';').encode('utf-8')) if fname.suffix == '.gz': with gzip.open(str(fname), 'rb') as fdesc: remain = _pipe_to_proc(proc, fdesc) else: with fname.open('rb') as fdesc: remain = _pipe_to_proc(proc, fdesc) if remain == 0 and post_code: proc.stdin.write((';' + post_code).encode('utf-8')) finally: proc.stdin.close() ret = proc.wait() if ret != 0 or remain > 0: raise UsageError("Failed to execute SQL file.") # List of characters that need to be quoted for the copy command. _SQL_TRANSLATION = {ord('\\'): '\\\\', ord('\t'): '\\t', ord('\n'): '\\n'} class CopyBuffer: """ Data collector for the copy_from command. """ def __init__(self) -> None: self.buffer = io.StringIO() def __enter__(self) -> 'CopyBuffer': return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: if self.buffer is not None: self.buffer.close() def add(self, *data: Any) -> None: """ Add another row of data to the copy buffer. """ first = True for column in data: if first: first = False else: self.buffer.write('\t') if column is None: self.buffer.write('\\N') else: self.buffer.write(str(column).translate(_SQL_TRANSLATION)) self.buffer.write('\n') def copy_out(self, cur: Cursor, table: str, columns: Optional[Iterable[str]] = None) -> None: """ Copy all collected data into the given table. """ if self.buffer.tell() > 0: self.buffer.seek(0) cur.copy_from(self.buffer, table, columns=columns) # type: ignore[no-untyped-call]
3,894
30.92623
97
py
Nominatim
Nominatim-master/nominatim/db/async_connection.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Non-blocking database connections. """ from typing import Callable, Any, Optional, Iterator, Sequence import logging import select import time import psycopg2 from psycopg2.extras import wait_select # psycopg2 emits different exceptions pre and post 2.8. Detect if the new error # module is available and adapt the error handling accordingly. try: import psycopg2.errors # pylint: disable=no-name-in-module,import-error __has_psycopg2_errors__ = True except ImportError: __has_psycopg2_errors__ = False from nominatim.typing import T_cursor, Query LOG = logging.getLogger() class DeadlockHandler: """ Context manager that catches deadlock exceptions and calls the given handler function. All other exceptions are passed on normally. """ def __init__(self, handler: Callable[[], None], ignore_sql_errors: bool = False) -> None: self.handler = handler self.ignore_sql_errors = ignore_sql_errors def __enter__(self) -> 'DeadlockHandler': return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> bool: if __has_psycopg2_errors__: if exc_type == psycopg2.errors.DeadlockDetected: # pylint: disable=E1101 self.handler() return True elif exc_type == psycopg2.extensions.TransactionRollbackError \ and exc_value.pgcode == '40P01': self.handler() return True if self.ignore_sql_errors and isinstance(exc_value, psycopg2.Error): LOG.info("SQL error ignored: %s", exc_value) return True return False class DBConnection: """ A single non-blocking database connection. 
""" def __init__(self, dsn: str, cursor_factory: Optional[Callable[..., T_cursor]] = None, ignore_sql_errors: bool = False) -> None: self.dsn = dsn self.current_query: Optional[Query] = None self.current_params: Optional[Sequence[Any]] = None self.ignore_sql_errors = ignore_sql_errors self.conn: Optional['psycopg2.connection'] = None self.cursor: Optional['psycopg2.cursor'] = None self.connect(cursor_factory=cursor_factory) def close(self) -> None: """ Close all open connections. Does not wait for pending requests. """ if self.conn is not None: if self.cursor is not None: self.cursor.close() # type: ignore[no-untyped-call] self.cursor = None self.conn.close() self.conn = None def connect(self, cursor_factory: Optional[Callable[..., T_cursor]] = None) -> None: """ (Re)connect to the database. Creates an asynchronous connection with JIT and parallel processing disabled. If a connection was already open, it is closed and a new connection established. The caller must ensure that no query is pending before reconnecting. """ self.close() # Use a dict to hand in the parameters because async is a reserved # word in Python3. self.conn = psycopg2.connect(**{'dsn': self.dsn, 'async': True}) # type: ignore assert self.conn self.wait() if cursor_factory is not None: self.cursor = self.conn.cursor(cursor_factory=cursor_factory) else: self.cursor = self.conn.cursor() # Disable JIT and parallel workers as they are known to cause problems. # Update pg_settings instead of using SET because it does not yield # errors on older versions of Postgres where the settings are not # implemented. 
self.perform( """ UPDATE pg_settings SET setting = -1 WHERE name = 'jit_above_cost'; UPDATE pg_settings SET setting = 0 WHERE name = 'max_parallel_workers_per_gather';""") self.wait() def _deadlock_handler(self) -> None: LOG.info("Deadlock detected (params = %s), retry.", str(self.current_params)) assert self.cursor is not None assert self.current_query is not None assert self.current_params is not None self.cursor.execute(self.current_query, self.current_params) def wait(self) -> None: """ Block until any pending operation is done. """ while True: with DeadlockHandler(self._deadlock_handler, self.ignore_sql_errors): wait_select(self.conn) self.current_query = None return def perform(self, sql: Query, args: Optional[Sequence[Any]] = None) -> None: """ Send SQL query to the server. Returns immediately without blocking. """ assert self.cursor is not None self.current_query = sql self.current_params = args self.cursor.execute(sql, args) def fileno(self) -> int: """ File descriptor to wait for. (Makes this class select()able.) """ assert self.conn is not None return self.conn.fileno() def is_done(self) -> bool: """ Check if the connection is available for a new query. Also checks if the previous query has run into a deadlock. If so, then the previous query is repeated. """ assert self.conn is not None if self.current_query is None: return True with DeadlockHandler(self._deadlock_handler, self.ignore_sql_errors): if self.conn.poll() == psycopg2.extensions.POLL_OK: self.current_query = None return True return False class WorkerPool: """ A pool of asynchronous database connections. The pool may be used as a context manager. 
""" REOPEN_CONNECTIONS_AFTER = 100000 def __init__(self, dsn: str, pool_size: int, ignore_sql_errors: bool = False) -> None: self.threads = [DBConnection(dsn, ignore_sql_errors=ignore_sql_errors) for _ in range(pool_size)] self.free_workers = self._yield_free_worker() self.wait_time = 0.0 def finish_all(self) -> None: """ Wait for all connection to finish. """ for thread in self.threads: while not thread.is_done(): thread.wait() self.free_workers = self._yield_free_worker() def close(self) -> None: """ Close all connections and clear the pool. """ for thread in self.threads: thread.close() self.threads = [] self.free_workers = iter([]) def next_free_worker(self) -> DBConnection: """ Get the next free connection. """ return next(self.free_workers) def _yield_free_worker(self) -> Iterator[DBConnection]: ready = self.threads command_stat = 0 while True: for thread in ready: if thread.is_done(): command_stat += 1 yield thread if command_stat > self.REOPEN_CONNECTIONS_AFTER: self._reconnect_threads() ready = self.threads command_stat = 0 else: tstart = time.time() _, ready, _ = select.select([], self.threads, []) self.wait_time += time.time() - tstart def _reconnect_threads(self) -> None: for thread in self.threads: while not thread.is_done(): thread.wait() thread.connect() def __enter__(self) -> 'WorkerPool': return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.finish_all() self.close()
7,952
32.556962
93
py
Nominatim
Nominatim-master/nominatim/db/properties.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2022 by the Nominatim developer community. # For a full list of authors see the git log. """ Query and access functions for the in-database property table. """ from typing import Optional, cast from nominatim.db.connection import Connection def set_property(conn: Connection, name: str, value: str) -> None: """ Add or replace the property with the given name. """ with conn.cursor() as cur: cur.execute('SELECT value FROM nominatim_properties WHERE property = %s', (name, )) if cur.rowcount == 0: sql = 'INSERT INTO nominatim_properties (value, property) VALUES (%s, %s)' else: sql = 'UPDATE nominatim_properties SET value = %s WHERE property = %s' cur.execute(sql, (value, name)) conn.commit() def get_property(conn: Connection, name: str) -> Optional[str]: """ Return the current value of the given property or None if the property is not set. """ if not conn.table_exists('nominatim_properties'): return None with conn.cursor() as cur: cur.execute('SELECT value FROM nominatim_properties WHERE property = %s', (name, )) if cur.rowcount == 0: return None result = cur.fetchone() assert result is not None return cast(Optional[str], result[0])
1,470
29.645833
86
py
Nominatim
Nominatim-master/nominatim/db/sqlalchemy_types.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Custom types for SQLAlchemy. """ from typing import Callable, Any, cast import sys import sqlalchemy as sa from sqlalchemy import types from nominatim.typing import SaColumn, SaBind #pylint: disable=all class Geometry(types.UserDefinedType): # type: ignore[type-arg] """ Simplified type decorator for PostGIS geometry. This type only supports geometries in 4326 projection. """ cache_ok = True def __init__(self, subtype: str = 'Geometry'): self.subtype = subtype def get_col_spec(self) -> str: return f'GEOMETRY({self.subtype}, 4326)' def bind_processor(self, dialect: 'sa.Dialect') -> Callable[[Any], str]: def process(value: Any) -> str: if isinstance(value, str): return value return cast(str, value.to_wkt()) return process def result_processor(self, dialect: 'sa.Dialect', coltype: object) -> Callable[[Any], str]: def process(value: Any) -> str: assert isinstance(value, str) return value return process def bind_expression(self, bindvalue: SaBind) -> SaColumn: return sa.func.ST_GeomFromText(bindvalue, sa.text('4326'), type_=self) class comparator_factory(types.UserDefinedType.Comparator): # type: ignore[type-arg] def intersects(self, other: SaColumn) -> 'sa.Operators': return self.op('&&')(other) def is_line_like(self) -> SaColumn: return sa.func.ST_GeometryType(self, type_=sa.String).in_(('ST_LineString', 'ST_MultiLineString')) def is_area(self) -> SaColumn: return sa.func.ST_GeometryType(self, type_=sa.String).in_(('ST_Polygon', 'ST_MultiPolygon')) def ST_DWithin(self, other: SaColumn, distance: SaColumn) -> SaColumn: return sa.func.ST_DWithin(self, other, distance, type_=sa.Float) def ST_Distance(self, other: SaColumn) -> SaColumn: return sa.func.ST_Distance(self, other, type_=sa.Float) def ST_Contains(self, other: SaColumn) -> SaColumn: return 
sa.func.ST_Contains(self, other, type_=sa.Float) def ST_ClosestPoint(self, other: SaColumn) -> SaColumn: return sa.func.ST_ClosestPoint(self, other, type_=Geometry) def ST_Buffer(self, other: SaColumn) -> SaColumn: return sa.func.ST_Buffer(self, other, type_=Geometry) def ST_Expand(self, other: SaColumn) -> SaColumn: return sa.func.ST_Expand(self, other, type_=Geometry) def ST_Collect(self) -> SaColumn: return sa.func.ST_Collect(self, type_=Geometry) def ST_Centroid(self) -> SaColumn: return sa.func.ST_Centroid(self, type_=Geometry) def ST_LineInterpolatePoint(self, other: SaColumn) -> SaColumn: return sa.func.ST_LineInterpolatePoint(self, other, type_=Geometry) def ST_LineLocatePoint(self, other: SaColumn) -> SaColumn: return sa.func.ST_LineLocatePoint(self, other, type_=sa.Float)
3,385
30.943396
95
py
Nominatim
Nominatim-master/nominatim/db/async_core_library.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Import the base libary to use with asynchronous SQLAlchemy. """ # pylint: disable=invalid-name from typing import Any try: import psycopg PGCORE_LIB = 'psycopg' PGCORE_ERROR: Any = psycopg.Error except ModuleNotFoundError: import asyncpg PGCORE_LIB = 'asyncpg' PGCORE_ERROR = asyncpg.PostgresError
539
23.545455
59
py
Nominatim
Nominatim-master/nominatim/db/__init__.py
0
0
0
py
Nominatim
Nominatim-master/nominatim/db/sqlalchemy_schema.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ SQLAlchemy definitions for all tables used by the frontend. """ from typing import Any import sqlalchemy as sa from sqlalchemy.dialects.postgresql import HSTORE, ARRAY, JSONB, array from sqlalchemy.dialects.sqlite import JSON as sqlite_json from nominatim.db.sqlalchemy_types import Geometry class PostgresTypes: """ Type definitions for complex types as used in Postgres variants. """ Composite = HSTORE Json = JSONB IntArray = ARRAY(sa.Integer()) #pylint: disable=invalid-name to_array = array class SqliteTypes: """ Type definitions for complex types as used in Postgres variants. """ Composite = sqlite_json Json = sqlite_json IntArray = sqlite_json @staticmethod def to_array(arr: Any) -> Any: """ Sqlite has no special conversion for arrays. """ return arr #pylint: disable=too-many-instance-attributes class SearchTables: """ Data class that holds the tables of the Nominatim database. 
""" def __init__(self, meta: sa.MetaData, engine_name: str) -> None: if engine_name == 'postgresql': self.types: Any = PostgresTypes elif engine_name == 'sqlite': self.types = SqliteTypes else: raise ValueError("Only 'postgresql' and 'sqlite' engines are supported.") self.meta = meta self.import_status = sa.Table('import_status', meta, sa.Column('lastimportdate', sa.DateTime(True), nullable=False), sa.Column('sequence_id', sa.Integer), sa.Column('indexed', sa.Boolean)) self.properties = sa.Table('nominatim_properties', meta, sa.Column('property', sa.Text, nullable=False), sa.Column('value', sa.Text)) self.placex = sa.Table('placex', meta, sa.Column('place_id', sa.BigInteger, nullable=False, unique=True), sa.Column('parent_place_id', sa.BigInteger), sa.Column('linked_place_id', sa.BigInteger), sa.Column('importance', sa.Float), sa.Column('indexed_date', sa.DateTime), sa.Column('rank_address', sa.SmallInteger), sa.Column('rank_search', sa.SmallInteger), sa.Column('partition', sa.SmallInteger), sa.Column('indexed_status', sa.SmallInteger), sa.Column('osm_type', sa.String(1), nullable=False), sa.Column('osm_id', sa.BigInteger, nullable=False), sa.Column('class', sa.Text, nullable=False, key='class_'), sa.Column('type', sa.Text, nullable=False), sa.Column('admin_level', sa.SmallInteger), sa.Column('name', self.types.Composite), sa.Column('address', self.types.Composite), sa.Column('extratags', self.types.Composite), sa.Column('geometry', Geometry, nullable=False), sa.Column('wikipedia', sa.Text), sa.Column('country_code', sa.String(2)), sa.Column('housenumber', sa.Text), sa.Column('postcode', sa.Text), sa.Column('centroid', Geometry)) self.addressline = sa.Table('place_addressline', meta, sa.Column('place_id', sa.BigInteger, index=True), sa.Column('address_place_id', sa.BigInteger, index=True), sa.Column('distance', sa.Float), sa.Column('cached_rank_address', sa.SmallInteger), sa.Column('fromarea', sa.Boolean), sa.Column('isaddress', sa.Boolean)) self.postcode = 
sa.Table('location_postcode', meta, sa.Column('place_id', sa.BigInteger, unique=True), sa.Column('parent_place_id', sa.BigInteger), sa.Column('rank_search', sa.SmallInteger), sa.Column('rank_address', sa.SmallInteger), sa.Column('indexed_status', sa.SmallInteger), sa.Column('indexed_date', sa.DateTime), sa.Column('country_code', sa.String(2)), sa.Column('postcode', sa.Text, index=True), sa.Column('geometry', Geometry)) self.osmline = sa.Table('location_property_osmline', meta, sa.Column('place_id', sa.BigInteger, nullable=False, unique=True), sa.Column('osm_id', sa.BigInteger), sa.Column('parent_place_id', sa.BigInteger), sa.Column('indexed_date', sa.DateTime), sa.Column('startnumber', sa.Integer), sa.Column('endnumber', sa.Integer), sa.Column('step', sa.SmallInteger), sa.Column('partition', sa.SmallInteger), sa.Column('indexed_status', sa.SmallInteger), sa.Column('linegeo', Geometry), sa.Column('address', self.types.Composite), sa.Column('postcode', sa.Text), sa.Column('country_code', sa.String(2))) self.country_name = sa.Table('country_name', meta, sa.Column('country_code', sa.String(2)), sa.Column('name', self.types.Composite), sa.Column('derived_name', self.types.Composite), sa.Column('country_default_language_code', sa.Text), sa.Column('partition', sa.Integer)) self.country_grid = sa.Table('country_osm_grid', meta, sa.Column('country_code', sa.String(2)), sa.Column('area', sa.Float), sa.Column('geometry', Geometry)) # The following tables are not necessarily present. 
self.search_name = sa.Table('search_name', meta, sa.Column('place_id', sa.BigInteger, index=True), sa.Column('importance', sa.Float), sa.Column('search_rank', sa.SmallInteger), sa.Column('address_rank', sa.SmallInteger), sa.Column('name_vector', self.types.IntArray, index=True), sa.Column('nameaddress_vector', self.types.IntArray, index=True), sa.Column('country_code', sa.String(2)), sa.Column('centroid', Geometry)) self.tiger = sa.Table('location_property_tiger', meta, sa.Column('place_id', sa.BigInteger), sa.Column('parent_place_id', sa.BigInteger), sa.Column('startnumber', sa.Integer), sa.Column('endnumber', sa.Integer), sa.Column('step', sa.SmallInteger), sa.Column('partition', sa.SmallInteger), sa.Column('linegeo', Geometry), sa.Column('postcode', sa.Text))
6,594
41.275641
85
py
Nominatim
Nominatim-master/nominatim/api/status.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Classes and function releated to status call. """ from typing import Optional import datetime as dt import dataclasses import sqlalchemy as sa from nominatim.api.connection import SearchConnection from nominatim import version @dataclasses.dataclass class StatusResult: """ Result of a call to the status API. """ status: int message: str software_version = version.NOMINATIM_VERSION data_updated: Optional[dt.datetime] = None database_version: Optional[version.NominatimVersion] = None async def get_status(conn: SearchConnection) -> StatusResult: """ Execute a status API call. """ status = StatusResult(0, 'OK') # Last update date sql = sa.select(conn.t.import_status.c.lastimportdate).limit(1) status.data_updated = await conn.scalar(sql) # Database version try: verstr = await conn.get_property('database_version') status.database_version = version.parse_version(verstr) except ValueError: pass return status
1,225
25.085106
67
py
Nominatim
Nominatim-master/nominatim/api/core.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Implementation of classes for API access via libraries. """ from typing import Mapping, Optional, Any, AsyncIterator, Dict, Sequence, List, Tuple import asyncio import contextlib from pathlib import Path import sqlalchemy as sa import sqlalchemy.ext.asyncio as sa_asyncio from nominatim.errors import UsageError from nominatim.db.sqlalchemy_schema import SearchTables from nominatim.db.async_core_library import PGCORE_LIB, PGCORE_ERROR from nominatim.config import Configuration from nominatim.api.connection import SearchConnection from nominatim.api.status import get_status, StatusResult from nominatim.api.lookup import get_detailed_place, get_simple_place from nominatim.api.reverse import ReverseGeocoder from nominatim.api.search import ForwardGeocoder, Phrase, PhraseType, make_query_analyzer import nominatim.api.types as ntyp from nominatim.api.results import DetailedResult, ReverseResult, SearchResults class NominatimAPIAsync: """ API loader asynchornous version. """ def __init__(self, project_dir: Path, environ: Optional[Mapping[str, str]] = None) -> None: self.config = Configuration(project_dir, environ) self.server_version = 0 self._engine_lock = asyncio.Lock() self._engine: Optional[sa_asyncio.AsyncEngine] = None self._tables: Optional[SearchTables] = None self._property_cache: Dict[str, Any] = {'DB:server_version': 0} async def setup_database(self) -> None: """ Set up the engine and connection parameters. This function will be implicitly called when the database is accessed for the first time. You may also call it explicitly to avoid that the first call is delayed by the setup. 
""" async with self._engine_lock: if self._engine: return dsn = self.config.get_database_params() pool_size = self.config.get_int('API_POOL_SIZE') query = {k: v for k, v in dsn.items() if k not in ('user', 'password', 'dbname', 'host', 'port')} dburl = sa.engine.URL.create( f'postgresql+{PGCORE_LIB}', database=dsn.get('dbname'), username=dsn.get('user'), password=dsn.get('password'), host=dsn.get('host'), port=int(dsn['port']) if 'port' in dsn else None, query=query) engine = sa_asyncio.create_async_engine(dburl, future=True, max_overflow=0, pool_size=pool_size, echo=self.config.get_bool('DEBUG_SQL')) try: async with engine.begin() as conn: result = await conn.scalar(sa.text('SHOW server_version_num')) server_version = int(result) except (PGCORE_ERROR, sa.exc.OperationalError): server_version = 0 if server_version >= 110000: @sa.event.listens_for(engine.sync_engine, "connect") def _on_connect(dbapi_con: Any, _: Any) -> None: cursor = dbapi_con.cursor() cursor.execute("SET jit_above_cost TO '-1'") cursor.execute("SET max_parallel_workers_per_gather TO '0'") # Make sure that all connections get the new settings await self.close() self._property_cache['DB:server_version'] = server_version self._tables = SearchTables(sa.MetaData(), engine.name) # pylint: disable=no-member self._engine = engine async def close(self) -> None: """ Close all active connections to the database. The NominatimAPIAsync object remains usable after closing. If a new API functions is called, new connections are created. """ if self._engine is not None: await self._engine.dispose() @contextlib.asynccontextmanager async def begin(self) -> AsyncIterator[SearchConnection]: """ Create a new connection with automatic transaction handling. This function may be used to get low-level access to the database. Refer to the documentation of SQLAlchemy for details how to use the connection object. 
""" if self._engine is None: await self.setup_database() assert self._engine is not None assert self._tables is not None async with self._engine.begin() as conn: yield SearchConnection(conn, self._tables, self._property_cache) async def status(self) -> StatusResult: """ Return the status of the database. """ try: async with self.begin() as conn: status = await get_status(conn) except (PGCORE_ERROR, sa.exc.OperationalError): return StatusResult(700, 'Database connection failed') return status async def details(self, place: ntyp.PlaceRef, **params: Any) -> Optional[DetailedResult]: """ Get detailed information about a place in the database. Returns None if there is no entry under the given ID. """ details = ntyp.LookupDetails.from_kwargs(params) async with self.begin() as conn: if details.keywords: await make_query_analyzer(conn) return await get_detailed_place(conn, place, details) async def lookup(self, places: Sequence[ntyp.PlaceRef], **params: Any) -> SearchResults: """ Get simple information about a list of places. Returns a list of place information for all IDs that were found. """ details = ntyp.LookupDetails.from_kwargs(params) async with self.begin() as conn: if details.keywords: await make_query_analyzer(conn) return SearchResults(filter(None, [await get_simple_place(conn, p, details) for p in places])) async def reverse(self, coord: ntyp.AnyPoint, **params: Any) -> Optional[ReverseResult]: """ Find a place by its coordinates. Also known as reverse geocoding. Returns the closest result that can be found or None if no place matches the given criteria. """ # The following negation handles NaN correctly. Don't change. if not abs(coord[0]) <= 180 or not abs(coord[1]) <= 90: # There are no results to be expected outside valid coordinates. 
return None details = ntyp.ReverseDetails.from_kwargs(params) async with self.begin() as conn: if details.keywords: await make_query_analyzer(conn) geocoder = ReverseGeocoder(conn, details) return await geocoder.lookup(coord) async def search(self, query: str, **params: Any) -> SearchResults: """ Find a place by free-text search. Also known as forward geocoding. """ query = query.strip() if not query: raise UsageError('Nothing to search for.') async with self.begin() as conn: geocoder = ForwardGeocoder(conn, ntyp.SearchDetails.from_kwargs(params)) phrases = [Phrase(PhraseType.NONE, p.strip()) for p in query.split(',')] return await geocoder.lookup(phrases) # pylint: disable=too-many-arguments,too-many-branches async def search_address(self, amenity: Optional[str] = None, street: Optional[str] = None, city: Optional[str] = None, county: Optional[str] = None, state: Optional[str] = None, country: Optional[str] = None, postalcode: Optional[str] = None, **params: Any) -> SearchResults: """ Find an address using structured search. 
""" async with self.begin() as conn: details = ntyp.SearchDetails.from_kwargs(params) phrases: List[Phrase] = [] if amenity: phrases.append(Phrase(PhraseType.AMENITY, amenity)) if street: phrases.append(Phrase(PhraseType.STREET, street)) if city: phrases.append(Phrase(PhraseType.CITY, city)) if county: phrases.append(Phrase(PhraseType.COUNTY, county)) if state: phrases.append(Phrase(PhraseType.STATE, state)) if postalcode: phrases.append(Phrase(PhraseType.POSTCODE, postalcode)) if country: phrases.append(Phrase(PhraseType.COUNTRY, country)) if not phrases: raise UsageError('Nothing to search for.') if amenity or street: details.restrict_min_max_rank(26, 30) elif city: details.restrict_min_max_rank(13, 25) elif county: details.restrict_min_max_rank(10, 12) elif state: details.restrict_min_max_rank(5, 9) elif postalcode: details.restrict_min_max_rank(5, 11) else: details.restrict_min_max_rank(4, 4) if 'layers' not in params: details.layers = ntyp.DataLayer.ADDRESS if amenity: details.layers |= ntyp.DataLayer.POI geocoder = ForwardGeocoder(conn, details) return await geocoder.lookup(phrases) async def search_category(self, categories: List[Tuple[str, str]], near_query: Optional[str] = None, **params: Any) -> SearchResults: """ Find an object of a certain category near another place. The near place may either be given as an unstructured search query in itself or as coordinates. """ if not categories: return SearchResults() details = ntyp.SearchDetails.from_kwargs(params) async with self.begin() as conn: if near_query: phrases = [Phrase(PhraseType.NONE, p) for p in near_query.split(',')] else: phrases = [] if details.keywords: await make_query_analyzer(conn) geocoder = ForwardGeocoder(conn, details) return await geocoder.lookup_pois(categories, phrases) class NominatimAPI: """ API loader, synchronous version. 
""" def __init__(self, project_dir: Path, environ: Optional[Mapping[str, str]] = None) -> None: self._loop = asyncio.new_event_loop() self._async_api = NominatimAPIAsync(project_dir, environ) def close(self) -> None: """ Close all active connections to the database. The NominatimAPIAsync object remains usable after closing. If a new API functions is called, new connections are created. """ self._loop.run_until_complete(self._async_api.close()) self._loop.close() @property def config(self) -> Configuration: """ Return the configuration used by the API. """ return self._async_api.config def status(self) -> StatusResult: """ Return the status of the database. """ return self._loop.run_until_complete(self._async_api.status()) def details(self, place: ntyp.PlaceRef, **params: Any) -> Optional[DetailedResult]: """ Get detailed information about a place in the database. """ return self._loop.run_until_complete(self._async_api.details(place, **params)) def lookup(self, places: Sequence[ntyp.PlaceRef], **params: Any) -> SearchResults: """ Get simple information about a list of places. Returns a list of place information for all IDs that were found. """ return self._loop.run_until_complete(self._async_api.lookup(places, **params)) def reverse(self, coord: ntyp.AnyPoint, **params: Any) -> Optional[ReverseResult]: """ Find a place by its coordinates. Also known as reverse geocoding. Returns the closest result that can be found or None if no place matches the given criteria. """ return self._loop.run_until_complete(self._async_api.reverse(coord, **params)) def search(self, query: str, **params: Any) -> SearchResults: """ Find a place by free-text search. Also known as forward geocoding. 
""" return self._loop.run_until_complete( self._async_api.search(query, **params)) # pylint: disable=too-many-arguments def search_address(self, amenity: Optional[str] = None, street: Optional[str] = None, city: Optional[str] = None, county: Optional[str] = None, state: Optional[str] = None, country: Optional[str] = None, postalcode: Optional[str] = None, **params: Any) -> SearchResults: """ Find an address using structured search. """ return self._loop.run_until_complete( self._async_api.search_address(amenity, street, city, county, state, country, postalcode, **params)) def search_category(self, categories: List[Tuple[str, str]], near_query: Optional[str] = None, **params: Any) -> SearchResults: """ Find an object of a certain category near another place. The near place may either be given as an unstructured search query in itself or as a geographic area through the viewbox or near parameters. """ return self._loop.run_until_complete( self._async_api.search_category(categories, near_query, **params))
14,332
39.148459
100
py
Nominatim
Nominatim-master/nominatim/api/connection.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Extended SQLAlchemy connection class that also includes access to the schema. """ from typing import cast, Any, Mapping, Sequence, Union, Dict, Optional, Set import sqlalchemy as sa from sqlalchemy.ext.asyncio import AsyncConnection from nominatim.typing import SaFromClause from nominatim.db.sqlalchemy_schema import SearchTables from nominatim.db.sqlalchemy_types import Geometry from nominatim.api.logging import log class SearchConnection: """ An extended SQLAlchemy connection class, that also contains then table definitions. The underlying asynchronous SQLAlchemy connection can be accessed with the 'connection' property. The 't' property is the collection of Nominatim tables. """ def __init__(self, conn: AsyncConnection, tables: SearchTables, properties: Dict[str, Any]) -> None: self.connection = conn self.t = tables # pylint: disable=invalid-name self._property_cache = properties self._classtables: Optional[Set[str]] = None async def scalar(self, sql: sa.sql.base.Executable, params: Union[Mapping[str, Any], None] = None ) -> Any: """ Execute a 'scalar()' query on the connection. """ log().sql(self.connection, sql, params) return await self.connection.scalar(sql, params) async def execute(self, sql: 'sa.Executable', params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None] = None ) -> 'sa.Result[Any]': """ Execute a 'execute()' query on the connection. """ log().sql(self.connection, sql, params) return await self.connection.execute(sql, params) async def get_property(self, name: str, cached: bool = True) -> str: """ Get a property from Nominatim's property table. Property values are normally cached so that they are only retrieved from the database when they are queried for the first time with this function. 
Set 'cached' to False to force reading the property from the database. Raises a ValueError if the property does not exist. """ if name.startswith('DB:'): raise ValueError(f"Illegal property value '{name}'.") if cached and name in self._property_cache: return cast(str, self._property_cache[name]) sql = sa.select(self.t.properties.c.value)\ .where(self.t.properties.c.property == name) value = await self.connection.scalar(sql) if value is None: raise ValueError(f"Property '{name}' not found in database.") self._property_cache[name] = cast(str, value) return cast(str, value) async def get_db_property(self, name: str) -> Any: """ Get a setting from the database. At the moment, only 'server_version', the version of the database software, can be retrieved with this function. Raises a ValueError if the property does not exist. """ if name != 'server_version': raise ValueError(f"DB setting '{name}' not found in database.") return self._property_cache['DB:server_version'] async def get_class_table(self, cls: str, typ: str) -> Optional[SaFromClause]: """ Lookup up if there is a classtype table for the given category and return a SQLAlchemy table for it, if it exists. """ if self._classtables is None: res = await self.execute(sa.text("""SELECT tablename FROM pg_tables WHERE tablename LIKE 'place_classtype_%' """)) self._classtables = {r[0] for r in res} tablename = f"place_classtype_{cls}_{typ}" if tablename not in self._classtables: return None if tablename in self.t.meta.tables: return self.t.meta.tables[tablename] return sa.Table(tablename, self.t.meta, sa.Column('place_id', sa.BigInteger), sa.Column('centroid', Geometry))
4,419
37.103448
96
py
Nominatim
Nominatim-master/nominatim/api/logging.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Functions for specialised logging with HTML output. """ from typing import Any, Iterator, Optional, List, Tuple, cast, Union, Mapping, Sequence from contextvars import ContextVar import datetime as dt import textwrap import io import re import sqlalchemy as sa from sqlalchemy.ext.asyncio import AsyncConnection try: from pygments import highlight from pygments.lexers import PythonLexer, PostgresLexer from pygments.formatters import HtmlFormatter CODE_HIGHLIGHT = True except ModuleNotFoundError: CODE_HIGHLIGHT = False def _debug_name(res: Any) -> str: if res.names: return cast(str, res.names.get('name', next(iter(res.names.values())))) return f"Hnr {res.housenumber}" if res.housenumber is not None else '[NONE]' class BaseLogger: """ Interface for logging function. The base implementation does nothing. Overwrite the functions in derived classes which implement logging functionality. """ def get_buffer(self) -> str: """ Return the current content of the log buffer. """ return '' def function(self, func: str, **kwargs: Any) -> None: """ Start a new debug chapter for the given function and its parameters. """ def section(self, heading: str) -> None: """ Start a new section with the given title. """ def comment(self, text: str) -> None: """ Add a simple comment to the debug output. """ def var_dump(self, heading: str, var: Any) -> None: """ Print the content of the variable to the debug output prefixed by the given heading. """ def table_dump(self, heading: str, rows: Iterator[Optional[List[Any]]]) -> None: """ Print the table generated by the generator function. """ def result_dump(self, heading: str, results: Iterator[Tuple[Any, Any]]) -> None: """ Print a list of search results generated by the generator function. 
""" def sql(self, conn: AsyncConnection, statement: 'sa.Executable', params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None]) -> None: """ Print the SQL for the given statement. """ def format_sql(self, conn: AsyncConnection, statement: 'sa.Executable', extra_params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None]) -> str: """ Return the comiled version of the statement. """ compiled = cast('sa.ClauseElement', statement).compile(conn.sync_engine) params = dict(compiled.params) if isinstance(extra_params, Mapping): for k, v in extra_params.items(): params[k] = str(v) elif isinstance(extra_params, Sequence) and extra_params: for k in extra_params[0]: params[k] = f':{k}' sqlstr = str(compiled) if sa.__version__.startswith('1'): try: sqlstr = re.sub(r'__\[POSTCOMPILE_[^]]*\]', '%s', sqlstr) return sqlstr % tuple((repr(params.get(name, None)) for name in compiled.positiontup)) # type: ignore except TypeError: return sqlstr # Fixes an odd issue with Python 3.7 where percentages are not # quoted correctly. sqlstr = re.sub(r'%(?!\()', '%%', sqlstr) sqlstr = re.sub(r'__\[POSTCOMPILE_([^]]*)\]', r'%(\1)s', sqlstr) print(sqlstr) return sqlstr % params class HTMLLogger(BaseLogger): """ Logger that formats messages in HTML. 
""" def __init__(self) -> None: self.buffer = io.StringIO() def _timestamp(self) -> None: self._write(f'<p class="timestamp">[{dt.datetime.now()}]</p>') def get_buffer(self) -> str: return HTML_HEADER + self.buffer.getvalue() + HTML_FOOTER def function(self, func: str, **kwargs: Any) -> None: self._timestamp() self._write(f"<h1>Debug output for {func}()</h1>\n<p>Parameters:<dl>") for name, value in kwargs.items(): self._write(f'<dt>{name}</dt><dd>{self._python_var(value)}</dd>') self._write('</dl></p>') def section(self, heading: str) -> None: self._timestamp() self._write(f"<h2>{heading}</h2>") def comment(self, text: str) -> None: self._timestamp() self._write(f"<p>{text}</p>") def var_dump(self, heading: str, var: Any) -> None: self._timestamp() if callable(var): var = var() self._write(f'<h5>{heading}</h5>{self._python_var(var)}') def table_dump(self, heading: str, rows: Iterator[Optional[List[Any]]]) -> None: self._timestamp() head = next(rows) assert head self._write(f'<table><thead><tr><th colspan="{len(head)}">{heading}</th></tr><tr>') for cell in head: self._write(f'<th>{cell}</th>') self._write('</tr></thead><tbody>') for row in rows: if row is not None: self._write('<tr>') for cell in row: self._write(f'<td>{cell}</td>') self._write('</tr>') self._write('</tbody></table>') def result_dump(self, heading: str, results: Iterator[Tuple[Any, Any]]) -> None: """ Print a list of search results generated by the generator function. 
""" self._timestamp() def format_osm(osm_object: Optional[Tuple[str, int]]) -> str: if not osm_object: return '-' t, i = osm_object if t == 'N': fullt = 'node' elif t == 'W': fullt = 'way' elif t == 'R': fullt = 'relation' else: return f'{t}{i}' return f'<a href="https://www.openstreetmap.org/{fullt}/{i}">{t}{i}</a>' self._write(f'<h5>{heading}</h5><p><dl>') total = 0 for rank, res in results: self._write(f'<dt>[{rank:.3f}]</dt> <dd>{res.source_table.name}(') self._write(f"{_debug_name(res)}, type=({','.join(res.category)}), ") self._write(f"rank={res.rank_address}, ") self._write(f"osm={format_osm(res.osm_object)}, ") self._write(f'cc={res.country_code}, ') self._write(f'importance={res.importance or float("nan"):.5f})</dd>') total += 1 self._write(f'</dl><b>TOTAL:</b> {total}</p>') def sql(self, conn: AsyncConnection, statement: 'sa.Executable', params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None]) -> None: self._timestamp() sqlstr = self.format_sql(conn, statement, params) if CODE_HIGHLIGHT: sqlstr = highlight(sqlstr, PostgresLexer(), HtmlFormatter(nowrap=True, lineseparator='<br />')) self._write(f'<div class="highlight"><code class="lang-sql">{sqlstr}</code></div>') else: self._write(f'<code class="lang-sql">{sqlstr}</code>') def _python_var(self, var: Any) -> str: if CODE_HIGHLIGHT: fmt = highlight(str(var), PythonLexer(), HtmlFormatter(nowrap=True)) return f'<div class="highlight"><code class="lang-python">{fmt}</code></div>' return f'<code class="lang-python">{str(var)}</code>' def _write(self, text: str) -> None: """ Add the raw text to the debug output. """ self.buffer.write(text) class TextLogger(BaseLogger): """ Logger creating output suitable for the console. 
""" def __init__(self) -> None: self.buffer = io.StringIO() def get_buffer(self) -> str: return self.buffer.getvalue() def function(self, func: str, **kwargs: Any) -> None: self._write(f"#### Debug output for {func}()\n\nParameters:\n") for name, value in kwargs.items(): self._write(f' {name}: {self._python_var(value)}\n') self._write('\n') def section(self, heading: str) -> None: self._write(f"\n# {heading}\n\n") def comment(self, text: str) -> None: self._write(f"{text}\n") def var_dump(self, heading: str, var: Any) -> None: if callable(var): var = var() self._write(f'{heading}:\n {self._python_var(var)}\n\n') def table_dump(self, heading: str, rows: Iterator[Optional[List[Any]]]) -> None: self._write(f'{heading}:\n') data = [list(map(self._python_var, row)) if row else None for row in rows] assert data[0] is not None num_cols = len(data[0]) maxlens = [max(len(d[i]) for d in data if d) for i in range(num_cols)] tablewidth = sum(maxlens) + 3 * num_cols + 1 row_format = '| ' +' | '.join(f'{{:<{l}}}' for l in maxlens) + ' |\n' self._write('-'*tablewidth + '\n') self._write(row_format.format(*data[0])) self._write('-'*tablewidth + '\n') for row in data[1:]: if row: self._write(row_format.format(*row)) else: self._write('-'*tablewidth + '\n') if data[-1]: self._write('-'*tablewidth + '\n') def result_dump(self, heading: str, results: Iterator[Tuple[Any, Any]]) -> None: self._write(f'{heading}:\n') total = 0 for rank, res in results: self._write(f'[{rank:.3f}] {res.source_table.name}(') self._write(f"{_debug_name(res)}, type=({','.join(res.category)}), ") self._write(f"rank={res.rank_address}, ") self._write(f"osm={''.join(map(str, res.osm_object or []))}, ") self._write(f'cc={res.country_code}, ') self._write(f'importance={res.importance or -1:.5f})\n') total += 1 self._write(f'TOTAL: {total}\n\n') def sql(self, conn: AsyncConnection, statement: 'sa.Executable', params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None]) -> None: sqlstr = '\n| 
'.join(textwrap.wrap(self.format_sql(conn, statement, params), width=78)) self._write(f"| {sqlstr}\n\n") def _python_var(self, var: Any) -> str: return str(var) def _write(self, text: str) -> None: self.buffer.write(text) logger: ContextVar[BaseLogger] = ContextVar('logger', default=BaseLogger()) def set_log_output(fmt: str) -> None: """ Enable collecting debug information. """ if fmt == 'html': logger.set(HTMLLogger()) elif fmt == 'text': logger.set(TextLogger()) else: logger.set(BaseLogger()) def log() -> BaseLogger: """ Return the logger for the current context. """ return logger.get() def get_and_disable() -> str: """ Return the current content of the debug buffer and disable logging. """ buf = logger.get().get_buffer() logger.set(BaseLogger()) return buf HTML_HEADER: str = """<!DOCTYPE html> <html> <head> <title>Nominatim - Debug</title> <style> """ + \ (HtmlFormatter(nobackground=True).get_style_defs('.highlight') if CODE_HIGHLIGHT else '') +\ """ h2 { font-size: x-large } dl { padding-left: 10pt; font-family: monospace } dt { float: left; font-weight: bold; margin-right: 0.5em } dt::after { content: ": "; } dd::after { clear: left; display: block } .lang-sql { color: #555; font-size: small } h5 { border: solid lightgrey 0.1pt; margin-bottom: 0; background-color: #f7f7f7 } h5 + .highlight { padding: 3pt; border: solid lightgrey 0.1pt } table, th, tbody { border: thin solid; border-collapse: collapse; } td { border-right: thin solid; padding-left: 3pt; padding-right: 3pt; } .timestamp { font-size: 0.8em; color: darkblue; width: calc(100% - 5pt); text-align: right; position: absolute; left: 0; margin-top: -5px; } </style> </head> <body> """ HTML_FOOTER: str = "</body></html>"
12,424
29.231144
95
py
Nominatim
Nominatim-master/nominatim/api/localization.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Helper functions for localizing names of results. """ from typing import Mapping, List, Optional import re class Locales: """ Helper class for localization of names. It takes a list of language prefixes in their order of preferred usage. """ def __init__(self, langs: Optional[List[str]] = None): self.languages = langs or [] self.name_tags: List[str] = [] # Build the list of supported tags. It is currently hard-coded. self._add_lang_tags('name') self._add_tags('name', 'brand') self._add_lang_tags('official_name', 'short_name') self._add_tags('official_name', 'short_name', 'ref') def __bool__(self) -> bool: return len(self.languages) > 0 def _add_tags(self, *tags: str) -> None: for tag in tags: self.name_tags.append(tag) self.name_tags.append(f"_place_{tag}") def _add_lang_tags(self, *tags: str) -> None: for tag in tags: for lang in self.languages: self.name_tags.append(f"{tag}:{lang}") self.name_tags.append(f"_place_{tag}:{lang}") def display_name(self, names: Optional[Mapping[str, str]]) -> str: """ Return the best matching name from a dictionary of names containing different name variants. If 'names' is null or empty, an empty string is returned. If no appropriate localization is found, the first name is returned. """ if not names: return '' if len(names) > 1: for tag in self.name_tags: if tag in names: return names[tag] # Nothing? Return any of the other names as a default. return next(iter(names.values())) @staticmethod def from_accept_languages(langstr: str) -> 'Locales': """ Create a localization object from a language list in the format of HTTP accept-languages header. The functions tries to be forgiving of format errors by first splitting the string into comma-separated parts and then parsing each description separately. 
Badly formatted parts are then ignored. """ # split string into languages candidates = [] for desc in langstr.split(','): m = re.fullmatch(r'\s*([a-z_-]+)(?:;\s*q\s*=\s*([01](?:\.\d+)?))?\s*', desc, flags=re.I) if m: candidates.append((m[1], float(m[2] or 1.0))) # sort the results by the weight of each language (preserving order). candidates.sort(reverse=True, key=lambda e: e[1]) # If a language has a region variant, also add the language without # variant but only if it isn't already in the list to not mess up the weight. languages = [] for lid, _ in candidates: languages.append(lid) parts = lid.split('-', 1) if len(parts) > 1 and all(c[0] != parts[0] for c in candidates): languages.append(parts[0]) return Locales(languages)
3,340
33.091837
85
py
Nominatim
Nominatim-master/nominatim/api/reverse.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Implementation of reverse geocoding. """ from typing import Optional, List, Callable, Type, Tuple, Dict, Any, cast, Union import functools import sqlalchemy as sa from nominatim.typing import SaColumn, SaSelect, SaFromClause, SaLabel, SaRow,\ SaBind, SaLambdaSelect from nominatim.api.connection import SearchConnection import nominatim.api.results as nres from nominatim.api.logging import log from nominatim.api.types import AnyPoint, DataLayer, ReverseDetails, GeometryFormat, Bbox from nominatim.db.sqlalchemy_types import Geometry import nominatim.db.sqlalchemy_functions as snfn # In SQLAlchemy expression which compare with NULL need to be expressed with # the equal sign. # pylint: disable=singleton-comparison RowFunc = Callable[[Optional[SaRow], Type[nres.ReverseResult]], Optional[nres.ReverseResult]] WKT_PARAM: SaBind = sa.bindparam('wkt', type_=Geometry) MAX_RANK_PARAM: SaBind = sa.bindparam('max_rank') def no_index(expr: SaColumn) -> SaColumn: """ Wrap the given expression, so that the query planner will refrain from using the expression for index lookup. """ return sa.func.coalesce(sa.null(), expr) # pylint: disable=not-callable def _select_from_placex(t: SaFromClause, use_wkt: bool = True) -> SaSelect: """ Create a select statement with the columns relevant for reverse results. 
""" if not use_wkt: distance = t.c.distance centroid = t.c.centroid else: distance = t.c.geometry.ST_Distance(WKT_PARAM) centroid = sa.case((t.c.geometry.is_line_like(), t.c.geometry.ST_ClosestPoint(WKT_PARAM)), else_=t.c.centroid).label('centroid') return sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name, t.c.class_, t.c.type, t.c.address, t.c.extratags, t.c.housenumber, t.c.postcode, t.c.country_code, t.c.importance, t.c.wikipedia, t.c.parent_place_id, t.c.rank_address, t.c.rank_search, centroid, distance.label('distance'), t.c.geometry.ST_Expand(0).label('bbox')) def _interpolated_housenumber(table: SaFromClause) -> SaLabel: return sa.cast(table.c.startnumber + sa.func.round(((table.c.endnumber - table.c.startnumber) * table.c.position) / table.c.step) * table.c.step, sa.Integer).label('housenumber') def _interpolated_position(table: SaFromClause) -> SaLabel: fac = sa.cast(table.c.step, sa.Float) / (table.c.endnumber - table.c.startnumber) rounded_pos = sa.func.round(table.c.position / fac) * fac return sa.case( (table.c.endnumber == table.c.startnumber, table.c.linegeo.ST_Centroid()), else_=table.c.linegeo.ST_LineInterpolatePoint(rounded_pos)).label('centroid') def _locate_interpolation(table: SaFromClause) -> SaLabel: """ Given a position, locate the closest point on the line. """ return sa.case((table.c.linegeo.is_line_like(), table.c.linegeo.ST_LineLocatePoint(WKT_PARAM)), else_=0).label('position') def _is_address_point(table: SaFromClause) -> SaColumn: return sa.and_(table.c.rank_address == 30, sa.or_(table.c.housenumber != None, table.c.name.has_key('housename'))) def _get_closest(*rows: Optional[SaRow]) -> Optional[SaRow]: return min(rows, key=lambda row: 1000 if row is None else row.distance) class ReverseGeocoder: """ Class implementing the logic for looking up a place from a coordinate. 
""" def __init__(self, conn: SearchConnection, params: ReverseDetails) -> None: self.conn = conn self.params = params self.bind_params: Dict[str, Any] = {'max_rank': params.max_rank} @property def max_rank(self) -> int: """ Return the maximum configured rank. """ return self.params.max_rank def has_geometries(self) -> bool: """ Check if any geometries are requested. """ return bool(self.params.geometry_output) def layer_enabled(self, *layer: DataLayer) -> bool: """ Return true when any of the given layer types are requested. """ return any(self.params.layers & l for l in layer) def layer_disabled(self, *layer: DataLayer) -> bool: """ Return true when none of the given layer types is requested. """ return not any(self.params.layers & l for l in layer) def has_feature_layers(self) -> bool: """ Return true if any layer other than ADDRESS or POI is requested. """ return self.layer_enabled(DataLayer.RAILWAY, DataLayer.MANMADE, DataLayer.NATURAL) def _add_geometry_columns(self, sql: SaLambdaSelect, col: SaColumn) -> SaSelect: out = [] if self.params.geometry_simplification > 0.0: col = sa.func.ST_SimplifyPreserveTopology(col, self.params.geometry_simplification) if self.params.geometry_output & GeometryFormat.GEOJSON: out.append(sa.func.ST_AsGeoJSON(col).label('geometry_geojson')) if self.params.geometry_output & GeometryFormat.TEXT: out.append(sa.func.ST_AsText(col).label('geometry_text')) if self.params.geometry_output & GeometryFormat.KML: out.append(sa.func.ST_AsKML(col).label('geometry_kml')) if self.params.geometry_output & GeometryFormat.SVG: out.append(sa.func.ST_AsSVG(col).label('geometry_svg')) return sql.add_columns(*out) def _filter_by_layer(self, table: SaFromClause) -> SaColumn: if self.layer_enabled(DataLayer.MANMADE): exclude = [] if self.layer_disabled(DataLayer.RAILWAY): exclude.append('railway') if self.layer_disabled(DataLayer.NATURAL): exclude.extend(('natural', 'water', 'waterway')) return table.c.class_.not_in(tuple(exclude)) include = [] if 
self.layer_enabled(DataLayer.RAILWAY): include.append('railway') if self.layer_enabled(DataLayer.NATURAL): include.extend(('natural', 'water', 'waterway')) return table.c.class_.in_(tuple(include)) async def _find_closest_street_or_poi(self, distance: float) -> Optional[SaRow]: """ Look up the closest rank 26+ place in the database, which is closer than the given distance. """ t = self.conn.t.placex # PostgreSQL must not get the distance as a parameter because # there is a danger it won't be able to proberly estimate index use # when used with prepared statements diststr = sa.text(f"{distance}") sql: SaLambdaSelect = sa.lambda_stmt(lambda: _select_from_placex(t) .where(t.c.geometry.ST_DWithin(WKT_PARAM, diststr)) .where(t.c.indexed_status == 0) .where(t.c.linked_place_id == None) .where(sa.or_(sa.not_(t.c.geometry.is_area()), t.c.centroid.ST_Distance(WKT_PARAM) < diststr)) .order_by('distance') .limit(1)) if self.has_geometries(): sql = self._add_geometry_columns(sql, t.c.geometry) restrict: List[Union[SaColumn, Callable[[], SaColumn]]] = [] if self.layer_enabled(DataLayer.ADDRESS): max_rank = min(29, self.max_rank) restrict.append(lambda: no_index(t.c.rank_address).between(26, max_rank)) if self.max_rank == 30: restrict.append(lambda: _is_address_point(t)) if self.layer_enabled(DataLayer.POI) and self.max_rank == 30: restrict.append(lambda: sa.and_(no_index(t.c.rank_search) == 30, t.c.class_.not_in(('place', 'building')), sa.not_(t.c.geometry.is_line_like()))) if self.has_feature_layers(): restrict.append(sa.and_(no_index(t.c.rank_search).between(26, MAX_RANK_PARAM), no_index(t.c.rank_address) == 0, self._filter_by_layer(t))) if not restrict: return None sql = sql.where(sa.or_(*restrict)) return (await self.conn.execute(sql, self.bind_params)).one_or_none() async def _find_housenumber_for_street(self, parent_place_id: int) -> Optional[SaRow]: t = self.conn.t.placex sql: SaLambdaSelect = sa.lambda_stmt(lambda: _select_from_placex(t) 
.where(t.c.geometry.ST_DWithin(WKT_PARAM, 0.001)) .where(t.c.parent_place_id == parent_place_id) .where(_is_address_point(t)) .where(t.c.indexed_status == 0) .where(t.c.linked_place_id == None) .order_by('distance') .limit(1)) if self.has_geometries(): sql = self._add_geometry_columns(sql, t.c.geometry) return (await self.conn.execute(sql, self.bind_params)).one_or_none() async def _find_interpolation_for_street(self, parent_place_id: Optional[int], distance: float) -> Optional[SaRow]: t = self.conn.t.osmline sql: Any = sa.lambda_stmt(lambda: sa.select(t, t.c.linegeo.ST_Distance(WKT_PARAM).label('distance'), _locate_interpolation(t)) .where(t.c.linegeo.ST_DWithin(WKT_PARAM, distance)) .where(t.c.startnumber != None) .order_by('distance') .limit(1)) if parent_place_id is not None: sql += lambda s: s.where(t.c.parent_place_id == parent_place_id) def _wrap_query(base_sql: SaLambdaSelect) -> SaSelect: inner = base_sql.subquery('ipol') return sa.select(inner.c.place_id, inner.c.osm_id, inner.c.parent_place_id, inner.c.address, _interpolated_housenumber(inner), _interpolated_position(inner), inner.c.postcode, inner.c.country_code, inner.c.distance) sql += _wrap_query if self.has_geometries(): sub = sql.subquery('geom') sql = self._add_geometry_columns(sa.select(sub), sub.c.centroid) return (await self.conn.execute(sql, self.bind_params)).one_or_none() async def _find_tiger_number_for_street(self, parent_place_id: int) -> Optional[SaRow]: t = self.conn.t.tiger def _base_query() -> SaSelect: inner = sa.select(t, t.c.linegeo.ST_Distance(WKT_PARAM).label('distance'), _locate_interpolation(t))\ .where(t.c.linegeo.ST_DWithin(WKT_PARAM, 0.001))\ .where(t.c.parent_place_id == parent_place_id)\ .order_by('distance')\ .limit(1)\ .subquery('tiger') return sa.select(inner.c.place_id, inner.c.parent_place_id, _interpolated_housenumber(inner), _interpolated_position(inner), inner.c.postcode, inner.c.distance) sql: SaLambdaSelect = sa.lambda_stmt(_base_query) if self.has_geometries(): 
sub = sql.subquery('geom') sql = self._add_geometry_columns(sa.select(sub), sub.c.centroid) return (await self.conn.execute(sql, self.bind_params)).one_or_none() async def lookup_street_poi(self) -> Tuple[Optional[SaRow], RowFunc]: """ Find a street or POI/address for the given WKT point. """ log().section('Reverse lookup on street/address level') distance = 0.006 parent_place_id = None row = await self._find_closest_street_or_poi(distance) row_func: RowFunc = nres.create_from_placex_row log().var_dump('Result (street/building)', row) # If the closest result was a street, but an address was requested, # check for a housenumber nearby which is part of the street. if row is not None: if self.max_rank > 27 \ and self.layer_enabled(DataLayer.ADDRESS) \ and row.rank_address <= 27: distance = 0.001 parent_place_id = row.place_id log().comment('Find housenumber for street') addr_row = await self._find_housenumber_for_street(parent_place_id) log().var_dump('Result (street housenumber)', addr_row) if addr_row is not None: row = addr_row row_func = nres.create_from_placex_row distance = addr_row.distance elif row.country_code == 'us' and parent_place_id is not None: log().comment('Find TIGER housenumber for street') addr_row = await self._find_tiger_number_for_street(parent_place_id) log().var_dump('Result (street Tiger housenumber)', addr_row) if addr_row is not None: row_func = cast(RowFunc, functools.partial(nres.create_from_tiger_row, osm_type=row.osm_type, osm_id=row.osm_id)) row = addr_row else: distance = row.distance # Check for an interpolation that is either closer than our result # or belongs to a close street found. 
if self.max_rank > 27 and self.layer_enabled(DataLayer.ADDRESS): log().comment('Find interpolation for street') addr_row = await self._find_interpolation_for_street(parent_place_id, distance) log().var_dump('Result (street interpolation)', addr_row) if addr_row is not None: row = addr_row row_func = nres.create_from_osmline_row return row, row_func async def _lookup_area_address(self) -> Optional[SaRow]: """ Lookup large addressable areas for the given WKT point. """ log().comment('Reverse lookup by larger address area features') t = self.conn.t.placex def _base_query() -> SaSelect: # The inner SQL brings results in the right order, so that # later only a minimum of results needs to be checked with ST_Contains. inner = sa.select(t, sa.literal(0.0).label('distance'))\ .where(t.c.rank_search.between(5, MAX_RANK_PARAM))\ .where(t.c.geometry.intersects(WKT_PARAM))\ .where(snfn.select_index_placex_geometry_reverse_lookuppolygon('placex'))\ .order_by(sa.desc(t.c.rank_search))\ .limit(50)\ .subquery('area') return _select_from_placex(inner, False)\ .where(inner.c.geometry.ST_Contains(WKT_PARAM))\ .order_by(sa.desc(inner.c.rank_search))\ .limit(1) sql: SaLambdaSelect = sa.lambda_stmt(_base_query) if self.has_geometries(): sql = self._add_geometry_columns(sql, sa.literal_column('area.geometry')) address_row = (await self.conn.execute(sql, self.bind_params)).one_or_none() log().var_dump('Result (area)', address_row) if address_row is not None and address_row.rank_search < self.max_rank: log().comment('Search for better matching place nodes inside the area') address_rank = address_row.rank_search address_id = address_row.place_id def _place_inside_area_query() -> SaSelect: inner = \ sa.select(t, t.c.geometry.ST_Distance(WKT_PARAM).label('distance'))\ .where(t.c.rank_search > address_rank)\ .where(t.c.rank_search <= MAX_RANK_PARAM)\ .where(t.c.indexed_status == 0)\ .where(snfn.select_index_placex_geometry_reverse_lookupplacenode('placex'))\ .where(t.c.geometry 
.ST_Buffer(sa.func.reverse_place_diameter(t.c.rank_search)) .intersects(WKT_PARAM))\ .order_by(sa.desc(t.c.rank_search))\ .limit(50)\ .subquery('places') touter = t.alias('outer') return _select_from_placex(inner, False)\ .join(touter, touter.c.geometry.ST_Contains(inner.c.geometry))\ .where(touter.c.place_id == address_id)\ .where(inner.c.distance < sa.func.reverse_place_diameter(inner.c.rank_search))\ .order_by(sa.desc(inner.c.rank_search), inner.c.distance)\ .limit(1) sql = sa.lambda_stmt(_place_inside_area_query) if self.has_geometries(): sql = self._add_geometry_columns(sql, sa.literal_column('places.geometry')) place_address_row = (await self.conn.execute(sql, self.bind_params)).one_or_none() log().var_dump('Result (place node)', place_address_row) if place_address_row is not None: return place_address_row return address_row async def _lookup_area_others(self) -> Optional[SaRow]: t = self.conn.t.placex inner = sa.select(t, t.c.geometry.ST_Distance(WKT_PARAM).label('distance'))\ .where(t.c.rank_address == 0)\ .where(t.c.rank_search.between(5, MAX_RANK_PARAM))\ .where(t.c.name != None)\ .where(t.c.indexed_status == 0)\ .where(t.c.linked_place_id == None)\ .where(self._filter_by_layer(t))\ .where(t.c.geometry .ST_Buffer(sa.func.reverse_place_diameter(t.c.rank_search)) .intersects(WKT_PARAM))\ .order_by(sa.desc(t.c.rank_search))\ .limit(50)\ .subquery() sql = _select_from_placex(inner, False)\ .where(sa.or_(sa.not_(inner.c.geometry.is_area()), inner.c.geometry.ST_Contains(WKT_PARAM)))\ .order_by(sa.desc(inner.c.rank_search), inner.c.distance)\ .limit(1) if self.has_geometries(): sql = self._add_geometry_columns(sql, inner.c.geometry) row = (await self.conn.execute(sql, self.bind_params)).one_or_none() log().var_dump('Result (non-address feature)', row) return row async def lookup_area(self) -> Optional[SaRow]: """ Lookup large areas for the current search. 
""" log().section('Reverse lookup by larger area features') if self.layer_enabled(DataLayer.ADDRESS): address_row = await self._lookup_area_address() else: address_row = None if self.has_feature_layers(): other_row = await self._lookup_area_others() else: other_row = None return _get_closest(address_row, other_row) async def lookup_country(self) -> Optional[SaRow]: """ Lookup the country for the current search. """ log().section('Reverse lookup by country code') t = self.conn.t.country_grid sql: SaLambdaSelect = sa.select(t.c.country_code).distinct()\ .where(t.c.geometry.ST_Contains(WKT_PARAM)) ccodes = tuple((r[0] for r in await self.conn.execute(sql, self.bind_params))) log().var_dump('Country codes', ccodes) if not ccodes: return None t = self.conn.t.placex if self.max_rank > 4: log().comment('Search for place nodes in country') def _base_query() -> SaSelect: inner = \ sa.select(t, t.c.geometry.ST_Distance(WKT_PARAM).label('distance'))\ .where(t.c.rank_search > 4)\ .where(t.c.rank_search <= MAX_RANK_PARAM)\ .where(t.c.indexed_status == 0)\ .where(t.c.country_code.in_(ccodes))\ .where(snfn.select_index_placex_geometry_reverse_lookupplacenode('placex'))\ .where(t.c.geometry .ST_Buffer(sa.func.reverse_place_diameter(t.c.rank_search)) .intersects(WKT_PARAM))\ .order_by(sa.desc(t.c.rank_search))\ .limit(50)\ .subquery('area') return _select_from_placex(inner, False)\ .where(inner.c.distance < sa.func.reverse_place_diameter(inner.c.rank_search))\ .order_by(sa.desc(inner.c.rank_search), inner.c.distance)\ .limit(1) sql = sa.lambda_stmt(_base_query) if self.has_geometries(): sql = self._add_geometry_columns(sql, sa.literal_column('area.geometry')) address_row = (await self.conn.execute(sql, self.bind_params)).one_or_none() log().var_dump('Result (addressable place node)', address_row) else: address_row = None if address_row is None: # Still nothing, then return a country with the appropriate country code. 
sql = sa.lambda_stmt(lambda: _select_from_placex(t)\ .where(t.c.country_code.in_(ccodes))\ .where(t.c.rank_address == 4)\ .where(t.c.rank_search == 4)\ .where(t.c.linked_place_id == None)\ .order_by('distance')\ .limit(1)) if self.has_geometries(): sql = self._add_geometry_columns(sql, t.c.geometry) address_row = (await self.conn.execute(sql, self.bind_params)).one_or_none() return address_row async def lookup(self, coord: AnyPoint) -> Optional[nres.ReverseResult]: """ Look up a single coordinate. Returns the place information, if a place was found near the coordinates or None otherwise. """ log().function('reverse_lookup', coord=coord, params=self.params) self.bind_params['wkt'] = f'POINT({coord[0]} {coord[1]})' row: Optional[SaRow] = None row_func: RowFunc = nres.create_from_placex_row if self.max_rank >= 26: row, tmp_row_func = await self.lookup_street_poi() if row is not None: row_func = tmp_row_func if row is None and self.max_rank > 4: row = await self.lookup_area() if row is None and self.layer_enabled(DataLayer.ADDRESS): row = await self.lookup_country() result = row_func(row, nres.ReverseResult) if result is not None: assert row is not None result.distance = row.distance if hasattr(row, 'bbox'): result.bbox = Bbox.from_wkb(row.bbox) await nres.add_result_details(self.conn, [result], self.params) return result
24,108
40.855903
99
py
Nominatim
Nominatim-master/nominatim/api/types.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Complex datatypes used by the Nominatim API. """ from typing import Optional, Union, Tuple, NamedTuple, TypeVar, Type, Dict, \ Any, List, Sequence from collections import abc import dataclasses import enum import math from struct import unpack from binascii import unhexlify from nominatim.errors import UsageError # pylint: disable=no-member,too-many-boolean-expressions,too-many-instance-attributes @dataclasses.dataclass class PlaceID: """ Reference an object by Nominatim's internal ID. """ place_id: int @dataclasses.dataclass class OsmID: """ Reference by the OSM ID and potentially the basic category. """ osm_type: str osm_id: int osm_class: Optional[str] = None def __post_init__(self) -> None: if self.osm_type not in ('N', 'W', 'R'): raise ValueError(f"Illegal OSM type '{self.osm_type}'. Must be one of N, W, R.") PlaceRef = Union[PlaceID, OsmID] class Point(NamedTuple): """ A geographic point in WGS84 projection. """ x: float y: float @property def lat(self) -> float: """ Return the latitude of the point. """ return self.y @property def lon(self) -> float: """ Return the longitude of the point. """ return self.x def to_geojson(self) -> str: """ Return the point in GeoJSON format. """ return f'{{"type": "Point","coordinates": [{self.x}, {self.y}]}}' @staticmethod def from_wkb(wkb: Union[str, bytes]) -> 'Point': """ Create a point from EWKB as returned from the database. 
""" if isinstance(wkb, str): wkb = unhexlify(wkb) if len(wkb) != 25: raise ValueError(f"Point wkb has unexpected length {len(wkb)}") if wkb[0] == 0: gtype, srid, x, y = unpack('>iidd', wkb[1:]) elif wkb[0] == 1: gtype, srid, x, y = unpack('<iidd', wkb[1:]) else: raise ValueError("WKB has unknown endian value.") if gtype != 0x20000001: raise ValueError("WKB must be a point geometry.") if srid != 4326: raise ValueError("Only WGS84 WKB supported.") return Point(x, y) @staticmethod def from_param(inp: Any) -> 'Point': """ Create a point from an input parameter. The parameter may be given as a point, a string or a sequence of strings or floats. Raises a UsageError if the format is not correct. """ if isinstance(inp, Point): return inp seq: Sequence[str] if isinstance(inp, str): seq = inp.split(',') elif isinstance(inp, abc.Sequence): seq = inp if len(seq) != 2: raise UsageError('Point parameter needs 2 coordinates.') try: x, y = filter(math.isfinite, map(float, seq)) except ValueError as exc: raise UsageError('Point parameter needs to be numbers.') from exc if x < -180.0 or x > 180.0 or y < -90.0 or y > 90.0: raise UsageError('Point coordinates invalid.') return Point(x, y) def to_wkt(self) -> str: """ Return the WKT representation of the point. """ return f'POINT({self.x} {self.y})' AnyPoint = Union[Point, Tuple[float, float]] WKB_BBOX_HEADER_LE = b'\x01\x03\x00\x00\x20\xE6\x10\x00\x00\x01\x00\x00\x00\x05\x00\x00\x00' WKB_BBOX_HEADER_BE = b'\x00\x20\x00\x00\x03\x00\x00\x10\xe6\x00\x00\x00\x01\x00\x00\x00\x05' class Bbox: """ A bounding box in WSG84 projection. The coordinates are available as an array in the 'coord' property in the order (minx, miny, maxx, maxy). """ def __init__(self, minx: float, miny: float, maxx: float, maxy: float) -> None: self.coords = (minx, miny, maxx, maxy) @property def minlat(self) -> float: """ Southern-most latitude, corresponding to the minimum y coordinate. 
""" return self.coords[1] @property def maxlat(self) -> float: """ Northern-most latitude, corresponding to the maximum y coordinate. """ return self.coords[3] @property def minlon(self) -> float: """ Western-most longitude, corresponding to the minimum x coordinate. """ return self.coords[0] @property def maxlon(self) -> float: """ Eastern-most longitude, corresponding to the maximum x coordinate. """ return self.coords[2] @property def area(self) -> float: """ Return the area of the box in WGS84. """ return (self.coords[2] - self.coords[0]) * (self.coords[3] - self.coords[1]) def contains(self, pt: Point) -> bool: """ Check if the point is inside or on the boundary of the box. """ return self.coords[0] <= pt[0] and self.coords[1] <= pt[1]\ and self.coords[2] >= pt[0] and self.coords[3] >= pt[1] def to_wkt(self) -> str: """ Return the WKT representation of the Bbox. This is a simple polygon with four points. """ return 'POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))'\ .format(*self.coords) # pylint: disable=consider-using-f-string @staticmethod def from_wkb(wkb: Union[None, str, bytes]) -> 'Optional[Bbox]': """ Create a Bbox from a bounding box polygon as returned by the database. Return s None if the input value is None. """ if wkb is None: return None if isinstance(wkb, str): wkb = unhexlify(wkb) if len(wkb) != 97: raise ValueError("WKB must be a bounding box polygon") if wkb.startswith(WKB_BBOX_HEADER_LE): x1, y1, _, _, x2, y2 = unpack('<dddddd', wkb[17:65]) elif wkb.startswith(WKB_BBOX_HEADER_BE): x1, y1, _, _, x2, y2 = unpack('>dddddd', wkb[17:65]) else: raise ValueError("WKB has wrong header") return Bbox(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)) @staticmethod def from_point(pt: Point, buffer: float) -> 'Bbox': """ Return a Bbox around the point with the buffer added to all sides. 
""" return Bbox(pt[0] - buffer, pt[1] - buffer, pt[0] + buffer, pt[1] + buffer) @staticmethod def from_param(inp: Any) -> 'Bbox': """ Return a Bbox from an input parameter. The box may be given as a Bbox, a string or a list or strings or integer. Raises a UsageError if the format is incorrect. """ if isinstance(inp, Bbox): return inp seq: Sequence[str] if isinstance(inp, str): seq = inp.split(',') elif isinstance(inp, abc.Sequence): seq = inp if len(seq) != 4: raise UsageError('Bounding box parameter needs 4 coordinates.') try: x1, y1, x2, y2 = filter(math.isfinite, map(float, seq)) except ValueError as exc: raise UsageError('Bounding box parameter needs to be numbers.') from exc if x1 < -180.0 or x1 > 180.0 or y1 < -90.0 or y1 > 90.0 \ or x2 < -180.0 or x2 > 180.0 or y2 < -90.0 or y2 > 90.0: raise UsageError('Bounding box coordinates invalid.') if x1 == x2 or y1 == y2: raise UsageError('Bounding box with invalid parameters.') return Bbox(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)) class GeometryFormat(enum.Flag): """ Geometry output formats supported by Nominatim. """ NONE = 0 GEOJSON = enum.auto() KML = enum.auto() SVG = enum.auto() TEXT = enum.auto() class DataLayer(enum.Flag): """ Layer types that can be selected for reverse and forward search. """ POI = enum.auto() ADDRESS = enum.auto() RAILWAY = enum.auto() MANMADE = enum.auto() NATURAL = enum.auto() def format_country(cc: Any) -> List[str]: """ Extract a list of country codes from the input which may be either a string or list of strings. Filters out all values that are not a two-letter string. 
""" clist: Sequence[str] if isinstance(cc, str): clist = cc.split(',') elif isinstance(cc, abc.Sequence): clist = cc else: raise UsageError("Parameter 'country' needs to be a comma-separated list " "or a Python list of strings.") return [cc.lower() for cc in clist if isinstance(cc, str) and len(cc) == 2] def format_excluded(ids: Any) -> List[int]: """ Extract a list of place ids from the input which may be either a string or a list of strings or ints. Ignores empty value but throws a UserError on anything that cannot be converted to int. """ plist: Sequence[str] if isinstance(ids, str): plist = [s.strip() for s in ids.split(',')] elif isinstance(ids, abc.Sequence): plist = ids else: raise UsageError("Parameter 'excluded' needs to be a comma-separated list " "or a Python list of numbers.") if not all(isinstance(i, int) or (isinstance(i, str) and (not i or i.isdigit())) for i in plist): raise UsageError("Parameter 'excluded' only takes place IDs.") return [int(id) for id in plist if id] or [0] def format_categories(categories: List[Tuple[str, str]]) -> List[Tuple[str, str]]: """ Extract a list of categories. Currently a noop. """ return categories TParam = TypeVar('TParam', bound='LookupDetails') # pylint: disable=invalid-name @dataclasses.dataclass class LookupDetails: """ Collection of parameters that define the amount of details returned with a lookup or details result. """ geometry_output: GeometryFormat = GeometryFormat.NONE """ Add the full geometry of the place to the result. Multiple formats may be selected. Note that geometries can become quite large. """ address_details: bool = False """ Get detailed information on the places that make up the address for the result. """ linked_places: bool = False """ Get detailed information on the places that link to the result. """ parented_places: bool = False """ Get detailed information on all places that this place is a parent for, i.e. all places for which it provides the address details. 
Only POI places can have parents. """ keywords: bool = False """ Add information about the search terms used for this place. """ geometry_simplification: float = 0.0 """ Simplification factor for a geometry in degrees WGS. A factor of 0.0 means the original geometry is kept. The higher the value, the more the geometry gets simplified. """ @classmethod def from_kwargs(cls: Type[TParam], kwargs: Dict[str, Any]) -> TParam: """ Load the data fields of the class from a dictionary. Unknown entries in the dictionary are ignored, missing ones get the default setting. The function supports type checking and throws a UsageError when the value does not fit. """ def _check_field(v: Any, field: 'dataclasses.Field[Any]') -> Any: if v is None: return field.default_factory() \ if field.default_factory != dataclasses.MISSING \ else field.default if field.metadata and 'transform' in field.metadata: return field.metadata['transform'](v) if not isinstance(v, field.type): raise UsageError(f"Parameter '{field.name}' needs to be of {field.type!s}.") return v return cls(**{f.name: _check_field(kwargs[f.name], f) for f in dataclasses.fields(cls) if f.name in kwargs}) @dataclasses.dataclass class ReverseDetails(LookupDetails): """ Collection of parameters for the reverse call. """ max_rank: int = dataclasses.field(default=30, metadata={'transform': lambda v: max(0, min(v, 30))} ) """ Highest address rank to return. """ layers: DataLayer = DataLayer.ADDRESS | DataLayer.POI """ Filter which kind of data to include. """ @dataclasses.dataclass class SearchDetails(LookupDetails): """ Collection of parameters for the search call. """ max_results: int = 10 """ Maximum number of results to be returned. The actual number of results may be less. """ min_rank: int = dataclasses.field(default=0, metadata={'transform': lambda v: max(0, min(v, 30))} ) """ Lowest address rank to return. 
""" max_rank: int = dataclasses.field(default=30, metadata={'transform': lambda v: max(0, min(v, 30))} ) """ Highest address rank to return. """ layers: Optional[DataLayer] = dataclasses.field(default=None, metadata={'transform': lambda r : r}) """ Filter which kind of data to include. When 'None' (the default) then filtering by layers is disabled. """ countries: List[str] = dataclasses.field(default_factory=list, metadata={'transform': format_country}) """ Restrict search results to the given countries. An empty list (the default) will disable this filter. """ excluded: List[int] = dataclasses.field(default_factory=list, metadata={'transform': format_excluded}) """ List of OSM objects to exclude from the results. Currenlty only works when the internal place ID is given. An empty list (the default) will disable this filter. """ viewbox: Optional[Bbox] = dataclasses.field(default=None, metadata={'transform': Bbox.from_param}) """ Focus the search on a given map area. """ bounded_viewbox: bool = False """ Use 'viewbox' as a filter and restrict results to places within the given area. """ near: Optional[Point] = dataclasses.field(default=None, metadata={'transform': Point.from_param}) """ Order results by distance to the given point. """ near_radius: Optional[float] = dataclasses.field(default=None, metadata={'transform': lambda r : r}) """ Use near point as a filter and drop results outside the given radius. Radius is given in degrees WSG84. """ categories: List[Tuple[str, str]] = dataclasses.field(default_factory=list, metadata={'transform': format_categories}) """ Restrict search to places with one of the given class/type categories. An empty list (the default) will disable this filter. 
""" viewbox_x2: Optional[Bbox] = None def __post_init__(self) -> None: if self.viewbox is not None: xext = (self.viewbox.maxlon - self.viewbox.minlon)/2 yext = (self.viewbox.maxlat - self.viewbox.minlat)/2 self.viewbox_x2 = Bbox(self.viewbox.minlon - xext, self.viewbox.minlat - yext, self.viewbox.maxlon + xext, self.viewbox.maxlat + yext) def restrict_min_max_rank(self, new_min: int, new_max: int) -> None: """ Change the min_rank and max_rank fields to respect the given boundaries. """ assert new_min <= new_max self.min_rank = max(self.min_rank, new_min) self.max_rank = min(self.max_rank, new_max) def is_impossible(self) -> bool: """ Check if the parameter configuration is contradictionary and cannot yield any results. """ return (self.min_rank > self.max_rank or (self.bounded_viewbox and self.viewbox is not None and self.near is not None and self.viewbox.contains(self.near)) or self.layers is not None and not self.layers) def layer_enabled(self, layer: DataLayer) -> bool: """ Check if the given layer has been choosen. Also returns true when layer restriction has been disabled completely. """ return self.layers is None or bool(self.layers & layer)
16,879
34.020747
100
py
Nominatim
Nominatim-master/nominatim/api/result_formatting.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Helper classes and functions for formating results into API responses. """ from typing import Type, TypeVar, Dict, List, Callable, Any, Mapping from collections import defaultdict T = TypeVar('T') # pylint: disable=invalid-name FormatFunc = Callable[[T, Mapping[str, Any]], str] class FormatDispatcher: """ Helper class to conveniently create formatting functions in a module using decorators. """ def __init__(self) -> None: self.format_functions: Dict[Type[Any], Dict[str, FormatFunc[Any]]] = defaultdict(dict) def format_func(self, result_class: Type[T], fmt: str) -> Callable[[FormatFunc[T]], FormatFunc[T]]: """ Decorator for a function that formats a given type of result into the selected format. """ def decorator(func: FormatFunc[T]) -> FormatFunc[T]: self.format_functions[result_class][fmt] = func return func return decorator def list_formats(self, result_type: Type[Any]) -> List[str]: """ Return a list of formats supported by this formatter. """ return list(self.format_functions[result_type].keys()) def supports_format(self, result_type: Type[Any], fmt: str) -> bool: """ Check if the given format is supported by this formatter. """ return fmt in self.format_functions[result_type] def format_result(self, result: Any, fmt: str, options: Mapping[str, Any]) -> str: """ Convert the given result into a string using the given format. The format is expected to be in the list returned by `list_formats()`. """ return self.format_functions[type(result)][fmt](result, options)
1,943
33.105263
94
py
Nominatim
Nominatim-master/nominatim/api/results.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Dataclasses for search results and helper functions to fill them. Data classes are part of the public API while the functions are for internal use only. That's why they are implemented as free-standing functions instead of member functions. """ from typing import Optional, Tuple, Dict, Sequence, TypeVar, Type, List, Any, Union import enum import dataclasses import datetime as dt import sqlalchemy as sa from nominatim.typing import SaSelect, SaRow, SaColumn from nominatim.api.types import Point, Bbox, LookupDetails from nominatim.api.connection import SearchConnection from nominatim.api.logging import log from nominatim.api.localization import Locales # This file defines complex result data classes. # pylint: disable=too-many-instance-attributes def _mingle_name_tags(names: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]: """ Mix-in names from linked places, so that they show up as standard names where necessary. """ if not names: return None out = {} for k, v in names.items(): if k.startswith('_place_'): outkey = k[7:] out[k if outkey in names else outkey] = v else: out[k] = v return out class SourceTable(enum.Enum): """ Enumeration of kinds of results. """ PLACEX = 1 OSMLINE = 2 TIGER = 3 POSTCODE = 4 COUNTRY = 5 @dataclasses.dataclass class AddressLine: """ Detailed information about a related place. """ place_id: Optional[int] osm_object: Optional[Tuple[str, int]] category: Tuple[str, str] names: Dict[str, str] extratags: Optional[Dict[str, str]] admin_level: Optional[int] fromarea: bool isaddress: bool rank_address: int distance: float local_name: Optional[str] = None class AddressLines(List[AddressLine]): """ Sequence of address lines order in descending order by their rank. 
""" def localize(self, locales: Locales) -> List[str]: """ Set the local name of address parts according to the chosen locale. Return the list of local names without duplications. Only address parts that are marked as isaddress are localized and returned. """ label_parts: List[str] = [] for line in self: if line.isaddress and line.names: line.local_name = locales.display_name(line.names) if not label_parts or label_parts[-1] != line.local_name: label_parts.append(line.local_name) return label_parts @dataclasses.dataclass class WordInfo: """ Detailed information about a search term. """ word_id: int word_token: str word: Optional[str] = None WordInfos = Sequence[WordInfo] @dataclasses.dataclass class BaseResult: """ Data class collecting information common to all types of search results. """ source_table: SourceTable category: Tuple[str, str] centroid: Point place_id : Optional[int] = None osm_object: Optional[Tuple[str, int]] = None locale_name: Optional[str] = None display_name: Optional[str] = None names: Optional[Dict[str, str]] = None address: Optional[Dict[str, str]] = None extratags: Optional[Dict[str, str]] = None housenumber: Optional[str] = None postcode: Optional[str] = None wikipedia: Optional[str] = None rank_address: int = 30 rank_search: int = 30 importance: Optional[float] = None country_code: Optional[str] = None address_rows: Optional[AddressLines] = None linked_rows: Optional[AddressLines] = None parented_rows: Optional[AddressLines] = None name_keywords: Optional[WordInfos] = None address_keywords: Optional[WordInfos] = None geometry: Dict[str, str] = dataclasses.field(default_factory=dict) @property def lat(self) -> float: """ Get the latitude (or y) of the center point of the place. """ return self.centroid[1] @property def lon(self) -> float: """ Get the longitude (or x) of the center point of the place. """ return self.centroid[0] def calculated_importance(self) -> float: """ Get a valid importance value. 
This is either the stored importance of the value or an artificial value computed from the place's search rank. """ return self.importance or (0.7500001 - (self.rank_search/40.0)) def localize(self, locales: Locales) -> None: """ Fill the locale_name and the display_name field for the place and, if available, its address information. """ self.locale_name = locales.display_name(self.names) if self.address_rows: self.display_name = ', '.join(self.address_rows.localize(locales)) else: self.display_name = self.locale_name BaseResultT = TypeVar('BaseResultT', bound=BaseResult) @dataclasses.dataclass class DetailedResult(BaseResult): """ A search result with more internal information from the database added. """ parent_place_id: Optional[int] = None linked_place_id: Optional[int] = None admin_level: int = 15 indexed_date: Optional[dt.datetime] = None @dataclasses.dataclass class ReverseResult(BaseResult): """ A search result for reverse geocoding. """ distance: Optional[float] = None bbox: Optional[Bbox] = None class ReverseResults(List[ReverseResult]): """ Sequence of reverse lookup results ordered by distance. May be empty when no result was found. """ @dataclasses.dataclass class SearchResult(BaseResult): """ A search result for forward geocoding. """ bbox: Optional[Bbox] = None accuracy: float = 0.0 @property def ranking(self) -> float: """ Return the ranking, a combined measure of accuracy and importance. """ return (self.accuracy if self.accuracy is not None else 1) \ - self.calculated_importance() class SearchResults(List[SearchResult]): """ Sequence of forward lookup results ordered by relevance. May be empty when no result was found. """ def localize(self, locales: Locales) -> None: """ Apply the given locales to all results. 
""" for result in self: result.localize(locales) def _filter_geometries(row: SaRow) -> Dict[str, str]: return {k[9:]: v for k, v in row._mapping.items() # pylint: disable=W0212 if k.startswith('geometry_')} def create_from_placex_row(row: Optional[SaRow], class_type: Type[BaseResultT]) -> Optional[BaseResultT]: """ Construct a new result and add the data from the result row from the placex table. 'class_type' defines the type of result to return. Returns None if the row is None. """ if row is None: return None return class_type(source_table=SourceTable.PLACEX, place_id=row.place_id, osm_object=(row.osm_type, row.osm_id), category=(row.class_, row.type), names=_mingle_name_tags(row.name), address=row.address, extratags=row.extratags, housenumber=row.housenumber, postcode=row.postcode, wikipedia=row.wikipedia, rank_address=row.rank_address, rank_search=row.rank_search, importance=row.importance, country_code=row.country_code, centroid=Point.from_wkb(row.centroid), geometry=_filter_geometries(row)) def create_from_osmline_row(row: Optional[SaRow], class_type: Type[BaseResultT]) -> Optional[BaseResultT]: """ Construct a new result and add the data from the result row from the address interpolation table osmline. 'class_type' defines the type of result to return. Returns None if the row is None. If the row contains a housenumber, then the housenumber is filled out. Otherwise the result contains the interpolation information in extratags. 
""" if row is None: return None hnr = getattr(row, 'housenumber', None) res = class_type(source_table=SourceTable.OSMLINE, place_id=row.place_id, osm_object=('W', row.osm_id), category=('place', 'houses' if hnr is None else 'house'), address=row.address, postcode=row.postcode, country_code=row.country_code, centroid=Point.from_wkb(row.centroid), geometry=_filter_geometries(row)) if hnr is None: res.extratags = {'startnumber': str(row.startnumber), 'endnumber': str(row.endnumber), 'step': str(row.step)} else: res.housenumber = str(hnr) return res def create_from_tiger_row(row: Optional[SaRow], class_type: Type[BaseResultT], osm_type: Optional[str] = None, osm_id: Optional[int] = None) -> Optional[BaseResultT]: """ Construct a new result and add the data from the result row from the Tiger data interpolation table. 'class_type' defines the type of result to return. Returns None if the row is None. If the row contains a housenumber, then the housenumber is filled out. Otherwise the result contains the interpolation information in extratags. """ if row is None: return None hnr = getattr(row, 'housenumber', None) res = class_type(source_table=SourceTable.TIGER, place_id=row.place_id, osm_object=(osm_type or row.osm_type, osm_id or row.osm_id), category=('place', 'houses' if hnr is None else 'house'), postcode=row.postcode, country_code='us', centroid=Point.from_wkb(row.centroid), geometry=_filter_geometries(row)) if hnr is None: res.extratags = {'startnumber': str(row.startnumber), 'endnumber': str(row.endnumber), 'step': str(row.step)} else: res.housenumber = str(hnr) return res def create_from_postcode_row(row: Optional[SaRow], class_type: Type[BaseResultT]) -> Optional[BaseResultT]: """ Construct a new result and add the data from the result row from the postcode table. 'class_type' defines the type of result to return. Returns None if the row is None. 
""" if row is None: return None return class_type(source_table=SourceTable.POSTCODE, place_id=row.place_id, category=('place', 'postcode'), names={'ref': row.postcode}, rank_search=row.rank_search, rank_address=row.rank_address, country_code=row.country_code, centroid=Point.from_wkb(row.centroid), geometry=_filter_geometries(row)) def create_from_country_row(row: Optional[SaRow], class_type: Type[BaseResultT]) -> Optional[BaseResultT]: """ Construct a new result and add the data from the result row from the fallback country tables. 'class_type' defines the type of result to return. Returns None if the row is None. """ if row is None: return None return class_type(source_table=SourceTable.COUNTRY, category=('place', 'country'), centroid=Point.from_wkb(row.centroid), names=row.name, rank_address=4, rank_search=4, country_code=row.country_code) async def add_result_details(conn: SearchConnection, results: List[BaseResultT], details: LookupDetails) -> None: """ Retrieve more details from the database according to the parameters specified in 'details'. """ if results: log().section('Query details for result') if details.address_details: log().comment('Query address details') await complete_address_details(conn, results) if details.linked_places: log().comment('Query linked places') for result in results: await complete_linked_places(conn, result) if details.parented_places: log().comment('Query parent places') for result in results: await complete_parented_places(conn, result) if details.keywords: log().comment('Query keywords') for result in results: await complete_keywords(conn, result) def _result_row_to_address_row(row: SaRow) -> AddressLine: """ Create a new AddressLine from the results of a datbase query. 
""" extratags: Dict[str, str] = getattr(row, 'extratags', {}) if hasattr(row, 'place_type') and row.place_type: extratags['place'] = row.place_type names = _mingle_name_tags(row.name) or {} if getattr(row, 'housenumber', None) is not None: names['housenumber'] = row.housenumber return AddressLine(place_id=row.place_id, osm_object=None if row.osm_type is None else (row.osm_type, row.osm_id), category=(getattr(row, 'class'), row.type), names=names, extratags=extratags, admin_level=row.admin_level, fromarea=row.fromarea, isaddress=getattr(row, 'isaddress', True), rank_address=row.rank_address, distance=row.distance) def _get_housenumber_details(results: List[BaseResultT]) -> Tuple[List[int], List[int]]: places = [] hnrs = [] for result in results: if result.place_id: housenumber = -1 if result.source_table in (SourceTable.TIGER, SourceTable.OSMLINE): if result.housenumber is not None: housenumber = int(result.housenumber) elif result.extratags is not None and 'startnumber' in result.extratags: # details requests do not come with a specific house number housenumber = int(result.extratags['startnumber']) places.append(result.place_id) hnrs.append(housenumber) return places, hnrs async def complete_address_details(conn: SearchConnection, results: List[BaseResultT]) -> None: """ Retrieve information about places that make up the address of the result. 
""" places, hnrs = _get_housenumber_details(results) if not places: return def _get_addressdata(place_id: Union[int, SaColumn], hnr: Union[int, SaColumn]) -> Any: return sa.func.get_addressdata(place_id, hnr)\ .table_valued( # type: ignore[no-untyped-call] sa.column('place_id', type_=sa.Integer), 'osm_type', sa.column('osm_id', type_=sa.BigInteger), sa.column('name', type_=conn.t.types.Composite), 'class', 'type', 'place_type', sa.column('admin_level', type_=sa.Integer), sa.column('fromarea', type_=sa.Boolean), sa.column('isaddress', type_=sa.Boolean), sa.column('rank_address', type_=sa.SmallInteger), sa.column('distance', type_=sa.Float), joins_implicitly=True) if len(places) == 1: # Optimized case for exactly one result (reverse) sql = sa.select(_get_addressdata(places[0], hnrs[0]))\ .order_by(sa.column('rank_address').desc(), sa.column('isaddress').desc()) alines = AddressLines() for row in await conn.execute(sql): alines.append(_result_row_to_address_row(row)) for result in results: if result.place_id == places[0]: result.address_rows = alines return darray = sa.func.unnest(conn.t.types.to_array(places), conn.t.types.to_array(hnrs))\ .table_valued( # type: ignore[no-untyped-call] sa.column('place_id', type_= sa.Integer), sa.column('housenumber', type_= sa.Integer) ).render_derived() sfn = _get_addressdata(darray.c.place_id, darray.c.housenumber) sql = sa.select(darray.c.place_id.label('result_place_id'), sfn)\ .order_by(darray.c.place_id, sa.column('rank_address').desc(), sa.column('isaddress').desc()) current_result = None for row in await conn.execute(sql): if current_result is None or row.result_place_id != current_result.place_id: for result in results: if result.place_id == row.result_place_id: current_result = result break else: assert False current_result.address_rows = AddressLines() current_result.address_rows.append(_result_row_to_address_row(row)) # pylint: disable=consider-using-f-string def _placex_select_address_row(conn: SearchConnection, 
centroid: Point) -> SaSelect: t = conn.t.placex return sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name, t.c.class_.label('class'), t.c.type, t.c.admin_level, t.c.housenumber, sa.literal_column("""ST_GeometryType(geometry) in ('ST_Polygon','ST_MultiPolygon')""").label('fromarea'), t.c.rank_address, sa.literal_column( """ST_DistanceSpheroid(geometry, 'SRID=4326;POINT(%f %f)'::geometry, 'SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]]') """ % centroid).label('distance')) async def complete_linked_places(conn: SearchConnection, result: BaseResult) -> None: """ Retrieve information about places that link to the result. """ result.linked_rows = AddressLines() if result.source_table != SourceTable.PLACEX: return sql = _placex_select_address_row(conn, result.centroid)\ .where(conn.t.placex.c.linked_place_id == result.place_id) for row in await conn.execute(sql): result.linked_rows.append(_result_row_to_address_row(row)) async def complete_keywords(conn: SearchConnection, result: BaseResult) -> None: """ Retrieve information about the search terms used for this place. Requires that the query analyzer was initialised to get access to the word table. """ t = conn.t.search_name sql = sa.select(t.c.name_vector, t.c.nameaddress_vector)\ .where(t.c.place_id == result.place_id) result.name_keywords = [] result.address_keywords = [] t = conn.t.meta.tables['word'] sel = sa.select(t.c.word_id, t.c.word_token, t.c.word) for name_tokens, address_tokens in await conn.execute(sql): for row in await conn.execute(sel.where(t.c.word_id == sa.any_(name_tokens))): result.name_keywords.append(WordInfo(*row)) for row in await conn.execute(sel.where(t.c.word_id == sa.any_(address_tokens))): result.address_keywords.append(WordInfo(*row)) async def complete_parented_places(conn: SearchConnection, result: BaseResult) -> None: """ Retrieve information about places that the result provides the address for. 
""" result.parented_rows = AddressLines() if result.source_table != SourceTable.PLACEX: return sql = _placex_select_address_row(conn, result.centroid)\ .where(conn.t.placex.c.parent_place_id == result.place_id)\ .where(conn.t.placex.c.rank_search == 30) for row in await conn.execute(sql): result.parented_rows.append(_result_row_to_address_row(row))
20,735
35.062609
99
py
Nominatim
Nominatim-master/nominatim/api/__init__.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ The public interface of the Nominatim library. Classes and functions defined in this file are considered stable. Always import from this file, not from the source files directly. """ # See also https://github.com/PyCQA/pylint/issues/6006 # pylint: disable=useless-import-alias from .core import (NominatimAPI as NominatimAPI, NominatimAPIAsync as NominatimAPIAsync) from .status import (StatusResult as StatusResult) from .types import (PlaceID as PlaceID, OsmID as OsmID, PlaceRef as PlaceRef, Point as Point, Bbox as Bbox, GeometryFormat as GeometryFormat, DataLayer as DataLayer) from .results import (SourceTable as SourceTable, AddressLine as AddressLine, AddressLines as AddressLines, WordInfo as WordInfo, WordInfos as WordInfos, DetailedResult as DetailedResult, ReverseResult as ReverseResult, ReverseResults as ReverseResults, SearchResult as SearchResult, SearchResults as SearchResults) from .localization import (Locales as Locales)
1,501
38.526316
72
py
Nominatim
Nominatim-master/nominatim/api/lookup.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Implementation of place lookup by ID. """ from typing import Optional, Callable, Tuple, Type import datetime as dt import sqlalchemy as sa from nominatim.typing import SaColumn, SaRow, SaSelect from nominatim.api.connection import SearchConnection import nominatim.api.types as ntyp import nominatim.api.results as nres from nominatim.api.logging import log RowFunc = Callable[[Optional[SaRow], Type[nres.BaseResultT]], Optional[nres.BaseResultT]] GeomFunc = Callable[[SaSelect, SaColumn], SaSelect] async def find_in_placex(conn: SearchConnection, place: ntyp.PlaceRef, add_geometries: GeomFunc) -> Optional[SaRow]: """ Search for the given place in the placex table and return the base information. """ log().section("Find in placex table") t = conn.t.placex sql = sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name, t.c.class_, t.c.type, t.c.admin_level, t.c.address, t.c.extratags, t.c.housenumber, t.c.postcode, t.c.country_code, t.c.importance, t.c.wikipedia, t.c.indexed_date, t.c.parent_place_id, t.c.rank_address, t.c.rank_search, t.c.linked_place_id, t.c.centroid) if isinstance(place, ntyp.PlaceID): sql = sql.where(t.c.place_id == place.place_id) elif isinstance(place, ntyp.OsmID): sql = sql.where(t.c.osm_type == place.osm_type)\ .where(t.c.osm_id == place.osm_id) if place.osm_class: sql = sql.where(t.c.class_ == place.osm_class) else: sql = sql.order_by(t.c.class_) sql = sql.limit(1) else: return None return (await conn.execute(add_geometries(sql, t.c.geometry))).one_or_none() async def find_in_osmline(conn: SearchConnection, place: ntyp.PlaceRef, add_geometries: GeomFunc) -> Optional[SaRow]: """ Search for the given place in the osmline table and return the base information. 
""" log().section("Find in interpolation table") t = conn.t.osmline sql = sa.select(t.c.place_id, t.c.osm_id, t.c.parent_place_id, t.c.indexed_date, t.c.startnumber, t.c.endnumber, t.c.step, t.c.address, t.c.postcode, t.c.country_code, t.c.linegeo.ST_Centroid().label('centroid')) if isinstance(place, ntyp.PlaceID): sql = sql.where(t.c.place_id == place.place_id) elif isinstance(place, ntyp.OsmID) and place.osm_type == 'W': # There may be multiple interpolations for a single way. # If 'class' contains a number, return the one that belongs to that number. sql = sql.where(t.c.osm_id == place.osm_id).limit(1) if place.osm_class and place.osm_class.isdigit(): sql = sql.order_by(sa.func.greatest(0, sa.func.least(int(place.osm_class) - t.c.endnumber), t.c.startnumber - int(place.osm_class))) else: return None return (await conn.execute(add_geometries(sql, t.c.linegeo))).one_or_none() async def find_in_tiger(conn: SearchConnection, place: ntyp.PlaceRef, add_geometries: GeomFunc) -> Optional[SaRow]: """ Search for the given place in the table of Tiger addresses and return the base information. Only lookup by place ID is supported. """ if not isinstance(place, ntyp.PlaceID): return None log().section("Find in TIGER table") t = conn.t.tiger parent = conn.t.placex sql = sa.select(t.c.place_id, t.c.parent_place_id, parent.c.osm_type, parent.c.osm_id, t.c.startnumber, t.c.endnumber, t.c.step, t.c.postcode, t.c.linegeo.ST_Centroid().label('centroid'))\ .where(t.c.place_id == place.place_id)\ .join(parent, t.c.parent_place_id == parent.c.place_id, isouter=True) return (await conn.execute(add_geometries(sql, t.c.linegeo))).one_or_none() async def find_in_postcode(conn: SearchConnection, place: ntyp.PlaceRef, add_geometries: GeomFunc) -> Optional[SaRow]: """ Search for the given place in the postcode table and return the base information. Only lookup by place ID is supported. 
""" if not isinstance(place, ntyp.PlaceID): return None log().section("Find in postcode table") t = conn.t.postcode sql = sa.select(t.c.place_id, t.c.parent_place_id, t.c.rank_search, t.c.rank_address, t.c.indexed_date, t.c.postcode, t.c.country_code, t.c.geometry.label('centroid')) \ .where(t.c.place_id == place.place_id) return (await conn.execute(add_geometries(sql, t.c.geometry))).one_or_none() async def find_in_all_tables(conn: SearchConnection, place: ntyp.PlaceRef, add_geometries: GeomFunc ) -> Tuple[Optional[SaRow], RowFunc[nres.BaseResultT]]: """ Search for the given place in all data tables and return the base information. """ row = await find_in_placex(conn, place, add_geometries) log().var_dump('Result (placex)', row) if row is not None: return row, nres.create_from_placex_row row = await find_in_osmline(conn, place, add_geometries) log().var_dump('Result (osmline)', row) if row is not None: return row, nres.create_from_osmline_row row = await find_in_postcode(conn, place, add_geometries) log().var_dump('Result (postcode)', row) if row is not None: return row, nres.create_from_postcode_row row = await find_in_tiger(conn, place, add_geometries) log().var_dump('Result (tiger)', row) return row, nres.create_from_tiger_row async def get_detailed_place(conn: SearchConnection, place: ntyp.PlaceRef, details: ntyp.LookupDetails) -> Optional[nres.DetailedResult]: """ Retrieve a place with additional details from the database. 
""" log().function('get_detailed_place', place=place, details=details) if details.geometry_output and details.geometry_output != ntyp.GeometryFormat.GEOJSON: raise ValueError("lookup only supports geojosn polygon output.") if details.geometry_output & ntyp.GeometryFormat.GEOJSON: def _add_geometry(sql: SaSelect, column: SaColumn) -> SaSelect: return sql.add_columns(sa.literal_column(f""" ST_AsGeoJSON(CASE WHEN ST_NPoints({column.name}) > 5000 THEN ST_SimplifyPreserveTopology({column.name}, 0.0001) ELSE {column.name} END) """).label('geometry_geojson')) else: def _add_geometry(sql: SaSelect, column: SaColumn) -> SaSelect: return sql.add_columns(sa.func.ST_GeometryType(column).label('geometry_type')) row_func: RowFunc[nres.DetailedResult] row, row_func = await find_in_all_tables(conn, place, _add_geometry) if row is None: return None result = row_func(row, nres.DetailedResult) assert result is not None # add missing details assert result is not None result.parent_place_id = row.parent_place_id result.linked_place_id = getattr(row, 'linked_place_id', None) result.admin_level = getattr(row, 'admin_level', 15) indexed_date = getattr(row, 'indexed_date', None) if indexed_date is not None: result.indexed_date = indexed_date.replace(tzinfo=dt.timezone.utc) await nres.add_result_details(conn, [result], details) return result async def get_simple_place(conn: SearchConnection, place: ntyp.PlaceRef, details: ntyp.LookupDetails) -> Optional[nres.SearchResult]: """ Retrieve a place as a simple search result from the database. 
""" log().function('get_simple_place', place=place, details=details) def _add_geometry(sql: SaSelect, col: SaColumn) -> SaSelect: if not details.geometry_output: return sql out = [] if details.geometry_simplification > 0.0: col = col.ST_SimplifyPreserveTopology(details.geometry_simplification) if details.geometry_output & ntyp.GeometryFormat.GEOJSON: out.append(col.ST_AsGeoJSON().label('geometry_geojson')) if details.geometry_output & ntyp.GeometryFormat.TEXT: out.append(col.ST_AsText().label('geometry_text')) if details.geometry_output & ntyp.GeometryFormat.KML: out.append(col.ST_AsKML().label('geometry_kml')) if details.geometry_output & ntyp.GeometryFormat.SVG: out.append(col.ST_AsSVG().label('geometry_svg')) return sql.add_columns(*out) row_func: RowFunc[nres.SearchResult] row, row_func = await find_in_all_tables(conn, place, _add_geometry) if row is None: return None result = row_func(row, nres.SearchResult) assert result is not None # add missing details assert result is not None result.bbox = getattr(row, 'bbox', None) await nres.add_result_details(conn, [result], details) return result
9,512
38.6375
91
py
Nominatim
Nominatim-master/nominatim/api/v1/classtypes.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Hard-coded information about tag catagories. These tables have been copied verbatim from the old PHP code. For future version a more flexible formatting is required. """ from typing import Tuple, Optional, Mapping, Union import nominatim.api as napi def get_label_tag(category: Tuple[str, str], extratags: Optional[Mapping[str, str]], rank: int, country: Optional[str]) -> str: """ Create a label tag for the given place that can be used as an XML name. """ if rank < 26 and extratags and 'place' in extratags: label = extratags['place'] elif rank < 26 and extratags and 'linked_place' in extratags: label = extratags['linked_place'] elif category == ('boundary', 'administrative'): label = ADMIN_LABELS.get((country or '', int(rank/2)))\ or ADMIN_LABELS.get(('', int(rank/2)))\ or 'Administrative' elif category[1] == 'postal_code': label = 'postcode' elif rank < 26: label = category[1] if category[1] != 'yes' else category[0] elif rank < 28: label = 'road' elif category[0] == 'place'\ and category[1] in ('house_number', 'house_name', 'country_code'): label = category[1] else: label = category[0] return label.lower().replace(' ', '_') def bbox_from_result(result: Union[napi.ReverseResult, napi.SearchResult]) -> napi.Bbox: """ Compute a bounding box for the result. For ways and relations a given boundingbox is used. For all other object, a box is computed around the centroid according to dimensions dereived from the search rank. """ if (result.osm_object and result.osm_object[0] == 'N') or result.bbox is None: extent = NODE_EXTENT.get(result.category, 0.00005) return napi.Bbox.from_point(result.centroid, extent) return result.bbox # pylint: disable=line-too-long OSM_ATTRIBUTION = 'Data © OpenStreetMap contributors, ODbL 1.0. 
http://osm.org/copyright' OSM_TYPE_NAME = { 'N': 'node', 'W': 'way', 'R': 'relation' } ADMIN_LABELS = { ('', 1): 'Continent', ('', 2): 'Country', ('', 3): 'Region', ('', 4): 'State', ('', 5): 'State District', ('', 6): 'County', ('', 7): 'Municipality', ('', 8): 'City', ('', 9): 'City District', ('', 10): 'Suburb', ('', 11): 'Neighbourhood', ('', 12): 'City Block', ('no', 3): 'State', ('no', 4): 'County', ('se', 3): 'State', ('se', 4): 'County' } ICONS = { ('boundary', 'administrative'): 'poi_boundary_administrative', ('place', 'city'): 'poi_place_city', ('place', 'town'): 'poi_place_town', ('place', 'village'): 'poi_place_village', ('place', 'hamlet'): 'poi_place_village', ('place', 'suburb'): 'poi_place_village', ('place', 'locality'): 'poi_place_village', ('place', 'airport'): 'transport_airport2', ('aeroway', 'aerodrome'): 'transport_airport2', ('railway', 'station'): 'transport_train_station2', ('amenity', 'place_of_worship'): 'place_of_worship_unknown3', ('amenity', 'pub'): 'food_pub', ('amenity', 'bar'): 'food_bar', ('amenity', 'university'): 'education_university', ('tourism', 'museum'): 'tourist_museum', ('amenity', 'arts_centre'): 'tourist_art_gallery2', ('tourism', 'zoo'): 'tourist_zoo', ('tourism', 'theme_park'): 'poi_point_of_interest', ('tourism', 'attraction'): 'poi_point_of_interest', ('leisure', 'golf_course'): 'sport_golf', ('historic', 'castle'): 'tourist_castle', ('amenity', 'hospital'): 'health_hospital', ('amenity', 'school'): 'education_school', ('amenity', 'theatre'): 'tourist_theatre', ('amenity', 'library'): 'amenity_library', ('amenity', 'fire_station'): 'amenity_firestation3', ('amenity', 'police'): 'amenity_police2', ('amenity', 'bank'): 'money_bank2', ('amenity', 'post_office'): 'amenity_post_office', ('tourism', 'hotel'): 'accommodation_hotel2', ('amenity', 'cinema'): 'tourist_cinema', ('tourism', 'artwork'): 'tourist_art_gallery2', ('historic', 'archaeological_site'): 'tourist_archaeological2', ('amenity', 'doctors'): 
'health_doctors', ('leisure', 'sports_centre'): 'sport_leisure_centre', ('leisure', 'swimming_pool'): 'sport_swimming_outdoor', ('shop', 'supermarket'): 'shopping_supermarket', ('shop', 'convenience'): 'shopping_convenience', ('amenity', 'restaurant'): 'food_restaurant', ('amenity', 'fast_food'): 'food_fastfood', ('amenity', 'cafe'): 'food_cafe', ('tourism', 'guest_house'): 'accommodation_bed_and_breakfast', ('amenity', 'pharmacy'): 'health_pharmacy_dispensing', ('amenity', 'fuel'): 'transport_fuel', ('natural', 'peak'): 'poi_peak', ('natural', 'wood'): 'landuse_coniferous_and_deciduous', ('shop', 'bicycle'): 'shopping_bicycle', ('shop', 'clothes'): 'shopping_clothes', ('shop', 'hairdresser'): 'shopping_hairdresser', ('shop', 'doityourself'): 'shopping_diy', ('shop', 'estate_agent'): 'shopping_estateagent2', ('shop', 'car'): 'shopping_car', ('shop', 'garden_centre'): 'shopping_garden_centre', ('shop', 'car_repair'): 'shopping_car_repair', ('shop', 'bakery'): 'shopping_bakery', ('shop', 'butcher'): 'shopping_butcher', ('shop', 'apparel'): 'shopping_clothes', ('shop', 'laundry'): 'shopping_laundrette', ('shop', 'beverages'): 'shopping_alcohol', ('shop', 'alcohol'): 'shopping_alcohol', ('shop', 'optician'): 'health_opticians', ('shop', 'chemist'): 'health_pharmacy', ('shop', 'gallery'): 'tourist_art_gallery2', ('shop', 'jewelry'): 'shopping_jewelry', ('tourism', 'information'): 'amenity_information', ('historic', 'ruins'): 'tourist_ruin', ('amenity', 'college'): 'education_school', ('historic', 'monument'): 'tourist_monument', ('historic', 'memorial'): 'tourist_monument', ('historic', 'mine'): 'poi_mine', ('tourism', 'caravan_site'): 'accommodation_caravan_park', ('amenity', 'bus_station'): 'transport_bus_station', ('amenity', 'atm'): 'money_atm2', ('tourism', 'viewpoint'): 'tourist_view_point', ('tourism', 'guesthouse'): 'accommodation_bed_and_breakfast', ('railway', 'tram'): 'transport_tram_stop', ('amenity', 'courthouse'): 'amenity_court', ('amenity', 'recycling'): 
'amenity_recycling', ('amenity', 'dentist'): 'health_dentist', ('natural', 'beach'): 'tourist_beach', ('railway', 'tram_stop'): 'transport_tram_stop', ('amenity', 'prison'): 'amenity_prison', ('highway', 'bus_stop'): 'transport_bus_stop2' } NODE_EXTENT = { ('place', 'continent'): 25, ('place', 'country'): 7, ('place', 'state'): 2.6, ('place', 'province'): 2.6, ('place', 'region'): 1.0, ('place', 'county'): 0.7, ('place', 'city'): 0.16, ('place', 'municipality'): 0.16, ('place', 'island'): 0.32, ('place', 'postcode'): 0.16, ('place', 'town'): 0.04, ('place', 'village'): 0.02, ('place', 'hamlet'): 0.02, ('place', 'district'): 0.02, ('place', 'borough'): 0.02, ('place', 'suburb'): 0.02, ('place', 'locality'): 0.01, ('place', 'neighbourhood'): 0.01, ('place', 'quarter'): 0.01, ('place', 'city_block'): 0.01, ('landuse', 'farm'): 0.01, ('place', 'farm'): 0.01, ('place', 'airport'): 0.015, ('aeroway', 'aerodrome'): 0.015, ('railway', 'station'): 0.005 }
7,618
36.905473
89
py
Nominatim
Nominatim-master/nominatim/api/v1/format.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Output formatters for API version v1. """ from typing import Mapping, Any import collections import nominatim.api as napi from nominatim.api.result_formatting import FormatDispatcher from nominatim.api.v1.classtypes import ICONS from nominatim.api.v1 import format_json, format_xml from nominatim.utils.json_writer import JsonWriter dispatch = FormatDispatcher() @dispatch.format_func(napi.StatusResult, 'text') def _format_status_text(result: napi.StatusResult, _: Mapping[str, Any]) -> str: if result.status: return f"ERROR: {result.message}" return 'OK' @dispatch.format_func(napi.StatusResult, 'json') def _format_status_json(result: napi.StatusResult, _: Mapping[str, Any]) -> str: out = JsonWriter() out.start_object()\ .keyval('status', result.status)\ .keyval('message', result.message)\ .keyval_not_none('data_updated', result.data_updated, lambda v: v.isoformat())\ .keyval('software_version', str(result.software_version))\ .keyval_not_none('database_version', result.database_version, str)\ .end_object() return out() def _add_address_row(writer: JsonWriter, row: napi.AddressLine, locales: napi.Locales) -> None: writer.start_object()\ .keyval('localname', locales.display_name(row.names))\ .keyval_not_none('place_id', row.place_id) if row.osm_object is not None: writer.keyval('osm_id', row.osm_object[1])\ .keyval('osm_type', row.osm_object[0]) if row.extratags: writer.keyval_not_none('place_type', row.extratags.get('place_type')) writer.keyval('class', row.category[0])\ .keyval('type', row.category[1])\ .keyval_not_none('admin_level', row.admin_level)\ .keyval('rank_address', row.rank_address)\ .keyval('distance', row.distance)\ .keyval('isaddress', row.isaddress)\ .end_object() def _add_address_rows(writer: JsonWriter, section: str, rows: napi.AddressLines, 
locales: napi.Locales) -> None: writer.key(section).start_array() for row in rows: _add_address_row(writer, row, locales) writer.next() writer.end_array().next() def _add_parent_rows_grouped(writer: JsonWriter, rows: napi.AddressLines, locales: napi.Locales) -> None: # group by category type data = collections.defaultdict(list) for row in rows: sub = JsonWriter() _add_address_row(sub, row, locales) data[row.category[1]].append(sub()) writer.key('hierarchy').start_object() for group, grouped in data.items(): writer.key(group).start_array() grouped.sort() # sorts alphabetically by local name for line in grouped: writer.raw(line).next() writer.end_array().next() writer.end_object().next() @dispatch.format_func(napi.DetailedResult, 'json') def _format_details_json(result: napi.DetailedResult, options: Mapping[str, Any]) -> str: locales = options.get('locales', napi.Locales()) geom = result.geometry.get('geojson') centroid = result.centroid.to_geojson() out = JsonWriter() out.start_object()\ .keyval_not_none('place_id', result.place_id)\ .keyval_not_none('parent_place_id', result.parent_place_id) if result.osm_object is not None: out.keyval('osm_type', result.osm_object[0])\ .keyval('osm_id', result.osm_object[1]) out.keyval('category', result.category[0])\ .keyval('type', result.category[1])\ .keyval('admin_level', result.admin_level)\ .keyval('localname', result.locale_name or '')\ .keyval('names', result.names or {})\ .keyval('addresstags', result.address or {})\ .keyval_not_none('housenumber', result.housenumber)\ .keyval_not_none('calculated_postcode', result.postcode)\ .keyval_not_none('country_code', result.country_code)\ .keyval_not_none('indexed_date', result.indexed_date, lambda v: v.isoformat())\ .keyval_not_none('importance', result.importance)\ .keyval('calculated_importance', result.calculated_importance())\ .keyval('extratags', result.extratags or {})\ .keyval_not_none('calculated_wikipedia', result.wikipedia)\ .keyval('rank_address', 
result.rank_address)\ .keyval('rank_search', result.rank_search)\ .keyval('isarea', 'Polygon' in (geom or result.geometry.get('type') or ''))\ .key('centroid').raw(centroid).next()\ .key('geometry').raw(geom or centroid).next() if options.get('icon_base_url', None): icon = ICONS.get(result.category) if icon: out.keyval('icon', f"{options['icon_base_url']}/{icon}.p.20.png") if result.address_rows is not None: _add_address_rows(out, 'address', result.address_rows, locales) if result.linked_rows is not None: _add_address_rows(out, 'linked_places', result.linked_rows, locales) if result.name_keywords is not None or result.address_keywords is not None: out.key('keywords').start_object() for sec, klist in (('name', result.name_keywords), ('address', result.address_keywords)): out.key(sec).start_array() for word in (klist or []): out.start_object()\ .keyval('id', word.word_id)\ .keyval('token', word.word_token)\ .end_object().next() out.end_array().next() out.end_object().next() if result.parented_rows is not None: if options.get('group_hierarchy', False): _add_parent_rows_grouped(out, result.parented_rows, locales) else: _add_address_rows(out, 'hierarchy', result.parented_rows, locales) out.end_object() return out() @dispatch.format_func(napi.ReverseResults, 'xml') def _format_reverse_xml(results: napi.ReverseResults, options: Mapping[str, Any]) -> str: return format_xml.format_base_xml(results, options, True, 'reversegeocode', {'querystring': options.get('query', '')}) @dispatch.format_func(napi.ReverseResults, 'geojson') def _format_reverse_geojson(results: napi.ReverseResults, options: Mapping[str, Any]) -> str: return format_json.format_base_geojson(results, options, True) @dispatch.format_func(napi.ReverseResults, 'geocodejson') def _format_reverse_geocodejson(results: napi.ReverseResults, options: Mapping[str, Any]) -> str: return format_json.format_base_geocodejson(results, options, True) @dispatch.format_func(napi.ReverseResults, 'json') def 
_format_reverse_json(results: napi.ReverseResults, options: Mapping[str, Any]) -> str: return format_json.format_base_json(results, options, True, class_label='class') @dispatch.format_func(napi.ReverseResults, 'jsonv2') def _format_reverse_jsonv2(results: napi.ReverseResults, options: Mapping[str, Any]) -> str: return format_json.format_base_json(results, options, True, class_label='category') @dispatch.format_func(napi.SearchResults, 'xml') def _format_search_xml(results: napi.SearchResults, options: Mapping[str, Any]) -> str: extra = {'querystring': options.get('query', '')} for attr in ('more_url', 'exclude_place_ids', 'viewbox'): if options.get(attr): extra[attr] = options[attr] return format_xml.format_base_xml(results, options, False, 'searchresults', extra) @dispatch.format_func(napi.SearchResults, 'geojson') def _format_search_geojson(results: napi.SearchResults, options: Mapping[str, Any]) -> str: return format_json.format_base_geojson(results, options, False) @dispatch.format_func(napi.SearchResults, 'geocodejson') def _format_search_geocodejson(results: napi.SearchResults, options: Mapping[str, Any]) -> str: return format_json.format_base_geocodejson(results, options, False) @dispatch.format_func(napi.SearchResults, 'json') def _format_search_json(results: napi.SearchResults, options: Mapping[str, Any]) -> str: return format_json.format_base_json(results, options, False, class_label='class') @dispatch.format_func(napi.SearchResults, 'jsonv2') def _format_search_jsonv2(results: napi.SearchResults, options: Mapping[str, Any]) -> str: return format_json.format_base_json(results, options, False, class_label='category')
9,129
37.851064
97
py
Nominatim
Nominatim-master/nominatim/api/v1/server_glue.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Generic part of the server implementation of the v1 API. Combine with the scaffolding provided for the various Python ASGI frameworks. """ from typing import Optional, Any, Type, Callable, NoReturn, Dict, cast from functools import reduce import abc import dataclasses import math from urllib.parse import urlencode from nominatim.errors import UsageError from nominatim.config import Configuration import nominatim.api as napi import nominatim.api.logging as loglib from nominatim.api.v1.format import dispatch as formatting from nominatim.api.v1 import helpers CONTENT_TYPE = { 'text': 'text/plain; charset=utf-8', 'xml': 'text/xml; charset=utf-8', 'debug': 'text/html; charset=utf-8' } class ASGIAdaptor(abc.ABC): """ Adapter class for the different ASGI frameworks. Wraps functionality over concrete requests and responses. """ content_type: str = 'text/plain; charset=utf-8' @abc.abstractmethod def get(self, name: str, default: Optional[str] = None) -> Optional[str]: """ Return an input parameter as a string. If the parameter was not provided, return the 'default' value. """ @abc.abstractmethod def get_header(self, name: str, default: Optional[str] = None) -> Optional[str]: """ Return a HTTP header parameter as a string. If the parameter was not provided, return the 'default' value. """ @abc.abstractmethod def error(self, msg: str, status: int = 400) -> Exception: """ Construct an appropriate exception from the given error message. The exception must result in a HTTP error with the given status. """ @abc.abstractmethod def create_response(self, status: int, output: str) -> Any: """ Create a response from the given parameters. The result will be returned by the endpoint functions. 
The adaptor may also return None when the response is created internally with some different means. The response must return the HTTP given status code 'status', set the HTTP content-type headers to the string provided and the body of the response to 'output'. """ @abc.abstractmethod def config(self) -> Configuration: """ Return the current configuration object. """ def build_response(self, output: str, status: int = 200) -> Any: """ Create a response from the given output. Wraps a JSONP function around the response, if necessary. """ if self.content_type == 'application/json' and status == 200: jsonp = self.get('json_callback') if jsonp is not None: if any(not part.isidentifier() for part in jsonp.split('.')): self.raise_error('Invalid json_callback value') output = f"{jsonp}({output})" self.content_type = 'application/javascript' return self.create_response(status, output) def raise_error(self, msg: str, status: int = 400) -> NoReturn: """ Raise an exception resulting in the given HTTP status and message. The message will be formatted according to the output format chosen by the request. """ if self.content_type == 'text/xml; charset=utf-8': msg = f"""<?xml version="1.0" encoding="UTF-8" ?> <error> <code>{status}</code> <message>{msg}</message> </error> """ elif self.content_type == 'application/json': msg = f"""{{"error":{{"code":{status},"message":"{msg}"}}}}""" elif self.content_type == 'text/html; charset=utf-8': loglib.log().section('Execution error') loglib.log().var_dump('Status', status) loglib.log().var_dump('Message', msg) msg = loglib.get_and_disable() raise self.error(msg, status) def get_int(self, name: str, default: Optional[int] = None) -> int: """ Return an input parameter as an int. Raises an exception if the parameter is given but not in an integer format. If 'default' is given, then it will be returned when the parameter is missing completely. When 'default' is None, an error will be raised on a missing parameter. 
""" value = self.get(name) if value is None: if default is not None: return default self.raise_error(f"Parameter '{name}' missing.") try: intval = int(value) except ValueError: self.raise_error(f"Parameter '{name}' must be a number.") return intval def get_float(self, name: str, default: Optional[float] = None) -> float: """ Return an input parameter as a flaoting-point number. Raises an exception if the parameter is given but not in an float format. If 'default' is given, then it will be returned when the parameter is missing completely. When 'default' is None, an error will be raised on a missing parameter. """ value = self.get(name) if value is None: if default is not None: return default self.raise_error(f"Parameter '{name}' missing.") try: fval = float(value) except ValueError: self.raise_error(f"Parameter '{name}' must be a number.") if math.isnan(fval) or math.isinf(fval): self.raise_error(f"Parameter '{name}' must be a number.") return fval def get_bool(self, name: str, default: Optional[bool] = None) -> bool: """ Return an input parameter as bool. Only '0' is accepted as an input for 'false' all other inputs will be interpreted as 'true'. If 'default' is given, then it will be returned when the parameter is missing completely. When 'default' is None, an error will be raised on a missing parameter. """ value = self.get(name) if value is None: if default is not None: return default self.raise_error(f"Parameter '{name}' missing.") return value != '0' def get_accepted_languages(self) -> str: """ Return the accepted languages. """ return self.get('accept-language')\ or self.get_header('accept-language')\ or self.config().DEFAULT_LANGUAGE def setup_debugging(self) -> bool: """ Set up collection of debug information if requested. Return True when debugging was requested. 
""" if self.get_bool('debug', False): loglib.set_log_output('html') self.content_type = 'text/html; charset=utf-8' return True return False def get_layers(self) -> Optional[napi.DataLayer]: """ Return a parsed version of the layer parameter. """ param = self.get('layer', None) if param is None: return None return cast(napi.DataLayer, reduce(napi.DataLayer.__or__, (getattr(napi.DataLayer, s.upper()) for s in param.split(',')))) def parse_format(self, result_type: Type[Any], default: str) -> str: """ Get and check the 'format' parameter and prepare the formatter. `result_type` is the type of result to be returned by the function and `default` the format value to assume when no parameter is present. """ fmt = self.get('format', default=default) assert fmt is not None if not formatting.supports_format(result_type, fmt): self.raise_error("Parameter 'format' must be one of: " + ', '.join(formatting.list_formats(result_type))) self.content_type = CONTENT_TYPE.get(fmt, 'application/json') return fmt def parse_geometry_details(self, fmt: str) -> Dict[str, Any]: """ Create details strucutre from the supplied geometry parameters. """ numgeoms = 0 output = napi.GeometryFormat.NONE if self.get_bool('polygon_geojson', False): output |= napi.GeometryFormat.GEOJSON numgeoms += 1 if fmt not in ('geojson', 'geocodejson'): if self.get_bool('polygon_text', False): output |= napi.GeometryFormat.TEXT numgeoms += 1 if self.get_bool('polygon_kml', False): output |= napi.GeometryFormat.KML numgeoms += 1 if self.get_bool('polygon_svg', False): output |= napi.GeometryFormat.SVG numgeoms += 1 if numgeoms > self.config().get_int('POLYGON_OUTPUT_MAX_TYPES'): self.raise_error('Too many polygon output options selected.') return {'address_details': True, 'geometry_simplification': self.get_float('polygon_threshold', 0.0), 'geometry_output': output } async def status_endpoint(api: napi.NominatimAPIAsync, params: ASGIAdaptor) -> Any: """ Server glue for /status endpoint. See API docs for details. 
""" result = await api.status() fmt = params.parse_format(napi.StatusResult, 'text') if fmt == 'text' and result.status: status_code = 500 else: status_code = 200 return params.build_response(formatting.format_result(result, fmt, {}), status=status_code) async def details_endpoint(api: napi.NominatimAPIAsync, params: ASGIAdaptor) -> Any: """ Server glue for /details endpoint. See API docs for details. """ fmt = params.parse_format(napi.DetailedResult, 'json') place_id = params.get_int('place_id', 0) place: napi.PlaceRef if place_id: place = napi.PlaceID(place_id) else: osmtype = params.get('osmtype') if osmtype is None: params.raise_error("Missing ID parameter 'place_id' or 'osmtype'.") place = napi.OsmID(osmtype, params.get_int('osmid'), params.get('class')) debug = params.setup_debugging() locales = napi.Locales.from_accept_languages(params.get_accepted_languages()) result = await api.details(place, address_details=params.get_bool('addressdetails', False), linked_places=params.get_bool('linkedplaces', False), parented_places=params.get_bool('hierarchy', False), keywords=params.get_bool('keywords', False), geometry_output = napi.GeometryFormat.GEOJSON if params.get_bool('polygon_geojson', False) else napi.GeometryFormat.NONE ) if debug: return params.build_response(loglib.get_and_disable()) if result is None: params.raise_error('No place with that OSM ID found.', status=404) result.localize(locales) output = formatting.format_result(result, fmt, {'locales': locales, 'group_hierarchy': params.get_bool('group_hierarchy', False), 'icon_base_url': params.config().MAPICON_URL}) return params.build_response(output) async def reverse_endpoint(api: napi.NominatimAPIAsync, params: ASGIAdaptor) -> Any: """ Server glue for /reverse endpoint. See API docs for details. 
""" fmt = params.parse_format(napi.ReverseResults, 'xml') debug = params.setup_debugging() coord = napi.Point(params.get_float('lon'), params.get_float('lat')) details = params.parse_geometry_details(fmt) details['max_rank'] = helpers.zoom_to_rank(params.get_int('zoom', 18)) details['layers'] = params.get_layers() result = await api.reverse(coord, **details) if debug: return params.build_response(loglib.get_and_disable()) if fmt == 'xml': queryparts = {'lat': str(coord.lat), 'lon': str(coord.lon), 'format': 'xml'} zoom = params.get('zoom', None) if zoom: queryparts['zoom'] = zoom query = urlencode(queryparts) else: query = '' fmt_options = {'query': query, 'extratags': params.get_bool('extratags', False), 'namedetails': params.get_bool('namedetails', False), 'addressdetails': params.get_bool('addressdetails', True)} if result: result.localize(napi.Locales.from_accept_languages(params.get_accepted_languages())) output = formatting.format_result(napi.ReverseResults([result] if result else []), fmt, fmt_options) return params.build_response(output) async def lookup_endpoint(api: napi.NominatimAPIAsync, params: ASGIAdaptor) -> Any: """ Server glue for /lookup endpoint. See API docs for details. 
""" fmt = params.parse_format(napi.SearchResults, 'xml') debug = params.setup_debugging() details = params.parse_geometry_details(fmt) places = [] for oid in (params.get('osm_ids') or '').split(','): oid = oid.strip() if len(oid) > 1 and oid[0] in 'RNWrnw' and oid[1:].isdigit(): places.append(napi.OsmID(oid[0], int(oid[1:]))) if len(places) > params.config().get_int('LOOKUP_MAX_COUNT'): params.raise_error('Too many object IDs.') if places: results = await api.lookup(places, **details) else: results = napi.SearchResults() if debug: return params.build_response(loglib.get_and_disable()) fmt_options = {'extratags': params.get_bool('extratags', False), 'namedetails': params.get_bool('namedetails', False), 'addressdetails': params.get_bool('addressdetails', True)} results.localize(napi.Locales.from_accept_languages(params.get_accepted_languages())) output = formatting.format_result(results, fmt, fmt_options) return params.build_response(output) async def _unstructured_search(query: str, api: napi.NominatimAPIAsync, details: Dict[str, Any]) -> napi.SearchResults: if not query: return napi.SearchResults() # Extract special format for coordinates from query. query, x, y = helpers.extract_coords_from_query(query) if x is not None: assert y is not None details['near'] = napi.Point(x, y) details['near_radius'] = 0.1 # If no query is left, revert to reverse search. 
if x is not None and not query: result = await api.reverse(details['near'], **details) if not result: return napi.SearchResults() return napi.SearchResults( [napi.SearchResult(**{f.name: getattr(result, f.name) for f in dataclasses.fields(napi.SearchResult) if hasattr(result, f.name)})]) query, cls, typ = helpers.extract_category_from_query(query) if cls is not None: assert typ is not None return await api.search_category([(cls, typ)], near_query=query, **details) return await api.search(query, **details) async def search_endpoint(api: napi.NominatimAPIAsync, params: ASGIAdaptor) -> Any: """ Server glue for /search endpoint. See API docs for details. """ fmt = params.parse_format(napi.SearchResults, 'jsonv2') debug = params.setup_debugging() details = params.parse_geometry_details(fmt) details['countries'] = params.get('countrycodes', None) details['excluded'] = params.get('exclude_place_ids', None) details['viewbox'] = params.get('viewbox', None) or params.get('viewboxlbrt', None) details['bounded_viewbox'] = params.get_bool('bounded', False) details['dedupe'] = params.get_bool('dedupe', True) max_results = max(1, min(50, params.get_int('limit', 10))) details['max_results'] = max_results + min(10, max_results) \ if details['dedupe'] else max_results details['min_rank'], details['max_rank'] = \ helpers.feature_type_to_rank(params.get('featureType', '')) if params.get('featureType', None) is not None: details['layers'] = napi.DataLayer.ADDRESS query = params.get('q', None) queryparts = {} try: if query is not None: queryparts['q'] = query results = await _unstructured_search(query, api, details) else: for key in ('amenity', 'street', 'city', 'county', 'state', 'postalcode', 'country'): details[key] = params.get(key, None) if details[key]: queryparts[key] = details[key] query = ', '.join(queryparts.values()) results = await api.search_address(**details) except UsageError as err: params.raise_error(str(err)) 
results.localize(napi.Locales.from_accept_languages(params.get_accepted_languages())) if details['dedupe'] and len(results) > 1: results = helpers.deduplicate_results(results, max_results) if debug: return params.build_response(loglib.get_and_disable()) if fmt == 'xml': helpers.extend_query_parts(queryparts, details, params.get('featureType', ''), params.get_bool('namedetails', False), params.get_bool('extratags', False), (str(r.place_id) for r in results if r.place_id)) queryparts['format'] = fmt moreurl = urlencode(queryparts) else: moreurl = '' fmt_options = {'query': query, 'more_url': moreurl, 'exclude_place_ids': queryparts.get('exclude_place_ids'), 'viewbox': queryparts.get('viewbox'), 'extratags': params.get_bool('extratags', False), 'namedetails': params.get_bool('namedetails', False), 'addressdetails': params.get_bool('addressdetails', False)} output = formatting.format_result(results, fmt, fmt_options) return params.build_response(output) EndpointFunc = Callable[[napi.NominatimAPIAsync, ASGIAdaptor], Any] ROUTES = [ ('status', status_endpoint), ('details', details_endpoint), ('reverse', reverse_endpoint), ('lookup', lookup_endpoint), ('search', search_endpoint) ]
18,862
36.278656
97
py
Nominatim
Nominatim-master/nominatim/api/v1/__init__.py
# SPDX-License-Identifier: GPL-2.0-only # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Implementation of API version v1 (aka the legacy version). """ #pylint: disable=useless-import-alias from nominatim.api.v1.server_glue import (ASGIAdaptor as ASGIAdaptor, EndpointFunc as EndpointFunc, ROUTES as ROUTES) import nominatim.api.v1.format as _format list_formats = _format.dispatch.list_formats supports_format = _format.dispatch.supports_format format_result = _format.dispatch.format_result
703
31
71
py
Nominatim
Nominatim-master/nominatim/api/v1/format_json.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Helper functions for output of results in json formats. """ from typing import Mapping, Any, Optional, Tuple, Union import nominatim.api as napi import nominatim.api.v1.classtypes as cl from nominatim.utils.json_writer import JsonWriter #pylint: disable=too-many-branches def _write_osm_id(out: JsonWriter, osm_object: Optional[Tuple[str, int]]) -> None: if osm_object is not None: out.keyval_not_none('osm_type', cl.OSM_TYPE_NAME.get(osm_object[0], None))\ .keyval('osm_id', osm_object[1]) def _write_typed_address(out: JsonWriter, address: Optional[napi.AddressLines], country_code: Optional[str]) -> None: parts = {} for line in (address or []): if line.isaddress: if line.local_name: label = cl.get_label_tag(line.category, line.extratags, line.rank_address, country_code) if label not in parts: parts[label] = line.local_name if line.names and 'ISO3166-2' in line.names and line.admin_level: parts[f"ISO3166-2-lvl{line.admin_level}"] = line.names['ISO3166-2'] for k, v in parts.items(): out.keyval(k, v) if country_code: out.keyval('country_code', country_code) def _write_geocodejson_address(out: JsonWriter, address: Optional[napi.AddressLines], obj_place_id: Optional[int], country_code: Optional[str]) -> None: extra = {} for line in (address or []): if line.isaddress and line.local_name: if line.category[1] in ('postcode', 'postal_code'): out.keyval('postcode', line.local_name) elif line.category[1] == 'house_number': out.keyval('housenumber', line.local_name) elif (obj_place_id is None or obj_place_id != line.place_id) \ and line.rank_address >= 4 and line.rank_address < 28: rank_name = GEOCODEJSON_RANKS[line.rank_address] if rank_name not in extra: extra[rank_name] = line.local_name for k, v in extra.items(): out.keyval(k, v) if country_code: 
out.keyval('country_code', country_code) def format_base_json(results: Union[napi.ReverseResults, napi.SearchResults], options: Mapping[str, Any], simple: bool, class_label: str) -> str: """ Return the result list as a simple json string in custom Nominatim format. """ out = JsonWriter() if simple: if not results: return '{"error":"Unable to geocode"}' else: out.start_array() for result in results: out.start_object()\ .keyval_not_none('place_id', result.place_id)\ .keyval('licence', cl.OSM_ATTRIBUTION)\ _write_osm_id(out, result.osm_object) out.keyval('lat', result.centroid.lat)\ .keyval('lon', result.centroid.lon)\ .keyval(class_label, result.category[0])\ .keyval('type', result.category[1])\ .keyval('place_rank', result.rank_search)\ .keyval('importance', result.calculated_importance())\ .keyval('addresstype', cl.get_label_tag(result.category, result.extratags, result.rank_address, result.country_code))\ .keyval('name', result.locale_name or '')\ .keyval('display_name', result.display_name or '') if options.get('icon_base_url', None): icon = cl.ICONS.get(result.category) if icon: out.keyval('icon', f"{options['icon_base_url']}/{icon}.p.20.png") if options.get('addressdetails', False): out.key('address').start_object() _write_typed_address(out, result.address_rows, result.country_code) out.end_object().next() if options.get('extratags', False): out.keyval('extratags', result.extratags) if options.get('namedetails', False): out.keyval('namedetails', result.names) bbox = cl.bbox_from_result(result) out.key('boundingbox').start_array()\ .value(f"{bbox.minlat:0.7f}").next()\ .value(f"{bbox.maxlat:0.7f}").next()\ .value(f"{bbox.minlon:0.7f}").next()\ .value(f"{bbox.maxlon:0.7f}").next()\ .end_array().next() if result.geometry: for key in ('text', 'kml'): out.keyval_not_none('geo' + key, result.geometry.get(key)) if 'geojson' in result.geometry: out.key('geojson').raw(result.geometry['geojson']).next() out.keyval_not_none('svg', result.geometry.get('svg')) 
out.end_object() if simple: return out() out.next() out.end_array() return out() def format_base_geojson(results: Union[napi.ReverseResults, napi.SearchResults], options: Mapping[str, Any], simple: bool) -> str: """ Return the result list as a geojson string. """ if not results and simple: return '{"error":"Unable to geocode"}' out = JsonWriter() out.start_object()\ .keyval('type', 'FeatureCollection')\ .keyval('licence', cl.OSM_ATTRIBUTION)\ .key('features').start_array() for result in results: out.start_object()\ .keyval('type', 'Feature')\ .key('properties').start_object() out.keyval_not_none('place_id', result.place_id) _write_osm_id(out, result.osm_object) out.keyval('place_rank', result.rank_search)\ .keyval('category', result.category[0])\ .keyval('type', result.category[1])\ .keyval('importance', result.calculated_importance())\ .keyval('addresstype', cl.get_label_tag(result.category, result.extratags, result.rank_address, result.country_code))\ .keyval('name', result.locale_name or '')\ .keyval('display_name', result.display_name or '') if options.get('addressdetails', False): out.key('address').start_object() _write_typed_address(out, result.address_rows, result.country_code) out.end_object().next() if options.get('extratags', False): out.keyval('extratags', result.extratags) if options.get('namedetails', False): out.keyval('namedetails', result.names) out.end_object().next() # properties out.key('bbox').start_array() for coord in cl.bbox_from_result(result).coords: out.float(coord, 7).next() out.end_array().next() out.key('geometry').raw(result.geometry.get('geojson') or result.centroid.to_geojson()).next() out.end_object().next() out.end_array().next().end_object() return out() def format_base_geocodejson(results: Union[napi.ReverseResults, napi.SearchResults], options: Mapping[str, Any], simple: bool) -> str: """ Return the result list as a geocodejson string. 
""" if not results and simple: return '{"error":"Unable to geocode"}' out = JsonWriter() out.start_object()\ .keyval('type', 'FeatureCollection')\ .key('geocoding').start_object()\ .keyval('version', '0.1.0')\ .keyval('attribution', cl.OSM_ATTRIBUTION)\ .keyval('licence', 'ODbL')\ .keyval_not_none('query', options.get('query'))\ .end_object().next()\ .key('features').start_array() for result in results: out.start_object()\ .keyval('type', 'Feature')\ .key('properties').start_object()\ .key('geocoding').start_object() out.keyval_not_none('place_id', result.place_id) _write_osm_id(out, result.osm_object) out.keyval('osm_key', result.category[0])\ .keyval('osm_value', result.category[1])\ .keyval('type', GEOCODEJSON_RANKS[max(3, min(28, result.rank_address))])\ .keyval_not_none('accuracy', getattr(result, 'distance', None), transform=int)\ .keyval('label', result.display_name or '')\ .keyval_not_none('name', result.locale_name or None)\ if options.get('addressdetails', False): _write_geocodejson_address(out, result.address_rows, result.place_id, result.country_code) out.key('admin').start_object() if result.address_rows: for line in result.address_rows: if line.isaddress and (line.admin_level or 15) < 15 and line.local_name: out.keyval(f"level{line.admin_level}", line.local_name) out.end_object().next() out.end_object().next().end_object().next() out.key('geometry').raw(result.geometry.get('geojson') or result.centroid.to_geojson()).next() out.end_object().next() out.end_array().next().end_object() return out() GEOCODEJSON_RANKS = { 3: 'locality', 4: 'country', 5: 'state', 6: 'state', 7: 'state', 8: 'state', 9: 'state', 10: 'county', 11: 'county', 12: 'county', 13: 'city', 14: 'city', 15: 'city', 16: 'city', 17: 'district', 18: 'district', 19: 'district', 20: 'district', 21: 'district', 22: 'locality', 23: 'locality', 24: 'locality', 25: 'street', 26: 'street', 27: 'street', 28: 'house'}
10,074
35.636364
92
py
Nominatim
Nominatim-master/nominatim/api/v1/helpers.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Helper function for parsing parameters and and outputting data specifically for the v1 version of the API. """ from typing import Tuple, Optional, Any, Dict, Iterable from itertools import chain import re from nominatim.api.results import SearchResult, SearchResults, SourceTable from nominatim.api.types import SearchDetails, GeometryFormat REVERSE_MAX_RANKS = [2, 2, 2, # 0-2 Continent/Sea 4, 4, # 3-4 Country 8, # 5 State 10, 10, # 6-7 Region 12, 12, # 8-9 County 16, 17, # 10-11 City 18, # 12 Town 19, # 13 Village/Suburb 22, # 14 Hamlet/Neighbourhood 25, # 15 Localities 26, # 16 Major Streets 27, # 17 Minor Streets 30 # 18 Building ] def zoom_to_rank(zoom: int) -> int: """ Convert a zoom parameter into a rank according to the v1 API spec. """ return REVERSE_MAX_RANKS[max(0, min(18, zoom))] FEATURE_TYPE_TO_RANK: Dict[Optional[str], Any] = { 'country': (4, 4), 'state': (8, 8), 'city': (14, 16), 'settlement': (8, 20) } def feature_type_to_rank(feature_type: Optional[str]) -> Tuple[int, int]: """ Convert a feature type parameter to a tuple of feature type name, minimum rank and maximum rank. """ return FEATURE_TYPE_TO_RANK.get(feature_type, (0, 30)) #pylint: disable=too-many-arguments,too-many-branches def extend_query_parts(queryparts: Dict[str, Any], details: Dict[str, Any], feature_type: Optional[str], namedetails: bool, extratags: bool, excluded: Iterable[str]) -> None: """ Add parameters from details dictionary to the query parts dictionary which is suitable as URL parameter dictionary. 
""" parsed = SearchDetails.from_kwargs(details) if parsed.geometry_output != GeometryFormat.NONE: if GeometryFormat.GEOJSON in parsed.geometry_output: queryparts['polygon_geojson'] = '1' if GeometryFormat.KML in parsed.geometry_output: queryparts['polygon_kml'] = '1' if GeometryFormat.SVG in parsed.geometry_output: queryparts['polygon_svg'] = '1' if GeometryFormat.TEXT in parsed.geometry_output: queryparts['polygon_text'] = '1' if parsed.address_details: queryparts['addressdetails'] = '1' if namedetails: queryparts['namedetails'] = '1' if extratags: queryparts['extratags'] = '1' if parsed.geometry_simplification > 0.0: queryparts['polygon_threshold'] = f"{parsed.geometry_simplification:.6g}" if parsed.max_results != 10: queryparts['limit'] = str(parsed.max_results) if parsed.countries: queryparts['countrycodes'] = ','.join(parsed.countries) queryparts['exclude_place_ids'] = \ ','.join(chain(excluded, map(str, (e for e in parsed.excluded if e > 0)))) if parsed.viewbox: queryparts['viewbox'] = ','.join(f"{c:.7g}" for c in parsed.viewbox.coords) if parsed.bounded_viewbox: queryparts['bounded'] = '1' if not details['dedupe']: queryparts['dedupe'] = '0' if feature_type in FEATURE_TYPE_TO_RANK: queryparts['featureType'] = feature_type def deduplicate_results(results: SearchResults, max_results: int) -> SearchResults: """ Remove results that look like duplicates. Two results are considered the same if they have the same OSM ID or if they have the same category, display name and rank. 
""" osm_ids_done = set() classification_done = set() deduped = SearchResults() for result in results: if result.source_table == SourceTable.POSTCODE: assert result.names and 'ref' in result.names if any(_is_postcode_relation_for(r, result.names['ref']) for r in results): continue classification = (result.osm_object[0] if result.osm_object else None, result.category, result.display_name, result.rank_address) if result.osm_object not in osm_ids_done \ and classification not in classification_done: deduped.append(result) osm_ids_done.add(result.osm_object) classification_done.add(classification) if len(deduped) >= max_results: break return deduped def _is_postcode_relation_for(result: SearchResult, postcode: str) -> bool: return result.source_table == SourceTable.PLACEX \ and result.osm_object is not None \ and result.osm_object[0] == 'R' \ and result.category == ('boundary', 'postal_code') \ and result.names is not None \ and result.names.get('ref') == postcode def _deg(axis:str) -> str: return f"(?P<{axis}_deg>\\d+\\.\\d+)°?" def _deg_min(axis: str) -> str: return f"(?P<{axis}_deg>\\d+)[°\\s]+(?P<{axis}_min>[\\d.]+)?[′']*" def _deg_min_sec(axis: str) -> str: return f"(?P<{axis}_deg>\\d+)[°\\s]+(?P<{axis}_min>\\d+)[′'\\s]+(?P<{axis}_sec>[\\d.]+)?[\"″]*" COORD_REGEX = [re.compile(r'(?:(?P<pre>.*?)\s+)??' + r + r'(?:\s+(?P<post>.*))?') for r in ( r"(?P<ns>[NS])\s*" + _deg('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg('lon'), _deg('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg('lon') + r"\s*(?P<ew>[EW])", r"(?P<ns>[NS])\s*" + _deg_min('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg_min('lon'), _deg_min('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg_min('lon') + r"\s*(?P<ew>[EW])", r"(?P<ns>[NS])\s*" + _deg_min_sec('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg_min_sec('lon'), _deg_min_sec('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg_min_sec('lon') + r"\s*(?P<ew>[EW])", r"\[?(?P<lat_deg>[+-]?\d+\.\d+)[\s,]+(?P<lon_deg>[+-]?\d+\.\d+)\]?" 
)] def extract_coords_from_query(query: str) -> Tuple[str, Optional[float], Optional[float]]: """ Look for something that is formated like a coordinate at the beginning or end of the query. If found, extract the coordinate and return the remaining query (or the empty string if the query consisted of nothing but a coordinate). Only the first match will be returned. """ for regex in COORD_REGEX: match = regex.fullmatch(query) if match is None: continue groups = match.groupdict() if not groups['pre'] or not groups['post']: x = float(groups['lon_deg']) \ + float(groups.get('lon_min', 0.0)) / 60.0 \ + float(groups.get('lon_sec', 0.0)) / 3600.0 if groups.get('ew') == 'W': x = -x y = float(groups['lat_deg']) \ + float(groups.get('lat_min', 0.0)) / 60.0 \ + float(groups.get('lat_sec', 0.0)) / 3600.0 if groups.get('ns') == 'S': y = -y return groups['pre'] or groups['post'] or '', x, y return query, None, None CATEGORY_REGEX = re.compile(r'(?P<pre>.*?)\[(?P<cls>[a-zA-Z_]+)=(?P<typ>[a-zA-Z_]+)\](?P<post>.*)') def extract_category_from_query(query: str) -> Tuple[str, Optional[str], Optional[str]]: """ Extract a hidden category specification of the form '[key=value]' from the query. If found, extract key and value and return the remaining query (or the empty string if the query consisted of nothing but a category). Only the first match will be returned. """ match = CATEGORY_REGEX.search(query) if match is not None: return (match.group('pre').strip() + ' ' + match.group('post').strip()).strip(), \ match.group('cls'), match.group('typ') return query, None, None
8,180
40.110553
100
py
Nominatim
Nominatim-master/nominatim/api/v1/format_xml.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Helper functions for output of results in XML format. """ from typing import Mapping, Any, Optional, Union import datetime as dt import xml.etree.ElementTree as ET import nominatim.api as napi import nominatim.api.v1.classtypes as cl #pylint: disable=too-many-branches def _write_xml_address(root: ET.Element, address: napi.AddressLines, country_code: Optional[str]) -> None: parts = {} for line in address: if line.isaddress: if line.local_name: label = cl.get_label_tag(line.category, line.extratags, line.rank_address, country_code) if label not in parts: parts[label] = line.local_name if line.names and 'ISO3166-2' in line.names and line.admin_level: parts[f"ISO3166-2-lvl{line.admin_level}"] = line.names['ISO3166-2'] for k,v in parts.items(): ET.SubElement(root, k).text = v if country_code: ET.SubElement(root, 'country_code').text = country_code def _create_base_entry(result: Union[napi.ReverseResult, napi.SearchResult], root: ET.Element, simple: bool) -> ET.Element: place = ET.SubElement(root, 'result' if simple else 'place') if result.place_id is not None: place.set('place_id', str(result.place_id)) if result.osm_object: osm_type = cl.OSM_TYPE_NAME.get(result.osm_object[0], None) if osm_type is not None: place.set('osm_type', osm_type) place.set('osm_id', str(result.osm_object[1])) if result.names and 'ref' in result.names: place.set('ref', result.names['ref']) elif result.locale_name: # bug reproduced from PHP place.set('ref', result.locale_name) place.set('lat', f"{result.centroid.lat:.7f}") place.set('lon', f"{result.centroid.lon:.7f}") bbox = cl.bbox_from_result(result) place.set('boundingbox', f"{bbox.minlat:.7f},{bbox.maxlat:.7f},{bbox.minlon:.7f},{bbox.maxlon:.7f}") place.set('place_rank', str(result.rank_search)) place.set('address_rank', 
str(result.rank_address)) if result.geometry: for key in ('text', 'svg'): if key in result.geometry: place.set('geo' + key, result.geometry[key]) if 'kml' in result.geometry: ET.SubElement(root if simple else place, 'geokml')\ .append(ET.fromstring(result.geometry['kml'])) if 'geojson' in result.geometry: place.set('geojson', result.geometry['geojson']) if simple: place.text = result.display_name or '' else: place.set('display_name', result.display_name or '') place.set('class', result.category[0]) place.set('type', result.category[1]) place.set('importance', str(result.calculated_importance())) return place def format_base_xml(results: Union[napi.ReverseResults, napi.SearchResults], options: Mapping[str, Any], simple: bool, xml_root_tag: str, xml_extra_info: Mapping[str, str]) -> str: """ Format the result into an XML response. With 'simple' exactly one result will be output, otherwise a list. """ root = ET.Element(xml_root_tag) root.set('timestamp', dt.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S +00:00')) root.set('attribution', cl.OSM_ATTRIBUTION) for k, v in xml_extra_info.items(): root.set(k, v) if simple and not results: ET.SubElement(root, 'error').text = 'Unable to geocode' for result in results: place = _create_base_entry(result, root, simple) if not simple and options.get('icon_base_url', None): icon = cl.ICONS.get(result.category) if icon: place.set('icon', icon) if options.get('addressdetails', False) and result.address_rows: _write_xml_address(ET.SubElement(root, 'addressparts') if simple else place, result.address_rows, result.country_code) if options.get('extratags', False): eroot = ET.SubElement(root if simple else place, 'extratags') if result.extratags: for k, v in result.extratags.items(): ET.SubElement(eroot, 'tag', attrib={'key': k, 'value': v}) if options.get('namedetails', False): eroot = ET.SubElement(root if simple else place, 'namedetails') if result.names: for k,v in result.names.items(): ET.SubElement(eroot, 'name', attrib={'desc': 
k}).text = v return '<?xml version="1.0" encoding="UTF-8" ?>\n' + ET.tostring(root, encoding='unicode')
4,970
38.452381
94
py
Nominatim
Nominatim-master/nominatim/api/search/query_analyzer_factory.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Factory for creating a query analyzer for the configured tokenizer. """ from typing import List, cast, TYPE_CHECKING from abc import ABC, abstractmethod from pathlib import Path import importlib from nominatim.api.logging import log from nominatim.api.connection import SearchConnection if TYPE_CHECKING: from nominatim.api.search.query import Phrase, QueryStruct class AbstractQueryAnalyzer(ABC): """ Class for analysing incomming queries. Query analyzers are tied to the tokenizer used on import. """ @abstractmethod async def analyze_query(self, phrases: List['Phrase']) -> 'QueryStruct': """ Analyze the given phrases and return the tokenized query. """ async def make_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer: """ Create a query analyzer for the tokenizer used by the database. """ name = await conn.get_property('tokenizer') src_file = Path(__file__).parent / f'{name}_tokenizer.py' if not src_file.is_file(): log().comment(f"No tokenizer named '{name}' available. Database not set up properly.") raise RuntimeError('Tokenizer not found') module = importlib.import_module(f'nominatim.api.search.{name}_tokenizer') return cast(AbstractQueryAnalyzer, await module.create_query_analyzer(conn))
1,533
32.347826
94
py
Nominatim
Nominatim-master/nominatim/api/search/db_searches.py
# SPDX-License-Identifier: GPL-3.0-or-later # # This file is part of Nominatim. (https://nominatim.org) # # Copyright (C) 2023 by the Nominatim developer community. # For a full list of authors see the git log. """ Implementation of the acutal database accesses for forward search. """ from typing import List, Tuple, AsyncIterator, Dict, Any, Callable import abc import sqlalchemy as sa from sqlalchemy.dialects.postgresql import ARRAY, array_agg from nominatim.typing import SaFromClause, SaScalarSelect, SaColumn, \ SaExpression, SaSelect, SaLambdaSelect, SaRow, SaBind from nominatim.api.connection import SearchConnection from nominatim.api.types import SearchDetails, DataLayer, GeometryFormat, Bbox import nominatim.api.results as nres from nominatim.api.search.db_search_fields import SearchData, WeightedCategories from nominatim.db.sqlalchemy_types import Geometry #pylint: disable=singleton-comparison,not-callable #pylint: disable=too-many-branches,too-many-arguments,too-many-locals,too-many-statements def _details_to_bind_params(details: SearchDetails) -> Dict[str, Any]: """ Create a dictionary from search parameters that can be used as bind parameter for SQL execute. 
""" return {'limit': details.max_results, 'min_rank': details.min_rank, 'max_rank': details.max_rank, 'viewbox': details.viewbox, 'viewbox2': details.viewbox_x2, 'near': details.near, 'near_radius': details.near_radius, 'excluded': details.excluded, 'countries': details.countries} LIMIT_PARAM: SaBind = sa.bindparam('limit') MIN_RANK_PARAM: SaBind = sa.bindparam('min_rank') MAX_RANK_PARAM: SaBind = sa.bindparam('max_rank') VIEWBOX_PARAM: SaBind = sa.bindparam('viewbox', type_=Geometry) VIEWBOX2_PARAM: SaBind = sa.bindparam('viewbox2', type_=Geometry) NEAR_PARAM: SaBind = sa.bindparam('near', type_=Geometry) NEAR_RADIUS_PARAM: SaBind = sa.bindparam('near_radius') COUNTRIES_PARAM: SaBind = sa.bindparam('countries') def _within_near(t: SaFromClause) -> Callable[[], SaExpression]: return lambda: t.c.geometry.ST_DWithin(NEAR_PARAM, NEAR_RADIUS_PARAM) def _exclude_places(t: SaFromClause) -> Callable[[], SaExpression]: return lambda: t.c.place_id.not_in(sa.bindparam('excluded')) def _select_placex(t: SaFromClause) -> SaSelect: return sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name, t.c.class_, t.c.type, t.c.address, t.c.extratags, t.c.housenumber, t.c.postcode, t.c.country_code, t.c.importance, t.c.wikipedia, t.c.parent_place_id, t.c.rank_address, t.c.rank_search, t.c.centroid, t.c.geometry.ST_Expand(0).label('bbox')) def _add_geometry_columns(sql: SaLambdaSelect, col: SaColumn, details: SearchDetails) -> SaSelect: out = [] if details.geometry_simplification > 0.0: col = sa.func.ST_SimplifyPreserveTopology(col, details.geometry_simplification) if details.geometry_output & GeometryFormat.GEOJSON: out.append(sa.func.ST_AsGeoJSON(col).label('geometry_geojson')) if details.geometry_output & GeometryFormat.TEXT: out.append(sa.func.ST_AsText(col).label('geometry_text')) if details.geometry_output & GeometryFormat.KML: out.append(sa.func.ST_AsKML(col).label('geometry_kml')) if details.geometry_output & GeometryFormat.SVG: 
out.append(sa.func.ST_AsSVG(col).label('geometry_svg')) return sql.add_columns(*out) def _make_interpolation_subquery(table: SaFromClause, inner: SaFromClause, numerals: List[int], details: SearchDetails) -> SaScalarSelect: all_ids = array_agg(table.c.place_id) # type: ignore[no-untyped-call] sql = sa.select(all_ids).where(table.c.parent_place_id == inner.c.place_id) if len(numerals) == 1: sql = sql.where(sa.between(numerals[0], table.c.startnumber, table.c.endnumber))\ .where((numerals[0] - table.c.startnumber) % table.c.step == 0) else: sql = sql.where(sa.or_( *(sa.and_(sa.between(n, table.c.startnumber, table.c.endnumber), (n - table.c.startnumber) % table.c.step == 0) for n in numerals))) if details.excluded: sql = sql.where(_exclude_places(table)) return sql.scalar_subquery() def _filter_by_layer(table: SaFromClause, layers: DataLayer) -> SaColumn: orexpr: List[SaExpression] = [] if layers & DataLayer.ADDRESS and layers & DataLayer.POI: orexpr.append(table.c.rank_address.between(1, 30)) elif layers & DataLayer.ADDRESS: orexpr.append(table.c.rank_address.between(1, 29)) orexpr.append(sa.and_(table.c.rank_address == 30, sa.or_(table.c.housenumber != None, table.c.address.has_key('housename')))) elif layers & DataLayer.POI: orexpr.append(sa.and_(table.c.rank_address == 30, table.c.class_.not_in(('place', 'building')))) if layers & DataLayer.MANMADE: exclude = [] if not layers & DataLayer.RAILWAY: exclude.append('railway') if not layers & DataLayer.NATURAL: exclude.extend(('natural', 'water', 'waterway')) orexpr.append(sa.and_(table.c.class_.not_in(tuple(exclude)), table.c.rank_address == 0)) else: include = [] if layers & DataLayer.RAILWAY: include.append('railway') if layers & DataLayer.NATURAL: include.extend(('natural', 'water', 'waterway')) orexpr.append(sa.and_(table.c.class_.in_(tuple(include)), table.c.rank_address == 0)) if len(orexpr) == 1: return orexpr[0] return sa.or_(*orexpr) def _interpolated_position(table: SaFromClause, nr: SaColumn) -> SaColumn: 
pos = sa.cast(nr - table.c.startnumber, sa.Float) / (table.c.endnumber - table.c.startnumber) return sa.case( (table.c.endnumber == table.c.startnumber, table.c.linegeo.ST_Centroid()), else_=table.c.linegeo.ST_LineInterpolatePoint(pos)).label('centroid') async def _get_placex_housenumbers(conn: SearchConnection, place_ids: List[int], details: SearchDetails) -> AsyncIterator[nres.SearchResult]: t = conn.t.placex sql = _select_placex(t).where(t.c.place_id.in_(place_ids)) if details.geometry_output: sql = _add_geometry_columns(sql, t.c.geometry, details) for row in await conn.execute(sql): result = nres.create_from_placex_row(row, nres.SearchResult) assert result result.bbox = Bbox.from_wkb(row.bbox) yield result async def _get_osmline(conn: SearchConnection, place_ids: List[int], numerals: List[int], details: SearchDetails) -> AsyncIterator[nres.SearchResult]: t = conn.t.osmline values = sa.values(sa.Column('nr', sa.Integer()), name='housenumber')\ .data([(n,) for n in numerals]) sql = sa.select(t.c.place_id, t.c.osm_id, t.c.parent_place_id, t.c.address, values.c.nr.label('housenumber'), _interpolated_position(t, values.c.nr), t.c.postcode, t.c.country_code)\ .where(t.c.place_id.in_(place_ids))\ .join(values, values.c.nr.between(t.c.startnumber, t.c.endnumber)) if details.geometry_output: sub = sql.subquery() sql = _add_geometry_columns(sa.select(sub), sub.c.centroid, details) for row in await conn.execute(sql): result = nres.create_from_osmline_row(row, nres.SearchResult) assert result yield result async def _get_tiger(conn: SearchConnection, place_ids: List[int], numerals: List[int], osm_id: int, details: SearchDetails) -> AsyncIterator[nres.SearchResult]: t = conn.t.tiger values = sa.values(sa.Column('nr', sa.Integer()), name='housenumber')\ .data([(n,) for n in numerals]) sql = sa.select(t.c.place_id, t.c.parent_place_id, sa.literal('W').label('osm_type'), sa.literal(osm_id).label('osm_id'), values.c.nr.label('housenumber'), _interpolated_position(t, 
values.c.nr), t.c.postcode)\ .where(t.c.place_id.in_(place_ids))\ .join(values, values.c.nr.between(t.c.startnumber, t.c.endnumber)) if details.geometry_output: sub = sql.subquery() sql = _add_geometry_columns(sa.select(sub), sub.c.centroid, details) for row in await conn.execute(sql): result = nres.create_from_tiger_row(row, nres.SearchResult) assert result yield result class AbstractSearch(abc.ABC): """ Encapuslation of a single lookup in the database. """ def __init__(self, penalty: float) -> None: self.penalty = penalty @abc.abstractmethod async def lookup(self, conn: SearchConnection, details: SearchDetails) -> nres.SearchResults: """ Find results for the search in the database. """ class NearSearch(AbstractSearch): """ Category search of a place type near the result of another search. """ def __init__(self, penalty: float, categories: WeightedCategories, search: AbstractSearch) -> None: super().__init__(penalty) self.search = search self.categories = categories async def lookup(self, conn: SearchConnection, details: SearchDetails) -> nres.SearchResults: """ Find results for the search in the database. """ results = nres.SearchResults() base = await self.search.lookup(conn, details) if not base: return results base.sort(key=lambda r: (r.accuracy, r.rank_search)) max_accuracy = base[0].accuracy + 0.5 base = nres.SearchResults(r for r in base if r.source_table == nres.SourceTable.PLACEX and r.accuracy <= max_accuracy and r.bbox and r.bbox.area < 20) if base: baseids = [b.place_id for b in base[:5] if b.place_id] for category, penalty in self.categories: await self.lookup_category(results, conn, baseids, category, penalty, details) if len(results) >= details.max_results: break return results async def lookup_category(self, results: nres.SearchResults, conn: SearchConnection, ids: List[int], category: Tuple[str, str], penalty: float, details: SearchDetails) -> None: """ Find places of the given category near the list of place ids and add the results to 'results'. 
""" table = await conn.get_class_table(*category) t = conn.t.placex.alias('p') tgeom = conn.t.placex.alias('pgeom') sql = _select_placex(t).where(tgeom.c.place_id.in_(ids))\ .where(t.c.class_ == category[0])\ .where(t.c.type == category[1]) if table is None: # No classtype table available, do a simplified lookup in placex. sql = sql.join(tgeom, t.c.geometry.ST_DWithin(tgeom.c.centroid, 0.01))\ .order_by(tgeom.c.centroid.ST_Distance(t.c.centroid)) else: # Use classtype table. We can afford to use a larger # radius for the lookup. sql = sql.join(table, t.c.place_id == table.c.place_id)\ .join(tgeom, sa.case((sa.and_(tgeom.c.rank_address < 9, tgeom.c.geometry.is_area()), tgeom.c.geometry.ST_Contains(table.c.centroid)), else_ = tgeom.c.centroid.ST_DWithin(table.c.centroid, 0.05)))\ .order_by(tgeom.c.centroid.ST_Distance(table.c.centroid)) sql = sql.where(t.c.rank_address.between(MIN_RANK_PARAM, MAX_RANK_PARAM)) if details.countries: sql = sql.where(t.c.country_code.in_(COUNTRIES_PARAM)) if details.excluded: sql = sql.where(_exclude_places(t)) if details.layers is not None: sql = sql.where(_filter_by_layer(t, details.layers)) sql = sql.limit(LIMIT_PARAM) for row in await conn.execute(sql, _details_to_bind_params(details)): result = nres.create_from_placex_row(row, nres.SearchResult) assert result result.accuracy = self.penalty + penalty result.bbox = Bbox.from_wkb(row.bbox) results.append(result) class PoiSearch(AbstractSearch): """ Category search in a geographic area. """ def __init__(self, sdata: SearchData) -> None: super().__init__(sdata.penalty) self.categories = sdata.qualifiers self.countries = sdata.countries async def lookup(self, conn: SearchConnection, details: SearchDetails) -> nres.SearchResults: """ Find results for the search in the database. 
""" bind_params = _details_to_bind_params(details) t = conn.t.placex rows: List[SaRow] = [] if details.near and details.near_radius is not None and details.near_radius < 0.2: # simply search in placex table def _base_query() -> SaSelect: return _select_placex(t) \ .where(t.c.linked_place_id == None) \ .where(t.c.geometry.ST_DWithin(NEAR_PARAM, NEAR_RADIUS_PARAM)) \ .order_by(t.c.centroid.ST_Distance(NEAR_PARAM)) \ .limit(LIMIT_PARAM) classtype = self.categories.values if len(classtype) == 1: cclass, ctype = classtype[0] sql: SaLambdaSelect = sa.lambda_stmt(lambda: _base_query() .where(t.c.class_ == cclass) .where(t.c.type == ctype)) else: sql = _base_query().where(sa.or_(*(sa.and_(t.c.class_ == cls, t.c.type == typ) for cls, typ in classtype))) if self.countries: sql = sql.where(t.c.country_code.in_(self.countries.values)) if details.viewbox is not None and details.bounded_viewbox: sql = sql.where(t.c.geometry.intersects(VIEWBOX_PARAM)) rows.extend(await conn.execute(sql, bind_params)) else: # use the class type tables for category in self.categories.values: table = await conn.get_class_table(*category) if table is not None: sql = _select_placex(t)\ .join(table, t.c.place_id == table.c.place_id)\ .where(t.c.class_ == category[0])\ .where(t.c.type == category[1]) if details.viewbox is not None and details.bounded_viewbox: sql = sql.where(table.c.centroid.intersects(VIEWBOX_PARAM)) if details.near and details.near_radius is not None: sql = sql.order_by(table.c.centroid.ST_Distance(NEAR_PARAM))\ .where(table.c.centroid.ST_DWithin(NEAR_PARAM, NEAR_RADIUS_PARAM)) if self.countries: sql = sql.where(t.c.country_code.in_(self.countries.values)) sql = sql.limit(LIMIT_PARAM) rows.extend(await conn.execute(sql, bind_params)) results = nres.SearchResults() for row in rows: result = nres.create_from_placex_row(row, nres.SearchResult) assert result result.accuracy = self.penalty + self.categories.get_penalty((row.class_, row.type)) result.bbox = Bbox.from_wkb(row.bbox) 
results.append(result) return results class CountrySearch(AbstractSearch): """ Search for a country name or country code. """ def __init__(self, sdata: SearchData) -> None: super().__init__(sdata.penalty) self.countries = sdata.countries async def lookup(self, conn: SearchConnection, details: SearchDetails) -> nres.SearchResults: """ Find results for the search in the database. """ t = conn.t.placex ccodes = self.countries.values sql: SaLambdaSelect = sa.lambda_stmt(lambda: _select_placex(t)\ .where(t.c.country_code.in_(ccodes))\ .where(t.c.rank_address == 4)) if details.geometry_output: sql = _add_geometry_columns(sql, t.c.geometry, details) if details.excluded: sql = sql.where(_exclude_places(t)) if details.viewbox is not None and details.bounded_viewbox: sql = sql.where(lambda: t.c.geometry.intersects(VIEWBOX_PARAM)) if details.near is not None and details.near_radius is not None: sql = sql.where(_within_near(t)) results = nres.SearchResults() for row in await conn.execute(sql, _details_to_bind_params(details)): result = nres.create_from_placex_row(row, nres.SearchResult) assert result result.accuracy = self.penalty + self.countries.get_penalty(row.country_code, 5.0) results.append(result) return results or await self.lookup_in_country_table(conn, details) async def lookup_in_country_table(self, conn: SearchConnection, details: SearchDetails) -> nres.SearchResults: """ Look up the country in the fallback country tables. """ # Avoid the fallback search when this is a more search. Country results # usually are in the first batch of results and it is not possible # to exclude these fallbacks. 
if details.excluded: return nres.SearchResults() t = conn.t.country_name tgrid = conn.t.country_grid sql = sa.select(tgrid.c.country_code, tgrid.c.geometry.ST_Centroid().ST_Collect().ST_Centroid() .label('centroid'))\ .where(tgrid.c.country_code.in_(self.countries.values))\ .group_by(tgrid.c.country_code) if details.viewbox is not None and details.bounded_viewbox: sql = sql.where(tgrid.c.geometry.intersects(VIEWBOX_PARAM)) if details.near is not None and details.near_radius is not None: sql = sql.where(_within_near(tgrid)) sub = sql.subquery('grid') sql = sa.select(t.c.country_code, (t.c.name + sa.func.coalesce(t.c.derived_name, sa.cast('', type_=conn.t.types.Composite)) ).label('name'), sub.c.centroid)\ .join(sub, t.c.country_code == sub.c.country_code) results = nres.SearchResults() for row in await conn.execute(sql, _details_to_bind_params(details)): result = nres.create_from_country_row(row, nres.SearchResult) assert result result.accuracy = self.penalty + self.countries.get_penalty(row.country_code, 5.0) results.append(result) return results class PostcodeSearch(AbstractSearch): """ Search for a postcode. """ def __init__(self, extra_penalty: float, sdata: SearchData) -> None: super().__init__(sdata.penalty + extra_penalty) self.countries = sdata.countries self.postcodes = sdata.postcodes self.lookups = sdata.lookups self.rankings = sdata.rankings async def lookup(self, conn: SearchConnection, details: SearchDetails) -> nres.SearchResults: """ Find results for the search in the database. 
""" t = conn.t.postcode pcs = self.postcodes.values sql: SaLambdaSelect = sa.lambda_stmt(lambda: sa.select(t.c.place_id, t.c.parent_place_id, t.c.rank_search, t.c.rank_address, t.c.postcode, t.c.country_code, t.c.geometry.label('centroid')) .where(t.c.postcode.in_(pcs))) if details.geometry_output: sql = _add_geometry_columns(sql, t.c.geometry, details) penalty: SaExpression = sa.literal(self.penalty) if details.viewbox is not None: if details.bounded_viewbox: sql = sql.where(t.c.geometry.intersects(VIEWBOX_PARAM)) else: penalty += sa.case((t.c.geometry.intersects(VIEWBOX_PARAM), 0.0), (t.c.geometry.intersects(VIEWBOX2_PARAM), 1.0), else_=2.0) if details.near is not None: if details.near_radius is not None: sql = sql.where(_within_near(t)) sql = sql.order_by(t.c.geometry.ST_Distance(NEAR_PARAM)) if self.countries: sql = sql.where(t.c.country_code.in_(self.countries.values)) if details.excluded: sql = sql.where(_exclude_places(t)) if self.lookups: assert len(self.lookups) == 1 assert self.lookups[0].lookup_type == 'restrict' tsearch = conn.t.search_name sql = sql.where(tsearch.c.place_id == t.c.parent_place_id)\ .where(sa.func.array_cat(tsearch.c.name_vector, tsearch.c.nameaddress_vector, type_=ARRAY(sa.Integer)) .contains(self.lookups[0].tokens)) for ranking in self.rankings: penalty += ranking.sql_penalty(conn.t.search_name) penalty += sa.case(*((t.c.postcode == v, p) for v, p in self.postcodes), else_=1.0) sql = sql.add_columns(penalty.label('accuracy')) sql = sql.order_by('accuracy').limit(LIMIT_PARAM) results = nres.SearchResults() for row in await conn.execute(sql, _details_to_bind_params(details)): result = nres.create_from_postcode_row(row, nres.SearchResult) assert result result.accuracy = row.accuracy results.append(result) return results class PlaceSearch(AbstractSearch): """ Generic search for an address or named place. 
""" def __init__(self, extra_penalty: float, sdata: SearchData, expected_count: int) -> None: super().__init__(sdata.penalty + extra_penalty) self.countries = sdata.countries self.postcodes = sdata.postcodes self.housenumbers = sdata.housenumbers self.qualifiers = sdata.qualifiers self.lookups = sdata.lookups self.rankings = sdata.rankings self.expected_count = expected_count async def lookup(self, conn: SearchConnection, details: SearchDetails) -> nres.SearchResults: """ Find results for the search in the database. """ t = conn.t.placex tsearch = conn.t.search_name sql: SaLambdaSelect = sa.lambda_stmt(lambda: sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name, t.c.class_, t.c.type, t.c.address, t.c.extratags, t.c.housenumber, t.c.postcode, t.c.country_code, t.c.wikipedia, t.c.parent_place_id, t.c.rank_address, t.c.rank_search, t.c.centroid, t.c.geometry.ST_Expand(0).label('bbox')) .where(t.c.place_id == tsearch.c.place_id)) if details.geometry_output: sql = _add_geometry_columns(sql, t.c.geometry, details) penalty: SaExpression = sa.literal(self.penalty) for ranking in self.rankings: penalty += ranking.sql_penalty(tsearch) for lookup in self.lookups: sql = sql.where(lookup.sql_condition(tsearch)) if self.countries: sql = sql.where(tsearch.c.country_code.in_(self.countries.values)) if self.postcodes: # if a postcode is given, don't search for state or country level objects sql = sql.where(tsearch.c.address_rank > 9) tpc = conn.t.postcode pcs = self.postcodes.values if self.expected_count > 1000: # Many results expected. Restrict by postcode. 
sql = sql.where(lambda: sa.select(tpc.c.postcode) .where(tpc.c.postcode.in_(pcs)) .where(tsearch.c.centroid.ST_DWithin(tpc.c.geometry, 0.12)) .exists()) # Less results, only have a preference for close postcodes pc_near = sa.select(sa.func.min(tpc.c.geometry.ST_Distance(tsearch.c.centroid)))\ .where(tpc.c.postcode.in_(pcs))\ .scalar_subquery() penalty += sa.case((t.c.postcode.in_(pcs), 0.0), else_=sa.func.coalesce(pc_near, 2.0)) if details.viewbox is not None: if details.bounded_viewbox: sql = sql.where(tsearch.c.centroid.intersects(VIEWBOX_PARAM)) else: penalty += sa.case((t.c.geometry.intersects(VIEWBOX_PARAM), 0.0), (t.c.geometry.intersects(VIEWBOX2_PARAM), 1.0), else_=2.0) if details.near is not None: if details.near_radius is not None: sql = sql.where(tsearch.c.centroid.ST_DWithin(NEAR_PARAM, NEAR_RADIUS_PARAM)) sql = sql.add_columns(-tsearch.c.centroid.ST_Distance(NEAR_PARAM) .label('importance')) sql = sql.order_by(sa.desc(sa.text('importance'))) else: sql = sql.order_by(penalty - sa.case((tsearch.c.importance > 0, tsearch.c.importance), else_=0.75001-(sa.cast(tsearch.c.search_rank, sa.Float())/40))) sql = sql.add_columns(t.c.importance) sql = sql.add_columns(penalty.label('accuracy'))\ .order_by(sa.text('accuracy')) if self.housenumbers: hnr_regexp = f"\\m({'|'.join(self.housenumbers.values)})\\M" sql = sql.where(tsearch.c.address_rank.between(16, 30))\ .where(sa.or_(tsearch.c.address_rank < 30, t.c.housenumber.op('~*')(hnr_regexp))) # Cross check for housenumbers, need to do that on a rather large # set. Worst case there are 40.000 main streets in OSM. 
inner = sql.limit(10000).subquery() # Housenumbers from placex thnr = conn.t.placex.alias('hnr') pid_list = array_agg(thnr.c.place_id) # type: ignore[no-untyped-call] place_sql = sa.select(pid_list)\ .where(thnr.c.parent_place_id == inner.c.place_id)\ .where(thnr.c.housenumber.op('~*')(hnr_regexp))\ .where(thnr.c.linked_place_id == None)\ .where(thnr.c.indexed_status == 0) if details.excluded: place_sql = place_sql.where(_exclude_places(thnr)) if self.qualifiers: place_sql = place_sql.where(self.qualifiers.sql_restrict(thnr)) numerals = [int(n) for n in self.housenumbers.values if n.isdigit()] interpol_sql: SaColumn tiger_sql: SaColumn if numerals and \ (not self.qualifiers or ('place', 'house') in self.qualifiers.values): # Housenumbers from interpolations interpol_sql = _make_interpolation_subquery(conn.t.osmline, inner, numerals, details) # Housenumbers from Tiger tiger_sql = sa.case((inner.c.country_code == 'us', _make_interpolation_subquery(conn.t.tiger, inner, numerals, details) ), else_=None) else: interpol_sql = sa.null() tiger_sql = sa.null() unsort = sa.select(inner, place_sql.scalar_subquery().label('placex_hnr'), interpol_sql.label('interpol_hnr'), tiger_sql.label('tiger_hnr')).subquery('unsort') sql = sa.select(unsort)\ .order_by(sa.case((unsort.c.placex_hnr != None, 1), (unsort.c.interpol_hnr != None, 2), (unsort.c.tiger_hnr != None, 3), else_=4), unsort.c.accuracy) else: sql = sql.where(t.c.linked_place_id == None)\ .where(t.c.indexed_status == 0) if self.qualifiers: sql = sql.where(self.qualifiers.sql_restrict(t)) if details.excluded: sql = sql.where(_exclude_places(tsearch)) if details.min_rank > 0: sql = sql.where(sa.or_(tsearch.c.address_rank >= MIN_RANK_PARAM, tsearch.c.search_rank >= MIN_RANK_PARAM)) if details.max_rank < 30: sql = sql.where(sa.or_(tsearch.c.address_rank <= MAX_RANK_PARAM, tsearch.c.search_rank <= MAX_RANK_PARAM)) if details.layers is not None: sql = sql.where(_filter_by_layer(t, details.layers)) sql = sql.limit(LIMIT_PARAM) 
results = nres.SearchResults() for row in await conn.execute(sql, _details_to_bind_params(details)): result = nres.create_from_placex_row(row, nres.SearchResult) assert result result.bbox = Bbox.from_wkb(row.bbox) result.accuracy = row.accuracy if not details.excluded or not result.place_id in details.excluded: results.append(result) if self.housenumbers and row.rank_address < 30: if row.placex_hnr: subs = _get_placex_housenumbers(conn, row.placex_hnr, details) elif row.interpol_hnr: subs = _get_osmline(conn, row.interpol_hnr, numerals, details) elif row.tiger_hnr: subs = _get_tiger(conn, row.tiger_hnr, numerals, row.osm_id, details) else: subs = None if subs is not None: async for sub in subs: assert sub.housenumber sub.accuracy = result.accuracy if not any(nr in self.housenumbers.values for nr in sub.housenumber.split(';')): sub.accuracy += 0.6 results.append(sub) result.accuracy += 1.0 # penalty for missing housenumber return results
32,170
42.18255
98
py