repo_name | path | text
|---|---|---|
anandakelvin/instagrump | instagrump/__init__.py | from instagrump.Profile import Profile
from instagrump.Content import Content |
anandakelvin/instagrump | instagrump/Content.py | import requests
class Content:
def __init__(self, url):
api_endp = '__a=1'
if api_endp not in url:
if url[-1] != '/':
url += '/'
if '?' in url:
url += '&%s' % api_endp
else:
url += '?%s' % api_endp
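# Note: requests.get() is called here without a timeout; passing e.g.
# timeout=10 would avoid hanging indefinitely on a stalled connection.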
self.data = requests.get(url).json()['graphql']['shortcode_media']
self.type = self.data['__typename']
self.id = self.data['id']
self.caption = self.data['edge_media_to_caption']['edges']
self.caption_is_edited = self.data['caption_is_edited']
self.tagged_user = self.data['edge_media_to_tagged_user']['edges']
self.is_video = self.data['is_video']
self.dimensions = self.data['dimensions']
self.location = self.data['location']
self.timestamp = self.data['taken_at_timestamp']
self.likes = self.data['edge_media_preview_like']['count']
self.comments_disabled = self.data['comments_disabled']
self.viewer_can_reshare = self.data['viewer_can_reshare']
self.has_ranked_comments = self.data['has_ranked_comments']
try:
self.title = self.data['title']
except KeyError:
self.title = None
def get_content(self):
if self.type == 'GraphSidecar':
data = self.data['edge_sidecar_to_children']['edges']
else:
data = [{'node':self.data}]
result = []
for node in data:
item = {}
if node['node']['is_video']:
item['content'] = node['node']['video_url']
else:
item['content'] = node['node']['display_url']
item['display_url'] = node['node']['display_url']
result.append(item)
return result
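# Minimal usage sketch (illustrative addition, not part of the original
# module): assumes a public post URL and that Instagram's undocumented
# '__a=1' JSON endpoint still responds as it did when this was written.
if __name__ == '__main__':
    post = Content('https://www.instagram.com/p/SHORTCODE/')  # placeholder shortcode
    for item in post.get_content():
        print(item['content'])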
|
anurag-ks/check-pep8 | plugin.py | import sublime
import sublime_plugin
import subprocess
import os
class Pep8Command(sublime_plugin.TextCommand):
"""pep8 command"""
def run(self, edit):
if self.view.is_dirty():
sublime.message_dialog("Please save the file")
else:
filename = self.view.file_name()
if filename is None:
sublime.message_dialog('Write some code, save it and then run '
'this command')
return
name, ext = os.path.splitext(filename)
if ext == ".py":
try:
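# Note: the 'pep8' checker has since been renamed to 'pycodestyle';
# on newer installs the executable name below may need swapping.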
p = subprocess.Popen(['pep8', str(filename)],
stdout=subprocess.PIPE)
(output, _) = p.communicate()
output = output.decode('utf-8')
if output == "" or output is None:
sublime.message_dialog('No pep8 errors found.')
else:
sublime.message_dialog(output)
except subprocess.CalledProcessError as e:
sublime.message_dialog(str(e.output))
else:
sublime.message_dialog("This is not a Python file")
|
kayru/librush | Scripts/embed_shaders.py | import codecs
from collections import namedtuple
import os
import subprocess
import sys
import textwrap
BinText = namedtuple('BinText', 'declaration, definition')
def bin2cpp(filename, variableName):
s = open(filename, 'rb').read()
s = codecs.encode(s, "hex").decode().upper()
t=",".join(["0x"+x+y for (x,y) in zip(s[0::2], s[1::2])])
declarationText = "extern const unsigned char " + variableName + "data[];"
declarationText += "\n";
declarationText += "extern const size_t " + variableName + "size;"
definitionText = "const unsigned char " + variableName + "data[] = {\n\t%s\n};"%" \n\t".join(textwrap.wrap(t,80))
definitionText += "\n";
definitionText += "const size_t " + variableName + "size = sizeof(" + variableName + "data);"
return BinText(
declaration = declarationText,
definition = definitionText
)
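# For illustration: bin2cpp('ps.spv', 'SPV_psMain_') returns a declaration
# like 'extern const unsigned char SPV_psMain_data[];' plus a matching
# 'SPV_psMain_size', and a definition holding the hex-encoded file bytes.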
# Compile shaders and generate .h / .cpp code
header = '#pragma once\n// clang-format off\nnamespace Rush\n{\n'
source = '#include "GfxEmbeddedShaders.h"\n// clang-format off\nnamespace Rush\n{\n'
Shader = namedtuple('Shader', 'filename, entry, target')
hlslShaders = [
Shader(
filename = "..\\Shaders\\Primitive.hlsl",
entry = "psMain",
target = "ps_5_0",
),
Shader(
filename = "..\\Shaders\\Primitive.hlsl",
entry = "psMainTextured",
target = "ps_5_0",
),
Shader(
filename = "..\\Shaders\\Primitive.hlsl",
entry = "vsMain3D",
target = "vs_5_0",
),
Shader(
filename = "..\\Shaders\\Primitive.hlsl",
entry = "vsMain2D",
target = "vs_5_0",
),
]
# SPIR-V shaders
for shader in hlslShaders:
# TODO: find glslc.exe and report error if not found
hlslCompiler = "glslc.exe"
inputFilename = os.path.normpath(shader.filename)
shaderDirectory = os.path.dirname(shader.filename)
outputFilename = os.path.join(shaderDirectory, shader.entry+".spv")
stageMap = dict(
vs_5_0="vertex",
ps_5_0="fragment"
)
command = [
hlslCompiler,
"-x", "hlsl",
"-o", outputFilename,
"-fentry-point=" + shader.entry,
"-fshader-stage=" + stageMap[shader.target],
#"-DVULKAN=1",
inputFilename
]
print(" ".join(command))
compileResult = subprocess.call(command, stdout=subprocess.DEVNULL)
if compileResult != 0:
exit(1)
variableNamePrefix = "SPV_" + shader.entry + "_"
text = bin2cpp(outputFilename, variableNamePrefix)
header += text.declaration + "\n"
source += text.definition + "\n"
os.remove(outputFilename)
# DXBC shaders
for shader in hlslShaders:
# TODO: find fxc.exe and report error if not found
hlslCompiler = "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\x64\\fxc.exe"
inputFilename = os.path.normpath(shader.filename)
shaderDirectory = os.path.dirname(shader.filename)
outputFilename = os.path.join(shaderDirectory, shader.entry+".dxbc")
command = [
hlslCompiler,
"/nologo",
"/Fo", outputFilename,
"/E", shader.entry,
"/T", shader.target,
inputFilename
]
print(" ".join(command))
compileResult = subprocess.call(command, stdout=subprocess.DEVNULL)
if compileResult != 0:
exit(1)
variableNamePrefix = "DXBC_" + shader.entry + "_"
text = bin2cpp(outputFilename, variableNamePrefix)
header += text.declaration + "\n"
source += text.definition + "\n"
os.remove(outputFilename)
# Output results
header += "}"
source += "}"
with open("..\\Rush\\GfxEmbeddedShaders.h", "w") as fileOut:
fileOut.write(header)
with open("..\\Rush\\GfxEmbeddedShaders.cpp", "w") as fileOut:
fileOut.write(source)
|
jakobjassmann/ecodes-dk-lidar | scripts/fill_processing_gaps.py | # fill_processing_gaps.py
# short script to fix missing tiles for each variable
# to be run post completion of processing with process_tiles.py
# <NAME> <EMAIL> 11 May 2021
# NB: uses scandir for speed, on OPALS Shell an install might be required with
# python -m pip install scandir --user
# Dependencies
import os
import glob
import pandas
import re
import scandir
import copy
import sys
import subprocess
import shutil
from dklidar import settings
## 1) Determine output folder structure
# Status
print('#' * 80 + '\n' + 'Check EcoDes-DK processing outputs for completeness' + '\n\n')
print('Preparing environment...'),
# Initiate list
folders = []
# Check for subfolders present (max depth = 1)
for folder in scandir.scandir(settings.output_folder):
if folder.is_dir():
sub_folders = [sub_folder.path for sub_folder in scandir.scandir(folder.path) if sub_folder.is_dir()]
if len(sub_folders) > 0:
for sub_folder in sub_folders:
folders.append(sub_folder)
else:
folders.append(folder.path)
# Remove tile_footprints folder if present
folders = [folder for folder in folders if not bool(re.match('.*tile_footprints.*', folder))]
folders = [folder for folder in folders if not bool(re.match('.*point_source_proportion.*', folder))]
folders = [folder for folder in folders if not bool(re.match('.*point_source_counts.*', folder))]
## Get reference set of tiles based on dtm_10m
dtm_10m = [folder for folder in folders if bool(re.match('.*dtm_10m.*', folder))][0]
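# File names end in *_rrrr_ccc.tif; the regex below keeps only the
# 'rrrr_ccc' tile id.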
dtm_10m_tiles = [re.sub('.*_(\d*_\d*).tif', '\g<1>', file_name) for file_name in glob.glob(dtm_10m + '/*.tif')]
dtm_10m_tiles = set(dtm_10m_tiles)
print(' done.')
## 2) Check completeness of tiles for all variables
# Status
print('Scanning tiles for...')
# Initiate empty dictionary
missing_tiles = {}
# Scan folders for missing tiles
for folder in folders:
variable_name = re.sub('.*[\\\\\/]', '', folder)
print('\t' + variable_name)
tiles = [re.sub('.*_(\d*_\d*).tif', '\g<1>', file_name) for file_name in glob.glob(folder + '/*.tif')]
tiles = set(tiles)
tiles_missing = dtm_10m_tiles - tiles
missing_tiles.update({variable_name: tiles_missing})
# Status
print('Scan complete.\n')
print('Exporting missing tile_ids to csv...'),
# Save missing tiles for each variable to csv
missing_tiles_df_list = []
for variable in missing_tiles.keys():
missing_tiles_df_local = pandas.DataFrame(missing_tiles[variable], columns = ['tile_id'])
missing_tiles_df_local['variable'] = variable
missing_tiles_df_list.append(missing_tiles_df_local)
# Concatenate list of dfs into one df and export to csv
missing_tiles_df = pandas.concat(missing_tiles_df_list)
missing_tiles_df.to_csv(settings.wd + '/documentation/empty_tile_ids.csv', index = False)
# Status
print(' done.')
## 3) Generate empty tiles for all missing tile ids
# Plenty of overlap means this is faster to do once then just copy
# Make temp dir
os.mkdir(settings.scratch_folder + '/fill_temp')
# Get unique tile ids
unique_missing_tiles = [tile_id for tile_list in missing_tiles.values() for tile_id in tile_list]
unique_missing_tiles = set(unique_missing_tiles)
# Status
print('Generating ' + str(len(unique_missing_tiles)) + ' empty tiles in temp folder...')
# Generate empty tiles using gdal
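# The calc expression below maps every valid cell of the reference tile to
# -9999 and declares -9999 as NoData, yielding an all-NA tile on the same
# 10 m grid as dtm_10m.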
for tile_id in unique_missing_tiles:
in_file = dtm_10m + '/dtm_10m_' + tile_id + '.tif'
out_file = settings.scratch_folder + '/fill_temp/' + "empty_" + tile_id + '.tif'
cmd = settings.gdal_calc_bin + ' -A ' + in_file + ' --outfile ' + out_file + ' --calc=(-9999*greater(A,-9999)) --type=Int16 --NoDataValue=-9999'
call_return = subprocess.check_output(cmd, shell=False)
sys.stdout.write('.')
sys.stdout.flush()
# Status
print(' done.')
## 4) Prepare summary output and fill missing rasters
# Status
print('Generating summary stats...'),
# Duplicate missing_tiles dict for summary stats
summary_stats = copy.copy(missing_tiles)
# Loop through key value pairs, get stats and write missing NA rasters
for variable, tiles_missing in missing_tiles.items():
# Status
print(variable + ' with ' + str(len(tiles_missing)) + ' tiles.')
# Determine output folder
output_folder = [folder for folder in folders if bool(re.match('.*[^_]' + variable + '.*', folder))][0]
# Set summary stats
summary_stats[variable] = len(tiles_missing)
# Fill missing NA rasters if needed
if len(tiles_missing)>0:
for tile_id in tiles_missing:
source = settings.scratch_folder + '/fill_temp/' + "empty_" + tile_id + '.tif'
destination = output_folder + '/' + variable + "_" + tile_id + '.tif'
if not os.path.exists(destination):
shutil.copy(source, destination)
else:
print('Warning: file already exists')
# Write summary stats to text file
stats_file = open(output_folder + '/empty_tiles_' + variable + '.txt', 'w')
stats_file.write(variable + '\n' + '' + str(len(tiles_missing)) + ' tiles did not complete processing and were replaced with empty tiles (all values = NA).\nThe affected tile_ids are:\n' + '\n'.join(list(tiles_missing)) + '\n')
stats_file.close()
# Status
print(' done.')
print('Exporting summary stats to csv...'),
# Export stats as csv
summary_stats_df = pandas.DataFrame()
summary_stats_df['variable'] = summary_stats.keys()
summary_stats_df['n_missing_tiles'] = summary_stats.values()
summary_stats_df = summary_stats_df.sort_values('variable')
summary_stats_df.to_csv(settings.wd + '/documentation/empty_tiles_summary.csv', index=False)
# Status
print(' done.')
print('Cleaning up temp files...'),
# Remove temp files
for tile_id in unique_missing_tiles:
temp_file = settings.scratch_folder + '/fill_temp/' + "empty_" + tile_id + '.tif'
os.remove(temp_file)
os.rmdir(settings.scratch_folder + '/fill_temp/')
# Status
print(' done.')
print('Script complete.' + '\n' + 80 * '#')
## End of File
|
jakobjassmann/ecodes-dk-lidar | scripts/generate_tile_footprints.py | ### DK Nationwide LiDAR - A short Python script to generate the tile footprints file.
### <NAME> <EMAIL> 27 October 2021
import os
import subprocess
import re
import ogr
from dklidar import settings
# Status update
print('#' * 80)
print(' EcoDes-DK15 generating tile_footprint variable\n')
print(' 1. Creating output folder ...'),
# Check whether output folder exists if not create
if not os.path.exists(settings.output_folder + '/tile_footprints/'):
os.mkdir(settings.output_folder + '/tile_footprints/')
print('Done.')
# Set variable to base the footprints on
base_var = 'dtm_10m'
# Export tile footprints using gdaltindex
print(' 2. Exporting tile footprints using gdaltindex based on "' + base_var + '" ...'),
cmd = settings.gdaltlindex_bin + \
settings.output_folder + '/tile_footprints/tile_footprints.shp ' + \
settings.output_folder + '/' + base_var + '/*.tif'
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
print('Done.')
# Status
print(' 3. Extracting tile_ids from file name attributes ...'),
# Load shapefile
shp_driver = ogr.GetDriverByName('ESRI Shapefile')
tile_footprints = shp_driver.Open(settings.output_folder + '/tile_footprints/tile_footprints.shp', 1)
# Open layer and get attribute name
tile_layer = tile_footprints.GetLayer()
tile_attribute_name = tile_layer.GetLayerDefn().GetFieldDefn(0).name
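# gdaltindex writes each source file path into the first attribute field;
# the loop below reduces those paths to bare tile_ids.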
# Set new attribute name
new_attribute = ogr.FieldDefn('tile_id', ogr.OFTString)
new_attribute.SetWidth(8) # 8 char string width
tile_layer.CreateField(new_attribute)
# Convert file names to tile_id
for feature in tile_layer:
tile_id = feature.GetField(tile_attribute_name)
tile_id = re.sub('.*(\d{4}_\d{3}).tif', '\g<1>', tile_id)
feature.SetField('tile_id', tile_id)
tile_layer.SetFeature(feature)
feature = None
tile_id = None
# Delete file name field
tile_layer.DeleteField(0)
# Close file connection and tidy up
tile_footprints.Destroy()
tile_layer = None
new_attribute = None
tile_attribute_name = None
# Status
print('Done.\n')
print(' Export of "tile_footprints" complete.\n')
print('#' * 80),
|
jakobjassmann/ecodes-dk-lidar | dklidar/dtm.py | ### Functions for DTM raster file handling for DK Lidar project
### <NAME> <EMAIL> 29 January 2019
import os
import subprocess
import re
import pandas
import opals
import glob
import time
import shutil
from dklidar import settings
from dklidar import common
#### Function definitions
## Generate tile footprint
def dtm_generate_footprint(tile_id):
"""
Generates a footprint file using gdal.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return execution status
"""
# Initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a')
try:
cmd = settings.gdaltlindex_bin + \
settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
settings.dtm_folder + '/DTM_1km_' + tile_id + '.tif'
cmd_return = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT)
log_file.write( '\n' + tile_id + ' footprint generation... \n' +
cmd_return + \
'\n' + tile_id + ' successful.\n\n')
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' footprint generation failed. \n')
return_value = 'gdalError'
# Close log file
log_file.close()
return return_value
## Create neighbourhood mosaic
def dtm_neighbourhood_mosaic(tile_id):
"""
Generates a mosaic of the dem for a given tile and its 8 neighbours. Incomplete mosaics are generated should
a given neighbour be missing.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value and open log file
return_value = ''
log_file = open('log.txt', 'a+')
# get current (temporary) work directory
temp_wd = os.getcwd()
# Retrieve row and col numbers for the current tile_id
center_row = int(re.sub('(\d+)_\d+', '\g<1>', tile_id))
center_col = int(re.sub('\d+_(\d+)', '\g<1>', tile_id))
# Determine row and column numbers for tiles in the 3 x 3 window
rows_to_load = [center_row - 1, center_row, center_row + 1]
cols_to_load = [center_col - 1, center_col, center_col + 1]
# Generate list of tile_ids for tiles to load
tiles_to_load = []
for row in rows_to_load:
for col in cols_to_load:
tile_to_load = str(row) + '_' + str(col)
tiles_to_load.extend([tile_to_load])
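# e.g. tile_id '6210_570' yields the nine ids '6209_569' through '6211_571'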
# Prep filenames and check if files exists:
tile_file_names = []
for tile_to_load in tiles_to_load:
tile_file_name = settings.dtm_folder + '/DTM_1km_' + tile_to_load + '.tif'
if os.path.exists(tile_file_name):
tile_file_names.append(tile_file_name)
n_neighbours = len(tile_file_names)
tile_file_names = ' '.join(tile_file_names)
if n_neighbours == 9:
log_file.write(tile_id + ' mosaicing...\n' + 'Number of neighbours = ' + str(n_neighbours) + '. Complete!\n')
else:
log_file.write(tile_id + ' mosaicing...\n' + 'Warning! Number of neighbours = ' + str(n_neighbours) +
'. Incomplete. Edge effects possible!\n')
# Construct command:
cmd = settings.gdalwarp_bin + ' ' + tile_file_names + ' ' + \
settings.dtm_mosaics_folder + '/dtm_' + tile_id + '_mosaic.tif '
# Execute command as subprocess and return message:
try:
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
log_file.write('\n' + tile_id + ' successful.\n\n')
return_value = 'success'
except:
log_file.write(tile_id + ' failed.\n\n')
return_value = "gdalError"
# Close log file
log_file.close()
return return_value
## Validate crs
def dtm_validate_crs(tile_id, mosaic = True):
"""
Function to validate the crs for dtm files (single tile and mosaic)
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:param mosaic: if True, also validates the crs for the mosaic, default: True
:return: execution status
"""
# Initiate return value
return_value = ''
# Generate odm files path names
dtm_file = settings.dtm_folder + '/DTM_1km_' + tile_id + '.tif'
dtm_mosaic = settings.dtm_mosaics_folder + '/dtm_' + tile_id + '_mosaic.tif'
# Retrieve CRS string for single tile
try:
crs_str = subprocess.check_output(settings.gdalsrsinfo_bin + '-o proj4 ' + dtm_file,
shell=False, stderr=subprocess.STDOUT)
# Clean up string by removing the first line and any surrounding whitespace, just in case
crs_str = re.sub('^.*?\n', '', crs_str)
# Check whether the CRS matches; if it differs, issue a warning.
if crs_str.strip() == settings.crs_proj4_gdal.strip():
return_value = 'Tile: match'
else:
return_value = 'Tile: warning - no match'
except:
return_value = 'Tile: error'
# Retrieve CRS string for mosaic
if mosaic == True:
try:
crs_str = subprocess.check_output(settings.gdalsrsinfo_bin + '-o proj4 ' + dtm_mosaic,
shell=False, stderr=subprocess.STDOUT)
# Clean up string by removing the first line and any surrounding whitespace, just in case
crs_str = re.sub('^.*?\n', '', crs_str)
# Check whether the CRS matches; if it differs, issue a warning.
if crs_str.strip() == settings.crs_proj4_gdal.strip():
return_value = return_value + '; Mosaic: match'
else:
return_value = return_value + '; Mosaic: warning - no match'
except:
return_value = return_value + '; Mosaic: error'
return return_value
## Aggregate dem to 10 m
def dtm_aggregate_tile(tile_id):
"""
Aggregates the 0.4 m DTM to 10 m size for final output and further calculations.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value and open log file
return_value = ''
log_file = open('log.txt', 'a+')
# get temporary work directory
wd = os.getcwd()
# Prepare output folder
out_folder = settings.output_folder + '/dtm_10m'
if not os.path.exists(out_folder): os.mkdir(out_folder)
try:
## Aggregate dtm to temporary file:
# Specify gdal command
cmd = settings.gdalwarp_bin + \
'-tr 10 10 -r average ' + \
settings.dtm_folder + '/DTM_1km_' + tile_id + '.tif ' + \
wd + '/dtm_10m_' + tile_id + '_float.tif '
# Execute gdal command
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' aggregating dtm_10m successful.\n\n')
out_file = out_folder + '/dtm_10m_' + tile_id + '.tif'
# Stretch by 100, round and store as int16
# Specify gdal command
cmd = settings.gdal_calc_bin + \
'-A ' + wd + '/dtm_10m_' + tile_id + '_float.tif ' + \
' --outfile=' + out_file + \
' --calc=rint(100*A)' + ' --type=Int16' + ' --NoDataValue=-9999'
# Execute gdal command
log_file.write('\n' + tile_id + ' converting dtm_10m to int16... \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_file)
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' dtm_10m aggregation failed.\n\n')
return_value = 'gdalError'
# Close log file
log_file.close()
# Remove temporary files
try:
os.remove(wd + '/dtm_10m_' + tile_id + '_float.tif')
except:
pass
return return_value
## Aggregate dem mosaic to 10 m
def dtm_aggregate_mosaic(tile_id):
"""
Aggregates the 0.4 m DTM mosaic to 10 m size for final output and other calculations.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value and open log file
return_value = ''
log_file = open('log.txt', 'a+')
# get temporary work directory
wd = os.getcwd()
# Prepare output folder
out_folder = settings.dtm_mosaics_10m_folder
if not os.path.exists(out_folder): os.mkdir(out_folder)
try:
# Specify gdal command
cmd = settings.gdalwarp_bin + \
'-tr 10 10 -r average -overwrite ' + \
settings.dtm_mosaics_folder + '/dtm_' + tile_id + '_mosaic.tif ' + \
settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif '
# Execute gdal command
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' aggregating dtm_10m mosaic successful.\n\n')
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' dtm_10m mosaic aggregation failed.\n\n')
return_value = 'gdalError'
# Close log file
log_file.close()
return return_value
## Calculate slope for tile
def dtm_calc_slope(tile_id):
"""
Calculates the slope parameter for a DTM neighbourhood mosaic and crops to original tile_size.
Requires dtm_generate_mosaic() to be executed.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a+')
# Get current wd
wd = os.getcwd()
# Prepare output folder
out_folder = settings.output_folder + '/slope'
if not os.path.exists(out_folder): os.mkdir(out_folder)
try:
# Calculate slope parameter
cmd = settings.gdaldem_bin + ' slope ' + \
settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif ' + \
wd + '/slope_' + tile_id + '_mosaic.tif '
log_file.write(tile_id + ' slope calculation... \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Crop slope mosaic output to original tile size
cmd = settings.gdalwarp_bin + \
' -cutline ' + settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
'-crop_to_cutline -overwrite ' + \
wd + '/slope_' + tile_id + '_mosaic.tif ' + \
wd + '/slope_' + tile_id + '_mosaic_cropped.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' cropped slope.\n\n')
# Round and store slope as int16
cmd = settings.gdal_calc_bin + \
'-A ' + wd + '/slope_' + tile_id + '_mosaic_cropped.tif ' + \
' --outfile=' + out_folder + '/slope_' + tile_id + '.tif ' + \
' --calc=rint(10*A) --type=Int16 --NoDataValue=-9999'
log_file.write('\n' + tile_id + ' rounding slope and calculation successful. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_folder + '/slope_' + tile_id + '.tif ')
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' slope calculation failed. \n\n')
return_value = 'gdalError'
# Close log file
log_file.close()
# Remove temporary file
try:
os.remove(wd + '/slope_' + tile_id + '_mosaic.tif')
os.remove(wd + '/slope_' + tile_id + '_mosaic_cropped.tif')
except:
pass
# Return execution status
return return_value
## Calculate aspect for a tile
def dtm_calc_aspect(tile_id, slope_zero = 'nodata'):
"""
Calculates the aspect for all 10 m cells in a DTM neighbourhood mosaic and crops to original tile_size.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:param slope_zero: integer value or 'nodata', sets the value for cells where slope = 0.
:return: execution status
"""
# Initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a+')
# get temporary work directory
wd = os.getcwd()
# Prepare output folder
out_folder = settings.output_folder + '/aspect'
if not os.path.exists(out_folder): os.mkdir(out_folder)
try:
# Calculate aspect
cmd = settings.gdaldem_bin + ' aspect ' + \
settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif ' + \
wd + '/aspect_' + tile_id + '_mosaic.tif '
log_file.write(tile_id + ' aspect calculation... \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Crop aspect output to original tile size
cmd = settings.gdalwarp_bin + \
' -cutline ' + settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
'-crop_to_cutline -overwrite ' + \
wd + '/aspect_' + tile_id + '_mosaic.tif ' + \
wd + '/aspect_' + tile_id + '_mosaic_cropped.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' aspect mosaic cropped.\n\n')
# Set aspect for cells where slope = 0 if the value is not nodata
if slope_zero != 'nodata':
# Calculate slope
cmd = settings.gdaldem_bin + ' slope ' + \
settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif ' + \
wd + '/slope_' + tile_id + '_mosaic.tif '
log_file.write(tile_id + ' slope calculated. \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Crop slope mosaic
cmd = settings.gdalwarp_bin + \
' -cutline ' + settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
'-crop_to_cutline -overwrite ' + \
wd + '/slope_' + tile_id + '_mosaic.tif ' + \
wd + '/slope_' + tile_id + '_mosaic_cropped.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' slope mosaic cropped.\n\n')
# Prepare mask from slope raster for slope = 0
cmd = settings.gdal_calc_bin + \
'-S ' + wd + '/slope_' + tile_id + '_mosaic_cropped.tif ' + \
' --outfile=' + wd + '/slope_mask_' + tile_id + '.tif ' + \
' --calc=' + str(slope_zero) + '*(S==0)-9999*(S!=0)'
log_file.write('\n' + tile_id + ' generated slope mask successfully. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Merge mask over aspect to set value for aspect where slope = 0
cmd = settings.gdal_merge_bin + \
' -o ' + wd + '/aspect_' + tile_id + '_mosaic_cropped_masked.tif '+ \
' -a_nodata -9999 -init -9999 -ot Float32 ' + \
wd + '/slope_mask_' + tile_id + '.tif ' + \
wd + '/aspect_' + tile_id + '_mosaic_cropped.tif '
log_file.write('\n' + tile_id + ' set aspect for slope = 0 successfully. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
else:
# Rename cropped mosaic
os.rename(wd + '/aspect_' + tile_id + '_mosaic_cropped.tif',
wd + '/aspect_' + tile_id + '_mosaic_cropped_masked.tif')
# Round and store as int16
cmd = settings.gdal_calc_bin + \
'-A ' + wd + '/aspect_' + tile_id + '_mosaic_cropped_masked.tif ' + \
' --outfile=' + out_folder + '/aspect_' + tile_id + '.tif ' + \
' --calc=rint(10*A) --type=Int16 --NoDataValue=-9999'
log_file.write('\n' + tile_id + ' rounding aspect to int16 and calculation successful. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_folder + '/aspect_' + tile_id + '.tif ')
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' aspect calculation failed.\n\n')
return_value = 'gdalError'
# Close log file
log_file.close()
# Remove temporary files
try:
os.remove(wd + '/aspect_' + tile_id + '_mosaic.tif')
os.remove(wd + '/aspect_' + tile_id + '_mosaic_cropped.tif')
os.remove(wd + '/aspect_' + tile_id + '_mosaic_cropped_masked.tif')
os.remove(wd + '/slope_' + tile_id + '_mosaic.tif')
os.remove(wd + '/slope_' + tile_id + '_mosaic_cropped.tif')
os.remove(wd + '/slope_mask_' + tile_id + '.tif')
except:
pass
return return_value
## Calculcate heat index
def dtm_calc_heat_index(tile_id, slope_zero = 'nodata'):
"""
Calculates the heat index from McCune and Keon (2002) based on the aspect only. Aspect must have been
calculated using dtm_calc_aspect().
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:param slope_zero: integer value assigned to the aspect when slope = 0, or 'nodata'
:return: execution status
"""
# Intialise return value and log
return_value = ''
log_file = open('log.txt', 'a+')
# Get current wd
wd = os.getcwd()
# Prepare output folder
out_folder = settings.output_folder + '/heat_load_index'
if not os.path.exists(out_folder): os.mkdir(out_folder)
try:
# Specify path to aspect raster A
aspect_file = '-A ' + settings.output_folder + '/aspect/aspect_' + tile_id + '.tif '
# Construct numpy equation, stretch by 10k and round
heat_index = 'rint(10000*((1-cos(radians((A/10)-45)))/2))'
# Specify temp_file path
temp_file = wd + '/heat_load_index_' + tile_id + '.tif '
# Specify output path
out_file = out_folder + '/heat_load_index_' + tile_id + '.tif '
# Construct gdal command, save file as Int16:
cmd = settings.gdal_calc_bin + \
aspect_file + \
'--outfile=' + temp_file + \
' --calc=' + heat_index + \
' --type=Int16 --NoDataValue=-9999 --overwrite'
# Execute gdal command
log_file.write('\n' + tile_id + ' calculating heat index success. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# mask index for zero slope value if specified
if slope_zero != 'nodata':
cmd = settings.gdal_calc_bin + \
aspect_file + \
' -B ' + temp_file + \
' --outfile=' + out_file + \
' --calc=-9999*(A==' + str(slope_zero) + ')+B*(A!=' + str(slope_zero) + ')' + \
' --type=Int16 --NoDataValue=-9999 --overwrite '
log_file.write('\n' + tile_id + ' applied aspect mask successfully. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
else:
# Move file
try:
os.remove(out_file)
except:
pass
os.rename(temp_file, out_file)
# Apply mask(s)
common.apply_mask(out_file)
# remove temp file
try:
os.remove(temp_file)
except:
pass
return_value = 'success'
except:
log_file.write(tile_id + ' calculating heat index failed. \n ')
return_value = 'gdal_error'
# Close log file
log_file.close()
return return_value
## Calculate solar radiation
def dtm_calc_solar_radiation(tile_id):
"""
Returns cell by cell solar radiation following McCune and Keon 2002. Slope and aspect must have been calculated
beforehand using dtm_calc_slope() and dtm_calc_aspect().
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# The calculation of the solar radiation is a two step process
# 1) Obtain a raster with the latitude of the centre of each cell (in degrees)
# 2) Calculate the solar radiation using the formula from McCune and Keon 2002
# Initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a')
# Get current wd
wd = os.getcwd()
# Prepare output folder
out_folder = settings.output_folder + '/solar_radiation'
if not os.path.exists(out_folder): os.mkdir(out_folder)
try:
# 1) Create raster with latitude of the centre of a cell
# Construct gdal command to export xyz file in utm
dtm_file = settings.output_folder + '/slope/slope_' + tile_id + '.tif'
out_file = wd + '/xyz_' + tile_id + '.xyz'
cmd = settings.gdal_translate_bin + \
' -of xyz -co COLUMN_SEPARATOR="," -co ADD_HEADER_LINE=YES ' + \
dtm_file + ' ' + \
out_file
log_file.write('\n converting slope raster to xyz with gdal command: ' + cmd)
# Execute gdal command and log
log_file.write('\n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' generated xyz. \n\n ')
log_file.write('\n conversion completed ')
# Read in xyz as a pandas dataframe
xyz = pandas.read_csv(wd + '/xyz_' + tile_id + '.xyz')
xy = xyz[["X", "Y"]]
xy.to_csv(wd + '/xy_' + tile_id + '.csv', index=False, header=False, sep=' ')
log_file.write('\n removed z coordinate. ')
# Construct gdal commands to transform cell coordinates from utm to lat long
in_file = wd + '\\xy_' + tile_id + '.csv'
# Script used to break here, so insert a short pause
time.sleep(1)
cmd = settings.gdaltransform_bin + \
' -s_srs EPSG:25832 -t_srs WGS84 ' + \
' < ' + in_file
log_file.write('\n gdal transform command: ' + cmd)
# And execute the gdal command
xy_transformed = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT)
log_file.write(xy_transformed[0:200] + '\n')
# Write to file
out_file = wd + '/xy_' + tile_id + '_latlong.csv'
xy_trans_file = open(out_file, 'w')
xy_trans_file.write(xy_transformed)
xy_trans_file.close()
log_file.write('\n gdal transform completed. ')
# Load lat long file as pandas df
skip_rows = 1
if settings.gdal_version == '3.3.3':
skip_rows = 0
xy_latlong = pandas.read_csv(wd + '/xy_' + tile_id + '_latlong.csv', sep='\s+', names=['X', 'Y', 'return_status'],
skiprows=skip_rows)
log_file.write('\n check whether length matches. ')
# check data frames are of the same length
if len(xyz.index) != len(xy_latlong.index):
log_file.write('\n length of dataframes did not match \n')
raise Exception("")
log_file.write('\n assigning latitude as z to the xyz file. ')
# Assign lat (deg) to UTM z coordinate
xyz["Z"] = xy_latlong["Y"]
xyz.to_csv(wd + '/xyz_' + tile_id + '.xyz', index=False, header=False, sep=' ')
# Convert back to geotiff, prepare gdal translate command
in_file = wd + '/xyz_' + tile_id + '.xyz'
out_file = wd + '/lat_' + tile_id + '.tif'
cmd = settings.gdal_translate_bin + \
' -of GTiff -a_srs EPSG:25832 ' + \
in_file + ' ' + \
out_file
log_file.write('\n converting to geotiff using gdal command:' + cmd)
# Execute command and log
log_file.write('\n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' generated lat tif. \n\n')
# Intermediate clean up
os.remove(wd + '/xyz_' + tile_id + '.xyz')
os.remove(wd + '/xy_' + tile_id + '.csv')
os.remove(wd + '/xy_' + tile_id + '_latlong.csv')
del (xy)
del (xy_latlong)
del (xyz)
log_file.write('\n finished preparations. ')
## 2) Calculate Solar radiation
# The equation from McCune and Keon goes as follows:
# solar radiation = 0.339 +
# 0.808 x cos(lat) x cos(slope) +
# -0.196 x sin(lat) x sin(slope) +
# -0.482 x cos(asp) x sin(slope)
# Aspect must be folded around the S-N line:
# asp = 180 - |180 - asp|
# and all values must be in radians:
# rad = deg * pi / 180 or using numpy simply: rad = radians(deg)
# Finally, the result needs to be stretched by 1000 and rounded for storage as an Int16
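# Worked example: at lat = 56 deg on flat ground (slope = 0) the slope
# terms vanish and the base equation reduces to 0.339 + 0.808*cos(lat),
# i.e. roughly 0.79 before any stretching.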
# Specify path to latitude raster L
lat_file = '-L ' + wd + '/lat_' + tile_id + '.tif '
# Specify path to slope raster as raster S
slope_file = '-S ' + settings.output_folder + '/slope/slope_' + tile_id + '.tif '
# Specify path to aspect raster A
aspect_file = '-A ' + settings.output_folder + '/aspect/aspect_' + tile_id + '.tif '
# Construct numpy equation (based on McCune and Keon 2002) and stretch by 1000 and round to nearest int.
#solar_rad_eq = 'rint(1000*(0.339+0.808*cos(radians(L))*cos(radians((S/10)))-0.196*sin(radians(L))*sin(radians((S/10)))-0.482*cos(radians(180-absolute(180-(A/10))))*sin(radians((S/10)))))'
# Construct numpy equation (based on McCune and Keon 2002), convert to MJ/yr/100m2 (cell) and round to nearest int.
solar_rad_eq = 'rint(1000000*(numpy.exp(0.339+0.808*cos(radians(L))*cos(radians((S/10)))-0.196*sin(radians(L))*sin(radians((S/10)))-0.482*cos(radians(180-absolute(180-(A/10))))*sin(radians((S/10))))))'
# Specify output path
out_file = out_folder + '/solar_radiation_' + tile_id + '.tif '
# Construct gdal command:
cmd = settings.gdal_calc_bin + \
lat_file + \
slope_file + \
aspect_file + \
'--outfile=' + out_file + \
'--calc=' + solar_rad_eq + ' ' + \
'--type=Int32 --NoDataValue=-9999 --overwrite' #Int16 if original equation is used.
log_file.write('\n calculating solar radiation with gdal calc: ' + cmd)
# Execute gdal command
log_file.write('\n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' calculated solar radiation. \n\n')
# Apply mask(s)
common.apply_mask(out_file)
# Remove latitude tif
os.remove(wd + '/lat_' + tile_id + '.tif')
return_value = 'success'
log_file.write('\n done. ')
except:
log_file.write('\n\n' + tile_id + ' calculating solar radiation failed. \n ')
return_value = 'gdal_error'
# Write log output to log file
log_file.close()
return return_value
## Calculate landscape openness mean
def dtm_openness_mean(tile_id):
"""
Exports the mean landscape openness for all eight cardinal directions with a 150 m search radius
using the OPALS Openness module.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value
return_value = ''
log_file = open('log.txt', 'a+')
# Get working directory
wd = os.getcwd()
# Generate folder paths
out_folder = settings.output_folder + '/openness_mean'
if not os.path.exists(out_folder): os.mkdir(out_folder)
# Attempt calculation of mean openness
try:
# Initialise Opals Openness Module
export_openness = opals.Openness.Openness()
# Export positive openness for a given cell with a search radius of 150 m (15 cells)
export_openness.inFile = settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif '
export_openness.outFile = wd + '/openness_150m_' + tile_id + '_mosaic.tif '
export_openness.feature = opals.Types.OpennessFeature.positive
export_openness.kernelSize = 15 # 15 x 10 m = 150 m
export_openness.selMode = 0
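# selMode 0 selects the mean openness over the search directions; modes 1
# (minimum) and 2 (maximum) are used in dtm_openness_difference() below.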
export_openness.noData = -9999
export_openness.commons.screenLogLevel = opals.Types.LogLevel.none
export_openness.commons.nbThreads = settings.nbThreads
export_openness.run()
# Convert to degrees, round and store as int16
# Specify gdal command
cmd = settings.gdal_calc_bin + \
'-A ' + wd + '/openness_150m_' + tile_id + '_mosaic.tif ' + \
' --outfile=' + wd + '/landscape_openness_' + tile_id + '_mosaic.tif ' + \
' --calc=rint(degrees(A))' + \
' --type=Int16 --NoDataValue=-9999'
# Execute gdal command
log_file.write('\n' + tile_id + ' converted and rounded to degrees. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Obtain file extent for cropping then remove outer 150 m of mosaic to avoid edge effects
cmd = settings.gdalinfo_bin + wd + '/landscape_openness_' + tile_id + '_mosaic.tif '
mosaic_info = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT)
upper_left = re.search("Upper *Left *\( *(\d+.\d+), *(\d+.\d+)\)", mosaic_info)
lower_right = re.search("Lower *Right *\( *(\d+.\d+), *(\d+.\d+)\)", mosaic_info)
xmin = float(upper_left.group(1)) + 150
ymax = float(upper_left.group(2)) - 150
xmax = float(lower_right.group(1)) - 150
ymin = float(lower_right.group(2)) + 150
# remove 150 m on outer edge using gdalwarp
cmd = settings.gdalwarp_bin + \
'-te ' + str(xmin) + ' ' + str(ymin) + ' ' + str(xmax) + ' ' + str(ymax) + ' -overwrite ' + \
wd + '/landscape_openness_' + tile_id + '_mosaic.tif ' + \
wd + '/landscape_openness_' + tile_id + '_mosaic_cropped.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' cropping landscape openness mosaic finished.\n\n')
# Crop openness mosaic to original tile size (this will set all edges removed earlier to NA)
cmd = settings.gdalwarp_bin + \
' -cutline ' + settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
'-crop_to_cutline -overwrite ' + \
wd + '/landscape_openness_' + tile_id + '_mosaic_cropped.tif ' + \
out_folder + '/openness_mean_' + tile_id + '.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' landscape openness calculation successful.\n\n')
# Apply mask(s)
common.apply_mask(out_folder + '/openness_mean_' + tile_id + '.tif ')
return_value = 'success'
except:
return_value = 'opals/gdal/Error'
# Remove temporary files
try:
os.remove(wd + '/openness_150m_' + tile_id + '_mosaic.tif')
os.remove(wd + '/landscape_openness_' + tile_id + '_mosaic.tif')
os.remove(wd + '/landscape_openness_' + tile_id + '_mosaic_cropped.tif')
# ... as well as the randomly named temporary files created by OPALS
for temp_file in glob.glob(wd + '/../*' + tile_id + '_mosaic_dz._dz.tif'):
os.remove(temp_file)
except:
pass
# Close log file
log_file.close()
return return_value
## Calculate landscape openness difference
def dtm_openness_difference(tile_id):
"""
Exports the difference between the minimum and maximum positive openness within a 50 m search radius
using the Opals Openness module.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value
return_value = ''
log_file = open('log.txt', 'a+')
# Get working directory
wd = os.getcwd()
# Generate folder paths
out_folder = settings.output_folder + '/openness_difference'
if not os.path.exists(out_folder): os.mkdir(out_folder)
# Attempt openness difference calculation
try:
# Initialise Opals Openness Module
export_openness = opals.Openness.Openness()
# Export minimum positive openness for a given cell with a kernel size of 50 m x 50 m
export_openness.inFile = settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif '
export_openness.outFile = wd + '/openness_50m_min_' + tile_id + '_mosaic.tif '
export_openness.feature = opals.Types.OpennessFeature.positive
export_openness.kernelSize = 5 # 5 x 10 m = 50 m
export_openness.selMode = 1
export_openness.noData = -9999
export_openness.commons.screenLogLevel = opals.Types.LogLevel.none
export_openness.commons.nbThreads = settings.nbThreads
export_openness.run()
export_openness.reset()
# Export maximum positive openness for a given cell with a kernel size of 50 m x 50 m
export_openness.inFile = settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif '
export_openness.outFile = wd + '/openness_50m_max_' + tile_id + '_mosaic.tif '
export_openness.feature = opals.Types.OpennessFeature.positive
export_openness.kernelSize = 5 # 5 x 10 m = 50 m
export_openness.selMode = 2
export_openness.noData = -9999
export_openness.commons.screenLogLevel = opals.Types.LogLevel.none
export_openness.commons.nbThreads = settings.nbThreads
export_openness.run()
# Calculate difference, round and store as int16
# Specify gdal command
cmd = settings.gdal_calc_bin + \
'-A ' + wd + '/openness_50m_min_' + tile_id + '_mosaic.tif ' + \
'-B ' + wd + '/openness_50m_max_' + tile_id + '_mosaic.tif ' + \
' --outfile=' + wd + '/diff_openness_' + tile_id + '_mosaic.tif ' + \
' --calc=rint(degrees(B)-degrees(A))' + \
' --type=Int16 --NoDataValue=-9999'
# Execute gdal command
log_file.write('\n' + tile_id + ' calculated difference openness. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Obtain file extent for cropping (remove outer 50 m of mosaic)
cmd = settings.gdalinfo_bin + wd + '/diff_openness_' + tile_id + '_mosaic.tif '
mosaic_info = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT)
upper_left = re.search("Upper *Left *\( *(\d+.\d+), *(\d+.\d+)\)", mosaic_info)
lower_right = re.search("Lower *Right *\( *(\d+.\d+), *(\d+.\d+)\)", mosaic_info)
xmin = float(upper_left.group(1)) + 50
ymax = float(upper_left.group(2)) - 50
xmax = float(lower_right.group(1)) - 50
ymin = float(lower_right.group(2)) + 50
# Remove 50 m on outer edge using gdalwarp
cmd = settings.gdalwarp_bin + \
'-te ' + str(xmin) + ' ' + str(ymin) + ' ' + str(xmax) + ' ' + str(ymax) + ' -overwrite ' + \
wd + '/diff_openness_' + tile_id + '_mosaic.tif ' + \
wd + '/diff_openness_' + tile_id + '_mosaic_cropped.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' cropped openness difference mosaic.\n\n')
# Crop diff openness to original tile size (this will set all edges removed earlier to NA)
cmd = settings.gdalwarp_bin + \
' -cutline ' + settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
'-crop_to_cutline -overwrite ' + \
wd + '/diff_openness_' + tile_id + '_mosaic_cropped.tif ' + \
out_folder + '/openness_difference_' + tile_id + '.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' openness calculation successful.\n\n')
# Apply mask(s)
common.apply_mask(out_folder + '/openness_difference_' + tile_id + '.tif ')
return_value = 'success'
except:
return_value = 'opals/gdal/Error'
# Remove temporary files
try:
os.remove(wd + '/openness_50m_min_' + tile_id + '_mosaic.tif')
os.remove(wd + '/openness_50m_max_' + tile_id + '_mosaic.tif')
os.remove(wd + '/diff_openness_' + tile_id + '_mosaic.tif')
os.remove(wd + '/diff_openness_' + tile_id + '_mosaic_cropped.tif')
# ... as well as the randomly named temporary files created by OPALS
for temp_file in glob.glob(wd + '/../*' + tile_id + '_mosaic_dz._dz.tif'):
os.remove(temp_file)
except:
pass
# Close log file
log_file.close()
# Return exist status
return return_value
## Calculate TWI following Kopecky et al. 2020
def dtm_kopecky_twi(tile_id):
"""
Calculates the topographic wetness index (TWI) following Kopecky et al. 2020.
Requires SAGA GIS 7.8.2 or later to be specified in settings.py.
Calculations are done on the aggregated 10 m tile neighbourhood mosaic.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a+')
# get temporary work directory
wd = os.getcwd()
# Replace backslash with forward slash for SAGA GIS
wd = re.sub('\\\\', '/', wd)
# Prepare output folder
out_folder = settings.output_folder + '/twi'
if not os.path.exists(out_folder): os.mkdir(out_folder)
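# Pipeline (following Kopecky et al. 2020): fill sinks -> MFD flow
# accumulation -> flow width / specific catchment area -> slope -> TWI,
# all computed on the 10 m neighbourhood mosaic, then cropped to the tile.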
try:
# Fill in sinks on mosaic
cmd = settings.saga_bin + 'ta_preprocessor 5 ' + \
'-ELEV ' + settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif ' + \
'-FILLED ' + wd + '/' + tile_id + '_mosaic_10m_filled.sdat ' + \
'-MINSLOPE 0.01'
log_file.write(tile_id + ' finished filling sinks. \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Calculate Flow Accumulation on filled mosaic
cmd = settings.saga_bin + 'ta_hydrology 0 ' + \
'-ELEVATION ' + wd + '/' + tile_id + '_mosaic_10m_filled.sdat ' + \
'-METHOD 4 -CONVERGENCE 1.0 ' + \
'-FLOW ' + wd + '/' + tile_id + '_mosaic_10m_filled_flow_mfd.sdat '
log_file.write(tile_id + ' finished flow calculation. \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Calculate Flow Width and Catchment Area
cmd = settings.saga_bin + 'ta_hydrology 19 ' + \
'-DEM ' + wd + '/' + tile_id + '_mosaic_10m_filled.sdat ' + \
'-TCA ' + wd + '/' + tile_id + '_mosaic_10m_filled_flow_mfd.sdat ' + \
'-WIDTH ' + wd + '/' + tile_id + '_mosaic_10m_filled_flow_mfd_width.sdat ' + \
'-SCA ' + wd + '/' + tile_id + '_mosaic_10m_filled_flow_mfd_sca.sdat '
log_file.write(tile_id + ' finished flow width and catchment area calculation. \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Calculate slope on mosaic
cmd = settings.saga_bin + 'ta_morphometry 0 ' + \
'-ELEVATION ' + wd + '/' + tile_id + '_mosaic_10m_filled.sdat ' + \
'-METHOD 7 ' + \
'-SLOPE ' + wd + '/' + tile_id + '_mosaic_10m_filled_slope.sdat '
log_file.write(tile_id + ' finished slope calculation. \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Calculate TWI on mosaic
cmd = settings.saga_bin + 'ta_hydrology 20 ' + \
'-SLOPE ' + wd + '/' + tile_id + '_mosaic_10m_filled_slope.sdat ' + \
'-AREA ' +wd + '/' + tile_id + '_mosaic_10m_filled_flow_mfd_sca.sdat ' + \
'-TWI '+ wd + '/' + tile_id + '_mosaic_10m_filled_twi.sdat '
log_file.write(tile_id + ' finished twi calculation. \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Crop output to original tile size and convert to tif:
cmd = settings.gdalwarp_bin + \
' -cutline ' + settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
' -crop_to_cutline -overwrite ' + \
wd + '/' + tile_id + '_mosaic_10m_filled_twi.sdat ' + \
wd + '/twi_' + tile_id + '_float.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' cropping wetness index mosaic successful.\n\n')
return_value = 'success'
# Stretch by 1000, round and convert to int16
# Construct gdal command:
cmd = settings.gdal_calc_bin + \
'-A ' + wd + '/twi_' + tile_id + '_float.tif ' + \
'--outfile=' + out_folder + '/twi_' + tile_id + '.tif ' + \
' --calc=rint(1000*A) --type=Int16 --NoDataValue=-9999 --overwrite'
# Execute gdal command
log_file.write('\n' + tile_id + ' rounding and conversion finished. ' \
'TWI calculation successful. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
except:
log_file.write('\n' + tile_id + ' wetness index calculation failed.\n\n')
return_value = 'SAGA/GDAL Error'
# Close log file
log_file.close()
# Remove temporary file
try:
for temp_file in glob.glob(wd + '/' + tile_id + '_mosaic_10m_filled.*'):
os.remove(temp_file)
for temp_file in glob.glob(wd + '/' + tile_id + '_mosaic_10m_filled_flow_mfd.*'):
os.remove(temp_file)
for temp_file in glob.glob(wd + '/' + tile_id + '_mosaic_10m_filled_flow_mfd_width.*'):
os.remove(temp_file)
for temp_file in glob.glob(wd + '/' + tile_id + '_mosaic_10m_filled_flow_mfd_sca.*'):
os.remove(temp_file)
for temp_file in glob.glob(wd + '/' + tile_id + '_mosaic_10m_filled_slope.*'):
os.remove(temp_file)
for temp_file in glob.glob(wd + '/' + tile_id + '_mosaic_10m_filled_twi.*'):
os.remove(temp_file)
os.remove(wd + '/twi_' + tile_id + '_float.tif')
except:
pass
return return_value
## Calculate SAGA Wetness Index for a tile
def dtm_saga_wetness(tile_id):
"""
Calculates the SAGA wetness index for a tile mosaic, then crops to the original tile.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a+')
# get temporary work directory
wd = os.getcwd()
# Prepare output folder
out_folder = settings.output_folder + '/saga_wetness_index'
if not os.path.exists(out_folder): os.mkdir(out_folder)
try:
# Calculate wetness index at DTM scale
cmd = settings.saga_wetness_bin + '-DEM ' + \
settings.dtm_mosaics_folder + '/dtm_' + tile_id + '_mosaic.tif ' + \
'-TWI ' + wd + '/wetness_index_' + tile_id + '_mosaic.tif'
log_file.write(tile_id + ' wetness index calculation finished. \n ' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Crop output to original tile size:
cmd = settings.gdalwarp_bin + \
' -cutline ' + settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
'-tr 10 10 -r med -crop_to_cutline -overwrite ' + \
wd + '/wetness_index_' + tile_id + '_mosaic.sdat ' + \
wd + '/wetness_index_' + tile_id + '.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' cropping wetness index mosaic successful.\n\n')
return_value = 'success'
# Set input file path
in_file = wd + '/wetness_index_' + tile_id + '.tif '
# Set output file path
out_file = out_folder + '/wetness_index_' + tile_id + '.tif '
# Stretch by 1000, round and convert to int16
# Construct gdal command:
cmd = settings.gdal_calc_bin + \
'-A ' + wd + '/wetness_index_' + tile_id + '.tif ' + \
'--outfile=' + out_file + \
' --calc=rint(1000*A) --type=Int16 --NoDataValue=-9999 --overwrite'
# Execute gdal command
log_file.write('\n' + tile_id + ' rounding and conversion finished. ' \
'Wetness index calculation successful. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
except:
log_file.write('\n' + tile_id + ' wetness index calculation failed.\n\n')
return_value = 'gdalError'
# Close log file
log_file.close()
# Remove temporary file
try:
os.remove(wd + '/wetness_index_' + tile_id + '_mosaic.sdat')
os.remove(wd + '/wetness_index_' + tile_id + '_mosaic.prj')
os.remove(wd + '/wetness_index_' + tile_id + '_mosaic.sgrd')
os.remove(wd + '/wetness_index_' + tile_id + '_mosaic.mgrd')
os.remove(wd + '/wetness_index_' + tile_id + '_mosaic.tif')
os.remove(wd + '/wetness_index_' + tile_id + '.tif')
except:
pass
return return_value
## Calculate landscape openness (mean) using SAGA
def dtm_saga_landscape_openness(tile_id):
"""
Calculates landscape openness following Yokoyama et al. 2002 based on an aggregated 10 m dtm and a 150 m search radius.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a+')
# get temporary work directory
wd = os.getcwd()
# Prepare output folder
out_folder = settings.output_folder + '/landscape_openness'
if not os.path.exists(out_folder): os.mkdir(out_folder)
try:
## Aggregate dtm mosaic to temporary file:
# Specify gdal command
cmd = settings.gdalwarp_bin + \
'-tr 10 10 -r average ' + \
settings.dtm_mosaics_folder + '/dtm_' + tile_id + '_mosaic.tif ' + \
wd + '/dtm_10m_' + tile_id + '_mosaic_float.tif ' + ' -overwrite'
# Execute gdal command
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' aggregating dtm_10m mosaic successful.\n\n')
# Use saga gis openness module for calculating the openness in 150 m
cmd = settings.saga_openness_bin + \
'-DEM ' + wd + '/dtm_10m_' + tile_id + '_mosaic_float.tif ' + \
'-POS ' + wd + '/openness_10m_' + tile_id + '_mosaic.sdat ' + \
'-RADIUS 150 -METHOD 1'
# Execute saga command
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' openness from mosaic successful.\n\n')
# Obtain file extent for cropping (remove outer 150 m of mosaic)
cmd = settings.gdalinfo_bin + wd + '/openness_10m_' + tile_id + '_mosaic.sdat '
mosaic_info = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT)
upper_left = re.search("Upper *Left *\( *(\d+.\d+), *(\d+.\d+)\)", mosaic_info)
lower_right = re.search("Lower *Right *\( *(\d+.\d+), *(\d+.\d+)\)", mosaic_info)
xmin = float(upper_left.group(1)) + 150
ymax = float(upper_left.group(2)) - 150
xmax = float(lower_right.group(1)) - 150
ymin = float(lower_right.group(2)) + 150
# remove 150 m on outer edge using gdal warp
cmd = settings.gdalwarp_bin + \
'-te ' + str(xmin) + ' ' + str(ymin) + ' ' + str(xmax) + ' ' + str(ymax) + ' -overwrite ' + \
wd + '/openness_10m_' + tile_id + '_mosaic.sdat ' + \
wd + '/openness_10m_' + tile_id + '_mosaic.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' cropping openness mosaic.\n\n')
# Convert to degrees, round and store as int16
# Specify gdal command
cmd = settings.gdal_calc_bin + \
'-A ' + wd + '/openness_10m_' + tile_id + '_mosaic.tif ' + \
' --outfile=' + wd + '/openness_10m_' + tile_id + '_mosaic_deg.tif ' + \
' --calc=rint(degrees(A))' + ' --type=Int16' + ' --NoDataValue=-9999'
# Execute gdal command
log_file.write('\n' + tile_id + ' converting openness to degrees and int16 successful. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Crop slope output to original tile size:
cmd = settings.gdalwarp_bin + \
' -cutline ' + settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
'-crop_to_cutline -overwrite ' + \
wd + '/openness_10m_' + tile_id + '_mosaic_deg.tif ' + \
out_folder + '/openness_10m_' + tile_id + '.tif '
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' openness calculation successful.\n\n')
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' openness calculation failed.\n\n')
return_value = 'gdalError'
# Close log file
log_file.close()
# Remove temporary files
try:
os.remove(wd + '/dtm_10m_' + tile_id + '_mosaic_float.tif')
os.remove(wd + '/openness_10m_' + tile_id + '_mosaic.sdat')
os.remove(wd + '/openness_10m_' + tile_id + '_mosaic.sgrd')
os.remove(wd + '/openness_10m_' + tile_id + '_mosaic.prj')
os.remove(wd + '/openness_10m_' + tile_id + '_mosaic.mgrd')
os.remove(wd + '/openness_10m_' + tile_id + '_mosaic.tif')
os.remove(wd + '/openness_10m_' + tile_id + '_mosaic_deg.tif')
except:
pass
return return_value
def dtm_remove_temp_files(tile_id):
"""
Removes footprint and mosaic files for the dtm to clear up space for subsequent processing.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# initiate return value
return_value = ''
dtm_mosaic = settings.dtm_mosaics_folder + '/dtm_' + tile_id + '_mosaic.tif'
dtm_mosaic_10m = settings.dtm_mosaics_10m_folder + '/dtm_' + tile_id + '_float_mosaic_10m.tif'
dtm_footprint_files = glob.glob(settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.*')
try:
os.remove(dtm_mosaic)
return_value = 'success'
except:
return_value = 'unable to delete dtm mosaic file'
try:
os.remove(dtm_mosaic_10m)
return_value = 'success'
except:
return_value = 'unable to delete dtm mosaic 10 m file'
try:
for file in dtm_footprint_files: os.remove(file)
return_value = 'success'
except:
return_value = return_value + 'unable to delete dtm footprint file'
# return execution status
return return_value
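# Illustrative usage (tile id is an example):
#   status = dtm_remove_temp_files('6186_524')
#   # returns 'success' or a short description of what could not be deleted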
|
jakobjassmann/ecodes-dk-lidar | scripts/archive_outputs.py | <reponame>jakobjassmann/ecodes-dk-lidar
# Quick helper script to archive all main variable folders
# <NAME> j.assmann
# Zip all folders
import shutil
import glob
import re
import multiprocessing
import os
from dklidar import settings
# Function to archive folders using shutil
def zip_folder(folder_path):
# Set folder for zips
zip_output = 'D:/Jakob/dk_nationwide_lidar/data/zipped_outputs'
folder_name = zip_output + '/' + re.sub('.*\\\\(.*)\\\\', '\g<1>', folder_path)
shutil.make_archive(folder_name, 'bztar', folder_path)
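# Note: 'bztar' produces a .tar.bz2 archive. The re.sub() above keeps only the last
# path segment, e.g. (illustrative) '...\\outputs\\canopy_height\\' -> 'canopy_height'.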
# Main body of script
if __name__ == '__main__':
# Get list of folders in output directories and their names
folders = glob.glob(settings.output_folder + '*/')
# archive in parallel
multiprocessing.set_executable(settings.python_exec_path)
pool = multiprocessing.Pool(processes=len(folders))
zipping = pool.map_async(zip_folder, folders)
zipping.wait()
pool.close()
|
jakobjassmann/ecodes-dk-lidar | scripts/check_vrt_completeness.py | # EcoDes-DK15 check completeness of the output VRTs (are all tif files referenced?).
# <NAME> <EMAIL> 2 December 2021
# Dependencies
import scandir
import pandas
import re
import os
import tqdm
import itertools
import multiprocessing
from osgeo import gdal
from dklidar import settings
# Switch on gdal Python exceptions
gdal.UseExceptions()
## 1) Function definitions
# Check file function
def check_vrt_completeness(folder):
var_name = re.sub('.*/(.*)$', '\g<1>', folder)
vrt_file_name = folder + '/' + var_name + '.vrt'
file_names = []
status = []
# Check presence of vrt file
if os.path.exists(vrt_file_name):
# Open file
vrt_file = open(vrt_file_name, 'r')
# Read content
vrt_file_contents = ''.join(vrt_file.readlines())
# List folder content
file_list = list_files(folder)
# drop non-tif files from the list
file_list = list(itertools.compress(
file_list,
[bool(re.search('tif', x)) for x in file_list]))
# Search tif files in vrt
#progress = 0
for i in range(0, len(file_list)):
if not bool(re.search(
re.sub('.*/(.*)$', '\g<1>', file_list[i]),
vrt_file_contents)):
file_names.append(re.sub('.*/(.*)$', '\g<1>', file_list[i]))
status.append('missing')
## # Update progress
## progress = float(i + 1) / float(len(file_list))
## # Update progress bar
## print('\r|' +
## '#' * int(round(progress * 54)) +
## '-' * int(round((1 - progress) * 54)) +
## '| ' +
## str(int(round(progress * 100))) + '%'),
else:
file_names.append('')
status.append('vrt file missing')
# Combined outputs to df
status_check_df = pandas.DataFrame(zip(*[file_names, status]),
columns = ['file_name','error'])
status_check_df['variable'] = var_name
return(status_check_df)
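# Illustrative call (variable folder name is an example):
#   df = check_vrt_completeness(settings.output_folder + 'canopy_height')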
def list_files(folder_path):
files = []
# Scan directory
for file_name in scandir.scandir(folder_path):
files.append(folder_path + '/' + file_name.name)
return(files)
if __name__ == '__main__':
## 2) Prepare environment
# Status
print('#' * 80 + '\n')
print('Checking EcoDes-DK15 vrts for completeness\n\n')
print('Preparing environment... '),
# determine output folder structure based on original processing
folders = []
for folder in scandir.scandir(settings.output_folder):
if folder.is_dir():
sub_folders = [sub_folder.path for sub_folder in scandir.scandir(folder.path) if sub_folder.is_dir()]
if len(sub_folders) > 0:
for sub_folder in sub_folders:
folders.append(sub_folder)
else:
folders.append(folder.path)
# Clean up folder paths
folders = map(lambda folder: re.sub('\\\\', '/', folder), folders)
# Status
print('done.'),
print('Checking vrts... '),
# Run file checks in parallel
multiprocessing.set_executable(settings.python_exec_path)
pool = multiprocessing.Pool(processes=62)
file_checks = list(tqdm.tqdm(pool.imap_unordered(check_vrt_completeness, folders),
total = len(folders)))
# Concatenate into one dataframe
status_df = pandas.concat(file_checks)
# Write to file
status_df.to_csv(settings.log_folder + '/missing_files_in_vrts.csv',
index = False)
# Status
print('done.'),
print('\nFound ' + str(len(status_df.index)) + ' missing files.')
print('Check log file for names of missing files:\n\t' +
settings.log_folder + '/missing_files_in_vrts.csv')
# End of File
|
jakobjassmann/ecodes-dk-lidar | documentation/source_data/merger_scripts/merge_outputs.py | <reponame>jakobjassmann/ecodes-dk-lidar
## EcoDes-DK output merger
## This script is used to merge the outputs of the various EcoDes processing
## runs to create the merged dataset based on the DHM_201415 merger.
## <NAME> <EMAIL>
# Dependencies
import os
import shutil
import pandas
import glob
import re
import scandir
# Status
print('#' * 80)
print('Merging EcoDes-DK outputs from the different reprocessing batches.')
print('\nPreparing environment...'),
## 1) Set global variables
# tile_ids to source
tiles_to_source_original_processing = pandas.read_csv('D:/Jakob/dhm201415_merger/tiles_from_DHM2018.csv')
tiles_to_source_reprocessing_1 = pandas.read_csv('D:/Jakob/dhm201415_merger/tiles_from_DHM2015.csv')
tiles_to_source_reprocessing_2 = pandas.read_csv('D:/Jakob/dhm201415_merger/tiles_to_process_dhm201415_merger.csv')
tiles_incomplete = pandas.read_csv('D:/Jakob/dhm201415_merger/incomplete_tile_pairs.csv')
# Remove incomplete tiles from tile_ids
tiles_to_source_original_processing = pandas.DataFrame(
set(tiles_to_source_original_processing['tile_id'].tolist()) -
set(tiles_incomplete['tile_id'].tolist()),
columns = ['tile_id'])
tiles_to_source_reprocessing_1 = pandas.DataFrame(
set(tiles_to_source_reprocessing_1['tile_id'].tolist()) -
set(tiles_incomplete['tile_id'].tolist()),
columns = ['tile_id'])
tiles_to_source_reprocessing_2 = pandas.DataFrame(
set(tiles_to_source_reprocessing_2['tile_id'].tolist()) -
set(tiles_incomplete['tile_id'].tolist()),
columns = ['tile_id'])
# Remove redundancies
tiles_to_source_original_processing = pandas.DataFrame(
set(tiles_to_source_original_processing['tile_id'].tolist()) -
set(tiles_to_source_reprocessing_2['tile_id'].tolist()),
columns = ['tile_id'])
tiles_to_source_reprocessing_1 = pandas.DataFrame(
set(tiles_to_source_reprocessing_1['tile_id'].tolist()) -
set(tiles_to_source_reprocessing_2['tile_id'].tolist()),
columns = ['tile_id'])
# source folders
folder_original_processing = 'D:/Jakob/dk_nationwide_lidar/data/outputs'
folder_reprocessing_1 = 'D:/Jakob/ecodes-dk-lidar/data/outputs'
folder_reprocessing_2 = 'D:/Jakob/ecodes-dk-lidar-reprocessing/data/outputs'
# destination folder
dest_folder = 'D:/Jakob/ecodes-dk-lidar-rev1/data/outputs'
# base folders
dtm_files = 'D:/Jakob/ecodes-dk-lidar-reprocessing/data/dtm'
laz_files = 'D:/Jakob/ecodes-dk-lidar-reprocessing/data/laz'
# determine output folder structure based on original processing
folders = []
for folder in scandir.scandir(folder_original_processing):
if folder.is_dir():
sub_folders = [sub_folder.path for sub_folder in scandir.scandir(folder.path) if sub_folder.is_dir()]
if len(sub_folders) > 0:
for sub_folder in sub_folders:
folders.append(sub_folder)
else:
folders.append(folder.path)
# remove variables that were / will be separately reprocessed (if present)
folders = [folder for folder in folders if not bool(re.match('.*tile_footprints.*', folder))]
folders = [folder for folder in folders if not bool(re.match('.*solar_radiation.*', folder))]
folders = [folder for folder in folders if not bool(re.match('.*date_stamp.*', folder))]
# Clean up file paths
folders = map(lambda folder: re.sub('\\\\', '/', folder), folders)
# Keep only relative paths
folders = map(lambda folder: '/' + os.path.relpath(folder,
folder_original_processing),
folders)
###!!! Break(s) for debugging !!!
##folders = [folders[0]]
##folders = ['/point_source_info/point_source_counts']
## 2) Function definitons
def list_files(folder_path):
files = []
# Scan directory
for file_name in scandir.scandir(folder_path):
files.append(file_name.name)
return(files)
def get_tile_ids(file_names):
# initiate empty list for tile_ids
tile_ids = []
# clean up file names
for i in range(0, len(file_names)):
file_names[i] = re.sub('\\\\', '/', file_names[i])
# fill list with tile_id
for file_name in file_names:
tile_id = re.sub('.*(\d{4}_\d{3}).*', '\g<1>', file_name)
tile_ids.append(tile_id)
# combine to data frame
files_df = pandas.DataFrame(zip(*[tile_ids, file_names]),
columns = ['tile_id', 'file_name'])
# return files_df
return(files_df)
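# e.g. (illustrative) 'DTM_1km_6186_524.tif' yields tile_id '6186_524'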
def copy_tiles(tiles_to_copy, files_df, source_folder, out_folder):
# Subset files to copy
files_to_copy = files_df[files_df['tile_id'].
isin(tiles_to_copy['tile_id'].tolist())]['file_name'].tolist()
# Set counter to 0:
progress = 0
# Copy files
for i in range(0, len(files_to_copy)):
# Check whether file exists, if not copy
if not os.path.isfile(out_folder + '/' + files_to_copy[i]):
# Copy file
shutil.copy(source_folder + '/' + files_to_copy[i],
out_folder + '/' + files_to_copy[i])
# Update progress
progress = float(i + 1) / float(len(files_to_copy))
# Update progress bar
print('\r\t|' +
'-' * int(round(progress * 54)) +
' ' * int(round((1 - progress) * 54)) +
'| ' +
str(int(round(progress * 100))) + '%'),
def check_dir(folder_path):
# Check if dir exists
if os.path.exists(folder_path): return(0)
# Determine file path components and check the file path from root to end
subfolders = [folder_path]
current_dir = folder_path
while not current_dir == (os.path.splitdrive(folder_path)[0] + '/'):
parent_dir = os.path.dirname(current_dir)
subfolders.append(parent_dir)
current_dir = parent_dir
subfolders = subfolders[::-1][1:len(subfolders)]
for folder in subfolders:
if not os.path.exists(folder): os.mkdir(folder)
return(0)
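# (Functionally similar to os.makedirs() with an existence check; written out
# explicitly since Python 2's os.makedirs() has no exist_ok argument.)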
def compare_tile_dfs(df1, df2):
if(set(df1['tile_id'].tolist()) ==
set(df2['tile_id'].tolist())):
return(pandas.DataFrame([], columns = ['tile_id']))
else:
diff = pandas.DataFrame(
set(df1['tile_id'].tolist()) -
set(df2['tile_id'].tolist()),
columns = ['tile_id'])
return(diff)
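# Note: the comparison is one-directional - it returns tiles present in df1 but absent from df2.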
def get_var_from_source(var_folder, tiles_df, source_folder):
# Get all files for one variable from one source
# Set dest directory based on global variable
global dest_folder
out_folder = dest_folder + var_folder
# and create if needed
check_dir(out_folder)
# Set source folder
source_folder = source_folder + var_folder
# Generate df of tile_ids and file names
files_to_copy = get_tile_ids(list_files(source_folder))
# Copy files
copy_tiles(tiles_df, files_to_copy, source_folder, out_folder)
return(0)
def get_var(var_folder):
# Get global variables
global tiles_to_source_original_processing, tiles_to_source_reprocessing_1, tiles_to_source_reprocessing_2, folder_original_processing, folder_reprocessing_1, folder_reprocessing_2
# Get all files for one variable from the three data sources
print('Sourcing: ' + var_folder)
print('\tOriginal processing')
get_var_from_source(var_folder,
tiles_to_source_original_processing,
folder_original_processing)
print('\n\tReprocessing #1')
get_var_from_source(var_folder,
tiles_to_source_reprocessing_1,
folder_reprocessing_1)
print('\n\tReprocessing #2')
get_var_from_source(var_folder,
tiles_to_source_reprocessing_2,
folder_reprocessing_2)
# Return df of copied tiles and their variable folder
files_copied = get_tile_ids(list_files(dest_folder + var_folder))
files_copied['var_folder'] = var_folder
print('\tDone.\n')
return(files_copied)
# Status
print(' done.\n')
## 3) Main body of script
## Check completeness of tile lists
# Status
print('Checking completeness of tile list to copy...'),
# Get all tiles in data set
dhm_merged_tiles = get_tile_ids(list_files(laz_files))
# Compare with tiles to merge
missing_tiles = len(compare_tile_dfs(dhm_merged_tiles,
pandas.concat([tiles_to_source_original_processing,
tiles_to_source_reprocessing_1,
tiles_to_source_reprocessing_2])))
if missing_tiles > 0:
# Prompt for choice to stop
del_choice = raw_input('\n' + str(missing_tiles) + ' tiles are missing! Continue anyway? ' +
'[y/n]')
if not del_choice == 'y':
print('Aborting on request or invalid choice!')
quit()
else:
print('Okay, continuing merger.')
else:
print('\n=> Sets are complete, proceeding as planned.\n')
## Copy tiles for all variables
# Status
print('Starting merger:\n\n')
files_copied_dfs = []
for var_folder in folders:
files_copied_df = get_var(var_folder)
files_copied_dfs.append(files_copied_df)
# Status
print('Merger complete!\n')
print('#' * 80 + '\n')
## Quality control
# Status
print('Quality control:\n')
# Quality control:
files_missing_dfs = []
for files_copied_df in files_copied_dfs:
# Get tiles missed in copying (e.g. due to absence in source)
files_missing_df = compare_tile_dfs(dhm_merged_tiles,
files_copied_df)
if len(files_missing_df) > 0:
# Status
print('\t' +
files_copied_df['var_folder'][1] +
' is missing: ' +
str(len(files_missing_df)) +
' tiles.\n')
# Add missing tiles to list
files_missing_df['var_folder'] = files_copied_df['var_folder'][1]
files_missing_dfs.append(files_missing_df)
else:
# Status
print('\t' + files_copied_df['var_folder'][1] + ' complete.\n')
files_missing_dfs = pandas.concat(files_missing_dfs)
files_missing_dfs.to_csv('final_merger_missing_files.csv', index = False)
# Status
print('A total of ' + str(len(files_missing_dfs)) + ' files are missing.\n')
print('Quality control complete.\n')
print('Merger complete.\n')
print('#' * 80 + '\n')
# EOF
|
jakobjassmann/ecodes-dk-lidar | scripts/generate_dems.py | import multiprocessing
import pandas
import subprocess
import re
import datetime
import os
import opals
from dklidar import settings
# initate opals
opals.loadAllModules()
# make folder for temp odms
os.mkdir('temp_odms')
os.chdir('temp_odms')
# load missing tiles
missing_dtms = pandas.read_csv(settings.dtm_folder + '../missing_dtm_tile_ids.csv')['tile_id'].tolist()
print('Generating dems for ' + str(len(missing_dtms)) + ' tiles...')
for tile_id in missing_dtms:
os.mkdir(tile_id)
os.chdir(tile_id)
laz_file = settings.laz_folder + '/PUNKTSKY_1km_' + tile_id + '.laz'
odm_file = tile_id + '_temp.odm'
dtm_file = settings.dtm_folder + '/DTM_1km_' + tile_id
# 1) Import tile into temporary odm
try:
import_tile = opals.Import.Import()
import_tile.inFile = laz_file
import_tile.outFile = odm_file
import_tile.commons.screenLogLevel = opals.Types.LogLevel.none
import_tile.run()
print('Imported ' + tile_id + '. '),
except:
print('Unable to import ' + tile_id + '.'),
# 2) Validate CRS
try:
odm_dm = opals.pyDM.Datamanager.load(odm_file)
crs_str = odm_dm.getCRS()
# Check whether CRS exists, if not assign, if different throw error.
if crs_str == settings.crs_wkt_opals:
print ('crs: match. '),
elif crs_str == '':
odm_dm.setCRS(settings.crs_wkt_opals)
print('crs: empty - set. '),
else:
print('crs: warning - no match. '),
odm_dm = None # This is needed as opals locks the file connection otherwise.
except:
print('crs validation error. '),
# 3) Generate DEM
# Section adapted from <NAME> code in ALS Calculator.py
# ---
try:
dtm_export = opals.DTM.DTM()
dtm_export.inFile = odm_file
dtm_export.feature = opals.Types.GridFeature.sigmaz
dtm_export.outFile = dtm_file
dtm_export.gridSize = 0.4
dtm_export.filter = "Generic[Classification == 2]"
dtm_export.multiBand = False
dtm_export.neighbours = 8 # recommendation by TU Wien (<NAME>)
dtm_export.searchRadius = 6 # recommendation by TU Wien (<NAME>)
dtm_export.commons.screenLogLevel = opals.Types.LogLevel.none
dtm_export.run()
print('dtm export success.'),
except:
print('dtm export failed. '),
# ---
# tidy up
try:
os.rename((dtm_file + '_dtm.tif'), (dtm_file + '.tif'))
os.remove(dtm_file + '_sigmaz.tif')
print('tidy up successful')
except:
print('tidy up failed.')
out_file = open(settings.laz_folder + '../missing_dtms_generated.txt', 'a+')
out_file.write(dtm_file + '.tif\n')
out_file.close()
os.chdir('..')
|
jakobjassmann/ecodes-dk-lidar | scripts/check_outputs_integrity.py | # EcoDes-DK15 check validity of outputs based on whether gdal can open the file.
# <NAME> <EMAIL> 2 December 2021
# Dependencies
import scandir
import pandas
import re
import tqdm
from osgeo import gdal
import multiprocessing
from dklidar import settings
# Switch on gdal Python exceptions
gdal.UseExceptions()
## 1) Function definitions
# Check file function
def check_file(file_name):
file_path = []
error = []
try:
gtif = gdal.Open(file_name)
gtif = None
except RuntimeError, e:
file_path.append(file_name)
error.append(e)
return(pandas.DataFrame(zip(*[file_path, error]),
columns = ['file_name','error']))
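# (returns an empty data frame if gdal opens the file without raising an error)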
def list_files(folder_path):
files = []
# Scan directory
for file_name in scandir.scandir(folder_path):
files.append(folder_path + '/' + file_name.name)
return(files)
if __name__ == '__main__':
## 2) Prepare environment
# Status
print('#' * 80 + '\n')
print('Validating EcoDes-DK15 outputs through a gdal load\n\n')
print('Preparing environment...'),
# determine output folder structure based on original processing
folders = []
for folder in scandir.scandir(settings.output_folder):
if folder.is_dir():
sub_folders = [sub_folder.path for sub_folder in scandir.scandir(folder.path) if sub_folder.is_dir()]
if len(sub_folders) > 0:
for sub_folder in sub_folders:
folders.append(sub_folder)
else:
folders.append(folder.path)
# Scan files
file_lists = [list_files(folder) for folder in folders]
file_list = [file_name for file_list in file_lists for file_name in file_list]
# break for debug:
file_list = file_list
# Status print check - if needed
print(' done.\n')
#print('\n')
#print(''.join(map(lambda folder: folder + '\n', folders)))
print('Checking ' + str(len(folders)) + ' output folders.')
print('Containing: ' + str(len(file_list)) + ' files.\n')
# Run file checks in parallel
multiprocessing.set_executable(settings.python_exec_path)
pool = multiprocessing.Pool(processes=62)
file_checks = list(tqdm.tqdm(pool.imap_unordered(check_file, file_list),
total = len(file_list)))
# Concatenate into one dataframe
errors_df = pandas.concat(file_checks)
# Write to file
errors_df.to_csv(settings.log_folder + '/output_file_errors.csv', index = False)
# Status
print('\nFound ' + str(len(errors_df.index)) + ' errors.')
print('Check log file for errors:\n\t' +
settings.log_folder + '/output_file_errors.csv')
# End of File
|
jakobjassmann/ecodes-dk-lidar | scripts/download_files.py | <reponame>jakobjassmann/ecodes-dk-lidar
import multiprocessing
import pandas
import subprocess
import re
import datetime
import os
from dklidar import settings
remote_url_laz = "https://download.kortforsyningen.dk/system/files/Statsaftalen/DANMARK/3_HOJDEDATA/PUNKTSKY/"
remote_url_dtm = "https://download.kortforsyningen.dk/system/files/Statsaftalen/DANMARK/3_HOJDEDATA/DTM/"
file_list_laz = 'D:/Jakob/dk_nationwide_lidar/data/file_lists_kortforsyningen/all_laz_files.csv'
file_list_dtm = 'D:/Jakob/dk_nationwide_lidar/data/file_lists_kortforsyningen/all_dtm_files.csv'
# A cookie is needed to access the data. To generate the following curl command you can use Google Chrome.
# First, log into the kortforsyningen website then select one tile from the laz files in the catalogues and check out
# Open the "Mine downloads" section of the website and activate the Chrome Developer Mode by pressing "F12".
# In the newly opened panel, select the "Network" tab and press "Ctrl + R" to activate monitoring for the website.
# Then start the download of the file that you checked out and right-click on the associated item in the network tab.
# Click "copy" and select "curl CMD". Then replace the following cookie string with the cookie string in
# the copied curl command. You may have to adapt the curl command in download_file() accordingly if you change system.
cookie = 'Cookie: nmstat=1571735317171; _1d3fe=http://10.0.0.177:80; MY_SESSION=rd4o00000000000000000000ffffac1105c7o80; has_js=1; SESS1fef01291fb7205e2d46065129ba3bc3=jrFlh_HehTZC8-4x7l0loZ92TuhL0ikAewDwXNCpKiQ; downloadticket=1853700203f2d9b03cce33e4cb0c8c37'
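# Note: the cookie is tied to a login session and download ticket, so it will
# likely expire and need to be regenerated following the steps above.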
def download_file(file_name):
""" Wee helper function to download file from Kortforsyningen server
:param file_name - file name on server and to be saved as
:returns exit code of curl download command
"""
remote_url = 'empty_url'
local_path = '/empty_path'
if not(re.match('.*LAZ.*', file_name) is None):
remote_url = remote_url_laz + file_name
local_path = settings.laz_folder + file_name
if not (re.match('.*DTM.*', file_name) is None):
remote_url = remote_url_dtm + file_name
local_path = settings.dtm_folder + file_name
curl_cmd = '"C:/Program Files/Git/mingw64/bin/curl.exe" ' + \
'"' + remote_url + '" ' +\
'-H "Connection: keep-alive" ' +\
'-H "Upgrade-Insecure-Requests: 1" ' + \
'-H "User-Agent: Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36" ' + \
'-H "Sec-Fetch-Dest: iframe" ' + \
'-H "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9" ' + \
'-H "Sec-Fetch-Site: same-origin" ' + \
'-H "Sec-Fetch-Mode: navigate" ' +\
'-H "Sec-Fetch-User: ?1" ' + \
'-H "Referer: https://download.kortforsyningen.dk/mine/downloads" ' + \
'-H "Accept-Language: en,da-DK;q=0.9,da;q=0.8,en-US;q=0.7" ' + \
'-H ' + '"' + cookie + '" ' +\
'--compressed ' + \
'--output ' + '"' + local_path + '"'
try:
FNULL = open(os.devnull, 'w')
subprocess.check_call(curl_cmd, stdout=FNULL, stderr=subprocess.STDOUT)
return_value = 'success'
except subprocess.CalledProcessError as error:
return_value = 'curl non-zero exit status: ' + str(error.returncode)
print '.',
return return_value
def extract_file(file_name):
""" Wee helper function to download file from Kortforsyningen server
:param file_name - file name on server and to be saved as
:returns exit code of curl download command
"""
local_path = '/empty_path'
dest_folder = '/empty_path'
if not(re.match('.*LAZ.*', file_name) is None):
local_path = settings.laz_folder + file_name
dest_folder = settings.laz_folder
if not (re.match('.*DTM.*', file_name) is None):
local_path = settings.dtm_folder + file_name
dest_folder = settings.dtm_folder
extract_cmd = '"C:/Program Files/7-Zip/7z" ' + \
'e ' + \
'-y ' + local_path + ' ' + \
'-o' + dest_folder
try:
FNULL = open(os.devnull, 'w')
subprocess.check_call(extract_cmd, stdout=FNULL, stderr=subprocess.STDOUT)
return_value = 'success'
except subprocess.CalledProcessError as error:
return_value = '7zip non-zero exit status: ' + str(error.returncode)
print '.',
return return_value
#### Main body of script
if __name__ == '__main__':
# Start timer
startTime = datetime.datetime.now()
print('\n')
print('-' * 80)
print("DK nationwide LiDAR download script")
print(str(startTime))
# Load file lists as data frames:
laz_files_df = pandas.read_csv(file_list_laz)
dtm_files_df = pandas.read_csv(file_list_dtm)
if not ('download' in laz_files_df): laz_files_df['download'] = 'pending'
if not ('extraction' in laz_files_df): laz_files_df['extraction'] = 'pending'
if not ('download' in dtm_files_df): dtm_files_df['download'] = 'pending'
if not ('extraction' in dtm_files_df): dtm_files_df['extraction'] = 'pending'
# Prepare download lists
laz_files_to_download = laz_files_df['file_name'][laz_files_df['download'] != 'success'].tolist()
dtm_files_to_download = dtm_files_df['file_name'][dtm_files_df['download'] != 'success'].tolist()
# Prep processing pool
multiprocessing.set_executable(settings.python_exec_path)
pool = multiprocessing.Pool(processes=4)
# Download LAZ files
if len(laz_files_to_download) > 0:
print(datetime.datetime.now().strftime('%X') + ' Downloading ' + str(len(laz_files_to_download)) + ' laz files: '),
download_pool = pool.map_async(download_file, laz_files_to_download)
download_pool.wait()
laz_files_df['download'] = download_pool.get()
else: print('No LAZ files to download.')
print('\n')
# Download DTM files
if len(dtm_files_to_download) > 0:
print(datetime.datetime.now().strftime('%X') + ' Downloading ' + str(len(dtm_files_to_download)) + ' dtm files: '),
download_pool = pool.map_async(download_file, dtm_files_to_download)
download_pool.wait()
dtm_files_df['download'] = download_pool.get()
else: print('No DTM files to download.')
print('\n')
# Prepare extraction lists
laz_files_to_extract = laz_files_df['file_name'][laz_files_df['extraction'] != 'success'].tolist()
dtm_files_to_extract = dtm_files_df['file_name'][dtm_files_df['extraction'] != 'success'].tolist()
# Extract LAZ files
if len(laz_files_to_extract) > 0:
print(datetime.datetime.now().strftime('%X') + ' Extracting ' + str(len(laz_files_to_extract)) + ' laz files: '),
download_pool = pool.map_async(extract_file, laz_files_to_extract)
download_pool.wait()
laz_files_df['extraction'] = download_pool.get()
else: print('No LAZ files to extract.')
print('\n')
# Extract DTM files
if len(dtm_files_to_extract) > 0:
print(datetime.datetime.now().strftime('%X') + ' Extracting ' + str(len(dtm_files_to_extract)) + ' dtm files: '),
download_pool = pool.map_async(extract_file, dtm_files_to_extract)
download_pool.wait()
dtm_files_df['extraction'][dtm_files_df['extraction'] != 'success'] = download_pool.get()
else: print('No DTM files to extract.')
print('\n')
laz_files_df.to_csv(file_list_laz, index=False, header=True)
dtm_files_df.to_csv(file_list_dtm, index=False, header=True)
print('-' * 80 + '\nDone.\nTime elapsed: ' + str(datetime.datetime.now() - startTime)) |
jakobjassmann/ecodes-dk-lidar | scripts/process_tiles.py | <filename>scripts/process_tiles.py
### Script to rasterise point clouds for the DK nationwide lidar re-processing
### <NAME> <EMAIL> 29 January 2019
## Imports
import glob
import re
import os
import shutil
import datetime
import time
import multiprocessing
import pandas
import opals
from dklidar import points
from dklidar import dtm
from dklidar import settings
from dklidar import common
#### Prepare the environment
# Set working directory
os.chdir(settings.wd)
# Set number of parallel processes:
n_processes = 62 # 54
# Confirm essential folders exist
if not os.path.exists(settings.wd):
print('Working Directory ' + settings.wd + ' does not exist. Exiting script...')
exit()
if not os.path.exists(settings.laz_folder):
print('laz_folder ' + settings.laz_folder + ' does not exist. Exiting script...')
exit()
if not os.path.exists(settings.dtm_folder):
print('dtm_folder ' + settings.dtm_folder + ' does not exist. Exiting script...')
exit()
# Confirm other folders exist and if not create them
for folder in [settings.dtm_mosaics_folder, settings.dtm_footprint_folder,
settings.odm_folder, settings.odm_footprint_folder, settings.output_folder]:
if not os.path.exists(folder):
os.mkdir(folder)
# Load file names
dtm_files = glob.glob(settings.dtm_folder + '/*.tif')
laz_files = glob.glob(settings.laz_folder + '/*.laz')
# initiate empty lists for tile_ids
dtm_tile_ids = []
laz_tile_ids = []
# fill dictionaries with tile_id, as well as row number and column number for each file name:
for file_name in dtm_files:
tile_id = re.sub('.*DTM_1km_(\d*_\d*).tif', '\g<1>', file_name)
dtm_tile_ids.append(tile_id)
for file_name in laz_files:
tile_id = re.sub('.*PUNKTSKY_1km_(\d*_\d*).laz', '\g<1>', file_name)
laz_tile_ids.append(tile_id)
## Define processing steps for each tile to be carried out in parallel
def process_tile(tile_id):
## Prepare environment
# Generate a temporary wd for the parallel worker; this allows logging and opals sessions to run smoothly in parallel
wd = os.getcwd()
current_pid = re.sub('[(),]', '', str(multiprocessing.current_process()._identity))
temp_wd = settings.scratch_folder + '/temp_' + current_pid
if not os.path.exists(temp_wd):
os.mkdir(temp_wd)
os.chdir(temp_wd)
# Create folder for logging
tile_log_folder = settings.log_folder + '/process_tiles/' + tile_id
if not os.path.exists(tile_log_folder):
os.mkdir(tile_log_folder)
# Stagger processing if this is the worker's first tile during a processing run.
# This is a crucial step to reduce the chance that the functions using gdal_rasterize run directly in parallel
# (generate mask function); if more than 10 gdal_rasterize instances run in parallel there is a massive drop in
# performance for some reason
if not os.path.exists(temp_wd + '/first_go_complete.txt'):
# sleep for 5 seconds per worker id; of all full integer staggers this provided the maximum performance gain (< 6 s = sequential)
time.sleep(5 * int(re.sub('[(),]', '', str(multiprocessing.current_process()._identity))))
lock_file = open(temp_wd + '/first_go_complete.txt', 'w')
lock_file.close()
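# (e.g. the worker with identity 3 sleeps 15 s before its first tile; all later tiles start without delay)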
# opals loadModules
opals.loadAllModules()
## Logging: Keep track of progress for each step and overall progress
# Initiate progress variables
steps = ['processing']
status_steps = [['complete']]
## Generate masks
return_value = common.generate_water_masks(tile_id)
# Update progress variables
steps.append('generate_water_masks')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'generate_water_masks', tile_id)
## Import tile to ODM
return_value = points.odm_import_single_tile(tile_id)
# Update progress variables
steps.append('odm_import_single_tile')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_import_single_tile', tile_id)
## Validate CRS of odm files
return_value = points.odm_validate_crs(tile_id)
# Update progress variables
steps.append('odm_validate_crs')
status_steps.append([return_value])
## Export footprint
return_value = points.odm_generate_footprint(tile_id)
# Update progress variables
steps.append('odm_generate_footprint')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_generate_footprint', tile_id)
## Normalise height
return_value = points.odm_add_normalized_z(tile_id)
# Update progress variables
steps.append('odm_add_normalized_z')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_add_normalized_z', tile_id)
## Export mean normalised height for 10 m x 10 m cell
return_value = points.odm_export_normalized_z(tile_id)
# Update progress variables
steps.append('odm_export_normalized_z')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_export_normalized_z', tile_id)
## Export canopy height
return_value = points.odm_export_canopy_height(tile_id)
# Update progress variables
steps.append('odm_export_canopy_height')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_export_canopy_height', tile_id)
## Export point counts for pre-defined intervals and classess
return_value = points.odm_export_point_counts(tile_id)
# Update progress variables
steps.append('odm_export_point_counts')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_export_point_counts', tile_id)
## Export proportions based on point counts
return_value = points.odm_export_proportions(tile_id)
# Update progress variables
steps.append('odm_export_proportions')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_export_proportions', tile_id)
## Export point source information
return_value = points.odm_export_point_source_info(tile_id)
# Update progress variables
steps.append('odm_export_point_source_info')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_export_point_source_info', tile_id)
## Export amplitude mean and sd
return_value = points.odm_export_amplitude(tile_id)
# Update progress variables
steps.append('odm_export_amplitude')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_export_amplitude', tile_id)
## Export date stamps
return_value = points.odm_export_date_stamp(tile_id)
# Update progress variables
steps.append('odm_export_date_stamp')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_export_date_stamp', tile_id)
## Remove unneeded odm files
return_value = points.odm_remove_temp_files(tile_id)
# Update progress variables
steps.append('odm_remove_temp_files')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'odm_remove_temp_files', tile_id)
## Terrain model derived variables
## Generate tile footprint
return_value = dtm.dtm_generate_footprint(tile_id)
# Update progress variables
steps.append('dtm_generate_footprint')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_generate_footprint', tile_id)
## Generate neighbourhood mosaic
return_value = dtm.dtm_neighbourhood_mosaic(tile_id)
# Update progress variables
steps.append('dtm_neighbourhood_mosaic')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_neighbourhood_mosaic', tile_id)
## Validate CRS
return_value = dtm.dtm_validate_crs(tile_id)
# Update progress variables
steps.append('dtm_validate_crs')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_validate_crs', tile_id)
## Generate 10 m aggregate of DEM
return_value = dtm.dtm_aggregate_tile(tile_id)
# Update progress variables
steps.append('dtm_aggregate_tile')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_aggregate_tile', tile_id)
## Generate 10 m aggregate of neighbourhood mosaic
return_value = dtm.dtm_aggregate_mosaic(tile_id)
# Update progress variables
steps.append('dtm_aggregate_mosaic')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_aggregate_mosaic', tile_id)
## Calculate slope
return_value = dtm.dtm_calc_slope(tile_id)
# Update progress variables
steps.append('dtm_calc_slope')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_calc_slope', tile_id)
## Calculate aspect
return_value = dtm.dtm_calc_aspect(tile_id)
# Update progress variables
steps.append('dtm_calc_aspect')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_calc_aspect', tile_id)
## Calculate heat index
return_value = dtm.dtm_calc_heat_index(tile_id)
# Update progress variables
steps.append('dtm_calc_heat_index')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_calc_heat_index', tile_id)
## Calculate solar radiation
return_value = dtm.dtm_calc_solar_radiation(tile_id)
# Update progress variables
steps.append('dtm_calc_solar_radiation')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_calc_solar_radiation', tile_id)
## Calculate landscape openness mean
return_value = dtm.dtm_openness_mean(tile_id)
# Update progress variables
steps.append('dtm_openness_mean')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_openness_mean', tile_id)
## Calculate landscape openness difference
return_value = dtm.dtm_openness_difference(tile_id)
# Update progress variables
steps.append('dtm_openness_difference')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_openness_difference', tile_id)
## Calculate Kopecky TWI
return_value = dtm.dtm_kopecky_twi(tile_id)
# Update progress variables
steps.append('dtm_kopecky_twi')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_kopecky_twi', tile_id)
## Remove unneeded dtm files
return_value = dtm.dtm_remove_temp_files(tile_id)
# Update progress variables
steps.append('dtm_remove_temp_files')
status_steps.append([return_value])
# gather logs for step and tile
common.gather_logs('process_tiles', 'dtm_remove_temp_files', tile_id)
## Logging: finalise log outputs
# Zip into pandas data frame
status_df = pandas.DataFrame(zip(*status_steps), index = [tile_id], columns=steps)
status_df.index.name = 'tile_id'
# Export as CSV
status_df.to_csv(tile_log_folder + '/status.csv', index=True, header=True)
# Change back to original working directory
os.chdir(wd)
# Print tile_id to console to update on status
print(datetime.datetime.now().strftime('%X') + ' ' + tile_id + ' '),
#### Main body of script
if __name__ == '__main__':
## Start timer
startTime = datetime.datetime.now()
## Status output to console
print('\n' + '-' * 80 + 'Starting process_tiles.py at ' + str(startTime.strftime('%c')) + '\n')
## Prepare process managment and logging
progress_df = common.init_log_folder('process_tiles', laz_tile_ids)
## Identify which tiles still require processing
tiles_to_process = set(progress_df.index.values[progress_df['processing'] != 'complete'].tolist())
## If processing of a specific subset of tiles is needed
## the following lines can be helpful in achieving the task
## Remove comments as needed.
## tiles_to_process_completed = set(progress_df.index.values[progress_df['processing'] == 'complete'].tolist())
## tiles_to_process_dhm201415 = set(pandas.read_csv('auxillary_files/tiles_to_process_dhm201415_merger.csv')['tile_id'].tolist())
## tiles_to_process = tiles_to_process_dhm201415 - tiles_to_process_completed
## tiles_to_process = tiles_to_process - (tiles_to_process - set(progress_df.index.values.tolist()))
## print('Processing ' + str(len(tiles_to_process)) + ' tiles. \n')
## time.sleep(60)
# Set up processing pool
multiprocessing.set_executable(settings.python_exec_path)
pool = multiprocessing.Pool(processes=n_processes)
# Execute processing of tiles
print(datetime.datetime.now().strftime('%X') + ' Processing tiles: ... '),
tile_processing = pool.map_async(process_tile, tiles_to_process)
# Make sure all processes finish before carrying on.
tile_processing.wait()
print('... done.')
# Clear scratch folder
shutil.rmtree('scratch')
os.mkdir('scratch')
shutil.copy('data/empty_on_purpose.txt', 'scratch/empty_on_purpose.txt')
# Update progress status
progress_df = common.update_progress_df('process_tiles', progress_df)
# Export progress_df as CSV
progress_file = settings.log_folder + '/process_tiles/' + 'overall_progress.csv'
progress_df.to_csv(progress_file, index=True, header=True)
# Print out time elapsed:
print('\nTime elapsed: ' + str(datetime.datetime.now() - startTime))
|
jakobjassmann/ecodes-dk-lidar | dklidar/settings.py | ### Settings file for the DK Lidar project
### <NAME> <EMAIL> 29 January 2019
# This file is used to provide global variables to all scripts in the DK nationwide lidar project.
# Path to python executable
python_exec_path = 'C:/Program Files/opals_nightly_2.3.2/opals/python.exe'
# Set paths to gdal executables / binaries (here we use OSGE4W64) as the OPALS gdal binaries do not work reliably
# remember the trailing spaces at the end!
# simply set to the gdal command e.g. 'gdalwarp ' if you want to use the OPALS gdal binaries (gdaldem slope won't work)
gdalwarp_bin = 'C:/OSGeo4W/OSGeo4W.bat gdalwarp '
gdaldem_bin = 'C:/OSGeo4W/OSGeo4W.bat gdaldem '
gdaltlindex_bin = 'C:/OSGeo4W/OSGeo4W.bat gdaltindex '
gdal_translate_bin = 'C:/OSGeo4W/OSGeo4W.bat gdal_translate '
gdal_calc_bin = 'C:/OSGeo4W/OSGeo4W.bat gdal_calc '
gdal_merge_bin = 'C:/OSGeo4W/OSGeo4W.bat gdal_merge '
gdaltransform_bin = 'C:/OSGeo4W/OSGeo4W.bat gdaltransform '
gdalinfo_bin = 'C:/OSGeo4W/OSGeo4W.bat gdalinfo '
gdal_rasterize_bin = 'C:/OSGeo4W/OSGeo4W.bat gdal_rasterize '
gdalsrsinfo_bin = 'C:/OSGeo4W/OSGeo4W.bat gdalsrsinfo '
# set gdal version (this matters for the crs outputs)
gdal_version = '3.3.3' # '2.2.4'
# Saga binary commands
saga_wetness_bin = 'C:/OSGeo4W/apps/saga-ltr/saga_cmd.exe --cores=1 ta_hydrology 15 '
saga_openness_bin = 'C:/OSGeo4W/apps/saga-ltr/saga_cmd.exe --cores=1 ta_lighting 5 '
saga_bin = 'D:/Jakob/saga-7.8.2_x64/saga_cmd.exe --cores=1 '
### Set folder locations
# Main working directory
wd = 'D:/Jakob/ecodes-dk-lidar-rev1'
# Point cloud folder
laz_folder = wd + '/data/laz/'
# DTM folder
dtm_folder = wd + '/data/dtm/'
# DTM mosaics and footprints
dtm_mosaics_folder = wd + '/data/dtm_mosaics'
dtm_mosaics_10m_folder = wd + '/data/dtm_mosaics_10m'
dtm_footprint_folder = wd + '/data/dtm_footprints'
# ODM folder
odm_folder = wd + '/data/odm/'
# ODM Mosaics and footprint
odm_mosaics_folder = wd +'/data/odm_mosaics'
odm_footprint_folder = wd + '/data/odm_footprint/'
# Folder for outputs
output_folder = wd + '/data/outputs/'
# Scratch folder for temporary data storage
scratch_folder = wd + '/scratch'
# Log folder for global log outputs
log_folder = wd + '/log'
# Mask files
dk_coastline_poly = 'D:/Jakob/ecodes-dk-lidar-rev1/auxillary_files/dk_bounds_jesper_poly.shp'
dk_lakes_poly = 'D:/Jakob/ecodes-dk-lidar-rev1/auxillary_files/lake_mask_jesper.shp'
## Spatial reference settings
# common crs as WKT string
crs_wkt_opals = r'PROJCS["ETRS89 / UTM 32N",GEOGCS["ETRS89",DATUM["European_Terrestrial_Reference_System_1989",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6258"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4258"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","25832"]]'
crs_wkt_gdal = r'PROJCS["ETRS89 / UTM zone 32N",GEOGCS["ETRS89",DATUM["European_Terrestrial_Reference_System_1989",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6258"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4258"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","25832"]]'
crs_proj4_gdal = r'+proj=utm +zone=32 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'
## Multiprocessing settings
# common nbThreads parameter - a throttle limiter for OPALS, ensures Opals subprocesses use only a single core
nbThreads = 1
## Processing Options
# Output cell size
out_cell_size = 10
## Filter Strings
# point filter for all three vegetation classes as OPALS WKT
veg_classes_filter = "Generic[Classification == 3 OR Classification == 4 OR Classification == 5]"
ground_and_veg_classes_filter = "Generic[Classification == 2 OR Classification == 3 OR Classification == 4 OR Classification == 5]"
all_classes = "Generic[Classification == 2 OR Classification == 3 OR Classification == 4 OR Classification == 5 OR Classification == 6 OR Classification == 9]"
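# (For reference, these follow the standard ASPRS LAS classes: 2 = ground,
# 3/4/5 = low/medium/high vegetation, 6 = building, 9 = water.)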
|
jakobjassmann/ecodes-dk-lidar | scripts/debug.py | # Script for debugging of dklidar module functons
# <NAME> <EMAIL> 18 February 2021
# This script requires the input files (dtms and laz) to be stored
# in the folders specified in dklidar/settings.py
# This also applies to the binaries specified in the same file
## Preparations
print('Preparing environment\n')
# Dependencies
import os
import sys
import opals
import datetime
# Load dklidar module parts for direct access
from dklidar import points
from dklidar import dtm
from dklidar import settings
from dklidar import common
# Set temporary workdirectory
temp_wd = 'D:/Jakob/ecodes-dk-lidar-reprocessing/scratch/debug'
# Set temporary out directory
settings.output_folder = 'D:/Jakob/ecodes-dk-lidar-reprocessing/scratch/debug/output'
# Create folders if needed
for folder in [temp_wd, settings.output_folder]:
if not os.path.exists(folder):
os.mkdir(folder)
# Change to temporary work dir
os.chdir(temp_wd)
# Set tile_id if not set as argument
args = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
if len(args) == 0:
tile_id = '6186_524'
else:
tile_id = args[0]
print('Processing tile_id: ' + tile_id + '\n')
### Load opals modules
opals.loadAllModules()
# Confirm essential folders exist
if not os.path.exists(settings.wd):
print('Working Directory ' + settings.wd + ' does not exist. Exiting script...')
exit()
if not os.path.exists(settings.laz_folder):
print('laz_folder ' + settings.laz_folder + ' does not exist. Exiting script...')
exit()
if not os.path.exists(settings.dtm_folder):
print('dtm_folder ' + settings.dtm_folder + ' does not exist. Exiting script...')
exit()
# Confirm other folders exist and if not create them
for folder in [settings.dtm_mosaics_folder, settings.dtm_footprint_folder,
settings.odm_folder, settings.odm_footprint_folder, settings.output_folder]:
if not os.path.exists(folder):
os.mkdir(folder)
## -------------------
## Start timer
print('\nStarting Timer\n')
startTime = datetime.datetime.now()
## -------------------
## Test dtm functions
print('#' * 60)
print('\nTesting DTM Functions\n')
# Generate tile footprint
print('=> Generate Tile Footprint')
print(dtm.dtm_generate_footprint(tile_id))
# Generate tile neighbourhood mosaic
print('=> Generate Neighbourhood Mosaic')
print(dtm.dtm_neighbourhood_mosaic(tile_id))
# Validate CRS
print('=> Validate CRS')
print(dtm.dtm_validate_crs(tile_id))
# Aggregate tile to 10 m
print('=> Aggregate tile to 10 m')
print(dtm.dtm_aggregate_tile(tile_id))
# Aggregate neighbourhood mosaic to 10 m
print('=> Aggregate Neighbourhood Mosaic to 10 m')
print(dtm.dtm_aggregate_mosaic(tile_id))
# Calculate slope
print('=> Calculate Slope')
print(dtm.dtm_calc_slope(tile_id))
# Calculate aspect
print('=> Calculate Aspect')
print(dtm.dtm_calc_aspect(tile_id, -1))
# Calculate heat index
print('=> Calculate Heat Index')
print(dtm.dtm_calc_heat_index(tile_id, -10))
# Calculate solar radiation
print('=> Calculate Solar Radiation')
print(dtm.dtm_calc_solar_radiation(tile_id))
# Calculate landscape openness mean
print('=> Calculate Openness Mean')
print(dtm.dtm_openness_mean(tile_id))
# Calculate landscape openness difference
print('=> Calculate Openness Difference')
print(dtm.dtm_openness_difference(tile_id))
## Calculate Kopecky TWI
print('=> Calculating Kopecky TWI')
print(dtm.dtm_kopecky_twi(tile_id))
# Remove old temp dtm files
print('=> Removing DTM temp files')
print(dtm.dtm_remove_temp_files(tile_id))
## -------------------
## Test common functions
print('\n')
print('#' * 60)
print('\nTesting Common Functions\n')
## Generate Masks
print('=> Generating Masks')
print(common.generate_water_masks(tile_id))
## -------------------
## Test points functions
print('\n')
print('#' * 60)
print('\nTesting Point Cloud Functions\n')
## Import tile to ODM
print('=> Importing ODM')
print(points.odm_import_single_tile(tile_id))
## Validate CRS of odm files
print('=> Validate CRS of ODMs')
print(points.odm_validate_crs(tile_id))
## Export footprint
print('=> Generating footprints from ODM')
print(points.odm_generate_footprint(tile_id))
## Normalise height
print('=> Normalize Height')
print(points.odm_add_normalized_z(tile_id))
## Export mean normalised height for 10 m x 10 m cell
print('=> Export Normalize Height')
print(points.odm_export_normalized_z(tile_id))
## Export canopy height
print('=> Export Canopy Height')
print(points.odm_export_canopy_height(tile_id))
## Export point counts for pre-defined intervals and classess
print('=> Export Point Counts')
print(points.odm_export_point_counts(tile_id))
## Export proportions based on point counts
print('=> Export Proportions')
print(points.odm_export_proportions(tile_id))
## Export point source information
print('=> Export Point Source Information')
print(points.odm_export_point_source_info(tile_id))
## Export amplitude mean and sd
print('=> Export Amplitude Mean and SD')
print(points.odm_export_amplitude(tile_id))
## Export date stamp
print('=> Exporting Date Stamps')
print(points.odm_export_date_stamp(tile_id))
## Additional export for selected point counts (to speed up testing)
#### Export total point count
##print('=> Export Total Point Count')
##print(points.odm_export_point_count(tile_id, 'total_point_count', -1, 50, [2,3,4,5,6,9]))
##
#### Export veg point count
##print('=> Export Total Point Count')
##print(points.odm_export_point_count(tile_id, 'vegetation_point_count', 0, 50, [3,4,5]))
## Remove unneeded odm files
print('=> Remove ODM Temp Files')
print(points.odm_remove_temp_files(tile_id))
## -------------------
# Report time elapsed
print('\n')
print('#' * 60)
print('Debug Complete\nTime elapsed: ' + str(datetime.datetime.now() - startTime))
print("Run debug.Rmd for visual analysis")
print('\n')
|
jakobjassmann/ecodes-dk-lidar | scripts/fix_na_tile_data_type.py | <gh_stars>0
# EcoDes-DK - Fix files missing from VRTs
# <NAME> <EMAIL> 2 December 2021
# Most files missing from the VRTs are missing because they are the wrong raster
# file type (e.g. Int16 instead of float32) - the only tiles affected seem to
# be NA tiles.
# These originate from the processing workflow and OPALS output, but
# also from the fill_processing_gaps.py script that does not account for
# differences in the file paths. This script is here to correct the raster
# type of those files.
# !!! This scripts requires check_vrt_completeness.py to be run beforehand !!!
# Prep environment:
# Dependencies
import pandas
import os
import re
import scandir
import shutil
import itertools
import subprocess
from osgeo import gdal
from dklidar import settings
# Function definitions
def get_data_type(file_name):
raster = gdal.Open(file_name)
dataType = gdal.GetDataTypeName(raster.GetRasterBand(1).DataType)
raster = None
return(dataType)
def translate_file(file_name, data_type):
# Copy file to temp folder:
temp_file = settings.scratch_folder + '/temp_raster.tif'
shutil.copy(file_name, temp_file)
# remove old file
os.remove(file_name)
# translate file
os.system(settings.gdal_translate_bin +
'-ot ' + data_type + ' ' +
temp_file + ' ' +
file_name)
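# Illustrative usage (the file name is hypothetical):
#   translate_file(settings.output_folder + 'solar_radiation/solar_radiation_6186_524.tif', 'Int32')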
# Load missing tiles
missing_files = pandas.read_csv(settings.log_folder +
'/missing_files_in_vrts.csv')
# Set DataTypes for non-int16 variables
data_types_df = pandas.DataFrame(
zip(*[
['solar_radiation',
'amplitude_mean',
'amplitude_sd',
'date_stamp_min',
'date_stamp_max',
'date_stamp_mode'],
['Int32',
'Float32',
'Float32',
'Int32',
'Int32',
'Int32']]),
columns = ['variable','data_type'])
# determine output folder structure based on original processing
folders = []
for folder in scandir.scandir(settings.output_folder):
if folder.is_dir():
sub_folders = [sub_folder.path for sub_folder in scandir.scandir(folder.path) if sub_folder.is_dir()]
if len(sub_folders) > 0:
for sub_folder in sub_folders:
folders.append(sub_folder)
else:
folders.append(folder.path)
# Clean up folder paths
folders = map(lambda folder: re.sub('\\\\', '/', folder), folders)
# Set up progres bar
progress = 0
for i in range(0,len(missing_files.index)):
# Grab folder name
folder = list(
itertools.compress(
folders,
[bool(re.search(missing_files.variable[i], folder)) for folder in folders]))
folder = folder[0]
# Grab data_type
data_type = list(
itertools.compress(
data_types_df.data_type,
[bool(re.search(missing_files.variable[i], variable)) for variable in data_types_df.variable]))
data_type = data_type[0]
# Set file path
file_name = folder + '/' + missing_files.file_name[i]
# Check whether the data types already match; if so, skip this file
if data_type == get_data_type(file_name):
continue
# Copy to temp file
temp_file = settings.scratch_folder + '/' + missing_files.file_name[i]
shutil.copy(file_name, temp_file)
# Break for debugging
# file_name = settings.scratch_folder + '/test_out/' + missing_files.file_name[i]
# Remove file from original folder
os.remove(file_name)
# Construct gdal command
cmd = settings.gdal_translate_bin + '-ot ' + data_type + ' ' + temp_file + ' ' + file_name
print(cmd)
# Execute gdal command
os.system(cmd)
# Remove temp_file
os.remove(temp_file)
# Update progress
progress = float(i + 1) / float(len(missing_files.index))
# Update progress bar
print('\n\r|' +
'#' * int(round(progress * 54)) +
'-' * int(round((1 - progress) * 54)) +
'| ' +
str(int(round(progress * 100))) + '%\n'),
|
jakobjassmann/ecodes-dk-lidar | documentation/source_data/merger_scripts/check_merged_outputs.py | ## EcoDes-DK output merger
## This script is used to merge the outputs of the various EcoDes processing
## runs to create the merged dataset based on the DHM_201415 merger.
## <NAME> <EMAIL>
# Dependencies
import os
import shutil
import pandas
import glob
import re
import scandir
import hashlib
# Status
print('#' * 80)
print('Check MD5 sums of merged EcoDes-DK outputs from the different reprocessing batches.')
print('\nPreparing environment...'),
## 1) Set global variables
# tile_ids to source
tiles_to_source_original_processing = pandas.read_csv('D:/Jakob/dhm201415_merger/tiles_from_DHM2018.csv')
tiles_to_source_reprocessing_1 = pandas.read_csv('D:/Jakob/dhm201415_merger/tiles_from_DHM2015.csv')
tiles_to_source_reprocessing_2 = pandas.read_csv('D:/Jakob/dhm201415_merger/tiles_to_process_dhm201415_merger.csv')
tiles_incomplete = pandas.read_csv('D:/Jakob/dhm201415_merger/incomplete_tile_pairs.csv')
# Remove incomplete tiles from tile_ids
tiles_to_source_original_processing = pandas.DataFrame(
set(tiles_to_source_original_processing['tile_id'].tolist()) -
set(tiles_incomplete['tile_id'].tolist()),
columns = ['tile_id'])
tiles_to_source_reprocessing_1 = pandas.DataFrame(
set(tiles_to_source_reprocessing_1['tile_id'].tolist()) -
set(tiles_incomplete['tile_id'].tolist()),
columns = ['tile_id'])
tiles_to_source_reprocessing_2 = pandas.DataFrame(
set(tiles_to_source_reprocessing_2['tile_id'].tolist()) -
set(tiles_incomplete['tile_id'].tolist()),
columns = ['tile_id'])
# Remove redundancies
tiles_to_source_original_processing = pandas.DataFrame(
set(tiles_to_source_original_processing['tile_id'].tolist()) -
set(tiles_to_source_reprocessing_2['tile_id'].tolist()),
columns = ['tile_id'])
tiles_to_source_reprocessing_1 = pandas.DataFrame(
set(tiles_to_source_reprocessing_1['tile_id'].tolist()) -
set(tiles_to_source_reprocessing_2['tile_id'].tolist()),
columns = ['tile_id'])
# source folders
folder_original_processing = 'D:/Jakob/dk_nationwide_lidar/data/outputs'
folder_reprocessing_1 = 'D:/Jakob/ecodes-dk-lidar/data/outputs'
folder_reprocessing_2 = 'D:/Jakob/ecodes-dk-lidar-reprocessing/data/outputs'
# destination folder
dest_folder = 'D:/Jakob/ecodes-dk-lidar-rev1/data/outputs'
# base folders
dtm_files = 'D:/Jakob/ecodes-dk-lidar-reprocessing/data/dtm'
laz_files = 'D:/Jakob/ecodes-dk-lidar-reprocessing/data/laz'
# determine output folder structure based on original processing
folders = []
for folder in scandir.scandir(folder_original_processing):
if folder.is_dir():
sub_folders = [sub_folder.path for sub_folder in scandir.scandir(folder.path) if sub_folder.is_dir()]
if len(sub_folders) > 0:
for sub_folder in sub_folders:
folders.append(sub_folder)
else:
folders.append(folder.path)
# remove variables that were / will be separately reprocessed (if present)
folders = [folder for folder in folders if not bool(re.match('.*tile_footprints.*', folder))]
folders = [folder for folder in folders if not bool(re.match('.*solar_radiation.*', folder))]
folders = [folder for folder in folders if not bool(re.match('.*date_stamp.*', folder))]
# Clean up file paths
folders = map(lambda folder: re.sub('\\\\', '/', folder), folders)
# Keep only relative paths
folders = map(lambda folder: '/' + os.path.relpath(folder,
folder_original_processing),
folders)
###!!! Break(s) for debugging !!!
##folders = [folders[1]]
##folders = ['/point_source_info/point_source_counts']
## 2) Function definitons
def list_files(folder_path):
files = []
# Scan directory
for file_name in scandir.scandir(folder_path):
files.append(file_name.name)
return(files)
def get_tile_ids(file_names):
# initiate empty list for tile_ids
tile_ids = []
# clean up file names
for i in range(0, len(file_names)):
file_names[i] = re.sub('\\\\', '/', file_names[i])
# fill list with tile_id
for file_name in file_names:
tile_id = re.sub('.*(\d{4}_\d{3}).*', '\g<1>', file_name)
tile_ids.append(tile_id)
# combine to data frame
files_df = pandas.DataFrame(zip(*[tile_ids, file_names]),
columns = ['tile_id', 'file_name'])
# return files_df
return(files_df)
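# A minimal illustration of the regex used above (the tile id '6049_512' is a
# hypothetical example following the 'rrrr_ccc' convention used throughout):
#   re.sub('.*(\d{4}_\d{3}).*', '\g<1>', 'DTM_1km_6049_512.tif')
#   returns '6049_512'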
def compare_tile_dfs(df1, df2):
if(set(df1['tile_id'].tolist()) ==
set(df2['tile_id'].tolist())):
return(pandas.DataFrame([], columns = ['tile_id']))
else:
diff = pandas.DataFrame(
set(df1['tile_id'].tolist()) -
set(df2['tile_id'].tolist()),
columns = ['tile_id'])
return(diff)
def check_var_from_source(var_folder, tiles_df, source_folder):
# Get all files for one variable from one source
# Set dest directory based on global variable
global dest_folder
out_folder = dest_folder + var_folder
# Set source folder
source_folder = source_folder + var_folder
# Generate df of tile_ids and file names
files_to_check = get_tile_ids(list_files(source_folder))
# check files
check_df = check_tiles(tiles_df, files_to_check, source_folder, out_folder)
return(check_df)
def check_var(var_folder):
# Get global variables
global tiles_to_source_original_processing, tiles_to_source_reprocessing_1, tiles_to_source_reprocessing_2, folder_original_processing, folder_reprocessing_1, folder_reprocessing_2
# Get all files for one variable from the three data sources
print('Checking: ' + var_folder)
print('\tOriginal processing')
original_check = check_var_from_source(var_folder,
tiles_to_source_original_processing,
folder_original_processing)
print('\n\tReprocessing #1')
reprocessing_1_check = check_var_from_source(var_folder,
tiles_to_source_reprocessing_1,
folder_reprocessing_1)
print('\n\tReprocessing #2')
reprocessing_2_check = check_var_from_source(var_folder,
tiles_to_source_reprocessing_2,
folder_reprocessing_2)
    # Combine check results and return a df of files with errors
files_with_errors = pandas.concat([original_check,
reprocessing_1_check,
reprocessing_2_check])
files_with_errors['variable'] = var_folder
print('\tDone.\n')
return(files_with_errors)
# The function below is thanks to stackoverflow users quantumSoup and user2653663
# https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
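# Usage sketch (paths are hypothetical): two checksums match only if the two
# files have identical content, which is how check_tiles() verifies copies:
#   md5('D:/source/some_tile.tif') == md5('D:/dest/some_tile.tif')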
def check_tiles(tiles_to_check, files_df, source_folder, out_folder):
# Subset files to check
files_to_check = files_df[files_df['tile_id'].
isin(tiles_to_check['tile_id'].tolist())]['file_name'].tolist()
    # initiate output lists
files = []
status = []
# Set counter to 0:
progress = 0
# Check files
for i in range(0, len(files_to_check)):
        # Check whether the files exist
if os.path.isfile(source_folder + '/' + files_to_check[i]):
if os.path.isfile(out_folder + '/' + files_to_check[i]):
match = md5(source_folder + '/' + files_to_check[i]) == md5(out_folder + '/' + files_to_check[i])
if not match:
files.append(files_to_check[i])
status.append('md5_mismatch')
else:
files.append(files_to_check[i])
status.append('out_file_missing')
else:
files.append(files_to_check[i])
status.append('source_file_missing')
# Update progress
progress = float(i + 1) / float(len(files_to_check))
# Update progress bar
print('\r\t|' +
'-' * int(round(progress * 54)) +
' ' * int(round((1 - progress) * 54)) +
'| ' +
str(int(round(progress * 100))) + '%'),
    # Compile outputs to dataframe and return
status_df = pandas.DataFrame(zip(*[files, status]), columns = ['file_name','status'])
return(status_df)
# Status
print(' done.\n')
## 3) Main body of script
## Check completeness of tile lists
# Status
print('Checking completeness of tile list to check...'),
# Get all tiles in data set
dhm_merged_tiles = get_tile_ids(list_files(laz_files))
# Compare with tiles to merge
missing_tiles = len(compare_tile_dfs(dhm_merged_tiles,
pandas.concat([tiles_to_source_original_processing,
tiles_to_source_reprocessing_1,
tiles_to_source_reprocessing_2])))
if missing_tiles > 0:
# Prompt for choice to stop
    del_choice = raw_input('\n' + str(missing_tiles) + ' tiles are missing! Continue anyway? ' +
                           '[y/n]')
if not del_choice == 'y':
print('Aborting on request or invalid choice!')
quit()
else:
print('Okay, continuing merger.')
else:
print('\n=> Sets are complete, proceeding as planned.\n')
## Check tiles for all variables
# Status
print('Starting check:\n\n')
files_checked_dfs = []
for var_folder in folders:
files_copied_df = check_var(var_folder)
files_checked_dfs.append(files_copied_df)
files_checked_df = pandas.concat(files_checked_dfs)
files_checked_df.to_csv('checksum_errors.csv', index = False)
# Status
print('Check complete!\n')
print('#' * 80 + '\n')
# EOF
|
jakobjassmann/ecodes-dk-lidar | scripts/generate_list_of_vrts.py | # Short script to generate a list of all vrt files
import glob
import re
from dklidar import settings
# list vrts
vrts = glob.glob(settings.output_folder + '*/*.vrt')
more_vrts = glob.glob(settings.output_folder + '*/*/*.vrt')
for vrt in more_vrts:
vrts.append(vrt)
# Clean up file paths
vrts = [re.sub(settings.output_folder[0:len(settings.output_folder)-1], '', vrt) for vrt in vrts]
vrts = [re.sub('\\\\', '/', vrt) for vrt in vrts]
vrts = [re.sub('^/', '', vrt) for vrt in vrts]
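# e.g. a path like '<output_folder>\canopy_height\canopy_height.vrt' becomes
# the relative 'canopy_height/canopy_height.vrt' ('canopy_height' is one of
# the output folders created by the functions in dklidar/points.py)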
# write out to file
out_file = open(settings.output_folder + '/list_of_vrts.txt', 'w')
out_file.write('\n'.join(vrts))
out_file.close()
|
jakobjassmann/ecodes-dk-lidar | dklidar/points.py | <reponame>jakobjassmann/ecodes-dk-lidar
### Functions for point cloud handling for the DK Lidar project
### <NAME> <EMAIL> 29 January 2019
## Imports
import re
import os
import opals
import subprocess
import numpy
import glob
import shutil
from osgeo import gdal_array
from datetime import datetime, timedelta
from dklidar import common
from dklidar import settings
##### Function definitions
## Import a single tile into ODM
def odm_import_single_tile(tile_id):
"""
Imports a single tile (specified by tile_id) into an ODM for subsequent processing.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: returns execution status
"""
# Initiate return value
return_value = ''
# Generate relevant file names:
laz_file = settings.laz_folder + '/PUNKTSKY_1km_' + tile_id + '.laz'
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
# Try import
try:
# Import tile id
import_tile = opals.Import.Import()
import_tile.inFile = laz_file
import_tile.outFile = odm_file
import_tile.commons.screenLogLevel = opals.Types.LogLevel.none
import_tile.run()
return_value = 'success'
except:
return_value = 'opalsError'
# return execution status
return return_value
## Load neighbourhood of tiles into ODM (this is currently not needed by any of the functions below)
def odm_import_mosaic(tile_id):
"""
    Imports a tile (specified by tile_id) and its 3 x 3 neighbourhood into a mosaiced ODM file for subsequent
processing.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: returns execution status
"""
# Initiate return value, open log file
return_value = ''
log_file = open('log.txt', 'a+')
# Retrieve row and col numbers for the current tile_id
center_row = int(re.sub('(\d+)_\d+', '\g<1>', tile_id))
center_col = int(re.sub('\d+_(\d+)', '\g<1>', tile_id))
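    # e.g. for a hypothetical tile_id '6049_512' this gives center_row = 6049
    # and center_col = 512, so the 3 x 3 window spans rows 6048-6050 and
    # columns 511-513.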
# Determine row and column numbers for tiles in the 3 x 3 window
rows_to_load = [center_row - 1, center_row, center_row + 1]
cols_to_load = [center_col - 1, center_col, center_col + 1]
# Generate list of tile_ids for tiles to load
tiles_to_load = []
for row in rows_to_load:
for col in cols_to_load:
tile_to_load = str(row) + '_' + str(col)
tiles_to_load.extend([tile_to_load])
    # Prep filenames and check if the files exist:
tile_file_names = []
for tile_to_load in tiles_to_load:
tile_file_name = settings.laz_folder + '/PUNKTSKY_1km_' + tile_to_load + '.laz'
if os.path.exists(tile_file_name):
tile_file_names.append(tile_file_name)
n_neighbours = len(tile_file_names)
    # Update log output depending on the number of valid neighbours
if n_neighbours == 9:
        log_file.write(tile_id + ' importing point clouds into ODM mosaic...\n' +
                       'Number of neighbours = ' + str(n_neighbours) + '. Complete!\n')
else:
log_file.write(tile_id + ' importing point clouds into ODM mosaic...\n' +
'Warning! Number of neighbours = ' + str(n_neighbours) +
'. Incomplete. Edge effects possible!\n')
# Generate output file name string
odm_file = settings.odm_mosaics_folder + '/odm_mosaic_' + tile_id + '.odm'
# Execute command as subprocess and return message:
try:
# Import tiles into odm.
import_tile = opals.Import.Import()
import_tile.inFile = tile_file_names
import_tile.outFile = odm_file
import_tile.commons.screenLogLevel = opals.Types.LogLevel.none
import_tile.run()
log_file.write(tile_id + ' success.\n\n')
return_value = return_value + 'success'
if n_neighbours != 9: return_value = 'Warning: Incomplete Neighbourhood!'
except:
return_value = 'opalsError'
log_file.write(tile_id + ' failed. OpalsError.\n\n')
# Write log output to log file
log_file.close()
# return status output
return return_value
## Def: Export tile footprint
def odm_generate_footprint(tile_id):
"""
Exports footprint from an odm file based on the tile_id in the DK nationwide dataset
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: returns execution status
"""
# Initiate return value
return_value = ''
log_file = open('log.txt', 'a+')
# Generate relevant file names:
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
temp_tif_file = os.getcwd() + '/temp_' + tile_id + '.tif'
footprint_file = settings.odm_footprint_folder + '/footprint_' + tile_id + '.shp'
# Try token raster export to temp tif
try:
# Export temporary tif
export_tif = opals.Cell.Cell()
export_tif.inFile = odm_file
export_tif.outFile = temp_tif_file
export_tif.feature = 'min'
export_tif.cellSize = 10 # This is also the default cell size, so technically not needed.
export_tif.limit = 'corner' # This switch is really important when working with tiles!
        # It sets the ROI to the extent of the bounding box of points in the ODM
export_tif.commons.screenLogLevel = opals.Types.LogLevel.none
export_tif.run()
log_file.write('\n' + tile_id + ' temporary raster export successful.\n\n')
except:
return_value = 'opalsError'
log_file.write('\n' + tile_id + ' temporary raster export failed.\n\n')
# Try generating footprint from temp tif
try:
# Specify gdal command
cmd = settings.gdaltlindex_bin + ' ' + footprint_file + ' ' + temp_tif_file
# Execute gdal command
log_file.write('\n' + tile_id + ' footprint generation... \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
tile_id + ' successful.\n\n')
# set exit status
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' footprint generation... \n' + tile_id + ' failed.\n\n')
if return_value == 'opalsError': pass
else: return_value = 'gdalError'
# Close log file
log_file.close()
# Remove temp raster file
try:
os.remove(temp_tif_file)
except:
pass
# return status output
return return_value
## Def: Validate CRS
def odm_validate_crs(tile_id, mosaic = False):
"""
Function to validate the crs for odm files (single tile and mosaic)
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:param mosaic: if True validates crs for mosaic also, default: False
:return: execution status
"""
# Initiate return value
return_value = ''
# Generate odm files path names
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
odm_mosaic = settings.odm_mosaics_folder + '/odm_mosaic_' + tile_id + '.odm'
# Retrieve CRS string for single tile
try:
odm_dm = opals.pyDM.Datamanager.load(odm_file)
crs_str = odm_dm.getCRS()
# Check whether CRS exists, if not assign, if different throw error.
if crs_str == settings.crs_wkt_opals:
return_value = 'Tile: match'
elif crs_str == '':
odm_dm.setCRS(settings.crs_wkt_opals)
return_value = 'Tile: empty - set'
else:
return_value = 'Tile: warning - no match'
odm_dm = None # This is needed as opals locks the file connection otherwise.
except:
        return_value = 'Tile: error; '
# Retrieve CRS string for mosaic
if mosaic == True:
try:
odm_dm = opals.pyDM.Datamanager.load(odm_mosaic)
crs_str = odm_dm.getCRS()
# Check whether CRS exists, if not assign, if different throw error.
if crs_str == settings.crs_wkt_opals:
return_value = return_value + '; Mosaic: match'
elif crs_str == '':
odm_dm.setCRS(settings.crs_wkt_opals)
return_value = return_value + '; Mosaic: empty - set'
else:
return_value = return_value + '; Mosaic: warning - no match'
odm_dm = None # This is needed as opals locks the file connection otherwise.
except:
return_value = return_value + 'Mosaic: error;'
return return_value
## Add height above ground (normalized z) to a tile odm
def odm_add_normalized_z(tile_id, mosaic = False):
"""
Adds a "normalizedZ' variable to each point in an ODM file by normalising the height using the 0.4 m DTM.
Can deal with either single tile odms or neighbourhood mosaics (option mosaic). If a mosaic is normalised,
then the corresponding dtm mosaic will have to be generated first (use dtm_neighbourhood_mosaic()).
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:param mosaic: boolean (true or false) specifies whether a single tile pointcloud or a neighbourhood mosaic
should be normalised
:return: execution status
"""
# Initiate return value
return_value = ''
# Generate file paths
if(mosaic == False):
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
dtm_file = settings.dtm_folder + '/DTM_1km_' + tile_id + '.tif'
else:
odm_file = settings.odm_mosaics_folder + '/odm_mosaic_' + tile_id + '.odm'
dtm_file = settings.dtm_folder + '/dtm_' + tile_id + '_mosaic.tif'
# Normalise the point cloud data
try:
add_normalized_z = opals.AddInfo.AddInfo()
add_normalized_z.inFile = odm_file
add_normalized_z.gridFile = dtm_file
add_normalized_z.attribute = 'normalizedZ = z - r[0]'
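        # In this formula r[0] presumably refers to the first band of the
        # gridFile (the DTM elevation sampled under each point), so
        # normalizedZ is the height of each point above the terrain in metres.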
add_normalized_z.commons.screenLogLevel = opals.Types.LogLevel.none
add_normalized_z.commons.nbThreads = settings.nbThreads
add_normalized_z.run()
return_value = 'success'
except:
return_value = 'opalsError'
    # Return exit status
return return_value
## Export mean and sd of height above ground for all 10 m cells in a tile
def odm_export_normalized_z(tile_id):
"""
    Exports mean and standard deviation of the normalizedZ variable for the 10 m x 10 m raster grid.
This function requires the normalizedZ to be added first (use odm_add_normalized_z()).
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value
return_value = ''
log_file = open('log.txt', 'a+')
# Set file and folder paths
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
temp_file_mean = os.getcwd() + '/temp_' + tile_id + '_mean.tif'
temp_file_sd = os.getcwd() + '/temp_' + tile_id + '_sd.tif'
out_folder = settings.output_folder + '/normalized_z'
out_file_mean = out_folder + '/normalized_z_mean/normalized_z_mean_' + tile_id + '.tif'
out_file_sd = out_folder + '/normalized_z_sd/normalized_z_sd_' + tile_id + '.tif'
# Create folders if they do not already exists
if not os.path.exists(out_folder): os.mkdir(out_folder)
if not os.path.exists(out_folder + '/normalized_z_mean'): os.mkdir(out_folder + '/normalized_z_mean')
if not os.path.exists(out_folder + '/normalized_z_sd'): os.mkdir(out_folder + '/normalized_z_sd')
# Export normalized z raster mean and sd
try:
# Initialise exporter
export_normalized_z = opals.Cell.Cell()
# Export mean
export_normalized_z.inFile = odm_file
export_normalized_z.outFile = temp_file_mean
export_normalized_z.attribute = 'normalizedZ'
export_normalized_z.feature = 'mean'
export_normalized_z.cellSize = settings.out_cell_size
export_normalized_z.limit = 'corner' # This switch is really important when working with tiles!
        # It sets the ROI to the extent of the bounding box of points in the ODM
export_normalized_z.filter = settings.all_classes
export_normalized_z.commons.screenLogLevel = opals.Types.LogLevel.none
export_normalized_z.commons.nbThreads = settings.nbThreads
export_normalized_z.run()
# Reset exporter
export_normalized_z.reset()
# Export sd
export_normalized_z = opals.Cell.Cell()
export_normalized_z.inFile = odm_file
export_normalized_z.outFile = temp_file_sd
export_normalized_z.attribute = 'normalizedZ'
export_normalized_z.feature = 'stdDev'
export_normalized_z.cellSize = settings.out_cell_size
export_normalized_z.limit = 'corner' # This switch is really important when working with tiles!
        # It sets the ROI to the extent of the bounding box of points in the ODM
export_normalized_z.filter = settings.all_classes
export_normalized_z.commons.screenLogLevel = opals.Types.LogLevel.none
export_normalized_z.commons.nbThreads = settings.nbThreads
export_normalized_z.run()
return_value = 'success'
except:
return_value = 'opalsError'
# Stretch and convert to 16 bit integer
try:
# Construct gdal command for mean
cmd = settings.gdal_calc_bin + \
'-A ' + temp_file_mean + ' ' + \
'--outfile=' + out_file_mean + ' ' + \
'--calc=rint(A*100) ' + \
'--type=Int16 --NoDataValue=-9999 '
# Execute and log command
log_file.write('\n' + tile_id + ' rounding mean to int16 and calculation success. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_file_mean)
# Construct gdal command for sd
cmd = settings.gdal_calc_bin + \
'-A ' + temp_file_sd + ' ' + \
'--outfile=' + out_file_sd + ' ' + \
'--calc=rint(A*100) ' + \
'--type=Int16 --NoDataValue=-9999 '
# Execute and log command
log_file.write('\n' + tile_id + ' rounding sd to int16 and calculation success. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply masks
common.apply_mask(out_file_sd)
return_value = 'success'
except:
if return_value == 'opalsError':
pass
else:
return_value = 'gdalError'
log_file.write('\n' + tile_id + ' normalized_z export failed. \n')
# Tidy up
try:
os.remove(temp_file_mean)
os.remove(temp_file_sd)
except:
pass
# Close log file
log_file.close()
    # Return exit status
return return_value
## Export canopy height for all 10 m cells in a tile
def odm_export_canopy_height(tile_id):
"""
    Exports the canopy height (the 95th percentile of normalised height, computed only for points classified as vegetation)
for the 10 m x 10 m raster grid. This function requires the normalizedZ to be added first (use odm_add_normalized_z()).
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value
return_value = ''
log_file = open('log.txt', 'a+')
# Generate file paths
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
temp_file1 = os.getcwd() + '/' + tile_id + '_temp1.tif'
temp_file2 = os.getcwd() + '/' + tile_id + '_temp2.tif'
out_folder = settings.output_folder + '/canopy_height'
out_file = out_folder + '/canopy_height_' + tile_id + '.tif'
# Create folder if it does not exist
if not os.path.exists(out_folder): os.mkdir(out_folder)
# Export canopy height
try:
# Initialise exporter
export_canopy_height = opals.Cell.Cell()
        # Export 95th percentile of normalised height
export_canopy_height.inFile = odm_file
export_canopy_height.outFile = temp_file1
export_canopy_height.attribute = 'normalizedZ'
export_canopy_height.feature = 'quantile:0.95'
# Apply extraction only to points classified as vegetation:
export_canopy_height.filter = settings.veg_classes_filter
        # Set no data value to zero. Later the no data value will be set to -9999; this ensures that there are no
# holes in the dataset for pixels where there are no points classified as vegetation.
export_canopy_height.noData = 0
export_canopy_height.cellSize = settings.out_cell_size
export_canopy_height.limit = 'corner' # This switch is really important when working with tiles!
        # It sets the ROI to the extent of the bounding box of points in the ODM
export_canopy_height.commons.screenLogLevel = opals.Types.LogLevel.none
export_canopy_height.commons.nbThreads = settings.nbThreads
export_canopy_height.run()
return_value = 'success'
except:
return_value = 'opalsError'
# Stretch by 100, round to Int16 and set Nodata value to -9999
try:
# Construct gdal command to set no data value (this is done first to keep no data point counts as 0)
cmd = settings.gdal_translate_bin + ' -a_nodata -9999 ' + temp_file1 + ' ' + temp_file2
        # Execute gdal command and add to log output
log_file.write('\n' + tile_id + ' setting no data value... \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
tile_id + ' successful.\n\n')
        # Construct gdal command to stretch and round to Int16
cmd = settings.gdal_calc_bin + \
'-A ' + temp_file2 + ' ' + \
'--outfile=' + out_file + ' ' + \
'--calc=rint(A*100) ' + \
'--type=Int16 ' + \
'--NoDataValue=-9999'
# Execute command and log
log_file.write('\n' + tile_id + ' stretching and rounding success. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_file)
# set exit status
return_value = 'success'
except:
log_file.write(tile_id + ' setting no data value for canopy height failed.\n\n')
if return_value == 'opalsError': pass
else: return_value = 'gdalError'
# Close log file
log_file.close()
# Remove temp raster file
try:
os.remove(temp_file1)
os.remove(temp_file2)
except:
pass
    # Return exit status
return return_value
## Export a point count for a specific height range and set of classes for all 10 m cells in a tile
def odm_export_point_count(tile_id, name = 'vegetation_point_count',
lower_limit = -1, upper_limit = 50.0,
point_classes = None):
"""
Exports point counts for each 10 m x 10 m cell in an ODM and a given height interval specified by
    the lower and upper limit parameters, as well as a given set of point classes specified by the point_classes parameter.
Height parameters are given as normalised height above ground.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:param name: identifier name for the point count - used in file and folder naming during export
:param lower_limit: lower limit for the height interval (normalised height in m)
:param upper_limit: upper limit for the height interval (normalised height in m)
:param point_classes: classes to subset from
:return: execution status
"""
# Initiate return value and log_output
return_value = ''
log_file = open('log.txt', 'a+')
# Get temporary working directory
wd = os.getcwd()
# Initiate point_classes default value if no value is provided:
if point_classes is None: point_classes = [3,4,5] # Veg class points
# Generate paths
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
out_folder = settings.output_folder + '/point_count'
if lower_limit < 10 and lower_limit >= 0: lower_limit_str = '0' + str(lower_limit)
elif lower_limit == -1: lower_limit_str = '-01'
else: lower_limit_str = str(lower_limit)
if upper_limit < 10 and upper_limit >= 0: upper_limit_str = '0' + str(upper_limit)
else: upper_limit_str = str(upper_limit)
prefix = name + '_' + lower_limit_str + 'm-' + upper_limit_str + 'm'
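    # e.g. name='vegetation_point_count', lower_limit=0, upper_limit=50 yields
    # the prefix 'vegetation_point_count_00m-50m', matching the folder and file
    # names referenced by odm_calc_proportions() further below.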
temp_file = wd + '/temp_' + tile_id + '.tif'
out_file = out_folder + '/' + prefix + '/' + prefix + '_' + tile_id + '.tif'
# Create folders if they don't exist
if not os.path.exists(out_folder): os.mkdir(out_folder)
if not os.path.exists(out_folder + '/' + prefix): os.mkdir(out_folder + '/' + prefix)
# Export point count
try:
# Initialise exporter
export_point_count = opals.Cell.Cell()
        # Specify filter strings:
height_filter = 'generic[NormalizedZ >= ' + str(lower_limit) + ' and NormalizedZ < ' + str(upper_limit) + ']'
class_filter = 'Generic[Classification == ' + \
' OR Classification == '.join([str(point_class) for point_class in point_classes]) + ']'
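        # e.g. for the default point_classes [3, 4, 5] the class filter expands
        # to 'Generic[Classification == 3 OR Classification == 4 OR
        # Classification == 5]'.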
# Export point count
export_point_count.inFile = odm_file
export_point_count.outFile = temp_file
export_point_count.filter = height_filter + ' AND ' + class_filter
export_point_count.feature = 'pcount'
export_point_count.cellSize = settings.out_cell_size
export_point_count.limit = 'corner' # This switch is really important when working with tiles!
        # It sets the ROI to the extent of the bounding box of points in the ODM
export_point_count.noData = 0
export_point_count.commons.screenLogLevel = opals.Types.LogLevel.none
export_point_count.commons.nbThreads = settings.nbThreads
export_point_count.run()
return_value = 'success'
except:
return_value = 'opalsError'
# Convert to 16 bit integer and set no data value to -9999
try:
# Construct gdal command
cmd = settings.gdal_translate_bin + \
'-ot Int16 -a_nodata -9999 ' + \
temp_file + ' ' + \
out_file
# Execute and log command
log_file.write('\n' + tile_id + ' converting to Int16 success. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_file)
return_value = 'success'
except:
if return_value == 'opalsError':
pass
else:
log_file.write('\n' + tile_id + ' converting to Int16 for ' + prefix + ' failed. \n')
return_value = 'gdalError'
# Tidy up
try:
os.remove(temp_file)
except:
pass
# Close log file
log_file.close()
    # Return exit status
return return_value
## Export point counts for a pre-defined set of height ranges and classes
def odm_export_point_counts(tile_id):
"""
Exports point counts for multiple pre-defined classes and height intervals by calling the
odm_export_point_count() function.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate empty list for return values
return_values = []
## Ground point count
return_values.append(odm_export_point_count(tile_id, 'ground_point_count', -1, 1, [2]))
## Water point count
return_values.append(odm_export_point_count(tile_id, 'water_point_count', -1, 1, [9]))
## Ground and water point count
return_values.append(odm_export_point_count(tile_id, 'ground_and_water_point_count', -1, 1, [2,9]))
## Vegetation point count
return_values.append(odm_export_point_count(tile_id, 'vegetation_point_count', 0, 50, [3,4,5]))
## Building point counts
return_values.append(odm_export_point_count(tile_id, 'building_point_count', -1, 50, [6]))
## All classes
return_values.append(odm_export_point_count(tile_id, 'total_point_count', -1, 50, [2,3,4,5,6,9]))
    ## Vegetation point counts for continuous height bins
# 0-2 m at 0.5 m intervals
for lower in numpy.arange(0, 2.0, 0.5):
return_values.append(odm_export_point_count(tile_id, 'vegetation_point_count', lower, lower + 0.5, [3,4,5]))
# 2-20 m at 1 m intervals
for lower in range(2, 20, 1):
return_values.append(odm_export_point_count(tile_id, 'vegetation_point_count', lower, lower + 1, [3,4,5]))
# 20-25 m at 5 m interval
return_values.append(odm_export_point_count(tile_id, 'vegetation_point_count', 20, 25, [3,4,5]))
# 25 m to 50 m
return_values.append(odm_export_point_count(tile_id, 'vegetation_point_count', 25, 50, [3,4,5]))
# Set return value status
    # There are only two return value states, so if there is more than one
    # return value in the set, one of them has to be an opalsError
return_values = set(return_values)
if len(return_values) > 1:
return_value = "opalsError"
else:
return_value = list(return_values)[0]
return return_value
## Calculate proportions based on two point counts
def odm_calc_proportions(tile_id, prop_name, point_count_id1, point_count_id2):
"""
Function to calculate point count proportions for two point counts.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
    :param prop_name: name to be assigned to the proportions output
    :param point_count_id1: name of the point count used as the numerator
    :param point_count_id2: name of the point count used as the denominator
:return: execution status
"""
return_value = ''
log_file = open('log.txt', 'a+')
# Generate paths for numerator and denominator
num_file = settings.output_folder + '/point_count/' + point_count_id1 + '/' + point_count_id1 + '_' + tile_id + '.tif'
den_file = settings.output_folder + '/point_count/' + point_count_id2 + '/' + point_count_id2 + '_' + tile_id + '.tif'
out_folder = settings.output_folder + '/proportions'
out_file = out_folder + '/' + prop_name + '/' + prop_name + '_' + tile_id + '.tif'
# Create folders if they do not exist
if not os.path.exists(out_folder): os.mkdir(out_folder)
if not os.path.exists(out_folder + '/' + prop_name): os.mkdir(out_folder + '/' + prop_name)
# get wd
temp_wd = os.getcwd()
# specify temp file path
temp_file = temp_wd + '/temp.tif'
# Attempt calculating the proportions using gdal_calc
try:
        # Construct gdal command. NB: true_divide is needed here to force floating-point division; a cast to Int16 follows separately.
cmd = settings.gdal_calc_bin + \
'-A ' + num_file + ' ' +\
'-B ' + den_file + ' ' +\
'--outfile=' + temp_file + ' ' + \
'--type=Int16 ' +\
'--calc=rint(10000*true_divide(A,B)) ' + \
'--NoDataValue=-9999'
log_file.write(cmd)
# Execute gdal command
log_file.write('\n' + tile_id + ' calculated proportions ' + prop_name + '... \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Round and convert to int16
cmd = settings.gdal_translate_bin + \
'-ot Int16 ' + \
'-a_nodata -9999 ' +\
temp_file + ' ' +\
out_file + ' '
log_file.write(cmd)
# Execute gdal command
log_file.write('\n' + tile_id + ' calculated proportions ' + prop_name + '... \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
os.remove(temp_file)
# Apply mask(s)
common.apply_mask(out_file)
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' calculation of proportions ' + prop_name + ' failed. gdalError \n')
return_value = 'gdalError'
# Close log file
log_file.close()
return return_value
## Export a pre-defined list of proportions for a tile
def odm_export_proportions(tile_id):
"""
    Exports proportions for: canopy openness, canopy height profile, building point counts
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: exit status
"""
# Initiate return values
return_values = []
## Export canopy openness
return_values.append(odm_calc_proportions(tile_id, 'canopy_openness', 'ground_and_water_point_count_-01m-01m',
'total_point_count_-01m-50m'))
    ## Export vegetation density
return_values.append(odm_calc_proportions(tile_id, 'vegetation_density', 'vegetation_point_count_00m-50m',
'total_point_count_-01m-50m'))
## Export canopy height profile
# 0-2 m at 0.5 m intervals
for lower in numpy.arange(0, 2, 0.5):
veg_height_bin = 'vegetation_point_count_0' + str(lower) + 'm-0' + str(lower + 0.5) + 'm'
prop_variable_bin = 'vegetation_proportion_0' + str(lower) + 'm-0' + str(lower + 0.5) + 'm'
return_values.append(odm_calc_proportions(tile_id, prop_variable_bin, veg_height_bin,
'vegetation_point_count_00m-50m'))
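    # e.g. lower = 0.5 in the loop above yields the bin name
    # 'vegetation_point_count_00.5m-01.0m', which matches the file prefixes
    # produced by odm_export_point_count() for the same interval.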
# 2-9 m at 1 m intervals
for lower in range(2, 9, 1):
veg_height_bin = 'vegetation_point_count_0' + str(lower) + 'm-0' + str(lower + 1) + 'm'
prop_variable_bin = 'vegetation_proportion_0' + str(lower) + 'm-0' + str(lower + 1) + 'm'
return_values.append(odm_calc_proportions(tile_id, prop_variable_bin, veg_height_bin,
'vegetation_point_count_00m-50m'))
# 9-10 m
return_values.append(odm_calc_proportions(tile_id, 'vegetation_proportion_09m-10m',
'vegetation_point_count_09m-10m',
'vegetation_point_count_00m-50m'))
# 10-20 m at 1 m intervals
for lower in range(10, 20, 1):
veg_height_bin = 'vegetation_point_count_' + str(lower) + 'm-' + str(lower + 1) + 'm'
prop_variable_bin = 'vegetation_proportion_' + str(lower) + 'm-' + str(lower + 1) + 'm'
return_values.append(odm_calc_proportions(tile_id, prop_variable_bin, veg_height_bin,
'vegetation_point_count_00m-50m'))
# 20-25 m
return_values.append(odm_calc_proportions(tile_id, 'vegetation_proportion_20m-25m',
'vegetation_point_count_20m-25m',
'vegetation_point_count_00m-50m'))
# 25-50 m
return_values.append(odm_calc_proportions(tile_id, 'vegetation_proportion_25m-50m',
'vegetation_point_count_25m-50m',
'vegetation_point_count_00m-50m'))
# Export building proportion
return_values.append(odm_calc_proportions(tile_id, 'building_proportion',
'building_point_count_-01m-50m',
'total_point_count_-01m-50m'))
# Set return value status
    # There are only two return value states, so if there is more than one
    # return value in the set, one of them has to be a gdalError
return_values = set(return_values)
if len(return_values) > 1:
return_value = "gdalError"
else:
return_value = list(return_values)[0]
return return_value
## Export mean and sd of the amplitude variable for all 10 m cells in a tile
def odm_export_amplitude(tile_id):
"""
Exports mean and sd for the lidar amplitude for all 10 m x 10 m cells in a tile.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value
return_value = ''
# Generate file paths
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
out_folder = settings.output_folder + '/amplitude'
out_file_mean = out_folder + '/amplitude_mean/amplitude_mean_' + tile_id + '.tif'
out_file_sd = out_folder + '/amplitude_sd/amplitude_sd_' + tile_id + '.tif'
# Create folders if they do not exist
if not os.path.exists(out_folder): os.mkdir(out_folder)
if not os.path.exists(out_folder + '/amplitude_mean'): os.mkdir(out_folder + '/amplitude_mean')
if not os.path.exists(out_folder + '/amplitude_sd'): os.mkdir(out_folder + '/amplitude_sd')
# Export amplitude mean and sd using OPALS Cell
try:
# Initialise exporter
export_amplitude = opals.Cell.Cell()
# Export mean
export_amplitude.inFile = odm_file
export_amplitude.outFile = out_file_mean
export_amplitude.attribute = 'amplitude'
export_amplitude.feature = 'mean'
export_amplitude.cellSize = settings.out_cell_size
export_amplitude.filter = settings.all_classes # all classes (2,3,4,5,6,9)
export_amplitude.limit = 'corner' # This switch is really important when working with tiles!
        # It sets the ROI to the extent of the bounding box of points in the ODM
export_amplitude.commons.screenLogLevel = opals.Types.LogLevel.none
export_amplitude.commons.nbThreads = settings.nbThreads
export_amplitude.run()
# Reset exporter
export_amplitude.reset()
# Export sd
export_amplitude = opals.Cell.Cell()
export_amplitude.inFile = odm_file
export_amplitude.outFile = out_file_sd
export_amplitude.attribute = 'amplitude'
export_amplitude.feature = 'stdDev'
export_amplitude.cellSize = settings.out_cell_size
export_amplitude.filter = settings.all_classes # all classes (2,3,4,5,6,9)
export_amplitude.limit = 'corner' # This switch is really important when working with tiles!
        # It sets the ROI to the extent of the bounding box of points in the ODM
export_amplitude.commons.screenLogLevel = opals.Types.LogLevel.none
export_amplitude.commons.nbThreads = settings.nbThreads
export_amplitude.run()
# Apply mask(s)
common.apply_mask(out_file_mean)
common.apply_mask(out_file_sd)
return_value = 'success'
except:
return_value = 'opalsError'
    # Return exit status
return return_value
## Export flight strip information for all 10 m cells in a tile
def odm_export_point_source_info(tile_id):
"""
Extracts point source statistics for the 10 m x 10 m cells of the point cloud.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
    # Initiate return value
return_value = ''
# Initiate log output
log_file = open('log.txt', 'a+')
# get current work dir string
temp_wd = os.getcwd()
# Set file paths
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
out_folder = settings.output_folder + '/point_source_info'
out_folder_ids = out_folder + '/point_source_ids'
out_folder_nids = out_folder + '/point_source_nids'
out_folder_counts = out_folder + '/point_source_counts'
out_folder_prop = out_folder + '/point_source_proportion'
# Create folders if they do not exist
if not os.path.exists(out_folder): os.mkdir(out_folder)
if not os.path.exists(out_folder_ids): os.mkdir(out_folder_ids)
if not os.path.exists(out_folder_nids): os.mkdir(out_folder_nids)
if not os.path.exists(out_folder_counts): os.mkdir(out_folder_counts)
if not os.path.exists(out_folder_prop): os.mkdir(out_folder_prop)
## Look up unique point source ids found in odm file
try:
# Open odm in python DM
dm = opals.pyDM.Datamanager.load(odm_file)
# Create layout and add 'PointSourceID' column
lf = opals.pyDM.AddInfoLayoutFactory()
lf.addColumn(dm, 'PointSourceId', True)
layout = lf.getLayout()
# Get set of histograms for layout
# (this will only have one item, the histogram for the point source id column)
histograms_set = dm.getHistogramSet(layout)
        # Initiate empty list of point source ids
point_source_ids = []
# Load unique point source ids from histogram
# Note: The histogram set does not allow subsetting, so we loop through it.
# As it will only have one object, this does not matter
for histo in histograms_set.histograms():
for value, count in histo.values():
point_source_ids.append(value)
# Remove dm object and close connection to odm file for later use
del dm
## Use opals cell to extract point counts for each point source id
for point_source_id in point_source_ids:
            # Initiate opals cell module
export_point_count = opals.Cell.Cell()
            # Initiate filter string
point_classes = [2, 3, 4, 5, 6, 9]
class_filter = 'Generic[Classification == ' + \
' OR Classification == '.join([str(point_class) for point_class in point_classes]) + ']'
# Export point count
export_point_count.inFile = odm_file
export_point_count.outFile = 'temp_count_' + str(point_source_id) + '.tif'
export_point_count.filter = class_filter + \
' AND Generic[PointSourceId == ' + str(point_source_id) + ']'
export_point_count.feature = 'pcount'
export_point_count.cellSize = settings.out_cell_size
export_point_count.limit = 'corner' # This switch is really important when working with tiles!
            # It sets the ROI to the extent of the bounding box of points in the ODM
export_point_count.noData = 0
export_point_count.commons.screenLogLevel = opals.Types.LogLevel.none
export_point_count.commons.nbThreads = settings.nbThreads
export_point_count.run()
export_point_count.reset()
## Convert to int16 and set no data to -9999 and apply mask
for point_source_id in point_source_ids:
cmd = settings.gdal_translate_bin + '-ot Int16 -a_nodata -9999 ' + \
temp_wd + '/temp_count_' + str(point_source_id) + '.tif ' + \
out_folder_counts + '/point_source_counts_' + tile_id + '_' + str(point_source_id) + '.tif '
log_file.write('\n' + tile_id + ' ' + str(point_source_id) + ' converted point source file to int16. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
common.apply_mask(out_folder_counts +
                              '/point_source_counts_' + tile_id + '_' + str(point_source_id) + '.tif')
        ## Determine the number of unique point source ids per cell using gdal_calc.
# Prepare in file string and equation string
alphabet = ['A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'P', 'Q', 'R',
'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z']
files_string = ''
equation = []
for i in range(len(point_source_ids)):
files_string = files_string + ' -' + alphabet[i] + ' ' + out_folder_counts + \
'/point_source_counts_' + tile_id + '_' + str(point_source_ids[i]) + '.tif '
equation.append(alphabet[i])
equation = '1*greater(' + ',0)+1*greater('.join(equation) + ',0)'
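        # e.g. for three point source ids the equation reads
        # '1*greater(A,0)+1*greater(B,0)+1*greater(C,0)', i.e. each cell is
        # incremented by one for every flight strip contributing at least one point.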
# Construct gdal command
cmd = settings.gdal_calc_bin + files_string + \
'--outfile=' + out_folder_nids + '/point_source_nids_' + tile_id + '.tif' + \
' --calc=' + equation + \
' --type=Int16 --NoDataValue=-9999'
# Execute gdal command
log_file.write('\n' + tile_id + ' extracted number of unique point source ids. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
        common.apply_mask(out_folder_nids + '/point_source_nids_' + tile_id + '.tif')
        ## Calculate proportion of hits per cell per point source using gdal_calc
# Calculate total sum of points per cell, prepare gdal command
alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']
files_string = ''
equation = []
for i in range(len(point_source_ids)):
files_string = files_string + ' -' + alphabet[i] + ' ' + out_folder_counts + \
'/point_source_counts_' + tile_id + '_' + str(point_source_ids[i]) + '.tif '
equation.append(alphabet[i])
equation = '+'.join(equation)
# Construct gdal command
cmd = settings.gdal_calc_bin + files_string + '--outfile=' + temp_wd + '/temp_total_points.tif ' + \
'--calc=' + equation + ' --NoDataValue=-9999'
# Execute gdal command
log_file.write('\n' + tile_id + ' created temporary total point count file. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
## Calculate proportions using gdal_calc, round, stretch by 10000
for point_source_id in point_source_ids:
# Construct gdal command
cmd = settings.gdal_calc_bin + \
'-A ' + out_folder_counts + '/point_source_counts_' + tile_id + '_' + str(point_source_id) + '.tif ' + \
'-B ' + temp_wd + '/temp_total_points.tif ' + \
'--outfile=' + temp_wd + '/point_source_prop_' + str(point_source_id) + '.tif ' + \
'--calc=rint(true_divide(A,B)*10000) ' + '--type=Float32 --NoDataValue=-9999'
# Execute gdal command
log_file.write('\n' + tile_id + ' calculated proportions for ' + str(point_source_id) + '. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
## Convert proportions to int16
for point_source_id in point_source_ids:
cmd = settings.gdal_translate_bin + '-a_nodata -9999 -ot Int16 ' + \
temp_wd + '/point_source_prop_' + str(point_source_id) + '.tif ' + \
out_folder_prop + '/point_source_prop_' + tile_id + '_' + str(point_source_id) + '.tif '
log_file.write('\n' + tile_id + ' ' + str(point_source_id) + ' converted proportion to int16. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_folder_prop + '/point_source_prop_' + tile_id + '_' + str(point_source_id) + '.tif')
## Create a layer with presence / absence of point source id indicated by the point source id itself
for point_source_id in point_source_ids:
# Construct gdal command
cmd = settings.gdal_calc_bin + \
'-A ' + out_folder_counts + '/point_source_counts_' + tile_id + '_' + str(point_source_id) + '.tif ' + \
'--outfile=' + temp_wd + '/temp_presence_' + str(point_source_id) + '.tif ' + \
'--calc=' + str(point_source_id) + '*greater(A,0)' + \
' --type=Int32 --NoDataValue=-9999'
# Execute gdal command
log_file.write('\n' + tile_id + ' created temporary presence layer for ' + \
str(point_source_id) + '. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
## Merge files into one using gdal_merge
# Prepare gdal command
in_files_string = '.tif ' + temp_wd + '/temp_presence_'
in_files_string = temp_wd + '/temp_presence_' + in_files_string.join(
[str(i) for i in point_source_ids]) + '.tif'
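        # (the join above expands to '<wd>/temp_presence_<id1>.tif
        # <wd>/temp_presence_<id2>.tif ...', one input file per point source id)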
# Construct gdal command
cmd = settings.gdal_merge_bin + '-a_nodata -9999 -separate ' + \
'-o ' + out_folder_ids + '/point_source_ids_' + tile_id + '.tif ' + \
in_files_string
# Execute gdal command
log_file.write('\n' + tile_id + ' merged temporary layers in point source ids file. \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_folder_ids + '/point_source_ids_' + tile_id + '.tif')
        # The 'majority' stat produced by opals is not reliable... I'm leaving the below code for legacy reasons.
        # 'majority' statistics will have to be calculated from the above generated rasters by hand.
# ## Extract mode of point count ids
# # Initate opals cell module
# export_point_mode = opals.Cell.Cell()
#
# # Export point count
# export_point_mode.inFile = odm_file
# export_point_mode.outFile = out_folder_mode + '/point_source_mode_' + tile_id + '.tif'
# export_point_mode.filter = settings.ground_and_veg_classes_filter
# export_point_mode.attribute = 'PointSourceId'
# export_point_mode.feature = 'majority'
# export_point_mode.cellSize = settings.out_cell_size
# export_point_mode.limit = 'corner' # This switch is really important when working with tiles!
# # It sets the ROI to the extent to the bounding box of points in the ODM
# export_point_mode.commons.screenLogLevel = opals.Types.LogLevel.none
# export_point_mode.commons.nbThreads = settings.nbThreads
# export_point_mode.run()
return_value = 'success'
except:
return_value = 'opalsError'
# Close log file
log_file.close()
# remove temporary files
for temp_file in glob.glob(temp_wd + '/*.tif'):
try:
os.remove(temp_file)
except:
pass
return return_value
## Export date stamps for all 10 m cells in a tile
def odm_export_date_stamp(tile_id):
"""
    Exports the earliest, most recent and most common (mode) date stamp for each 10 m x 10 m cell.
    This is done by converting GPS time to an Int32 with the format YYYYMMDD.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# Initiate return value
return_value = ''
log_file = open('log.txt', 'a+')
# Set file and folder paths
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
temp_file_maj = re.sub('\\\\', '/', os.getcwd()) + '/temp_' + tile_id + '_maj.tif'
temp_file_min = re.sub('\\\\', '/', os.getcwd()) + '/temp_' + tile_id + '_min.tif'
temp_file_max = re.sub('\\\\', '/', os.getcwd()) + '/temp_' + tile_id + '_max.tif'
temp_file_maj_2 = re.sub('\\\\', '/', os.getcwd()) + '/temp_' + tile_id + '_maj_2.tif'
temp_file_min_2 = re.sub('\\\\', '/', os.getcwd()) + '/temp_' + tile_id + '_min_2.tif'
temp_file_max_2 = re.sub('\\\\', '/', os.getcwd()) + '/temp_' + tile_id + '_max_2.tif'
out_folder_all = settings.output_folder + '/date_stamp'
out_folder_maj = out_folder_all + '/date_stamp_mode'
out_folder_min = out_folder_all + '/date_stamp_min'
out_folder_max = out_folder_all + '/date_stamp_max'
out_file_maj = out_folder_maj + '/date_stamp_mode_' + tile_id + '.tif'
out_file_min = out_folder_min + '/date_stamp_min_' + tile_id + '.tif'
out_file_max = out_folder_max + '/date_stamp_max_' + tile_id + '.tif'
# Create folders if they do not already exists
for folder in [out_folder_all, out_folder_maj, out_folder_min, out_folder_max]:
if not os.path.exists(folder): os.mkdir(folder)
# Add GPSDate attribute to point cloud (veg points only)
try:
        log_file.write('\n' + tile_id + ' adding GPSDate. \n')
add_GPSDate = opals.AddInfo.AddInfo()
add_GPSDate.inFile = odm_file
add_GPSDate.attribute = '_GPSDay=floor(GPSTime/(60*60*24))'
add_GPSDate.commons.screenLogLevel = opals.Types.LogLevel.none
add_GPSDate.commons.nbThreads = settings.nbThreads
add_GPSDate.filter = settings.veg_classes_filter
add_GPSDate.run()
add_GPSDate.reset()
add_GPSDate = None
log_file.write(tile_id + ' success. \n')
except:
return_value = 'opalsError'
# Export time stamp mode, min and max
try:
# Initialise exporter
export_time_stamp = opals.Cell.Cell()
# Export most common date stamp (mode)
log_file.write(tile_id + ' exporting mode GPSDate raster... \n')
export_time_stamp.inFile = odm_file
export_time_stamp.outFile = temp_file_maj
export_time_stamp.attribute = '_GPSDay'
export_time_stamp.feature = 'majority'
export_time_stamp.cellSize = settings.out_cell_size
export_time_stamp.limit = 'corner' # This switch is really important when working with tiles!
export_time_stamp.filter = settings.veg_classes_filter
export_time_stamp.noData = 0.0
export_time_stamp.commons.screenLogLevel = opals.Types.LogLevel.none
export_time_stamp.commons.nbThreads = settings.nbThreads
export_time_stamp.run()
export_time_stamp.reset()
log_file.write(tile_id + ' success. \n')
# Export earliest date stamp (min)
log_file.write(tile_id + ' exporting min GPSDate raster... \n')
export_time_stamp.inFile = odm_file
export_time_stamp.outFile = temp_file_min
export_time_stamp.attribute = '_GPSDay'
export_time_stamp.feature = 'min'
export_time_stamp.cellSize = settings.out_cell_size
export_time_stamp.limit = 'corner' # This switch is really important when working with tiles!
export_time_stamp.filter = settings.veg_classes_filter
export_time_stamp.noData = 0.0
export_time_stamp.commons.screenLogLevel = opals.Types.LogLevel.none
export_time_stamp.commons.nbThreads = settings.nbThreads
export_time_stamp.run()
export_time_stamp.reset()
log_file.write(tile_id + ' success. \n')
# Export most recent date stamp (max)
log_file.write(tile_id + ' exporting max GPSDate raster... \n')
export_time_stamp.inFile = odm_file
export_time_stamp.outFile = temp_file_max
export_time_stamp.attribute = '_GPSDay'
export_time_stamp.feature = 'max'
export_time_stamp.cellSize = settings.out_cell_size
export_time_stamp.limit = 'corner' # This switch is really important when working with tiles!
export_time_stamp.filter = settings.veg_classes_filter
export_time_stamp.noData = 0.0
export_time_stamp.commons.screenLogLevel = opals.Types.LogLevel.none
export_time_stamp.commons.nbThreads = settings.nbThreads
export_time_stamp.run()
export_time_stamp.reset()
log_file.write(tile_id + ' success. \n')
except:
return_value = 'opalsError'
# Convert GPSDate to YYYYMMDD date and save as 32 bit integer
try:
## GPSDate mode ----
log_file.write(tile_id + ' converting mode raster... \n')
# Load raster as numpy array
temp_raster_maj = gdal_array.LoadFile(temp_file_maj)
# Generate mask
temp_raster_mask = temp_raster_maj == 0
        # Convert to seconds and add 10^9 to all values (as this has been previously subtracted) and copy to new object
temp_raster = temp_raster_maj * (60*60*24) + 10**9
# close original file connection
temp_raster_maj = None
# Convert time stamp to CET date as integer
# Note we assume that the difference in leap seconds is constant despite a shift
# on 1 July 2015 (it is 36 afterwards) -> we drop the hours anyways
# https://hpiers.obspm.fr/eop-pc/index.php?index=TAI-UTC_tab&lang=en
# Conversion based on https://stackoverflow.com/questions/33415475/how-to-get-current-date-and-time-from-gps-unsegment-time-in-python
# Thanks to user jfs
convert_to_utc = lambda t: int((datetime(1980, 1, 6) + timedelta(seconds=t - (35 - 19) - 3600)).strftime("%Y%m%d"))
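        # Note: (35 - 19) is the GPS-to-UTC leap second offset for this period:
        # TAI-UTC was 35 s between July 2012 and July 2015, and GPS time runs a
        # constant 19 s behind TAI.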
convert_to_utc_vec = numpy.vectorize(convert_to_utc)
temp_raster = convert_to_utc_vec(temp_raster)
        # Re-assign NA values
temp_raster[temp_raster_mask] = -9999
# Set any values in 2011 to NA (these result from GPS time stamps that
# were not converted from GPS seconds per week to GPS time => they end up in Sept. 2011).
# We know that there were no flights in 2011 which allows us to create this mask.
temp_raster[numpy.logical_and(temp_raster >= 20110101, temp_raster <= 20111231)] = -9999
# Write array as raster (Int32 as default - needed for 8 digit date format)
temp_raster = gdal_array.SaveArray(temp_raster, temp_file_maj_2, format = "GTiff", prototype = temp_file_maj)
log_file.write(tile_id + ' done. \n')
# Close file connection
temp_raster = None
# Reset no data value
cmd = settings.gdal_translate_bin + ' -of GTiff -a_nodata -9999 ' + \
temp_file_maj_2 + ' ' + out_file_maj
# Execute gdal command
log_file.write('\n' + tile_id + ' re-setting no data value... \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_file_maj)
# Log file output
log_file.write('\n' + tile_id + ' time_stamp export successful. \n')
## GPSDate min -----
log_file.write(tile_id + ' converting min raster... \n')
# Load raster as numpy array
temp_raster_min = gdal_array.LoadFile(temp_file_min)
# Generate mask
temp_raster_mask = temp_raster_min == 0
        # Convert to seconds and add 10^9 to all values (as this has been previously subtracted) and copy to new object
temp_raster = temp_raster_min * (60*60*24) + 10**9
# close original file connection
temp_raster_min = None
# Convert time stamp to CET date as integer
# Note we assume that the difference in leap seconds is constant despite a shift
# on 1 July 2015 (it is 36 afterwards) -> we drop the hours anyways
# https://hpiers.obspm.fr/eop-pc/index.php?index=TAI-UTC_tab&lang=en
# Conversion based on https://stackoverflow.com/questions/33415475/how-to-get-current-date-and-time-from-gps-unsegment-time-in-python
# Thanks to user jfs
convert_to_utc = lambda t: int((datetime(1980, 1, 6) + timedelta(seconds=t - (35 - 19) - 3600)).strftime("%Y%m%d"))
convert_to_utc_vec = numpy.vectorize(convert_to_utc)
temp_raster = convert_to_utc_vec(temp_raster)
        # Re-assign NA values
temp_raster[temp_raster_mask] = -9999
# Set any values in 2011 to NA (these result from GPS time stamps that
# were not converted from GPS seconds per week to GPS time => they end up in Sept. 2011).
# We know that there were no flights in 2011 which allows us to create this mask.
temp_raster[numpy.logical_and(temp_raster >= 20110101, temp_raster <= 20111231)] = -9999
# Write array as raster (Int32 as default - needed for 8 digit date format)
temp_raster = gdal_array.SaveArray(temp_raster, temp_file_min_2, format = "GTiff", prototype = temp_file_min)
log_file.write(tile_id + ' done. \n')
# Close file connection
temp_raster = None
# Reset no data value
cmd = settings.gdal_translate_bin + ' -of GTiff -a_nodata -9999 ' + \
temp_file_min_2 + ' ' + out_file_min
# Execute gdal command
log_file.write('\n' + tile_id + ' re-setting no data value... \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_file_min)
## GPSDate max -----
log_file.write(tile_id + ' converting max raster... \n')
# Load raster as numpy array
temp_raster_max = gdal_array.LoadFile(temp_file_max)
# Generate mask
temp_raster_mask = temp_raster_max == 0
        # Convert to seconds and add 10^9 to all values (as this has been previously subtracted) and copy to new object
temp_raster = temp_raster_max * (60*60*24) + 10**9
# close original file connection
temp_raster_max = None
# Convert time stamp to CET date as integer
# Note we assume that the difference in leap seconds is constant despite a shift
# on 1 July 2015 (it is 36 afterwards) -> we drop the hours anyways
# https://hpiers.obspm.fr/eop-pc/index.php?index=TAI-UTC_tab&lang=en
# Conversion based on https://stackoverflow.com/questions/33415475/how-to-get-current-date-and-time-from-gps-unsegment-time-in-python
# Thanks to user jfs
convert_to_utc = lambda t: int((datetime(1980, 1, 6) + timedelta(seconds=t - (35 - 19) - 3600)).strftime("%Y%m%d"))
convert_to_utc_vec = numpy.vectorize(convert_to_utc)
temp_raster = convert_to_utc_vec(temp_raster)
        # Re-assign NA values
temp_raster[temp_raster_mask] = -9999
# Set any values in 2011 to NA (these result from GPS time stamps that
# were not converted from GPS seconds per week to GPS time => they end up in Sept. 2011).
# We know that there were no flights in 2011 which allows us to create this mask.
temp_raster[numpy.logical_and(temp_raster >= 20110101, temp_raster <= 20111231)] = -9999
# Write array as raster (Int32 as default - needed for 8 digit date format)
temp_raster = gdal_array.SaveArray(temp_raster, temp_file_max_2, format = "GTiff", prototype = temp_file_max)
log_file.write(tile_id + ' done. \n')
# Close file connection
temp_raster = None
# Reset no data value
cmd = settings.gdal_translate_bin + ' -of GTiff -a_nodata -9999 ' + \
temp_file_max_2 + ' ' + out_file_max
# Execute gdal command
log_file.write('\n' + tile_id + ' re-setting no data value... \n' + \
subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT))
# Apply mask(s)
common.apply_mask(out_file_max)
# Log file output
log_file.write('\n' + tile_id + ' time_stamp export successful. \n')
return_value = 'success'
except:
if return_value == 'opalsError':
pass
else:
return_value = 'gdalError'
log_file.write('\n' + tile_id + ' date_stamp export failed. \n')
# Tidy up
try:
os.remove(temp_file_maj)
os.remove(temp_file_min)
os.remove(temp_file_max)
os.remove(temp_file_maj_2)
os.remove(temp_file_min_2)
os.remove(temp_file_max_2)
except:
pass
# Close log file
log_file.close()
    # Return exit status
return return_value
def odm_remove_temp_files(tile_id):
"""
Removes footprint and odm files to clear up space for subsequent processing.
:param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: execution status
"""
# initiate return value
return_value = ''
odm_file = settings.odm_folder + '/odm_' + tile_id + '.odm'
odm_footprint_files = glob.glob(settings.odm_footprint_folder + '/footprint_' + tile_id + '.*')
try:
os.remove(odm_file)
return_value = 'success'
except:
return_value = 'unable to delete odm file'
    try:
        for file in odm_footprint_files: os.remove(file)
    except:
        return_value = return_value + '; unable to delete odm footprint file'
# return execution status
return return_value
|
jakobjassmann/ecodes-dk-lidar | dklidar/common.py | ### Common module for the dklidar reprocessing - general functions likely used by all scripts
### <NAME> <EMAIL> 29 January 2019
# Imports
import os
import glob
import pandas
import re
import shutil
import datetime
import subprocess
from dklidar import settings
## Function definitions
## Logging function to initialise the logging process, key to progress management.
def init_log_folder(script_name, tile_ids):
"""
Initiates a log folder for storing the processing output and progress management
:param script_name: name of the processing script for which to initialise the logging database / folder
:param tile_ids: tile ids in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
:return: pandas DataFrame with progress data
"""
# Check and create root folder for script
log_folder = settings.log_folder + '/' + script_name
if not os.path.exists(log_folder):
os.mkdir(log_folder)
# Check whether processing status file exists
progress_file = log_folder + '/' + 'overall_progress.csv'
if not os.path.exists(progress_file):
print(datetime.datetime.now().strftime('%X') +
' No progress file found, creating log folder and progress file...'),
## Initiate pandas data frame with tile_ids as rows and processing_steps as columns
# Create empty list to hold columns
cols = []
# Add processing status column
cols.append(['pending'] * len(tile_ids))
# prepare colnames
colnames = ['processing']
# Zip into pandas data frame
progress_df = pandas.DataFrame(zip(*cols), index = tile_ids, columns = colnames)
progress_df.index.name = 'tile_id'
# Status update
print(' done.')
else:
print(datetime.datetime.now().strftime('%X') +
' Progress file found, loading previous processing status...'),
## Load progress status_file
try:
progress_df = pandas.read_csv(progress_file, index_col='tile_id')
print(' done.')
# update progress dataframe
progress_df = update_progress_df(script_name, progress_df)
except:
            print('\n' + datetime.datetime.now().strftime('%X') + ' Can\'t load progress file. Exiting script!')
quit()
# Compare tile_id column with tile_ids list
if not progress_df.index.values.tolist() == tile_ids:
        print('\n' + datetime.datetime.now().strftime('%X') +
              ' Warning: lists of tile_ids in laz folder (' + settings.laz_folder +
              ') and progress file (' + progress_file + ') do not match.' +
              '\nPlease remove manually to reset.\nExiting script!')
quit()
# Export progress_df as CSV
progress_df.to_csv(progress_file, index=True, header=True)
# return progress_df
return(progress_df)
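# Illustrative layout of the resulting overall_progress.csv (hypothetical tile id):
#   tile_id,processing
#   6210_570,pending
# update_progress_df() below appends one further status column per processing step.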
## Function to gather progress update, key to progress management and logging.
def update_progress_df(script_name, progress_df):
"""
Searches a script's log folder for subfolders matching the tile_id pattern (rrrr_ccc), then crawls these folders for
status.csv files, compiling them into a single pandas dataframe and returning it.
:param script_name: name of the script (for folder matching)
:param progress_df: progress dataframe to be updated
:return: returns an updated progress_df
"""
# Status update
print(datetime.datetime.now().strftime('%X') + ' Updating progress management...'),
# Check log root folder for script if not existing... quit!
log_folder = settings.log_folder + '/' + script_name
if not os.path.exists(log_folder):
        print('\n' + datetime.datetime.now().strftime('%X') +
              ' Warning: script log folder does not exist. Exiting script!')
quit()
# Gather list of tile folders
tile_folders = glob.glob(log_folder + '/*/')
    # Filter only those folders matching a tile id
tile_id_pattern = re.compile('.*\d+_\d+.*')
tile_folders = filter(tile_id_pattern.match, tile_folders)
# Loop through each tile_id_folder updating the log
for tile_folder in tile_folders:
# set path to status csv file
status_csv = tile_folder + '/status.csv'
# Check whether status.csv file exists.
if os.path.exists(status_csv):
# load status csv
status_df = pandas.read_csv(status_csv, index_col='tile_id')
# get tile id from first value in index
tile_id = status_df.index.values.tolist()[0]
# get list of colnames
col_names = status_df.columns.values.tolist()
# for each col name copy cell value into progress_df
for col_name in col_names:
# set cell value
status_value = status_df.at[tile_id, col_name]
# check whether col_name exists in progress data frame if not add empty col to data frame
if col_name in progress_df.columns.values.tolist():
# set cell value for col and tile id
progress_df.at[tile_id, col_name] = status_value
else:
                # create empty 'pending' column for colname that does not exist.
progress_df[col_name] = ['pending'] * len(progress_df.index)
progress_df.at[tile_id, col_name] = status_value
else:
# if the status_csv does not exist, something must have gone wrong.
# do not update the progress for this tile, re-process tile.
pass
# Status update
print(' done.')
# Return progress_df
return(progress_df)
## Define function to gather logs
def gather_logs(script_name, step_name, tile_id):
"""
Gather logs from temporary work dir.
This wee function is to be run out of a temporary working directory by a pool process from a multiprocessing
workflow. It then copies all log files in the temporary working directory to the main log folder for the processing script,
where it stores them in a sub-subfolder according to the step_name and tile_id parameters.
:param script_name: name of the script that is calling the function
:param step_name: name of the step that should be logged for
:param tile_id: tile id in the usual format (rrrr_ccc)
:return: nothing
"""
# Generate string for log folder for the tile and create the directory if it does not exist
log_folder_tile = settings.log_folder + '/' + script_name + '/' + tile_id
if not os.path.exists(log_folder_tile):
os.mkdir(log_folder_tile)
# Generate string for log folder and create the directory if it does not exist
log_folder_step = log_folder_tile + '/' + step_name
if not os.path.exists(log_folder_step):
os.mkdir(log_folder_step)
# Confirm function is executed from temporary work dir using regex
wd = os.getcwd()
temp_re = re.compile('.*temp_.*')
if not temp_re.match(wd):
print(datetime.datetime.now().strftime('%X') + ' Error: gather_logs function called from outside temporary work dir.')
print(wd)
return('Error: gather_logs.')
# Check whether dklidar logfile exists if yes copy:
if os.path.exists(wd + '/log.txt'):
# copy file to tile log directory
shutil.copy(wd + '/log.txt', log_folder_step)
# remove log file from temp directory
os.remove(wd + '/log.txt')
# Check whether opalslog logfile exists if yes copy:
if os.path.exists(wd + '/opalsLog.xml'):
# copy file to tile log directory
shutil.copy(wd + '/opalsLog.xml', log_folder_step)
# remove log file from temp directory
os.remove(wd + '/opalsLog.xml')
# Check whether opalsError logfile exists if yes copy:
if os.path.exists(wd + '/opalsErrors.txt'):
# copy file to tile log directory
shutil.copy(wd + '/opalsErrors.txt', log_folder_step)
# remove log file from temp directory
os.remove(wd + '/opalsErrors.txt')
## Function to generate sea and inland water masks for a tile
def generate_water_masks(tile_id):
"""
    Generates both sea and inland water mask rasters with 10 m grain size, based on the dtm as a template and
    the nationwide sea and inland water mask vector files specified in the settings file.
:param tile_id: id of the tile
:return: execution status
"""
    # Initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a+')
# get temporary work directory
wd = os.getcwd()
# Prepare output folder
sea_out_folder = settings.output_folder + '/masks/sea_mask'
inland_water_out_folder = settings.output_folder + '/masks/inland_water_mask'
if not os.path.exists(settings.output_folder + '/masks'): os.mkdir(settings.output_folder + '/masks')
if not os.path.exists(sea_out_folder): os.mkdir(sea_out_folder)
if not os.path.exists(inland_water_out_folder): os.mkdir(inland_water_out_folder)
    sea_mask_file = sea_out_folder + '/sea_mask_' + tile_id + '.tif'
    inland_mask_file = inland_water_out_folder + '/inland_water_mask_' + tile_id + '.tif'
temp_file = os.getcwd() + '/temp.tif'
try:
## Aggregate dtm
cmd = settings.gdalwarp_bin + \
              ' -tr 10 10 -r min -ot Int16 -dstnodata -9999 -overwrite ' + \
settings.dtm_folder + '/DTM_1km_' + tile_id + '.tif ' + \
temp_file
# Execute gdal command
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' aggregating dtm to 10 m for mask successful.\n\n')
# Set all cells with data in raster to 0 and set it as no data value
cmd = settings.gdal_calc_bin + \
              ' --calc=1 --NoDataValue=-9999 --overwrite --type Int16 ' + \
'-A ' + temp_file + ' ' + \
'--outfile=' + sea_mask_file
# Execute gdal command
log_file.write(subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT) + \
'\n' + tile_id + ' set all cells with data to 1.\n\n')
        # Duplicate file
shutil.copyfile(sea_mask_file, inland_mask_file)
        # Make local copies of the nationwide masks to speed up simultaneous access
dk_sea_mask_path = re.sub('(.*/)(.*)\.shp$', '\g<1>', settings.dk_coastline_poly)
dk_inland_mask_path = re.sub('(.*/)(.*)\.shp$', '\g<1>', settings.dk_lakes_poly)
dk_sea_mask_file_base = re.sub('(.*/)(.*)\.shp$', '\g<2>', settings.dk_coastline_poly)
dk_inland_mask_file_base = re.sub('(.*/)(.*)\.shp$', '\g<2>', settings.dk_lakes_poly)
for file in glob.glob(dk_sea_mask_path + dk_sea_mask_file_base + '.*'):
temp_file = os.getcwd() + '/temp_' + re.sub('(.*\\\)(.*)\.*$', '\g<2>', file)
shutil.copy(file, temp_file)
for file in glob.glob(dk_inland_mask_path + dk_inland_mask_file_base + '.*'):
temp_file = os.getcwd() + '/temp_' + re.sub('(.*\\\)(.*)\.*$', '\g<2>', file)
shutil.copy(file, temp_file)
dk_sea_mask_temp_file = os.getcwd() + '/temp_' + dk_sea_mask_file_base + '.shp'
dk_inland_mask_temp_file = os.getcwd() + '/temp_' + dk_inland_mask_file_base + '.shp'
# Generate sea mask
cmd = settings.gdal_rasterize_bin + \
              ' -b 1 ' + '-burn -9999 ' + '-i ' + '-at ' + \
dk_sea_mask_temp_file + ' ' + \
sea_mask_file
log_file.write('\n' + \
subprocess.check_output(
cmd,
shell=False,
stderr=subprocess.STDOUT) + \
'\n' + sea_mask_file + ' sea mask created. \n\n ')
# Generate inland water mask
cmd = settings.gdal_rasterize_bin + \
              ' -b 1 ' + '-burn -9999 ' + '-at ' + \
dk_inland_mask_temp_file + ' ' + \
inland_mask_file
log_file.write('\n' + \
subprocess.check_output(
cmd,
shell=False,
stderr=subprocess.STDOUT) + \
'\n' + inland_mask_file + ' inland water mask created. \n\n ')
# Remove temporary files
temp_file_list = glob.glob(os.getcwd() + '/temp*.*')
for file in temp_file_list: os.remove(file)
return_value = 'success'
except:
log_file.write('\n' + tile_id + ' creating mask rasters failed.\n\n')
return_value = 'gdalError'
# Close log file
log_file.close()
return return_value
## Function to apply water masks, sea or inland water.
def apply_mask(target_raster = '', sea_mask = False, inland_water_mask = False):
"""
For a given target raster, this function masks all sea off the coastline of Denmark (sea_mask = True),
or all inland water bodies such as lakes or ponds (inland_water_mask = True) or both.
Requires raster masks to be generated using generate_water_masks().
:param sea_mask: boolean switch for applying sea mask
:param inland_water_mask: boolean switch for applying the inland water mask
:param target_raster: target raster file path
:return: execution status
"""
    # initiate return value and log output
return_value = ''
log_file = open('log.txt', 'a+')
# Check whether input raster was provided
if (target_raster == ''): raise Exception('No input raster provided.')
# Get current wd
temp_wd = os.getcwd()
# Get tile_id from path
tile_id = re.sub('.*?_(\d*_)(\d*)(_\d*)?\.tif *', '\g<1>\g<2>', target_raster)
# set mask paths
sea_out_folder = settings.output_folder + '/masks/sea_mask'
inland_water_out_folder = settings.output_folder + '/masks/inland_water_mask'
sea_mask_file = sea_out_folder + '/sea_mask_' + tile_id + '.tif'
inland_mask_file = inland_water_out_folder + '/inland_water_mask_' + tile_id + '.tif'
temp_file = temp_wd + '/temp_raster.tif'
# Apply sea mask
if (sea_mask == True):
try:
# Construct gdal command
cmd = settings.gdal_calc_bin + \
                  ' -A ' + sea_mask_file + ' ' + \
'-B ' + target_raster + ' ' + \
'--outfile=' + temp_file + ' ' + \
'--calc=B --NoDataValue=-9999 --overwrite --type Int16 '
log_file.write('\n' + \
subprocess.check_output(
cmd,
shell=False,
stderr=subprocess.STDOUT) + \
'\n' + target_raster + ' sea mask applied. \n\n ')
shutil.copyfile(temp_file, target_raster)
os.remove(temp_file)
return_value = 'success'
except:
log_file.write('\n' + target_raster + ' applying sea mask failed. \n\n ')
return_value = 'gdalError'
# Apply lake mask
if (inland_water_mask == True):
try:
# Construct gdal command
cmd = settings.gdal_calc_bin + \
                  ' -A ' + inland_mask_file + ' ' + \
'-B ' + target_raster + ' ' + \
'--outfile=' + temp_file + ' ' + \
'--calc=B --NoDataValue=-9999 --overwrite --type Int16 '
log_file.write('\n' + \
subprocess.check_output(
cmd,
shell=False,
stderr=subprocess.STDOUT) + \
'\n' + target_raster + ' inland water mask applied. \n\n ')
shutil.copyfile(temp_file, target_raster)
            os.remove(temp_file)
            return_value = 'success'
except:
log_file.write('\n' + target_raster + ' applying inland water mask failed. \n\n ')
return_value = 'gdalError'
    if (sea_mask == False) and (inland_water_mask == False):
log_file.write('\n' + target_raster + ' no masks to be applied. \n\n ')
return_value = 'success'
# Close log file
log_file.close()
return return_value
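# Minimal usage sketch (assumes it is run from a temporary work dir containing 'log.txt',
# and that the mask rasters for the tile were generated first; tile id and paths are hypothetical):
#   from dklidar import common
#   common.generate_water_masks('6210_570')
#   common.apply_mask('D:/out/raster_6210_570.tif', sea_mask=True, inland_water_mask=True)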
|
jakobjassmann/ecodes-dk-lidar | scripts/remove_missing_tiles.py | # Short script to remove tiles that have missing point cloud or dtm data
# This script can be run after executing checksum_qa.py and directly uses the output files
# generated by the script.
# <NAME> <EMAIL> 16 January 2020
import os
from dklidar import settings
# Prompt user input
confirmation_string = raw_input('Are you sure you would like to remove the files for all incomplete tiles?\n[yes/no]: ')
if(confirmation_string == 'yes' or confirmation_string == 'Yes'):
print('Deleting LAZ files with missing DTM tiles...')
# Remove LAZ files with missing DTMs
with open(settings.laz_folder + '../laz_files_with_missing_dtm.txt') as f:
files = f.read().splitlines()
for file in files:
os.remove(file)
print('Done.')
print('Deleting DTM files with missing LAZ tiles...')
# Remove DTM files with missing LAZs
with open(settings.laz_folder + '../dtm_files_with_missing_laz.txt') as f:
files = f.read().splitlines()
for file in files:
os.remove(file)
print('Done.')
else:
print("No or invalid input, aborting script...")
|
jakobjassmann/ecodes-dk-lidar | scripts/checksum_qa.py | <gh_stars>0
# Short script to confirm transferred file integrity using md5 checksums.
# Prerequisite is that checksums were generated using create_checksums.bat
# A comparison of the sets of laz tiles and dtm tiles is also carried out.
# <NAME> <EMAIL> 16 January 2020
# Imports
import os
import glob
import numpy as np
import pandas
import re
from dklidar import settings
### Read in filenames using unix filename extensions from glob
### 1) Pointcloud files (laz)
##orig_md5_files = glob.glob(settings.laz_folder + '*.md5')
##local_md5_files = glob.glob(settings.laz_folder + '*.local_md5')
### 2) DTM files (tif)
##orig_md5_files.extend(glob.glob(settings.dtm_folder + "*.md5"))
##local_md5_files.extend(glob.glob(settings.dtm_folder + "*.local_md5"))
##
### Initiate empty lists
##md5_orig = list()
##md5_local = list()
##
### Fill lists with md5 sums from files
##for file_name in orig_md5_files:
## file = open(file_name)
## md5_orig.append(file.read(32))
##for file_name in local_md5_files:
## file = open(file_name)
## md5_local.append(file.read(32))
##
### Zip all lists into one data frame
##df = pandas.DataFrame(zip(orig_md5_files, md5_orig, local_md5_files, md5_local),
## columns=['orig_file', 'orig_md5', 'local_file', 'local_md5'])
##
### Add md5_check comparison column to df
##md5_check = df['orig_md5'] == df['local_md5']
##df['md5_check'] = md5_check
##
### Print dataframe overview to console
##print()
##print('df overview:')
##print(df.head())
##print()
##
### Filter rows where the check returned false
##print('Non matching md5 checksums:' + str(np.sum([not i for i in df['md5_check']])))
##damaged_files = df[df['md5_check'] == False]
##print(damaged_files)
##
### Export csv
##damaged_files.to_csv(settings.laz_folder + '../damaged_files.csv', index=False)
##
# ---------------------------------------------------
# Check for completeness of datasets
# Load file names
dtm_files = glob.glob(settings.dtm_folder + '/*.tif')
laz_files = glob.glob(settings.laz_folder + '/*.laz')
# initiate empty lists for tile_ids
dtm_tile_ids = []
laz_tile_ids = []
# fill dictionaries with tile_id, as well as row number and column number for each file name:
for file_name in dtm_files:
tile_id = re.sub('.*DTM_1km_(\d*_\d*).tif', '\g<1>', file_name)
dtm_tile_ids.append(tile_id)
for file_name in laz_files:
tile_id = re.sub('.*PUNKTSKY_1km_(\d*_\d*).laz', '\g<1>', file_name)
laz_tile_ids.append(tile_id)
# Determine differences between sets of tiles
missing_laz_tiles = set(dtm_tile_ids) - set(laz_tile_ids)
missing_dtm_tiles = set(laz_tile_ids) - set(dtm_tile_ids)
df_missing_dtm = pandas.DataFrame(zip(missing_dtm_tiles), columns=['tile_id'])
df_missing_dtm.to_csv(settings.dtm_folder + '../missing_dtm_tile_ids.csv', index=False)
# Print out a quick overview of data frame for control
print(df_missing_dtm.head())
df_missing_laz = pandas.DataFrame(zip(missing_laz_tiles), columns=['tile_id'])
df_missing_laz.to_csv(settings.laz_folder + '../missing_laz_tile_ids.csv', index=False)
# Print out a quick overview of data frame for control
print(df_missing_laz.head())
# Export lists to files
# DTMs with missing LAZs
out_file = open(settings.dtm_folder + '../dtm_files_with_missing_laz.txt', 'a+')
for tile_id in missing_laz_tiles:
out_file.write(settings.dtm_folder + 'DTM_1km_' + tile_id + '.tif\n')
out_file.close()
# LAZs with missing DTMs
out_file = open(settings.laz_folder + '../laz_files_with_missing_dtm.txt', 'a+')
for tile_id in missing_dtm_tiles:
out_file.write(settings.laz_folder +'PUNKTSKY_1km_' + tile_id + '.laz\n')
out_file.close()
|
jakobjassmann/ecodes-dk-lidar | scripts/progress_monitor.py | ### Script to monitor progress of the process_tiles.py script
### Execute in a separate terminal when running process_tiles.py
### <NAME> <EMAIL> 20 April 2020
## Imports
import glob
import os
import datetime
import time
from dklidar import settings
# Set working directory
os.chdir(settings.wd)
# Set number of parallel processes:
n_processes = 62 # 54
# set update interval
update_interval = 60 # 60 s
# Load tile file names and derive total number of files to process
laz_files = glob.glob(settings.laz_folder + '/*.laz')
n_total = len(laz_files)
# Check number of files already processed
n_processed_at_start = len(glob.glob(settings.log_folder + '/process_tiles/*_*'))
# set start date and time:
start_time = datetime.datetime.fromtimestamp(os.path.getmtime(settings.log_folder + '/process_tiles/overall_progress.csv'))
# Initiate progress variables
progress = 0
# Update progress till complete
while progress < 1:
    # Obtain the total number of tiles in process or completed
n_sum = len(glob.glob(settings.log_folder + '/process_tiles/*_*'))
# Calculate n of fully processed tiles
n_processed = n_sum - n_processes
progress = float(n_processed) / float(n_total)
# Calculate time differences
time_passed = datetime.datetime.now() - start_time
if (n_processed - n_processed_at_start) <= 54:
time_estimated = 'estimating'
else:
time_estimated = (time_passed / (n_processed - n_processed_at_start)) * (n_total - n_processed)
# Check whether the overall_progress.csv has been updated, if so
# assume the processing of the last 54 files is done
    overall_progress_timestamp = datetime.datetime.fromtimestamp(
        os.path.getmtime(settings.log_folder + '/process_tiles/overall_progress.csv'))
    if overall_progress_timestamp != start_time:
progress = 1
n_processed = n_total
# Print stats on screen
os.system('cls')
print('\n')
print('-' * 80),
print(' ' * (79 - len('<NAME> <EMAIL> 2020 ')) +
'<NAME> <EMAIL> 2020 ')
print(' \'process_tiles.py\' progress:')
print(' - processing with ' + str(n_processes) + ' parallel threads')
print(' - start time: ' + str(start_time.ctime()))
    print(' - execute \'stop.bat\' to pause / interrupt processing')
print('\n')
print(' update interval: ' + str(update_interval) + ' s' +
' ' * (79 - len(' update interval: ' + str(update_interval) + ' s' + 'last update: ' + str(datetime.datetime.now().ctime()))) +
'last update: ' + str(datetime.datetime.now().ctime()))
print('-' * 80)
print('\n' * 2)
print(' ' + str(n_processed) + ' / ' + str(n_total) + ' tiles' +
' ' * (80 - len(' ' + str(n_processed) + ' / ' + str(n_total) + ' tiles' + str(int(round(progress * 100))) + '% ')) +
str(int(round(progress * 100))) + '%'),
print('-' * 80),
print('#' * int(round(78*progress)))
print('-' * 80),
print('passed: ' + str(time_passed).split('.')[0] +
' ' * (79 - len('passed: ' + str(time_passed).split('.')[0] +
'remaining (estimate): ' + str(time_estimated).split('.')[0] + ' ')) +
'remaining (estimate): ' + str(time_estimated).split('.')[0] + ' ')
    print(' ' * (79 - len('press CTRL+C to exit progress monitor ')) + 'press CTRL+C to exit progress monitor ')
# Wait for next update
time.sleep(update_interval)
# End of while loop
|
jakobjassmann/ecodes-dk-lidar | documentation/source_data/merger_scripts/copy_files.py | <reponame>jakobjassmann/ecodes-dk-lidar
## DHM2018+ and DHM2015 merger
## Script to copy the files and create a physical version of the merger
## using the outputs from dhm201415_merger.R
## <NAME> <EMAIL>
## Dependencies
import os
import shutil
import pandas
import glob
import re
## Status
print('\n' + '#' * 80)
print('Merging DHM2018+, DHM2015 and GST2014\n')
print('Setting up work environment...')
## Prepare environment
# Set wd
os.chdir('D:/Jakob/dhm201415_merger/')
# Set file paths
gst2014_laz = 'O:/Nat_Ecoinformatics/B_Read/LegacyData/Denmark/Elevation/GST_2014/Punktsky/laz/'
dhm2015_laz = 'D:/Jakob/datafordeler_downloads/DHM2015_punktsky'
dhm2018_laz = 'D:/Jakob/dk_nationwide_lidar/data/laz'
gst2014_dtm = 'O:/Nat_Ecoinformatics/B_Read/LegacyData/Denmark/Elevation/GST_2014/DTM_tif'
dhm2015_dtm = 'D:/Jakob/datafordeler_downloads/DHM2015_terraen'
dhm2018_dtm = 'D:/Jakob/dk_nationwide_lidar/data/dtm'
laz_out = 'laz/'
dtm_out = 'dtm/'
os.mkdir(laz_out)
os.mkdir(dtm_out)
# Load tile_ids to source from GST2014
gst2014_tile_ids = pandas.read_csv('tiles_from_GST2014.csv')
# Load tile_ids to source from DHM2015
dhm2015_tile_ids = pandas.read_csv('tiles_from_DHM2015.csv')
# Load tile_ids to source from DHM2018+
dhm2018_tile_ids = pandas.read_csv('tiles_from_DHM2018.csv')
# Load file names
gst2014_laz_files = glob.glob(gst2014_laz + '/*.laz')
dhm2015_laz_files = glob.glob(dhm2015_laz + '/*.laz')
dhm2018_laz_files = glob.glob(dhm2018_laz + '/*.laz')
gst2014_dtm_files = glob.glob(gst2014_dtm + '/*.tif')
dhm2015_dtm_files = glob.glob(dhm2015_dtm + '/*.tif')
dhm2018_dtm_files = glob.glob(dhm2018_dtm + '/*.tif')
# Helper function to retrieve tile_ids from file names
def get_tile_ids(file_names):
# initiate empty list for tile_ids
tile_ids = []
    # clean up file names
for i in range(0, len(file_names)):
file_names[i] = re.sub('\\\\', '/', file_names[i])
# fill dictionaries with tile_id, as well as row number and column number for each file name:
for file_name in file_names:
tile_id = re.sub('.*(\d{4}_\d{3}).*', '\g<1>', file_name)
tile_ids.append(tile_id)
# combine to data frame
files_df = pandas.DataFrame(zip(*[tile_ids, file_names]),
columns = ['tile_id', 'file_name'])
# return files_df
return(files_df)
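# Example (illustrative path): get_tile_ids(['D:/data/DTM_1km_6210_570.tif']) returns a
# data frame with tile_id '6210_570' alongside the cleaned-up file name.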
# Apply helper function to all file names
gst2014_laz_files_df = get_tile_ids(gst2014_laz_files)
dhm2015_laz_files_df = get_tile_ids(dhm2015_laz_files)
dhm2018_laz_files_df = get_tile_ids(dhm2018_laz_files)
gst2014_dtm_files_df = get_tile_ids(gst2014_dtm_files)
dhm2015_dtm_files_df = get_tile_ids(dhm2015_dtm_files)
dhm2018_dtm_files_df = get_tile_ids(dhm2018_dtm_files)
## Copy files
# Helper function to select and copy files with progress bar
def copy_tiles(tiles_to_copy, files_df, out_folder):
# Subset files to copy
files_to_copy = files_df[files_df['tile_id'].
isin(tiles_to_copy)]['file_name'].tolist()
# Set counter to 0:
progress = 0
# Copy files
for i in range(0, len(files_to_copy)):
# Check whether file exists, if not copy
if not os.path.isfile(os.getcwd() + '/' + out_folder +
re.sub('.*/(.*)', '\g<1>', files_to_copy[i])):
# Copy file
shutil.copy(files_to_copy[i],
os.getcwd() + '/' + out_folder +
re.sub('.*/(.*)', '\g<1>', files_to_copy[i]))
# Update progress
progress = float(i + 1) / float(len(files_to_copy))
# Update progress bar
print('\r|' +
'#' * int(round(progress * 54)) +
'-' * int(round((1 - progress) * 54)) +
'| ' +
str(int(round(progress * 100))) + '%'),
# Copy laz files
print('\nCopying GST2014 laz files...')
copy_tiles(gst2014_tile_ids['tile_id'], gst2014_laz_files_df, laz_out)
print('\n\nCopying DHM2015 laz files...')
copy_tiles(dhm2015_tile_ids['tile_id'], dhm2015_laz_files_df, laz_out)
print('\n\nCopying DHM2018 laz files...')
copy_tiles(dhm2018_tile_ids['tile_id'], dhm2018_laz_files_df, laz_out)
# Copy dtm files
print('\n\nCopying GST2014 dtm files...')
copy_tiles(gst2014_tile_ids['tile_id'], gst2014_dtm_files_df, dtm_out)
print('\n\nCopying DHM2015 dtm files...')
copy_tiles(dhm2015_tile_ids['tile_id'], dhm2015_dtm_files_df, dtm_out)
print('\n\nCopying DHM2018 dtm files...')
copy_tiles(dhm2018_tile_ids['tile_id'], dhm2018_dtm_files_df, dtm_out)
## Remove incomplete laz / dtm pairs from dataset
# Status
print('\n\nDetermining completeness of laz / dtm pairs in merged data set...\n')
# Retrieve files names and tile ids in merged data set
laz_merged_files = glob.glob(laz_out + '/*.laz')
dtm_merged_files = glob.glob(dtm_out + '/*.tif')
laz_merged_files_df = get_tile_ids(laz_merged_files)
dtm_merged_files_df = get_tile_ids(dtm_merged_files)
# Work out incomplete pairs
laz_missing_dtm = list(set(laz_merged_files_df['tile_id']) -
set(dtm_merged_files_df['tile_id']))
dtm_missing_laz = list(set(dtm_merged_files_df['tile_id']) -
set(laz_merged_files_df['tile_id']))
# Save incomplete pairs to file
incomplete_pairs = pandas.concat(
[laz_merged_files_df[laz_merged_files_df['tile_id'].isin(laz_missing_dtm)],
dtm_merged_files_df[dtm_merged_files_df['tile_id'].isin(dtm_missing_laz)]])
incomplete_pairs.to_csv('incomplete_tile_pairs.csv', index = False)
# Status
print(str(len(laz_missing_dtm)) + ' laz files have no corresponding dtm file.' +
'\n')
print(str(len(dtm_missing_laz)) + ' dtm files have no corresponding laz file.' +
'\n')
print('Incomplete tile_ids and file names saved to' +
' "incomplete_tile_pairs.csv".\n')
# Prompt for choice to delete
del_choice = raw_input('Would you like to remove the incomplete tile pairs? ' +
                       '[y/n]: ')
# Evaluate choice
if(del_choice == 'n'):
print('\nNo files will be deleted.')
elif(del_choice == 'y'):
print('\nDeleting files...')
# Delete files
progress = 0
files_to_delete = incomplete_pairs['file_name'].tolist()
for i in range(0, len(files_to_delete)):
os.remove(files_to_delete[i])
progress = float(i + 1) / float(len(files_to_delete))
print('\r|' +
'#' * int(round(progress * 54)) +
'-' * int(round((1 - progress) * 54)) +
'| ' +
str(int(round(progress * 100))) + '%'),
progress = None
print('\n')
else:
print('\nInvalid input.')
# Status
print('Merger complete.\n\n' + '#' * 80)
# EOF
|
murfrosoft/pygame-demos | pygame02_Animation.py | <filename>pygame02_Animation.py
import pygame
# initialize pygame modules
pygame.init()
clock = pygame.time.Clock() # create a clock to control update speed
# create a screen on which to draw
width, height = 500, 400
screensize = [width, height] # [width, height] in pixels
screen = pygame.display.set_mode(screensize)
pygame.display.set_caption('demo02: Basic Animation') # title of screen window
# initialize font
font = pygame.font.SysFont(None, 16) # font size = 16
# create a textbox: red text with white background
text = font.render('Score: %d' %(0), True, (255,0,0), (255,255,255))
textRect = text.get_rect()
textRect.topleft = screen.get_rect().topleft
# import an image to move
image = pygame.image.load(r'Images\fireball.png')
topx = width//2
topy = height//2
xspeed = 1
yspeed = 1
score = 0
# ready to run game loop
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
run = False
screen.fill((255,255,255)) # fill screen with white
text = font.render('Score: %d' %(score), True, (0,0,0), (255,255,255))
screen.blit(text, textRect) # draw text to screen
screen.blit(image, (topx, topy))
pygame.display.update() # update screen drawing
# update positions
topx += xspeed
topy += yspeed
if topx <= 0 or topx >= width - 32:
xspeed *= -1
score += 1
if topy <= 0 or topy >= height - 32:
yspeed *= -1
score += 1
clock.tick(60) # advance at 60 fps
|
murfrosoft/pygame-demos | pygame03_AnimationTranslation.py | <filename>pygame03_AnimationTranslation.py
import pygame
# initialize pygame modules
pygame.init()
clock = pygame.time.Clock() # create a clock to control update speed
# create a screen on which to draw
width, height = 500, 400
screensize = [width, height] # [width, height] in pixels
screen = pygame.display.set_mode(screensize)
pygame.display.set_caption('demo03: More Animation') # title of screen window
# initialize font
font = pygame.font.SysFont(None, 16) # font size = 16
# create a textbox: red text with white background
text = font.render('Score: %d' %(0), True, (255,0,0), (255,255,255))
textRect = text.get_rect()
textRect.topleft = screen.get_rect().topleft
# import an image to move
image = pygame.image.load(r'Images\fireball.png')
img_x = width//2
img_y = height//2
xspeed = 1
yspeed = 1
score = 0
angle = 0
# ready to run game loop
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
run = False
screen.fill((255,255,255)) # fill screen with white
text = font.render('Score: %d' %(score), True, (0,0,0), (255,255,255))
screen.blit(text, textRect) # draw text to screen
    fireball = pygame.transform.rotate(image, angle)
img_rect = fireball.get_rect()
img_rect.centerx = img_x
img_rect.centery = img_y
    screen.blit(fireball, img_rect)
pygame.display.update() # update screen drawing
# update positions
img_x += xspeed
img_y += yspeed
angle -= 9
if angle <= 0:
angle = 360
if img_x <= 16 or img_x >= width - 16:
xspeed *= -1
score += 1
if img_y <= 16 or img_y >= height - 16:
yspeed *= -1
score += 1
clock.tick(60) # advance at 60 fps
|
murfrosoft/pygame-demos | pygame04_ScrollingBackground.py | <reponame>murfrosoft/pygame-demos
import pygame
# initialize pygame modules
pygame.init()
clock = pygame.time.Clock() # create a clock to control update speed
# create a screen on which to draw
width, height = 500, 300
screensize = [width, height] # [width, height] in pixels
screen = pygame.display.set_mode(screensize)
pygame.display.set_caption('demo04: Scrolling background') # title of screen window
# initialize font
font = pygame.font.SysFont(None, 16) # font size = 16
# create a textbox: red text with white background
text = font.render('Score: %d' %(0), True, (255,255,255), (0,0,0))
textRect = text.get_rect()
textRect.topleft = screen.get_rect().topleft
# import an image to move
image_bg = pygame.image.load(r'Images\starbg.png')
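# Note: the star background image is assumed to be 600 px wide; it is drawn twice,
# side by side, and wrapped by a full image width once it scrolls off screen (see the game loop below).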
# ready to run game loop
bg_x = 0
bg_speed = 1
score = 0
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
run = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
if bg_speed < 30:
bg_speed += 1
if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
if bg_speed > 1:
bg_speed -= 1
screen.fill((255,255,255)) # fill screen with white
text = font.render('Score: %d' %(score), True, (255,255,255), (0,0,0))
screen.blit(image_bg, (bg_x,0))
    screen.blit(image_bg, (bg_x+600,0))
screen.blit(text, textRect) # draw text to screen
pygame.display.update() # update screen drawing
bg_x = bg_x - bg_speed
if bg_x <= -600:
bg_x = 0
score += 1
clock.tick(60) # advance at 60 fps
|
murfrosoft/pygame-demos | pygame01_HelloWorld.py | <filename>pygame01_HelloWorld.py
import pygame
# initialize pygame modules
pygame.init()
clock = pygame.time.Clock() # create a clock to control update speed
# create a screen on which to draw
screensize = [300, 200] # [width, height] in pixels
screen = pygame.display.set_mode(screensize)
pygame.display.set_caption('demo01: Hello World') # title of screen window
# initialize font
font = pygame.font.SysFont(None, 40) # font size = 40
# create a textbox: red text with white background
text = font.render(' Hello world!', True, (255,0,0), (255,255,255))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery
# ready to run game loop
red = 255
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
run = False
screen.fill((255,255,255)) # fill screen with white
text = font.render('%003d Hello world!' %(red), True, (red,0,0), (255,255,255))
screen.blit(text, textRect) # draw text to screen
pygame.display.update() # update screen drawing
red -= 1
if red <= 0:
red = 255
clock.tick(60) # advance at 60 fps
|
weynelucas/drf-nested-views | drf_nested_views/mixins.py | """
Basic building blocks for generic class based views.
Use these mixins with GenericAPIView from drf_nested_views.
With the exception of CreateModelMixin and UpdateModelMixin,
these mixins do not perform or override any actions of
their parents (rest_framework.mixins). Use them to track
future changes.
"""
from rest_framework import mixins
class CreateModelMixin(mixins.CreateModelMixin):
"""
Create a nested model instance.
"""
def perform_create(self, serializer):
serializer.save(**self.get_parent_lookup())
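# Illustrative usage (a sketch; the model/serializer names are hypothetical):
#
#     from drf_nested_views import generics, mixins
#
#     class CommentList(mixins.ListModelMixin, mixins.CreateModelMixin,
#                       generics.GenericAPIView):
#         serializer_class = CommentSerializer  # e.g. a NestedHyperlinkedModelSerializer
#
#         def get(self, request, *args, **kwargs):
#             return self.list(request, *args, **kwargs)
#
#         def post(self, request, *args, **kwargs):
#             return self.create(request, *args, **kwargs)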
class ListModelMixin(mixins.ListModelMixin):
"""
List a nested queryset.
"""
pass
class RetrieveModelMixin(mixins.RetrieveModelMixin):
"""
Retrieve a nested model instance.
"""
pass
class UpdateModelMixin(mixins.UpdateModelMixin):
"""
Update a nested model instance.
"""
def perform_update(self, serializer):
serializer.save(**self.get_parent_lookup())
class DestroyModelMixin(mixins.DestroyModelMixin):
"""
Destroy a nested model instance.
"""
pass |
weynelucas/drf-nested-views | drf_nested_views/generics.py | <reponame>weynelucas/drf-nested-views
from rest_framework.serializers import ModelSerializer
from rest_framework import generics
from rest_framework_nested.serializers import NestedHyperlinkedModelSerializer
class GenericAPIView(generics.GenericAPIView):
"""
Base class for all other generic views that are nested.
"""
parent_lookup_kwargs = {}
def get_queryset(self):
"""
Get the list of items for this view.
This must be an iterable, and may be a queryset.
If `serializer_class` attribute is a ModelSerializer
subclass, the queryset will be retrieved through
`model` attribute.
"""
if issubclass(self.serializer_class, ModelSerializer):
return self.serializer_class.Meta.model.objects.all()
return super(GenericAPIView, self).get_queryset()
def get_parent_lookup_kwargs(self):
"""
Returns the parent lookup kwargs, a dictionary that maps
URL parameters to object properties.
Defaults to using `self.parent_lookup_kwargs`.
If `serializer_class` attribute is a NestedHyperlinkedModelSerializer
subclass, the kwargs will be retrieved from it through
`parent_lookup_kwargs` attribute.
"""
if issubclass(self.serializer_class, NestedHyperlinkedModelSerializer):
return self.serializer_class.parent_lookup_kwargs
assert self.parent_lookup_kwargs, (
"'%s' should either include a `parent_lookup_kwargs` attribute, "
"or override the `get_parent_lookup_kwargs()` method."
% self.__class__.__name__
)
return self.parent_lookup_kwargs
def get_parent_lookup(self):
"""
Returns the entire lookup for parent based on `parent_lookup_kwargs`
and URL keyword arguments.
"""
parent_lookup_kwargs = self.get_parent_lookup_kwargs()
try:
return { v: self.kwargs[k] for k, v in parent_lookup_kwargs.items() }
except KeyError as error:
raise AttributeError(
"Keyword argument %s from `parent_lookup_kwargs` does not "
"have any match on URL keyword arguments."
% str(error)
)
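    # Illustrative sketch (hypothetical names): with
    #   parent_lookup_kwargs = {'parent_pk': 'parent__pk'}
    # and URL kwargs {'parent_pk': 3}, get_parent_lookup() returns
    #   {'parent__pk': 3}, which filter_queryset() below applies as a queryset filter.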
def filter_queryset(self, queryset):
"""
Given a queryset, filter it with whichever filter
backend is in use.
After filter backend process, filter it with
`parent_lookup_kwargs` attribute.
"""
queryset = super(GenericAPIView, self).filter_queryset(queryset)
return queryset.filter(**self.get_parent_lookup()) |
dave-s477/SoMeNLP | somenlp/utils/time_marker.py | <gh_stars>0
import datetime
def get_time_marker(format='full'):
"""Get current time as a formatted string.
Args:
format (string): full (with time) vs. short (only date)
Returns:
string: current time
"""
if format=='full':
out_s = datetime.datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
else:
out_s = datetime.datetime.now().strftime("%d-%m-%Y")
return out_s |
dave-s477/SoMeNLP | somenlp/entity_disambiguation/feature_calculator.py | <filename>somenlp/entity_disambiguation/feature_calculator.py
import numpy as np
import pandas as pd
import time
import re
import torch
import json
import nltk
nltk.download('stopwords')
from itertools import combinations
from nltk.corpus import stopwords
from multiprocessing import Pool
from os.path import join, exists
from Levenshtein import distance as levenshtein_distance
from Levenshtein import jaro
from articlenizer import articlenizer
MENTION_SCALING_FACTOR = 60
URL_SCALING_FACTOR = 120
DEVEL_SCALING_FACTOR = 30
VERSION_SCALING_FACTOR = 10
class EntityDisambiguationFeatureGenerator:
"""Calculates features for entity disambiguation
"""
def __init__(self, dbpedia):
"""Init
Args:
dbpedia (str): location of a pandas dataframe containing DBpedia information for software entities
"""
self.stops = stopwords.words('english')
# self.dbpedia_names = pd.read_csv(dbpedia, compression='gzip')
# self.dbpedia_names.drop(self.dbpedia_names.columns.difference(['unique','label']), 1, inplace=True)
# self.dbpedia_names.dropna(inplace=True)
# self.dbpedia_unique_mapping = {}
# self.dbpedia_label_mapping = {}
# for index, row in self.dbpedia_names.iterrows():
# if row['unique'] not in self.dbpedia_unique_mapping:
# self.dbpedia_unique_mapping[row['unique']] = set()
# self.dbpedia_unique_mapping[row['unique']].update([row['label']])
# if row['label'] not in self.dbpedia_label_mapping:
# self.dbpedia_label_mapping[row['label']] = set()
# self.dbpedia_label_mapping[row['label']].update([row['unique']])
with open(dbpedia, 'r') as j_in:
self.dbpedia_data = json.load(j_in)
#self.dbpedia_unique_mapping = dbpedia_data['unique_mapping']
#self.dbpedia_label_mapping = dbpedia_data['label_mapping']
self.string_features_to_extract = [
{ # String based features
'name': 'LenFirst',
'function': self._len_first,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'LenSecond',
'function': self._len_second,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Jaro',
'function': self._jaro,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Levenshtein',
'function': self._levenshtein,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Substring',
'function': self._substring,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Norm_string_Jaro',
'function': self._norm_string_jaro,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Norm_string_Levenshtein',
'function': self._norm_string_levenshtein,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'KnownAbbr',
'function': self._known_abbreviation,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'MenGenAbbr',
'function': self._mention_generated_abbreviation,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'NormGenAbbr',
'function': self._norm_generated_abbreviation,
'data_type': np.float32,
'init': 1.0
}
]
self.context_features_to_extract = [
{ # Abbreviations and Alternative Names
'name': 'GivenAbbr',
'function': self._given_abbreviation,
'data_type': np.float32,
'init': 1.0
},
{ # URLs
'name': 'URL_LenFirst',
'function': self._url_len_first,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'URL_LenSecond',
'function': self._url_len_second,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'URL_Jaro',
'function': self._url_jaro,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'URL_Substring',
'function': self._url_substring,
'data_type': np.float32,
'init': 1.0
},
{ # Developers
'name': 'Devel_LenFirst',
'function': self._devel_len_first,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Devel_LenSecond',
'function': self._devel_len_second,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Devel_Jaro',
'function': self._devel_jaro,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Devel_Substring',
'function': self._devel_substring,
'data_type': np.float32,
'init': 1.0
},
{ # Versions
'name': 'Version_LenFirst',
'function': self._version_len_first,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Version_LenSecond',
'function': self._version_len_second,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Version_Jaro',
'function': self._version_jaro,
'data_type': np.float32,
'init': 1.0
},
{
'name': 'Version_Substring',
'function': self._version_substring,
'data_type': np.float32,
'init': 1.0
}
]
self.feature_length = len(self.string_features_to_extract) + len(self.context_features_to_extract)
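    # Expected entity structure (inferred from the feature functions below; illustrative values):
    #   {'mention': 'SPSS',                                    # raw mention string
    #    'paper_id': '...', 'sentence': '...',                 # used by get_labels()
    #    'relations': [{'type': 'Version_of', 'string': '25.0'}, ...],
    #    'contexts': [{'relations': [...]}, ...]}              # for features_for_pre_clustered()
    # 'string' and 'norm' are derived via _remove_spaces() / _normalize() in apply_features().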
def _len_fct(self, s, factor):
return len(s) / factor
def _jaro_fct(self, s0, s1):
return 1 - jaro(s0, s1)
def _levenshtein_fct(self, s0, s1):
return levenshtein_distance(s0, s1) / max(len(s0), len(s1))
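    # Worked examples: _jaro_fct returns a distance in [0, 1] (0 = identical strings),
    # and _levenshtein_fct normalises the edit distance by the longer string, e.g.
    # _levenshtein_fct('SPSS', 'SPS') = 1 / 4 = 0.25.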
def _substring_fct(self, s0, s1):
return 1 - (s0 in s1 or s1 in s0)
def _len_first(self, pair):
return self._len_fct(pair[0]['string'], MENTION_SCALING_FACTOR)
def _len_second(self, pair):
return self._len_fct(pair[1]['string'], MENTION_SCALING_FACTOR)
def _jaro(self, pair):
"""Jaro distance between entity mentions
Args:
pair (list): entity pair
"""
return self._jaro_fct(pair[0]['string'], pair[1]['string'])
def _levenshtein(self, pair):
"""Levenshtein distance between entity mentions
Args:
pair (list): entity pair
Returns:
int: Levenshtein edit distance
"""
return self._levenshtein_fct(pair[0]['string'], pair[1]['string'])
def _substring(self, pair):
"""Test substring relation between entity mentions
Args:
pair (list): entity pair
Returns:
bool: result
"""
return self._substring_fct(pair[0]['string'], pair[1]['string'])
def _normalize(self, s):
norm_s = re.sub('[^0-9a-zA-Z]+', ' ', s.casefold()).rstrip('0123456789 ,.').lstrip(' ')
norm_s = ' '.join([w for w in norm_s.split() if w not in self.stops])
if not norm_s:
norm_s = s
return norm_s
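    # Example: _normalize('The R Project 3.2,') -> 'r project'
    # (casefold, collapse non-alphanumerics, strip trailing digits/punctuation, drop stopwords).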
def _remove_spaces(self, s):
replace_regex = re.compile(r'\s(?P<to_keep>[\+\-#™_/\d]+)\s?')
        return replace_regex.sub(r'\g<to_keep>', s)
def _norm_string_jaro(self, pair):
return self._jaro_fct(pair[0]['norm'], pair[1]['norm'])
def _norm_string_levenshtein(self, pair):
return self._levenshtein_fct(pair[0]['norm'], pair[1]['norm'])
def _add_info_len(self, pair, idx, name, factor):
num = 0
length = 0
for rel in pair[idx]['relations']:
if rel['type'] == name:
num += 1
length += len(rel['string'])
length = length / num if num > 0 else 0
return length / factor
def _add_info_jaro(self, pair, name):
x_infos = []
for rel in pair[0]['relations']:
if rel['type'] == name:
x_infos.append(rel['string'])
if not x_infos:
return 1.0
y_infos = []
for rel in pair[1]['relations']:
if rel['type'] == name:
y_infos.append(rel['string'])
if not y_infos:
return 1.0
min_dist = 1.0
for i_x in x_infos:
for i_y in y_infos:
dist = self._jaro_fct(i_x, i_y)
if dist < min_dist:
min_dist = dist
return min_dist
def _add_info_substring(self, pair, name):
x_infos = []
for rel in pair[0]['relations']:
if rel['type'] == name:
x_infos.append(rel['string'])
if not x_infos:
return 1
y_infos = []
for rel in pair[1]['relations']:
if rel['type'] == name:
y_infos.append(rel['string'])
if not y_infos:
return 1
for i_x in x_infos:
for i_y in y_infos:
if i_x in i_y or i_y in i_x:
return 0
return 1
def _url_len_first(self, pair):
return self._add_info_len(pair, 0, 'URL_of', URL_SCALING_FACTOR)
def _url_len_second(self, pair):
return self._add_info_len(pair, 1, 'URL_of', URL_SCALING_FACTOR)
def _url_jaro(self, pair):
return self._add_info_jaro(pair, 'URL_of')
def _url_substring(self, pair):
return self._add_info_substring(pair, 'URL_of')
def _devel_len_first(self, pair):
return self._add_info_len(pair, 0, 'Developer_of', DEVEL_SCALING_FACTOR)
def _devel_len_second(self, pair):
return self._add_info_len(pair, 1, 'Developer_of', DEVEL_SCALING_FACTOR)
def _devel_jaro(self, pair):
return self._add_info_jaro(pair, 'Developer_of')
def _devel_substring(self, pair):
return self._add_info_substring(pair, 'Developer_of')
def _version_len_first(self, pair):
return self._add_info_len(pair, 0, 'Version_of', VERSION_SCALING_FACTOR)
def _version_len_second(self, pair):
return self._add_info_len(pair, 1, 'Version_of', VERSION_SCALING_FACTOR)
def _version_jaro(self, pair):
return self._add_info_jaro(pair, 'Version_of')
def _version_substring(self, pair):
return self._add_info_substring(pair, 'Version_of')
def _mention_generated_abbreviation(self, pair):
men_str_x = pair[0]['string'].replace('-', ' ')
men_abbr_x = ''.join([s[0] for s in men_str_x.split()]) if len(men_str_x.split()) > 2 else ''
men_str_y = pair[1]['string'].replace('-', ' ')
men_abbr_y = ''.join([s[0] for s in men_str_y.split()]) if len(men_str_y.split()) > 2 else ''
if not men_abbr_x and not men_abbr_y:
return 1.0
min_dist = min(self._jaro_fct(men_abbr_x, pair[1]['string']), self._jaro_fct(men_abbr_y, pair[0]['string']), self._jaro_fct(men_abbr_x, men_abbr_y))
return min_dist
def _norm_generated_abbreviation(self, pair):
men_str_x = pair[0]['norm'].replace('-', ' ')
men_abbr_x = ''.join([s[0] for s in men_str_x.split()]) if len(men_str_x.split()) > 2 else ''
men_str_y = pair[1]['norm'].replace('-', ' ')
men_abbr_y = ''.join([s[0] for s in men_str_y.split()]) if len(men_str_y.split()) > 2 else ''
if not men_abbr_x and not men_abbr_y:
return 1.0
min_dist = min(self._jaro_fct(men_abbr_x, pair[1]['norm']), self._jaro_fct(men_abbr_y, pair[0]['norm']), self._jaro_fct(men_abbr_x, men_abbr_y))
return min_dist
def _known_abbreviation(self, pair):
x_altnames = set([pair[0]['string']])
if pair[0]['string'] in self.dbpedia_data['unique_mapping']:
x_altnames.update(self.dbpedia_data['unique_mapping'][pair[0]['string']])
elif pair[0]['string'] in self.dbpedia_data['label_mapping']:
for key in self.dbpedia_data['label_mapping'][pair[0]['string']]:
x_altnames.update([key])
x_altnames.update(self.dbpedia_data['unique_mapping'][key])
y_altnames = set([pair[1]['string']])
if pair[1]['string'] in self.dbpedia_data['unique_mapping']:
y_altnames.update(self.dbpedia_data['unique_mapping'][pair[1]['string']])
elif pair[1]['string'] in self.dbpedia_data['label_mapping']:
for key in self.dbpedia_data['label_mapping'][pair[1]['string']]:
y_altnames.update([key])
y_altnames.update(self.dbpedia_data['unique_mapping'][key])
min_dist = 1.0
for i_x in x_altnames:
dist = self._jaro_fct(i_x, pair[1]['string'])
if dist < min_dist:
min_dist = dist
for i_y in y_altnames:
dist = self._jaro_fct(i_y, pair[0]['string'])
if dist < min_dist:
min_dist = dist
for i_x in x_altnames:
for i_y in y_altnames:
dist = self._jaro_fct(i_x, i_y)
if dist < min_dist:
min_dist = dist
return min_dist
def _given_abbreviation(self, pair):
x_infos = []
for rel in pair[0]['relations']:
if rel['type'] in ['Abbreviation_of', 'AlternativeName_of']:
x_infos.append(rel['string'])
y_infos = []
for rel in pair[1]['relations']:
if rel['type'] in ['Abbreviation_of', 'AlternativeName_of']:
y_infos.append(rel['string'])
min_dist = 1.0
for i_x in x_infos:
dist = self._jaro_fct(i_x, pair[1]['string'])
if dist < min_dist:
min_dist = dist
for i_y in y_infos:
dist = self._jaro_fct(i_y, pair[0]['string'])
if dist < min_dist:
min_dist = dist
for i_x in x_infos:
for i_y in y_infos:
dist = self._jaro_fct(i_x, i_y)
if dist < min_dist:
min_dist = dist
return min_dist
def features_for_pair(self, pair):
results = []
for fct in self.string_features_to_extract:
result = fct['function'](pair)
results.append(result)
for fct in self.context_features_to_extract:
result = fct['function'](pair)
results.append(result)
#print(results)
#print(torch.tensor(result).data)
return results
def features_for_pre_clustered(self, pair):
string_results = []
#string_results = np.empty((len(self.string_features_to_extract)))
for fct in self.string_features_to_extract:
result = fct['function'](pair)
string_results.append(result)
#string_results[idx] = result
context_results = []
#context_results = np.empty((len(pair[0]['contexts'])*len(pair[1]['contexts']), len(self.context_features_to_extract)))
#count = 0
for context_first in pair[0]['contexts']:
context_first['string'] = pair[0]['string']
for context_second in pair[1]['contexts']:
context_second['string'] = pair[1]['string']
context_results.append(string_results.copy())
for fct in self.context_features_to_extract:
result = fct['function']([context_first, context_second])
context_results[-1].append(result)
#context_results[count, idx] += result
#count += 1
return context_results#string_results, context_results
def apply_features(self, entities, ncores=5):
""" Applies an arbitrary number of features onto a list of entity names.
The feature is calculated once for each possible pair of entity names.
Arguments:
entities (list): list of entity names
"""
matrices = {}
for entity in entities:
entity['string'] = self._remove_spaces(entity['mention'])
entity['norm'] = self._normalize(entity['mention'])
pairs_to_compare = list(combinations(entities, 2))
# for fct in self.features_to_extract:
# start_time = time.time()
# if ncores > 1:
# with Pool(ncores) as p:
# result = p.map(fct['function'], pairs_to_compare)
# matrices[fct['name']] = result
# else:
# result = np.full((sum(range(len(entities)))), fct['init'], dtype=fct['data_type'])
# for idx, pair in enumerate(pairs_to_compare):
# distance = fct['function'](pair)
# result[idx] = distance
# matrices[fct['name']] = result
# end_time = time.time()
# print("It took {} seconds to calculate feature {} for {} inputs.".format(round(end_time-start_time, 3), fct['name'], len(entities)))
for fct in self.string_features_to_extract:
start_time = time.time()
if ncores > 1:
with Pool(ncores) as p:
result = p.map(fct['function'], pairs_to_compare)
matrices[fct['name']] = result
else:
result = np.full((sum(range(len(entities)))), fct['init'], dtype=fct['data_type'])
for idx, pair in enumerate(pairs_to_compare):
distance = fct['function'](pair)
result[idx] = distance
matrices[fct['name']] = result
end_time = time.time()
print("It took {} seconds to calculate feature {} for {} inputs.".format(round(end_time-start_time, 3), fct['name'], len(entities)))
for fct in self.context_features_to_extract:
start_time = time.time()
if ncores > 1:
with Pool(ncores) as p:
result = p.map(fct['function'], pairs_to_compare)
matrices[fct['name']] = result
else:
result = np.full((sum(range(len(entities)))), fct['init'], dtype=fct['data_type'])
for idx, pair in enumerate(pairs_to_compare):
distance = fct['function'](pair)
result[idx] = distance
matrices[fct['name']] = result
end_time = time.time()
print("It took {} seconds to calculate feature {} for {} inputs.".format(round(end_time-start_time, 3), fct['name'], len(entities)))
return matrices
def get_labels(self, entities, gold):
for g in gold:
g_sentences = articlenizer.get_tokenized_sentences(g['sentence'])
g['prosentence'] = [' '.join(s) for s in g_sentences]
g['proconcat'] = ' '.join(g['prosentence'])
for ent in entities:
for idx, g in enumerate(gold):
            if ent['paper_id'] == g['paper_id'] and (any([ent['sentence'] == s for s in g['prosentence']]) or ent['sentence'] == g['proconcat']):
if ent['mention'] == ' '.join(articlenizer.get_tokenized_sentences(g['mention'])[0]):
ent['gold_id'] = idx
break
pairs_to_compare = list(combinations(entities, 2))
start_time = time.time()
result = np.full((sum(range(len(entities)))), 0, dtype=np.int16)
for idx, pair in enumerate(pairs_to_compare):
if 'gold_id' in pair[0] and 'gold_id' in pair[1] and gold[pair[0]['gold_id']]['link'] == gold[pair[1]['gold_id']]['link']:
result[idx] = 1
end_time = time.time()
print("It took {} seconds to get labels for {} inputs.".format(round(end_time-start_time, 3), len(entities)))
return result
|
dave-s477/SoMeNLP | somenlp/feature_engineering/candidate_rules.py | <reponame>dave-s477/SoMeNLP
first_char_lower = lambda s: s[:1].lower() + s[1:] if s else ''
def pan_top_1(x):
'''use <> software'''
left_context = ['use']
right_context = ['software']
left_lemmas = x.sentence.get_left_tokens(x.start_idx, size=1, style='lemma')
right_lemmas = x.sentence.get_right_tokens(x.end_idx, size=1, style='lemma')
if not left_lemmas or not right_lemmas or len(left_context) != len(left_lemmas) or len(right_context) != len(right_lemmas):
return False
for cont, feat in zip(left_context, left_lemmas):
if cont != feat:
return False
for cont,feat in zip(right_context, right_lemmas):
if cont != feat:
return False
return True
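# e.g. pan_top_1 marks <X> in "... was analysed using <X> software ..."
# (the lemma of "using" is "use"); illustrative sentence, not from the corpus.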
def pan_top_2(x):
'''perform use <>'''
left_context = ['perform', 'use']
left_lemmas = x.sentence.get_left_tokens(x.start_idx, size=2, style='lemma')
if len(left_context) != len(left_lemmas):
return False
for c,l in zip(left_context, left_lemmas):
if c != l:
return False
return True
def pan_top_3(x):
'''be perform use <>'''
left_context = ['be', 'perform', 'use']
left_lemmas = x.sentence.get_left_tokens(x.start_idx, size=3, style='lemma')
if len(left_context) != len(left_lemmas):
return False
for c,l in zip(left_context, left_lemmas):
if c != l:
return False
return True
def pan_top_4(x):
'''analysis be perform use <>'''
left_context = ['analysis', 'be', 'perform', 'use']
left_lemmas = x.sentence.get_left_tokens(x.start_idx, size=4, style='lemma')
if len(left_context) != len(left_lemmas):
return False
for c,l in zip(left_context, left_lemmas):
if c != l:
return False
return True
def pan_top_5(x):
'''analyze use <>'''
left_context_ae = ['analyze', 'use']
left_context_be = ['analyse', 'use']
left_lemmas = x.sentence.get_left_tokens(x.start_idx, size=2, style='lemma')
if len(left_context_ae) != len(left_lemmas):
return False
for c_ae, c_be, l in zip(left_context_ae, left_context_be, left_lemmas):
if c_ae != l and c_be != l:
return False
return True
def pan_top_6(x):
'''analysis be perform with <>'''
left_context = ['analysis', 'be', 'perform', 'with']
left_lemmas = x.sentence.get_left_tokens(x.start_idx, size=4, style='lemma')
if len(left_context) != len(left_lemmas):
return False
for c,l in zip(left_context, left_lemmas):
if c != l:
return False
return True
def pan_top_7(x):
'''<> statistical software'''
right_context = ['statistical', 'software']
right_lemmas = x.sentence.get_right_tokens(x.end_idx, size=2, style='lemma')
if not right_lemmas or len(right_context) != len(right_lemmas):
return False
for c,l in zip(right_context, right_lemmas):
if c != l:
return False
return True
def pan_top_8(x):
'''<> software be use'''
right_context = ['software', 'be', 'use']
right_lemmas = x.sentence.get_right_tokens(x.end_idx, size=3, style='lemma')
if not right_lemmas or len(right_context) != len(right_lemmas):
return False
for c,l in zip(right_context, right_lemmas):
if c != l:
return False
return True
def text_is_in_brackets(x):
left_context = x.sentence.get_left_tokens(x.start_idx, 1, style='plain')
right_context = x.sentence.get_right_tokens(x.end_idx, 1, style='plain')
if len(x.base_span) != 1 and left_context and right_context and left_context[0] in ['(', '[', '{'] and right_context[0] in [')', ']', '}']:
return True
else:
return False
def developer(x):
'''software developer match'''
left_context = x.sentence.get_left_tokens(x.start_idx, 1, style='plain')
right_context = x.sentence.get_right_tokens(x.end_idx, 1, style='plain')
if len(x.base_span) != 1 and left_context and right_context and left_context[0] == '(' and right_context[0] == ')':
for tok in x.tokens:
if tok.lower() in ['inc.', 'ltd.', 'corp.', 'apply', 'inc', 'ltd', 'corp']:
return True
return False
|
dave-s477/SoMeNLP | somenlp/feature_engineering/word_rules.py | def upper_cased(x):
if x.isalpha() and x.isupper():
return True
else:
return False
def first_char_upper(x):
if x.isalpha() and x[0].isupper():
if all(c.islower() for c in x[1:]):
return True
return False
def mixed_case(x):
if x.isalpha():
if any([c.isupper() for c in x[1:]]) and any([c.islower() for c in x]):
return True
return False
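# Illustrative behaviour of these predicates (hypothetical tokens):
#   upper_cased('BLAST') -> True; first_char_upper('Matlab') -> True;
#   mixed_case('ImageJ') -> True; lower_case('python') -> True (defined below).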
def lower_case(x):
if x.isalpha() and x.islower():
return True
else:
return False |
dave-s477/SoMeNLP | somenlp/utils/gpu_setup.py | <gh_stars>0
import torch
import subprocess, re
# Nvidia-smi GPU memory parsing.
# Tested on nvidia-smi 370.23
def run_command(cmd):
"""Run command, return output as string."""
output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
return output.decode("ascii")
def list_available_gpus():
"""Returns list of available GPU ids."""
output = run_command("nvidia-smi -L")
# lines of the form GPU 0: TITAN X
gpu_regex = re.compile(r"GPU (?P<gpu_id>\d+):")
result = []
for line in output.strip().split("\n"):
m = gpu_regex.match(line)
assert m, "Couldn't parse " + line
result.append(int(m.group("gpu_id")))
return result
def gpu_memory_map():
"""Returns map of GPU id to memory allocated on that GPU."""
output = run_command("nvidia-smi")
gpu_output = output[output.find("GPU Memory"):]
# lines of the form
# | 0 8734 C python 11705MiB |
memory_regex = re.compile(r"[|]\s+?(?P<gpu_id>\d+)\D+?(?P<pid>\d+).+[ ](?P<gpu_memory>\d+)MiB")
rows = gpu_output.split("\n")
result = {gpu_id: 0 for gpu_id in list_available_gpus()}
for row in gpu_output.split("\n"):
m = memory_regex.search(row)
if not m:
continue
gpu_id = int(m.group("gpu_id"))
gpu_memory = int(m.group("gpu_memory"))
result[gpu_id] += gpu_memory
return result
def pick_gpu_lowest_memory():
"""Returns GPU with the least allocated memory"""
memory_gpu_map = [(memory, gpu_id) for (gpu_id, memory) in gpu_memory_map().items()]
best_memory, best_gpu = sorted(memory_gpu_map)[0]
return best_gpu
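# Illustrative usage (assumes nvidia-smi is installed and on PATH):
#   best_gpu = pick_gpu_lowest_memory()
#   torch.cuda.set_device(best_gpu)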
def setup_cuda(gpu):
if gpu and torch.cuda.device_count() > 0:
GPU = str(pick_gpu_lowest_memory())
print("Working on GPU: {}".format(GPU))
device = torch.device("cuda:{}".format(GPU))
else:
print("Working on CPU")
device = torch.device("cpu")
return device |
dave-s477/SoMeNLP | somenlp/NER/models/combined_lstm.py | <gh_stars>0
import torch
import torch.nn as nn
from .crf import CRF
class CombinedLSTM(nn.Module):
def __init__(self, device, char_size, word_size, tag2idx, emb_vecs, char_config, emb_config, drop_config, gen_config, **kwargs):
super(CombinedLSTM, self).__init__()
self.device = device
self.char_size = char_size
self.word_size = word_size
self.tag2idx = tag2idx
self.tag_size = len(tag2idx)
self.char_config = char_config
self.emb_config = emb_config
self.drop_config = drop_config
self.gen_config = gen_config
if not 'feature_dim' in kwargs:
raise(RuntimeError("FeatureLSTM requires feature_dim to be given."))
self.feature_dim = kwargs['feature_dim']
self.feature_projection = self.gen_config['feature_emb_dim'] > 0
self.char_embeds = nn.Embedding(self.char_size, self.char_config['emb_dim'])
self.char_lstm = nn.LSTM(self.char_config['emb_dim'], self.char_config['hidden_dim'], num_layers=self.char_config['layers'], bidirectional=True)
# nn.Embedding.from_pretrained freezes its weights by default, so pass
# freeze explicitly to honor the embedding training flag.
self.word_embeds = nn.Embedding.from_pretrained(emb_vecs, freeze=not self.emb_config['train'])
if self.feature_projection:
self.feature_remap = nn.Linear(self.feature_dim, self.gen_config['feature_emb_dim'])
feature_out_dim = self.gen_config['feature_emb_dim']
else:
feature_out_dim = self.feature_dim
self.lstm = nn.ModuleList()
for i in range(self.gen_config['layers']):
input_size = ( self.emb_config['dim'] + (self.char_config['hidden_dim'] * 2) + feature_out_dim ) if i == 0 else self.gen_config['hidden_dim'] * 2
self.lstm.append(nn.LSTM(input_size, self.gen_config['hidden_dim'], num_layers=1, bidirectional=True))
self.hidden2tag = nn.Linear(self.gen_config['hidden_dim'] * 2 * self.gen_config['layers'], self.tag_size)
self.crf = CRF(self.tag_size, device, init_parameters=None)
self.drop_char_emb = nn.Dropout(self.drop_config['char_emb'])
self.drop_char_feat = nn.Dropout(self.drop_config['char_feat'])
self.drop_word_emb = nn.Dropout(self.drop_config['word_emb'])
self.drop_lstm_feat = nn.Dropout(self.drop_config['lstm_feat'])
self.drop_dense_feat = nn.Dropout(self.drop_config['dense_feat'])
self.drop_features_in = nn.Dropout(self.drop_config['in_feat'])
self.drop_features_proj = nn.Dropout(self.drop_config['proj_feat'])
if self.gen_config['zero_init']:
self.hidden = self._init_hidden_zero()
self.char_hidden = self._init_hidden_zero()
else:
self.hidden = self._init_hidden_xavier_norm()
self.char_hidden = self._init_hidden_xavier_norm()
# init hidden idea:
# create param -> copy over batch -> concat with input length -> input to dense network
# -> use as initial lstm state...
def _init_hidden_zero(self, batch_size=1, hidden_dim=100):
"""Initializes the hidden state of the bi-lstm with zeros.
Args:
batch_size: Number of sequences in the batch.
hidden_dim: Hidden dimension of the LSTM cells.
Returns:
(float tensor, float tensor): Zero initial states for forward
and backward cells
"""
return (torch.zeros(2, batch_size, hidden_dim).to(self.device),
torch.zeros(2, batch_size, hidden_dim).to(self.device))
def _init_hidden_xavier_norm(self, batch_size=1, hidden_dim=100):
"""Randomly initializes the hidden state of the bi-lstm with Xavier-normal values.
Args:
batch_size: Number of sequences in the batch.
hidden_dim: Hidden dimension of the LSTM cells.
Returns:
(float tensor, float tensor): Random initial states for forward
and backward cells
"""
return (nn.init.xavier_normal_(torch.empty(2, batch_size, hidden_dim).to(self.device)),
nn.init.xavier_normal_(torch.empty(2, batch_size, hidden_dim).to(self.device)))
def _get_character_features(self, char_sentence):
"""Create character based features by applying a bi-lstm on a char embedding
Args:
char_sentence: A [batch_size, max_seq_len, max_word_len] tensor of char indices.
Returns:
char_features: A [batch_size, max_seq_len, feat_len] tensor of char features for each word
"""
original_size = char_sentence.size()
if self.gen_config['zero_init']:
self.char_hidden = self._init_hidden_zero(original_size[0] * original_size[1], self.char_config['hidden_dim'])
else:
self.char_hidden = self._init_hidden_xavier_norm(original_size[0] * original_size[1], self.char_config['hidden_dim'])
# Get character embedding
char_embeds = self.char_embeds(char_sentence)
char_embeds = self.drop_char_emb(char_embeds)
char_embeds = char_embeds.view(original_size[0] * original_size[1], original_size[2], char_embeds.size()[-1]).permute(1, 0, 2)
# Get char bi-lstm features
char_lstm_out, self.char_hidden = self.char_lstm(char_embeds, self.char_hidden)
concat_lstm_hidden = torch.cat((self.char_hidden[0][0], self.char_hidden[0][1]), dim=-1)
concat_lstm_hidden = concat_lstm_hidden.view(original_size[0], original_size[1], -1)
concat_lstm_hidden = self.drop_char_feat(concat_lstm_hidden)
return concat_lstm_hidden
def _get_lstm_features(self, sentence, char_feats, features, sequence_lengths):
"""Create word based features by applying a bi-lstm on a sentence
Args:
sentence: A [batch_size, max_seq_len] tensor of word indices for a word emb lookup
char_feats: A [batch_size, max_seq_len, char_feat_len] tensor of char features.
Returns:
lstm_feats: A [batch_size, max_seq_len, lstm_feat_len] tensor of lstm features.
"""
batch_size = sentence.size()[0]
# Get word embeddings
embeds = self.word_embeds(sentence)
embeds = self.drop_word_emb(embeds)
given_features = self.drop_features_in(features)
if self.feature_projection:
given_features = self.feature_remap(given_features)
given_features = self.drop_features_proj(given_features)
# Combine word and character features and mask inputs
concat_feats = torch.cat((embeds, char_feats, given_features), dim=-1)
masks = torch.arange(sentence.shape[1]).to(self.device)[None, :] < sequence_lengths[:, None]
masks = masks.squeeze(1).long().unsqueeze(2)
concat_feats = torch.mul(concat_feats, masks).permute(1, 0, 2)
# Get bi-lstm features
lstm_outputs = []
for i in range(self.gen_config['layers']):
if self.gen_config['zero_init']:
self.hidden = self._init_hidden_zero(batch_size, self.gen_config['hidden_dim'])
else:
self.hidden = self._init_hidden_xavier_norm(batch_size, self.gen_config['hidden_dim'])
concat_feats, self.hidden = self.lstm[i](concat_feats, self.hidden)
lstm_outputs.append(concat_feats.permute(1, 0, 2))
lstm_out = torch.cat(lstm_outputs, -1)
lstm_out = self.drop_lstm_feat(lstm_out)
return lstm_out
def get_features(self, char_sentence, sentence, features, lengths):
char_feats = self._get_character_features(char_sentence)
word_feats = self._get_lstm_features(sentence, char_feats, features, lengths)
# Dense transform to num of tags
feats = self.hidden2tag(word_feats)
feats = self.drop_dense_feat(feats)
return feats
def neg_log_likelihood(self, tags, **features):
"""Calculates log_likelihood for training a bi-lstm-crf
Args:
char_sentence: A [batch_size, max_seq_len, max_word_len] tensor of char indices.
sentence: A [batch_size, max_seq_len] tensor of word indices
tags: A [batch_size, max_seq_len] tensor of ground-truth tags.
lengths: A [batch_size, 1] tensor of actual sequence lengths for the padded input.
Returns:
neg_log_likelihood: A scalar mean negative log_likelihood over the entire batch
"""
# Getting the features
feats = self.get_features(features['char_sentence'], features['sentence'], features['feature_sentence'], features['lengths'])
# Getting the CRF score
log_likelihood = self.crf.crf_log_likelihood(feats, tags, features['lengths'].squeeze(1))
if self.gen_config['crf_norm']:
log_likelihood = log_likelihood / features['lengths'].squeeze(-1)
tag_weights = tags != self.tag2idx['O']
tag_weights = torch.where(tag_weights.any(1), torch.tensor(self.gen_config['sample_weight']).to(self.device), torch.tensor(1.0).to(self.device))
log_likelihood = log_likelihood * tag_weights
neg_log_likelihood = torch.mean(-log_likelihood)
return neg_log_likelihood
def forward(self, **features):
"""Calculates tag sequence and its score based on the bi-lstm-crf
Args:
char_sentence: A [batch_size, max_seq_len, max_word_len] tensor of char indices.
sentence: A [batch_size, max_seq_len] tensor of word indices
lengths: A [batch_size, 1] tensor of actual sequence lengths for the padded input.
Returns:
(tag_seq, seq_score, seq_mask): best tag sequence per sentence, its score and a mask over the padded positions
"""
# Getting the features
feats = self.get_features(features['char_sentence'], features['sentence'], features['feature_sentence'], features['lengths'])
# Find the best path, given the features.
tag_seq, seq_score, seq_mask = self.crf.viterbi_decode_batch(feats, features['lengths'].squeeze(1))
return tag_seq, seq_score, seq_mask |
dave-s477/SoMeNLP | somenlp/distant_supervision/perform_wiki_data_queries.py | import re
import sys
import time
import json
import requests
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
from urllib.parse import quote_plus
WIKIDATA_ID = re.compile(r'Q\d{5,}')
def parse_query(plain_query, lang, default_address='https://query.wikidata.org/bigdata/namespace/wdq/sparql?query='):
"""Encode Wikidata query to pass it as an URL
Args:
plain_query (str): plain text query
lang (str): wikidata language identifier for the query
default_address (str, optional): url for wikidata sparql interface. Defaults to 'https://query.wikidata.org/bigdata/namespace/wdq/sparql?query='.
Returns:
str: encoded query
"""
plain_query = re.sub("'en'", '"' + lang + '"', plain_query)
encoded_query = quote_plus(plain_query, safe=r"()\{\}")
encoded_query = re.sub(r"\+", "%20", encoded_query)
complete_url = '{}{}'.format(default_address, encoded_query)
return complete_url
def execute_query(query_url):
"""Perform wikidata query. Might not be handled if no user agent is provided.
Args:
query_url (str): url query to execute
Returns:
str: json formatted response text
"""
#user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
user_agent = ''
headers = {
'Accept': 'json',
'User-Agent': user_agent
}
response = requests.get(query_url, headers=headers)
if response.status_code != 200:
print("Received HTTP-Statuscode " + str(response.status_code))
if response.status_code == 400:
print("Error is probably in the query syntax.")
sys.exit(1)
elif response.status_code == 429:
print("Too many requests were sent: ")
print("Sleeping: " + int(response.headers["Retry-After"]))
time.sleep(int(response.headers["Retry-After"]))
print("Continuing with query")
response = requests.get(query_url, headers=headers)
if response.status_code != 200:
raise(RuntimeError("Got another error while querying wikidata {}\nShutting down..".format(response.status_code)))
else:
return response.text
else:
return response.text
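# Illustrative usage (the query string and language are placeholders):
#   url = parse_query("SELECT ?item ?itemLabel WHERE { ... }", 'en')
#   response_text = execute_query(url)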
def query_wikidata(query_config):
"""Perform a series of wikidata queries based on a given configuration
Args:
query_config (dictionary): configuration of queries saved as json
Returns:
dictionary: merged responses for given queries
"""
results = {}
with open(query_config) as qc:
queries = json.load(qc)
for query_name, query_data in queries.items():
print("Performing wikidata queries for {}".format(query_name))
target_main_names = '{}_main_name'.format(query_data['target'])
target_alt_names = '{}_alt_name'.format(query_data['target'])
if target_main_names not in results.keys():
results[target_main_names] = set()
results[target_alt_names] = set()
for lang in query_data['languages']:
count = 0
query_string = parse_query(query_data["query_string"], lang)
query_response = execute_query(query_string)
soup = BeautifulSoup(query_response, 'lxml')
for res in soup.findAll('result'):
for bind in res.findAll('binding'):
count += 1
if bind['name'] == 'itemLabel':
results[target_main_names].update([bind.text.rstrip().lstrip()])
elif bind['name'] == 'abbreviation':
results[target_alt_names].update([bind.text.rstrip().lstrip()])
print("Processed {} entries for lang {}".format(count, lang))
return results
|
dave-s477/SoMeNLP | somenlp/NER/data_handler.py | import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
import math
import random
import copy
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from pathlib import Path
from gensim.models import KeyedVectors
from itertools import zip_longest
from articlenizer import articlenizer as art
from transformers import BertTokenizer
from .LSTM_dataset import LSTMDataset
from .BERT_dataset import BERTDataset, BERTMultiDataset
BERT_MAX_LENGTH = 256
class DataHandler():
def __init__(self, data_config=None, data_file_extension='.data.txt', label_file_extension='.labels.txt', feature_file_extension='', relation_file_extension='', output_handler=None, checkpoint=None, padding='<PAD>', unknown='<UNK>', batch_size=32, max_word_length=-1, max_sent_length=-1, data_files=None, prepro=False, tokenizer=None, multi_task=False):
self.data_config = copy.deepcopy(data_config)
self.data_file_extension = data_file_extension
self.label_file_extension = label_file_extension
self.feature_file_extension = feature_file_extension
self.relation_file_extension = relation_file_extension
self.checkpoint = checkpoint
self.padding = padding
self.unknown = unknown
self.output_handler = output_handler
self.batch_size = batch_size
self.max_word_length = max_word_length
self.max_sent_length = max_sent_length
self.data_files = data_files
self.prepro = prepro
self.feature_dim = None
self.multi_task_mapping = multi_task
self.data = []
self.features = []
self.labels = []
self._load_tag_remapping()
self._load_relation_remapping()
self.tokenizer = tokenizer
if tokenizer is not None:
self._setup_tokenizer(tokenizer)
def _setup_tokenizer(self, location):
self.tokenizer = BertTokenizer.from_pretrained(location, do_lower_case=False)
self.padding = "[PAD]"
self.special_toks = {
'pad_tok': self.tokenizer.vocab["[PAD]"],
'sep_tok': self.tokenizer.vocab["[SEP]"],
'cls_tok': self.tokenizer.vocab["[CLS]"]
}
def _load_tag_remapping(self):
if self.data_config is not None and 'transform' in self.data_config and 'mapping' in self.data_config['transform'] and self.data_config['transform']['mapping']:
print("Loading tag remapping: {}".format(self.data_config['transform']['mapping']))
mapping = Path(self.data_config['transform']['mapping'])
with mapping.open(mode='r') as mapping_j:
self.tag_remapping = json.load(mapping_j)
elif self.data_config is not None and 'transform' in self.data_config and len(self.data_config['transform']) > 0:
print("Loading tag mappings for a multi-label tagging problem")
self.tag_remapping = {}
self.multi_task_mapping = True
for k, v in self.data_config['transform'].items():
with Path(v).open(mode='r') as mapping_j:
self.tag_remapping[k] = json.load(mapping_j)
else:
self.tag_remapping = None
def _load_relation_remapping(self):
if self.data_config is not None and 'transform' in self.data_config and 'relation_mapping' in self.data_config['transform'] and self.data_config['transform']['relation_mapping']:
print("Loading relation remapping: {}".format(self.data_config['transform']['relation_mapping']))
mapping = Path(self.data_config['transform']['relation_mapping'])
with mapping.open(mode='r') as mapping_j:
self.relation_remapping = json.load(mapping_j)
else:
self.relation_remapping = None
def _adjust_tag(self, tag, key=''):
if self.tag_remapping is None or tag == 'O':
return tag
else:
tag_prefix, tag_name = tag.split('-')
if not key:
remapped_tag = self.tag_remapping[tag_name]
else:
remapped_tag = self.tag_remapping[key][tag_name]
if remapped_tag == 'O':
return 'O'
else:
return '{}-{}'.format(tag_prefix, remapped_tag)
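# Illustrative (mapping contents are hypothetical): with a remapping of
# {'Application': 'software'}, _adjust_tag('B-Application') returns
# 'B-software', and any tag remapped to 'O' loses its BIO prefix entirely.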
def stream_files(self):
for f_conf in self.data_files:
if self.prepro:
text = self._prepro_text_file(f_conf['in'])
else:
plain_text = self._read_text_file(f_conf['in'])
text = []
for line in plain_text:
text.append(line.split())
if 'feat' in f_conf:
features = self._read_feature_file(f_conf['feat'])
else:
features = []
if self.tokenizer is None:
characters, ids, tags, features = self._prepare_prediction_data(text, features)
input_data = LSTMDataset(characters, ids, tags, features, self.encoding['char2idx'], self.padding, self.max_word_length, self.max_sent_length)
sampler = SequentialSampler(input_data)
data_loader = DataLoader(input_data, sampler=sampler, batch_size=self.batch_size, collate_fn=self._collate_fn)
else:
ids, tags, masks, lengths = self._prepare_bert_prediction_data(text)
ids = self._pad_to_length(ids, length=BERT_MAX_LENGTH, fill_value=self.special_toks['pad_tok'], dtype=torch.long)
masks = self._pad_to_length(masks, length=BERT_MAX_LENGTH, fill_value=0, dtype=torch.long)
if not self.multi_task_mapping:
tags = self._pad_to_length(tags, length=BERT_MAX_LENGTH, fill_value=self.encoding['tag2idx']['O'], dtype=torch.long)
input_data = BERTDataset(ids, tags, masks)
else:
for k in tags.keys():
tags[k] = self._pad_to_length(tags[k], length=BERT_MAX_LENGTH, fill_value=self.encoding['tag2idx'][k]['O'], dtype=torch.long)
input_data = BERTMultiDataset(ids, tags, masks, lengths)
sampler = SequentialSampler(input_data)
data_loader = DataLoader(input_data, sampler=sampler, batch_size=self.batch_size)
yield [{'out': f_conf['out'], 'out-text': f_conf['out-text']}, data_loader, text]
def load_data_from_config(self):
for dataset, dataset_setup in self.data_config['sets'].items():
for sub_dataset in dataset_setup:
sub_dataset['all_files'] = []
for folder in sub_dataset['folder']:
files = list(Path(folder).rglob('*{}'.format(self.data_file_extension)))
for f in files:
base_file_name = f.name.split(self.data_file_extension)[0]
sample_files = {}
if self.data_file_extension:
f_entry = Path('{}/{}{}'.format(str(f.parents[0]), base_file_name, self.data_file_extension))
sample_files[self.data_file_extension] = f_entry
if self.label_file_extension:
f_entry = Path('{}/{}{}'.format(str(f.parents[0]), base_file_name, self.label_file_extension))
if not f_entry.is_file():
raise(RuntimeError("Label file {} not found".format(f_entry)))
sample_files[self.label_file_extension] = f_entry
if self.feature_file_extension:
f_entry = Path('{}/{}{}'.format(str(f.parents[0]), base_file_name, self.feature_file_extension))
if not f_entry.is_file():
raise(RuntimeError("Feature file {} not found".format(f_entry)))
sample_files[self.feature_file_extension] = f_entry
if self.relation_file_extension:
f_entry = Path('{}/{}{}'.format(str(f.parents[0]), base_file_name, self.relation_file_extension))
if not f_entry.is_file():
raise(RuntimeError("Relation file {} not found".format(f_entry)))
sample_files[self.relation_file_extension] = f_entry
sub_dataset['all_files'].append(sample_files)
def encoding(self, tags_only=False):
if self.checkpoint is not None and self.checkpoint['model']:
print("Loading given encodings")
self.encoding = self.output_handler.load_encoding()
for k, v in self.encoding.items():
if self.multi_task_mapping and k.endswith('tag2name'):
for sk, vk in v.items():
vk_new = {int(key): value for key, value in vk.items()}
self.encoding[k][sk] = vk_new
elif k.endswith('name'):
v_new = {int(key): value for key, value in v.items()}
self.encoding[k] = v_new
else:
print("Generating new encodings")
tag2idx, tag2name, word2idx, word2name, char2idx, char2name = {}, {}, {}, {}, {}, {}
if not tags_only:
for dataset, dataset_setup in self.data_config['sets'].items():
for sub_dataset in dataset_setup:
for f in sub_dataset['all_files']:
with f[self.data_file_extension].open(mode='r') as in_f:
for line in in_f:
for word in line.rstrip().split():
if word not in word2idx:
word2name[len(word2idx)] = word
word2idx[word] = len(word2idx)
for char in word:
if char not in char2idx:
char2name[len(char2idx)] = char
char2idx[char] = len(char2idx)
if self.multi_task_mapping:
print("Considering a multi-task problem")
for dataset, dataset_setup in self.data_config['sets'].items():
for sub_dataset in dataset_setup:
for f in sub_dataset['all_files']:
with f[self.label_file_extension].open(mode='r') as in_f:
for line in in_f:
for tag in line.rstrip().split():
for mapping_key in self.tag_remapping.keys():
if mapping_key not in tag2idx:
tag2idx[mapping_key] = {}
tag2name[mapping_key] = {}
t = self._adjust_tag(tag, mapping_key)
if tags_only and '-' in t:
t = t.split('-')[-1]
if t not in tag2idx[mapping_key]:
tag2name[mapping_key][len(tag2idx[mapping_key])] = t
tag2idx[mapping_key][t] = len(tag2idx[mapping_key])
for k, v in tag2idx.items():
to_add = set()
for tk, vk in v.items():
if tk.startswith('B-') and 'I-' + tk.split('B-')[-1] not in v:
to_add.update(['I-' + tk.split('B-')[-1]])
for i in to_add:
tag2name[k][len(tag2idx[k])] = i
tag2idx[k][i] = len(tag2idx[k])
else:
print("considering a single-task problem")
for dataset, dataset_setup in self.data_config['sets'].items():
for sub_dataset in dataset_setup:
for f in sub_dataset['all_files']:
with f[self.label_file_extension].open(mode='r') as in_f:
for line in in_f:
for tag in line.rstrip().split():
t = self._adjust_tag(tag)
if tags_only and '-' in t:
t = t.split('-')[-1]
if t not in tag2idx:
tag2name[len(tag2idx)] = t
tag2idx[t] = len(tag2idx)
if not tags_only:
word2name[len(word2idx)] = self.padding
word2idx[self.padding] = len(word2idx)
char2name[len(char2idx)] = self.padding
char2idx[self.padding] = len(char2idx)
word2name[len(word2idx)] = self.unknown
word2idx[self.unknown] = len(word2idx)
char2name[len(char2idx)] = self.unknown
char2idx[self.unknown] = len(char2idx)
self.encoding = {
'tag2idx': tag2idx,
'tag2name': tag2name,
'word2idx': word2idx,
'word2name': word2name,
'char2idx': char2idx,
'char2name': char2name
}
self.output_handler.save_json(self.encoding, name='encoding')
def _prepro_text_file(self, path):
with path.open(mode='r') as in_f:
text_in = in_f.read()
text_prepro = art.get_tokenized_sentences(text_in)
return text_prepro
def _read_text_file(self, path, read_empty=False):
text = []
with path.open(mode='r') as in_f:
for line in in_f:
clean_line = line.rstrip()
if clean_line:
text.append(clean_line)
elif read_empty:
text.append([])
return text
def _read_feature_file(self, path):
features = np.load(str(path), allow_pickle=True)
if self.feature_dim is None:
self.feature_dim = features['features'][0].shape[-1]
return features['features']
def _read_relation_file(self, path):
relations = []
with path.open(mode='r') as in_f:
for line in in_f:
if not line.rstrip():
relations.append([])
else:
sentence_rels = []
rel_strings = line.split(';;')
for rel in rel_strings:
if rel.rstrip():
rel_type, ent_1_str, ent_1_beg, ent_1_num, ent_2_str, ent_2_beg, ent_2_num = rel.split('\t')
if self.relation_remapping is not None:
rel_type = self.relation_remapping[rel_type]
sentence_rels.append({
'type': rel_type,
'ent1_s': ent_1_str,
'ent1_b': ent_1_beg,
'ent1_n': ent_1_num,
'ent2_s': ent_2_str,
'ent2_b': ent_2_beg,
'ent2_n': ent_2_num
})
relations.append(sentence_rels)
return relations
def _collate_fn(self, batch):
characters = [x['characters'] for x in batch]
ids = [x['ids'] for x in batch]
tags = [x['tags'] for x in batch]
features = [x['features'] for x in batch]
max_word_length_batch = 0
if self.max_word_length > 0:
max_word_length_batch = self.max_word_length
else:
max_word_length_batch = 0
for sent in characters:
if sent.size()[-1] > max_word_length_batch:
max_word_length_batch = sent.size()[-1]
length = torch.tensor([x.size() for x in ids])
padded_ids = pad_sequence(ids, batch_first=True, padding_value=self.encoding['word2idx'][self.padding])
padded_tags = pad_sequence(tags, batch_first=True, padding_value=self.encoding['tag2idx']['O'])
max_sent_length_batch = 0
if self.max_sent_length > 0:
padded_ids = F.pad(padded_ids, (0, self.max_sent_length-padded_ids.shape[-1]), "constant", self.encoding['word2idx'][self.padding])
padded_tags = F.pad(padded_tags, (0, self.max_sent_length-padded_tags.shape[-1]), "constant", self.encoding['tag2idx']['O'])
characters = [F.pad(x, (0,(max_word_length_batch-x.size()[-1]),0,(self.max_sent_length-x.size()[0])), "constant", self.encoding['char2idx'][self.padding]).unsqueeze(0) for x in characters]
else:
characters = [F.pad(x, (0,(max_word_length_batch-x.size()[-1]),0,(padded_ids.size()[-1]-x.size()[0])), "constant", self.encoding['char2idx'][self.padding]).unsqueeze(0) for x in characters]
if features[0] is not None:
padded_features = pad_sequence(features, batch_first=True)
if self.max_sent_length > 0:
padded_features = F.pad(padded_features, (0, 0, 0, self.max_sent_length-padded_features.shape[-2]), "constant", 0)
else:
padded_features = None
characters = torch.cat(characters, dim=0)
return {
'chars': characters,
'ids': padded_ids,
'tags': padded_tags,
'lengths': length,
'features': padded_features
}
def _prepare_prediction_data(self, sentences, features, tags=[]):
character_ids = []
input_ids = []
tag_ids = []
feat_ids = []
for sentence, feat, tag in zip_longest(sentences, features, tags, fillvalue=[]):
# TODO how to handle unknowns?
character_sentence = [[self.encoding['char2idx'][i] if i in self.encoding['char2idx'] else self.encoding['char2idx'][self.unknown] for i in w] for w in sentence]
tokenized_sentence = [self.encoding['word2idx'][w] if w in self.encoding['word2idx'] else self.encoding['word2idx'][self.unknown] for w in sentence]
character_ids.append(character_sentence)
input_ids.append(tokenized_sentence)
tag_ids.append(tag)
feat_ids.append(feat)
return character_ids, input_ids, tag_ids, feat_ids
def _prepare_training_data(self, sentences, tags, features, keep_prob=1.0):
character_ids = []
input_ids = []
tags_ids = []
feat_ids = []
for sentence, tag, feat in zip_longest(sentences, tags, features, fillvalue=[]):
labels = tag.split()
if any([not t.startswith('O') for t in labels]) or math.isclose(keep_prob, 1.0, rel_tol=1e-5) or random.random() < keep_prob:
words = sentence.split()
labels = [self.encoding['tag2idx'][self._adjust_tag(t)] for t in labels]
character_sentence = [[self.encoding['char2idx'][i] for i in j] for j in words]
tokenized_sentence = [self.encoding['word2idx'][w] for w in words]
character_ids.append(character_sentence)
input_ids.append(tokenized_sentence)
tags_ids.append(labels)
feat_ids.append(feat)
return character_ids, input_ids, tags_ids, feat_ids
def _prepare_bert_prediction_data(self, sentences, tags=[]):
input_ids = []
if not self.multi_task_mapping:
tags_ids = []
else:
tags_ids = {}
for k in self.encoding['tag2idx'].keys():
tags_ids[k] = []
attention_masks = []
length = []
for sentence, tag in zip_longest(sentences, tags, fillvalue=[]):
tokenized_sentence = []
for word in sentence:
tokenized_word = self.tokenizer.tokenize(word)
tokenized_sentence.extend(tokenized_word)
inputs = self.tokenizer.encode_plus(tokenized_sentence, add_special_tokens=True, return_attention_mask=True)
input_ids.append(inputs["input_ids"])
attention_masks.append(inputs['attention_mask'])
length.append(min(len(inputs['input_ids']), BERT_MAX_LENGTH))
if not self.multi_task_mapping:
tags_ids.append(tag)
else:
for mapping_key in self.encoding['tag2idx'].keys():
tags_ids[mapping_key].append(tag)
return input_ids, tags_ids, attention_masks, length
def _prepare_bert_training_data(self, sentences, tags, keep_prob):
input_ids = []
if not self.multi_task_mapping:
tags_ids = []
else:
tags_ids = {}
for k in self.tag_remapping.keys():
tags_ids[k] = []
attention_masks = []
length = []
for sentence, tag in zip(sentences, tags):
labels = tag.split()
if any([not t.startswith('O') for t in labels]) or math.isclose(keep_prob, 1.0, rel_tol=1e-5) or random.random() < keep_prob:
tokenized_sentence = []
tokenized_labels = []
words = sentence.split()
for word, label in zip(words, labels):
tokenized_word = self.tokenizer.tokenize(word)
n_subwords = len(tokenized_word)
tokenized_sentence.extend(tokenized_word)
if not label.startswith('B-'):
tokenized_labels.extend([label] * n_subwords)
else:
tokenized_labels.extend([label])
tokenized_labels.extend([label.replace('B-', 'I-')] * (n_subwords-1))
inputs = self.tokenizer.encode_plus(tokenized_sentence, add_special_tokens=True, return_attention_mask=True)
tokenized_labels = ["O"] + tokenized_labels + ["O"]
input_ids.append(inputs["input_ids"])
attention_masks.append(inputs['attention_mask'])
length.append(min(len(inputs["input_ids"]), BERT_MAX_LENGTH))
if not self.multi_task_mapping:
label_ids = [self.encoding['tag2idx'][self._adjust_tag(t)] for t in tokenized_labels]
tags_ids.append(label_ids)
else:
for mapping_key in self.tag_remapping.keys():
label_ids = [self.encoding['tag2idx'][mapping_key][self._adjust_tag(t, mapping_key)] for t in tokenized_labels]
tags_ids[mapping_key].append(label_ids)
return input_ids, tags_ids, attention_masks, length
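# Illustrative subword label propagation (tokenizer output is hypothetical):
# if 'TensorFlow' is labeled 'B-software' and tokenized into ['Tensor', '##Flow'],
# the resulting labels are ['B-software', 'I-software']; non-initial labels are
# simply repeated for every subword.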
def load_input(self):
for dataset, dataset_setup in self.data_config['sets'].items():
for sub_dataset in dataset_setup:
sub_dataset['sentences'] = []
sub_dataset['tags'] = []
sub_dataset['features'] = []
sub_dataset['relations'] = []
for file_config in sub_dataset['all_files']:
if self.data_file_extension:
sub_dataset['sentences'].extend(self._read_text_file(file_config[self.data_file_extension]))
if self.label_file_extension:
sub_dataset['tags'].extend(self._read_text_file(file_config[self.label_file_extension]))
if self.feature_file_extension:
sub_dataset['features'].extend(self._read_feature_file(file_config[self.feature_file_extension]))
if self.relation_file_extension:
sub_dataset['relations'].extend(self._read_relation_file(file_config[self.relation_file_extension]))
def _pad_to_length(self, sequences, length=512, fill_value=0, dtype=torch.long):
out_seqs = torch.full((len(sequences), length), fill_value, dtype=dtype)
for idx, ids in enumerate(sequences):
ids_to_insert = ids[:length]
out_seqs[idx, :len(ids_to_insert), ...] = torch.tensor(ids_to_insert)
return out_seqs
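# Illustrative: _pad_to_length([[1, 2, 3], [4]], length=5, fill_value=0)
# -> tensor([[1, 2, 3, 0, 0],
#            [4, 0, 0, 0, 0]])
# Sequences longer than `length` are truncated.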
def data_loaders(self):
for dataset, dataset_setup in self.data_config['sets'].items():
for idx, sub_dataset in enumerate(dataset_setup):
if self.tokenizer is None:
characters, ids, tags, features = self._prepare_training_data(sub_dataset['sentences'], sub_dataset['tags'], sub_dataset['features'], sub_dataset['keep_neg_sample_prob'])
input_data = LSTMDataset(characters, ids, tags, features, self.encoding['char2idx'], self.padding, self.max_word_length, self.max_sent_length)
sampler = RandomSampler(input_data)
sub_dataset['dataloader'] = DataLoader(input_data, sampler=sampler, batch_size=self.batch_size, collate_fn=self._collate_fn)
else:
input_ids, tag_ids, attention_masks, lengths = self._prepare_bert_training_data(sub_dataset['sentences'], sub_dataset['tags'], sub_dataset['keep_neg_sample_prob'])
input_ids = self._pad_to_length(input_ids, length=BERT_MAX_LENGTH, fill_value=self.special_toks['pad_tok'], dtype=torch.long)
attention_masks = self._pad_to_length(attention_masks, length=BERT_MAX_LENGTH, fill_value=0, dtype=torch.long)
if not self.multi_task_mapping:
tag_ids = self._pad_to_length(tag_ids, length=BERT_MAX_LENGTH, fill_value=self.encoding['tag2idx']['O'], dtype=torch.long)
input_data = BERTDataset(input_ids, tag_ids, attention_masks)
else:
for k in tag_ids.keys():
tag_ids[k] = self._pad_to_length(tag_ids[k], length=BERT_MAX_LENGTH, fill_value=self.encoding['tag2idx'][k]['O'], dtype=torch.long)
input_data = BERTMultiDataset(input_ids, tag_ids, attention_masks, lengths)
sampler = RandomSampler(input_data)
sub_dataset['dataloader'] = DataLoader(input_data, sampler=sampler, batch_size=self.batch_size)
def word_embedding(self, emb_conf):
if self.checkpoint is not None and self.checkpoint['model']:
print("Using a pre-trained model --- word embedding is loaded as part of the model")
embedding_weights = torch.zeros(len(self.encoding['word2idx']), emb_conf['dim'])
else:
print("Loading word embedding: {}".format(emb_conf['file']))
pretrained_word_vectors = KeyedVectors.load_word2vec_format(emb_conf['file'], binary=True)
if emb_conf['zero_init']:
embedding_weights = torch.zeros(len(self.encoding['word2idx']), emb_conf['dim'])
else:
embedding_weights = torch.randn(len(self.encoding['word2idx']), emb_conf['dim'])
unknown_word_count = 0
for word, idx in self.encoding['word2idx'].items():
try:
embedding_weights[idx] = torch.tensor(pretrained_word_vectors[word].copy())
except KeyError:
unknown_word_count += 1
print("{}/{} word from the dataset do no exist in the word embedding".format(unknown_word_count, len(self.encoding['word2idx'])))
return embedding_weights
|
dave-s477/SoMeNLP | setup.py | <reponame>dave-s477/SoMeNLP
import sys
if sys.version_info < (3, 7):
sys.exit('Python >= 3.7 is required')
from setuptools import setup
def readme():
with open('Readme.md') as f:
return f.read()
setup(name='SoMeNLP',
version='0.1',
description='NLP procedures for scientific articles and software extraction.',
long_description=readme(),
classifiers=[
'License :: OSI Approved :: GPLv3',
'Programming Language :: Python :: 3.7',
'Topic :: NER :: NLP',
],
keywords='scientific entity relation software paper article',
url='https://github.com/dave-s477/SoMeNLP',
author='<NAME>',
author_email='<EMAIL>',
license='GPLv3',
packages=['somenlp'],
scripts=[
'bin/train_word_emb',
'bin/distant_supervision',
'bin/custom_feature_gen',
'bin/train_model',
'bin/tune_model',
'bin/tune_relext',
'bin/predict',
'bin/split_data',
'bin/train_relext',
'bin/load_dbpedia_info',
'bin/entity_disambiguation',
'bin/somesci_disambiguation_input',
'bin/map_unique_names_to_files',
'bin/predict_relext',
'bin/combine_annotations',
'bin/generate_file_list'
],
install_requires=[
'pytest',
'gensim>=4.0.1',
'torch',
'tensorboard',
'pandas',
'numpy',
'beautifulsoup4',
'wiktextract',
'wget',
'NLTK',
'scikit-learn',
'transformers==4.6.1',
'SPARQLWrapper',
'python-levenshtein',
'articlenizer @ https://github.com/dave-s477/articlenizer/tarball/master#egg=package'
],
include_package_data=True,
zip_safe=False)
|
dave-s477/SoMeNLP | somenlp/RE/RE_model.py | <filename>somenlp/RE/RE_model.py
import pandas as pd
import numpy as np
import pickle
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import cross_validate
from itertools import combinations
class REmodel():
"""Relation extraction model
"""
def __init__(self, gen_config, model_config, data_handler, output_handler, output_config, feature_generator=None):
"""Init
Args:
model_config (dict): configuration of the RE model
data_handler (somenlp.NER.DataHandler)
output_handler (somenlp.NER.OutputHandler)
"""
self.gen_config = gen_config
self.model_config = model_config
self.data_handler = data_handler
self.output_handler = output_handler
self.feature_generator = feature_generator
self.output_config = output_config
if gen_config['type'] == 'RF':
self.model = RandomForestClassifier(
n_estimators=model_config["n_estimators"],
criterion=model_config["criterion"],
max_depth=model_config["max_depth"],
min_samples_split=model_config["min_samples_split"],
min_samples_leaf=model_config["min_samples_leaf"],
min_weight_fraction_leaf=model_config["min_weight_fraction_leaf"],
max_features=model_config["max_features"],
max_leaf_nodes=model_config["max_leaf_nodes"],
max_samples=model_config["max_samples"]
)
elif gen_config['type'] == 'NN':
if model_config['scaler']:
self.scaler = StandardScaler()
self.model = MLPClassifier(
solver = model_config['solver'],
batch_size = gen_config['batch_size'],
max_iter = model_config['epochs'],
learning_rate_init = model_config['lr'],
hidden_layer_sizes=tuple(model_config['layers'])
)
else:
raise(RuntimeError("Got unsupported model type {}".format(model_config['type'])))
def _train_model(self, data):
"""Train model based on provided data
Args:
data (list): features
"""
input_data = pd.DataFrame(data)
y_train = input_data['label'].values
X_train = input_data.loc[:, input_data.columns != 'label']
if self.gen_config['type'] == 'NN' and self.model_config['scaler']:
self.scaler.fit(X_train)
X_train = self.scaler.transform(X_train)
# keep the training features so show_features_importance can report on them
self.X_train = X_train
self.model.fit(X_train, y_train)
def train(self):
"""Train RE model
"""
all_train_samples = []
for idx, dataset in enumerate(self.data_handler.data_config['sets']['train']):
print("Getting samples from {} dataset from train set".format(idx))
all_train_samples.extend(dataset['relext_feature_list'])
self._train_model(all_train_samples)
def cross_val(self):
all_train_samples = []
for idx, dataset in enumerate(self.data_handler.data_config['sets']['train']):
print("Getting samples from {} dataset from train set".format(idx))
all_train_samples.extend(dataset['relext_feature_list'])
input_data = pd.DataFrame(all_train_samples)
y = input_data['label'].values
X = input_data.loc[:, input_data.columns != 'label']
if self.gen_config['type'] == 'NN' and self.model_config['scaler']:
self.scaler.fit(X)
X = self.scaler.transform(X)
cv_results = cross_validate(self.model, X, y, cv=5, scoring=('precision_micro', 'recall_micro', 'f1_micro', 'precision_macro', 'recall_macro', 'f1_macro'))
for k, v in cv_results.items():
if k.startswith('test'):
print('{}: {}'.format(k, v))
def test(self):
"""Test RE model
"""
out_s = ''
cls_for_latex = {}
for idx, dataset in enumerate(self.data_handler.data_config['sets']['test']):
print("Start testing on corpus {}".format(idx))
out_s += 'Test Corpus {}: {}\n\n'.format(idx, dataset['name'])
input_data = pd.DataFrame(dataset['relext_feature_list'])
y_test = input_data['label'].values
X_test = input_data.loc[:, input_data.columns != 'label']
if self.gen_config['type'] == 'NN' and self.model_config['scaler']:
X_test = self.scaler.transform(X_test)
predictions = self.model.predict(X_test)
if "confusion" in self.output_config and self.output_config['confusion']:
cm = confusion_matrix(y_test, predictions, labels=self.model.classes_)
out_s += "Confusion Matrix:\n{}\n{}\n\n".format(self.model.classes_, cm)
if "no_neg" in self.output_config and self.output_config['no_neg']:
cl_no_negatives = classification_report(y_test, predictions, labels=self.model.classes_[self.model.classes_!='none'])
out_s += "Classification without negative samples:\n{}\n\n".format(cl_no_negatives)
if "with_neg" in self.output_config and self.output_config['with_neg']:
cl_with_negatives = classification_report(y_test, predictions, labels=self.model.classes_)
out_s += "Classification with negative samples:\n{}\n\n".format(cl_with_negatives)
if "latex" in self.output_config and self.output_config['latex']:
cl_no_negatives_dict = classification_report(y_test, predictions, labels=self.model.classes_[self.model.classes_!='none'], output_dict=True)
cls_for_latex[dataset['name']] = cl_no_negatives_dict
if "latex" in self.output_config and self.output_config['latex']:
out_s += "Formatted for latex: {}".format(self.output_handler.cl_for_latex(cls_for_latex))
print(out_s)
if "save_log" in self.output_config and self.output_config['save_log']:
with open('{}/rel_extraction_result.log'.format(self.output_handler.log_dir), 'w') as out_file:
out_file.write(out_s)
def predict(self, output):
print("Starting prediction")
iterator = self.feature_generator.stream_files()
for sample in iterator:
prediction_outputs = []
for idx, (sentence, features, entities) in enumerate(zip(sample['sentences'], sample['relext_feature_list'], sample['entity_list'])):
prediction_outputs.append([])
if features is not None:
X = pd.DataFrame(features)
if self.gen_config['type'] == 'NN' and self.model_config['scaler']:
X = self.scaler.transform(X)
predictions = self.model.predict(X)
for ent_pair, assigned_label in zip(entities, predictions):
if assigned_label != 'none':
prediction_outputs[-1].append([ent_pair, assigned_label])
with sample['out_name'].open(mode='w') as out_f:
for line_pred in prediction_outputs:
line_string = ''
for pred in line_pred:
line_string += '{}\t{}\t{}\t{}\t{}\t{}\t{};;'.format(pred[1], pred[0][0]['string'], pred[0][0]['beg'], pred[0][0]['idx'], pred[0][1]['string'], pred[0][1]['beg'], pred[0][1]['idx'])
out_f.write(line_string + '\n')
def show_features_importance(self):
"""Print a summary of the feature importance provided by sklearn
"""
feat_importances = pd.Series(self.model.feature_importances_, index=self.X_train.columns)
print("Feature importance for classifier:")
print(feat_importances.sort_values())
def save(self):
output_name = '{}/model.sav'.format(self.output_handler.save_dir)
pickle.dump(self.model, open(output_name, 'wb'))
print("Saved model at {}".format(output_name))
def load(self, checkpoint):
self.model = pickle.load(open(checkpoint['model'], 'rb'))
print("Loaded model from {}".format(checkpoint['model']))
|
dave-s477/SoMeNLP | somenlp/word_embedding/calculate_embedding.py |
import os
import gensim
import random
import logging
from pathlib import Path
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class SentenizedInput(object):
"""Simple linebased iterator for data.
Assumes sentences are split by newline character
and tokens are split by whitespaces.
"""
def __init__(self, file_list, seed):
random.seed(seed)
random.shuffle(file_list)
self.file_list = file_list
def __iter__(self):
for f in self.file_list:
with f.open() as text:
for line in text:
if line.strip():
yield line.split()
def resume_training_embedding(checkpoint, in_files, out_path, epochs, replace=False, format=2, seed=42, ncores=8):
"""Re-train a Word2Vec embedding from a gensim model checkpoint
Args:
checkpoint (string): path to pre-trained model
in_files (list of PosixPaths): files of training corpus
out_path (string): output path
epochs (int): number of training epochs
replace (bool, optional): removes the original checkpoint. Defaults to False.
format (int, optional): determines the output format between bin/gensim-model/both. Defaults to 2.
seed (int, optional): data shuffling seed. Defaults to 42.
ncores (int, optional): number of gensim workers. Defaults to 8.
"""
iterator = SentenizedInput(in_files, seed)
model = gensim.models.Word2Vec.load(checkpoint)
model.build_vocab(iterator, update=True)
model.train(iterator, total_examples=model.corpus_count, epochs=epochs)
out_loc = checkpoint.split('.model')[0] + "_re-{}".format(epochs)
if format == 0:
print("Saving model as bin")
model.wv.save_word2vec_format(out_loc + '.bin', binary=True)
elif format == 1:
print("Saving trainable model")
model.save(out_loc + '.model')
else:
print("Saving model as bin and trainable")
model.wv.save_word2vec_format(out_loc + '.bin', binary=True)
model.save(out_loc + '.model')
if replace:
os.remove(checkpoint)
def train_embedding(name, in_files, out_path, emb_dim=200, win_size=5, min_count=5, epochs=1, format=2, seed=42, ncores=8):
"""Train a gensim Word2Vec embedding on a corpus
Args:
name (string): name for writing the model
in_files (list of PosixPaths): files of training corpus
out_path (string): output path
emb_dim (int, optional): size of the embedding. Defaults to 200.
win_size (int, optional): w2v window size. Defaults to 5.
min_count (int, optional): word mincount to be in embedding. Defaults to 5.
epochs (int, optional): number of training epochs. Defaults to 1.
format (int, optional): determines the output format between bin/gensim-model/both. Defaults to 2.
seed (int, optional): data shuffling seed. Defaults to 42.
ncores (int, optional): number of gensim workers. Defaults to 8.
"""
iterator = SentenizedInput(in_files, seed)
model = gensim.models.Word2Vec(iterator, vector_size=emb_dim, window=win_size, min_count=min_count, workers=ncores, sg=1, epochs=epochs)
out_loc = out_path + '/' + name
if format == 0:
print("Saving model as bin")
model.wv.save_word2vec_format(out_loc + '.bin', binary=True)
elif format == 1:
print("Saving trainable model")
model.save(out_loc + '.model')
else:
print("Saving model as bin and trainable")
model.wv.save_word2vec_format(out_loc + '.bin', binary=True)
model.save(out_loc + '.model')
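# Illustrative usage (paths and names are placeholders):
#   files = list(Path('corpus/').rglob('*.txt'))
#   train_embedding('w2v_200d', files, 'models', emb_dim=200, epochs=5)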
|
dave-s477/SoMeNLP | read_data.py | <gh_stars>0
#!/usr/bin/env python
import tensorflow as tf
import argparse
import sys
import numpy as np
from pathlib import Path
from tensorflow.python.summary.summary_iterator import summary_iterator
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def format_latex_string(values, separator='&'):
s = ''
for task_k, task_v in values.items():
s += "Task: {}\n".format(task_k)
for label_k, label_v in task_v.items():
s += '{} {} {} ({}) {} {} ({}) {} {} ({}) {}\n'.format(
label_k,
separator,
label_v['Precision_test_0'],
label_v['Precision_devel_0'],
separator,
label_v['Recall_test_0'],
label_v['Recall_devel_0'],
separator,
label_v['FScore_test_0'],
label_v['FScore_devel_0'],
separator
)
s += '\n'
return s
def format_latex_average_string(averages, separator='&'):
s = ''
for task_k, task_v in averages.items():
s += "Task: {}\n".format(task_k)
for label_k, label_v in task_v.items():
s += '{} {} {}$\pm${} {} {}$\pm${} {} {}$\pm${} {} {}$\pm${} {} {}$\pm${} {} {}$\pm${}\\\\ {}\n'.format(
label_k,
separator,
round(label_v['Precision_test_0']['mean'], 3),
round(label_v['Precision_test_0']['std'], 3),
separator,
round(label_v['Precision_devel_0']['mean'], 3),
round(label_v['Precision_devel_0']['std'], 3),
separator,
round(label_v['Recall_test_0']['mean'], 3),
round(label_v['Recall_test_0']['std'], 3),
separator,
round(label_v['Recall_devel_0']['mean'], 3),
round(label_v['Recall_devel_0']['std'], 3),
separator,
round(label_v['FScore_test_0']['mean'], 3),
round(label_v['FScore_test_0']['std'], 3),
separator,
round(label_v['FScore_devel_0']['mean'], 3),
round(label_v['FScore_devel_0']['std'], 3),
len(label_v['Precision_test_0']['values'])
)
s += '\n'
return s
def get_epochs(file, args):
epoch_summary = {}
for e in summary_iterator(file):
if e.step >= args.epochs[0] and e.step < args.epochs[1]:
if e.step not in epoch_summary:
epoch_summary[e.step] = {}
for v in e.summary.value:
if v.tag.count('/') == 3:
task, entity, value, dataset = v.tag.split('/')
else:
task = 'default'
entity, value, dataset = v.tag.split('/')
if task not in epoch_summary[e.step]:
epoch_summary[e.step][task] = {}
if entity not in epoch_summary[e.step][task]:
epoch_summary[e.step][task][entity] = {}
epoch_summary[e.step][task][entity][value + '_' + dataset] = v.simple_value
return epoch_summary
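# Illustrative tag layout: a tag like 'software/Application/FScore/devel_0'
# splits into task='software', entity='Application', value='FScore',
# dataset='devel_0'; tags with only two slashes fall under task 'default'.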
def get_max(file, args):
max_fscore = 0
max_epoch = 0
for e in summary_iterator(file):
for v in e.summary.value:
if v.tag.count('/') == 3 and v.tag == '{}/{}/FScore/devel_0'.format(args.get_max_task, args.get_max_label):
if v.simple_value > max_fscore:
max_fscore = v.simple_value
max_epoch = e.step
elif v.tag.count('/') == 2 and v.tag == '{}/FScore/devel_0'.format(args.get_max_label):
if v.simple_value > max_fscore:
max_fscore = v.simple_value
max_epoch = e.step
epoch_summary = {
max_epoch: {}
}
for e in summary_iterator(file):
if e.step == max_epoch:
for v in e.summary.value:
if v.tag.count('/') == 3:
task, entity, value, dataset = v.tag.split('/')
else:
task = 'default'
entity, value, dataset = v.tag.split('/')
if task not in epoch_summary[e.step]:
epoch_summary[e.step][task] = {}
if entity not in epoch_summary[e.step][task]:
epoch_summary[e.step][task][entity] = {}
epoch_summary[e.step][task][entity][value + '_' + dataset] = v.simple_value
return epoch_summary
def get_average_performance(files, args):
averages = {}
for file in files:
epoch_summary = get_max(str(file), args)
if epoch_summary is None:
continue
epoch_result = list(epoch_summary.values())[0]
for task, task_res in epoch_result.items():
if task not in averages:
averages[task] = {}
for label, label_res in task_res.items():
if label not in averages[task]:
averages[task][label] = {}
for score, score_res in label_res.items():
if score not in averages[task][label]:
averages[task][label][score] = {
'values': []
}
averages[task][label][score]['values'].append(score_res)
for _, task_res in averages.items():
for _, label_res in task_res.items():
for score, score_res in label_res.items():
mean_val = np.mean(score_res['values'])
score_res['mean'] = mean_val
std_val = np.std(score_res['values'])
score_res['std'] = std_val
return averages
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Get values from tensorboard.")
parser.add_argument("--path", required=True, help="Full path to tensorboard log or path to folder containing multiple logs")
parser.add_argument("--get-max", action="store_true", help="Get the max score based on devel F-Score.")
parser.add_argument("--get-max-task", default="software")
parser.add_argument("--get-max-label", default="Application")
parser.add_argument("--epochs", default=[10,12], nargs=2, help="Epochs to search")
parser.add_argument("--round", default='2', help="How to round results")
parser.add_argument("--merge-all", action="store_true")
args = parser.parse_args()
in_path = Path(args.path)
if in_path.is_file():
file_list = {'single': [in_path]}
elif in_path.is_dir():
file_list = {}
events = list(in_path.rglob('events.out.tfevents*'))
for event in events:
if args.merge_all:
config_string = 'all'
else:
config_string = str(event).rsplit('/', maxsplit=2)[-2].split('_', maxsplit=2)[-1]
if config_string not in file_list:
file_list[config_string] = []
file_list[config_string].append(event)
else:
raise(RuntimeError("Invalid input path {}".format(args.path)))
for k,v in file_list.items():
print(k)
if not args.get_max:
for file in v:
args.epochs = [int(x) for x in args.epochs]
epoch_summary = get_epochs(str(file), args)
for epoch, summary in epoch_summary.items():
print("Epoch {}".format(epoch))
print(format_latex_string(summary))
else:
averages = get_average_performance(v, args)
print(format_latex_average_string(averages))
print()
|
dave-s477/SoMeNLP | somenlp/word_embedding/__init__.py | from .calculate_embedding import train_embedding, resume_training_embedding |
dave-s477/SoMeNLP | somenlp/entity_disambiguation/linking_data.py | <gh_stars>0
import json
import random
import re
import numpy as np
from articlenizer import articlenizer
from nltk.corpus import stopwords
random.seed(42)
STOPS = stopwords.words('english')
def normalize(s):
norm_s = re.sub('[^0-9a-zA-Z]+', ' ', s.casefold()).rstrip('0123456789 ,.').lstrip(' ')
norm_s = ' '.join([w for w in norm_s.split() if w not in STOPS])
if not norm_s:
norm_s = s
return norm_s
def remove_spaces(s):
replace_regex = re.compile(r'\s(?P<to_keep>[\+\-#™_/\d]+)\s?')
return replace_regex.sub(r'\g<to_keep>', s)
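# Illustrative behaviour of the helpers above:
#   normalize('The R-Project, 2.0') -> 'r project'  (casefolded, stopwords
#                                                     and trailing digits removed)
#   remove_spaces('C + +')          -> 'C++'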
class LinkingData:
def __init__(self, config):
self.config = config
self.read_data_files(config)
def read_data_files(self, config):
self.train = {}
for entry in config['train']:
with open(entry['file'], 'r') as data_f:
self.train[entry['name']] = json.load(data_f)
for idx, entry in enumerate(self.train[entry['name']]):
entry['init_id'] = idx
self.test = {}
for entry in config['test']:
with open(entry['file'], 'r') as data_f:
self.test[entry['name']] = json.load(data_f)
for idx, entry in enumerate(self.test[entry['name']]):
entry['init_id'] = idx
if config['gold']:
with open(config['gold'], 'r') as gold_f:
self.gold = json.load(gold_f)
if self.config['data_augmentation']:
self.all = []
self.all_names = []
self.all_augmented_names = []
self.short_samples = []
self.long_samples = []
with open(config['all'], 'r') as data_f:
all_samples = json.load(data_f)
for sample in all_samples:
self.all_names.append(sample['mention'].lower())
tokens = sample['mention'].split()
sample['tokens'] = tokens
self.all.append(sample)
if len(tokens) <= 1:
self.short_samples.append(sample)
else:
self.long_samples.append(sample)
self._get_augmented_samples()
for s in self.all:
for idx, g in enumerate(self.gold):
if s['paper_id'] == g['paper_id'] and s['mention'] == ' '.join(articlenizer.get_tokenized_sentences(g['mention'])[0]):
s['gold_id'] = idx
break
for s in self.augmented_samples:
self.all.append(s)
random.shuffle(self.all)
self.link_lookup_table = np.zeros((len(self.all), len(self.all)), dtype=bool)
for idx, s in enumerate(self.all):
s['origin_id'] = idx
s['string'] = remove_spaces(s['mention'])
s['norm'] = normalize(s['mention'])
for idx_2, s_2 in enumerate(self.all[idx+1:]):
if 'gold_id' in s and 'gold_id' in s_2 and self.gold[s['gold_id']]['link'] == self.gold[s_2['gold_id']]['link']:
self.link_lookup_table[idx][idx+1+idx_2] = True
self.link_lookup_table[idx+1+idx_2][idx] = True
self.all_names = None
self.all_augmented_names = None
self.short_samples = None
self.long_samples = None
def _get_augmented_samples(self):
num_augmentation_samples = 0
for _, v in self.train.items():
num_augmentation_samples += len(v)
print("Collected {} base samples for augmentation".format(num_augmentation_samples))
num_augmentation_samples *= self.config['num_augmentation_samples']
self.augmented_samples = []
for _ in range(int(num_augmentation_samples/2)):
# Augment short sample
s_1 = self.short_samples[random.randint(0, len(self.short_samples)-1)]
s_2 = self.short_samples[random.randint(0, len(self.short_samples)-1)]
self._recombine(s_1, s_2)
# Augment long sample
s_1 = self.long_samples[random.randint(0, len(self.long_samples)-1)]
s_2 = self.long_samples[random.randint(0, len(self.long_samples)-1)]
self._recombine(s_1, s_2)
def _recombine_words(self, s1, s2):
split_idx_1 = random.randint(1, len(s1)-1) if len(s1) > 1 else random.randint(0,1)
split_idx_2 = random.randint(1, len(s2)-1) if len(s2) > 1 else random.randint(0,1)
if isinstance(s1, str) and isinstance(s2, str):
s1_new = s1[:split_idx_1] + s2[split_idx_2:]
s2_new = s2[:split_idx_2] + s1[split_idx_1:]
elif isinstance(s1, list) and isinstance(s2, list):
s1_new = ' '.join(s1[:split_idx_1] + s2[split_idx_2:])
s2_new = ' '.join(s2[:split_idx_2] + s1[split_idx_1:])
elif isinstance(s1, str) and isinstance(s2, list):
s1_new = s1[:split_idx_1] + ' ' + ' '.join(s2[split_idx_2:])
s2_new = ' '.join(s2[:split_idx_2]) + ' ' + s1[split_idx_1:]
elif isinstance(s1, list) and isinstance(s2, str):
s1_new = ' '.join(s1[:split_idx_1]) + ' ' + s2[split_idx_2:]
s2_new = s2[:split_idx_2] + ' ' + ' '.join(s1[split_idx_1:])
return s1_new, s2_new
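# Illustrative: _recombine_words(['Apache', 'Spark'], ['Microsoft', 'Excel'])
# may return ('Apache Excel', 'Microsoft Spark'), depending on the randomly
# drawn split indices; string and list inputs may be freely mixed.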
def _get_add_info(self, r):
i = {}
for v in r:
tokens = v['string'].split()
i[v['type']] = tokens if len(tokens) > 1 else v['string']
return i
def _recombine(self, s_1, s_2):
if s_1['mention'].lower() == s_2['mention'].lower():
return []
new_strings = self._recombine_words(s_1['mention'], s_2['mention'])
add_info_1 = self._get_add_info(s_1['relations'])
add_info_2 = self._get_add_info(s_2['relations'])
for k, v in add_info_1.items():
if k in add_info_2 and v == add_info_2[k]:
return []
new_developers = self._recombine_words(add_info_1['Developer_of'], add_info_2['Developer_of']) if 'Developer_of' in add_info_1 and 'Developer_of' in add_info_2 else None
new_url = self._recombine_words(add_info_1['URL_of'], add_info_2['URL_of']) if 'URL_of' in add_info_1 and 'URL_of' in add_info_2 else None
new_version = self._recombine_words(add_info_1['Version_of'], add_info_2['Version_of']) if 'Version_of' in add_info_1 and 'Version_of' in add_info_2 else None
for idx, n_s in enumerate(new_strings):
if not n_s.lower() in self.all_names and not n_s.lower() in self.all_augmented_names:
new_sample = {
'mention': n_s,
'relations': []
}
if new_developers is not None:
new_sample['relations'].append({
'type': 'Developer_of',
'string': new_developers[idx]
})
if new_url is not None:
new_sample['relations'].append({
'type': 'URL_of',
'string': new_url[idx]
})
if new_version is not None:
new_sample['relations'].append({
'type': 'Version_of',
'string': new_version[idx]
})
self.augmented_samples.append(new_sample)
self.all_augmented_names.append(n_s)
|
dave-s477/SoMeNLP | somenlp/distant_supervision/packages.py | import json
import urllib.request
import os
from pathlib import Path
from bs4 import BeautifulSoup
def get_pypi_package_names(default_address='https://pypi.org/simple/'):
"""Download PyPi package names
Args:
default_address (str, optional): url for pypi. Defaults to 'https://pypi.org/simple/'.
Returns:
list: pypi package names
"""
print("Loading pypi names")
pypi_package_names = []
try:
content = urllib.request.urlopen(default_address)
except urllib.error.URLError as e:
print("Parsing pypi went wrong due to: {}".format(e))
return None
soup = BeautifulSoup(content, "lxml")
for a in soup.findAll('a', href=True):
pypi_package_names.append(a.text)
return pypi_package_names
def get_R_forge_package_names(default_address='https://r-forge.r-project.org/softwaremap/trove_list.php?cat=c&form_cat=307&page='):
"""Download R-forge package names
Args:
default_address (str, optional): url for R-forge. Defaults to 'https://r-forge.r-project.org/softwaremap/trove_list.php?cat=c&form_cat=307&page='.
Returns:
list: list of R-forge packages
"""
print("Loading R packages")
rforge_packages = []
counter = 1
prev_rforge_packages = []
current_rforge_packages = []
try:
content = urllib.request.urlopen('{}{}'.format(default_address, counter))
except urllib.error.URLError as e:
print("Parsing R packages went wrong due to: {}".format(e))
return None
soup = BeautifulSoup(content, "lxml")
main_div = soup.find("div", {"id": "maindiv"})
for idx, tab in enumerate(soup.findAll("table",{"class":"fullwidth"})):
if idx != 0:
for ref in tab.findAll("a", href=True):
if ref.text != '[Filter]' and ref.text.strip():
current_rforge_packages.append(ref.text.rstrip())
while current_rforge_packages != prev_rforge_packages:
if counter % 10 == 0:
print("Currently at R forge page {}".format(counter))
prev_rforge_packages = current_rforge_packages
rforge_packages.extend(prev_rforge_packages)
counter += 1
current_rforge_packages = []
try:
content = urllib.request.urlopen('{}{}'.format(default_address, counter))
except urllib.error.URLError as e:
print("Parsing R packages went wrong on page {}: {}".format(counter, e))
return rforge_packages
soup = BeautifulSoup(content, "lxml")
main_div = soup.find("div", {"id": "maindiv"})
for idx, tab in enumerate(soup.findAll("table",{"class":"fullwidth"})):
            if idx != 0:
                for ref in tab.findAll("a", href=True):
                    if ref.text != '[Filter]' and ref.text.strip():
                        current_rforge_packages.append(ref.text.rstrip())
return rforge_packages
def get_swMATH_software_names(default_address='https://swmath.org/?which_search=browse&sel=all&sortby=-rank&&page='):
"""Download list of SwMATH software names
Args:
default_address (str, optional): swMATH url. Defaults to 'https://swmath.org/?which_search=browse&sel=all&sortby=-rank&&page='.
Returns:
list: swMATH software names
"""
print("Loading SwMATH names")
swMATH_packages = []
counter = 1
prev_swMATH = []
current_swMATH = []
try:
content = urllib.request.urlopen('{}{}'.format(default_address, counter))
except urllib.error.URLError as e:
print("Parsing swMATH names went wrong due to: {}".format(e))
return None
soup = BeautifulSoup(content, "lxml")
for h1 in soup.findAll("h1"):
current_swMATH.append(h1.text)
current_swMATH.remove('swMATH')
while current_swMATH != prev_swMATH:
if counter % 10 == 0:
print("Currently at swMATH page {}".format(counter))
prev_swMATH = current_swMATH
swMATH_packages.extend(prev_swMATH)
counter += 1
current_swMATH = []
try:
content = urllib.request.urlopen('{}{}'.format(default_address, counter), timeout=20)
soup = BeautifulSoup(content, "lxml")
except urllib.error.URLError as e:
print("Parsing swMATH names went wrong on page {}: {}".format(counter, e))
return swMATH_packages
except:
print("Probably a timeout in loading {}... trying again".format(counter))
counter -= 1
else:
for h1 in soup.findAll("h1"):
current_swMATH.append(h1.text)
current_swMATH.remove('swMATH')
return swMATH_packages
def get_CRAN_package_names(default_address='https://cran.r-project.org/web/packages/available_packages_by_name.html'):
"""Download CRAN package names
Args:
default_address (str, optional): url for CRAN. Defaults to 'https://cran.r-project.org/web/packages/available_packages_by_name.html'.
Returns:
list: CRAN package names
"""
print("Loading CRAN packages")
cran_packages = []
try:
content = urllib.request.urlopen(default_address)
except urllib.error.URLError as e:
print("Parsing pypi went wrong due to: {}".format(e))
return None
soup = BeautifulSoup(content, 'lxml')
for a in soup.findAll('a', href=True):
cran_packages.append(a.text)
return cran_packages
def get_Bioconductor_package_names(default_address='https://www.bioconductor.org/packages/release/bioc/'):
"""Bioconductor package names
Args:
default_address (str, optional): url for Bioconductor. Defaults to 'https://www.bioconductor.org/packages/release/bioc/'.
Returns:
list: Bioconductor package names
"""
print("Loading Bioconductor packages")
bioconductor_packages = []
try:
content = urllib.request.urlopen(default_address)
except urllib.error.URLError as e:
print("Parsing pypi went wrong due to: {}".format(e))
return None
soup = BeautifulSoup(content, 'lxml')
div = soup.find("div", {"id": "PageContent"})
for row in div.find_all('table')[0].find_all('tr'):
for a in row.findAll('a', href=True):
bioconductor_packages.append(a.text)
return bioconductor_packages
def get_Anaconda_package_names(repo='anaconda', default_address='https://anaconda.org/{}/repo?page='):
"""Get Anaconda package names
Args:
repo (str, optional): repo location for anaconda. Defaults to 'anaconda'.
default_address (str, optional): url for anaconda. Defaults to 'https://anaconda.org/{}/repo?page='.
Returns:
list: Anaconda package names
"""
conda_packages = []
counter = 1
prev_conda_packages = []
current_conda_packages = []
try:
req = urllib.request.Request('{}{}'.format(default_address.format(repo), counter), headers={'User-Agent': 'Mozilla/5.0'})
content = urllib.request.urlopen(req)
except urllib.error.URLError as e:
print("Parsing Anaconda went wrong due to: {}".format(e))
return None
soup = BeautifulSoup(content, 'lxml')
for span in soup.findAll("span", {"class": "packageName"}):
current_conda_packages.append(span.text)
while current_conda_packages != prev_conda_packages:
        if (counter+1) % 10 == 0:
print("Currently at Anaconda ({}) page {}".format(repo, counter+1))
prev_conda_packages = current_conda_packages
conda_packages.extend(prev_conda_packages)
counter += 1
current_conda_packages = []
try:
req = urllib.request.Request('{}{}'.format(default_address.format(repo), counter), headers={'User-Agent': 'Mozilla/5.0'})
content = urllib.request.urlopen(req, timeout=20)
soup = BeautifulSoup(content, 'lxml')
except urllib.error.URLError as e:
print("Parsing Anaconda names went wrong on page {}: {}".format(counter, e))
return conda_packages
except:
print("Probably a timeout in loading {}... trying again".format(counter))
counter -= 1
else:
for span in soup.findAll("span", {"class": "packageName"}):
current_conda_packages.append(span.text)
return conda_packages
def get_if_not_exists(date, function, name, location='/tmp', *args):
"""Load file if it exists, else call a function to create it.
Args:
date (str): string identifier of file
        function (fct): function to call if the file does not exist
name (str): base name of file
location (str, optional): path to file. Defaults to '/tmp'.
Returns:
json-serializeable object: result from generating or loading a json file
"""
loc = Path(location) / '{}_{}'.format(date, name)
output = None
if loc.is_file():
print("Loading {} from {}".format(name, str(loc)))
with loc.open(mode='r') as j_in:
output = json.load(j_in)
else:
output = function(*args)
with loc.open(mode='w') as j_out:
json.dump(output, j_out, indent=4)
print("Saved {} to {}".format(name, str(loc)))
return output
def load_package_names(date, location='/tmp'):
"""Load package names from a number of repositories
Args:
date (str): date-based identifier
location (str, optional): output/lookup path. Defaults to '/tmp'.
Returns:
        dictionary: package repositories with their respective software lists
"""
data_dictionary = {
'pypi': get_if_not_exists(date, get_pypi_package_names, 'pypi.json', location),
'rforge': get_if_not_exists(date, get_R_forge_package_names, 'rforge.json', location),
'swMATH': get_if_not_exists(date, get_swMATH_software_names, 'swMATH.json', location),
'CRAN': get_if_not_exists(date, get_CRAN_package_names, 'CRAN.json', location),
'Bioconductor': get_if_not_exists(date, get_Bioconductor_package_names, 'Bioconductor.json', location),
'Anaconda': get_if_not_exists(date, get_Anaconda_package_names, 'Anaconda.json', location, 'anaconda'),
'Conda-forge': get_if_not_exists(date, get_Anaconda_package_names, 'Conda-forge.json', location, 'conda-forge')
}
return data_dictionary
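# Usage sketch (hedged; all loaders hit live package indices, so results vary
# over time, and the date string only namespaces the on-disk cache):
#   packages = load_package_names('2021-01-01', location='/tmp')
#   for repo, names in packages.items():
#       print(repo, len(names) if names is not None else 'download failed')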
|
dave-s477/SoMeNLP | somenlp/NER/tuner.py | <reponame>dave-s477/SoMeNLP
import json
import copy
from pathlib import Path
from somenlp.utils import find_type_in_dict, getFromDict, setInDict, get_abbr
class Tuner():
def __init__(self, config, time):
self.config = config
self.values_to_vary = find_type_in_dict(self.config, list)
self._gen_all_parameter_combinations()
self._gen_all_configs()
def _entry_to_combinations(self, entry):
res = []
for val in entry['values']:
res.append([{
'path': entry['path'],
'values': val
}])
return res
def _gen_all_parameter_combinations(self):
old_combinations = self._entry_to_combinations(self.values_to_vary[0])
for val in self.values_to_vary[1:]:
#print(self._entry_to_combinations(val))
new_combinations = []
for combination in old_combinations:
for next_param in self._entry_to_combinations(val):
comb = combination.copy()
comb.extend(next_param)
new_combinations.append(comb)
old_combinations = new_combinations
self.combinations = old_combinations
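    # Worked example (hedged): with two varied parameters of 2 values each,
    # self.combinations holds 4 lists; one of them could look like
    # [{'path': ['lr'], 'values': 0.1}, {'path': ['bs'], 'values': 16}]
    # (the 'values' key carries a single chosen value per combination).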
def _gen_config_name(self, parameter_config):
name = ''
for parameter in parameter_config:
for sub_path in parameter['path']:
name += '{}-'.format(get_abbr(sub_path))
name += '{}_'.format(parameter['values'])
for char in ['{', '}', ' ', "'", '/', '\\', ':', ',']:
name = name.replace(char, '')
return name.rstrip('_')[:200]
def _gen_all_configs(self):
self.configs_to_execute = {}
for combination in self.combinations:
config = copy.deepcopy(self.config)
config_name = self._gen_config_name(combination)
for entry in combination:
setInDict(config, entry['path'], entry['values'])
data_c_file = config.pop('data', None)
if data_c_file is None:
raise(RuntimeError("No data config was provided in tuning config."))
data_c_path = Path(data_c_file)
with data_c_path.open(mode='r') as data_c_json:
data_conf = json.load(data_c_json)
self.configs_to_execute[config_name] = {
'data': data_conf,
'model': config
}
def yield_configs(self):
for c_name, v in self.configs_to_execute.items():
yield c_name, v['data'], v['model'] |
dave-s477/SoMeNLP | somenlp/distant_supervision/combine_info.py | <gh_stars>0
import json
from articlenizer import articlenizer as art
def load_dicts(locations):
"""Load and merge information from json dictionaries
Args:
locations (list of Posix paths): locations to json files
Returns:
dictionary: dictionary merged from json files
"""
results = {}
for f in locations:
name = f.name.split('.dic')[0]
results[name] = []
with f.open(mode='r', errors='ignore') as in_f:
for line in in_f:
if line.rstrip():
results[name].append(line.rstrip())
return results
def normalize_entries(entries, max_length):
"""Shorten entries in list based on number of tokens after tokenization
Args:
entries (list): list of candidates
max_length (int): max allowed token length
Returns:
list: list with filtered long entries
"""
entry_list = []
for entry in entries:
tokens = art.tokenize_text(entry)
if len(tokens) <= max_length:
entry_list.append(' '.join(tokens))
return entry_list
def merge_results(out_file, max_token_length, inputs):
"""Merge and write a distant supervision ditionary from filtered dictionaries
Args:
out_file (Posix path): output location
max_token_length (int): max allowed token length
inputs (list of dictionaries): individual dicts to be merged
"""
distant_supervision_dictionary = {}
for i in inputs:
if i is not None:
for k, v in i.items():
distant_supervision_dictionary[k] = normalize_entries(v, max_token_length)
with out_file.open(mode='w') as json_out:
json.dump(distant_supervision_dictionary, json_out, indent=4)
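# Minimal end-to-end sketch (hedged; 'dicts/' and the output name are hypothetical):
#   from pathlib import Path
#   dicts = load_dicts(list(Path('dicts/').glob('*.dic')))
#   merge_results(Path('distant_supervision.json'), 5, [dicts])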
|
dave-s477/SoMeNLP | somenlp/NER/models/crf.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
class CRF(nn.Module):
def __init__(self, tagset_size, device, init_parameters=None):
super(CRF, self).__init__()
self.device = device
if init_parameters is None:
#self.transition_params = nn.Parameter(torch.randn(tagset_size, tagset_size))
self.transition_params = nn.Parameter(nn.init.xavier_normal_(torch.empty(tagset_size, tagset_size)))
else:
self.transition_params = nn.Parameter(init_parameters.to(self.device))
def crf_sequence_score(self, inputs, tag_indices, sequence_lengths):
"""Computes the unnormalized score for a tag sequence.
Args:
inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
to use as input to the CRF layer.
tag_indices: A [batch_size, max_seq_len] matrix of tag indices for which
we compute the unnormalized score.
sequence_lengths: A [batch_size] vector of true sequence lengths.
Returns:
sequence_scores: A [batch_size] vector of unnormalized sequence scores.
"""
# If max_seq_len is 1, we skip the score calculation and simply gather the
# unary potentials of the single tag.
def _single_seq_fn():
batch_size = inputs.shape[0]
example_inds = torch.arange(batch_size, dtype=tag_indices.dtype).to(self.device).unsqueeze(1)
sequence_scores = inputs.squeeze(1)[example_inds, tag_indices].squeeze(1)
sequence_scores = torch.where(sequence_lengths <= 0, torch.zeros(sequence_scores.shape).to(self.device), sequence_scores)
return sequence_scores
# Compute the scores of the given tag sequence.
def _multi_seq_fn():
unary_scores = self.crf_unary_score(tag_indices, sequence_lengths, inputs)
binary_scores = self.crf_binary_score(tag_indices, sequence_lengths)
sequence_scores = unary_scores + binary_scores
return sequence_scores
if inputs.shape[1] == 1:
return _single_seq_fn()
else:
return _multi_seq_fn()
def crf_forward(self, inputs, state, sequence_lengths):
"""Computes the alpha values in a linear-chain CRF.
See http://www.cs.columbia.edu/~mcollins/fb.pdf for reference.
Args:
inputs: A [batch_size, num_tags] matrix of unary potentials.
state: A [batch_size, num_tags] matrix containing the previous alpha
values.
sequence_lengths: A [batch_size] vector of true sequence lengths.
Returns:
          new_alphas: A [batch_size, num_tags] matrix containing the alpha
            values at the last valid time step of each sequence.
"""
batch_size = inputs.shape[0]
sequence_lengths, _ = torch.max(
torch.stack(
(torch.zeros(sequence_lengths.shape, dtype=sequence_lengths.dtype).to(self.device), sequence_lengths - 2),
dim=1),
dim=1)
inputs = inputs.permute(1, 0, 2)
transition_params_unsq = self.transition_params.unsqueeze(0)
all_alphas = []
for idx in range(inputs.shape[0]):
state = state.unsqueeze(2)
transition_scores = state + transition_params_unsq
new_alphas = inputs[idx] + torch.logsumexp(transition_scores, dim=1)
state = new_alphas
all_alphas.append(new_alphas)
all_alphas = torch.stack(all_alphas, dim=1)
return all_alphas[torch.arange(sequence_lengths.shape[0]).to(self.device), sequence_lengths]
def crf_log_norm(self, inputs, sequence_lengths):
"""Computes the normalization for a CRF.
Args:
inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
to use as input to the CRF layer.
sequence_lengths: A [batch_size] vector of true sequence lengths.
Returns:
log_norm: A [batch_size] vector of normalizers for a CRF.
"""
# Split up the first and rest of the inputs in preparation for the forward
# algorithm.
first_input = inputs.narrow(1, 0, 1).squeeze(1)
# If max_seq_len is 1, we skip the algorithm and simply reduce_logsumexp over
# the "initial state" (the unary potentials).
def _single_seq_fn():
log_norm = torch.logsumexp(first_input, dim=1)
log_norm = torch.where(sequence_lengths <= 0, torch.zeros(log_norm.shape).to(self.device), log_norm)
return log_norm
def _multi_seq_fn():
rest_of_input = inputs.narrow(1, 1, inputs.shape[1]-1)
alphas = self.crf_forward(rest_of_input, first_input, sequence_lengths)
log_norm = torch.logsumexp(alphas, dim=1)
log_norm = torch.where(sequence_lengths <= 0, torch.zeros(log_norm.shape).to(self.device), log_norm)
return log_norm
if inputs.shape[1] == 1:
return _single_seq_fn()
else:
return _multi_seq_fn()
def crf_log_likelihood(self, inputs, tag_indices, sequence_lengths):
"""Computes the log-likelihood of tag sequences in a CRF.
Args:
inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
to use as input to the CRF layer.
tag_indices: A [batch_size, max_seq_len] matrix of tag indices for which
we compute the log-likelihood.
sequence_lengths: A [batch_size] vector of true sequence lengths.
Returns:
log_likelihood: A [batch_size] `Tensor` containing the log-likelihood of
each example, given the sequence of tag indices.
"""
# Get shape information.
num_tags = inputs.shape[2]
sequence_scores = self.crf_sequence_score(inputs, tag_indices, sequence_lengths)
log_norm = self.crf_log_norm(inputs, sequence_lengths)
log_likelihood = sequence_scores - log_norm
return log_likelihood
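    # Training usage sketch (hedged; `crf` is an instance of this class and
    # `emissions`, `tags`, `lengths` are hypothetical tensors shaped as above):
    #   log_lik = crf.crf_log_likelihood(emissions, tags, lengths)
    #   loss = -log_lik.mean()
    #   loss.backward()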
def crf_unary_score(self, tag_indices, sequence_lengths, inputs):
"""Computes the unary scores of tag sequences.
Args:
tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
sequence_lengths: A [batch_size] vector of true sequence lengths.
inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials.
Returns:
unary_scores: A [batch_size] vector of unary scores.
"""
batch_size = inputs.shape[0]
max_seq_len = inputs.shape[1]
num_tags = inputs.shape[2]
flattened_inputs = torch.flatten(inputs)
offsets = torch.unsqueeze(torch.arange(batch_size).to(self.device) * max_seq_len * num_tags, 1)
offsets = torch.add(offsets, torch.unsqueeze(torch.arange(max_seq_len).to(self.device) * num_tags, 0))
flattened_tag_indices = offsets + tag_indices
flattened_tag_indices = torch.flatten(flattened_tag_indices)
unary_scores = torch.gather(flattened_inputs, 0, flattened_tag_indices).view(batch_size, max_seq_len)
masks = torch.arange(unary_scores.shape[1]).to(self.device)[None, :] < sequence_lengths[:, None]
masks = masks.squeeze(1).long()
unary_scores = torch.sum(unary_scores * masks, dim=1)
return unary_scores
def crf_binary_score(self, tag_indices, sequence_lengths):
"""Computes the binary scores of tag sequences.
Args:
tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
sequence_lengths: A [batch_size] vector of true sequence lengths.
Returns:
binary_scores: A [batch_size] vector of binary scores.
"""
# Get shape information.
batch_size = tag_indices.shape[0]
num_tags = self.transition_params.shape[0]
num_transitions = tag_indices.shape[1] - 1
start_tag_indices = tag_indices.narrow(1, 0, num_transitions)
end_tag_indices = tag_indices.narrow(1, 1, num_transitions)
# Encode the indices in a flattened representation.
flattened_transition_indices = start_tag_indices * num_tags + end_tag_indices
flattened_transition_indices = flattened_transition_indices.flatten()
flattened_transition_params = self.transition_params.flatten()
# Get the binary scores based on the flattened representation.
binary_scores = torch.gather(flattened_transition_params, 0, flattened_transition_indices).view(batch_size, -1)
masks = torch.arange(tag_indices.shape[1]).to(self.device)[None, :] < sequence_lengths[:, None]
masks = masks.squeeze(1).long()
truncated_masks = masks.narrow(1, 1, masks.shape[1]-1)
binary_scores = torch.sum(binary_scores * truncated_masks, dim=1)
return binary_scores
def viterbi_decode_batch(self, feats, sequence_lengths):
"""Computes the most likely sequence with the viterbi algorithm for batch inputs.
Args:
inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
to use as input to the CRF layer.
sequence_lengths: A [batch_size] vector of true sequence lengths.
Returns:
viterbi_sequence: A [batch_size, max_seq_len] vector of most likely sequences.
viterbi_score: The corresponding score of the viterbi_sequence
bool_mask: Input mask corresponding to the given lengths
"""
bool_masks = torch.arange(feats.shape[1]).to(self.device)[None, :] < sequence_lengths[:, None]
bool_masks = bool_masks.squeeze(1)
trellis = torch.zeros(feats.shape).to(self.device)
backpointers = torch.zeros(feats.shape, dtype=torch.int64).to(self.device)
trellis[:, 0] = feats[:, 0]
for t in range(1, feats.shape[1]):
v = torch.unsqueeze(trellis[:, t - 1], 2) + self.transition_params
val_max, arg_max = torch.max(v, dim=1)
bool_mask = bool_masks[:, t]
trellis[:, t] = torch.where(bool_mask.unsqueeze(1), feats[:, t] + val_max, trellis[:, t-1])
backpointers[:, t]= torch.where(
bool_mask.unsqueeze(1),
arg_max,
torch.argmax(
trellis[:, t-1],
dim=1).unsqueeze(1).expand(
arg_max.shape))
viterbi_cand_val, viterbi_cand_idx = torch.max(trellis[:, -1], dim=1)
viterbi = [viterbi_cand_idx]
reverse_bps = torch.flip(backpointers[:, 1:], dims=[1])
for idx in range(reverse_bps.shape[1]):
next_viterbi = torch.gather(
reverse_bps[:, idx],
1,
viterbi[-1].unsqueeze(1)
).squeeze(1)
viterbi.append(next_viterbi)
viterbi.reverse()
viterbi_score, _ = torch.max(trellis[:, -1], dim=1)
return torch.stack(viterbi, dim=1), viterbi_score, bool_masks |
dave-s477/SoMeNLP | somenlp/utils/__init__.py | import torch
import argparse
import operator
from functools import reduce
from .time_marker import get_time_marker
def str2bool(v):
"""Tranform a string value into bool
Args:
v (str): string indicating a bool value
Raises:
argparse.ArgumentTypeError: str is unsuited for parsing to boolean
Returns:
bool: result
"""
if isinstance(v, bool):
return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
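# Typical argparse usage sketch (hedged; the flag name is hypothetical):
#   parser.add_argument('--shuffle', type=str2bool, nargs='?', const=True, default=False)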
def set_dropout(model, drop_rate=0.3):
"""Recursively set dropout in pytorch model
Args:
model (pytorch model): model
drop_rate (float, optional): value for dropouts. Defaults to 0.3.
"""
for name, child in model.named_children():
if isinstance(child, torch.nn.Dropout):
child.p = drop_rate
set_dropout(child, drop_rate=drop_rate)
# TODO recursive would be nicer..
def find_type_in_dict(dictionary, data_type=list):
"""Get all entries in a dictionary that are of a certain type up to depth 3
Args:
dictionary (dict): input dictionary
data_type (python data type identifier, optional): data type to match. Defaults to list.
Returns:
list: paths of matched elements within the dictionary
"""
paths_to_examine = []
for main_key, main_values in dictionary.items():
current_path = [main_key]
if isinstance(main_values, data_type):
paths_to_examine.append({
'path': current_path,
'values': main_values
})
elif isinstance(main_values, dict):
for sub_key, sub_values in main_values.items():
current_path = [main_key, sub_key]
if isinstance(sub_values, data_type):
paths_to_examine.append({
'path': current_path,
'values': sub_values
})
elif isinstance(sub_values, dict):
for sub_sub_key, sub_sub_values in sub_values.items():
current_path = [main_key, sub_key, sub_sub_key]
if isinstance(sub_sub_values, data_type):
paths_to_examine.append({
'path': current_path,
'values': sub_sub_values
})
return paths_to_examine
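# Worked example (hedged):
#   find_type_in_dict({'lr': [0.1, 0.01], 'model': {'hidden': [128, 256]}})
#   -> [{'path': ['lr'], 'values': [0.1, 0.01]},
#       {'path': ['model', 'hidden'], 'values': [128, 256]}]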
def getFromDict(dataDict, mapList):
return reduce(operator.getitem, mapList, dataDict)
def setInDict(dataDict, mapList, value):
getFromDict(dataDict, mapList[:-1])[mapList[-1]] = value
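# Worked example (hedged):
#   d = {'model': {'hidden': 128}}
#   getFromDict(d, ['model', 'hidden'])     # -> 128
#   setInDict(d, ['model', 'hidden'], 256)  # d == {'model': {'hidden': 256}}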
def get_abbr(s):
abbr = s[0]
prev_char = s[0]
for c in s[1:]:
if prev_char in ['_', ' ']:
abbr += c
prev_char = c
return abbr
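# Worked example (hedged): abbreviates on '_' and ' ' boundaries,
#   get_abbr('num_augmentation_samples')  # -> 'nas'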
|
dave-s477/SoMeNLP | somenlp/NER/__init__.py | from .output_handler import OutputHandler
from .tuner import Tuner
from .LSTM_dataset import LSTMDataset
from .BERT_dataset import BERTDataset, BERTMultiDataset
from .data_handler import DataHandler
from .model_wrapper import ModelWrapper
from .models import BiLSTM_CRF, FeatureLSTM, CombinedLSTM
from .trainer import Trainer
from .run_model import main, predict, tune
|
dave-s477/SoMeNLP | somenlp/NER/tuning_wrapper_lstm.py | <filename>somenlp/NER/tuning_wrapper_lstm.py
import pickle
import random
import argparse
import json
from os.path import exists, join
from shutil import copytree
from itertools import product
import main_lstm as lstm
import main_lstm_with_features as feat_lstm
import main_lstm_combined as comb_lstm
def generate_all_configs(tuning_config):
model_confs = [dict(zip(tuning_config["model"], v)) for v in product(*tuning_config["model"].values())]
all_configs = []
for mc in model_confs:
mc['test_name'] = get_model_name(mc)
all_configs.append({
"mode": tuning_config['mode'],
"data": tuning_config['data'],
"model": mc,
"analyses": tuning_config['analyses']
})
return all_configs
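# Worked example (hedged): the grid is the Cartesian product over the lists in
# tuning_config['model'], e.g. {'lr': [0.1, 0.01], 'bs': [16]} expands to
# [{'lr': 0.1, 'bs': 16}, {'lr': 0.01, 'bs': 16}] before a name is attached.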
def get_abbr(s):
abbr = s[0]
prev_char = s[0]
for c in s[1:]:
if prev_char == '_':
abbr += c
prev_char = c
return abbr
def get_model_name(model_config):
base_name = 'O'
for k, v in model_config.items():
corr_v = v
if isinstance(corr_v, str):
if not corr_v:
corr_v = 'none'
else:
corr_v = corr_v.replace(r'/', r'-')
corr_v = corr_v.replace(r'.', r'-')
corr_v = corr_v.replace(r'>', r'')
corr_v = corr_v.replace(r'<', r'')
if isinstance(corr_v, dict):
corr_s = ''
for k2, v2 in corr_v.items():
corr_v2 = v2
if isinstance(corr_v2, str):
corr_v2 = corr_v2.replace(r'/', r'-')
corr_s = '{}_{}_{}'.format(corr_s, get_abbr(k2), corr_v2)
corr_v = corr_s
if isinstance(corr_v, bool):
corr_v = str(corr_v)[0]
base_name = '{}_{}_{}'.format(base_name, get_abbr(k), corr_v)
if len(base_name) > 255:
        print(RuntimeWarning("Maximum naming length is reached, consider changing the setup."))
return base_name
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = "Run hyper-parameter tuning for Bi-LSTM-CRF.")
parser.add_argument("--log-loc", default='', help="Name of log-dir.")
parser.add_argument("--reset", action='store_true', help="Use new configs.")
parser.add_argument("--config", required=True, help="name of config")
args = parser.parse_args()
with open(args.config, 'r') as t_file:
config = json.load(t_file)
for key, value in config['data'].items():
if key in ['pretrain', 'train', 'devel', 'test']:
if value['text'] and not value['text'].startswith('/'):
value['text'] = config['data']['base_dir'] + value['text'] if value['text'] else ''
value['features'] = config['data']['base_dir'] + value['features'] if value['features'] else ''
value['labels'] = config['data']['base_dir'] + value['labels'] if value['labels'] else ''
if 'prepro' in value.keys() and 'arg' in value['prepro'].keys():
value['prepro']['arg'] = config['data']['base_dir'] + value['prepro']['arg']
value['relations']['tag_names'] = config['data']['base_dir'] + value['relations']['tag_names']
value['relations']['relations'] = config['data']['base_dir'] + value['relations']['relations']
training_confs = generate_all_configs(config)
for c in training_confs:
print("Training a model with config:")
print(json.dumps(c, indent=4))
if 'combined' in args.config:
print("Running a combined LSTM")
comb_lstm.train(c)
elif 'with_features' in args.config:
print("Running a custom feature LSTM")
feat_lstm.train(c)
else:
print("Running a plain LSTM")
lstm.train(c) |
dave-s477/SoMeNLP | somenlp/NER/seqeval_custom/__init__.py | from .metrics.sequence_labeling import classification_report, precision_recall_fscore_support |
dave-s477/SoMeNLP | somenlp/NER/output_handler.py | <reponame>dave-s477/SoMeNLP
import json
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
from sklearn.metrics import confusion_matrix
from articlenizer.formatting import bio_to_brat
class OutputHandler():
def __init__(self, name, time='0_0_0', checkpoint={}, log_dir='logs', save_dir='save'):
if 'model' in checkpoint and checkpoint['model']:
self.model_loc = checkpoint['model']
else:
self.model_loc = ''
if self.model_loc and 'log_dir' in checkpoint:
self.log_dir = Path(checkpoint['log_dir'])
else:
self.log_dir = Path('{}/{}/{}'.format(log_dir, name, time))
self.writer = SummaryWriter(self.log_dir)
if self.model_loc and 'save_dir' in checkpoint:
self.save_dir = Path(checkpoint['save_dir'])
else:
self.save_dir = Path('{}/{}/{}'.format(save_dir, name, time))
self.save_dir.mkdir(parents=True, exist_ok=True)
def save_json(self, data, name='encoding'):
with open('{}/{}.json'.format(self.save_dir, name), 'w') as json_file:
json.dump(data, json_file, indent=4)
def load_encoding(self):
with open('{}/encoding.json'.format(self.save_dir), 'r') as json_file:
encoding_dict = json.load(json_file)
return encoding_dict
def print_scalars(self, scalars, epoch, name, meta_name=''):
out_s = 'Classification result on {}{} ep {}:\n'.format(meta_name, name, epoch)
for idx, (k, v) in enumerate(scalars.items()):
if idx % 3 == 0:
out_s += '\n'
out_s += '{}:\t{}\n'.format(k, round(v, 3))
out_s += 'Done\n\n'
print(out_s)
def write_scalars(self, scalars, epoch):
for scalar_key, scalar_value in scalars.items():
self.writer.add_scalar(scalar_key, scalar_value, epoch)
def print_errors(self, labels, predictions, sentences, max_output_length, data_set_name, word2name):
out_s = ''
for sent_true, sent_pred, sent_coded_words in zip(labels, predictions, sentences):
if sent_true != sent_pred:
out_s += "Wrong sentence:\n"
output_string_true, output_string_pred, output_string_words = '', '', ''
if isinstance(word2name, dict):
sentence_words = [word2name[w] for w in sent_coded_words]
else:
sentence_words = word2name.convert_ids_to_tokens(sent_coded_words)
for true_label, predicted_label, word in zip(sent_true, sent_pred, sentence_words):
next_length = max(len(word), len(true_label), len(predicted_label)) + 1
if (len(output_string_words) + next_length) > max_output_length:
out_s += 'Sent:\t{}\n'.format(output_string_words)
out_s += 'True:\t{}\n'.format(output_string_true)
out_s += 'Pred:\t{}\n'.format(output_string_pred)
output_string_true, output_string_pred, output_string_words = '', '', ''
output_string_words += '{:{}s}'.format(word, next_length)
output_string_true += '{:{}s}'.format(true_label, next_length)
output_string_pred += '{:{}s}'.format(predicted_label, next_length)
if output_string_words:
out_s += 'Sent:\t{}\n'.format(output_string_words)
out_s += 'True:\t{}\n'.format(output_string_true)
out_s += 'Pred:\t{}\n'.format(output_string_pred)
out_s += '\n\n'
out_loc = self.save_dir / '{}_tagging_errors.txt'.format(data_set_name)
with out_loc.open(mode='w') as out_f:
out_f.write(out_s)
def c_matrix(self, names, labels, predictions, tag_mode):
tags = []
for n in names:
if n != 'O':
tags.append('B-{}'.format(n))
tags.append('I-{}'.format(n))
if tag_mode == 'bioes':
tags.append('S-{}'.format(n))
tags.append('E-{}'.format(n))
tags.append('O')
t = [item for sublist in labels for item in sublist]
p = [item for sublist in predictions for item in sublist]
unique_tags = set(tags)
cm = confusion_matrix(t, p, labels=tags)
out_s = """
Confusion Matrix for:
{}
{}
""".format(tags, cm)
print(out_s)
def save_predictions_fct(self, path, predictions, text):
with path['out'].open(mode='w') as out_f:
for preds in predictions:
out_f.write('{}\n'.format(' '.join(preds).rstrip()))
with path['out-text'].open(mode='w') as out_t:
for line in text:
out_t.write('{}\n'.format(' '.join(line).rstrip()))
def save_predictions(self, path, predictions, text):
if not isinstance(predictions, dict):
self.save_predictions_fct(path, predictions, text)
else:
path_out = str(path['out'])
for k, v in predictions.items():
path['out'] = Path(path_out + '.' + k)
self.save_predictions_fct(path, v, text)
def summarize_predictions_fct(self, path, predictions, text):
print("Predicted entities for {}".format(path.name))
out_path = Path(str(path) + '.sum')
entities, _, _ = bio_to_brat(text, predictions, split_sent=False, split_words=False)
out_s = ''
for e in entities:
out_s += '{}\t{} {} {}\t{}\n'.format(e['id'], e['type'], e['beg'], e['end'], e['string'])
with out_path.open(mode='w') as out_f:
out_f.write(out_s)
print(out_s)
def summarize_predictions(self, path, predictions, text):
if not isinstance(predictions, dict):
self.summarize_predictions_fct(path['out'], predictions, text)
else:
for k, v in predictions.items():
self.summarize_predictions_fct(Path(str(path['out']) + k), v, text)
def cl_for_latex(self, dictionary, round_n=2):
new_mapping = {}
for k,v in dictionary.items():
for label, values in v.items():
if label not in new_mapping:
new_mapping[label] = {}
for metric, result in values.items():
new_mapping[label][metric + '_' + k] = round(result, round_n)
separator='&'
s = ''
for k,v in new_mapping.items():
s += '{} {} {} ({}) {} {} ({}) {} {} ({}) {} {} ({})\n'.format(
k,
separator,
v['precision_test_0'],
v['precision_devel_0'],
separator,
v['recall_test_0'],
v['recall_devel_0'],
separator,
v['f1-score_test_0'],
v['f1-score_devel_0'],
separator,
v['support_test_0'],
v['support_devel_0']
)
return s
|
dave-s477/SoMeNLP | somenlp/feature_engineering/__init__.py | <filename>somenlp/feature_engineering/__init__.py
from .gen_custom_features import calculate_features_parallel |
dave-s477/SoMeNLP | somenlp/distant_supervision/gen_sequences.py | from itertools import product
GEN_SYMBOLS = ['T', 'C', 'G', 'A']
def generate_triplets():
"""Generate all possible triples
Returns:
dictionary: dictionary with list of possible GEN triplets
"""
gen_triplets = [''.join(seq) for seq in product(GEN_SYMBOLS, repeat = 3)]
return {'gen_triplets': gen_triplets} |
dave-s477/SoMeNLP | somenlp/entity_disambiguation/efficient_prediction.py | <gh_stars>0
import torch
import torch.nn as nn
import os
import json
import pickle
import random
import copy
import time
import re
import math
import numpy as np
import nltk
import itertools
nltk.download('stopwords')
from functools import partial
from nltk.corpus import stopwords
from torch.utils.data import DataLoader, IterableDataset, Dataset
#torch.multiprocessing.set_sharing_strategy('file_system')
from heapq import merge
from pathlib import Path
from . import EntityDisambiguationFeatureGenerator
from .model import DisambiguationModel
from sklearn.metrics import classification_report
BLOCK_SIZE = 5000000
STOPS = stopwords.words('english')
def normalize(s):
norm_s = re.sub('[^0-9a-zA-Z]+', ' ', s.casefold()).rstrip('0123456789 ,.').lstrip(' ')
norm_s = ' '.join([w for w in norm_s.split() if w not in STOPS])
if not norm_s:
norm_s = s
return norm_s
def remove_spaces(s):
replace_regex = re.compile(r'\s(?P<to_keep>[\+\-#™_/\d]+)\s?')
return replace_regex.sub(r'\g<to_keep>', s)
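# Worked examples (hedged):
#   normalize('The GraphPad Prism 7')   # -> 'graphpad prism' (stopword, case
#                                       #    and trailing digits stripped)
#   remove_spaces('GraphPad Prism 7')   # -> 'GraphPad Prism7'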
class ReducedSampleSet():
def __init__(self, in_path, overview_file, save_path='/tmp'):
self.output_path = '{}/reduced_features.json'.format(save_path)
if os.path.isfile(self.output_path):
self._load()
else:
self._sample_overview(overview_file)
self._generate(in_path)
def _sample_overview(self, overview_file):
self.sample_overview = {}
with open(overview_file, 'r') as f_in:
for line in f_in:
num, key = line.rstrip().split(maxsplit=1)
self.sample_overview[key] = int(num)
def _load(self):
with open(self.output_path, 'r') as j_in:
self.sample_set = json.load(j_in)
def _save(self):
with open(self.output_path, 'w') as j_out:
json.dump(self.sample_set, j_out, indent=4)
def _generate(self, in_path):
file_count = 0
self.sample_set = {}
print("Gathering files..")
file_list = Path(in_path).rglob('*.linking')
for f in file_list:
file_count += 1
if file_count % 10000 == 0:
print("At file {}".format(file_count))
with f.open() as f_in:
entities = json.load(f_in)
for entity in entities:
name = entity.pop("mention")
if name not in self.sample_set:
self.sample_set[name] = {
"mention": name,
"string": remove_spaces(name),
"norm": normalize(name),
"contexts": []
}
if len(self.sample_set[name]['contexts']) < 5:
self.sample_set[name]['contexts'].append(copy.deepcopy(entity))
else:
keep_val = random.random()
occurrence_num = self.sample_overview[name] if name in self.sample_overview else 1000
if keep_val <= 1 / occurrence_num:
#print("Replace a value of {} with {} and num {}".format(name, keep_val, self.sample_overview[name]))
keep_pos = random.randint(0, 4)
self.sample_set[name]['contexts'][keep_pos] = copy.deepcopy(entity)
self._save()
# class IterDataset(IterableDataset):
# def __init__(self, data, sample_set, feature_calc, compare_set, labels=None):
# self.data = data
# self.sample_set = sample_set
# self.feature_calc = feature_calc
# self.compare_set = compare_set
# self.labels = labels
# def __iter__(self):
# for x in self.data:
# if self.compare_set is None:
# sample = [self.sample_set[x[0]], self.sample_set[x[1]]]
# else:
# sample = [self.sample_set[x[0]], self.compare_set[x[1]]]
# features = self.feature_calc.features_for_pair(sample)
# origin = [sample[0]['origin_id'], sample[1]['origin_id']]
# if self.labels is None:
# label = 0
# else:
# label = self.labels[sample[0]['origin_id']][sample[1]['origin_id']]
# yield torch.tensor(features), label, x, origin
class IterDataset(Dataset):
def __init__(self, data, sample_set, feature_calc, compare_set, labels=None):
self.data = data
self.sample_set = sample_set
self.feature_calc = feature_calc
self.compare_set = compare_set
self.labels = labels
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
x = self.data[idx]
if self.compare_set is None:
sample = [self.sample_set[x[0]], self.sample_set[x[1]]]
else:
sample = [self.sample_set[x[0]], self.compare_set[x[1]]]
features = self.feature_calc.features_for_pair(sample)
origin = [sample[0]['origin_id'], sample[1]['origin_id']]
if self.labels is None:
label = 0
else:
label = self.labels[sample[0]['origin_id']][sample[1]['origin_id']]
return torch.tensor(features), label, x, origin
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
worker_id = worker_info.id
split_size = math.ceil(len(dataset.data) / worker_info.num_workers)
dataset.data = dataset.data[worker_id * split_size:(worker_id + 1) * split_size]
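# Note (hedged): this sharding pattern matches the commented-out IterableDataset
# variant above, where each worker walks only its own slice of the data. The
# map-style IterDataset in use is indexed by the main-process sampler instead,
# which is why the DataLoader in _run_generation does not pass worker_init_fn.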
class DistanceMap():
def __init__(self, sample_set, dbpedia, model_config, start_row, end_row, threshold=0.01, compare_set=None, n_cores=8, save_path='/tmp', batch_size=500, device='cpu'):
self.device = torch.device(device)
self.distance_map = []
self.sample_set = sample_set
self.compare_set = compare_set
self.sample_num = len(self.sample_set)
self.max_idx = int(self.sample_num * ( self.sample_num - 1 ) / 2)
self.threshold = threshold
self.cores = n_cores
self.batch_size = batch_size
self.start_row = start_row
self.end_row = end_row
self.dbpedia = dbpedia
self.model_config = model_config
self.output_path = '{}/distance_map_{}_{}.p'.format(save_path, start_row, end_row)
def calculate_distance_map(self, compare=False):
if os.path.isfile(self.output_path):
self._load()
else:
self.feature_generator = EntityDisambiguationFeatureGenerator(self.dbpedia)
self._setup_model(self.model_config)
if not compare:
self._generate()
else:
self._compare()
def _setup_model(self, config):
self.model = DisambiguationModel(len(self.feature_generator.string_features_to_extract) + len(self.feature_generator.context_features_to_extract), self.model_config['layer_sizes'], self.model_config['drop_outs'])
if not 'checkpoint' in config or not config['checkpoint']:
raise(RuntimeError("A pretrained model is required for prediction.."))
checkpoint_data = torch.load(config['checkpoint'], map_location=self.device)
self.model.load_state_dict(checkpoint_data['model_state_dict'])
self.model.eval()
def _load(self):
with open(self.output_path, 'rb') as p_in:
self.distance_map = pickle.load(p_in)
def _save(self):
with open(self.output_path, 'wb') as p_out:
pickle.dump(self.distance_map, p_out)
def _generator(self):
for idx_i in range(self.start_row, self.end_row):
for idx_j in range(idx_i+1, len(self.sample_set)):
yield idx_i, idx_j
def _compare_generator(self):
for idx_i in range(len(self.sample_set)):
for idx_j in range(len(self.compare_set)):
yield idx_i, idx_j
def _prune_predictions(self, predictions):
pruned = []
last_source = []
for prediction in predictions:
if prediction[2] != last_source:
pruned.append(prediction)
else:
if prediction[0] < pruned[-1][0]:
pruned[-1][0] = prediction[0]
last_source = prediction[2]
return pruned
def _generate(self):
generator = self._generator()
self._run_generation(generator)
def _compare(self):
print("Doing compare")
generator = self._compare_generator()
self._run_generation(generator)
def _run_generation(self, generator):
count = 0
while True:
print("At block {}".format(count))
count += 1
start = time.time()
block = list(itertools.islice(generator, BLOCK_SIZE))
if not block:
break
iterable_dataset = IterDataset(list(block), self.sample_set, self.feature_generator, self.compare_set)
            loader = DataLoader(iterable_dataset, batch_size=self.batch_size, num_workers=self.cores, pin_memory=True)
predictions = []
with torch.no_grad():
for sample in loader:
#print(type(sample))
pred = self.model(sample[0].to(self.device))
pred_class = 1 - torch.squeeze(torch.sigmoid(pred)).cpu().numpy()
results = [[x, [int(idx_x), int(idx_y)], [int(org_x), int(org_y)]] for x, idx_x, idx_y, org_x, org_y in zip(pred_class, sample[2][0], sample[2][1], sample[3][0], sample[3][1]) if x <= self.threshold]
predictions.extend(results)
#print(results)
print("preds")
print(len(predictions))
# pruned_predictions = self._prune_predictions(predictions)
# print("pruned")
# print(len(pruned_predictions))
predictions.sort()
self.distance_map = list(merge(self.distance_map, predictions))
end = time.time()
print("Took {}".format(round(end-start, 5)))
self._save()
class EfficientClustering():
def __init__(self, sorted_list, threshold, sample_set, save_path='/tmp', ncores=8):
self.dim = len(sample_set)
self.entities = sample_set
self.threshold = threshold
self.output_path = '{}/clusters.json'.format(save_path)
self.clusters = list(range(len(sample_set)))
self.features = sorted_list
self.ncores = ncores
self.reverse_index = {}
def cluster(self):
if os.path.isfile(self.output_path):
self._load()
else:
self._cluster()
def _load(self):
with open(self.output_path, 'r') as j_in:
self.clusters = json.load(j_in)
def _save(self):
with open(self.output_path, 'w') as j_out:
json.dump(self.clusters, j_out, indent=4)
def _get_cluster_idx(self, idx):
cluster_value = self.clusters[idx]
while idx != cluster_value:
idx = cluster_value
cluster_value = self.clusters[idx]
return idx
def _match_clusters(self, matches):
clusters_to_match = [set(x) for x in matches]
global_index = len(clusters_to_match) - 1
while global_index > 0:
next_match = clusters_to_match[global_index]
for idx, match in enumerate(clusters_to_match[:global_index]):
if next_match & match:
new_matches = next_match.union(match)
clusters_to_match[idx] = new_matches
clusters_to_match.pop(global_index)
break
global_index -= 1
return clusters_to_match
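    # Worked example (hedged): overlapping pairs are merged transitively,
    #   self._match_clusters([[1, 2], [2, 3], [4, 5]])  # -> [{1, 2, 3}, {4, 5}]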
def _generator(self, start, end, clusters):
for idx_i in range(start, end):
for idx_j in range(idx_i+1, len(clusters)):
yield idx_i, idx_j
def _get_postprocessing_merges(self, rows, clusters):
print("Len clusters {}".format(len(clusters)))
gen = self._generator(rows[0], rows[1], clusters)
merges = []
for i in gen:
if clusters[i[0]] & clusters[i[1]]:
merges.append(i)
return merges
def _iterate_distances(self, threshold, cut_off_idx=0):
start = time.time()
if cut_off_idx > 0:
features = self.features[cut_off_idx:]
else:
features = self.features
for idx, i in enumerate(features):
if (idx+1) % 10000 == 0:
end = time.time()
print("At {}: {}, last step took {}".format(idx, i, round(end-start, 4)))
start = end
dist_val, pair_indices, _ = i
if dist_val > threshold:
break
x_idx, y_idx = pair_indices
x_pointer = self.clusters[x_idx]
y_pointer = self.clusters[y_idx]
if x_pointer == y_pointer:
# already a correct link -> do nothing
pass
elif x_pointer == x_idx and y_pointer == y_idx:
# two "original" samples -> new cluster
self.clusters[x_idx] = len(self.clusters)
self.clusters[y_idx] = len(self.clusters)
self.reverse_index[len(self.clusters)] = [x_idx, y_idx]
self.clusters.append(len(self.clusters))
elif x_pointer != x_idx and y_pointer == y_idx:
# first point does already belong to a cluster, second is "original" -> add second point to cluster
self.clusters[y_idx] = x_pointer
self.reverse_index[x_pointer].append(y_idx)
elif y_pointer != y_idx and x_pointer == x_idx:
# second point does already belong to a cluster, first is "original" -> add first point to cluster
self.clusters[x_idx] = y_pointer
self.reverse_index[y_pointer].append(x_idx)
elif y_pointer != y_idx and x_pointer != x_idx:
# both points already belong to clusters -> merge clusters (we will add one "empty" cluster in the result)
reverse_y_pointers = self.reverse_index.pop(y_pointer)
for p in reverse_y_pointers:
self.clusters[p] = x_pointer
self.reverse_index[x_pointer].append(p)
return idx + cut_off_idx
def _values_to_clusters(self):
cluster_buckets = {}
for i in range(len(self.clusters)-1, -1, -1):
if i == self.clusters[i]:
cluster_buckets[i] = {
'name': i,
'indices': [i],
'entities': []
}
else:
bucket_idx = self._get_cluster_idx(i)
cluster_buckets[bucket_idx]['indices'].append(i)
return cluster_buckets
def _print_values(self, n=100):
        for i in [x for x in self.features if x[0] < self.threshold][-n:]:
dist, pair_indices, _ = i
x_idx, y_idx = pair_indices
print(dist)
print('x = {}'.format(self.entities[x_idx]))
print('y = {}'.format(self.entities[y_idx]))
print()
def _cluster(self):
self._pre_cluster()
print("Iterating distances..")
self._iterate_distances(self.threshold)
print("Writing values to clusters..")
cluster_buckets = self._values_to_clusters()
for bucket in cluster_buckets:
cluster_buckets[bucket]['indices'] = [x for x in cluster_buckets[bucket]['indices'] if x < self.dim]
for cluster in cluster_buckets:
for idx in cluster_buckets[cluster]['indices']:
cluster_buckets[cluster]['entities'].append(self.entities[idx])
self.clusters = cluster_buckets
print("Removing empty clusters..")
self.clusters = {x:y for x,y in self.clusters.items() if y['entities']}
for cluster, values in self.clusters.items():
values['origin_ids'] = [x['origin_id'] for x in values['entities']]
self._save()
def _pre_cluster(self):
print("Performing pre-clustering on exact string matches.")
name_index = {}
for idx, ent in enumerate(self.entities):
if idx % 10000 == 0:
print("At entity {}".format(idx))
if ent['mention'] not in name_index:
name_index[ent['mention']] = []
name_index[ent['mention']].append(idx)
for idx, (k, v) in enumerate(name_index.items()):
if idx % 10000 == 0:
print("At pre-cluster {}".format(idx))
if len(v) > 1:
cluster_id = len(self.clusters)
self.reverse_index[cluster_id] = []
self.clusters.append(cluster_id)
for ent_id in v:
self.clusters[ent_id] = cluster_id
self.reverse_index[cluster_id].append(ent_id)
def evaluate(self, gold_table, step_size, start_eval_threshold):
print("Evaluating clustering..")
self._pre_cluster()
current_threshold = 0
iteration_index = 0
prev_num_clusters = len(self.clusters)
while current_threshold < self.threshold:
current_threshold = min(self.threshold, current_threshold + step_size)
iteration_index = self._iterate_distances(current_threshold, cut_off_idx=iteration_index)
if current_threshold >= start_eval_threshold and len(self.clusters) > prev_num_clusters:
print("Added clusters - start evaluation")
prev_num_clusters = len(self.clusters)
eval_entities_with_clusters = [[x, y] for x, y in zip(self.entities, self.clusters) if 'eval' in x]
print(len(eval_entities_with_clusters))
# for x in eval_entities_with_clusters:
# print(x)
# print()
# break
preds = []
trues = []
for idx, entity in enumerate(eval_entities_with_clusters):
for comp_entity in eval_entities_with_clusters[idx+1:]:
pred_link = entity[1] == comp_entity[1]
preds.append(pred_link)
true_link = gold_table[entity[0]['origin_id']][comp_entity[0]['origin_id']]
trues.append(true_link)
print("Threshold {}".format(current_threshold))
# print("Iteration idx {}".format(iteration_index))
print("Num clusters: {} (not correct - just for debugging)".format(len(self.clusters)))
# print(classification_report(trues, preds))
output_dict = classification_report(trues, preds, output_dict=True)
logging_string = 'Graph_out:\t{},{},{},{}'.format(current_threshold, output_dict['True']['precision'], output_dict['True']['recall'], output_dict['True']['f1-score'])
print(logging_string)
print()
# def _post_pro_merge(self):
# total_size = len(self.clusters) * (len(self.clusters) - 1) / 2
# max_samples = math.ceil(total_size / self.ncores)
# row_overview = [0]
# curr_size = 0
# for row in range(len(self.clusters)-1):
# curr_size += len(self.clusters)-1-row
# if curr_size > max_samples:
# row_overview.append(row)
# curr_size = 0
# row_overview.append(row)
# row_pairs = [[row_overview[x-1], row_overview[x]] for x in range(1, len(row_overview))]
# print(row_overview)
# print("pairs")
# print(row_pairs)
# with Pool(len(row_overview)) as p:
# origin_ids = [x['origin_ids'] for x in self.clusters.values()]
# fct = partial(self._get_postprocessing_merges, clusters=origin_ids)
# post_processing_merges = p.map(fct, row_pairs)
# TODO -> flatten list
# post_processing_merges = [item for sublist in post_processing_merges for item in sublist]
# post_processing_merges = []
# for idx_x, (cluster_x, values_x) in enumerate(self.clusters.items()):
# if idx_x % 5000 == 0:
# print("At cluster {}".format(idx_x))
# for cluster_y, values_y in list(self.clusters.items())[idx_x+1:]:
# for ent_x in values_x['entities']:
# matched = False
# for ent_y in values_y['entities']:
# if ent_x['mention'] == ent_y['mention']:
# matched = True
# #if values_x['origin_ids'] & values_y['origin_ids']:
# post_processing_merges.append([cluster_x, cluster_y])
# break
# if matched:
# break
# p_merges = self._match_clusters(post_processing_merges)
# for ppm in p_merges:
# ppm = list(ppm)
# for i in ppm[1:]:
# values = self.clusters.pop(i)
# self.clusters[ppm[0]]['indices'].extend(values['indices'])
# self.clusters[ppm[0]]['entities'].extend(values['entities'])
# self.clusters[ppm[0]]['origin_ids'].update(values['origin_ids'])
|
dave-s477/SoMeNLP | somenlp/entity_disambiguation/clustering.py | import numpy as np
import math
import time
import bisect
import sys
from statistics import mean
from multiprocessing import Pool
class SimpleCluster:
"""Cluster a set of entites
"""
def __init__(self, config, entities, features):
"""Init
Args:
entities (list): list of entity mentions with additional information
            features (dictionary): dictionary of triangular numpy matrices containing features for clustering
"""
self.config = config
self.features = []
for idx, i in enumerate(features):
if i <= config['threshold']:
self.features.append([i[0], idx])
self.features.sort()
self.dim = len(entities)
self.entities = entities
self.reverse_index = {}
#self.clusters = {idx: {'entities': [x]} for idx, x in enumerate(entities)}
self.clusters = list(range(len(entities)))
#self.clusters = {x: x for x in range(len(entities))}
def _get_eval_indices(self):
eval_indices = []
for _, cluster in self.clusters.items():
indices = [x['init_id'] for x in cluster['entities']]
eval_indices.append(indices)
return eval_indices
def _idx_to_matrix_pos(self, idx):
i = self.dim - 2 - int(math.sqrt(-8*idx + 4*self.dim*(self.dim-1)-7)/2.0 - 0.5)
j = idx + i + 1 - self.dim*(self.dim-1)/2 + (self.dim-i)*((self.dim-i)-1)/2
return i, int(j)
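    # Worked example (hedged): inverse of the condensed upper-triangle index.
    # With self.dim == 4, flat indices 0..5 map back to the pairs
    # (0,1), (0,2), (0,3), (1,2), (1,3), (2,3), e.g. _idx_to_matrix_pos(3) == (1, 2).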
def _get_cluster_idx(self, idx):
#print("idx..")
#print(idx)
cluster_value = self.clusters[idx]
#print(cluster_value)
while idx != cluster_value:
idx = cluster_value
#print(idx)
cluster_value = self.clusters[idx]
#print()
return idx
def cluster(self):
for i in self.features:
_, pair_index = i
x_idx, y_idx = self._idx_to_matrix_pos(pair_index)
x_pointer = self.clusters[x_idx]
y_pointer = self.clusters[y_idx]
if x_pointer == y_pointer:
# already a correct link -> do nothing
pass
elif x_pointer == x_idx and y_pointer == y_idx:
# two "original" samples -> new cluster
self.clusters[x_idx] = len(self.clusters)
self.clusters[y_idx] = len(self.clusters)
self.reverse_index[len(self.clusters)] = [x_idx, y_idx]
self.clusters.append(len(self.clusters))
elif x_pointer != x_idx and y_pointer == y_idx:
# first point does already belong to a cluster, second is "original" -> add second point to cluster
self.clusters[y_idx] = x_pointer
self.reverse_index[x_pointer].append(y_idx)
elif y_pointer != y_idx and x_pointer == x_idx:
# second point does already belong to a cluster, first is "original" -> add first point to cluster
self.clusters[x_idx] = y_pointer
self.reverse_index[y_pointer].append(x_idx)
elif y_pointer != y_idx and x_pointer != x_idx:
# both points already belong to clusters -> merge clusters (we will add one "empty" cluster in the result)
#self.clusters = [i if i == idx or i != y_pointer else x_pointer for idx, i in enumerate(self.clusters)]
#self.clusters[:self.dim] = [i if i == idx or i != y_pointer else x_pointer for idx, i in enumerate(self.clusters[:self.dim])]
reverse_y_pointers = self.reverse_index.pop(y_pointer)
for p in reverse_y_pointers:
self.clusters[p] = x_pointer
self.reverse_index[x_pointer].append(p)
#print(len(self.clusters))
cluster_buckets = {}
for i in range(len(self.clusters)-1, -1, -1):
#print(i)
#print(self.clusters[i])
if i == self.clusters[i]:
cluster_buckets[i] = {
'name': i,
'indices': [i],
'entities': []
}
else:
bucket_idx = self._get_cluster_idx(i)
cluster_buckets[bucket_idx]['indices'].append(i)
#print(len(cluster_buckets))
for bucket in cluster_buckets:
#print(bucket)
#print(cluster_buckets[bucket])
cluster_buckets[bucket]['indices'] = [x for x in cluster_buckets[bucket]['indices'] if x < self.dim]
#print(cluster_buckets[bucket])
#print()
for cluster in cluster_buckets:
for idx in cluster_buckets[cluster]['indices']:
cluster_buckets[cluster]['entities'].append(self.entities[idx])
self.clusters = cluster_buckets
class Clustering:
"""Cluster a set of entites
"""
def __init__(self, config, entities, features):
"""Init
Args:
entities (list): list of entity mentions with additional information
            features (dictionary): dictionary of triangular numpy matrices containing features for clustering
"""
self.config = config
self.features = features
# self._calculate_distances()
self.threshold = config['threshold']
self.dim = len(entities)
self.clusters = {idx: {'entities': [x]} for idx, x in enumerate(entities)}
self.new_clusters = list(self.clusters.keys())
self.cluster_count = len(self.clusters)
# def _calculate_distances(self):
# if self.config['feat_to_dist'] == 'average':
# self.distances = np.mean(self.features, axis=-1)
# print(self.distances)
# elif self.config['feat_to_dist'] == 'sum':
# self.distances = np.sum(self.features, axis=-1)
# else:
# raise(RuntimeError("Got unknown feat to dist config: {}".format(self.config['feat_to_dist'])))
# self.min = self.distances.min()
# print(self.min)
# self.max = self.distances.max()
# print(self.max)
# self.threshold = self.min + (self.max - self.min) * self.config['threshold']
# print(self.threshold)
def _get_eval_indices(self):
eval_indices = []
for _, cluster in self.clusters.items():
indices = [x['init_id'] for x in cluster['entities']]
eval_indices.append(indices)
return eval_indices
def _get_matrix_index(self, x, y):
if x > y:
x1 = y
x2 = x
else:
x1 = x
x2 = y
return int((x1 * self.dim) - ((x1+1) * ((x1+1) + 1) / 2 ) + x2)
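    # Worked example (hedged): condensed index into the flat upper triangle
    # (diagonal excluded). With self.dim == 4: (0,1)->0, (0,2)->1, (0,3)->2,
    # (1,2)->3, (1,3)->4, (2,3)->5; argument order does not matter.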
def _calculate_distance(self, cluster_id):
distances = {}
for c_k, cluster in self.clusters.items():
if c_k == cluster_id:
distances[cluster_id] = 1.0
else:
cluster_distances = []
for c_ent in cluster['entities']:
for id_ent in self.clusters[cluster_id]['entities']:
cluster_distances.append(self.features[self._get_matrix_index(c_ent['init_id'], id_ent['init_id'])])
if self.config['linkage'] == 'single':
distances[c_k] = float(min(cluster_distances))
elif self.config['linkage'] == 'complete':
distances[c_k] = float(max(cluster_distances))
elif self.config['linkage'] == 'average':
distances[c_k] = mean([float(x) for x in cluster_distances])
else:
raise(RuntimeError("Received unkown type of linkage {}".format(self.config['linkage'])))
return distances
def _calculate_distances_cluster_parallel(self):
pass
def _match_clusters(self, matches):
clusters_to_match = [set(x) for x in matches]
global_index = len(clusters_to_match) - 1
while global_index > 0:
next_match = clusters_to_match[global_index]
for idx, match in enumerate(clusters_to_match[:global_index]):
if next_match & match:
new_matches = next_match.union(match)
clusters_to_match[idx] = new_matches
clusters_to_match.pop(global_index)
break
global_index -= 1
return clusters_to_match
def cluster(self):
# max_count = 0
while len(self.new_clusters) > 0: # and max_count < 1000:
# max_count += 1
if len(self.new_clusters) > 1:
# calculate distance to all other clusters parallel with defined linkage type
for cluster in self.new_clusters:
dist = self._calculate_distance(cluster)
self.clusters[cluster]['distances'] = dist
else:
# calculate distance to all other clusters parallel with defined linkage type
self.clusters[self.new_clusters[0]]['distances'] = self._calculate_distance(self.new_clusters[0])
# find smallest distance between all existing clusters.. test if clusters do still exist!
smallest_dist = 1.0
matches = []
for c_k, cluster in self.clusters.items():
keys_to_pop = []
for dist_k, dist_value in cluster['distances'].items():
# TODO: test if clusters do still exist
if dist_k in self.clusters:
if math.isclose(smallest_dist, dist_value):
to_append = sorted([c_k, dist_k])
if to_append not in matches:
matches.append(to_append)
elif dist_value < smallest_dist:
smallest_dist = dist_value
to_append = sorted([c_k, dist_k])
matches = [to_append]
else:
keys_to_pop.append(dist_k)
for k_pop in keys_to_pop:
cluster['distances'].pop(k_pop)
if smallest_dist > self.threshold:
print("Stopped clustering due to threshold criterion: T {}, D {}".format(round(self.threshold, 4), round(smallest_dist, 4)))
break
matched_clusters = self._match_clusters(sorted(matches))
# pop clusters to be merged from cluster list
self.new_clusters = []
for cluster_group in matched_clusters:
new_entities = []
for cluster_idx in cluster_group:
clust_entities = self.clusters.pop(cluster_idx)
new_entities.extend(clust_entities['entities'])
self.clusters[self.cluster_count] = {
'entities': new_entities
}
self.new_clusters.append(self.cluster_count)
self.cluster_count += 1
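# Usage sketch (editorial; the config values and condensed feature vector are
# assumptions inferred from the constructor, not part of the original file):
# clusterer = Clustering({'threshold': 0.4, 'linkage': 'average'}, entities, features)
# clusterer.cluster()
# grouped_ids = clusterer._get_eval_indices()  # lists of 'init_id's per cluster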
class IntervalClustering:
"""Cluster a set of entites
"""
def __init__(self, config, entities, features):
"""Init
Args:
entities (list): list of entity mentions with additional information
features (dictionary): dictionary of triangular numpy matrices containing features for clustering
"""
self.config = config
self.features = features
self.threshold = config['threshold']
self.n_intervals = config['intervals']
self.n_cores = config['n_cores']
self.drop_below = config['drop_below_threshold']
self.dim = len(entities)
self.clusters = {idx: {'entities': [x]} for idx, x in enumerate(entities)}
self.new_clusters = list(self.clusters.keys())
self.cluster_count = len(self.clusters)
self._calculate_intervals()
self.cluster_distances = []
self.cluster_distances_keys = []
def _calculate_intervals(self):
self.intervals = np.linspace(0, self.threshold, num=self.n_intervals, endpoint=True)
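# Example: with threshold=0.5 and n_intervals=6 this yields
# array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5]); cluster() then merges, interval by
# interval, every pair whose distance falls below the current bound.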
def _get_eval_indices(self):
eval_indices = []
for _, cluster in self.clusters.items():
indices = [x['init_id'] for x in cluster['entities']]
eval_indices.append(indices)
return eval_indices
def _get_matrix_index(self, x, y):
if x > y:
x1 = y
x2 = x
else:
x1 = x
x2 = y
return int((x1 * self.dim) - ((x1+1) * ((x1+1) + 1) / 2 ) + x2)
# NOTE: this per-cluster variant is shadowed by the pairwise _calculate_distance
# defined further below (Python keeps only the last definition in a class body),
# so this method is effectively dead code.
def _calculate_distance(self, cluster_id):
distances = {}
for c_k, cluster in self.clusters.items():
if c_k == cluster_id:
distances[cluster_id] = 1.0
else:
cluster_distances = []
for c_ent in cluster['entities']:
for id_ent in self.clusters[cluster_id]['entities']:
cluster_distances.append(self.features[self._get_matrix_index(c_ent['init_id'], id_ent['init_id'])])
if self.config['linkage'] == 'single':
distances[c_k] = float(min(cluster_distances))
elif self.config['linkage'] == 'complete':
distances[c_k] = float(max(cluster_distances))
elif self.config['linkage'] == 'average':
distances[c_k] = mean([float(x) for x in cluster_distances])
else:
raise(RuntimeError("Received unkown type of linkage {}".format(self.config['linkage'])))
return distances
def _match_clusters(self, matches):
clusters_to_match = [set(x) for x in matches]
global_index = len(clusters_to_match) - 1
while global_index > 0:
next_match = clusters_to_match[global_index]
for idx, match in enumerate(clusters_to_match[:global_index]):
if next_match & match:
new_matches = next_match.union(match)
clusters_to_match[idx] = new_matches
clusters_to_match.pop(global_index)
break
global_index -= 1
return clusters_to_match
def _calculate_distance(self, distance_tuple):
single_distances = []
for first_entity in self.clusters[distance_tuple[0]]['entities']:
for second_entity in self.clusters[distance_tuple[1]]['entities']:
single_distances.append(self.features[self._get_matrix_index(first_entity['init_id'], second_entity['init_id'])])
if self.config['linkage'] == 'single':
distance = float(min(single_distances))
elif self.config['linkage'] == 'complete':
distance = float(max(single_distances))
elif self.config['linkage'] == 'average':
distance = mean([float(x) for x in single_distances])
else:
raise(RuntimeError("Received unkown type of linkage {}".format(self.config['linkage'])))
if self.drop_below and distance > self.threshold:
return None
return distance
def _calculate_distances_cluster_parallel(self, distance_tuples):
with Pool(self.n_cores) as p:
distances = p.map(self._calculate_distance, distance_tuples)
return distances
def _update_distance_list(self):
distances_to_calculate = set()
print(len(self.new_clusters))
while self.new_clusters:
cluster = self.new_clusters.pop()
distances_to_calculate.update([tuple([x, cluster]) if x < cluster else tuple([cluster, x]) for x in self.clusters.keys() if x != cluster])
print(len(distances_to_calculate))
distances = self._calculate_distances_cluster_parallel(distances_to_calculate)
distances_with_keys = [[x, y] for x, y in zip(distances, distances_to_calculate) if x is not None]
print("Benchmark")
start = time.time()
sorted(distances_with_keys)
end = time.time()
print("First sorting option: {}".format(round(end-start, 4)))
start = time.time()
qsort(distances_with_keys)
end = time.time()
print("First sorting option: {}".format(round(end-start, 4)))
if not self.cluster_distances:
distance_tuples = sorted(distances_with_keys)
self.cluster_distances = [x[0] for x in distance_tuples]
self.cluster_distances_keys = [x[1] for x in distance_tuples]
else:
for distance, keys in distances_with_keys:
insertion_index = bisect.bisect(self.cluster_distances, distance)
self.cluster_distances.insert(insertion_index, distance)
self.cluster_distances_keys.insert(insertion_index, keys)
# note: bisect above already finds the right insertion point for each element
def _remove_from_distance_list(self, elements):
indices = []
for idx, cluster_keys in enumerate(self.cluster_distances_keys):
if cluster_keys[0] in elements or cluster_keys[1] in elements:
indices.append(idx)
for idx in reversed(indices):
self.cluster_distances_keys.pop(idx)
self.cluster_distances.pop(idx)
def cluster(self):
for interval in self.intervals[1:]:
start = time.time()
self._update_distance_list()
# for cluster in self.new_clusters:
# dist = self._calculate_distance(cluster)
# self.clusters[cluster]['distances'] = dist
end = time.time()
print("Calculating distances took {}".format(round(end-start, 4)))
start = end
# # find smallest distance between all existing clusters.. test if clusters do still exist!
# matches = []
bisection_point = bisect.bisect_right(self.cluster_distances, interval)
print(bisection_point)
clusters_to_merge = self.cluster_distances_keys[:bisection_point]
matched_clusters = self._match_clusters(clusters_to_merge)
end = time.time()
print("Matching clusters took {}".format(round(end-start, 4)))
start = end
self.cluster_distances_keys = self.cluster_distances_keys[bisection_point:]
self.cluster_distances = self.cluster_distances[bisection_point:]
clusters_to_remove = set(sorted([item for sublist in matched_clusters for item in sublist]))
self._remove_from_distance_list(clusters_to_remove)
# for c_k, cluster in self.clusters.items():
# keys_to_pop = []
# for dist_k, dist_value in cluster['distances'].items():
# # TODO: test if clusters do still exist
# if dist_k in self.clusters:
# if dist_value <= interval:
# to_append = sorted([c_k, dist_k])
# if to_append not in matches:
# matches.append(to_append)
# else:
# keys_to_pop.append(dist_k)
# for k_pop in keys_to_pop:
# cluster['distances'].pop(k_pop)
# matched_clusters = self._match_clusters(sorted(matches))
end = time.time()
print("Removing entries took {}".format(round(end-start, 4)))
start = end
# # pop clusters to be merged from cluster list
self.new_clusters = []
for cluster_group in matched_clusters:
new_entities = []
for cluster_idx in cluster_group:
clust_entities = self.clusters.pop(cluster_idx)
new_entities.extend(clust_entities['entities'])
self.clusters[self.cluster_count] = {
'entities': new_entities
}
self.new_clusters.append(self.cluster_count)
self.cluster_count += 1
end = time.time()
print("New clustering took {}\n".format(round(end-start, 4)))
start = end
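# Usage sketch (editorial; config keys inferred from __init__, values are assumptions):
# cfg = {'threshold': 0.4, 'intervals': 5, 'n_cores': 8,
#        'drop_below_threshold': True, 'linkage': 'average'}
# interval_clusterer = IntervalClustering(cfg, entities, features)
# interval_clusterer.cluster()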
|
dave-s477/SoMeNLP | somenlp/entity_disambiguation/feature_writer.py | import json
import os
import numpy as np
class FeatureWriter:
def __init__(self, save_dir):
self.output_path = save_dir
def read_features(self, path):
with open(path, 'r') as j_in:
feature_locations = json.load(j_in)
feature_matrices = {}
for k,v in feature_locations.items():
if k not in feature_matrices:
feature_matrices[k] = {}
for k2, v2 in v.items():
if k2 not in feature_matrices[k]:
feature_matrices[k][k2] = {}
for k3, v3 in v2.items():
feature_matrices[k][k2][k3] = np.load(v3)
return feature_matrices
def write_features(self, matrices):
os.makedirs(self.output_path, exist_ok=True)
save_locations = {}
for set_k, set_v in matrices.items():
if set_k not in save_locations:
save_locations[set_k] = {}
for set_k2, set_v2 in set_v.items():
if set_k2 not in save_locations[set_k]:
save_locations[set_k][set_k2] = {}
for feature_k, feature_m in set_v2.items():
output_name = '{}/{}_{}_{}'.format(self.output_path, set_k, set_k2, feature_k)
save_locations[set_k][set_k2][feature_k] = output_name + '.npy'
np.save(output_name, feature_m)
with open(self.output_path + '/save_locations.json', 'w') as j_out:
json.dump(save_locations, j_out, indent=4)
def save_triangular_matrix(self, tri_matrix, save_location):
""" This function saves a triangular matrix in a flat representation so unnecessary zeros can be excluded. Only necessary indicies from the matrix are extracted line by line.
Arguments:
tri_matrix (np.ndarray with an arbitrary dtype): the matrix to save
save_location (string): path to output file
"""
indices = np.triu_indices(tri_matrix.shape[0], 1)
flat_representation = tri_matrix[indices]
np.save(save_location, flat_representation) |
dave-s477/SoMeNLP | somenlp/NER/BERT_dataset.py | <reponame>dave-s477/SoMeNLP
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
class BERTDataset(Dataset):
"""PyTorch Dataset for BERT data
"""
def __init__(self, ids, tags, masks, transforms=None):
self.ids = ids
self.tags = tags
self.masks = masks
self.transforms = transforms
def __len__(self):
return len(self.ids)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = {
'ids': self.ids[idx],
'masks': self.masks[idx],
'tags': self.tags[idx]
}
if self.transforms:
sample = self.transforms(sample)
return sample
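# Minimal usage sketch (editorial; tensor inputs are assumed, not shown here):
# from torch.utils.data import DataLoader
# dataset = BERTDataset(ids, tags, masks)
# loader = DataLoader(dataset, batch_size=32, shuffle=True)
# for batch in loader:
#     pass  # batch is a dict with 'ids', 'masks' and 'tags' tensors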
class BERTMultiDataset(Dataset):
"""PyTorch Dataset for BERT data
"""
def __init__(self, ids, tags, masks, lengths, transforms=None):
self.ids = ids
self.tags = tags
self.masks = masks
self.lengths = lengths
self.transforms = transforms
def __len__(self):
return len(self.ids)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = {
'ids': self.ids[idx],
'masks': self.masks[idx],
'lengths': self.lengths[idx]
}
for k in self.tags.keys():
sample[k] = self.tags[k][idx]
if self.transforms:
sample = self.transforms(sample)
return sample
|
dave-s477/SoMeNLP | somenlp/entity_disambiguation/model.py | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.utils.data import Dataset
from sklearn.metrics import classification_report
from torch.utils.tensorboard import SummaryWriter
class DisambiguationModel(nn.Module):
def __init__(self, dim, layer_sizes, drop_outs):
super(DisambiguationModel, self).__init__()
self.lin_layers = nn.ModuleList([nn.Linear(dim, layer_sizes[0])])
for i in range(len(layer_sizes)-1):
self.lin_layers.append(nn.ReLU())
self.lin_layers.append(nn.Dropout(drop_outs[i]))
self.lin_layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
def forward(self, x):
for l in self.lin_layers:
x = l(x)
return x
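# Example (editorial; the sizes are assumptions): with 14 input features,
# model = DisambiguationModel(14, layer_sizes=[64, 32, 1], drop_outs=[0.2, 0.2])
# logits = model(torch.rand(8, 14))  # shape (8, 1), suitable for BCEWithLogitsLoss
# Note that len(drop_outs) must be len(layer_sizes) - 1 for the loop above.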
class ModelWrapper():
def __init__(self, config, input_dim, save_path=None, device='cpu'):
self.config = config
self.device = device
self.writer = SummaryWriter('{}/log'.format(save_path))
self.model = DisambiguationModel(input_dim, config['layer_sizes'], config['drop_outs']).to(device)
self.loss_fn = nn.BCEWithLogitsLoss().to(device)
self.optim = optim.Adam(self.model.parameters())
self.epoch = 0
self.save_path = save_path
def train(self, train_set):
self.model.train()
cum_loss = 0
for idx, sample in enumerate(train_set):
self.optim.zero_grad()
pred = self.model(sample[0].to(self.device))
loss = self.loss_fn(torch.squeeze(pred), sample[1].float().to(self.device))
loss.backward()
self.optim.step()
cum_loss += loss.detach()
if idx != 0 and idx % 4000 == 0:
print("Batch {} Avg. Loss {}".format(idx, cum_loss / 100))
cum_loss = 0
def test(self, test_set):
self.model.eval()
predictions = []
true = []
for idx, sample in enumerate(test_set):
with torch.no_grad():
pred = self.model(sample[0].to(self.device))
pred_class = torch.squeeze(torch.sigmoid(pred))
predictions.extend(pred_class.cpu().numpy())
true.extend(sample[1])
if idx != 0 and idx % 4000 == 0:
print("At batch {}".format(idx))
return true, predictions
def predict(self, test_set='train', test_set_ext='train'):
self.model.eval()
predictions = []
for idx, sample in enumerate(self.inputs[test_set][test_set_ext]['loader']):
with torch.no_grad():
pred = self.model(sample[0])
pred_class = torch.squeeze(torch.sigmoid(pred))
predictions.extend(pred_class.cpu().numpy())
return predictions
def eval(self, true, predictions, threshold=.5, epoch=0, write=True):
predictions = [0 if pred <= threshold else 1 for pred in predictions]
print(classification_report(true, predictions))
classification_dict = classification_report(true, predictions, output_dict=True)
if write and 'True' in classification_dict:
for k,v in classification_dict['True'].items():
self.writer.add_scalar(k, v, epoch)
def save(self):
print("Saving model")
torch.save({
'epoch': self.epoch,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optim.state_dict(),
}, '{}/model.pth'.format(self.save_path))
def load(self):
if 'checkpoint' in self.config and self.config['checkpoint']:
print("Loading model from checkpoint")
checkpoint_data = torch.load(self.config['checkpoint'], map_location=self.device)
self.model.load_state_dict(checkpoint_data['model_state_dict'])
self.optim.load_state_dict(checkpoint_data['optimizer_state_dict'])
self.epoch = checkpoint_data['epoch']
|
dave-s477/SoMeNLP | somenlp/RE/__init__.py | <filename>somenlp/RE/__init__.py
from .features import FeatureGenerator
from .RE_model import REmodel |
dave-s477/SoMeNLP | somenlp/feature_engineering/distant_supervision_rules.py | <reponame>dave-s477/SoMeNLP
import numpy as np
def distant_supervision_by_dict(candidate, dictionary, mapping):
result = np.zeros(len(mapping), dtype=bool) # np.bool is removed in recent NumPy versions
span_to_examine = candidate.base_span
values = [mapping[x] for x in dictionary[span_to_examine]] if span_to_examine in dictionary.keys() else []
if values:
for val in values:
result[val] = True
return result |
dave-s477/SoMeNLP | somenlp/feature_engineering/sentence_rep.py | <gh_stars>0
import string
import nltk
import re
import unicodedata
import numpy as np
from nltk.corpus import wordnet
from . import word_rules
lemmatizer = nltk.stem.WordNetLemmatizer()
URL = re.compile(r"^(https?\:\/\/[a-zA-Z0-9\-\.]+[\w\/\._\-\:~\?=#%]*[\w\/_\-\:~\?=#%]|ftp\:\/\/[a-zA-Z0-9\-\.]+[\w\/\._\-\:~\?=#%]*[\w\/_\-\:~\?=#%]|www\.[a-zA-Z0-9\-\.]+[\w\/\._\-\:~\?=#%]*|[a-zA-Z0-9\-\.]+\.(org|edu)/[\w\/_\-\:~\?=#%]*)$")
CITATION = re.compile(r'^\[[0-9\-,\?]+\]$')
FLOAT_NUM = re.compile(r'^\d+\.\d*$')
FLOAT_NON_LEADING = re.compile(r'^\.\d+$')
VERSION_LIKE_NUM = re.compile(r'^(\d+\.){2}\w*$')
LONG_VERSION_LIKE = re.compile(r'^(\d+\.){3,8}\w*$')
LONG_NUM = re.compile(r'^(\d{1,3}\,){1,8}\d{3}$')
HEADWORDS = ['software', 'package', 'program', 'tool', 'toolbox', 'web', 'service', 'spreadsheet', 'database', 'registry', 'data', 'model', 'algorithm', 'kit', 'standard', 'method', 'procedure']
def get_wordnet_pos(tag):
"""Map POS tag to first character lemmatize() accepts"""
s_tag = tag[0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
return tag_dict.get(s_tag, wordnet.NOUN)
class SentenceRepresentation:
def __init__(self, input_string):
self.features = {}
# Basic words
self.tokens = input_string.split()
self.tokens_lower = [x.lower() for x in self.tokens]
pos_tags = nltk.pos_tag(self.tokens)
self.lemmas = []
for w, t in pos_tags:
wntag = get_wordnet_pos(t)
self.lemmas.append(lemmatizer.lemmatize(w, wntag))
self.length = len(self.tokens)
self.pos_tags = [x[1] for x in pos_tags]
# additional information
self.features['token_length'] = [len(x) for x in self.tokens]
self.features['punct'] = [x in string.punctuation for x in self.tokens]
self.features['math_chars'] = [unicodedata.category(x) == 'Sm' if len(x) == 1 else False for x in self.tokens]
self.features['hyphen'] = [x == '-' for x in self.tokens]
self.features['slash'] = [x == '/' for x in self.tokens]
self.features['bracket_open'] = [x == '(' for x in self.tokens]
self.features['bracket_close'] = [x == ')' for x in self.tokens]
# digit information
self.features['digit'] = [x.isdigit() for x in self.tokens]
self.features['float_num'] = [bool(FLOAT_NUM.match(x)) for x in self.tokens]
self.features['float_non_leading'] = [bool(FLOAT_NON_LEADING.match(x)) for x in self.tokens]
self.features['version_like_num'] = [bool(VERSION_LIKE_NUM.match(x)) for x in self.tokens]
self.features['long_version_like'] = [bool(LONG_VERSION_LIKE.match(x)) for x in self.tokens]
self.features['long_num'] = [bool(LONG_NUM.match(x)) for x in self.tokens]
# further token based information
self.features['citation'] = [bool(CITATION.match(x)) for x in self.tokens]
self.features['url'] = [bool(URL.match(x)) for x in self.lemmas]
# casing info
self.features['upper'] = [word_rules.upper_cased(x) for x in self.tokens]
self.features['first_char_upper'] = [word_rules.first_char_upper(x) for x in self.tokens]
self.features['mixed_case'] = [word_rules.mixed_case(x) for x in self.tokens]
self.features['lower_case'] = [word_rules.lower_case(x) for x in self.tokens]
# individual head words
for headword in HEADWORDS:
self.features[headword] = [x == headword for x in self.lemmas]
def get_features(self):
matrix = []
for _, v in self.features.items():
matrix.append(v)
return np.array(matrix)
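# Note (editorial): the returned array is shaped (n_features, sentence_length);
# encode_sentence() in gen_custom_features.py transposes the concatenated result
# to a tokens-first layout.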
def get_left_tokens(self, idx, size, style='plain'):
if style == 'plain':
return self.tokens[max(idx-size, 0):idx]
if style == 'lower':
return self.tokens_lower[max(idx-size, 0):idx]
elif style == 'lemma':
return self.lemmas[max(idx-size, 0):idx]
else:
raise(RuntimeError("Style '{}' is not defined".format(style)))
def get_right_tokens(self, idx, size, style='plain'):
if style == 'plain':
return self.tokens[idx:idx+size]
if style == 'lower':
return self.tokens_lower[idx:idx+size]
elif style == 'lemma':
return self.lemmas[idx:idx+size]
else:
raise(RuntimeError("Style '{}' is not defined".format(style)))
def get_candidate(self, start_idx, end_idx):
return self.tokens[start_idx:end_idx], self.lemmas[start_idx:end_idx]
class Candidate:
def __init__(self, sentence, start_idx, end_idx):
self.tokens, self.lemmas = sentence.get_candidate(start_idx, end_idx)
self.sentence = sentence
self.start_idx = start_idx
self.end_idx = end_idx
self.base_span = ' '.join(self.tokens)
|
dave-s477/SoMeNLP | somenlp/entity_disambiguation/__init__.py | from .linking_data import LinkingData
from .feature_writer import FeatureWriter
from .feature_calculator import EntityDisambiguationFeatureGenerator
from .model import ModelWrapper
from .clustering import Clustering, IntervalClustering, SimpleCluster
from .ed_main import main
from .efficient_prediction import ReducedSampleSet, DistanceMap, EfficientClustering, IterDataset, worker_init_fn |
dave-s477/SoMeNLP | somenlp/entity_disambiguation/ed_main.py | from somenlp.entity_disambiguation.efficient_prediction import BLOCK_SIZE
import torch
import torch.nn as nn
import torch.optim as optim
torch.multiprocessing.set_sharing_strategy('file_system')
import json
import math
import pickle
import os
from somenlp.entity_disambiguation.model import DisambiguationModel
import sys
import random
import itertools
import time as timeit
import numpy as np
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, IterableDataset, Dataset
from itertools import combinations
from articlenizer.util import chunk_list
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from sklearn.metrics import classification_report
from somenlp.entity_disambiguation import LinkingData, EntityDisambiguationFeatureGenerator, FeatureWriter, ModelWrapper, Clustering, IntervalClustering, SimpleCluster
from somenlp.entity_disambiguation.efficient_prediction import IterDataset, worker_init_fn
def concat_features(vectors, labels=False):
dim = -1
if not vectors:
raise(RuntimeError("No features were given"))
feature_matrix = {}
for k, v in vectors.items():
if k not in feature_matrix:
feature_matrix[k] = {}
for k2, v2 in v.items():
if k2 not in feature_matrix[k]:
feature_matrix[k][k2] = {}
for idx, (k3, v3) in enumerate(v2.items()):
if idx == 0:
matrix = np.expand_dims(v3, -1)
elif labels and k3 == 'labels':
feature_matrix[k][k2]['labels'] = v3
else:
matrix = np.append(matrix, np.expand_dims(v3, -1), axis=1)
if dim < 0:
dim = matrix.shape[1]
feature_matrix[k][k2]['features'] = matrix
return feature_matrix, dim
def evaluate(entities, indices, labels):
pairs_to_compare = list(combinations(entities, 2))
tp_same_name, fp_same_name, fn_same_name, tn_same_name = 0, 0, 0, 0
tp_diff_name, fp_diff_name, fn_diff_name, tn_diff_name = 0, 0, 0, 0
for pair, label in zip(pairs_to_compare, labels):
exact_match = False
if pair[0]['mention'] == pair[1]['mention']:
exact_match = True
pos = False
for ind in indices:
if pair[0]['init_id'] in ind and pair[1]['init_id'] in ind:
pos = True
if pos and label:
if exact_match:
tp_same_name += 1
else:
tp_diff_name += 1
elif pos and not label:
if exact_match:
fp_same_name += 1
else:
fp_diff_name += 1
elif not pos and label:
if exact_match:
fn_same_name += 1
else:
fn_diff_name += 1
elif not pos and not label:
if exact_match:
tn_same_name += 1
else:
tn_diff_name += 1
tp = tp_same_name + tp_diff_name
fp = fp_same_name + fp_diff_name
fn = fn_same_name + fn_diff_name
tn = tn_same_name + tn_diff_name
print('Overall: tp ', tp, ', fp ', fp, ', fn ', fn, ', tn ', tn)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = (2 * precision * recall) / (precision + recall)
print('Precision: {}, Recall {}, FScore: {}\n'.format(
round(precision, 2),
round(recall, 2),
round(fscore, 2)
))
print('Same name: tp ', tp_same_name, ', fp ', fp_same_name, ', fn ', fn_same_name, ', tn ', tn_same_name)
precision = tp_same_name / (tp_same_name + fp_same_name)
recall = tp_same_name / (tp_same_name + fn_same_name)
fscore = (2 * precision * recall) / (precision + recall)
print('Precision: {}, Recall {}, FScore: {}\n'.format(
round(precision, 2),
round(recall, 2),
round(fscore, 2)
))
print('Diff name: tp ', tp_diff_name, ', fp ', fp_diff_name, ', fn ', fn_diff_name, ', tn ', tn_diff_name)
precision = tp_diff_name / (tp_diff_name + fp_diff_name)
recall = tp_diff_name / (tp_diff_name + fn_diff_name)
fscore = (2 * precision * recall) / (precision + recall)
print('Precision: {}, Recall {}, FScore: {}\n'.format(
round(precision, 2),
round(recall, 2),
round(fscore, 2)
))
def generator_fct(sample_num, start_row, end_row):
for idx_i in range(start_row, end_row):
for idx_j in range(idx_i+1, sample_num):
yield idx_i, idx_j
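# Example: generator_fct(4, 0, 2) yields the index pairs
# (0, 1), (0, 2), (0, 3), (1, 2), (1, 3) -- every pair whose first element
# lies in rows [0, 2) of a 4-sample set.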
def main(config, curr_time, ncores, gpu):
if gpu < 0:
device = torch.device("cpu")
else:
device = torch.device("cuda:{}".format(gpu))
if config['gold']:
read_labels = True
else:
read_labels = False
save_path = '{}/{}/{}'.format(
config['logdir'].rstrip('/'),
config['name'].rstrip('/'),
curr_time
)
if not os.path.isdir(save_path):
os.makedirs(save_path)
with open('{}/config.json'.format(save_path), 'w') as j_out:
json.dump(config, j_out, indent=4)
linking_data = LinkingData(config)
feature_generator = EntityDisambiguationFeatureGenerator(config['dbpedia'])
input_dim = len(feature_generator.string_features_to_extract) + len(feature_generator.context_features_to_extract)
print("Augmentated Dataset has {} samples".format(len(linking_data.all)))
test_set_size = int(len(linking_data.all) * 0.18)
model_w = ModelWrapper(config['model'], input_dim, save_path, device)
print("MODEL")
print(model_w.model)
print()
if config['model']['checkpoint']:
print("Loading model for disambiguation.. (or re-training)")
model_w.load()
for e in range(config['model']['epochs']):
model_w.epoch += 1
print("At Epoch {} ({})".format(e, model_w.epoch))
print(test_set_size)
train_generator = generator_fct(len(linking_data.all), test_set_size, len(linking_data.all))
count = 0
while True:
print("At block {}".format(count))
count += 1
block = list(itertools.islice(train_generator, BLOCK_SIZE))
if not block:
break
train_dataset = IterDataset(list(block), linking_data.all, feature_generator, None, labels=linking_data.link_lookup_table)
train_loader = DataLoader(train_dataset, batch_size=config['model']['batch_size'], num_workers=ncores, pin_memory=True)
model_w.train(train_loader)
print("Starting evaluation..")
test_generator = generator_fct(len(linking_data.all), 0, test_set_size)
count = 0
true = []
predictions = []
while True:
print("At block {}".format(count))
count += 1
block = list(itertools.islice(test_generator, BLOCK_SIZE))
if not block:
break
test_dataset = IterDataset(list(block), linking_data.all, feature_generator, None, labels=linking_data.link_lookup_table)
test_loader = DataLoader(test_dataset, batch_size=config['model']['batch_size'], num_workers=ncores, pin_memory=True)
t, p = model_w.test(test_loader)
true.extend(t)
predictions.extend(p)
model_w.eval(true, predictions, epoch=e)
print("Starting evaluation..")
test_generator = generator_fct(len(linking_data.all), 0, test_set_size)
count = 0
true = []
predictions = []
while True:
print("At block {}".format(count))
count += 1
block = list(itertools.islice(test_generator, BLOCK_SIZE))
if not block:
break
test_dataset = IterDataset(list(block), linking_data.all, feature_generator, None, labels=linking_data.link_lookup_table)
test_loader = DataLoader(test_dataset, batch_size=config['model']['batch_size'])
t, p = model_w.test(test_loader)
true.extend(t)
predictions.extend(p)
model_w.eval(true, predictions, epoch=0, write=False)
model_w.save()
|
dave-s477/SoMeNLP | somenlp/RE/features.py | import re
import numpy as np
from gensim.models import KeyedVectors
from itertools import combinations
from articlenizer import CUSTOM_STOPWORDS
from articlenizer.formatting import bio_to_brat
class FeatureGenerator():
def __init__(self, data_handler, embedding_location, emb_dim=200):
self.main_entities = ['Application_Creation', 'Application_Deposition', 'Application_Usage', 'Application_Mention', 'PlugIn_Creation', 'PlugIn_Deposition', 'PlugIn_Usage', 'PlugIn_Mention', 'ProgrammingEnvironment_Usage', 'ProgrammingEnvironment_Mention', 'OperatingSystem_Usage', 'OperatingSystem_Mention', 'SoftwareCoreference_Deposition']
self.data_handler = data_handler
self.type2idx, self.type2name, self.mention2idx, self.mention2name = {}, {}, {}, {}
for k in sorted(data_handler.encoding['tag2name']):
if '_' in data_handler.encoding['tag2name'][k]:
ent_type, mention_type = data_handler.encoding['tag2name'][k].split('_')
else:
ent_type = data_handler.encoding['tag2name'][k]
mention_type = 'None'
if ent_type not in self.type2idx:
self.type2name[len(self.type2idx)] = ent_type
self.type2idx[ent_type] = len(self.type2idx)
if mention_type not in self.mention2idx:
self.mention2name[len(self.mention2idx)] = mention_type
self.mention2idx[mention_type] = len(self.mention2idx)
self.default_ent_type = 'Application'
self.default_men_type = 'Usage'
# self.word_emb = KeyedVectors.load_word2vec_format(embedding_location, binary=True)
# self.emb_dim = emb_dim
def correct_types(self, ent_0, ent_1, men_0, men_1):
ent_0 = self.default_ent_type if ent_0 == 'Unknown' else ent_0
ent_1 = self.default_ent_type if ent_1 == 'Unknown' else ent_1
men_0 = self.default_men_type if men_0 == 'Unknown' else men_0
men_1 = self.default_men_type if men_1 == 'Unknown' else men_1
return ent_0, ent_1, men_0, men_1
def acronym(self, tokens_in):
"""Get acronym of tokens
Args:
tokens_in (list): list of tokens
Returns:
str: acronym
"""
tokens = [x.casefold() for x in tokens_in]
tokens = [re.sub('[^0-9a-zA-Z]+', ' ', x) for x in tokens]
tokens = [x.rstrip('0123456789 ,.').lstrip(' ') for x in tokens]
tokens = [x for x in tokens if x not in CUSTOM_STOPWORDS]
tokens = [x for x in tokens if x]
if len(tokens) <= 2:
return None
acronym = ''.join([x[0] for x in tokens])
return acronym
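# Example (illustrative): acronym(['Gene', 'Set', 'Enrichment', 'Analysis'])
# returns 'gsea'; inputs with two or fewer informative tokens return None.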
def is_substring(self, e1, e2):
"""Substring relation between entities
Args:
e1 (str): entity string
e2 (str): entity string
Returns:
bool: result
"""
if e1 is None or e2 is None:
return False
return ''.join(e1).casefold() in ''.join(e2).casefold()
def get_left_context_word(self, sentence, position):
"""Get word left of position in sentence
Args:
sentence (list): list of string tokens
position (int): position to extract
Returns:
str: string of token left of position
"""
sub_sent = sentence[:position]
sub_tokens = sub_sent.split()
if len(sub_tokens) > 0:
return sub_tokens[-1]
else:
return None
def get_right_context_word(self, sentence, position):
"""Get word right of position in sentence
Args:
sentence (list): list of string tokens
position (int): position to extract
Returns:
str: string of token right of position
"""
sub_sent = sentence[position:]
sub_tokens = sub_sent.split()
if len(sub_tokens) > 0:
return sub_tokens[0]
else:
return None
def one_hot_encoding(self, dictionary, name, ent_type, encoding):
"""Generate a one-hot encoding
Args:
dictionary (dict): dictionary with information, is written and changed
name (str): addition to output feature name
ent_type (str): entry to mark as active in the encoding
encoding (dict): mapping from type names to indices
"""
found_key = False
for k, v in encoding.items():
feature_name = '{}_{}'.format(name, k)
if ent_type == k:
dictionary[feature_name] = 1
found_key = True
else:
dictionary[feature_name] = 0
if not found_key:
raise(RuntimeError("Unknown entity type: {}".format(ent_type)))
# def get_entities_inbetween(self, tags, ind1, ind2):
# """Get number of tagged elements inbetween two indicies
# Args:
# tags (list): sequence of tags
# ind1 (int): index
# ind2 (int): index
# Returns:
# int: number of tagged entities
# """
# sub_tags = tags[ind1:ind2].split()
# return sum([1 if x.startswith('B-') else 0 for x in sub_tags])
def get_features(self, pair, sentence, tags, main_entity_count):
"""Get feature dictionary for pair of entities
Args:
pair (tuple): pair of entities
sentence (list): sentence containing entities as tokens
tags (list): IOB2 labels assigned to sentence
main_entity_count (int): number of main entities in sentence
Returns:
dictionary: features for pair
"""
if pair[0]['beg'] > pair[1]['beg']:
larger = pair[0]
smaller = pair[1]
else:
larger = pair[1]
smaller = pair[0]
distance_string = sentence[smaller['end']:larger['beg']]
tokens_distance_string = distance_string.split()
# ent_0_left_context = self.get_left_context_word(sentence, pair[0]['beg'])
# ent_1_left_context = self.get_left_context_word(sentence, pair[1]['beg'])
# ent_0_right_context = self.get_right_context_word(sentence, pair[0]['end'])
# ent_1_right_context = self.get_right_context_word(sentence, pair[1]['end'])
ent_0_tokens = pair[0]['string'].split()
ent_1_tokens = pair[1]['string'].split()
ent_0_acronym = self.acronym(ent_0_tokens)
ent_1_acronym = self.acronym(ent_1_tokens)
#entities_in_between = self.get_entities_inbetween(tags, smaller['end'], larger['beg'])
# if ent_0_left_context in self.word_emb:
# ent_0_left_context_emb = self.word_emb[ent_0_left_context]
# else:
# ent_0_left_context_emb = np.zeros(self.emb_dim)
# if ent_1_left_context in self.word_emb:
# ent_1_left_context_emb = self.word_emb[ent_1_left_context]
# else:
# ent_1_left_context_emb = np.zeros(self.emb_dim)
# if ent_0_right_context in self.word_emb:
# ent_0_right_context_emb = self.word_emb[ent_0_right_context]
# else:
# ent_0_right_context_emb = np.zeros(self.emb_dim)
# if ent_1_right_context in self.word_emb:
# ent_1_right_context_emb = self.word_emb[ent_1_right_context]
# else:
# ent_1_right_context_emb = np.zeros(self.emb_dim)
features = {
'entity_distance_abs': len(distance_string),
###'entity_distance_rel': len(distance_string) / len(sentence),
'entity_distance_tok': len(tokens_distance_string),
###'entity_distance_tok_rel': len(tokens_distance_string) / len(sentence.split()),
'entity_order': pair[1]['beg'] > pair[0]['beg'],
'entity_0_char_length': pair[0]['end'] - pair[0]['beg'],
'entity_0_token_length': len(ent_0_tokens),
'entity_1_char_length': pair[1]['end'] - pair[1]['beg'],
'entity_1_token_length': len(ent_1_tokens),
###'entity_0_left_context_for': ent_0_left_context == 'for',
###'entity_1_left_context_for': ent_1_left_context == 'for',
#'ent_0_left_context_emb': ent_0_left_context_emb,
#'ent_1_left_context_emb': ent_1_left_context_emb,
#'ent_0_right_context_emb': ent_0_right_context_emb,
#'ent_1_right_context_emb': ent_1_right_context_emb,
'num_main_entities': main_entity_count,
'entity_0_substring_of_entity_1': self.is_substring(pair[0]['string'], pair[1]['string']),
'acronym_entity_0_substring_of_entity_1': self.is_substring(ent_0_acronym, pair[1]['string']),
'acronym_entity_0_substring_of_acronym_entity_1': self.is_substring(ent_0_acronym, ent_1_acronym),
'entity_1_substring_of_entity_0': self.is_substring(pair[1]['string'], pair[0]['string']),
'acronym_entity_1_substring_of_entity_0': self.is_substring(ent_1_acronym, pair[0]['string']),
'acronym_entity_1_substring_of_acronym_entity_0': self.is_substring(ent_1_acronym, ent_0_acronym)
}
ent_0_type = pair[0]['type'].split('_')[0]
mention_0_type = pair[0]['type'].split('_')[1] if '_' in pair[0]['type'] else 'None'
ent_1_type = pair[1]['type'].split('_')[0]
mention_1_type = pair[1]['type'].split('_')[1] if '_' in pair[1]['type'] else 'None'
ent_0_type, ent_1_type, mention_0_type, mention_1_type = self.correct_types(ent_0_type, ent_1_type, mention_0_type, mention_1_type)
self.one_hot_encoding(features, 'entity_0_type', ent_0_type, self.type2idx)
self.one_hot_encoding(features, 'mention_0_type', mention_0_type, self.mention2idx)
self.one_hot_encoding(features, 'entity_1_type', ent_1_type, self.type2idx)
self.one_hot_encoding(features, 'mention_1_type', mention_1_type, self.mention2idx)
# for idx, (x, y, z, a) in enumerate(zip(ent_0_left_context_emb, ent_1_left_context_emb, ent_0_right_context_emb, ent_1_right_context_emb)):
# features['ent_0_left_context_emb_{}'.format(idx)] = x
# features['ent_1_left_context_emb_{}'.format(idx)] = y
# features['ent_0_right_context_emb_{}'.format(idx)] = z
# features['ent_1_right_context_emb_{}'.format(idx)] = a
return features
def get_sentence_relations_and_features(self, sentence, tags, entities, relations=None):
"""Span all potential relations in sentence and calculate its respective features
Args:
sentence (list): sentence as token list
tags (list): IOB2 labels assigned to sentence
entities (dict): entities in sentence
relations (dict): true relations in sentence
Returns:
list: relations in sentence including features and labels
"""
sentence_feature_list = []
entities = sorted(entities, key=lambda item: item['beg'])
for idx, x in enumerate(entities):
x['idx'] = idx
# TODO add this index as training data?
main_entity_count = 0
for ent in entities:
if ent['type'] in self.main_entities:
main_entity_count += 1
entity_combinations = combinations(entities, 2)
entity_pairs = []
for comb in entity_combinations:
entity_pairs.extend([list(comb), list(reversed(comb))])
assigned_count = 0
for idx, pair in enumerate(entity_pairs):
features = self.get_features(pair, sentence, tags, main_entity_count)
if relations is not None:
features['label'] = 'none'
for relation in relations:
if int(relation['ent1_b']) == int(pair[0]['beg']) and int(relation['ent2_b']) == int(pair[1]['beg']):
features['label'] = relation['type']
assigned_count += 1
break
sentence_feature_list.append(features)
return sentence_feature_list, entity_pairs
def generate_relation_extraction_features(self):
for dataset, dataset_setup in self.data_handler.data_config['sets'].items():
for sub_dataset in dataset_setup:
sub_dataset['relext_feature_list'] = []
for sentence, tags, relations in zip(sub_dataset['sentences'], sub_dataset['tags'], sub_dataset['relations']):
if sum([1 if t.startswith('B-') else 0 for t in tags.split()]) > 1:
entities, _, _ = bio_to_brat(sentence, tags)
if self.data_handler.tag_remapping is not None:
for ent in entities:
ent['type'] = self.data_handler.tag_remapping[ent['type']]
pairs, _ = self.get_sentence_relations_and_features(sentence, tags, entities, relations)
sub_dataset['relext_feature_list'].extend(pairs)
def stream_files(self):
for file_names in self.data_handler.data_files:
article = {
'out_name': file_names['out'],
'sentences': self.data_handler._read_text_file(file_names['in']),
'tags': self.data_handler._read_text_file(file_names['entities']),
'entity_list': [],
'relext_feature_list': []
}
for idx, (sentence, tags) in enumerate(zip(article['sentences'], article['tags'])):
if sum([1 if t.startswith('B-') else 0 for t in tags.split()]) > 1:
entities, _, _ = bio_to_brat(sentence, tags)
if self.data_handler.tag_remapping is not None:
for ent in entities:
ent['type'] = self.data_handler.tag_remapping[ent['type']]
pairs, entity_pairs = self.get_sentence_relations_and_features(sentence, tags, entities)
article['entity_list'].append(list(entity_pairs))
article['relext_feature_list'].append(pairs)
else:
article['entity_list'].append(None)
article['relext_feature_list'].append(None)
yield article
|
dave-s477/SoMeNLP | somenlp/NER/models/__init__.py | <gh_stars>0
from .crf import CRF
from .lstm import BiLSTM_CRF
from .feature_lstm import FeatureLSTM
from .combined_lstm import CombinedLSTM
from .multi_bert import BERTMultiTask, BERTMultiTaskOpt2, BERTMultiTaskCRF, BERTMultiTaskOpt2CRF
|
dave-s477/SoMeNLP | somenlp/RE/run_relation_extraction.py | <filename>somenlp/RE/run_relation_extraction.py<gh_stars>0
from somenlp.NER import OutputHandler, DataHandler, Tuner
from . import REmodel, FeatureGenerator
def main(model_config, data_config, time, data_file_ext, ent_file_ext, rel_file_ext):
"""Main function for performing relation extraction training
Args:
model_config (dict): configuration for RE model
data_config (dict): configuration of input data
time (str): time marker for saving
data_file_ext (str): identifying extension for automatic file loading
ent_file_ext (str): identifying extension for automatic file loading
rel_file_ext (str): identifying extension for automatic file loading
"""
print("\nSetting up output handler")
output_handler = OutputHandler(model_config['general']['name'], time, model_config['general']['checkpoint'])
output_handler.save_json(model_config, name='model_conf')
output_handler.save_json(data_config, name='data_conf')
print("\nSetting up data handler")
data_handler = DataHandler(data_config=data_config, data_file_extension=data_file_ext, label_file_extension=ent_file_ext, relation_file_extension=rel_file_ext, output_handler=output_handler, checkpoint=model_config['general']['checkpoint'], max_word_length=model_config['general']['max_word_length'], max_sent_length=model_config['general']['max_sentence_length'])
data_handler.load_data_from_config()
data_handler.encoding(tags_only=True)
data_handler.load_input()
feature_generator = FeatureGenerator(data_handler, model_config['model']['word_embedding'])
feature_generator.generate_relation_extraction_features()
print("\nSetting up model")
model_w = REmodel(model_config['general'], model_config['model'], data_handler, output_handler, model_config['output'])
if model_config['general']['train']:
if 'cross_val' in model_config['general'] and model_config['general']['cross_val']:
model_w.cross_val()
else:
model_w.train()
model_w.test()
model_w.save()
#model_w.show_features_importance()
else:
model_w.load(model_config['general']['checkpoint'])
model_w.test()
def tune(config, time, data_file_extension, ent_file_ext, rel_file_ext):
"""Hyper-parameter tuning for RE model
Args:
config (dict): configuration including model and data
time (str): time marker for saving
data_file_extension (str): identifying extension for automatic file loading
ent_file_ext (str): identifying extension for automatic file loading
rel_file_ext (str): identifying extension for automatic file loading
"""
tuner = Tuner(config, time)
iterator = tuner.yield_configs()
for name, data_conf, model_conf in iterator:
print(model_conf)
time_name = '{}_{}'.format(time, name)
print("Training model {}".format(time_name))
main(model_conf, data_conf, time_name, data_file_extension, ent_file_ext, rel_file_ext)
def predict(model_config, files, prepro, output):
print("\nSetting up output handler")
output_handler = OutputHandler(model_config['general']['name'], checkpoint=model_config['general']['checkpoint'])
print("\nSetting up data handler")
data_handler = DataHandler(data_files=files, prepro=prepro, output_handler=output_handler, checkpoint=model_config['general']['checkpoint'], max_word_length=model_config['general']['max_word_length'], max_sent_length=model_config['general']['max_sentence_length'])
data_handler.encoding(tags_only=True)
feature_generator = FeatureGenerator(data_handler, model_config['model']['word_embedding'])
print("\nSetting up model")
model_w = REmodel(model_config['general'], model_config['model'], data_handler, output_handler, model_config['output'], feature_generator=feature_generator)
model_w.load(model_config['general']['checkpoint'])
model_w.predict(output)
|
dave-s477/SoMeNLP | somenlp/feature_engineering/gen_custom_features.py | <gh_stars>0
import json
import numpy as np
from multiprocessing import Pool
from functools import partial
from . import candidate_rules
from . import distant_supervision_rules
from .sentence_rep import SentenceRepresentation, Candidate
FUNCTION_NAMES = [
'pan_top_1', 'pan_top_2', 'pan_top_3', 'pan_top_4', 'pan_top_5', 'pan_top_6', 'pan_top_7', 'pan_top_8', 'text_is_in_brackets', 'developer']
fcts = [getattr(candidate_rules, n) for n in FUNCTION_NAMES]
def encode_sentence(text, max_length, distant_supervision_dict, key_mapping, pos_tag_encoding):
sentence = SentenceRepresentation(text)
lengths = sentence.length
pos_tags = np.expand_dims(np.array([pos_tag_encoding[x] for x in sentence.pos_tags]), 0)
features = sentence.get_features()
sentence_counts = np.zeros((len(FUNCTION_NAMES), sentence.length), np.int32)
distant_supervision_counts = np.zeros((len(key_mapping), sentence.length), np.int32)
for cand_length in range(1, max_length+1):
for cand_beg in range((sentence.length - cand_length) + 1):
cand_end = cand_beg + cand_length
candidate = Candidate(sentence, cand_beg, cand_end)
for fct_idx, fct in enumerate(fcts):
match_res = fct(candidate)
if match_res == 1:
for pos in range(candidate.start_idx, candidate.end_idx):
sentence_counts[fct_idx][pos] += 1
distant_supervision_result = distant_supervision_rules.distant_supervision_by_dict(candidate, distant_supervision_dict, key_mapping)
for pos in range(candidate.start_idx, candidate.end_idx):
distant_supervision_counts[:,pos] += distant_supervision_result
all_feats = np.concatenate((pos_tags, features, sentence_counts, distant_supervision_counts))
return np.transpose(all_feats)
def calculate_features(input_files, max_length, distant_supervision_dict, key_mapping, pos_tag_encoding):
sentence_feature_encodings = []
with input_files[0].open(mode='r') as in_text:
for line in in_text:
sentence_feature_encodings.append(encode_sentence(line, max_length, distant_supervision_dict, key_mapping, pos_tag_encoding))
array_to_write = np.array(sentence_feature_encodings)
np.savez_compressed('{}.npz'.format(str(input_files[1])), features=array_to_write)
def update_words(all_words, in_list, name):
for word in in_list:
if word not in all_words.keys():
all_words[word] = []
if name not in all_words[word]:
all_words[word].append(name)
def load_distant_supervision(loc):
with loc.open(mode='r') as in_f:
distant = json.load(in_f)
all_distsup_words = {}
key_mapping = {}
for k, v in distant.items():
update_words(all_distsup_words, v, k)
key_mapping[k] = len(key_mapping)
return all_distsup_words, key_mapping
def load_pos_tag_encoding(loc):
with loc.open(mode='r') as in_f:
encoding = json.load(in_f)
return encoding
def calculate_features_parallel(files, max_length, distant_supervision_dictionary_location, pos_tag_encoding_location, n_cores=8):
distant_supervision_dictionary, distant_supervision_key_mapping = load_distant_supervision(distant_supervision_dictionary_location)
pos_tag_encoding = load_pos_tag_encoding(pos_tag_encoding_location)
fct_to_execute = partial(calculate_features, max_length=max_length, distant_supervision_dict=distant_supervision_dictionary, key_mapping=distant_supervision_key_mapping, pos_tag_encoding=pos_tag_encoding)
with Pool(n_cores) as p:
p.map(fct_to_execute, files)
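# Usage sketch (editorial; the paths and candidate length are assumptions):
# from pathlib import Path
# file_pairs = [(Path('article.txt'), Path('out/article'))]  # (input text, output prefix)
# calculate_features_parallel(file_pairs, max_length=5,
#     distant_supervision_dictionary_location=Path('distant_supervision.json'),
#     pos_tag_encoding_location=Path('pos_tag_encoding.json'), n_cores=4)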
|
dave-s477/SoMeNLP | somenlp/distant_supervision/__init__.py | <reponame>dave-s477/SoMeNLP
from .packages import load_package_names
from .perform_wiki_data_queries import query_wikidata
from .gen_sequences import generate_triplets
from .gen_wiktionary_dict import load_wiktionary
from .combine_info import merge_results, load_dicts |
dave-s477/SoMeNLP | somenlp/NER/LSTM_dataset.py | <gh_stars>0
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
class LSTMDataset(Dataset):
"""PyTorch dataset for LSTM input data
"""
def __init__(self, characters, ids, tags, features, character2idx, padding, max_word_length, max_sent_length, transforms=None):
self.characters = characters
self.ids = ids
self.tags = tags
self.transforms = transforms
self.character2idx = character2idx
self.max_sent_length = max_sent_length
self.max_word_length = max_word_length
self.padding = padding
self.features = features
def __len__(self):
return len(self.ids)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if self.max_sent_length > 0:
sample = {
'ids': torch.tensor(self.ids[idx][:self.max_sent_length]),
'features': torch.tensor(self.features[idx][:self.max_sent_length]) if self.features is not None else None,
'tags': torch.tensor(self.tags[idx][:self.max_sent_length])}
else:
sample = {
'ids': torch.tensor(self.ids[idx]),
'features': torch.tensor(self.features[idx]) if self.features is not None else None,
'tags': torch.tensor(self.tags[idx])}
if self.max_word_length < 0:
sample['characters'] = pad_sequence([torch.tensor(x) for x in self.characters[idx]], batch_first=True, padding_value=self.character2idx[self.padding])
else:
sample['characters'] = pad_sequence([torch.tensor(x[:self.max_word_length]) for x in self.characters[idx]], batch_first=True, padding_value=self.character2idx[self.padding])
if self.transforms:
sample = self.transforms(sample)
return sample
|
dave-s477/SoMeNLP | somenlp/distant_supervision/gen_wiktionary_dict.py | import json
import wget
from pathlib import Path
def load_wiktionary(download_location='/tmp/', default_address='https://kaikki.org/dictionary/English/kaikki.org-dictionary-English.json'):
"""Download a pre-processed wiktionary dump to generate an english dictionary for distant supervision.
Args:
download_location (str, optional): where to write the download. Defaults to '/tmp/'.
default_address (str, optional): url from where to download the dump. Defaults to 'https://kaikki.org/dictionary/English/kaikki.org-dictionary-English.json'.
Returns:
dict: sets of English words from Wiktionary, keyed by part of speech
"""
print("Loading English dictionary")
dict_location = '{}/{}'.format(download_location.rstrip('/'), 'wiktionary_dict.json')
wget.download(default_address, out=dict_location)
results = {}
with open(dict_location, 'r') as w_file:
for line in w_file:
entry = json.loads(line)
if entry['pos'] not in results.keys():
results[entry['pos']] = set()
results[entry['pos']].update([entry['word']])
return results
|
cjavaphp/keybinder3 | examples/example.py | #!/usr/bin/env python
"""
example.py
Created in 2010 by <NAME> <<EMAIL>>
This work is placed in the public domain.
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Keybinder', '3.0')
from gi.repository import Gtk
from gi.repository import Keybinder
def callback(keystr, user_data):
print "Handling", keystr, user_data
print "Event time:", Keybinder.get_current_event_time()
Gtk.main_quit()
if __name__ == '__main__':
keystr = "<Ctrl>A"
Keybinder.init()
Keybinder.bind(keystr, callback, "Keystring %s (user data)" % keystr)
print "Press", keystr, "to handle keybinding and quit"
Gtk.main()
|
lvrbanec/100DaysOfCode_Python | Project06, Beginner, Hangman/main.py | <reponame>lvrbanec/100DaysOfCode_Python
# 02.01.21, Frollo
# level: Beginner
# Project: Create a Hangman game
import random
from hangman_words import word_list #import wordlist
from hangman_art import logo, stages # imports pictures of a stages of hanging man
# pick a word randomly
chosen_word = random.choice(word_list)
# print(f"the secret word is {chosen_word}")
# show logo of the game
print(logo)
# create a drawing of the chosen_word
drawing = []
for i in chosen_word:
drawing.append('_')
print(drawing)
#create a drawing of a hangman
print(stages[6])
# guess the word until you win or die
end_of_game = False
lives = 6
previous_guesses = []
while not end_of_game:
# guess a letter
guess = input('Please choose a letter\n').lower()
# check if you guessed right
index = 0 # index of the current letter
for letter in chosen_word: # alternatively iterate positions with range(len(chosen_word))
index += 1
if guess == letter:
drawing[index-1] = letter
print(drawing)
# if the letter isnt guessed, remove one life
if (guess not in chosen_word) and (guess not in previous_guesses):
lives -= 1
print(f'Letter {guess} is not in the chosen word.')
print(stages[lives])
# warn about repeated guesses
if guess in previous_guesses:
print(f'You already tried letter {guess}.')
previous_guesses.append(guess)
# win condition
if "_" not in drawing:
end_of_game = True
print("You win.")
# lose condition
if lives == 0:
end_of_game = True
print("You lost.") |
lvrbanec/100DaysOfCode_Python | Project07, Beginner, caesar ciphone/main.py | # 02.02.2021, Frollo
# project: create Casear Cipher
# level Beginner
from art import logo
print(logo)
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
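# note (editorial): the alphabet is listed twice so that shifted indices wrap
# around the end of the alphabet without needing modular arithmetic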
# define a function for encryption
def caesar(word, shift_ammount, direction):
encrypted_word = ''
if direction == 'decode':
shift_ammount *= -1 # subtract the shift when decoding
for char in word:
if char in alphabet:
position = alphabet.index(char) # gives only the first index
encrypted_letter = alphabet[position + shift_ammount]
encrypted_word += encrypted_letter
else:
encrypted_word += char
print(f"The {direction}d text is '{encrypted_word}'.")
should_continue = True # run again?
while should_continue:
# gather the input to encrypt or decrypt
desired_direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
# if the user inputs a huge number, reduce it modulo 26 (the length of the alphabet)
shift = shift % 26
# call function
caesar(word = text, shift_ammount = shift, direction = desired_direction )
# run again?
run_again = input("Type 'yes' if you want to run again. Otherwise type 'no'\n.")
if run_again == "no":
should_continue = False
print("Goodbye") |
lvrbanec/100DaysOfCode_Python | Project15, Intermediate, Plagiate D.Hirst Painting/main.py | # 08.02.2021, Frollo
# Level: Intermediate
# Project: Create a Damien Hirst painting using module turtle
import turtle
import random
turtle.colormode(255) # change the module to use the rgb color code
tim = turtle.Turtle()
tim.speed("fastest")
tim.penup() # remove the lines connecting the dots
tim.hideturtle() # remove the turtle
## extract the colors of the painting "painting.jpg"
# import colorgram as cg
# rgb_colors = [] # list of tuples
# colors = cg.extract("painting.jpg", 30)
# for color in colors:
# r = color.rgb.r
# g = color.rgb.g
# b = color.rgb.b
# new_color = (r, g, b)
# rgb_colors.append(new_color)
#
# print(rgb_colors)
# copy the output rgb_colors and remove first
# two colors as they are the background of the painting
color_list = [(238, 246, 244), (249, 243, 247), (1, 12, 31), (53, 25, 17),
(218, 127, 106), (10, 104, 159), (242, 213, 68), (149, 83, 39),
(215, 87, 63), (155, 6, 24), (165, 162, 31), (157, 62, 102),
(10, 64, 33), (206, 74, 104), (11, 96, 57), (95, 6, 20),
(174, 135, 163), (1, 61, 145), (7, 172, 216), (3, 213, 207),
(159, 33, 24), (8, 140, 85), (145, 227, 217), (122, 193, 147),
(220, 177, 216), (100, 218, 229), (117, 171, 192), (79, 135, 178)]
# draw 10 x 10 dots in a random pattern using the used_colors
# move the cursor to the bottom-left part of the screen
tim.setheading(255)
tim.forward(325)
tim.setheading(0)
number_of_dots = 100
for dot_count in range(1, number_of_dots + 1): # 10 dots per row, 50 steps apart -> 500 in total
tim.dot(20, random.choice(color_list))
tim.forward(50)
if dot_count % 10 == 0:
tim.setheading(90)
tim.forward(50)
tim.setheading(180)
tim.forward(500)
tim.setheading(0)
#create a screen
screen = turtle.Screen()
screen.exitonclick() |
lvrbanec/100DaysOfCode_Python | Project12, Beginner, HigherLowerGame/main.py | <reponame>lvrbanec/100DaysOfCode_Python<gh_stars>0
# 05/02/2021, Frollo
# Level: Beginner
# Project: Higher Lower game
import art
from game_data import data
import random
from replit import clear
# select a random datapoint
def select_datapoint():
""""Selects a random datapoint from a list, returns it as a dictionary"""
index = random.randint(0, len(data)-1) # !can also use random.choice(list)
datapoint_dict = data[index]
return datapoint_dict
# check if the player made a right decision and count the score
def compare_datapoints(dic_A, dic_B, player_decision, prev_score):
''' Checks whether the player made the right decision and, if so, increases the score by 1. Returns [guessed (bool), updated score]. '''
# if the player is right print it
if (dic_A['follower_count'] > dic_B['follower_count'] and player_decision == "a") or (dic_A['follower_count'] < dic_B['follower_count'] and player_decision == "b"):
prev_score += 1
print(f"You're right! Current score: {prev_score}")
return [True, prev_score]
# if the player is wrong print it
else:
print(f"Sorry, that's wrong. Current score: {prev_score}")
return [False, prev_score]
print(art.logo)
A = select_datapoint()
game_over = False
score = 0
while not game_over:
print(f"Compare A: {A['name']}, {A['description']}, from {A['country']}.")
print(art.vs)
B = select_datapoint()
while A == B: # re-pick if the same datapoint was selected twice
B = select_datapoint()
print(f"Against B: {B['name']}, {B['description']}, from {B['country']}.")
answer = input("Who has more followers? Type 'A' or 'B': ").lower()
clear()
print(art.logo)
guessed, score = compare_datapoints(dic_A = A, dic_B = B, player_decision = answer, prev_score = score ) # returns true if player guessed
A = B
if not guessed:
game_over = True
|
lvrbanec/100DaysOfCode_Python | Project11, Beginner, NumberGuess/art.py | logo = """"
_______
(_______)
_ ___ _ _ _____ ___ ___
| | (_ | | | | ___ |/___)/___)
| |___) | |_| | ____|___ |___ |
\_____/|____/|_____(___/(___/
_ | |
_| |_| |__ _____
(_ _| _ \| ___ |
| |_| | | | ____|_
\__|_| |_|_____) |
____ _ _ ____ | |__ _____ ____
| _ \| | | | \| _ \| ___ |/ ___)
| | | | |_| | | | | |_) | ____| |
|_| |_|____/|_|_|_|____/|_____|_|
""" |
lvrbanec/100DaysOfCode_Python | Project04, Beginner, FizzBuzz/main.py | # 01.02.2021
# level: Beginner
# computer solves the Fizz Buzz game from number 1 to 100 (if the number is divisible by 3, print Fizz. If the number is divisible by 5 print Buzz. If it is divisible by both, print FizzBuzz. Otherwise print the number.)
for number in range(1, 101):
if number % 3 == 0 and number % 5 == 0:
print('FizzBuzz')
elif number % 3 == 0:
print ('Fizz')
elif number % 5 == 0:
print('Buzz')
else:
print(number)
|
lvrbanec/100DaysOfCode_Python | Project11, Beginner, NumberGuess/main.py | # 05.02.2021, Frollo
# Level Beginner
# Project: Number Guessing Game
# computer randomly picks an integer between 1 and 100, player needs to guess the number
import random
from art import logo
EASY_LEVEL_TURNS = 10 # constants are accessible in functions
HARD_LEVEL_TURNS = 5
# compare two numbers
def compare(number, num_guess):
if num_guess > number:
print("Too high.")
case = 0
elif num_guess < number:
print("Too low.")
case = 1
elif num_guess == number:
print("You guessed! The number was {number}")
case = 2
return case
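# Return codes: 0 -> guess too high, 1 -> guess too low, 2 -> correct.
# e.g. compare(50, 73) prints "Too high." and returns 0.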
# choose a difficulty
def set_difficulty():
    def_difficulty = input("Choose a difficulty. Type 'easy' or 'hard': ")
    if def_difficulty == "hard":
        return HARD_LEVEL_TURNS
    # any other input defaults to easy, so the function never returns None
    return EASY_LEVEL_TURNS
def play_game():
# select a random number between 1 and 100
    all_numbers = list(range(1, 101))  # could simply be random.randint(1, 100)
rand_number = random.choice(all_numbers)
#print(number)
print(logo)
print("Welcome to the Number Guessing Game!")
attempts = set_difficulty()
game_over = False
while not game_over:
print(f"You have {attempts} attempts remaining to guess the number.")
num_guess = int(input("Make a guess: "))
case = compare(rand_number, num_guess)
# reduce the number of attempts
attempts -= 1
# end the game if you run out of attempts, game over
if attempts == 0:
print("Game lost")
game_over = True
# if not guessed, try again
elif case == 0 or case == 1:
print("Guess again.")
# if guesssed, stop the game
elif case == 2:
game_over = True
play_game()
|
lvrbanec/100DaysOfCode_Python | Project16, Intermediate, TurtleRace using turtle module/main.py | # 08.02.2021, Frollo
# Level: Intermediate
# Project: Make a turtle race betting game
from turtle import Turtle, Screen
import random
is_race_on = False
screen = Screen()
screen.setup(width=500, height=400)
user_bet = screen.textinput(title="Make your bet", prompt="Which turtle will win the race? Enter a color: ")
colors = ["red", "green", "purple", "blue", "yellow", "black"]
all_turtles = []
y_position = -100
for turtle_index in range(6):
new_turtle = Turtle("turtle")
new_turtle.color(colors[turtle_index])
new_turtle.penup()
new_turtle.goto(x=-230, y=y_position)
y_position += 40
all_turtles.append(new_turtle)
if user_bet:  # if the variable holds a value (the user entered a bet)
is_race_on = True
while is_race_on:
for turtle in all_turtles:
if turtle.xcor() > 230:
is_race_on = False # stop the race
winning_color = turtle.pencolor()
if winning_color == user_bet:
print(f"You've won! The {winning_color} turtle is the winner!")
else:
print(f"You've lost! The {winning_color} turtle is the winner!")
rand_distance = random.randint(0, 20)
turtle.forward(rand_distance)
screen.exitonclick() |
lvrbanec/100DaysOfCode_Python | Project09, Beginner, Calculator/main.py | # 03.02.21, Frollo
# Level: beginner
# Project: Easy calculator
from art import logo
print(logo)
# Operations
# Add
def add(n1, n2):
return n1 + n2
# Subtract
def substract(n1, n2):
return n1 - n2
# Multiply
def multipy(n1, n2):
return n1 * n2
# Divide
def divide(n1, n2):
return n1 / n2
operations = {
"+" : add,
"-" : substract,
"*" : multipy,
"/" : divide
}
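# Example: the dict maps each symbol to its function, so operations["+"](2, 3)
# returns 5 and operations["/"](10, 4) returns 2.5.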
def call_again():  # restarts the calculator when the user wants a new calculation
switch = True
while switch:
# pick a first number
num1 = float(input("What's the first number?: "))
        # pick a second number and an operation, and if wanted calculate with a previous result
while switch:
print("Pick one of the following operation:")
for key in operations:
print(key)
            operation_symbol = input()
chosen_function = operations[operation_symbol]
num2 = float(input("What's the next number?: "))
result = chosen_function(num1, num2)
print(f"{num1} {operation_symbol} {num2} = {result}")
should_continue = input(f"Type 'y' to continue calculating with {result}, type 'n' to start a new calculation, or type 'esc' to exit.\n")
if should_continue == "esc":
switch = False
print("Thank you for using our calculator.")
elif should_continue == 'y':
num1 = result
elif should_continue == 'n':
call_again()
call_again() |
lvrbanec/100DaysOfCode_Python | Project14, Intermediate, KnowledgeQuiy, OOP/quiz_brain.py | <filename>Project14, Intermediate, KnowledgeQuiy, OOP/quiz_brain.py
class QuizBrain:
# load in the question list of objects, set the question number as 0
def __init__(self, question_list):
self.question_number = 0
self.score = 0
self.question_list = question_list
# check if there are unused objects left in the list of objects
def still_has_questions(self):
return len(self.question_list) > self.question_number
# increase the question number and show the next question
def next_question(self):
        current_question = self.question_list[self.question_number]  # fetch the Question object at the current index
        # current_question.text and current_question.answer access that object's attributes
self.question_number += 1
user_answer = input(f"Q.{self.question_number}: {current_question.text} (True/False): ")
self.check_answer(user_answer, current_question.answer)
# checks if the answer was correct and tracks the number of right answers
def check_answer(self, user_answer, correct_answer):
if user_answer.lower() == correct_answer.lower():
self.score += 1
print("You got it right!")
else:
print("That's wrong.")
print(f"The correct answer was: {correct_answer}")
print(f"Your current score is: {self.score}/{self.question_number}.\n")
|
lvrbanec/100DaysOfCode_Python | Project14, Intermediate, KnowledgeQuiy, OOP/main.py | from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
# Reformat the data: create a list of Question objects (with .text and .answer)
# from a list of dictionaries with "text" and "answer" keys
question_bank = []
for dictionary in question_data:
question = dictionary["text"]
answer = dictionary["answer"]
new_question = Question(question, answer)
question_bank.append(new_question)
# create an object quiz, on which the program is ran
quiz = QuizBrain(question_bank)
while quiz.still_has_questions():
quiz.next_question()
print("You've completed the quiz")
print(f"Your final score was: {quiz.score}/{quiz.question_number}.") |
lvrbanec/100DaysOfCode_Python | Project02, Beginner, Camel Game/main.py | <reponame>lvrbanec/100DaysOfCode_Python
# 30.01.2021
# level Beginner
# The point of the game is to find the water in the desert to escape Mr. Death
print('''
**********************************************************************
###
####
# ###
#### ###
######## ####
######### ## ####
# ### # ######
# ### # ###
# ### # #####
# ### # #####
# # #### #########
## ### # #################
###### ##### # ##################
######## ##### ## ###############
########## ####### ##############
## ##### ##### ##### #####
# #### ###### ### ## #
# #### ######### ## ##
# #### ######### ## ##
# #### # ####### ## ##
# ##### # ##### ## ##
# #### # ###### ## ##
# ###### ######### ## ##
# ####################
# #####################
#######################
#####################
###################
###### #######
#### ######
### #####
### ### #
### ### #
### ## #
### ####
## ###
************************************************************************
''')
# the art is taken from https://ascii.co.uk/art/camels
print("Welcome to The Desert.")
print("You were riding cammels, when your cammel (with you on the back) ran away and you got lost in the desert.") # \' escapes the '
print("You\'r mission is to find the water, or otherwise you will meet Mr. Death. You have 3 days until you dehydrate.")
dome = input("You see one huge sand dome on the right. Other than that there is not much to see. The sand is everywhere. Where should you go? Write \"right\" to climb the dome, or \"left\" to walk next to the dome.\n").lower()
if dome == "left":
night = input("You tried to walk next to the dome but a huge wind came over and the dome fell apart. Good think you didn't climb that dome, or you would have died. Lucky you. The night has finally fallen. You see the sky. Should you follow the brighest star and move towards the north, or would you rather rest and sleep? Write \"walk\" or \"rest\".\n").lower()
if night == "walk":
elephants = input("Good choice! You walked towards the north until you met a group of elephants. Should you continue walking north, should you try to shoe the elephants away, or should you follow the elephants? Write \"walk\", \"shoo\" or \"follow\".\n").lower()
if elephants == "walk":
print("You continued walking norht, but you never reached the water. Sorry man.")
elif elephants == "shoo":
print("The elephants felt attached and smashed you with their huge legs. Be smarter next time.")
elif elephants == "follow":
print ("It is well known that the elephants remember the paths to the water in desert. Seems like you watched some documentaries on netflix. You found water. You are the survivor.")
else:
print("You decided to rest, but the temparatures fell under zero. You froze. Try again.")
else:
print("You climbed the dome and from the top saw your city, but the strong winds came and the dome collapsed. You got burried under the sand. Mr. Death came. Bad luck.") |
lvrbanec/100DaysOfCode_Python | Project10, Beginner, Balckjack game/main.py | # 04.02.2021, Frollo
# Level: Beginner
# project: simulation of a blackjack game
############### Our Blackjack House Rules #####################
## The deck is unlimited in size.
## There are no jokers.
## The Jack/Queen/King all count as 10.
## The Ace can count as 11, or as 1 if the total would exceed 21.
## The cards in the list have equal probability of being drawn.
## Cards are not removed from the deck as they are drawn.
from art import logo
from replit import clear
import random
# function for dealing 1 card
def deal_one_card():
""" returns a random card from the deck """
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
return random.choice(cards)
# function for dealing 2 cards
def deal_two_cards():
""" returns two random cards from the deck as a tuple"""
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
first_card = random.choice(cards)
second_card = random.choice(cards)
return first_card, second_card # returns tuple
# function for calculating the sum of cards and declaring a blackjack
def calculate_score(cards):
""" calculates a score of a deck of cards (from a tuple) """
if sum(cards) == 21 and len(cards) == 2:
return 0 # blackjack!
    if 11 in cards and sum(cards) > 21:
        # count the Ace as 1 instead of 11; remove()/append() mutate the list
        # in place and return None, so they cannot be chained inside tuple()
        new_cards = list(cards)
        new_cards.remove(11)
        new_cards.append(1)
        cards = tuple(new_cards)
return sum(cards)
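# Worked examples: calculate_score((11, 10)) -> 0 (a two-card 21 is a blackjack);
# calculate_score((11, 5, 8)) -> 14 (the Ace is demoted from 11 to 1);
# calculate_score((10, 7)) -> 17.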
# function to compare player_cards, and pc scores
def compare(player_sum, pc_sum):
if pc_sum > 21 and player_sum > 21:
return "You went over. You lose."
elif player_sum == pc_sum:
return "Draw"
elif pc_sum == 0:
return "Lose, opponen has Blackjack"
elif player_sum == 0:
return "Win with a Blackjack"
elif player_sum > 21:
return "You went over, you lose."
elif pc_sum > 21:
return "Computer went over, you win."
elif player_sum > pc_sum:
return "Your win."
else:
return "You lose."
# blackjack game
def play_game():
    print(logo)
player_cards = deal_two_cards()
pc_cards = deal_two_cards()
    # decisions of a player, i.e. does the player want another card?
is_game_over = False
while not is_game_over:
player_sum = calculate_score(player_cards)
pc_sum = calculate_score(pc_cards)
print(f"Your cards are: {player_cards}. Current score : {player_sum}.")
print(f"Computer's first card: {pc_cards[0]}")
        if player_sum == 0 or pc_sum == 0 or player_sum > 21:
is_game_over = True
else:
additional_card = input("Type 'y' to get another card, type 'n' to pass: ")
if additional_card == 'y':
                player_cards += (deal_one_card(),)  # appending to a tuple: a one-element tuple needs the trailing comma (value,)
else:
is_game_over = True
    # decisions of the computer, i.e. while the computer has less than 17 it must draw another card
    while pc_sum != 0 and pc_sum < 17:  # 0 means blackjack
pc_cards += (deal_one_card(),)
pc_sum = calculate_score(pc_cards)
print(f"Your cards are: {player_cards}. Final score : {player_sum}.")
print(f"Computer's cards are: {pc_cards}. Final score : {pc_sum}.")
print(compare(player_sum, pc_sum))
# Do you want to restart a game?
while input("Do you want to play the game of blackjack? Type 'y' or 'n': ") == 'y':
clear()
play_game()
|
lvrbanec/100DaysOfCode_Python | Project01, Beginner, Love Calculator/main.py | <gh_stars>0
# 30.01.2021
# level: Beginner
# The aim of this code is to create a love calculator, which "predicts" how well two people match based on their names. The logic is based on the game we played as kids, and is explained at https://www.buzzfeed.com/ariannarebolini/what-are-the-chances-your-crush-is-actually-your-true-love
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is their name? \n")
name_together = name1 + name2
name_together_low = name_together.lower()
#print(name_together_low)
score_true = name_together_low.count("t")+ name_together_low.count("r") + name_together_low.count("u") + name_together_low.count("e")
#print(score_true)
score_love= name_together_low.count('l') + name_together_low.count('o') + name_together_low.count('v') + name_together_low.count('e')
#print(score_love)
final_score = int(f"{score_true}{score_love}")
#print(final_score)
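# Worked example: for "ROMEO" and "JULIET" the combined name contains the letters
# of "true" 5 times and the letters of "love" 5 times, so final_score = int("55") = 55.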
if final_score < 10 or final_score > 90:
    print(f"Your score is {final_score}, you go together like coke and mentos.")
elif 40 <= final_score <= 50:
print(f"Your score is {final_score}, you are alright together.")
else:
print(f"Your score is {final_score}.") |
lvrbanec/100DaysOfCode_Python | Project05, Beginner, Password Generator/main.py | <filename>Project05, Beginner, Password Generator/main.py
# 01.02.2021
# Level: Beginner
# Password Generator Project
# generate a password with the number of letters, numbers and symbols specified by the user
import random
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
print("Welcome to the PyPassword Generator!")
nr_letters = int(input("How many letters would you like in your password?\n"))
nr_symbols = int(input("How many symbols would you like?\n"))
nr_numbers = int(input("How many numbers would you like?\n"))
# select a random letter
selected_letters=[] # for a string, we would write selected_letters = ""
for i in range(nr_letters):
rand_letter = letters[random.randint(0, len(letters)-1)] # or rand_letter = random.choice(letters)
    selected_letters.append(rand_letter)  # or use selected_letters += rand_letter
print(selected_letters)
# select a random number
selected_numbers=[]
for i in range(nr_numbers):
rand_number = numbers[random.randint(0, len(numbers)-1)]
selected_numbers.append(rand_number)
print(selected_numbers)
# select a random symbol
selected_symbols=[]
for i in range(nr_symbols):
rand_symbol = symbols[random.randint(0, len(symbols)-1)]
selected_symbols.append(rand_symbol)
print(selected_symbols)
password_list = selected_letters + selected_numbers + selected_symbols
random.shuffle(password_list)  # shuffles in place; to keep the original list, use random.sample(password_list, len(password_list))
password_str = "".join(password_list)
print(password_str)
|
lvrbanec/100DaysOfCode_Python | Project08. Beginner, Blind bid decision/main.py | # 03.02.21, Frollo
# level: Beginner
# project: Create a blind bid program
from replit import clear
from art import logo
print(logo)
# add a new bidder function
bidder_info = [] # could be also saved as {name1: amount1, name2: amount2}
def add_bidder_info(val_name, val_bid):
new_bidder = {"name": val_name, "bid": val_bid}
bidder_info.append(new_bidder)
# find the highest bidder function
def find_highest_bidder(list_of_dictionary):
max_bid = 0
for dictionary in list_of_dictionary:
bid = dictionary["bid"]
if bid > max_bid:
max_bid = bid
max_name = dictionary["name"]
    print(f'Highest bid is {max_bid}$ bid by {max_name}.')
# save info about the bidders
will_continue = True
while will_continue:
input_name = input("What is your name?\n")
input_bid = float(input("What's your bid?\n$"))
input_continue = input("Are there any other bidders? Type 'yes' or 'no'.\n")
add_bidder_info(val_name = input_name, val_bid = input_bid)
clear()
if input_continue == "no":
will_continue = False
print(bidder_info)
find_highest_bidder(bidder_info) |
SubZero12556/Cats2dogs_ONNX | networks.py | <filename>networks.py
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256, light=False):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.n_blocks = n_blocks
self.img_size = img_size
self.light = light
DownBlock = []
DownBlock += [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, stride=1, padding=0, bias=False),
nn.InstanceNorm2d(ngf),
nn.ReLU(True)]
# Down-Sampling
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
DownBlock += [nn.ReflectionPad2d(1),
nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0, bias=False),
nn.InstanceNorm2d(ngf * mult * 2),
nn.ReLU(True)]
# Down-Sampling Bottleneck
mult = 2**n_downsampling
for i in range(n_blocks):
DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]
# Class Activation Map
self.gap_fc = nn.Linear(ngf * mult, 1, bias=False)
self.gmp_fc = nn.Linear(ngf * mult, 1, bias=False)
self.conv1x1 = nn.Conv2d(ngf * mult * 2, ngf * mult, kernel_size=1, stride=1, bias=True)
self.relu = nn.ReLU(True)
# Gamma, Beta block
if self.light:
FC = [nn.Linear(ngf * mult, ngf * mult, bias=False),
nn.ReLU(True),
nn.Linear(ngf * mult, ngf * mult, bias=False),
nn.ReLU(True)]
else:
FC = [nn.Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias=False),
nn.ReLU(True),
nn.Linear(ngf * mult, ngf * mult, bias=False),
nn.ReLU(True)]
self.gamma = nn.Linear(ngf * mult, ngf * mult, bias=False)
self.beta = nn.Linear(ngf * mult, ngf * mult, bias=False)
# Up-Sampling Bottleneck
for i in range(n_blocks):
setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))
# Up-Sampling
UpBlock2 = []
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
UpBlock2 += [nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d(1),
nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0, bias=False),
ILN(int(ngf * mult / 2)),
nn.ReLU(True)]
UpBlock2 += [nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, stride=1, padding=0, bias=False),
nn.Tanh()]
self.DownBlock = nn.Sequential(*DownBlock)
self.FC = nn.Sequential(*FC)
self.UpBlock2 = nn.Sequential(*UpBlock2)
def forward(self, input):
x = self.DownBlock(input)
gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)
gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
gap_weight = list(self.gap_fc.parameters())[0]
gap = x * gap_weight.unsqueeze(2).unsqueeze(3)
gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)
gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
gmp_weight = list(self.gmp_fc.parameters())[0]
gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)
cam_logit = torch.cat([gap_logit, gmp_logit], 1)
x = torch.cat([gap, gmp], 1)
x = self.relu(self.conv1x1(x))
heatmap = torch.sum(x, dim=1, keepdim=True)
if self.light:
x_ = torch.nn.functional.adaptive_avg_pool2d(x, 1)
x_ = self.FC(x_.view(x_.shape[0], -1))
else:
x_ = self.FC(x.view(x.shape[0], -1))
gamma, beta = self.gamma(x_), self.beta(x_)
for i in range(self.n_blocks):
x = getattr(self, 'UpBlock1_' + str(i+1))(x, gamma, beta)
out = self.UpBlock2(x)
return out, cam_logit, heatmap
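    # The CAM branch in short: the gap/gmp logits score how domain-discriminative the
    # features are, while re-weighting x by the FC weights yields an attention map;
    # summing it over channels gives the `heatmap` returned for visualization.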
class ResnetBlock(nn.Module):
def __init__(self, dim, use_bias):
super(ResnetBlock, self).__init__()
conv_block = []
conv_block += [nn.ReflectionPad2d(1),
nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias),
nn.InstanceNorm2d(dim),
nn.ReLU(True)]
conv_block += [nn.ReflectionPad2d(1),
nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias),
nn.InstanceNorm2d(dim)]
self.conv_block = nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class ResnetAdaILNBlock(nn.Module):
def __init__(self, dim, use_bias):
super(ResnetAdaILNBlock, self).__init__()
self.pad1 = nn.ReflectionPad2d(1)
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
self.norm1 = adaILN(dim)
self.relu1 = nn.ReLU(True)
self.pad2 = nn.ReflectionPad2d(1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
self.norm2 = adaILN(dim)
def forward(self, x, gamma, beta):
out = self.pad1(x)
out = self.conv1(out)
out = self.norm1(out, gamma, beta)
out = self.relu1(out)
out = self.pad2(out)
out = self.conv2(out)
out = self.norm2(out, gamma, beta)
return out + x
class adaILN(nn.Module):
def __init__(self, num_features, eps=1e-5):
super(adaILN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input, gamma, beta):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1-self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3)
return out
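    # In short, AdaILN computes out = rho * IN(x) + (1 - rho) * LN(x) and then applies
    # the per-sample affine transform gamma/beta produced by the generator's FC block;
    # rho is a learned per-channel mixing weight (kept in range by RhoClipper below).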
class ILN(nn.Module):
def __init__(self, num_features, eps=1e-5):
super(ILN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.0)
self.gamma.data.fill_(1.0)
self.beta.data.fill_(0.0)
def forward(self, input):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1-self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * self.gamma.expand(input.shape[0], -1, -1, -1) + self.beta.expand(input.shape[0], -1, -1, -1)
return out
class Discriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=5):
super(Discriminator, self).__init__()
model = [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
for i in range(1, n_layers - 2):
mult = 2 ** (i - 1)
model += [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
mult = 2 ** (n_layers - 2 - 1)
model += [nn.ReflectionPad2d(1),
nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)),
nn.LeakyReLU(0.2, True)]
# Class Activation Map
mult = 2 ** (n_layers - 2)
self.gap_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
self.gmp_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True)
self.leaky_relu = nn.LeakyReLU(0.2, True)
self.pad = nn.ReflectionPad2d(1)
self.conv = nn.utils.spectral_norm(
nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))
self.model = nn.Sequential(*model)
def forward(self, input):
x = self.model(input)
gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)
gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
gap_weight = list(self.gap_fc.parameters())[0]
gap = x * gap_weight.unsqueeze(2).unsqueeze(3)
gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)
gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
gmp_weight = list(self.gmp_fc.parameters())[0]
gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)
cam_logit = torch.cat([gap_logit, gmp_logit], 1)
x = torch.cat([gap, gmp], 1)
x = self.leaky_relu(self.conv1x1(x))
heatmap = torch.sum(x, dim=1, keepdim=True)
x = self.pad(x)
out = self.conv(x)
return out, cam_logit, heatmap
class RhoClipper(object):
def __init__(self, min, max):
self.clip_min = min
self.clip_max = max
assert min < max
def __call__(self, module):
if hasattr(module, 'rho'):
w = module.rho.data
w = w.clamp(self.clip_min, self.clip_max)
module.rho.data = w
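# Example usage (a sketch; `generator` is a hypothetical ResnetGenerator instance):
# clipper = RhoClipper(0, 1)
# generator.apply(clipper)  # nn.Module.apply calls the clipper on every submodule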
|
SubZero12556/Cats2dogs_ONNX | utils.py | from scipy import misc
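# note: scipy.misc.imread / imresize were deprecated and later removed from SciPy,
# so this module assumes an older SciPy release with Pillow installed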
import os, cv2, torch
import numpy as np
def load_test_data(image_path, size=256):
img = misc.imread(image_path, mode='RGB')
img = misc.imresize(img, [size, size])
img = np.expand_dims(img, axis=0)
img = preprocessing(img)
return img
def preprocessing(x):
x = x/127.5 - 1 # -1 ~ 1
return x
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def inverse_transform(images):
return (images+1.) / 2
def imsave(images, size, path):
return misc.imsave(path, merge(images, size))
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[h*j:h*(j+1), w*i:w*(i+1), :] = image
return img
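# Example: with size=(2, 2), merge tiles a batch of 4 images of shape (h, w, 3)
# into a single (2*h, 2*w, 3) grid, filling row by row.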
def check_folder(log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def str2bool(x):
    return x.lower() in ('true',)  # the trailing comma makes this a tuple; ('true') is just a string
def cam(x, size = 256):
x = x - np.min(x)
cam_img = x / np.max(x)
cam_img = np.uint8(255 * cam_img)
cam_img = cv2.resize(cam_img, (size, size))
cam_img = cv2.applyColorMap(cam_img, cv2.COLORMAP_JET)
return cam_img / 255.0
def imagenet_norm(x):
mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
mean = torch.FloatTensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).to(x.device)
std = torch.FloatTensor(std).unsqueeze(0).unsqueeze(2).unsqueeze(3).to(x.device)
return (x - mean) / std
def denorm(x):
return x * 0.5 + 0.5
def tensor2numpy(x):
return x.detach().cpu().numpy().transpose(1,2,0)
def RGB2BGR(x):
return cv2.cvtColor(x, cv2.COLOR_RGB2BGR) |
jamesstidard/pipenv-sublime | vendor/pipenvlib.py | <gh_stars>10-100
import os
import json
from . import toml
from . import delegator
class Dependency(object):
"""A Dependency."""
def __init__(self, name, constraint, locked=False):
self.name = name
self.constraint = constraint
self.locked = locked
def __repr__(self):
return "<Dependency '{0}' constraint='{1}'>".format(
self.name, self.constraint
)
class LockedDependency(object):
"""A Locked Dependency."""
def __init__(self, name, constraint, hashes):
self.name = name
self.constraint = constraint
self.hashes = hashes
def __repr__(self):
return "<LockedDependency '{0}{1}'>".format(
self.name, self.constraint
)
class Requirement(object):
"""A Requirement."""
def __init__(self, name, constraint):
self.name = name
self.constraint = constraint
def __repr__(self):
return "<Requirement '{0}' constraint='{1}'>".format(
self.name, self.constraint
)
class PipenvProject(object):
"""A Pipenv project."""
def __init__(self, home, pipfile='Pipfile', create=False):
self.home = home
self.pipfile = pipfile
if not create:
# Assert that the Pipfile exists.
self.assert_has_pipfile()
else:
# Cheat a project-creation by installing, then uninstalling
# the Requests library. :)
self.install('requests')
self.uninstall('requests')
@property
def _pipfile_path(self):
return os.path.sep.join([self.home, self.pipfile])
@property
def _lockfile_path(self):
return os.path.sep.join([self.home, '{0}.lock'.format(self.pipfile)])
def assert_has_pipfile(self):
"""Asserts that the Pipfile exists."""
assert os.path.isfile(self._pipfile_path)
def assert_has_lockfile(self):
"""Asserts that the Pipfile.lock exists."""
assert os.path.isfile(self._lockfile_path)
def _get_section_of_pipfile(self, section, target):
def gen():
pipfile = toml.load(self._pipfile_path)
for package in pipfile[section]:
name = package
constraint = pipfile[section][package]
yield target(name, constraint)
return [p for p in gen()]
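    # Example (a sketch): given a Pipfile containing
    #   [packages]
    #   requests = "*"
    # _get_section_of_pipfile('packages', Dependency) returns
    # [<Dependency 'requests' constraint='*'>].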
@property
def packages(self):
"""Returns a list of Dependency objects (for [packages]) for
the Pipenv project"""
return self._get_section_of_pipfile('packages', Dependency)
@property
def dev_packages(self):
"""Returns a list of Dependency objects (for [dev-packages]) for
the Pipenv project.
"""
return self._get_section_of_pipfile('dev-packages', Dependency)
@property
def requires(self):
"""Returns a list of Requirement objects for the Pipenv project.
"""
return self._get_section_of_pipfile('requires', Requirement)
@property
def locked_packages(self):
"""Returns a list of LockedDependency objects for the Pipenv
project.
"""
self.assert_has_lockfile()
def gen():
with open(self._lockfile_path) as f:
lockfile = json.load(f)
for package in lockfile['default']:
name = package
constraint = lockfile['default'][package]['version']
hashes = lockfile['default'][package]['hashes']
yield LockedDependency(name, constraint, hashes)
return [l for l in gen()]
@property
def locked_dev_packages(self):
"""Returns a list of LockedDependency objects for the Pipenv
project.
"""
self.assert_has_lockfile()
def gen():
with open(self._lockfile_path) as f:
lockfile = json.load(f)
for package in lockfile['develop']:
name = package
constraint = lockfile['develop'][package]['version']
hashes = lockfile['develop'][package]['hashes']
yield LockedDependency(name, constraint, hashes)
return [l for l in gen()]
@property
def locked_requirements(self):
"""Returns a list of Requirement objects for the Pipenv
project, from the Pipfile.lock.
"""
self.assert_has_lockfile()
def gen():
with open(self._lockfile_path) as f:
lockfile = json.load(f)
for require in lockfile['_meta']['requires']:
name = require
constraint = lockfile['_meta']['requires'][require]
yield Requirement(name, constraint)
return [l for l in gen()]
def run(self, cmd, block=True):
"""Run a Pipenv command for the Pipenv project."""
return delegator.run('pipenv {0}'.format(cmd), cwd=self.home, block=block)
def install(self, package_name, constraint=None, dev=False):
"""Installs a given package to the Pipenv project."""
        # If a constraint was given,
        if constraint is not None:
            # append it to the package name.
            package_name = '{0}{1}'.format(package_name, constraint)
dev = '' if not dev else '--dev'
        return self.run('install {0} {1}'.format(package_name, dev)).return_code == 0
def uninstall(self, package_name):
"""Uninstalls a given package from the Pipenv project."""
return self.run('uninstall {0}'.format(package_name)).return_code == 0
def check(self):
"""Runs Pipenv check on the Pipenv project."""
return self.run('check').return_code == 0
@property
def virtualenv_location(self):
return self.run('--venv').out.strip()
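# Example usage (a sketch; the path is hypothetical):
# project = PipenvProject('/path/to/project')
# project.install('requests', constraint='>=2.0')
# print([d.name for d in project.packages])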
|
jamesstidard/pipenv-sublime | subl_pipenv.py | import os
import sublime
import sublime_plugin
from .vendor import requests
from .vendor import parse
from .vendor import pipenvlib
TEMPLATE = "<a href='{0}'>{0}</a><br/>"
ALL_PACKAGES = list()
def plugin_loaded():
pass
class PipenvIsEnabledMixin:
def is_enabled(self):
open_files = [view.file_name() for view in sublime.active_window().views()]
        for o_f in open_files:
            if o_f is None:  # unsaved views have no file name
                continue
            o_f = os.path.abspath(o_f)
dirname = os.path.dirname(o_f)
dirname = os.path.sep.join([dirname, '..', '..'])
for root, dirs, files in os.walk(dirname, followlinks=True):
if 'Pipfile' in files:
return True
return False
class InstallHandler(sublime_plugin.ListInputHandler):
def __init__(self):
super(InstallHandler, self).__init__()
def _yield_packages(self):
# Set the status message.
sublime.status_message("Fetching all available packages from PyPi (just a sec!)…")
# Make the HTTP Request.
r = requests.get('https://pypi.python.org/simple/')
sublime.status_message("")
# Yield results.
for result in parse.findall(TEMPLATE, r.text):
yield result[0]
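    # Example (a sketch): against a line like "<a href='requests'>requests</a><br/>",
    # parse.findall(TEMPLATE, ...) yields a match whose result[0] is 'requests'.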
@property
def _all_packages(self):
return ALL_PACKAGES
@_all_packages.setter
    def _all_packages(self, value):
global ALL_PACKAGES
ALL_PACKAGES = value
@property
def all_packages(self):
if self._all_packages:
return self._all_packages
for package in self._yield_packages():
self._all_packages.append(package)
# List special packages first, because I can.
kr_packages = (
'requests', 'requests-html', 'maya', 'records', 'httpbin', 'crayons',
'delegator.py', 'tablib', 'background', 'clint', 'xerox'
)
# Special shout-outs.
kr_favorites = ('django', 'flask', 'docopt', 'parse', 'apistar')
kr_favorites = list(kr_packages + kr_favorites)
# Reverse order.
kr_favorites.reverse()
for kr_package in kr_favorites:
package = self._all_packages.pop(self._all_packages.index(kr_package))
self._all_packages.insert(0, package)
return self._all_packages
def list_items(self):
return self.all_packages
def initial_text(self, *args):
return ""
class pipenv_install(PipenvIsEnabledMixin, sublime_plugin.WindowCommand):
def __init__(self, text):
super(pipenv_install, self).__init__(text)
# def is_enabled(self):
# return super(pipenv_install, self).is_enabled()
def input(self, *args):
return InstallHandler()
def run(self, install_handler):
# The package to install.
package = install_handler
# The home directory for the current file name.
home = os.path.dirname(sublime.active_window().active_view().file_name())
p = pipenvlib.PipenvProject(home)
# Update package status.
sublime.status_message("Installing {!r} with Pipenv…".format(package))
# Show the console.
sublime.active_window().active_view().window().run_command('show_panel', {'panel': 'console'})
# Run the install command.
c = p.run('install {}'.format(package), block=False)
# Update the status bar.
sublime.status_message("Waiting for {!r} to install…".format(package))
# Block on subprocess…
c.block()
# Print results to console.
print(c.out)
        # Ensure that the installation was successful.
try:
# Ensure installation was successful.
assert c.return_code == 0
# Update the status bar.
sublime.status_message("Success installing {!r}!".format(package))
# Open the Pipfile.
sublime.active_window().active_view().window().open_file('Pipfile')
# Hide the console.
sublime.active_window().active_view().window().run_command('hide_panel', {'panel': 'console'})
except AssertionError:
# Update the status bar.
sublime.status_message("Error installing {!r}!".format(package))
# Report the error.
print(c.err)
class pipenv_install_dev(PipenvIsEnabledMixin, sublime_plugin.WindowCommand):
def __init__(self, text):
super(pipenv_install_dev, self).__init__(text)
# def is_enabled(self):
# return super(pipenv_install, self).is_enabled()
def input(self, *args):
return InstallHandler()
def run(self, install_handler):
# The package to install.
package = install_handler
# The home directory for the current file name.
home = os.path.dirname(sublime.active_window().active_view().file_name())
p = pipenvlib.PipenvProject(home)
# Update package status.
sublime.status_message("Installing {!r} with Pipenv…".format(package))
# Show the console.
sublime.active_window().active_view().window().run_command('show_panel', {'panel': 'console'})
# Run the install command.
c = p.run('install --dev {}'.format(package), block=False)
# Update the status bar.
sublime.status_message("Waiting for {!r} to install…".format(package))
# Block on subprocess…
c.block()
# Print results to console.
print(c.out)
        # Ensure that the installation was successful.
try:
# Ensure installation was successful.
assert c.return_code == 0
# Update the status bar.
sublime.status_message("Success installing {!r}!".format(package))
# Open the Pipfile.
sublime.active_window().active_view().window().open_file('Pipfile')
# Hide the console.
sublime.active_window().active_view().window().run_command('hide_panel', {'panel': 'console'})
except AssertionError:
# Update the status bar.
sublime.status_message("Error installing {!r}!".format(package))
# Report the error.
print(c.err)
class UninstallHandler(sublime_plugin.ListInputHandler):
def __init__(self):
super(UninstallHandler, self).__init__()
def list_items(self):
home = os.path.dirname(sublime.active_window().active_view().file_name())
p = pipenvlib.PipenvProject(home)
return list(set([p.name for p in p.packages + p.dev_packages]))
def initial_text(self, *args):
return ""
class pipenv_uninstall(PipenvIsEnabledMixin, sublime_plugin.WindowCommand):
def __init__(self, text):
super(pipenv_uninstall, self).__init__(text)
# def is_enabled(self):
# return super(pipenv_uninstall, self).is_enabled()
def input(self, *args):
return UninstallHandler()
def run(self, uninstall_handler):
# The package to install.
package = uninstall_handler
# The home directory for the current file name.
home = os.path.dirname(sublime.active_window().active_view().file_name())
p = pipenvlib.PipenvProject(home)
# Update package status.
sublime.status_message("Un–installing {!r} with Pipenv…".format(package))
# Show the console.
sublime.active_window().active_view().window().run_command('show_panel', {'panel': 'console'})
# Run the uninstall command.
c = p.run('uninstall {}'.format(package), block=False)
# Update the status bar.
sublime.status_message("Waiting for {!r} to un–install…".format(package))
# Block on subprocess…
c.block()
# Print results to console.
print(c.out)
        # Ensure that the un-installation was successful.
try:
# Ensure installation was successful.
assert c.return_code == 0
# Update the status bar.
sublime.status_message("Success un–installing {!r}!".format(package))
# Open the Pipfile.
sublime.active_window().active_view().window().open_file('Pipfile')
# Hide the console.
sublime.active_window().active_view().window().run_command('hide_panel', {'panel': 'console'})
except AssertionError:
# Update the status bar.
sublime.status_message("Error un–installing {!r}!".format(package))
# Report the error.
print(c.err)
class pipenv_open_pipfile(PipenvIsEnabledMixin, sublime_plugin.WindowCommand):
def __init__(self, text):
super(pipenv_open_pipfile, self).__init__(text)
# def is_enabled(self):
# return super(pipenv_open_pipfile, self).is_enabled()
def run(self):
# Update package status.
sublime.status_message("Opening {!r} with Pipenv…".format('Pipfile'))
# Open the Pipfile.
sublime.active_window().active_view().window().open_file('Pipfile')
class pipenv_open_pipfile_lock(PipenvIsEnabledMixin, sublime_plugin.WindowCommand):
def __init__(self, text):
super(pipenv_open_pipfile_lock, self).__init__(text)
def is_enabled(self):
return super(pipenv_open_pipfile_lock, self).is_enabled()
def run(self):
# Update package status.
sublime.status_message("Opening {!r} with Pipenv…".format('Pipfile.lock'))
# Open the Pipfile.
sublime.active_window().active_view().window().open_file('Pipfile.lock')
class pipenv_lock(PipenvIsEnabledMixin, sublime_plugin.WindowCommand):
def __init__(self, text):
super(pipenv_lock, self).__init__(text)
# def is_enabled(self):
# return super(pipenv_lock, self).is_enabled()
def run(self):
# Update package status.
sublime.status_message("Locking {!r} with Pipenv…".format('Pipfile'))
home = os.path.dirname(sublime.active_window().active_view().file_name())
p = pipenvlib.PipenvProject(home)
c = p.run('lock', block=False)
c.block()
try:
assert c.return_code == 0
# Update locking status.
sublime.status_message("Success!")
# Open the Pipfile.
sublime.active_window().active_view().window().open_file('Pipfile.lock')
sublime.status_message("")
except AssertionError:
# Show the console.
sublime.active_window().active_view().window().run_command('show_panel', {'panel': 'console'})
# Update locking status.
sublime.status_message("Error while locking!")
print(c.err)
if __name__ == '__main__':
if sublime.version() < '3000':
plugin_loaded()
|
philskillz-coder/discord-py-paginator | examples/normal_list.py | from abc import ABC
from discord import app_commands, Embed, Color, Interaction
from discord.ext.paginator import paginator
class ColorPaginator(paginator.Paginator, ABC):
pass
@app_commands.command(
name="colors",
description="Show some colors"
)
async def show_colors(interaction: Interaction):
await interaction.response.send_message(
content="Nice colors",
view=await ColorPaginator.from_list(
interaction.client,
interaction.user,
data=[
{
"content": "The color red",
"embed": Embed(title="Red", color=Color.red()),
},
{
"content": "The color green",
"embed": Embed(title="Green", color=Color.green()),
},
{
"content": "The color blue",
"embed": Embed(title="Blue", color=Color.blue()),
}
]
).run()
)
|
philskillz-coder/discord-py-paginator | discord/ext/paginator/button.py | from __future__ import annotations
from typing import Any, Dict, Callable, List, Coroutine, Type, Optional, Union
from discord import ButtonStyle, Emoji, PartialEmoji, Interaction
from discord.ui import Button
from . import errors
EH = Callable[[Any, Interaction, errors.ButtonException], Coroutine[Any, Any, Any]]
CH = Callable[[Any, Interaction], Coroutine[Any, Any, bool]]
class ButtonErrorHandler:
def __init__(self, callback: EH, exception_type: Type[errors.ButtonException]):
self.callback = callback
self.exception_type: Type[errors.ButtonException] = exception_type
async def invoke(self, instance, interaction: Interaction, error: errors.ButtonException):
return await self.callback(instance, interaction, error)
class ButtonCheck:
def __init__(self, callback: CH, priority: int):
self.callback = callback
self.priority = priority
async def invoke(self, instance, interaction: Interaction):
return await self.callback(instance, interaction)
class ButtonMeta(type):
__error_handlers__: Dict[Type[errors.ButtonException], ButtonErrorHandler] = {}
__checks__: Dict[int, ButtonCheck] = {}
def __new__(mcs, *args: Any, **kwargs: Any):
name, bases, attrs = args
error_handlers = {}
checks = {}
new_cls = super().__new__(mcs, *args, **kwargs)
for base in reversed(new_cls.__mro__):
for elem, value in base.__dict__.items():
if isinstance(value, ButtonErrorHandler):
error_handlers[value.exception_type] = value
if isinstance(value, ButtonCheck):
checks[value.priority] = value
new_cls.__error_handlers__ = error_handlers
new_cls.__checks__ = checks
return new_cls
def __init__(cls, *args: Any, **kwargs: Any) -> None:
super().__init__(*args)
def button_on_error(exception_type: Type[errors.ButtonException]):
def deco(method: EH) -> ButtonErrorHandler:
return ButtonErrorHandler(method, exception_type)
return deco
def button_check(priority: int):
def deco(method: CH) -> ButtonCheck:
return ButtonCheck(method, priority)
return deco
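# Example (a sketch): attaching a check and an error handler to a custom button.
# class MyButton(BetterButton):
#     @button_check(1)
#     async def check_author(self, interaction):
#         return interaction.user.id == 1234  # hypothetical allowed user id
#
#     @button_on_error(errors.ButtonFailed)
#     async def on_failed(self, interaction, error):
#         await interaction.response.send_message(error.message, ephemeral=True)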
class BetterButton(Button, metaclass=ButtonMeta):
def __init__(
self,
style: ButtonStyle = ButtonStyle.secondary,
label: Optional[str] = None,
disabled: bool = False,
custom_id: Optional[str] = None,
url: Optional[str] = None,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
row: Optional[int] = None
):
super().__init__(
style=style,
label=label,
disabled=disabled,
custom_id=custom_id,
url=url,
emoji=emoji,
row=row
)
async def on_error(self, interaction: Interaction, error: errors.ButtonException):
raise error
async def interaction_check(self, interaction: Interaction) -> bool:
return True
async def on_click(self, interaction: Interaction):
raise NotImplementedError("On click method not implemented!")
async def callback(self, interaction: Interaction) -> Optional[Any]:
try:
value = await self.interaction_check(interaction)
if value is not True:
raise errors.ButtonFailed(f"Interaction check {self.interaction_check.__name__!r} with priority 0 failed!")
_checks: List[ButtonCheck] = sorted(self.__checks__.values(), key=lambda c: c.priority)
for check in _checks:
value = await check.invoke(self, interaction)
if value is not True:
raise errors.ButtonFailed("Button check failed!")
except errors.ButtonException as button_exception:
for exception_type, callback in self.__error_handlers__.items():
if isinstance(button_exception, exception_type):
await callback.invoke(self, interaction, button_exception)
return None
await self.on_error(interaction, button_exception)
return None
return await self.on_click(interaction)
def __init__button__(self, parent):
self.parent = parent
|
philskillz-coder/discord-py-paginator | discord/ext/paginator/modals.py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from discord import User, Interaction, ui
from discord.utils import MISSING
if TYPE_CHECKING:
from .paginator import Paginator
class QuickNav(ui.Modal, title='Quick Navigation'):
def __init__(self, *, parent: Paginator, user: User, title: str = MISSING, timeout: Optional[float] = None, custom_id: str = MISSING) -> None:
super().__init__(title=title, timeout=timeout, custom_id=custom_id)
self.parent = parent
self.user = user
page = ui.TextInput(label='Page')
async def interaction_check(self, interaction: Interaction) -> bool:
if interaction.user != self.user:
await interaction.response.send_message(
content="You are not allowed to do this",
ephemeral=True
)
raise ValueError("You are not allowed to do this!") # add better error message
return True
async def on_submit(self, interaction: Interaction):
if not str(self.page).isdigit():
await interaction.response.send_message(f"`{self.page}` is not a number!")
raise ValueError("Not a number") # add better error message
await self.parent.update_page_number(interaction, int(str(self.page)))
await self.parent.update_page_content(interaction)
|
philskillz-coder/discord-py-paginator | discord/ext/paginator/view_buttons.py | <gh_stars>1-10
from __future__ import annotations
from abc import ABC
from discord import ButtonStyle, User, Interaction
from discord.ext.commands import Bot
from . import button, errors, modals
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .paginator import Paginator
class FirstElement(button.BetterButton):
def __init__(
self,
style: ButtonStyle,
label: str,
emoji: str,
/,
client: Bot,
parent: Paginator,
user: User,
using: bool,
disabled: bool = True
):
super().__init__(
style=style,
label=label,
emoji=emoji,
disabled=disabled
)
self.client = client
self.parent = parent
self.user = user
self.using = using
@button.button_check(1)
async def check_author(self, interaction: Interaction) -> bool:
if interaction.user != self.user:
raise errors.NotAuthor("You are not allowed to do this!")
return True
async def on_error(self, interaction: Interaction, error: errors.ButtonException):
await interaction.response.send_message(
content=error.message,
ephemeral=True
)
async def on_click(self, interaction: Interaction):
await self.parent.update_page_number(interaction, 0)
await self.parent.update_page_content(interaction)
class PreviousElement(button.BetterButton):
def __init__(
self,
style: ButtonStyle,
label: str,
emoji: str,
/,
client: Bot,
parent: Paginator,
user: User,
using: bool,
disabled: bool = True
):
super().__init__(
style=style,
label=label,
emoji=emoji,
disabled=disabled
)
self.client = client
self.parent = parent
self.user = user
self.using = using
@button.button_check(1)
async def check_author(self, interaction: Interaction) -> bool:
if interaction.user != self.user:
raise errors.NotAuthor("You are not allowed to do this!")
return True
async def on_error(self, interaction: Interaction, error: errors.ButtonException):
await interaction.response.send_message(
content=error.message,
ephemeral=True
)
async def on_click(self, interaction: Interaction):
await self.parent.update_page_number(interaction, self.parent.page - 1)
await self.parent.update_page_content(interaction)
class NextElement(button.BetterButton):
def __init__(
self,
style: ButtonStyle,
label: str,
emoji: str,
/,
client: Bot,
parent: Paginator,
user: User,
using: bool,
disabled: bool = True
):
super().__init__(
style=style,
label=label,
emoji=emoji,
disabled=disabled
)
self.client = client
self.parent = parent
self.user = user
self.using = using
@button.button_check(1)
async def check_author(self, interaction: Interaction) -> bool:
if interaction.user != self.user:
raise errors.NotAuthor("You are not allowed to do this!")
return True
async def on_error(self, interaction: Interaction, error: errors.ButtonException):
await interaction.response.send_message(
content=error.message,
ephemeral=True
)
async def on_click(self, interaction: Interaction):
await self.parent.update_page_number(interaction, self.parent.page + 1)
await self.parent.update_page_content(interaction)
class LastElement(button.BetterButton):
def __init__(
self,
style: ButtonStyle,
label: str,
emoji: str,
/,
client: Bot,
parent: Paginator,
user: User,
using: bool,
disabled: bool = True
):
super().__init__(
style=style,
label=label,
emoji=emoji,
disabled=disabled
)
self.client = client
self.parent = parent
self.user = user
self.using = using
@button.button_check(1)
async def check_author(self, interaction: Interaction) -> bool:
if interaction.user != self.user:
raise errors.NotAuthor("You are not allowed to do this!")
return True
async def on_error(self, interaction: Interaction, error: errors.ButtonException):
await interaction.response.send_message(
content=error.message,
ephemeral=True
)
async def on_click(self, interaction: Interaction):
await self.parent.update_page_number(interaction, await self.parent.acquire_page_count(interaction)-1)
await self.parent.update_page_content(interaction)
class Stop(button.BetterButton):
def __init__(
self,
style: ButtonStyle,
label: str,
emoji: str,
/,
client: Bot,
parent: Paginator,
user: User,
using: bool,
disabled: bool = True
):
        super().__init__(
            style=style,
            label=label,
            emoji=emoji,
            disabled=disabled
        )
self.client = client
self.parent = parent
self.user = user
self.using = using
@button.button_check(1)
async def check_author(self, interaction: Interaction) -> bool:
if interaction.user != self.user:
raise errors.NotAuthor("You are not allowed to do this!")
return True
async def on_error(self, interaction: Interaction, error: errors.ButtonException):
await interaction.response.send_message(
content=error.message,
ephemeral=True
)
async def on_click(self, interaction: Interaction):
await self.parent.paginator_stop(interaction)
await interaction.response.send_message(
content="Stopped",
ephemeral=True
)
class Start(button.BetterButton):
def __init__(
self,
style: ButtonStyle,
label: str,
emoji: str,
/,
client: Bot,
parent: Paginator,
user: User,
using: bool,
disabled: bool = True
):
super().__init__(
style=style,
label=label,
emoji=emoji,
disabled=disabled
)
self.client = client
self.parent = parent
self.user = user
self.using = using
@button.button_check(1)
async def check_author(self, interaction: Interaction) -> bool:
if interaction.user != self.user:
raise errors.NotAuthor("You are not allowed to do this!")
return True
async def on_error(self, interaction: Interaction, error: errors.ButtonException):
await interaction.response.send_message(
content=error.message,
ephemeral=True
)
async def on_click(self, interaction: Interaction):
await self.parent.paginator_start(interaction)
await interaction.response.defer()
values = await self.parent.acquire_page_content(interaction)
values["view"] = self.parent
ws = interaction.followup
await ws.edit_message(
(await interaction.original_message()).id,
**values
)
class QuickNav(button.BetterButton):
def __init__(
self,
style: ButtonStyle,
label: str,
emoji: str,
/,
client: Bot,
parent: Paginator,
user: User,
using: bool,
disabled: bool = True
):
super().__init__(
style=style,
label=label,
emoji=emoji,
disabled=disabled
)
self.client = client
self.parent = parent
self.user = user
self.using = using
@button.button_check(1)
async def check_author(self, interaction: Interaction) -> bool:
if interaction.user != self.user:
raise errors.NotAuthor("You are not allowed to do this!")
return True
async def on_error(self, interaction: Interaction, error: errors.ButtonException):
await interaction.response.send_message(
content=error.message,
ephemeral=True
)
async def on_click(self, interaction: Interaction):
await interaction.response.send_modal(modals.QuickNav(parent=self.parent, user=self.user))
class Placeholder(button.BetterButton, ABC):
def __init__(
self,
style: ButtonStyle,
label: str,
emoji: str,
/
):
super().__init__(
style=style,
label=label,
emoji=emoji,
disabled=True
)
|
philskillz-coder/discord-py-paginator | discord/ext/paginator/paginator.py | <filename>discord/ext/paginator/paginator.py
from discord import ui, User, Interaction, Webhook, Client, ButtonStyle
from discord.ext.commands import Bot
from . import view_buttons
from typing import Dict, Any, List, Union, Optional, Callable, Coroutine
GCP_TYPE = Callable[[Interaction, int], Coroutine[Any, Any, Dict[str, Any]]]
class Paginator(ui.View):
__CONFIG__: Dict = { # DO NOT CHANGE THIS! You can add custom config attrs in Paginator.CONFIG
"paginator_view_timeout": 180,
"paginator_ephemeral": None, # this setting overwrites ephemeral= from get_page_content. if None, not overwritten
"paginator_delete_when_finished": True, # only works when paginator is not ephemeral
"paginator_delete_delay": 10,
"start_button_enabled": True, # option not changable!
"start_button_style": ButtonStyle.success,
"start_button_label": "Start",
"start_button_emoji": None,
"stop_button_enabled": True, # option not changable!
"stop_button_style": ButtonStyle.danger,
"stop_button_label": "Quit",
"stop_button_emoji": None,
"quick_navigation_button_enabled": True,
"quick_navigation_button_style": ButtonStyle.blurple,
"quick_navigation_button_label": "Nav",
"quick_navigation_button_emoji": None,
"quick_navigation_error_message": "%s is not a number!", # False means no message
"quick_navigation_error_ephemeral": True,
"first_element_button_enabled": True,
"first_element_button_style": ButtonStyle.secondary,
"first_element_button_label": "\U000025c0 \U000025c0", # None means no label
"first_element_button_emoji": None,
"prev_element_button_enabled": True,
"prev_element_button_style": ButtonStyle.secondary,
"prev_element_button_label": "\U000025c0",
"prev_element_button_emoji": None,
"next_element_button_enabled": True,
"next_element_button_style": ButtonStyle.secondary,
"next_element_button_label": "\U000025b6",
"next_element_button_emoji": None,
"last_element_button_enabled": True,
"last_element_button_style": ButtonStyle.secondary,
"last_element_button_label": "\U000025b6 \U000025b6",
"last_element_button_emoji": None,
"placeholder_button_enabled": True,
"placeholder_button_style": ButtonStyle.secondary,
"placeholder_button_label": "\U0001f6ab",
"placeholder_button_emoji": None,
}
CONFIG = __CONFIG__.copy() # will count for all instances of your paginator
@staticmethod
def parse_config(config: Dict) -> Dict:
if config is None:
            return Paginator.CONFIG.copy()  # copy so callers cannot mutate the shared defaults
_config = Paginator.CONFIG.copy()
_config.update(config)
return _config
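    # Example (a sketch): override a single option, keep the remaining defaults.
    # cfg = Paginator.parse_config({"quick_navigation_button_enabled": False})
    # cfg["quick_navigation_button_enabled"]  # -> False
    # cfg["stop_button_label"]                # -> "Quit"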
def __init__(
self,
client: Union[Bot, Client],
user: User,
config: Optional[Dict] = None,
*args, **kwargs
):
config = self.parse_config(config)
self.config = config
        super().__init__(timeout=config["paginator_view_timeout"])
self.client = client
self.user = user
self.page = 0
self.first_elem_btn = view_buttons.FirstElement(
config["first_element_button_style"],
config["first_element_button_label"],
config["first_element_button_emoji"],
client=client,
parent=self,
user=user,
using=config["first_element_button_enabled"]
)
self.prev_elem_btn = view_buttons.PreviousElement(
config["prev_element_button_style"],
config["prev_element_button_label"],
config["prev_element_button_emoji"],
client=client,
parent=self,
user=user,
using=config["prev_element_button_enabled"]
)
self.next_elem_btn = view_buttons.NextElement(
config["next_element_button_style"],
config["next_element_button_label"],
config["next_element_button_emoji"],
client=client,
parent=self,
user=user,
using=config["next_element_button_enabled"]
)
self.last_elem_btn = view_buttons.LastElement(
config["last_element_button_style"],
config["last_element_button_label"],
config["last_element_button_emoji"],
client=client,
parent=self,
user=user,
using=config["last_element_button_enabled"]
)
self.start_btn = view_buttons.Start(
config["start_button_style"],
config["start_button_label"],
config["start_button_emoji"],
client=client,
parent=self,
user=user,
using=True
)
self.stop_btn = view_buttons.Stop(
config["stop_button_style"],
config["stop_button_label"],
config["stop_button_emoji"],
client=self.client,
parent=self,
user=user,
using=True
)
self.quick_nav_btn = view_buttons.QuickNav(
config["quick_navigation_button_style"],
config["quick_navigation_button_label"],
config["quick_navigation_button_emoji"],
client=self.client,
parent=self,
user=user,
using=config["quick_navigation_button_enabled"]
)
self.static_data: Optional[List[Dict[str, Any]]] = kwargs.get("static_data")
self.static_data_page_count = len(self.static_data or []) or kwargs.get("static_page_count") or None
@classmethod
def from_list(
cls,
client: Union[Bot, Client],
user: User,
config: Optional[Dict] = None,
        data: Optional[List[Dict[str, Any]]] = None
):
return cls(
client=client,
user=user,
config=config,
static_data=data
)
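    # A hypothetical call with pre-built pages, assuming a client and user are
    # already in scope; each dict holds the edit kwargs for one page:
    #
    #     pages = [{"content": "page one"}, {"content": "page two"}]
    #     view = await Paginator.from_list(client, user, data=pages).run()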
async def setup(self, *args, **kwargs):
pass
# noinspection PyArgumentList
async def add_buttons(self):
placeholder_config = (
self.config["placeholder_button_style"],
self.config["placeholder_button_label"],
self.config["placeholder_button_emoji"]
)
self.add_item(self.first_elem_btn)
self.add_item(self.prev_elem_btn)
self.add_item(self.start_btn)
self.add_item(self.next_elem_btn)
self.add_item(self.last_elem_btn)
if self.config["placeholder_button_enabled"]:
self.add_item(view_buttons.Placeholder(*placeholder_config))
self.add_item(view_buttons.Placeholder(*placeholder_config))
self.add_item(self.stop_btn)
if self.config["placeholder_button_enabled"]:
self.add_item(view_buttons.Placeholder(*placeholder_config))
self.add_item(view_buttons.Placeholder(*placeholder_config))
async def run(self, *args, **kwargs):
await self.add_buttons()
await self.setup(*args, **kwargs)
return self
async def paginator_start(self, interaction: Interaction):
await self._paginator_start()
await self.on_start(interaction)
# noinspection PyArgumentList
async def _paginator_start(self):
self.clear_items()
if self.config["first_element_button_enabled"]:
self.first_elem_btn.disabled = False
if self.config["prev_element_button_enabled"]:
self.prev_elem_btn.disabled = False
if self.config["next_element_button_enabled"]:
self.next_elem_btn.disabled = False
if self.config["last_element_button_enabled"]:
self.last_elem_btn.disabled = False
        self.quick_nav_btn.disabled = not self.config["quick_navigation_button_enabled"]  # enabled in the config means the button is clickable
placeholder_config = (
self.config["placeholder_button_style"],
self.config["placeholder_button_label"],
self.config["placeholder_button_emoji"]
)
self.add_item(self.first_elem_btn)
self.add_item(self.prev_elem_btn)
self.add_item(self.quick_nav_btn)
self.add_item(self.next_elem_btn)
self.add_item(self.last_elem_btn)
if self.config["placeholder_button_enabled"]:
self.add_item(view_buttons.Placeholder(*placeholder_config))
self.add_item(view_buttons.Placeholder(*placeholder_config))
self.add_item(self.stop_btn)
if self.config["placeholder_button_enabled"]:
self.add_item(view_buttons.Placeholder(*placeholder_config))
self.add_item(view_buttons.Placeholder(*placeholder_config))
async def paginator_stop(self, interaction: Interaction):
        if self.config["paginator_delete_when_finished"]:
            if not interaction.message.flags.ephemeral:  # ephemeral messages cannot be deleted
                await interaction.message.delete(
                    delay=self.config["paginator_delete_delay"]
                )
await self.on_stop(interaction)
super().stop()
async def on_start(self, interaction: Interaction):
pass
async def on_stop(self, interaction: Interaction):
pass
async def acquire_page_count(self, interaction: Interaction) -> int:
if self.static_data_page_count is None:
return await self.get_page_count(interaction)
return self.static_data_page_count
async def get_page_count(self, interaction: Interaction) -> int:
raise NotImplementedError("get_page_count must be implemented!")
async def update_page_number(self, interaction: Interaction, page: int):
count = await self.acquire_page_count(interaction)
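        # the modulo wraps navigation around: page == count lands back on the
        # first page, and page == -1 lands on the last page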
        self.page = page % count
async def update_page_content(self, interaction: Interaction):
await interaction.response.defer()
contents = await self.acquire_page_content(interaction)
        # a message's ephemerality cannot be changed after sending, and
        # Webhook.edit_message does not accept an "ephemeral" kwarg, so the
        # key is stripped from the page content before editing
        contents.pop("ephemeral", None)
# noinspection PyTypeChecker
ws: Webhook = interaction.followup
await ws.edit_message(
(await interaction.original_message()).id,
**contents
)
async def acquire_page_content(self, interaction: Interaction):
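        # static pages (e.g. supplied via from_list) take precedence; the
        # overridable get_page_content hook is only consulted for dynamic data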
if self.static_data is None:
return await self.get_page_content(interaction, self.page)
return self.static_data[self.page]
async def get_page_content(self, interaction: Interaction, page: int) -> Dict[str, Any]:
raise NotImplementedError("get_page_content must be implemented!")
|
philskillz-coder/discord-py-paginator | examples/default.py | <reponame>philskillz-coder/discord-py-paginator
from discord import app_commands, Embed, Color, Interaction
from discord.ext.paginator import paginator
from typing import Dict, Any
class GuildPaginator(paginator.Paginator):
async def get_page_count(self, interaction: Interaction) -> int:
return len(self.client.guilds)
async def get_page_content(self, interaction: Interaction, page: int) -> Dict[str, Any]:
        # this method should return the keyword arguments used to edit the message
        # (e.g. {"content": "hello"} means the message content will be edited to "hello")
guild = self.client.guilds[page]
        # this cannot raise an IndexError because update_page_number keeps page between 0 and the page count - 1
return {
"content": f"Guild {page+1}/{await self.get_page_count(interaction)}",
"embed": (
Embed(
title="Guild",
colour=Color.green()
)
.add_field(name="Name", value=guild.name)
.add_field(name="ID", value=str(guild.id))
.add_field(name="Member count", value=str(guild.member_count), inline=False)
),
"ephemeral": True
}
@app_commands.command(
name="guilds",
description="Show all the guilds"
)
async def show_guilds(interaction: Interaction):
await interaction.response.send_message(
content="The bot guilds",
view=await GuildPaginator(
interaction.client,
interaction.user
).run()
)
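# The command above is defined but never attached to a client. What follows is
# a minimal, hypothetical bootstrap sketch: the client class name, the intents
# choice and the token placeholder are assumptions, not part of the original
# example.
import discord
class ExampleClient(discord.Client):
    def __init__(self):
        super().__init__(intents=discord.Intents.default())
        self.tree = app_commands.CommandTree(self)
        self.tree.add_command(show_guilds)  # register the slash command on the tree
    async def setup_hook(self):
        await self.tree.sync()  # push the command to Discord on startup
# ExampleClient().run("YOUR_BOT_TOKEN")  # replace the placeholder token to actually run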
|