repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
AOE-Net | AOE-Net-main/evaluation_anet/utils.py | import json
import urllib2
import numpy as np
API = 'http://ec2-52-11-11-89.us-west-2.compute.amazonaws.com/challenge16/api.py'
def get_blocked_videos(api=API):
    """Fetch the list of blocked videos from the challenge server.

    Parameters
    ----------
    api : str, optional
        Base URL of the challenge API (defaults to the module-level API).

    Returns
    -------
    Decoded JSON payload as returned by the server.
    """
    # NOTE: the original used the Python-2-only `urllib2` module; the
    # equivalent Python 3 API lives in `urllib.request`. Imported locally
    # so the fix is self-contained.
    from urllib.request import Request, urlopen
    api_url = '{}?action=get_blocked'.format(api)
    req = Request(api_url)
    response = urlopen(req)
    return json.loads(response.read())
def interpolated_prec_rec(prec, rec):
    """Interpolated average precision (VOCdevkit, VOC 2011 scheme).

    Pads the precision/recall curves, makes precision monotonically
    non-increasing from right to left, and sums precision over the
    recall steps.
    """
    padded_prec = np.hstack([[0], prec, [0]])
    padded_rec = np.hstack([[0], rec, [1]])
    # Sweep right-to-left so each precision value becomes the maximum of
    # itself and everything to its right (the interpolated envelope).
    for pos in reversed(range(len(padded_prec) - 1)):
        if padded_prec[pos + 1] > padded_prec[pos]:
            padded_prec[pos] = padded_prec[pos + 1]
    # Indices where recall changes; each step contributes its width times
    # the interpolated precision at the step's right edge.
    step_idx = np.where(padded_rec[1:] != padded_rec[:-1])[0] + 1
    widths = padded_rec[step_idx] - padded_rec[step_idx - 1]
    return np.sum(widths * padded_prec[step_idx])
def segment_iou(target_segment, candidate_segments):
    """Compute the temporal intersection over union between a
    target segment and all the test segments.

    Parameters
    ----------
    target_segment : 1d array
        Temporal target segment containing [starting, ending] times.
    candidate_segments : 2d array
        Temporal candidate segments containing N x [starting, ending] times.

    Returns
    -------
    tiou : 1d array
        Temporal intersection over union score of the N candidate segments.
    """
    starts = candidate_segments[:, 0]
    ends = candidate_segments[:, 1]
    # Intersection length, clipped at zero for disjoint segments.
    overlap = (np.minimum(target_segment[1], ends)
               - np.maximum(target_segment[0], starts)).clip(0)
    # Union = sum of both lengths minus the overlap.
    union = ((ends - starts)
             + (target_segment[1] - target_segment[0])
             - overlap)
    return overlap.astype(float) / union
def wrapper_segment_iou(target_segments, candidate_segments):
    """Compute intersection over union between all pairs of segments.

    Parameters
    ----------
    target_segments : ndarray
        2-dim array in format [m x 2:=[init, end]]
    candidate_segments : ndarray
        2-dim array in format [n x 2:=[init, end]]

    Returns
    -------
    tiou : ndarray
        2-dim array [n x m] with IoU ratios.

    Note: it assumes that candidate segments are more scarce than
    target segments.

    Raises
    ------
    ValueError
        If either input is not 2-dimensional.
    """
    if candidate_segments.ndim != 2 or target_segments.ndim != 2:
        raise ValueError('Dimension of arguments is incorrect')
    n, m = candidate_segments.shape[0], target_segments.shape[0]
    tiou = np.empty((n, m))
    # BUGFIX: `xrange` is Python-2-only and raises NameError on Python 3;
    # `range` is the equivalent.
    for i in range(m):
        tiou[:, i] = segment_iou(target_segments[i, :], candidate_segments)
    return tiou
| 2,652 | 32.1625 | 81 | py |
AOE-Net | AOE-Net-main/evaluation_thumos/prop_eval.py | import io
import requests
import sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import json
import os
def pkl2dataframe(frm_nums, movie_fps_file, result_file):
    """Convert pickled detection results into rows for a DataFrame.

    Parameters
    ----------
    frm_nums : dict
        Maps video id -> total number of frames.
    movie_fps_file : str
        Path to a pickle mapping video id -> frames per second.
    result_file : str
        Path to a pickle mapping video id -> iterable of
        (start_sec, end_sec, score) proposals.

    Returns
    -------
    list of [f-end, f-init, score, video-frames, video-name] rows.

    NOTE(review): `main()` in this file calls this function with only
    `frm_nums`; the two pickle paths appear to be missing there --
    confirm the intended call signature.
    """
    data_frame = []
    movie_fps = pickle.load(open(movie_fps_file, 'rb'))
    dt_results = pickle.load(open(result_file, 'rb'))
    for _key in dt_results:
        fps = movie_fps[_key]
        frm_num = frm_nums[_key]
        for line in dt_results[_key]:
            # Convert second-based boundaries to frame indices.
            start = int(line[0]*fps)
            end = int(line[1]*fps)
            score = float(line[2])
            data_frame.append([end, start, score, frm_num, _key])
    return data_frame
def segment_tiou(target_segments, test_segments):
    """Compute intersection over union between segments.

    Parameters
    ----------
    target_segments : ndarray
        2-dim array in format [m x 2:=[init, end]]
    test_segments : ndarray
        2-dim array in format [n x 2:=[init, end]]

    Returns
    -------
    tiou : ndarray
        2-dim array [m x n] with IoU ratios.

    Note: it assumes that target segments are more scarce than test
    segments.
    """
    if target_segments.ndim != 2 or test_segments.ndim != 2:
        raise ValueError('Dimension of arguments is incorrect')
    m = target_segments.shape[0]
    n = test_segments.shape[0]
    tiou = np.empty((m, n))
    test_init = test_segments[:, 0]
    test_end = test_segments[:, 1]
    # Inclusive frame counting: a segment [a, b] spans b - a + 1 frames.
    test_len = test_end - test_init + 1
    for row in range(m):
        tgt_init = target_segments[row, 0]
        tgt_end = target_segments[row, 1]
        # Overlap clipped at zero for disjoint segments.
        overlap = (np.minimum(tgt_end, test_end)
                   - np.maximum(tgt_init, test_init) + 1.0).clip(0)
        union = test_len + (tgt_end - tgt_init + 1) - overlap
        # IoU at the frame level.
        tiou[row, :] = overlap / union
    return tiou
def average_recall_vs_nr_proposals(proposals, ground_truth,
                                   tiou_thresholds=np.linspace(0.5, 1.0, 11)):
    """Computes the average recall given an average number
    of proposals per video.

    Parameters
    ----------
    proposals : DataFrame
        pandas table with the resulting proposals. It must include
        the following columns: {'video-name': (str) Video identifier,
                                'f-init': (int) Starting index Frame,
                                'f-end': (int) Ending index Frame,
                                'score': (float) Proposal confidence}
    ground_truth : DataFrame
        pandas table with annotations of the dataset. It must include
        the following columns: {'video-name': (str) Video identifier,
                                'f-init': (int) Starting index Frame,
                                'f-end': (int) Ending index Frame}
    tiou_thresholds : 1darray, optional
        array with tiou thresholds.

    Outputs
    -------
    average_recall : 1darray
        recall averaged over a list of tiou thresholds.
    proposals_per_video : 1darray
        average number of proposals per video.
    """
    # Get list of videos.
    video_lst = proposals['video-name'].unique()
    # For each video, computes tiou scores among the retrieved proposals.
    score_lst = []
    for videoid in video_lst:
        # Get proposals for this video.
        prop_idx = proposals['video-name'] == videoid
        this_video_proposals = proposals[prop_idx][['f-init',
                                                    'f-end']].values
        # Sort proposals by score (descending).
        sort_idx = proposals[prop_idx]['score'].argsort()[::-1]
        this_video_proposals = this_video_proposals[sort_idx, :]
        # Get ground-truth instances associated to this video.
        gt_idx = ground_truth['video-name'] == videoid
        this_video_ground_truth = ground_truth[gt_idx][['f-init',
                                                        'f-end']].values
        # Compute tiou scores: rows are GT instances, columns are proposals
        # in decreasing-score order.
        tiou = segment_tiou(this_video_ground_truth, this_video_proposals)
        score_lst.append(tiou)
    # Given that the length of the videos is really varied, we
    # compute the number of proposals in terms of a ratio of the total
    # proposals retrieved, i.e. average recall at a percentage of proposals
    # retrieved per video.
    # Computes average recall.
    pcn_lst = np.arange(1, 101) / 100.0
    matches = np.empty((video_lst.shape[0], pcn_lst.shape[0]))
    positives = np.empty(video_lst.shape[0])
    recall = np.empty((tiou_thresholds.shape[0], pcn_lst.shape[0]))
    # Iterates over each tiou threshold.
    for ridx, tiou in enumerate(tiou_thresholds):
        # Inspect positives retrieved per video at different
        # number of proposals (percentage of the total retrieved).
        for i, score in enumerate(score_lst):
            # Total positives per video.
            positives[i] = score.shape[0]
            for j, pcn in enumerate(pcn_lst):
                # Get number of proposals as a percentage of total retrieved.
                nr_proposals = int(score.shape[1] * pcn)
                # Count GT instances matched by at least one of the top
                # nr_proposals proposals at this tiou threshold.
                matches[i, j] = ((score[:, :nr_proposals] >= tiou).sum(axis=1) > 0).sum()
        # Computes recall given the set of matches per video.
        recall[ridx, :] = matches.sum(axis=0) / positives.sum()
    # Recall is averaged over the tiou thresholds.
    recall = recall.mean(axis=0)
    # Get the average number of proposals per video.
    proposals_per_video = pcn_lst * (float(proposals.shape[0]) / video_lst.shape[0])
    return recall, proposals_per_video
def average_recall_vs_freq(proposals, ground_truth, frm_nums,
                           tiou_thresholds=np.linspace(0.5, 1.0, 11)):
    """Computes average recall as a function of proposal frequency.

    Like `average_recall_vs_nr_proposals`, but the proposal budget per
    video is freq * frm_num / 30.0 (proposals per second assuming 30
    fps -- TODO confirm the fps assumption) instead of a percentage of
    the retrieved proposals.

    Parameters
    ----------
    proposals, ground_truth : DataFrame
        Same column layout as in `average_recall_vs_nr_proposals`.
    frm_nums : dict
        Maps video name -> total number of frames.
    tiou_thresholds : 1darray, optional
        Array with tiou thresholds.

    Outputs
    -------
    recall : 1darray
        Recall averaged over the tiou thresholds, per frequency.
    freq_lst : 1darray
        The evaluated proposal frequencies (log-spaced in [0.1, ~8)).
    """
    # Get list of videos.
    video_lst = proposals['video-name'].unique()
    # For each video, computes tiou scores among the retrieved proposals.
    score_lst = []
    score_name = []
    for videoid in video_lst:
        # Get proposals for this video.
        prop_idx = proposals['video-name'] == videoid
        this_video_proposals = proposals[prop_idx][['f-init',
                                                    'f-end']].values
        # Sort proposals by score (descending).
        sort_idx = proposals[prop_idx]['score'].argsort()[::-1]
        this_video_proposals = this_video_proposals[sort_idx, :]
        # Get ground-truth instances associated to this video.
        gt_idx = ground_truth['video-name'] == videoid
        this_video_ground_truth = ground_truth[gt_idx][['f-init',
                                                        'f-end']].values
        # Compute tiou scores.
        tiou = segment_tiou(this_video_ground_truth, this_video_proposals)
        score_lst.append(tiou)
        score_name.append(videoid)
    # Computes average recall over log-spaced frequencies.
    freq_lst = np.array([float(number) for number in 10**(np.arange(-1,0.9,0.1))])
    matches = np.empty((video_lst.shape[0], freq_lst.shape[0]))
    positives = np.empty(video_lst.shape[0])
    recall = np.empty((tiou_thresholds.shape[0], freq_lst.shape[0]))
    # Iterates over each tiou threshold.
    for ridx, tiou in enumerate(tiou_thresholds):
        # Inspect positives retrieved per video at different
        # proposal frequencies.
        for i, score in enumerate(score_lst):
            frm_num = frm_nums[score_name[i]]
            # Total positives per video.
            positives[i] = score.shape[0]
            for j, freq in enumerate(freq_lst):
                # Proposal budget derived from frequency and video length,
                # capped at the number of retrieved proposals.
                nr_proposals = min(score.shape[1],int(freq*frm_num/30.0))
                # Find proposals that satisfy the minimum tiou threshold.
                matches[i, j] = ((score[:, :nr_proposals] >= tiou).sum(axis=1) > 0).sum()
        # Computes recall given the set of matches per video.
        recall[ridx, :] = matches.sum(axis=0) / positives.sum()
    # Recall is averaged over the tiou thresholds.
    recall = recall.mean(axis=0)
    return recall, freq_lst
def recall_vs_tiou_thresholds(proposals, ground_truth, nr_proposals=1000,
                              tiou_thresholds=np.arange(0.05, 1.05, 0.05)):
    """Computes recall at each tiou threshold for a fixed average number
    of proposals per video.

    Parameters
    ----------
    proposals, ground_truth : DataFrame
        Same column layout as in `average_recall_vs_nr_proposals`.
    nr_proposals : int, optional
        Target average number of proposals per video.
    tiou_thresholds : 1darray, optional
        Array with tiou thresholds.

    Outputs
    -------
    recall : 1darray
        Recall at each tiou threshold.
    tiou_thresholds : 1darray
        The thresholds (returned unchanged for plotting convenience).
    """
    # Get list of videos.
    video_lst = proposals['video-name'].unique()
    # For each video, computes tiou scores among the retrieved proposals.
    score_lst = []
    for videoid in video_lst:
        # Get proposals for this video.
        prop_idx = proposals['video-name'] == videoid
        this_video_proposals = proposals[prop_idx][['f-init',
                                                    'f-end']].values
        # Sort proposals by score (descending).
        sort_idx = proposals[prop_idx]['score'].argsort()[::-1]
        this_video_proposals = this_video_proposals[sort_idx, :]
        # Get ground-truth instances associated to this video.
        gt_idx = ground_truth['video-name'] == videoid
        this_video_ground_truth = ground_truth[gt_idx][['f-init',
                                                        'f-end']].values
        # Compute tiou scores.
        tiou = segment_tiou(this_video_ground_truth, this_video_proposals)
        score_lst.append(tiou)
    # To obtain the average number of proposals, we need to define a
    # percentage of proposals to get per video.
    pcn = (video_lst.shape[0] * float(nr_proposals)) / proposals.shape[0]
    # Computes recall at different tiou thresholds.
    matches = np.empty((video_lst.shape[0], tiou_thresholds.shape[0]))
    positives = np.empty(video_lst.shape[0])
    recall = np.empty(tiou_thresholds.shape[0])
    # Iterates over each tiou threshold.
    for ridx, tiou in enumerate(tiou_thresholds):
        for i, score in enumerate(score_lst):
            # Total positives per video.
            positives[i] = score.shape[0]
            # Get number of proposals at the fixed percentage of total
            # retrieved (note: this rebinds the `nr_proposals` parameter,
            # which is safe because `pcn` was already computed from it).
            nr_proposals = int(score.shape[1] * pcn)
            # Find proposals that satisfy the minimum tiou threshold.
            matches[i, ridx] = ((score[:, :nr_proposals] >= tiou).sum(axis=1) > 0).sum()
        # Computes recall given the set of matches per video.
        recall[ridx] = matches[:, ridx].sum(axis=0) / positives.sum()
    return recall, tiou_thresholds
def recall_freq_vs_tiou_thresholds(proposals, ground_truth, frm_nums,
                                   tiou_thresholds=np.arange(0.05, 1.05, 0.05)):
    """Computes recall at each tiou threshold for a fixed proposal
    frequency (freq = 1.0 proposal per second, assuming 30 fps --
    TODO confirm the fps assumption).

    Parameters
    ----------
    proposals, ground_truth : DataFrame
        Same column layout as in `average_recall_vs_nr_proposals`.
    frm_nums : dict
        Maps video name -> total number of frames.
    tiou_thresholds : 1darray, optional
        Array with tiou thresholds.

    Outputs
    -------
    recall : 1darray
        Recall at each tiou threshold.
    tiou_thresholds : 1darray
        The thresholds (returned unchanged for plotting convenience).
    """
    # Get list of videos.
    video_lst = proposals['video-name'].unique()
    # For each video, computes tiou scores among the retrieved proposals.
    score_lst = []
    score_name = []
    for videoid in video_lst:
        # Get proposals for this video.
        prop_idx = proposals['video-name'] == videoid
        this_video_proposals = proposals[prop_idx][['f-init',
                                                    'f-end']].values
        # Sort proposals by score (descending).
        sort_idx = proposals[prop_idx]['score'].argsort()[::-1]
        this_video_proposals = this_video_proposals[sort_idx, :]
        # Get ground-truth instances associated to this video.
        gt_idx = ground_truth['video-name'] == videoid
        this_video_ground_truth = ground_truth[gt_idx][['f-init',
                                                        'f-end']].values
        # Compute tiou scores.
        tiou = segment_tiou(this_video_ground_truth, this_video_proposals)
        score_lst.append(tiou)
        score_name.append(videoid)
    # Fixed proposal frequency for this metric.
    freq = 1.0
    # Computes recall at different tiou thresholds.
    matches = np.empty((video_lst.shape[0], tiou_thresholds.shape[0]))
    positives = np.empty(video_lst.shape[0])
    recall = np.empty(tiou_thresholds.shape[0])
    # Iterates over each tiou threshold.
    for ridx, tiou in enumerate(tiou_thresholds):
        for i, score in enumerate(score_lst):
            # Total positives per video.
            positives[i] = score.shape[0]
            frm_num = frm_nums[score_name[i]]
            # Proposal budget derived from frequency and video length,
            # capped at the number of retrieved proposals.
            nr_proposals = int(min(score.shape[1],freq*frm_num/30.0))
            # Find proposals that satisfy the minimum tiou threshold.
            matches[i, ridx] = ((score[:, :nr_proposals] >= tiou).sum(axis=1) > 0).sum()
        # Computes recall given the set of matches per video.
        recall[ridx] = matches[:, ridx].sum(axis=0) / positives.sum()
    return recall, tiou_thresholds
def main():
    """Load proposal results and ground truth, compute the recall
    metrics, and save four comparison plots (PDF names derived from
    sys.argv[1]).

    NOTE(review): `pkl2dataframe` is defined with three parameters but
    is called below with only `frm_nums` -- confirm the intended paths
    for the fps/result pickles.
    """
    # Retrieves and loads DAPs proposal results.
    frm_nums = pickle.load(open("./frm_num.pkl", 'rb'))
    rows = pkl2dataframe(frm_nums)
    daps_results = pd.DataFrame(rows, columns = ['f-end','f-init','score','video-frames','video-name'])
    # Retrieves and loads Thumos14 test set ground-truth.
    ground_truth_url = ('https://gist.githubusercontent.com/cabaf/'
                        'ed34a35ee4443b435c36de42c4547bd7/raw/'
                        '952f17b9cdc6aa4e6d696315ba75091224f5de97/'
                        'thumos14_test_groundtruth.csv')
    s = requests.get(ground_truth_url).content
    ground_truth = pd.read_csv(io.StringIO(s.decode('utf-8')),sep=' ')
    # Computes average recall vs average number of proposals.
    average_recall, average_nr_proposals = average_recall_vs_nr_proposals(daps_results,
                                                                          ground_truth)
    # Computes average recall vs proposal frequency.
    average_recall_freq, freqs = average_recall_vs_freq(daps_results, ground_truth, frm_nums)
    # Computes recall for different tiou thresholds at a fixed average number of proposals.
    recall, tiou_thresholds = recall_vs_tiou_thresholds(daps_results, ground_truth,
                                                        nr_proposals=1000)
    recall_freq, tiou_thresholds_freq = recall_freq_vs_tiou_thresholds(daps_results, ground_truth, frm_nums)
    # Define plot style (one entry per method/curve).
    method = {'DAPs':{'legend': 'DAPs-prop',
                      'color': np.array([102,166,30]) / 255.0,
                      'marker': None,
                      'linewidth': 6.5,
                      'linestyle': '-'},
              'SCNN-prop':{'legend': 'SCNN-prop',
                           'color': np.array([230,171,2]) / 255.0,
                           'marker': None,
                           'linewidth': 6.5,
                           'linestyle': '-'},
              'Sparse-prop':{'legend': 'Sparse-prop',
                             'color': np.array([153,78,160]) / 255.0,
                             'marker': None,
                             'linewidth': 6.5,
                             'linestyle': '-'},
              'Sliding Window':{'legend': 'Sliding Window',
                                'color': np.array([205,110,51]) / 255.0,
                                'marker': None,
                                'linewidth': 6.5,
                                'linestyle': '-'},
              'Random':{'legend': 'Random',
                        'color': np.array([132,132,132]) / 255.0,
                        'marker': None,
                        'linewidth': 6.5,
                        'linestyle': '-'},
              'Our Method':{'legend': 'Our Method',
                            'color': np.array([224,44,119]) / 255.0,
                            'marker': None,
                            'linewidth': 6.5,
                            'linestyle': '-'}
              }
    fn_size = 30
    legend_size = 27.5
    # Reference points load: precomputed baseline curves stored as
    # [x-values; y-values] arrays.
    avg_prop_pnt_pairs = {}
    avg_prop_pnt_pairs['DAPs'] = np.load("./ref_pnt_pairs/DAP_avg_prop_pnt_pairs.npy")
    avg_prop_pnt_pairs['SCNN-prop'] = np.load("./ref_pnt_pairs/SCNN_avg_prop_pnt_pairs.npy")
    avg_prop_pnt_pairs['Sparse-prop'] = np.load("./ref_pnt_pairs/sparse_avg_prop_pnt_pairs.npy")
    # NOTE(review): the following self-assignment is a no-op.
    avg_prop_pnt_pairs['Sparse-prop'] = avg_prop_pnt_pairs['Sparse-prop']
    avg_prop_pnt_pairs['flow'] = np.load("./ref_pnt_pairs/flow_svm_avg_prop_pnt_pairs.npy")
    avg_prop_pnt_pairs['Sliding Window'] = np.load("./ref_pnt_pairs/sliding_avg_prop_pnt_pairs.npy")
    avg_prop_pnt_pairs['Random'] = np.load("./ref_pnt_pairs/random_avg_prop_pnt_pairs.npy")
    avg_prop_pnt_pairs['Our Method'] = np.array([average_nr_proposals,average_recall])
    freq_pnt_pairs = {}
    freq_pnt_pairs['DAPs'] = np.load("./ref_pnt_pairs/DAP_freq_pnt_pairs.npy")
    freq_pnt_pairs['SCNN-prop'] = np.load("./ref_pnt_pairs/scnn_freq_pnt_pairs.npy")
    freq_pnt_pairs['Sparse-prop'] = np.load("./ref_pnt_pairs/sparse_freq_pnt_pairs.npy")
    # Drop the last two points of the Sparse-prop frequency curve.
    freq_pnt_pairs['Sparse-prop'] = freq_pnt_pairs['Sparse-prop'][:,0:-2]
    freq_pnt_pairs['flow'] = np.load("./ref_pnt_pairs/flow_svm_freq_pnt_pairs.npy")
    freq_pnt_pairs['Sliding Window'] = np.load("./ref_pnt_pairs/sliding_freq_pnt_pairs.npy")
    freq_pnt_pairs['Random'] = np.load("./ref_pnt_pairs/random_freq_pnt_pairs.npy")
    freq_pnt_pairs['Our Method'] = np.array([freqs, average_recall_freq])
    recall1000_pnt_pairs = {}
    recall1000_pnt_pairs['DAPs'] = np.load("./ref_pnt_pairs/DAP_recall_pnt_pairs.npy")
    recall1000_pnt_pairs['SCNN-prop'] = np.load("./ref_pnt_pairs/SCNN_recall_pnt_pairs.npy")
    recall1000_pnt_pairs['Sparse-prop'] = np.load("./ref_pnt_pairs/sparse_recall_pnt_pairs.npy")
    recall1000_pnt_pairs['flow'] = np.load("./ref_pnt_pairs/flow_svm_recall_pnt_pairs.npy")
    recall1000_pnt_pairs['Sliding Window'] = np.load("./ref_pnt_pairs/sliding_recall_pnt_pairs.npy")
    recall1000_pnt_pairs['Random'] = np.load("./ref_pnt_pairs/random_recall_pnt_pairs.npy")
    recall1000_pnt_pairs['Our Method'] = np.array([tiou_thresholds,recall])
    recall_freq_pnt_pairs = {}
    recall_freq_pnt_pairs['DAPs'] = np.load("./ref_pnt_pairs/DAPs_recall_freq_pnt_pairs.npy")
    recall_freq_pnt_pairs['SCNN-prop'] = np.load("./ref_pnt_pairs/scnn_recall_freq_pnt_pairs.npy")
    recall_freq_pnt_pairs['Sparse-prop'] = np.load("./ref_pnt_pairs/sparse_recall_freq_pnt_pairs.npy")
    #recall_freq_pnt_pairs['flow'] = np.load("./ref_pnt_pairs/flow_svm_recall_pnt_pairs.npy")
    recall_freq_pnt_pairs['Sliding Window'] = np.load("./ref_pnt_pairs/sliding_recall_freq_pnt_pairs.npy")
    recall_freq_pnt_pairs['Random'] = np.load("./ref_pnt_pairs/random_recall_freq_pnt_pairs.npy")
    recall_freq_pnt_pairs['Our Method'] = np.array([tiou_thresholds_freq,recall_freq])
    legends = ['Random','Sliding Window','Sparse-prop','DAPs','SCNN-prop','Our Method']
    #legends = ['DAPs','SCNN-prop','Our Method']
    # legends = ['Our Method']
    plt.figure(num=None, figsize=(12, 10))
    # Plot 1: Average Recall vs Average number of proposals (log x-axis).
    for _key in legends:
        plt.semilogx(avg_prop_pnt_pairs[_key][0,:],avg_prop_pnt_pairs[_key][1,:],
                     label=method[_key]['legend'],
                     color=method[_key]['color'],
                     linewidth=method[_key]['linewidth'],
                     linestyle=str(method[_key]['linestyle']),
                     marker=str(method[_key]['marker']))
    plt.ylabel('Average Recall', fontsize=fn_size)
    plt.xlabel('Average number of retrieved proposals', fontsize=fn_size)
    plt.grid(b=True, which="both")
    plt.ylim([0, 0.8])
    plt.xlim([10**1, 5*10**3])
    plt.yticks(np.arange(0.0,0.9,0.2))
    plt.legend(legends,loc=2,prop={'size':legend_size})
    plt.setp(plt.axes().get_xticklabels(), fontsize=fn_size)
    plt.setp(plt.axes().get_yticklabels(), fontsize=fn_size)
    plt.savefig(sys.argv[1].split(".pkl")[0]+"_avg_recall.pdf",bbox_inches="tight")
    #plt.show()
    plt.figure(num=None, figsize=(12, 10))
    # Plot 2: Average Recall vs proposal frequency (log x-axis).
    for _key in legends:
        plt.semilogx(freq_pnt_pairs[_key][0,:],freq_pnt_pairs[_key][1,:],
                     label=method[_key]['legend'],
                     color=method[_key]['color'],
                     linewidth=method[_key]['linewidth'],
                     linestyle=str(method[_key]['linestyle']),
                     marker=str(method[_key]['marker']))
    plt.ylabel('Average Recall', fontsize=fn_size)
    plt.xlabel('Proposal frequency', fontsize=fn_size)
    plt.grid(b=True, which="both")
    plt.ylim([0, 0.8])
    plt.xlim([10**(-1), 10])
    plt.yticks(np.arange(0.0,0.9,0.2))
    plt.legend(legends,loc=2,prop={'size':legend_size})
    plt.setp(plt.axes().get_xticklabels(), fontsize=fn_size)
    plt.setp(plt.axes().get_yticklabels(), fontsize=fn_size)
    plt.savefig(sys.argv[1].split(".pkl")[0]+"_freq.pdf",bbox_inches="tight")
    #plt.show()
    # Plot 3: recall at different tiou thresholds, 1000 proposals/video.
    plt.figure(num=None, figsize=(12, 10))
    for _key in legends:
        plt.plot(recall1000_pnt_pairs[_key][0,:],recall1000_pnt_pairs[_key][1,:],
                 label=method[_key]['legend'],
                 color=method[_key]['color'],
                 linewidth=method[_key]['linewidth'],
                 linestyle=str(method[_key]['linestyle']),
                 marker=str(method[_key]['marker']))
    plt.ylabel('Recall@AN=1000', fontsize=fn_size)
    plt.xlabel('tIoU', fontsize=fn_size)
    plt.grid(b=True, which="both")
    plt.ylim([0, 1])
    plt.xlim([0.1, 1])
    plt.xticks(np.arange(0.0,1.1,0.2))
    plt.legend(legends,loc=3,prop={'size':legend_size})
    plt.setp(plt.axes().get_xticklabels(), fontsize=fn_size)
    plt.setp(plt.axes().get_yticklabels(), fontsize=fn_size)
    plt.savefig(sys.argv[1].split(".pkl")[0]+"_recall1000.pdf",bbox_inches="tight")
    #plt.show()
    plt.figure(num=None, figsize=(12, 10))
    # Plot 4: recall at different tiou thresholds, frequency = 1.0.
    for _key in legends:
        plt.plot(recall_freq_pnt_pairs[_key][0,:],recall_freq_pnt_pairs[_key][1,:],
                 label=method[_key]['legend'],
                 color=method[_key]['color'],
                 linewidth=method[_key]['linewidth'],
                 linestyle=str(method[_key]['linestyle']),
                 marker=str(method[_key]['marker']))
    plt.ylabel('Recall@F=1.0', fontsize=fn_size)
    plt.xlabel('tIoU', fontsize=fn_size)
    plt.grid(b=True, which="both")
    plt.ylim([0, 1])
    plt.xlim([0.1, 1])
    plt.xticks(np.arange(0.0,1.1,0.2))
    plt.legend(legends,loc=3,prop={'size':legend_size})
    plt.setp(plt.axes().get_xticklabels(), fontsize=fn_size)
    plt.setp(plt.axes().get_yticklabels(), fontsize=fn_size)
    plt.savefig(sys.argv[1].split(".pkl")[0]+"_recall_freq.pdf",bbox_inches="tight")
    #plt.show()
# Script entry point: sys.argv[1] is used inside main() to derive the
# output PDF file names.
if __name__ == '__main__':
    main()
| 22,153 | 42.269531 | 108 | py |
advectionDiffusion | advectionDiffusion-main/Simulator.py | """
Forward model in the advection diffusion example.
As a spatio-temporal model, we separate it into the underlying grid
and the actual model propagation.
"""
import numpy as np
import os
from matplotlib import pyplot as plt
from scipy.linalg.special_matrices import toeplitz
import Sampler
class Grid:
    """Regular nx-by-ny grid (cell sizes dx, dy) for the forward model,
    with a periodic (torus) pairwise distance matrix."""

    def __init__(self, nx, ny, dx, dy):
        # nx, ny: number of cells in x/y; dx, dy: cell sizes.
        self.nx = nx
        self.ny = ny
        self.N_x = self.nx * self.ny
        self.dx = dx
        self.dy = dy
        self.xdim = self.dx*self.nx
        self.ydim = self.dy*self.ny
        # Auxiliary matrix for the construction of the circulant distance
        # matrix: entry (j, i) is the torus distance for offsets (i, j),
        # wrapping around at half the domain.
        self.dist_toepitz = np.zeros((self.ny, self.nx))
        for i in range(self.nx):
            if i <= self.nx/2:
                di = i
            else:
                di = self.nx - i
            for j in range(self.ny):
                if j <= self.ny/2:
                    dj = j
                else:
                    dj = self.ny - j
                Dx = di * self.dx
                Dy = dj * self.dy
                self.dist_toepitz[j,i] = np.sqrt(Dx**2 + Dy**2)
        # Full N_x-by-N_x pairwise distance matrix, built by rolling the
        # Toeplitz block to every grid position (periodic boundary).
        self.dist_mat = np.zeros((self.N_x,self.N_x))
        for j in range(self.ny):
            toeplitz = np.roll(self.dist_toepitz,j,axis=0)
            for i in range(self.nx):
                self.dist_mat[j*self.nx + i] = np.reshape(np.roll(toeplitz,i,axis=1),(1,self.N_x))

    def point2idx(self, points):
        """Map one [x, y] grid point (or a list of such points) to the
        flat row-major index/indices into the length-N_x state vector."""
        if any(isinstance(el, list) for el in points):
            # List of points -> list of indices.
            idxs = []
            for point in points:
                indicator_field = np.zeros((self.ny, self.nx))
                indicator_field[point[1],point[0]] = 1.0
                idx = np.where(indicator_field.flatten() != 0 )[0][0]
                idxs.append(idx)
            return idxs
        else:
            # Single point -> single index.
            indicator_field = np.zeros((self.ny, self.nx))
            indicator_field[points[1],points[0]] = 1.0
            idx = np.where(indicator_field.flatten() != 0 )[0][0]
            return idx
class Simulator:
    """Forward model for the advection-diffusion equation on a periodic
    grid, including an additive model-error (noise) covariance Q."""

    def __init__(self, grid, D=0.05, v=[0.5,0.1], zeta=-0.0001, dt=0.01, noise_stddev=0.1, noise_matern_phi=1.0):
        """
        grid - Grid instance the model lives on
        D - diffusion parameter
        v = np.array([v_x,v_y]) - advection
        zeta - damping parameter
        dt - time-step length
        noise_stddev, noise_matern_phi - model-error sampler parameters
        """
        self.grid = grid
        # Propagation matrix of one explicit time step.
        self.M = self.matrix(D, v, zeta, dt)
        self.D = D
        self.v = v
        self.zeta = zeta
        self.dt = dt
        self.noise_matern_phi = noise_matern_phi
        self.noise_stddev = noise_stddev
        # Model-error covariance (also creates self.noise_sampler).
        self.Q = self.cov_matrix()

    @staticmethod
    def _neighborsDerivatives(i, ne, N):
        """Return the flat indices of the (under, left, right, over)
        neighbors of cell i on an ne-wide grid of N cells, with
        periodic (wrap-around) boundary conditions."""
        jumps = np.array((-ne, -1, 1, ne))
        # Wrap under.
        if (i - ne) < 0:
            jumps[0] = N - ne
        # Wrap over.
        if (i + ne) > N - 1:
            jumps[3] = ne - N
        # Wrap left.
        if (i % ne) == 0:
            jumps[1] = ne - 1
        # Wrap right.
        if (i % ne) == ne - 1:
            jumps[2] = -(ne - 1)
        return jumps + i

    def matrix(self, D, v, zeta, dt):
        """Assemble the explicit-Euler propagation matrix I + dt*M for
        the finite-difference advection/diffusion/damping operator."""
        N = self.grid.N_x
        ve = np.repeat(v[0], N)
        vn = np.repeat(v[1], N)
        zeta = np.repeat(zeta, N)
        dx = self.grid.dx
        dy = self.grid.dy
        # Central differences: main diagonal plus four neighbor bands.
        diag_const = zeta - 2*D/(dx**2) - 2*D/(dy**2)   # main
        diag_minus_1 = -(-ve/(2*dx)) + D/(dx**2)        # left
        diag_plus_1 = (-ve/(2*dx)) + D/(dx**2)          # right
        diag_minus_N = -(-vn/(2*dy)) + D/(dy**2)        # under
        diag_plus_N = (-vn/(2*dy)) + D/(dy**2)          # over
        M = np.diag(diag_const)
        for i in range(N):
            # neighbors = (under, left, right, over) with periodic wrap.
            neighbors = Simulator._neighborsDerivatives(i, self.grid.nx, N)
            M[i, neighbors[0]] = M[i, neighbors[0]] + diag_minus_N[i]
            M[i, neighbors[1]] = M[i, neighbors[1]] + diag_minus_1[i]
            M[i, neighbors[2]] = M[i, neighbors[2]] + diag_plus_1[i]
            M[i, neighbors[3]] = M[i, neighbors[3]] + diag_plus_N[i]
        return (np.diag(np.ones(N)) + dt*M)

    def cov_matrix(self):
        """Build the model-error sampler and return its covariance."""
        noise_args = {"mean_upshift" : 0.0,
                      "matern_phi" : self.noise_matern_phi,
                      "stddev" : self.noise_stddev}
        self.noise_sampler = Sampler.Sampler(self.grid, noise_args)
        return self.noise_sampler.cov

    def propagate(self, mean, cov=None, steps=1):
        """Propagate the mean (and optionally the covariance) for
        `steps` time steps.

        Returns (mean, cov) when a covariance is supplied, else mean.
        """
        for t in range(steps):
            mean = np.matmul(self.M, mean)
            if cov is not None:
                # BUGFIX: the original added `self.noise`, an attribute
                # that is never assigned anywhere in this class; the
                # model-error covariance is stored in `self.Q`.
                cov = np.matmul(self.M, np.matmul(cov, self.M.transpose())) + self.Q
        if cov is not None:
            return (mean, cov)
        else:
            return mean

    def plot_correlation_points(self, points):
        """Showing points on the grid together with the advection field."""
        plt.figure()
        plt.xlim(0, self.grid.nx-1)
        plt.ylim(0, self.grid.ny-1)
        plt.suptitle("Points for Correlation Study")
        plt.title("Chosen in Advection Direction")
        # Quiver arrows on a coarse sub-grid showing the advection vector.
        freq = self.grid.nx/10
        X, Y = np.meshgrid( np.arange(0,self.grid.nx,freq), np.arange(0,self.grid.ny,freq) )
        plt.quiver(X.flatten(), Y.flatten(), np.repeat(self.v[0],len(X.flatten())), np.repeat(self.v[1],len(Y.flatten())))
        i = 0
        for point in points:
            plt.scatter(point[0], point[1], c="red", s=150)
            plt.text(point[0], point[1], str(i), c="black", fontsize=12)
            i = i+1
        plt.show()

    def to_file(self, timestamp):
        """Write the grid and simulator parameters to
        experiment_files/experiment_<timestamp>/setup (the format read
        back by `from_file`)."""
        root_path = os.getcwd()
        new_path = os.path.join(root_path, "experiment_files")
        if not os.path.exists(new_path):
            os.makedirs(new_path)
        exp_path = os.path.join(new_path, "experiment_" + timestamp)
        os.makedirs(exp_path)
        file = "experiment_files/experiment_" + timestamp + "/setup"
        f = open(file, "a")
        f.write("--------------------------------------------\n")
        f.write("Setup for the advection diffusion experiment\n")
        f.write("--------------------------------------------\n")
        f.write("The grid:\n")
        f.write("grid.nx = " + str(self.grid.nx) + "\n")
        f.write("grid.ny = " + str(self.grid.ny) + "\n")
        f.write("grid.dx = " + str(self.grid.dx) + "\n")
        f.write("grid.dy = " + str(self.grid.dy) + "\n")
        f.write("--------------------------------------------\n")
        f.write("The parameters for the advection diffusion equation:\n")
        f.write("simulator.D = " + str(self.D) + "\n")
        f.write("simulator.v = " + str(self.v) + "\n")
        f.write("simulator.zeta = " + str(self.zeta) + "\n")
        f.write("simulator.dt = " + str(self.dt) + "\n")
        f.write("simulator.noise_stddev = " + str(self.noise_stddev) + "\n")
        f.write("simulator.noise_matern_phi = " + str(self.noise_matern_phi) + "\n")
        f.close()
def from_file(timestamp):
    """Rebuild a Grid and a Simulator from an experiment setup file.

    Parses "experiment_files/experiment_<timestamp>/setup" line by line
    using fixed character offsets per line (the exact format written by
    `Simulator.to_file`), so any change to that format breaks parsing.

    NOTE(review): defined without `self`; confirm whether this is
    intended as a module-level helper rather than an instance method.
    """
    f = open("experiment_files/experiment_"+timestamp+"/setup", "r")
    # Skip the three header lines and the "The grid:" line.
    f.readline()
    f.readline()
    f.readline()
    f.readline()
    nx = int(f.readline()[10:-1])
    ny = int(f.readline()[10:-1])
    dx = float(f.readline()[10:-1])
    dy = float(f.readline()[10:-1])
    grid = Grid(nx,ny,dx,dy)
    # Skip the separator and the parameter-section heading.
    f.readline()
    f.readline()
    D = float(f.readline()[14:-1])
    # The velocity is stored as a Python-list repr, e.g. "[0.5, 0.1]".
    v = f.readline()[14:-1].strip('][').split(', ')
    v[0] = float(v[0])
    v[1] = float(v[1])
    zeta = float(f.readline()[17:-1])
    dt = float(f.readline()[15:-1])
    noise_stddev = float(f.readline()[25:-1])
    noise_matern_phi = float(f.readline()[29:-1])
    simulator = Simulator(grid, D=D, v=v, zeta=zeta, dt=dt,
                          noise_stddev=noise_stddev, noise_matern_phi=noise_matern_phi)
    f.close()
    return grid, simulator
| 8,003 | 30.888446 | 122 | py |
advectionDiffusion | advectionDiffusion-main/Statistics.py | """
Mean and Variance for the advection diffusion example
(eventually in ensemble representation)
"""
import Ensemble
import Sampler
import numpy as np
import linecache
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import copy
class Statistics:
def __init__(self, simulator, N_e=0, safe_history=False):
"""Class for handling the mean and cov throughout times"""
self.simulator = simulator
# Allocation
self.mean = np.zeros([self.simulator.grid.N_x])
self.stddev = np.zeros([self.simulator.grid.N_x])
self.cov = np.zeros([self.simulator.grid.N_x,self.simulator.grid.N_x])
self.safe_history = safe_history
if safe_history:
self.prev_mean = np.zeros([self.simulator.grid.N_x])
self.prev_stddev = np.zeros([self.simulator.grid.N_x])
self.prev_cov = np.zeros([self.simulator.grid.N_x,self.simulator.grid.N_x])
self.forecast_mean = np.zeros([self.simulator.grid.N_x])
self.forecast_stddev = np.zeros([self.simulator.grid.N_x])
self.forecast_cov = np.zeros([self.simulator.grid.N_x,self.simulator.grid.N_x])
# Default is analytical
if N_e > 0:
self.ensemble = Ensemble.Ensemble(simulator, N_e)
if self.safe_history:
self.prev_ensemble = Ensemble.Ensemble(simulator, N_e)
self.forecast_ensemble = Ensemble.Ensemble(simulator, N_e)
else:
self.ensemble = None
print("Please remember to set priors!")
def ensemble_statistics(self):
self.mean = np.average(self.ensemble.ensemble, axis = 1)
if self.ensemble.N_e > 1:
self.cov = 1/(self.ensemble.N_e-1)*\
(self.ensemble.ensemble - np.reshape(self.mean, (self.simulator.grid.N_x,1))) \
@ (self.ensemble.ensemble - np.reshape(self.mean, (self.simulator.grid.N_x,1))).transpose()
self.stddev = np.sqrt(np.diag(self.cov))
def set(self, mean, cov):
"""Setting the member variables from input arguments"""
self.mean = mean
self.stddev = np.sqrt(np.diag(cov))
self.cov = cov
def set_prior(self, prior_args):
prior_sampler = Sampler.Sampler(self.simulator.grid, prior_args)
if self.ensemble is not None:
self.ensemble.initialize(prior_sampler)
self.ensemble_statistics()
else:
self.mean = prior_sampler.mean
self.cov = prior_sampler.cov
self.stddev = np.sqrt(np.diag(self.cov))
self.vmin_mean = np.min(prior_sampler.mean) - 0.5
self.vmax_mean = np.max(prior_sampler.mean) + 0.5
self.vmax_cov = np.max(self.cov)
def set_ensemble(self, ensemble):
self.ensemble.set(ensemble)
self.ensemble_statistics()
def plot(self, mean=None, stddev=None, cov=None):
"""Plotting mean, stddev, and cov in a unified graphics"""
fig, axs = plt.subplots(1,3, figsize=(12,4))
if mean is None:
mean = np.reshape(self.mean, (self.simulator.grid.ny,self.simulator.grid.nx))
fig0 = axs[0].imshow(mean, origin = "lower", vmin=self.vmin_mean, vmax=self.vmax_mean)
axs[0].set_title("Mean")
ax_divider = make_axes_locatable(axs[0])
ax_cb = ax_divider.append_axes("bottom", size="10%", pad="20%")
plt.colorbar(fig0, cax=ax_cb, orientation="horizontal")
if stddev is None:
stddev = np.reshape(self.stddev, (self.simulator.grid.ny,self.simulator.grid.nx))
fig1 = axs[1].imshow(stddev, origin = "lower", vmin=0.0, vmax=np.sqrt(self.vmax_cov))
axs[1].set_title("Standard Deviation")
ax_divider = make_axes_locatable(axs[1])
ax_cb = ax_divider.append_axes("bottom", size="10%", pad="20%")
plt.colorbar(fig1, cax=ax_cb, orientation="horizontal")
if cov is None:
cov = self.cov
fig2 = axs[2].imshow(cov, vmin=0.0, vmax=self.vmax_cov)
axs[2].set_title("Covariance Matrix")
ax_divider = make_axes_locatable(axs[2])
ax_cb = ax_divider.append_axes("bottom", size="10%", pad="20%")
plt.colorbar(fig2, cax=ax_cb, orientation="horizontal")
plt.show()
def propagate(self, nt, model_error=True):
    """Propagate the model state for nt simulator time steps.

    NOTE: nt simulator steps form one (DA) model time step, wherefore a
    distinct (DA) forward matrix ``self.M`` is constructed here from the
    simulator matrix.

    Analytical distributions (``self.ensemble is None``) are propagated
    without model error; ensembles are propagated with model error unless
    ``model_error`` is False.

    nt          -- number of simulator steps folded into one model step
    model_error -- if True, add sampled model noise to the ensemble forecast
    """
    # Construct forward step matrix (by multiple steps from simulator matrix)
    self.M = np.linalg.matrix_power(self.simulator.M, nt)
    if self.safe_history:
        # Keep pre-propagation statistics (used e.g. by evaluate_correlation)
        self.prev_mean = self.mean
        self.prev_stddev = np.sqrt(np.diag(self.cov))
        self.prev_cov = self.cov
        if self.ensemble is not None:
            self.prev_ensemble.ensemble = self.ensemble.ensemble
    # Propagate
    # - with model error for ensembles
    # - without model error for analytical distributions
    if self.ensemble is None:
        # Analytical Gaussian propagation: mean and covariance closed form
        self.mean = self.M @ self.mean
        self.cov = self.M @ self.cov @ self.M.T + self.simulator.Q
    else:
        if model_error:
            forecast = self.M @ self.ensemble.ensemble + self.simulator.noise_sampler.sample(self.ensemble.N_e)
        else:
            forecast = self.M @ self.ensemble.ensemble
        self.ensemble.set(forecast)
        self.ensemble_statistics()
    if self.safe_history:
        # Store forecast statistics (state after propagation, before analysis)
        if self.ensemble is None:
            self.forecast_mean = self.mean
            self.forecast_stddev = np.sqrt(np.diag(self.cov))
            self.forecast_cov = self.cov
        elif self.ensemble is not None:
            if not model_error:
                # The historical forecast always carries model error, even if
                # the propagated ensemble itself was kept noise-free above
                forecast = forecast + self.simulator.noise_sampler.sample(self.ensemble.N_e)
                print("Model error in historical forecast added")
            self.forecast_ensemble.ensemble = forecast
            self.forecast_mean = np.average(self.forecast_ensemble.ensemble, axis = 1)
            if self.forecast_ensemble.N_e > 1:
                # Sample covariance of the forecast ensemble (unbiased, 1/(N_e-1))
                self.forecast_cov = 1/(self.forecast_ensemble.N_e-1)*\
                    (self.forecast_ensemble.ensemble - np.reshape(self.forecast_mean, (self.simulator.grid.N_x,1))) \
                    @ (self.forecast_ensemble.ensemble - np.reshape(self.forecast_mean, (self.simulator.grid.N_x,1))).transpose()
                self.forecast_stddev = np.sqrt(np.diag(self.forecast_cov))
def evaluate_correlation(self, points):
    """Evaluate the correlation between the state at point p0 at time t0
    (before the last propagation) and point p1 at time t1 (forecast).

    (For details see mail by Jo from 09.12.21.)

    points -- pair of grid points [p0, p1], each given as grid coordinates
    Returns the scalar correlation estimate.

    NOTE(review): relies on prev_* and forecast_* attributes, i.e. requires a
    preceding propagate(...) call with safe_history enabled -- confirm callers.
    """
    idxs = self.simulator.grid.point2idx(points)
    if self.ensemble is not None:
        # Ensemble estimate: sample cross-covariance between the two points
        mean_point0 = self.prev_mean[idxs[0]]
        mean_point1 = self.forecast_mean[idxs[1]]
        stddev_point0 = self.prev_stddev[idxs[0]]
        stddev_point1 = self.forecast_stddev[idxs[1]]
        cov_point2point = 1/(self.ensemble.N_e-1) * (self.prev_ensemble.ensemble[idxs[0]] - mean_point0) @ (self.forecast_ensemble.ensemble[idxs[1]] - mean_point1)
        corr_point2point = cov_point2point/(stddev_point0*stddev_point1)
    else:
        # Analytical estimate: cross-covariance between t0 and t1 is M @ prev_cov
        scale0 = self.prev_stddev[idxs[0]]
        scale1 = self.forecast_stddev[idxs[1]]
        cov_point2point = (self.M @ self.prev_cov)[idxs[1],idxs[0]]
        corr_point2point = cov_point2point/(scale0*scale1)
    return corr_point2point
def prior_args_from_file(timestamp):
    """Reconstruct the prior-argument dict from a stored experiment setup file.

    Reads fixed line numbers (23-28) of
    ``experiment_files/experiment_<timestamp>/setup`` and strips the
    fixed-width ``name = `` label prefix from each line.

    timestamp -- experiment folder suffix
    Returns a dict with keys mean_upshift, bell_center, bell_sharpness,
    bell_scaling, matern_phi and stddev.
    """
    setup_path = "experiment_files/experiment_" + timestamp + "/setup"

    def _tail(lineno, prefix_len):
        # drop the "<name> = " label prefix and the trailing newline
        return linecache.getline(setup_path, lineno)[prefix_len:-1]

    # bell_center is stored as a Python-list literal, e.g. "[2.0, 3.0]"
    center_raw = _tail(24, 14).strip('][').split(', ')
    bell_center = [float(center_raw[0]), float(center_raw[1])]

    return {"mean_upshift": float(_tail(23, 15)),
            "bell_center": bell_center,
            "bell_sharpness": float(_tail(25, 17)),
            "bell_scaling": float(_tail(26, 15)),
            "matern_phi": float(_tail(27, 13)),
            "stddev": float(_tail(28, 8))}
advectionDiffusion | advectionDiffusion-main/Comparer.py | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1 import AxesGrid
import scipy.stats
from statsmodels.distributions.empirical_distribution import ECDF
class Comparer:
    """Side-by-side comparison of ensemble DA results (ETKF, LETKF, IEWPF and
    optionally a plain Monte Carlo run) against the analytical Kalman filter,
    which serves as the reference solution for every metric.

    Fixes vs. previous version:
    - the IEWPF error panel in stddev_plots was mislabeled "LETKF Error"
    - scipy.integrate is now imported explicitly in poi_ecdf_err (the module
      header only imports scipy.stats)
    - the heavily duplicated panel/colorbar code is factored into helpers.
    """

    def __init__(self, statistics_kf, statistics_etkf, statistics_letkf, statistics_iewpf, statistics_mc=None):
        """Store the per-method Statistics objects; KF is the reference."""
        self.statistics_kf = statistics_kf
        self.statistics_etkf = statistics_etkf
        self.statistics_letkf = statistics_letkf
        self.statistics_iewpf = statistics_iewpf
        # MC statistics are optional; metrics then return one value less
        self.statistics_mc = statistics_mc
        self.grid = self.statistics_kf.simulator.grid
        self.poi = []            # flat state indices of points of interest
        self.corr_ref_pois = []  # [x, y] reference points for correlation maps

    # ------------------------------------------------------------------
    # internal helpers
    # ------------------------------------------------------------------
    def _methods(self, with_mc=False):
        """(label, statistics) pairs in fixed order, KF first."""
        methods = [("KF", self.statistics_kf), ("ETKF", self.statistics_etkf),
                   ("LETKF", self.statistics_letkf), ("IEWPF", self.statistics_iewpf)]
        if with_mc and self.statistics_mc is not None:
            methods.append(("MC", self.statistics_mc))
        return methods

    def _field(self, statistics, attr):
        """Reshape the flat state statistic ``attr`` onto the (ny, nx) grid."""
        return np.reshape(getattr(statistics, attr), (self.grid.ny, self.grid.nx))

    @staticmethod
    def _add_colorbar(image, ax):
        """Attach a horizontal colorbar below ``ax``."""
        ax_divider = make_axes_locatable(ax)
        ax_cb = ax_divider.append_axes("bottom", size="10%", pad="20%")
        plt.colorbar(image, cax=ax_cb, orientation="horizontal")

    def _stat_plots(self, attr, word, vmin, vmax, err_lim):
        """2x4 panel: statistic per method (top) and deviation from KF (bottom).

        Returns the list of error fields in method order (KF error is zero).
        """
        fig, axs = plt.subplots(2, 4, figsize=(12, 8))
        ref = self._field(self.statistics_kf, attr)
        errs = []
        for j, (label, stats) in enumerate(self._methods()):
            top = axs[0, j].imshow(self._field(stats, attr), origin="lower", vmin=vmin, vmax=vmax)
            axs[0, j].set_title(label + " " + word)
            self._add_colorbar(top, axs[0, j])
            err = self._field(stats, attr) - ref
            errs.append(err)
            bottom = axs[1, j].imshow(err, origin="lower", vmin=-err_lim, vmax=err_lim, cmap="bwr")
            axs[1, j].set_title(label + " Error")
            self._add_colorbar(bottom, axs[1, j])
        plt.show()
        return errs

    def _rmse(self, attr):
        """RMSE of each method's ``attr`` field against the KF field."""
        ref = self._field(self.statistics_kf, attr)
        return tuple(np.sqrt(np.sum((self._field(stats, attr) - ref) ** 2))
                     for _, stats in self._methods(with_mc=True))

    def _cov_frobenius(self, mask=None):
        """Frobenius distance of each method's covariance to the KF covariance,
        optionally restricted to a boolean entry mask."""
        dists = []
        for _, stats in self._methods(with_mc=True):
            diff = self.statistics_kf.cov - stats.cov
            if mask is not None:
                diff = diff[mask]
            dists.append(np.linalg.norm(diff))
        return tuple(dists)

    def _poi_reference(self, i):
        """KF mean/stddev at PoI i plus the +-3 sigma plotting range."""
        loc = self.statistics_kf.mean[self.poi[i]]
        scale = self.statistics_kf.stddev[self.poi[i]]
        return loc, scale, loc - 3 * scale, loc + 3 * scale

    def _corr_grids(self, i):
        """Point-to-point correlation fields from reference PoI i, per method."""
        grids = [np.zeros((self.grid.nx, self.grid.ny)) for _ in range(4)]
        for x in range(self.grid.nx):
            for y in range(self.grid.ny):
                for g, (_, stats) in zip(grids, self._methods()):
                    g[x, y] = stats.evaluate_correlation([self.corr_ref_pois[i], [x, y]])
        return grids

    # ------------------------------------------------------------------
    # mean
    # ------------------------------------------------------------------
    def mean_plots(self):
        """Plot posterior means and mean errors vs KF.

        Returns the ETKF, LETKF and IEWPF mean-error fields.
        """
        errs = self._stat_plots("mean", "Mean", 10, 15, 0.1)
        return errs[1], errs[2], errs[3]

    def mean_rmse(self):
        """RMSE of each method's mean against the KF mean (KF itself gives 0)."""
        return self._rmse("mean")

    # ------------------------------------------------------------------
    # stddev
    # ------------------------------------------------------------------
    def stddev_plots(self):
        """Plot posterior stddev fields and their errors vs KF.

        Bug fixed: the IEWPF error panel was previously titled "LETKF Error".
        """
        self._stat_plots("stddev", "Stddev", 0, 0.2, 0.05)

    def stddev_rmse(self):
        """RMSE of each method's stddev field against the KF stddev field."""
        return self._rmse("stddev")

    # ------------------------------------------------------------------
    # covariance
    # ------------------------------------------------------------------
    def cov_plots(self):
        """Plot covariance matrices (top) and KF-minus-method errors (bottom)."""
        fig, axs = plt.subplots(2, 4, figsize=(12, 8))
        for j, (label, stats) in enumerate(self._methods()):
            top = axs[0, j].imshow(stats.cov, vmin=0, vmax=0.01)
            axs[0, j].set_title(label + " Cov")
            self._add_colorbar(top, axs[0, j])
            bottom = axs[1, j].imshow(self.statistics_kf.cov - stats.cov,
                                      vmin=-0.005, vmax=0.005, cmap="bwr")
            axs[1, j].set_title(label + " Error")
            self._add_colorbar(bottom, axs[1, j])
        plt.show()

    def cov_frobenius_dist(self):
        """Frobenius distance of each covariance to the KF covariance."""
        return self._cov_frobenius()

    def cov_frobenius_dist_close(self):
        """Frobenius distance restricted to grid-point pairs closer than 1."""
        return self._cov_frobenius(self.grid.dist_mat < 1)

    def cov_frobenius_dist_far(self):
        """Frobenius distance restricted to grid-point pairs farther than 1."""
        return self._cov_frobenius(self.grid.dist_mat > 1)

    # ------------------------------------------------------------------
    # points of interest
    # ------------------------------------------------------------------
    def set_poi(self, pos):
        """Register a point of interest given as [x, y] grid coordinates.

        Stored as the flat (row-major) index into the state vector.
        """
        self.poi.append(pos[1] * self.grid.nx + pos[0])

    def poi_plot(self, observation=None):
        """Show the KF mean field with observation sites and numbered PoIs."""
        plt.imshow(self._field(self.statistics_kf, "mean"), origin="lower", vmin=10, vmax=15)
        if observation is not None:
            for pos in observation.positions:
                plt.scatter(pos[0], pos[1], c="red")
        plt.xlim(0, self.grid.nx - 1)
        plt.ylim(0, self.grid.ny - 1)
        for i, idx in enumerate(self.poi):
            # invert the row-major flattening of set_poi
            y, x = divmod(idx, self.grid.nx)
            plt.scatter(x, y, c="black", s=250)
            plt.text(x, y, str(i), c="white", fontsize=12)
        plt.show()

    def poi_hist(self, i):
        """Histogram the ensembles at PoI i against the KF marginal density."""
        loc, scale, xmin, xmax = self._poi_reference(i)
        x = np.arange(xmin, xmax, 0.01)
        density = scipy.stats.norm.pdf(x, loc=loc, scale=scale)
        ymax = np.max(density) + 1
        fig, axs = plt.subplots(1, 4, figsize=(12, 4))
        axs[0].plot(x, density)
        axs[0].set_title("KF Density")
        axs[0].set_ylabel("Point of Interest" + str(i))
        axs[0].set_ylim([0.0, ymax])
        ensembles = [("ETKF", self.statistics_etkf),
                     ("LETKF", self.statistics_letkf),
                     ("IEWPF", self.statistics_iewpf)]
        for j, (label, stats) in enumerate(ensembles, start=1):
            axs[j].hist(stats.ensemble.ensemble[self.poi[i], :], density=True, bins=40, range=(xmin, xmax))
            axs[j].plot(x, density)
            axs[j].set_title(label + " Density")
            axs[j].set_ylim([0.0, ymax])
        plt.show()

    def poi_ecdf_plots(self, i):
        """Plot the ensemble ECDFs at PoI i against the KF marginal CDF.

        Returns the KF cdf callable and the sorted ETKF/LETKF/IEWPF samples.
        """
        loc, scale, xmin, xmax = self._poi_reference(i)
        cdf = lambda x: scipy.stats.norm.cdf(x, loc=loc, scale=scale)
        etkf_values = self.statistics_etkf.ensemble.ensemble[self.poi[i], :]
        letkf_values = self.statistics_letkf.ensemble.ensemble[self.poi[i], :]
        iewpf_values = self.statistics_iewpf.ensemble.ensemble[self.poi[i], :]
        X = np.arange(xmin, xmax, 0.01)
        fig, axs = plt.subplots(1, 4, figsize=(12, 4))
        axs[0].plot(X, cdf(X))
        axs[0].set_title("KF CDF")
        axs[0].set_ylabel("Point of Interest" + str(i))
        for ax, label, values in zip(axs[1:], ("ETKF", "LETKF", "IEWPF"),
                                     (etkf_values, letkf_values, iewpf_values)):
            ax.plot(X, cdf(X))
            ax.plot(X, ECDF(values)(X))
            ax.set_title(label + " ECDF")
        plt.show()
        return cdf, np.sort(etkf_values), np.sort(letkf_values), np.sort(iewpf_values)

    def poi_ecdf_err(self, i):
        """Integrated absolute distance between each ensemble ECDF and the KF
        CDF over the KF +-3 sigma interval at PoI i."""
        # module header only imports scipy.stats; make integrate explicit
        from scipy.integrate import quad
        loc, scale, xmin, xmax = self._poi_reference(i)
        cdf = lambda x: scipy.stats.norm.cdf(x, loc=loc, scale=scale)

        def ecdf_dist(stats):
            # L1 distance between analytic CDF and the empirical CDF
            ecdf = ECDF(stats.ensemble.ensemble[self.poi[i], :])
            return quad(lambda x: abs(cdf(x) - ecdf(x)), xmin, xmax, limit=100)[0]

        errs = (ecdf_dist(self.statistics_etkf),
                ecdf_dist(self.statistics_letkf),
                ecdf_dist(self.statistics_iewpf))
        if self.statistics_mc is not None:
            return errs + (ecdf_dist(self.statistics_mc),)
        return errs

    # ------------------------------------------------------------------
    # point-to-point correlations
    # ------------------------------------------------------------------
    def set_corr_ref_pois(self, corr_ref_pois):
        """Set the list of [x, y] reference points for correlation maps."""
        self.corr_ref_pois = corr_ref_pois

    def corr_p2p_err(self, i):
        """Frobenius distance of each method's correlation field (from
        reference PoI i) to the KF correlation field."""
        kf_grid, etkf_grid, letkf_grid, iewpf_grid = self._corr_grids(i)
        return (np.linalg.norm(kf_grid - etkf_grid),
                np.linalg.norm(kf_grid - letkf_grid),
                np.linalg.norm(kf_grid - iewpf_grid))

    def corr_p2p_plot(self, i, observation=None):
        """Plot correlation fields from reference PoI i for all methods.

        Returns the KF, ETKF, LETKF and IEWPF correlation grids.
        """
        kf_grid, etkf_grid, letkf_grid, iewpf_grid = self._corr_grids(i)
        fig = plt.figure(figsize=(8, 4))
        axs = AxesGrid(fig, (0.0, 0.0, 1.0, 1.0), nrows_ncols=(1, 4), axes_pad=0.1,
                       cbar_mode="single", cbar_location="right", cbar_pad=0.1)
        ref = self.corr_ref_pois[i]
        images = []
        for ax, label, g in zip(axs, ("Kalman", "ETKF", "LETKF", "IEWPF"),
                                (kf_grid, etkf_grid, letkf_grid, iewpf_grid)):
            images.append(ax.imshow(g.T, vmin=-1, vmax=1, cmap="seismic", origin="lower"))
            ax.set_title(label)
            ax.scatter(ref[0], ref[1], s=100, c="black", marker="x")
        if observation is not None:
            axs[0].scatter(np.array(observation.positions)[:, 0],
                           np.array(observation.positions)[:, 1], c="black", s=30)
        cbar = axs[0].cax.colorbar(images[3])
        plt.show()
        return kf_grid, etkf_grid, letkf_grid, iewpf_grid
advectionDiffusion | advectionDiffusion-main/RunningWriter.py | import numpy as np
import os
import datetime
class RunningWriter:
    """Accumulates per-trial comparison metrics (RMSEs, Frobenius distances,
    ECDF and correlation errors) for the ETKF/LETKF/IEWPF/MC methods and
    writes summaries to experiment result files.

    Fixes vs. previous version:
    - results2file indexed the stats dict with call syntax (``stats(...)``)
      and used key names (``avg_corr_p2p_err_*``) that results() never
      produces; both caused runtime errors
    - the correlation loop in results2file iterated range(N_poi) over arrays
      sized N_corr_poi
    - results(mode="std") labeled 2-D statistics with an "avg_" prefix
    - the attribute filter now selects numpy arrays explicitly, so a stored
      ``result_timestamp`` string no longer breaks results().
    """

    def __init__(self, trials, N_poi, N_corr_poi):
        """Allocate zeroed metric arrays for ``trials`` repetitions.

        N_poi      -- number of points of interest (ECDF errors)
        N_corr_poi -- number of correlation reference points
        """
        self.trials = trials
        self.N_poi = N_poi
        self.N_corr_poi = N_corr_poi
        # One array per (metric, method); names must match the keys produced
        # by Comparer via results2write (minus the "avg_"/"std_" prefix).
        for m in ("etkf", "letkf", "iewpf", "mc"):
            setattr(self, "mean_rmse_" + m + "s", np.zeros(trials))
            setattr(self, "stddev_rmse_" + m + "s", np.zeros(trials))
            setattr(self, "cov_frob_" + m + "s", np.zeros(trials))
            setattr(self, "cov_frob_" + m + "s_close", np.zeros(trials))
            setattr(self, "cov_frob_" + m + "s_far", np.zeros(trials))
            setattr(self, "ecdf_err_" + m + "s", np.zeros((N_poi, trials)))
            setattr(self, "corr_err_" + m, np.zeros((N_corr_poi, trials)))

    def _stat_names(self):
        """Names of all per-trial metric arrays (skips counters, methods and
        the result_timestamp string)."""
        return [n for n in dir(self)
                if not n.startswith("_") and isinstance(getattr(self, n), np.ndarray)]

    def header2file(self, N_e, trails_truth, trails_init, timestamp=None):
        """Write the run-description header and fix the result timestamp used
        by subsequent results2file calls. No-op without a timestamp."""
        if timestamp is None:
            return
        self.result_timestamp = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
        file = "experiment_files/experiment_" + timestamp + "/results_" + self.result_timestamp
        with open(file, "a") as f:
            f.write("--------------------------------------------\n")
            f.write("Same model, ")
            f.write(str(trails_truth) + " truthes, ")
            f.write(str(trails_init) + " ensemble initialisations ")
            f.write("with " + str(N_e) + " ensemble members\n")
            f.write("--------------------------------------------\n")

    def results(self, mode="avg"):
        """Reduce every metric array over the trial axis.

        mode -- "avg" for the mean, anything else for the standard deviation
        Returns a dict keyed ``<mode>_<metric_name>``; 2-D metrics (per PoI)
        reduce to a vector, 1-D metrics to a scalar.
        """
        reducer = np.average if mode == "avg" else np.std
        stats = {}
        for var_name in self._stat_names():
            var = getattr(self, var_name)
            if var.ndim > 1:
                stats[mode + "_" + var_name] = reducer(var, axis=1)
            else:
                stats[mode + "_" + var_name] = reducer(var)
        return stats

    def results2file(self, timestamp=None, table=None, title=None, mode="avg"):
        """Write averaged results to the experiment folder.

        Without ``table``: append a human-readable summary to the file opened
        by header2file. With ``table``: dump all metric columns appended to
        ``table`` via np.savetxt, one column per metric (2-D metrics get one
        column per PoI). No-op without a timestamp.
        """
        if timestamp is None:
            return
        if table is None:
            file = "experiment_files/experiment_" + timestamp + "/results_" + self.result_timestamp
            stats = self.results()
            with open(file, "a") as f:
                f.write("--------------------------------------------\n")
                f.write("Results from the Comparison of ETKF and LETKF\n")
                f.write("versus the analytical posterior from the KF\n")
                f.write("--------------------------------------------\n")
                f.write("Mean RMSE EKTF = " + str(stats["avg_mean_rmse_etkfs"]) + "\n")
                f.write("Mean RMSE LEKTF = " + str(stats["avg_mean_rmse_letkfs"]) + "\n")
                f.write("Mean RMSE IEWPF = " + str(stats["avg_mean_rmse_iewpfs"]) + "\n")
                f.write("Mean RMSE MC = " + str(stats["avg_mean_rmse_mcs"]) + "\n")
                f.write("\n")
                f.write("Stddev RMSE EKTF = " + str(stats["avg_stddev_rmse_etkfs"]) + "\n")
                f.write("Stddev RMSE LEKTF = " + str(stats["avg_stddev_rmse_letkfs"]) + "\n")
                f.write("Stddev RMSE IEWPF = " + str(stats["avg_stddev_rmse_iewpfs"]) + "\n")
                f.write("Stddev RMSE MC = " + str(stats["avg_stddev_rmse_mcs"]) + "\n")
                f.write("\n")
                f.write("Cov Frobenius ETKF = " + str(stats["avg_cov_frob_etkfs"]) + "\n")
                f.write("Cov Frobenius LETKF = " + str(stats["avg_cov_frob_letkfs"]) + "\n")
                f.write("Cov Frobenius IEWPF = " + str(stats["avg_cov_frob_iewpfs"]) + "\n")
                f.write("Cov Frobenius MC = " + str(stats["avg_cov_frob_mcs"]) + "\n")
                f.write("\n")
                f.write("Cov Frobenius ETKF (close) = " + str(stats["avg_cov_frob_etkfs_close"]) + "\n")
                f.write("Cov Frobenius LETKF (close) = " + str(stats["avg_cov_frob_letkfs_close"]) + "\n")
                f.write("Cov Frobenius IEWPF (close) = " + str(stats["avg_cov_frob_iewpfs_close"]) + "\n")
                f.write("Cov Frobenius MC (close) = " + str(stats["avg_cov_frob_mcs_close"]) + "\n")
                f.write("\n")
                f.write("Cov Frobenius ETKF (far) = " + str(stats["avg_cov_frob_etkfs_far"]) + "\n")
                f.write("Cov Frobenius LETKF (far) = " + str(stats["avg_cov_frob_letkfs_far"]) + "\n")
                f.write("Cov Frobenius IEWPF (far) = " + str(stats["avg_cov_frob_iewpfs_far"]) + "\n")
                f.write("Cov Frobenius MC (far) = " + str(stats["avg_cov_frob_mcs_far"]) + "\n")
                f.write("\n")
                for p in range(self.N_poi):
                    f.write("ECDF Dist at PoI" + str(p) + " ETKF = " + str(stats["avg_ecdf_err_etkfs"][p]) + "\n")
                    f.write("ECDF Dist at PoI" + str(p) + " LETKF = " + str(stats["avg_ecdf_err_letkfs"][p]) + "\n")
                    f.write("ECDF Dist at PoI" + str(p) + " IEWPF = " + str(stats["avg_ecdf_err_iewpfs"][p]) + "\n")
                # fixed: correlation arrays are sized N_corr_poi, not N_poi
                for p in range(self.N_corr_poi):
                    f.write("Correlation error from point" + str(p) + " ETKF = " + str(stats["avg_corr_err_etkf"][p]) + "\n")
                    f.write("Correlation error from point" + str(p) + " LETKF = " + str(stats["avg_corr_err_letkf"][p]) + "\n")
                    f.write("Correlation error from point" + str(p) + " IEWPF = " + str(stats["avg_corr_err_iewpf"][p]) + "\n")
                f.write("\n")
        else:
            result_timestamp = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
            file = "experiment_files/experiment_" + timestamp + "/results_" + result_timestamp + "_" + mode
            headers = title
            for var_name in sorted(self._stat_names()):
                var = getattr(self, var_name)
                if var.ndim > 1:
                    for i in range(var.shape[0]):
                        headers = headers + " " + var_name + str(i)
                        table = np.column_stack((table, var[i]))
                else:
                    headers = headers + " " + mode + "_" + var_name
                    table = np.column_stack((table, var))
            np.savetxt(file, table, header=headers)

    def results2write(self, stats, trial):
        """Store one trial's stats dict (as produced by results()-style keys)
        into the metric arrays at column ``trial``."""
        for key, value in stats.items():
            # strip the 4-char "avg_"/"std_" prefix to get the attribute name
            target = getattr(self, key[4:])
            if np.isscalar(value):
                target[trial] = value
            else:
                target[:, trial] = value
| 8,360 | 45.709497 | 146 | py |
advectionDiffusion | advectionDiffusion-main/IEWParticleFilter.py | """
Kalman filter update for advection diffusion example.
"""
import numpy as np
from scipy.special import gammainc
from scipy.special import lambertw
from scipy.optimize import fsolve
from scipy.linalg import sqrtm
import sys
class IEWParticle:
    """One analysis step of an implicit equal-weights particle filter (IEWPF).

    NOTE(review): naming (beta/alpha scalings, proposal matrix P, innovation
    covariance S) follows the IEWPF literature -- confirm against the
    reference used for this implementation.
    """

    def __init__(self, statistics, observation, beta=None, alpha=None):
        """Precompute the matrices reused in every filter(...) call.

        statistics  -- Statistics object providing the ensemble and simulator
        observation -- observation model providing N_y, H and R
        beta        -- fixed beta scaling; computed adaptively if None
        alpha       -- fixed alpha scaling (debug only); computed per member if None
        """
        self.statistics = statistics
        self.N_e = self.statistics.ensemble.N_e
        # From simulator
        self.N_x = self.statistics.simulator.grid.N_x
        self.Q = self.statistics.simulator.Q
        # From observation
        self.N_y = observation.N_y
        self.H = observation.H
        # Inverse of (Q^-1 + H^T R^-1 H); its (real) matrix square root scales
        # the perturbation directions in the update below
        Pinv = np.linalg.inv( np.linalg.inv(self.Q) + self.H.T @ np.linalg.inv(observation.R) @ self.H )
        self.sqrtPinv = np.real(sqrtm(Pinv))
        # Innovation precision S = (H Q H^T + R)^-1
        self.S = np.linalg.inv( self.H @ self.Q @ self.H.T + observation.R)
        self.beta = beta
        self.betas = []  # history of the beta values used per analysis step
        # Only for debug purposes
        self.alpha = alpha

    def filter(self, ensemble, obs):
        """Assimilate observation ``obs`` into ``ensemble`` (N_x x N_e).

        Updates the attached statistics object and returns the updated
        ensemble array.
        """
        # Innovations and weights
        d = np.reshape(obs, (self.N_y,1)) - self.H @ ensemble
        phis = np.zeros(self.N_e)
        etas = np.zeros((self.N_e, self.N_x))
        for e in range(self.N_e):
            # phi_e: squared innovation weighted by the innovation precision
            phis[e] = d[:,e] @ self.S @ d[:,e]
            # eta_e: per-member standard-normal perturbation direction
            etas[e] = np.random.standard_normal(self.N_x)
        cs = phis - np.log(1/self.N_e)
        c_bar = np.average(cs)
        if self.beta is None:
            # Target weight: adaptive beta as the minimum over members so that
            # every c_star below stays non-negative
            tmp = np.zeros(self.N_e)
            for e in range(self.N_e):
                tmp[e] = (c_bar - cs[e])/(etas[e]@etas[e]) + 1
            beta = np.min(tmp)
        else:
            beta = self.beta
        self.betas.append(beta)
        # Get c_star
        c_stars = np.zeros(self.N_e)
        for e in range(self.N_e):
            c_stars[e] = c_bar - cs[e] - (beta-1)*(etas[e]@etas[e])
            #c_stars[e] = np.max(cs) - cs[e] - (beta-1)*(etas[e]@etas[e])
        updated_ensemble = np.zeros_like(ensemble)
        # Per ensemble member!
        for e in range(self.N_e):
            # Sampling random vectors: xi is z projected orthogonal to eta_e
            z = np.random.standard_normal(self.N_x)
            xi = z - etas[e] * (z@etas[e])/(etas[e]@etas[e])
            if self.alpha is None:
                # Compute alpha
                # fun = lambda alpha, m, x: gammainc(m, alpha*x)/gammainc(m, x)
                # alpha = fsolve( lambda alpha: fun(alpha, self.N_x/2, etas[e]@etas[e]/2) - np.exp(-c_stars[e]/2), 0.5)
                # Closed-form alpha via the (principal branch) Lambert W function
                alpha = np.real( -self.N_x/(etas[e]@etas[e]) * lambertw(-(etas[e]@etas[e])/self.N_x * np.exp(-(etas[e]@etas[e])/self.N_x) * np.exp(-c_stars[e]/self.N_x) ) )
            else:
                alpha = self.alpha
            # Update ensemble member: deterministic shift towards the
            # observation plus beta- and alpha-scaled stochastic perturbations
            member_proposal = ensemble[:,e] + self.Q @ self.H.T @ self.S @ d[:,e]
            member_update = member_proposal + np.sqrt(beta) * self.sqrtPinv @ etas[e] + np.sqrt(alpha) * self.sqrtPinv @ xi
            updated_ensemble[:,e] = member_update
        self.statistics.set_ensemble(updated_ensemble)
        return updated_ensemble
| 3,123 | 30.877551 | 172 | py |
advectionDiffusion | advectionDiffusion-main/TruthGenerator.py | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import Sampler
def plot_kernel(grid, prior_args):
    """Plot the 1D Matern covariance kernel for the configured phi over half the grid extent."""
    phi = prior_args["matern_phi"]
    # Distances in grid cells, from 0 up to half the larger grid dimension
    half_extent = int(max(grid.nx, grid.ny) / 2)
    h = np.arange(half_extent)
    # Matern correlation as a function of the physical distance phi*dx*h
    kernel = (1 + phi * grid.dx * h) * np.exp(-phi * grid.dx * h)
    plt.title("Matern covariance kernel")
    plt.plot(kernel)
    plt.xlabel("Distance in grid cells")
    plt.ylabel("Correlation")
    # Horizontal line marking the 0.05 correlation threshold
    plt.plot(0.05 * np.ones_like(h), "g")
    # Shade the range where the kernel should ideally drop below the threshold
    cut_lo = int(max(grid.nx, grid.ny) / 4)
    cut_hi = int(max(grid.nx, grid.ny) / 3)
    plt.fill_between(range(len(h)), 0, 0.1, where=(h >= cut_lo) & (h <= cut_hi), color="r", alpha=0.5)
    plt.legend(["cov kernel", "correlation range", "desired cut"])
    plt.show()
def plot_corr_radius(grid, prior_args):
    """Show which grid cells are correlated (>0.05) with cell (0,0) under the prior."""
    sampler = Sampler.Sampler(grid, prior_args)
    corr0 = sampler.corr[0]
    mask = np.reshape((corr0 > 0.05), (grid.ny, grid.nx))
    plt.imshow(mask, origin="lower")
    plt.title("Correlation area for (0,0)")
    plt.show()
def plot_xlims(statistics, prior_args):
    """Return (vmin, vmax) plotting limits bracketing the prior mean by +/- 0.5."""
    sampler = Sampler.Sampler(statistics.simulator.grid, prior_args)
    return np.min(sampler.mean) - 0.5, np.max(sampler.mean) + 0.5
def plot_truth(state, grid, vmin=None, vmax=None):
    """Plot a flat state vector reshaped onto the (ny, nx) grid as the 'truth' field."""
    field = np.reshape(state, (grid.ny, grid.nx))
    plt.imshow(field, origin="lower", vmin=vmin, vmax=vmax)
    plt.title("Truth")
    plt.colorbar(orientation="horizontal")
    plt.show()
def to_file(timestamp, simulator, prior_args, observation):
    """Write the permanent files for the experimental set-up.

    Delegates simulator and observation set-up to their own writers and
    appends the prior hyper-parameters to the experiment's ``setup`` file.

    timestamp: experiment identifier used in the directory name.
    simulator: object providing ``to_file(timestamp)``.
    prior_args: dict with keys mean_upshift, bell_center, bell_sharpness,
        bell_scaling, matern_phi, stddev.
    observation: object providing ``setup_to_file`` and ``positions_to_file``.
    """
    simulator.to_file(timestamp)
    file = "experiment_files/experiment_" + timestamp + "/setup"
    # Bug fix: the handle was previously opened without ever being closed;
    # 'with' guarantees the file is flushed and closed even on error.
    with open(file, "a") as f:
        f.write("--------------------------------------------\n")
        f.write("Prior for the advection diffusion experiment\n")
        f.write("(Parameters for mean and cov of Gaussian distribution):\n")
        f.write("mean_upshift = " + str(prior_args["mean_upshift"]) + "\n")
        f.write("bell_center = " + str(prior_args["bell_center"]) + "\n")
        f.write("bell_sharpness = " + str(prior_args["bell_sharpness"]) + "\n")
        f.write("bell_scaling = " + str(prior_args["bell_scaling"]) + "\n")
        f.write("matern_phi = " + str(prior_args["matern_phi"]) + "\n")
        f.write("stddev = " + str(prior_args["stddev"]) + "\n")
    observation.setup_to_file(timestamp)
    observation.positions_to_file(timestamp)
| 2,674 | 33.294872 | 106 | py |
advectionDiffusion | advectionDiffusion-main/Ensemble.py |
import numpy as np
class Ensemble:
    """Container for an ensemble of model states, stored column-wise."""

    def __init__(self, simulator, N_e):
        """Allocate a zeroed (N_x, N_e) state array for N_e members on the simulator grid."""
        self.simulator = simulator
        self.N_e = N_e
        # One state vector per column
        self.ensemble = np.zeros((self.simulator.grid.N_x, self.N_e))

    def initialize(self, prior_sampler):
        """Draw the initial members from the given prior sampler."""
        self.ensemble = prior_sampler.sample(self.N_e)

    def set(self, ensemble):
        """Overwrite the stored ensemble with the given array."""
        self.ensemble = ensemble
| 409 | 19.5 | 69 | py |
advectionDiffusion | advectionDiffusion-main/SLETKalmanFilter.py | """
Kalman filter update for advection diffusion example.
"""
import numpy as np
class SLETKalman:
    """Serial Localized Ensemble Transform Kalman Filter (LETKF).

    Observation sites are partitioned into groups whose members are mutually
    farther apart than the localisation diameter; groups are assimilated
    serially. Each observation updates only a local box around its site,
    tapered with a Gaspari-Cohn weight, and the local analyses are blended
    back into the global forecast.
    """
    def __init__(self, statistics, observation, scale_r, scale_w=1.0):
        """
        statistics: Statistics object holding ensemble and simulator (grid).
        observation: Observation object providing H, R, N_y and positions.
        scale_r: localisation radius in grid cells (Gaspari-Cohn scale).
        scale_w: overall scaling applied to the localisation weights.
        """
        self.statistics = statistics
        # Observation and obs error cov matrices
        self.H = observation.H
        self.R = observation.R
        # Physical positions of the observation sites (grid index * cell size)
        self.N_y = observation.N_y
        self.observation_positions = observation.positions *\
            np.array([self.statistics.simulator.grid.dx,self.statistics.simulator.grid.dy])
        # Grouping for serial processing
        self.initializeGroups(scale_r)
        # Local kernels around observations sites
        self.initializeLocalisation(scale_r, scale_w)

    def initializeGroups(self, scale_r):
        """Partition observation indices into groups whose members are pairwise
        farther apart than the localisation diameter (periodic distances)."""
        # Assembling observation distance matrix with periodic wrap-around
        self.obs_dist_mat = np.zeros((self.N_y, self.N_y))
        for i in range(self.N_y):
            for j in range(self.N_y):
                dx = np.abs(self.observation_positions[i][0] - self.observation_positions[j][0])
                if dx > self.statistics.simulator.grid.xdim/2:
                    dx = self.statistics.simulator.grid.xdim - dx
                dy = np.abs(self.observation_positions[i][1] - self.observation_positions[j][1])
                if dy > self.statistics.simulator.grid.ydim/2:
                    dy = self.statistics.simulator.grid.ydim - dy
                self.obs_dist_mat[i,j] = np.sqrt(dx**2+dy**2)
        # Heavy diagonal such that 0-distances are above every threshold
        np.fill_diagonal(self.obs_dist_mat, np.sqrt(self.statistics.simulator.grid.xdim**2 + self.statistics.simulator.grid.ydim**2))
        # Groups of "un-correlated" observation
        self.groups = list([list(np.arange(self.N_y, dtype=int))])
        # Observations are assumed to be uncorrelated if their distance exceeds
        # twice the (tapered) localisation radius
        threshold = 2.0 * 1.5 * scale_r * self.statistics.simulator.grid.dx
        g = 0
        # Repeatedly move one member of the closest pair into the next group
        # until every group's minimal pairwise distance exceeds the threshold
        while self.obs_dist_mat[np.ix_(self.groups[g],self.groups[g])].min() < threshold:
            while self.obs_dist_mat[np.ix_(self.groups[g],self.groups[g])].min() < threshold:
                mask = np.ix_(self.groups[g],self.groups[g])
                idx2move = self.groups[g][np.where(self.obs_dist_mat[mask] == self.obs_dist_mat[mask].min())[1][0]]
                self.groups[g] = list(np.delete(self.groups[g], np.where(self.groups[g] == idx2move)))
                if len(self.groups)<g+2:
                    self.groups.append([idx2move])
                else:
                    self.groups[g+1].append(idx2move)
            g = g + 1

    def initializeLocalisation(self, scale_r, scale_w):
        """Precompute, per group, the local index maps, roll offsets and the
        analysis/forecast blending weights."""
        dx = self.statistics.simulator.grid.dx
        dy = self.statistics.simulator.grid.dy
        nx = self.statistics.simulator.grid.nx
        ny = self.statistics.simulator.grid.ny
        self.W_loc = SLETKalman.getLocalWeightShape(scale_r, dx, dy, nx, ny, scale_w)
        self.all_Ls = []
        self.all_xrolls = []
        self.all_yrolls = []
        self.W_analyses = []
        self.W_forecasts = []
        for g in range(len(self.groups)):
            Ls = [None]*len(self.groups[g])
            # Bug fix: np.int was removed in NumPy 1.24 -- use the builtin int
            xrolls = np.zeros(len(self.groups[g]), dtype=int)
            yrolls = np.zeros(len(self.groups[g]), dtype=int)
            for d in range(len(self.groups[g])):
                # Collecting local masks and periodic rolling information
                Ls[d], xrolls[d], yrolls[d] = \
                    SLETKalman.getLocalIndices(self.observation_positions[self.groups[g][d]], scale_r, \
                        dx, dy, nx, ny)
            self.all_Ls.append(Ls)
            self.all_xrolls.append(xrolls)
            self.all_yrolls.append(yrolls)
            # Combined (possibly overlapping) weights of all sites in the group
            W_combined = SLETKalman.getCombinedWeights(self.observation_positions[self.groups[g]], scale_r, dx, dy, nx, ny, self.W_loc, scale_w)
            # Normalise overlaps; forecast weight is the complement
            W_scale = np.maximum(W_combined,1)
            W_analysis = W_combined/W_scale
            W_forecast = np.ones_like(W_analysis) - W_analysis
            self.W_analyses.append(W_analysis)
            self.W_forecasts.append(W_forecast)

    @staticmethod
    def getLocalIndices(obs_loc, scale_r, dx, dy, nx, ny):
        """
        Defines mapping from global domain (nx times ny) to local domain.

        Returns (localIndices, xroll, yroll): a boolean (ny, nx) mask of the
        local box around obs_loc plus the x/y roll offsets needed when the
        box wraps around the periodic boundaries.
        """
        boxed_r = dx*np.ceil(scale_r*1.5)
        localIndices = np.array([[False]*nx]*ny)
        loc_cell_left  = int(np.round(obs_loc[0]/dx)) - int(np.round(boxed_r/dx))
        loc_cell_right = int(np.round(obs_loc[0]/dx)) + int(np.round((boxed_r+dx)/dx))
        loc_cell_down  = int(np.round(obs_loc[1]/dy)) - int(np.round(boxed_r/dy))
        loc_cell_up    = int(np.round(obs_loc[1]/dy)) + int(np.round((boxed_r+dy)/dy))
        xranges = []
        yranges = []
        xroll = 0
        yroll = 0
        # Split the box into (at most two) ranges per axis when it wraps
        if loc_cell_left < 0:
            xranges.append((nx+loc_cell_left , nx))
            xroll = loc_cell_left   # negative number
            loc_cell_left = 0
        elif loc_cell_right > nx:
            xranges.append((0, loc_cell_right - nx))
            xroll = loc_cell_right - nx   # positive number
            loc_cell_right = nx
        xranges.append((loc_cell_left, loc_cell_right))
        if loc_cell_down < 0:
            yranges.append((ny+loc_cell_down , ny))
            yroll = loc_cell_down   # negative number
            loc_cell_down = 0
        elif loc_cell_up > ny:
            yranges.append((0, loc_cell_up - ny ))
            yroll = loc_cell_up - ny   # positive number
            loc_cell_up = ny
        yranges.append((loc_cell_down, loc_cell_up))
        # Perf fix: the original additionally recomputed an unused per-cell
        # 'loc' vector in an O(box area) inner loop; the mask assignment alone
        # is sufficient and equivalent.
        for xrange in xranges:
            for yrange in yranges:
                localIndices[yrange[0] : yrange[1], xrange[0] : xrange[1]] = True
        return localIndices, xroll, yroll

    @staticmethod
    def getLocalWeightShape(scale_r, dx, dy, nx, ny, scale_w=1.0):
        """
        Gives a local stencil with weights based on the distGC
        (Gaspari-Cohn taper), hard-cut to zero beyond 1.5*scale_r*dx.
        """
        local_nx = int(np.ceil(scale_r*1.5)*2 + 1)
        local_ny = int(np.ceil(scale_r*1.5)*2 + 1)
        # Bug fix: second dimension was local_ny; use local_nx so the shape is
        # correct even if the stencil ever becomes non-square.
        weights = np.zeros((local_ny, local_nx))
        # Observation sits at the centre of the stencil
        obs_loc = np.array([local_nx*dx/2, local_ny*dy/2])
        for y in range(local_ny):
            for x in range(local_nx):
                loc = np.array([(x+0.5)*dx, (y+0.5)*dy])
                if np.linalg.norm(obs_loc - loc) > 1.5*scale_r*dx:
                    weights[y,x] = 0
                else:
                    weights[y,x] = min(1, SLETKalman.distGC(obs_loc, loc, scale_r*dx, nx*dx, ny*dy))
        return scale_w * weights

    @staticmethod
    def distGC(obs, loc, r, lx, ly):
        """
        Calculating the Gasparin-Cohn value for the distance between obs
        and loc for the localisation radius r.
        obs: drifter positions ([x,y])
        loc: current physical location to check (either [x,y] or [[x1,y1],...,[xd,yd]])
        r: localisation scale in the Gasparin Cohn function
        lx: domain extension in x-direction (necessary for periodic boundary conditions)
        ly: domain extension in y-direction (necessary for periodic boundary conditions)
        """
        if not obs.shape == loc.shape:
            obs = np.tile(obs, (loc.shape[0],1))
        if len(loc.shape) == 1:
            # Single location: minimal distance over the periodic images
            dist = min(np.linalg.norm(np.abs(obs-loc)),
                    np.linalg.norm(np.abs(obs-loc) - np.array([lx,0 ])),
                    np.linalg.norm(np.abs(obs-loc) - np.array([0 ,ly])),
                    np.linalg.norm(np.abs(obs-loc) - np.array([lx,ly])) )
        else:
            dist = np.linalg.norm(obs-loc, axis=1)
        # scalar case
        if isinstance(dist, float):
            distGC = 0.0
            if dist/r < 1:
                distGC = 1 - 5/3*(dist/r)**2 + 5/8*(dist/r)**3 + 1/2*(dist/r)**4 - 1/4*(dist/r)**5
            elif dist/r >= 1 and dist/r < 2:
                distGC = 4 - 5*(dist/r) + 5/3*(dist/r)**2 + 5/8*(dist/r)**3 -1/2*(dist/r)**4 + 1/12*(dist/r)**5 - 2/(3*(dist/r))
        # vector case
        else:
            distGC = np.zeros_like(dist)
            for i in range(len(dist)):
                if dist[i]/r < 1:
                    distGC[i] = 1 - 5/3*(dist[i]/r)**2 + 5/8*(dist[i]/r)**3 + 1/2*(dist[i]/r)**4 - 1/4*(dist[i]/r)**5
                elif dist[i]/r >= 1 and dist[i]/r < 2:
                    distGC[i] = 4 - 5*(dist[i]/r) + 5/3*(dist[i]/r)**2 + 5/8*(dist[i]/r)**3 -1/2*(dist[i]/r)**4 + 1/12*(dist[i]/r)**5 - 2/(3*(dist[i]/r))
        return distGC

    @staticmethod
    def getCombinedWeights(observation_positions, scale_r, dx, dy, nx, ny, W_loc, scale_w=1.0):
        """Accumulate the local weight stencils of all given observation sites
        onto the global (ny, nx) grid, honouring periodic boundaries."""
        W_scale = np.zeros((ny, nx))
        num_drifters = observation_positions.shape[0]
        #print('found num_drifters:', num_drifters)
        if observation_positions.shape[1] != 2:
            print('observation_positions has wrong shape')
            return None
        # Get the shape of the local weights (drifter independent).
        # NOTE(review): this recomputes W_loc and ignores the passed-in
        # argument; kept as-is for interface/behaviour compatibility.
        W_loc = SLETKalman.getLocalWeightShape(scale_r, dx, dy, nx, ny, scale_w)
        for d in range(num_drifters):
            # Get local mapping for drifter
            L, xroll, yroll = SLETKalman.getLocalIndices(observation_positions[d,:], scale_r, dx, dy, nx, ny)
            # Roll weigths according to periodic boundaries
            W_loc_d = np.roll(np.roll(W_loc, shift=yroll, axis=0 ), shift=xroll, axis=1)
            # Add weights to global domain based on local mapping:
            W_scale[L] += W_loc_d.flatten()
        return W_scale

    def filter_per_group(self, ensemble, obs, g):
        """Assimilate the observations of group g serially and return the
        blended analysis ensemble (also pushed into ``self.statistics``)."""
        # Bookkeeping
        nx = self.statistics.simulator.grid.nx
        ny = self.statistics.simulator.grid.ny
        N_e = ensemble.shape[1]
        # Reshape the forecast members onto the 2D grid
        X_f = np.zeros((N_e, ny, nx))
        for e in range(X_f.shape[0]):
            X_f[e] = np.reshape(ensemble[:,e], (ny, nx))
        X_f_mean = np.average(X_f, axis=0)
        X_f_pert = X_f - X_f_mean
        X_a = np.zeros_like(X_f)
        # Observed ensemble restricted to this group's sites
        H = self.H[self.groups[g]]
        HX_f = H @ ensemble
        HX_f_mean = np.average(HX_f, axis=1)
        HX_f_pert = HX_f - np.reshape(HX_f_mean, (len(obs),1))
        # Prepare local ETKF analysis
        N_x_local = self.W_loc.shape[0]*self.W_loc.shape[1]
        X_f_loc_tmp = np.zeros((N_e, N_x_local))
        X_f_loc_pert_tmp = np.zeros((N_e, N_x_local))
        X_f_loc_mean_tmp = np.zeros((N_x_local))
        # Loop over all observations d of the group
        for d in range(len(obs)):
            L, xroll, yroll = self.all_Ls[g][d], self.all_xrolls[g][d], self.all_yrolls[g][d]
            X_f_loc_tmp[:,:] = X_f[:,L]
            X_f_loc_pert_tmp[:,:] = X_f_pert[:,L]
            X_f_loc_mean_tmp[:] = X_f_mean[L]
            # Undo the periodic roll so the observation sits mid-stencil
            if not (xroll == 0 and yroll == 0):
                rolling_shape = (N_e, self.W_loc.shape[0], self.W_loc.shape[1]) # roll around axis 2 and 3
                X_f_loc_tmp[:,:] = np.roll(np.roll(X_f_loc_tmp.reshape(rolling_shape), shift=-yroll, axis=1 ), shift=-xroll, axis=2).reshape((N_e, N_x_local))
                X_f_loc_pert_tmp[:,:] = np.roll(np.roll(X_f_loc_pert_tmp.reshape(rolling_shape), shift=-yroll, axis=1 ), shift=-xroll, axis=2).reshape((N_e, N_x_local))
                mean_rolling_shape = (self.W_loc.shape[0], self.W_loc.shape[1]) # roll around axis 1 and 2
                X_f_loc_mean_tmp[:] = np.roll(np.roll(X_f_loc_mean_tmp.reshape(mean_rolling_shape), shift=-yroll, axis=0 ), shift=-xroll, axis=1).reshape((N_x_local))
            # Adapting LETKF dimensionalisation (state x members)
            X_f_loc = X_f_loc_tmp.T
            X_f_loc_pert = X_f_loc_pert_tmp.T
            X_f_loc_mean = X_f_loc_mean_tmp.T
            # Local observation
            HX_f_loc_mean = HX_f_mean[d]
            HX_f_loc_pert = np.reshape(HX_f_pert[d,:],(1,N_e))
            # LETKF analysis in ensemble space
            Rinv = np.linalg.inv(np.reshape(self.R[d,d], (1,1)))
            y_loc = obs[d]
            D = y_loc - HX_f_loc_mean # 1 x 1
            A1 = (N_e-1)*np.eye(N_e)
            A2 = HX_f_loc_pert.T @ Rinv @ HX_f_loc_pert # N_e x N_e
            A = A1 + A2
            P = np.linalg.inv(A)
            K = np.reshape(X_f_loc_pert @ P @ HX_f_loc_pert.T @ Rinv, N_x_local) # N_x_loc x 1
            X_a_loc_mean = X_f_loc_mean + K * D
            # Symmetric square-root transform for the analysis perturbations
            sigma, V = np.linalg.eigh( (N_e - 1) * P )
            X_a_loc_pert = X_f_loc_pert @ V @ np.diag( np.sqrt( np.real(sigma) ) ) @ V.T
            X_a_loc = np.reshape(X_a_loc_mean,(N_x_local,1)) + X_a_loc_pert
            # Calculate weighted local analysis
            weighted_X_a_loc = X_a_loc[:,:]*(np.tile(self.W_loc.flatten().T, (N_e, 1)).T)
            # np.tile(W_loc.flatten().T, (N_e, 1)).T repeats W_loc as a column vector N_e times
            if not (xroll == 0 and yroll == 0):
                weighted_X_a_loc = np.roll(np.roll(weighted_X_a_loc[:,:].reshape((self.W_loc.shape[0], self.W_loc.shape[1], N_e)),
                                                    shift=yroll, axis=0 ),
                                            shift=xroll, axis=1)
            X_a[:,L] += weighted_X_a_loc.reshape(self.W_loc.shape[0]*self.W_loc.shape[1], N_e).T
        # (end loop over all d)
        # COMBINING (the already weighted) ANALYSIS WITH THE FORECAST
        X_new = np.zeros_like(X_f)
        for e in range(N_e):
            X_new[e] = self.W_forecasts[g]*X_f[e] + X_a[e]
        # Upload flattened analysis ensemble
        X_new = np.reshape(X_new, (N_e, nx*ny)).T
        self.statistics.set_ensemble(X_new)
        return X_new

    def filter(self, ensemble, obs):
        """Assimilate the full observation vector by processing the
        pre-computed observation groups serially."""
        for g in range(len(self.groups)):
            ensemble = self.filter_per_group(ensemble, obs[self.groups[g]], g)
| 14,065 | 39.188571 | 168 | py |
advectionDiffusion | advectionDiffusion-main/ETKalmanFilter.py | """
Kalman filter update for advection diffusion example.
"""
import numpy as np
class ETKalman:
    """Ensemble Transform Kalman Filter (ETKF) analysis step."""

    def __init__(self, statistics, observation):
        """Keep references to the statistics object, observation operator and noise covariance."""
        self.statistics = statistics
        # Observation operator H and observation-error covariance R
        self.H = observation.H
        self.R = observation.R

    def filter(self, ensemble, obs):
        """Assimilate one observation vector and return the analysis ensemble."""
        N_x = self.statistics.simulator.grid.N_x
        N_e = self.statistics.ensemble.N_e
        # Forecast mean and anomalies (members are columns)
        forecast_mean = np.average(ensemble, axis=1)
        anomalies = ensemble - forecast_mean.reshape(N_x, 1)
        # Observed ensemble, its mean and anomalies
        Rinv = np.linalg.inv(self.R)
        obs_ens = self.H @ ensemble
        obs_mean = np.average(obs_ens, axis=1)
        obs_anomalies = obs_ens - obs_mean.reshape(len(obs), 1)
        innovation = obs - obs_mean
        # Transform matrix in ensemble space
        A = (N_e - 1) * np.eye(N_e) + obs_anomalies.T @ Rinv @ obs_anomalies
        P = np.linalg.inv(A)
        # Kalman gain and analysis mean
        gain = anomalies @ P @ obs_anomalies.T @ Rinv
        analysis_mean = forecast_mean + gain @ innovation
        # Symmetric square root of (N_e - 1) * P for the analysis anomalies
        sigma, V = np.linalg.eigh((N_e - 1) * P)
        sqrt_transform = V @ np.diag(np.sqrt(np.real(sigma))) @ V.T
        analysis = anomalies @ sqrt_transform + analysis_mean.reshape(N_x, 1)
        self.statistics.set_ensemble(analysis)
        return analysis
return X_a | 1,324 | 29.113636 | 101 | py |
advectionDiffusion | advectionDiffusion-main/run_FilteringComparisonLocalisation.py | # %%
"""
Example:
python run_FilteringComparison.py -m ensemble_size
"""
# %%
import numpy as np
# %%
import Sampler
import Simulator
import Observation
import Statistics
import KalmanFilter
import ETKalmanFilter
import SLETKalmanFilter
import IEWParticleFilter
import Comparer
import RunningWriter
# %%
# Initialisation
print("Initialising...")
timestamp = "2022_03_02-12_44_46"
grid, simulator = Simulator.from_file(timestamp)
observation = Observation.from_file(grid, timestamp)
prior_args = Statistics.prior_args_from_file(timestamp)
print("done\n")
# %%
# LOCALISATION IEWPF
iewpfQphis = [3.0, 5.0, 7.0, 11.0]
iewpfQs = [ Sampler.Sampler(grid, {"mean_upshift" : 0.0, "matern_phi" : phi, "stddev" : simulator.noise_stddev} ).cov for phi in iewpfQphis]
# %%
# LOCALISATION LETKF
scale_rs = [100,9,6,3]
# %%
trials_truth = 20
trials_init = 5
N_e = 50
# %%
kfmeans = np.zeros((len(iewpfQphis), trials_truth*trials_init, grid.nx*grid.ny))
kfcovs = np.zeros((len(iewpfQphis), trials_truth*trials_init, grid.nx*grid.ny, grid.nx*grid.ny))
states_iewpf = np.zeros((len(iewpfQphis), trials_truth*trials_init, grid.nx*grid.ny, N_e))
states_letkf = np.zeros((len(iewpfQphis), trials_truth*trials_init, grid.nx*grid.ny, N_e))
# %%
# Repeating ensemble runs
for trial_model in range(len(iewpfQphis)):
for trail_truth in range(trials_truth):
# Truth
print("\nModel", trial_model, ", Truth", trail_truth)
observation.clear_observations()
statistics_truth = Statistics.Statistics(simulator, 1)
statistics_truth.set_prior(prior_args)
for t in range(10):
statistics_truth.propagate(25)
observation.observe(statistics_truth.mean)
# KF
print("KF DA")
statistics_kf = Statistics.Statistics(simulator, safe_history=True)
statistics_kf.set_prior(prior_args)
kalmanFilter = KalmanFilter.Kalman(statistics_kf, observation)
for t in range(observation.N_obs):
statistics_kf.propagate(25)
kalmanFilter.filter(statistics_kf.mean, statistics_kf.cov, observation.obses[t])
for trial_init in range(trials_init):
print("\nModel", trial_model, ", Truth", trail_truth, ", Init", trial_init)
# ETKF
if trial_model == 0:
print("ETKF DA")
statistics_etkf = Statistics.Statistics(simulator, N_e, safe_history=True)
statistics_etkf.set_prior(prior_args)
etkFilter = ETKalmanFilter.ETKalman(statistics_etkf, observation)
for t in range(observation.N_obs):
statistics_etkf.propagate(25)
etkFilter.filter(statistics_etkf.ensemble.ensemble, observation.obses[t])
# LETKF
if trial_model > 0:
print("LETKF DA")
statistics_letkf = Statistics.Statistics(simulator, N_e, safe_history=True)
statistics_letkf.set_prior(prior_args)
sletkFilter = SLETKalmanFilter.SLETKalman(statistics_letkf, observation, scale_rs[trial_model])
for t in range(observation.N_obs):
statistics_letkf.propagate(25)
sletkFilter.filter(statistics_letkf.ensemble.ensemble, observation.obses[t])
# IEWPF
print("IEWPF DA")
statistics_iewpf = Statistics.Statistics(simulator, N_e, safe_history=True)
statistics_iewpf.set_prior(prior_args)
iewpFilter = IEWParticleFilter.IEWParticle(statistics_iewpf, observation, beta=0.55, Q=iewpfQs[trial_model])
for t in range(observation.N_obs):
statistics_iewpf.propagate(25, model_error=False)
iewpFilter.filter(statistics_iewpf.ensemble.ensemble, observation.obses[t])
# Comparison
print("Storing")
trial = trail_truth*trials_init + trial_init
kfmeans[trial_model, trial] = statistics_kf.mean
kfcovs[trial_model, trial] = statistics_kf.cov
states_iewpf[trial_model, trial] = statistics_iewpf.ensemble.ensemble
if trial_model == 0:
states_letkf[trial_model, trial] = statistics_etkf.ensemble.ensemble
else:
states_letkf[trial_model, trial] = statistics_letkf.ensemble.ensemble
print("done")
# %%
import datetime
result_timestamp = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
file = "experiment_files/experiment_" + timestamp + "/localisation_results_" + result_timestamp
f = open(file, "a")
f.write("IEWPF phis: " + ",".join([str(phi) for phi in iewpfQphis]))
np.save("experiment_files/experiment_" + timestamp + "/loc_KFmeans_"+result_timestamp+".npy", kfmeans)
np.save("experiment_files/experiment_" + timestamp + "/loc_KFcovs_"+result_timestamp+".npy", kfcovs)
np.save("experiment_files/experiment_" + timestamp + "/loc_IEWPFQ_"+result_timestamp+".npy", states_iewpf)
np.save("experiment_files/experiment_" + timestamp + "/loc_LETKFr_"+result_timestamp+".npy", states_letkf)
| 5,114 | 31.169811 | 140 | py |
advectionDiffusion | advectionDiffusion-main/run_FilteringComparison.py | """
Example:
python run_FilteringComparison.py -m ensemble_size
"""
import Simulator
import Observation
import Statistics
import KalmanFilter
import ETKalmanFilter
import SLETKalmanFilter
import IEWParticleFilter
import Comparer
import RunningWriter
# Initialisation: load the fixed experiment set-up from disk
print("Initialising...")
timestamp = "2022_03_02-12_44_46"
grid, simulator = Simulator.from_file(timestamp)
observation = Observation.from_file(grid, timestamp)
prior_args = Statistics.prior_args_from_file(timestamp)
print("done\n")
# Repeated ensemble runs
# pois: grid points of interest for ECDF comparisons;
# corr_ref_pois: reference points for point-to-point correlation errors
pois = [[0,0], [25,15], [0,1]]
corr_ref_pois = [[20,10],[21,10],[25,15]]
# Setting mode: which model parameter is varied across the experiment series
import argparse
import sys
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    '-m', required=True, dest='mode', choices=["ensemble_size", "observation_size", "advection", "model_noise"],
    help='specifying which parameter is changed throughout experiments')
parser.add_argument(
    '-tt', default=20, type=int, dest='trials_truth', help='how often the truth is re-initialized in the repeated experiments')
parser.add_argument(
    '-ti', default=5, type=int, dest='trials_init', help='how often the ensemble is re-initialized in the repeated experiments'
)
args = parser.parse_args(sys.argv[1:])
mode = args.mode
# One RunningWriter slot per parameter value of the chosen mode
if mode == "ensemble_size":
    N_es = [25, 50, 100, 250, 1000, 5000]
    runningModelWriter = RunningWriter.RunningWriter(trials=len(N_es), N_poi=len(pois), N_corr_poi=len(corr_ref_pois))
    runningModelWriterSTD = RunningWriter.RunningWriter(trials=len(N_es), N_poi=len(pois), N_corr_poi=len(corr_ref_pois))
if mode == "observation_size":
    N_ys = [3, 4, 5, 10, 15]
    runningModelWriter = RunningWriter.RunningWriter(trials=len(N_ys), N_poi=len(pois), N_corr_poi=len(corr_ref_pois))
    runningModelWriterSTD = RunningWriter.RunningWriter(trials=len(N_ys), N_poi=len(pois), N_corr_poi=len(corr_ref_pois))
if mode == "advection":
    vs = [[0.5,0.5], [1.0, 0.5], [1.5, 0.5], [2.0, 0.5]]
    runningModelWriter = RunningWriter.RunningWriter(trials=len(vs), N_poi=len(pois), N_corr_poi=len(corr_ref_pois))
if mode == "model_noise":
    noise_stddevs = [0.05, 0.1, 0.25, 0.5]
    runningModelWriter = RunningWriter.RunningWriter(trials=len(noise_stddevs), N_poi=len(pois), N_corr_poi=len(corr_ref_pois))
# Repeating ensemble runs
for trial_model in range(runningModelWriter.trials):
    # Apply the trial_model-th parameter value of the selected mode
    print("Changing the model! Set up ", trial_model)
    if mode == "ensemble_size":
        N_e = N_es[trial_model]
    else:
        N_e = 50
    if mode == "observation_size":
        observation.set_regular_positions(N_ys[trial_model])
        N_ys[trial_model] = observation.N_y
    if mode == "advection":
        simulator.v = vs[trial_model]
    if mode == "model_noise":
        prior_args["stddev"] = noise_stddevs[trial_model]
    trials_truth = args.trials_truth
    trials_init = args.trials_init
    # Large ensembles are expensive: reduce the number of repetitions
    if N_e >= 1000:
        trials_truth = 5
        trials_init = 3
    runningWriter = RunningWriter.RunningWriter(trials=trials_truth*trials_init, N_poi=len(pois), N_corr_poi=len(corr_ref_pois))
    for trail_truth in range(trials_truth):
        # Truth: simulate a reference trajectory and record observations
        print("New true observations", trail_truth)
        observation.clear_observations()
        statistics_truth = Statistics.Statistics(simulator, 1)
        statistics_truth.set_prior(prior_args)
        for t in range(10):
            statistics_truth.propagate(25)
            observation.observe(statistics_truth.mean)
        # KF: exact reference solution (depends only on the truth)
        print("KF DA")
        statistics_kf = Statistics.Statistics(simulator, safe_history=True)
        statistics_kf.set_prior(prior_args)
        kalmanFilter = KalmanFilter.Kalman(statistics_kf, observation)
        for t in range(observation.N_obs):
            statistics_kf.propagate(25)
            kalmanFilter.filter(statistics_kf.mean, statistics_kf.cov, observation.obses[t])
        for trial_init in range(trials_init):
            print("Ensemble init ", trial_init)
            # ETKF
            print("ETKF DA")
            statistics_etkf = Statistics.Statistics(simulator, N_e, safe_history=True)
            statistics_etkf.set_prior(prior_args)
            etkFilter = ETKalmanFilter.ETKalman(statistics_etkf, observation)
            for t in range(observation.N_obs):
                statistics_etkf.propagate(25)
                etkFilter.filter(statistics_etkf.ensemble.ensemble, observation.obses[t])
            # LETKF
            print("LETKF DA")
            statistics_letkf = Statistics.Statistics(simulator, N_e, safe_history=True)
            statistics_letkf.set_prior(prior_args)
            scale_r = 6
            sletkFilter = SLETKalmanFilter.SLETKalman(statistics_letkf, observation, scale_r)
            for t in range(observation.N_obs):
                statistics_letkf.propagate(25)
                sletkFilter.filter(statistics_letkf.ensemble.ensemble, observation.obses[t])
            # IEWPF (model error handled inside the filter, hence model_error=False)
            print("IEWPF DA")
            statistics_iewpf = Statistics.Statistics(simulator, N_e, safe_history=True)
            statistics_iewpf.set_prior(prior_args)
            iewpFilter = IEWParticleFilter.IEWParticle(statistics_iewpf, observation, beta=0.55)
            for t in range(observation.N_obs):
                statistics_iewpf.propagate(25, model_error=False)
                iewpFilter.filter(statistics_iewpf.ensemble.ensemble, observation.obses[t])
            # MC: free ensemble run without any assimilation (baseline)
            print("MC")
            statistics_mc = Statistics.Statistics(simulator, N_e, safe_history=True)
            statistics_mc.set_prior(prior_args)
            for t in range(observation.N_obs):
                statistics_mc.propagate(25)
            # Comparison: evaluate every method against the KF reference
            print("Comparing")
            trial = trail_truth*trials_init + trial_init
            comparer = Comparer.Comparer(statistics_kf, statistics_etkf, statistics_letkf, statistics_iewpf, statistics_mc)
            mean_rmse_kf, runningWriter.mean_rmse_etkfs[trial], runningWriter.mean_rmse_letkfs[trial], runningWriter.mean_rmse_iewpfs[trial], runningWriter.mean_rmse_mcs[trial] = comparer.mean_rmse()
            stddev_rmse_kf, runningWriter.stddev_rmse_etkfs[trial], runningWriter.stddev_rmse_letkfs[trial], runningWriter.stddev_rmse_iewpfs[trial], runningWriter.stddev_rmse_mcs[trial] = comparer.stddev_rmse()
            cov_frob_kf, runningWriter.cov_frob_etkfs[trial], runningWriter.cov_frob_letkfs[trial], runningWriter.cov_frob_iewpfs[trial], runningWriter.cov_frob_mcs[trial] = comparer.cov_frobenius_dist()
            cov_frob_kf_close, runningWriter.cov_frob_etkfs_close[trial], runningWriter.cov_frob_letkfs_close[trial], runningWriter.cov_frob_iewpfs_close[trial], runningWriter.cov_frob_mcs_close[trial] = comparer.cov_frobenius_dist_close()
            cov_frob_kf_far, runningWriter.cov_frob_etkfs_far[trial], runningWriter.cov_frob_letkfs_far[trial], runningWriter.cov_frob_iewpfs_far[trial], runningWriter.cov_frob_mcs_far[trial] = comparer.cov_frobenius_dist_far()
            for p in range(len(pois)):
                comparer.set_poi(pois[p])
            for p in range(len(pois)):
                runningWriter.ecdf_err_etkfs[p][trial], runningWriter.ecdf_err_letkfs[p][trial], runningWriter.ecdf_err_iewpfs[p][trial], runningWriter.ecdf_err_mcs[p][trial] = comparer.poi_ecdf_err(p)
            comparer.set_corr_ref_pois(corr_ref_pois)
            for p in range(len(corr_ref_pois)):
                runningWriter.corr_err_etkf[p][trial], runningWriter.corr_err_letkf[p][trial], runningWriter.corr_err_iewpf[p][trial] = comparer.corr_p2p_err(p)
            print("done\n")
    # Aggregate this parameter value's trials into the per-model writers
    runningModelWriter.results2write(runningWriter.results(), trial_model)
    runningModelWriterSTD.results2write(runningWriter.results("std"), trial_model)
# Persist the aggregated results for the selected mode
if mode == "ensemble_size":
    runningModelWriter.results2file(timestamp, N_es, "N_e", "avg")
    runningModelWriterSTD.results2file(timestamp, N_es, "N_e", "std")
if mode == "observation_size":
    runningModelWriter.results2file(timestamp, N_ys, "N_y")
if mode == "advection":
    runningModelWriter.results2file(timestamp, [v[0] for v in vs])
if mode == "model_noise":
    runningModelWriter.results2file(timestamp, noise_stddevs)
advectionDiffusion | advectionDiffusion-main/run_FilteringComparisonSpectrum.py | # %%
"""
Example:
python run_FilteringComparison.py -m ensemble_size
"""
# %%
import numpy as np
# %%
import Sampler
import Simulator
import Observation
import Statistics
import KalmanFilter
import ETKalmanFilter
import SLETKalmanFilter
import IEWParticleFilter
import Comparer
import RunningWriter
# %%
# Initialisation
print("Initialising...")
timestamp = "2022_03_02-12_44_46"
grid, simulator = Simulator.from_file(timestamp)
observation = Observation.from_file(grid, timestamp)
prior_args = Statistics.prior_args_from_file(timestamp)
print("done\n")
# %%
trials_truth = 20
trials_init = 5
N_e = 50
# %%
prior_kfmeans = np.zeros((trials_truth*trials_init, grid.nx*grid.ny))
prior_kfcovs = np.zeros((trials_truth*trials_init, grid.nx*grid.ny, grid.nx*grid.ny))
prior_states_iewpf = np.zeros((trials_truth*trials_init, grid.nx*grid.ny, N_e))
prior_states_etkf = np.zeros((trials_truth*trials_init, grid.nx*grid.ny, N_e))
prior_states_letkf = np.zeros((trials_truth*trials_init, grid.nx*grid.ny, N_e))
posterior_kfmeans = np.zeros((trials_truth*trials_init, grid.nx*grid.ny))
posterior_kfcovs = np.zeros((trials_truth*trials_init, grid.nx*grid.ny, grid.nx*grid.ny))
posterior_states_iewpf = np.zeros((trials_truth*trials_init, grid.nx*grid.ny, N_e))
posterior_states_etkf = np.zeros((trials_truth*trials_init, grid.nx*grid.ny, N_e))
posterior_states_letkf = np.zeros((trials_truth*trials_init, grid.nx*grid.ny, N_e))
# %%
# Repeating ensemble runs
for trail_truth in range(trials_truth):
# Truth
print("\nTruth", trail_truth)
observation.clear_observations()
statistics_truth = Statistics.Statistics(simulator, 1)
statistics_truth.set_prior(prior_args)
for t in range(10):
statistics_truth.propagate(25)
observation.observe(statistics_truth.mean)
# KF
print("KF DA")
statistics_kf = Statistics.Statistics(simulator, safe_history=True)
statistics_kf.set_prior(prior_args)
kalmanFilter = KalmanFilter.Kalman(statistics_kf, observation)
for t in range(observation.N_obs):
if t == 9:
prior_kfmeans[trail_truth*trials_init:(trail_truth+1)*trials_init] = statistics_kf.mean
prior_kfcovs[trail_truth*trials_init:(trail_truth+1)*trials_init] = statistics_kf.cov
statistics_kf.propagate(25)
kalmanFilter.filter(statistics_kf.mean, statistics_kf.cov, observation.obses[t])
if t == 9:
posterior_kfmeans[trail_truth*trials_init:(trail_truth+1)*trials_init] = statistics_kf.mean
posterior_kfcovs[trail_truth*trials_init:(trail_truth+1)*trials_init] = statistics_kf.cov
for trial_init in range(trials_init):
trial = trail_truth*trials_init + trial_init
print("\nTruth", trail_truth, ", Init", trial_init)
# ETKF
print("ETKF DA")
statistics_etkf = Statistics.Statistics(simulator, N_e, safe_history=True)
statistics_etkf.set_prior(prior_args)
etkFilter = ETKalmanFilter.ETKalman(statistics_etkf, observation)
for t in range(observation.N_obs):
if t == 9:
prior_states_etkf[trial] = statistics_etkf.ensemble.ensemble
statistics_etkf.propagate(25)
etkFilter.filter(statistics_etkf.ensemble.ensemble, observation.obses[t])
if t == 9:
posterior_states_etkf[trial] = statistics_etkf.ensemble.ensemble
# LETKF
print("LETKF DA")
statistics_letkf = Statistics.Statistics(simulator, N_e, safe_history=True)
statistics_letkf.set_prior(prior_args)
sletkFilter = SLETKalmanFilter.SLETKalman(statistics_letkf, observation, 6)
for t in range(observation.N_obs):
if t == 9:
prior_states_letkf[trial] = statistics_letkf.ensemble.ensemble
statistics_letkf.propagate(25)
sletkFilter.filter(statistics_letkf.ensemble.ensemble, observation.obses[t])
if t == 9:
posterior_states_letkf[trial] = statistics_letkf.ensemble.ensemble
# IEWPF
print("IEWPF DA")
statistics_iewpf = Statistics.Statistics(simulator, N_e, safe_history=True)
statistics_iewpf.set_prior(prior_args)
iewpFilter = IEWParticleFilter.IEWParticle(statistics_iewpf, observation, beta=0.55)
for t in range(observation.N_obs):
if t == 9:
prior_states_iewpf[trial] = statistics_iewpf.ensemble.ensemble
statistics_iewpf.propagate(25, model_error=False)
iewpFilter.filter(statistics_iewpf.ensemble.ensemble, observation.obses[t])
if t == 9:
posterior_states_iewpf[trial] = statistics_iewpf.ensemble.ensemble
# %%
# Persist prior/posterior statistics of every filter for later comparison.
# Files are tagged with a fresh `result_timestamp` so repeated runs of this
# cell do not overwrite each other; `timestamp` identifies the experiment
# setup directory created earlier.
import datetime
result_timestamp = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
# Reference Kalman filter moments (exact solution for comparison).
np.save("experiment_files/experiment_" + timestamp + "/priorKFmeans_"+result_timestamp+".npy", prior_kfmeans)
np.save("experiment_files/experiment_" + timestamp + "/priorKFcovs_"+result_timestamp+".npy", prior_kfcovs)
np.save("experiment_files/experiment_" + timestamp + "/posteriorKFmeans_"+result_timestamp+".npy", posterior_kfmeans)
np.save("experiment_files/experiment_" + timestamp + "/posteriorKFcovs_"+result_timestamp+".npy", posterior_kfcovs)
# Ensemble states per method (IEWPF / LETKF / ETKF), before and after the DA update.
np.save("experiment_files/experiment_" + timestamp + "/priorIEWPF_"+result_timestamp+".npy", prior_states_iewpf)
np.save("experiment_files/experiment_" + timestamp + "/posteriorIEWPF_"+result_timestamp+".npy", posterior_states_iewpf)
np.save("experiment_files/experiment_" + timestamp + "/priorLETKF_"+result_timestamp+".npy", prior_states_letkf)
np.save("experiment_files/experiment_" + timestamp + "/posteriorLETKF_"+result_timestamp+".npy", posterior_states_letkf)
np.save("experiment_files/experiment_" + timestamp + "/priorETKF_"+result_timestamp+".npy", prior_states_etkf)
np.save("experiment_files/experiment_" + timestamp + "/posteriorETKF_"+result_timestamp+".npy", posterior_states_etkf)
| 5,977 | 36.130435 | 120 | py |
advectionDiffusion | advectionDiffusion-main/Sampler.py | import numpy as np
class Sampler:
    """Gaussian random field sampler on a rectangular grid.
    Builds a mean surface (constant upshift plus an optional Gaussian bell),
    a Matern-type correlation matrix from the grid's distance matrix, and the
    resulting covariance. Samples are drawn either with an FFT approach that
    exploits the Toeplitz structure of the correlation matrix (periodic
    boundaries), or with classical multivariate-normal sampling.
    """
    def __init__(self, grid, args):
        """grid: grid object providing nx, ny, N_x, dist_mat (and dx, dy,
        xdim, ydim when a bell is requested).
        args: dict with keys "mean_upshift", "matern_phi", "stddev" and
        optionally "bell_scaling", "bell_sharpness", "bell_center"."""
        self.grid = grid
        self.args = args
        self.construct()
    def construct(self):
        """Precompute mean vector, correlation matrix and covariance matrix."""
        # Mean: constant lift over the whole domain
        mean_lift = self.args["mean_upshift"]*np.ones(self.grid.N_x)
        if "bell_scaling" in self.args.keys():
            # Mean: additional Gaussian bell, centered at a relative position
            # inside the physical domain.
            xx, yy = np.meshgrid(np.arange(self.grid.nx)*self.grid.dx, np.arange(self.grid.ny)*self.grid.dy)
            bell_scaling = self.args["bell_scaling"]
            bell_sharpness = self.args["bell_sharpness"]
            bell_center_x = self.args["bell_center"][0]*self.grid.xdim
            bell_center_y = self.args["bell_center"][1]*self.grid.ydim
            bell = bell_scaling * np.exp(-bell_sharpness*((xx-bell_center_x)**2 + (yy-bell_center_y)**2))
        else:
            bell = np.zeros((self.grid.ny, self.grid.nx))
        self.mean = mean_lift + np.reshape(bell, self.grid.N_x)
        ######################
        # Correlation: Matern-like matrix (Matern with smoothness 3/2)
        phi = self.args["matern_phi"]
        dist_mat = np.copy(self.grid.dist_mat)
        self.corr = (1+phi*dist_mat)*np.exp(-phi*dist_mat)
        #######################
        # Stddev in each grid node (constant field)
        stddev_mesh = self.args["stddev"]*np.ones((self.grid.ny, self.grid.nx))
        self.stddev = np.reshape(stddev_mesh, self.grid.N_x)
        #######################
        # Covariance matrix: cov[i,j] = stddev[i]*stddev[j]*corr[i,j].
        # np.outer replaces the former double np.meshgrid construction
        # (same result, computed once instead of twice).
        self.cov = np.outer(self.stddev, self.stddev) * self.corr
    def sample(self, N=1):
        """Draw N realizations; returns array of shape (N_x, N)."""
        sample = self.gaussian_random_fieldFFT(self.mean, self.corr, self.stddev, N)
        return sample
    def gaussian_random_fieldFFT(self, mean, corr, stddev, N=1):
        """Sample Gaussian random fields using the FFT.
        Utilizes the Toeplitz structure of the correlation matrix; the result
        is then transformed with the mean and pointwise stddevs.
        NOTE: For periodic boundary conditions the covariance matrix runs
        into numerical problems with semi-positive definiteness, which
        forbids classical np.random.multivariate_normal sampling, but the
        FFT approach for Toeplitz matrices circumvents those problems.
        """
        sample = np.zeros((self.grid.N_x, N))
        # First row of corr fully determines the (block-circulant) matrix.
        cov_toepitz = np.reshape(corr[0,:], (self.grid.ny, self.grid.nx))
        cmf = np.real(np.fft.fft2(cov_toepitz))
        for e in range(N):
            u = np.random.normal(size=(self.grid.ny, self.grid.nx))
            uif = np.fft.ifft2(u)
            # Clip tiny negative eigenvalues (numerical noise) before sqrt.
            xf = np.real(np.fft.fft2(np.sqrt(np.maximum(cmf,0))*uif))
            sample[:,e] = mean + np.reshape(stddev, self.grid.N_x)*np.reshape(xf, self.grid.N_x)
        return sample
    # Classical alternative for sampling!! (Not used currently)
    def gaussian_random_field(self, mean, cov, N=1, nugget=0.1):
        """Direct multivariate-normal sampling with a diagonal nugget.
        NOTE: For periodic boundary conditions the covariance matrix runs
        into numerical problems with semi-positive definiteness. To avoid
        negative eigenvalues a small nugget on the diagonal is added.
        """
        sample = np.random.multivariate_normal(mean, cov + nugget*np.eye(self.grid.N_x), N).transpose()
        return sample
| 3,320 | 33.957895 | 111 | py |
advectionDiffusion | advectionDiffusion-main/Observation.py | import numpy as np
import linecache
from matplotlib import pyplot as plt
class Observation:
    """Observations in the advection diffusion example.
    Handles observation values and constructs the (linear) observation
    operator H as well as the observation noise covariance R."""
    def __init__(self, grid, noise_stddev=0.1):
        """grid: grid object providing nx, ny, N_x.
        noise_stddev: stddev of the i.i.d. Gaussian observation noise."""
        self.grid = grid
        self.noise_stddev = noise_stddev
        # Number of observation times collected so far.
        self.N_obs = 0
        print("Remember to set observation positions and to set/observe values!")
    def set_positions(self, positions):
        """Set mooring positions as a list of [x, y] grid indices.
        Also (re)builds the observation operator H and noise matrix R."""
        self.positions = positions
        self.N_y = len(positions)
        self.obsidx = np.zeros(self.N_y).astype(int)
        for i in range(self.N_y):
            # Flatten (x, y) to the row-major state index y*nx + x.
            self.obsidx[i] = positions[i][1] * self.grid.nx + positions[i][0]
        self.matrix()
        self.noise_matrix()
    def set_regular_positions(self, xfreq, yfreq=None):
        """Place moorings on a regular sub-grid with spacing xfreq/yfreq."""
        if yfreq is None:
            yfreq = xfreq
        poses = []
        for j in range(int(np.ceil(self.grid.ny/yfreq))):
            for i in range(int(np.ceil(self.grid.nx/xfreq))):
                poses.append([i*xfreq,j*yfreq])
        self.set_positions(poses)
    def plot_positions(self):
        """Scatter plot of the mooring positions inside the domain."""
        plt.title("Moorings in domain (remember periodic BC)")
        plt.scatter(np.array(self.positions)[:,0],np.array(self.positions)[:,1])
        plt.xlim(0, self.grid.nx)
        plt.ylim(0, self.grid.ny)
        plt.show()
    def matrix(self):
        """Build the observation operator H (N_y x N_x selection matrix)."""
        self.H = np.zeros((self.N_y, self.grid.N_x))
        for i in range(self.N_y):
            self.H[i,self.obsidx[i]] = 1
    def observe(self, x):
        """Observe state vector x with additive Gaussian noise and store the result."""
        self.N_obs = self.N_obs + 1
        obs = self.H @ x + np.random.normal(scale=self.noise_stddev, size=self.N_y)
        if self.N_obs == 1:
            self.obses = np.array([obs])
        else:
            self.obses = np.append(self.obses, np.array([obs]), axis=0)
    def noise_matrix(self):
        """Build the observation error covariance R = noise_stddev^2 * I."""
        self.R = self.noise_stddev**2 * np.eye(self.N_y)
    def load_observations(self, fname):
        """Load previously stored observation values from a text file."""
        self.obses = np.loadtxt(fname)
        self.N_obs = self.obses.shape[0]
        assert self.obses.shape[1] == self.N_y, "Wrong dimensions!"
    def clear_observations(self):
        """Discard all collected observation values."""
        self.obses = np.array([])
        self.N_obs = 0
    def setup_to_file(self, timestamp):
        """Append the observation noise setup to the experiment's setup file."""
        file = "experiment_files/experiment_" + timestamp + "/setup"
        f = open(file, "a")
        f.write("--------------------------------------------\n")
        f.write("Properties of the observations:\n")
        f.write("observation.noise_stddev = " + str(self.noise_stddev) + "\n")
        f.close()
    def positions_to_file(self, timestamp):
        """Store mooring positions as CSV in the experiment directory."""
        file_positions = "experiment_files/experiment_" + timestamp + "/observation_positions.csv"
        np.savetxt(file_positions, self.positions)
    def values_to_file(self, timestamp, obs_timestamp):
        """Store collected observation values as CSV, tagged by obs_timestamp."""
        file_values = "experiment_files/experiment_" + timestamp + "/observation_values_" + obs_timestamp + ".csv"
        np.savetxt(file_values, np.reshape(self.obses,(self.N_obs, self.N_y) ))
    @staticmethod
    def from_file(grid, timestamp, obs_timestamp=None):
        """Reconstruct an Observation object from files written by the *_to_file methods.
        Declared @staticmethod (it takes no self) so it also works when
        accessed on an instance, not only on the class."""
        f = "experiment_files/experiment_"+timestamp+"/setup"
        # noise_stddev is stored on line 19 of the setup file after the
        # fixed prefix "observation.noise_stddev = " (27 characters).
        noise_stddev = float(linecache.getline(f, 19)[27:-1])
        observation = Observation(grid, noise_stddev)
        f_poses = "experiment_files/experiment_" + timestamp + "/observation_positions.csv"
        observation.set_positions(np.loadtxt(f_poses))
        if obs_timestamp is not None:
            f_values = "experiment_files/experiment_" + timestamp + "/observation_values_" + obs_timestamp + ".csv"
            observation.load_observations(f_values)
        return observation
| 3,648 | 30.188034 | 114 | py |
advectionDiffusion | advectionDiffusion-main/KalmanFilter.py | """
Kalman filter update for advection diffusion example.
"""
import numpy as np
class Kalman:
    """Exact Kalman filter analysis step for the advection diffusion example.
    Holds the observation operator H and observation error covariance R and
    writes the updated moments back into the Statistics object."""
    def __init__(self, statistics, observation):
        self.statistics = statistics
        # Observation and obs error cov matrices
        self.H = observation.H
        self.R = observation.R
    def filter(self, forecasted_mean, forecasted_cov, obs):
        """Condition the forecast (mean, cov) on the observation vector obs.
        The Kalman gain is computed with np.linalg.solve instead of forming
        the explicit inverse of the innovation covariance S, which is
        numerically more stable and gives the same result:
        K = P H^T S^{-1} = (S^{-T} (P H^T)^T)^T."""
        # Innovation covariance S = H P H^T + R
        S = self.H @ forecasted_cov @ self.H.T + self.R
        K = np.linalg.solve(S.T, (forecasted_cov @ self.H.T).T).T
        updated_mean = forecasted_mean + K @ (obs - self.H @ forecasted_mean)
        updated_covariance = (np.eye(self.statistics.simulator.grid.N_x) - K @ self.H) @ forecasted_cov
        self.statistics.set(updated_mean, updated_covariance)
| 724 | 29.208333 | 103 | py |
advectionDiffusion | advectionDiffusion-main/run_FilteringComparisonLocalisationSingle.py | # %%
"""
Example:
python run_FilteringComparison.py -m ensemble_size
"""
# %%
import numpy as np
# %%
import Sampler
import Simulator
import Observation
import Statistics
import KalmanFilter
import ETKalmanFilter
import SLETKalmanFilter
import IEWParticleFilter
import Comparer
import RunningWriter
# %%
# Initialisation
print("Initialising...")
# Load a pre-generated experiment setup (grid, simulator, prior) by timestamp.
timestamp = "2022_03_02-12_44_46"
grid, simulator = Simulator.from_file(timestamp)
# Single mooring/observation site in the domain.
observation = Observation.Observation(grid)
observation.set_positions([[25,15]])
prior_args = Statistics.prior_args_from_file(timestamp)
print("done\n")
# %%
# LOCALISATION IEWPF: one model-error covariance Q per Matern range parameter
# phi (larger phi = shorter correlation range).
iewpfQphis = [3.0, 5.0, 7.0, 11.0]
iewpfQs = [ Sampler.Sampler(grid, {"mean_upshift" : 0.0, "matern_phi" : phi, "stddev" : simulator.noise_stddev} ).cov for phi in iewpfQphis]
# %%
# LOCALISATION LETKF: localisation radii paired index-wise with the phis
# above (index 0 is run as plain ETKF in the loop below, see trial_model == 0).
scale_rs = [100,9,6,3]
# %%
# Experiment size: per localisation model, trials_truth independent truths,
# each with trials_init ensemble initialisations of size N_e.
trials_truth = 20
trials_init = 5
N_e = 50
# %%
# Result arrays: reference KF moments and final ensemble states per method.
kfmeans = np.zeros((len(iewpfQphis), trials_truth*trials_init, grid.nx*grid.ny))
kfcovs = np.zeros((len(iewpfQphis), trials_truth*trials_init, grid.nx*grid.ny, grid.nx*grid.ny))
states_iewpf = np.zeros((len(iewpfQphis), trials_truth*trials_init, grid.nx*grid.ny, N_e))
states_letkf = np.zeros((len(iewpfQphis), trials_truth*trials_init, grid.nx*grid.ny, N_e))
# %%
# Repeating ensemble runs.
# NOTE(review): "trail_truth" is presumably a typo for "trial_truth";
# kept as-is since it is used consistently throughout this script.
for trial_model in range(len(iewpfQphis)):
    for trail_truth in range(trials_truth):
        # Truth: generate a synthetic truth trajectory and observe it at
        # the mooring after every 25 propagation steps (10 observation times).
        print("\nModel", trial_model, ", Truth", trail_truth)
        observation.clear_observations()
        statistics_truth = Statistics.Statistics(simulator, 1)
        statistics_truth.set_prior(prior_args)
        for t in range(10):
            statistics_truth.propagate(25)
            observation.observe(statistics_truth.mean)
        # KF: exact reference solution; deterministic, so it is computed once
        # per truth and reused for all ensemble initialisations below.
        print("KF DA")
        statistics_kf = Statistics.Statistics(simulator, safe_history=True)
        statistics_kf.set_prior(prior_args)
        kalmanFilter = KalmanFilter.Kalman(statistics_kf, observation)
        for t in range(observation.N_obs):
            statistics_kf.propagate(25)
            kalmanFilter.filter(statistics_kf.mean, statistics_kf.cov, observation.obses[t])
        for trial_init in range(trials_init):
            print("\nModel", trial_model, ", Truth", trail_truth, ", Init", trial_init)
            # ETKF: only for model index 0 (no localisation).
            if trial_model == 0:
                print("ETKF DA")
                statistics_etkf = Statistics.Statistics(simulator, N_e, safe_history=True)
                statistics_etkf.set_prior(prior_args)
                etkFilter = ETKalmanFilter.ETKalman(statistics_etkf, observation)
                for t in range(observation.N_obs):
                    statistics_etkf.propagate(25)
                    etkFilter.filter(statistics_etkf.ensemble.ensemble, observation.obses[t])
            # LETKF: localised variant with radius scale_rs[trial_model].
            if trial_model > 0:
                print("LETKF DA")
                statistics_letkf = Statistics.Statistics(simulator, N_e, safe_history=True)
                statistics_letkf.set_prior(prior_args)
                sletkFilter = SLETKalmanFilter.SLETKalman(statistics_letkf, observation, scale_rs[trial_model])
                for t in range(observation.N_obs):
                    statistics_letkf.propagate(25)
                    sletkFilter.filter(statistics_letkf.ensemble.ensemble, observation.obses[t])
            # IEWPF: model error is injected through Q inside the filter,
            # hence propagate(..., model_error=False).
            print("IEWPF DA")
            statistics_iewpf = Statistics.Statistics(simulator, N_e, safe_history=True)
            statistics_iewpf.set_prior(prior_args)
            iewpFilter = IEWParticleFilter.IEWParticle(statistics_iewpf, observation, beta=0.55, Q=iewpfQs[trial_model])
            for t in range(observation.N_obs):
                statistics_iewpf.propagate(25, model_error=False)
                iewpFilter.filter(statistics_iewpf.ensemble.ensemble, observation.obses[t])
            # Comparison: store final states; the ETKF result doubles as the
            # "LETKF" entry for model index 0.
            print("Storing")
            trial = trail_truth*trials_init + trial_init
            kfmeans[trial_model, trial] = statistics_kf.mean
            kfcovs[trial_model, trial] = statistics_kf.cov
            states_iewpf[trial_model, trial] = statistics_iewpf.ensemble.ensemble
            if trial_model == 0:
                states_letkf[trial_model, trial] = statistics_etkf.ensemble.ensemble
            else:
                states_letkf[trial_model, trial] = statistics_letkf.ensemble.ensemble
print("done")
# %%
import datetime
# Tag this result set with a fresh timestamp so repeated runs do not
# overwrite each other.
result_timestamp = datetime.datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
# Record which IEWPF model-error Matern phis were used in this run.
# `with` guarantees the handle is flushed and closed (the original open()
# call leaked the file handle).
file = "experiment_files/experiment_" + timestamp + "/localisation_results_" + result_timestamp
with open(file, "a") as f:
    f.write("IEWPF phis: " + ",".join([str(phi) for phi in iewpfQphis]))
# Persist reference KF moments and final ensemble states for post-processing.
np.save("experiment_files/experiment_" + timestamp + "/locSingle_KFmeans_"+result_timestamp+".npy", kfmeans)
np.save("experiment_files/experiment_" + timestamp + "/locSingle_KFcovs_"+result_timestamp+".npy", kfcovs)
np.save("experiment_files/experiment_" + timestamp + "/locSingle_IEWPFQ_"+result_timestamp+".npy", states_iewpf)
np.save("experiment_files/experiment_" + timestamp + "/locSingle_LETKFr_"+result_timestamp+".npy", states_letkf)
| 5,166 | 31.29375 | 140 | py |
flair | flair-master/setup.py | from pathlib import Path
from setuptools import find_packages, setup
# Runtime dependencies are maintained in requirements.txt, one per line.
# NOTE(review): a trailing newline in that file yields an empty string entry
# in this list — confirm the packaging toolchain tolerates it.
required = Path("requirements.txt").read_text(encoding="utf-8").split("\n")
setup(
    name="flair",
    version="0.12.2",
    description="A very simple framework for state-of-the-art NLP",
    # The README doubles as the long description on PyPI.
    long_description=Path("README.md").read_text(encoding="utf-8"),
    long_description_content_type="text/markdown",
    author="Alan Akbik",
    author_email="alan.akbik@gmail.com",
    url="https://github.com/flairNLP/flair",
    packages=find_packages(exclude="tests"),  # same as name
    license="MIT",
    install_requires=required,
    include_package_data=True,
    python_requires=">=3.7",
)
| 666 | 29.318182 | 75 | py |
flair | flair-master/collect_env.py | import torch
import transformers
import flair
def main():
    """Print Flair/PyTorch/Transformers versions and GPU availability.
    Output is formatted as Markdown headings, suitable for pasting into
    a GitHub issue / bug report.
    """
    print("#### Versions:")
    print(f"##### Flair\n{flair.__version__}")
    print(f"##### Pytorch\n{torch.__version__}")
    print(f"##### Transformers\n{transformers.__version__}")
    # NOTE(review): heading level differs from the version entries
    # ("####" vs "#####") — possibly intentional as a separate section.
    print(f"#### GPU\n{torch.cuda.is_available()}")
if __name__ == "__main__":
    main()
| 338 | 18.941176 | 60 | py |
flair | flair-master/examples/ner/run_ner.py | import inspect
import json
import logging
import os
import sys
from dataclasses import dataclass, field
import torch
from transformers import HfArgumentParser
import flair
from flair import set_seed
from flair.embeddings import TransformerWordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
logger = logging.getLogger("flair")
logger.setLevel(level="INFO")
@dataclass
class ModelArguments:
    """Arguments controlling the transformer backbone and the tagger head."""
    model_name_or_path: str = field(
        metadata={"help": "The model checkpoint for weights initialization."},
    )
    layers: str = field(default="-1", metadata={"help": "Layers to be fine-tuned."})
    subtoken_pooling: str = field(
        default="first",
        metadata={"help": "Subtoken pooling strategy used for fine-tuned."},
    )
    hidden_size: int = field(default=256, metadata={"help": "Hidden size for NER model."})
    use_crf: bool = field(default=False, metadata={"help": "Whether to use a CRF on-top or not."})
@dataclass
class TrainingArguments:
    """Arguments controlling the fine-tuning loop (epochs, batching, LR, device)."""
    num_epochs: int = field(default=10, metadata={"help": "The number of training epochs."})
    batch_size: int = field(default=8, metadata={"help": "Batch size used for training."})
    mini_batch_chunk_size: int = field(
        default=1,
        metadata={"help": "If smaller than batch size, batches will be chunked."},
    )
    learning_rate: float = field(default=5e-05, metadata={"help": "Learning rate"})
    seed: int = field(default=42, metadata={"help": "Seed used for reproducible fine-tuning results."})
    device: str = field(default="cuda:0", metadata={"help": "CUDA device string."})
    weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for optimizer."})
    embeddings_storage_mode: str = field(default="none", metadata={"help": "Defines embedding storage method."})
@dataclass
class FlertArguments:
    """Arguments for the FLERT approach (document-level context features)."""
    context_size: int = field(default=0, metadata={"help": "Context size when using FLERT approach."})
    respect_document_boundaries: bool = field(
        default=False,
        metadata={"help": "Whether to respect document boundaries or not when using FLERT."},
    )
@dataclass
class DataArguments:
    """Arguments selecting the Flair NER dataset and the output location."""
    dataset_name: str = field(metadata={"help": "Flair NER dataset name."})
    dataset_arguments: str = field(default="", metadata={"help": "Dataset arguments for Flair NER dataset."})
    output_dir: str = field(
        default="resources/taggers/ner",
        metadata={"help": "Defines output directory for final fine-tuned model."},
    )
def get_flair_corpus(data_args):
    """Instantiate the Flair NER corpus selected by ``data_args``.
    Looks up the dataset class by name among Flair's sequence-labeling
    datasets and constructs it with the optional JSON-encoded keyword
    arguments from ``data_args.dataset_arguments``.
    """
    # Collect all NER-style dataset classes shipped with Flair.
    available_datasets = {
        member_name: member
        for member_name, member in inspect.getmembers(flair.datasets.sequence_labeling)
        if inspect.isclass(member) and member_name.startswith(("NER", "CONLL", "WNUT"))
    }
    # Optional constructor kwargs, passed on the command line as a JSON string.
    dataset_args = json.loads(data_args.dataset_arguments) if data_args.dataset_arguments else {}
    dataset_name = data_args.dataset_name
    if dataset_name not in available_datasets:
        raise ValueError(f"Dataset name {dataset_name} is not a valid Flair datasets name!")
    return available_datasets[dataset_name](**dataset_args)
def main():
    """End-to-end NER fine-tuning: parse args, load corpus, build tagger, train."""
    # Arguments come either from a single JSON file or from the command line.
    parser = HfArgumentParser((ModelArguments, TrainingArguments, FlertArguments, DataArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        (
            model_args,
            training_args,
            flert_args,
            data_args,
        ) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (
            model_args,
            training_args,
            flert_args,
            data_args,
        ) = parser.parse_args_into_dataclasses()
    set_seed(training_args.seed)
    flair.device = training_args.device
    corpus = get_flair_corpus(data_args)
    logger.info(corpus)
    # Build the label dictionary from the corpus (no UNK label for NER).
    tag_type: str = "ner"
    tag_dictionary = corpus.make_label_dictionary(tag_type, add_unk=False)
    logger.info(tag_dictionary)
    # Transformer word embeddings, optionally with FLERT document context.
    embeddings = TransformerWordEmbeddings(
        model=model_args.model_name_or_path,
        layers=model_args.layers,
        subtoken_pooling=model_args.subtoken_pooling,
        fine_tune=True,
        use_context=flert_args.context_size,
        respect_document_boundaries=flert_args.respect_document_boundaries,
    )
    # Plain fine-tuning head: no RNN, no embedding reprojection.
    tagger = SequenceTagger(
        hidden_size=model_args.hidden_size,
        embeddings=embeddings,
        tag_dictionary=tag_dictionary,
        tag_type=tag_type,
        use_crf=model_args.use_crf,
        use_rnn=False,
        reproject_embeddings=False,
    )
    trainer = ModelTrainer(tagger, corpus)
    trainer.fine_tune(
        data_args.output_dir,
        learning_rate=training_args.learning_rate,
        mini_batch_size=training_args.batch_size,
        mini_batch_chunk_size=training_args.mini_batch_chunk_size,
        max_epochs=training_args.num_epochs,
        embeddings_storage_mode=training_args.embeddings_storage_mode,
        weight_decay=training_args.weight_decay,
    )
    # Persist the argument dataclasses next to the model for reproducibility.
    torch.save(model_args, os.path.join(data_args.output_dir, "model_args.bin"))
    torch.save(training_args, os.path.join(data_args.output_dir, "training_args.bin"))
    # finally, print model card for information
    tagger.print_model_card()
if __name__ == "__main__":
    main()
| 5,261 | 32.303797 | 112 | py |
flair | flair-master/examples/ner/__init__.py | 0 | 0 | 0 | py | |
flair | flair-master/flair/optim.py | import logging
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau, _LRScheduler
from torch.optim.optimizer import required # type: ignore[attr-defined]
log = logging.getLogger("flair")
class SGDW(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum) with weight decay.
    Implementation from the paper `Fixing Weight Decay Regularization in Adam`_:
    the weight decay is decoupled from the gradient and applied directly to
    the parameters. Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.
    Args:
    ----
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay factor (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
    .. _Fixing Weight Decay Regularization in Adam:
        https://arxiv.org/abs/1711.05101
    Example:
    -------
        >>> optimizer = SGDW(model.parameters(), lr=0.1, momentum=0.9,
                             weight_decay=1e-5)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et. al. and implementations in some other frameworks.
        Considering the specific case of Momentum, the update can be written as
        .. math::
                  v = \rho * v + g \\
                  p = p - lr * v
        where p, g, v and :math:`\rho` denote the parameters, gradient,
        velocity, and momentum respectively.
        This is in contrast to Sutskever et. al. and
        other frameworks which employ an update of the form
        .. math::
             v = \rho * v + lr * g \\
             p = p - v
        The Nesterov version is analogously modified.
    """
    def __init__(
        self,
        params,
        lr=required,
        momentum=0,
        dampening=0,
        weight_decay=0,
        nesterov=False,
    ) -> None:
        # Validate hyper-parameters eagerly so misconfiguration fails at construction.
        if lr is not required and lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if momentum < 0.0:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = {
            "lr": lr,
            "momentum": momentum,
            "dampening": dampening,
            "weight_decay": weight_decay,
            "nesterov": nesterov,
        }
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super().__init__(params, defaults)
    def __setstate__(self, state):
        super().__setstate__(state)
        # Checkpoints from older versions may lack "nesterov"; default to False.
        for group in self.param_groups:
            group.setdefault("nesterov", False)
    def step(self, closure=None):
        """Performs a single optimization step.
        Parameters
        ----------
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
        Returns:
        -------
        loss (float, optional): The loss if closure was set
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            dampening = group["dampening"]
            nesterov = group["nesterov"]
            for p in group["params"]:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if momentum != 0:
                    param_state = self.state[p]
                    if "momentum_buffer" not in param_state:
                        # First step: seed the buffer with the raw gradient
                        # (no dampening applied to the first contribution).
                        buf = param_state["momentum_buffer"] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        # buf = momentum * buf + (1 - dampening) * grad.
                        # Uses the modern `add_(tensor, alpha=...)` signature;
                        # the former `add_(scalar, tensor)` overload is
                        # deprecated/removed in recent PyTorch versions.
                        buf = param_state["momentum_buffer"]
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    d_p = d_p.add(buf, alpha=momentum) if nesterov else buf
                if weight_decay != 0:
                    # Decoupled weight decay: p <- p - weight_decay * p,
                    # applied to the weights directly, not via the gradient.
                    p.data.add_(p.data, alpha=-weight_decay)
                p.data.add_(d_p, alpha=-group["lr"])
        return loss
class ExpAnnealLR(_LRScheduler):
    """Exponential learning-rate annealing schedule.
    Geometrically interpolates each parameter group's learning rate from its
    initial value to ``end_lr`` over ``iterations`` scheduler steps.
    Args:
    ----
        optimizer (Optimizer): Wrapped optimizer.
        end_lr (float): The final learning rate.
        iterations (int): The number of iterations over which to anneal the
            learning rate.
        last_epoch (int): The index of the last iteration. Default: -1.
    """
    def __init__(self, optimizer, end_lr, iterations, last_epoch=-1) -> None:
        self.end_lr = end_lr
        self.iterations = iterations
        super().__init__(optimizer, last_epoch=last_epoch)
    def get_lr(self):
        # Fraction of the annealing schedule completed after this step.
        progress = (self.last_epoch + 1) / self.iterations
        annealed = []
        for initial_lr in self.base_lrs:
            decay_factor = (self.end_lr / initial_lr) ** progress
            annealed.append(initial_lr * decay_factor)
        return annealed
class LinearSchedulerWithWarmup(LambdaLR):
    """Linear learning-rate warmup followed by linear decay to zero.
    During the first ``num_warmup_steps`` steps the learning rate grows
    linearly from 0 to the optimizer's initial lr; afterwards it decreases
    linearly and reaches 0 at ``num_train_steps``. Implemented as a
    :class:`LambdaLR` whose multiplicative factor is recomputed on every
    scheduler.step() call.
    Args:
    ----
        optimizer (Optimizer): Wrapped optimizer.
        num_train_steps (int): total number of training steps (number of batches * epochs).
        num_warmup_steps (int): number of training steps for learning rate warmup.
        last_epoch (int): The index of the last iteration. Default: -1. The scheduler
            will simply restart when resuming training from a checkpoint.
    """
    def __init__(self, optimizer, num_train_steps, num_warmup_steps, last_epoch=-1) -> None:
        def scale_factor(step: int):
            if step < num_warmup_steps:
                # Warmup: fraction of the warmup phase completed so far.
                return step / max(1, num_warmup_steps)
            # Decay: remaining fraction of training, clipped at zero.
            remaining = num_train_steps - step
            return max(0.0, remaining / max(1, num_train_steps - num_warmup_steps))
        super().__init__(optimizer, lr_lambda=scale_factor, last_epoch=last_epoch)
class ReduceLRWDOnPlateau(ReduceLROnPlateau):
    """Reduce learning rate and weight decay when a metric has stopped improving.
    Models often benefit from reducing the learning rate by
    a factor of 2-10 once learning stagnates. This scheduler reads a metric
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate and weight decay factor is reduced for
    optimizers that implement the weight decay method from the paper
    `Fixing Weight Decay Regularization in Adam`_.
    .. _Fixing Weight Decay Regularization in Adam:
        https://arxiv.org/abs/1711.05101
    Args:
    ----
        optimizer (Optimizer): Wrapped optimizer.
        mode (str): One of `min`, `max`. In `min` mode, lr will
            be reduced when the quantity monitored has stopped
            decreasing; in `max` mode it will be reduced when the
            quantity monitored has stopped increasing. Default: 'min'.
        factor (float): Factor by which the learning rate will be
            reduced. new_lr = lr * factor. Default: 0.1.
        patience (int): Number of epochs with no improvement after
            which learning rate will be reduced. For example, if
            `patience = 2`, then we will ignore the first 2 epochs
            with no improvement, and will only decrease the LR after the
            3rd epoch if the loss still hasn't improved then.
            Default: 10.
        verbose (bool): If ``True``, prints a message to stdout for
            each update. Default: ``False``.
        threshold (float): Threshold for measuring the new optimum,
            to only focus on significant changes. Default: 1e-4.
        threshold_mode (str): One of `rel`, `abs`. In `rel` mode,
            dynamic_threshold = best * ( 1 + threshold ) in 'max'
            mode or best * ( 1 - threshold ) in `min` mode.
            In `abs` mode, dynamic_threshold = best + threshold in
            `max` mode or best - threshold in `min` mode. Default: 'rel'.
        cooldown (int): Number of epochs to wait before resuming
            normal operation after lr has been reduced. Default: 0.
        min_lr (float or list): A scalar or a list of scalars. A
            lower bound on the learning rate of all param groups
            or each group respectively. Default: 0.
        eps (float): Minimal decay applied to lr. If the difference
            between new and old lr is smaller than eps, the update is
            ignored. Default: 1e-8.
    Example:
    -------
        >>> optimizer = AdamW(model.parameters(), lr=0.1, weight_decay=1e-3)
        >>> scheduler = ReduceLRWDOnPlateau(optimizer, 'min')
        >>> for epoch in range(10):
        >>>     train(...)
        >>>     val_loss = validate(...)
        >>>     # Note that step should be called after validate()
        >>>     scheduler.step(val_loss)
    """
    def step(self, metrics, epoch=None):
        """Record the latest metric value and reduce lr/weight-decay on stagnation.
        Mirrors ReduceLROnPlateau.step() but additionally calls
        _reduce_weight_decay() whenever the learning rate is reduced.
        """
        current = metrics
        if epoch is None:
            epoch = self.last_epoch = self.last_epoch + 1
        self.last_epoch = epoch
        if self.is_better(current, self.best):
            self.best = current
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        if self.in_cooldown:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0  # ignore any bad epochs in cooldown
        if self.num_bad_epochs > self.patience:
            # Stagnation detected: shrink lr (parent helper) and weight decay,
            # then enter the cooldown period.
            self._reduce_lr(epoch)
            self._reduce_weight_decay(epoch)
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0
    def _reduce_weight_decay(self, epoch):
        """Multiply each group's non-zero weight decay by `factor`, bounded below.
        Updates are skipped when the change would be smaller than `eps`.
        """
        for i, param_group in enumerate(self.optimizer.param_groups):
            if param_group["weight_decay"] != 0:
                old_weight_decay = float(param_group["weight_decay"])
                # NOTE(review): min_lrs (the lr lower bound) is reused as the
                # lower bound for the weight decay here — confirm intentional.
                new_weight_decay = max(old_weight_decay * self.factor, self.min_lrs[i])
                if old_weight_decay - new_weight_decay > self.eps:
                    param_group["weight_decay"] = new_weight_decay
                    if self.verbose:
                        log.info(f"Epoch {epoch}: reducing weight decay factor of group {i} to {new_weight_decay:.4e}.")
| 11,041 | 38.435714 | 120 | py |
flair | flair-master/flair/tokenization.py | import logging
import sys
from abc import ABC, abstractmethod
from typing import Callable, List
from segtok.segmenter import split_single
from segtok.tokenizer import split_contractions, word_tokenizer
log = logging.getLogger("flair")
class Tokenizer(ABC):
    r"""Base class for all tokenizers.
    A tokenizer encapsulates an algorithm or model that splits plain text
    into individual tokens / words. Concrete subclasses must implement
    :meth:`tokenize`; they may additionally override :meth:`name` to return
    an identifier that uniquely describes their configuration.
    """
    @abstractmethod
    def tokenize(self, text: str) -> List[str]:
        raise NotImplementedError
    @property
    def name(self) -> str:
        # Default identifier: the concrete class name.
        return type(self).__name__
class SpacyTokenizer(Tokenizer):
    """Tokenizer using spacy under the hood.
    Implementation of :class:`Tokenizer`, using models from Spacy.
    :param model a Spacy V2 model or the name of the model to load.
    """
    def __init__(self, model) -> None:
        super().__init__()
        # spacy is an optional dependency: fail with installation advice if absent.
        try:
            import spacy
            from spacy.language import Language
        except ImportError:
            raise ImportError(
                "Please install Spacy v3.4.4 or better before using the Spacy tokenizer, "
                "otherwise you can use SegtokTokenizer as advanced tokenizer."
            )
        # Accept either an already-loaded pipeline or a model name to load.
        if isinstance(model, Language):
            self.model = model
        elif isinstance(model, str):
            self.model = spacy.load(model)
        else:
            raise AssertionError(
                "Unexpected type of parameter model. Please provide a loaded "
                "spacy model or the name of the model to load."
            )
    def tokenize(self, text: str) -> List[str]:
        from spacy.tokens.doc import Doc
        # make_doc runs only the tokenizer, not the full NLP pipeline.
        doc: Doc = self.model.make_doc(text)
        words: List[str] = []
        for word in doc:
            # Skip whitespace-only tokens.
            if len(word.text.strip()) == 0:
                continue
            words.append(word.text)
        return words
    @property
    def name(self) -> str:
        # Include model name and version so the identifier pins the configuration.
        return self.__class__.__name__ + "_" + self.model.meta["name"] + "_" + self.model.meta["version"]
class SegtokTokenizer(Tokenizer):
    """Tokenizer using segtok, a third party library dedicated to rules-based Indo-European languages.
    For further details see: https://github.com/fnl/segtok
    """
    def __init__(self) -> None:
        super().__init__()
    def tokenize(self, text: str) -> List[str]:
        return SegtokTokenizer.run_tokenize(text)
    @staticmethod
    def run_tokenize(text: str) -> List[str]:
        words: List[str] = []
        # Split into sentences first, then tokenize each sentence; contractions
        # (e.g. "don't") are split into their components as well.
        sentences = split_single(text)
        for sentence in sentences:
            contractions = split_contractions(word_tokenizer(sentence))
            words.extend(contractions)
        # Drop any empty tokens segtok may have produced.
        words = list(filter(None, words))
        return words
class SpaceTokenizer(Tokenizer):
    """Tokenizer that splits text on the plain space character (U+0020) only.
    Runs of consecutive spaces yield no empty tokens. Other whitespace
    (tabs, newlines, ...) is NOT treated as a separator and stays part of
    its token.
    """
    def __init__(self) -> None:
        super().__init__()
    def tokenize(self, text: str) -> List[str]:
        return SpaceTokenizer.run_tokenize(text)
    @staticmethod
    def run_tokenize(text: str) -> List[str]:
        # str.split(" ") cuts exactly at single spaces (unlike the no-argument
        # split, which would also cut at tabs/newlines); filtering drops the
        # empty strings produced by repeated, leading or trailing spaces.
        # This replaces the original hand-rolled character-accumulation loop
        # with the idiomatic, equivalent form.
        return [token for token in text.split(" ") if token]
class JapaneseTokenizer(Tokenizer):
    """Tokenizer using konoha to support popular japanese tokenizers.
    Tokenizer using konoha, a third party library which supports
    multiple Japanese tokenizer such as MeCab, Janome and SudachiPy.
    For further details see:
        https://github.com/himkt/konoha
    """
    def __init__(self, tokenizer: str, sudachi_mode: str = "A") -> None:
        super().__init__()
        # Only these backends are wired up; reject anything else early.
        available_tokenizers = ["mecab", "janome", "sudachi"]
        if tokenizer.lower() not in available_tokenizers:
            raise NotImplementedError(
                f"Currently, {tokenizer} is only supported. Supported tokenizers: {available_tokenizers}."
            )
        # konoha is an optional dependency: print installation advice and exit if absent.
        try:
            import konoha
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "konoha" is not installed!')
            log.warning(
                '- If you want to use MeCab, install mecab with "sudo apt install mecab libmecab-dev mecab-ipadic".'
            )
            log.warning('- Install konoha with "pip install konoha[{tokenizer_name}]"')
            log.warning(' - You can choose tokenizer from ["mecab", "janome", "sudachi"].')
            log.warning("Note that we Flair support only konoha<5.0.0,>=4.0.0")
            log.warning("-" * 100)
            sys.exit()
        self.tokenizer = tokenizer
        # sudachi_mode is only meaningful for the "sudachi" backend.
        self.sentence_tokenizer = konoha.SentenceTokenizer()
        self.word_tokenizer = konoha.WordTokenizer(tokenizer, mode=sudachi_mode)
    def tokenize(self, text: str) -> List[str]:
        words: List[str] = []
        # Sentence-split first, then word-tokenize each sentence.
        sentences = self.sentence_tokenizer.tokenize(text)
        for sentence in sentences:
            konoha_tokens = self.word_tokenizer.tokenize(sentence)
            words.extend(list(map(str, konoha_tokens)))
        return words
    @property
    def name(self) -> str:
        # Include the backend name so the identifier pins the configuration.
        return self.__class__.__name__ + "_" + self.tokenizer
class TokenizerWrapper(Tokenizer):
    """Adapter exposing a plain tokenizer function through the :class:`Tokenizer` interface."""
    def __init__(self, tokenizer_func: Callable[[str], List[str]]) -> None:
        super().__init__()
        self.tokenizer_func = tokenizer_func
    def tokenize(self, text: str) -> List[str]:
        # Delegate straight to the wrapped callable.
        return self.tokenizer_func(text)
    @property
    def name(self) -> str:
        # Identifier combines the wrapper class with the wrapped function's name.
        return f"{self.__class__.__name__}_{self.tokenizer_func.__name__}"
class SciSpacyTokenizer(Tokenizer):
    """Tokenizer that uses the en_core_sci_sm Spacy model and some special heuristics.

    Implementation of :class:`Tokenizer` which uses the en_core_sci_sm Spacy model
    extended by special heuristics to consider characters such as "(", ")" "-" as
    additional token separators. The latter distinguishes this implementation from
    :class:`SpacyTokenizer`.

    Note, if you want to use the "normal" SciSpacy tokenization just use
    :class:`SpacyTokenizer`.
    """

    def __init__(self) -> None:
        """Load the ``en_core_sci_sm`` model and install custom prefix/infix split rules.

        :raises ImportError: if scispacy (and thereby spacy) is not installed.
        """
        super().__init__()

        try:
            import spacy
            from spacy.lang import char_classes
        except ImportError:
            raise ImportError(
                " Please install scispacy version 0.5.1 (recommended) or higher before using the SciSpacy tokenizer, "
                "otherwise you can use SegtokTokenizer as alternative implementation.\n"
                " You can install scispacy (version 0.5.1) by running:\n\n"
                " pip install scispacy==0.5.1\n\n"
                " By default HunFlair uses the `en_core_sci_sm` model. You can install the model by running:\n\n"
                " pip install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.5.1/en_core_sci_sm-0.5.1.tar.gz\n\n"
                " Note that the scispacy version and the version of the model must match to work properly!"
            )

        def combined_rule_prefixes() -> List[str]:
            """Helper function that returns the prefix pattern for the tokenizer.

            It is a helper function to accommodate spacy tests that only test prefixes.
            """
            # also split on punctuation characters at the start of a token
            prefix_punct = char_classes.PUNCT.replace("|", " ")
            prefixes = [
                "§",
                "%",
                "=",
                "\\+",
                *char_classes.split_chars(prefix_punct),
                *char_classes.LIST_ELLIPSES,
                *char_classes.LIST_QUOTES,
                *char_classes.LIST_CURRENCY,
                *char_classes.LIST_ICONS,
            ]
            return prefixes

        # infix patterns: positions *inside* a token at which it gets split
        infixes = (
            char_classes.LIST_ELLIPSES
            + char_classes.LIST_ICONS
            + [
                r"x",  # added this special x character to tokenize it separately
                r"[\(\)\[\]\{\}]",  # want to split at every bracket
                r"/",  # want to split at every slash
                r"(?<=[0-9])[+\-\*^](?=[0-9-])",
                rf"(?<=[{char_classes.ALPHA_LOWER}])\.(?=[{char_classes.ALPHA_UPPER}])",
                r"(?<=[{a}]),(?=[{a}])".format(a=char_classes.ALPHA),
                r'(?<=[{a}])[?";:=,.]*(?:{h})(?=[{a}])'.format(a=char_classes.ALPHA, h=char_classes.HYPHENS),
                r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=char_classes.ALPHA),
            ]
        )

        prefix_re = spacy.util.compile_prefix_regex(combined_rule_prefixes())
        infix_re = spacy.util.compile_infix_regex(infixes)

        # only the tokenizer is needed, so all pipeline components are disabled
        self.model = spacy.load(
            "en_core_sci_sm",
            disable=["tagger", "ner", "parser", "textcat", "lemmatizer"],
        )
        # replace the model's default prefix/infix handling with the custom rules
        self.model.tokenizer.prefix_search = prefix_re.search
        self.model.tokenizer.infix_finditer = infix_re.finditer

    def tokenize(self, text: str) -> List[str]:
        """Tokenize ``text`` with the customized spacy pipeline and return token strings."""
        sentence = self.model(text)
        words: List[str] = []
        for word in sentence:
            words.append(word.text)
        return words

    @property
    def name(self) -> str:
        return self.__class__.__name__ + "_" + self.model.meta["name"] + "_" + self.model.meta["version"]
# --- end of file record; extraction metadata removed (9,873 chars, avg line 33.65, max line 133, .py) ---
# === repo: flair — file: flair-master/flair/inference_utils.py ===
import logging
import pickle
import re
import shutil
import sqlite3
from pathlib import Path
from typing import Union
import numpy as np
import torch
from tqdm import tqdm
import flair
from flair.embeddings import WordEmbeddings
# this is the default init size of a lmdb database for embeddings
DEFAULT_MAP_SIZE = 100 * 1024 * 1024 * 1024  # 100 GiB virtual map size

# module-level logger shared by this file
logger = logging.getLogger("flair")
class WordEmbeddingsStore:
    """Class to simulate a WordEmbeddings class from flair.

    Run this to generate a headless (without word embeddings) model as well a stored word embeddings:

    >>> from flair.inference_utils import WordEmbeddingsStore
    >>> from flair.models import SequenceTagger
    >>> import pickle
    >>> tagger = SequenceTagger.load("multi-ner-fast")
    >>> WordEmbeddingsStore.create_stores(tagger)
    >>> pickle.dump(tagger, open("multi-ner-fast-headless.pickle", "wb"))

    The same but using LMDB as memory database:

    >>> from flair.inference_utils import WordEmbeddingsStore
    >>> from flair.models import SequenceTagger
    >>> import pickle
    >>> tagger = SequenceTagger.load("multi-ner-fast")
    >>> WordEmbeddingsStore.create_stores(tagger, backend='lmdb')
    >>> pickle.dump(tagger, open("multi-ner-fast-headless.pickle", "wb"))

    Then this can be used as follows:

    >>> from flair.data import Sentence
    >>> tagger = pickle.load(open("multi-ner-fast-headless.pickle", "rb"))
    >>> WordEmbeddingsStore.load_stores(tagger)
    >>> text = "Schade um den Ameisenbären. Lukas Bärfuss veröffentlicht Erzählungen aus zwanzig Jahren."
    >>> sentence = Sentence(text)
    >>> tagger.predict(sentence)
    >>> print(sentence.get_spans('ner'))

    The same but using LMDB as memory database:

    >>> from flair.data import Sentence
    >>> tagger = pickle.load(open("multi-ner-fast-headless.pickle", "rb"))
    >>> WordEmbeddingsStore.load_stores(tagger, backend='lmdb')
    >>> text = "Schade um den Ameisenbären. Lukas Bärfuss veröffentlicht Erzählungen aus zwanzig Jahren."
    >>> sentence = Sentence(text)
    >>> tagger.predict(sentence)
    >>> print(sentence.get_spans('ner'))
    """

    def __init__(self, embedding: WordEmbeddings, backend="sqlite", verbose=True) -> None:
        """Instantiates the WordEmbeddingsStore.

        :param embedding: Flair WordEmbeddings instance.
        :param backend: cache database backend name e.g ``'sqlite'``, ``'lmdb'``.
            Default value is ``'sqlite'``.
        :param verbose: If `True` print information on standard output
        :raises ValueError: if an unknown backend name is given.
        """
        self.items = ""

        # get db filename from embedding name
        self.name = embedding.name
        self.store_path: Path = WordEmbeddingsStore._get_store_path(embedding, backend)
        if verbose:
            logger.info(f"store filename: {self.store_path!s}")
        self.backend: Union[WordEmbeddings, WordEmbeddingsStoreBackend]
        if backend == "sqlite":
            self.backend = SqliteWordEmbeddingsStoreBackend(embedding, verbose)
        elif backend == "lmdb":
            self.backend = LmdbWordEmbeddingsStoreBackend(embedding, verbose)
        else:
            raise ValueError(f'The given backend "{backend}" is not available.')
        # In case initialization of cached version failed, just fallback to the original WordEmbeddings
        if not self.backend.is_ok:
            self.backend = WordEmbeddings(embedding.embeddings)

    def _get_vector(self, word="house"):
        # delegate lookup to the active backend
        return self.backend._get_vector(word)

    def embed(self, sentences):
        """Set the stored embedding vector on every token of the given sentences."""
        for sentence in sentences:
            for token in sentence:
                t = torch.tensor(self._get_vector(word=token.text.lower()))
                token.set_embedding(self.name, t)

    def get_names(self):
        return [self.name]

    @staticmethod
    def _get_store_path(embedding, backend="sqlite"):
        """Get the filename of the store."""
        cache_dir = flair.cache_root
        embedding_filename = re.findall("/(embeddings/.*)", embedding.name)[0]
        store_path = cache_dir / (embedding_filename + "." + backend)
        return store_path

    @staticmethod
    def _word_embeddings(model):
        """Return the list of embeddings attached to a flair model (may be empty)."""
        # SequenceTagger
        if hasattr(model, "embeddings"):
            embeds = model.embeddings.embeddings
        # TextClassifier
        elif hasattr(model, "document_embeddings") and hasattr(model.document_embeddings, "embeddings"):
            embeds = model.document_embeddings.embeddings.embeddings
        else:
            embeds = []
        return embeds

    @staticmethod
    def create_stores(model, backend="sqlite"):
        """Creates database versions of all word embeddings in the model.

        Also deletes the original vectors to save memory.
        """
        for embedding in WordEmbeddingsStore._word_embeddings(model):
            # deliberately an exact type check (not isinstance): only plain
            # WordEmbeddings are converted to stores
            if type(embedding) is WordEmbeddings:
                WordEmbeddingsStore(embedding, backend)
                del embedding.precomputed_word_embeddings

    @staticmethod
    def load_stores(model, backend="sqlite"):
        """Loads the db versions of all word embeddings in the model."""
        embeds = WordEmbeddingsStore._word_embeddings(model)
        for i, embedding in enumerate(embeds):
            if type(embedding) is WordEmbeddings:
                embeds[i] = WordEmbeddingsStore(embedding, backend)

    @staticmethod
    def delete_stores(model, backend="sqlite"):
        """Deletes the db versions of all word embeddings."""
        for embedding in WordEmbeddingsStore._word_embeddings(model):
            # Bug fix: the backend argument was previously ignored here, so
            # delete_stores(model, backend="lmdb") computed the sqlite path
            # and never deleted the lmdb store.
            store_path: Path = WordEmbeddingsStore._get_store_path(embedding, backend)
            logger.info(f"delete store: {store_path!s}")
            if store_path.is_file():
                store_path.unlink()
            elif store_path.is_dir():
                shutil.rmtree(store_path, ignore_errors=False, onerror=None)
class WordEmbeddingsStoreBackend:
    """Common interface of the concrete embedding-store backends (sqlite, lmdb)."""

    def __init__(self, embedding, backend, verbose=True) -> None:
        # derive the on-disk location of this store from the embedding's name
        self.name = embedding.name
        self.store_path: Path = WordEmbeddingsStore._get_store_path(embedding, backend)

    @property
    def is_ok(self):
        """A backend is usable once its vector dimensionality ``k`` has been determined."""
        return hasattr(self, "k")

    def _get_vector(self, word="house"):
        """Look up the embedding vector for ``word`` (implemented by subclasses)."""
        pass
class SqliteWordEmbeddingsStoreBackend(WordEmbeddingsStoreBackend):
    """Word-embedding store backed by a single-table sqlite database.

    The ``embedding`` table has one ``word`` column followed by ``k`` float
    columns (one per vector dimension), indexed on ``word`` for fast lookup.
    """

    def __init__(self, embedding, verbose) -> None:
        super().__init__(embedding, "sqlite", verbose)
        # if embedding database already exists, just open it and infer k
        if self.store_path.exists() and self.store_path.is_file():
            try:
                self.db = sqlite3.connect(str(self.store_path))
                cursor = self.db.cursor()
                cursor.execute("SELECT * FROM embedding LIMIT 1;")
                result = list(cursor)
                # vector size = number of columns minus the word column
                self.k = len(result[0]) - 1
                return
            except sqlite3.Error as err:
                logger.exception(f"Fail to open sqlite database {self.store_path!s}: {err!s}")
        # otherwise, push the embedding vectors into a freshly created database
        if hasattr(embedding, "precomputed_word_embeddings"):
            self.db = sqlite3.connect(str(self.store_path))
            pwe = embedding.precomputed_word_embeddings
            self.k = pwe.vector_size
            self.db.execute("DROP TABLE IF EXISTS embedding;")
            self.db.execute(
                f"CREATE TABLE embedding(word text,{','.join('v' + str(i) + ' float' for i in range(self.k))});"
            )
            vectors_it = ([word, *pwe.get_vector(word).tolist()] for word in pwe.vocab)
            if verbose:
                logger.info("load vectors to store")
            self.db.executemany(
                f"INSERT INTO embedding(word,{','.join('v' + str(i) for i in range(self.k))}) \
                values ({','.join(['?'] * (1 + self.k))})",
                tqdm(vectors_it),
            )
            self.db.execute("DROP INDEX IF EXISTS embedding_index;")
            self.db.execute("CREATE INDEX embedding_index ON embedding(word);")
            self.db.commit()
            self.db.close()

    def _get_vector(self, word="house"):
        """Look up the vector for ``word``; returns a zero vector when absent.

        A fresh connection is opened per call so lookups can be made from
        multiple threads/processes.
        """
        db = sqlite3.connect(str(self.store_path))
        cursor = db.cursor()
        # Bug fix: use a parameterized query instead of interpolating the word
        # into the SQL string. This is SQL-injection safe and correctly finds
        # words containing quote characters: the old code stripped '"' from the
        # lookup word while the insert stored words verbatim, so such lookups
        # always missed.
        cursor.execute("SELECT * FROM embedding WHERE word = ?;", (word,))
        result = list(cursor)
        db.close()
        if not result:
            return [0.0] * self.k
        # drop the leading word column, keep only the vector components
        return result[0][1:]
class LmdbWordEmbeddingsStoreBackend(WordEmbeddingsStoreBackend):
    """Word-embedding store backed by an LMDB key-value database.

    Keys are UTF-8 encoded words, values are pickled numpy vectors.
    """

    def __init__(self, embedding, verbose) -> None:
        super().__init__(embedding, "lmdb", verbose)
        try:
            import lmdb

            # if embedding database already exists
            if self.store_path.exists() and self.store_path.is_dir():
                # open the database in read mode
                try:
                    self.env = lmdb.open(
                        str(self.store_path),
                        readonly=True,
                        max_readers=2048,
                        max_spare_txns=4,
                    )
                    if self.env:
                        # we need to set self.k: read one stored vector and take its length
                        with self.env.begin() as txn:
                            cursor = txn.cursor()
                            for _key, value in cursor:
                                vector = pickle.loads(value)
                                self.k = vector.shape[0]
                                break
                            cursor.close()
                        return
                except lmdb.Error as err:
                    logger.exception(f"Fail to open lmdb database {self.store_path!s}: {err!s}")
            # create and load the database in write mode
            if hasattr(embedding, "precomputed_word_embeddings"):
                pwe = embedding.precomputed_word_embeddings
                self.k = pwe.vector_size
                self.store_path.mkdir(parents=True, exist_ok=True)
                self.env = lmdb.open(str(self.store_path), map_size=DEFAULT_MAP_SIZE)
                if verbose:
                    logger.info("load vectors to store")
                txn = self.env.begin(write=True)
                for word in tqdm(pwe.vocab.keys()):
                    vector = pwe.get_vector(word)
                    # LMDB keys have a maximum size; words that are too long are skipped
                    if len(word.encode(encoding="UTF-8")) < self.env.max_key_size():
                        txn.put(word.encode(encoding="UTF-8"), pickle.dumps(vector))
                txn.commit()
                return
        except ModuleNotFoundError:
            logger.warning("-" * 100)
            logger.warning('ATTENTION! The library "lmdb" is not installed!')
            logger.warning('To use LMDB, please first install with "pip install lmdb"')
            logger.warning("-" * 100)

    def _get_vector(self, word="house"):
        """Look up the vector for ``word``; returns a zero vector when absent."""
        try:
            import lmdb

            with self.env.begin() as txn:
                vector = txn.get(word.encode(encoding="UTF-8"))
                if vector:
                    word_vector = pickle.loads(vector)
                    vector = None
                else:
                    word_vector = np.zeros((self.k,), dtype=np.float32)
        except lmdb.Error:
            # no idea why, but we need to close and reopen the environment to avoid
            # mdb_txn_begin: MDB_BAD_RSLOT: Invalid reuse of reader locktable slot
            # when opening new transaction !
            self.env.close()
            # Bug fix: lmdb.open() expects a str path; everywhere else the Path
            # is converted explicitly, so do the same here.
            self.env = lmdb.open(
                str(self.store_path),
                readonly=True,
                max_readers=2048,
                max_spare_txns=2,
                lock=False,
            )
            return self._get_vector(word)
        except ModuleNotFoundError:
            logger.warning("-" * 100)
            logger.warning('ATTENTION! The library "lmdb" is not installed!')
            logger.warning('To use LMDB, please first install with "pip install lmdb"')
            logger.warning("-" * 100)
            word_vector = np.zeros((self.k,), dtype=np.float32)
        return word_vector
# --- end of file record; extraction metadata removed (12,086 chars, avg line 39.83, max line 112, .py) ---
# === repo: flair — file: flair-master/flair/data.py ===
import bisect
import logging
import re
import typing
from abc import ABC, abstractmethod
from collections import Counter, defaultdict, namedtuple
from operator import itemgetter
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Union, cast
import torch
from deprecated import deprecated
from torch.utils.data import Dataset, IterableDataset
from torch.utils.data.dataset import ConcatDataset, Subset
import flair
from flair.file_utils import Tqdm
from flair.tokenization import SegtokTokenizer, SpaceTokenizer, Tokenizer
# covariant type variable used for typing datasets over their data points
T_co = typing.TypeVar("T_co", covariant=True)

# module-level logger shared by this file
log = logging.getLogger("flair")
def _iter_dataset(dataset: Optional[Dataset]) -> typing.Iterable:
if dataset is None:
return []
from flair.datasets import DataLoader
return (x[0] for x in DataLoader(dataset, batch_size=1))
def _len_dataset(dataset: Optional[Dataset]) -> int:
if dataset is None:
return 0
from flair.datasets import DataLoader
loader = DataLoader(dataset, batch_size=1)
return len(loader)
BoundingBox = namedtuple("BoundingBox", ["left", "top", "right", "bottom"])
class Dictionary:
    """This class holds a dictionary that maps strings to IDs, used to generate one-hot encodings of strings."""

    def __init__(self, add_unk=True) -> None:
        # init dictionaries; items are stored UTF-8 encoded (as bytes)
        self.item2idx: Dict[bytes, int] = {}
        self.idx2item: List[bytes] = []
        self.add_unk = add_unk
        self.multi_label = False
        self.span_labels = False
        # in order to deal with unknown tokens, add <unk>
        if add_unk:
            self.add_item("<unk>")

    def remove_item(self, item: str):
        """Remove an item from the dictionary if present.

        NOTE(review): this removes the entry from both mappings but does not
        re-index the items that follow it in ``idx2item``, so their indices in
        ``item2idx`` become stale — presumably only safe for the most recently
        added item; confirm before using on arbitrary entries.
        """
        bytes_item = item.encode("utf-8")
        if bytes_item in self.item2idx:
            self.idx2item.remove(bytes_item)
            del self.item2idx[bytes_item]

    def add_item(self, item: str) -> int:
        """Add string - if already in dictionary returns its ID. if not in dictionary, it will get a new ID.

        :param item: a string for which to assign an id.
        :return: ID of string
        """
        bytes_item = item.encode("utf-8")
        if bytes_item not in self.item2idx:
            self.idx2item.append(bytes_item)
            self.item2idx[bytes_item] = len(self.idx2item) - 1
        return self.item2idx[bytes_item]

    def get_idx_for_item(self, item: str) -> int:
        """Returns the ID of the string, otherwise 0.

        :param item: string for which ID is requested
        :return: ID of string, otherwise 0
        :raises IndexError: if the item is unknown and ``add_unk`` is False.
        """
        item_encoded = item.encode("utf-8")
        if item_encoded in self.item2idx:
            return self.item2idx[item_encoded]
        elif self.add_unk:
            # unknown items map to the <unk> entry, which is always index 0
            return 0
        else:
            log.error(f"The string '{item}' is not in dictionary! Dictionary contains only: {self.get_items()}")
            log.error(
                "You can create a Dictionary that handles unknown items with an <unk>-key by setting add_unk = True in the construction."
            )
            raise IndexError

    def get_idx_for_items(self, items: List[str]) -> List[int]:
        """Returns the IDs for each item of the list of string, otherwise 0 if not found.

        :param items: List of string for which IDs are requested
        :return: List of ID of strings
        """
        if not hasattr(self, "item2idx_not_encoded"):
            # lazily build (and cache) a str-keyed view of item2idx;
            # defaultdict(int) makes unknown items map to 0
            d = {key.decode("UTF-8"): value for key, value in self.item2idx.items()}
            self.item2idx_not_encoded = defaultdict(int, d)

        if not items:
            return []
        results = itemgetter(*items)(self.item2idx_not_encoded)
        if isinstance(results, int):
            # itemgetter returns a bare int for a single item; normalize to a list
            return [results]
        return list(results)

    def get_items(self) -> List[str]:
        """Return all items as decoded strings, in index order."""
        items = []
        for item in self.idx2item:
            items.append(item.decode("UTF-8"))
        return items

    def __len__(self) -> int:
        return len(self.idx2item)

    def get_item_for_index(self, idx) -> str:
        """Return the string stored at the given index."""
        return self.idx2item[idx].decode("UTF-8")

    def set_start_stop_tags(self):
        """Add the special <START> and <STOP> tags (used e.g. for CRF decoding)."""
        self.add_item("<START>")
        self.add_item("<STOP>")

    def is_span_prediction_problem(self) -> bool:
        """True if this dictionary holds span labels (BIOES/BIO-prefixed items)."""
        if self.span_labels:
            return True
        return any(item.startswith(("B-", "S-", "I-")) for item in self.get_items())

    def start_stop_tags_are_set(self) -> bool:
        """True if both <START> and <STOP> have been added."""
        return {b"<START>", b"<STOP>"}.issubset(self.item2idx.keys())

    def save(self, savefile):
        """Pickle both mappings to the given file path."""
        import pickle

        with open(savefile, "wb") as f:
            mappings = {"idx2item": self.idx2item, "item2idx": self.item2idx}
            pickle.dump(mappings, f)

    def __setstate__(self, d):
        self.__dict__ = d
        # set 'add_unk' if the dictionary was created with a version of Flair older than 0.9
        if "add_unk" not in self.__dict__.keys():
            self.__dict__["add_unk"] = b"<unk>" in self.__dict__["idx2item"]

    @classmethod
    def load_from_file(cls, filename: Union[str, Path]):
        """Load a dictionary previously written by :meth:`save`."""
        import pickle

        with Path(filename).open("rb") as f:
            mappings = pickle.load(f, encoding="latin1")
            idx2item = mappings["idx2item"]
            item2idx = mappings["item2idx"]

        # set 'add_unk' depending on whether <unk> is a key
        add_unk = b"<unk>" in idx2item

        dictionary: Dictionary = Dictionary(add_unk=add_unk)
        dictionary.item2idx = item2idx
        dictionary.idx2item = idx2item
        return dictionary

    @classmethod
    def load(cls, name: str):
        """Load a predefined character dictionary by name, or from a file path."""
        from flair.file_utils import cached_path

        hu_path: str = "https://flair.informatik.hu-berlin.de/resources/characters"
        if name == "chars" or name == "common-chars":
            char_dict = cached_path(f"{hu_path}/common_characters", cache_dir="datasets")
            return Dictionary.load_from_file(char_dict)
        if name == "chars-large" or name == "common-chars-large":
            char_dict = cached_path(f"{hu_path}/common_characters_large", cache_dir="datasets")
            return Dictionary.load_from_file(char_dict)
        if name == "chars-xl" or name == "common-chars-xl":
            char_dict = cached_path(f"{hu_path}/common_characters_xl", cache_dir="datasets")
            return Dictionary.load_from_file(char_dict)
        if name == "chars-lemmatizer" or name == "common-chars-lemmatizer":
            char_dict = cached_path(f"{hu_path}/common_characters_lemmatizer", cache_dir="datasets")
            return Dictionary.load_from_file(char_dict)
        # fall back to interpreting the name as a file path
        return Dictionary.load_from_file(name)

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, Dictionary):
            return False
        return self.item2idx == o.item2idx and self.idx2item == o.idx2item and self.add_unk == o.add_unk

    def __str__(self) -> str:
        # show at most the first 50 tags
        tags = ", ".join(self.get_item_for_index(i) for i in range(min(len(self), 50)))
        return f"Dictionary with {len(self)} tags: {tags}"
class Label:
    """This class represents a label.

    Each label has a value and optionally a confidence score. The score needs
    to be between 0.0 and 1.0, and defaults to 1.0.
    """

    def __init__(self, data_point: "DataPoint", value: str, score: float = 1.0) -> None:
        super().__init__()
        self.data_point: DataPoint = data_point
        self.set_value(value, score)

    def set_value(self, value: str, score: float = 1.0):
        """Overwrite this label's value and confidence in one go."""
        self._value, self._score = value, score

    @property
    def value(self) -> str:
        return self._value

    @property
    def score(self) -> float:
        return self._score

    def to_dict(self):
        """Serialize value and confidence to a plain dict."""
        return {"value": self._value, "confidence": self._score}

    def __str__(self) -> str:
        return f"{self.data_point.unlabeled_identifier}{flair._arrow}{self._value} ({round(self._score, 4)})"

    @property
    def shortstring(self):
        return f'"{self.data_point.text}"/{self._value}'

    def __repr__(self) -> str:
        return f"'{self.data_point.unlabeled_identifier}'/'{self._value}' ({round(self._score, 4)})"

    def __eq__(self, other):
        return self.value == other.value and self.score == other.score and self.data_point == other.data_point

    def __hash__(self):
        return hash(repr(self))

    def __lt__(self, other):
        # labels order by the position of the data point they annotate
        return self.data_point < other.data_point

    @property
    def labeled_identifier(self):
        return f"{self.data_point.unlabeled_identifier}/{self.value}"

    @property
    def unlabeled_identifier(self):
        return f"{self.data_point.unlabeled_identifier}"
class DataPoint:
    """This is the parent class of all data points in Flair.

    Examples for data points are Token, Sentence, Image, etc.

    Each DataPoint must be embeddable (hence the abstract property embedding() and methods to() and clear_embeddings()).
    Also, each DataPoint may have Labels in several layers of annotation (hence the functions add_label(), get_labels()
    and the property 'label')
    """

    def __init__(self) -> None:
        # annotation layers: maps a label type name to all labels of that type
        self.annotation_layers: Dict[str, List[Label]] = {}
        # embeddings by name, plus free-form metadata
        self._embeddings: Dict[str, torch.Tensor] = {}
        self._metadata: Dict[str, typing.Any] = {}

    @property
    @abstractmethod
    def embedding(self):
        pass

    def set_embedding(self, name: str, vector: torch.Tensor):
        """Store an embedding vector under the given name."""
        self._embeddings[name] = vector

    def get_embedding(self, names: Optional[List[str]] = None) -> torch.Tensor:
        """Return the (concatenated) embedding(s); an empty tensor if none are stored."""
        # if one embedding name, directly return it
        if names and len(names) == 1:
            if names[0] in self._embeddings:
                return self._embeddings[names[0]].to(flair.device)
            else:
                return torch.tensor([], device=flair.device)

        # if multiple embedding names, concatenate them
        embeddings = self.get_each_embedding(names)
        if embeddings:
            return torch.cat(embeddings, dim=0)
        else:
            return torch.tensor([], device=flair.device)

    def get_each_embedding(self, embedding_names: Optional[List[str]] = None) -> List[torch.Tensor]:
        """Return the stored embeddings as a list, sorted by embedding name."""
        embeddings = []
        for embed_name in sorted(self._embeddings.keys()):
            if embedding_names and embed_name not in embedding_names:
                continue
            embed = self._embeddings[embed_name].to(flair.device)
            embeddings.append(embed)
        return embeddings

    def to(self, device: str, pin_memory: bool = False):
        """Move all stored embeddings to the given device."""
        for name, vector in self._embeddings.items():
            if str(vector.device) != str(device):
                if pin_memory:
                    self._embeddings[name] = vector.to(device, non_blocking=True).pin_memory()
                else:
                    self._embeddings[name] = vector.to(device, non_blocking=True)

    def clear_embeddings(self, embedding_names: Optional[List[str]] = None):
        """Delete all stored embeddings, or only the named ones."""
        if embedding_names is None:
            self._embeddings = {}
        else:
            for name in embedding_names:
                if name in self._embeddings:
                    del self._embeddings[name]

    def has_label(self, type) -> bool:
        """True if at least one label of the given label type is present."""
        # parameter name 'type' shadows the builtin but is kept for API compatibility
        return type in self.annotation_layers

    def add_metadata(self, key: str, value: typing.Any) -> None:
        self._metadata[key] = value

    def get_metadata(self, key: str) -> typing.Any:
        """Return metadata stored for ``key`` (raises KeyError if absent)."""
        return self._metadata[key]

    def has_metadata(self, key: str) -> bool:
        return key in self._metadata

    def add_label(self, typename: str, value: str, score: float = 1.0):
        """Append a label to the given annotation layer; returns self for chaining."""
        if typename not in self.annotation_layers:
            self.annotation_layers[typename] = [Label(self, value, score)]
        else:
            self.annotation_layers[typename].append(Label(self, value, score))

        return self

    def set_label(self, typename: str, value: str, score: float = 1.0):
        """Replace all labels of the given layer with a single new label."""
        self.annotation_layers[typename] = [Label(self, value, score)]
        return self

    def remove_labels(self, typename: str):
        """Delete the whole annotation layer of the given type, if present."""
        if typename in self.annotation_layers:
            del self.annotation_layers[typename]

    def get_label(self, label_type: Optional[str] = None, zero_tag_value="O"):
        """Return the first label of a layer, or a ``zero_tag_value`` label if the layer is empty."""
        if len(self.get_labels(label_type)) == 0:
            return Label(self, zero_tag_value)
        return self.get_labels(label_type)[0]

    def get_labels(self, typename: Optional[str] = None):
        """Return all labels of a layer ([] if absent); all labels if ``typename`` is None."""
        if typename is None:
            return self.labels

        return self.annotation_layers[typename] if typename in self.annotation_layers else []

    @property
    def labels(self) -> List[Label]:
        """All labels across all annotation layers."""
        all_labels = []
        for key in self.annotation_layers:
            all_labels.extend(self.annotation_layers[key])
        return all_labels

    @property
    @abstractmethod
    def unlabeled_identifier(self):
        raise NotImplementedError

    def _printout_labels(self, main_label=None, add_score: bool = True):
        """Render this data point's own labels for __str__ (optionally with scores)."""
        all_labels = []
        keys = [main_label] if main_label is not None else self.annotation_layers.keys()

        if add_score:
            for key in keys:
                # only include labels attached directly to this data point
                all_labels.extend(
                    [
                        f"{label.value} ({round(label.score, 4)})"
                        for label in self.get_labels(key)
                        if label.data_point == self
                    ]
                )
            labels = "; ".join(all_labels)
            if labels != "":
                labels = flair._arrow + labels
        else:
            for key in keys:
                all_labels.extend([f"{label.value}" for label in self.get_labels(key) if label.data_point == self])
            labels = "/".join(all_labels)
            if labels != "":
                labels = "/" + labels
        return labels

    def __str__(self) -> str:
        return self.unlabeled_identifier + self._printout_labels()

    @property
    @abstractmethod
    def start_position(self) -> int:
        raise NotImplementedError

    @property
    @abstractmethod
    def end_position(self) -> int:
        raise NotImplementedError

    @property
    @abstractmethod
    def text(self):
        raise NotImplementedError

    @property
    def tag(self):
        # convenience accessor: value of the first label
        return self.labels[0].value

    @property
    def score(self):
        # convenience accessor: score of the first label
        return self.labels[0].score

    def __lt__(self, other):
        # data points are ordered by their start position in the text
        return self.start_position < other.start_position

    def __len__(self) -> int:
        raise NotImplementedError
# type variables bound to DataPoint — used to type generic corpora/datasets
# over the concrete data-point type they contain
DT = typing.TypeVar("DT", bound=DataPoint)
DT2 = typing.TypeVar("DT2", bound=DataPoint)
class _PartOfSentence(DataPoint, ABC):
    """Base class for data points that live inside a Sentence (e.g. Token, Span, Relation).

    Labels added to a part are mirrored into the owning Sentence's annotation
    layers so that sentence-level label queries also see them.
    """

    def __init__(self, sentence) -> None:
        super().__init__()
        self.sentence: Sentence = sentence

    def add_label(self, typename: str, value: str, score: float = 1.0):
        """Add a label to this part and register a copy on the owning sentence."""
        super().add_label(typename, value, score)
        self.sentence.annotation_layers.setdefault(typename, []).append(Label(self, value, score))

    def set_label(self, typename: str, value: str, score: float = 1.0):
        """Set a label on this part, replacing its previous labels of that type at the sentence as well."""
        if len(self.annotation_layers.get(typename, [])) > 0:
            # First we remove any existing labels for this PartOfSentence in self.sentence
            self.sentence.annotation_layers[typename] = [
                label for label in self.sentence.annotation_layers.get(typename, []) if label.data_point != self
            ]
        self.sentence.annotation_layers.setdefault(typename, []).append(Label(self, value, score))
        super().set_label(typename, value, score)
        return self

    def remove_labels(self, typename: str):
        """Remove this part's labels of the given type, both here and at the sentence."""
        # labels also need to be deleted at Sentence object
        for label in self.get_labels(typename):
            self.sentence.annotation_layers[typename].remove(label)

        # delete labels at object itself
        super().remove_labels(typename)
class Token(_PartOfSentence):
    """This class represents one word in a tokenized sentence.

    Each token may have any number of tags. It may also point to its head in a dependency tree.
    """

    def __init__(
        self,
        text: str,
        head_id: Optional[int] = None,
        whitespace_after: int = 1,
        start_position: int = 0,
        sentence=None,
    ) -> None:
        """Create a token.

        :param text: surface form of the token
        :param head_id: index of the dependency-head token, if any
        :param whitespace_after: number of whitespace characters following this token
        :param start_position: char offset of the token in the original text
        :param sentence: owning Sentence, or None for a standalone token
        """
        super().__init__(sentence=sentence)

        self.form: str = text
        self._internal_index: Optional[int] = None
        self.head_id: Optional[int] = head_id
        self.whitespace_after: int = whitespace_after

        self._start_position = start_position

        self._embeddings: Dict = {}
        self.tags_proba_dist: Dict[str, List[Label]] = {}

    @property
    def idx(self) -> int:
        # 1-based position within the sentence; -1 if not attached to one
        if self._internal_index is not None:
            return self._internal_index
        else:
            return -1

    @property
    def text(self) -> str:
        return self.form

    @property
    def unlabeled_identifier(self) -> str:
        return f'Token[{self.idx - 1}]: "{self.text}"'

    def add_tags_proba_dist(self, tag_type: str, tags: List[Label]):
        """Store a full probability distribution over tags for the given tag type."""
        self.tags_proba_dist[tag_type] = tags

    def get_tags_proba_dist(self, tag_type: str) -> List[Label]:
        """Return the stored tag distribution for the given type ([] if none)."""
        if tag_type in self.tags_proba_dist:
            return self.tags_proba_dist[tag_type]
        return []

    def get_head(self):
        """Return this token's dependency-head token from the owning sentence."""
        return self.sentence.get_token(self.head_id)

    @property
    def start_position(self) -> int:
        return self._start_position

    @start_position.setter
    def start_position(self, value: int) -> None:
        self._start_position = value

    @property
    def end_position(self) -> int:
        return self.start_position + len(self.text)

    @property
    def embedding(self):
        return self.get_embedding()

    def __len__(self) -> int:
        return 1

    def __repr__(self) -> str:
        return self.__str__()

    def add_label(self, typename: str, value: str, score: float = 1.0):
        # The Token is a special _PartOfSentence in that it may be initialized without a Sentence.
        # therefore, labels get added only to the Sentence if it exists
        if self.sentence:
            super().add_label(typename=typename, value=value, score=score)
        else:
            DataPoint.add_label(self, typename=typename, value=value, score=score)

    def set_label(self, typename: str, value: str, score: float = 1.0):
        # The Token is a special _PartOfSentence in that it may be initialized without a Sentence.
        # Therefore, labels get set only to the Sentence if it exists
        if self.sentence:
            super().set_label(typename=typename, value=value, score=score)
        else:
            DataPoint.set_label(self, typename=typename, value=value, score=score)
class Span(_PartOfSentence):
    """This class represents one textual span consisting of Tokens."""

    def __new__(self, tokens: List[Token]):
        # Spans are interned per sentence: constructing the same span twice
        # returns the already-existing object.
        # check if the span already exists. If so, return it
        unlabeled_identifier = self._make_unlabeled_identifier(tokens)
        if unlabeled_identifier in tokens[0].sentence._known_spans:
            span = tokens[0].sentence._known_spans[unlabeled_identifier]
            return span

        # else make a new span
        else:
            span = super().__new__(self)
            span.initialized = False
            tokens[0].sentence._known_spans[unlabeled_identifier] = span
            return span

    def __init__(self, tokens: List[Token]) -> None:
        # __init__ runs on every Span(...) call, including for interned
        # instances; the 'initialized' flag makes it a no-op the second time
        if not self.initialized:
            super().__init__(tokens[0].sentence)
            self.tokens = tokens
            self.initialized: bool = True

    @property
    def start_position(self) -> int:
        return self.tokens[0].start_position

    @property
    def end_position(self) -> int:
        return self.tokens[-1].end_position

    @property
    def text(self) -> str:
        # reconstruct the surface text, honoring each token's trailing whitespace count
        return "".join([t.text + t.whitespace_after * " " for t in self.tokens]).strip()

    @staticmethod
    def _make_unlabeled_identifier(tokens: List[Token]):
        """Build the interning key / display identifier for a span over the given tokens."""
        text = "".join([t.text + t.whitespace_after * " " for t in tokens]).strip()
        return f'Span[{tokens[0].idx - 1}:{tokens[-1].idx}]: "{text}"'

    @property
    def unlabeled_identifier(self) -> str:
        return self._make_unlabeled_identifier(self.tokens)

    def __repr__(self) -> str:
        return self.__str__()

    def __getitem__(self, idx: int) -> Token:
        return self.tokens[idx]

    def __iter__(self):
        return iter(self.tokens)

    def __len__(self) -> int:
        return len(self.tokens)

    @property
    def embedding(self):
        return self.get_embedding()
class Relation(_PartOfSentence):
    """A directed relation between two Spans of the same sentence."""

    def __new__(self, first: Span, second: Span):
        # Relations are interned per sentence, like Spans.
        # check if the relation already exists. If so, return it
        unlabeled_identifier = self._make_unlabeled_identifier(first, second)
        if unlabeled_identifier in first.sentence._known_spans:
            span = first.sentence._known_spans[unlabeled_identifier]
            return span

        # else make a new relation
        else:
            span = super().__new__(self)
            span.initialized = False
            first.sentence._known_spans[unlabeled_identifier] = span
            return span

    def __init__(self, first: Span, second: Span) -> None:
        # no-op for interned (already initialized) instances
        if not self.initialized:
            super().__init__(sentence=first.sentence)
            self.first: Span = first
            self.second: Span = second
            self.initialized: bool = True

    def __repr__(self) -> str:
        return str(self)

    @property
    def tag(self):
        return self.labels[0].value

    @property
    def text(self):
        return f"{self.first.text} -> {self.second.text}"

    @staticmethod
    def _make_unlabeled_identifier(first, second):
        """Build the interning key / display identifier for a relation between two spans."""
        text = f"{first.text} -> {second.text}"
        return (
            f"Relation"
            f"[{first.tokens[0].idx - 1}:{first.tokens[-1].idx}]"
            f"[{second.tokens[0].idx - 1}:{second.tokens[-1].idx}]"
            f': "{text}"'
        )

    @property
    def unlabeled_identifier(self) -> str:
        return self._make_unlabeled_identifier(self.first, self.second)

    @property
    def start_position(self) -> int:
        # the relation spans from the earlier of the two spans ...
        return min(self.first.start_position, self.second.start_position)

    @property
    def end_position(self) -> int:
        # ... to the later one
        return max(self.first.end_position, self.second.end_position)

    @property
    def embedding(self):
        pass
class Sentence(DataPoint):
    """A Sentence is a list of tokens and is used to represent a sentence or text fragment."""

    def __init__(
        self,
        text: Union[str, List[str], List[Token]],
        use_tokenizer: Union[bool, Tokenizer] = True,
        language_code: Optional[str] = None,
        start_position: int = 0,
    ) -> None:
        """Class to hold all metadata related to a text.

        Metadata can be tokens, predictions, language code, ...

        :param text: original string (sentence), or a list of string tokens (words)
        :param use_tokenizer: a custom tokenizer (default is :class:`SpaceTokenizer`)
            more advanced options are :class:`SegTokTokenizer` to use segtok or :class:`SpacyTokenizer`
            to use Spacy library if available). Check the implementations of abstract class Tokenizer or
            implement your own subclass (if you need it). If instead of providing a Tokenizer, this parameter
            is just set to True (deprecated), :class:`SegtokTokenizer` will be used.
        :param language_code: Language of the sentence
        :param start_position: Start char offset of the sentence in the superordinate document
        """
        super().__init__()

        self.tokens: List[Token] = []

        # private field for all known spans
        self._known_spans: Dict[str, _PartOfSentence] = {}

        self.language_code: Optional[str] = language_code
        self._start_position = start_position

        # the tokenizer used for this sentence
        if isinstance(use_tokenizer, Tokenizer):
            tokenizer = use_tokenizer
        elif type(use_tokenizer) == bool:
            tokenizer = SegtokTokenizer() if use_tokenizer else SpaceTokenizer()
        else:
            raise AssertionError("Unexpected type of parameter 'use_tokenizer'. Parameter should be bool or Tokenizer")

        # cache for the space-joined token string (filled lazily by to_tokenized_string)
        self.tokenized: Optional[str] = None

        # some sentences represent a document boundary (but most do not)
        self.is_document_boundary: bool = False

        # internal variables to denote position inside dataset
        self._previous_sentence: Optional[Sentence] = None
        self._has_context: bool = False
        self._next_sentence: Optional[Sentence] = None
        self._position_in_dataset: Optional[typing.Tuple[Dataset, int]] = None

        # if text is passed, instantiate sentence with tokens (words)
        if isinstance(text, str):
            text = Sentence._handle_problem_characters(text)
            words = tokenizer.tokenize(text)
        elif text and isinstance(text[0], Token):
            # a list of already-constructed Token objects: adopt them directly
            for t in text:
                self._add_token(t)
            self.tokens[-1].whitespace_after = 0
            return
        else:
            # a list of plain string words: join with single spaces to form the text
            words = cast(List[str], text)
            text = " ".join(words)

        # determine token positions and whitespace_after flag
        current_offset: int = 0
        previous_token: Optional[Token] = None
        for word in words:
            word_start_position: int = text.index(word, current_offset)
            # gap between previous token end and this token start = whitespace width
            delta_offset: int = word_start_position - current_offset

            token: Token = Token(text=word, start_position=word_start_position)
            self._add_token(token)
            if previous_token is not None:
                previous_token.whitespace_after = delta_offset

            current_offset = token.end_position
            previous_token = token

        # the last token has no whitespace after
        if len(self) > 0:
            self.tokens[-1].whitespace_after = 0

        # log a warning if the dataset is empty
        if text == "":
            log.warning("Warning: An empty Sentence was created! Are there empty strings in your dataset?")

    @property
    def unlabeled_identifier(self):
        # identifier rendering without any label information
        return f'Sentence[{len(self)}]: "{self.text}"'

    def get_relations(self, type: str) -> List[Relation]:
        """Return all Relation data points carrying a label of the given type."""
        relations: List[Relation] = []
        for label in self.get_labels(type):
            if isinstance(label.data_point, Relation):
                relations.append(label.data_point)
        return relations

    def get_spans(self, type: str) -> List[Span]:
        """Return (sorted) all known spans of this sentence that carry a label of the given type."""
        spans: List[Span] = []
        for potential_span in self._known_spans.values():
            if isinstance(potential_span, Span) and potential_span.has_label(type):
                spans.append(potential_span)
        return sorted(spans)

    def get_token(self, token_id: int) -> Optional[Token]:
        """Return the token with the given (1-based) idx, or None if no such token exists."""
        for token in self.tokens:
            if token.idx == token_id:
                return token
        return None

    def _add_token(self, token: Union[Token, str]):
        # append a token to this sentence and mirror its labels on the sentence
        if isinstance(token, Token):
            assert token.sentence is None
        if type(token) is str:
            token = Token(token)
        token = cast(Token, token)

        # data with zero-width characters cannot be handled
        if token.text == "":
            return

        # set token idx and sentence
        token.sentence = self
        token._internal_index = len(self.tokens) + 1
        # if no start position was set on the token, infer it from the text so far
        if token.start_position == 0 and token._internal_index > 1:
            token.start_position = len(self.to_original_text()) + self[-1].whitespace_after

        # append token to sentence
        self.tokens.append(token)

        # register token annotations on sentence
        for typename in token.annotation_layers:
            for label in token.get_labels(typename):
                if typename not in token.sentence.annotation_layers:
                    token.sentence.annotation_layers[typename] = [Label(token, label.value, label.score)]
                else:
                    token.sentence.annotation_layers[typename].append(Label(token, label.value, label.score))

    @property
    def embedding(self):
        # delegates to get_embedding() inherited from DataPoint
        return self.get_embedding()

    def to(self, device: str, pin_memory: bool = False):
        """Move all embeddings of this sentence and its tokens to the given device."""
        # move sentence embeddings to device
        super().to(device=device, pin_memory=pin_memory)

        # also move token embeddings to device
        for token in self:
            token.to(device, pin_memory)

    def clear_embeddings(self, embedding_names: Optional[List[str]] = None):
        """Delete stored embeddings (all of them, or only the named ones) from sentence and tokens."""
        super().clear_embeddings(embedding_names)

        # clear token embeddings
        for token in self:
            token.clear_embeddings(embedding_names)

    def left_context(self, context_length: int, respect_document_boundaries: bool = True) -> List[Token]:
        """Return up to ``context_length`` tokens preceding this sentence in the document."""
        sentence = self
        left_context: List[Token] = []
        while len(left_context) < context_length:
            sentence = sentence.previous_sentence()
            if sentence is None:
                break

            if respect_document_boundaries and sentence.is_document_boundary:
                break

            # prepend, so tokens stay in document order
            left_context = sentence.tokens + left_context
        return left_context[-context_length:]

    def right_context(self, context_length: int, respect_document_boundaries: bool = True) -> List[Token]:
        """Return up to ``context_length`` tokens following this sentence in the document."""
        sentence = self
        right_context: List[Token] = []
        while len(right_context) < context_length:
            sentence = sentence.next_sentence()
            if sentence is None:
                break
            if respect_document_boundaries and sentence.is_document_boundary:
                break

            right_context += sentence.tokens
        return right_context[:context_length]

    def __str__(self) -> str:
        return self.to_tagged_string()

    def to_tagged_string(self, main_label=None) -> str:
        """Render the sentence text followed by its labeled sub-parts (spans/relations)."""
        already_printed = [self]

        output = super().__str__()

        label_append = []
        for label in self.get_labels(main_label):
            if label.data_point in already_printed:
                continue
            label_append.append(
                f'"{label.data_point.text}"{label.data_point._printout_labels(main_label=main_label, add_score=False)}'
            )
            already_printed.append(label.data_point)

        if len(label_append) > 0:
            output += f"{flair._arrow}[" + ", ".join(label_append) + "]"
        return output

    @property
    def text(self):
        # the original text of this sentence
        return self.to_original_text()

    def to_tokenized_string(self) -> str:
        """Return the sentence as space-joined token texts (cached after first call)."""
        if self.tokenized is None:
            self.tokenized = " ".join([t.text for t in self.tokens])

        return self.tokenized

    def to_plain_string(self):
        """Reconstruct the sentence text from token texts and their whitespace_after values."""
        plain = ""
        for token in self.tokens:
            plain += token.text
            if token.whitespace_after > 0:
                plain += token.whitespace_after * " "
        return plain.rstrip()

    def infer_space_after(self):
        """Heuristics in case you wish to infer whitespace_after values for tokenized text.

        This is useful for some old NLP tasks (such as CoNLL-03 and CoNLL-2000) that provide only tokenized data with
        no info of original whitespacing.

        :return: self, modified in place
        """
        last_token = None
        quote_count: int = 0

        # infer whitespace after field
        for token in self.tokens:
            if token.text == '"':
                quote_count += 1
                # opening quotes attach to the following token, closing quotes to the preceding one
                if quote_count % 2 != 0:
                    token.whitespace_after = 0
                elif last_token is not None:
                    last_token.whitespace_after = 0

            if last_token is not None:
                # punctuation and clitics attach to the preceding token
                if token.text in [".", ":", ",", ";", ")", "n't", "!", "?"]:
                    last_token.whitespace_after = 0

                if token.text.startswith("'"):
                    last_token.whitespace_after = 0

            # opening parenthesis attaches to the following token
            if token.text in ["("]:
                token.whitespace_after = 0

            last_token = token
        return self

    def to_original_text(self) -> str:
        """Reconstruct the original text of this sentence from token offsets."""
        # if sentence has no tokens, return empty string
        if len(self) == 0:
            return ""
        # otherwise, return concatenation of tokens with the correct offsets
        return (self[0].start_position - self.start_position) * " " + "".join(
            [t.text + t.whitespace_after * " " for t in self.tokens]
        ).strip()

    def to_dict(self, tag_type: Optional[str] = None):
        """Return a serializable dict of this sentence and its labels (of tag_type, or all)."""
        labels = []

        if tag_type:
            labels = [label.to_dict() for label in self.get_labels(tag_type)]
            return {"text": self.to_original_text(), tag_type: labels}

        if self.labels:
            labels = [label.to_dict() for label in self.labels]

        return {"text": self.to_original_text(), "all labels": labels}

    def get_span(self, start: int, stop: int):
        """Return the Span covering tokens [start:stop]."""
        span_slice = slice(start, stop)
        return self[span_slice]

    @typing.overload
    def __getitem__(self, idx: int) -> Token:
        ...

    @typing.overload
    def __getitem__(self, s: slice) -> Span:
        ...

    def __getitem__(self, subscript):
        # integer index -> single Token; slice -> Span over those tokens
        if isinstance(subscript, slice):
            return Span(self.tokens[subscript])
        else:
            return self.tokens[subscript]

    def __iter__(self):
        return iter(self.tokens)

    def __len__(self) -> int:
        return len(self.tokens)

    def __repr__(self) -> str:
        return self.__str__()

    @property
    def start_position(self) -> int:
        # char offset of this sentence in the superordinate document
        return self._start_position

    @start_position.setter
    def start_position(self, value: int) -> None:
        self._start_position = value

    @property
    def end_position(self) -> int:
        # The sentence's start position is not propagated to its tokens.
        # Therefore, we need to add the sentence's start position to its last token's end position, including whitespaces.
        return self.start_position + self[-1].end_position + self[-1].whitespace_after

    def get_language_code(self) -> str:
        """Return the language code, detecting it via langdetect if unset (falls back to "en")."""
        if self.language_code is None:
            import langdetect

            try:
                self.language_code = langdetect.detect(self.to_plain_string())
            except Exception:
                self.language_code = "en"

        return self.language_code

    @staticmethod
    def _handle_problem_characters(text: str) -> str:
        # normalize characters that would break tokenization or offset computation
        text = Sentence.__remove_zero_width_characters(text)
        text = Sentence.__restore_windows_1252_characters(text)
        return text

    @staticmethod
    def __remove_zero_width_characters(text: str) -> str:
        # strip zero-width non-joiner/space, variation selector-16 and BOM
        text = text.replace("\u200c", "")
        text = text.replace("\u200b", "")
        text = text.replace("\ufe0f", "")
        text = text.replace("\ufeff", "")
        return text

    @staticmethod
    def __restore_windows_1252_characters(text: str) -> str:
        # re-decode C1 control characters that are really mis-decoded windows-1252 glyphs
        def to_windows_1252(match):
            try:
                return bytes([ord(match.group(0))]).decode("windows-1252")
            except UnicodeDecodeError:
                # No character at the corresponding code point: remove it
                return ""

        return re.sub(r"[\u0080-\u0099]", to_windows_1252, text)

    def next_sentence(self):
        """Get the next sentence in the document.

        This only works if context is set through dataloader or elsewhere
        :return: next Sentence in document if set, otherwise None
        """
        if self._next_sentence is not None:
            return self._next_sentence

        # fall back to the dataset position, if known
        if self._position_in_dataset is not None:
            dataset = self._position_in_dataset[0]
            index = self._position_in_dataset[1] + 1
            if index < len(dataset):
                return dataset[index]

        return None

    def previous_sentence(self):
        """Get the previous sentence in the document.

        works only if context is set through dataloader or elsewhere
        :return: previous Sentence in document if set, otherwise None
        """
        if self._previous_sentence is not None:
            return self._previous_sentence

        # fall back to the dataset position, if known
        if self._position_in_dataset is not None:
            dataset = self._position_in_dataset[0]
            index = self._position_in_dataset[1] - 1
            if index >= 0:
                return dataset[index]

        return None

    def is_context_set(self) -> bool:
        """Determines if this sentence has a context of sentences before or after set.

        Return True or False depending on whether context is set (for instance in dataloader or elsewhere)
        :return: True if context is set, else False
        """
        return (
            self._has_context
            or self._previous_sentence is not None
            or self._next_sentence is not None
            or self._position_in_dataset is not None
        )

    def copy_context_from_sentence(self, sentence: "Sentence") -> None:
        """Copy the document context (neighbor sentences and dataset position) from another sentence."""
        self._previous_sentence = sentence._previous_sentence
        self._next_sentence = sentence._next_sentence
        self._position_in_dataset = sentence._position_in_dataset

    @classmethod
    def set_context_for_sentences(cls, sentences: List["Sentence"]) -> None:
        """Chain the given sentences together as neighbors (skipping those whose context is already set)."""
        previous_sentence = None
        for sentence in sentences:
            if sentence.is_context_set():
                continue
            sentence._previous_sentence = previous_sentence
            sentence._next_sentence = None
            sentence._has_context = True
            if previous_sentence is not None:
                previous_sentence._next_sentence = sentence
            previous_sentence = sentence

    def get_labels(self, label_type: Optional[str] = None):
        """Return sorted labels of the given type, or all labels if no type is given."""
        # if no label type is specified, return all labels
        if label_type is None:
            return sorted(self.labels)

        # if the label type exists in the Sentence, return it
        if label_type in self.annotation_layers:
            return sorted(self.annotation_layers[label_type])

        # return empty list if none of the above
        return []

    def remove_labels(self, typename: str):
        """Remove all labels of the given type from the sentence, its tokens and its known spans."""
        # labels also need to be deleted at all tokens
        for token in self:
            token.remove_labels(typename)

        # labels also need to be deleted at all known spans
        for span in self._known_spans.values():
            span.remove_labels(typename)

        # remove spans without labels
        self._known_spans = {k: v for k, v in self._known_spans.items() if len(v.labels) > 0}

        # delete labels at object itself
        super().remove_labels(typename)
class DataPair(DataPoint, typing.Generic[DT, DT2]):
    """A DataPoint made of two constituent data points (e.g. a sentence pair)."""

    def __init__(self, first: DT, second: DT2) -> None:
        super().__init__()
        self.first = first
        self.second = second

    def to(self, device: str, pin_memory: bool = False):
        """Move the embeddings of both constituents to the given device."""
        for part in (self.first, self.second):
            part.to(device, pin_memory)

    def clear_embeddings(self, embedding_names: Optional[List[str]] = None):
        """Clear stored embeddings on both constituents."""
        for part in (self.first, self.second):
            part.clear_embeddings(embedding_names)

    @property
    def embedding(self):
        # concatenation of both constituent embeddings
        return torch.cat([self.first.embedding, self.second.embedding])

    def __len__(self) -> int:
        return len(self.first) + len(self.second)

    @property
    def unlabeled_identifier(self):
        return f"DataPair: '{self.first.unlabeled_identifier}' + '{self.second.unlabeled_identifier}'"

    @property
    def start_position(self) -> int:
        # positions are taken from the first constituent
        return self.first.start_position

    @property
    def end_position(self) -> int:
        return self.first.end_position

    @property
    def text(self):
        return f"{self.first.text} || {self.second.text}"
# Convenience alias: a DataPair whose two constituents are Sentences.
TextPair = DataPair[Sentence, Sentence]
class Image(DataPoint):
    """A DataPoint representing an image, held as in-memory data and/or referenced by URL."""

    def __init__(self, data=None, imageURL=None) -> None:
        super().__init__()

        # raw image data (presumably a tensor/array — may be None if only a URL is given)
        self.data = data
        self._embeddings: Dict = {}
        # remote location of the image, if any
        self.imageURL = imageURL

    @property
    def embedding(self):
        return self.get_embedding()

    def __str__(self) -> str:
        # Compare against None explicitly: `if self.data` truth-tests the object
        # itself, which raises a RuntimeError for multi-element torch tensors.
        image_repr = self.data.size() if self.data is not None else ""
        image_url = self.imageURL if self.imageURL else ""
        return f"Image: {image_repr} {image_url}"

    @property
    def start_position(self) -> int:
        # images have no character offsets
        raise NotImplementedError

    @property
    def end_position(self) -> int:
        # images have no character offsets
        raise NotImplementedError

    @property
    def text(self) -> str:
        # images have no textual representation
        raise NotImplementedError

    @property
    def unlabeled_identifier(self) -> str:
        raise NotImplementedError
class Corpus(typing.Generic[T_co]):
    """Holds train, dev and test splits of a dataset.

    Missing dev/test splits can be sampled off the train split at construction
    time (10% each, controlled by ``sample_missing_splits``).
    """

    def __init__(
        self,
        train: Optional[Dataset[T_co]] = None,
        dev: Optional[Dataset[T_co]] = None,
        test: Optional[Dataset[T_co]] = None,
        name: str = "corpus",
        sample_missing_splits: Union[bool, str] = True,
    ) -> None:
        """Initialize a corpus from up to three dataset splits.

        :param train: the training split (may be None)
        :param dev: the development split (may be None)
        :param test: the test split (may be None)
        :param name: a name for this corpus
        :param sample_missing_splits: if True, sample missing dev/test splits from train;
            "only_dev" / "only_test" restrict sampling to that single split
        """
        # set name
        self.name: str = name

        # abort if no data is provided
        if not train and not dev and not test:
            raise RuntimeError("No data provided when initializing corpus object.")

        # sample test data from train if none is provided
        if test is None and sample_missing_splits and train and sample_missing_splits != "only_dev":
            train_length = _len_dataset(train)
            test_size: int = round(train_length / 10)
            test, train = randomly_split_into_two_datasets(train, test_size)

        # sample dev data from train if none is provided
        if dev is None and sample_missing_splits and train and sample_missing_splits != "only_test":
            train_length = _len_dataset(train)
            dev_size: int = round(train_length / 10)
            dev, train = randomly_split_into_two_datasets(train, dev_size)

        # set train dev and test data
        self._train: Optional[Dataset[T_co]] = train
        self._test: Optional[Dataset[T_co]] = test
        self._dev: Optional[Dataset[T_co]] = dev

    @property
    def train(self) -> Optional[Dataset[T_co]]:
        # the training split (may be None)
        return self._train

    @property
    def dev(self) -> Optional[Dataset[T_co]]:
        # the development split (may be None)
        return self._dev

    @property
    def test(self) -> Optional[Dataset[T_co]]:
        # the test split (may be None)
        return self._test

    def downsample(
        self,
        percentage: float = 0.1,
        downsample_train=True,
        downsample_dev=True,
        downsample_test=True,
    ):
        """Randomly downsample the selected splits to the given fraction, in place; returns self."""
        if downsample_train and self._train is not None:
            self._train = self._downsample_to_proportion(self._train, percentage)

        if downsample_dev and self._dev is not None:
            self._dev = self._downsample_to_proportion(self._dev, percentage)

        if downsample_test and self._test is not None:
            self._test = self._downsample_to_proportion(self._test, percentage)

        return self

    def filter_empty_sentences(self):
        """Remove sentences with zero tokens from all splits, in place."""
        log.info("Filtering empty sentences")
        if self._train is not None:
            self._train = Corpus._filter_empty_sentences(self._train)
        if self._test is not None:
            self._test = Corpus._filter_empty_sentences(self._test)
        if self._dev is not None:
            self._dev = Corpus._filter_empty_sentences(self._dev)
        log.info(self)

    def filter_long_sentences(self, max_charlength: int):
        """Remove sentences longer than ``max_charlength`` characters from all splits, in place."""
        log.info("Filtering long sentences")
        if self._train is not None:
            self._train = Corpus._filter_long_sentences(self._train, max_charlength)
        if self._test is not None:
            self._test = Corpus._filter_long_sentences(self._test, max_charlength)
        if self._dev is not None:
            self._dev = Corpus._filter_long_sentences(self._dev, max_charlength)
        log.info(self)

    @staticmethod
    def _filter_long_sentences(dataset, max_charlength: int) -> Dataset:
        # find out empty sentence indices
        # NOTE(review): the variable names say "empty" but this method filters over-long sentences
        empty_sentence_indices = []
        non_empty_sentence_indices = []

        for index, sentence in Tqdm.tqdm(enumerate(_iter_dataset(dataset))):
            if len(sentence.to_plain_string()) > max_charlength:
                empty_sentence_indices.append(index)
            else:
                non_empty_sentence_indices.append(index)

        # create subset of non-empty sentence indices
        subset = Subset(dataset, non_empty_sentence_indices)

        return subset

    @staticmethod
    def _filter_empty_sentences(dataset) -> Dataset:
        # find out empty sentence indices
        empty_sentence_indices = []
        non_empty_sentence_indices = []

        for index, sentence in enumerate(_iter_dataset(dataset)):
            if len(sentence) == 0:
                empty_sentence_indices.append(index)
            else:
                non_empty_sentence_indices.append(index)

        # create subset of non-empty sentence indices
        subset = Subset(dataset, non_empty_sentence_indices)

        return subset

    def make_vocab_dictionary(self, max_tokens=-1, min_freq=1) -> Dictionary:
        """Creates a dictionary of all tokens contained in the corpus.

        By defining `max_tokens` you can set the maximum number of tokens that should be contained in the dictionary.
        If there are more than `max_tokens` tokens in the corpus, the most frequent tokens are added first.
        If `min_freq` is set the a value greater than 1 only tokens occurring more than `min_freq` times are considered
        to be added to the dictionary.

        :param max_tokens: the maximum number of tokens that should be added to the dictionary (-1 = take all tokens)
        :param min_freq: a token needs to occur at least `min_freq` times to be added to the dictionary (-1 = there is no limitation)
        :return: dictionary of tokens
        """
        tokens = self._get_most_common_tokens(max_tokens, min_freq)

        vocab_dictionary: Dictionary = Dictionary()
        for token in tokens:
            vocab_dictionary.add_item(token)

        return vocab_dictionary

    def _get_most_common_tokens(self, max_tokens, min_freq) -> List[str]:
        # token texts ordered by frequency, cut off at max_tokens / min_freq
        tokens_and_frequencies = Counter(self._get_all_tokens())

        tokens: List[str] = []
        for token, freq in tokens_and_frequencies.most_common():
            if (min_freq != -1 and freq < min_freq) or (max_tokens != -1 and len(tokens) == max_tokens):
                break
            tokens.append(token)
        return tokens

    def _get_all_tokens(self) -> List[str]:
        # flat list of all token texts in the train split
        assert self.train
        tokens = [s.tokens for s in _iter_dataset(self.train)]
        tokens = [token for sublist in tokens for token in sublist]
        return [t.text for t in tokens]

    @staticmethod
    def _downsample_to_proportion(dataset: Dataset, proportion: float):
        # keep a random subset of the requested proportion, discard the rest
        sampled_size: int = round(_len_dataset(dataset) * proportion)
        splits = randomly_split_into_two_datasets(dataset, sampled_size)
        return splits[0]

    def obtain_statistics(self, label_type: Optional[str] = None, pretty_print: bool = True) -> Union[dict, str]:
        """Print statistics about the class distribution and sentence sizes.

        only labels of sentences are taken into account
        """
        json_data = {
            "TRAIN": self._obtain_statistics_for(self.train, "TRAIN", label_type),
            "TEST": self._obtain_statistics_for(self.test, "TEST", label_type),
            "DEV": self._obtain_statistics_for(self.dev, "DEV", label_type),
        }
        if pretty_print:
            import json

            return json.dumps(json_data, indent=4)
        return json_data

    @staticmethod
    def _obtain_statistics_for(sentences, name, tag_type) -> dict:
        # compute label/tag/token statistics for one split
        if len(sentences) == 0:
            return {}

        classes_to_count = Corpus._count_sentence_labels(sentences)
        tags_to_count = Corpus._count_token_labels(sentences, tag_type)
        tokens_per_sentence = Corpus._get_tokens_per_sentence(sentences)

        label_size_dict = {}
        for label, c in classes_to_count.items():
            label_size_dict[label] = c

        tag_size_dict = {}
        for tag, c in tags_to_count.items():
            tag_size_dict[tag] = c

        return {
            "dataset": name,
            "total_number_of_documents": len(sentences),
            "number_of_documents_per_class": label_size_dict,
            "number_of_tokens_per_tag": tag_size_dict,
            "number_of_tokens": {
                "total": sum(tokens_per_sentence),
                "min": min(tokens_per_sentence),
                "max": max(tokens_per_sentence),
                "avg": sum(tokens_per_sentence) / len(sentences),
            },
        }

    @staticmethod
    def _get_tokens_per_sentence(sentences):
        # token count of each sentence
        return [len(x.tokens) for x in sentences]

    @staticmethod
    def _count_sentence_labels(sentences):
        # frequency of each sentence-level label value
        label_count = defaultdict(lambda: 0)
        for sent in sentences:
            for label in sent.labels:
                label_count[label.value] += 1
        return label_count

    @staticmethod
    def _count_token_labels(sentences, label_type):
        # frequency of each token-level label value for the given label type
        label_count = defaultdict(lambda: 0)
        for sent in sentences:
            for token in sent.tokens:
                if label_type in token.annotation_layers:
                    label = token.get_label(label_type)
                    label_count[label.value] += 1
        return label_count

    def __str__(self) -> str:
        return "Corpus: %d train + %d dev + %d test sentences" % (
            _len_dataset(self.train) if self.train else 0,
            _len_dataset(self.dev) if self.dev else 0,
            _len_dataset(self.test) if self.test else 0,
        )

    def make_label_dictionary(
        self, label_type: str, min_count: int = -1, add_unk: bool = False, add_dev_test: bool = False
    ) -> Dictionary:
        """Creates a dictionary of all labels assigned to the sentences in the corpus.

        :param label_type: the label type to compute the dictionary for
        :param min_count: label values seen fewer than this many times are mapped to <unk>
        :param add_unk: whether to add an <unk> entry to the dictionary
        :param add_dev_test: whether to also scan dev and test splits (default: train only)
        :return: dictionary of labels
        """
        if min_count > 0 and not add_unk:
            add_unk = True
            log.info("Adding <unk>-token to dictionary since min_count is set.")

        label_dictionary: Dictionary = Dictionary(add_unk=add_unk)
        label_dictionary.span_labels = False

        assert self.train
        datasets = [self.train]
        if add_dev_test and self.dev is not None:
            datasets.append(self.dev)
        if add_dev_test and self.test is not None:
            datasets.append(self.test)
        data: ConcatDataset = ConcatDataset(datasets)

        log.info("Computing label dictionary. Progress:")

        sentence_label_type_counter: typing.Counter[str] = Counter()
        label_value_counter: typing.Counter[str] = Counter()
        all_sentence_labels: List[str] = []

        # first, determine the datapoint type by going through dataset until first label is found
        datapoint_type = None
        for sentence in Tqdm.tqdm(_iter_dataset(data)):
            labels = sentence.get_labels(label_type)
            for label in labels:
                datapoint_type = type(label.data_point)
            if datapoint_type:
                break

        if datapoint_type == Span:
            label_dictionary.span_labels = True

        for sentence in Tqdm.tqdm(_iter_dataset(data)):
            # count all label types per sentence
            sentence_label_type_counter.update(sentence.annotation_layers.keys())

            # go through all labels of label_type and count values
            labels = sentence.get_labels(label_type)
            # NOTE(review): all_sentence_labels is never populated, so this filter is currently a no-op
            label_value_counter.update(label.value for label in labels if label.value not in all_sentence_labels)

            # special handling for Token-level annotations. Add all untagged as 'O' label
            if datapoint_type == Token and len(sentence) > len(labels):
                label_value_counter["O"] += len(sentence) - len(labels)

            if not label_dictionary.multi_label and len(labels) > 1:
                label_dictionary.multi_label = True

        # if an unk threshold is set, UNK all label values below this threshold
        total_count = 0
        unked_count = 0
        for label, count in label_value_counter.most_common():
            if count >= min_count:
                label_dictionary.add_item(label)
                total_count += count
            else:
                unked_count += count

        if len(label_dictionary.idx2item) == 0 or (
            len(label_dictionary.idx2item) == 1 and "<unk>" in label_dictionary.get_items()
        ):
            # no usable labels found: report which label types the corpus does contain
            log.error(f"ERROR: You specified label_type='{label_type}' which is not in this dataset!")
            contained_labels = ", ".join(
                [f"'{label[0]}' (in {label[1]} sentences)" for label in sentence_label_type_counter.most_common()]
            )
            log.error(f"ERROR: The corpus contains the following label types: {contained_labels}")
            # NOTE(review): consider raising a more specific exception type here
            raise Exception

        log.info(
            f"Dictionary created for label '{label_type}' with {len(label_dictionary)} "
            f"values: {', '.join([label[0] + f' (seen {label[1]} times)' for label in label_value_counter.most_common(20)])}"
        )
        if unked_count > 0:
            log.info(f" - at UNK threshold {min_count}, {unked_count} instances are UNK'ed and {total_count} remain")

        return label_dictionary

    def add_label_noise(
        self,
        label_type: str,
        labels: List[str],
        noise_share: float = 0.2,
        split: str = "train",
        noise_transition_matrix: Optional[Dict[str, List[float]]] = None,
    ):
        """Generates uniform label noise distribution in the chosen dataset split.

        :label_type: the type of labels for which the noise should be simulated.
        :labels: an array with unique labels of said type (retrievable from label dictionary).
        :noise_share: the desired share of noise in the train split.
        :split: in which dataset split the noise is to be simulated.
        :noise_transition_matrix: provides pre-defined probabilities for label flipping based on the
            initial label value (relevant for class-dependent label noise simulation).
        """
        import numpy as np

        if split == "train":
            assert self.train
            datasets = [self.train]
        elif split == "dev":
            assert self.dev
            datasets = [self.dev]
        elif split == "test":
            assert self.test
            datasets = [self.test]
        else:
            raise ValueError("split must be either train, dev or test.")
        data: ConcatDataset = ConcatDataset(datasets)

        corrupted_count = 0
        total_label_count = 0

        if noise_transition_matrix:
            # class-dependent noise: flip according to the per-class probability row
            ntm_labels = noise_transition_matrix.keys()
            if set(ntm_labels) != set(labels):
                raise AssertionError(
                    "Label values in the noise transition matrix have to coincide with label values in the dataset"
                )
            log.info("Generating noisy labels. Progress:")
            for data_point in Tqdm.tqdm(_iter_dataset(data)):
                for label in data_point.get_labels(label_type):
                    total_label_count += 1
                    orig_label = label.value
                    # sample randomly from a label distribution according to the probabilities defined by the noise transition matrix
                    new_label = np.random.default_rng().choice(
                        a=list(ntm_labels),
                        p=noise_transition_matrix[orig_label],
                    )
                    # replace the old label with the new one
                    label.data_point.set_label(label_type, new_label)
                    # keep track of the old (clean) label using another label type category
                    label.data_point.add_label(label_type + "_clean", orig_label)
                    # keep track of how many labels in total are flipped
                    if new_label != orig_label:
                        corrupted_count += 1
        else:
            # uniform noise: keep the original label with probability 1 - noise_share
            if noise_share < 0 or noise_share > 1:
                raise ValueError("noise_share must be between 0 and 1.")
            orig_label_p = 1 - noise_share
            other_label_p = noise_share / (len(labels) - 1)
            log.info("Generating noisy labels. Progress:")
            for data_point in Tqdm.tqdm(_iter_dataset(data)):
                for label in data_point.get_labels(label_type):
                    total_label_count += 1
                    orig_label = label.value
                    prob_dist = [other_label_p] * len(labels)
                    prob_dist[labels.index(orig_label)] = orig_label_p
                    # sample randomly from a label distribution according to the probabilities defined by the desired noise share
                    new_label = np.random.default_rng().choice(a=labels, p=prob_dist)
                    # replace the old label with the new one
                    label.data_point.set_label(label_type, new_label)
                    # keep track of the old (clean) label using another label type category
                    label.data_point.add_label(label_type + "_clean", orig_label)
                    # keep track of how many labels in total are flipped
                    if new_label != orig_label:
                        corrupted_count += 1

        log.info(
            f"Total labels corrupted: {corrupted_count}. Resulting noise share: {round((corrupted_count / total_label_count) * 100, 2)}%."
        )

    def get_label_distribution(self):
        # frequency of each sentence-level label value in the train split
        class_to_count = defaultdict(lambda: 0)
        for sent in self.train:
            for label in sent.labels:
                class_to_count[label.value] += 1
        return class_to_count

    def get_all_sentences(self) -> ConcatDataset:
        """Return one dataset concatenating all available splits."""
        parts = []
        if self.train:
            parts.append(self.train)
        if self.dev:
            parts.append(self.dev)
        if self.test:
            parts.append(self.test)
        return ConcatDataset(parts)

    @deprecated(version="0.8", reason="Use 'make_label_dictionary' instead.")
    def make_tag_dictionary(self, tag_type: str) -> Dictionary:
        """Deprecated: build a tag dictionary (including O/<START>/<STOP>) over all splits."""
        # Make the tag dictionary
        tag_dictionary: Dictionary = Dictionary(add_unk=False)
        tag_dictionary.add_item("O")
        for sentence in _iter_dataset(self.get_all_sentences()):
            for token in sentence.tokens:
                tag_dictionary.add_item(token.get_label(tag_type).value)
        tag_dictionary.add_item("<START>")
        tag_dictionary.add_item("<STOP>")
        return tag_dictionary
class MultiCorpus(Corpus):
    """A Corpus assembled from several sub-corpora, concatenating their respective splits.

    Data points receive a "multitask_id" label identifying their corpus of origin
    (assigned on access by ConcatFlairDataset).
    """

    def __init__(
        self,
        corpora: List[Corpus],
        task_ids: Optional[List[str]] = None,
        name: str = "multicorpus",
        **corpusargs,
    ) -> None:
        self.corpora: List[Corpus] = corpora

        # fall back to generated task names when none are supplied
        ids = task_ids if task_ids else [f"Task_{i}" for i in range(len(corpora))]

        # gather the non-empty splits of every sub-corpus
        train_parts = [corpus.train for corpus in self.corpora if corpus.train]
        dev_parts = [corpus.dev for corpus in self.corpora if corpus.dev]
        test_parts = [corpus.test for corpus in self.corpora if corpus.test]

        super().__init__(
            ConcatFlairDataset(train_parts, ids) if len(train_parts) > 0 else None,
            ConcatFlairDataset(dev_parts, ids) if len(dev_parts) > 0 else None,
            ConcatFlairDataset(test_parts, ids) if len(test_parts) > 0 else None,
            name=name,
            **corpusargs,
        )

    def __str__(self) -> str:
        output = (
            f"MultiCorpus: "  # type: ignore[arg-type]
            f"{len(self.train) if self.train else 0} train + "
            f"{len(self.dev) if self.dev else 0} dev + "
            f"{len(self.test) if self.test else 0} test sentences\n - "
        )
        output += "\n - ".join([f"{type(corpus).__name__} {corpus!s} - {corpus.name}" for corpus in self.corpora])
        return output
class FlairDataset(Dataset):
    """Abstract dataset base class that adds an in-memory capability check."""

    @abstractmethod
    def is_in_memory(self) -> bool:
        """Return True if all data points are held in memory, False if they are loaded lazily."""
        pass
class ConcatFlairDataset(Dataset):
    r"""Dataset as a concatenation of multiple datasets.

    This class is useful to assemble different existing datasets. Every item
    retrieved through ``__getitem__`` is tagged with a "multitask_id" label
    naming the dataset it came from.

    Args:
    ----
        datasets (sequence): List of datasets to be concatenated
    """

    datasets: List[Dataset]
    cumulative_sizes: List[int]

    @staticmethod
    def cumsum(sequence):
        """Return the running totals of ``len(element)`` over the given datasets."""
        totals, running = [], 0
        for element in sequence:
            running += len(element)
            totals.append(running)
        return totals

    def __init__(self, datasets: Iterable[Dataset], ids: Iterable[str]) -> None:
        super().__init__()
        self.datasets = list(datasets)
        self.ids = list(ids)
        assert len(self.datasets) > 0, "datasets should not be an empty iterable"
        for dataset in self.datasets:
            # streaming datasets have no length and cannot be concatenated this way
            assert not isinstance(dataset, IterableDataset), "ConcatSentenceDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self) -> int:
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        # normalize negative indices
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx += len(self)

        # locate which constituent dataset holds this index
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]

        sentence = self.datasets[dataset_idx][sample_idx]
        # tag the item with the id of the dataset it belongs to
        sentence.set_label("multitask_id", self.ids[dataset_idx])
        return sentence

    @property
    def cummulative_sizes(self):
        # kept (with its historical misspelling) for torch ConcatDataset API compatibility
        return self.cumulative_sizes
def iob2(tags):
    """Converts the tags to the IOB2 format.

    Check that tags have a valid IOB format.
    Tags in IOB1 format are converted to IOB2 (in place, via each tag's
    ``value`` attribute). Returns False if any tag is malformed, else True.
    """
    for index, tag in enumerate(tags):
        # outside tags need no conversion
        if tag.value == "O":
            continue

        parts = tag.value.split("-")
        if len(parts) != 2 or parts[0] not in ["I", "B"]:
            # malformed tag: signal invalid input
            return False

        if parts[0] == "B":
            continue

        if index == 0 or tags[index - 1].value == "O":
            # IOB1 -> IOB2: span-initial I- becomes B-
            tags[index].value = "B" + tag.value[1:]
        elif tags[index - 1].value[1:] == tag.value[1:]:
            # continuation of the same class: already valid IOB2
            continue
        else:
            # class change without B-: also span-initial under IOB1
            tags[index].value = "B" + tag.value[1:]
    return True
def randomly_split_into_two_datasets(dataset, length_of_first):
    """Randomly partition a dataset into two Subsets.

    The first subset has ``length_of_first`` elements, the second holds the
    remainder; indices within each subset are kept in ascending order.
    """
    import random

    shuffled = list(range(len(dataset)))
    random.shuffle(shuffled)

    first_indices = sorted(shuffled[:length_of_first])
    second_indices = sorted(shuffled[length_of_first:])

    return Subset(dataset, first_indices), Subset(dataset, second_indices)
def get_spans_from_bio(bioes_tags: List[str], bioes_scores=None) -> List[typing.Tuple[List[int], float, str]]:
    """Decode a BIO/BIOES tag sequence into labeled spans.

    :param bioes_tags: one tag per token, e.g. ["B-PER", "I-PER", "O"].
    :param bioes_scores: optional per-token confidences; each token counts as 1.0 when omitted.
    :return: list of (token_indices, mean_score, label) tuples, one per decoded span.
    """
    # return complex list
    found_spans = []
    # internal variables
    current_tag_weights: Dict[str, float] = {}
    previous_tag = "O-"
    current_span: List[int] = []
    current_span_scores: List[float] = []
    # Iterate over a copy with a dummy "O" appended so the final span is
    # closed. (Fix: the original appended to ``bioes_tags`` itself and
    # thereby mutated the caller's list.)
    for idx, bioes_tag in enumerate([*bioes_tags, "O"]):
        # non-set tags are OUT tags
        if bioes_tag == "" or bioes_tag == "O" or bioes_tag == "_":
            bioes_tag = "O-"
        # anything that is not OUT is IN
        in_span = bioes_tag != "O-"
        # does this prediction start a new span?
        starts_new_span = False
        # B- and S- always start new spans
        # if the predicted class changes, I- starts a new span
        # if the predicted class changes and S- was the previous tag, start a new span
        # (Fix: the original compared previous_tag[2:] — the class suffix —
        # against "S-", which can never match; the tag *prefix* is meant.)
        if bioes_tag[:2] in {"B-", "S-"} or (
            in_span and previous_tag[2:] != bioes_tag[2:] and (bioes_tag[:2] == "I-" or previous_tag[:2] == "S-")
        ):
            starts_new_span = True
        # if an existing span is ended (either by reaching O or starting a new span)
        if (starts_new_span or not in_span) and len(current_span) > 0:
            # determine mean score and majority label of the closed span
            span_score = sum(current_span_scores) / len(current_span_scores)
            span_value = max(current_tag_weights.keys(), key=current_tag_weights.__getitem__)
            found_spans.append((current_span, span_score, span_value))
            # reset for-loop variables for new span
            current_span = []
            current_span_scores = []
            current_tag_weights = {}
        if in_span:
            current_span.append(idx)
            current_span_scores.append(bioes_scores[idx] if bioes_scores else 1.0)
            # explicit span openers (B-/S-) get a slightly higher vote so
            # they win label ties within one span
            weight = 1.1 if starts_new_span else 1.0
            current_tag_weights[bioes_tag[2:]] = current_tag_weights.setdefault(bioes_tag[2:], 0.0) + weight
        # remember previous tag
        previous_tag = bioes_tag
    return found_spans
| 65,248 | 34.694201 | 138 | py |
flair | flair-master/flair/splitter.py | from abc import ABC, abstractmethod
from typing import Any, List, Optional, Union
from segtok.segmenter import split_multi
from flair.data import Sentence
from flair.tokenization import (
SciSpacyTokenizer,
SegtokTokenizer,
SpacyTokenizer,
Tokenizer,
)
class SentenceSplitter(ABC):
    r"""An abstract class representing a :class:`SentenceSplitter`.
    Sentence splitters are used to represent algorithms and models to split plain text into
    sentences and individual tokens / words. All subclasses should overwrite :meth:`splits`,
    which splits the given plain text into a sequence of sentences (:class:`Sentence`). The
    individual sentences are in turn subdivided into tokens / words. In most cases, this can
    be controlled by passing custom implementation of :class:`Tokenizer`.
    Moreover, subclasses may overwrite :meth:`name`, returning a unique identifier representing
    the sentence splitter's configuration.
    """
    @abstractmethod
    def split(self, text: str) -> List[Sentence]:
        """Split *text* into a list of tokenized :class:`Sentence` objects."""
        raise NotImplementedError
    @property
    def name(self) -> str:
        # unique identifier of this splitter configuration
        return self.__class__.__name__
    @property
    def tokenizer(self) -> Tokenizer:
        # tokenizer used to subdivide each sentence into tokens
        raise NotImplementedError
    @tokenizer.setter
    def tokenizer(self, value: Tokenizer):
        raise NotImplementedError
class SegtokSentenceSplitter(SentenceSplitter):
    """Sentence Splitter using SegTok.
    Implementation of :class:`SentenceSplitter` using the SegTok library.
    For further details see: https://github.com/fnl/segtok
    """
    def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()) -> None:
        super().__init__()
        self._tokenizer = tokenizer
    def split(self, text: str) -> List[Sentence]:
        plain_sentences: List[str] = split_multi(text)
        sentence_offset = 0
        sentences: List[Sentence] = []
        for sentence in plain_sentences:
            try:
                # locate the sentence in the original text to recover its
                # exact character offset (search resumes after the
                # previous match, so repeated sentences map correctly)
                sentence_offset = text.index(sentence, sentence_offset)
            except ValueError as error:
                raise AssertionError(
                    f"Can't find the sentence offset for sentence {sentence} "
                    f"starting from position {sentence_offset}"
                ) from error
            sentences.append(
                Sentence(
                    text=sentence,
                    use_tokenizer=self._tokenizer,
                    start_position=sentence_offset,
                )
            )
            sentence_offset += len(sentence)
        return sentences
    @property
    def name(self) -> str:
        return self.__class__.__name__
    @property
    def tokenizer(self) -> Tokenizer:
        return self._tokenizer
    @tokenizer.setter
    def tokenizer(self, value: Tokenizer):
        self._tokenizer = value
class SpacySentenceSplitter(SentenceSplitter):
    """Sentence Splitter using Spacy.
    Implementation of :class:`SentenceSplitter`, using models from Spacy.
    :param model Spacy V2 model or the name of the model to load.
    :param tokenizer Custom tokenizer to use (default :class:`SpacyTokenizer`)
    """
    def __init__(self, model: Union[Any, str], tokenizer: Optional[Tokenizer] = None) -> None:
        super().__init__()
        try:
            import spacy
            from spacy.language import Language
        except ImportError:
            raise ImportError(
                "Please install spacy v3.4.4 or higher before using the SpacySentenceSplitter, "
                "otherwise you can use SegtokSentenceSplitter as alternative implementation."
            )
        # accept either an already-loaded spacy pipeline or a model name
        if isinstance(model, Language):
            self.model: Language = model
        else:
            assert isinstance(model, str)
            self.model = spacy.load(model)
        if tokenizer is None:
            # NOTE(review): default tokenizer is hard-coded to the
            # "en_core_sci_sm" model regardless of `model` — confirm intended
            self._tokenizer: Tokenizer = SpacyTokenizer("en_core_sci_sm")
        else:
            self._tokenizer = tokenizer
    def split(self, text: str) -> List[Sentence]:
        document = self.model(text)
        # one Sentence per non-empty spacy sentence, keeping char offsets
        sentences = [
            Sentence(
                text=str(spacy_sent),
                use_tokenizer=self._tokenizer,
                start_position=spacy_sent.start_char,
            )
            for spacy_sent in document.sents
            if len(str(spacy_sent)) > 0
        ]
        return sentences
    @property
    def tokenizer(self) -> Tokenizer:
        return self._tokenizer
    @tokenizer.setter
    def tokenizer(self, value: Tokenizer):
        self._tokenizer = value
    @property
    def name(self) -> str:
        # identifier includes spacy model name/version and tokenizer name
        return (
            self.__class__.__name__
            + "_"
            + self.model.meta["name"]
            + "_"
            + self.model.meta["version"]
            + "_"
            + self._tokenizer.name
        )
class SciSpacySentenceSplitter(SpacySentenceSplitter):
    """Sentence splitter using the spacy model `en_core_sci_sm`.
    Convenience class to instantiate :class:`SpacySentenceSplitter` with Spacy model `en_core_sci_sm`
    for sentence splitting and :class:`SciSpacyTokenizer` as tokenizer.
    """
    def __init__(self) -> None:
        # fixed model + tokenizer combination for scientific/biomedical text
        super().__init__("en_core_sci_sm", SciSpacyTokenizer())
class TagSentenceSplitter(SentenceSplitter):
    """SentenceSplitter which assumes that there is a tag within the text that is used to mark sentence boundaries.
    Implementation of :class:`SentenceSplitter` which assumes that there is a special tag within
    the text that is used to mark sentence boundaries.
    """

    def __init__(self, tag: str, tokenizer: Tokenizer = SegtokTokenizer()) -> None:
        """:param tag: the marker string separating sentences, e.g. "[SEP]"
        :param tokenizer: tokenizer used for the tokens of each sentence
        """
        super().__init__()
        self._tokenizer = tokenizer
        self.tag = tag

    def split(self, text: str) -> List[Sentence]:
        sentences = []
        last_offset = 0
        for part in text.split(self.tag):
            # Empty fragments (e.g. between consecutive tags) yield no
            # sentence, but the offset must still advance past them.
            # (Fix: the original `continue`d before the offset update, so
            # start_position was wrong after any empty fragment.)
            if len(part.strip()) > 0:
                sentences.append(
                    Sentence(
                        text=part,
                        use_tokenizer=self._tokenizer,
                        start_position=last_offset,
                    )
                )
            last_offset += len(part) + len(self.tag)
        return sentences

    @property
    def tokenizer(self) -> Tokenizer:
        return self._tokenizer

    @tokenizer.setter
    def tokenizer(self, value: Tokenizer):
        self._tokenizer = value

    @property
    def name(self) -> str:
        return self.__class__.__name__ + "_" + self.tag + "_" + self._tokenizer.name
class NewlineSentenceSplitter(TagSentenceSplitter):
    r"""Sentence Splitter using newline as boundary marker.
    Convenience class to instantiate :class:`SentenceTagSplitter` with newline ("\n") as
    sentence boundary marker.
    """
    def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()) -> None:
        super().__init__(tag="\n", tokenizer=tokenizer)
    @property
    def name(self) -> str:
        # overrides the parent name (which would embed the raw "\n" tag)
        return self.__class__.__name__ + "_" + self._tokenizer.name
class NoSentenceSplitter(SentenceSplitter):
    """Sentence Splitter which treats the full text as a single Sentence.
    Implementation of :class:`SentenceSplitter` which treats the complete text as one sentence.
    """

    def __init__(self, tokenizer: Tokenizer = SegtokTokenizer()) -> None:
        super().__init__()
        self._tokenizer = tokenizer

    def split(self, text: str) -> List[Sentence]:
        # no splitting at all: the whole input is one sentence at offset 0
        single_sentence = Sentence(text=text, use_tokenizer=self._tokenizer, start_position=0)
        return [single_sentence]

    @property
    def tokenizer(self) -> Tokenizer:
        return self._tokenizer

    @tokenizer.setter
    def tokenizer(self, new_tokenizer: Tokenizer):
        self._tokenizer = new_tokenizer

    @property
    def name(self) -> str:
        return self.__class__.__name__ + "_" + self._tokenizer.name
| 7,853 | 29.207692 | 115 | py |
flair | flair-master/flair/training_utils.py | import logging
import random
import sys
from collections import defaultdict
from enum import Enum
from functools import reduce
from math import inf
from pathlib import Path
from typing import Dict, List, Optional, Union
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import mean_absolute_error, mean_squared_error
from torch.optim import Optimizer
from torch.utils.data import Dataset
import flair
from flair.data import DT, Dictionary, Sentence, _iter_dataset
log = logging.getLogger("flair")
class Result:
    """Holds the outcome of a model evaluation run.

    :param main_score: the single headline metric of the evaluation.
    :param detailed_results: human-readable, multi-line result summary.
    :param classification_report: optional per-class metric dictionary.
    :param scores: metric dictionary; must contain at least a "loss" entry.
    """

    def __init__(
        self,
        main_score: float,
        detailed_results: str,
        classification_report: Optional[dict] = None,
        scores: Optional[dict] = None,
    ) -> None:
        # Fix: use None sentinels instead of mutable `{}` defaults — a shared
        # default dict would be aliased across all Result instances.
        classification_report = {} if classification_report is None else classification_report
        scores = {} if scores is None else scores
        assert "loss" in scores, "No loss provided."
        self.main_score: float = main_score
        self.scores = scores
        self.detailed_results: str = detailed_results
        self.classification_report = classification_report

    @property
    def loss(self):
        # convenience accessor for the mandatory "loss" score
        return self.scores["loss"]

    def __str__(self) -> str:
        return f"{self.detailed_results!s}\nLoss: {self.loss}'"
class MetricRegression:
    """Accumulates gold/predicted values for a regression task and computes error metrics over them."""
    def __init__(self, name) -> None:
        self.name = name
        # gold values and model predictions, appended pairwise by the caller
        self.true: List[float] = []
        self.pred: List[float] = []
    def mean_squared_error(self):
        return mean_squared_error(self.true, self.pred)
    def mean_absolute_error(self):
        return mean_absolute_error(self.true, self.pred)
    def pearsonr(self):
        # correlation coefficient only (p-value discarded)
        return pearsonr(self.true, self.pred)[0]
    def spearmanr(self):
        # correlation coefficient only (p-value discarded)
        return spearmanr(self.true, self.pred)[0]
    # dummy return to fulfill trainer.train() needs
    def micro_avg_f_score(self):
        return self.mean_squared_error()
    def to_tsv(self):
        """Return all four metrics as one tab-separated line (matches tsv_header())."""
        return "{}\t{}\t{}\t{}".format(
            self.mean_squared_error(),
            self.mean_absolute_error(),
            self.pearsonr(),
            self.spearmanr(),
        )
    @staticmethod
    def tsv_header(prefix=None):
        if prefix:
            return "{0}_MEAN_SQUARED_ERROR\t{0}_MEAN_ABSOLUTE_ERROR\t{0}_PEARSON\t{0}_SPEARMAN".format(prefix)
        return "MEAN_SQUARED_ERROR\tMEAN_ABSOLUTE_ERROR\tPEARSON\tSPEARMAN"
    @staticmethod
    def to_empty_tsv():
        # placeholder row with the same number of columns as to_tsv()
        return "\t_\t_\t_\t_"
    def __str__(self) -> str:
        line = "mean squared error: {:.4f} - mean absolute error: {:.4f} - pearson: {:.4f} - spearman: {:.4f}".format(
            self.mean_squared_error(),
            self.mean_absolute_error(),
            self.pearsonr(),
            self.spearmanr(),
        )
        return line
class EvaluationMetric(Enum):
    """Enumeration of the metrics an evaluation/training run can report or optimize."""
    MICRO_ACCURACY = "micro-average accuracy"
    MICRO_F1_SCORE = "micro-average f1-score"
    MACRO_ACCURACY = "macro-average accuracy"
    MACRO_F1_SCORE = "macro-average f1-score"
    MEAN_SQUARED_ERROR = "mean squared error"
class WeightExtractor:
    """Samples a fixed number of weights per parameter tensor and appends their
    values to a TSV file ("weights.txt") on every call, for inspecting training dynamics.
    """
    def __init__(self, directory: Union[str, Path], number_of_weights: int = 10) -> None:
        if type(directory) is str:
            directory = Path(directory)
        self.weights_file = init_output_file(directory, "weights.txt")
        # per parameter key: watched-weight slot -> index path into the tensor
        self.weights_dict: Dict[str, Dict[int, List[float]]] = defaultdict(lambda: defaultdict(list))
        self.number_of_weights = number_of_weights
    def extract_weights(self, state_dict, iteration):
        """Append one TSV row (iteration, key, slot, value) per watched weight of every parameter."""
        for key in state_dict:
            vec = state_dict[key]
            # print(vec)
            try:
                # cap the number of watched weights by the tensor's element count
                weights_to_watch = min(self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size())))
            except Exception:
                # parameters whose size cannot be reduced (e.g. 0-dim) are skipped
                continue
            if key not in self.weights_dict:
                self._init_weights_index(key, state_dict, weights_to_watch)
            for i in range(weights_to_watch):
                vec = state_dict[key]
                # follow the stored index path down to a scalar element
                for index in self.weights_dict[key][i]:
                    vec = vec[index]
                value = vec.item()
                with open(self.weights_file, "a") as f:
                    f.write(f"{iteration}\t{key}\t{i}\t{float(value)}\n")
    def _init_weights_index(self, key, state_dict, weights_to_watch):
        """Randomly pick `weights_to_watch` distinct index paths into the tensor stored under `key`."""
        indices = {}
        i = 0
        while len(indices) < weights_to_watch:
            vec = state_dict[key]
            cur_indices = []
            # draw one random index per tensor dimension
            for _x in range(len(vec.size())):
                index = random.randint(0, len(vec) - 1)
                vec = vec[index]
                cur_indices.append(index)
            # only keep the path if it was not drawn before
            if cur_indices not in list(indices.values()):
                indices[i] = cur_indices
                i += 1
        self.weights_dict[key] = indices
class AnnealOnPlateau:
    """A learningrate sheduler for annealing on plateau.
    This class is a modification of
    torch.optim.lr_scheduler.ReduceLROnPlateau that enables
    setting an "auxiliary metric" to break ties.
    Reduce learning rate when a metric has stopped improving.
    Models often benefit from reducing the learning rate by a factor
    of 2-10 once learning stagnates. This scheduler reads a metrics
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate is reduced.
    Args:
    ----
    optimizer (Optimizer): Wrapped optimizer.
    mode (str): One of `min`, `max`. In `min` mode, lr will
        be reduced when the quantity monitored has stopped
        decreasing; in `max` mode it will be reduced when the
        quantity monitored has stopped increasing. Default: 'min'.
    factor (float): Factor by which the learning rate will be
        reduced. new_lr = lr * factor. Default: 0.1.
    patience (int): Number of epochs with no improvement after
        which learning rate will be reduced. For example, if
        `patience = 2`, then we will ignore the first 2 epochs
        with no improvement, and will only decrease the LR after the
        3rd epoch if the loss still hasn't improved then.
        Default: 10.
    verbose (bool): If ``True``, prints a message to stdout for
        each update. Default: ``False``.
    cooldown (int): Number of epochs to wait before resuming
        normal operation after lr has been reduced. Default: 0.
    min_lr (float or list): A scalar or a list of scalars. A
        lower bound on the learning rate of all param groups
        or each group respectively. Default: 0.
    eps (float): Minimal decay applied to lr. If the difference
        between new and old lr is smaller than eps, the update is
        ignored. Default: 1e-8.
    Example:
    -------
    >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    >>> scheduler = ReduceLROnPlateau(optimizer, 'min')
    >>> for epoch in range(10):
    >>>     train(...)
    >>>     val_loss = validate(...)
    >>>     # Note that step should be called after validate()
    >>>     scheduler.step(val_loss)
    """
    def __init__(
        self,
        optimizer,
        mode="min",
        aux_mode="min",
        factor=0.1,
        patience=10,
        initial_extra_patience=0,
        verbose=False,
        cooldown=0,
        min_lr=0,
        eps=1e-8,
    ) -> None:
        if factor >= 1.0:
            raise ValueError("Factor should be < 1.0.")
        self.factor = factor
        # Attach optimizer
        if not isinstance(optimizer, Optimizer):
            raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
        self.optimizer = optimizer
        # min_lr may be given per param group (list/tuple) or as one scalar
        if isinstance(min_lr, (list, tuple)):
            if len(min_lr) != len(optimizer.param_groups):
                raise ValueError(f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}")
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * len(optimizer.param_groups)
        self.default_patience = patience
        # extra patience applies only until the first LR reduction
        self.effective_patience = patience + initial_extra_patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0
        self.mode = mode
        self.aux_mode = aux_mode
        self.best = None
        self.best_aux = None
        self.num_bad_epochs = None
        self.mode_worse = None  # the worse value for the chosen mode
        self.eps = eps
        self.last_epoch = 0
        self._init_is_better(mode=mode)
        self._reset()
    def _reset(self):
        """Resets num_bad_epochs counter and cooldown counter."""
        self.best = self.mode_worse
        self.cooldown_counter = 0
        self.num_bad_epochs = 0
    def step(self, metric, auxiliary_metric=None) -> bool:
        """Record one epoch's metric; returns True when the LR was reduced this step."""
        # convert `metrics` to float, in case it's a zero-dim Tensor
        current = float(metric)
        epoch = self.last_epoch + 1
        self.last_epoch = epoch
        is_better = False
        assert self.best is not None
        if self.mode == "min" and current < self.best:
            is_better = True
        if self.mode == "max" and current > self.best:
            is_better = True
        # exact tie on the main metric: break it with the auxiliary metric
        # NOTE(review): best_aux may still be None here (it is only set after
        # a previous improvement with an aux metric); a tie in that state
        # would raise TypeError on the comparison — confirm callers' usage
        if current == self.best and auxiliary_metric:
            current_aux = float(auxiliary_metric)
            if self.aux_mode == "min" and current_aux < self.best_aux:
                is_better = True
            if self.aux_mode == "max" and current_aux > self.best_aux:
                is_better = True
        if is_better:
            self.best = current
            if auxiliary_metric:
                self.best_aux = auxiliary_metric
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        if self.in_cooldown:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0  # ignore any bad epochs in cooldown
        reduce_learning_rate = self.num_bad_epochs > self.effective_patience
        if reduce_learning_rate:
            self._reduce_lr(epoch)
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0
            # initial extra patience is used up after the first reduction
            self.effective_patience = self.default_patience
        self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
        return reduce_learning_rate
    def _reduce_lr(self, epoch):
        # scale every param group's lr by `factor`, clamped at its min_lr;
        # changes smaller than eps are dropped to avoid noise-level updates
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group["lr"])
            new_lr = max(old_lr * self.factor, self.min_lrs[i])
            if old_lr - new_lr > self.eps:
                param_group["lr"] = new_lr
                if self.verbose:
                    # NOTE(review): message says "group" but interpolates the
                    # epoch number — looks like a copy-paste slip; kept as-is
                    log.info(f" - reducing learning rate of group {epoch} to {new_lr}")
    @property
    def in_cooldown(self):
        return self.cooldown_counter > 0
    def _init_is_better(self, mode):
        if mode not in {"min", "max"}:
            raise ValueError("mode " + mode + " is unknown!")
        if mode == "min":
            self.mode_worse = inf
        else:  # mode == 'max':
            self.mode_worse = -inf
        self.mode = mode
    def state_dict(self):
        # everything except the optimizer reference is picklable state
        return {key: value for key, value in self.__dict__.items() if key != "optimizer"}
    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)
        self._init_is_better(mode=self.mode)
def init_output_file(base_path: Union[str, Path], file_name: str) -> Path:
    """Create (if necessary) an appendable file below *base_path* and return its path.

    :param base_path: the path to the directory (created when missing)
    :param file_name: the file name
    :return: the created file
    """
    directory = Path(base_path)
    directory.mkdir(parents=True, exist_ok=True)
    output_file = directory / file_name
    output_file.touch(exist_ok=True)
    return output_file
def convert_labels_to_one_hot(label_list: List[List[str]], label_dict: Dictionary) -> List[List[int]]:
    """Convert list of labels to a one hot list.

    :param label_list: list of labels
    :param label_dict: label dictionary
    :return: converted label list
    """
    one_hot = []
    for labels in label_list:
        active = set(labels)
        one_hot.append([1 if item in active else 0 for item in label_dict.get_items()])
    return one_hot
def log_line(log):
    """Emit a horizontal separator line (100 dashes) at INFO level."""
    separator = "-" * 100
    if sys.version_info >= (3, 8):
        # attribute the record to the caller of log_line, not this helper
        log.info(separator, stacklevel=3)
    else:
        log.info(separator)
def add_file_handler(log, output_file):
    """Attach a FileHandler writing INFO-level records to *output_file* and return it.

    The file and its parent directory are created when missing; mode "w"
    truncates any previous contents.
    """
    init_output_file(output_file.parents[0], output_file.name)
    fh = logging.FileHandler(output_file, mode="w", encoding="utf-8")
    fh.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)-15s %(message)s")
    fh.setFormatter(formatter)
    log.addHandler(fh)
    return fh
def store_embeddings(
    data_points: Union[List[DT], Dataset], storage_mode: str, dynamic_embeddings: Optional[List[str]] = None
):
    """Clear dynamic embeddings from *data_points* and optionally move the rest to CPU.

    :param data_points: list of data points (a Dataset is materialized first)
    :param storage_mode: "none" clears all embeddings, "cpu" moves kept embeddings
        to CPU memory; any other value only clears the dynamic embeddings
    :param dynamic_embeddings: embedding names to clear; auto-detected when None
    """
    if isinstance(data_points, Dataset):
        data_points = list(_iter_dataset(data_points))
    # if memory mode option 'none' delete everything
    if storage_mode == "none":
        # NOTE(review): None is passed through to clear_embeddings — presumably
        # it means "clear all embeddings"; confirm against flair.data
        dynamic_embeddings = None
    # if dynamic embedding keys not passed, identify them automatically
    elif dynamic_embeddings is None:
        dynamic_embeddings = identify_dynamic_embeddings(data_points)
    # always delete dynamic embeddings
    for data_point in data_points:
        data_point.clear_embeddings(dynamic_embeddings)
    # if storage mode is "cpu", send everything to CPU (pin to memory if we train on GPU)
    if storage_mode == "cpu":
        pin_memory = str(flair.device) != "cpu"
        for data_point in data_points:
            data_point.to("cpu", pin_memory=pin_memory)
def identify_dynamic_embeddings(data_points: List[DT]):
    """Collect the names of embeddings that carry gradients ("dynamic") on *data_points*.

    Returns the collected names, or None when no embeddings at all were seen.
    """
    dynamic_embeddings = []
    all_embeddings = []
    for data_point in data_points:
        if isinstance(data_point, Sentence):
            # for sentences, inspect token-level embeddings of the first token
            first_token = data_point[0]
            for name, vector in first_token._embeddings.items():
                if vector.requires_grad:
                    dynamic_embeddings.append(name)
                all_embeddings.append(name)
        # sentence/data-point level embeddings
        for name, vector in data_point._embeddings.items():
            if vector.requires_grad:
                dynamic_embeddings.append(name)
            all_embeddings.append(name)
    # NOTE(review): names can occur multiple times on this return path
    # (no dedup here) — confirm callers tolerate duplicates
    if dynamic_embeddings:
        return dynamic_embeddings
    if not all_embeddings:
        return None
    # only reached when dynamic_embeddings is empty, so this returns []
    return list(set(dynamic_embeddings))
| 14,157 | 32.709524 | 118 | py |
flair | flair-master/flair/file_utils.py | """Utilities for working with the local dataset cache. Copied from AllenNLP."""
import base64
import functools
import io
import logging
import mmap
import os
import re
import shutil
import tempfile
import typing
import warnings
import zipfile
from pathlib import Path
from typing import Optional, Sequence, Tuple, Union, cast
from urllib.parse import urlparse
import boto3
import requests
import torch
from botocore import UNSIGNED
from botocore.config import Config
from tqdm import tqdm as _tqdm
import flair
logger = logging.getLogger("flair")
url_proxies: Optional[typing.Dict[str, str]] = None
def set_proxies(proxies: typing.Dict[str, str]) -> None:
    """Allows for data downloaded from urls to be forwarded to a proxy.
    see https://requests.readthedocs.io/en/latest/user/advanced/#proxies
    :param proxies: A dictionary of proxies according to the requests documentation.
    :return: None
    """
    # stored module-wide; picked up by get_from_cache() for its GET requests
    global url_proxies
    url_proxies = proxies
def load_big_file(f: str):
    """Memory-map the file at *f* and return the read-only mapping.

    Workaround for loading a big pickle file: files over 2GB cause pickle
    errors on certain Mac and Windows distributions, and an mmap is also
    much more memory efficient than reading the file into RAM.
    """
    with open(f, "rb") as file_handle:
        mapped = mmap.mmap(file_handle.fileno(), 0, access=mmap.ACCESS_READ)
    # the mapping stays valid after the file object is closed
    return mapped
def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    """Converts an url into a filename in a reversible way.

    The url is base64-encoded; if `etag` is given it is appended after a
    period (a character that never appears in base64 output). Quotes are
    stripped from the etag, since Windows doesn't like them in filenames.
    """
    encoded = base64.b64encode(url.encode("utf-8")).decode("utf-8")
    if not etag:
        return encoded
    clean_etag = etag.replace('"', "")
    return f"{encoded}.{clean_etag}"
def filename_to_url(filename: str) -> Tuple[str, Optional[str]]:
    """Recovers the url from a filename produced by ``url_to_filename``.

    Returns the decoded url and the ETag (which may be ``None``); the etag,
    when present, is everything after the first period.
    """
    encoded, separator, etag_part = filename.partition(".")
    etag: Optional[str] = etag_part if separator else None
    url = base64.b64decode(encoded.encode("utf-8")).decode("utf-8")
    return url, etag
def cached_path(url_or_filename: str, cache_dir: Union[str, Path]) -> Path:
    """Download the given path and return the local path from the cache.
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    cache_dir = Path(cache_dir)
    # anchor relative cache dirs below flair's global cache root
    dataset_cache = flair.cache_root / cache_dir if flair.cache_root not in cache_dir.parents else cache_dir
    parsed = urlparse(url_or_filename)
    if parsed.scheme in ("http", "https"):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, dataset_cache)
    elif parsed.scheme == "s3":
        # s3://bucket — mirror the whole bucket into the cache
        return download_s3_to_path(parsed.netloc, dataset_cache)
    elif len(parsed.scheme) < 2 and Path(url_or_filename).exists():
        # File, and it exists. (single-letter "schemes" are Windows drive
        # letters like C:, hence the < 2 check)
        return Path(url_or_filename)
    elif len(parsed.scheme) < 2:
        # File, but it doesn't exist.
        raise FileNotFoundError(f"file {url_or_filename} not found")
    else:
        # Something unknown
        raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
def download_s3_to_path(bucket_name: str, cache_path: Path) -> Path:
    """Mirror every object of a public s3 bucket into ``cache_path/bucket_name``.

    Skips the download entirely when the target directory already exists;
    uses unsigned (anonymous) requests.
    """
    out_path = cache_path / bucket_name
    if out_path.exists():
        return out_path
    s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    bucket = s3.Bucket(bucket_name)
    for obj in bucket.objects.iterator():
        # keys ending in "/" are directory placeholders, not files
        if obj.key[-1] == "/":
            continue
        target = out_path / obj.key
        target.parent.mkdir(exist_ok=True, parents=True)
        bucket.download_file(obj.key, str(target))
    return out_path
def unzip_file(file: Union[str, Path], unzip_to: Union[str, Path]):
    """Extract all members of the zip archive *file* into the directory *unzip_to*."""
    from zipfile import ZipFile

    with ZipFile(Path(file), "r") as archive:
        # Extract all the contents of zip file into the target directory
        archive.extractall(Path(unzip_to))
def unpack_file(file: Path, unpack_to: Path, mode: Optional[str] = None, keep: bool = True):
    """Unpacks an archive file to the given location.

    :param file Archive file to unpack
    :param unpack_to Destination where to store the output
    :param mode Type of the archive (zip, tar, gz, targz, rar); inferred from
        the file suffix when not given explicitly
    :param keep Indicates whether to keep the archive after extraction or delete it
    """
    name = str(file)
    if mode == "zip" or (mode is None and name.endswith("zip")):
        from zipfile import ZipFile

        with ZipFile(file, "r") as archive:
            # Extract all the contents of zip file in current directory
            archive.extractall(unpack_to)
    # Fix: the suffix checks must only apply when no explicit mode was given.
    # The original's parenthesization — (mode is None and ...endswith("tar.gz")
    # or ...endswith("tgz")) — let a ".tgz"/".gz" suffix override an explicit
    # non-targz mode.
    elif mode == "targz" or (mode is None and (name.endswith("tar.gz") or name.endswith("tgz"))):
        import tarfile

        with tarfile.open(file, "r:gz") as archive:
            archive.extractall(unpack_to)
    elif mode == "tar" or (mode is None and name.endswith("tar")):
        import tarfile

        with tarfile.open(file, "r") as archive:
            archive.extractall(unpack_to)
    elif mode == "gz" or (mode is None and name.endswith("gz")):
        import gzip

        # a bare .gz holds a single file: decompress it into `unpack_to`
        with gzip.open(name, "rb") as f_in, open(str(unpack_to), "wb") as f_out:
            shutil.copyfileobj(f_in, f_out)
    elif mode == "rar" or (mode is None and name.endswith("rar")):
        import patoolib

        patoolib.extract_archive(name, outdir=unpack_to, interactive=False)
    else:
        if mode is None:
            raise AssertionError(f"Can't infer archive type from {file}")
        else:
            raise AssertionError(f"Unsupported mode {mode}")
    if not keep:
        os.remove(name)
# TODO(joelgrus): do we want to do checksums or anything like that?
def get_from_cache(url: str, cache_dir: Path) -> Path:
    """Given a URL, look for the corresponding file in the local cache or download it.
    return: the path to the cached file.
    """
    cache_dir.mkdir(parents=True, exist_ok=True)
    # cache filename is the last path component of the url
    filename = re.sub(r".+/", "", url)
    # get cache path to put the file
    cache_path = cache_dir / filename
    if cache_path.exists():
        return cache_path
    # make HEAD request to check ETag
    response = requests.head(url, headers={"User-Agent": "Flair"}, allow_redirects=True)
    if response.status_code != 200:
        raise OSError(f"HEAD request failed for url {url} with status code {response.status_code}.")
    # add ETag to filename if it exists
    # etag = response.headers.get("ETag")
    # NOTE(review): the ETag is no longer part of the filename (see the
    # commented line above), so cache entries are never invalidated when the
    # remote file changes — confirm this is intended
    if not cache_path.exists():
        # always true here due to the early return above; kept as-is
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        fd, temp_filename = tempfile.mkstemp()
        logger.info("%s not found in cache, downloading to %s", url, temp_filename)
        # GET file object
        req = requests.get(url, stream=True, headers={"User-Agent": "Flair"}, proxies=url_proxies)
        content_length = req.headers.get("Content-Length")
        total = int(content_length) if content_length is not None else None
        progress = Tqdm.tqdm(unit="B", total=total, unit_scale=True, unit_divisor=1024)
        with open(temp_filename, "wb") as temp_file:
            for chunk in req.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    progress.update(len(chunk))
                    temp_file.write(chunk)
        progress.close()
        logger.info("copying %s to cache at %s", temp_filename, cache_path)
        shutil.copyfile(temp_filename, str(cache_path))
        logger.info("removing temp file %s", temp_filename)
        os.close(fd)
        os.remove(temp_filename)
    return cache_path
def open_inside_zip(
    archive_path: str,
    cache_dir: Union[str, Path],
    member_path: Optional[str] = None,
    encoding: str = "utf8",
) -> typing.Iterable:
    """Open a member of a (possibly remote, cached) zip archive as a text stream.

    When *member_path* is None the archive must contain exactly one file,
    which is then opened. The returned wrapper decodes with *encoding*.
    """
    cached_archive_path = cached_path(archive_path, cache_dir=Path(cache_dir))
    with zipfile.ZipFile(cached_archive_path, "r") as archive:
        if member_path is None:
            members_list = archive.namelist()
            member_path = get_the_only_file_in_the_archive(members_list, archive_path)
        member_path = cast(str, member_path)
        # note: the member file stays readable after the ZipFile context exits
        member_file = archive.open(member_path, "r")
    return io.TextIOWrapper(member_file, encoding=encoding)
def extract_single_zip_file(
    archive_path: str,
    cache_dir: Union[str, Path],
    member_path: Optional[str] = None,
) -> Path:
    """Extract one member of a (cached) zip archive into the cache and return its path.

    When *member_path* is None the archive must contain exactly one file.
    An already-extracted member is reused without re-extraction.
    """
    cache_dir = Path(cache_dir)
    cached_archive_path = cached_path(archive_path, cache_dir=cache_dir)
    # anchor relative cache dirs below flair's global cache root
    dataset_cache = flair.cache_root / cache_dir if flair.cache_root not in cache_dir.parents else cache_dir
    if member_path is not None:
        output_path = dataset_cache / member_path
        if output_path.exists():
            return output_path
    with zipfile.ZipFile(cached_archive_path, "r") as archive:
        if member_path is None:
            members_list = archive.namelist()
            member_path = get_the_only_file_in_the_archive(members_list, archive_path)
        output_path = dataset_cache / member_path
        if not output_path.exists():
            archive.extract(member_path, dataset_cache)
    return output_path
def get_the_only_file_in_the_archive(members_list: Sequence[str], archive_path: str) -> str:
    """Return the sole member of an archive; raise ValueError when it holds several files."""
    if len(members_list) > 1:
        example_uri = format_embeddings_file_uri("path_or_url_to_archive", "path_inside_archive")
        raise ValueError(
            f"The archive {archive_path} contains multiple files, so you must select "
            f"one of the files inside providing a uri of the type: {example_uri}"
        )
    return members_list[0]
def format_embeddings_file_uri(main_file_path_or_url: str, path_inside_archive: Optional[str] = None) -> str:
    """Build an embeddings URI, optionally pointing at a member inside an archive.

    Without an inner path the main path/url is returned unchanged; with one,
    the result has the form ``(archive)#member``.
    """
    if not path_inside_archive:
        return main_file_path_or_url
    return f"({main_file_path_or_url})#{path_inside_archive}"
class Tqdm:
    """Thin wrapper around tqdm that allows tuning the global update interval."""
    # These defaults are the same as the argument defaults in tqdm.
    default_mininterval: float = 0.1
    @staticmethod
    def set_default_mininterval(value: float) -> None:
        Tqdm.default_mininterval = value
    @staticmethod
    def set_slower_interval(use_slower_interval: bool) -> None:
        """Slows down the tqdm update interval.
        If ``use_slower_interval`` is ``True``, we will dramatically slow down ``tqdm's`` default
        output rate. ``tqdm's`` default output rate is great for interactively watching progress,
        but it is not great for log files. You might want to set this if you are primarily going
        to be looking at output through log files, not the terminal.
        """
        if use_slower_interval:
            Tqdm.default_mininterval = 10.0
        else:
            Tqdm.default_mininterval = 0.1
    @staticmethod
    def tqdm(*args, **kwargs):
        """Create a tqdm progress bar with the class-wide mininterval applied (kwargs may override it)."""
        new_kwargs = {"mininterval": Tqdm.default_mininterval, **kwargs}
        return _tqdm(*args, **new_kwargs)
def instance_lru_cache(*cache_args, **cache_kwargs):
    """Per-instance variant of :func:`functools.lru_cache`.

    The first call on a given instance builds a dedicated lru_cache-wrapped
    bound method and installs it on that instance under the method's name,
    so every instance owns its own cache (which is garbage-collected
    together with the instance).
    """

    def decorator(func):
        @functools.wraps(func)
        def create_cache(self, *args, **kwargs):
            # build the caching wrapper lazily, bind it to this instance,
            # and shadow the class attribute for all subsequent calls
            bound_cache = functools.lru_cache(*cache_args, **cache_kwargs)(func).__get__(self, self.__class__)
            setattr(self, func.__name__, bound_cache)
            return bound_cache(*args, **kwargs)

        return create_cache

    return decorator
def load_torch_state(model_file: str) -> typing.Dict[str, typing.Any]:
    """Load a torch state dict from *model_file* onto the CPU, via an mmap of the file."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        # load_big_file is a workaround byhttps://github.com/highway11git
        # to load models on some Mac/Windows setups
        # see https://github.com/zalandoresearch/flair/issues/351
        f = load_big_file(model_file)
        return torch.load(f, map_location="cpu")
| 12,566 | 34.600567 | 109 | py |
flair | flair-master/flair/__init__.py | import logging.config
import os
from pathlib import Path
import torch
from transformers import set_seed as hf_set_seed
# global variable: cache_root
from .file_utils import set_proxies
# root folder for all downloaded models/datasets; overridable via FLAIR_CACHE_ROOT
cache_root = Path(os.getenv("FLAIR_CACHE_ROOT", Path(Path.home(), ".flair")))
device: torch.device
"""Flair is using a single device for everything. You can set this device by overwriting this variable."""
# global variable: device
if torch.cuda.is_available():
    # FLAIR_DEVICE selects the cuda index; defaults to cuda:0
    device_id = os.environ.get("FLAIR_DEVICE")
    # No need for correctness checks, torch is doing it
    device = torch.device(f"cuda:{device_id}") if device_id else torch.device("cuda:0")
else:
    device = torch.device("cpu")
# global variable: version
__version__ = "0.12.2"
# global variable: arrow symbol
_arrow = " → "
from . import ( # noqa: E402 import after setting device
data,
models,
nn,
trainers,
visual,
)
# configure the "flair" logger: INFO-level console output on stdout, not propagated to the root logger
logging.config.dictConfig(
    {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {"standard": {"format": "%(asctime)-15s %(message)s"}},
        "handlers": {
            "console": {
                "level": "INFO",
                "class": "logging.StreamHandler",
                "formatter": "standard",
                "stream": "ext://sys.stdout",
            }
        },
        "loggers": {"flair": {"handlers": ["console"], "level": "INFO", "propagate": False}},
    }
)
logger = logging.getLogger("flair")
def set_seed(seed: int):
    """Set a fixed random seed for reproducibility (delegates to ``transformers.set_seed``)."""
    hf_set_seed(seed)
# names exported as the public API of the flair package
__all__ = [
    "cache_root",
    "device",
    "__version__",
    "logger",
    "set_seed",
    "data",
    "models",
    "nn",
    "trainers",
    "visual",
    "datasets",
    "set_proxies",
]
| 1,705 | 20.871795 | 106 | py |
flair | flair-master/flair/samplers.py | import logging
import random
from collections import defaultdict
from typing import Dict
import torch
from torch.utils.data.sampler import Sampler
log = logging.getLogger("flair")
class FlairSampler(Sampler):
    """Base class for Flair samplers: holds a dataset and reports its size."""

    def set_dataset(self, data_source):
        """Attach the dataset this sampler draws from.

        :param data_source: dataset to sample from.
        """
        self.data_source = data_source
        self.num_samples = len(data_source)

    def __len__(self) -> int:
        return self.num_samples
class ImbalancedClassificationDatasetSampler(FlairSampler):
    """Use this to upsample rare classes and downsample common classes in your unbalanced classification dataset."""

    def __init__(self) -> None:
        super().__init__(None)

    def set_dataset(self, data_source):
        """Attach the dataset and precompute one sampling weight per data point.

        :param data_source: classification dataset whose sentences carry labels.
        """
        self.data_source = data_source
        self.num_samples = len(data_source)
        self.indices = list(range(self.num_samples))
        # count how often each label value occurs in the dataset
        label_count: Dict[str, int] = defaultdict(int)
        for sentence in data_source:
            for label in sentence.labels:
                label_count[label.value] += 1
        # each sample is weighted by the inverse frequency of its first label
        offset = 0
        self.weights = torch.DoubleTensor(
            [1.0 / (offset + label_count[data_source[i].labels[0].value]) for i in self.indices]
        )

    def __iter__(self):
        # weighted sampling with replacement: rare labels are drawn disproportionately often
        drawn = torch.multinomial(self.weights, self.num_samples, replacement=True)
        return (self.indices[i] for i in drawn)
class ChunkSampler(FlairSampler):
    """Splits data into blocks and randomizes them before sampling.
    This causes some order of the data to be preserved, while still shuffling the data.
    """

    def __init__(self, block_size=5, plus_window=5) -> None:
        super().__init__(None)
        self.block_size = block_size
        self.plus_window = plus_window
        self.data_source = None

    def __iter__(self):
        indices = list(range(len(self.data_source)))
        # actual chunk size is the base block size plus a random extension
        blocksize = self.block_size + random.randint(0, self.plus_window)
        log.info(f"Chunk sampling with blocksize = {blocksize} ({self.block_size} + {self.plus_window})")
        # cut the index list into consecutive chunks, shuffle the chunks, then flatten again
        chunks = [indices[start : start + blocksize] for start in range(0, len(indices), blocksize)]
        random.shuffle(chunks)
        flattened = [index for chunk in chunks for index in chunk]
        return iter(flattened)
class ExpandingChunkSampler(FlairSampler):
    """Splits data into blocks and randomizes them before sampling.
    Block size grows with each epoch.
    This causes some order of the data to be preserved, while still shuffling the data.
    """

    def __init__(self, step=3) -> None:
        """Initialize the ExpandingChunkSampler.

        :param step: every *step* epochs the block size increments by one.
        """
        super().__init__(None)
        self.block_size = 1
        self.epoch_count = 0
        self.step = step

    def __iter__(self):
        self.epoch_count += 1
        indices = list(range(len(self.data_source)))
        log.info(f"Chunk sampling with blocksize = {self.block_size}")
        # cut into consecutive chunks of the current block size, shuffle them, flatten again
        chunks = [indices[start : start + self.block_size] for start in range(0, len(indices), self.block_size)]
        random.shuffle(chunks)
        flattened = [index for chunk in chunks for index in chunk]
        # grow the chunk size once every *step* epochs
        if self.epoch_count % self.step == 0:
            self.block_size += 1
        return iter(flattened)
| 3,688 | 30 | 116 | py |
flair | flair-master/flair/nn/model.py | import inspect
import itertools
import logging
import typing
from abc import ABC, abstractmethod
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import torch.nn
from torch.nn.modules.loss import _Loss
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair
from flair.data import DT, DT2, Corpus, Dictionary, Sentence, _iter_dataset
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import Embeddings
from flair.embeddings.base import load_embeddings
from flair.file_utils import Tqdm, load_torch_state
from flair.training_utils import Result, store_embeddings
log = logging.getLogger("flair")
class Model(torch.nn.Module, typing.Generic[DT], ABC):
    """Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
    Every new type of model must implement these methods.
    """
    # optional training metadata (framework versions, training parameters) attached by the trainer at save time
    model_card: Optional[Dict[str, Any]] = None
    @property
    @abstractmethod
    def label_type(self):
        """Each model predicts labels of a certain type."""
        raise NotImplementedError
    @abstractmethod
    def forward_loss(self, data_points: List[DT]) -> Tuple[torch.Tensor, int]:
        """Performs a forward pass and returns a loss tensor for backpropagation.
        Implement this to enable training.
        """
        raise NotImplementedError
    @abstractmethod
    def evaluate(
        self,
        data_points: Union[List[DT], Dataset],
        gold_label_type: str,
        out_path: Optional[Union[str, Path]] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        **kwargs,
    ) -> Result:
        """Evaluates the model. Returns a Result object containing evaluation results and a loss value.
        Implement this to enable evaluation.
        :param data_loader: DataLoader that iterates over dataset to be evaluated
        :param out_path: Optional output path to store predictions
        :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU # noqa: E501
        :return: Returns a Tuple consisting of a Result object and a loss float value
        """
        raise NotImplementedError
    def _get_state_dict(self):
        """Returns the state dictionary for this model."""
        state_dict = {"state_dict": self.state_dict()}
        # Always include the name of the Model class for which the state dict holds
        state_dict["__cls__"] = self.__class__.__name__
        return state_dict
    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Initialize the model from a state dictionary."""
        # an "embeddings" kwarg may arrive as a serialized dict; re-instantiate it before construction
        if "embeddings" in kwargs:
            embeddings = kwargs.pop("embeddings")
            if isinstance(embeddings, dict):
                embeddings = load_embeddings(embeddings)
            kwargs["embeddings"] = embeddings
        model = cls(**kwargs)
        model.load_state_dict(state["state_dict"])
        return model
    @staticmethod
    def _fetch_model(model_name) -> str:
        """Resolve a model name to a loadable path; the base implementation returns the name unchanged."""
        return model_name
    def save(self, model_file: Union[str, Path], checkpoint: bool = False):
        """Saves the current model to the provided file.
        :param model_file: the model file
        """
        model_state = self._get_state_dict()
        # write out a "model card" if one is set
        if self.model_card is not None:
            model_state["model_card"] = self.model_card
        # save model
        torch.save(model_state, str(model_file), pickle_protocol=4)
    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "Model":
        """Loads the model from the given file.
        :param model_path: the model file or the already loaded state dict
        :return: the loaded text classifier model
        """
        # if this class is abstract, go through all inheriting classes and try to fetch and load the model
        if inspect.isabstract(cls):
            # get all non-abstract subclasses
            subclasses = get_non_abstract_subclasses(cls)
            # try to fetch the model for each subclass. if fetching is possible, load model and return it
            for model_cls in subclasses:
                try:
                    new_model_path = model_cls._fetch_model(model_path)
                    if new_model_path != model_path:
                        return model_cls.load(new_model_path)
                except Exception:
                    # skip any invalid loadings, e.g. not found on huggingface hub
                    continue
            # if the model cannot be fetched, load as a file
            state = model_path if isinstance(model_path, dict) else load_torch_state(str(model_path))
            # try to get model class from state
            cls_name = state.pop("__cls__", None)
            if cls_name:
                for model_cls in subclasses:
                    if cls_name == model_cls.__name__:
                        return model_cls.load(state)
            # older (flair 11.3 and below) models do not contain cls information. In this case, try all subclasses
            # one after the other until one accepts the state dict without raising
            for model_cls in subclasses:
                # if str(model_cls) == "<class 'flair.models.pairwise_classification_model.TextPairClassifier'>": continue
                try:
                    model = model_cls.load(state)
                    return model
                except Exception as e:
                    print(e)
                    # skip any invalid loadings, e.g. not found on huggingface hub
                    continue
            raise ValueError(f"Could not find any model with name '{model_path}'")
        else:
            # if this class is not abstract, fetch the model and load it
            if not isinstance(model_path, dict):
                model_file = cls._fetch_model(str(model_path))
                state = load_torch_state(model_file)
            else:
                state = model_path
            if "__cls__" in state:
                state.pop("__cls__")
            model = cls._init_model_with_state_dict(state)
            if "model_card" in state:
                model.model_card = state["model_card"]
            # loaded models are put in eval mode on the globally configured flair device
            model.eval()
            model.to(flair.device)
            return model
    def print_model_card(self):
        """Log the stored model card (training metadata) in human-readable form, if one exists."""
        if hasattr(self, "model_card"):
            param_out = "\n------------------------------------\n"
            param_out += "--------- Flair Model Card ---------\n"
            param_out += "------------------------------------\n"
            param_out += "- this Flair model was trained with:\n"
            param_out += f"-- Flair version {self.model_card['flair_version']}\n"
            param_out += f"-- PyTorch version {self.model_card['pytorch_version']}\n"
            if "transformers_version" in self.model_card:
                param_out += f"-- Transformers version {self.model_card['transformers_version']}\n"
            param_out += "------------------------------------\n"
            param_out += "------- Training Parameters: -------\n"
            param_out += "------------------------------------\n"
            training_params = "\n".join(
                f'-- {param} = {self.model_card["training_parameters"][param]}'
                for param in self.model_card["training_parameters"]
            )
            param_out += training_params + "\n"
            param_out += "------------------------------------\n"
            log.info(param_out)
        else:
            log.info(
                "This model has no model card (likely because it is not yet "
                "trained or was trained with Flair version < 0.9.1)"
            )
class ReduceTransformerVocabMixin(ABC):
    """Mixin for models that can report which tokens they actually use, so a transformer vocabulary can be reduced."""
    @abstractmethod
    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        """Yield lists of token texts that the model will encounter in *corpus*."""
        pass
class Classifier(Model[DT], typing.Generic[DT], ReduceTransformerVocabMixin, ABC):
    """Abstract base class for all Flair models that do classification.
    The classifier inherits from flair.nn.Model and adds unified functionality for both, single- and multi-label
    classification and evaluation. Therefore, it is ensured to have a fair comparison between multiple classifiers.
    """
    def evaluate(
        self,
        data_points: Union[List[DT], Dataset],
        gold_label_type: str,
        out_path: Optional[Union[str, Path]] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        **kwargs,
    ) -> Result:
        """Predict on *data_points* and score predictions against gold labels of *gold_label_type*.

        Returns a Result with accuracy, micro/macro precision/recall/F1 (computed via scikit-learn)
        and, if *return_loss* is set, the averaged evaluation loss.
        """
        # imported lazily so sklearn/numpy are only required when actually evaluating
        import numpy as np
        import sklearn
        # make sure <unk> is contained in gold_label_dictionary, if given
        if gold_label_dictionary and not gold_label_dictionary.add_unk:
            raise AssertionError("gold_label_dictionary must have add_unk set to true in initialization.")
        # read Dataset into data loader, if list of sentences passed, make Dataset first
        if not isinstance(data_points, Dataset):
            data_points = FlairDatapointDataset(data_points)
        with torch.no_grad():
            # loss calculation
            eval_loss = torch.zeros(1, device=flair.device)
            average_over = 0
            # variables for printing
            lines: List[str] = []
            # variables for computing scores
            all_spans: Set[str] = set()
            all_true_values = {}
            all_predicted_values = {}
            loader = DataLoader(data_points, batch_size=mini_batch_size)
            sentence_id = 0
            for batch in Tqdm.tqdm(loader):
                # remove any previously predicted labels
                for datapoint in batch:
                    datapoint.remove_labels("predicted")
                # predict for batch
                loss_and_count = self.predict(
                    batch,
                    embedding_storage_mode=embedding_storage_mode,
                    mini_batch_size=mini_batch_size,
                    label_name="predicted",
                    return_loss=return_loss,
                )
                if return_loss:
                    if isinstance(loss_and_count, tuple):
                        average_over += loss_and_count[1]
                        eval_loss += loss_and_count[0]
                    else:
                        eval_loss += loss_and_count
                # get the gold labels
                # (spans are keyed by "<sentence_id>: <identifier>" so identical span texts in
                # different sentences stay distinct)
                for datapoint in batch:
                    for gold_label in datapoint.get_labels(gold_label_type):
                        representation = str(sentence_id) + ": " + gold_label.unlabeled_identifier
                        value = gold_label.value
                        if gold_label_dictionary and gold_label_dictionary.get_idx_for_item(value) == 0:
                            value = "<unk>"
                        if representation not in all_true_values:
                            all_true_values[representation] = [value]
                        else:
                            all_true_values[representation].append(value)
                        if representation not in all_spans:
                            all_spans.add(representation)
                    for predicted_span in datapoint.get_labels("predicted"):
                        representation = str(sentence_id) + ": " + predicted_span.unlabeled_identifier
                        # add to all_predicted_values
                        if representation not in all_predicted_values:
                            all_predicted_values[representation] = [predicted_span.value]
                        else:
                            all_predicted_values[representation].append(predicted_span.value)
                        if representation not in all_spans:
                            all_spans.add(representation)
                    sentence_id += 1
                store_embeddings(batch, embedding_storage_mode)
                # make printout lines
                if out_path:
                    lines.extend(self._print_predictions(batch, gold_label_type))
            # convert true and predicted values to two span-aligned lists
            # (spans with no gold or no predicted label default to "O")
            true_values_span_aligned = []
            predicted_values_span_aligned = []
            for span in all_spans:
                list_of_gold_values_for_span = all_true_values[span] if span in all_true_values else ["O"]
                # delete exluded labels if exclude_labels is given
                for excluded_label in exclude_labels:
                    if excluded_label in list_of_gold_values_for_span:
                        list_of_gold_values_for_span.remove(excluded_label)
                # if after excluding labels, no label is left, ignore the datapoint
                if not list_of_gold_values_for_span:
                    continue
                true_values_span_aligned.append(list_of_gold_values_for_span)
                predicted_values_span_aligned.append(
                    all_predicted_values[span] if span in all_predicted_values else ["O"]
                )
            # write all_predicted_values to out_file if set
            if out_path:
                with open(Path(out_path), "w", encoding="utf-8") as outfile:
                    outfile.write("".join(lines))
            # make the evaluation dictionary
            evaluation_label_dictionary = Dictionary(add_unk=False)
            evaluation_label_dictionary.add_item("O")
            for true_values in all_true_values.values():
                for label in true_values:
                    evaluation_label_dictionary.add_item(label)
            for predicted_values in all_predicted_values.values():
                for label in predicted_values:
                    evaluation_label_dictionary.add_item(label)
            # check if this is a multi-label problem
            multi_label = False
            for true_instance, predicted_instance in zip(true_values_span_aligned, predicted_values_span_aligned):
                if len(true_instance) > 1 or len(predicted_instance) > 1:
                    multi_label = True
                    break
            log.debug(f"Evaluating as a multi-label problem: {multi_label}")
            # compute numbers by formatting true and predicted such that Scikit-Learn can use them
            y_true = []
            y_pred = []
            if multi_label:
                # multi-label problems require a multi-hot vector for each true and predicted label
                for true_instance in true_values_span_aligned:
                    y_true_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
                    for true_value in true_instance:
                        y_true_instance[evaluation_label_dictionary.get_idx_for_item(true_value)] = 1
                    y_true.append(y_true_instance.tolist())
                for predicted_values in predicted_values_span_aligned:
                    y_pred_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
                    for predicted_value in predicted_values:
                        y_pred_instance[evaluation_label_dictionary.get_idx_for_item(predicted_value)] = 1
                    y_pred.append(y_pred_instance.tolist())
            else:
                # single-label problems can do with a single index for each true and predicted label
                y_true = [
                    evaluation_label_dictionary.get_idx_for_item(true_instance[0])
                    for true_instance in true_values_span_aligned
                ]
                y_pred = [
                    evaluation_label_dictionary.get_idx_for_item(predicted_instance[0])
                    for predicted_instance in predicted_values_span_aligned
                ]
            # now, calculate evaluation numbers
            # report classes ordered by frequency; "O" (no label) is excluded from the report
            target_names = []
            labels = []
            counter = Counter(itertools.chain.from_iterable(all_true_values.values()))
            counter.update(list(itertools.chain.from_iterable(all_predicted_values.values())))
            for label_name, _count in counter.most_common():
                if label_name == "O":
                    continue
                target_names.append(label_name)
                labels.append(evaluation_label_dictionary.get_idx_for_item(label_name))
            # there is at least one gold label or one prediction (default)
            if len(all_true_values) + len(all_predicted_values) > 1:
                classification_report = sklearn.metrics.classification_report(
                    y_true,
                    y_pred,
                    digits=4,
                    target_names=target_names,
                    zero_division=0,
                    labels=labels,
                )
                classification_report_dict = sklearn.metrics.classification_report(
                    y_true,
                    y_pred,
                    target_names=target_names,
                    zero_division=0,
                    output_dict=True,
                    labels=labels,
                )
                accuracy_score = round(sklearn.metrics.accuracy_score(y_true, y_pred), 4)
                macro_f_score = round(classification_report_dict["macro avg"]["f1-score"], 4)
                # if there is only one label, then "micro avg" = "macro avg"
                if len(target_names) == 1:
                    classification_report_dict["micro avg"] = classification_report_dict["macro avg"]
                if "micro avg" in classification_report_dict:
                    # micro average is only computed if zero-label exists (for instance "O")
                    micro_f_score = round(classification_report_dict["micro avg"]["f1-score"], 4)
                else:
                    # if no zero-label exists (such as in POS tagging) micro average is equal to accuracy
                    micro_f_score = round(classification_report_dict["accuracy"], 4)
                # same for the main score
                if "micro avg" not in classification_report_dict and main_evaluation_metric[0] == "micro avg":
                    main_score = classification_report_dict["accuracy"]
                else:
                    main_score = classification_report_dict[main_evaluation_metric[0]][main_evaluation_metric[1]]
            else:
                # issue error and default all evaluation numbers to 0.
                log.error(
                    "ACHTUNG! No gold labels and no all_predicted_values found! "
                    "Could be an error in your corpus or how you "
                    "initialize the trainer!"
                )
                accuracy_score = micro_f_score = macro_f_score = main_score = 0.0
                classification_report = ""
                classification_report_dict = {}
        # assemble the human-readable result string and the machine-readable scores dict
        detailed_result = (
            "\nResults:"
            f"\n- F-score (micro) {micro_f_score}"
            f"\n- F-score (macro) {macro_f_score}"
            f"\n- Accuracy {accuracy_score}"
            "\n\nBy class:\n" + classification_report
        )
        scores: Dict[Union[Tuple[str, ...], str], Any] = {}
        for avg_type in ("micro avg", "macro avg"):
            for metric_type in ("f1-score", "precision", "recall"):
                if avg_type == "micro avg" and avg_type not in classification_report_dict:
                    value = classification_report_dict["accuracy"]
                else:
                    value = classification_report_dict[avg_type][metric_type]
                scores[(avg_type, metric_type)] = value
        scores["accuracy"] = accuracy_score
        if average_over > 0:
            eval_loss /= average_over
            scores["loss"] = eval_loss.item()
        result = Result(
            main_score=main_score,
            detailed_results=detailed_result,
            classification_report=classification_report_dict,
            scores=scores,
        )
        return result
    @abstractmethod
    def predict(
        self,
        sentences: Union[List[DT], DT],
        mini_batch_size: int = 32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
    ):
        """Predicts the class labels for the given sentences.
        The labels are directly added to the sentences.
        :param sentences: list of sentences
        :param mini_batch_size: mini batch size to use
        :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted # noqa: E501
        :param verbose: set to True to display a progress bar
        :param return_loss: set to True to return loss
        :param label_name: set this to change the name of the label type that is predicted # noqa: E501
        :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory. # noqa: E501
        """
        raise NotImplementedError
    def _print_predictions(self, batch, gold_label_type):
        """Format one printable line per datapoint (gold vs. predicted labels) for evaluation output files."""
        lines = []
        for datapoint in batch:
            # check if there is a label mismatch
            g = [label.labeled_identifier for label in datapoint.get_labels(gold_label_type)]
            p = [label.labeled_identifier for label in datapoint.get_labels("predicted")]
            g.sort()
            p.sort()
            correct_string = " -> MISMATCH!\n" if g != p else ""
            # print info
            eval_line = (
                f"{datapoint.text}\n"
                f" - Gold: {', '.join(label.value if label.data_point == datapoint else label.labeled_identifier for label in datapoint.get_labels(gold_label_type))}\n"
                f" - Pred: {', '.join(label.value if label.data_point == datapoint else label.labeled_identifier for label in datapoint.get_labels('predicted'))}\n{correct_string}\n"
            )
            lines.append(eval_line)
        return lines
    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        """Yield the token texts of every sentence in *corpus* (used for transformer vocabulary reduction)."""
        for sentence in _iter_dataset(corpus.get_all_sentences()):
            yield [t.text for t in sentence]
    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "Classifier":
        """Load a model; narrows the return type of Model.load to Classifier."""
        from typing import cast
        return cast("Classifier", super().load(model_path=model_path))
class DefaultClassifier(Classifier[DT], typing.Generic[DT, DT2], ABC):
"""Default base class for all Flair models that do classification.
It inherits from flair.nn.Classifier and thus from flair.nn.Model. All features shared by all classifiers are
implemented here, including the loss calculation, prediction heads for both single- and multi- label classification
and the `predict()` method. Example implementations of this class are the TextClassifier, RelationExtractor,
TextPairClassifier and TokenClassifier.
"""
def __init__(
self,
embeddings: Embeddings,
label_dictionary: Dictionary,
final_embedding_size: int,
dropout: float = 0.0,
locked_dropout: float = 0.0,
word_dropout: float = 0.0,
multi_label: bool = False,
multi_label_threshold: float = 0.5,
loss_weights: Optional[Dict[str, float]] = None,
decoder: Optional[torch.nn.Module] = None,
inverse_model: bool = False,
train_on_gold_pairs_only: bool = False,
should_embed_sentence: bool = True,
) -> None:
super().__init__()
# set the embeddings
self.embeddings = embeddings
# initialize the label dictionary
self.label_dictionary: Dictionary = label_dictionary
# initialize the decoder
if decoder is not None:
self.decoder = decoder
self._custom_decoder = True
else:
self.decoder = torch.nn.Linear(final_embedding_size, len(self.label_dictionary))
torch.nn.init.xavier_uniform_(self.decoder.weight)
self._custom_decoder = False
# set up multi-label logic
self.multi_label = multi_label
self.multi_label_threshold = multi_label_threshold
self.final_embedding_size = final_embedding_size
self.inverse_model = inverse_model
# init dropouts
self.dropout: torch.nn.Dropout = torch.nn.Dropout(dropout)
self.locked_dropout = flair.nn.LockedDropout(locked_dropout)
self.word_dropout = flair.nn.WordDropout(word_dropout)
self.should_embed_sentence = should_embed_sentence
# loss weights and loss function
self.weight_dict = loss_weights
# Initialize the weight tensor
if loss_weights is not None:
n_classes = len(self.label_dictionary)
weight_list = [1.0 for i in range(n_classes)]
for i, tag in enumerate(self.label_dictionary.get_items()):
if tag in loss_weights:
weight_list[i] = loss_weights[tag]
self.loss_weights: Optional[torch.Tensor] = torch.FloatTensor(weight_list).to(flair.device)
else:
self.loss_weights = None
# set up gradient reversal if so specified
if inverse_model:
from pytorch_revgrad import RevGrad
self.gradient_reversal = RevGrad()
if self.multi_label:
self.loss_function: _Loss = torch.nn.BCEWithLogitsLoss(weight=self.loss_weights, reduction="sum")
else:
self.loss_function = torch.nn.CrossEntropyLoss(weight=self.loss_weights, reduction="sum")
self.train_on_gold_pairs_only = train_on_gold_pairs_only
def _filter_data_point(self, data_point: DT) -> bool:
"""Specify if a data point should be kept.
That way you can remove for example empty texts. Per default all datapoints that have length zero
will be removed.
Return true if the data point should be kept and false if it should be removed.
"""
return len(data_point) > 0
@abstractmethod
def _get_embedding_for_data_point(self, prediction_data_point: DT2) -> torch.Tensor:
raise NotImplementedError
@abstractmethod
def _get_data_points_from_sentence(self, sentence: DT) -> List[DT2]:
"""Returns the data_points to which labels are added.
The results should be of any type that inherits from DataPoint (Sentence, Span, Token, ... objects).
"""
raise NotImplementedError
def _get_data_points_for_batch(self, sentences: List[DT]) -> List[DT2]:
"""Returns the data_points to which labels are added.
The results should be of any type that inherits from DataPoint (Sentence, Span, Token, ... objects).
"""
return [data_point for sentence in sentences for data_point in self._get_data_points_from_sentence(sentence)]
def _get_label_of_datapoint(self, data_point: DT2) -> List[str]:
"""Extracts the labels from the data points.
Each data point might return a list of strings, representing multiple labels.
"""
if self.multi_label:
return [label.value for label in data_point.get_labels(self.label_type)]
else:
return [data_point.get_label(self.label_type).value]
@property
def multi_label_threshold(self):
return self._multi_label_threshold
@multi_label_threshold.setter
def multi_label_threshold(self, x): # setter method
if type(x) is dict:
if "default" in x:
self._multi_label_threshold = x
else:
raise Exception('multi_label_threshold dict should have a "default" key')
else:
self._multi_label_threshold = {"default": x}
def _prepare_label_tensor(self, prediction_data_points: List[DT2]) -> torch.Tensor:
labels = [self._get_label_of_datapoint(dp) for dp in prediction_data_points]
if self.multi_label:
return torch.tensor(
[
[1 if label in all_labels_for_point else 0 for label in self.label_dictionary.get_items()]
for all_labels_for_point in labels
],
dtype=torch.float,
device=flair.device,
)
else:
return torch.tensor(
[
self.label_dictionary.get_idx_for_item(label[0])
if len(label) > 0
else self.label_dictionary.get_idx_for_item("O")
for label in labels
],
dtype=torch.long,
device=flair.device,
)
def _encode_data_points(self, sentences: List[DT], data_points: List[DT2]):
# embed sentences
if self.should_embed_sentence:
self.embeddings.embed(sentences)
# get a tensor of data points
data_point_tensor = torch.stack([self._get_embedding_for_data_point(data_point) for data_point in data_points])
# do dropout
data_point_tensor = data_point_tensor.unsqueeze(1)
data_point_tensor = self.dropout(data_point_tensor)
data_point_tensor = self.locked_dropout(data_point_tensor)
data_point_tensor = self.word_dropout(data_point_tensor)
data_point_tensor = data_point_tensor.squeeze(1)
return data_point_tensor
def _mask_scores(self, scores, data_points):
return scores
def forward_loss(self, sentences: List[DT]) -> Tuple[torch.Tensor, int]:
# make a forward pass to produce embedded data points and labels
sentences = [sentence for sentence in sentences if self._filter_data_point(sentence)]
# get the data points for which to predict labels
data_points = self._get_data_points_for_batch(sentences)
if len(data_points) == 0:
return torch.tensor(0.0, requires_grad=True, device=flair.device), 1
# get their gold labels as a tensor
label_tensor = self._prepare_label_tensor(data_points)
if label_tensor.size(0) == 0:
return torch.tensor(0.0, requires_grad=True, device=flair.device), 1
# pass data points through network to get encoded data point tensor
data_point_tensor = self._encode_data_points(sentences, data_points)
# decode
scores = self.decoder(data_point_tensor)
# an optional masking step (no masking in most cases)
scores = self._mask_scores(scores, data_points)
# calculate the loss
return self._calculate_loss(scores, label_tensor)
def _calculate_loss(self, scores: torch.Tensor, labels: torch.Tensor) -> Tuple[torch.Tensor, int]:
return self.loss_function(scores, labels), labels.size(0)
def _sort_data(self, data_points: List[DT]) -> List[DT]:
if len(data_points) == 0:
return []
if not isinstance(data_points[0], Sentence):
return data_points
# filter empty sentences
sentences = [sentence for sentence in typing.cast(List[Sentence], data_points) if len(sentence) > 0]
# reverse sort all sequences by their length
reordered_sentences = sorted(sentences, key=len, reverse=True)
return typing.cast(List[DT], reordered_sentences)
def predict(
self,
sentences: Union[List[DT], DT],
mini_batch_size: int = 32,
return_probabilities_for_all_classes: bool = False,
verbose: bool = False,
label_name: Optional[str] = None,
return_loss=False,
embedding_storage_mode="none",
):
"""Predicts the class labels for the given sentences. The labels are directly added to the sentences.
:param sentences: list of sentences
:param mini_batch_size: mini batch size to use
:param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted # noqa: E501
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. # noqa: E501
'gpu' to store embeddings in GPU memory.
"""
if label_name is None:
label_name = self.label_type if self.label_type is not None else "label"
with torch.no_grad():
if not sentences:
return sentences
if not isinstance(sentences, list):
sentences = [sentences]
if isinstance(sentences[0], Sentence):
Sentence.set_context_for_sentences(typing.cast(List[Sentence], sentences))
reordered_sentences = self._sort_data(sentences)
if len(reordered_sentences) == 0:
return sentences
if len(reordered_sentences) > mini_batch_size:
batches: Union[DataLoader, List[List[DT]]] = DataLoader(
dataset=FlairDatapointDataset(reordered_sentences),
batch_size=mini_batch_size,
)
# progress bar for verbosity
if verbose:
progress_bar = tqdm(batches)
progress_bar.set_description("Batch inference")
batches = progress_bar
else:
batches = [reordered_sentences]
overall_loss = torch.zeros(1, device=flair.device)
label_count = 0
for batch in batches:
# filter data points in batch
batch = [dp for dp in batch if self._filter_data_point(dp)]
# stop if all sentences are empty
if not batch:
continue
data_points = self._get_data_points_for_batch(batch)
if not data_points:
continue
# pass data points through network and decode
data_point_tensor = self._encode_data_points(batch, data_points)
scores = self.decoder(data_point_tensor)
scores = self._mask_scores(scores, data_points)
# if anything could possibly be predicted
if len(data_points) > 0:
# remove previously predicted labels of this type
for sentence in data_points:
sentence.remove_labels(label_name)
if return_loss:
# filter data points that have labels outside of dictionary
filtered_indices = []
has_unknown_label = False
for idx, dp in enumerate(data_points):
if all(
label in self.label_dictionary.get_items() for label in self._get_label_of_datapoint(dp)
):
filtered_indices.append(idx)
else:
has_unknown_label = True
if has_unknown_label:
scores = torch.index_select(scores, 0, torch.tensor(filtered_indices, device=flair.device))
gold_labels = self._prepare_label_tensor([data_points[index] for index in filtered_indices])
overall_loss += self._calculate_loss(scores, gold_labels)[0]
label_count += len(filtered_indices)
if self.multi_label:
sigmoided = torch.sigmoid(scores) # size: (n_sentences, n_classes)
n_labels = sigmoided.size(1)
for s_idx, data_point in enumerate(data_points):
for l_idx in range(n_labels):
label_value = self.label_dictionary.get_item_for_index(l_idx)
if label_value == "O":
continue
label_threshold = self._get_label_threshold(label_value)
label_score = sigmoided[s_idx, l_idx].item()
if label_score > label_threshold or return_probabilities_for_all_classes:
data_point.add_label(typename=label_name, value=label_value, score=label_score)
else:
softmax = torch.nn.functional.softmax(scores, dim=-1)
if return_probabilities_for_all_classes:
n_labels = softmax.size(1)
for s_idx, data_point in enumerate(data_points):
for l_idx in range(n_labels):
label_value = self.label_dictionary.get_item_for_index(l_idx)
if label_value == "O":
continue
label_score = softmax[s_idx, l_idx].item()
data_point.add_label(typename=label_name, value=label_value, score=label_score)
else:
conf, indices = torch.max(softmax, dim=-1)
for data_point, c, i in zip(data_points, conf, indices):
label_value = self.label_dictionary.get_item_for_index(i.item())
if label_value == "O":
continue
data_point.add_label(typename=label_name, value=label_value, score=c.item())
store_embeddings(batch, storage_mode=embedding_storage_mode)
self._post_process_batch_after_prediction(batch, label_name)
if return_loss:
if has_unknown_label:
log.info(
"During evaluation, encountered labels that are not in the label_dictionary:"
"Evaluation loss is computed without them."
)
return overall_loss, label_count
return None
    def _post_process_batch_after_prediction(self, batch, label_name):
        # Subclass hook: called once per batch after predicted labels have been
        # attached to the data points. The default implementation does nothing.
        pass
def _get_label_threshold(self, label_value):
label_threshold = self.multi_label_threshold["default"]
if label_value in self.multi_label_threshold:
label_threshold = self.multi_label_threshold[label_value]
return label_threshold
    def __str__(self) -> str:
        # Render the torch.nn.Module representation (super(flair.nn.Model, self)
        # deliberately skips Model's own __str__) and splice in the loss-weight
        # configuration before the closing parenthesis.
        return (
            super(flair.nn.Model, self).__str__().rstrip(")")
            + f" (weights): {self.weight_dict}\n"
            + f" (weight_tensor) {self.loss_weights}\n)"
        )
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
# add DefaultClassifier arguments
for arg in [
"decoder",
"dropout",
"word_dropout",
"locked_dropout",
"multi_label",
"multi_label_threshold",
"loss_weights",
"train_on_gold_pairs_only",
"inverse_model",
]:
if arg not in kwargs and arg in state:
kwargs[arg] = state[arg]
return super(Classifier, cls)._init_model_with_state_dict(state, **kwargs)
def _get_state_dict(self):
state = super()._get_state_dict()
# add variables of DefaultClassifier
state["dropout"] = self.dropout.p
state["word_dropout"] = self.word_dropout.dropout_rate
state["locked_dropout"] = self.locked_dropout.dropout_rate
state["multi_label"] = self.multi_label
state["multi_label_threshold"] = self.multi_label_threshold
state["loss_weights"] = self.loss_weights
state["train_on_gold_pairs_only"] = self.train_on_gold_pairs_only
state["inverse_model"] = self.inverse_model
if self._custom_decoder:
state["decoder"] = self.decoder
return state
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "DefaultClassifier":
from typing import cast
return cast("DefaultClassifier", super().load(model_path=model_path))
def get_non_abstract_subclasses(cls):
    """Recursively collects every non-abstract subclass of ``cls`` (the class itself is excluded)."""
    subclasses = []
    for child in cls.__subclasses__():
        # depth-first: gather the child's own subtree before the child itself
        subclasses.extend(get_non_abstract_subclasses(child))
        if not inspect.isabstract(child):
            subclasses.append(child)
    return subclasses
| 41,127 | 41.443756 | 267 | py |
flair | flair-master/flair/nn/multitask.py | from typing import Iterable, Tuple, Union
from flair.data import Corpus, MultiCorpus
from flair.models import MultitaskModel
from flair.nn import Classifier, Model
def make_multitask_model_and_corpus(
    mapping: Iterable[Union[Tuple[Classifier, Corpus], Tuple[Classifier, Corpus, float]]]
) -> Tuple[Model, Corpus]:
    """Builds a MultitaskModel and a matching MultiCorpus from (model, corpus[, loss_factor]) pairs.

    Each entry is assigned a task id of the form ``Task_<index>``; an optional
    third tuple element is used as the task's loss factor (default 1.0).
    """
    models = []
    corpora = []
    loss_factors = []
    task_ids = []
    for index, task in enumerate(mapping):
        models.append(task[0])
        corpora.append(task[1])
        # the optional third element is the task's loss weight
        factor = task[2] if len(task) == 3 else 1.0  # type:ignore[misc] # mypy does not understand the length check
        loss_factors.append(factor)
        task_ids.append(f"Task_{index}")
    return MultitaskModel(models=models, task_ids=task_ids, loss_factors=loss_factors), MultiCorpus(corpora, task_ids)
| 836 | 30 | 114 | py |
flair | flair-master/flair/nn/dropout.py | import torch
class LockedDropout(torch.nn.Module):
    """Implementation of locked (or variational) dropout.

    Samples a single dropout mask per sequence and applies it to every
    timestep, dropping entire dimensions of the embedding space.
    """

    def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False) -> None:
        super().__init__()
        self.dropout_rate = dropout_rate
        self.batch_first = batch_first
        self.inplace = inplace

    def forward(self, x):
        # no-op at inference time or when the rate is zero
        if not (self.training and self.dropout_rate):
            return x

        keep_prob = 1 - self.dropout_rate
        # one mask per sequence, broadcast over the time dimension
        if self.batch_first:
            noise = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(keep_prob)
        else:
            noise = x.data.new(1, x.size(1), x.size(2)).bernoulli_(keep_prob)

        # scale so the expected activation magnitude is unchanged
        mask = torch.autograd.Variable(noise, requires_grad=False) / keep_prob
        return mask.expand_as(x) * x

    def extra_repr(self):
        suffix = ", inplace" if self.inplace else ""
        return f"p={self.dropout_rate}{suffix}"
class WordDropout(torch.nn.Module):
    """Implementation of word dropout.

    Randomly zeroes entire word (or character) vectors in embedding space,
    i.e. the whole last dimension of a timestep at once.
    """

    def __init__(self, dropout_rate=0.05, inplace=False) -> None:
        super().__init__()
        self.dropout_rate = dropout_rate
        self.inplace = inplace

    def forward(self, x):
        # no-op at inference time or when the rate is zero
        if not (self.training and self.dropout_rate):
            return x

        # one Bernoulli draw per (batch, timestep), broadcast over the embedding dim;
        # note: unlike standard dropout, surviving values are NOT rescaled
        noise = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)
        mask = torch.autograd.Variable(noise, requires_grad=False)
        return mask * x

    def extra_repr(self):
        suffix = ", inplace" if self.inplace else ""
        return f"p={self.dropout_rate}{suffix}"
| 1,747 | 29.666667 | 88 | py |
flair | flair-master/flair/nn/recurrent.py | from torch import nn
# maps a layer-type name to its torch module and the number of hidden states it carries
# (LSTM has hidden + cell state, GRU only a hidden state)
rnn_layers = {"lstm": (nn.LSTM, 2), "gru": (nn.GRU, 1)}


def create_recurrent_layer(layer_type, initial_size, hidden_size, nlayers, dropout=0, **kwargs):
    """Instantiates an RNN layer ("lstm" or "gru", case-insensitive).

    Returns a tuple of (layer module, number of hidden states of that layer type).
    Dropout is forced to 0 for single-layer RNNs, where inter-layer dropout is undefined.
    """
    key = layer_type.lower()
    assert key in rnn_layers
    module_cls, hidden_count = rnn_layers[key]
    effective_dropout = dropout if nlayers > 1 else 0
    return module_cls(initial_size, hidden_size, nlayers, dropout=effective_dropout, **kwargs), hidden_count
| 437 | 28.2 | 96 | py |
flair | flair-master/flair/nn/decoder.py | import logging
from typing import List, Optional
import torch
import flair
from flair.data import Dictionary, Sentence
from flair.embeddings import Embeddings
from flair.nn.distance import (
CosineDistance,
EuclideanDistance,
HyperbolicDistance,
LogitCosineDistance,
NegativeScaledDotProduct,
)
from flair.training_utils import store_embeddings
logger = logging.getLogger("flair")
class PrototypicalDecoder(torch.nn.Module):
    """Decoder that scores embeddings by their (negated) distance to one learned prototype per class."""

    def __init__(
        self,
        num_prototypes: int,
        embeddings_size: int,
        prototype_size: Optional[int] = None,
        distance_function: str = "euclidean",
        use_radius: Optional[bool] = False,
        min_radius: Optional[int] = 0,
        unlabeled_distance: Optional[float] = None,
        unlabeled_idx: Optional[int] = None,
        learning_mode: Optional[str] = "joint",
        normal_distributed_initial_prototypes: bool = False,
    ) -> None:
        """Initializes a PrototypicalDecoder.

        :param num_prototypes: number of class prototypes (one per label)
        :param embeddings_size: dimensionality of the incoming embeddings
        :param prototype_size: dimensionality of the prototype space; defaults to embeddings_size
        :param distance_function: one of "euclidean", "cosine", "logit_cosine", "hyperbolic", "dot_product"
        :param use_radius: if True, learn a per-prototype radius that rescales distances
        :param min_radius: lower bound added to the softplus-activated radius
        :param unlabeled_distance: fixed distance assigned to the unlabeled prototype (must be set together with unlabeled_idx)
        :param unlabeled_idx: index of the unlabeled class prototype
        :param learning_mode: controls which parts receive gradients (e.g. "joint", "learn_only_prototypes")
        :param normal_distributed_initial_prototypes: if True, initialize prototypes from a standard normal instead of all-ones
        :raises KeyError: if distance_function is not one of the supported names
        """
        super().__init__()

        if not prototype_size:
            prototype_size = embeddings_size

        self.prototype_size = prototype_size

        # optional metric space decoder if prototypes have different length than embedding
        self.metric_space_decoder: Optional[torch.nn.Linear] = None
        if prototype_size != embeddings_size:
            self.metric_space_decoder = torch.nn.Linear(embeddings_size, prototype_size)
            torch.nn.init.xavier_uniform_(self.metric_space_decoder.weight)

        # create initial prototypes for all classes (all initial prototypes are a vector of all 1s)
        self.prototype_vectors = torch.nn.Parameter(torch.ones(num_prototypes, prototype_size), requires_grad=True)

        # if set, create initial prototypes from normal distribution
        if normal_distributed_initial_prototypes:
            self.prototype_vectors = torch.nn.Parameter(torch.normal(torch.zeros(num_prototypes, prototype_size)))

        # if set, use a radius
        self.prototype_radii: Optional[torch.nn.Parameter] = None
        if use_radius:
            self.prototype_radii = torch.nn.Parameter(torch.ones(num_prototypes), requires_grad=True)

        self.min_radius = min_radius
        self.learning_mode = learning_mode

        assert (unlabeled_idx is None) == (
            unlabeled_distance is None
        ), "'unlabeled_idx' and 'unlabeled_distance' should either both be set or both not be set."

        self.unlabeled_idx = unlabeled_idx
        self.unlabeled_distance = unlabeled_distance

        self._distance_function = distance_function

        self.distance: Optional[torch.nn.Module] = None
        if distance_function.lower() == "hyperbolic":
            self.distance = HyperbolicDistance()
        elif distance_function.lower() == "cosine":
            self.distance = CosineDistance()
        elif distance_function.lower() == "logit_cosine":
            self.distance = LogitCosineDistance()
        elif distance_function.lower() == "euclidean":
            self.distance = EuclideanDistance()
        elif distance_function.lower() == "dot_product":
            self.distance = NegativeScaledDotProduct()
        else:
            raise KeyError(f"Distance function {distance_function} not found.")

        # all parameters will be pushed internally to the specified device
        self.to(flair.device)

    @property
    def num_prototypes(self):
        # number of classes, i.e. rows of the prototype matrix
        return self.prototype_vectors.size(0)

    def forward(self, embedded):
        if self.learning_mode == "learn_only_map_and_prototypes":
            embedded = embedded.detach()

        # decode embeddings into prototype space
        encoded = self.metric_space_decoder(embedded) if self.metric_space_decoder is not None else embedded

        prot = self.prototype_vectors
        radii = self.prototype_radii

        # detach() blocks gradient flow into the frozen parts for the chosen learning mode
        if self.learning_mode == "learn_only_prototypes":
            encoded = encoded.detach()

        if self.learning_mode == "learn_only_embeddings_and_map":
            prot = prot.detach()
            if radii is not None:
                radii = radii.detach()

        distance = self.distance(encoded, prot)

        # softplus keeps the learned radius positive; min_radius bounds it from below
        if radii is not None:
            distance /= self.min_radius + torch.nn.functional.softplus(radii)

        # if unlabeled distance is set, mask out loss to unlabeled class prototype
        if self.unlabeled_distance:
            distance[..., self.unlabeled_idx] = self.unlabeled_distance

        # smaller distance => larger score
        scores = -distance

        return scores
class LabelVerbalizerDecoder(torch.nn.Module):
    """A class for decoding labels using the idea of siamese networks / bi-encoders. This can be used for all classification tasks in flair.

    Args:
        label_embedding (flair.embeddings.Embeddings):
            The embeddings used to encode the verbalized labels.
        label_dictionary (flair.data.Dictionary):
            The label dictionary containing the mapping between labels and indices.

    Attributes:
        label_embedding (flair.embeddings.Embeddings):
            The embeddings used to encode the verbalized labels.
        verbalized_labels (List[flair.data.Sentence]):
            One sentence per label, containing its verbalized form.

    Methods:
        forward(self, inputs: torch.Tensor) -> torch.Tensor:
            Takes the input embeddings and returns a tensor of label scores.

    Examples:
        label_dictionary = corpus.make_label_dictionary("ner")
        label_encoder = TransformerWordEmbeddings('bert-base-uncased')
        label_verbalizer_decoder = LabelVerbalizerDecoder(label_encoder, label_dictionary)
    """

    def __init__(self, label_embedding: Embeddings, label_dictionary: Dictionary):
        super().__init__()
        self.label_embedding = label_embedding
        self.verbalized_labels: List[Sentence] = self.verbalize_labels(label_dictionary)
        self.to(flair.device)

    @staticmethod
    def verbalize_labels(label_dictionary: Dictionary) -> List[Sentence]:
        """Takes a label dictionary and returns a list of sentences with verbalized labels.

        BIOES/BIO prefixes of span labels are spelled out ("B-PER" -> "begin PER");
        non-span labels are used as-is.

        Args:
            label_dictionary (flair.data.Dictionary): The label dictionary to verbalize.

        Returns:
            A list of sentences with verbalized labels.

        Examples:
            label_dictionary = corpus.make_label_dictionary("ner")
            verbalized_labels = LabelVerbalizerDecoder.verbalize_labels(label_dictionary)
            print(verbalized_labels)
            [Sentence: "begin person", Sentence: "inside person", Sentence: "end person", Sentence: "single org", ...]
        """
        verbalized_labels = []
        for byte_label, idx in label_dictionary.item2idx.items():
            str_label = byte_label.decode("utf-8")
            if label_dictionary.span_labels:
                if str_label == "O":
                    verbalized_labels.append("outside")
                elif str_label.startswith("B-"):
                    verbalized_labels.append("begin " + str_label.split("-")[1])
                elif str_label.startswith("I-"):
                    verbalized_labels.append("inside " + str_label.split("-")[1])
                elif str_label.startswith("E-"):
                    verbalized_labels.append("ending " + str_label.split("-")[1])
                elif str_label.startswith("S-"):
                    verbalized_labels.append("single " + str_label.split("-")[1])
                else:
                    verbalized_labels.append(str_label)
            else:
                verbalized_labels.append(str_label)
        return list(map(Sentence, verbalized_labels))

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Forward pass of the label verbalizer decoder.

        Scores are the dot products between input embeddings and the embedded
        verbalized labels.

        Args:
            inputs (torch.Tensor): The input tensor.

        Returns:
            The scores of the decoder.

        Raises:
            RuntimeError: If an unknown decoding type is specified.
        """
        # re-embed labels during training (their encoder changes) or when embeddings are missing
        if self.training or not self.label_embedding._everything_embedded(self.verbalized_labels):
            self.label_embedding.embed(self.verbalized_labels)

        label_tensor = torch.stack([label.get_embedding() for label in self.verbalized_labels])

        # during training, drop the cached label embeddings so they are recomputed next pass
        if self.training:
            store_embeddings(self.verbalized_labels, "none")

        scores = torch.mm(inputs, label_tensor.T)

        return scores
| 8,375 | 38.140187 | 140 | py |
flair | flair-master/flair/nn/__init__.py | from .decoder import LabelVerbalizerDecoder, PrototypicalDecoder
from .dropout import LockedDropout, WordDropout
from .model import Classifier, DefaultClassifier, Model
__all__ = [
"LockedDropout",
"WordDropout",
"Classifier",
"DefaultClassifier",
"Model",
"PrototypicalDecoder",
"LabelVerbalizerDecoder",
]
| 337 | 23.142857 | 64 | py |
flair | flair-master/flair/nn/distance/hyperbolic.py | """Hyperbolic distances implemented in pytorch.
This module was copied from the repository the following repository:
https://github.com/asappresearch/dynamic-classification
It contains the code from the paper "Metric Learning for Dynamic Text
Classification".
https://arxiv.org/abs/1911.01026
In case this file is modified, please consider contributing to the original
repository.
It was published under MIT License:
https://github.com/asappresearch/dynamic-classification/blob/master/LICENSE.md
Source: https://github.com/asappresearch/dynamic-classification/blob/55beb5a48406c187674bea40487c011e8fa45aab/distance/hyperbolic.py
"""
import torch
from torch import Tensor, nn
EPSILON = 1e-5


def arccosh(x):
    """Compute the arcosh, numerically stable."""
    # clamp away from 1 so that sqrt(x*x - 1) never hits exactly zero
    safe = torch.clamp(x, min=1 + EPSILON)
    return torch.log(safe) + torch.log1p(torch.sqrt(safe * safe - 1) / safe)


def mdot(x, y):
    """Compute the Minkowski inner product (first coordinate negated)."""
    metric = x.new_ones(1, x.size(1))
    metric[0, 0] = -1
    return torch.sum(metric * x * y, 1, keepdim=True)


def dist(x, y):
    """Get the hyperbolic distance between x and y."""
    return arccosh(-mdot(x, y))


def project(x):
    """Project onto the hyperboloid embedded in n+1 dimensions."""
    time_like = torch.sqrt(1.0 + torch.sum(x * x, 1, keepdim=True))
    return torch.cat([time_like, x], 1)


def log_map(x, y):
    """Perform the log step (map y into the tangent space at x)."""
    d = dist(x, y)
    return (d / torch.sinh(d)) * (y - torch.cosh(d) * x)


def norm(x):
    """Compute the Minkowski norm."""
    return torch.sqrt(torch.abs(mdot(x, x)))


def exp_map(x, y):
    """Perform the exp step (map tangent vector y back onto the hyperboloid at x)."""
    n = torch.clamp(norm(y), min=EPSILON)
    return torch.cosh(n) * x + (torch.sinh(n) / n) * y


def loss(x, y):
    """Get the loss for the optimizer (sum of squared hyperbolic distances)."""
    return torch.sum(dist(x, y) ** 2)
class HyperbolicDistance(nn.Module):
    """Implement a HyperbolicDistance object."""

    def forward(self, mat_1: Tensor, mat_2: Tensor) -> Tensor:
        """Returns the squared hyperbolic distance between each element in mat_1 and each element in mat_2.

        Parameters
        ----------
        mat_1: torch.Tensor
            matrix of shape (n_1, n_features)
        mat_2: torch.Tensor
            matrix of shape (n_2, n_features)

        Returns:
        -------
        dist: torch.Tensor
            distance matrix of shape (n_1, n_2)
        """
        # Recover the (implicit) time-like 0th hyperboloid coordinate of each row.
        time_1 = torch.sqrt(1 + mat_1.pow(2).sum(dim=1, keepdim=True))
        time_2 = torch.sqrt(1 + mat_2.pow(2).sum(dim=1, keepdim=True))

        # Minkowski bilinear form: time product minus space product, shape n_1 x n_2.
        bilinear = time_1.mm(time_2.t()) - mat_1[:, 1:].mm(mat_2[:, 1:].t())

        # Arcosh of the bilinear form gives the distance; square it.
        return arccosh(bilinear).pow(2)
class HyperbolicMean(nn.Module):
    """Compute the mean point in the hyperboloid model."""

    def forward(self, data: Tensor) -> Tensor:
        """Performs a forward pass through the network.

        Iteratively refines the mean with log/exp-map gradient steps
        (a Karcher-mean style iteration); fewer iterations are used during
        training to keep the pass cheap.

        Parameters
        ----------
        data : torch.Tensor
            The input data, as a float tensor

        Returns:
        -------
        torch.Tensor
            The encoded output, as a float tensor
        """
        n_iter = 5 if self.training else 100

        # Project the input data to n+1 dimensions
        projected = project(data)

        # start from the (renormalized) Euclidean mean of the projected points
        mean = torch.mean(projected, 0, keepdim=True)
        mean = mean / norm(mean)

        r = 1e-2  # step size of the gradient updates
        for _i in range(n_iter):
            g = -2 * torch.mean(log_map(mean, projected), 0, keepdim=True)
            mean = exp_map(mean, -r * g)
            mean = mean / norm(mean)

        # The first dimension, is recomputed in the distance module
        return mean.squeeze()[1:]
| 3,718 | 25.949275 | 132 | py |
flair | flair-master/flair/nn/distance/euclidean.py | """Euclidean distances implemented in pytorch.
This module was copied from the repository the following repository:
https://github.com/asappresearch/dynamic-classification
It contains the code from the paper "Metric Learning for Dynamic Text
Classification".
https://arxiv.org/abs/1911.01026
In case this file is modified, please consider contributing to the original
repository.
It was published under MIT License:
https://github.com/asappresearch/dynamic-classification/blob/master/LICENSE.md
Source: https://github.com/asappresearch/dynamic-classification/blob/55beb5a48406c187674bea40487c011e8fa45aab/distance/euclidean.py
"""
import torch
from torch import Tensor, nn
class EuclideanDistance(nn.Module):
    """Implement a EuclideanDistance object."""

    def forward(self, mat_1: Tensor, mat_2: Tensor) -> Tensor:
        """Returns the squared euclidean distance between each element in mat_1 and each element in mat_2.

        Parameters
        ----------
        mat_1: torch.Tensor
            matrix of shape (n_1, n_features)
        mat_2: torch.Tensor
            matrix of shape (n_2, n_features)

        Returns:
        -------
        dist: torch.Tensor
            distance matrix of shape (n_1, n_2)
        """
        # one column of squared distances per row of mat_2
        columns = []
        for j in range(mat_2.size(0)):
            columns.append(torch.sum((mat_1 - mat_2[j]) ** 2, dim=1))
        return torch.stack(columns, dim=1)
class EuclideanMean(nn.Module):
    """Implement a EuclideanMean object."""

    def forward(self, data: Tensor) -> Tensor:
        """Performs a forward pass through the network.

        Parameters
        ----------
        data : torch.Tensor
            The input data, as a float tensor

        Returns:
        -------
        torch.Tensor
            The mean over the first dimension, as a float tensor
        """
        return torch.mean(data, dim=0)
| 1,839 | 26.462687 | 131 | py |
flair | flair-master/flair/nn/distance/cosine.py | import torch
# Source: https://github.com/UKPLab/sentence-transformers/blob/master/sentence_transformers/util.py#L23
def dot_product(a: torch.Tensor, b: torch.Tensor, normalize=False):
    """Computes dot product for pairs of vectors.

    :param normalize: Vectors are normalized (leads to cosine similarity)
    :return: Matrix with res[i][j] = dot_product(a[i], b[j])
    """
    # promote 1-D vectors to single-row matrices
    if a.dim() == 1:
        a = a.unsqueeze(0)
    if b.dim() == 1:
        b = b.unsqueeze(0)

    if normalize:
        a = torch.nn.functional.normalize(a, p=2, dim=1)
        b = torch.nn.functional.normalize(b, p=2, dim=1)

    return a @ b.t()


class CosineDistance(torch.nn.Module):
    """Negated cosine similarity, so that smaller values mean more similar."""

    def forward(self, a, b):
        return dot_product(a, b, normalize=True).neg()


class LogitCosineDistance(torch.nn.Module):
    """Logit of the cosine distance rescaled to [0, 1]."""

    def forward(self, a, b):
        similarity = dot_product(a, b, normalize=True)
        return torch.logit(0.5 - 0.5 * similarity)


class NegativeScaledDotProduct(torch.nn.Module):
    """Negated dot product, scaled by 1/sqrt(d) as in attention scoring."""

    def forward(self, a, b):
        scale = torch.sqrt(torch.tensor(a.size(-1)))
        return dot_product(a, b, normalize=False).neg() / scale
| 1,129 | 27.974359 | 103 | py |
flair | flair-master/flair/nn/distance/__init__.py | from .cosine import CosineDistance, LogitCosineDistance, NegativeScaledDotProduct
from .euclidean import EuclideanDistance, EuclideanMean
from .hyperbolic import HyperbolicDistance, HyperbolicMean
__all__ = [
"EuclideanDistance",
"EuclideanMean",
"HyperbolicDistance",
"HyperbolicMean",
"CosineDistance",
"LogitCosineDistance",
"NegativeScaledDotProduct",
]
| 387 | 26.714286 | 81 | py |
flair | flair-master/flair/models/text_regression_model.py | import logging
import typing
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair
import flair.embeddings
from flair.data import Corpus, Dictionary, Sentence, _iter_dataset
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings.base import load_embeddings
from flair.nn.model import ReduceTransformerVocabMixin
from flair.training_utils import MetricRegression, Result, store_embeddings
log = logging.getLogger("flair")
class TextRegressor(flair.nn.Model[Sentence], ReduceTransformerVocabMixin):
    """Regression model that predicts one continuous value per sentence.

    A document embedding is fed through a single linear output unit; training
    minimizes a summed mean-squared error against the float value of each
    sentence's gold labels.
    """

    def __init__(
        self,
        document_embeddings: flair.embeddings.DocumentEmbeddings,
        label_name: str = "label",
    ) -> None:
        """Initializes a TextRegressor.

        :param document_embeddings: embeddings used to embed each sentence
        :param label_name: name of the label type holding the regression target
        """
        super().__init__()

        self.document_embeddings: flair.embeddings.DocumentEmbeddings = document_embeddings
        self.label_name = label_name

        # single linear output unit on top of the document embedding
        self.decoder = nn.Linear(self.document_embeddings.embedding_length, 1)

        nn.init.xavier_uniform_(self.decoder.weight)

        self.loss_function = nn.MSELoss(reduction="sum")

        # auto-spawn on GPU if available
        self.to(flair.device)

    @property
    def label_type(self):
        return self.label_name

    def _prepare_tensors(self, sentences: List[Sentence]) -> Tuple[torch.Tensor]:
        # embed all sentences and stack their document embeddings into one tensor
        self.document_embeddings.embed(sentences)
        embedding_names = self.document_embeddings.get_names()
        text_embedding_list = [sentence.get_embedding(embedding_names).unsqueeze(0) for sentence in sentences]
        text_embedding_tensor = torch.cat(text_embedding_list, 0).to(flair.device)
        return (text_embedding_tensor,)

    def forward(self, *args: torch.Tensor) -> torch.Tensor:
        """Decodes sentence embeddings into one scalar score per sentence."""
        (text_embedding_tensor,) = args
        label_scores = self.decoder(text_embedding_tensor)
        return label_scores

    def forward_loss(self, sentences: List[Sentence]) -> Tuple[torch.Tensor, int]:
        """Embeds the sentences, predicts scores and returns (loss, number of sentences)."""
        labels = self._labels_to_tensor(sentences)
        text_embedding_tensor = self._prepare_tensors(sentences)
        scores = self.forward(*text_embedding_tensor)

        return self.loss_function(scores.squeeze(1), labels), len(sentences)

    def _labels_to_tensor(self, sentences: List[Sentence]):
        # gold targets: the float value of every label of every sentence, concatenated
        indices = [
            torch.tensor([float(label.value) for label in sentence.labels], dtype=torch.float) for sentence in sentences
        ]

        vec = torch.cat(indices, 0).to(flair.device)

        return vec

    def predict(
        self,
        sentences: Union[Sentence, List[Sentence]],
        mini_batch_size: int = 32,
        verbose: bool = False,
        label_name: Optional[str] = None,
        embedding_storage_mode="none",
    ) -> List[Sentence]:
        """Predicts a regression value for each sentence and attaches it as a label.

        :param sentences: one sentence or a list of sentences to predict on
        :param mini_batch_size: batch size used during inference
        :param verbose: if True, show a progress bar
        :param label_name: label type under which predictions are stored (defaults to self.label_name)
        :param embedding_storage_mode: how to store embeddings after prediction ("none", "cpu" or "gpu")
        :return: the input sentences, with predicted labels attached
        """
        if label_name is None:
            label_name = self.label_name if self.label_name is not None else "label"

        with torch.no_grad():
            if not isinstance(sentences, list):
                sentences = [sentences]

            if not sentences:
                return sentences

            Sentence.set_context_for_sentences(sentences)
            filtered_sentences = self._filter_empty_sentences(sentences)

            # sort by length (longest first) so batches contain similarly-sized sentences
            reordered_sentences = sorted(filtered_sentences, key=lambda s: len(s), reverse=True)

            if len(reordered_sentences) == 0:
                return sentences

            dataloader = DataLoader(
                dataset=FlairDatapointDataset(reordered_sentences),
                batch_size=mini_batch_size,
            )

            # progress bar for verbosity
            if verbose:
                progress_bar = tqdm(dataloader)
                progress_bar.set_description("Batch inference")
                dataloader = progress_bar

            for batch in dataloader:
                # skip empty batches
                if not batch:
                    continue

                (sentence_tensor,) = self._prepare_tensors(batch)
                scores = self.forward(sentence_tensor)

                for sentence, score in zip(batch, scores.tolist()):
                    sentence.set_label(label_name, value=str(score[0]))

                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)

            return sentences

    def forward_labels_and_loss(self, sentences: List[Sentence]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Returns both the predicted scores and the loss for a batch of sentences."""
        labels = self._labels_to_tensor(sentences)
        text_embedding_tensor = self._prepare_tensors(sentences)
        scores = self.forward(*text_embedding_tensor)

        return scores, self.loss_function(scores.squeeze(1), labels)

    def evaluate(
        self,
        data_points: Union[List[Sentence], Dataset],
        gold_label_type: str,
        out_path: Optional[Union[str, Path]] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        **kwargs,
    ) -> Result:
        """Evaluates the model with regression metrics (MSE, MAE, Pearson, Spearman)."""
        # read Dataset into data loader, if list of sentences passed, make Dataset first
        if not isinstance(data_points, Dataset):
            data_points = FlairDatapointDataset(data_points)
        data_loader = DataLoader(data_points, batch_size=mini_batch_size)

        with torch.no_grad():
            eval_loss = torch.zeros(1, device=flair.device)

            metric = MetricRegression("Evaluation")

            lines: List[str] = []
            total_count = 0
            for batch in data_loader:
                if isinstance(batch, Sentence):
                    batch = [batch]

                scores, loss = self.forward_labels_and_loss(batch)

                true_values = []
                for sentence in batch:
                    total_count += 1
                    for label in sentence.get_labels(gold_label_type):
                        true_values.append(float(label.value))

                results = scores[:, 0].cpu().tolist()

                eval_loss += loss

                metric.true.extend(true_values)
                metric.pred.extend(results)

                for sentence, prediction, true_value in zip(batch, results, true_values):
                    eval_line = f"{sentence.to_original_text()}\t{true_value}\t{prediction}\n"
                    lines.append(eval_line)

                store_embeddings(batch, embedding_storage_mode)

            eval_loss /= total_count

            # optionally write per-sentence predictions to file
            if out_path is not None:
                with open(out_path, "w", encoding="utf-8") as outfile:
                    outfile.write("".join(lines))

            detailed_result = (
                f"AVG: mse: {metric.mean_squared_error():.4f} - "
                f"mae: {metric.mean_absolute_error():.4f} - "
                f"pearson: {metric.pearsonr():.4f} - "
                f"spearman: {metric.spearmanr():.4f}"
            )

            result: Result = Result(
                main_score=metric.pearsonr(),
                detailed_results=detailed_result,
                scores={
                    "loss": eval_loss.item(),
                    "mse": metric.mean_squared_error(),
                    "mae": metric.mean_absolute_error(),
                    "pearson": metric.pearsonr(),
                    "spearman": metric.spearmanr(),
                },
            )

            return result

    def _get_state_dict(self):
        # serialize embeddings and label name alongside the base model state
        model_state = {
            **super()._get_state_dict(),
            "document_embeddings": self.document_embeddings.save_embeddings(use_state_dict=False),
            "label_name": self.label_type,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        # embeddings may be serialized either as a dict or as a ready object
        embeddings = state["document_embeddings"]
        if isinstance(embeddings, dict):
            embeddings = load_embeddings(embeddings)

        return super()._init_model_with_state_dict(
            state, document_embeddings=embeddings, label_name=state.get("label_name"), **kwargs
        )

    @staticmethod
    def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
        # drop sentences without tokens, warning about how many were removed
        filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
        if len(sentences) != len(filtered_sentences):
            log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.")
        return filtered_sentences

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TextRegressor":
        from typing import cast

        return cast("TextRegressor", super().load(model_path=model_path))

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        # yields token texts for transformer vocabulary reduction
        for sentence in _iter_dataset(corpus.get_all_sentences()):
            yield [t.text for t in sentence]
| 9,039 | 36.201646 | 120 | py |
flair | flair-master/flair/models/pairwise_classification_model.py | import typing
from typing import List
import torch
import flair.embeddings
import flair.nn
from flair.data import Corpus, Sentence, TextPair, _iter_dataset
class TextPairClassifier(flair.nn.DefaultClassifier[TextPair, TextPair]):
    """Text Pair Classification Model for tasks such as Recognizing Textual Entailment, built upon TextClassifier.

    The model takes document embeddings and puts resulting text representation(s) into a linear layer to get the
    actual class label. We provide two ways to embed the DataPairs: Either by embedding both DataPoints
    and concatenating the resulting vectors ("embed_separately=True") or by concatenating the DataPoints and embedding
    the resulting vector ("embed_separately=False").
    """

    def __init__(
        self,
        embeddings: flair.embeddings.DocumentEmbeddings,
        label_type: str,
        embed_separately: bool = False,
        **classifierargs,
    ) -> None:
        """Initializes a TextPairClassifier.

        :param embeddings: embeddings used to embed each data point
        :param label_type: string identifier of the label type to predict
        :param embed_separately: if True, embed both halves of the pair separately and concatenate
            the vectors; if False, concatenate the texts (separator-joined) and embed once
        :param classifierargs: further arguments (e.g. label_dictionary, multi_label,
            multi_label_threshold, loss_weights) passed to the DefaultClassifier base class
        """
        super().__init__(
            **classifierargs,
            embeddings=embeddings,
            final_embedding_size=2 * embeddings.embedding_length if embed_separately else embeddings.embedding_length,
            should_embed_sentence=False,
        )

        self._label_type = label_type

        self.embed_separately = embed_separately

        if not self.embed_separately:
            # set separator to concatenate two sentences
            self.sep = " "
            if isinstance(
                self.embeddings,
                flair.embeddings.document.TransformerDocumentEmbeddings,
            ):
                # prefer the transformer's own separator token if it has one
                if self.embeddings.tokenizer.sep_token:
                    self.sep = " " + str(self.embeddings.tokenizer.sep_token) + " "
                else:
                    self.sep = " [SEP] "

        # auto-spawn on GPU if available
        self.to(flair.device)

    @property
    def label_type(self):
        return self._label_type

    def _get_data_points_from_sentence(self, sentence: TextPair) -> List[TextPair]:
        # the pair itself is the single classification target
        return [sentence]

    def _get_embedding_for_data_point(self, prediction_data_point: TextPair) -> torch.Tensor:
        """Embeds a text pair, either as two separate embeddings (concatenated) or as one joined text."""
        embedding_names = self.embeddings.get_names()
        if self.embed_separately:
            self.embeddings.embed([prediction_data_point.first, prediction_data_point.second])
            return torch.cat(
                [
                    prediction_data_point.first.get_embedding(embedding_names),
                    prediction_data_point.second.get_embedding(embedding_names),
                ],
                0,
            )
        else:
            # join both texts with the separator and embed the combined sentence
            concatenated_sentence = Sentence(
                prediction_data_point.first.to_tokenized_string()
                + self.sep
                + prediction_data_point.second.to_tokenized_string(),
                use_tokenizer=False,
            )
            self.embeddings.embed(concatenated_sentence)
            return concatenated_sentence.get_embedding(embedding_names)

    def _get_state_dict(self):
        # serialize embeddings and classifier configuration alongside the base state
        model_state = {
            **super()._get_state_dict(),
            "document_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_dictionary": self.label_dictionary,
            "label_type": self.label_type,
            "embed_separately": self.embed_separately,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("document_embeddings"),
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type"),
            embed_separately=state.get("embed_separately"),
            **kwargs,
        )

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        # yields token texts of both pair halves for transformer vocabulary reduction
        for sentence_pair in _iter_dataset(corpus.get_all_sentences()):
            yield [t.text for t in sentence_pair.first]
            yield [t.text for t in sentence_pair.second]
| 4,628 | 38.905172 | 118 | py |
flair | flair-master/flair/models/word_tagger_model.py | import logging
from pathlib import Path
from typing import Any, Dict, List, Union
import torch
import flair.nn
from flair.data import Dictionary, Sentence, Span, Token
from flair.embeddings import TokenEmbeddings
log = logging.getLogger("flair")
def WordTagger(embeddings, tag_dictionary, tag_type, **classifierargs):
    """Deprecated factory kept for backwards compatibility.

    Emits a deprecation warning and constructs a TokenClassifier from the
    legacy WordTagger arguments.
    """
    from warnings import warn

    warn("The WordTagger class is deprecated after Flair version 0.12.2. Use TokenClassifier instead!")
    return TokenClassifier(
        embeddings=embeddings, label_dictionary=tag_dictionary, label_type=tag_type, **classifierargs
    )
class TokenClassifier(flair.nn.DefaultClassifier[Sentence, Token]):
"""This is a simple class of models that tags individual words in text."""
    def __init__(
        self,
        embeddings: TokenEmbeddings,
        label_dictionary: Dictionary,
        label_type: str,
        span_encoding: str = "BIOES",
        **classifierargs,
    ) -> None:
        """Initializes a TokenClassifier.

        :param embeddings: word embeddings used in tagger
        :param label_dictionary: dictionary of labels you want to predict
        :param label_type: string identifier for the label type (e.g. "ner")
        :param span_encoding: scheme used to encode span labels as per-token tags, "BIOES" or "BIO"
        :param classifierargs: further arguments passed to the DefaultClassifier base class
        """
        # if the classifier predicts BIO/BIOES span labels, the internal label dictionary must be computed
        if label_dictionary.span_labels:
            internal_label_dictionary = self._create_internal_label_dictionary(label_dictionary, span_encoding)
        else:
            internal_label_dictionary = label_dictionary

        super().__init__(
            embeddings=embeddings,
            label_dictionary=internal_label_dictionary,
            final_embedding_size=embeddings.embedding_length,
            **classifierargs,
        )

        # fields in case this is a span-prediction problem
        self.span_prediction_problem = self._determine_if_span_prediction_problem(internal_label_dictionary)
        self.span_encoding = span_encoding

        # the label type
        self._label_type: str = label_type

        # all parameters will be pushed internally to the specified device
        self.to(flair.device)
@staticmethod
def _create_internal_label_dictionary(label_dictionary, span_encoding):
internal_label_dictionary = Dictionary(add_unk=False)
for label in label_dictionary.get_items():
if label == "<unk>":
continue
internal_label_dictionary.add_item("O")
if span_encoding == "BIOES":
internal_label_dictionary.add_item("S-" + label)
internal_label_dictionary.add_item("B-" + label)
internal_label_dictionary.add_item("E-" + label)
internal_label_dictionary.add_item("I-" + label)
if span_encoding == "BIO":
internal_label_dictionary.add_item("B-" + label)
internal_label_dictionary.add_item("I-" + label)
return internal_label_dictionary
def _determine_if_span_prediction_problem(self, dictionary: Dictionary) -> bool:
return any(item.startswith(("B-", "S-", "I-")) for item in dictionary.get_items())
def _get_state_dict(self):
model_state = {
**super()._get_state_dict(),
"embeddings": self.embeddings.save_embeddings(use_state_dict=False),
"label_dictionary": self.label_dictionary,
"label_type": self.label_type,
}
return model_state
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
return super()._init_model_with_state_dict(
state,
embeddings=state.get("embeddings"),
label_dictionary=state.get("label_dictionary"),
label_type=state.get("label_type"),
**kwargs,
)
def _get_embedding_for_data_point(self, prediction_data_point: Token) -> torch.Tensor:
names = self.embeddings.get_names()
return prediction_data_point.get_embedding(names)
def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Token]:
# special handling during training if this is a span prediction problem
if self.training and self.span_prediction_problem:
for token in sentence.tokens:
token.set_label(self.label_type, "O")
for span in sentence.get_spans(self.label_type):
span_label = span.get_label(self.label_type).value
if len(span) == 1:
if self.span_encoding == "BIOES":
span.tokens[0].set_label(self.label_type, "S-" + span_label)
elif self.span_encoding == "BIO":
span.tokens[0].set_label(self.label_type, "B-" + span_label)
else:
for token in span.tokens:
token.set_label(self.label_type, "I-" + span_label)
span.tokens[0].set_label(self.label_type, "B-" + span_label)
if self.span_encoding == "BIOES":
span.tokens[-1].set_label(self.label_type, "E-" + span_label)
return sentence.tokens
def _post_process_batch_after_prediction(self, batch, label_name):
if self.span_prediction_problem:
for sentence in batch:
# internal variables
previous_tag = "O-"
current_span: List[Token] = []
for token in sentence:
bioes_tag = token.get_label(label_name).value
# non-set tags are OUT tags
if bioes_tag == "" or bioes_tag == "O" or bioes_tag == "_":
bioes_tag = "O-"
# anything that is not OUT is IN
in_span = bioes_tag != "O-"
# does this prediction start a new span?
starts_new_span = False
if bioes_tag[:2] in {"B-", "S-"} or (
in_span
and previous_tag[2:] != bioes_tag[2:]
and (bioes_tag[:2] == "I-" or previous_tag[2:] == "S-")
):
# B- and S- always start new spans
# if the predicted class changes, I- starts a new span
# if the predicted class changes and S- was previous tag, start a new span
starts_new_span = True
# if an existing span is ended (either by reaching O or starting a new span)
if (starts_new_span or not in_span) and len(current_span) > 0:
sentence[current_span[0].idx - 1 : current_span[-1].idx].set_label(label_name, previous_tag[2:])
# reset for-loop variables for new span
current_span = []
if in_span:
current_span.append(token)
# remember previous tag
previous_tag = bioes_tag
token.remove_labels(label_name)
token.remove_labels(self.label_type)
# if there is a span at end of sentence, add it
if len(current_span) > 0:
sentence[current_span[0].idx - 1 : current_span[-1].idx].set_label(label_name, previous_tag[2:])
@property
def label_type(self):
return self._label_type
def _print_predictions(self, batch, gold_label_type):
lines = []
if self.span_prediction_problem:
for datapoint in batch:
# all labels default to "O"
for token in datapoint:
token.set_label("gold_bio", "O")
token.set_label("predicted_bio", "O")
# set gold token-level
for gold_label in datapoint.get_labels(gold_label_type):
gold_span: Span = gold_label.data_point
prefix = "B-"
for token in gold_span:
token.set_label("gold_bio", prefix + gold_label.value)
prefix = "I-"
# set predicted token-level
for predicted_label in datapoint.get_labels("predicted"):
predicted_span: Span = predicted_label.data_point
prefix = "B-"
for token in predicted_span:
token.set_label("predicted_bio", prefix + predicted_label.value)
prefix = "I-"
# now print labels in CoNLL format
for token in datapoint:
eval_line = (
f"{token.text} "
f"{token.get_label('gold_bio').value} "
f"{token.get_label('predicted_bio').value}\n"
)
lines.append(eval_line)
lines.append("\n")
else:
for datapoint in batch:
# print labels in CoNLL format
for token in datapoint:
eval_line = (
f"{token.text} "
f"{token.get_label(gold_label_type).value} "
f"{token.get_label('predicted').value}\n"
)
lines.append(eval_line)
lines.append("\n")
return lines
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TokenClassifier":
from typing import cast
return cast("TokenClassifier", super().load(model_path=model_path))
| 9,672 | 40.337607 | 120 | py |
flair | flair-master/flair/models/pairwise_regression_model.py | import typing
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair.embeddings
import flair.nn
from flair.data import Corpus, Dictionary, Sentence, TextPair, _iter_dataset
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.nn.model import ReduceTransformerVocabMixin
from flair.training_utils import MetricRegression, Result, store_embeddings
class TextPairRegressor(flair.nn.Model[TextPair], ReduceTransformerVocabMixin):
    """Text Pair Regression Model for tasks such as Semantic Textual Similarity Benchmark.

    The model takes document embeddings and puts resulting text representation(s) into a linear layer to get the
    score. We provide two ways to embed the DataPairs: Either by embedding both DataPoints
    and concatenating the resulting vectors ("embed_separately=True") or by concatenating the DataPoints and embedding
    the resulting vector ("embed_separately=False").
    """

    def __init__(
        self,
        embeddings: flair.embeddings.DocumentEmbeddings,
        label_type: str,
        embed_separately: bool = False,
        dropout: float = 0.0,
        locked_dropout: float = 0.0,
        word_dropout: float = 0.0,
        decoder: Optional[torch.nn.Module] = None,
        **classifierargs,
    ) -> None:
        """Initialize the Text Pair Regression Model.

        :param embeddings: embeddings used to embed each data point
        :param label_type: name of the label to predict
        :param embed_separately: if True, embed both sentences separately and concatenate the two vectors
        :param dropout: standard dropout probability applied to the pair embedding
        :param locked_dropout: locked dropout probability applied to the pair embedding
        :param word_dropout: word dropout probability applied to the pair embedding
        :param decoder: optional custom decoder module; defaults to a single linear layer
        """
        super().__init__()

        self.embeddings: flair.embeddings.DocumentEmbeddings = embeddings
        self.label_name = label_type
        self.embed_separately = embed_separately

        if not self.embed_separately:
            # set separator to concatenate two sentences
            self.sep = " "
            if isinstance(
                self.embeddings,
                flair.embeddings.document.TransformerDocumentEmbeddings,
            ):
                if self.embeddings.tokenizer.sep_token:
                    self.sep = " " + str(self.embeddings.tokenizer.sep_token) + " "
                else:
                    self.sep = " [SEP] "

        self.decoder: torch.nn.Module
        if decoder is None:
            # default decoder: one linear layer mapping the pair representation to a single score
            self.decoder = nn.Linear(
                2 * embeddings.embedding_length if embed_separately else embeddings.embedding_length, 1
            )
            nn.init.xavier_uniform_(self.decoder.weight)
        else:
            self.decoder = decoder

        # init dropouts
        self.dropout: torch.nn.Dropout = torch.nn.Dropout(dropout)
        self.locked_dropout = flair.nn.LockedDropout(locked_dropout)
        self.word_dropout = flair.nn.WordDropout(word_dropout)

        self.loss_function = nn.MSELoss(reduction="sum")

        # auto-spawn on GPU if available
        self.to(flair.device)

    @property
    def label_type(self):
        """The label type this regressor predicts."""
        return self.label_name

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        """Yield the token texts of both sides of every pair in the corpus (for vocab reduction)."""
        for sentence_pair in _iter_dataset(corpus.get_all_sentences()):
            yield [t.text for t in sentence_pair.first]
            yield [t.text for t in sentence_pair.second]

    def forward_loss(self, pairs: List[TextPair]) -> Tuple[torch.Tensor, int]:
        """Compute the training loss for a batch of pairs.

        :return: tuple of (summed MSE loss, number of targets)
        """
        loss, num = self._forward_loss_and_scores(pairs=pairs, return_num=True, return_scores=False)
        assert isinstance(loss, torch.Tensor)
        assert isinstance(num, int)
        return loss, num

    def _forward_loss_and_scores(self, pairs: List[TextPair], return_num=True, return_scores=True) -> Tuple:
        """Run a forward pass and return the loss, optionally followed by target count and raw scores.

        The returned tuple always starts with the loss; the count and the score tensor are
        appended according to return_num / return_scores, so callers can unpack consistently.
        """
        # make a forward pass to produce embedded data points and labels
        pairs = [pair for pair in pairs if self._filter_data_point(pair)]

        if len(pairs) == 0:
            return self._zero_loss_tuple(return_num, return_scores)

        # get their gold labels as a tensor
        target_tensor = self._prepare_target_tensor(pairs)
        if target_tensor.size(0) == 0:
            return self._zero_loss_tuple(return_num, return_scores)

        # pass data points through network to get encoded data point tensor
        data_point_tensor = self._encode_data_points(pairs)

        # decode
        scores = self.decoder(data_point_tensor)[:, 0]

        # calculate the loss
        loss, num = self._calculate_loss(scores, target_tensor)

        return_value: Tuple[Any, ...] = (loss,)
        if return_num:
            return_value += (num,)
        if return_scores:
            return_value += (scores,)
        return return_value

    @staticmethod
    def _zero_loss_tuple(return_num: bool, return_scores: bool) -> Tuple:
        """Return a zero-loss tuple shaped consistently with _forward_loss_and_scores.

        BUGFIX: the empty-batch early return previously always produced a 2-tuple,
        which broke callers that requested scores (e.g. evaluate unpacking 3 values).
        """
        return_value: Tuple[Any, ...] = (torch.tensor(0.0, requires_grad=True, device=flair.device),)
        if return_num:
            return_value += (1,)
        if return_scores:
            return_value += (torch.zeros(0, device=flair.device),)
        return return_value

    def _calculate_loss(self, scores: torch.Tensor, target_tensor: torch.Tensor) -> Tuple[torch.Tensor, int]:
        """Compute the summed MSE loss between predicted scores and gold targets."""
        return self.loss_function(scores, target_tensor), target_tensor.size(0)

    def _prepare_target_tensor(self, pairs: List[TextPair]):
        """Collect all gold regression targets of the given pairs into one float tensor."""
        target_values = [
            torch.tensor([float(label.value) for label in pair.get_labels(self.label_name)], dtype=torch.float)
            for pair in pairs
        ]
        return torch.cat(target_values, 0).to(flair.device)

    def _filter_data_point(self, pair: TextPair) -> bool:
        """Keep only non-empty pairs."""
        return len(pair) > 0

    def _encode_data_points(self, data_points: List[TextPair]):
        """Embed the given pairs and apply all dropouts, returning one tensor per pair."""
        # get a tensor of data points
        data_point_tensor = torch.stack([self._get_embedding_for_data_point(data_point) for data_point in data_points])

        # do dropout (dropout layers expect a sequence dimension, hence the unsqueeze/squeeze)
        data_point_tensor = data_point_tensor.unsqueeze(1)
        data_point_tensor = self.dropout(data_point_tensor)
        data_point_tensor = self.locked_dropout(data_point_tensor)
        data_point_tensor = self.word_dropout(data_point_tensor)
        data_point_tensor = data_point_tensor.squeeze(1)

        return data_point_tensor

    def _get_embedding_for_data_point(self, prediction_data_point: TextPair) -> torch.Tensor:
        """Embed one pair, either as two concatenated vectors or as one joint embedding."""
        embedding_names = self.embeddings.get_names()
        if self.embed_separately:
            self.embeddings.embed([prediction_data_point.first, prediction_data_point.second])
            return torch.cat(
                [
                    prediction_data_point.first.get_embedding(embedding_names),
                    prediction_data_point.second.get_embedding(embedding_names),
                ],
                0,
            )
        else:
            # concatenate both sides with the tokenizer's separator and embed as one document
            concatenated_sentence = Sentence(
                prediction_data_point.first.to_tokenized_string()
                + self.sep
                + prediction_data_point.second.to_tokenized_string(),
                use_tokenizer=False,
            )
            self.embeddings.embed(concatenated_sentence)
            return concatenated_sentence.get_embedding(embedding_names)

    def _get_state_dict(self):
        """Serialize the model state including embeddings and constructor arguments."""
        model_state = {
            **super()._get_state_dict(),
            "document_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_type": self.label_type,
            "embed_separately": self.embed_separately,
            "dropout": self.dropout.p,
            "word_dropout": self.word_dropout.dropout_rate,
            "locked_dropout": self.locked_dropout.dropout_rate,
            "decoder": self.decoder,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Rebuild the model from a saved state dict."""
        # add DefaultClassifier arguments
        for arg in [
            "document_embeddings",
            "label_type",
            "embed_separately",
            "dropout",
            "word_dropout",
            "locked_dropout",
            "decoder",
        ]:
            if arg not in kwargs and arg in state:
                kwargs[arg] = state[arg]
        return super()._init_model_with_state_dict(state, **kwargs)

    def predict(
        self,
        pairs: Union[TextPair, List[TextPair]],
        mini_batch_size: int = 32,
        verbose: bool = False,
        label_name: Optional[str] = None,
        embedding_storage_mode="none",
    ) -> List[TextPair]:
        """Predict regression scores for the given pairs and attach them as labels.

        :param pairs: a single TextPair or a list of TextPairs
        :param mini_batch_size: number of pairs per inference batch
        :param verbose: if True, show a progress bar over batches
        :param label_name: label name under which predictions are stored (defaults to the model's label type)
        :param embedding_storage_mode: storage mode passed to store_embeddings
        :return: the input pairs, with prediction labels attached
        """
        if label_name is None:
            label_name = self.label_name if self.label_name is not None else "label"

        with torch.no_grad():
            if isinstance(pairs, list):
                if len(pairs) == 0:
                    return []
            else:
                pairs = [pairs]

            filtered_pairs = [pair for pair in pairs if self._filter_data_point(pair)]
            if len(filtered_pairs) == 0:
                return pairs

            # sort by total length so similarly-sized pairs end up in the same batch
            reordered_pairs = sorted(filtered_pairs, key=lambda pair: len(pair.first) + len(pair.second), reverse=True)

            dataloader = DataLoader(
                dataset=FlairDatapointDataset(reordered_pairs),
                batch_size=mini_batch_size,
            )

            # progress bar for verbosity
            if verbose:
                progress_bar = tqdm(dataloader)
                progress_bar.set_description("Batch inference")
                dataloader = progress_bar

            for batch in dataloader:
                # stop if all sentences are empty
                if not batch:
                    continue

                # BUGFIX: encode only the current batch. Previously the full `pairs` list was
                # re-encoded for every batch, so scores were misaligned with the batch's pairs.
                data_point_tensor = self._encode_data_points(batch)
                scores = self.decoder(data_point_tensor)

                for sentence, score in zip(batch, scores.tolist()):
                    sentence.set_label(label_name, value=str(score[0]))

                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)

        return pairs

    def evaluate(
        self,
        data_points: Union[List[TextPair], Dataset],
        gold_label_type: str,
        out_path: Union[str, Path, None] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        **kwargs,
    ) -> Result:
        """Evaluate the model on the given pairs, reporting MSE/MAE/Pearson/Spearman.

        :param data_points: pairs (or a Dataset of pairs) to evaluate on
        :param gold_label_type: label type containing the gold regression targets
        :param out_path: optional path for a TSV file with per-pair predictions
        :return: a Result whose main score is Spearman correlation unless overridden
        """
        # read Dataset into data loader, if list of sentences passed, make Dataset first
        if not isinstance(data_points, Dataset):
            data_points = FlairDatapointDataset(data_points)
        data_loader = DataLoader(data_points, batch_size=mini_batch_size)

        with torch.no_grad():
            eval_loss = torch.zeros(1, device=flair.device)
            metric = MetricRegression("Evaluation")
            if out_path is not None:
                out_file = open(out_path, "w", encoding="utf-8")  # noqa: SIM115
            total_count = 0
            try:
                for batch in data_loader:
                    if isinstance(batch, Sentence):
                        batch = [batch]

                    loss, num, scores = self._forward_loss_and_scores(batch, return_scores=True)

                    true_values = []
                    for sentence in batch:
                        total_count += 1
                        for label in sentence.get_labels(gold_label_type):
                            true_values.append(float(label.value))

                    results = scores.cpu().tolist()

                    eval_loss += loss

                    metric.true.extend(true_values)
                    metric.pred.extend(results)

                    if out_path is not None:
                        for pair, prediction, true_value in zip(batch, results, true_values):
                            eval_line = "{}\t{}\t{}\t{}\n".format(
                                pair.first.to_original_text(), pair.second.to_original_text(), true_value, prediction
                            )
                            out_file.write(eval_line)

                    store_embeddings(batch, embedding_storage_mode)
            finally:
                # close the output file even if evaluation raises
                if out_path is not None:
                    out_file.close()

            eval_loss /= total_count

            detailed_result = (
                f"AVG: mse: {metric.mean_squared_error():.4f} - "
                f"mae: {metric.mean_absolute_error():.4f} - "
                f"pearson: {metric.pearsonr():.4f} - "
                f"spearman: {metric.spearmanr():.4f}"
            )

            scores = {
                "loss": eval_loss.item(),
                "mse": metric.mean_squared_error(),
                "mae": metric.mean_absolute_error(),
                "pearson": metric.pearsonr(),
                "spearman": metric.spearmanr(),
            }

            if main_evaluation_metric[0] in ("correlation", "other"):
                main_score = scores[main_evaluation_metric[1]]
            else:
                main_score = scores["spearman"]

            return Result(
                main_score=main_score,
                detailed_results=detailed_result,
                scores=scores,
            )
| 12,876 | 36.324638 | 119 | py |
flair | flair-master/flair/models/regexp_tagger.py | import re
import typing
from dataclasses import dataclass, field
from typing import Dict, List, Tuple, Union
from flair.data import Sentence, Span, Token
@dataclass
class TokenCollection:
    """Helper for RegexpTagger: caches token boundaries of one Sentence for fast span lookup.
    :param sentence: A Sentence object
    """
    sentence: Sentence
    __tokens_start_pos: List[int] = field(init=False, default_factory=list)
    __tokens_end_pos: List[int] = field(init=False, default_factory=list)
    def __post_init__(self):
        # cache the character offsets of every token once, up front
        for tok in self.tokens:
            self.__tokens_start_pos.append(tok.start_position)
            self.__tokens_end_pos.append(tok.end_position)
    @property
    def tokens(self) -> List[Token]:
        return [token for token in self.sentence]
    def get_token_span(self, span: Tuple[int, int]) -> Span:
        """Return the Span covering exactly the tokens between two character positions.
        The interval must align with token boundaries: its start must be some token's
        start position and its end some token's end position. Otherwise, the underlying
        ``list.index`` lookup raises a ValueError.
        :param span: Start and end pos of the requested span as tuple
        :return: A span object spanning the requested token interval
        """
        start_idx: int = self.__tokens_start_pos.index(span[0])
        end_idx: int = self.__tokens_end_pos.index(span[1])
        return Span(self.tokens[start_idx : end_idx + 1])
class RegexpTagger:
    def __init__(self, mapping: Union[List[Tuple[str, str]], Tuple[str, str]]) -> None:
        r"""This tagger is capable of tagging sentence objects with given regexp -> label mappings.
        I.e: The tuple (r'(["\'])(?:(?=(\\?))\2.)*?\1', 'QUOTE') maps every match of the regexp to
        a <QUOTE> labeled span and therefore labels the given sentence object with RegexpTagger.predict().
        This tagger supports multilabeling so tokens can be included in multiple labeled spans.
        The regexp are compiled internally and an re.error will be raised if the compilation of a given regexp fails.
        If a match violates (in this case overlaps) a token span, an exception is raised.
        :param mapping: A list of tuples or a single tuple representing a mapping as regexp -> label
        """
        # label -> compiled pattern
        self._regexp_mapping: Dict[str, typing.Pattern] = {}
        self.register_labels(mapping=mapping)
    @property
    def registered_labels(self):
        """The current label -> compiled pattern mapping."""
        return self._regexp_mapping
    def register_labels(self, mapping: Union[List[Tuple[str, str]], Tuple[str, str]]):
        """Register a regexp -> label mapping.
        :param mapping: A list of tuples or a single tuple representing a mapping as regexp -> label
        :raises re.error: if a regexp fails to compile (original error is chained)
        """
        mapping = self._listify(mapping)
        for regexp, label in mapping:
            try:
                self._regexp_mapping[label] = re.compile(regexp)
            except re.error as err:
                # chain the original compilation error for easier debugging
                raise re.error(
                    f"Couldn't compile regexp '{regexp}' for label '{label}'. Aborted with error: '{err.msg}'"
                ) from err
    def remove_labels(self, labels: Union[List[str], str]):
        """Remove a registered regexp -> label mapping given by label.
        Labels that were never registered are silently ignored.
        :param labels: A list of labels or a single label as strings.
        """
        labels = self._listify(labels)
        for label in labels:
            # pop with default avoids a separate existence check
            self._regexp_mapping.pop(label, None)
    @staticmethod
    def _listify(element: object) -> list:
        """Wrap a non-list element into a single-item list."""
        if not isinstance(element, list):
            return [element]
        else:
            return element
    def predict(self, sentences: Union[List[Sentence], Sentence]) -> List[Sentence]:
        """Predict the given sentences according to the registered mappings."""
        # a single sentence is wrapped into a list; no need to listify twice
        if not isinstance(sentences, list):
            sentences = [sentences]
        if not sentences:
            return sentences
        for sentence in sentences:
            self._label(sentence)
        return sentences
    def _label(self, sentence: Sentence):
        """This will add a complex_label to the given sentence for every match.span() for every registered_mapping.
        If a match span overlaps with a token span an exception is raised.
        """
        collection = TokenCollection(sentence)
        for label, pattern in self._regexp_mapping.items():
            for match in pattern.finditer(sentence.to_original_text()):
                span: Tuple[int, int] = match.span()
                try:
                    token_span = collection.get_token_span(span)
                except ValueError as err:
                    # ValueError is more specific than the previous generic Exception and
                    # remains backward-compatible for callers catching Exception
                    raise ValueError(
                        f"The match span {span} for label '{label}' is overlapping with a token!"
                    ) from err
                token_span.add_label(label, label)
| 4,917 | 38.344 | 117 | py |
flair | flair-master/flair/models/sequence_tagger_model.py | import logging
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from urllib.error import HTTPError
import torch
import torch.nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from tqdm import tqdm
import flair.nn
from flair.data import Dictionary, Label, Sentence, Span, get_spans_from_bio
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import TokenEmbeddings
from flair.file_utils import cached_path, unzip_file
from flair.models.sequence_tagger_utils.crf import CRF
from flair.models.sequence_tagger_utils.viterbi import ViterbiDecoder, ViterbiLoss
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class SequenceTagger(flair.nn.Classifier[Sentence]):
    def __init__(
        self,
        embeddings: TokenEmbeddings,
        tag_dictionary: Dictionary,
        tag_type: str,
        use_rnn: bool = True,
        rnn: Optional[torch.nn.RNN] = None,
        rnn_type: str = "LSTM",
        tag_format: str = "BIOES",
        hidden_size: int = 256,
        rnn_layers: int = 1,
        bidirectional: bool = True,
        use_crf: bool = True,
        reproject_embeddings: bool = True,
        dropout: float = 0.0,
        word_dropout: float = 0.05,
        locked_dropout: float = 0.5,
        train_initial_hidden_state: bool = False,
        loss_weights: Optional[Dict[str, float]] = None,
        init_from_state_dict: bool = False,
        allow_unk_predictions: bool = False,
    ) -> None:
        """Sequence Tagger class for predicting labels for single tokens. Can be parameterized by several attributes.
        In case of multitask learning, pass shared embeddings or shared rnn into respective attributes.
        :param embeddings: Embeddings to use during training and prediction
        :param tag_dictionary: Dictionary containing all tags from corpus which can be predicted
        :param tag_type: type of tag which is going to be predicted in case a corpus has multiple annotations
        :param use_rnn: If true, use a RNN, else Linear layer.
        :param rnn: (Optional) Takes a torch.nn.Module as parameter by which you can pass a shared RNN between
            different tasks.
        :param rnn_type: Specifies the RNN type to use, default is 'LSTM', can choose between 'GRU' and 'RNN' as well.
        :param tag_format: the encoding scheme used for span labels, either 'BIOES' (default) or 'BIO'
        :param hidden_size: Hidden size of RNN layer
        :param rnn_layers: number of RNN layers
        :param bidirectional: If True, RNN becomes bidirectional
        :param use_crf: If True, use a Conditional Random Field for prediction, else linear map to tag space.
        :param reproject_embeddings: If True, add a linear layer on top of embeddings, if you want to imitate
            fine tune non-trainable embeddings.
        :param dropout: If > 0, then use dropout.
        :param word_dropout: If > 0, then use word dropout.
        :param locked_dropout: If > 0, then use locked dropout.
        :param train_initial_hidden_state: if True, trains initial hidden state of RNN
        :param loss_weights: Dictionary of weights for labels for the loss function
            (if any label's weight is unspecified it will default to 1.0)
        :param init_from_state_dict: Indicator whether we are loading a model from state dict
            since we need to transform previous models' weights into CRF instance weights
        :param allow_unk_predictions: if True, the internal span label dictionary keeps an <unk> item
        """
        super().__init__()
        # ----- Create the internal tag dictionary -----
        self.tag_type = tag_type
        self.tag_format = tag_format.upper()
        if init_from_state_dict:
            self.label_dictionary = tag_dictionary
        else:
            # span-labels need special encoding (BIO or BIOES)
            if tag_dictionary.span_labels:
                # the big question is whether the label dictionary should contain an UNK or not
                # without UNK, we cannot evaluate on data that contains labels not seen in test
                # with UNK, the model learns less well if there are no UNK examples
                self.label_dictionary = Dictionary(add_unk=allow_unk_predictions)
                assert self.tag_format in ["BIOES", "BIO"]
                for label in tag_dictionary.get_items():
                    if label == "<unk>":
                        continue
                    self.label_dictionary.add_item("O")
                    if self.tag_format == "BIOES":
                        self.label_dictionary.add_item("S-" + label)
                        self.label_dictionary.add_item("B-" + label)
                        self.label_dictionary.add_item("E-" + label)
                        self.label_dictionary.add_item("I-" + label)
                    if self.tag_format == "BIO":
                        self.label_dictionary.add_item("B-" + label)
                        self.label_dictionary.add_item("I-" + label)
            else:
                self.label_dictionary = tag_dictionary
        # is this a span prediction problem?
        self.predict_spans = self._determine_if_span_prediction_problem(self.label_dictionary)
        self.tagset_size = len(self.label_dictionary)
        log.info(f"SequenceTagger predicts: {self.label_dictionary}")
        # ----- Embeddings -----
        self.embeddings = embeddings
        embedding_dim: int = embeddings.embedding_length
        # ----- Initial loss weights parameters -----
        self.weight_dict = loss_weights
        self.loss_weights = self._init_loss_weights(loss_weights) if loss_weights else None
        # ----- RNN specific parameters -----
        # if a shared RNN module is passed, its hyperparameters take precedence
        self.use_rnn = use_rnn
        self.rnn_type = rnn_type if not rnn else rnn._get_name()
        self.hidden_size = hidden_size if not rnn else rnn.hidden_size
        self.rnn_layers = rnn_layers if not rnn else rnn.num_layers
        self.bidirectional = bidirectional if not rnn else rnn.bidirectional
        # ----- Conditional Random Field parameters -----
        self.use_crf = use_crf
        # Previously trained models have been trained without an explicit CRF, thus it is required to check
        # whether we are loading a model from state dict in order to skip or add START and STOP token
        if use_crf and not init_from_state_dict and not self.label_dictionary.start_stop_tags_are_set():
            self.label_dictionary.set_start_stop_tags()
            self.tagset_size += 2
        # ----- Dropout parameters -----
        # dropouts
        # remove word dropout if there is no contact over the sequence dimension.
        if not use_crf and not use_rnn:
            word_dropout = 0.0
        self.use_dropout: float = dropout
        self.use_word_dropout: float = word_dropout
        self.use_locked_dropout: float = locked_dropout
        if dropout > 0.0:
            self.dropout = torch.nn.Dropout(dropout)
        if word_dropout > 0.0:
            self.word_dropout = flair.nn.WordDropout(word_dropout)
        if locked_dropout > 0.0:
            self.locked_dropout = flair.nn.LockedDropout(locked_dropout)
        # ----- Model layers -----
        self.reproject_embeddings = reproject_embeddings
        if self.reproject_embeddings:
            self.embedding2nn = torch.nn.Linear(embedding_dim, embedding_dim)
        # ----- RNN layer -----
        if use_rnn:
            # If shared RNN provided, else create one for model
            self.rnn: torch.nn.RNN = (
                rnn
                if rnn
                else self.RNN(
                    rnn_type,
                    rnn_layers,
                    hidden_size,
                    bidirectional,
                    rnn_input_dim=embedding_dim,
                )
            )
            num_directions = 2 if self.bidirectional else 1
            hidden_output_dim = self.rnn.hidden_size * num_directions
            # Whether to train initial hidden state
            self.train_initial_hidden_state = train_initial_hidden_state
            if self.train_initial_hidden_state:
                (
                    self.hs_initializer,
                    self.lstm_init_h,
                    self.lstm_init_c,
                ) = self._init_initial_hidden_state(num_directions)
            # final linear map to tag space
            self.linear = torch.nn.Linear(hidden_output_dim, len(self.label_dictionary))
        else:
            self.linear = torch.nn.Linear(embedding_dim, len(self.label_dictionary))
            self.train_initial_hidden_state = False
        # the loss function is Viterbi if using CRF, else regular Cross Entropy Loss
        self.loss_function = (
            ViterbiLoss(self.label_dictionary)
            if use_crf
            else torch.nn.CrossEntropyLoss(weight=self.loss_weights, reduction="sum")
        )
        # if using CRF, we also require a CRF and a Viterbi decoder
        if use_crf:
            self.crf = CRF(self.label_dictionary, self.tagset_size, init_from_state_dict)
            self.viterbi_decoder = ViterbiDecoder(self.label_dictionary)
        self.to(flair.device)
    @property
    def label_type(self):
        """The label (tag) type this tagger predicts, as set by the ``tag_type`` constructor argument."""
        return self.tag_type
def _init_loss_weights(self, loss_weights: Dict[str, float]) -> torch.Tensor:
"""Initializes the loss weights based on given dictionary.
:param loss_weights: dictionary - contains loss weights
"""
n_classes = len(self.label_dictionary)
weight_list = [1.0 for _ in range(n_classes)]
for i, tag in enumerate(self.label_dictionary.get_items()):
if tag in loss_weights:
weight_list[i] = loss_weights[tag]
return torch.tensor(weight_list).to(flair.device)
    def _init_initial_hidden_state(self, num_directions: int):
        """Initializes hidden states given the number of directions in RNN.
        :param num_directions: Number of directions in RNN.
        :return: tuple of (xavier initializer function, learnable initial hidden state,
            learnable initial cell state), each state sized (num_layers * num_directions, hidden_size)
        """
        hs_initializer = torch.nn.init.xavier_normal_
        lstm_init_h = torch.nn.Parameter(
            torch.randn(self.rnn.num_layers * num_directions, self.hidden_size),
            requires_grad=True,
        )
        lstm_init_c = torch.nn.Parameter(
            torch.randn(self.rnn.num_layers * num_directions, self.hidden_size),
            requires_grad=True,
        )
        return hs_initializer, lstm_init_h, lstm_init_c
@staticmethod
def RNN(
rnn_type: str,
rnn_layers: int,
hidden_size: int,
bidirectional: bool,
rnn_input_dim: int,
) -> torch.nn.RNN:
"""Static wrapper function returning an RNN instance from PyTorch.
:param rnn_type: Type of RNN from torch.nn
:param rnn_layers: number of layers to include
:param hidden_size: hidden size of RNN cell
:param bidirectional: If True, RNN cell is bidirectional
:param rnn_input_dim: Input dimension to RNN cell
"""
if rnn_type in ["LSTM", "GRU", "RNN"]:
RNN = getattr(torch.nn, rnn_type)(
rnn_input_dim,
hidden_size,
num_layers=rnn_layers,
dropout=0.0 if rnn_layers == 1 else 0.5,
bidirectional=bidirectional,
batch_first=True,
)
else:
raise Exception(f"Unknown RNN type: {rnn_type}. Please use either LSTM, GRU or RNN.")
return RNN
    def forward_loss(self, sentences: List[Sentence]) -> Tuple[torch.Tensor, int]:
        """Compute the training loss for a batch of sentences.

        :param sentences: batch of sentences to compute the loss over
        :return: tuple of (loss tensor, number of gold labels the loss covers)
        """
        # if there are no sentences, there is no loss
        if len(sentences) == 0:
            return torch.tensor(0.0, dtype=torch.float, device=flair.device, requires_grad=True), 0
        # sort longest-first so packed RNN sequences line up with sentence order
        sentences = sorted(sentences, key=len, reverse=True)
        gold_labels = self._prepare_label_tensor(sentences)
        sentence_tensor, lengths = self._prepare_tensors(sentences)
        # forward pass to get scores
        scores = self.forward(sentence_tensor, lengths)
        # calculate loss given scores and labels
        return self._calculate_loss(scores, gold_labels)
    def _prepare_tensors(self, data_points: Union[List[Sentence], Sentence]) -> Tuple[torch.Tensor, torch.LongTensor]:
        """Embed a batch of sentences and pack them into one zero-padded tensor.

        :param data_points: a single sentence or a list of sentences
        :return: tuple of (padded embedding tensor, per-sentence lengths)
        """
        sentences = [data_points] if not isinstance(data_points, list) else data_points
        self.embeddings.embed(sentences)
        # make a zero-padded tensor for the whole sentence
        lengths, sentence_tensor = self._make_padded_tensor_for_batch(sentences)
        return sentence_tensor, lengths
    def forward(self, sentence_tensor: torch.Tensor, lengths: torch.LongTensor):
        """Forward propagation through network.
        :param sentence_tensor: A tensor representing the batch of sentences.
        :param lengths: A IntTensor representing the lengths of the respective sentences.
        """
        if self.use_dropout:
            sentence_tensor = self.dropout(sentence_tensor)
        if self.use_word_dropout:
            sentence_tensor = self.word_dropout(sentence_tensor)
        if self.use_locked_dropout:
            sentence_tensor = self.locked_dropout(sentence_tensor)
        if self.reproject_embeddings:
            sentence_tensor = self.embedding2nn(sentence_tensor)
        if self.use_rnn:
            # pack -> RNN -> unpack so the RNN skips padded positions
            packed = pack_padded_sequence(sentence_tensor, lengths, batch_first=True)
            rnn_output, hidden = self.rnn(packed)
            sentence_tensor, output_lengths = pad_packed_sequence(rnn_output, batch_first=True)
            if self.use_dropout:
                sentence_tensor = self.dropout(sentence_tensor)
            if self.use_locked_dropout:
                sentence_tensor = self.locked_dropout(sentence_tensor)
        # linear map to tag space
        features = self.linear(sentence_tensor)
        # Depending on whether we are using CRF or a linear layer, scores is either:
        # -- A tensor of shape (batch size, sequence length, tagset size, tagset size) for CRF
        # -- A tensor of shape (aggregated sequence length for all sentences in batch, tagset size) for linear layer
        if self.use_crf:
            features = self.crf(features)
            scores = (features, lengths, self.crf.transitions)
        else:
            scores = self._get_scores_from_features(features, lengths)
        return scores
    def _calculate_loss(self, scores: torch.Tensor, labels: torch.LongTensor) -> Tuple[torch.Tensor, int]:
        """Compute the loss for the given model scores and gold label indices.

        :return: tuple of (loss, number of labels)
        """
        # guard: no labels in batch -> zero loss (count 1 avoids division by zero downstream)
        if labels.size(0) == 0:
            return torch.tensor(0.0, requires_grad=True, device=flair.device), 1
        return self.loss_function(scores, labels), len(labels)
    def _make_padded_tensor_for_batch(self, sentences: List[Sentence]) -> Tuple[torch.LongTensor, torch.Tensor]:
        """Concatenate all token embeddings of an (already embedded) batch into one zero-padded tensor.

        :param sentences: batch of sentences whose tokens carry embeddings
        :return: tuple of (per-sentence lengths, tensor of shape
            (batch size, longest sentence length, embedding length))
        """
        names = self.embeddings.get_names()
        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        longest_token_sequence_in_batch: int = max(lengths)
        # one reusable zero block, sliced per sentence, avoids re-allocating padding tensors
        pre_allocated_zero_tensor = torch.zeros(
            self.embeddings.embedding_length * longest_token_sequence_in_batch,
            dtype=torch.float,
            device=flair.device,
        )
        all_embs = []
        for sentence in sentences:
            all_embs += [emb for token in sentence for emb in token.get_each_embedding(names)]
            nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)
            if nb_padding_tokens > 0:
                t = pre_allocated_zero_tensor[: self.embeddings.embedding_length * nb_padding_tokens]
                all_embs.append(t)
        sentence_tensor = torch.cat(all_embs).view(
            [
                len(sentences),
                longest_token_sequence_in_batch,
                self.embeddings.embedding_length,
            ]
        )
        return torch.LongTensor(lengths), sentence_tensor
@staticmethod
def _get_scores_from_features(features: torch.Tensor, lengths: torch.Tensor):
"""Remove paddings to get a smaller tensor.
Trims current batch tensor in shape (batch size, sequence length, tagset size)
in such a way that all pads are going to be removed.
:param features: torch.tensor containing all features from forward propagation
:param lengths: length from each sentence in batch in order to trim padding tokens
"""
features_formatted = []
for feat, length in zip(features, lengths):
features_formatted.append(feat[:length])
scores = torch.cat(features_formatted)
return scores
def _get_gold_labels(self, sentences: List[Sentence]) -> List[str]:
    """Extracts gold labels from each sentence.

    :param sentences: List of sentences in batch
    :return: one tag string per token, flattened over the whole batch
    """
    # spans need to be encoded as token-level predictions
    if self.predict_spans:
        all_sentence_labels = []
        for sentence in sentences:
            # start with every token tagged "O", then overwrite span tokens
            sentence_labels = ["O"] * len(sentence)
            for label in sentence.get_labels(self.label_type):
                span: Span = label.data_point
                if self.tag_format == "BIOES":
                    if len(span) == 1:
                        # single-token span
                        sentence_labels[span[0].idx - 1] = "S-" + label.value
                    else:
                        # token.idx is 1-based, hence the "- 1" when indexing
                        sentence_labels[span[0].idx - 1] = "B-" + label.value
                        sentence_labels[span[-1].idx - 1] = "E-" + label.value
                        # interior tokens (between first and last) become "I-"
                        for i in range(span[0].idx, span[-1].idx - 1):
                            sentence_labels[i] = "I-" + label.value
                else:
                    # BIO format: first token "B-", all following tokens "I-"
                    sentence_labels[span[0].idx - 1] = "B-" + label.value
                    for i in range(span[0].idx, span[-1].idx):
                        sentence_labels[i] = "I-" + label.value
            all_sentence_labels.extend(sentence_labels)
        labels = all_sentence_labels
    # all others are regular labels for each token
    else:
        labels = [token.get_label(self.label_type, "O").value for sentence in sentences for token in sentence]
    return labels
def _prepare_label_tensor(self, sentences: List[Sentence]):
    """Turn the gold labels of a batch into one flat tensor of label indices.

    :param sentences: batch of sentences whose gold labels are extracted
    :return: 1-d long tensor on flair.device, one index per token in the batch
    """
    label_indices = [
        self.label_dictionary.get_idx_for_item(tag) for tag in self._get_gold_labels(sentences)
    ]
    return torch.tensor(label_indices, dtype=torch.long, device=flair.device)
def predict(
    self,
    sentences: Union[List[Sentence], Sentence],
    mini_batch_size: int = 32,
    return_probabilities_for_all_classes: bool = False,
    verbose: bool = False,
    label_name: Optional[str] = None,
    return_loss=False,
    embedding_storage_mode="none",
    force_token_predictions: bool = False,
):
    """Predicts labels for current batch with CRF or Softmax.

    :param sentences: List of sentences in batch
    :param mini_batch_size: batch size for test data
    :param return_probabilities_for_all_classes: Whether to return probabilities for all classes
    :param verbose: whether to use progress bar
    :param label_name: which label to predict
    :param return_loss: whether to return loss value
    :param embedding_storage_mode: determines where to store embeddings - can be "gpu", "cpu" or None.
    :param force_token_predictions: add predictions as token-level labels even
        when the model predicts spans
    :return: (overall loss, label count) if return_loss is set, otherwise None;
        predictions are attached to the sentences in place
    """
    if label_name is None:
        label_name = self.tag_type
    with torch.no_grad():
        if not sentences:
            return sentences
        # make sure it's a list
        if not isinstance(sentences, list) and not isinstance(sentences, flair.data.Dataset):
            sentences = [sentences]
        Sentence.set_context_for_sentences(cast(List[Sentence], sentences))
        # filter empty sentences
        sentences = [sentence for sentence in sentences if len(sentence) > 0]
        # reverse sort all sequences by their length
        reordered_sentences = sorted(sentences, key=len, reverse=True)
        if len(reordered_sentences) == 0:
            return sentences
        dataloader = DataLoader(
            dataset=FlairDatapointDataset(reordered_sentences),
            batch_size=mini_batch_size,
        )
        # progress bar for verbosity
        if verbose:
            dataloader = tqdm(dataloader, desc="Batch inference")
        overall_loss = torch.zeros(1, device=flair.device)
        label_count = 0
        for batch in dataloader:
            # stop if all sentences are empty
            if not batch:
                continue
            # get features from forward propagation
            sentence_tensor, lengths = self._prepare_tensors(batch)
            features = self.forward(sentence_tensor, lengths)
            # remove previously predicted labels of this type
            for sentence in batch:
                sentence.remove_labels(label_name)
            # if return_loss, get loss value
            if return_loss:
                gold_labels = self._prepare_label_tensor(batch)
                loss = self._calculate_loss(features, gold_labels)
                overall_loss += loss[0]
                label_count += loss[1]
            # make predictions: Viterbi decoding for CRF, plain softmax otherwise
            if self.use_crf:
                predictions, all_tags = self.viterbi_decoder.decode(
                    features, return_probabilities_for_all_classes, batch
                )
            else:
                predictions, all_tags = self._standard_inference(
                    features, batch, return_probabilities_for_all_classes
                )
            # add predictions to Sentence
            for sentence, sentence_predictions in zip(batch, predictions):
                # BIOES-labels need to be converted to spans
                if self.predict_spans and not force_token_predictions:
                    sentence_tags = [label[0] for label in sentence_predictions]
                    sentence_scores = [label[1] for label in sentence_predictions]
                    predicted_spans = get_spans_from_bio(sentence_tags, sentence_scores)
                    for predicted_span in predicted_spans:
                        span: Span = sentence[predicted_span[0][0] : predicted_span[0][-1] + 1]
                        span.add_label(label_name, value=predicted_span[2], score=predicted_span[1])
                # token-labels can be added directly ("O" and legacy "_" predictions are skipped)
                else:
                    for token, label in zip(sentence.tokens, sentence_predictions):
                        if label[0] in ["O", "_"]:
                            continue
                        token.add_label(typename=label_name, value=label[0], score=label[1])
            # all_tags will be empty if all_tag_prob is set to False, so the for loop will be avoided
            for sentence, sent_all_tags in zip(batch, all_tags):
                for token, token_all_tags in zip(sentence.tokens, sent_all_tags):
                    token.add_tags_proba_dist(label_name, token_all_tags)
            store_embeddings(sentences, storage_mode=embedding_storage_mode)
        if return_loss:
            return overall_loss, label_count
        return None
def _standard_inference(self, features: torch.Tensor, batch: List[Sentence], probabilities_for_all_classes: bool):
    """Softmax over emission scores from forward propagation.

    :param features: sentence tensor from forward propagation (one row per
        token, flattened over the batch)
    :param batch: list of sentence
    :param probabilities_for_all_classes: whether to return score for each tag in tag dictionary
    :return: tuple of (per-sentence lists of (tag, score) pairs, per-token
        full tag distributions — empty unless probabilities_for_all_classes)
    """
    softmax_batch = F.softmax(features, dim=1).cpu()
    scores_batch, prediction_batch = torch.max(softmax_batch, dim=1)
    predictions = []
    all_tags = []
    for sentence in batch:
        # consume the next len(sentence) rows of the flattened batch
        scores = scores_batch[: len(sentence)]
        predictions_for_sentence = prediction_batch[: len(sentence)]
        predictions.append(
            [
                (self.label_dictionary.get_item_for_index(prediction), score.item())
                for token, score, prediction in zip(sentence, scores, predictions_for_sentence)
            ]
        )
        # drop the rows just consumed so the next sentence starts at index 0
        scores_batch = scores_batch[len(sentence) :]
        prediction_batch = prediction_batch[len(sentence) :]
    if probabilities_for_all_classes:
        lengths = [len(sentence) for sentence in batch]
        all_tags = self._all_scores_for_token(batch, softmax_batch, lengths)
    return predictions, all_tags
def _all_scores_for_token(self, sentences: List[Sentence], scores: torch.Tensor, lengths: List[int]):
"""Returns all scores for each tag in tag dictionary.
:param scores: Scores for current sentence.
"""
scores = scores.numpy()
tokens = [token for sentence in sentences for token in sentence]
prob_all_tags = [
[
Label(token, self.label_dictionary.get_item_for_index(score_id), score)
for score_id, score in enumerate(score_dist)
]
for score_dist, token in zip(scores, tokens)
]
prob_tags_per_sentence = []
previous = 0
for length in lengths:
prob_tags_per_sentence.append(prob_all_tags[previous : previous + length])
previous = length
return prob_tags_per_sentence
def _get_state_dict(self):
    """Returns the state dictionary for this model."""
    model_state = super()._get_state_dict()
    # extend the base state with everything needed to rebuild this tagger
    model_state.update(
        {
            "embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "hidden_size": self.hidden_size,
            "tag_dictionary": self.label_dictionary,
            "tag_format": self.tag_format,
            "tag_type": self.tag_type,
            "use_crf": self.use_crf,
            "use_rnn": self.use_rnn,
            "rnn_layers": self.rnn_layers,
            "use_dropout": self.use_dropout,
            "use_word_dropout": self.use_word_dropout,
            "use_locked_dropout": self.use_locked_dropout,
            "rnn_type": self.rnn_type,
            "reproject_embeddings": self.reproject_embeddings,
            "weight_dict": self.weight_dict,
            "train_initial_hidden_state": self.train_initial_hidden_state,
        }
    )
    return model_state
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
    """Initializes a SequenceTagger from a saved state dictionary.

    Older checkpoints stored the CRF transition matrix under a plain
    "transitions" key; this is rewritten to the current "crf.transitions"
    layout before delegating to the superclass constructor.
    """
    # remap deprecated key of legacy serialized models
    if state["use_crf"] and "transitions" in state["state_dict"]:
        state["state_dict"]["crf.transitions"] = state["state_dict"]["transitions"]
        del state["state_dict"]["transitions"]
    # .get() with defaults keeps backward compatibility with checkpoints
    # that predate the respective constructor arguments
    return super()._init_model_with_state_dict(
        state,
        embeddings=state.get("embeddings"),
        tag_dictionary=state.get("tag_dictionary"),
        tag_format=state.get("tag_format", "BIOES"),
        tag_type=state.get("tag_type"),
        use_crf=state.get("use_crf"),
        use_rnn=state.get("use_rnn"),
        rnn_layers=state.get("rnn_layers"),
        hidden_size=state.get("hidden_size"),
        dropout=state.get("use_dropout", 0.0),
        word_dropout=state.get("use_word_dropout", 0.0),
        locked_dropout=state.get("use_locked_dropout", 0.0),
        rnn_type=state.get("rnn_type", "LSTM"),
        reproject_embeddings=state.get("reproject_embeddings", True),
        loss_weights=state.get("weight_dict"),
        init_from_state_dict=True,
        train_initial_hidden_state=state.get("train_initial_hidden_state", False),
        **kwargs,
    )
@staticmethod
def _fetch_model(model_name) -> str:
    """Resolve a model name to a local file path, downloading if necessary.

    Resolution order: existing local file path -> shorthand remapped to a
    Hugging Face hub id -> shorthand remapped to a direct download URL on
    the Flair server -> special-cased zipped @redewiedergabe models ->
    Hugging Face model hub lookup (supports "repo@revision" syntax).

    :param model_name: shorthand name, hub id, or path to a local model file
    :return: local filesystem path of the (possibly downloaded) model file
    """
    # core Flair models on Huggingface ModelHub
    huggingface_model_map = {
        "ner": "flair/ner-english",
        "ner-fast": "flair/ner-english-fast",
        "ner-ontonotes": "flair/ner-english-ontonotes",
        "ner-ontonotes-fast": "flair/ner-english-ontonotes-fast",
        # Large NER models,
        "ner-large": "flair/ner-english-large",
        "ner-ontonotes-large": "flair/ner-english-ontonotes-large",
        "de-ner-large": "flair/ner-german-large",
        "nl-ner-large": "flair/ner-dutch-large",
        "es-ner-large": "flair/ner-spanish-large",
        # Multilingual NER models
        "ner-multi": "flair/ner-multi",
        "multi-ner": "flair/ner-multi",
        "ner-multi-fast": "flair/ner-multi-fast",
        # English POS models
        "upos": "flair/upos-english",
        "upos-fast": "flair/upos-english-fast",
        "pos": "flair/pos-english",
        "pos-fast": "flair/pos-english-fast",
        # Multilingual POS models
        "pos-multi": "flair/upos-multi",
        "multi-pos": "flair/upos-multi",
        "pos-multi-fast": "flair/upos-multi-fast",
        "multi-pos-fast": "flair/upos-multi-fast",
        # English SRL models
        "frame": "flair/frame-english",
        "frame-fast": "flair/frame-english-fast",
        # English chunking models
        "chunk": "flair/chunk-english",
        "chunk-fast": "flair/chunk-english-fast",
        # Language-specific NER models
        "ar-ner": "megantosh/flair-arabic-multi-ner",
        "ar-pos": "megantosh/flair-arabic-dialects-codeswitch-egy-lev",
        "da-ner": "flair/ner-danish",
        "de-ner": "flair/ner-german",
        "de-ler": "flair/ner-german-legal",
        "de-ner-legal": "flair/ner-german-legal",
        "fr-ner": "flair/ner-french",
        "nl-ner": "flair/ner-dutch",
        "ner-ukrainian": "dchaplinsky/flair-uk-ner",
        # Language-specific POS models
        "pos-ukrainian": "dchaplinsky/flair-uk-pos",
    }
    hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
    hunflair_paper_path = hu_path + "/hunflair_smallish_models"
    hunflair_main_path = hu_path + "/hunflair_allcorpus_models"
    # shorthands resolved to direct download URLs on the Flair server
    hu_model_map = {
        # English NER models
        "ner": "/".join([hu_path, "ner", "en-ner-conll03-v0.4.pt"]),
        "ner-pooled": "/".join([hu_path, "ner-pooled", "en-ner-conll03-pooled-v0.5.pt"]),
        "ner-fast": "/".join([hu_path, "ner-fast", "en-ner-fast-conll03-v0.4.pt"]),
        "ner-ontonotes": "/".join([hu_path, "ner-ontonotes", "en-ner-ontonotes-v0.4.pt"]),
        "ner-ontonotes-fast": "/".join([hu_path, "ner-ontonotes-fast", "en-ner-ontonotes-fast-v0.4.pt"]),
        # Multilingual NER models
        "ner-multi": "/".join([hu_path, "multi-ner", "quadner-large.pt"]),
        "multi-ner": "/".join([hu_path, "multi-ner", "quadner-large.pt"]),
        "ner-multi-fast": "/".join([hu_path, "multi-ner-fast", "ner-multi-fast.pt"]),
        # English POS models
        "upos": "/".join([hu_path, "upos", "en-pos-ontonotes-v0.4.pt"]),
        "upos-fast": "/".join([hu_path, "upos-fast", "en-upos-ontonotes-fast-v0.4.pt"]),
        "pos": "/".join([hu_path, "pos", "en-pos-ontonotes-v0.5.pt"]),
        "pos-fast": "/".join([hu_path, "pos-fast", "en-pos-ontonotes-fast-v0.5.pt"]),
        # Multilingual POS models
        "pos-multi": "/".join([hu_path, "multi-pos", "pos-multi-v0.1.pt"]),
        "multi-pos": "/".join([hu_path, "multi-pos", "pos-multi-v0.1.pt"]),
        "pos-multi-fast": "/".join([hu_path, "multi-pos-fast", "pos-multi-fast.pt"]),
        "multi-pos-fast": "/".join([hu_path, "multi-pos-fast", "pos-multi-fast.pt"]),
        # English SRL models
        "frame": "/".join([hu_path, "frame", "en-frame-ontonotes-v0.4.pt"]),
        "frame-fast": "/".join([hu_path, "frame-fast", "en-frame-ontonotes-fast-v0.4.pt"]),
        "frame-large": "/".join([hu_path, "frame-large", "frame-large.pt"]),
        # English chunking models
        "chunk": "/".join([hu_path, "chunk", "en-chunk-conll2000-v0.4.pt"]),
        "chunk-fast": "/".join([hu_path, "chunk-fast", "en-chunk-conll2000-fast-v0.4.pt"]),
        # Danish models
        "da-pos": "/".join([hu_path, "da-pos", "da-pos-v0.1.pt"]),
        "da-ner": "/".join([hu_path, "NER-danish", "da-ner-v0.1.pt"]),
        # German models
        "de-pos": "/".join([hu_path, "de-pos", "de-pos-ud-hdt-v0.5.pt"]),
        "de-pos-tweets": "/".join([hu_path, "de-pos-tweets", "de-pos-twitter-v0.1.pt"]),
        "de-ner": "/".join([hu_path, "de-ner", "de-ner-conll03-v0.4.pt"]),
        "de-ner-germeval": "/".join([hu_path, "de-ner-germeval", "de-ner-germeval-0.4.1.pt"]),
        "de-ler": "/".join([hu_path, "de-ner-legal", "de-ner-legal.pt"]),
        "de-ner-legal": "/".join([hu_path, "de-ner-legal", "de-ner-legal.pt"]),
        # French models
        "fr-ner": "/".join([hu_path, "fr-ner", "fr-ner-wikiner-0.4.pt"]),
        # Dutch models
        "nl-ner": "/".join([hu_path, "nl-ner", "nl-ner-bert-conll02-v0.8.pt"]),
        "nl-ner-rnn": "/".join([hu_path, "nl-ner-rnn", "nl-ner-conll02-v0.5.pt"]),
        # Malayalam models
        "ml-pos": "https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/malayalam-xpos-model.pt",
        "ml-upos": "https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/malayalam-upos-model.pt",
        # Portuguese models
        "pt-pos-clinical": "/".join(
            [
                hu_path,
                "pt-pos-clinical",
                "pucpr-flair-clinical-pos-tagging-best-model.pt",
            ]
        ),
        # Keyphase models
        "keyphrase": "/".join([hu_path, "keyphrase", "keyphrase-en-scibert.pt"]),
        "negation-speculation": "/".join([hu_path, "negation-speculation", "negation-speculation-model.pt"]),
        # Biomedical models
        "hunflair-paper-cellline": "/".join([hunflair_paper_path, "cellline", "hunflair-celline-v1.0.pt"]),
        "hunflair-paper-chemical": "/".join([hunflair_paper_path, "chemical", "hunflair-chemical-v1.0.pt"]),
        "hunflair-paper-disease": "/".join([hunflair_paper_path, "disease", "hunflair-disease-v1.0.pt"]),
        "hunflair-paper-gene": "/".join([hunflair_paper_path, "gene", "hunflair-gene-v1.0.pt"]),
        "hunflair-paper-species": "/".join([hunflair_paper_path, "species", "hunflair-species-v1.0.pt"]),
        "hunflair-cellline": "/".join([hunflair_main_path, "cellline", "hunflair-celline-v1.0.pt"]),
        "hunflair-chemical": "/".join([hunflair_main_path, "huner-chemical", "hunflair-chemical-full-v1.0.pt"]),
        "hunflair-disease": "/".join([hunflair_main_path, "huner-disease", "hunflair-disease-full-v1.0.pt"]),
        "hunflair-gene": "/".join([hunflair_main_path, "huner-gene", "hunflair-gene-full-v1.0.pt"]),
        "hunflair-species": "/".join([hunflair_main_path, "huner-species", "hunflair-species-full-v1.1.pt"]),
    }
    cache_dir = Path("models")
    get_from_model_hub = False
    # check if model name is a valid local file
    if Path(model_name).exists():
        model_path = model_name
    # check if model key is remapped to HF key - if so, print out information
    elif model_name in huggingface_model_map:
        # get mapped name
        hf_model_name = huggingface_model_map[model_name]
        # use mapped name instead
        model_name = hf_model_name
        get_from_model_hub = True
    # if not, check if model key is remapped to direct download location. If so, download model
    elif model_name in hu_model_map:
        model_path = cached_path(hu_model_map[model_name], cache_dir=cache_dir)
    # special handling for the taggers by the @redewiegergabe project (TODO: move to model hub)
    elif model_name == "de-historic-indirect":
        model_file = flair.cache_root / cache_dir / "indirect" / "final-model.pt"
        if not model_file.exists():
            cached_path(
                "http://www.redewiedergabe.de/models/indirect.zip",
                cache_dir=cache_dir,
            )
            unzip_file(
                flair.cache_root / cache_dir / "indirect.zip",
                flair.cache_root / cache_dir,
            )
        model_path = str(flair.cache_root / cache_dir / "indirect" / "final-model.pt")
    elif model_name == "de-historic-direct":
        model_file = flair.cache_root / cache_dir / "direct" / "final-model.pt"
        if not model_file.exists():
            cached_path(
                "http://www.redewiedergabe.de/models/direct.zip",
                cache_dir=cache_dir,
            )
            unzip_file(
                flair.cache_root / cache_dir / "direct.zip",
                flair.cache_root / cache_dir,
            )
        model_path = str(flair.cache_root / cache_dir / "direct" / "final-model.pt")
    elif model_name == "de-historic-reported":
        model_file = flair.cache_root / cache_dir / "reported" / "final-model.pt"
        if not model_file.exists():
            cached_path(
                "http://www.redewiedergabe.de/models/reported.zip",
                cache_dir=cache_dir,
            )
            unzip_file(
                flair.cache_root / cache_dir / "reported.zip",
                flair.cache_root / cache_dir,
            )
        model_path = str(flair.cache_root / cache_dir / "reported" / "final-model.pt")
    elif model_name == "de-historic-free-indirect":
        model_file = flair.cache_root / cache_dir / "freeIndirect" / "final-model.pt"
        if not model_file.exists():
            cached_path(
                "http://www.redewiedergabe.de/models/freeIndirect.zip",
                cache_dir=cache_dir,
            )
            unzip_file(
                flair.cache_root / cache_dir / "freeIndirect.zip",
                flair.cache_root / cache_dir,
            )
        model_path = str(flair.cache_root / cache_dir / "freeIndirect" / "final-model.pt")
    # for all other cases (not local file or special download location), use HF model hub
    else:
        get_from_model_hub = True
    # if not a local file, get from model hub
    if get_from_model_hub:
        hf_model_name = "pytorch_model.bin"
        revision = "main"
        # "repo@revision" syntax selects a specific hub revision
        if "@" in model_name:
            model_name_split = model_name.split("@")
            revision = model_name_split[-1]
            model_name = model_name_split[0]
        # use model name as subfolder
        model_folder = model_name.split("/", maxsplit=1)[1] if "/" in model_name else model_name
        # Lazy import
        from huggingface_hub.file_download import hf_hub_download

        try:
            model_path = hf_hub_download(
                repo_id=model_name,
                filename=hf_model_name,
                revision=revision,
                library_name="flair",
                library_version=flair.__version__,
                cache_dir=flair.cache_root / "models" / model_folder,
            )
        except HTTPError:
            # output information
            log.error("-" * 80)
            log.error(
                f"ERROR: The key '{model_name}' was neither found on the ModelHub nor is this a valid path to a file on your system!"
            )
            log.error(" -> Please check https://huggingface.co/models?filter=flair for all available models.")
            log.error(" -> Alternatively, point to a model file on your local drive.")
            log.error("-" * 80)
            Path(flair.cache_root / "models" / model_folder).rmdir()  # remove folder again if not valid
            raise
    return model_path
def _generate_model_card(self, repo_id):
    """Return a Markdown model card (with YAML front matter) for *repo_id*.

    The template text below is emitted verbatim as README.md on the
    Hugging Face Hub — do not re-indent the string contents.
    """
    return f"""---
tags:
- flair
- token-classification
- sequence-tagger-model
---
### Demo: How to use in Flair
Requires:
- **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("{repo_id}")
# make example sentence
sentence = Sentence("On September 1st George won 1 dollar while watching Game of Thrones.")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```"""
def push_to_hub(
    self,
    repo_id: str,
    token: Optional[str] = None,
    private: Optional[bool] = None,
    commit_message: str = "Add new SequenceTagger model.",
):
    """Uploads the Sequence Tagger model to a Hugging Face Hub repository.

    :param repo_id: A namespace (user or an organization) and a repo name separated by a `/`.
    :param token: An authentication token (See https://huggingface.co/settings/token).
    :param private: Whether the repository is private.
    :param commit_message: Message to commit while pushing.
    :return: The url of the repository.
    """
    # Lazy import
    from huggingface_hub import create_repo, model_info, upload_folder

    # create (or reuse, via exist_ok) the target repository
    repo_url = create_repo(
        repo_id=repo_id,
        token=token,
        private=private,
        exist_ok=True,
    )
    # stage everything in a temp directory, then upload it in one commit
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_path = Path(tmp_dir)
        # Save model weight
        local_model_path = tmp_path / "pytorch_model.bin"
        self.save(local_model_path)
        # Determine if model card already exists (never overwrite an existing README.md)
        info = model_info(repo_id, use_auth_token=token)
        write_readme = all(f.rfilename != "README.md" for f in info.siblings)
        # Generate and save model card
        if write_readme:
            model_card_content = self._generate_model_card(repo_id)
            readme_path = tmp_path / "README.md"
            with readme_path.open("w", encoding="utf-8") as f:
                f.write(model_card_content)
        # Upload files
        upload_folder(
            repo_id=repo_id,
            folder_path=tmp_path,
            path_in_repo="",
            token=token,
            commit_message=commit_message,
        )
    return repo_url
@staticmethod
def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
if len(sentences) != len(filtered_sentences):
log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.")
return filtered_sentences
def _determine_if_span_prediction_problem(self, dictionary: Dictionary) -> bool:
return any(item.startswith(("B-", "S-", "I-")) for item in dictionary.get_items())
def _print_predictions(self, batch, gold_label_type):
    """Render gold vs. predicted labels of a batch as CoNLL-style lines.

    Every token becomes one line "<text> <gold> <predicted>"; sentences are
    separated by a blank line.

    :param batch: sentences that already carry "predicted" labels
    :param gold_label_type: label type under which the gold labels are stored
    :return: list of output lines (each already newline-terminated)
    """
    lines = []
    if self.predict_spans:
        # span labels are first projected down to token-level BIO tags
        for datapoint in batch:
            # all labels default to "O"
            for token in datapoint:
                token.set_label("gold_bio", "O")
                token.set_label("predicted_bio", "O")
            # set gold token-level
            for gold_label in datapoint.get_labels(gold_label_type):
                gold_span: Span = gold_label.data_point
                prefix = "B-"
                for token in gold_span:
                    token.set_label("gold_bio", prefix + gold_label.value)
                    # only the first token of a span is "B-"; the rest are "I-"
                    prefix = "I-"
            # set predicted token-level
            for predicted_label in datapoint.get_labels("predicted"):
                predicted_span: Span = predicted_label.data_point
                prefix = "B-"
                for token in predicted_span:
                    token.set_label("predicted_bio", prefix + predicted_label.value)
                    prefix = "I-"
            # now print labels in CoNLL format
            for token in datapoint:
                eval_line = (
                    f"{token.text} "
                    f"{token.get_label('gold_bio').value} "
                    f"{token.get_label('predicted_bio').value}\n"
                )
                lines.append(eval_line)
            lines.append("\n")
    else:
        for datapoint in batch:
            # print labels in CoNLL format
            for token in datapoint:
                eval_line = (
                    f"{token.text} "
                    f"{token.get_label(gold_label_type).value} "
                    f"{token.get_label('predicted').value}\n"
                )
                lines.append(eval_line)
            lines.append("\n")
    return lines
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "SequenceTagger":
    """Load a SequenceTagger from a file path, URL shorthand, or pre-loaded state dict."""
    from typing import cast

    loaded_model = super().load(model_path=model_path)
    return cast("SequenceTagger", loaded_model)
| 45,746 | 43.500973 | 137 | py |
flair | flair-master/flair/models/clustering.py | import logging
import pickle
from collections import OrderedDict
from pathlib import Path
from typing import Optional, Union
import joblib
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.metrics import normalized_mutual_info_score
from tqdm import tqdm
from flair.data import Corpus, _iter_dataset
from flair.datasets import DataLoader
from flair.embeddings import DocumentEmbeddings
log = logging.getLogger("flair")
class ClusteringModel:
    """A wrapper class to apply sklearn clustering models on DocumentEmbeddings."""

    def __init__(self, model: Union[ClusterMixin, BaseEstimator], embeddings: DocumentEmbeddings) -> None:
        """Instantiate the ClusteringModel.

        :param model: the clustering algorithm from sklearn this wrapper will use.
        :param embeddings: the flair DocumentEmbedding this wrapper uses to calculate a vector for each sentence.
        """
        self.model = model
        self.embeddings = embeddings

    def fit(self, corpus: Corpus, **kwargs):
        """Trains the model.

        :param corpus: the flair corpus this wrapper will use for fitting the model.
        """
        X = self._convert_dataset(corpus)
        log.info("Start clustering " + str(self.model) + " with " + str(len(X)) + " Datapoints.")
        self.model.fit(X, **kwargs)
        log.info("Finished clustering.")

    def predict(self, corpus: Corpus):
        """Predict labels given a list of sentences and returns the respective class indices.

        :param corpus: the flair corpus this wrapper will use for predicting the labels.
        """
        X = self._convert_dataset(corpus)
        log.info("Start the prediction " + str(self.model) + " with " + str(len(X)) + " Datapoints.")
        predict = self.model.predict(X)
        # attach each sentence's cluster id under the "cluster" label type
        for idx, sentence in enumerate(_iter_dataset(corpus.get_all_sentences())):
            sentence.set_label("cluster", str(predict[idx]))
        log.info("Finished prediction and labeled all sentences.")
        return predict

    def save(self, model_file: Union[str, Path]):
        """Saves current model.

        :param model_file: path where to save the model.
        """
        joblib.dump(pickle.dumps(self), str(model_file))
        log.info("Saved the model to: " + str(model_file))

    @staticmethod
    def load(model_file: Union[str, Path]):
        """Loads a model from a given path.

        :param model_file: path to the file where the model is saved.
        """
        # NOTE(review): unpickling executes arbitrary code — only load model
        # files from trusted sources.
        log.info("Loading model from: " + str(model_file))
        return pickle.loads(joblib.load(str(model_file)))

    def _convert_dataset(
        self, corpus, label_type: Optional[str] = None, batch_size: int = 32, return_label_dict: bool = False
    ):
        """Makes a flair-corpus sklearn compatible.

        Turns the corpora into X, y datasets as required for most sklearn clustering models.
        Ref.: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.cluster

        :param label_type: the label from sentences will be extracted. If the value is none this will be skipped.
        :return: X only when label_type is None; (X, y) otherwise; and
            (X, y, label_dict) when return_label_dict is set as well.
        """
        log.info("Embed sentences...")
        sentences = []
        for batch in tqdm(DataLoader(corpus.get_all_sentences(), batch_size=batch_size)):
            self.embeddings.embed(batch)
            sentences.extend(batch)
        X = [sentence.embedding.cpu().detach().numpy() for sentence in sentences]
        if label_type is None:
            return X
        labels = [sentence.get_labels(label_type)[0].value for sentence in sentences]
        # map every distinct label to an integer id, in first-seen order
        label_dict = {v: k for k, v in enumerate(OrderedDict.fromkeys(labels))}
        y = [label_dict.get(label) for label in labels]
        if return_label_dict:
            return X, y, label_dict
        return X, y

    def evaluate(self, corpus: Corpus, label_type: str):
        """This method calculates some evaluation metrics for the clustering.

        Also, the result of the evaluation is logged.

        :param corpus: the flair corpus this wrapper will use for evaluation.
        :param label_type: the label from the sentence will be used for the evaluation.
        """
        X, Y = self._convert_dataset(corpus, label_type=label_type)
        predict = self.model.predict(X)
        log.info("NMI - Score: " + str(normalized_mutual_info_score(predict, Y)))
| 4,340 | 36.422414 | 113 | py |
flair | flair-master/flair/models/multitask_model.py | import logging
import random
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import flair.nn
from flair.data import DT, Dictionary, Sentence
from flair.file_utils import cached_path
from flair.nn import Classifier
from flair.training_utils import Result
log = logging.getLogger("flair")
class MultitaskModel(flair.nn.Classifier):
"""Multitask Model class which acts as wrapper for creating custom multitask models.
Takes different tasks as input, parameter sharing is done by objects in flair,
i.e. creating a Embedding Layer and passing it to two different Models, will
result in a hard parameter-shared embedding layer. The abstract class takes care
of calling the correct forward propagation and loss function of the respective
model.
"""
def __init__(
self,
models: List[flair.nn.Classifier],
task_ids: Optional[List[str]] = None,
loss_factors: Optional[List[float]] = None,
use_all_tasks: bool = False,
) -> None:
"""Instantiates the MultiTaskModel.
:param models: Key (Task ID) - Value (flair.nn.Model) Pairs to stack model
"""
super().__init__()
task_ids_internal: List[str] = task_ids if task_ids else [f"Task_{i}" for i in range(len(models))]
self.tasks: Dict[str, flair.nn.Classifier] = {}
self.loss_factors: Dict[str, float] = {}
self.use_all_tasks = use_all_tasks
if not loss_factors:
loss_factors = [1.0] * len(models)
for task_id, model, loss_factor in zip(task_ids_internal, models, loss_factors):
self.add_module(task_id, model)
self.tasks[task_id] = model
self.loss_factors[task_id] = loss_factor
# the multi task model has several labels
self._label_type = model.label_type
self.to(flair.device)
def forward(self, *args) -> torch.Tensor:
raise NotImplementedError("`forward` is not used for multitask learning")
def _prepare_tensors(self, data_points: List[DT]) -> Tuple[torch.Tensor, ...]:
raise NotImplementedError("`_prepare_tensors` is not used for multitask learning")
def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> Tuple[torch.Tensor, int]:
"""Calls the respective forward loss of each model and sums them weighted by their loss factors.
:param sentences: batch of sentences
:return: loss
"""
batch_split = self.split_batch_to_task_ids(sentences, all_tasks=self.use_all_tasks)
loss = torch.tensor(0.0, device=flair.device)
count = 0
for task_id, split in batch_split.items():
task_loss, task_count = self.tasks[task_id].forward_loss([sentences[i] for i in split])
loss += self.loss_factors[task_id] * task_loss
count += task_count
return loss, count
def predict(
self,
sentences,
**predictargs,
):
for task in self.tasks.values():
task.predict(sentences, **predictargs)
@staticmethod
def split_batch_to_task_ids(sentences: Union[List[Sentence], Sentence], all_tasks: bool = False) -> Dict:
"""Splits a batch of sentences to its respective model.
If single sentence is assigned to several tasks (i.e. same corpus but different tasks), then the model
assignment for this batch is randomly chosen.
:param sentences: batch of sentences
:param all_tasks: use all tasks of each sentence. If deactivated, a random task will be sampled
:return: Key-value pairs as (task_id, list of sentences ids in batch)
"""
batch_to_task_mapping: Dict[str, List[int]] = {}
for sentence_id, sentence in enumerate(sentences):
if all_tasks:
multitask_ids = sentence.get_labels("multitask_id")
else:
multitask_ids = [random.choice(sentence.get_labels("multitask_id"))]
for multitask_id in multitask_ids:
if multitask_id.value in batch_to_task_mapping:
batch_to_task_mapping[multitask_id.value].append(sentence_id)
elif multitask_id.value not in batch_to_task_mapping:
batch_to_task_mapping[multitask_id.value] = [sentence_id]
return batch_to_task_mapping
def evaluate(
self,
data_points,
gold_label_type: str,
out_path: Optional[Union[str, Path]] = None,
embedding_storage_mode: str = "none",
mini_batch_size: int = 32,
main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
exclude_labels: List[str] = [],
gold_label_dictionary: Optional[Dictionary] = None,
return_loss: bool = True,
evaluate_all: bool = True,
**evalargs,
) -> Result:
"""Evaluates the model. Returns a Result object containing evaluation results and a loss value.
:param sentences: batch of sentences
:param embeddings_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed),
'cpu' (embeddings are stored on CPU) or 'gpu' (embeddings are stored on GPU)
:param mini_batch_size: size of batches
:param evaluate_all: choose if all tasks should be evaluated, or a single one, depending on gold_label_type
:return: Tuple of Result object and loss value (float)
"""
if not evaluate_all:
if gold_label_type not in self.tasks:
raise ValueError(
"evaluating a single task on a multitask model requires 'gold_label_type' to be a valid task."
)
data = [
dp
for dp in data_points
if any(label.value == gold_label_type for label in dp.get_labels("multitask_id"))
]
return self.tasks[gold_label_type].evaluate(
data,
gold_label_type=self.tasks[gold_label_type].label_type,
out_path=out_path,
embedding_storage_mode=embedding_storage_mode,
mini_batch_size=mini_batch_size,
main_evaluation_metric=main_evaluation_metric,
exclude_labels=exclude_labels,
gold_label_dictionary=gold_label_dictionary,
return_loss=return_loss,
**evalargs,
)
batch_split = self.split_batch_to_task_ids(data_points, all_tasks=True)
loss = torch.tensor(0.0, device=flair.device)
main_score = 0.0
all_detailed_results = ""
all_classification_report: Dict[str, Dict[str, Any]] = {}
for task_id, split in batch_split.items():
result = self.tasks[task_id].evaluate(
data_points=[data_points[i] for i in split],
gold_label_type=self.tasks[task_id].label_type,
out_path=f"{out_path}_{task_id}.txt" if out_path is not None else None,
embedding_storage_mode=embedding_storage_mode,
mini_batch_size=mini_batch_size,
main_evaluation_metric=main_evaluation_metric,
exclude_labels=exclude_labels,
gold_label_dictionary=gold_label_dictionary,
return_loss=return_loss,
**evalargs,
)
log.info(
f"{task_id} - {self.tasks[task_id]._get_name()} - "
f"loss: {result.loss} - {main_evaluation_metric[1]} "
f"({main_evaluation_metric[0]}) {round(result.main_score, 4)}"
)
loss += result.loss
main_score += result.main_score
all_detailed_results += (
50 * "-"
+ "\n\n"
+ task_id
+ " - "
+ "Label type: "
+ self.tasks[task_id].label_type
+ "\n\n"
+ result.detailed_results
)
all_classification_report[task_id] = result.classification_report
scores = {"loss": loss.item() / len(batch_split)}
return Result(
main_score=main_score / len(batch_split),
detailed_results=all_detailed_results,
scores=scores,
classification_report=all_classification_report,
)
def _get_state_dict(self):
    """Serialize the multitask model: one nested state per task plus shared settings.

    :return: state dict containing a per-task "model_states" mapping, the per-task
        "loss_factors" list (in task order), and the "use_all_tasks" flag.
    """
    state = super()._get_state_dict()
    # Each sub-model serializes itself below, so the shared weight dict stays empty.
    state["state_dict"] = {}

    per_task_states = {}
    factor_list = []
    for task_id, sub_model in self.tasks.items():
        per_task_states[task_id] = sub_model._get_state_dict()
        factor_list.append(self.loss_factors[task_id])

    state["model_states"] = per_task_states
    state["loss_factors"] = factor_list
    state["use_all_tasks"] = self.use_all_tasks
    return state
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
    """Reconstruct a multitask model from a serialized state.

    Loads each per-task sub-model via ``Classifier.load`` and re-assembles the
    multitask wrapper with the stored loss factors and task flags.
    """
    task_ids = []
    sub_models = []
    for task_id, sub_state in state["model_states"].items():
        task_ids.append(task_id)
        sub_models.append(Classifier.load(sub_state))

    return cls(
        models=sub_models,
        task_ids=task_ids,
        loss_factors=state["loss_factors"],
        use_all_tasks=state.get("use_all_tasks", False),
        **kwargs,
    )
@property
def label_type(self):
    """The label type set at construction time for this multitask model."""
    return self._label_type
@staticmethod
def _fetch_model(model_name) -> str:
    """Resolve a shorthand model name to a locally cached model path.

    Known shorthands are downloaded via ``cached_path``; any other name is
    returned unchanged (assumed to already be a path or URL).
    """
    hu_path = "https://nlp.informatik.hu-berlin.de/resources/models"

    model_map = {
        # biomedical models
        "bioner": f"{hu_path}/bioner/hunflair.pt",
        "hunflair": f"{hu_path}/bioner/hunflair.pt",
        "hunflair-paper": f"{hu_path}/bioner/hunflair-paper.pt",
        # entity linker
        "linker": f"{hu_path}/zelda/v2/zelda-v2.pt",
        "zelda": f"{hu_path}/zelda/v2/zelda-v2.pt",
    }

    if model_name in model_map:
        model_name = cached_path(model_map[model_name], cache_dir=Path("models"))
    return model_name
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "MultitaskModel":
    """Load a MultitaskModel from a file path, URL, or an already-unpickled state dict."""
    from typing import cast

    return cast("MultitaskModel", super().load(model_path=model_path))
| 10,773 | 38.756458 | 115 | py |
flair | flair-master/flair/models/entity_linker_model.py | import logging
import re
from functools import lru_cache
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Union
from unicodedata import category
import torch
import flair.embeddings
import flair.nn
from flair.data import Dictionary, Sentence, Span
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class CandidateGenerator:
    """Given a string, the CandidateGenerator returns possible target classes as candidates.

    :param candidates: either the string "zelda" (downloads the ZELDA candidate lists) or a
        mapping from mention strings to lists/collections of candidate labels.
    :param backoff: if True, mentions are normalized (lower-cased, punctuation stripped,
        spaces collapsed) before lookup, and candidate lists of mentions that collapse to
        the same normalized form are merged.
    """

    def __init__(self, candidates: Union[str, Dict], backoff: bool = True) -> None:
        # internal candidate lists of generator
        self.mention_to_candidates_map: Dict = {}

        # load Zelda candidates if so passed
        if isinstance(candidates, str) and candidates.lower() == "zelda":
            zelda_path: str = "https://flair.informatik.hu-berlin.de/resources/datasets/zelda"
            zelda_candidates = cached_path(f"{zelda_path}/zelda_mention_entities_counter.pickle", cache_dir="datasets")
            import pickle

            with open(zelda_candidates, "rb") as handle:
                mention_entities_counter = pickle.load(handle)

            # create candidate lists
            candidate_lists = {}
            for mention in mention_entities_counter:
                candidate_lists[mention] = list(mention_entities_counter[mention].keys())

            self.mention_to_candidates_map = candidate_lists

        # NOTE: use the plain `dict` type for the runtime check (not typing.Dict)
        elif isinstance(candidates, dict):
            self.mention_to_candidates_map = candidates

        # if lower casing is enabled, create candidate lists of lower cased versions
        self.backoff = backoff
        if self.backoff:
            # create a new dictionary for lower cased mentions
            lowercased_mention_to_candidates_map: Dict = {}

            # go through each mention and its candidates
            # (loop variable renamed so it no longer shadows the `candidates` parameter)
            for mention, candidate_list in self.mention_to_candidates_map.items():
                backoff_mention = self._make_backoff_string(mention)
                # check if backoff mention already seen. If so, add candidates. Else, create new entry.
                if backoff_mention in lowercased_mention_to_candidates_map:
                    current_candidates = lowercased_mention_to_candidates_map[backoff_mention]
                    lowercased_mention_to_candidates_map[backoff_mention] = set(current_candidates).union(
                        candidate_list
                    )
                else:
                    lowercased_mention_to_candidates_map[backoff_mention] = candidate_list

            # set lowercased version as map
            self.mention_to_candidates_map = lowercased_mention_to_candidates_map

    @lru_cache(maxsize=50000)
    def _make_backoff_string(self, mention: str) -> str:
        """Normalize a mention: lower-case, strip Unicode punctuation, collapse repeated spaces."""
        backoff_mention = mention.lower()
        backoff_mention = "".join(ch for ch in backoff_mention if category(ch)[0] not in "P")
        backoff_mention = re.sub(" +", " ", backoff_mention)
        return backoff_mention

    def get_candidates(self, mention: str) -> Set[str]:
        """Given a mention, this method returns a set of candidate classes."""
        if self.backoff:
            mention = self._make_backoff_string(mention)

        return set(self.mention_to_candidates_map[mention]) if mention in self.mention_to_candidates_map else set()
class EntityLinker(flair.nn.DefaultClassifier[Sentence, Span]):
    """Entity Linking Model.

    The model expects text/sentences with annotated entity mentions and predicts entities to these mentions.
    To this end a word embedding is used to embed the sentences and the embedding of the entity mention goes through a linear layer to get the actual class label.
    The model is able to predict '<unk>' for entity mentions that the model can not confidently match to any of the known labels.
    """

    def __init__(
        self,
        embeddings: flair.embeddings.TokenEmbeddings,
        label_dictionary: Dictionary,
        pooling_operation: str = "first_last",
        label_type: str = "nel",
        candidates: Optional[CandidateGenerator] = None,
        **classifierargs,
    ) -> None:
        """Initializes an EntityLinker.

        :param embeddings: embeddings used to embed the words/sentences
        :param label_dictionary: dictionary that gives ids to all classes. Should contain <unk>
        :param pooling_operation: either 'average', 'first', 'last' or 'first&last'. Specifies the way of how text representations of entity mentions (with more than one word) are handled.
        E.g. 'average' means that as text representation we take the average of the embeddings of the words in the mention. 'first&last' concatenates
        the embedding of the first and the embedding of the last word.
        :param label_type: name of the label you use.
        :param candidates: optional CandidateGenerator; when given, prediction scores are
            restricted to each mention's candidate set (see _mask_scores).
        """
        super().__init__(
            embeddings=embeddings,
            label_dictionary=label_dictionary,
            # 'first_last' concatenates two token embeddings, so the classifier input doubles
            final_embedding_size=embeddings.embedding_length * 2
            if pooling_operation == "first_last"
            else embeddings.embedding_length,
            **classifierargs,
        )

        self.pooling_operation = pooling_operation
        self._label_type = label_type

        # dispatch table mapping the pooling name to the span-pooling function
        cases: Dict[str, Callable[[Span, List[str]], torch.Tensor]] = {
            "average": self.emb_mean,
            "first": self.emb_first,
            "last": self.emb_last,
            "first_last": self.emb_firstAndLast,
        }

        if pooling_operation not in cases:
            raise KeyError('pooling_operation has to be one of "average", "first", "last" or "first_last"')

        self.aggregated_embedding = cases[pooling_operation]

        self.candidates = candidates

        self.to(flair.device)

    def emb_first(self, span: Span, embedding_names):
        """Represent a span by the embedding of its first token."""
        return span.tokens[0].get_embedding(embedding_names)

    def emb_last(self, span: Span, embedding_names):
        """Represent a span by the embedding of its last token."""
        return span.tokens[-1].get_embedding(embedding_names)

    def emb_firstAndLast(self, span: Span, embedding_names):
        """Represent a span by the concatenation of its first and last token embeddings."""
        return torch.cat(
            (span.tokens[0].get_embedding(embedding_names), span.tokens[-1].get_embedding(embedding_names)), 0
        )

    def emb_mean(self, span, embedding_names):
        """Represent a span by the mean of all its token embeddings."""
        return torch.mean(torch.stack([token.get_embedding(embedding_names) for token in span], 0), 0)

    def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:
        # the data points to classify are the annotated entity-mention spans
        return sentence.get_spans(self.label_type)

    def _filter_data_point(self, data_point: Sentence) -> bool:
        # only sentences that carry at least one mention are useful for training
        return bool(data_point.get_labels(self.label_type))

    def _get_embedding_for_data_point(self, prediction_data_point: Span) -> torch.Tensor:
        return self.aggregated_embedding(prediction_data_point, self.embeddings.get_names())

    def _get_state_dict(self):
        """Serialize the model together with its configuration."""
        model_state = {
            **super()._get_state_dict(),
            "word_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_type": self.label_type,
            "label_dictionary": self.label_dictionary,
            "pooling_operation": self.pooling_operation,
            "loss_weights": self.weight_dict,
            "candidates": self.candidates,
        }
        return model_state

    def _print_predictions(self, batch, gold_label_type):
        """Format per-span gold-vs-predicted lines for evaluation output files."""
        lines = []
        for datapoint in batch:
            eval_line = f"\n{datapoint.to_original_text()}\n"

            for span in datapoint.get_spans(gold_label_type):
                symbol = "✓" if span.get_label(gold_label_type).value == span.get_label("predicted").value else "❌"
                eval_line += (
                    f' - "{span.text}" / {span.get_label(gold_label_type).value}'
                    f' --> {span.get_label("predicted").value} ({symbol})\n'
                )

            lines.append(eval_line)
        return lines

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        # remap state dict for models serialized with Flair <= 0.11.3
        import re

        state_dict = state["state_dict"]
        for key in list(state_dict.keys()):
            state_dict[re.sub("^word_embeddings\\.", "embeddings.", key)] = state_dict.pop(key)

        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("word_embeddings"),
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type"),
            pooling_operation=state.get("pooling_operation"),
            loss_weights=state.get("loss_weights", {"<unk>": 0.3}),
            candidates=state.get("candidates", None),
            **kwargs,
        )

    @property
    def label_type(self):
        """The label type this linker reads mentions from and writes predictions to."""
        return self._label_type

    def _mask_scores(self, scores: torch.Tensor, data_points: List[Span]):
        """Restrict scores to each mention's candidate labels (if a candidate generator is set).

        Non-candidate classes are set to -inf so they can never be predicted.
        """
        if not self.candidates:
            return scores

        masked_scores = -torch.inf * torch.ones(scores.size(), requires_grad=True, device=flair.device)

        for idx, span in enumerate(data_points):
            # get the candidates
            candidate_set = self.candidates.get_candidates(span.text)
            # during training, add the gold value as candidate
            if self.training:
                candidate_set.add(span.get_label(self.label_type).value)
            candidate_set.add("<unk>")
            indices_of_candidates = [self.label_dictionary.get_idx_for_item(candidate) for candidate in candidate_set]
            masked_scores[idx, indices_of_candidates] = scores[idx, indices_of_candidates]

        return masked_scores

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "EntityLinker":
        """Load an EntityLinker from a file path, URL, or an already-unpickled state dict."""
        from typing import cast

        return cast("EntityLinker", super().load(model_path=model_path))
| 9,722 | 41.458515 | 188 | py |
flair | flair-master/flair/models/text_classification_model.py | import logging
from pathlib import Path
from typing import Any, Dict, List, Union
import torch
import flair.embeddings
import flair.nn
from flair.data import Sentence
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class TextClassifier(flair.nn.DefaultClassifier[Sentence, Sentence]):
    """Text Classification Model.

    The model takes word embeddings, puts them into an RNN to obtain a text
    representation, and puts the text representation in the end into a linear
    layer to get the actual class label. The model can handle single and multi
    class data sets.
    """

    def __init__(
        self,
        embeddings: flair.embeddings.DocumentEmbeddings,
        label_type: str,
        **classifierargs,
    ) -> None:
        """Initializes a TextClassifier.

        :param embeddings: embeddings used to embed each data point
        :param label_dictionary: dictionary of labels you want to predict
        :param multi_label: auto-detected by default, but you can set this to True to force multi-label prediction
        or False to force single-label prediction
        :param multi_label_threshold: If multi-label you can set the threshold to make predictions
        :param beta: Parameter for F-beta score for evaluation and training annealing
        :param loss_weights: Dictionary of weights for labels for the loss function
        (if any label's weight is unspecified it will default to 1.0)
        """
        super().__init__(
            **classifierargs,
            embeddings=embeddings,
            final_embedding_size=embeddings.embedding_length,
        )

        self._label_type = label_type

        # auto-spawn on GPU if available
        self.to(flair.device)

    def _get_embedding_for_data_point(self, prediction_data_point: Sentence) -> torch.Tensor:
        # classification input is the document embedding of the whole sentence
        embedding_names = self.embeddings.get_names()
        return prediction_data_point.get_embedding(embedding_names)

    def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Sentence]:
        # each sentence is itself the data point to classify
        return [sentence]

    def _get_state_dict(self):
        """Serialize the model together with its configuration."""
        model_state = {
            **super()._get_state_dict(),
            "document_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_dictionary": self.label_dictionary,
            "label_type": self.label_type,
            "multi_label": self.multi_label,
            "multi_label_threshold": self.multi_label_threshold,
            "weight_dict": self.weight_dict,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        import re

        # remap state dict for models serialized with Flair <= 0.11.3
        state_dict = state["state_dict"]
        for key in list(state_dict.keys()):
            state_dict[re.sub("^document_embeddings\\.", "embeddings.", key)] = state_dict.pop(key)

        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("document_embeddings"),
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type"),
            multi_label=state.get("multi_label"),
            multi_label_threshold=state.get("multi_label_threshold", 0.5),
            loss_weights=state.get("weight_dict"),
            **kwargs,
        )

    @staticmethod
    def _fetch_model(model_name) -> str:
        """Resolve a shorthand model name to a locally cached model path; unknown names pass through."""
        model_map = {}
        hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"

        model_map["de-offensive-language"] = "/".join(
            [hu_path, "de-offensive-language", "germ-eval-2018-task-1-v0.8.pt"]
        )

        # English sentiment models
        model_map["sentiment"] = "/".join(
            [
                hu_path,
                "sentiment-curated-distilbert",
                "sentiment-en-mix-distillbert_4.pt",
            ]
        )
        model_map["en-sentiment"] = "/".join(
            [
                hu_path,
                "sentiment-curated-distilbert",
                "sentiment-en-mix-distillbert_4.pt",
            ]
        )
        model_map["sentiment-fast"] = "/".join(
            [hu_path, "sentiment-curated-fasttext-rnn", "sentiment-en-mix-ft-rnn_v8.pt"]
        )

        # Communicative Functions Model
        model_map["communicative-functions"] = "/".join([hu_path, "comfunc", "communicative-functions.pt"])

        cache_dir = Path("models")
        if model_name in model_map:
            model_name = cached_path(model_map[model_name], cache_dir=cache_dir)

        return model_name

    @property
    def label_type(self):
        """The label type this classifier predicts."""
        return self._label_type

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TextClassifier":
        """Load a TextClassifier from a file path, URL, or an already-unpickled state dict."""
        from typing import cast

        return cast("TextClassifier", super().load(model_path=model_path))
| 4,864 | 34.510949 | 114 | py |
flair | flair-master/flair/models/relation_extractor_model.py | import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import torch
import flair.embeddings
import flair.nn
from flair.data import Relation, Sentence
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class RelationExtractor(flair.nn.DefaultClassifier[Sentence, Relation]):
    """Relation extraction model: classifies the relation between pairs of entity spans."""

    def __init__(
        self,
        embeddings: flair.embeddings.TokenEmbeddings,
        label_type: str,
        entity_label_type: str,
        entity_pair_filters: Optional[List[Tuple[str, str]]] = None,
        pooling_operation: str = "first_last",
        train_on_gold_pairs_only: bool = False,
        **classifierargs,
    ) -> None:
        """Initializes a RelationClassifier.

        :param document_embeddings: embeddings used to embed each data point
        :param label_dictionary: dictionary of labels you want to predict
        :param beta: Parameter for F-beta score for evaluation and training annealing
        :param train_on_gold_pairs_only: Set true to not train to predict no relation.
        :param loss_weights: Dictionary of weights for labels for the loss function
        (if any label's weight is unspecified it will default to 1.0)
        """
        # pooling operation to get embeddings for entites
        self.pooling_operation = pooling_operation
        # a relation pairs two spans, and 'first_last' uses two tokens per span
        relation_representation_length = 2 * embeddings.embedding_length
        if self.pooling_operation == "first_last":
            relation_representation_length *= 2

        super().__init__(
            embeddings=embeddings,
            final_embedding_size=relation_representation_length,
            **classifierargs,
        )

        # set embeddings
        self.embeddings: flair.embeddings.TokenEmbeddings = embeddings

        # set relation and entity label types
        self._label_type = label_type
        self.entity_label_type = entity_label_type
        self.train_on_gold_pairs_only = train_on_gold_pairs_only

        # whether to use gold entity pairs, and whether to filter entity pairs by type
        if entity_pair_filters is not None:
            self.entity_pair_filters: Optional[Set[Tuple[str, str]]] = set(entity_pair_filters)
        else:
            self.entity_pair_filters = None

        self.to(flair.device)

    def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Relation]:
        """Enumerate all ordered entity-span pairs of the sentence as candidate relations."""
        entity_pairs = []
        entity_spans = sentence.get_spans(self.entity_label_type)

        for span_1 in entity_spans:
            for span_2 in entity_spans:
                if span_1 == span_2:
                    continue

                # filter entity pairs according to their tags if set
                if (
                    self.entity_pair_filters is not None
                    and (
                        span_1.get_label(self.entity_label_type).value,
                        span_2.get_label(self.entity_label_type).value,
                    )
                    not in self.entity_pair_filters
                ):
                    continue

                relation = Relation(span_1, span_2)
                # optionally skip "no relation" pairs during training
                if self.training and self.train_on_gold_pairs_only and relation.get_label(self.label_type).value == "O":
                    continue
                entity_pairs.append(relation)

        return entity_pairs

    def _get_embedding_for_data_point(self, prediction_data_point: Relation) -> torch.Tensor:
        """Concatenate the pooled embeddings of both spans into one relation representation."""
        span_1 = prediction_data_point.first
        span_2 = prediction_data_point.second
        embedding_names = self.embeddings.get_names()
        if self.pooling_operation == "first_last":
            return torch.cat(
                [
                    span_1.tokens[0].get_embedding(embedding_names),
                    span_1.tokens[-1].get_embedding(embedding_names),
                    span_2.tokens[0].get_embedding(embedding_names),
                    span_2.tokens[-1].get_embedding(embedding_names),
                ]
            )
        else:
            return torch.cat(
                [span_1.tokens[0].get_embedding(embedding_names), span_2.tokens[0].get_embedding(embedding_names)]
            )

    def _print_predictions(self, batch, gold_label_type):
        """Format per-relation gold-vs-predicted lines for evaluation output files."""
        lines = []
        for datapoint in batch:
            eval_line = f"\n{datapoint.to_original_text()}\n"
            for relation in datapoint.get_relations(gold_label_type):
                symbol = (
                    "✓" if relation.get_label(gold_label_type).value == relation.get_label("predicted").value else "❌"
                )
                eval_line += (
                    f' - "{relation.text}"\t{relation.get_label(gold_label_type).value}'
                    f' --> {relation.get_label("predicted").value} ({symbol})\n'
                )
            lines.append(eval_line)
        return lines

    def _get_state_dict(self):
        """Serialize the model together with its configuration."""
        model_state = {
            **super()._get_state_dict(),
            "embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_dictionary": self.label_dictionary,
            "label_type": self.label_type,
            "entity_label_type": self.entity_label_type,
            "weight_dict": self.weight_dict,
            "pooling_operation": self.pooling_operation,
            "entity_pair_filters": self.entity_pair_filters,
            "train_on_gold_pairs_only": self.train_on_gold_pairs_only,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("embeddings"),
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type"),
            entity_label_type=state.get("entity_label_type"),
            loss_weights=state.get("weight_dict"),
            pooling_operation=state.get("pooling_operation"),
            entity_pair_filters=state.get("entity_pair_filters"),
            train_on_gold_pairs_only=state.get("train_on_gold_pairs_only", False),
            **kwargs,
        )

    @property
    def label_type(self):
        """The relation label type this extractor predicts."""
        return self._label_type

    @staticmethod
    def _fetch_model(model_name) -> str:
        """Resolve a shorthand model name to a locally cached model path; unknown names pass through."""
        model_map = {}
        hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
        model_map["relations"] = "/".join([hu_path, "relations", "relations-v11.pt"])
        cache_dir = Path("models")
        if model_name in model_map:
            model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
        return model_name

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "RelationExtractor":
        """Load a RelationExtractor from a file path, URL, or an already-unpickled state dict."""
        from typing import cast

        return cast("RelationExtractor", super().load(model_path=model_path))
| 6,828 | 37.801136 | 120 | py |
flair | flair-master/flair/models/tars_model.py | import logging
import typing
from abc import ABC
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import minmax_scale
from tqdm import tqdm
import flair
from flair.data import Corpus, Dictionary, Sentence, Span
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import (
TokenEmbeddings,
TransformerDocumentEmbeddings,
TransformerWordEmbeddings,
)
from flair.file_utils import cached_path
from flair.models.sequence_tagger_model import SequenceTagger
from flair.models.text_classification_model import TextClassifier
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class FewshotClassifier(flair.nn.Classifier[Sentence], ABC):
    """Base class for TARS few-shot/zero-shot classifiers that can handle multiple tasks."""

    def __init__(self) -> None:
        self._current_task = None
        self._task_specific_attributes: Dict[str, Dict[str, Any]] = {}
        self.label_nearest_map = None
        self.tars_model: flair.nn.Classifier[Sentence]
        self.separator: str

        super().__init__()

    def forward_loss(self, data_points: Union[List[Sentence], Sentence]) -> Tuple[torch.Tensor, int]:
        """Compute the loss on TARS-reformatted versions of the given data points."""
        if not isinstance(data_points, list):
            data_points = [data_points]

        # Transform input data into TARS format
        sentences = self._get_tars_formatted_sentences(data_points)

        loss, count = self.tars_model.forward_loss(sentences)
        return loss, count

    @property
    def tars_embeddings(self):
        raise NotImplementedError

    def _get_tars_formatted_sentence(self, label, sentence):
        raise NotImplementedError

    def _get_tars_formatted_sentences(self, sentences: List[Sentence]):
        """Create one <label, text> pair sentence per (sampled) label for each input sentence."""
        label_text_pairs = []
        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
        for sentence in sentences:
            label_text_pairs_for_sentence = []
            if self.training and self.num_negative_labels_to_sample is not None:
                # in training, pair each sentence with its gold labels plus sampled negatives
                positive_labels = list(
                    OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)])
                )
                sampled_negative_labels = self._get_nearest_labels_for(positive_labels)
                for label in positive_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
                for label in sampled_negative_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
            else:
                # at prediction time, pair each sentence with every label of the task
                for label in all_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
            label_text_pairs.extend(label_text_pairs_for_sentence)

        return label_text_pairs

    def _get_nearest_labels_for(self, labels):
        """Sample negative labels for training, weighted by similarity to the gold labels."""
        # if there are no labels, return a random sample as negatives
        if len(labels) == 0:
            tags = self.get_current_label_dictionary().get_items()
            import random

            sample = random.sample(tags, k=self.num_negative_labels_to_sample)
            return sample

        already_sampled_negative_labels = set()

        # otherwise, go through all labels
        for label in labels:
            plausible_labels = []
            plausible_label_probabilities = []
            for plausible_label in self.label_nearest_map[label]:
                if plausible_label in already_sampled_negative_labels or plausible_label in labels:
                    continue
                else:
                    plausible_labels.append(plausible_label)
                    plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label])

            # make sure the probabilities always sum up to 1
            plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64")
            plausible_label_probabilities += 1e-08
            plausible_label_probabilities /= np.sum(plausible_label_probabilities)

            if len(plausible_labels) > 0:
                num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels))
                sampled_negative_labels = np.random.default_rng().choice(
                    plausible_labels,
                    num_samples,
                    replace=False,
                    p=plausible_label_probabilities,
                )
                already_sampled_negative_labels.update(sampled_negative_labels)

        return already_sampled_negative_labels

    def train(self, mode=True):
        """Populate label similarity map based on cosine similarity before running epoch.

        If the `num_negative_labels_to_sample` is set to an integer value then before starting
        each epoch the model would create a similarity measure between the label names based
        on cosine distances between their BERT encoded embeddings.
        """
        if mode and self.num_negative_labels_to_sample is not None:
            self._compute_label_similarity_for_current_epoch()

        super().train(mode)

    def _compute_label_similarity_for_current_epoch(self):
        """Compute the similarity between all labels for better sampling of negatives."""
        # get and embed all labels by making a Sentence object that contains only the label text
        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
        label_sentences = [Sentence(label) for label in all_labels]

        self.tars_embeddings.eval()  # TODO: check if this is necessary
        self.tars_embeddings.embed(label_sentences)
        self.tars_embeddings.train()

        # get each label embedding and scale between 0 and 1
        if isinstance(self.tars_embeddings, TokenEmbeddings):
            encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences]
        else:
            encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences]

        normalized_encoding = minmax_scale(encodings_np)

        # compute similarity matrix
        similarity_matrix = cosine_similarity(normalized_encoding)

        # the higher the similarity, the greater the chance that a label is
        # sampled as negative example
        negative_label_probabilities = {}
        for row_index, label in enumerate(all_labels):
            negative_label_probabilities[label] = {}
            for column_index, other_label in enumerate(all_labels):
                if label != other_label:
                    negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index]
        self.label_nearest_map = negative_label_probabilities

    def get_current_label_dictionary(self):
        """Return the label dictionary of the currently active task."""
        label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"]
        return label_dictionary

    def get_current_label_type(self):
        """Return the label type of the currently active task."""
        return self._task_specific_attributes[self._current_task]["label_type"]

    def is_current_task_multi_label(self):
        """Return whether the currently active task is multi-label."""
        return self._task_specific_attributes[self._current_task]["multi_label"]

    def add_and_switch_to_new_task(
        self,
        task_name: str,
        label_dictionary: Union[List, Set, Dictionary, str],
        label_type: str,
        multi_label: bool = True,
        force_switch: bool = False,
    ):
        """Adds a new task to an existing TARS model.

        Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor
        except for model choice, batch size and negative sampling. This method does not store the resultant model onto
        disk.

        :param task_name: a string depicting the name of the task
        :param label_dictionary: dictionary of the labels you want to predict
        :param label_type: string to identify the label type ('ner', 'sentiment', etc.)
        :param multi_label: whether this task is a multi-label prediction problem
        :param force_switch: if True, will overwrite existing task with same name
        """
        if task_name in self._task_specific_attributes and not force_switch:
            log.warning(f"Task `{task_name}` already exists in TARS model. Switching to it.")
        else:
            # make label dictionary if no Dictionary object is passed
            if isinstance(label_dictionary, Dictionary):
                label_dictionary = label_dictionary.get_items()
            # use isinstance instead of type comparison (also accepts str subclasses)
            if isinstance(label_dictionary, str):
                label_dictionary = [label_dictionary]

            # prepare dictionary of tags (without B- I- prefixes and without UNK)
            tag_dictionary = Dictionary(add_unk=False)
            for tag in label_dictionary:
                if tag == "<unk>" or tag == "O":
                    continue
                # strip BIO(ES)-style prefixes such as "B-" / "I-" before adding
                if len(tag) > 1 and tag[1] == "-":
                    tag = tag[2:]
                tag_dictionary.add_item(tag)

            self._task_specific_attributes[task_name] = {
                "label_dictionary": tag_dictionary,
                "label_type": label_type,
                "multi_label": multi_label,
            }

        self.switch_to_task(task_name)

    def list_existing_tasks(self) -> Set[str]:
        """Lists existing tasks in the loaded TARS model on the console."""
        return set(self._task_specific_attributes.keys())

    def switch_to_task(self, task_name):
        """Switches to a task which was previously added."""
        if task_name not in self._task_specific_attributes:
            log.error(
                "Provided `%s` does not exist in the model. Consider calling `add_and_switch_to_new_task` first.",
                task_name,
            )
        else:
            self._current_task = task_name

    def _drop_task(self, task_name):
        """Remove a task from the model (unless it is the currently active one)."""
        if task_name in self._task_specific_attributes:
            if self._current_task == task_name:
                log.error(
                    "`%s` is the current task. Switch to some other task before dropping this.",
                    task_name,
                )
            else:
                self._task_specific_attributes.pop(task_name)
        else:
            log.warning("No task exists with the name `%s`.", task_name)

    @staticmethod
    def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
        """Drop sentences without tokens, logging how many were removed."""
        filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
        if len(sentences) != len(filtered_sentences):
            log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.")
        return filtered_sentences

    @property
    def label_type(self):
        """The label type of the currently active task."""
        return self.get_current_label_type()

    def predict_zero_shot(
        self,
        sentences: Union[List[Sentence], Sentence],
        candidate_label_set: Union[List[str], Set[str], str],
        multi_label: bool = True,
    ):
        """Make zero shot predictions from the TARS model.

        :param sentences: input sentence objects to classify
        :param candidate_label_set: set of candidate labels
        :param multi_label: indicates whether multi-label or single class prediction. Defaults to True.
        """
        # check if candidate_label_set is empty
        if candidate_label_set is None or len(candidate_label_set) == 0:
            log.warning("Provided candidate_label_set is empty")
            return

        # make list if only one candidate label is passed
        if isinstance(candidate_label_set, str):
            candidate_label_set = {candidate_label_set}

        # create label dictionary
        label_dictionary = Dictionary(add_unk=False)
        for label in candidate_label_set:
            label_dictionary.add_item(label)

        # note current task
        existing_current_task = self._current_task

        # create a temporary task
        self.add_and_switch_to_new_task(
            task_name="ZeroShot",
            label_dictionary=label_dictionary,
            label_type="-".join(label_dictionary.get_items()),
            multi_label=multi_label,
            force_switch=True,  # overwrite any older configuration
        )

        try:
            # make zero shot predictions
            self.predict(sentences)
        finally:
            # switch to the pre-existing task
            self.switch_to_task(existing_current_task)
            self._drop_task("ZeroShot")

        return

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        """Yield all tokens used by the model, including label names and the separator."""
        yield from super().get_used_tokens(corpus)
        for label in self.get_current_label_dictionary().idx2item:
            yield [label.decode("utf-8")]
        yield [self.separator]

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "FewshotClassifier":
        """Load a FewshotClassifier from a file path, URL, or an already-unpickled state dict."""
        from typing import cast

        return cast("FewshotClassifier", super().load(model_path=model_path))
class TARSTagger(FewshotClassifier):
"""TARS model for sequence tagging.
In the backend, the model uses a BERT based 5-class sequence labeler which given a <label, text> pair predicts the
probability for each word to belong to one of the BIOES classes. The input data is a usual Sentence object which
is inflated by the model internally before pushing it through the transformer stack of BERT.
"""
static_label_type = "tars_label"
def __init__(
    self,
    task_name: Optional[str] = None,
    label_dictionary: Optional[Dictionary] = None,
    label_type: Optional[str] = None,
    embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased",
    num_negative_labels_to_sample: int = 2,
    prefix: bool = True,
    **tagger_args,
) -> None:
    """Initializes a TarsTagger.

    :param task_name: a string depicting the name of the task
    :param label_dictionary: dictionary of labels you want to predict
    :param label_type: string to identify the label type of the task
    :param embeddings: name of the pre-trained transformer model e.g.,
    'bert-base-uncased' etc
    :param num_negative_labels_to_sample: number of negative labels to sample for each
    positive labels against a sentence during training. Defaults to 2 negative
    labels for each positive label. The model would sample all the negative labels
    if None is passed. That slows down the training considerably.
    :param prefix: if True, the label text is prepended to the sentence; otherwise appended
    """
    super().__init__()

    if isinstance(embeddings, str):
        embeddings = TransformerWordEmbeddings(
            model=embeddings,
            fine_tune=True,
            layers="-1",
            layer_mean=False,
        )

    # prepare TARS dictionary (the internal tagger only predicts "entity" spans)
    tars_dictionary = Dictionary(add_unk=False)
    tars_dictionary.add_item("entity")
    tars_dictionary.span_labels = True

    # initialize a bare-bones sequence tagger
    self.tars_model: SequenceTagger = SequenceTagger(
        hidden_size=123,
        embeddings=embeddings,
        tag_dictionary=tars_dictionary,
        tag_type=self.static_label_type,
        use_crf=False,
        use_rnn=False,
        reproject_embeddings=False,
        **tagger_args,
    )

    # transformer separator (placed between label text and sentence text)
    self.separator = str(self.tars_embeddings.tokenizer.sep_token)
    if self.tars_embeddings.tokenizer._bos_token:
        self.separator += str(self.tars_embeddings.tokenizer.bos_token)

    self.prefix = prefix
    self.num_negative_labels_to_sample = num_negative_labels_to_sample

    if task_name and label_dictionary and label_type:
        # Store task specific labels since TARS can handle multiple tasks
        self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
    else:
        log.info(
            "TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
            "before training this model"
        )
def _get_tars_formatted_sentence(self, label, sentence):
original_text = sentence.to_tokenized_string()
label_text_pair = (
f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
)
label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" "))
# make a tars sentence where all labels are O by default
tars_sentence = Sentence(label_text_pair, use_tokenizer=False)
for entity_label in sentence.get_labels(self.label_type):
if entity_label.value == label:
new_span = Span(
[tars_sentence.get_token(token.idx + label_length) for token in entity_label.data_point]
)
new_span.add_label(self.static_label_type, value="entity")
tars_sentence.copy_context_from_sentence(sentence)
return tars_sentence
def _get_state_dict(self):
model_state = {
**super()._get_state_dict(),
"current_task": self._current_task,
"tag_type": self.get_current_label_type(),
"tag_dictionary": self.get_current_label_dictionary(),
"tars_embeddings": self.tars_model.embeddings.save_embeddings(use_state_dict=False),
"num_negative_labels_to_sample": self.num_negative_labels_to_sample,
"prefix": self.prefix,
"task_specific_attributes": self._task_specific_attributes,
}
return model_state
@staticmethod
def _fetch_model(model_name) -> str:
if model_name == "tars-ner":
cache_dir = Path("models")
model_name = cached_path(
"https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt",
cache_dir=cache_dir,
)
return model_name
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
tars_embeddings = state.get("tars_embeddings")
if tars_embeddings is None:
tars_model = state["tars_model"]
tars_embeddings = tars_model.embeddings
# init new TARS classifier
model = super()._init_model_with_state_dict(
state,
task_name=state.get("current_task"),
label_dictionary=state.get("tag_dictionary"),
label_type=state.get("tag_type"),
embeddings=tars_embeddings,
num_negative_labels_to_sample=state.get("num_negative_labels_to_sample"),
prefix=state.get("prefix"),
**kwargs,
)
# set all task information
model._task_specific_attributes = state["task_specific_attributes"]
return model
@property
def tars_embeddings(self):
return self.tars_model.embeddings
def predict(
self,
sentences: Union[List[Sentence], Sentence],
mini_batch_size=32,
return_probabilities_for_all_classes: bool = False,
verbose: bool = False,
label_name: Optional[str] = None,
return_loss=False,
embedding_storage_mode="none",
most_probable_first: bool = True,
):
"""Predict sequence tags for Named Entity Recognition task.
Args:
sentences: a Sentence or a List of Sentence
mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
up to a point when it has no more effect.
all_tag_prob: True to compute the score for each tag on each token,
otherwise only the score of the best tag is returned
verbose: set to True to display a progress bar
return_loss: set to True to also compute the loss
label_name: set this to change the name of the label type that is predicted
embedding_storage_mode: default is 'none' which doesn't store the embeddings in RAM. Only set to 'cpu' or 'gpu'
if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
'gpu' to store embeddings in GPU memory.
return_probabilities_for_all_classes: if True, all classes will be added with their respective confidences.
most_probable_first: if True, nested predictions will be removed, if False all predictions will be returned,
including overlaps
"""
if label_name is None:
label_name = self.get_current_label_type()
if not sentences:
return sentences
if not isinstance(sentences, list):
sentences = [sentences]
Sentence.set_context_for_sentences(sentences)
reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)
dataloader = DataLoader(
dataset=FlairDatapointDataset(reordered_sentences),
batch_size=mini_batch_size,
)
# progress bar for verbosity
if verbose:
dataloader = tqdm(dataloader)
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
overall_loss = 0
overall_count = 0
with torch.no_grad():
for batch in dataloader:
batch = self._filter_empty_sentences(batch)
# stop if all sentences are empty
if not batch:
continue
tars_sentences: List[Sentence] = []
all_labels_to_sentence: List[Dict[str, Sentence]] = []
for sentence in batch:
# always remove tags first
sentence.remove_labels(label_name)
labels_to_sentence: Dict[str, Sentence] = {}
for label in all_labels:
tars_sentence = self._get_tars_formatted_sentence(label, sentence)
tars_sentences.append(tars_sentence)
labels_to_sentence[label] = tars_sentence
all_labels_to_sentence.append(labels_to_sentence)
loss_and_count = self.tars_model.predict(
tars_sentences,
label_name=label_name,
mini_batch_size=mini_batch_size,
return_loss=return_loss,
)
if return_loss:
overall_loss += loss_and_count[0].item()
overall_count += loss_and_count[1]
# go through each sentence in the batch
for sentence, labels_to_sentence in zip(batch, all_labels_to_sentence):
# always remove tags first
sentence.remove_labels(label_name)
all_detected = {}
for label, tars_sentence in labels_to_sentence.items():
for predicted in tars_sentence.get_labels(label_name):
predicted.set_value(label, predicted.score)
all_detected[predicted] = predicted.score
if most_probable_first:
import operator
already_set_indices: List[int] = []
sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1))
sorted_x.reverse()
for tuple in sorted_x:
# get the span and its label
label = tuple[0]
label_length = (
0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" "))
)
# determine whether tokens in this span already have a label
tag_this = True
for token in label.data_point:
corresponding_token = sentence.get_token(token.idx - label_length)
if corresponding_token is None:
tag_this = False
continue
if corresponding_token.idx in already_set_indices:
tag_this = False
continue
# only add if all tokens have no label
if tag_this:
# make and add a corresponding predicted span
predicted_span = Span(
[sentence.get_token(token.idx - label_length) for token in label.data_point]
)
predicted_span.add_label(label_name, value=label.value, score=label.score)
# set indices so that no token can be tagged twice
already_set_indices.extend(token.idx for token in predicted_span)
# clearing token embeddings to save memory
store_embeddings(batch, storage_mode=embedding_storage_mode)
if return_loss:
return overall_loss, overall_count
return None
def _print_predictions(self, batch, gold_label_type):
lines = []
if self.tars_model.predict_spans:
for datapoint in batch:
# all labels default to "O"
for token in datapoint:
token.set_label("gold_bio", "O")
token.set_label("predicted_bio", "O")
# set gold token-level
for gold_label in datapoint.get_labels(gold_label_type):
gold_span: Span = gold_label.data_point
prefix = "B-"
for token in gold_span:
token.set_label("gold_bio", prefix + gold_label.value)
prefix = "I-"
# set predicted token-level
for predicted_label in datapoint.get_labels("predicted"):
predicted_span: Span = predicted_label.data_point
prefix = "B-"
for token in predicted_span:
token.set_label("predicted_bio", prefix + predicted_label.value)
prefix = "I-"
# now print labels in CoNLL format
for token in datapoint:
eval_line = (
f"{token.text} "
f"{token.get_label('gold_bio').value} "
f"{token.get_label('predicted_bio').value}\n"
)
lines.append(eval_line)
lines.append("\n")
return lines
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TARSTagger":
from typing import cast
return cast("TARSTagger", super().load(model_path=model_path))
class TARSClassifier(FewshotClassifier):
    """TARS model for text classification.
    In the backend, the model uses a BERT based binary text classifier which given a <label, text> pair predicts the
    probability of two classes "True", and "False". The input data is a usual Sentence object which is inflated
    by the model internally before pushing it through the transformer stack of BERT.
    """
    # internal label type for the binary YES/NO match decision
    static_label_type = "tars_label"
    LABEL_MATCH = "YES"
    LABEL_NO_MATCH = "NO"
    def __init__(
        self,
        task_name: Optional[str] = None,
        label_dictionary: Optional[Dictionary] = None,
        label_type: Optional[str] = None,
        embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased",
        num_negative_labels_to_sample: int = 2,
        prefix: bool = True,
        **tagger_args,
    ) -> None:
        """Initializes a TarsClassifier.
        :param task_name: a string depicting the name of the task
        :param label_dictionary: dictionary of labels you want to predict
        :param label_type: name of the label type this task predicts
        :param embeddings: name of the pre-trained transformer model e.g.,
        'bert-base-uncased' etc
        :param num_negative_labels_to_sample: number of negative labels to sample for each
        positive labels against a sentence during training. Defaults to 2 negative
        labels for each positive label. The model would sample all the negative labels
        if None is passed. That slows down the training considerably.
        :param prefix: if True, the label text is prepended to the sentence text
        :param tagger_args: forwarded to the internal TextClassifier; presumably
        includes options such as multi_label / multi_label_threshold -- TODO
        confirm against TextClassifier's constructor signature
        """
        super().__init__()
        # a plain model name is turned into transformer document embeddings
        if isinstance(embeddings, str):
            embeddings = TransformerDocumentEmbeddings(
                model=embeddings,
                fine_tune=True,
                layers="-1",
                layer_mean=False,
            )
        # prepare TARS dictionary (binary NO/YES decision space)
        tars_dictionary = Dictionary(add_unk=False)
        tars_dictionary.add_item(self.LABEL_NO_MATCH)
        tars_dictionary.add_item(self.LABEL_MATCH)
        # initialize a bare-bones sequence tagger
        self.tars_model = TextClassifier(
            embeddings=embeddings,
            label_dictionary=tars_dictionary,
            label_type=self.static_label_type,
            **tagger_args,
        )
        # transformer separator placed between label and sentence text
        self.separator = str(self.tars_embeddings.tokenizer.sep_token)
        if self.tars_embeddings.tokenizer._bos_token:
            self.separator += str(self.tars_embeddings.tokenizer.bos_token)
        self.prefix = prefix
        self.num_negative_labels_to_sample = num_negative_labels_to_sample
        if task_name and label_dictionary and label_type:
            # Store task specific labels since TARS can handle multiple tasks
            self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
        else:
            log.info(
                "TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
                "before training this model"
            )
        # replace underscores in label values with spaces before matching
        self.clean_up_labels = True
    def _clean(self, label_value: str) -> str:
        # normalize a label string for matching; no-op when clean_up_labels is off
        if self.clean_up_labels:
            return label_value.replace("_", " ")
        else:
            return label_value
    def _get_tars_formatted_sentence(self, label, sentence):
        # build the <label, text> input pair and attach the gold YES/NO decision
        label = self._clean(label)
        original_text = sentence.to_tokenized_string()
        label_text_pair = (
            f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
        )
        # YES if the candidate label is among the sentence's gold labels
        sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())]
        tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH
        tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label)
        tars_sentence.copy_context_from_sentence(sentence)
        return tars_sentence
    def _get_state_dict(self):
        # serialize classifier state; embeddings stored without a torch state dict
        model_state = {
            **super()._get_state_dict(),
            "current_task": self._current_task,
            "tars_embeddings": self.tars_model.embeddings.save_embeddings(use_state_dict=False),
            "num_negative_labels_to_sample": self.num_negative_labels_to_sample,
            "task_specific_attributes": self._task_specific_attributes,
        }
        # label information only exists once a task has been set
        if self._current_task is not None:
            model_state.update(
                {
                    "label_type": self.get_current_label_type(),
                    "label_dictionary": self.get_current_label_dictionary(),
                }
            )
        return model_state
    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        # get the serialized embeddings
        tars_embeddings = state.get("tars_embeddings")
        # older checkpoints store the whole tars model instead of the embeddings
        if tars_embeddings is None:
            tars_model = state["tars_model"]
            if hasattr(tars_model, "embeddings"):
                tars_embeddings = tars_model.embeddings
            else:
                tars_embeddings = tars_model.document_embeddings
        # remap state dict for models serialized with Flair <= 0.11.3
        import re
        state_dict = state["state_dict"]
        for key in list(state_dict.keys()):
            state_dict[re.sub("^tars_model.document_embeddings\\.", "tars_model.embeddings.", key)] = state_dict.pop(
                key
            )
        # init new TARS classifier
        model: TARSClassifier = super()._init_model_with_state_dict(
            state,
            task_name=state["current_task"],
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type", "default_label"),
            embeddings=tars_embeddings,
            num_negative_labels_to_sample=state.get("num_negative_labels_to_sample"),
            **kwargs,
        )
        # set all task information
        model._task_specific_attributes = state.get("task_specific_attributes")
        return model
    @staticmethod
    def _fetch_model(model_name) -> str:
        # resolve known shorthand names to cached model paths; others pass through
        model_map = {}
        hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
        model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"])
        cache_dir = Path("models")
        if model_name in model_map:
            model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
        return model_name
    @property
    def tars_embeddings(self):
        # document embeddings of the internal binary text classifier
        return self.tars_model.embeddings
    def predict(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size=32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
        label_threshold: float = 0.5,
        multi_label: Optional[bool] = None,
        force_label: bool = False,
    ):
        """Predict sentences on the Text Classification task.
        Args:
            return_probabilities_for_all_classes: if True, all classes will be added with their respective confidences.
            sentences: a Sentence or a List of Sentence
            force_label: when multilabel is active, you can force to always get at least one prediction.
            multi_label: if True multiple labels can be predicted. Defaults to the setting of the configured task.
            label_threshold: when multi_label, specify the threshold when a class is considered as predicted.
            mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
            up to a point when it has no more effect.
            verbose: set to True to display a progress bar
            return_loss: set to True to also compute the loss
            label_name: set this to change the name of the label type that is predicted
            embedding_storage_mode: default is 'none' which doesn't store the embeddings in RAM. Only set to 'cpu' or 'gpu' if
            you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
            'gpu' to store embeddings in GPU memory.
        """
        if label_name is None:
            label_name = self.get_current_label_type()
        if multi_label is None:
            multi_label = self.is_current_task_multi_label()
        # in single-label mode every candidate competes; the threshold is disabled
        if not multi_label:
            label_threshold = 0.0
        # with torch.no_grad():
        if not sentences:
            return sentences
        if isinstance(sentences, Sentence):
            sentences = [sentences]
        Sentence.set_context_for_sentences(sentences)
        # sort longest-first to reduce padding within mini-batches
        reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)
        dataloader = DataLoader(
            dataset=FlairDatapointDataset(reordered_sentences),
            batch_size=mini_batch_size,
        )
        # progress bar for verbosity
        if verbose:
            progressbar = tqdm(dataloader)
            progressbar.set_description("Batch inference")
            dataloader = progressbar
        overall_loss = 0
        overall_count = 0
        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
        with torch.no_grad():
            for batch in dataloader:
                batch = self._filter_empty_sentences(batch)
                # stop if all sentences are empty
                if not batch:
                    continue
                # one TARS-formatted sentence per <label, sentence> pair
                tars_sentences: List[Sentence] = []
                all_labels_to_sentence: List[Dict[str, Sentence]] = []
                for sentence in batch:
                    # always remove tags first
                    sentence.remove_labels(label_name)
                    labels_to_sentence: Dict[str, Sentence] = {}
                    for label in all_labels:
                        tars_sentence = self._get_tars_formatted_sentence(label, sentence)
                        tars_sentences.append(tars_sentence)
                        labels_to_sentence[label] = tars_sentence
                    all_labels_to_sentence.append(labels_to_sentence)
                loss_and_count = self.tars_model.predict(
                    tars_sentences,
                    label_name=label_name,
                    mini_batch_size=mini_batch_size,
                    return_loss=return_loss,
                )
                if return_loss:
                    overall_loss += loss_and_count[0].item()
                    overall_count += loss_and_count[1]
                # go through each sentence in the batch
                for sentence, labels_to_sentence in zip(batch, all_labels_to_sentence):
                    # always remove tags first
                    sentence.remove_labels(label_name)
                    best_value = ""
                    best_score = 0.0
                    for label, tars_sentence in labels_to_sentence.items():
                        # add all labels that according to TARS match the text and are above threshold
                        predicted_tars_label = tars_sentence.get_label(label_name)
                        # a NO decision is converted into a (1 - score) YES confidence
                        score = (
                            predicted_tars_label.score
                            if predicted_tars_label.value == self.LABEL_MATCH
                            else 1 - predicted_tars_label.score
                        )
                        if score > label_threshold:
                            # do not add labels below confidence threshold
                            sentence.add_label(label_name, label, score)
                        if score > best_score:
                            best_score = score
                            best_value = label
                    # only use label with the highest confidence if enforcing single-label predictions
                    # add the label with the highest score even if below the threshold if force label is activated.
                    if not multi_label or (multi_label and force_label and len(sentence.get_labels(label_name)) == 0):
                        # remove previously added labels and only add the best label
                        sentence.remove_labels(label_name)
                        sentence.add_label(
                            typename=label_name,
                            value=best_value,
                            score=best_score,
                        )
                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)
        if return_loss:
            return overall_loss, overall_count
        return None
    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TARSClassifier":
        # narrow the base-class return type for type checkers
        from typing import cast
        return cast("TARSClassifier", super().load(model_path=model_path))
| 40,613 | 40.956612 | 126 | py |
flair | flair-master/flair/models/lemmatizer_model.py | import logging
from math import inf
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
import flair.embeddings
import flair.nn
from flair.data import Dictionary, Sentence, Token
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.training_utils import Result, store_embeddings
log = logging.getLogger("flair")
class Lemmatizer(flair.nn.Classifier[Sentence]):
    def __init__(
        self,
        embeddings: Optional[flair.embeddings.TokenEmbeddings] = None,
        label_type: str = "lemma",
        rnn_input_size: int = 50,
        rnn_hidden_size: int = 256,
        rnn_layers: int = 2,
        encode_characters: bool = True,
        char_dict: Union[str, Dictionary] = "common-chars-lemmatizer",
        max_sequence_length_dependent_on_input: bool = True,
        max_sequence_length: int = 20,
        use_attention: bool = True,
        beam_size: int = 1,
        start_symbol_for_encoding: bool = True,
        end_symbol_for_encoding: bool = True,
        bidirectional_encoding: bool = True,
    ) -> None:
        """Initializes a Lemmatizer model.
        The model consists of a decoder and an encoder. The encoder is either a RNN-cell (torch.nn.GRU)
        or a Token-Embedding from flair if an embedding is handed to the constructor (token_embedding).
        The output of the encoder is used as the initial hidden state to the decoder, which is an RNN-cell (GRU)
        that predicts the lemma of the given token one letter at a time.
        Note that one can use data in which only those words are annotated that differ from their lemma or data
        in which all words are annotated with a (maybe equal) lemma.
        :param embeddings: Embedding used to encode sentence
        :param rnn_input_size: Input size of the RNN('s). Each letter of a token is represented by a hot-one-vector
        over the given character dictionary. This vector is transformed to a input_size vector with a linear layer.
        :param rnn_hidden_size: size of the hidden state of the RNN('s).
        :param rnn_layers: Number of stacked RNN cells
        :param beam_size: Number of hypothesis used when decoding the output of the RNN. Only used in prediction.
        :param char_dict: Dictionary of characters the model is able to process. The dictionary must contain <unk> for
        the handling of unknown characters. If None, a standard dictionary will be loaded. One can either hand
        over a path to a dictionary or the dictionary itself.
        :param label_type: Name of the gold labels to use.
        :param max_sequence_length_dependent_on_input: If set to True, the maximum length of a decoded sequence in
        the prediction depends on the sentences you want to lemmatize. To be precise the maximum length is
        computed as the length of the longest token in the sentences plus one.
        :param max_sequence_length: If set to True and max_sequence_length_dependend_on_input is False a fixed
        maximum length for the decoding will be used for all sentences.
        :param use_attention: whether to use attention. Only sensible if encoding via RNN
        :param encode_characters: whether to run a character-level RNN encoder over each token
        :param start_symbol_for_encoding: whether to prepend the start symbol to encoder inputs
        :param end_symbol_for_encoding: whether to append the end symbol to encoder inputs
        :param bidirectional_encoding: whether the encoder GRU is bidirectional
        """
        super().__init__()
        self._label_type = label_type
        self.beam_size = beam_size
        self.max_sequence_length = max_sequence_length
        self.dependent_on_input = max_sequence_length_dependent_on_input
        self.start_symbol = start_symbol_for_encoding
        self.end_symbol = end_symbol_for_encoding
        self.bi_encoding = bidirectional_encoding
        self.rnn_hidden_size = rnn_hidden_size
        # whether to encode characters and whether to use attention (attention can only be used if chars are encoded)
        self.encode_characters = encode_characters
        self.use_attention = use_attention
        if not self.encode_characters:
            self.use_attention = False
        # character dictionary for decoding and encoding
        self.char_dictionary = char_dict if isinstance(char_dict, Dictionary) else Dictionary.load(char_dict)
        # make sure <unk> is in dictionary for handling of unknown characters
        if not self.char_dictionary.add_unk:
            raise KeyError("<unk> must be contained in char_dict")
        # add special symbols to dictionary if necessary and save respective indices
        self.dummy_index = self.char_dictionary.add_item("<>")
        self.start_index = self.char_dictionary.add_item("<S>")
        self.end_index = self.char_dictionary.add_item("<E>")
        # ---- ENCODER ----
        # encoder character embeddings
        self.encoder_character_embedding = nn.Embedding(len(self.char_dictionary), rnn_input_size)
        # encoder pre-trained embeddings
        self.encoder_embeddings = embeddings
        # the decoder's initial hidden state is a projection of all enabled encoder parts
        hidden_input_size = 0
        if embeddings:
            hidden_input_size += embeddings.embedding_length
        if encode_characters:
            hidden_input_size += rnn_hidden_size
        if encode_characters and bidirectional_encoding:
            hidden_input_size += rnn_hidden_size
        self.emb_to_hidden = nn.Linear(hidden_input_size, rnn_hidden_size)
        # encoder RNN
        self.encoder_rnn = nn.GRU(
            input_size=rnn_input_size,
            hidden_size=self.rnn_hidden_size,
            batch_first=True,
            num_layers=rnn_layers,
            bidirectional=self.bi_encoding,
        )
        # additional encoder linear layer if bidirectional encoding
        if self.bi_encoding:
            self.bi_hidden_states_to_hidden_size: Optional[nn.Linear] = nn.Linear(
                2 * self.rnn_hidden_size, self.rnn_hidden_size, bias=False
            )
        else:
            self.bi_hidden_states_to_hidden_size = None
        # ---- DECODER ----
        # decoder: linear layers to transform vectors to and from alphabet_size
        self.decoder_character_embedding = nn.Embedding(len(self.char_dictionary), rnn_input_size)
        # when using attention we concatenate attention outcome and decoder hidden states
        self.character_decoder = nn.Linear(
            2 * self.rnn_hidden_size if self.use_attention else self.rnn_hidden_size,
            len(self.char_dictionary),
        )
        # decoder RNN
        self.rnn_input_size = rnn_input_size
        self.rnn_layers = rnn_layers
        self.decoder_rnn = nn.GRU(
            input_size=rnn_input_size,
            hidden_size=self.rnn_hidden_size,
            batch_first=True,
            num_layers=rnn_layers,
        )
        # loss and softmax
        self.loss = nn.CrossEntropyLoss(reduction="sum")
        # self.unreduced_loss = nn.CrossEntropyLoss(reduction='none') # for prediction
        self.softmax = nn.Softmax(dim=2)
        self.to(flair.device)
@property
def label_type(self):
return self._label_type
def words_to_char_indices(
self,
tokens: List[str],
end_symbol=True,
start_symbol=False,
padding_in_front=False,
seq_length=None,
):
"""For a given list of strings this function creates index vectors that represent the characters of the strings.
Each string is represented by sequence_length (maximum string length + entries for special symbold) many
indices representing characters in self.char_dict.
One can manually set the vector length with the parameter seq_length, though the vector length is always
at least maximum string length in the list.
:param end_symbol: add self.end_index at the end of each representation
:param start_symbol: add self.start_index in front of of each representation
:param padding_in_front: whether to fill up with self.dummy_index in front or in back of strings
"""
# add additional columns for special symbols if necessary
c = int(end_symbol) + int(start_symbol)
max_length = max(len(token) for token in tokens) + c
sequence_length = max_length if not seq_length else max(seq_length, max_length)
# initialize with dummy symbols
tensor = self.dummy_index * torch.ones(len(tokens), sequence_length, dtype=torch.long).to(flair.device)
for i in range(len(tokens)):
dif = sequence_length - (len(tokens[i]) + c)
shift = 0
if padding_in_front:
shift += dif
if start_symbol:
tensor[i][0 + shift] = self.start_index
if end_symbol:
tensor[i][len(tokens[i]) + int(start_symbol) + shift] = self.end_index
for index, letter in enumerate(tokens[i]):
tensor[i][index + int(start_symbol) + shift] = self.char_dictionary.get_idx_for_item(letter)
return tensor
def forward_pass(self, sentences: Union[List[Sentence], Sentence]):
if isinstance(sentences, Sentence):
sentences = [sentences]
# encode inputs
initial_hidden_states, all_encoder_outputs = self.encode(sentences)
# get labels (we assume each token has a lemma label)
labels = [token.get_label(self._label_type).value for sentence in sentences for token in sentence]
# get char indices for labels of sentence
# (batch_size, max_sequence_length) batch_size = #words in sentence,
# max_sequence_length = length of longest label of sentence + 1
decoder_input_indices = self.words_to_char_indices(
labels, start_symbol=True, end_symbol=False, padding_in_front=False
)
# get char embeddings
# (batch_size,max_sequence_length,input_size), i.e. replaces char indices with vectors of length input_size
output_vectors, _ = self.decode(decoder_input_indices, initial_hidden_states, all_encoder_outputs)
return output_vectors, labels
def decode(self, decoder_input_indices, initial_hidden_states, all_encoder_outputs):
# take decoder input and initial hidden and pass through RNN
input_tensor = self.decoder_character_embedding(decoder_input_indices)
output, hidden = self.decoder_rnn(input_tensor, initial_hidden_states)
# if all encoder outputs are provided, use attention
if self.use_attention:
attention_coeff = torch.softmax(torch.matmul(all_encoder_outputs, torch.transpose(output, 1, 2)), dim=1)
# take convex combinations of encoder hidden states as new output using the computed attention coefficients
attention_output = torch.transpose(
torch.matmul(torch.transpose(all_encoder_outputs, 1, 2), attention_coeff),
1,
2,
)
output = torch.cat((output, attention_output), dim=2)
# transform output to vectors of size len(char_dict) -> (batch_size, max_sequence_length, alphabet_size)
output_vectors = self.character_decoder(output)
return output_vectors, hidden
def _prepare_tensors(self, sentences: List[Sentence]) -> Tuple[Optional[torch.Tensor], ...]:
# get all tokens
tokens = [token for sentence in sentences for token in sentence]
# encode input characters by sending them through RNN
if self.encode_characters:
# get one-hots for characters and add special symbols / padding
encoder_input_indices = self.words_to_char_indices(
[token.text for token in tokens],
start_symbol=self.start_symbol,
end_symbol=self.end_symbol,
padding_in_front=False,
)
# determine length of each token
extra = 0
if self.start_symbol:
extra += 1
if self.end_symbol:
extra += 1
lengths = torch.tensor([len(token.text) + extra for token in tokens], device=flair.device)
else:
encoder_input_indices = None
lengths = None
if self.encoder_embeddings:
# embed sentences
self.encoder_embeddings.embed(sentences)
# create initial hidden state tensor for batch (num_layers, batch_size, hidden_size)
token_embedding_hidden = torch.stack(
self.rnn_layers * [torch.stack([token.get_embedding() for token in tokens])]
)
else:
token_embedding_hidden = None
return encoder_input_indices, lengths, token_embedding_hidden
    def forward(
        self,
        encoder_input_indices: Optional[torch.Tensor],
        lengths: Optional[torch.Tensor],
        token_embedding_hidden: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Encoder forward pass over a prepared batch.

        Takes the tensors produced by _prepare_tensors() and returns the
        initial decoder hidden state plus (when characters are encoded) the
        per-position encoder outputs used for attention; otherwise None.
        """
        # variable to store initial hidden states for decoder
        initial_hidden_for_decoder = []
        # encode input characters by sending them through RNN
        if encoder_input_indices is not None and lengths is not None:
            input_vectors = self.encoder_character_embedding(encoder_input_indices)
            # pack so the GRU skips the dummy-padded tail of each token
            packed_sequence = torch.nn.utils.rnn.pack_padded_sequence(
                input_vectors,
                lengths,
                enforce_sorted=False,
                batch_first=True,
            )
            encoding_flat, initial_hidden_states = self.encoder_rnn(packed_sequence)
            encoder_outputs, lengths = torch.nn.utils.rnn.pad_packed_sequence(encoding_flat, batch_first=True)
            # since bidirectional rnn is only used in encoding we need to project outputs to hidden_size of decoder
            if self.bi_encoding and self.bi_hidden_states_to_hidden_size is not None:
                encoder_outputs = self.bi_hidden_states_to_hidden_size(encoder_outputs)
                # concatenate the final hidden states of the encoder. These will be projected to hidden_size of
                # decoder later with self.emb_to_hidden
                # conditions pairs each layer's forward state with its backward state
                conditions = torch.cat(2 * [torch.eye(self.rnn_layers).bool()])
                bi_states = [initial_hidden_states[conditions[:, i], :, :] for i in range(self.rnn_layers)]
                initial_hidden_states = torch.stack([torch.cat((b[0, :, :], b[1, :, :]), dim=1) for b in bi_states])
            initial_hidden_for_decoder.append(initial_hidden_states)
            # mask out vectors that correspond to a dummy symbol (TODO: check attention masking)
            mask = torch.cat(
                (self.rnn_hidden_size * [(encoder_input_indices == self.dummy_index).unsqueeze(2)]),
                dim=2,
            )
            all_encoder_outputs: Optional[torch.Tensor] = torch.where(
                mask, torch.tensor(0.0, device=flair.device), encoder_outputs
            )
        else:
            all_encoder_outputs = None
        # use token embedding as initial hidden state for decoder
        if token_embedding_hidden is not None:
            initial_hidden_for_decoder.append(token_embedding_hidden)
        # concatenate everything together and project to appropriate size for decoder
        initial_hidden = self.emb_to_hidden(torch.cat(initial_hidden_for_decoder, dim=2))
        return initial_hidden, all_encoder_outputs
def encode(self, sentences: List[Sentence]):
tensors = self._prepare_tensors(sentences)
return self.forward(*tensors)
    def encode_token(self, token: Token):
        """Encode a single token (no batching), mirroring forward() for one word.

        Returns the initial decoder hidden state and, when characters are
        encoded, the per-character encoder outputs for attention (else None).
        """
        # variable to store initial hidden states for decoder
        initial_hidden_for_decoder = []
        all_encoder_outputs = None
        # encode input characters by sending them through RNN
        if self.encode_characters:
            # note that we do not need to fill up with dummy symbols since we process each token seperately
            encoder_input_indices = self.words_to_char_indices(
                [token.text], start_symbol=self.start_symbol, end_symbol=self.end_symbol
            )
            # embed character one-hots
            input_vector = self.encoder_character_embedding(encoder_input_indices)
            # send through encoder RNN (produces initial hidden for decoder)
            all_encoder_outputs, initial_hidden_states = self.encoder_rnn(input_vector)
            # since bidirectional rnn is only used in encoding we need to project outputs to hidden_size of decoder
            if self.bi_encoding and self.bi_hidden_states_to_hidden_size is not None:
                # project 2*hidden_size to hidden_size
                all_encoder_outputs = self.bi_hidden_states_to_hidden_size(all_encoder_outputs)
                # concatenate the final hidden states of the encoder. These will be projected to hidden_size of decoder
                # later with self.emb_to_hidden
                # conditions pairs each layer's forward state with its backward state
                conditions = torch.cat(2 * [torch.eye(self.rnn_layers).bool()])
                bi_states = [initial_hidden_states[conditions[:, i], :, :] for i in range(self.rnn_layers)]
                initial_hidden_states = torch.stack([torch.cat((b[0, :, :], b[1, :, :]), dim=1) for b in bi_states])
            initial_hidden_for_decoder.append(initial_hidden_states)
        # use token embedding as initial hidden state for decoder
        if self.encoder_embeddings:
            # create initial hidden state tensor for batch (num_layers, batch_size, hidden_size)
            token_embedding_hidden = torch.stack(self.rnn_layers * [token.get_embedding()]).unsqueeze(1)
            initial_hidden_for_decoder.append(token_embedding_hidden)
        # concatenate everything together and project to appropriate size for decoder
        initial_hidden_for_decoder = self.emb_to_hidden(torch.cat(initial_hidden_for_decoder, dim=2))
        return initial_hidden_for_decoder, all_encoder_outputs
def _calculate_loss(self, scores, labels):
# score vector has to have a certain format for (2d-)loss fct (batch_size, alphabet_size, 1, max_seq_length)
scores_in_correct_format = scores.permute(0, 2, 1).unsqueeze(2)
# create target vector (batch_size, max_label_seq_length + 1)
target = self.words_to_char_indices(labels, start_symbol=False, end_symbol=True, padding_in_front=False)
target.unsqueeze_(1) # (batch_size, 1, max_label_seq_length + 1)
return self.loss(scores_in_correct_format, target), len(labels)
def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> Tuple[torch.Tensor, int]:
    """Run a forward pass over *sentences* and return ``(loss, token_count)``."""
    return self._calculate_loss(*self.forward_pass(sentences))
def predict(
    self,
    sentences: Union[List[Sentence], Sentence],
    mini_batch_size: int = 16,
    return_probabilities_for_all_classes: bool = False,
    verbose: bool = False,
    label_name="predicted",
    return_loss=False,
    embedding_storage_mode="none",
):
    """Predict lemmas of words for a given (list of) sentence(s).

    :param sentences: sentences to predict
    :param mini_batch_size: number of tokens that are send through the RNN simultaneously, assuming batching_in_rnn
        is set to True
    :param return_probabilities_for_all_classes: unused by this model; kept for interface compatibility
    :param verbose: If True, lemmatized sentences will be printed in the console.
    :param label_name: label name used for predicted lemmas
    :param return_loss: whether or not to compute and return loss. Setting it to True only makes sense if labels
        are provided
    :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
        you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
    """
    if isinstance(sentences, Sentence):
        sentences = [sentences]

    Sentence.set_context_for_sentences(sentences)

    # filter empty sentences (note: if every sentence is empty, the empty list is returned as-is)
    sentences = [sentence for sentence in sentences if len(sentence) > 0]
    if len(sentences) == 0:
        return sentences

    # max length of the predicted sequences
    if not self.dependent_on_input:
        max_length = self.max_sequence_length
    else:
        max_length = max(len(token.text) + 1 for sentence in sentences for token in sentence)

    # for printing
    line_to_print = ""

    overall_loss = 0.0
    number_tokens_in_total = 0

    with torch.no_grad():
        dataloader = DataLoader(dataset=FlairDatapointDataset(sentences), batch_size=mini_batch_size)

        for batch in dataloader:
            # stop if all sentences are empty
            if not batch:
                continue

            # remove previously predicted labels of this type
            for sentence in batch:
                for token in sentence:
                    token.remove_labels(label_name)

            # create list of tokens in batch
            tokens_in_batch = [token for sentence in batch for token in sentence]
            number_tokens = len(tokens_in_batch)
            number_tokens_in_total += number_tokens

            # encode inputs
            hidden, all_encoder_outputs = self.encode(batch)

            # create input for first pass (batch_size, 1, input_size), first letter is special character <S>
            # sequence length is always set to one in prediction
            input_indices = self.start_index * torch.ones(
                number_tokens, dtype=torch.long, device=flair.device
            ).unsqueeze(1)

            # option 1: greedy decoding
            if self.beam_size == 1:
                # predictions
                predicted: List[List[int]] = [[] for _ in range(number_tokens)]

                for _decode_step in range(max_length):
                    # decode next character
                    output_vectors, hidden = self.decode(input_indices, hidden, all_encoder_outputs)

                    log_softmax_probs = torch.nn.functional.log_softmax(output_vectors, dim=2)
                    # pick the single output with the highest probability for each token
                    input_indices = log_softmax_probs.argmax(dim=2)

                    for i in range(number_tokens):
                        if len(predicted[i]) > 0 and predicted[i][-1] == self.end_index:
                            continue
                        predicted[i].append(input_indices[i].item())

                    # free early exit: stop decoding once every token has produced its end symbol <E>
                    if all(p and p[-1] == self.end_index for p in predicted):
                        break

                for t_id, token in enumerate(tokens_in_batch):
                    predicted_lemma = "".join(
                        self.char_dictionary.get_item_for_index(idx) if idx != self.end_index else ""
                        for idx in predicted[t_id]
                    )
                    # fix: collect output for verbose printing (previously only the beam-search
                    # branch filled line_to_print, so verbose=True printed nothing for beam_size=1)
                    line_to_print += predicted_lemma
                    line_to_print += " "
                    token.set_label(typename=label_name, value=predicted_lemma)

            # option 2: beam search
            else:
                output_vectors, hidden = self.decode(input_indices, hidden, all_encoder_outputs)

                log_softmax_probs = torch.nn.functional.log_softmax(output_vectors, dim=2).squeeze(1)
                # make sure no dummy symbol <> or start symbol <S> is predicted
                log_softmax_probs[:, self.dummy_index] = -inf
                log_softmax_probs[:, self.start_index] = -inf

                # pick top beam size many outputs with highest probabilities
                log_probabilities, leading_indices = log_softmax_probs.topk(self.beam_size, 1)
                # leading_indices and log_probabilities have size (batch_size, beam_size)

                # keep scores of beam_size many hypothesis for each token in the batch
                scores = log_probabilities.view(-1, 1)

                # stack all leading indices of all hypothesis and corresponding hidden states in two tensors
                leading_indices = leading_indices.view(-1, 1)  # this vector goes through RNN in each iteration
                hidden_states_beam = torch.stack(self.beam_size * [hidden], dim=2).view(
                    self.rnn_layers, -1, self.rnn_hidden_size
                )

                # save sequences so far
                sequences = torch.tensor([[i.item()] for i in leading_indices], device=flair.device)

                # keep track of how many hypothesis were completed for each token
                n_completed = [0 for _ in range(number_tokens)]  # cpu
                final_candidates: List[List[Tuple[torch.Tensor, float]]] = [[] for _ in range(number_tokens)]  # cpu

                # if all_encoder_outputs returned, expand them to beam size (otherwise keep this as None)
                batched_encoding_output = (
                    torch.stack(self.beam_size * [all_encoder_outputs], dim=1).view(
                        self.beam_size * number_tokens, -1, self.rnn_hidden_size
                    )
                    if self.use_attention
                    else None
                )

                for _j in range(1, max_length):
                    output_vectors, hidden_states_beam = self.decode(
                        leading_indices, hidden_states_beam, batched_encoding_output
                    )

                    # decode with log softmax
                    out_log_probs = torch.nn.functional.log_softmax(output_vectors, dim=2)
                    # make sure no dummy symbol <> or start symbol <S> is predicted
                    out_log_probs[:, 0, self.dummy_index] = -inf
                    out_log_probs[:, 0, self.start_index] = -inf
                    log_probabilities, index_candidates = out_log_probs.topk(self.beam_size, 2)
                    log_probabilities.squeeze_(1)
                    index_candidates.squeeze_(1)

                    # check if an end symbol <E> has been predicted and, in that case, set hypothesis aside
                    # (fix: loop variable renamed, it previously shadowed the builtin `tuple`)
                    end_symbols = (index_candidates == self.end_index).nonzero(as_tuple=False)
                    for end_pos in end_symbols:
                        # if the sequence is already ended, do not record as candidate
                        if sequences[end_pos[0], -1].item() == self.end_index:
                            continue

                        # index of token in list tokens_in_batch
                        token_number = int(torch.div(end_pos[0], self.beam_size, rounding_mode="trunc"))
                        seq = sequences[end_pos[0], :]  # hypothesis sequence
                        # length-normalized hypothesis score
                        score = (scores[end_pos[0]] + log_probabilities[end_pos[0], end_pos[1]]) / (len(seq) + 1)

                        final_candidates[token_number].append((seq, score.item()))
                        # TODO: remove token if number of completed hypothesis exceeds given value
                        n_completed[token_number] += 1

                        # set score of corresponding entry to -inf so it will not be expanded
                        log_probabilities[end_pos[0], end_pos[1]] = -inf

                    # get leading_indices for next expansion
                    # find highest scoring hypothesis among beam_size*beam_size possible ones for each token

                    # take beam_size many copies of scores vector and add scores of possible new extensions
                    # size (beam_size*batch_size, beam_size)
                    hypothesis_scores = torch.cat(self.beam_size * [scores], dim=1) + log_probabilities

                    # reshape to vector of size (batch_size, beam_size*beam_size),
                    # each row contains beam_size*beam_size scores of the new possible hypothesis
                    hypothesis_scores_per_token = hypothesis_scores.view(number_tokens, self.beam_size**2)

                    # choose beam_size best for each token - size (batch_size, beam_size)
                    (
                        best_scores,
                        indices_per_token,
                    ) = hypothesis_scores_per_token.topk(self.beam_size, 1)

                    # out of indices_per_token we now need to recompute the original indices of the hypothesis in
                    # a list of length beam_size*batch_size
                    # where the first three inidices belong to the first token, the next three to the second token,
                    # and so on
                    beam_numbers: List[int] = []
                    seq_numbers: List[int] = []

                    for i, row in enumerate(indices_per_token):
                        beam_numbers.extend(i * self.beam_size + index.item() // self.beam_size for index in row)
                        seq_numbers.extend(index.item() % self.beam_size for index in row)

                    # with these indices we can compute the tensors for the next iteration
                    # expand sequences with corresponding index
                    sequences = torch.cat(
                        (
                            sequences[beam_numbers],
                            index_candidates[beam_numbers, seq_numbers].unsqueeze(1),
                        ),
                        dim=1,
                    )
                    # add log-probabilities to the scores
                    scores = scores[beam_numbers] + log_probabilities[beam_numbers, seq_numbers].unsqueeze(1)
                    # save new leading indices
                    leading_indices = index_candidates[beam_numbers, seq_numbers].unsqueeze(1)
                    # save corresponding hidden states
                    hidden_states_beam = hidden_states_beam[:, beam_numbers, :]

                # it may happen that no end symbol <E> is predicted for a token in all of the max_length iterations
                # in that case we append one of the final sequences without end symbol to the final_candidates
                best_scores, indices = scores.view(number_tokens, -1).topk(1, 1)
                for j, (score, index) in enumerate(zip(best_scores.squeeze(1), indices.squeeze(1))):
                    if len(final_candidates[j]) == 0:
                        beam = j * self.beam_size + index.item()
                        final_candidates[j].append((sequences[beam, :], score.item() / max_length))

                # get best final hypothesis for each token
                output_sequences = []
                for candidate in final_candidates:
                    l_ordered = sorted(candidate, key=lambda tup: tup[1], reverse=True)
                    output_sequences.append(l_ordered[0])

                # get characters from index sequences and add predicted label to token
                for i, out_seq in enumerate(output_sequences):
                    predicted_lemma = ""
                    for idx in out_seq[0]:
                        predicted_lemma += self.char_dictionary.get_item_for_index(idx)
                    line_to_print += predicted_lemma
                    line_to_print += " "
                    # fix: use set_label for consistency with the greedy branch
                    # (add_tag is the deprecated legacy API for the same operation)
                    tokens_in_batch[i].set_label(typename=label_name, value=predicted_lemma)

            if return_loss:
                overall_loss += self.forward_loss(batch)[0].item()

            store_embeddings(batch, storage_mode=embedding_storage_mode)

    if verbose:
        log.info(line_to_print)

    if return_loss:
        return overall_loss, number_tokens_in_total
    return None
def _get_state_dict(self):
    """Collect everything needed to re-instantiate this Lemmatizer on load."""
    state = super()._get_state_dict()
    state.update(
        embeddings=self.encoder_embeddings.save_embeddings(use_state_dict=False),
        rnn_input_size=self.rnn_input_size,
        rnn_hidden_size=self.rnn_hidden_size,
        rnn_layers=self.rnn_layers,
        char_dict=self.char_dictionary,
        label_type=self._label_type,
        beam_size=self.beam_size,
        max_sequence_length=self.max_sequence_length,
        dependent_on_input=self.dependent_on_input,
        use_attention=self.use_attention,
        encode_characters=self.encode_characters,
        start_symbol=self.start_symbol,
        end_symbol=self.end_symbol,
        bidirectional_encoding=self.bi_encoding,
    )
    return state
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
    """Instantiate a Lemmatizer from a serialized state (inverse of `_get_state_dict`)."""
    # translate serialized keys into constructor argument names
    constructor_args = {
        "embeddings": state.get("embeddings"),
        "encode_characters": state.get("encode_characters"),
        "rnn_input_size": state.get("rnn_input_size"),
        "rnn_hidden_size": state.get("rnn_hidden_size"),
        "rnn_layers": state.get("rnn_layers"),
        "char_dict": state.get("char_dict"),
        "label_type": state.get("label_type"),
        "beam_size": state.get("beam_size"),
        "max_sequence_length_dependent_on_input": state.get("dependent_on_input"),
        "max_sequence_length": state.get("max_sequence_length"),
        "use_attention": state.get("use_attention"),
        "start_symbol_for_encoding": state.get("start_symbol"),
        "end_symbol_for_encoding": state.get("end_symbol"),
        "bidirectional_encoding": state.get("bidirectional_encoding"),
    }
    # duplicate keys in kwargs raise TypeError, exactly as explicit keywords would
    return super()._init_model_with_state_dict(state, **constructor_args, **kwargs)
def _print_predictions(self, batch, gold_label_type):
lines = []
for sentence in batch:
eval_line = (
f" - Text: {' '.join([token.text for token in sentence])}\n"
f" - Gold-Lemma: {' '.join([token.get_label(gold_label_type).value for token in sentence])}\n"
f" - Predicted: {' '.join([token.get_label('predicted').value for token in sentence])}\n\n"
)
lines.append(eval_line)
return lines
def evaluate(self, *args, **kwargs) -> Result:
    """Evaluate exactly like the parent class, but truncate the "by class" printout.

    Only the part of `detailed_results` before the first blank line is kept.
    """
    result = super().evaluate(*args, **kwargs)
    summary, _, _ = result.detailed_results.partition("\n\n")
    result.detailed_results = summary
    return result
| 34,759 | 48.026798 | 120 | py |
flair | flair-master/flair/models/__init__.py | from .clustering import ClusteringModel
from .entity_linker_model import EntityLinker
from .language_model import LanguageModel
from .lemmatizer_model import Lemmatizer
from .multitask_model import MultitaskModel
from .pairwise_classification_model import TextPairClassifier
from .pairwise_regression_model import TextPairRegressor
from .regexp_tagger import RegexpTagger
from .relation_classifier_model import RelationClassifier
from .relation_extractor_model import RelationExtractor
from .sequence_tagger_model import SequenceTagger
from .tars_model import FewshotClassifier, TARSClassifier, TARSTagger
from .text_classification_model import TextClassifier
from .text_regression_model import TextRegressor
from .word_tagger_model import TokenClassifier, WordTagger
# public API of flair.models: every model class re-exported at package level
__all__ = [
    "EntityLinker",
    "LanguageModel",
    "Lemmatizer",
    "TextPairClassifier",
    "TextPairRegressor",
    "RelationClassifier",
    "RelationExtractor",
    "RegexpTagger",
    "SequenceTagger",
    "TokenClassifier",
    "WordTagger",
    "FewshotClassifier",
    "TARSClassifier",
    "TARSTagger",
    "TextClassifier",
    "TextRegressor",
    "ClusteringModel",
    "MultitaskModel",
]
| 1,180 | 30.918919 | 69 | py |
flair | flair-master/flair/models/language_model.py | import math
from pathlib import Path
from typing import List, Optional, Tuple, Union
import torch
from torch import logsumexp, nn
from torch.optim import Optimizer
import flair
from flair.data import Dictionary
from flair.nn.recurrent import create_recurrent_layer
class LanguageModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder.

    Character-level language model: the encoder embeds character indices, the
    recurrent module processes them, and the optional decoder projects hidden
    states back onto the character vocabulary.
    """

    def __init__(
        self,
        dictionary: Dictionary,
        is_forward_lm: bool,
        hidden_size: int,
        nlayers: int,
        embedding_size: int = 100,
        nout=None,
        document_delimiter: str = "\n",
        dropout=0.1,
        recurrent_type="LSTM",
        has_decoder=True,
    ) -> None:
        super().__init__()

        self.dictionary = dictionary
        self.document_delimiter = document_delimiter
        self.is_forward_lm: bool = is_forward_lm

        self.dropout = dropout
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.nlayers = nlayers

        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(len(dictionary), embedding_size)

        self.rnn, self.state_count = create_recurrent_layer(
            recurrent_type, embedding_size, hidden_size, nlayers, dropout
        )
        self.recurrent_type = recurrent_type
        self.hidden = None

        self.nout = nout
        if nout is not None:
            # optional projection that maps RNN outputs down to `nout` dimensions
            self.proj: Optional[nn.Linear] = nn.Linear(hidden_size, nout)
            self.initialize(self.proj.weight)
            hidden_size = nout
        else:
            self.proj = None

        if has_decoder:
            self.decoder: Optional[nn.Linear] = nn.Linear(hidden_size, len(dictionary))
        else:
            self.decoder = None

        self.init_weights()

        # auto-spawn on GPU if available
        self.to(flair.device)

    def init_weights(self):
        """Uniformly initialize encoder and decoder weights in [-0.1, 0.1]."""
        initrange = 0.1
        self.encoder.weight.detach().uniform_(-initrange, initrange)
        if self.decoder is not None:
            self.decoder.bias.detach().fill_(0)
            self.decoder.weight.detach().uniform_(-initrange, initrange)

    def set_hidden(self, hidden):
        self.hidden = hidden

    def forward(self, input, hidden, ordered_sequence_lengths=None, decode=True):
        # note: parameter names (`input` included) are part of the public interface and kept as-is
        encoded = self.encoder(input)
        emb = self.drop(encoded)

        if hasattr(self.rnn, "flatten_parameters"):
            self.rnn.flatten_parameters()

        # some recurrent layers carry a single hidden state, LSTMs carry a (h, c) pair
        if len(hidden) == 1:
            output, h = self.rnn(emb, hidden[0])
            hidden = (h,)
        else:
            output, hidden = self.rnn(emb, hidden)

        if self.proj is not None:
            output = self.proj(output)

        output = self.drop(output)

        if decode:
            decoded = self.decoder(output)
            return (
                decoded,
                output,
                hidden,
            )
        else:
            return output, hidden

    def init_hidden(self, bsz):
        """Create zeroed initial hidden state(s) for a batch of size *bsz*."""
        weight = next(self.parameters()).detach()
        return tuple(
            weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach() for _ in range(self.state_count)
        )

    def get_representation(
        self,
        strings: List[str],
        start_marker: str,
        end_marker: str,
        chars_per_chunk: int = 512,
    ):
        """Compute RNN outputs for a batch of strings, processed in chunks of
        at most *chars_per_chunk* characters to bound memory usage."""
        len_longest_str: int = len(max(strings, key=len))

        # pad strings with whitespaces to longest sentence
        padded_strings: List[str] = []

        for string in strings:
            if not self.is_forward_lm:
                string = string[::-1]

            padded = f"{start_marker}{string}{end_marker}"
            padded_strings.append(padded)

        # cut up the input into chunks of max charlength = chunk_size
        chunks = []
        splice_begin = 0
        longest_padded_str: int = len_longest_str + len(start_marker) + len(end_marker)
        for splice_end in range(chars_per_chunk, longest_padded_str, chars_per_chunk):
            chunks.append([text[splice_begin:splice_end] for text in padded_strings])
            splice_begin = splice_end

        chunks.append([text[splice_begin:longest_padded_str] for text in padded_strings])
        hidden = self.init_hidden(len(chunks[0]))

        padding_char_index = self.dictionary.get_idx_for_item(" ")

        batches: List[torch.Tensor] = []
        # push each chunk through the RNN language model
        for chunk in chunks:
            len_longest_chunk: int = len(max(chunk, key=len))
            sequences_as_char_indices: List[List[int]] = []
            for string in chunk:
                char_indices = self.dictionary.get_idx_for_items(list(string))
                char_indices += [padding_char_index] * (len_longest_chunk - len(string))
                sequences_as_char_indices.append(char_indices)
            t = torch.tensor(sequences_as_char_indices, dtype=torch.long).to(device=flair.device, non_blocking=True)
            batches.append(t)

        output_parts = []
        for batch in batches:
            batch = batch.transpose(0, 1)
            rnn_output, hidden = self.forward(batch, hidden, decode=False)
            output_parts.append(rnn_output)

        # concatenate all chunks to make final output
        output = torch.cat(output_parts)

        return output

    def get_output(self, text: str):
        # NOTE(review): the decoder prediction is discarded here; only the detached
        # final hidden state is returned — confirm this is the intended contract
        char_indices = [self.dictionary.get_idx_for_item(char) for char in text]
        input_vector = torch.LongTensor([char_indices]).transpose(0, 1)

        hidden = self.init_hidden(1)
        prediction, rnn_output, hidden = self.forward(input_vector, hidden)

        return self.repackage_hidden(hidden)

    def repackage_hidden(self, h):
        """Wraps hidden states in new Variables, to detach them from their history."""
        # fix: use isinstance instead of a type() comparison, so tensor subclasses match too
        if isinstance(h, torch.Tensor):
            return h.clone().detach()
        else:
            return tuple(self.repackage_hidden(v) for v in h)

    @staticmethod
    def initialize(matrix):
        """Xavier-style uniform initialization of a weight matrix in place."""
        in_, out_ = matrix.size()
        stdv = math.sqrt(3.0 / (in_ + out_))
        matrix.detach().uniform_(-stdv, stdv)

    @classmethod
    def load_language_model(cls, model_file: Union[Path, str], has_decoder=True):
        """Load a serialized LanguageModel; `has_decoder=False` drops the decoder head."""
        state = torch.load(str(model_file), map_location=flair.device)

        # older serializations may lack these keys
        document_delimiter = state.get("document_delimiter", "\n")
        has_decoder = state.get("has_decoder", True) and has_decoder

        model = cls(
            dictionary=state["dictionary"],
            is_forward_lm=state["is_forward_lm"],
            hidden_size=state["hidden_size"],
            nlayers=state["nlayers"],
            embedding_size=state["embedding_size"],
            nout=state["nout"],
            document_delimiter=document_delimiter,
            dropout=state["dropout"],
            recurrent_type=state.get("recurrent_type", "lstm"),
            has_decoder=has_decoder,
        )
        model.load_state_dict(state["state_dict"], strict=has_decoder)
        model.eval()
        model.to(flair.device)

        return model

    @classmethod
    def load_checkpoint(cls, model_file: Union[Path, str]):
        """Load a training checkpoint; returns model plus optimizer/epoch/split/loss metadata."""
        state = torch.load(str(model_file), map_location=flair.device)

        epoch = state["epoch"] if "epoch" in state else None
        split = state["split"] if "split" in state else None
        loss = state["loss"] if "loss" in state else None
        document_delimiter = state.get("document_delimiter", "\n")

        optimizer_state_dict = state.get("optimizer_state_dict")

        model = cls(
            dictionary=state["dictionary"],
            is_forward_lm=state["is_forward_lm"],
            hidden_size=state["hidden_size"],
            nlayers=state["nlayers"],
            embedding_size=state["embedding_size"],
            nout=state["nout"],
            document_delimiter=document_delimiter,
            dropout=state["dropout"],
            recurrent_type=state.get("recurrent_type", "lstm"),
        )
        model.load_state_dict(state["state_dict"])
        model.eval()
        model.to(flair.device)

        return {
            "model": model,
            "epoch": epoch,
            "split": split,
            "loss": loss,
            "optimizer_state_dict": optimizer_state_dict,
        }

    def save_checkpoint(
        self,
        file: Union[Path, str],
        optimizer: Optimizer,
        epoch: int,
        split: int,
        loss: float,
    ):
        """Serialize model plus optimizer state and training progress to *file*."""
        model_state = {
            "state_dict": self.state_dict(),
            "dictionary": self.dictionary,
            "is_forward_lm": self.is_forward_lm,
            "hidden_size": self.hidden_size,
            "nlayers": self.nlayers,
            "embedding_size": self.embedding_size,
            "nout": self.nout,
            "document_delimiter": self.document_delimiter,
            "dropout": self.dropout,
            "optimizer_state_dict": optimizer.state_dict(),
            "epoch": epoch,
            "split": split,
            "loss": loss,
            "recurrent_type": self.recurrent_type,
            "has_decoder": self.decoder is not None,
        }

        torch.save(model_state, str(file), pickle_protocol=4)

    def save(self, file: Union[Path, str]):
        """Serialize model weights and constructor arguments to *file*."""
        model_state = {
            "state_dict": self.state_dict(),
            "dictionary": self.dictionary,
            "is_forward_lm": self.is_forward_lm,
            "hidden_size": self.hidden_size,
            "nlayers": self.nlayers,
            "embedding_size": self.embedding_size,
            "nout": self.nout,
            "document_delimiter": self.document_delimiter,
            "dropout": self.dropout,
            "recurrent_type": self.recurrent_type,
            "has_decoder": self.decoder is not None,
        }

        torch.save(model_state, str(file), pickle_protocol=4)

    def generate_text(
        self,
        prefix: str = "\n",
        number_of_characters: int = 1000,
        temperature: float = 1.0,
        break_on_suffix=None,
    ) -> Tuple[str, float]:
        """Sample text from the language model.

        Returns the generated text (including *prefix*) and its negative mean
        per-character log-probability.
        """
        if prefix == "":
            prefix = "\n"

        with torch.no_grad():
            characters = []

            idx2item = self.dictionary.idx2item

            # initial hidden state
            hidden = self.init_hidden(1)

            if len(prefix) > 1:
                char_tensors = []
                for character in prefix[:-1]:
                    char_tensors.append(
                        torch.tensor(self.dictionary.get_idx_for_item(character)).unsqueeze(0).unsqueeze(0)
                    )

                input = torch.cat(char_tensors).to(flair.device)

                prediction, _, hidden = self.forward(input, hidden)

            input = torch.tensor(self.dictionary.get_idx_for_item(prefix[-1])).unsqueeze(0).unsqueeze(0)

            log_prob = torch.zeros(1, device=flair.device)

            for _i in range(number_of_characters):
                input = input.to(flair.device)

                # get predicted weights
                prediction, _, hidden = self.forward(input, hidden)
                prediction = prediction.squeeze().detach()
                decoder_output = prediction

                # divide by temperature
                prediction = prediction.div(temperature)

                # to prevent overflow problem with small temperature values, subtract largest value from all
                # this makes a vector in which the largest value is 0
                # (fix: local renamed so it no longer shadows the builtin `max`)
                max_value = torch.max(prediction)
                prediction -= max_value

                # compute word weights with exponential function
                word_weights = prediction.exp().cpu()

                # try sampling multinomial distribution for next character
                try:
                    word_idx = torch.multinomial(word_weights, 1)[0]
                except RuntimeError:
                    # fix for the previous bare `except:`: torch.multinomial raises a
                    # RuntimeError on a degenerate weight vector (e.g. inf/nan) —
                    # fall back to the first character in that case
                    word_idx = torch.tensor(0)

                prob = decoder_output[word_idx] - logsumexp(decoder_output, dim=0)
                log_prob += prob

                input = word_idx.detach().unsqueeze(0).unsqueeze(0)
                word = idx2item[word_idx].decode("UTF-8")
                characters.append(word)

                if break_on_suffix is not None and "".join(characters).endswith(break_on_suffix):
                    break

            text = prefix + "".join(characters)

            log_prob_float = log_prob.item()
            log_prob_float /= len(characters)

            if not self.is_forward_lm:
                text = text[::-1]

            return text, -log_prob_float

    def calculate_perplexity(self, text: str) -> float:
        """Return the perplexity of *text* under this language model."""
        if not self.is_forward_lm:
            text = text[::-1]

        # input ids
        input = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[:-1]]).unsqueeze(1)
        input = input.to(flair.device)

        # push list of character IDs through model
        hidden = self.init_hidden(1)
        prediction, _, hidden = self.forward(input, hidden)

        # the target is always the next character
        targets = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[1:]])
        targets = targets.to(flair.device)

        # use cross entropy loss to compare output of forward pass with targets
        # (fix: variable name typo `cross_entroy_loss` corrected)
        cross_entropy_loss = torch.nn.CrossEntropyLoss()
        loss = cross_entropy_loss(prediction.view(-1, len(self.dictionary)), targets).item()

        # exponentiate cross-entropy loss to calculate perplexity
        perplexity = math.exp(loss)

        return perplexity

    def __getstate__(self):
        # "document_delimiter" property may be missing in some older pre-trained models
        self.document_delimiter = getattr(self, "document_delimiter", "\n")

        # serialize the language models and the constructor arguments (but nothing else)
        model_state = {
            "state_dict": self.state_dict(),
            "dictionary": self.dictionary,
            "is_forward_lm": self.is_forward_lm,
            "hidden_size": self.hidden_size,
            "nlayers": self.nlayers,
            "embedding_size": self.embedding_size,
            "nout": self.nout,
            "document_delimiter": self.document_delimiter,
            "dropout": self.dropout,
            "recurrent_type": self.recurrent_type,
            "has_decoder": self.decoder is not None,
        }

        return model_state

    def __setstate__(self, d):
        # special handling for deserializing language models
        if "state_dict" in d:
            # re-initialize language model with constructor arguments
            language_model = LanguageModel(
                dictionary=d["dictionary"],
                is_forward_lm=d["is_forward_lm"],
                hidden_size=d["hidden_size"],
                nlayers=d["nlayers"],
                embedding_size=d["embedding_size"],
                nout=d["nout"],
                document_delimiter=d["document_delimiter"],
                dropout=d["dropout"],
                recurrent_type=d.get("recurrent_type", "lstm"),
                has_decoder=d.get("has_decoder", True),
            )

            language_model.load_state_dict(d["state_dict"], strict=d.get("has_decoder", True))

            # copy over state dictionary to self
            for key in language_model.__dict__:
                self.__dict__[key] = language_model.__dict__[key]

            # set the language model to eval() by default (this is necessary since FlairEmbeddings "protect" the LM
            # in their "self.train()" method)
            self.eval()

        else:
            # defaults for models serialized before these attributes existed
            if "recurrent_type" not in d:
                d["recurrent_type"] = "lstm"

            if "state_count" not in d:
                d["state_count"] = 2

            super().__setstate__(d)

    def _apply(self, fn):
        # models that were serialized using torch versions older than 1.4.0 lack the _flat_weights_names attribute
        # check if this is the case and if so, set it
        for child_module in self.children():
            if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, "_flat_weights_names"):
                _flat_weights_names = []

                num_direction = 2 if child_module.__dict__["bidirectional"] else 1
                for layer in range(child_module.__dict__["num_layers"]):
                    for direction in range(num_direction):
                        suffix = "_reverse" if direction == 1 else ""
                        param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
                        if child_module.__dict__["bias"]:
                            param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
                        param_names = [x.format(layer, suffix) for x in param_names]
                        _flat_weights_names.extend(param_names)

                child_module._flat_weights_names = _flat_weights_names

            child_module._apply(fn)
| 17,001 | 35.021186 | 117 | py |
flair | flair-master/flair/models/relation_classifier_model.py | import itertools
import logging
import typing
from abc import ABC, abstractmethod
from pathlib import Path
from typing import (
Any,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import torch
from torch.utils.data.dataset import Dataset
import flair
from flair.data import (
Corpus,
Dictionary,
Label,
Relation,
Sentence,
Span,
Token,
_iter_dataset,
)
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import DocumentEmbeddings, TransformerDocumentEmbeddings
from flair.tokenization import SpaceTokenizer
# module-level logger, shared under the project-wide "flair" logger namespace
logger: logging.Logger = logging.getLogger("flair")
class EncodedSentence(Sentence):
    """A Sentence that expresses that a sentence is encoded and compatible with the relation classifier.

    For inference, i.e. `predict` and `evaluate`, the relation classifier internally encodes the sentences.
    Therefore, these functions work with the regular flair sentence objects.

    This subclass adds no behavior of its own; it serves purely as a marker type.
    """
class EncodingStrategy(ABC):
    """The encoding of the head and tail entities in a sentence with a relation annotation."""

    # control tokens introduced by a concrete strategy (empty for punctuation-based ones);
    # presumably registered with the embedding tokenizer when add_special_tokens is set — verify in RelationClassifier
    special_tokens: Set[str] = set()

    def __init__(self, add_special_tokens: bool = False) -> None:
        # whether `special_tokens` should be added to the tokenizer vocabulary
        self.add_special_tokens = add_special_tokens

    @abstractmethod
    def encode_head(self, head_span: Span, label: Label) -> str:
        """Returns the encoded string representation of the head span.

        Multi-token head encodings tokens are separated by a space.
        """
        ...

    @abstractmethod
    def encode_tail(self, tail_span: Span, label: Label) -> str:
        """Returns the encoded string representation of the tail span.

        Multi-token tail encodings tokens are separated by a space.
        """
        ...
class EntityMask(EncodingStrategy):
    """An `class`:EncodingStrategy: that masks the head and tail relation entities.

    Example:
    -------
    For the `founded_by` relation from `ORG` to `PER` and
    the sentence "Larry Page and Sergey Brin founded Google .",
    the encoded sentences and relations are
    - "[TAIL] and Sergey Brin founded [HEAD]" -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and [TAIL] founded [HEAD]" -> Relation(head='Google', tail='Sergey Brin').
    """

    special_tokens: Set[str] = {"[HEAD]", "[TAIL]"}

    def encode_head(self, head_span: Span, label: Label) -> str:
        # the head entity is replaced entirely, regardless of its text or label
        return "[HEAD]"

    def encode_tail(self, tail_span: Span, label: Label) -> str:
        # the tail entity is replaced entirely, regardless of its text or label
        return "[TAIL]"
class TypedEntityMask(EncodingStrategy):
    """An `class`:EncodingStrategy: replacing each relation argument with a mask that carries its entity label.

    E.g. for `founded_by` (`ORG` -> `PER`) in "Larry Page and Sergey Brin founded Google .",
    the encodings are
    - "[TAIL-PER] and Sergey Brin founded [HEAD-ORG]" -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and [TAIL-PER] founded [HEAD-ORG]" -> Relation(head='Google', tail='Sergey Brin').
    """

    def encode_head(self, head: Span, label: Label) -> str:
        # mask head text, keep its entity label
        return "[HEAD-{}]".format(label.value)

    def encode_tail(self, tail: Span, label: Label) -> str:
        # mask tail text, keep its entity label
        return "[TAIL-{}]".format(label.value)
class EntityMarker(EncodingStrategy):
    """An `class`:EncodingStrategy: wrapping the (unchanged) head and tail texts in marker control tokens.

    E.g. for `founded_by` (`ORG` -> `PER`) in "Larry Page and Sergey Brin founded Google .",
    the encodings are
    - "[HEAD] Larry Page [/HEAD] and Sergey Brin founded [TAIL] Google [/TAIL]"
      -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and [HEAD] Sergey Brin [/HEAD] founded [TAIL] Google [/TAIL]"
      -> Relation(head='Google', tail='Sergey Brin').
    """

    special_tokens: Set[str] = {"[HEAD]", "[/HEAD]", "[TAIL]", "[/TAIL]"}

    def encode_head(self, head: Span, label: Label) -> str:
        words = " ".join(token.text for token in head)
        return f"[HEAD] {words} [/HEAD]"

    def encode_tail(self, tail: Span, label: Label) -> str:
        words = " ".join(token.text for token in tail)
        return f"[TAIL] {words} [/TAIL]"
class TypedEntityMarker(EncodingStrategy):
    """An `class`:EncodingStrategy: wrapping the head and tail texts in markers that carry the entity label.

    E.g. for `founded_by` (`ORG` -> `PER`) in "Larry Page and Sergey Brin founded Google .",
    the encodings are
    - "[HEAD-PER] Larry Page [/HEAD-PER] and Sergey Brin founded [TAIL-ORG] Google [/TAIL-ORG]"
      -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and [HEAD-PER] Sergey Brin [/HEAD-PER] founded [TAIL-ORG] Google [/TAIL-ORG]"
      -> Relation(head='Google', tail='Sergey Brin').
    """

    def encode_head(self, head: Span, label: Label) -> str:
        words = " ".join(token.text for token in head)
        return "[HEAD-{0}] {1} [/HEAD-{0}]".format(label.value, words)

    def encode_tail(self, tail: Span, label: Label) -> str:
        words = " ".join(token.text for token in tail)
        return "[TAIL-{0}] {1} [/TAIL-{0}]".format(label.value, words)
class EntityMarkerPunct(EncodingStrategy):
    """An alternate version of `class`:EntityMarker: that uses punctuation instead of control tokens.

    E.g. for `founded_by` (`ORG` -> `PER`) in "Larry Page and Sergey Brin founded Google .",
    the encodings are
    - "@ Larry Page @ and Sergey Brin founded # Google #" -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and @ Sergey Brin @ founded # Google #" -> Relation(head='Google', tail='Sergey Brin').
    """

    def encode_head(self, head: Span, label: Label) -> str:
        # head entities are delimited by '@'
        words = " ".join(token.text for token in head)
        return "@ " + words + " @"

    def encode_tail(self, tail: Span, label: Label) -> str:
        # tail entities are delimited by '#'
        words = " ".join(token.text for token in tail)
        return "# " + words + " #"
class TypedEntityMarkerPunct(EncodingStrategy):
    """An alternate version of `class`:TypedEntityMarker: with punctuations as control tokens.

    The head is wrapped as `@ * <label> * ... @` and the tail as `# ^ <label> ^ ... #`.

    Example:
    -------
    For the `founded_by` relation from `ORG` to `PER` and
    the sentence "Larry Page and Sergey Brin founded Google .",
    the encoded sentences and relations are
    - "@ * PER * Larry Page @ and Sergey Brin founded # ^ ORG ^ Google #"
      -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and @ * PER * Sergey Brin @ founded # ^ ORG ^ Google #"
      -> Relation(head='Google', tail='Sergey Brin').
    """

    def encode_head(self, head: Span, label: Label) -> str:
        space_tokenized_text: str = " ".join(token.text for token in head)
        return f"@ * {label.value} * {space_tokenized_text} @"

    def encode_tail(self, tail: Span, label: Label) -> str:
        space_tokenized_text: str = " ".join(token.text for token in tail)
        return f"# ^ {label.value} ^ {space_tokenized_text} #"
class _Entity(NamedTuple):
    """A `_Entity` encapsulates either a relation's head or a tail span, including its label.

    This class serves as an internal helper class.
    """

    span: Span  # the entity mention, as a span over the original sentence
    label: Label  # the entity's label (e.g. 'PER', 'ORG')
# TODO: This closely shadows the RelationExtractor name. Maybe we need a better name here.
# - MaskedRelationClassifier ?
# This depends on whether this relation classification architecture should replace the original one
# or be offered as an alternative.
class RelationClassifier(flair.nn.DefaultClassifier[EncodedSentence, EncodedSentence]):
"""Relation Classifier to predict the relation between two entities.
---- Task ----
Relation Classification (RC) is the task of identifying the semantic relation between two entities in a text.
In contrast to (end-to-end) Relation Extraction (RE), RC requires pre-labelled entities.
Example:
-------
For the `founded_by` relation from `ORG` (head) to `PER` (tail) and the sentence
"Larry Page and Sergey Brin founded Google .", we extract the relations
- founded_by(head='Google', tail='Larry Page') and
- founded_by(head='Google', tail='Sergey Brin').
---- Architecture ----
The Relation Classifier Model builds upon a text classifier.
The model generates an encoded sentence for each entity pair
in the cross product of all entities in the original sentence.
In the encoded representation, the entities in the current entity pair are masked/marked with control tokens.
(For an example, see the docstrings of different encoding strategies, e.g. :class:`TypedEntityMarker`.)
Then, for each encoded sentence, the model takes its document embedding and puts the resulting
text representation(s) through a linear layer to get the class relation label.
The implemented encoding strategies are taken from this paper by Zhou et al.: https://arxiv.org/abs/2102.01373
Note: Currently, the model has no multi-label support.
"""
def __init__(
    self,
    embeddings: DocumentEmbeddings,
    label_dictionary: Dictionary,
    label_type: str,
    entity_label_types: Union[str, Sequence[str], Dict[str, Optional[Set[str]]]],
    entity_pair_labels: Optional[Set[Tuple[str, str]]] = None,
    entity_threshold: Optional[float] = None,
    cross_augmentation: bool = True,
    encoding_strategy: EncodingStrategy = TypedEntityMarker(),
    zero_tag_value: str = "O",
    allow_unk_tag: bool = True,
    **classifierargs,
) -> None:
    """Initializes a `RelationClassifier`.

    :param embeddings: The document embeddings used to embed each sentence
    :param label_dictionary: A Dictionary containing all predictable labels from the corpus
    :param label_type: The label type which is going to be predicted, in case a corpus has multiple annotations
    :param entity_label_types: A label type or sequence of label types of the required relation entities.
        You can also specify a label filter in a dictionary with the label type as key and
        the valid entity labels as values in a set.
        E.g. to use only 'PER' and 'ORG' labels from a NER-tagger: `{'ner': {'PER', 'ORG'}}`.
        To use all labels from 'ner', pass 'ner'.
    :param entity_pair_labels: A set of valid relation entity pair combinations, used as relation candidates.
        Specify valid entity pairs in a set of tuples of labels (<HEAD>, <TAIL>).
        E.g. for the `born_in` relation, only relations from 'PER' to 'LOC' make sense.
        Here, relations from 'PER' to 'PER' are not meaningful, so
        it is advised to specify the `entity_pair_labels` as `{('PER', 'ORG')}`.
        This setting may help to reduce the number of relation candidates.
        Leaving this parameter as `None` (default) disables the relation-candidate-filter,
        i.e. the model classifies the relation for each entity pair
        in the cross product of *all* entity pairs (inefficient).
    :param entity_threshold: Only pre-labelled entities above this threshold are taken into account by the model.
    :param cross_augmentation: If `True`, use cross augmentation to transform `Sentence`s into `EncodedSentenece`s.
        When cross augmentation is enabled, the transformation functions,
        e.g. `transform_corpus`, generate an encoded sentence for each entity pair
        in the cross product of all entities in the original sentence.
        When disabling cross augmentation, the transform functions only generate
        encoded sentences for each gold relation annotation in the original sentence.
    :param encoding_strategy: An instance of a class conforming the :class:`EncodingStrategy` protocol
    :param zero_tag_value: The label to use for out-of-class relations
    :param allow_unk_tag: If `False`, removes `<unk>` from the passed label dictionary, otherwise do nothing.
    :param classifierargs: The remaining parameters passed to the underlying `DefaultClassifier`
    """
    # Set label type and prepare label dictionary
    self._label_type = label_type
    self._zero_tag_value = zero_tag_value
    self._allow_unk_tag = allow_unk_tag

    # Rebuild the label dictionary so that it always contains the zero tag and
    # every original label except '<unk>' (whose presence is controlled via `add_unk`).
    modified_label_dictionary: Dictionary = Dictionary(add_unk=self._allow_unk_tag)
    modified_label_dictionary.add_item(self._zero_tag_value)
    for label in label_dictionary.get_items():
        if label != "<unk>":
            modified_label_dictionary.add_item(label)

    # Initialize super default classifier
    super().__init__(
        embeddings=embeddings,
        label_dictionary=modified_label_dictionary,
        final_embedding_size=embeddings.embedding_length,
        **classifierargs,
    )

    # Normalize `entity_label_types` into a dict mapping label type -> optional label-value filter
    if isinstance(entity_label_types, str):
        self.entity_label_types: Dict[str, Optional[Set[str]]] = {entity_label_types: None}
    elif isinstance(entity_label_types, Sequence):
        self.entity_label_types = {entity_label_type: None for entity_label_type in entity_label_types}
    else:
        self.entity_label_types = entity_label_types

    self.entity_pair_labels = entity_pair_labels

    self.entity_threshold = entity_threshold
    self.cross_augmentation = cross_augmentation
    self.encoding_strategy = encoding_strategy

    # Add the special tokens from the encoding strategy
    if (
        self.encoding_strategy.add_special_tokens
        and self.encoding_strategy.special_tokens
        and isinstance(self.embeddings, TransformerDocumentEmbeddings)
    ):
        special_tokens: List[str] = list(self.encoding_strategy.special_tokens)
        tokenizer = self.embeddings.tokenizer
        tokenizer.add_special_tokens({"additional_special_tokens": special_tokens})
        # Grow the transformer's embedding matrix to cover the newly added tokens
        self.embeddings.model.resize_token_embeddings(len(tokenizer))

        logger.info(
            f"{self.__class__.__name__}: "
            f"Added {', '.join(special_tokens)} as additional special tokens to {self.embeddings.name}"
        )

    # Auto-spawn on GPU, if available
    self.to(flair.device)
def _valid_entities(self, sentence: Sentence) -> Iterator[_Entity]:
    """Yields all valid entities, filtered under the specification of `self.entity_label_types`.

    :param sentence: A flair `Sentence` object with entity annotations
    :return: Valid entities as `_Entity`
    """
    for label_type, allowed_label_values in self.entity_label_types.items():
        for span in sentence.get_spans(type=label_type):
            span_label: Label = span.get_label(label_type=label_type)

            # Skip entities whose label value is not in the configured filter (if any)
            if allowed_label_values is not None and span_label.value not in allowed_label_values:
                continue

            # Skip entities that do not exceed the confidence threshold (if configured)
            if self.entity_threshold is not None and span_label.score <= self.entity_threshold:
                continue

            yield _Entity(span=span, label=span_label)
def _entity_pair_permutations(
    self,
    sentence: Sentence,
) -> Iterator[Tuple[_Entity, _Entity, Optional[str]]]:
    """Yields all valid entity pair permutations (relation candidates).

    If the passed sentence contains relation annotations,
    the relation gold label will be yielded along with the participating entities.
    The permutations are constructed by a filtered cross-product
    under the specification of `self.entity_label_types` and `self.entity_pair_labels`.

    :param sentence: A flair `Sentence` object with entity annotations
    :yields: Tuples of (HEAD, TAIL, gold_label).
        The head and tail `_Entity`s have span references to the passed sentence.
    """
    entities: List[_Entity] = list(self._valid_entities(sentence))

    # Look-up table from a relation's identifier to its annotated gold label
    gold_labels: Dict[str, str] = {
        relation.unlabeled_identifier: relation.get_label(self.label_type, zero_tag_value=self.zero_tag_value).value
        for relation in sentence.get_relations(self.label_type)
    }

    # Cross product of all entities; identity pairs are excluded
    for head, tail in itertools.product(entities, repeat=2):
        if head.span is tail.span:
            continue

        # Apply the optional relation-candidate filter on the (head, tail) label combination
        if self.entity_pair_labels is not None:
            if (head.label.value, tail.label.value) not in self.entity_pair_labels:
                continue

        candidate: Relation = Relation(first=head.span, second=tail.span)
        yield head, tail, gold_labels.get(candidate.unlabeled_identifier)
def _encode_sentence(
    self,
    head: _Entity,
    tail: _Entity,
    gold_label: Optional[str] = None,
) -> EncodedSentence:
    """Returns a new `Sentence` object with masked/marked head and tail spans according to the encoding strategy.

    If provided, the encoded sentence also has the corresponding gold label annotation from `self.label_type`.

    :param head: The head `_Entity`
    :param tail: The tail `_Entity`
    :param gold_label: An optional gold label of the induced relation by the head and tail entity
    :return: The `EncodedSentence` (with gold annotations)
    """
    # Some sanity checks
    original_sentence: Sentence = head.span.sentence
    assert original_sentence is tail.span.sentence, "The head and tail need to come from the same sentence."

    # Pre-compute non-leading head and tail tokens for entity masking
    non_leading_head_tokens: List[Token] = head.span.tokens[1:]
    non_leading_tail_tokens: List[Token] = tail.span.tokens[1:]

    # We can not use the plaintext of the head/tail span in the sentence as the mask/marker
    # since there may be multiple occurrences of the same entity mentioned in the sentence.
    # Therefore, we use the span's position in the sentence.
    encoded_sentence_tokens: List[str] = []
    for token in original_sentence:
        # The leading token of each entity is replaced by the full encoded marker text;
        # the remaining (non-leading) entity tokens are dropped, since the marker
        # already contains the whole span text.
        if token is head.span[0]:
            encoded_sentence_tokens.append(self.encoding_strategy.encode_head(head.span, head.label))

        elif token is tail.span[0]:
            encoded_sentence_tokens.append(self.encoding_strategy.encode_tail(tail.span, tail.label))

        elif all(
            token is not non_leading_entity_token
            for non_leading_entity_token in itertools.chain(non_leading_head_tokens, non_leading_tail_tokens)
        ):
            encoded_sentence_tokens.append(token.text)

    # Create masked sentence
    encoded_sentence: EncodedSentence = EncodedSentence(
        " ".join(encoded_sentence_tokens), use_tokenizer=SpaceTokenizer()
    )

    if gold_label is not None:
        # Add gold relation annotation as sentence label
        # Using the sentence label instead of annotating a separate `Relation` object is easier to manage since,
        # during prediction, the forward pass does not need any knowledge about the entities in the sentence.
        encoded_sentence.add_label(typename=self.label_type, value=gold_label, score=1.0)
    encoded_sentence.copy_context_from_sentence(original_sentence)
    return encoded_sentence
def _encode_sentence_for_inference(
    self,
    sentence: Sentence,
) -> Iterator[Tuple[EncodedSentence, Relation]]:
    """Create Encoded Sentences and Relation pairs for Inference.

    Yields encoded sentences annotated with their gold relation and
    the corresponding relation object in the original sentence for all valid entity pair permutations.
    The created encoded sentences are newly created sentences with no reference to the passed sentence.

    Important properties:
    - Every sentence has exactly one encoded head and tail entity token. Therefore, every encoded sentence has
      **exactly** one induced relation annotation, the gold annotation or `self.zero_tag_value`.
    - The created relations have head and tail spans from the original passed sentence.

    :param sentence: A flair `Sentence` object with entity annotations
    :return: Encoded sentences annotated with their gold relation and
        the corresponding relation in the original sentence
    """
    for head, tail, gold_label in self._entity_pair_permutations(sentence):
        # Encode with the gold label if present, otherwise with the zero ("no relation") tag
        encoded: EncodedSentence = self._encode_sentence(
            head=head,
            tail=tail,
            gold_label=self.zero_tag_value if gold_label is None else gold_label,
        )
        yield encoded, Relation(first=head.span, second=tail.span)
def _encode_sentence_for_training(self, sentence: Sentence) -> Iterator[EncodedSentence]:
    """Create Encoded Sentences and Relation pairs for Training.

    Same as `self._encode_sentence_for_inference`,
    with the option of disabling cross augmentation via `self.cross_augmentation`
    (and that the relation with reference to the original sentence is not returned).
    """
    for head, tail, gold_label in self._entity_pair_permutations(sentence):
        if gold_label is None:
            if not self.cross_augmentation:
                # Without cross augmentation, drop candidates lacking a gold annotation
                continue
            # With cross augmentation, unannotated candidates become zero-tag data points
            gold_label = self.zero_tag_value

        yield self._encode_sentence(head=head, tail=tail, gold_label=gold_label)
def transform_sentence(self, sentences: Union[Sentence, List[Sentence]]) -> List[EncodedSentence]:
    """Transforms sentences into encoded sentences specific to the `RelationClassifier`.

    For more information on the internal sentence transformation procedure,
    see the :class:`RelationClassifier` architecture and
    the different :class:`EncodingStrategy` variants docstrings.

    :param sentences: A (list) of sentence(s) to transform
    :return: A list of encoded sentences specific to the `RelationClassifier`
    """
    sentence_list: List[Sentence] = sentences if isinstance(sentences, list) else [sentences]

    encoded: List[EncodedSentence] = []
    for original_sentence in sentence_list:
        encoded.extend(self._encode_sentence_for_training(original_sentence))
    return encoded
def transform_dataset(self, dataset: Dataset[Sentence]) -> FlairDatapointDataset[EncodedSentence]:
    """Transforms a dataset into a dataset containing encoded sentences specific to the `RelationClassifier`.

    The returned dataset is stored in memory.
    For more information on the internal sentence transformation procedure,
    see the :class:`RelationClassifier` architecture and
    the different :class:`EncodingStrategy` variants docstrings.

    :param dataset: A dataset of sentences to transform
    :return: A dataset of encoded sentences specific to the `RelationClassifier`
    """
    # Batch size 1 -> each batch holds exactly one original sentence
    loader: DataLoader = DataLoader(dataset, batch_size=1)
    sentences: List[Sentence] = [batch[0] for batch in iter(loader)]
    return FlairDatapointDataset(self.transform_sentence(sentences))
def transform_corpus(self, corpus: Corpus[Sentence]) -> Corpus[EncodedSentence]:
    """Transforms a corpus into a corpus containing encoded sentences specific to the `RelationClassifier`.

    The splits of the returned corpus are stored in memory.
    For more information on the internal sentence transformation procedure,
    see the :class:`RelationClassifier` architecture and
    the different :class:`EncodingStrategy` variants docstrings.

    :param corpus: A corpus of sentences to transform
    :return: A corpus of encoded sentences specific to the `RelationClassifier`
    """

    def _maybe_transform(split):
        # Absent splits stay absent in the transformed corpus
        return None if split is None else self.transform_dataset(split)

    return Corpus(
        train=_maybe_transform(corpus.train),
        dev=_maybe_transform(corpus.dev),
        test=_maybe_transform(corpus.test),
        name=corpus.name,
        # If we sample missing splits, the encoded sentences that correspond to the same original sentences
        # may get distributed into different splits. For training purposes, this is always undesired.
        sample_missing_splits=False,
    )
def _get_embedding_for_data_point(self, prediction_data_point: EncodedSentence) -> torch.Tensor:
    # Fetch the document embedding that `self.embeddings` produced for this encoded sentence.
    names: List[str] = self.embeddings.get_names()
    return prediction_data_point.get_embedding(names)
def _get_data_points_from_sentence(self, sentence: EncodedSentence) -> List[EncodedSentence]:
    """Returns the encoded sentences to which labels are added.

    To encode sentences, use the `transform` function of the `RelationClassifier`.

    :param sentence: An `EncodedSentence` produced by one of the transform functions
    :raises ValueError: If the passed sentence is not an `EncodedSentence`
    """
    # Ensure that all sentences are encoded properly
    if not isinstance(sentence, EncodedSentence):
        raise ValueError(
            "Some of the passed sentences are not encoded "
            "to be compatible with the relation classifier's forward pass.\n"
            "Did you transform your raw sentences into encoded sentences? "
            "Use the\n"
            "\t- transform_sentence\n"
            "\t- transform_dataset\n"
            "\t- transform_corpus\n"
            "functions to transform your data first. "  # fixed typo: "you data" -> "your data"
            "When using the ModelTrainer to train a relation classification model, "
            "be sure to pass a transformed corpus:\n"
            "WRONG: trainer: ModelTrainer = ModelTrainer(model=model, corpus=corpus)\n"
            "CORRECT: trainer: ModelTrainer = ModelTrainer(model=model, corpus=model.transform_corpus(corpus))"
        )
    return [sentence]
def predict(
    self,
    sentences: Union[List[Sentence], List[EncodedSentence], Sentence, EncodedSentence],
    mini_batch_size: int = 32,
    return_probabilities_for_all_classes: bool = False,
    verbose: bool = False,
    label_name: Optional[str] = None,
    return_loss: bool = False,
    embedding_storage_mode: str = "none",
) -> Optional[Tuple[torch.Tensor, int]]:
    """Predicts the class labels for the given sentence(s).

    Standard `Sentence` objects and `EncodedSentences` specific to the `RelationClassifier` are allowed as input.
    The (relation) labels are directly added to the sentences.

    :param sentences: A list of (encoded) sentences.
    :param mini_batch_size: The mini batch size to use
    :param return_probabilities_for_all_classes: Return probabilities for all classes instead of only best predicted
    :param verbose: Set to display a progress bar
    :param return_loss: Set to return loss
    :param label_name: Set to change the predicted label type name
    :param embedding_storage_mode: The default is 'none', which is always best.
        Only set to 'cpu' or 'gpu' if you wish to predict
        and keep the generated embeddings in CPU or GPU memory, respectively.
    :return: The loss and the total number of classes, if `return_loss` is set
    """
    prediction_label_type: str = self.label_type if label_name is None else label_name

    if not isinstance(sentences, list):
        sentences = [sentences]

    loss: Optional[Tuple[torch.Tensor, int]]
    encoded_sentences: List[EncodedSentence]

    # Mixed batches of encoded and non-encoded sentences are rejected (see the `else` branch).
    if all(isinstance(sentence, EncodedSentence) for sentence in sentences):
        # Deal with the case where all sentences are encoded sentences

        # mypy does not infer the type of "sentences" restricted by the if statement
        encoded_sentences = cast(List[EncodedSentence], sentences)
        loss = super().predict(
            encoded_sentences,
            mini_batch_size=mini_batch_size,
            return_probabilities_for_all_classes=return_probabilities_for_all_classes,
            verbose=verbose,
            label_name=prediction_label_type,
            return_loss=return_loss,
            embedding_storage_mode=embedding_storage_mode,
        )

    elif all(not isinstance(sentence, EncodedSentence) for sentence in sentences):
        # Deal with the case where all sentences are standard (non-encoded) sentences
        Sentence.set_context_for_sentences(cast(List[Sentence], sentences))
        # Encode each relation candidate, keeping a reference to its original `Relation`
        # so that predictions can be transferred back onto the original sentences.
        sentences_with_relation_reference: List[Tuple[EncodedSentence, Relation]] = list(
            itertools.chain.from_iterable(self._encode_sentence_for_inference(sentence) for sentence in sentences)
        )

        encoded_sentences = [x[0] for x in sentences_with_relation_reference]
        loss = super().predict(
            encoded_sentences,
            mini_batch_size=mini_batch_size,
            return_probabilities_for_all_classes=return_probabilities_for_all_classes,
            verbose=verbose,
            label_name=prediction_label_type,
            return_loss=return_loss,
            embedding_storage_mode=embedding_storage_mode,
        )

        # For each encoded sentence, transfer its prediction onto the original relation
        for encoded_sentence, original_relation in sentences_with_relation_reference:
            for label in encoded_sentence.get_labels(prediction_label_type):
                original_relation.add_label(prediction_label_type, value=label.value, score=label.score)

    else:
        raise ValueError("All passed sentences must be either uniformly encoded or not.")

    return loss if return_loss else None
def _get_state_dict(self) -> Dict[str, Any]:
    """Serializes the model state, including all `RelationClassifier`-specific settings."""
    model_state: Dict[str, Any] = dict(super()._get_state_dict())
    model_state.update(
        {
            "embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_dictionary": self.label_dictionary,
            "label_type": self.label_type,
            "entity_label_types": self.entity_label_types,
            "entity_pair_labels": self.entity_pair_labels,
            "entity_threshold": self.entity_threshold,
            "cross_augmentation": self.cross_augmentation,
            "encoding_strategy": self.encoding_strategy,
            "zero_tag_value": self.zero_tag_value,
            "allow_unk_tag": self.allow_unk_tag,
        }
    )
    return model_state
@classmethod
def _init_model_with_state_dict(cls, state: Dict[str, Any], **kwargs):
    """Reconstructs a `RelationClassifier` from a serialized model state."""
    # Forward all RelationClassifier-specific constructor arguments stored in the state dict
    state_keys = (
        "embeddings",
        "label_dictionary",
        "label_type",
        "entity_label_types",
        "entity_pair_labels",
        "entity_threshold",
        "cross_augmentation",
        "encoding_strategy",
        "zero_tag_value",
        "allow_unk_tag",
    )
    init_kwargs = {key: state[key] for key in state_keys}
    return super()._init_model_with_state_dict(state, **init_kwargs, **kwargs)
@property
def label_type(self) -> str:
    # The label type this model predicts (set at construction time).
    return self._label_type

@property
def zero_tag_value(self) -> str:
    # The tag used for entity pairs without an annotated relation ("out-of-class").
    return self._zero_tag_value

@property
def allow_unk_tag(self) -> bool:
    # Whether '<unk>' is kept in the label dictionary.
    return self._allow_unk_tag
def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
    """Yields all tokens used by the model, including the marker tokens produced by the encoding strategy."""
    yield from super().get_used_tokens(corpus)
    for sentence in _iter_dataset(corpus.get_all_sentences()):
        for span in sentence.get_spans(self.label_type):
            # Also count the encoded head/tail marker texts as used tokens
            span_label = span.get_label(self.label_type)
            yield self.encoding_strategy.encode_head(span, span_label).split(" ")
            yield self.encoding_strategy.encode_tail(span, span_label).split(" ")
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "RelationClassifier":
    """Loads a `RelationClassifier` from a file path or a state dict."""
    from typing import cast

    loaded_model = super().load(model_path=model_path)
    return cast("RelationClassifier", loaded_model)
| 34,165 | 45.995873 | 120 | py |
flair | flair-master/flair/models/sequence_tagger_utils/viterbi.py | from typing import Tuple
import numpy as np
import torch
import torch.nn
from torch.nn.functional import softmax
from torch.nn.utils.rnn import pack_padded_sequence
import flair
from flair.data import Dictionary, Label, List, Sentence
# Special boundary tags; both are expected to be present in the tag dictionary.
START_TAG: str = "<START>"
STOP_TAG: str = "<STOP>"
class ViterbiLoss(torch.nn.Module):
"""Calculates the loss for each sequence up to its length t."""
def __init__(self, tag_dictionary: Dictionary) -> None:
    """Create an instance of the Viterbi loss.

    :param tag_dictionary: tag_dictionary of task
    """
    super().__init__()
    self.tag_dictionary = tag_dictionary
    self.tagset_size = len(tag_dictionary)
    # Indices of the special boundary tags inside the tag dictionary
    self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)
    self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)
def forward(self, features_tuple: tuple, targets: torch.Tensor) -> torch.Tensor:
    """Forward propagation of Viterbi Loss.

    :param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),
        lengths of sentences in batch, transitions from CRF
    :param targets: true tags for sentences which will be converted to matrix indices.
    :return: summed Viterbi Loss over all data points
    """
    features, lengths, transitions = features_tuple

    batch_size = features.size(0)
    seq_len = features.size(1)

    targets, targets_matrix_indices = self._format_targets(targets, lengths)
    targets_matrix_indices = torch.tensor(targets_matrix_indices, dtype=torch.long).unsqueeze(2).to(flair.device)

    # Squeeze crf scores matrices in 1-dim shape and gather scores at targets by matrix indices
    scores_at_targets = torch.gather(features.view(batch_size, seq_len, -1), 2, targets_matrix_indices)
    scores_at_targets = pack_padded_sequence(scores_at_targets, lengths, batch_first=True)[0]
    # Transition score from each sentence's last gold tag into the STOP tag
    transitions_to_stop = transitions[
        np.repeat(self.stop_tag, features.shape[0]),
        [target[length - 1] for target, length in zip(targets, lengths)],
    ]
    # Score of the gold paths: gathered emission+transition scores plus final STOP transitions
    gold_score = scores_at_targets.sum() + transitions_to_stop.sum()

    # Forward algorithm: accumulate log-sum-exp scores over all possible tag paths
    scores_upto_t = torch.zeros(batch_size, self.tagset_size, device=flair.device)

    for t in range(max(lengths)):
        batch_size_t = sum(
            [length > t for length in lengths]
        )  # since batch is ordered, we can save computation time by reducing our effective batch_size

        if t == 0:
            # Initially, get scores from <start> tag to all other tags
            scores_upto_t[:batch_size_t] = (
                scores_upto_t[:batch_size_t] + features[:batch_size_t, t, :, self.start_tag]
            )
        else:
            # We add scores at current timestep to scores accumulated up to previous timestep, and log-sum-exp
            # Remember, the cur_tag of the previous timestep is the prev_tag of this timestep
            scores_upto_t[:batch_size_t] = self._log_sum_exp(
                features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t].unsqueeze(1), dim=2
            )

    all_paths_scores = self._log_sum_exp(scores_upto_t + transitions[self.stop_tag].unsqueeze(0), dim=1).sum()

    # Negative log-likelihood of the gold tag sequences
    viterbi_loss = all_paths_scores - gold_score

    return viterbi_loss
@staticmethod
def _log_sum_exp(tensor, dim):
"""Calculates the log-sum-exponent of a tensor's dimension in a numerically stable way.
:param tensor: tensor
:param dim: dimension to calculate log-sum-exp of
:return: log-sum-exp
"""
m, _ = torch.max(tensor, dim)
m_expanded = m.unsqueeze(dim).expand_as(tensor)
return m + torch.log(torch.sum(torch.exp(tensor - m_expanded), dim))
def _format_targets(self, targets: torch.Tensor, lengths: torch.IntTensor):
    """Formats targets into matrix indices.

    CRF scores contain per sentence, per token a (tagset_size x tagset_size) matrix, containing emission score for
    token j + transition prob from previous token i. Means, if we think of our rows as "to tag" and our columns
    as "from tag", the matrix in cell [10,5] would contain the emission score for tag 10 + transition score
    from previous tag 5 and could directly be addressed through the 1-dim indices (10 + tagset_size * 5) = 70,
    if our tagset consists of 12 tags.

    :param targets: targets as in tag dictionary
    :param lengths: lengths of sentences in batch
    """
    targets_per_sentence = []

    # Cut the flat target list into one list per sentence, using the sentence lengths
    targets_list = targets.tolist()
    for cut in lengths:
        targets_per_sentence.append(targets_list[:cut])
        targets_list = targets_list[cut:]

    # Pad every sentence with STOP-tag indices up to the maximum length in the batch
    for t in targets_per_sentence:
        t += [self.tag_dictionary.get_idx_for_item(STOP_TAG)] * (int(lengths.max().item()) - len(t))

    # Flattened (to_tag * tagset_size + from_tag) index per token;
    # the first token of each sentence transitions from the START tag.
    matrix_indices = [
        [self.tag_dictionary.get_idx_for_item(START_TAG) + (s[0] * self.tagset_size)]
        + [s[i] + (s[i + 1] * self.tagset_size) for i in range(0, len(s) - 1)]
        for s in targets_per_sentence
    ]

    return targets_per_sentence, matrix_indices
class ViterbiDecoder:
"""Decodes a given sequence using the Viterbi algorithm."""
def __init__(self, tag_dictionary: Dictionary) -> None:
    """Initialize the Viterbi Decoder.

    :param tag_dictionary: Dictionary of tags for sequence labeling task
    """
    self.tag_dictionary = tag_dictionary
    self.tagset_size = len(tag_dictionary)
    # Indices of the special boundary tags inside the tag dictionary
    self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)
    self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)
def decode(
    self, features_tuple: tuple, probabilities_for_all_classes: bool, sentences: List[Sentence]
) -> Tuple[List, List]:
    """Decoding function returning the most likely sequence of tags.

    :param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),
        lengths of sentence in batch, transitions of CRF
    :param probabilities_for_all_classes: whether to return probabilities for all tags
    :param sentences: sentences in the batch; only used to attach per-token scores when
        `probabilities_for_all_classes` is set
    :return: decoded sequences
    """
    features, lengths, transitions = features_tuple
    all_tags = []

    batch_size = features.size(0)
    seq_len = features.size(1)

    # Create a tensor to hold accumulated sequence scores at each current tag
    scores_upto_t = torch.zeros(batch_size, seq_len + 1, self.tagset_size).to(flair.device)
    # Create a tensor to hold back-pointers
    # i.e., indices of the previous_tag that corresponds to maximum accumulated score at current tag
    # Let pads be the <end> tag index, since that was the last tag in the decoded sequence
    backpointers = (
        torch.ones((batch_size, seq_len + 1, self.tagset_size), dtype=torch.long, device=flair.device)
        * self.stop_tag
    )

    for t in range(seq_len):
        batch_size_t = sum([length > t for length in lengths])  # effective batch size (sans pads) at this timestep
        terminates = [i for i, length in enumerate(lengths) if length == t + 1]

        if t == 0:
            # First token can only be reached from the START tag
            scores_upto_t[:batch_size_t, t] = features[:batch_size_t, t, :, self.start_tag]
            backpointers[:batch_size_t, t, :] = (
                torch.ones((batch_size_t, self.tagset_size), dtype=torch.long) * self.start_tag
            )
        else:
            # We add scores at current timestep to scores accumulated up to previous timestep, and
            # choose the previous timestep that corresponds to the max. accumulated score for each current timestep
            scores_upto_t[:batch_size_t, t], backpointers[:batch_size_t, t, :] = torch.max(
                features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t, t - 1].unsqueeze(1), dim=2
            )

        # If sentence is over, add transition to STOP-tag
        if terminates:
            scores_upto_t[terminates, t + 1], backpointers[terminates, t + 1, :] = torch.max(
                scores_upto_t[terminates, t].unsqueeze(1) + transitions[self.stop_tag].unsqueeze(0), dim=2
            )

    # Decode/trace best path backwards
    decoded = torch.zeros((batch_size, backpointers.size(1)), dtype=torch.long, device=flair.device)
    pointer = torch.ones((batch_size, 1), dtype=torch.long, device=flair.device) * self.stop_tag

    for t in list(reversed(range(backpointers.size(1)))):
        decoded[:, t] = torch.gather(backpointers[:, t, :], 1, pointer).squeeze(1)
        pointer = decoded[:, t].unsqueeze(1)

    # Sanity check: every traced path must start at the START tag
    assert torch.equal(
        decoded[:, 0], torch.ones((batch_size), dtype=torch.long, device=flair.device) * self.start_tag
    )

    # remove start-tag and backscore to stop-tag
    scores_upto_t = scores_upto_t[:, :-1, :]
    decoded = decoded[:, 1:]

    # Max + Softmax to get confidence score for predicted label and append label to each token
    scores = softmax(scores_upto_t, dim=2)
    confidences = torch.max(scores, dim=2)

    tags = []
    for tag_seq, tag_seq_conf, length_seq in zip(decoded, confidences.values, lengths):
        # Truncate each decoded sequence to its sentence's true length
        tags.append(
            [
                (self.tag_dictionary.get_item_for_index(tag), conf.item())
                for tag, conf in list(zip(tag_seq, tag_seq_conf))[:length_seq]
            ]
        )

    if probabilities_for_all_classes:
        all_tags = self._all_scores_for_token(scores.cpu(), lengths, sentences)

    return tags, all_tags
def _all_scores_for_token(self, scores: torch.Tensor, lengths: torch.IntTensor, sentences: List[Sentence]):
    """Returns all scores for each tag in tag dictionary.

    :param scores: Scores for current sentence.
    """
    score_array = scores.numpy()

    all_sentence_probs = []
    for sentence_scores, sentence_length, sentence in zip(score_array, lengths, sentences):
        # Only the first `sentence_length` positions carry real (non-pad) scores
        token_distributions = sentence_scores[:sentence_length]
        sentence_probs = [
            [
                Label(token, self.tag_dictionary.get_item_for_index(tag_idx), tag_score)
                for tag_idx, tag_score in enumerate(distribution)
            ]
            for distribution, token in zip(token_distributions, sentence)
        ]
        all_sentence_probs.append(sentence_probs)
    return all_sentence_probs
| 10,765 | 44.046025 | 119 | py |
flair | flair-master/flair/models/sequence_tagger_utils/crf.py | import torch
import flair
# Special marker tags that frame every tag sequence scored by the CRF.
START_TAG: str = "<START>"
STOP_TAG: str = "<STOP>"
class CRF(torch.nn.Module):
    """Conditional Random Field layer.

    Implementation following sgrvinod (https://github.com/sgrvinod): predicts a
    tag for each token using both the token's own emission score and learned
    transition scores between consecutive tags.
    """

    def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None:
        """Initialize the CRF.

        :param tag_dictionary: tag dictionary used to look up START/STOP tag indices
        :param tagset_size: number of tags in the dictionary
        :param init_from_state_dict: True when weights will be loaded from a pretrained state dict
        """
        super().__init__()
        self.tagset_size = tagset_size
        # transitions[to, from]: learned score of moving from tag `from` to tag `to`.
        self.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size))
        if not init_from_state_dict:
            # Fresh model: forbid transitions into START and out of STOP by
            # writing a large negative score in-place on the detached view.
            start_idx = tag_dictionary.get_idx_for_item(START_TAG)
            stop_idx = tag_dictionary.get_idx_for_item(STOP_TAG)
            self.transitions.detach()[start_idx, :] = -10000
            self.transitions.detach()[:, stop_idx] = -10000
        self.to(flair.device)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Combine emission scores with transition scores.

        :param features: emissions from the RNN / linear layer,
            shape (batch_size, seq_len, tagset_size)
        :return: CRF scores of shape (batch_size, seq_len, tagset_size, tagset_size)
        """
        batch_size, seq_len = features.size()[:2]
        # Broadcast each emission over all possible previous tags ...
        emissions = features.unsqueeze(-1).expand(batch_size, seq_len, self.tagset_size, self.tagset_size)
        # ... and add the transition matrix, broadcast over batch and time.
        return emissions + self.transitions.unsqueeze(0).unsqueeze(0)
| 2,171 | 41.588235 | 119 | py |
flair | flair-master/flair/models/sequence_tagger_utils/__init__.py | 0 | 0 | 0 | py | |
flair | flair-master/flair/embeddings/document.py | import logging
from typing import Any, Dict, List, Optional, Union, cast
import torch
from sklearn.feature_extraction.text import TfidfVectorizer
from torch.nn import RNNBase
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import flair
from flair.data import Sentence
from flair.embeddings.base import (
DocumentEmbeddings,
load_embeddings,
register_embeddings,
)
from flair.embeddings.token import FlairEmbeddings, StackedEmbeddings, TokenEmbeddings
from flair.embeddings.transformer import (
TransformerEmbeddings,
TransformerOnnxDocumentEmbeddings,
)
from flair.nn import LockedDropout, WordDropout
log = logging.getLogger("flair")
@register_embeddings
class TransformerDocumentEmbeddings(DocumentEmbeddings, TransformerEmbeddings):
    """Document-level embeddings produced by a transformer model (one vector per sentence)."""

    # Wrapper class used when this embedding is exported to ONNX.
    onnx_cls = TransformerOnnxDocumentEmbeddings

    def __init__(
        self,
        model: str = "bert-base-uncased",  # set parameters with different default values
        layers: str = "-1",
        layer_mean: bool = False,
        is_token_embedding: bool = False,
        **kwargs,
    ) -> None:
        """Bidirectional transformer embeddings of words from various transformer architectures.

        :param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
        options)
        :param layers: string indicating which layers to take for embedding (-1 is topmost layer)
        :param cls_pooling: Pooling strategy for combining token level embeddings. options are 'cls', 'max', 'mean'.
        :param layer_mean: If True, uses a scalar mix of layers as embedding
        :param fine_tune: If True, allows transformers to be fine-tuned during training
        """
        # Delegate everything to TransformerEmbeddings; is_document_embedding is
        # always True for this class.
        TransformerEmbeddings.__init__(
            self,
            model=model,
            layers=layers,
            layer_mean=layer_mean,
            is_token_embedding=is_token_embedding,
            is_document_embedding=True,
            **kwargs,
        )

    @classmethod
    def create_from_state(cls, **state):
        """Re-create the embedding from serialized constructor arguments."""
        # this parameter is fixed
        del state["is_document_embedding"]
        return cls(**state)
@register_embeddings
class DocumentPoolEmbeddings(DocumentEmbeddings):
    """Document embeddings obtained by pooling (mean/max/min) token embeddings."""

    def __init__(
        self,
        embeddings: Union[TokenEmbeddings, List[TokenEmbeddings]],
        fine_tune_mode: str = "none",
        pooling: str = "mean",
    ) -> None:
        """The constructor takes a list of embeddings to be combined.

        :param embeddings: a list of token embeddings
        :param fine_tune_mode: if set to "linear" a trainable layer is added, if set to
        "nonlinear", a nonlinearity is added as well. Set this to make the pooling trainable.
        :param pooling: a string which can any value from ['mean', 'max', 'min']
        :raises ValueError: if ``pooling`` is not one of 'mean', 'max', 'min'
        """
        super().__init__()
        if isinstance(embeddings, StackedEmbeddings):
            embeddings = embeddings.embeddings
        elif isinstance(embeddings, TokenEmbeddings):
            embeddings = [embeddings]
        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
        self.__embedding_length = self.embeddings.embedding_length
        # optional fine-tuning on top of embedding layer
        self.fine_tune_mode = fine_tune_mode
        if self.fine_tune_mode in ["nonlinear", "linear"]:
            # Linear map initialized to the identity so training starts from plain pooling.
            self.embedding_flex = torch.nn.Linear(self.embedding_length, self.embedding_length, bias=False)
            self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))
        if self.fine_tune_mode in ["nonlinear"]:
            self.embedding_flex_nonlinear = torch.nn.ReLU()
            self.embedding_flex_nonlinear_map = torch.nn.Linear(self.embedding_length, self.embedding_length)
            self.__embedding_length = self.embeddings.embedding_length
        self.to(flair.device)
        if pooling not in ["min", "max", "mean"]:
            # BUGFIX: this previously formatted `self.mode`, an attribute that is
            # never set, so an invalid pooling raised AttributeError instead of
            # the intended ValueError.
            raise ValueError(f"Pooling operation for {pooling!r} is not defined")
        self.pooling = pooling
        self.name: str = f"document_{self.pooling}"
        self.eval()

    @property
    def embedding_length(self) -> int:
        """Length of the pooled document embedding vector."""
        return self.__embedding_length

    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add embeddings to every sentence in the given list of sentences.

        If embeddings are already added, updates only if embeddings are non-static.
        """
        # if only one sentence is passed, convert to list of sentence
        if isinstance(sentences, Sentence):
            sentences = [sentences]
        self.embeddings.embed(sentences)
        for sentence in sentences:
            # Stack all token vectors into one (num_tokens, dim) tensor.
            word_embeddings = torch.cat([token.get_embedding().unsqueeze(0) for token in sentence.tokens], dim=0).to(
                flair.device
            )
            if self.fine_tune_mode in ["nonlinear", "linear"]:
                word_embeddings = self.embedding_flex(word_embeddings)
            if self.fine_tune_mode in ["nonlinear"]:
                word_embeddings = self.embedding_flex_nonlinear(word_embeddings)
                word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)
            # Pool over the token dimension.
            if self.pooling == "mean":
                pooled_embedding = torch.mean(word_embeddings, 0)
            elif self.pooling == "max":
                pooled_embedding, _ = torch.max(word_embeddings, 0)
            elif self.pooling == "min":
                pooled_embedding, _ = torch.min(word_embeddings, 0)
            sentence.set_embedding(self.name, pooled_embedding)

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # Embedding happens eagerly in embed(); nothing to do here.
        pass

    def extra_repr(self):
        """Extra info shown in the module's repr."""
        return f"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}"

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentPoolEmbeddings":
        """Re-create the embedding from a serialized parameter dict."""
        embeddings = cast(StackedEmbeddings, load_embeddings(params.pop("embeddings"))).embeddings
        return cls(embeddings=embeddings, **params)

    def to_params(self) -> Dict[str, Any]:
        """Serialize constructor arguments (sub-embedding weights excluded)."""
        return {
            "pooling": self.pooling,
            "fine_tune_mode": self.fine_tune_mode,
            "embeddings": self.embeddings.save_embeddings(False),
        }
@register_embeddings
class DocumentTFIDFEmbeddings(DocumentEmbeddings):
    """Document embeddings given by a (sparse) TF-IDF vector over the vocabulary."""

    def __init__(
        self,
        train_dataset: List[Sentence],
        vectorizer: Optional[TfidfVectorizer] = None,
        **vectorizer_params,
    ) -> None:
        """The constructor for DocumentTFIDFEmbeddings.

        :param train_dataset: the train dataset which will be used to construct a vectorizer
        :param vectorizer: an already-fitted TfidfVectorizer; mutually exclusive with ``train_dataset``
        :param vectorizer_params: parameters given to Scikit-learn's TfidfVectorizer constructor
        :raises ValueError: if both a fitted vectorizer and a non-empty dataset are given
        """
        super().__init__()
        import numpy as np

        if vectorizer is not None:
            self.vectorizer = vectorizer
            if len(train_dataset) > 0:
                raise ValueError("Cannot initialize document tfidf embeddings with a vectorizer and with a dataset")
        else:
            self.vectorizer = TfidfVectorizer(dtype=np.float32, **vectorizer_params)
            self.vectorizer.fit([s.to_original_text() for s in train_dataset])
        # Embedding length equals the fitted vocabulary size.
        self.__embedding_length: int = len(self.vectorizer.vocabulary_)
        self.to(flair.device)
        self.name: str = "document_tfidf"
        self.eval()

    @property
    def embedding_length(self) -> int:
        """Length of the embedding vector (vocabulary size)."""
        return self.__embedding_length

    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add embeddings to every sentence in the given list of sentences."""
        # if only one sentence is passed, convert to list of sentence
        if isinstance(sentences, Sentence):
            sentences = [sentences]
        raw_sentences = [s.to_original_text() for s in sentences]
        # FIX: use toarray() to densify the sparse matrix; the `.A` shortcut is
        # deprecated/removed for SciPy sparse types in recent SciPy versions.
        tfidf_vectors = torch.from_numpy(self.vectorizer.transform(raw_sentences).toarray())
        for sentence_id, sentence in enumerate(sentences):
            sentence.set_embedding(self.name, tfidf_vectors[sentence_id])

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # Embedding happens eagerly in embed(); nothing to do here.
        pass

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentTFIDFEmbeddings":
        """Re-create the embedding from a serialized (pickled) vectorizer."""
        return cls(train_dataset=[], vectorizer=params["vectorizer"])

    def to_params(self) -> Dict[str, Any]:
        """Serialize the fitted vectorizer (picklable)."""
        return {
            "vectorizer": self.vectorizer,
        }
@register_embeddings
class DocumentRNNEmbeddings(DocumentEmbeddings):
    """Document embeddings produced by running an RNN (GRU or LSTM) over token embeddings.

    The sentence embedding is the RNN output at the last real token; for a
    bidirectional RNN, the outputs at the first and last token are concatenated.
    """

    def __init__(
        self,
        embeddings: List[TokenEmbeddings],
        hidden_size=128,
        rnn_layers=1,
        reproject_words: bool = True,
        reproject_words_dimension: Optional[int] = None,
        bidirectional: bool = False,
        dropout: float = 0.5,
        word_dropout: float = 0.0,
        locked_dropout: float = 0.0,
        rnn_type="GRU",
        fine_tune: bool = True,
    ) -> None:
        """Instantiates an RNN that works upon some token embeddings.
        :param embeddings: a list of token embeddings
        :param hidden_size: the number of hidden states in the rnn
        :param rnn_layers: the number of layers for the rnn
        :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
        layer before putting them into the rnn or not
        :param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
        dimension as before will be taken.
        :param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not
        :param dropout: the dropout value to be used
        :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
        :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
        :param rnn_type: 'GRU' or 'LSTM'
        """
        super().__init__()
        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
        self.rnn_type = rnn_type
        self.reproject_words = reproject_words
        self.bidirectional = bidirectional
        self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
        # Embeddings only need recomputing each epoch when the model is fine-tuned.
        self.static_embeddings = not fine_tune
        self.__embedding_length: int = hidden_size
        if self.bidirectional:
            # Factor 4: each direction contributes hidden_size per output state, and
            # _add_embeddings_internal concatenates two output states (first + last).
            self.__embedding_length *= 4
        self.embeddings_dimension: int = self.length_of_all_token_embeddings
        if self.reproject_words and reproject_words_dimension is not None:
            self.embeddings_dimension = reproject_words_dimension
        self.word_reprojection_map = torch.nn.Linear(self.length_of_all_token_embeddings, self.embeddings_dimension)
        # bidirectional RNN on top of embedding layer
        if rnn_type == "LSTM":
            self.rnn: RNNBase = torch.nn.LSTM(
                self.embeddings_dimension,
                hidden_size,
                num_layers=rnn_layers,
                bidirectional=self.bidirectional,
                batch_first=True,
            )
        else:
            self.rnn = torch.nn.GRU(
                self.embeddings_dimension,
                hidden_size,
                num_layers=rnn_layers,
                bidirectional=self.bidirectional,
                batch_first=True,
            )
        self.name = "document_" + self.rnn._get_name()
        # dropouts
        self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None
        self.locked_dropout = LockedDropout(locked_dropout) if locked_dropout > 0.0 else None
        self.word_dropout = WordDropout(word_dropout) if word_dropout > 0.0 else None
        torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
        self.to(flair.device)
        self.eval()

    @property
    def embedding_length(self) -> int:
        """Length of the document embedding (4x hidden size if bidirectional)."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        """Add embeddings to all sentences in the given list of sentences.
        If embeddings are already added, update only if embeddings are non-static.
        """
        # TODO: remove in future versions
        if not hasattr(self, "locked_dropout"):
            self.locked_dropout = None
        if not hasattr(self, "word_dropout"):
            self.word_dropout = None
        self.rnn.zero_grad()
        # embed words in the sentence
        self.embeddings.embed(sentences)
        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        longest_token_sequence_in_batch: int = max(lengths)
        # Shared zero tensor that is sliced for padding, avoiding one allocation per sentence.
        pre_allocated_zero_tensor = torch.zeros(
            self.embeddings.embedding_length * longest_token_sequence_in_batch,
            dtype=torch.float,
            device=flair.device,
        )
        all_embs: List[torch.Tensor] = []
        for sentence in sentences:
            all_embs += [emb for token in sentence for emb in token.get_each_embedding()]
            nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)
            if nb_padding_tokens > 0:
                t = pre_allocated_zero_tensor[: self.embeddings.embedding_length * nb_padding_tokens]
                all_embs.append(t)
        # Reshape to (batch, max_seq_len, token_embedding_dim).
        sentence_tensor = torch.cat(all_embs).view(
            [
                len(sentences),
                longest_token_sequence_in_batch,
                self.embeddings.embedding_length,
            ]
        )
        # before-RNN dropout
        if self.dropout:
            sentence_tensor = self.dropout(sentence_tensor)
        if self.locked_dropout:
            sentence_tensor = self.locked_dropout(sentence_tensor)
        if self.word_dropout:
            sentence_tensor = self.word_dropout(sentence_tensor)
        # reproject if set
        if self.reproject_words:
            sentence_tensor = self.word_reprojection_map(sentence_tensor)
        # push through RNN
        packed = pack_padded_sequence(sentence_tensor, lengths, enforce_sorted=False, batch_first=True)  # type: ignore[arg-type]
        rnn_out, hidden = self.rnn(packed)
        outputs, output_lengths = pad_packed_sequence(rnn_out, batch_first=True)
        # after-RNN dropout
        if self.dropout:
            outputs = self.dropout(outputs)
        if self.locked_dropout:
            outputs = self.locked_dropout(outputs)
        # extract embeddings from RNN
        for sentence_no, length in enumerate(lengths):
            # Output at the last real (non-padding) token.
            last_rep = outputs[sentence_no, length - 1]
            embedding = last_rep
            if self.bidirectional:
                # Also take the output at the first token (end of the backward pass).
                first_rep = outputs[sentence_no, 0]
                embedding = torch.cat([first_rep, last_rep], 0)
            if self.static_embeddings:
                embedding = embedding.detach()
            sentence = sentences[sentence_no]
            sentence.set_embedding(self.name, embedding)

    def _apply(self, fn):
        """Apply ``fn`` to children, first patching RNNs saved with torch < 1.4.0."""
        # models that were serialized using torch versions older than 1.4.0 lack the _flat_weights_names attribute
        # check if this is the case and if so, set it
        for child_module in self.children():
            if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, "_flat_weights_names"):
                _flat_weights_names = []
                num_direction = 2 if child_module.__dict__["bidirectional"] else 1
                for layer in range(child_module.__dict__["num_layers"]):
                    for direction in range(num_direction):
                        suffix = "_reverse" if direction == 1 else ""
                        param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
                        if child_module.__dict__["bias"]:
                            param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
                        param_names = [x.format(layer, suffix) for x in param_names]
                        _flat_weights_names.extend(param_names)
                child_module._flat_weights_names = _flat_weights_names
            child_module._apply(fn)

    def to_params(self):
        """Serialize constructor arguments (sub-embedding weights excluded)."""
        # serialize the language models and the constructor arguments (but nothing else)
        model_state = {
            "embeddings": self.embeddings.save_embeddings(False),
            "hidden_size": self.rnn.hidden_size,
            "rnn_layers": self.rnn.num_layers,
            "reproject_words": self.reproject_words,
            "reproject_words_dimension": self.embeddings_dimension,
            "bidirectional": self.bidirectional,
            "dropout": self.dropout.p if self.dropout is not None else 0.0,
            "word_dropout": self.word_dropout.p if self.word_dropout is not None else 0.0,
            "locked_dropout": self.locked_dropout.p if self.locked_dropout is not None else 0.0,
            "rnn_type": self.rnn_type,
            "fine_tune": not self.static_embeddings,
        }
        return model_state

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentRNNEmbeddings":
        """Re-create the embedding from a serialized parameter dict."""
        stacked_embeddings = load_embeddings(params["embeddings"])
        assert isinstance(stacked_embeddings, StackedEmbeddings)
        return cls(
            embeddings=stacked_embeddings.embeddings,
            hidden_size=params["hidden_size"],
            rnn_layers=params["rnn_layers"],
            reproject_words=params["reproject_words"],
            reproject_words_dimension=params["reproject_words_dimension"],
            bidirectional=params["bidirectional"],
            dropout=params["dropout"],
            word_dropout=params["word_dropout"],
            locked_dropout=params["locked_dropout"],
            rnn_type=params["rnn_type"],
            fine_tune=params["fine_tune"],
        )

    def __setstate__(self, d):
        """Deserialize by rebuilding the module from its saved constructor args."""
        # re-initialize language model with constructor arguments
        language_model = DocumentRNNEmbeddings(
            embeddings=d["embeddings"],
            hidden_size=d["hidden_size"],
            rnn_layers=d["rnn_layers"],
            reproject_words=d["reproject_words"],
            reproject_words_dimension=d["reproject_words_dimension"],
            bidirectional=d["bidirectional"],
            dropout=d["dropout"],
            word_dropout=d["word_dropout"],
            locked_dropout=d["locked_dropout"],
            rnn_type=d["rnn_type"],
            fine_tune=d["fine_tune"],
        )
        # special handling for deserializing language models
        if "state_dict" in d:
            language_model.load_state_dict(d["state_dict"])
        # copy over state dictionary to self
        for key in language_model.__dict__:
            self.__dict__[key] = language_model.__dict__[key]
        # set the language model to eval() by default (this is necessary since FlairEmbeddings "protect" the LM
        # in their "self.train()" method)
        self.eval()
@register_embeddings
class DocumentLMEmbeddings(DocumentEmbeddings):
    """Document embeddings taken from the final hidden states of Flair language models."""

    def __init__(self, flair_embeddings: List[FlairEmbeddings]) -> None:
        """Initialize from a list of FlairEmbeddings (character language models).

        :param flair_embeddings: the character LMs whose end states are concatenated
        """
        super().__init__()
        self.embeddings = flair_embeddings
        self.name = "document_lm"
        # IMPORTANT: add embeddings as torch modules
        for i, embedding in enumerate(flair_embeddings):
            self.add_module(f"lm_embedding_{i}", embedding)
            # NOTE(review): static_embeddings is only ever set to False here; the
            # base class default is also False, so the branch is effectively a no-op.
            if not embedding.static_embeddings:
                self.static_embeddings = False
        self._embedding_length: int = sum(embedding.embedding_length for embedding in flair_embeddings)
        self.eval()

    @property
    def embedding_length(self) -> int:
        """Sum of the embedding lengths of all contained language models."""
        return self._embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        for embedding in self.embeddings:
            embedding.embed(sentences)
            # iterate over sentences
            for sentence in sentences:
                # if its a forward LM, take last state
                if embedding.is_forward_lm:
                    sentence.set_embedding(
                        embedding.name,
                        sentence[len(sentence) - 1]._embeddings[embedding.name],
                    )
                else:
                    sentence.set_embedding(embedding.name, sentence[0]._embeddings[embedding.name])
        return sentences

    def get_names(self) -> List[str]:
        """Names of all contained embeddings (computed once and cached).

        BUGFIX: the previous check ``"__names" not in self.__dict__`` could never
        succeed, because ``self.__names`` is name-mangled to
        ``_DocumentLMEmbeddings__names`` in the instance dict — so the cache was
        rebuilt on every call. Using a non-mangled attribute makes caching work.
        """
        if not hasattr(self, "_names"):
            self._names = [name for embedding in self.embeddings for name in embedding.get_names()]
        return self._names

    def to_params(self) -> Dict[str, Any]:
        """Serialize the contained embeddings (weights excluded)."""
        return {"flair_embeddings": [embedding.save_embeddings(False) for embedding in self.embeddings]}

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentLMEmbeddings":
        """Re-create the embedding from serialized sub-embeddings."""
        return cls([cast(FlairEmbeddings, load_embeddings(embedding)) for embedding in params["flair_embeddings"]])
@register_embeddings
class SentenceTransformerDocumentEmbeddings(DocumentEmbeddings):
    """Document embeddings computed with the sentence-transformers library."""

    def __init__(
        self,
        model: str = "bert-base-nli-mean-tokens",
        batch_size: int = 1,
    ) -> None:
        """Instantiates a document embedding using the SentenceTransformer Embeddings.

        :param model: string name of models from SentencesTransformer Class
        :param batch_size: int number of sentences to processed in one batch
        :raises ModuleNotFoundError: if the sentence-transformers package is not installed
        """
        super().__init__()
        try:
            from sentence_transformers import SentenceTransformer
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "sentence-transformers" is not installed!')
            log.warning('To use Sentence Transformers, please first install with "pip install sentence-transformers"')
            log.warning("-" * 100)
            # FIX: re-raise instead of `pass` — previously execution continued and
            # crashed below with an unrelated NameError on SentenceTransformer.
            raise
        self.model_name = model
        self.model = SentenceTransformer(
            model, cache_folder=str(flair.cache_root / "embeddings" / "sentence-transformer")
        )
        self.name = "sentence-transformers-" + str(model)
        self.batch_size = batch_size
        self.static_embeddings = True
        self.eval()

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        # Split into batches of self.batch_size and embed batch by batch.
        sentence_batches = [
            sentences[i * self.batch_size : (i + 1) * self.batch_size]
            for i in range((len(sentences) + self.batch_size - 1) // self.batch_size)
        ]
        for batch in sentence_batches:
            self._add_embeddings_to_sentences(batch)
        return sentences

    def _add_embeddings_to_sentences(self, sentences: List[Sentence]):
        """Encode one batch of sentences and attach the resulting vectors."""
        # convert to plain strings, embedded in a list for the encode function
        sentences_plain_text = [sentence.to_plain_string() for sentence in sentences]
        embeddings = self.model.encode(sentences_plain_text, convert_to_numpy=False)
        for sentence, embedding in zip(sentences, embeddings):
            sentence.set_embedding(self.name, embedding)

    @property
    def embedding_length(self) -> int:
        """Returns the length of the embedding vector."""
        return self.model.get_sentence_embedding_dimension()

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "SentenceTransformerDocumentEmbeddings":
        """Re-create the embedding from serialized constructor arguments."""
        return cls(**params)

    def to_params(self) -> Dict[str, Any]:
        """Serialize constructor arguments (model is reloaded by name)."""
        return {
            "model": self.model_name,
            "batch_size": self.batch_size,
        }
@register_embeddings
class DocumentCNNEmbeddings(DocumentEmbeddings):
    """Document embeddings produced by 1D convolutions over token embeddings.

    Each (num_kernels, kernel_size) pair contributes num_kernels max-pooled
    features; the document embedding is their concatenation.
    """

    def __init__(
        self,
        embeddings: List[TokenEmbeddings],
        kernels=((100, 3), (100, 4), (100, 5)),
        reproject_words: bool = True,
        reproject_words_dimension: Optional[int] = None,
        dropout: float = 0.5,
        word_dropout: float = 0.0,
        locked_dropout: float = 0.0,
        fine_tune: bool = True,
    ) -> None:
        """Instantiates a CNN that works uppons some token embeddings.
        :param embeddings: a list of token embeddings
        :param kernels: list of (number of kernels, kernel size)
        :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
        layer before putting them into the rnn or not
        :param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
        dimension as before will be taken.
        :param dropout: the dropout value to be used
        :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
        :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
        """
        super().__init__()
        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
        self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
        self.kernels = kernels
        self.reproject_words = reproject_words
        # Embeddings only need recomputing each epoch when the model is fine-tuned.
        self.static_embeddings = not fine_tune
        self.embeddings_dimension: int = self.length_of_all_token_embeddings
        if self.reproject_words and reproject_words_dimension is not None:
            self.embeddings_dimension = reproject_words_dimension
        if self.reproject_words:
            self.word_reprojection_map: Optional[torch.nn.Linear] = torch.nn.Linear(
                self.length_of_all_token_embeddings, self.embeddings_dimension
            )
            torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
        else:
            self.word_reprojection_map = None
        # CNN
        self.__embedding_length: int = sum([kernel_num for kernel_num, kernel_size in self.kernels])
        self.convs = torch.nn.ModuleList(
            [
                torch.nn.Conv1d(self.embeddings_dimension, kernel_num, kernel_size)
                for kernel_num, kernel_size in self.kernels
            ]
        )
        self.pool = torch.nn.AdaptiveMaxPool1d(1)
        self.name = "document_cnn"
        # dropouts
        self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None
        self.locked_dropout = LockedDropout(locked_dropout) if locked_dropout > 0.0 else None
        self.word_dropout = WordDropout(word_dropout) if word_dropout > 0.0 else None
        self.to(flair.device)
        # Sentences shorter than the widest kernel are padded up to this length.
        self.min_sequence_length = max(kernel_size for _, kernel_size in self.kernels)
        self.eval()

    @property
    def embedding_length(self) -> int:
        """Length of the document embedding (sum of kernel counts)."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        """Add embeddings to all sentences in the given list of sentences.
        If embeddings are already added, update only if embeddings are non-static.
        """
        # TODO: remove in future versions
        if not hasattr(self, "locked_dropout"):
            self.locked_dropout = None
        if not hasattr(self, "word_dropout"):
            self.word_dropout = None
        self.zero_grad()  # is it necessary?
        # embed words in the sentence
        self.embeddings.embed(sentences)
        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        padding_length: int = max(max(lengths), self.min_sequence_length)
        # Shared zero tensor that is sliced for padding, avoiding one allocation per sentence.
        pre_allocated_zero_tensor = torch.zeros(
            self.embeddings.embedding_length * padding_length,
            dtype=torch.float,
            device=flair.device,
        )
        all_embs: List[torch.Tensor] = []
        for sentence in sentences:
            all_embs += [emb for token in sentence for emb in token.get_each_embedding()]
            nb_padding_tokens = padding_length - len(sentence)
            if nb_padding_tokens > 0:
                t = pre_allocated_zero_tensor[: self.embeddings.embedding_length * nb_padding_tokens]
                all_embs.append(t)
        # Reshape to (batch, padding_length, token_embedding_dim).
        sentence_tensor = torch.cat(all_embs).view(
            [
                len(sentences),
                padding_length,
                self.embeddings.embedding_length,
            ]
        )
        # before-RNN dropout
        if self.dropout:
            sentence_tensor = self.dropout(sentence_tensor)
        if self.locked_dropout:
            sentence_tensor = self.locked_dropout(sentence_tensor)
        if self.word_dropout:
            sentence_tensor = self.word_dropout(sentence_tensor)
        # reproject if set
        if self.word_reprojection_map is not None:
            sentence_tensor = self.word_reprojection_map(sentence_tensor)
        # push CNN
        x = sentence_tensor
        # Conv1d expects (batch, channels, seq_len).
        x = x.permute(0, 2, 1)
        rep = [self.pool(torch.nn.functional.relu(conv(x))) for conv in self.convs]
        outputs = torch.cat(rep, 1)
        outputs = outputs.reshape(outputs.size(0), -1)
        # after-CNN dropout
        if self.dropout:
            outputs = self.dropout(outputs)
        if self.locked_dropout:
            outputs = self.locked_dropout(outputs)
        # extract embeddings from CNN
        for sentence_no, _length in enumerate(lengths):
            embedding = outputs[sentence_no]
            if self.static_embeddings:
                embedding = embedding.detach()
            sentence = sentences[sentence_no]
            sentence.set_embedding(self.name, embedding)

    def _apply(self, fn):
        """Apply ``fn`` to all child modules."""
        for child_module in self.children():
            child_module._apply(fn)

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentCNNEmbeddings":
        """Re-create the embedding from a serialized parameter dict."""
        embeddings = cast(StackedEmbeddings, load_embeddings(params.pop("embeddings"))).embeddings
        return cls(embeddings=embeddings, **params)

    def to_params(self) -> Dict[str, Any]:
        """Serialize constructor arguments (sub-embedding weights excluded)."""
        return {
            "embeddings": self.embeddings.save_embeddings(False),
            "kernels": self.kernels,
            "reproject_words": self.reproject_words,
            "reproject_words_dimension": self.embeddings_dimension,
            "dropout": 0.0 if self.dropout is None else self.dropout.p,
            "word_dropout": 0.0 if self.word_dropout is None else self.word_dropout.p,
            # NOTE(review): uses `.dropout_rate` here while DocumentRNNEmbeddings
            # uses `.p` for the same LockedDropout — confirm which attribute exists.
            "locked_dropout": 0.0 if self.locked_dropout is None else self.locked_dropout.dropout_rate,
            "fine_tune": not self.static_embeddings,
        }
| 30,169 | 38.080311 | 129 | py |
flair | flair-master/flair/embeddings/base.py | import inspect
import logging
from abc import abstractmethod
from typing import Any, Dict, Generic, List, Sequence, Type, Union
import torch
from torch.nn import Parameter, ParameterList
import flair
from flair.data import DT, Sentence
log = logging.getLogger("flair")
class Embeddings(torch.nn.Module, Generic[DT]):
    """Abstract base class for all embeddings. Every new type of embedding must implement these methods."""

    embeddings_name: str  # class-variable referring to the "class embedding name"

    def __init__(self) -> None:
        """Set some attributes that would otherwise result in errors. Overwrite these in your embedding class."""
        if not hasattr(self, "name"):
            self.name: str = "unnamed_embedding"
        if not hasattr(self, "static_embeddings"):
            # if the embeddings for a sentence are the same in each epoch, set this to True for improved efficiency
            self.static_embeddings = False
        super().__init__()

    @property
    @abstractmethod
    def embedding_length(self) -> int:
        """Returns the length of the embedding vector."""
        raise NotImplementedError

    @property
    @abstractmethod
    def embedding_type(self) -> str:
        """Returns whether this embedding is word-level or sentence-level."""
        raise NotImplementedError

    def embed(self, data_points: Union[DT, List[DT]]) -> List[DT]:
        """Add embeddings to all words in a list of sentences.
        If embeddings are already added, updates only if embeddings are non-static.
        """
        # if only one sentence is passed, convert to list of sentence
        if not isinstance(data_points, list):
            data_points = [data_points]
        # Skip the (potentially expensive) internal call if everything is embedded.
        if not self._everything_embedded(data_points):
            self._add_embeddings_internal(data_points)
        return data_points

    def _everything_embedded(self, data_points: Sequence[DT]) -> bool:
        """True iff every data point already carries this embedding."""
        return all(self.name in data_point._embeddings for data_point in data_points)

    @abstractmethod
    def _add_embeddings_internal(self, sentences: List[DT]):
        """Private method for adding embeddings to all words in a list of sentences."""

    def get_names(self) -> List[str]:
        """Returns a list of embedding names.
        In most cases, it is just a list with one item, namely the name of
        this embedding. But in some cases, the embedding is made up by different embeddings (StackedEmbedding).
        Then, the list contains the names of all embeddings in the stack.
        """
        return [self.name]

    def get_named_embeddings_dict(self) -> Dict:
        """Maps this embedding's name to the embedding itself."""
        return {self.name: self}

    @staticmethod
    def get_instance_parameters(locals: dict) -> dict:
        """Extract from a constructor's ``locals()`` the values of its declared parameters."""
        class_definition = locals.get("__class__")
        instance_parameter_names = set(inspect.signature(class_definition.__init__).parameters)  # type: ignore[misc]
        instance_parameter_names.remove("self")
        instance_parameter_names.add("__class__")
        instance_parameters = {
            class_attribute: attribute_value
            for class_attribute, attribute_value in locals.items()
            if class_attribute in instance_parameter_names
        }
        return instance_parameters

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "Embeddings":
        """Re-create an embedding from its serialized parameters. Overwrite in subclasses."""
        raise NotImplementedError

    def to_params(self) -> Dict[str, Any]:
        """Serialize this embedding's constructor parameters. Overwrite in subclasses."""
        raise NotImplementedError

    @classmethod
    def load_embedding(cls, params: Dict[str, Any]):
        """Construct via from_params and, if present, restore the saved state dict."""
        state_dict = params.pop("state_dict", None)
        embedding = cls.from_params(params)
        if state_dict is not None:
            embedding.load_state_dict(state_dict)
        return embedding

    def save_embeddings(self, use_state_dict: bool = True):
        """Serialize parameters (and optionally weights) plus the registry class name."""
        params = self.to_params()
        if use_state_dict:
            params["state_dict"] = self.state_dict()
        params["__cls__"] = type(self).embeddings_name
        return params
class ScalarMix(torch.nn.Module):
    """Mixes several tensors by a learned weighting.
    Computes a parameterised scalar mixture of N tensors.
    This method was proposed by Liu et al. (2019) in the paper:
    "Linguistic Knowledge and Transferability of Contextual Representations" (https://arxiv.org/abs/1903.08855)
    The implementation is copied and slightly modified from the allennlp repository and is licensed under Apache 2.0.
    It can be found under:
    https://github.com/allenai/allennlp/blob/master/allennlp/modules/scalar_mix.py.
    """

    def __init__(self, mixture_size: int, trainable: bool = False) -> None:
        """Inits scalar mix implementation.
        ``mixture = gamma * sum(s_k * tensor_k)`` where ``s = softmax(w)``, with ``w`` and ``gamma`` scalar parameters.
        :param mixture_size: size of mixtures (usually the number of layers)
        :param trainable: if True, the mixture weights and gamma are learned
        """
        super().__init__()
        self.mixture_size = mixture_size
        # Equal initial weights: softmax over zeros yields a uniform mixture.
        initial_scalar_parameters = [0.0] * mixture_size
        self.scalar_parameters = ParameterList(
            [
                Parameter(
                    torch.tensor(
                        [initial_scalar_parameters[i]],
                        dtype=torch.float,
                        device=flair.device,
                    ),
                    requires_grad=trainable,
                )
                for i in range(mixture_size)
            ]
        )
        # Global scale factor applied after the weighted sum.
        self.gamma = Parameter(
            torch.tensor(
                [1.0],
                dtype=torch.float,
                device=flair.device,
            ),
            requires_grad=trainable,
        )

    def forward(self, tensors: List[torch.Tensor]) -> torch.Tensor:
        """Forward pass of scalar mix.
        Computes a weighted average of the ``tensors``. The input tensors an be any shape
        with at least two dimensions, but must all be the same shape.
        :param tensors: list of input tensors
        :return: computed weighted average of input tensors
        """
        if len(tensors) != self.mixture_size:
            # Mismatch is only logged, not raised; the mix proceeds with zip-truncation.
            log.error(
                "{} tensors were passed, but the module was initialized to mix {} tensors.".format(
                    len(tensors), self.mixture_size
                )
            )
        normed_weights = torch.nn.functional.softmax(torch.cat(list(self.scalar_parameters)), dim=0)
        normed_weights_split = torch.split(normed_weights, split_size_or_sections=1)
        pieces = []
        for weight, tensor in zip(normed_weights_split, tensors):
            pieces.append(weight * tensor)
        return self.gamma * sum(pieces)
class DocumentEmbeddings(Embeddings[Sentence]):
    """Abstract base class for all document-level embeddings.

    Every new type of document embedding must implement these methods.
    """

    @property
    def embedding_type(self) -> str:
        # Document embeddings attach a single vector to the whole sentence.
        return "sentence-level"
class TokenEmbeddings(Embeddings[Sentence]):
    """Abstract base class for all token-level embeddings.

    Every new type of word embedding must implement these methods.
    """

    @property
    def embedding_type(self) -> str:
        # Token embeddings attach one vector to each token of a sentence.
        return "word-level"

    def _everything_embedded(self, data_points: Sequence[Sentence]) -> bool:
        """Return True if every token of every sentence already carries this embedding."""
        return all(
            self.name in token._embeddings
            for sentence in data_points
            for token in sentence.tokens
        )
# Registry of all embedding classes, keyed by their registration name.
# Populated by the @register_embeddings decorator and consumed by load_embeddings().
EMBEDDING_CLASSES: Dict[str, Type[Embeddings]] = {}
def register_embeddings(*args):
    """Class decorator that adds an embedding class to the EMBEDDING_CLASSES registry.

    Supports both bare usage (``@register_embeddings``), which registers the class
    under its ``__name__``, and parameterized usage (``@register_embeddings("Name")``),
    which registers it under the given name. The chosen name is also stored on the
    class as ``embeddings_name``.
    """
    registry_name = None

    def _register(cls):
        nonlocal registry_name
        if registry_name is None:
            registry_name = cls.__name__
        cls.embeddings_name = registry_name
        EMBEDDING_CLASSES[registry_name] = cls
        return cls

    # Bare form: the single positional argument is the class itself.
    if len(args) == 1 and callable(args[0]):
        return _register(args[0])
    # Parameterized form: first argument (if any) is the registry name.
    if args:
        registry_name = args[0]
    return _register
def load_embeddings(params: Dict[str, Any]) -> Embeddings:
    """Re-instantiate an embedding from its serialized parameter dict.

    The dict must carry a "__cls__" entry naming a registered embedding class;
    all remaining entries are forwarded to that class's ``load_embedding``.
    """
    class_name = params.pop("__cls__")
    embedding_class = EMBEDDING_CLASSES[class_name]
    return embedding_class.load_embedding(params)
| 7,959 | 33.912281 | 131 | py |
flair | flair-master/flair/embeddings/legacy.py | import logging
import re
from abc import abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import torch
from deprecated import deprecated
from transformers import (
AlbertModel,
AlbertTokenizer,
BertModel,
BertTokenizer,
CamembertModel,
CamembertTokenizer,
GPT2Model,
GPT2Tokenizer,
OpenAIGPTModel,
OpenAIGPTTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaModel,
RobertaTokenizer,
XLMModel,
XLMRobertaModel,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetModel,
XLNetTokenizer,
)
import flair
from flair.data import Sentence, Token
from flair.embeddings.base import ScalarMix
from flair.embeddings.document import DocumentEmbeddings
from flair.embeddings.token import StackedEmbeddings, TokenEmbeddings
from flair.file_utils import cached_path
from flair.nn import LockedDropout, WordDropout
# Module-level logger shared by all legacy embedding implementations below.
log = logging.getLogger("flair")
class ELMoEmbeddings(TokenEmbeddings):
    """Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018.

    ELMo word vectors can be constructed by combining layers in different ways.
    Default is to concatenate the top 3 layers in the LM.
    """
    def __init__(
        self,
        model: str = "original",
        options_file: Optional[str] = None,
        weight_file: Optional[str] = None,
        embedding_mode: str = "all",
    ) -> None:
        """Initializes ELMo embeddings.

        :param model: name of a pre-packaged ELMo model ("original", "small", "medium",
            "large"/"5.5B", "pt"/"portuguese", "pubmed"); ignored when both files are given explicitly
        :param options_file: optional explicit URL/path of an ELMo options JSON file
        :param weight_file: optional explicit URL/path of an ELMo weight HDF5 file
        :param embedding_mode: how the three LM layers are combined into one vector:
            "all" (concatenate), "top" (top layer only) or "average" (element-wise mean)
        """
        super().__init__()
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        try:
            import allennlp.commands.elmo
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "allennlp" is not installed!')
            log.warning('To use ELMoEmbeddings, please first install with "pip install allennlp==0.9.0"')
            log.warning("-" * 100)
            # NOTE(review): execution deliberately continues after the warning, so a
            # missing allennlp surfaces below as a NameError rather than an ImportError.
            pass
        assert embedding_mode in ["all", "top", "average"]
        self.name = f"elmo-{model}-{embedding_mode}"
        self.static_embeddings = True
        if not options_file or not weight_file:
            # the default model for ELMo is the 'original' model, which is very large
            options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE
            weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE
            # alternatively, a small, medium or portuguese model can be selected by passing the appropriate mode name
            if model == "small":
                options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json"
                weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"
            if model == "medium":
                options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json"
                weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
            if model in ["large", "5.5B"]:
                options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json"
                weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"
            if model == "pt" or model == "portuguese":
                options_file = (
                    "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json"
                )
                weight_file = (
                    "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5"
                )
            if model == "pubmed":
                options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json"
                weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5"
        # select the layer-combination strategy
        if embedding_mode == "all":
            self.embedding_mode_fn = self.use_layers_all
        elif embedding_mode == "top":
            self.embedding_mode_fn = self.use_layers_top
        elif embedding_mode == "average":
            self.embedding_mode_fn = self.use_layers_average
        # put on Cuda if available
        from flair import device
        if device.type == "cuda":
            cuda_device = device.index
        elif device.type == "cpu":
            cuda_device = -1
        else:
            # any other device type (e.g. unspecified cuda) -> first GPU
            cuda_device = 0
        self.ee = allennlp.commands.elmo.ElmoEmbedder(
            options_file=options_file, weight_file=weight_file, cuda_device=cuda_device
        )
        # embed a dummy sentence to determine embedding_length
        dummy_sentence: Sentence = Sentence([Token("hello")])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0][0].get_embedding())
    @property
    def embedding_length(self) -> int:
        return self.__embedding_length
    def use_layers_all(self, x):
        # concatenate all three LM layers into one long vector
        return torch.cat(x, 0)
    def use_layers_top(self, x):
        # keep only the top LM layer
        return x[-1]
    def use_layers_average(self, x):
        # element-wise mean over the three LM layers
        return torch.mean(torch.stack(x), 0)
    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Embeds all tokens of the given sentences with the ELMo embedder."""
        # ELMoEmbeddings before Release 0.5 did not set self.embedding_mode_fn
        if not getattr(self, "embedding_mode_fn", None):
            self.embedding_mode_fn = self.use_layers_all
        sentence_words: List[List[str]] = []
        for sentence in sentences:
            sentence_words.append([token.text for token in sentence])
        embeddings = self.ee.embed_batch(sentence_words)
        for i, sentence in enumerate(sentences):
            sentence_embeddings = embeddings[i]
            for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
                # one embedding per LM layer (three layers in total)
                elmo_embedding_layers = [
                    torch.FloatTensor(sentence_embeddings[0, token_idx, :]),
                    torch.FloatTensor(sentence_embeddings[1, token_idx, :]),
                    torch.FloatTensor(sentence_embeddings[2, token_idx, :]),
                ]
                word_embedding = self.embedding_mode_fn(elmo_embedding_layers)
                token.set_embedding(self.name, word_embedding)
        return sentences
    def extra_repr(self):
        return f"model={self.name}"
    def __str__(self) -> str:
        return self.name
    def __setstate__(self, state):
        """Restores a pickled embedder and moves its internals to the current flair device."""
        self.__dict__ = state
        if re.fullmatch(r"cuda:[0-9]+", str(flair.device)):
            cuda_device = int(str(flair.device).split(":")[-1])
        elif str(flair.device) == "cpu":
            cuda_device = -1
        else:
            cuda_device = 0
        self.ee.cuda_device = cuda_device
        self.ee.elmo_bilm.to(device=flair.device)
        # move the LSTM's cached hidden states to the new device as well
        self.ee.elmo_bilm._elmo_lstm._states = tuple(
            [state.to(flair.device) for state in self.ee.elmo_bilm._elmo_lstm._states]
        )
class CharLMEmbeddings(TokenEmbeddings):
    """Contextual string embeddings of words, as proposed in Akbik et al., 2018.

    Deprecated legacy class: use 'FlairEmbeddings' instead.
    """

    # Base location of all hosted character language models.
    _RESOURCE_BASE = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources"

    # Canonical model identifier (lower-case) -> download URL.
    _MODEL_URLS = {
        # multilingual models (English, German, French, Italian, Dutch, Polish)
        "multi-forward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-multi-forward-v0.1.pt",
        "multi-backward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-multi-backward-v0.1.pt",
        # English news models
        "news-forward": f"{_RESOURCE_BASE}/embeddings/lm-news-english-forward-v0.2rc.pt",
        "news-backward": f"{_RESOURCE_BASE}/embeddings/lm-news-english-backward-v0.2rc.pt",
        "news-forward-fast": f"{_RESOURCE_BASE}/embeddings/lm-news-english-forward-1024-v0.2rc.pt",
        "news-backward-fast": f"{_RESOURCE_BASE}/embeddings/lm-news-english-backward-1024-v0.2rc.pt",
        # English mixed-corpus models
        "mix-forward": f"{_RESOURCE_BASE}/embeddings/lm-mix-english-forward-v0.2rc.pt",
        "mix-backward": f"{_RESOURCE_BASE}/embeddings/lm-mix-english-backward-v0.2rc.pt",
        # other languages
        "german-forward": f"{_RESOURCE_BASE}/embeddings/lm-mix-german-forward-v0.2rc.pt",
        "german-backward": f"{_RESOURCE_BASE}/embeddings/lm-mix-german-backward-v0.2rc.pt",
        "polish-forward": f"{_RESOURCE_BASE}/embeddings/lm-polish-forward-v0.2.pt",
        "polish-backward": f"{_RESOURCE_BASE}/embeddings/lm-polish-backward-v0.2.pt",
        "slovenian-forward": f"{_RESOURCE_BASE}/embeddings-v0.3/lm-sl-large-forward-v0.1.pt",
        "slovenian-backward": f"{_RESOURCE_BASE}/embeddings-v0.3/lm-sl-large-backward-v0.1.pt",
        "bulgarian-forward": f"{_RESOURCE_BASE}/embeddings-v0.3/lm-bg-small-forward-v0.1.pt",
        "bulgarian-backward": f"{_RESOURCE_BASE}/embeddings-v0.3/lm-bg-small-backward-v0.1.pt",
        "dutch-forward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-nl-large-forward-v0.1.pt",
        "dutch-backward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-nl-large-backward-v0.1.pt",
        "swedish-forward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-sv-large-forward-v0.1.pt",
        "swedish-backward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-sv-large-backward-v0.1.pt",
        "french-forward": f"{_RESOURCE_BASE}/embeddings/lm-fr-charlm-forward.pt",
        "french-backward": f"{_RESOURCE_BASE}/embeddings/lm-fr-charlm-backward.pt",
        "czech-forward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-cs-large-forward-v0.1.pt",
        "czech-backward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-cs-large-backward-v0.1.pt",
        "portuguese-forward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-pt-forward.pt",
        "portuguese-backward": f"{_RESOURCE_BASE}/embeddings-v0.4/lm-pt-backward.pt",
    }

    # Two-letter language-code aliases accepted in place of the canonical names.
    _MODEL_ALIASES = {
        "de-forward": "german-forward",
        "de-backward": "german-backward",
        "pl-forward": "polish-forward",
        "pl-backward": "polish-backward",
        "sl-forward": "slovenian-forward",
        "sl-backward": "slovenian-backward",
        "bg-forward": "bulgarian-forward",
        "bg-backward": "bulgarian-backward",
        "nl-forward": "dutch-forward",
        "nl-backward": "dutch-backward",
        "sv-forward": "swedish-forward",
        "sv-backward": "swedish-backward",
        "fr-forward": "french-forward",
        "fr-backward": "french-backward",
        "cs-forward": "czech-forward",
        "cs-backward": "czech-backward",
        "pt-forward": "portuguese-forward",
        "pt-backward": "portuguese-backward",
    }

    @deprecated(version="0.4", reason="Use 'FlairEmbeddings' instead.")
    def __init__(
        self,
        model: str,
        detach: bool = True,
        use_cache: bool = False,
        cache_directory: Optional[Path] = None,
    ) -> None:
        """Initializes contextual string embeddings using a character-level language model.
        :param model: model string: a key of _MODEL_URLS or _MODEL_ALIASES (e.g. 'news-forward',
            'mix-backward', 'de-forward') or a path to a custom language model file
        :param detach: if set to False, the gradient will propagate into the language model. this dramatically slows down
                training and often leads to worse results, so not recommended.
        :param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will
                not allow re-use of once computed embeddings that do not fit into memory
        :param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache
                is written to the provided directory.
        """
        super().__init__()
        cache_dir = Path("embeddings")
        # resolve pre-packaged model names (and their aliases) to download URLs
        key = self._MODEL_ALIASES.get(model.lower(), model.lower())
        if key in self._MODEL_URLS:
            model = cached_path(self._MODEL_URLS[key], cache_dir=cache_dir)
        elif not Path(model).exists():
            raise ValueError(f'The given model "{model}" is not available or is not a valid path.')
        self.name = str(model)
        self.static_embeddings = detach
        from flair.models import LanguageModel

        self.lm = LanguageModel.load_language_model(model)
        self.detach = detach
        self.is_forward_lm: bool = self.lm.is_forward_lm
        # Number of characters the LM processes per chunk. This attribute was
        # previously never initialized, which made _add_embeddings_internal fail
        # with an AttributeError; 512 matches the FlairEmbeddings default.
        self.chars_per_chunk: int = 512
        # initialize cache if use_cache set
        self.cache = None
        if use_cache:
            cache_path = (
                Path(f"{self.name}-tmp-cache.sqllite")
                if not cache_directory
                else cache_directory / f"{self.name}-tmp-cache.sqllite"
            )
            from sqlitedict import SqliteDict

            self.cache = SqliteDict(str(cache_path), autocommit=True)
        # embed a dummy sentence to determine embedding_length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())
        # set to eval mode
        self.eval()

    def train(self, mode=True):
        # embeddings are static; the train/eval switch is deliberately a no-op
        pass

    def __getstate__(self):
        # Copy the object's state from self.__dict__ which contains
        # all our instance attributes. Always use the dict.copy()
        # method to avoid modifying the original state.
        state = self.__dict__.copy()
        # Remove the unpicklable entries (the sqlite-backed cache).
        state["cache"] = None
        return state

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Computes (or retrieves from cache) character-LM states for each token."""
        # if cache is used, try setting embeddings from cache first
        if "cache" in self.__dict__ and self.cache is not None:
            # try populating embeddings from cache
            all_embeddings_retrieved_from_cache: bool = True
            for sentence in sentences:
                key = sentence.to_tokenized_string()
                embeddings = self.cache.get(key)
                if not embeddings:
                    all_embeddings_retrieved_from_cache = False
                    break
                else:
                    for token, embedding in zip(sentence, embeddings):
                        token.set_embedding(self.name, torch.FloatTensor(embedding))
            if all_embeddings_retrieved_from_cache:
                return sentences
        # if this is not possible, use LM to generate embedding. First, get text sentences
        text_sentences = [sentence.to_tokenized_string() for sentence in sentences]
        start_marker = "\n"
        end_marker = " "
        # get hidden states from language model; fall back to the default chunk
        # size for objects unpickled from before chars_per_chunk existed
        all_hidden_states_in_lm = self.lm.get_representation(
            text_sentences, start_marker, end_marker, getattr(self, "chars_per_chunk", 512)
        )
        # take first or last hidden states from language model as word representation
        for i, sentence in enumerate(sentences):
            sentence_text = sentence.to_tokenized_string()
            offset_forward: int = len(start_marker)
            offset_backward: int = len(sentence_text) + len(start_marker)
            for token in sentence.tokens:
                offset_forward += len(token.text)
                offset = offset_forward if self.is_forward_lm else offset_backward
                embedding = all_hidden_states_in_lm[offset, i, :]
                # if self.tokenized_lm or token.whitespace_after:
                offset_forward += 1
                offset_backward -= 1
                offset_backward -= len(token.text)
                token.set_embedding(self.name, embedding)
        if "cache" in self.__dict__ and self.cache is not None:
            for sentence in sentences:
                self.cache[sentence.to_tokenized_string()] = [
                    token._embeddings[self.name].tolist() for token in sentence
                ]
        return sentences

    def __str__(self) -> str:
        return self.name
class XLNetEmbeddings(TokenEmbeddings):
    """Word embeddings from a pretrained XLNet model (Yang et al., 2019).

    Deprecated: use 'TransformerWordEmbeddings' instead.
    """

    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "xlnet-large-cased",
        layers: str = "1",
        pooling_operation: str = "first_last",
        use_scalar_mix: bool = False,
    ) -> None:
        """Instantiates XLNet embeddings.

        :param pretrained_model_name_or_path: name or path of XLNet model
        :param layers: comma-separated list of layer indices
        :param pooling_operation: pooling operation applied to subword embeddings
        :param use_scalar_mix: whether to combine the chosen layers via a scalar mix
        """
        super().__init__()
        self.tokenizer = XLNetTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = XLNetModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = list(map(int, layers.split(",")))
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a probe sentence once to derive the embedding length
        probe: Sentence = Sentence(["hello"])
        embedded_probe = self.embed(probe)
        self.__embedding_length: int = len(embedded_probe[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        return _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class XLMEmbeddings(TokenEmbeddings):
    """Word embeddings from a pretrained XLM model (Lample & Conneau, 2019).

    Deprecated: use 'TransformerWordEmbeddings' instead.
    """

    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "xlm-mlm-en-2048",
        layers: str = "1",
        pooling_operation: str = "first_last",
        use_scalar_mix: bool = False,
    ) -> None:
        """Instantiates XLM embeddings.

        :param pretrained_model_name_or_path: name or path of XLM model
        :param layers: comma-separated list of layer indices
        :param pooling_operation: pooling operation applied to subword embeddings
        :param use_scalar_mix: whether to combine the chosen layers via a scalar mix
        """
        super().__init__()
        self.tokenizer = XLMTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = XLMModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = list(map(int, layers.split(",")))
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a probe sentence once to derive the embedding length
        probe: Sentence = Sentence(["hello"])
        embedded_probe = self.embed(probe)
        self.__embedding_length: int = len(embedded_probe[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        return _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class OpenAIGPTEmbeddings(TokenEmbeddings):
    """Word embeddings from a pretrained OpenAI GPT model (Radford et al., 2018).

    Deprecated: use 'TransformerWordEmbeddings' instead.
    """

    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "openai-gpt",
        layers: str = "1",
        pooling_operation: str = "first_last",
        use_scalar_mix: bool = False,
    ) -> None:
        """Instantiates OpenAI GPT embeddings.

        :param pretrained_model_name_or_path: name or path of OpenAI GPT model
        :param layers: comma-separated list of layer indices
        :param pooling_operation: pooling operation applied to subword embeddings
        :param use_scalar_mix: whether to combine the chosen layers via a scalar mix
        """
        super().__init__()
        self.tokenizer = OpenAIGPTTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = OpenAIGPTModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = list(map(int, layers.split(",")))
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a probe sentence once to derive the embedding length
        probe: Sentence = Sentence(["hello"])
        embedded_probe = self.embed(probe)
        self.__embedding_length: int = len(embedded_probe[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        # GPT has no special begin/end-of-sentence tokens, so none are passed here.
        return _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
        )

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class OpenAIGPT2Embeddings(TokenEmbeddings):
    """Word embeddings from a pretrained OpenAI GPT-2 model (Radford et al., 2019).

    Deprecated: use 'TransformerWordEmbeddings' instead.
    """

    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "gpt2-medium",
        layers: str = "1",
        pooling_operation: str = "first_last",
        use_scalar_mix: bool = False,
    ) -> None:
        """OpenAI GPT-2 embeddings, as proposed in Radford et al. 2019.
        :param pretrained_model_name_or_path: name or path of OpenAI GPT-2 model
        :param layers: comma-separated list of layers
        :param pooling_operation: defines pooling operation for subwords
        :param use_scalar_mix: defines the usage of scalar mix for specified layer(s).
        """
        super().__init__()
        self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = GPT2Model.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = [int(layer) for layer in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a dummy sentence once to determine the embedding length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        # GPT-2 uses a single special token for both sentence boundaries.
        sentences = _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<|endoftext|>",
            eos_token="<|endoftext|>",
        )
        return sentences

    def extra_repr(self):
        # Added for consistency with the other legacy transformer embedding classes.
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class RoBERTaEmbeddings(TokenEmbeddings):
    """Word embeddings from a pretrained RoBERTa model (Liu et al., 2019).

    Deprecated: use 'TransformerWordEmbeddings' instead.
    """

    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "roberta-base",
        layers: str = "-1",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ) -> None:
        """RoBERTa, as proposed by Liu et al. 2019.
        :param pretrained_model_name_or_path: name or path of RoBERTa model
        :param layers: comma-separated list of layers
        :param pooling_operation: defines pooling operation for subwords
        :param use_scalar_mix: defines the usage of scalar mix for specified layer(s).
        """
        super().__init__()
        self.tokenizer = RobertaTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = RobertaModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = [int(layer) for layer in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a dummy sentence once to determine the embedding length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        sentences = _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )
        return sentences

    def extra_repr(self):
        # Added for consistency with the other legacy transformer embedding classes.
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class CamembertEmbeddings(TokenEmbeddings):
    """Word embeddings from a pretrained CamemBERT model (Martin et al., 2019).

    Deprecated: use 'TransformerWordEmbeddings' instead.
    """

    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "camembert-base",
        layers: str = "-1",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ) -> None:
        """CamemBERT, a Tasty French Language Model, as proposed by Martin et al. 2019.
        :param pretrained_model_name_or_path: name or path of CamemBERT model
        :param layers: comma-separated list of layers
        :param pooling_operation: defines pooling operation for subwords
        :param use_scalar_mix: defines the usage of scalar mix for specified layer(s).
        """
        super().__init__()
        self.tokenizer = CamembertTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = CamembertModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = [int(layer) for layer in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a dummy sentence once to determine the embedding length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())

    def __getstate__(self):
        # The tokenizer cannot be pickled; it is re-created in __setstate__.
        state = self.__dict__.copy()
        state["tokenizer"] = None
        return state

    def __setstate__(self, d):
        super().__setstate__(d)
        # Older serialized embeddings prefixed the model name with an index
        # (e.g. "1-camembert-base"). Strip that prefix only when it is actually
        # present; the previous any(char.isdigit()) check also mangled names
        # that merely contain a digit (e.g. "camembert-v2").
        prefix, _, remainder = self.name.partition("-")
        if prefix.isdigit() and remainder:
            self.tokenizer = CamembertTokenizer.from_pretrained(remainder)
        else:
            self.tokenizer = CamembertTokenizer.from_pretrained(self.name)

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        sentences = _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )
        return sentences

    def extra_repr(self):
        # Added for consistency with the other legacy transformer embedding classes.
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class XLMRobertaEmbeddings(TokenEmbeddings):
    """Word embeddings from a pretrained XLM-RoBERTa model (Conneau et al., 2019).

    Deprecated: use 'TransformerWordEmbeddings' instead.
    """

    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "xlm-roberta-large",
        layers: str = "-1",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ) -> None:
        """XLM-RoBERTa as proposed by Conneau et al. 2019.
        :param pretrained_model_name_or_path: name or path of XLM-R model
        :param layers: comma-separated list of layers
        :param pooling_operation: defines pooling operation for subwords
        :param use_scalar_mix: defines the usage of scalar mix for specified layer(s).
        """
        super().__init__()
        self.tokenizer = XLMRobertaTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = XLMRobertaModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = [int(layer) for layer in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a dummy sentence once to determine the embedding length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())

    def __getstate__(self):
        # The tokenizer cannot be pickled; it is re-created in __setstate__.
        state = self.__dict__.copy()
        state["tokenizer"] = None
        return state

    def __setstate__(self, d):
        super().__setstate__(d)
        # Older serialized embeddings prefixed the model name with an index
        # (e.g. "1-xlm-roberta-large"). The previous code stripped the first
        # dash-segment unconditionally (turning "xlm-roberta-large" into
        # "roberta-large") and contained a duplicated assignment; strip the
        # prefix only when it is actually a number.
        prefix, _, remainder = self.name.partition("-")
        model_name = remainder if prefix.isdigit() and remainder else self.name
        self.tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        sentences = _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )
        return sentences

    def extra_repr(self):
        # Added for consistency with the other legacy transformer embedding classes.
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
def _extract_embeddings(
    hidden_states: List[torch.FloatTensor],
    layers: List[int],
    pooling_operation: str,
    subword_start_idx: int,
    subword_end_idx: int,
    use_scalar_mix: bool = False,
) -> List[torch.FloatTensor]:
    """Extract one pooled subword embedding per requested layer.

    :param hidden_states: list of hidden states from model
    :param layers: list of layers
    :param pooling_operation: pooling operation for subword embeddings
        (supported: first, last, first_last and mean)
    :param subword_start_idx: defines start index for subword
    :param subword_end_idx: defines end index for subword
    :param use_scalar_mix: determines, if scalar mix should be used
    :return: list of extracted subword embeddings.
    """
    per_layer_embeddings: List[torch.FloatTensor] = []

    for layer_index in layers:
        # slice out this token's subword vectors from the selected layer
        span = hidden_states[layer_index][0][subword_start_idx:subword_end_idx]

        if pooling_operation == "first_last":
            pooled = torch.cat([span[0], span[-1]])
        elif pooling_operation == "last":
            pooled = span[-1]
        elif pooling_operation == "mean":
            stacked = torch.cat([vec.unsqueeze(0) for vec in span], dim=0)
            pooled = torch.mean(stacked, dim=0)
        else:
            # default ("first"): use the first subword vector
            pooled = span[0]

        per_layer_embeddings.append(pooled)

    if use_scalar_mix:
        # collapse the per-layer embeddings into a single learned mixture
        mixer = ScalarMix(mixture_size=len(per_layer_embeddings))
        per_layer_embeddings = [mixer(per_layer_embeddings)]

    return per_layer_embeddings
def _build_token_subwords_mapping(sentence: Sentence, tokenizer: PreTrainedTokenizer) -> Tuple[Dict[int, int], str]:
    """Map every token index of a sentence to its number of subwords.

    :param sentence: input sentence
    :param tokenizer: Transformers tokenization object
    :return: dictionary of token index to corresponding number of subwords, tokenized string
    """
    subword_counts: Dict[int, int] = {}
    surface_forms = []

    for token in sentence.tokens:
        pieces = tokenizer.tokenize(token.text)
        # tokens the tokenizer cannot split at all are replaced by the UNK
        # token and counted as a single subword
        surface_forms.append(token.text if pieces else tokenizer.unk_token)
        subword_counts[token.idx] = len(pieces) if pieces else 1

    return subword_counts, " ".join(surface_forms)
def _build_token_subwords_mapping_gpt2(
    sentence: Sentence, tokenizer: PreTrainedTokenizer
) -> Tuple[Dict[int, int], str]:
    """Map every token index of a sentence to its number of subwords (GPT-2/RoBERTa style).

    :param sentence: input sentence
    :param tokenizer: Transformers tokenization object
    :return: dictionary of token index to corresponding number of subwords, tokenized string
    """
    subword_counts: Dict[int, int] = {}
    surface_forms = []

    for token in sentence.tokens:
        if token.idx == 1:
            # the very first token is tokenized as-is
            pieces = tokenizer.tokenize(token.text)
        else:
            # Dummy token is needed to get the actual token tokenized correctly
            # with the special ``Ġ`` symbol; the dummy's subword is dropped again
            pieces = tokenizer.tokenize("X " + token.text)[1:]

        surface_forms.append(token.text if pieces else tokenizer.unk_token)
        subword_counts[token.idx] = len(pieces) if pieces else 1

    return subword_counts, " ".join(surface_forms)
def _get_transformer_sentence_embeddings(
    sentences: List[Sentence],
    tokenizer: PreTrainedTokenizer,
    model: PreTrainedModel,
    name: str,
    layers: List[int],
    pooling_operation: str,
    use_scalar_mix: bool,
    bos_token: Optional[str] = None,
    eos_token: Optional[str] = None,
) -> List[Sentence]:
    """Builds sentence embeddings for Transformer-based architectures.
    :param sentences: input sentences
    :param tokenizer: tokenization object
    :param model: model object
    :param name: name of the Transformer-based model
    :param layers: list of layers
    :param pooling_operation: defines pooling operation for subword extraction
    :param use_scalar_mix: defines the usage of scalar mix for specified layer(s)
    :param bos_token: defines begin of sentence token (used for left padding)
    :param eos_token: defines end of sentence token (used for right padding)
    :return: list of sentences (each token of a sentence is now embedded).
    """
    with torch.no_grad():
        for sentence in sentences:
            token_subwords_mapping: Dict[int, int] = {}

            # GPT-2 and RoBERTa (but not XLM-R) need byte-level tokenization
            # with the dummy-token workaround
            if ("gpt2" in name or "roberta" in name) and "xlm" not in name:
                (
                    token_subwords_mapping,
                    tokenized_string,
                ) = _build_token_subwords_mapping_gpt2(sentence=sentence, tokenizer=tokenizer)
            else:
                (
                    token_subwords_mapping,
                    tokenized_string,
                ) = _build_token_subwords_mapping(sentence=sentence, tokenizer=tokenizer)

            subwords = tokenizer.tokenize(tokenized_string)

            # offset tracks the position of the current token's first subword;
            # a BOS token shifts every position by one
            offset = 0
            if bos_token:
                subwords = [bos_token, *subwords]
                offset = 1
            if eos_token:
                subwords = [*subwords, eos_token]

            indexed_tokens = tokenizer.convert_tokens_to_ids(subwords)
            tokens_tensor = torch.tensor([indexed_tokens])
            tokens_tensor = tokens_tensor.to(flair.device)

            # models are loaded with output_hidden_states=True, so the last
            # element of the output tuple holds all layers' hidden states
            hidden_states = model(tokens_tensor)[-1]

            for token in sentence.tokens:
                len_subwords = token_subwords_mapping[token.idx]

                subtoken_embeddings = _extract_embeddings(
                    hidden_states=hidden_states,
                    layers=layers,
                    pooling_operation=pooling_operation,
                    subword_start_idx=offset,
                    subword_end_idx=offset + len_subwords,
                    use_scalar_mix=use_scalar_mix,
                )

                offset += len_subwords

                # concatenate the per-layer embeddings into the final token embedding
                final_subtoken_embedding = torch.cat(subtoken_embeddings)
                token.set_embedding(name, final_subtoken_embedding)

    return sentences
class BertEmbeddings(TokenEmbeddings):
    """Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018 (deprecated)."""

    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        bert_model_or_path: str = "bert-base-uncased",
        layers: str = "-1,-2,-3,-4",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ) -> None:
        """Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018.
        :param bert_model_or_path: name of BERT model ('') or directory path containing custom model, configuration file
        and vocab file (names of three files should be - config.json, pytorch_model.bin/model.chkpt, vocab.txt)
        :param layers: string indicating which layers to take for embedding
        :param pooling_operation: how to get from token piece embeddings to token embedding. Either pool them and take
        the average ('mean') or use first word piece embedding as token embedding ('first).
        """
        super().__init__()

        # pick the matching tokenizer/model family based on the model name
        if "distilbert" in bert_model_or_path:
            try:
                from transformers import DistilBertModel, DistilBertTokenizer
            except ImportError:
                log.warning("-" * 100)
                log.warning("ATTENTION! To use DistilBert, please first install a recent version of transformers!")
                log.warning("-" * 100)
                # NOTE(review): the ImportError is swallowed here; the
                # from_pretrained call below will then fail with a NameError -
                # confirm whether failing fast would be preferable
                pass

            self.tokenizer = DistilBertTokenizer.from_pretrained(bert_model_or_path)
            self.model = DistilBertModel.from_pretrained(
                pretrained_model_name_or_path=bert_model_or_path,
                output_hidden_states=True,
            )
        elif "albert" in bert_model_or_path:
            self.tokenizer = AlbertTokenizer.from_pretrained(bert_model_or_path)
            self.model = AlbertModel.from_pretrained(
                pretrained_model_name_or_path=bert_model_or_path,
                output_hidden_states=True,
            )
        else:
            self.tokenizer = BertTokenizer.from_pretrained(bert_model_or_path)
            self.model = BertModel.from_pretrained(
                pretrained_model_name_or_path=bert_model_or_path,
                output_hidden_states=True,
            )

        self.layer_indexes = [int(x) for x in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.name = str(bert_model_or_path)
        # embeddings are computed once per sentence and not fine-tuned
        self.static_embeddings = True

    class BertInputFeatures:
        """Private helper class for holding BERT-formatted features."""

        def __init__(
            self,
            unique_id,
            tokens,
            input_ids,
            input_mask,
            input_type_ids,
            token_subtoken_count,
        ) -> None:
            self.unique_id = unique_id  # index of the sentence in the batch
            self.tokens = tokens  # wordpiece tokens incl. [CLS]/[SEP]
            self.input_ids = input_ids  # vocabulary ids, zero-padded
            self.input_mask = input_mask  # 1 for real tokens, 0 for padding
            self.input_type_ids = input_type_ids  # all 0 (single-segment input)
            self.token_subtoken_count = token_subtoken_count  # token idx -> #wordpieces

    def _convert_sentences_to_features(
        self, sentences, max_sequence_length: int
    ) -> List["BertEmbeddings.BertInputFeatures"]:
        """Tokenize sentences into wordpieces and pad them into fixed-length BERT inputs."""
        # +2 accounts for the [CLS] and [SEP] markers added below
        max_sequence_length = max_sequence_length + 2

        features: List[BertEmbeddings.BertInputFeatures] = []
        for sentence_index, sentence in enumerate(sentences):
            bert_tokenization: List[str] = []
            # number of wordpieces per original token, keyed by token index
            token_subtoken_count: Dict[int, int] = {}

            for token in sentence:
                subtokens = self.tokenizer.tokenize(token.text)
                bert_tokenization.extend(subtokens)
                token_subtoken_count[token.idx] = len(subtokens)

            # truncate over-long sentences so [CLS]/[SEP] still fit
            if len(bert_tokenization) > max_sequence_length - 2:
                bert_tokenization = bert_tokenization[0 : (max_sequence_length - 2)]

            tokens = []
            input_type_ids = []
            tokens.append("[CLS]")
            input_type_ids.append(0)
            for token in bert_tokenization:
                tokens.append(token)
                input_type_ids.append(0)
            tokens.append("[SEP]")
            input_type_ids.append(0)

            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_sequence_length:
                input_ids.append(0)
                input_mask.append(0)
                input_type_ids.append(0)

            features.append(
                BertEmbeddings.BertInputFeatures(
                    unique_id=sentence_index,
                    tokens=tokens,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    input_type_ids=input_type_ids,
                    token_subtoken_count=token_subtoken_count,
                )
            )

        return features

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Add embeddings to all words in a list of sentences. If embeddings are already added,
        updates only if embeddings are non-static.
        """
        # first, find longest sentence in batch
        longest_sentence_in_batch: int = len(
            max(
                [self.tokenizer.tokenize(sentence.to_tokenized_string()) for sentence in sentences],
                key=len,
            )
        )

        # prepare id maps for BERT model
        features = self._convert_sentences_to_features(sentences, longest_sentence_in_batch)
        all_input_ids = torch.LongTensor([f.input_ids for f in features]).to(flair.device)
        all_input_masks = torch.LongTensor([f.input_mask for f in features]).to(flair.device)

        # put encoded batch through BERT model to get all hidden states of all encoder layers
        self.model.to(flair.device)
        self.model.eval()
        all_encoder_layers = self.model(all_input_ids, attention_mask=all_input_masks)[-1]

        with torch.no_grad():
            for sentence_index, sentence in enumerate(sentences):
                feature = features[sentence_index]

                # get aggregated embeddings for each BERT-subtoken in sentence
                subtoken_embeddings = []
                for token_index, _ in enumerate(feature.tokens):
                    all_layers = []
                    for layer_index in self.layer_indexes:
                        layer_output = all_encoder_layers[int(layer_index)][sentence_index]
                        all_layers.append(layer_output[token_index])

                    if self.use_scalar_mix:
                        # learn a weighted mixture over the selected layers
                        sm = ScalarMix(mixture_size=len(all_layers))
                        sm_embeddings = sm(all_layers)
                        all_layers = [sm_embeddings]

                    subtoken_embeddings.append(torch.cat(all_layers))

                # get the current sentence object
                token_idx = 0
                for token in sentence:
                    # add concatenated embedding to sentence
                    token_idx += 1  # first increment skips the [CLS] position

                    if self.pooling_operation == "first":
                        # use first subword embedding if pooling operation is 'first'
                        token.set_embedding(self.name, subtoken_embeddings[token_idx])
                    else:
                        # otherwise, do a mean over all subwords in token
                        embeddings = subtoken_embeddings[
                            token_idx : token_idx + feature.token_subtoken_count[token.idx]
                        ]
                        embeddings = [embedding.unsqueeze(0) for embedding in embeddings]
                        mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)
                        token.set_embedding(self.name, mean)

                    # advance past this token's remaining subtokens
                    token_idx += feature.token_subtoken_count[token.idx] - 1

        return sentences

    @property
    @abstractmethod
    def embedding_length(self) -> int:
        """Returns the length of the embedding vector."""
        # concatenating N layers multiplies the width by N; scalar mix keeps
        # the single-layer width
        return (
            len(self.layer_indexes) * self.model.config.hidden_size
            if not self.use_scalar_mix
            else self.model.config.hidden_size
        )
class DocumentMeanEmbeddings(DocumentEmbeddings):
    """Document embedding obtained by averaging all token embeddings (deprecated)."""

    @deprecated(
        version="0.3.1",
        reason="The functionality of this class is moved to 'DocumentPoolEmbeddings'",
    )
    def __init__(self, token_embeddings: List[TokenEmbeddings]) -> None:
        """The constructor takes a list of embeddings to be combined."""
        super().__init__()

        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=token_embeddings)
        self.name: str = "document_mean"
        # the mean has the same dimensionality as the stacked token embeddings
        self.__embedding_length: int = self.embeddings.embedding_length

        self.to(flair.device)

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates
        only if embeddings are non-static.
        """
        everything_embedded: bool = True

        # if only one sentence is passed, convert to list of sentence
        if type(sentences) is Sentence:
            sentences = [sentences]

        # only recompute if at least one sentence lacks this embedding
        for sentence in sentences:
            if self.name not in sentence._embeddings.keys():
                everything_embedded = False

        if not everything_embedded:
            self.embeddings.embed(sentences)

            for sentence in sentences:
                word_embeddings = []
                for token in sentence.tokens:
                    word_embeddings.append(token.get_embedding().unsqueeze(0))

                word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)

                # document embedding = mean over all token embeddings
                mean_embedding = torch.mean(word_embeddings, 0)

                sentence.set_embedding(self.name, mean_embedding)

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # embedding happens in embed() directly; nothing to do here
        pass
class DocumentLSTMEmbeddings(DocumentEmbeddings):
    """Document embedding from the final (and, if bidirectional, also first)
    hidden state of an RNN run over token embeddings (deprecated).

    NOTE(review): despite the class name, the recurrent layer instantiated
    below is a torch.nn.GRU, not an LSTM.
    """

    @deprecated(
        version="0.4",
        reason="The functionality of this class is moved to 'DocumentRNNEmbeddings'",
    )
    def __init__(
        self,
        embeddings: List[TokenEmbeddings],
        hidden_size=128,
        rnn_layers=1,
        reproject_words: bool = True,
        reproject_words_dimension: Optional[int] = None,
        bidirectional: bool = False,
        dropout: float = 0.5,
        word_dropout: float = 0.0,
        locked_dropout: float = 0.0,
    ) -> None:
        """The constructor takes a list of embeddings to be combined.
        :param embeddings: a list of token embeddings
        :param hidden_size: the number of hidden states in the lstm
        :param rnn_layers: the number of layers for the lstm
        :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
        layer before putting them into the lstm or not
        :param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
        dimension as before will be taken.
        :param bidirectional: boolean value, indicating whether to use a bidirectional lstm or not
        :param dropout: the dropout value to be used
        :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
        :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used.
        """
        super().__init__()

        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)

        self.reproject_words = reproject_words
        self.bidirectional = bidirectional

        self.length_of_all_token_embeddings: int = self.embeddings.embedding_length

        self.name = "document_lstm"
        self.static_embeddings = False

        self.__embedding_length: int = hidden_size
        if self.bidirectional:
            # bidirectional output is 2x hidden, and first+last states are
            # concatenated in embed(), giving 4x hidden in total
            self.__embedding_length *= 4

        self.embeddings_dimension: int = self.length_of_all_token_embeddings
        if self.reproject_words and reproject_words_dimension is not None:
            self.embeddings_dimension = reproject_words_dimension

        # bidirectional LSTM on top of embedding layer
        self.word_reprojection_map = torch.nn.Linear(self.length_of_all_token_embeddings, self.embeddings_dimension)
        self.rnn = torch.nn.GRU(
            self.embeddings_dimension,
            hidden_size,
            num_layers=rnn_layers,
            bidirectional=self.bidirectional,
        )

        # dropouts
        if locked_dropout > 0.0:
            self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
        else:
            self.dropout = torch.nn.Dropout(dropout)

        self.use_word_dropout: bool = word_dropout > 0.0
        if self.use_word_dropout:
            self.word_dropout = WordDropout(word_dropout)

        torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)

        self.to(flair.device)

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
        only if embeddings are non-static.

        NOTE(review): sorts the passed-in sentence list in place (longest
        first), so the caller's list order changes.
        """
        if type(sentences) is Sentence:
            sentences = [sentences]

        self.rnn.zero_grad()

        # pack_padded_sequence below requires sentences sorted by length, longest first
        sentences.sort(key=lambda x: len(x), reverse=True)

        self.embeddings.embed(sentences)

        # first, sort sentences by number of tokens
        longest_token_sequence_in_batch: int = len(sentences[0])

        all_sentence_tensors = []
        lengths: List[int] = []

        # go through each sentence in batch
        for _i, sentence in enumerate(sentences):
            lengths.append(len(sentence.tokens))

            word_embeddings = []

            for token, _token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
                word_embeddings.append(token.get_embedding().unsqueeze(0))

            # PADDING: pad shorter sentences out
            for _add in range(longest_token_sequence_in_batch - len(sentence.tokens)):
                word_embeddings.append(
                    torch.zeros(self.length_of_all_token_embeddings, dtype=torch.float).unsqueeze(0).to(flair.device)
                )

            word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)

            sentence_states = word_embeddings_tensor

            # ADD TO SENTENCE LIST: add the representation
            all_sentence_tensors.append(sentence_states.unsqueeze(1))

        # --------------------------------------------------------------------
        # GET REPRESENTATION FOR ENTIRE BATCH
        # --------------------------------------------------------------------
        sentence_tensor = torch.cat(all_sentence_tensors, 1)

        # --------------------------------------------------------------------
        # FF PART
        # --------------------------------------------------------------------
        # use word dropout if set
        if self.use_word_dropout:
            sentence_tensor = self.word_dropout(sentence_tensor)

        if self.reproject_words:
            sentence_tensor = self.word_reprojection_map(sentence_tensor)

        sentence_tensor = self.dropout(sentence_tensor)

        packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)

        self.rnn.flatten_parameters()

        lstm_out, hidden = self.rnn(packed)

        outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)

        outputs = self.dropout(outputs)

        # --------------------------------------------------------------------
        # EXTRACT EMBEDDINGS FROM LSTM
        # --------------------------------------------------------------------
        for sentence_no, length in enumerate(lengths):
            # last valid timestep summarizes the sentence (forward direction)
            last_rep = outputs[length - 1, sentence_no]

            embedding = last_rep
            if self.bidirectional:
                # for the backward direction the first timestep is the summary
                first_rep = outputs[0, sentence_no]
                embedding = torch.cat([first_rep, last_rep], 0)

            sentence = sentences[sentence_no]
            sentence.set_embedding(self.name, embedding)

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # embedding happens in embed() directly; nothing to do here
        pass
class ELMoTransformerEmbeddings(TokenEmbeddings):
    """Contextual word embeddings using word-level Transformer-based LM, as proposed in Peters et al., 2018."""

    @deprecated(
        version="0.4.2",
        reason="Not possible to load or save ELMo Transformer models. @stefan-it is working on it.",
    )
    def __init__(self, model_file: str) -> None:
        """
        :param model_file: path to the AllenNLP language-model archive
        """
        super().__init__()

        try:
            from allennlp.data.token_indexers.elmo_indexer import (
                ELMoTokenCharactersIndexer,
            )
            from allennlp.modules.token_embedders.bidirectional_language_model_token_embedder import (
                BidirectionalLanguageModelTokenEmbedder,
            )
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "allennlp" is not installed!')
            log.warning(
                "To use ELMoTransformerEmbeddings, please first install a recent version from https://github.com/allenai/allennlp"
            )
            log.warning("-" * 100)
            # NOTE(review): the error is swallowed; construction will then fail
            # below with a NameError - confirm whether failing fast is better
            pass

        self.name = "elmo-transformer"
        # the LM is frozen (requires_grad=False) and embeddings are static
        self.static_embeddings = True
        self.lm_embedder = BidirectionalLanguageModelTokenEmbedder(
            archive_file=model_file,
            dropout=0.2,
            bos_eos_tokens=("<S>", "</S>"),
            remove_bos_eos=True,
            requires_grad=False,
        )
        self.lm_embedder = self.lm_embedder.to(device=flair.device)
        self.vocab = self.lm_embedder._lm.vocab
        self.indexer = ELMoTokenCharactersIndexer()

        # embed a dummy sentence to determine embedding_length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Embed each token of each sentence with the ELMo transformer LM."""
        # Avoid conflicts with flair's Token class
        import allennlp.data.tokenizers.token as allen_nlp_token

        indexer = self.indexer
        vocab = self.vocab

        for sentence in sentences:
            # convert each token into AllenNLP character indices
            character_indices = indexer.tokens_to_indices(
                [allen_nlp_token.Token(token.text) for token in sentence], vocab, "elmo"
            )["elmo"]

            indices_tensor = torch.LongTensor([character_indices])
            indices_tensor = indices_tensor.to(device=flair.device)
            # detach + move to CPU: embeddings are stored per token, not backpropagated
            embeddings = self.lm_embedder(indices_tensor)[0].detach().cpu().numpy()

            for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
                embedding = embeddings[token_idx]
                word_embedding = torch.FloatTensor(embedding)
                token.set_embedding(self.name, word_embedding)

        return sentences

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
| 63,878 | 39.099812 | 174 | py |
flair | flair-master/flair/embeddings/image.py | import logging
from typing import Any, Dict, List, Optional
import torch
import torch.nn.functional as F
from torch.nn import (
AdaptiveAvgPool2d,
AdaptiveMaxPool2d,
Conv2d,
Dropout2d,
Linear,
MaxPool2d,
Parameter,
ReLU,
Sequential,
TransformerEncoder,
TransformerEncoderLayer,
)
import flair
from flair.data import Image
from flair.embeddings.base import Embeddings, register_embeddings
log = logging.getLogger("flair")
class ImageEmbeddings(Embeddings[Image]):
    """Abstract base class for embeddings that operate on Image data points."""

    @property
    def embedding_type(self) -> str:
        # all subclasses embed whole images, not parts of them
        return "image-level"

    def to_params(self) -> Dict[str, Any]:
        # legacy pickle-like saving for image embeddings, as implementation details are not obvious
        return self.__getstate__()  # type: ignore[operator]

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "Embeddings":
        # legacy pickle-like loading for image embeddings, as implementation details are not obvious
        # __new__ bypasses __init__ (and any heavy work it does); state is restored directly
        embedding = cls.__new__(cls)
        embedding.__setstate__(params)
        return embedding
@register_embeddings
class IdentityImageEmbeddings(ImageEmbeddings):
    """Opens each image with PIL and applies the given transforms; the
    transform output itself is stored as the "embedding".
    """

    def __init__(self, transforms) -> None:
        # imported lazily so PIL is only required when this class is used
        import PIL as pythonimagelib

        self.PIL = pythonimagelib
        self.name = "Identity"
        self.transforms = transforms
        # NOTE(review): never assigned a value afterwards, so the assert in
        # embedding_length below will always fail - confirm intent
        self.__embedding_length: Optional[int] = None
        self.static_embeddings = True
        super().__init__()

    def _add_embeddings_internal(self, images: List[Image]):
        for image in images:
            image_data = self.PIL.Image.open(image.imageURL)
            # force-load pixel data before the file handle goes out of scope
            image_data.load()
            image.set_embedding(self.name, self.transforms(image_data))

    @property
    def embedding_length(self) -> int:
        assert self.__embedding_length is not None
        return self.__embedding_length

    def __str__(self) -> str:
        return self.name
@register_embeddings
class PrecomputedImageEmbeddings(ImageEmbeddings):
    """Serves embeddings from a precomputed {image URL -> tensor} lookup table."""

    def __init__(self, url2tensor_dict, name) -> None:
        """
        :param url2tensor_dict: mapping from image URL to its precomputed embedding tensor
        :param name: name under which the embeddings are stored on the Image objects
        """
        self.url2tensor_dict = url2tensor_dict
        self.name = name
        # all tensors in the dict are assumed to share the length of the first one
        self.__embedding_length = len(list(self.url2tensor_dict.values())[0])
        self.static_embeddings = True
        super().__init__()

    def _add_embeddings_internal(self, images: List[Image]):
        for image in images:
            if image.imageURL in self.url2tensor_dict:
                image.set_embedding(self.name, self.url2tensor_dict[image.imageURL])
            else:
                # unknown images get an all-zero embedding of the same length
                image.set_embedding(self.name, torch.zeros(self.__embedding_length, device=flair.device))

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def __str__(self) -> str:
        return self.name
@register_embeddings
class NetworkImageEmbeddings(ImageEmbeddings):
    """Image embeddings from a torchvision convnet backbone (resnet50 or
    mobilenet_v2), optionally pretrained on ImageNet.
    """

    def __init__(self, name, pretrained=True, transforms=None) -> None:
        """
        :param name: backbone identifier; one of "resnet50" or "mobilenet_v2"
        :param pretrained: if True, load ImageNet weights and normalize inputs with ImageNet statistics
        :param transforms: optional list of torchvision transforms applied before ToTensor
        :raises ModuleNotFoundError: if torchvision is not installed
        :raises Exception: if ``name`` is not a supported backbone
        """
        super().__init__()

        try:
            import torchvision
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "torchvision" is not installed!')
            log.warning('To use convnets pretrained on ImageNet, please first install with "pip install torchvision"')
            log.warning("-" * 100)
            # fail fast: previously the error was swallowed ("pass"), which led
            # to a confusing NameError on the torchvision usages below
            raise

        # backbone name -> (constructor, feature-extractor selector, embedding size)
        model_info = {
            "resnet50": (torchvision.models.resnet50, lambda x: list(x)[:-1], 2048),
            "mobilenet_v2": (
                torchvision.models.mobilenet_v2,
                lambda x: list(x)[:-1] + [torch.nn.AdaptiveAvgPool2d((1, 1))],
                1280,
            ),
        }

        transforms = [] if transforms is None else transforms
        transforms += [torchvision.transforms.ToTensor()]
        if pretrained:
            # standard ImageNet channel statistics, required for pretrained weights
            imagenet_mean = [0.485, 0.456, 0.406]
            imagenet_std = [0.229, 0.224, 0.225]
            transforms += [torchvision.transforms.Normalize(mean=imagenet_mean, std=imagenet_std)]
        self.transforms = torchvision.transforms.Compose(transforms)

        if name in model_info:
            model_constructor = model_info[name][0]
            model_features = model_info[name][1]
            embedding_length = model_info[name][2]

            net = model_constructor(pretrained=pretrained)
            # drop the classification head; keep only the feature extractor
            modules = model_features(net.children())
            self.features = torch.nn.Sequential(*modules)

            self.__embedding_length = embedding_length

            self.name = name
        else:
            raise Exception(f"Image embeddings {name} not available.")

    def _add_embeddings_internal(self, images: List[Image]):
        image_tensor = torch.stack([self.transforms(image.data) for image in images])
        image_embeddings = self.features(image_tensor)
        # squeeze trailing 1x1 spatial dims from the pooled feature map
        image_embeddings = (
            image_embeddings.view(image_embeddings.shape[:2]) if image_embeddings.dim() == 4 else image_embeddings
        )
        if image_embeddings.dim() != 2:
            raise Exception(f"Unknown embedding shape of length {image_embeddings.dim()}")
        for image_id, image in enumerate(images):
            image.set_embedding(self.name, image_embeddings[image_id])

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def __str__(self) -> str:
        return self.name
@register_embeddings
class ConvTransformNetworkImageEmbeddings(ImageEmbeddings):
    """Image embeddings from a configurable conv stack, optionally followed by
    a transformer encoder over the spatial positions (with learned 2D position
    features and a <cls> token).

    NOTE(review): ``self.name`` is never assigned in __init__, so __str__ will
    raise AttributeError unless a caller sets it - confirm.
    """

    def __init__(self, feats_in, convnet_parms, posnet_parms, transformer_parms) -> None:
        """
        :param feats_in: number of input channels of the first conv layer
        :param convnet_parms: dict describing the conv stack (keys used below: kernel_sizes,
            n_feats_out, strides, groups, dropout, pool_layers_map, adaptive_pool_func, output_size)
        :param posnet_parms: dict for the position-feature net, or None to disable the transformer
        :param transformer_parms: dict for the transformer encoder, or None to disable it
        """
        super().__init__()

        adaptive_pool_func_map = {"max": AdaptiveMaxPool2d, "avg": AdaptiveAvgPool2d}

        # first conv layer (optionally preceded by dropout)
        convnet_arch: List[Any] = [] if convnet_parms["dropout"][0] <= 0 else [Dropout2d(convnet_parms["dropout"][0])]
        convnet_arch.extend(
            [
                Conv2d(
                    in_channels=feats_in,
                    out_channels=convnet_parms["n_feats_out"][0],
                    kernel_size=convnet_parms["kernel_sizes"][0],
                    padding=convnet_parms["kernel_sizes"][0][0] // 2,
                    stride=convnet_parms["strides"][0],
                    groups=convnet_parms["groups"][0],
                ),
                ReLU(),
            ]
        )
        if "0" in convnet_parms["pool_layers_map"]:
            convnet_arch.append(MaxPool2d(kernel_size=convnet_parms["pool_layers_map"]["0"]))

        # remaining conv layers, each optionally preceded by dropout and followed by max-pooling
        for layer_id, (kernel_size, n_in, n_out, groups, stride, dropout) in enumerate(
            zip(
                convnet_parms["kernel_sizes"][1:],
                convnet_parms["n_feats_out"][:-1],
                convnet_parms["n_feats_out"][1:],
                convnet_parms["groups"][1:],
                convnet_parms["strides"][1:],
                convnet_parms["dropout"][1:],
            )
        ):
            if dropout > 0:
                convnet_arch.append(Dropout2d(dropout))
            convnet_arch.append(
                Conv2d(
                    in_channels=n_in,
                    out_channels=n_out,
                    kernel_size=kernel_size,
                    padding=kernel_size[0] // 2,
                    stride=stride,
                    groups=groups,
                )
            )
            convnet_arch.append(ReLU())
            if str(layer_id + 1) in convnet_parms["pool_layers_map"]:
                convnet_arch.append(MaxPool2d(kernel_size=convnet_parms["pool_layers_map"][str(layer_id + 1)]))

        # final adaptive pooling to a fixed spatial output size
        convnet_arch.append(
            adaptive_pool_func_map[convnet_parms["adaptive_pool_func"]](output_size=convnet_parms["output_size"])
        )
        self.conv_features = Sequential(*convnet_arch)
        conv_feat_dim = convnet_parms["n_feats_out"][-1]

        if posnet_parms is not None and transformer_parms is not None:
            self.use_transformer = True
            # small MLP (or single linear map) turning (row, col) coordinates into position features
            if posnet_parms["nonlinear"]:
                posnet_arch = [
                    Linear(2, posnet_parms["n_hidden"]),
                    ReLU(),
                    Linear(posnet_parms["n_hidden"], conv_feat_dim),
                ]
            else:
                posnet_arch = [Linear(2, conv_feat_dim)]
            self.position_features = Sequential(*posnet_arch)
            transformer_layer = TransformerEncoderLayer(
                d_model=conv_feat_dim, **transformer_parms["transformer_encoder_parms"]
            )
            self.transformer = TransformerEncoder(transformer_layer, num_layers=transformer_parms["n_blocks"])
            # <cls> token initially set to 1/D, so it attends to all image features equally
            self.cls_token = Parameter(torch.ones(conv_feat_dim, 1) / conv_feat_dim)
            self._feat_dim = conv_feat_dim
        else:
            self.use_transformer = False
            # without a transformer, the flattened pooled feature map is the embedding
            self._feat_dim = convnet_parms["output_size"][0] * convnet_parms["output_size"][1] * conv_feat_dim

    def forward(self, x):
        """Compute one embedding vector of size ``self._feat_dim`` per input image."""
        x = self.conv_features(x)  # [b, d, h, w]
        b, d, h, w = x.shape
        if self.use_transformer:
            # add positional encodings
            y = torch.stack(
                [
                    torch.cat([torch.arange(h).unsqueeze(1)] * w, dim=1),
                    torch.cat([torch.arange(w).unsqueeze(0)] * h, dim=0),
                ]
            )  # [2, h, w]
            y = y.view([2, h * w]).transpose(1, 0)  # [h*w, 2]
            y = y.type(torch.float32).to(flair.device)
            y = self.position_features(y).transpose(1, 0).view([d, h, w])  # [h*w, d] => [d, h, w]
            y = y.unsqueeze(dim=0)  # [1, d, h, w]
            x = x + y  # [b, d, h, w] + [1, d, h, w] => [b, d, h, w]

            # reshape the pixels into the sequence
            x = x.view([b, d, h * w])  # [b, d, h*w]

            # layer norm after convolution and positional encodings
            x = F.layer_norm(x.permute([0, 2, 1]), (d,)).permute([0, 2, 1])

            # add <cls> token
            x = torch.cat([x, torch.stack([self.cls_token] * b)], dim=2)  # [b, d, h*w+1]

            # transformer requires input in the shape [h*w+1, b, d]
            x = (
                x.view([b * d, h * w + 1]).transpose(1, 0).view([h * w + 1, b, d])
            )  # [b, d, h*w+1] => [b*d, h*w+1] => [h*w+1, b*d] => [h*w+1, b, d]
            x = self.transformer(x)  # [h*w+1, b, d]

            # the output is an embedding of <cls> token
            x = x[-1, :, :]  # [b, d]
        else:
            x = x.view([-1, self._feat_dim])
            x = F.layer_norm(x, (self._feat_dim,))
        return x

    def _add_embeddings_internal(self, images: List[Image]):
        image_tensor = torch.stack([image.data for image in images])
        image_embeddings = self.forward(image_tensor)
        for image_id, image in enumerate(images):
            image.set_embedding(self.name, image_embeddings[image_id])

    @property
    def embedding_length(self):
        return self._feat_dim

    def __str__(self) -> str:
        return self.name
| 10,902 | 37.663121 | 118 | py |
flair | flair-master/flair/embeddings/transformer.py | import inspect
import os
import random
import re
import tempfile
import warnings
import zipfile
from abc import abstractmethod
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
import torch
from torch.jit import ScriptModule
from transformers import (
CONFIG_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoModel,
AutoTokenizer,
FeatureExtractionMixin,
LayoutLMTokenizer,
LayoutLMTokenizerFast,
LayoutLMv2FeatureExtractor,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.tokenization_utils_base import LARGE_INTEGER
from transformers.utils import PaddingStrategy
import flair
from flair.data import Sentence, Token, log
from flair.embeddings.base import (
DocumentEmbeddings,
Embeddings,
TokenEmbeddings,
register_embeddings,
)
# Special marker tag inserted at sentence boundaries (FLERT-style context handling).
SENTENCE_BOUNDARY_TAG: str = "[FLERT]"
@torch.jit.script_if_tracing
def pad_sequence_embeddings(all_hidden_states: List[torch.Tensor]) -> torch.Tensor:
    """Right-pad a list of [length_i, dim] tensors with zeros into one
    [batch, max_length, dim] tensor. Padding zeros are allocated on flair.device.
    """
    embedding_length = all_hidden_states[0].shape[1]
    longest_token_sequence_in_batch = 0
    for hidden_states in all_hidden_states:
        if hidden_states.shape[0] > longest_token_sequence_in_batch:
            longest_token_sequence_in_batch = hidden_states.shape[0]
    # one shared flat zero buffer; slices of it provide padding for every sequence
    pre_allocated_zero_tensor = torch.zeros(
        embedding_length * longest_token_sequence_in_batch,
        dtype=torch.float,
        device=flair.device,
    )
    all_embs = []
    for hidden_states in all_hidden_states:
        all_embs.append(hidden_states.view(-1))
        nb_padding_tokens = longest_token_sequence_in_batch - hidden_states.shape[0]

        if nb_padding_tokens > 0:
            all_embs.append(pre_allocated_zero_tensor[: embedding_length * nb_padding_tokens])
    # all sequences are flattened and concatenated, then reshaped into the batch
    return torch.cat(all_embs).view(len(all_hidden_states), longest_token_sequence_in_batch, embedding_length)
@torch.jit.script_if_tracing
def truncate_hidden_states(hidden_states: torch.Tensor, input_ids: torch.Tensor) -> torch.Tensor:
    """Trim the third (token) dimension of ``hidden_states`` down to the
    sequence length of ``input_ids``.
    """
    sequence_length = input_ids.size(1)
    return hidden_states[:, :, :sequence_length]
@torch.jit.script_if_tracing
def combine_strided_tensors(
    hidden_states: torch.Tensor,
    overflow_to_sample_mapping: torch.Tensor,
    half_stride: int,
    max_length: int,
    default_value: int,
) -> torch.Tensor:
    """Stitch overlapping strided tokenizer windows back into one row per sentence.

    Long sentences are split into overlapping windows of size ``max_length``;
    ``overflow_to_sample_mapping`` assigns each window to its source sentence.
    For multi-window sentences, the full prefix of the first window, the
    non-overlapping middle of every window, and the full suffix of the last
    window are concatenated. Positions that receive no data keep
    ``default_value`` (e.g. a padding id).
    """
    _, counts = torch.unique(overflow_to_sample_mapping, sorted=True, return_counts=True)
    sentence_count = int(overflow_to_sample_mapping.max().item() + 1)
    # worst-case length: one full window plus the non-overlapping part of each extra window
    token_count = max_length + (max_length - 2) * int(counts.max().item() - 1)

    if hidden_states.dim() == 2:
        sentence_hidden_states = torch.zeros(
            (sentence_count, token_count), device=flair.device, dtype=hidden_states.dtype
        )
    else:
        sentence_hidden_states = torch.zeros(
            (sentence_count, token_count, hidden_states.shape[2]), device=flair.device, dtype=hidden_states.dtype
        )
    sentence_hidden_states += default_value

    for sentence_id in torch.arange(0, sentence_hidden_states.shape[0]):
        selected_sentences = hidden_states[overflow_to_sample_mapping == sentence_id]
        if selected_sentences.size(0) > 1:
            start_part = selected_sentences[0, : half_stride + 1]
            mid_part = selected_sentences[:, half_stride + 1 : max_length - 1 - half_stride]
            mid_part = torch.reshape(mid_part, (mid_part.shape[0] * mid_part.shape[1],) + mid_part.shape[2:])
            end_part = selected_sentences[selected_sentences.shape[0] - 1, max_length - half_stride - 1 :]
            sentence_hidden_state = torch.cat((start_part, mid_part, end_part), dim=0)
            # fix: reuse the concatenation computed above instead of rebuilding
            # the identical torch.cat a second time
            sentence_hidden_states[sentence_id, : sentence_hidden_state.shape[0]] = sentence_hidden_state
        else:
            sentence_hidden_states[sentence_id, : selected_sentences.shape[1]] = selected_sentences[0, :]

    return sentence_hidden_states
@torch.jit.script_if_tracing
def fill_masked_elements(
    all_token_embeddings: torch.Tensor,
    sentence_hidden_states: torch.Tensor,
    mask: torch.Tensor,
    word_ids: torch.Tensor,
    lengths: torch.LongTensor,
):
    """Copy the mask-selected subtoken embeddings of each sentence into the per-token output tensor.

    Subtokens whose word id is negative (special/context tokens) are dropped; tokens that
    received no subtoken at all get a zero vector via `insert_missing_embeddings`.
    Mutates and returns `all_token_embeddings`.
    """
    for i in torch.arange(int(all_token_embeddings.shape[0])):
        # keep only subtokens that are selected by the mask AND belong to a real word
        r = insert_missing_embeddings(sentence_hidden_states[i][mask[i] & (word_ids[i] >= 0)], word_ids[i], lengths[i])
        all_token_embeddings[i, : lengths[i], :] = r
    return all_token_embeddings
@torch.jit.script_if_tracing
def insert_missing_embeddings(
    token_embeddings: torch.Tensor, word_id: torch.Tensor, length: torch.LongTensor
) -> torch.Tensor:
    """Insert zero vectors for tokens that received no subtoken embedding.

    Args:
        token_embeddings: embeddings of the tokens that DO have a representation,
            first dimension indexed in word-id order.
        word_id: word id of every subtoken; a token index absent from this tensor
            has no embedding and gets a zero vector inserted at its position.
        length: the expected number of tokens in the output.

    Returns:
        Tensor whose first dimension equals ``length``.
    """
    # in some cases we need to insert zero vectors for tokens without embedding.
    if token_embeddings.shape[0] == 0:
        # no embeddings at all: emit an all-zero block of the expected length.
        # The dim-specific branches keep this TorchScript-compatible.
        if token_embeddings.dim() == 2:
            token_embeddings = torch.zeros(
                int(length), token_embeddings.shape[1], dtype=token_embeddings.dtype, device=token_embeddings.device
            )
        elif token_embeddings.dim() == 3:
            token_embeddings = torch.zeros(
                int(length),
                token_embeddings.shape[1],
                token_embeddings.shape[2],
                dtype=token_embeddings.dtype,
                device=token_embeddings.device,
            )
        elif token_embeddings.dim() == 4:
            token_embeddings = torch.zeros(
                int(length),
                token_embeddings.shape[1],
                token_embeddings.shape[2],
                token_embeddings.shape[3],
                dtype=token_embeddings.dtype,
                device=token_embeddings.device,
            )
    elif token_embeddings.shape[0] < length:
        # hoisted out of the loop: the shape/dtype/device of a single row never
        # change across iterations, so one zero row can be reused for every insert
        zero_vector = torch.zeros_like(token_embeddings[:1])
        for _id in torch.arange(int(length)):
            if not (word_id == _id).any():
                token_embeddings = torch.cat(
                    (
                        token_embeddings[:_id],
                        zero_vector,
                        token_embeddings[_id:],
                    ),
                    dim=0,
                )
    return token_embeddings
@torch.jit.script_if_tracing
def fill_mean_token_embeddings(
    all_token_embeddings: torch.Tensor,
    sentence_hidden_states: torch.Tensor,
    word_ids: torch.Tensor,
    token_lengths: torch.Tensor,
):
    """Write the mean of each token's subtoken embeddings into the output tensor.

    For every sentence and token id, averages all subtoken hidden states mapped to
    that token. Mutates and returns `all_token_embeddings`.
    """
    for i in torch.arange(all_token_embeddings.shape[0]):
        for _id in torch.arange(token_lengths[i]):  # type: ignore[call-overload]
            # a token with no matching subtoken yields a NaN mean -> replaced by zeros
            all_token_embeddings[i, _id, :] = torch.nan_to_num(
                sentence_hidden_states[i][word_ids[i] == _id].mean(dim=0)
            )
    return all_token_embeddings
@torch.jit.script_if_tracing
def document_mean_pooling(sentence_hidden_states: torch.Tensor, sentence_lengths: torch.Tensor) -> torch.Tensor:
    """Mean-pool each sentence's subtoken embeddings over its real (unpadded) length.

    Args:
        sentence_hidden_states: tensor of shape (batch, seq_len, hidden_size).
        sentence_lengths: number of real subtokens per sentence, shape (batch,).

    Returns:
        Tensor of shape (batch, hidden_size) holding the per-sentence mean embedding.
    """
    # allocate on the input's device/dtype so GPU/half-precision inputs copy cleanly
    result = torch.zeros(
        sentence_hidden_states.shape[0],
        sentence_hidden_states.shape[2],
        device=sentence_hidden_states.device,
        dtype=sentence_hidden_states.dtype,
    )
    for i in torch.arange(sentence_hidden_states.shape[0]):
        result[i] = sentence_hidden_states[i, : sentence_lengths[i]].mean(dim=0)
    # fix: the original computed the pooled tensor but never returned it
    return result
@torch.jit.script_if_tracing
def document_max_pooling(sentence_hidden_states: torch.Tensor, sentence_lengths: torch.Tensor) -> torch.Tensor:
    """Max-pool each sentence's subtoken embeddings over its real (unpadded) length.

    Args:
        sentence_hidden_states: tensor of shape (batch, seq_len, hidden_size).
        sentence_lengths: number of real subtokens per sentence, shape (batch,).

    Returns:
        Tensor of shape (batch, hidden_size) holding the per-sentence max embedding.
    """
    # allocate on the input's device/dtype so GPU/half-precision inputs copy cleanly
    result = torch.zeros(
        sentence_hidden_states.shape[0],
        sentence_hidden_states.shape[2],
        device=sentence_hidden_states.device,
        dtype=sentence_hidden_states.dtype,
    )
    for i in torch.arange(sentence_hidden_states.shape[0]):
        # torch.max over dim returns (values, indices); only the values are kept
        result[i], _ = sentence_hidden_states[i, : sentence_lengths[i]].max(dim=0)
    # fix: the original computed the pooled tensor but never returned it
    return result
def _legacy_reconstruct_word_ids(
    embedding: "TransformerBaseEmbeddings", flair_tokens: List[List[str]]
) -> List[List[Optional[int]]]:
    """Rebuild per-subtoken word ids for tokenizers without a fast (rust) implementation.

    Re-tokenizes each sentence, splices the special tokens back in at the positions the
    tokenizer would use, and then aligns subtokens to tokens via
    `_reconstruct_word_ids_from_subtokens`. All rows are padded with ``None`` to the
    longest row, mirroring fast-tokenizer `word_ids()` output.
    """
    word_ids_list = []
    max_len = 0
    for tokens in flair_tokens:
        token_texts = embedding.tokenizer.tokenize(" ".join(tokens), is_split_into_words=True)
        token_ids = cast(List[int], embedding.tokenizer.convert_tokens_to_ids(token_texts))
        expanded_token_ids = embedding.tokenizer.build_inputs_with_special_tokens(token_ids)
        # walk both id sequences in parallel, inserting the text of every special
        # token the expansion added (e.g. [CLS]/[SEP]) at its position
        j = 0
        for _i, token_id in enumerate(token_ids):
            while expanded_token_ids[j] != token_id:
                token_texts.insert(j, embedding.tokenizer.convert_ids_to_tokens(expanded_token_ids[j]))
                j += 1
            j += 1
        # append any trailing special tokens
        while j < len(expanded_token_ids):
            token_texts.insert(j, embedding.tokenizer.convert_ids_to_tokens(expanded_token_ids[j]))
            j += 1
        if not embedding.allow_long_sentences and embedding.truncate:
            token_texts = token_texts[: embedding.tokenizer.model_max_length]
        reconstruct = _reconstruct_word_ids_from_subtokens(embedding, tokens, token_texts)
        word_ids_list.append(reconstruct)
        reconstruct_len = len(reconstruct)
        if reconstruct_len > max_len:
            max_len = reconstruct_len
    for _word_ids in word_ids_list:
        # padding
        _word_ids.extend([None] * (max_len - len(_word_ids)))
    return word_ids_list
def remove_special_markup(text: str):
    """Strip one leading/trailing subword marker from *text*.

    Handles the markers of RoBERTa ("Ġ"), BERT ("##"), XLNet ("▁") and XLM ("</w>")
    style tokenizers; at most one marker of each kind is removed.
    """
    for marker in ("Ġ", "##", "▁"):  # RoBERTa, BERT, XLNet prefixes (in that order)
        if text.startswith(marker):
            text = text[len(marker):]
    if text.endswith("</w>"):  # XLM word-end suffix
        text = text[: -len("</w>")]
    return text
def _get_processed_token_text(tokenizer, token: str) -> str:
    """Tokenize *token* and rebuild a normalized (markup-free, lower-cased, stripped) text."""
    subtoken_pieces = tokenizer.tokenize(token)
    rebuilt = "".join(remove_special_markup(piece) for piece in subtoken_pieces)
    return rebuilt.lower().strip()
def _reconstruct_word_ids_from_subtokens(embedding, tokens: List[str], subtokens: List[str]):
    """Align a list of subtokens back to their source tokens.

    Returns one entry per subtoken: the index of the token it belongs to, or ``None``
    for special tokens ([CLS]/[SEP]/BOS-style). Logs an error if the alignment does
    not consume all tokens (unless truncation is enabled).
    """
    word_iterator = iter(enumerate(_get_processed_token_text(embedding.tokenizer, token) for token in tokens))
    token_id, token_text = next(word_iterator)
    word_ids: List[Optional[int]] = []
    reconstructed_token = ""
    subtoken_count = 0
    processed_first_token = False
    special_tokens = []
    # check if special tokens exist to circumvent error message
    if embedding.tokenizer._bos_token:
        special_tokens.append(embedding.tokenizer.bos_token)
    if embedding.tokenizer._cls_token:
        special_tokens.append(embedding.tokenizer.cls_token)
    if embedding.tokenizer._sep_token:
        special_tokens.append(embedding.tokenizer.sep_token)
    # iterate over subtokens and reconstruct tokens
    for _subtoken_id, subtoken in enumerate(subtokens):
        # remove special markup
        subtoken = remove_special_markup(subtoken)
        # check if reconstructed token is special begin token ([CLS] or similar)
        if subtoken in special_tokens:
            word_ids.append(None)
            continue
        # a completed token (subtoken_count reset to 0) means we advance to the next
        # token — except on the very first subtoken, where the iterator was already
        # advanced once before the loop (processed_first_token guards that case)
        if subtoken_count == 0 and processed_first_token:
            token_id, token_text = next(word_iterator)
        processed_first_token = True
        # some BERT tokenizers somehow omit words - in such cases skip to next token
        while subtoken_count == 0 and not token_text.startswith(subtoken.lower()):
            token_id, token_text = next(word_iterator)
        word_ids.append(token_id)
        subtoken_count += 1
        reconstructed_token = reconstructed_token + subtoken
        if reconstructed_token.lower() == token_text:
            # we cannot handle unk_tokens perfectly, so let's assume that one unk_token corresponds to one token.
            reconstructed_token = ""
            subtoken_count = 0
    # if tokens are unaccounted for
    while len(word_ids) < len(subtokens):
        word_ids.append(None)
    # check if all tokens were matched to subtokens
    if token_id + 1 != len(tokens) and not embedding.truncate:
        log.error(f"Reconstructed token: '{reconstructed_token}'")
        log.error(f"Tokenization MISMATCH in sentence '{' '.join(tokens)}'")
        log.error(f"Last matched: '{tokens[token_id]}'")
        log.error(f"Last sentence: '{tokens[-1]}'")
        log.error(f"subtokenized: '{subtokens}'")
    return word_ids
class TransformerBaseEmbeddings(Embeddings[Sentence]):
    """Base class for all TransformerEmbeddings.

    This base class handles the tokenizer and the input preparation, however it won't implement the actual model.
    This can be further extended to implement the model in either a pytorch, jit or onnx way of working.
    """
    def __init__(
        self,
        name: str,
        tokenizer: PreTrainedTokenizer,
        embedding_length: int,
        context_length: int,
        context_dropout: float,
        respect_document_boundaries: bool,
        stride: int,
        allow_long_sentences: bool,
        fine_tune: bool,
        truncate: bool,
        use_lang_emb: bool,
        is_document_embedding: bool = False,
        is_token_embedding: bool = False,
        force_device: Optional[torch.device] = None,
        force_max_length: bool = False,
        feature_extractor: Optional[FeatureExtractionMixin] = None,
        needs_manual_ocr: Optional[bool] = None,
        use_context_separator: bool = True,
    ) -> None:
        """Set up tokenizer and input-preparation configuration.

        Args:
            name: unique embedding name, used as key when storing embeddings on tokens/sentences.
            tokenizer: the huggingface tokenizer used to create model inputs.
            embedding_length: dimensionality of the produced embeddings.
            context_length: number of context tokens added on each side (0 disables FLERT context).
            context_dropout: probability of dropping either context side during training.
            respect_document_boundaries: if True, context never crosses document boundaries.
            stride: overlap (in subtokens) between windows when long sentences are split.
            allow_long_sentences: if True, over-long sentences are split into strided windows.
            fine_tune: if True, gradients flow through the transformer during training.
            truncate: if True, over-long inputs are truncated to the model maximum length.
            use_lang_emb: if True, language ids are passed to XLM-style models.
            is_document_embedding: whether a document-level embedding is produced.
            is_token_embedding: whether token-level embeddings are produced.
            force_device: if set, input tensors are always created on this device.
            force_max_length: if True, batches are always padded to the model maximum length.
            feature_extractor: optional image feature extractor for layout models.
            needs_manual_ocr: overrides automatic detection of whether ocr boxes must be built manually.
            use_context_separator: if True, a [FLERT] marker separates sentence and context.
        """
        self.name = name
        super().__init__()
        self.document_embedding = is_document_embedding
        self.token_embedding = is_token_embedding
        self.tokenizer: PreTrainedTokenizer = tokenizer
        self.embedding_length_internal = embedding_length
        self.context_length = context_length
        self.context_dropout = context_dropout
        self.respect_document_boundaries = respect_document_boundaries
        self.stride = stride
        self.allow_long_sentences = allow_long_sentences
        self.truncate = truncate
        self.use_lang_emb = use_lang_emb
        self.force_device = force_device
        self.fine_tune = fine_tune
        self.force_max_length = force_max_length
        self.feature_extractor = feature_extractor
        self.use_context_separator = use_context_separator
        # tokenizers that take a "boxes" argument (e.g. layout models) expect ocr boxes as input
        tokenizer_params = list(inspect.signature(self.tokenizer.__call__).parameters.keys())
        self.tokenizer_needs_ocr_boxes = "boxes" in tokenizer_params
        # the layoutlm tokenizers don't handle ocr themselves
        self.needs_manual_ocr = isinstance(self.tokenizer, (LayoutLMTokenizer, LayoutLMTokenizerFast))
        if needs_manual_ocr is not None:
            self.needs_manual_ocr = needs_manual_ocr
        if (self.tokenizer_needs_ocr_boxes or self.needs_manual_ocr) and self.context_length > 0:
            warnings.warn(f"using '{name}' with additional context, might lead to bad results.", UserWarning)
        if not self.token_embedding and not self.document_embedding:
            raise ValueError("either 'is_token_embedding' or 'is_document_embedding' needs to be set.")
    def to_args(self):
        """Return the constructor arguments needed to recreate an equivalent embedding."""
        args = {
            "is_token_embedding": self.token_embedding,
            "is_document_embedding": self.document_embedding,
            "allow_long_sentences": self.allow_long_sentences,
            "tokenizer": self.tokenizer,
            "context_length": self.context_length,
            "context_dropout": self.context_dropout,
            "respect_document_boundaries": self.respect_document_boundaries,
            "truncate": self.truncate,
            "stride": self.stride,
            "embedding_length": self.embedding_length_internal,
            "name": self.name,
            "fine_tune": self.fine_tune,
            "use_lang_emb": self.use_lang_emb,
            "force_max_length": self.force_max_length,
            "feature_extractor": self.feature_extractor,
            "use_context_separator": self.use_context_separator,
        }
        if hasattr(self, "needs_manual_ocr"):
            args["needs_manual_ocr"] = self.needs_manual_ocr
        return args
    def __setstate__(self, state):
        """Unpickle by rebuilding the embedding from serialized params and copying its attributes."""
        embedding = self.from_params(state)
        for key in embedding.__dict__:
            self.__dict__[key] = embedding.__dict__[key]
    @classmethod
    def from_params(cls, params):
        """Recreate an embedding from a serialized parameter dict (inverse of `to_params`)."""
        tokenizer = cls._tokenizer_from_bytes(params.pop("tokenizer_data"))
        feature_extractor = cls._feature_extractor_from_bytes(params.pop("feature_extractor_data", None))
        embedding = cls.create_from_state(tokenizer=tokenizer, feature_extractor=feature_extractor, **params)
        return embedding
    def to_params(self):
        """Return a picklable parameter dict; tokenizer/feature-extractor are stored as zip bytes."""
        model_state = self.to_args()
        del model_state["tokenizer"]
        model_state["tokenizer_data"] = self.__tokenizer_bytes()
        del model_state["feature_extractor"]
        if self.feature_extractor:
            model_state["feature_extractor_data"] = self.__feature_extractor_bytes()
        return model_state
    @classmethod
    def _tokenizer_from_bytes(cls, zip_data: BytesIO) -> PreTrainedTokenizer:
        """Load a tokenizer from an in-memory zip of its saved files."""
        zip_obj = zipfile.ZipFile(zip_data)
        with tempfile.TemporaryDirectory() as temp_dir:
            zip_obj.extractall(temp_dir)
            return AutoTokenizer.from_pretrained(temp_dir, add_prefix_space=True)
    @classmethod
    def _feature_extractor_from_bytes(cls, zip_data: Optional[BytesIO]) -> Optional[FeatureExtractionMixin]:
        """Load a feature extractor from an in-memory zip, or return None if no data is given."""
        if zip_data is None:
            return None
        zip_obj = zipfile.ZipFile(zip_data)
        with tempfile.TemporaryDirectory() as temp_dir:
            zip_obj.extractall(temp_dir)
            return AutoFeatureExtractor.from_pretrained(temp_dir, apply_ocr=False)
    def __tokenizer_bytes(self):
        """Serialize the tokenizer's saved files into an in-memory zip (BytesIO)."""
        with tempfile.TemporaryDirectory() as temp_dir:
            files = list(self.tokenizer.save_pretrained(temp_dir))
            # for fast tokenizers the slow vocab files are redundant duplicates - drop them
            if self.tokenizer.is_fast and self.tokenizer.slow_tokenizer_class:
                vocab_files = self.tokenizer.slow_tokenizer_class.vocab_files_names.values()
                files = [f for f in files if all(v not in f for v in vocab_files)]
            zip_data = BytesIO()
            zip_obj = zipfile.ZipFile(zip_data, "w")
            for f in files:
                # transformers returns the "added_tokens.json" even if it doesn't create it
                if os.path.exists(f):
                    zip_obj.write(f, os.path.relpath(f, temp_dir))
        zip_data.seek(0)
        return zip_data
    def __feature_extractor_bytes(self):
        """Serialize the feature extractor's saved files into an in-memory zip (BytesIO)."""
        with tempfile.TemporaryDirectory() as temp_dir:
            files = list(self.feature_extractor.save_pretrained(temp_dir))
            zip_data = BytesIO()
            zip_obj = zipfile.ZipFile(zip_data, "w")
            for f in files:
                # transformers returns the "added_tokens.json" even if it doesn't create it
                if os.path.exists(f):
                    zip_obj.write(f, os.path.relpath(f, temp_dir))
        zip_data.seek(0)
        return zip_data
    @classmethod
    def create_from_state(cls, **state):
        """Hook for subclasses to customize deserialization; by default just calls the constructor."""
        return cls(**state)
    @property
    def embedding_length(self) -> int:
        return self.embedding_length_internal
    @property
    def embedding_type(self) -> str:
        # in case of doubt: token embedding has higher priority than document embedding
        return "word-level" if self.token_embedding else "sentence-level"
    @abstractmethod
    def _forward_tensors(self, tensors) -> Dict[str, torch.Tensor]:
        """Run the underlying model on prepared tensors; subclasses must implement this."""
        return self(**tensors)
    def prepare_tensors(self, sentences: List[Sentence], device: Optional[torch.device] = None):
        """Convert flair sentences into the kwargs dict expected by the model forward pass."""
        if device is None:
            device = flair.device
        flair_tokens, offsets, lengths = self.__gather_flair_tokens(sentences)
        # random check some tokens to save performance.
        if (self.needs_manual_ocr or self.tokenizer_needs_ocr_boxes) and not all(
            [
                flair_tokens[0][0].has_metadata("bbox"),
                flair_tokens[0][-1].has_metadata("bbox"),
                flair_tokens[-1][0].has_metadata("bbox"),
                flair_tokens[-1][-1].has_metadata("bbox"),
            ]
        ):
            raise ValueError(f"The embedding '{self.name}' requires the ocr 'bbox' set as metadata on all tokens.")
        if self.feature_extractor is not None and not all(
            [
                sentences[0].has_metadata("image"),
                sentences[-1].has_metadata("image"),
            ]
        ):
            raise ValueError(f"The embedding '{self.name}' requires the 'image' set as metadata for all sentences.")
        return self.__build_transformer_model_inputs(sentences, offsets, lengths, flair_tokens, device)
    def __build_transformer_model_inputs(
        self,
        sentences: List[Sentence],
        offsets: List[int],
        sentence_lengths: List[int],
        flair_tokens: List[List[Token]],
        device: torch.device,
    ):
        """Tokenize all sentences and assemble the model kwargs (ids, masks, word ids, images, ...)."""
        tokenizer_kwargs: Dict[str, Any] = {}
        if self.tokenizer_needs_ocr_boxes:
            tokenizer_kwargs["boxes"] = [[t.get_metadata("bbox") for t in tokens] for tokens in flair_tokens]
        else:
            tokenizer_kwargs["is_split_into_words"] = True
        batch_encoding = self.tokenizer(
            [[t.text for t in tokens] for tokens in flair_tokens],
            stride=self.stride,
            return_overflowing_tokens=self.allow_long_sentences,
            truncation=self.truncate,
            padding=PaddingStrategy.MAX_LENGTH if self.force_max_length else PaddingStrategy.LONGEST,
            return_tensors="pt",
            **tokenizer_kwargs,
        )
        input_ids = batch_encoding["input_ids"].to(device, non_blocking=True)
        model_kwargs = {"input_ids": input_ids}
        # Models such as FNet do not have an attention_mask
        if "attention_mask" in batch_encoding:
            model_kwargs["attention_mask"] = batch_encoding["attention_mask"].to(device, non_blocking=True)
        if "overflow_to_sample_mapping" in batch_encoding:
            # long sentences were split into windows: compute real per-sentence subtoken
            # lengths from the reassembled ids and repeat token lists per window
            cpu_overflow_to_sample_mapping = batch_encoding["overflow_to_sample_mapping"]
            model_kwargs["overflow_to_sample_mapping"] = cpu_overflow_to_sample_mapping.to(device, non_blocking=True)
            unpacked_ids = combine_strided_tensors(
                input_ids,
                model_kwargs["overflow_to_sample_mapping"],
                self.stride // 2,
                self.tokenizer.model_max_length,
                self.tokenizer.pad_token_id,
            )
            sub_token_lengths = (unpacked_ids != self.tokenizer.pad_token_id).sum(dim=1)
            padded_tokens = [flair_tokens[i] for i in cpu_overflow_to_sample_mapping]
        else:
            cpu_overflow_to_sample_mapping = None
            sub_token_lengths = (input_ids != self.tokenizer.pad_token_id).sum(dim=1)
            padded_tokens = flair_tokens
        # NOTE(review): cls_pooling / initial_cls_token are not set in this base class;
        # document embeddings appear to rely on a subclass (e.g. TransformerEmbeddings)
        # defining them - confirm before using this class directly.
        if self.document_embedding and not (self.cls_pooling == "cls" and self.initial_cls_token):
            model_kwargs["sub_token_lengths"] = sub_token_lengths
        # set language IDs for XLM-style transformers
        if self.use_lang_emb and self.tokenizer.lang2id is not None:
            model_kwargs["langs"] = torch.zeros_like(input_ids, dtype=input_ids.dtype)
            lang2id = self.tokenizer.lang2id
            if not self.allow_long_sentences:
                for s_id, sentence in enumerate(sentences):
                    lang_id = lang2id.get(sentence.get_language_code(), 0)
                    model_kwargs["langs"][s_id] = lang_id
            else:
                # with long-sentence splitting, each sentence may span several rows
                sentence_part_lengths = torch.unique(
                    batch_encoding["overflow_to_sample_mapping"],
                    return_counts=True,
                    sorted=True,
                )[1].tolist()
                sentence_idx = 0
                for sentence, part_length in zip(sentences, sentence_part_lengths):
                    lang_id = lang2id.get(sentence.get_language_code(), 0)
                    model_kwargs["langs"][sentence_idx : sentence_idx + part_length] = lang_id
                    sentence_idx += part_length
        if "bbox" in batch_encoding:
            model_kwargs["bbox"] = batch_encoding["bbox"].to(device, non_blocking=True)
        if self.token_embedding or self.needs_manual_ocr:
            model_kwargs["token_lengths"] = torch.tensor(sentence_lengths, device=device)
            if self.tokenizer.is_fast:
                word_ids_list = [batch_encoding.word_ids(i) for i in range(input_ids.size()[0])]
            else:
                word_ids_list = _legacy_reconstruct_word_ids(
                    self,
                    [[t.text for t in tokens] for tokens in flair_tokens],
                )
            # word_ids is only supported for fast rust tokenizers. Some models like "xlm-mlm-ende-1024" do not have
            # a fast tokenizer implementation, hence we need to fall back to our own reconstruction of word_ids.
            if self.token_embedding:
                if self.allow_long_sentences:
                    # repeat offsets/lengths per window so each row aligns with its sentence
                    new_offsets = []
                    new_lengths = []
                    assert cpu_overflow_to_sample_mapping is not None
                    for sent_id in cpu_overflow_to_sample_mapping:
                        new_offsets.append(offsets[sent_id])
                        new_lengths.append(sentence_lengths[sent_id])
                    offsets = new_offsets
                    sentence_lengths = new_lengths
                # shift word ids by the context offset; everything outside the real
                # sentence (context/special tokens) is marked with -100
                word_ids = torch.tensor(
                    [
                        [
                            -100 if (val is None or val < offset or val >= offset + length) else val - offset
                            for val in _word_ids
                        ]
                        for _word_ids, offset, length in zip(word_ids_list, offsets, sentence_lengths)
                    ],
                    device=device,
                )
                model_kwargs["word_ids"] = word_ids
            if self.needs_manual_ocr:
                # build one bbox per subtoken; special tokens get a zero box
                bbox = [
                    [(0, 0, 0, 0) if val is None else tokens[val].get_metadata("bbox") for val in _word_ids]
                    for _word_ids, tokens in zip(word_ids_list, padded_tokens)
                ]
                model_kwargs["bbox"] = torch.tensor(bbox, device=device)
        if self.feature_extractor is not None:
            images = [sent.get_metadata("image") for sent in sentences]
            image_encodings = self.feature_extractor(images, return_tensors="pt")["pixel_values"]
            if cpu_overflow_to_sample_mapping is not None:
                # repeat image encodings per window
                batched_image_encodings = [image_encodings[i] for i in cpu_overflow_to_sample_mapping]
                image_encodings = torch.stack(batched_image_encodings)
            image_encodings = image_encodings.to(flair.device)
            # LayoutLMv2 expects the key "image"; later layout models expect "pixel_values"
            if isinstance(self.feature_extractor, LayoutLMv2FeatureExtractor):
                model_kwargs["image"] = image_encodings
            else:
                model_kwargs["pixel_values"] = image_encodings
        return model_kwargs
    def __gather_flair_tokens(self, sentences: List[Sentence]) -> Tuple[List[List[Token]], List[int], List[int]]:
        """Expand each sentence with its context and collect (tokens, context offsets, sentence lengths)."""
        offsets = []
        lengths = []
        if self.context_length > 0:
            # set context if not set already
            previous_sentence = None
            for sentence in sentences:
                if sentence.is_context_set():
                    continue
                sentence._previous_sentence = previous_sentence
                sentence._next_sentence = None
                if previous_sentence:
                    previous_sentence._next_sentence = sentence
                previous_sentence = sentence
        sentence_tokens = []
        for sentence in sentences:
            # flair specific pre-tokenization
            tokens, offset = self._expand_sentence_with_context(sentence)
            sentence_tokens.append(tokens)
            offsets.append(offset)
            lengths.append(len(sentence))
        return sentence_tokens, offsets, lengths
    def _expand_sentence_with_context(self, sentence) -> Tuple[List[Token], int]:
        """Return the sentence tokens surrounded by context tokens, plus the left-context length."""
        # fields to store left and right context
        left_context = []
        right_context = []
        # expand context only if context_length is set
        expand_context = self.context_length > 0
        if expand_context:
            # if context_dropout is set, randomly deactivate left context during training
            if not self.training or random.randint(1, 100) > (self.context_dropout * 100):
                left_context = sentence.left_context(self.context_length, self.respect_document_boundaries)
            # if context_dropout is set, randomly deactivate right context during training
            if not self.training or random.randint(1, 100) > (self.context_dropout * 100):
                right_context = sentence.right_context(self.context_length, self.respect_document_boundaries)
        # if use_context_separator is set, add a [FLERT] token
        if self.use_context_separator and self.context_length > 0:
            left_context = [*left_context, Token(SENTENCE_BOUNDARY_TAG)]
            right_context = [Token(SENTENCE_BOUNDARY_TAG), *right_context]
        # return expanded sentence and context length information
        expanded_sentence = left_context + sentence.tokens + right_context
        context_length = len(left_context)
        return expanded_sentence, context_length
    def __extract_document_embeddings(self, sentence_hidden_states, sentences):
        """Attach one document embedding per sentence."""
        for document_emb, sentence in zip(sentence_hidden_states, sentences):
            sentence.set_embedding(self.name, document_emb)
    def __extract_token_embeddings(self, sentence_embeddings, sentences):
        """Attach one embedding per token of each sentence."""
        for token_embeddings, sentence in zip(sentence_embeddings, sentences):
            for token_embedding, token in zip(token_embeddings, sentence):
                token.set_embedding(self.name, token_embedding)
    def _add_embeddings_internal(self, sentences: List[Sentence]):
        """Prepare inputs, run the model, and store the resulting embeddings on the sentences/tokens."""
        tensors = self.prepare_tensors(sentences, device=self.force_device)
        # only track gradients when fine-tuning during training
        gradient_context = torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
        with gradient_context:
            embeddings = self._forward_tensors(tensors)
        if self.document_embedding:
            document_embedding = embeddings["document_embeddings"]
            self.__extract_document_embeddings(document_embedding, sentences)
        if self.token_embedding:
            token_embedding = embeddings["token_embeddings"]
            self.__extract_token_embeddings(token_embedding, sentences)
@register_embeddings
class TransformerOnnxEmbeddings(TransformerBaseEmbeddings):
    """Transformer embeddings that run inference through an exported ONNX model."""

    # NOTE(review): `providers: List = []` is a mutable default argument; it is only
    # read (never mutated) here, but replacing it with `None` + fallback would be safer.
    def __init__(self, onnx_model: str, providers: List = [], **kwargs) -> None:
        # onnx prepares numpy arrays, no matter if it runs on gpu or cpu, the input is on cpu first.
        super().__init__(**kwargs, force_device=torch.device("cpu"))
        self.onnx_model = onnx_model
        self.providers = providers
        self.create_session()
        self.eval()
    def to_params(self):
        """Serialize base params plus the onnx model path and execution providers."""
        params = super().to_params()
        params["providers"] = self.providers
        params["onnx_model"] = self.onnx_model
        return params
    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "TransformerOnnxEmbeddings":
        """Recreate the embedding from serialized params (tokenizer/extractor stored as zip bytes)."""
        params["tokenizer"] = cls._tokenizer_from_bytes(params.pop("tokenizer_data"))
        params["feature_extractor"] = cls._feature_extractor_from_bytes(params.pop("feature_extractor_data", None))
        return cls(**params)
    def create_session(self):
        """Create the onnxruntime inference session; set it to None when the model file is missing."""
        try:
            import onnxruntime
        except ImportError:
            log.error(
                "You cannot use OnnxEmbeddings without ONNXruntime being installed,"
                "please run `pip install onnxruntime`"
            )
            raise
        if os.path.isfile(self.onnx_model):
            self.session = onnxruntime.InferenceSession(self.onnx_model, providers=self.providers)
        else:
            # NOTE(review): `self.__class__.name` looks like it should be
            # `self.__class__.__name__` - verify before relying on this message.
            log.warning(
                f"Could not find file '{self.onnx_model}' used in {self.__class__.name}({self.name})."
                "The embedding won't work unless a valid path is set."
            )
            self.session = None
    def remove_session(self):
        """Tear down the onnxruntime session (e.g. before re-exporting or pickling)."""
        if self.session is not None:
            self.session._sess = None
            del self.session
        self.session = None
    def optimize_model(self, optimize_model_path, use_external_data_format: bool = False, **kwargs):
        """Wrapper for `onnxruntime.transformers.optimizer.optimize_model`."""
        from onnxruntime.transformers.optimizer import optimize_model
        self.remove_session()
        model = optimize_model(self.onnx_model, **kwargs)
        model.save_model_to_file(optimize_model_path, use_external_data_format=use_external_data_format)
        self.onnx_model = optimize_model_path
        self.create_session()
    def quantize_model(self, quantize_model_path, use_external_data_format: bool = False, **kwargs):
        """Quantize the onnx model in place and reload the session from the quantized file."""
        from onnxruntime.quantization import quantize_dynamic
        self.remove_session()
        quantize_dynamic(
            self.onnx_model, quantize_model_path, use_external_data_format=use_external_data_format, **kwargs
        )
        self.onnx_model = quantize_model_path
        self.create_session()
    def _forward_tensors(self, tensors) -> Dict[str, torch.Tensor]:
        """Run the onnx session on cpu numpy inputs and move results back to flair.device.

        Relies on the export output order: document embeddings first, token embeddings last
        (see `export_from_embedding`).
        """
        input_array = {k: v.numpy() for k, v in tensors.items()}
        embeddings = self.session.run([], input_array)
        result = {}
        if self.document_embedding:
            result["document_embeddings"] = torch.tensor(embeddings[0], device=flair.device)
        if self.token_embedding:
            result["token_embeddings"] = torch.tensor(embeddings[-1], device=flair.device)
        return result
    @classmethod
    def collect_dynamic_axes(cls, embedding: "TransformerEmbeddings", tensors):
        """Declare which tensor axes may vary between batches for the onnx export."""
        dynamic_axes = {}
        for k, v in tensors.items():
            if k in ["sub_token_lengths", "token_lengths"]:
                dynamic_axes[k] = {0: "sent-count"}
                continue
            if k == "word_ids":
                if embedding.tokenizer.is_fast:
                    dynamic_axes[k] = {0: "batch", 1: "sequ_length"}
                else:
                    dynamic_axes[k] = {0: "sent-count", 1: "max_token_count"}
                continue
            if k == "overflow_to_sample_mapping":
                dynamic_axes[k] = {0: "batch"}
            if v.dim() == 1:
                dynamic_axes[k] = {0: "batch"}
            else:
                dynamic_axes[k] = {0: "batch", 1: "sequ_length"}
        if embedding.token_embedding:
            dynamic_axes["token_embeddings"] = {0: "sent-count", 1: "max_token_count", 2: "token_embedding_size"}
        if embedding.document_embedding:
            dynamic_axes["document_embeddings"] = {0: "sent-count", 1: "document_embedding_size"}
        return dynamic_axes
    @classmethod
    def export_from_embedding(
        cls,
        path: Union[str, Path],
        embedding: "TransformerEmbeddings",
        example_sentences: List[Sentence],
        opset_version: int = 14,
        providers: Optional[List] = None,
    ):
        """Export a pytorch TransformerEmbeddings to onnx and return the onnx-backed embedding.

        The example sentences are traced to determine input names and dynamic axes.
        """
        path = str(path)
        example_tensors = embedding.prepare_tensors(example_sentences)
        dynamic_axes = cls.collect_dynamic_axes(embedding, example_tensors)
        output_names = []
        if embedding.document_embedding:
            output_names.append("document_embeddings")
        if embedding.token_embedding:
            output_names.append("token_embeddings")
        if providers is None:
            if flair.device.type == "cuda":
                providers = [
                    (
                        "CUDAExecutionProvider",
                        {
                            "device_id": 0,
                            "arena_extend_strategy": "kNextPowerOfTwo",
                            "gpu_mem_limit": 4 * 1024 * 1024 * 1024,
                            "cudnn_conv_algo_search": "EXHAUSTIVE",
                            "do_copy_in_default_stream": True,
                        },
                    ),
                    "CPUExecutionProvider",
                ]
            else:
                providers = ["CPUExecutionProvider"]
        # order the inputs exactly as the forward signature expects them
        desired_keys_order = [
            param for param in inspect.signature(embedding.forward).parameters if param in example_tensors
        ]
        torch.onnx.export(
            embedding,
            (example_tensors,),
            path,
            input_names=desired_keys_order,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            opset_version=opset_version,
        )
        return cls(onnx_model=path, providers=providers, **embedding.to_args())
@register_embeddings
class TransformerJitEmbeddings(TransformerBaseEmbeddings):
    """Transformer embeddings that run inference through a TorchScript (jit) module."""

    def __init__(self, jit_model: Union[bytes, ScriptModule], param_names: List[str], **kwargs) -> None:
        super().__init__(**kwargs)
        if isinstance(jit_model, bytes):
            # model was serialized (e.g. unpickled): reload it from the byte buffer
            buffer = BytesIO(jit_model)
            buffer.seek(0)
            self.jit_model: ScriptModule = torch.jit.load(buffer, map_location=flair.device)
        else:
            self.jit_model = jit_model
        self.param_names = param_names
        self.to(flair.device)
        self.eval()
    def to_params(self):
        """Serialize base params plus the jit module (as bytes) and its input parameter order."""
        state = super().to_params()
        buffer = BytesIO()
        torch.jit.save(self.jit_model, buffer)
        state["jit_model"] = buffer.getvalue()
        state["param_names"] = self.param_names
        return state
    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "Embeddings":
        """Recreate the embedding from serialized params (tokenizer/extractor stored as zip bytes)."""
        params["tokenizer"] = cls._tokenizer_from_bytes(params.pop("tokenizer_data"))
        params["feature_extractor"] = cls._feature_extractor_from_bytes(params.pop("feature_extractor_data", None))
        return cls(**params)
    def _forward_tensors(self, tensors) -> Dict[str, torch.Tensor]:
        """Call the jit module positionally (in `param_names` order) and wrap its outputs."""
        parameters = []
        for param in self.param_names:
            parameters.append(tensors[param])
        embeddings = self.jit_model(*parameters)
        # a tuple means the traced module returns (document, token) embeddings
        if isinstance(embeddings, tuple):
            return {"document_embeddings": embeddings[0], "token_embeddings": embeddings[1]}
        elif self.token_embedding:
            return {"token_embeddings": embeddings}
        elif self.document_embedding:
            return {"document_embeddings": embeddings}
        else:
            raise ValueError("either 'token_embedding' or 'document_embedding' needs to be set.")
    @classmethod
    def create_from_embedding(cls, module: ScriptModule, embedding: "TransformerEmbeddings", param_names: List[str]):
        """Build a jit embedding from a scripted module plus the source embedding's configuration."""
        return cls(jit_model=module, param_names=param_names, **embedding.to_args())
    @classmethod
    def parameter_to_list(
        cls, embedding: "TransformerEmbeddings", wrapper: torch.nn.Module, sentences: List[Sentence]
    ) -> Tuple[List[str], List[torch.Tensor]]:
        """Return the wrapper's forward parameter names and matching example tensors (for tracing)."""
        tensors = embedding.prepare_tensors(sentences)
        param_names = list(inspect.signature(wrapper.forward).parameters.keys())
        params = []
        for param in param_names:
            params.append(tensors[param])
        return param_names, params
@register_embeddings
class TransformerJitWordEmbeddings(TokenEmbeddings, TransformerJitEmbeddings):
    """JIT-compiled transformer embeddings exposed as token-level embeddings."""

    def __init__(self, **kwargs) -> None:
        # delegate straight to the jit base; the mixin contributes only its interface
        TransformerJitEmbeddings.__init__(self, **kwargs)
@register_embeddings
class TransformerJitDocumentEmbeddings(DocumentEmbeddings, TransformerJitEmbeddings):
    """JIT-compiled transformer embeddings exposed as document-level embeddings."""

    def __init__(self, **kwargs) -> None:
        # delegate straight to the jit base; the mixin contributes only its interface
        TransformerJitEmbeddings.__init__(self, **kwargs)
@register_embeddings
class TransformerOnnxWordEmbeddings(TokenEmbeddings, TransformerOnnxEmbeddings):
    """ONNX-backed transformer embeddings exposed as token-level embeddings."""

    def __init__(self, **kwargs) -> None:
        # delegate straight to the onnx base; the mixin contributes only its interface
        TransformerOnnxEmbeddings.__init__(self, **kwargs)
@register_embeddings
class TransformerOnnxDocumentEmbeddings(DocumentEmbeddings, TransformerOnnxEmbeddings):
    """ONNX-backed transformer embeddings exposed as document-level embeddings."""

    def __init__(self, **kwargs) -> None:
        # delegate straight to the onnx base; the mixin contributes only its interface
        TransformerOnnxEmbeddings.__init__(self, **kwargs)
@register_embeddings
class TransformerEmbeddings(TransformerBaseEmbeddings):
    """Embeddings produced by a pretrained transformer model (loaded via huggingface transformers).

    Depending on ``is_token_embedding`` / ``is_document_embedding``, this class produces
    per-token embeddings, one per-sentence embedding, or both in a single forward pass.
    """

    # embedding class instantiated when exporting this embedding to ONNX format
    onnx_cls: Type[TransformerOnnxEmbeddings] = TransformerOnnxEmbeddings

    def __init__(
        self,
        model: str = "bert-base-uncased",
        fine_tune: bool = True,
        layers: str = "-1",
        layer_mean: bool = True,
        subtoken_pooling: str = "first",
        cls_pooling: str = "cls",
        is_token_embedding: bool = True,
        is_document_embedding: bool = True,
        allow_long_sentences: bool = False,
        use_context: Union[bool, int] = False,
        respect_document_boundaries: bool = True,
        context_dropout: float = 0.5,
        saved_config: Optional[PretrainedConfig] = None,
        tokenizer_data: Optional[BytesIO] = None,
        feature_extractor_data: Optional[BytesIO] = None,
        name: Optional[str] = None,
        force_max_length: bool = False,
        needs_manual_ocr: Optional[bool] = None,
        use_context_separator: bool = True,
        **kwargs,
    ) -> None:
        """Instantiate transformer embeddings.

        :param model: huggingface model name or path used to load tokenizer/config/weights.
        :param fine_tune: if True, gradients flow into the transformer (``static_embeddings`` is set to the inverse).
        :param layers: comma-separated hidden-layer indices to use, or "all" for every layer.
        :param layer_mean: if True, selected layers are averaged; otherwise concatenated.
        :param subtoken_pooling: token-level pooling, one of "first", "last", "first_last", "mean".
        :param cls_pooling: document-level pooling, one of "cls", "max", "mean".
        :param allow_long_sentences: if True, over-long inputs are split into overlapping windows.
        :param use_context: False/0 disables context; True selects a context length of 64; an int sets it directly.
        :param saved_config: if given, the model architecture is rebuilt from this config instead of downloaded.
        :param tokenizer_data: optional in-memory zip file containing a serialized tokenizer.
        :param feature_extractor_data: optional in-memory zip file containing a serialized feature extractor.
        :param name: embedding name; defaults to "transformer-" plus the model path.
        :param needs_manual_ocr: if not None, overrides whether OCR boxes must be supplied manually.
        :param use_context_separator: if True, registers SENTENCE_BOUNDARY_TAG as an additional special token.
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        # these two carry live objects that cannot be re-created from plain constructor parameters
        del self.instance_parameters["saved_config"]
        del self.instance_parameters["tokenizer_data"]
        # temporary fix to disable tokenizer parallelism warning
        # (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
        os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # do not print transformer warnings as these are confusing in this case
        from transformers import logging

        logging.set_verbosity_error()
        self.tokenizer: PreTrainedTokenizer
        self.feature_extractor: Optional[FeatureExtractionMixin]
        if tokenizer_data is None:
            # load tokenizer and transformer model
            self.tokenizer = AutoTokenizer.from_pretrained(model, add_prefix_space=True, **kwargs)
            try:
                self.feature_extractor = AutoFeatureExtractor.from_pretrained(model, apply_ocr=False)
            except OSError:
                # model ships no feature extractor (text-only model)
                self.feature_extractor = None
        else:
            # load tokenizer from inmemory zip-file
            self.tokenizer = self._tokenizer_from_bytes(tokenizer_data)
            if feature_extractor_data is not None:
                self.feature_extractor = self._feature_extractor_from_bytes(feature_extractor_data)
            else:
                self.feature_extractor = None

        def is_supported_t5_model(config: PretrainedConfig) -> bool:
            # these model types are loaded through T5EncoderModel below instead of AutoModel
            t5_supported_model_types = ["t5", "mt5", "longt5"]
            return getattr(config, "model_type", "") in t5_supported_model_types

        if saved_config is None:
            # download config + pretrained weights; hidden states of all layers are requested
            config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
            if is_supported_t5_model(config):
                from transformers import T5EncoderModel

                transformer_model = T5EncoderModel.from_pretrained(model, config=config)
            else:
                transformer_model = AutoModel.from_pretrained(model, config=config)
        else:
            # rebuild architecture from the stored config; weights are loaded later (see __setstate__)
            if is_supported_t5_model(saved_config):
                from transformers import T5EncoderModel

                transformer_model = T5EncoderModel(saved_config, **kwargs)
            else:
                transformer_model = AutoModel.from_config(saved_config, **kwargs)
        transformer_model = transformer_model.to(flair.device)
        self.truncate = True
        self.force_max_length = force_max_length
        if self.tokenizer.model_max_length > LARGE_INTEGER:
            # tokenizer reports no effective max length -> nothing to truncate,
            # so long-sentence splitting is unnecessary
            allow_long_sentences = False
            self.truncate = False
        # long sentences are split into windows overlapping by half the max length
        self.stride = self.tokenizer.model_max_length // 2 if allow_long_sentences else 0
        self.allow_long_sentences = allow_long_sentences
        self.use_lang_emb = hasattr(transformer_model, "use_lang_emb") and transformer_model.use_lang_emb
        # model name
        if name is None:
            self.name = "transformer-" + transformer_model.name_or_path
        else:
            self.name = name
        self.base_model_name = transformer_model.name_or_path
        self.token_embedding = is_token_embedding
        self.document_embedding = is_document_embedding
        # validate pooling choices early, before any expensive setup continues
        if self.document_embedding and cls_pooling not in ["cls", "max", "mean"]:
            raise ValueError(f"Document Pooling operation `{cls_pooling}` is not defined for TransformerEmbedding")
        if self.token_embedding and subtoken_pooling not in ["first", "last", "first_last", "mean"]:
            raise ValueError(f"Subtoken Pooling operation `{subtoken_pooling}` is not defined for TransformerEmbedding")
        if self.document_embedding and cls_pooling == "cls" and allow_long_sentences:
            # NOTE(review): the warning text below has an unbalanced quote ("'max ") — runtime string left unchanged
            log.warning(
                "Using long sentences for Document embeddings is only beneficial for cls_pooling types 'mean' and 'max "
            )
        if isinstance(use_context, bool):
            # True selects the default context window of 64; False disables context
            self.context_length: int = 64 if use_context else 0
        else:
            self.context_length = use_context
        self.context_dropout = context_dropout
        self.respect_document_boundaries = respect_document_boundaries
        # embedding parameters
        if layers == "all":
            # send mini-token through to check how many layers the model has
            hidden_states = transformer_model(torch.tensor([1], device=flair.device).unsqueeze(0))[-1]
            self.layer_indexes = list(range(len(hidden_states)))
        else:
            self.layer_indexes = list(map(int, layers.split(",")))
        self.cls_pooling = cls_pooling
        self.subtoken_pooling = subtoken_pooling
        self.layer_mean = layer_mean
        self.fine_tune = fine_tune
        self.static_embeddings = not self.fine_tune
        # return length
        self.embedding_length_internal = self._calculate_embedding_length(transformer_model)
        if needs_manual_ocr is not None:
            self.needs_manual_ocr = needs_manual_ocr
        # If we use a context separator, add a new special token
        self.use_context_separator = use_context_separator
        if use_context_separator:
            self.tokenizer.add_special_tokens({"additional_special_tokens": [SENTENCE_BOUNDARY_TAG]})
            # resize the input embedding matrix so the new special token gets a row
            transformer_model.resize_token_embeddings(len(self.tokenizer))
        super().__init__(**self.to_args())
        # most models have an initial BOS token, except for XLNet, T5 and GPT2
        self.initial_cls_token: bool = self._has_initial_cls_token()
        self.model = transformer_model
        self.to(flair.device)
        # when initializing, embeddings are in eval mode by default
        self.eval()

    @property
    def embedding_length(self) -> int:
        # computed lazily so instances unpickled without this attribute still work
        if not hasattr(self, "embedding_length_internal"):
            self.embedding_length_internal = self._calculate_embedding_length(self.model)
        return self.embedding_length_internal

    def _has_initial_cls_token(self) -> bool:
        """Return True if the tokenizer puts its CLS token at position 0 of the encoding."""
        # most models have CLS token as last token (GPT-1, GPT-2, TransfoXL, XLNet, XLM), but BERT is initial
        if self.tokenizer_needs_ocr_boxes:
            # cannot run `.encode` if ocr boxes are required, assume
            return True
        tokens = self.tokenizer.encode("a")
        return tokens[0] == self.tokenizer.cls_token_id

    def _calculate_embedding_length(self, model) -> int:
        """Derive the output vector size from the layer selection and pooling settings."""
        length = len(self.layer_indexes) * model.config.hidden_size if not self.layer_mean else model.config.hidden_size
        # in case of doubt: token embedding has higher priority than document embedding
        if self.token_embedding and self.subtoken_pooling == "first_last":
            # first_last concatenates two subtoken vectors, doubling the length
            length *= 2

            if self.document_embedding:
                log.warning(
                    "Token embedding length and Document embedding length vary, due to `first_last` subtoken pooling, this might not be supported"
                )
        return length

    @property
    def embedding_type(self) -> str:
        # in case of doubt: token embedding has higher priority than document embedding
        return "word-level" if self.token_embedding else "sentence-level"

    def __setstate__(self, state):
        """Restore a pickled embedding, translating several legacy state layouts on the fly."""
        config_state_dict = state.pop("config_state_dict", None)
        model_state_dict = state.pop("model_state_dict", None)
        # legacy TransformerDocumentEmbedding
        state.pop("batch_size", None)
        state.pop("embedding_length_internal", None)
        # legacy TransformerTokenEmbedding
        state.pop("memory_effective_training", None)
        if "base_model_name" in state:
            state["model"] = state.pop("base_model_name")
        state["use_context"] = state.pop("context_length", False)
        if "layer_indexes" in state:
            layer_indexes = state.pop("layer_indexes")
            state["layers"] = ",".join(map(str, layer_indexes))
        if "use_context_separator" not in state:
            # legacy Flair <= 0.12
            state["use_context_separator"] = False
        if "use_scalar_mix" in state:
            # legacy Flair <= 0.7
            state["layer_mean"] = state.pop("use_scalar_mix")
        if "is_token_embedding" not in state:
            # legacy TransformerTokenEmbedding
            state["is_token_embedding"] = "pooling_operation" in state
        if "is_document_embedding" not in state:
            # Legacy TransformerDocumentEmbedding
            state["is_document_embedding"] = "pooling" in state
        if "pooling_operation" in state:
            # legacy TransformerTokenEmbedding
            state["subtoken_pooling"] = state.pop("pooling_operation")
        if "pooling" in state:
            # legacy TransformerDocumentEmbedding
            state["cls_pooling"] = state.pop("pooling")
        config = None
        if config_state_dict:
            # some models like the tars model somehow lost this information.
            if config_state_dict.get("_name_or_path") == "None":
                config_state_dict["_name_or_path"] = state.get("model", "None")
            # map the stored model_type back to its transformers config class
            model_type = config_state_dict.get("model_type", "bert")
            config_class = CONFIG_MAPPING[model_type]
            config = config_class.from_dict(config_state_dict)
        # build a fresh instance from the converted state, then adopt its attributes
        embedding = self.create_from_state(saved_config=config, **state)
        # copy values from new embedding
        for key in embedding.__dict__:
            self.__dict__[key] = embedding.__dict__[key]
        if model_state_dict:
            self.model.load_state_dict(model_state_dict)

    @classmethod
    def from_params(cls, params):
        """Create an instance from a to_params() dict (counterpart of to_params below)."""
        # drop values that the constructor recomputes itself
        params.pop("truncate", None)
        params.pop("stride", None)
        params.pop("embedding_length", None)
        params.pop("use_lang_emb", None)
        params["use_context"] = params.pop("context_length", 0)
        config_state_dict = params.pop("config_state_dict", None)
        config = None
        if config_state_dict:
            model_type = config_state_dict.get("model_type", "bert")
            config_class = CONFIG_MAPPING[model_type]
            config = config_class.from_dict(config_state_dict)
        return cls.create_from_state(saved_config=config, **params)

    def to_params(self):
        """Serialize constructor parameters plus the model config to a plain dict."""
        config_dict = self.model.config.to_dict()
        super_params = super().to_params()
        # those parameters are only from the super class and will be recreated in the constructor.
        del super_params["truncate"]
        del super_params["stride"]
        del super_params["embedding_length"]
        del super_params["use_lang_emb"]
        model_state = {
            **super_params,
            "model": self.base_model_name,
            "fine_tune": self.fine_tune,
            "layers": ",".join(map(str, self.layer_indexes)),
            "layer_mean": self.layer_mean,
            "subtoken_pooling": self.subtoken_pooling,
            "cls_pooling": self.cls_pooling,
            "config_state_dict": config_dict,
        }
        return model_state

    def _can_document_embedding_shortcut(self):
        # cls first pooling can be done without recreating sentence hidden states
        return (
            self.document_embedding
            and not self.token_embedding
            and self.cls_pooling == "cls"
            and self.initial_cls_token
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        sub_token_lengths: Optional[torch.LongTensor] = None,
        token_lengths: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        overflow_to_sample_mapping: Optional[torch.Tensor] = None,
        word_ids: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        bbox: Optional[torch.Tensor] = None,
        pixel_values: Optional[torch.Tensor] = None,
    ):
        """Run the transformer and pool hidden states into token and/or document embeddings.

        Returns a dict with keys "token_embeddings" and/or "document_embeddings",
        depending on the configured embedding modes.
        """
        # only forward optional inputs that were actually provided (model-dependent)
        model_kwargs = {}
        if langs is not None:
            model_kwargs["langs"] = langs
        if attention_mask is not None:
            model_kwargs["attention_mask"] = attention_mask
        if bbox is not None:
            model_kwargs["bbox"] = bbox
        if pixel_values is not None:
            model_kwargs["pixel_values"] = pixel_values
        hidden_states = self.model(input_ids, **model_kwargs)[-1]
        # make the tuple a tensor; makes working with it easier.
        hidden_states = torch.stack(hidden_states)
        # for multimodal models like layoutlmv3, we truncate the image embeddings as they are only used via attention
        hidden_states = truncate_hidden_states(hidden_states, input_ids)
        # only use layers that will be outputted
        hidden_states = hidden_states[self.layer_indexes, :, :]
        if self.layer_mean:
            hidden_states = hidden_states.mean(dim=0)
        else:
            # concatenate selected layers along the feature dimension
            hidden_states = torch.flatten(hidden_states.permute((0, 3, 1, 2)), 0, 1).permute((1, 2, 0))
        if self._can_document_embedding_shortcut():
            return {"document_embeddings": hidden_states[:, 0]}
        if self.allow_long_sentences:
            # re-assemble the overlapping windows back into per-sentence tensors
            assert overflow_to_sample_mapping is not None
            sentence_hidden_states = combine_strided_tensors(
                hidden_states, overflow_to_sample_mapping, self.stride // 2, self.tokenizer.model_max_length, 0
            )
            if self.tokenizer.is_fast and self.token_embedding:
                word_ids = combine_strided_tensors(
                    word_ids, overflow_to_sample_mapping, self.stride // 2, self.tokenizer.model_max_length, -100
                )
        else:
            sentence_hidden_states = hidden_states
        result = {}
        if self.document_embedding:
            if self.cls_pooling == "cls" and self.initial_cls_token:
                document_embeddings = sentence_hidden_states[:, 0]
            else:
                assert sub_token_lengths is not None
                if self.cls_pooling == "cls":
                    # CLS token sits at the end of each sequence here
                    document_embeddings = sentence_hidden_states[
                        torch.arange(sentence_hidden_states.shape[0]), sub_token_lengths - 1
                    ]
                elif self.cls_pooling == "mean":
                    document_embeddings = document_mean_pooling(sentence_hidden_states, sub_token_lengths)
                elif self.cls_pooling == "max":
                    document_embeddings = document_max_pooling(sentence_hidden_states, sub_token_lengths)
                else:
                    raise ValueError(f"cls pooling method: `{self.cls_pooling}` is not implemented")
            result["document_embeddings"] = document_embeddings
        if self.token_embedding:
            assert word_ids is not None
            assert token_lengths is not None
            all_token_embeddings = torch.zeros(  # type: ignore[call-overload]
                word_ids.shape[0], token_lengths.max(), self.embedding_length_internal, device=flair.device
            )
            true_tensor = torch.ones_like(word_ids[:, :1], dtype=torch.bool)
            # gain_mask marks positions where the word id changes, i.e. subtoken boundaries
            if self.subtoken_pooling == "first":
                gain_mask = word_ids[:, 1:] != word_ids[:, : word_ids.shape[1] - 1]
                first_mask = torch.cat([true_tensor, gain_mask], dim=1)
                all_token_embeddings = fill_masked_elements(
                    all_token_embeddings, sentence_hidden_states, first_mask, word_ids, token_lengths
                )
            elif self.subtoken_pooling == "last":
                gain_mask = word_ids[:, 1:] != word_ids[:, : word_ids.shape[1] - 1]
                last_mask = torch.cat([gain_mask, true_tensor], dim=1)
                all_token_embeddings = fill_masked_elements(
                    all_token_embeddings, sentence_hidden_states, last_mask, word_ids, token_lengths
                )
            elif self.subtoken_pooling == "first_last":
                # first half of the feature dim holds the first subtoken, second half the last
                gain_mask = word_ids[:, 1:] != word_ids[:, : word_ids.shape[1] - 1]
                first_mask = torch.cat([true_tensor, gain_mask], dim=1)
                last_mask = torch.cat([gain_mask, true_tensor], dim=1)
                all_token_embeddings[:, :, : sentence_hidden_states.shape[2]] = fill_masked_elements(
                    all_token_embeddings[:, :, : sentence_hidden_states.shape[2]],
                    sentence_hidden_states,
                    first_mask,
                    word_ids,
                    token_lengths,
                )
                all_token_embeddings[:, :, sentence_hidden_states.shape[2] :] = fill_masked_elements(
                    all_token_embeddings[:, :, sentence_hidden_states.shape[2] :],
                    sentence_hidden_states,
                    last_mask,
                    word_ids,
                    token_lengths,
                )
            elif self.subtoken_pooling == "mean":
                all_token_embeddings = fill_mean_token_embeddings(
                    all_token_embeddings, sentence_hidden_states, word_ids, token_lengths
                )
            else:
                raise ValueError(f"subtoken pooling method: `{self.subtoken_pooling}` is not implemented")
            result["token_embeddings"] = all_token_embeddings
        return result

    def _forward_tensors(self, tensors) -> Dict[str, torch.Tensor]:
        # thin adapter so the base class can call forward with a prepared tensor dict
        return self.forward(**tensors)

    def export_onnx(
        self, path: Union[str, Path], example_sentences: List[Sentence], **kwargs
    ) -> TransformerOnnxEmbeddings:
        """Export TransformerEmbeddings to OnnxFormat.

        :param example_sentences: a list of sentences that will be used for tracing. It is recommended to take 2-4
            sentences with some variation.
        """
        return self.onnx_cls.export_from_embedding(path, self, example_sentences, **kwargs)
| 58,455 | 41.606414 | 146 | py |
flair | flair-master/flair/embeddings/token.py | import hashlib
import logging
import os
import re
import tempfile
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import gensim
import numpy as np
import torch
from bpemb import BPEmb
from gensim.models import KeyedVectors
from gensim.models.fasttext import FastTextKeyedVectors, load_facebook_vectors
from torch import nn
import flair
from flair.data import Corpus, Dictionary, Sentence, _iter_dataset
from flair.embeddings.base import TokenEmbeddings, load_embeddings, register_embeddings
from flair.embeddings.transformer import (
TransformerEmbeddings,
TransformerOnnxWordEmbeddings,
)
from flair.file_utils import cached_path, extract_single_zip_file, instance_lru_cache
log = logging.getLogger("flair")
@register_embeddings
class TransformerWordEmbeddings(TokenEmbeddings, TransformerEmbeddings):
    # ONNX export counterpart for word-level transformer embeddings
    onnx_cls = TransformerOnnxWordEmbeddings

    def __init__(
        self,
        model: str = "bert-base-uncased",
        is_document_embedding: bool = False,
        allow_long_sentences: bool = True,
        **kwargs,
    ) -> None:
        """Bidirectional transformer embeddings of words from various transformer architectures.

        :param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html
            for options)
        :param is_document_embedding: if True, a sentence-level embedding is produced in addition to the
            per-token embeddings
        :param allow_long_sentences: if True, sentences longer than the model maximum are processed in
            overlapping windows
        :param kwargs: forwarded to TransformerEmbeddings (e.g. ``layers``, ``subtoken_pooling``,
            ``layer_mean``, ``fine_tune``)
        """
        # initialize through the TransformerEmbeddings base explicitly;
        # token embeddings are always enabled for this class
        TransformerEmbeddings.__init__(
            self,
            model=model,
            allow_long_sentences=allow_long_sentences,
            is_token_embedding=True,
            is_document_embedding=is_document_embedding,
            **kwargs,
        )

    @classmethod
    def create_from_state(cls, **state):
        # this flag is fixed for this class, so drop it before forwarding the state
        state.pop("is_token_embedding")
        return cls(**state)
@register_embeddings
class StackedEmbeddings(TokenEmbeddings):
    """A stack of embeddings, used if you need to combine several different embedding types."""

    def __init__(self, embeddings: List[TokenEmbeddings], overwrite_names: bool = True) -> None:
        """The constructor takes a list of embeddings to be combined.

        :param embeddings: the token embeddings to stack; each token receives the concatenation of all of them.
        :param overwrite_names: if True, prefix each embedding's name with its stack index so names are unique.
        """
        super().__init__()
        self.embeddings = embeddings
        # IMPORTANT: add embeddings as torch modules so their parameters are registered
        for i, embedding in enumerate(embeddings):
            if overwrite_names:
                embedding.name = f"{i!s}-{embedding.name}"
            self.add_module(f"list_embedding_{i!s}", embedding)
        self.name: str = "Stack"
        self.__names = [name for embedding in self.embeddings for name in embedding.get_names()]
        self.static_embeddings: bool = True
        # the stack takes its embedding type (word-level vs. sentence-level) from its first member
        self.__embedding_type: str = embeddings[0].embedding_type
        # total length is the sum of all stacked embedding lengths
        self.__embedding_length: int = sum(embedding.embedding_length for embedding in embeddings)
        self.eval()

    def embed(self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True):
        """Add the embeddings of every stacked embedding to the given sentences."""
        # if only one sentence is passed, convert to list of sentence
        if type(sentences) is Sentence:
            sentences = [sentences]
        for embedding in self.embeddings:
            embedding.embed(sentences)

    @property
    def embedding_type(self) -> str:
        return self.__embedding_type

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        for embedding in self.embeddings:
            embedding._add_embeddings_internal(sentences)
        return sentences

    def __str__(self) -> str:
        return f'StackedEmbeddings [{",".join([str(e) for e in self.embeddings])}]'

    def get_names(self) -> List[str]:
        """Returns a list of embedding names.

        In most cases, it is just a list with one item, namely the name of this embedding. But in some cases, the
        embedding is made up by different embeddings (StackedEmbedding).
        Then, the list contains the names of all embeddings in the stack.
        """
        # make compatible with serialized models: the cached list lives in __dict__ under its
        # name-mangled key. Fix: the previous check for the literal "__names" could never match,
        # so the list was needlessly recomputed on every call.
        if "_StackedEmbeddings__names" not in self.__dict__:
            self.__names = [name for embedding in self.embeddings for name in embedding.get_names()]
        return self.__names

    def get_named_embeddings_dict(self) -> Dict:
        # merge the name->embedding dicts of all stacked embeddings
        named_embeddings_dict = {}
        for embedding in self.embeddings:
            named_embeddings_dict.update(embedding.get_named_embeddings_dict())
        return named_embeddings_dict

    @classmethod
    def from_params(cls, params):
        # names were already rewritten when the stack was first built, so keep them as-is
        embeddings = [load_embeddings(p) for p in params["embeddings"]]
        return cls(embeddings=embeddings, overwrite_names=False)

    def to_params(self):
        return {"embeddings": [emb.save_embeddings(use_state_dict=False) for emb in self.embeddings]}
@register_embeddings
class WordEmbeddings(TokenEmbeddings):
    """Standard static word embeddings, such as GloVe or FastText."""

    def __init__(
        self,
        embeddings: Optional[str],
        field: Optional[str] = None,
        fine_tune: bool = False,
        force_cpu: bool = True,
        stable: bool = False,
        no_header: bool = False,
        vocab: Optional[Dict[str, int]] = None,
        embedding_length: Optional[int] = None,
        name: Optional[str] = None,
    ) -> None:
        """Initializes classic word embeddings.

        Constructor downloads required files if not there.

        :param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom
            If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.
            set stable=True to use the stable embeddings as described in https://arxiv.org/abs/2110.02861
        :param field: if set, tokens are looked up by this label value instead of their text.
        :param fine_tune: if True, the embedding matrix is trainable (requires force_cpu=False on GPU).
        :param force_cpu: keep the (potentially huge) embedding matrix on CPU regardless of flair.device.
        :param no_header: passed through to gensim when loading .txt/.bin word2vec files.
        :param vocab: explicit vocabulary mapping; required when ``embeddings`` is None.
        :param embedding_length: explicit vector size; required when ``embeddings`` is None.
        :param name: embedding name; defaults to the resolved embeddings path.
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        if fine_tune and force_cpu and flair.device.type != "cpu":
            raise ValueError("Cannot train WordEmbeddings on cpu if the model is trained on gpu, set force_cpu=False")
        embeddings_path = self.resolve_precomputed_path(embeddings)
        if name is None:
            name = str(embeddings_path)
        self.name = name
        self.embeddings = embeddings if embeddings is not None else name
        self.static_embeddings = not fine_tune
        self.fine_tune = fine_tune
        self.force_cpu = force_cpu
        self.field = field
        self.stable = stable
        super().__init__()
        if embeddings_path is not None:
            # load vectors via gensim; format depends on the file suffix
            if embeddings_path.suffix in [".bin", ".txt"]:
                precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
                    str(embeddings_path), binary=embeddings_path.suffix == ".bin", no_header=no_header
                )
            else:
                precomputed_word_embeddings = gensim.models.KeyedVectors.load(str(embeddings_path))
            self.__embedding_length: int = precomputed_word_embeddings.vector_size
            # append one zero row used for out-of-vocabulary (<unk>) lookups
            vectors = np.row_stack(
                (
                    precomputed_word_embeddings.vectors,
                    np.zeros(self.__embedding_length, dtype="float"),
                )
            )
            try:
                # gensim version 4
                self.vocab = precomputed_word_embeddings.key_to_index
            except AttributeError:
                # gensim version 3
                self.vocab = {k: v.index for k, v in precomputed_word_embeddings.vocab.items()}
        else:
            # if no embedding is set, the vocab and embedding length is requried
            assert vocab is not None
            assert embedding_length is not None
            self.vocab = vocab
            self.__embedding_length = embedding_length
            # zero-initialized matrix (weights are expected to be loaded later, e.g. via state_dict)
            vectors = np.zeros((len(self.vocab) + 1, self.__embedding_length), dtype="float")
        self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(vectors), freeze=not fine_tune)
        if stable:
            # layer norm on top of the embedding, per https://arxiv.org/abs/2110.02861
            self.layer_norm: Optional[nn.LayerNorm] = nn.LayerNorm(
                self.__embedding_length, elementwise_affine=fine_tune
            )
        else:
            self.layer_norm = None
        self.device = None
        self.to(flair.device)
        self.eval()

    def resolve_precomputed_path(self, embeddings: Optional[str]) -> Optional[Path]:
        """Map a short embedding identifier to a local file path, downloading it if necessary.

        Returns None when ``embeddings`` is None (vocab/length are then supplied explicitly).
        """
        if embeddings is None:
            return None
        hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/token"
        cache_dir = Path("embeddings")
        # GLOVE embeddings
        if embeddings.lower() == "glove" or embeddings.lower() == "en-glove":
            cached_path(f"{hu_path}/glove.gensim.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/glove.gensim", cache_dir=cache_dir)
        # TURIAN embeddings
        elif embeddings.lower() == "turian" or embeddings.lower() == "en-turian":
            cached_path(f"{hu_path}/turian.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/turian", cache_dir=cache_dir)
        # KOMNINOS embeddings
        elif embeddings.lower() == "extvec" or embeddings.lower() == "en-extvec":
            cached_path(f"{hu_path}/extvec.gensim.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/extvec.gensim", cache_dir=cache_dir)
        # pubmed embeddings
        elif embeddings.lower() == "pubmed" or embeddings.lower() == "en-pubmed":
            cached_path(
                f"{hu_path}/pubmed_pmc_wiki_sg_1M.gensim.vectors.npy",
                cache_dir=cache_dir,
            )
            return cached_path(f"{hu_path}/pubmed_pmc_wiki_sg_1M.gensim", cache_dir=cache_dir)
        # FT-CRAWL embeddings
        elif embeddings.lower() == "crawl" or embeddings.lower() == "en-crawl":
            cached_path(f"{hu_path}/en-fasttext-crawl-300d-1M.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/en-fasttext-crawl-300d-1M", cache_dir=cache_dir)
        # FT-CRAWL embeddings
        elif embeddings.lower() in ["news", "en-news", "en"]:
            cached_path(f"{hu_path}/en-fasttext-news-300d-1M.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/en-fasttext-news-300d-1M", cache_dir=cache_dir)
        # twitter embeddings
        elif embeddings.lower() in ["twitter", "en-twitter"]:
            cached_path(f"{hu_path}/twitter.gensim.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/twitter.gensim", cache_dir=cache_dir)
        # two-letter language code wiki embeddings
        elif len(embeddings.lower()) == 2:
            cached_path(
                f"{hu_path}/{embeddings}-wiki-fasttext-300d-1M.vectors.npy",
                cache_dir=cache_dir,
            )
            return cached_path(f"{hu_path}/{embeddings}-wiki-fasttext-300d-1M", cache_dir=cache_dir)
        # two-letter language code wiki embeddings
        elif len(embeddings.lower()) == 7 and embeddings.endswith("-wiki"):
            cached_path(
                f"{hu_path}/{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy",
                cache_dir=cache_dir,
            )
            return cached_path(f"{hu_path}/{embeddings[:2]}-wiki-fasttext-300d-1M", cache_dir=cache_dir)
        # two-letter language code crawl embeddings
        elif len(embeddings.lower()) == 8 and embeddings.endswith("-crawl"):
            cached_path(
                f"{hu_path}/{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy",
                cache_dir=cache_dir,
            )
            return cached_path(
                f"{hu_path}/{embeddings[:2]}-crawl-fasttext-300d-1M",
                cache_dir=cache_dir,
            )
        elif not Path(embeddings).exists():
            raise ValueError(f'The given embeddings "{embeddings}" is not available or is not a valid path.')
        else:
            # treat as a local file path
            return Path(embeddings)

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    @instance_lru_cache(maxsize=100000, typed=False)
    def get_cached_token_index(self, word: str) -> int:
        """Look up a word's row index, trying lowercasing and digit normalization as fallbacks."""
        if word in self.vocab:
            return self.vocab[word]
        elif word.lower() in self.vocab:
            return self.vocab[word.lower()]
        elif re.sub(r"\d", "#", word.lower()) in self.vocab:
            return self.vocab[re.sub(r"\d", "#", word.lower())]
        elif re.sub(r"\d", "0", word.lower()) in self.vocab:
            return self.vocab[re.sub(r"\d", "0", word.lower())]
        else:
            return len(self.vocab)  # <unk> token

    def get_vec(self, word: str) -> torch.Tensor:
        # NOTE(review): relies on a `self.vectors` attribute that is not set anywhere in this
        # class (the matrix lives in `self.embedding`) — presumably a legacy leftover; verify
        # against callers before relying on this method.
        word_embedding = self.vectors[self.get_cached_token_index(word)]
        word_embedding = torch.tensor(word_embedding.tolist(), device=flair.device, dtype=torch.float)
        return word_embedding

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Embed all tokens of all sentences with a single batched embedding lookup."""
        tokens = [token for sentence in sentences for token in sentence.tokens]
        word_indices: List[int] = []
        for token in tokens:
            # look up either the token text or, if `field` is set, the value of that label
            word = token.text if self.field is None else token.get_label(self.field).value
            word_indices.append(self.get_cached_token_index(word))
        embeddings = self.embedding(torch.tensor(word_indices, dtype=torch.long, device=self.device))
        if self.layer_norm is not None:
            embeddings = self.layer_norm(embeddings)
        if self.force_cpu:
            # lookup happened on CPU; move the resulting (small) batch to the target device
            embeddings = embeddings.to(flair.device)
        for emb, token in zip(embeddings, tokens):
            token.set_embedding(self.name, emb)
        return sentences

    def __str__(self) -> str:
        return self.name

    def extra_repr(self):
        return f"'{self.embeddings}'"

    def train(self, mode=True):
        # only switch to train mode when fine-tuning is enabled
        super().train(self.fine_tune and mode)

    def to(self, device):
        # with force_cpu, the embedding matrix always stays on CPU
        if self.force_cpu:
            device = torch.device("cpu")
        self.device = device
        super().to(device)

    def _apply(self, fn):
        if fn.__name__ == "convert" and self.force_cpu:
            # this is required to force the module on the cpu,
            # if a parent module is put to gpu, the _apply is called to each sub_module
            # self.to(..) actually sets the device properly
            if not hasattr(self, "device"):
                self.to(flair.device)
            return
        super()._apply(fn)

    def __getattribute__(self, item):
        # this ignores the get_cached_vec method when loading older versions
        # it is needed for compatibility reasons
        if item == "get_cached_vec":
            return None
        return super().__getattribute__(item)

    def __setstate__(self, state: Dict[str, Any]):
        """Restore from pickle, converting legacy states that stored raw gensim KeyedVectors."""
        state.pop("get_cached_vec", None)
        state.setdefault("embeddings", state["name"])
        state.setdefault("force_cpu", True)
        state.setdefault("fine_tune", False)
        state.setdefault("field", None)
        if "precomputed_word_embeddings" in state:
            # legacy format: rebuild the nn.Embedding + vocab from the stored gensim object
            precomputed_word_embeddings: KeyedVectors = state.pop("precomputed_word_embeddings")
            vectors = np.row_stack(
                (
                    precomputed_word_embeddings.vectors,
                    np.zeros(precomputed_word_embeddings.vector_size, dtype="float"),
                )
            )
            embedding = nn.Embedding.from_pretrained(torch.FloatTensor(vectors), freeze=not state["fine_tune"])
            try:
                # gensim version 4
                vocab = precomputed_word_embeddings.key_to_index
            except AttributeError:
                # gensim version 3
                vocab = {k: v.index for k, v in precomputed_word_embeddings.__dict__["vocab"].items()}
            state["embedding"] = embedding
            state["vocab"] = vocab
        if "stable" not in state:
            state["stable"] = False
            state["layer_norm"] = None
        super().__setstate__(state)

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "WordEmbeddings":
        # embeddings=None: vocab and embedding_length come from the params dict
        return cls(embeddings=None, **params)

    def to_params(self) -> Dict[str, Any]:
        return {
            "vocab": self.vocab,
            "stable": self.stable,
            "fine_tune": self.fine_tune,
            "force_cpu": self.force_cpu,
            "field": self.field,
            "name": self.name,
            "embedding_length": self.__embedding_length,
        }

    def state_dict(self, *args, destination=None, prefix="", keep_vars=False):
        # when loading the old versions from pickle, the embeddings might not be added as pytorch module.
        # we do this delayed, when the weights are collected (e.g. for saving), as doing this earlier might
        # lead to issues while loading (trying to load weights that weren't stored as python weights and therefore
        # not finding them)
        if list(self.modules()) == [self]:
            self.embedding = self.embedding
        return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars)
@register_embeddings
class CharacterEmbeddings(TokenEmbeddings):
    """Character embeddings of words, as proposed in Lample et al., 2016."""

    def __init__(
        self,
        path_to_char_dict: Optional[Union[str, Dictionary]] = None,
        char_embedding_dim: int = 25,
        hidden_size_char: int = 25,
    ) -> None:
        """Instantiates a bidirectional lstm layer toi encode words by their character representation.

        Uses the default character dictionary if none provided.

        :param path_to_char_dict: a Dictionary instance, a path to a saved dictionary, or None
            for the built-in "common-chars" dictionary.
        :param char_embedding_dim: size of the per-character embedding vectors.
        :param hidden_size_char: hidden size of the character LSTM (output is 2x this, bidirectional).
        """
        super().__init__()
        self.name = "Char"
        # trained jointly with the downstream model, hence not static
        self.static_embeddings = False
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        # use list of common characters if none provided
        if path_to_char_dict is None:
            self.char_dictionary: Dictionary = Dictionary.load("common-chars")
        elif isinstance(path_to_char_dict, Dictionary):
            self.char_dictionary = path_to_char_dict
        else:
            self.char_dictionary = Dictionary.load_from_file(path_to_char_dict)
        self.char_embedding_dim: int = char_embedding_dim
        self.hidden_size_char: int = hidden_size_char
        self.char_embedding = torch.nn.Embedding(len(self.char_dictionary.item2idx), self.char_embedding_dim)
        self.char_rnn = torch.nn.LSTM(
            self.char_embedding_dim,
            self.hidden_size_char,
            num_layers=1,
            bidirectional=True,
        )
        # bidirectional: forward + backward final states are concatenated
        self.__embedding_length = self.hidden_size_char * 2
        self.to(flair.device)
        self.eval()

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        """Encode each token of each sentence through the character BiLSTM."""
        for sentence in sentences:
            tokens_char_indices = []
            # translate words in sentence into ints using dictionary
            for token in sentence.tokens:
                char_indices = [self.char_dictionary.get_idx_for_item(char) for char in token.text]
                tokens_char_indices.append(char_indices)
            # sort words by length, for batching and masking
            tokens_sorted_by_length = sorted(tokens_char_indices, key=lambda p: len(p), reverse=True)
            # d maps a position in the sorted batch back to the token's original position
            d = {}
            for i, ci in enumerate(tokens_char_indices):
                for j, cj in enumerate(tokens_sorted_by_length):
                    if ci == cj:
                        d[j] = i
                        # NOTE(review): `continue` only advances the inner loop (a `break` was
                        # likely intended); when a sentence contains duplicate tokens, several
                        # sorted positions map to the same original index and one original
                        # position keeps the cloned sorted-order value below — verify.
                        continue
            chars2_length = [len(c) for c in tokens_sorted_by_length]
            longest_token_in_sentence = max(chars2_length)
            # right-padded matrix of char indices, one row per token (sorted order)
            tokens_mask = torch.zeros(
                (len(tokens_sorted_by_length), longest_token_in_sentence),
                dtype=torch.long,
                device=flair.device,
            )
            for i, c in enumerate(tokens_sorted_by_length):
                tokens_mask[i, : chars2_length[i]] = torch.tensor(c, dtype=torch.long, device=flair.device)
            # chars for rnn processing
            chars = tokens_mask
            character_embeddings = self.char_embedding(chars).transpose(0, 1)
            # pack so padding positions are skipped by the LSTM (requires descending lengths)
            packed = torch.nn.utils.rnn.pack_padded_sequence(character_embeddings, chars2_length)  # type: ignore[arg-type]
            lstm_out, self.hidden = self.char_rnn(packed)
            outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
            outputs = outputs.transpose(0, 1)
            # take the LSTM output at each token's last real character
            chars_embeds_temp = torch.zeros(
                (outputs.size(0), outputs.size(2)),
                dtype=torch.float,
                device=flair.device,
            )
            for i, index in enumerate(output_lengths):
                chars_embeds_temp[i] = outputs[i, index - 1]
            # scatter rows from sorted order back to the original token order via d
            character_embeddings = chars_embeds_temp.clone()
            for i in range(character_embeddings.size(0)):
                character_embeddings[d[i]] = chars_embeds_temp[i]
            for token_number, token in enumerate(sentence.tokens):
                token.set_embedding(self.name, character_embeddings[token_number])

    def __str__(self) -> str:
        return self.name

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "CharacterEmbeddings":
        return cls(**params)

    def to_params(self) -> Dict[str, Any]:
        return {
            "path_to_char_dict": self.char_dictionary,
            "char_embedding_dim": self.char_embedding_dim,
            "hidden_size_char": self.hidden_size_char,
        }
@register_embeddings
class FlairEmbeddings(TokenEmbeddings):
    """Contextual string embeddings of words, as proposed in Akbik et al., 2018."""
    def __init__(
        self,
        model,
        fine_tune: bool = False,
        chars_per_chunk: int = 512,
        with_whitespace: bool = True,
        tokenized_lm: bool = True,
        is_lower: bool = False,
        name: Optional[str] = None,
        has_decoder: bool = False,
    ) -> None:
        """Initializes contextual string embeddings using a character-level language model.
        :param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
        'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward',
        etc (see https://github.com/flairNLP/flair/blob/master/resources/docs/embeddings/FLAIR_EMBEDDINGS.md)
        depending on which character language model is desired.
        :param fine_tune: if set to True, the gradient will propagate into the language model. This dramatically slows
        down training and often leads to overfitting, so use with caution.
        :param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster
        but requires more memory. Lower means slower but less memory.
        :param with_whitespace: If True, use hidden state after whitespace after word. If False, use hidden
        state at last character of word.
        :param tokenized_lm: Whether this lm is tokenized. Default is True, but for LMs trained over unprocessed text
        False might be better.
        :param is_lower: if set to True, sentences are lowercased before being passed to the language model.
        :param name: optional override for this embedding's name (otherwise derived from the model path).
        :param has_decoder: passed through to LanguageModel.load_language_model when loading from a file path.
        """
        super().__init__()
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        cache_dir = Path("embeddings")
        # base URLs that the convenience model names below are resolved against
        hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/flair"
        clef_hipe_path: str = "https://files.ifi.uzh.ch/cl/siclemat/impresso/clef-hipe-2020/flair"
        am_path: str = "http://ltdata1.informatik.uni-hamburg.de/amharic/models/flair/"
        self.is_lower: bool = is_lower
        # map of convenience model names to download URLs for pretrained character LMs
        self.PRETRAINED_MODEL_ARCHIVE_MAP = {
            # multilingual models
            "multi-forward": f"{hu_path}/lm-jw300-forward-v0.1.pt",
            "multi-backward": f"{hu_path}/lm-jw300-backward-v0.1.pt",
            "multi-v0-forward": f"{hu_path}/lm-multi-forward-v0.1.pt",
            "multi-v0-backward": f"{hu_path}/lm-multi-backward-v0.1.pt",
            "multi-forward-fast": f"{hu_path}/lm-multi-forward-fast-v0.1.pt",
            "multi-backward-fast": f"{hu_path}/lm-multi-backward-fast-v0.1.pt",
            # English models
            "en-forward": f"{hu_path}/news-forward-0.4.1.pt",
            "en-backward": f"{hu_path}/news-backward-0.4.1.pt",
            "en-forward-fast": f"{hu_path}/lm-news-english-forward-1024-v0.2rc.pt",
            "en-backward-fast": f"{hu_path}/lm-news-english-backward-1024-v0.2rc.pt",
            "news-forward": f"{hu_path}/news-forward-0.4.1.pt",
            "news-backward": f"{hu_path}/news-backward-0.4.1.pt",
            "news-forward-fast": f"{hu_path}/lm-news-english-forward-1024-v0.2rc.pt",
            "news-backward-fast": f"{hu_path}/lm-news-english-backward-1024-v0.2rc.pt",
            "mix-forward": f"{hu_path}/lm-mix-english-forward-v0.2rc.pt",
            "mix-backward": f"{hu_path}/lm-mix-english-backward-v0.2rc.pt",
            # Arabic
            "ar-forward": f"{hu_path}/lm-ar-opus-large-forward-v0.1.pt",
            "ar-backward": f"{hu_path}/lm-ar-opus-large-backward-v0.1.pt",
            # Bulgarian
            "bg-forward-fast": f"{hu_path}/lm-bg-small-forward-v0.1.pt",
            "bg-backward-fast": f"{hu_path}/lm-bg-small-backward-v0.1.pt",
            "bg-forward": f"{hu_path}/lm-bg-opus-large-forward-v0.1.pt",
            "bg-backward": f"{hu_path}/lm-bg-opus-large-backward-v0.1.pt",
            # Czech
            "cs-forward": f"{hu_path}/lm-cs-opus-large-forward-v0.1.pt",
            "cs-backward": f"{hu_path}/lm-cs-opus-large-backward-v0.1.pt",
            "cs-v0-forward": f"{hu_path}/lm-cs-large-forward-v0.1.pt",
            "cs-v0-backward": f"{hu_path}/lm-cs-large-backward-v0.1.pt",
            # Danish
            "da-forward": f"{hu_path}/lm-da-opus-large-forward-v0.1.pt",
            "da-backward": f"{hu_path}/lm-da-opus-large-backward-v0.1.pt",
            # German
            "de-forward": f"{hu_path}/lm-mix-german-forward-v0.2rc.pt",
            "de-backward": f"{hu_path}/lm-mix-german-backward-v0.2rc.pt",
            "de-historic-ha-forward": f"{hu_path}/lm-historic-hamburger-anzeiger-forward-v0.1.pt",
            "de-historic-ha-backward": f"{hu_path}/lm-historic-hamburger-anzeiger-backward-v0.1.pt",
            "de-historic-wz-forward": f"{hu_path}/lm-historic-wiener-zeitung-forward-v0.1.pt",
            "de-historic-wz-backward": f"{hu_path}/lm-historic-wiener-zeitung-backward-v0.1.pt",
            "de-historic-rw-forward": f"{hu_path}/redewiedergabe_lm_forward.pt",
            "de-historic-rw-backward": f"{hu_path}/redewiedergabe_lm_backward.pt",
            # Spanish
            "es-forward": f"{hu_path}/lm-es-forward.pt",
            "es-backward": f"{hu_path}/lm-es-backward.pt",
            "es-forward-fast": f"{hu_path}/lm-es-forward-fast.pt",
            "es-backward-fast": f"{hu_path}/lm-es-backward-fast.pt",
            # Basque
            "eu-forward": f"{hu_path}/lm-eu-opus-large-forward-v0.2.pt",
            "eu-backward": f"{hu_path}/lm-eu-opus-large-backward-v0.2.pt",
            "eu-v1-forward": f"{hu_path}/lm-eu-opus-large-forward-v0.1.pt",
            "eu-v1-backward": f"{hu_path}/lm-eu-opus-large-backward-v0.1.pt",
            "eu-v0-forward": f"{hu_path}/lm-eu-large-forward-v0.1.pt",
            "eu-v0-backward": f"{hu_path}/lm-eu-large-backward-v0.1.pt",
            # Persian
            "fa-forward": f"{hu_path}/lm-fa-opus-large-forward-v0.1.pt",
            "fa-backward": f"{hu_path}/lm-fa-opus-large-backward-v0.1.pt",
            # Finnish
            "fi-forward": f"{hu_path}/lm-fi-opus-large-forward-v0.1.pt",
            "fi-backward": f"{hu_path}/lm-fi-opus-large-backward-v0.1.pt",
            # French
            "fr-forward": f"{hu_path}/lm-fr-charlm-forward.pt",
            "fr-backward": f"{hu_path}/lm-fr-charlm-backward.pt",
            # Hebrew
            "he-forward": f"{hu_path}/lm-he-opus-large-forward-v0.1.pt",
            "he-backward": f"{hu_path}/lm-he-opus-large-backward-v0.1.pt",
            # Hindi
            "hi-forward": f"{hu_path}/lm-hi-opus-large-forward-v0.1.pt",
            "hi-backward": f"{hu_path}/lm-hi-opus-large-backward-v0.1.pt",
            # Croatian
            "hr-forward": f"{hu_path}/lm-hr-opus-large-forward-v0.1.pt",
            "hr-backward": f"{hu_path}/lm-hr-opus-large-backward-v0.1.pt",
            # Indonesian
            "id-forward": f"{hu_path}/lm-id-opus-large-forward-v0.1.pt",
            "id-backward": f"{hu_path}/lm-id-opus-large-backward-v0.1.pt",
            # Italian
            "it-forward": f"{hu_path}/lm-it-opus-large-forward-v0.1.pt",
            "it-backward": f"{hu_path}/lm-it-opus-large-backward-v0.1.pt",
            # Japanese
            "ja-forward": f"{hu_path}/japanese-forward.pt",
            "ja-backward": f"{hu_path}/japanese-backward.pt",
            # Malayalam
            "ml-forward": "https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/ml-forward.pt",
            "ml-backward": "https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/ml-backward.pt",
            # Dutch
            "nl-forward": f"{hu_path}/lm-nl-opus-large-forward-v0.1.pt",
            "nl-backward": f"{hu_path}/lm-nl-opus-large-backward-v0.1.pt",
            "nl-v0-forward": f"{hu_path}/lm-nl-large-forward-v0.1.pt",
            "nl-v0-backward": f"{hu_path}/lm-nl-large-backward-v0.1.pt",
            # Norwegian
            "no-forward": f"{hu_path}/lm-no-opus-large-forward-v0.1.pt",
            "no-backward": f"{hu_path}/lm-no-opus-large-backward-v0.1.pt",
            # Polish
            "pl-forward": f"{hu_path}/lm-polish-forward-v0.2.pt",
            "pl-backward": f"{hu_path}/lm-polish-backward-v0.2.pt",
            "pl-opus-forward": f"{hu_path}/lm-pl-opus-large-forward-v0.1.pt",
            "pl-opus-backward": f"{hu_path}/lm-pl-opus-large-backward-v0.1.pt",
            # Portuguese
            "pt-forward": f"{hu_path}/lm-pt-forward.pt",
            "pt-backward": f"{hu_path}/lm-pt-backward.pt",
            # Pubmed
            "pubmed-forward": f"{hu_path}/pubmed-forward.pt",
            "pubmed-backward": f"{hu_path}/pubmed-backward.pt",
            "pubmed-2015-forward": f"{hu_path}/pubmed-2015-fw-lm.pt",
            "pubmed-2015-backward": f"{hu_path}/pubmed-2015-bw-lm.pt",
            # Slovenian
            "sl-forward": f"{hu_path}/lm-sl-opus-large-forward-v0.1.pt",
            "sl-backward": f"{hu_path}/lm-sl-opus-large-backward-v0.1.pt",
            "sl-v0-forward": f"{hu_path}/lm-sl-large-forward-v0.1.pt",
            "sl-v0-backward": f"{hu_path}/lm-sl-large-backward-v0.1.pt",
            # Swedish
            "sv-forward": f"{hu_path}/lm-sv-opus-large-forward-v0.1.pt",
            "sv-backward": f"{hu_path}/lm-sv-opus-large-backward-v0.1.pt",
            "sv-v0-forward": f"{hu_path}/lm-sv-large-forward-v0.1.pt",
            "sv-v0-backward": f"{hu_path}/lm-sv-large-backward-v0.1.pt",
            # Tamil
            "ta-forward": f"{hu_path}/lm-ta-opus-large-forward-v0.1.pt",
            "ta-backward": f"{hu_path}/lm-ta-opus-large-backward-v0.1.pt",
            # Spanish clinical
            "es-clinical-forward": f"{hu_path}/es-clinical-forward.pt",
            "es-clinical-backward": f"{hu_path}/es-clinical-backward.pt",
            # CLEF HIPE Shared task
            "de-impresso-hipe-v1-forward": f"{clef_hipe_path}/de-hipe-flair-v1-forward/best-lm.pt",
            "de-impresso-hipe-v1-backward": f"{clef_hipe_path}/de-hipe-flair-v1-backward/best-lm.pt",
            "en-impresso-hipe-v1-forward": f"{clef_hipe_path}/en-flair-v1-forward/best-lm.pt",
            "en-impresso-hipe-v1-backward": f"{clef_hipe_path}/en-flair-v1-backward/best-lm.pt",
            "fr-impresso-hipe-v1-forward": f"{clef_hipe_path}/fr-hipe-flair-v1-forward/best-lm.pt",
            "fr-impresso-hipe-v1-backward": f"{clef_hipe_path}/fr-hipe-flair-v1-backward/best-lm.pt",
            # Amharic
            "am-forward": f"{am_path}/best-lm.pt",
            # Ukrainian
            "uk-forward": "https://huggingface.co/dchaplinsky/flair-uk-forward/resolve/main/best-lm.pt",
            "uk-backward": "https://huggingface.co/dchaplinsky/flair-uk-backward/resolve/main/best-lm.pt",
        }
        # resolve string identifiers: pretrained name, language-name alias, or local file path
        if isinstance(model, str):
            # load model if in pretrained model map
            if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:
                base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]
                # Fix for CLEF HIPE models (avoid overwriting best-lm.pt in cache_dir)
                if "impresso-hipe" in model.lower():
                    cache_dir = cache_dir / model.lower()
                    # CLEF HIPE models are lowercased
                    self.is_lower = True
                model = cached_path(base_path, cache_dir=cache_dir)
            elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:
                base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[replace_with_language_code(model)]
                model = cached_path(base_path, cache_dir=cache_dir)
            elif not Path(model).exists():
                raise ValueError(f'The given model "{model}" is not available or is not a valid path.')
        from flair.models import LanguageModel
        # accept either an already-instantiated LanguageModel or a checkpoint path
        if isinstance(model, LanguageModel):
            self.lm: LanguageModel = model
            self.name = f"Task-LSTM-{self.lm.hidden_size}-{self.lm.nlayers}-{self.lm.is_forward_lm}"
        else:
            self.lm = LanguageModel.load_language_model(model, has_decoder=has_decoder)
            self.name = str(model)
        if name is not None:
            self.name = name
        # embeddings are static if we don't do finetuning
        self.fine_tune = fine_tune
        self.static_embeddings = not fine_tune
        self.is_forward_lm: bool = self.lm.is_forward_lm
        self.with_whitespace: bool = with_whitespace
        self.tokenized_lm: bool = tokenized_lm
        self.chars_per_chunk: int = chars_per_chunk
        # embed a dummy sentence to determine embedding_length
        dummy_sentence: Sentence = Sentence("hello")
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0][0].get_embedding())
        # set to eval mode
        self.eval()
    def train(self, mode=True):
        # unless fine-tuning is set, do not set language model to train() in order to disallow language model dropout
        super().train(self.fine_tune and mode)
    @property
    def embedding_length(self) -> int:
        """Dimensionality of a single token embedding (probed with a dummy sentence in __init__)."""
        return self.__embedding_length
    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Run the character LM over each sentence and attach per-token hidden states as embeddings."""
        # gradients are enable if fine-tuning is enabled
        gradient_context = torch.enable_grad() if self.fine_tune else torch.no_grad()
        with gradient_context:
            # if this is not possible, use LM to generate embedding. First, get text sentences
            text_sentences = (
                [sentence.to_tokenized_string() for sentence in sentences]
                if self.tokenized_lm
                else [sentence.to_plain_string() for sentence in sentences]
            )
            if self.is_lower:
                text_sentences = [sentence.lower() for sentence in text_sentences]
            # older serialized LMs may not carry a document_delimiter attribute
            start_marker = self.lm.document_delimiter if "document_delimiter" in self.lm.__dict__ else "\n"
            end_marker = " "
            # get hidden states from language model
            all_hidden_states_in_lm = self.lm.get_representation(
                text_sentences, start_marker, end_marker, self.chars_per_chunk
            )
            if not self.fine_tune:
                all_hidden_states_in_lm = all_hidden_states_in_lm.detach()
            # take first or last hidden states from language model as word representation
            for i, sentence in enumerate(sentences):
                sentence_text = sentence.to_tokenized_string() if self.tokenized_lm else sentence.to_plain_string()
                # character offsets into the LM input: forward counts from the start marker,
                # backward counts down from the end of the sentence text
                offset_forward: int = len(start_marker)
                offset_backward: int = len(sentence_text) + len(start_marker)
                for token in sentence.tokens:
                    offset_forward += len(token.text)
                    if self.is_forward_lm:
                        offset_with_whitespace = offset_forward
                        offset_without_whitespace = offset_forward - 1
                    else:
                        offset_with_whitespace = offset_backward
                        offset_without_whitespace = offset_backward - 1
                    # offset mode that extracts at whitespace after last character
                    if self.with_whitespace:
                        embedding = all_hidden_states_in_lm[offset_with_whitespace, i, :]
                    # offset mode that extracts at last character
                    else:
                        embedding = all_hidden_states_in_lm[offset_without_whitespace, i, :]
                    # advance past the separating whitespace (tokenized LMs always have one)
                    if self.tokenized_lm or token.whitespace_after > 0:
                        offset_forward += 1
                        offset_backward -= 1
                    offset_backward -= len(token.text)
                    token.set_embedding(self.name, embedding)
            del all_hidden_states_in_lm
        return sentences
    def __str__(self) -> str:
        """Return the name of this embedding as its string representation."""
        return self.name
    def to_params(self):
        """Serialize configuration plus the underlying LanguageModel's hyperparameters."""
        return {
            "fine_tune": self.fine_tune,
            "chars_per_chunk": self.chars_per_chunk,
            "is_lower": self.is_lower,
            "tokenized_lm": self.tokenized_lm,
            "model_params": {
                "dictionary": self.lm.dictionary,
                "is_forward_lm": self.lm.is_forward_lm,
                "hidden_size": self.lm.hidden_size,
                "nlayers": self.lm.nlayers,
                "embedding_size": self.lm.embedding_size,
                "nout": self.lm.nout,
                "document_delimiter": self.lm.document_delimiter,
                "dropout": self.lm.dropout,
                "has_decoder": self.lm.decoder is not None,
            },
            "name": self.name,
        }
    @classmethod
    def from_params(cls, params):
        """Rebuild a FlairEmbeddings instance from the dictionary produced by to_params."""
        model_params = params.pop("model_params")
        from flair.models import LanguageModel
        lm = LanguageModel(**model_params)
        return cls(lm, **params)
    def __setstate__(self, d: Dict[str, Any]):
        # make compatible with old models: fill in attributes added in later versions
        d.setdefault("fine_tune", False)
        d.setdefault("chars_per_chunk", 512)
        d.setdefault("with_whitespace", True)
        d.setdefault("tokenized_lm", True)
        d.setdefault("is_lower", False)
        d.setdefault("field", None)
        super().__setstate__(d)
@register_embeddings
class PooledFlairEmbeddings(TokenEmbeddings):
    """FlairEmbeddings augmented with a global per-word memory that pools each word's embeddings over time."""
    def __init__(
        self,
        contextual_embeddings: Union[str, FlairEmbeddings],
        pooling: str = "min",
        only_capitalized: bool = False,
        **kwargs,
    ) -> None:
        """Initialize pooled contextual embeddings.

        :param contextual_embeddings: a FlairEmbeddings instance, or a model string passed to FlairEmbeddings(...)
        :param pooling: aggregation over the word memory: one of 'min', 'max', 'mean' or 'fade'
        :param only_capitalized: if True, only words starting with an uppercase letter are added to the memory
        :param kwargs: forwarded to the FlairEmbeddings constructor when contextual_embeddings is a string
        """
        super().__init__()
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        # use the character language model embeddings as basis
        if isinstance(contextual_embeddings, str):
            self.context_embeddings: FlairEmbeddings = FlairEmbeddings(contextual_embeddings, **kwargs)
        else:
            self.context_embeddings = contextual_embeddings
        # length is twice the original character LM embedding length
        self.__embedding_length = self.context_embeddings.embedding_length * 2
        self.name = self.context_embeddings.name + "-context"
        # these fields are for the embedding memory
        self.word_embeddings: Dict[str, torch.Tensor] = {}
        self.word_count: Dict[str, int] = {}
        # whether to add only capitalized words to memory (faster runtime and lower memory consumption)
        self.only_capitalized = only_capitalized
        # we re-compute embeddings dynamically at each epoch
        self.static_embeddings = False
        # set the memory method
        self.pooling = pooling
    def train(self, mode=True):
        super().train(mode=mode)
        if mode:
            # memory is wiped each time we do a training run
            log.info("train mode resetting embeddings")
            self.word_embeddings = {}
            self.word_count = {}
    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Embed sentences with the base FlairEmbeddings, update the word memory, then attach pooled vectors."""
        self.context_embeddings.embed(sentences)
        # if we keep a pooling, it needs to be updated continuously
        for sentence in sentences:
            for token in sentence.tokens:
                # update embedding
                local_embedding = token._embeddings[self.context_embeddings.name].cpu()
                # check token.text is empty or not
                if token.text and (token.text[0].isupper() or not self.only_capitalized):
                    if token.text not in self.word_embeddings:
                        self.word_embeddings[token.text] = local_embedding
                        self.word_count[token.text] = 1
                    else:
                        # set aggregation operation
                        # NOTE(review): an unrecognized pooling value would leave
                        # aggregated_embedding unbound here — confirm pooling is validated upstream
                        if self.pooling == "mean":
                            aggregated_embedding = torch.add(self.word_embeddings[token.text], local_embedding)
                        elif self.pooling == "fade":
                            aggregated_embedding = torch.add(self.word_embeddings[token.text], local_embedding)
                            aggregated_embedding /= 2
                        elif self.pooling == "max":
                            aggregated_embedding = torch.max(self.word_embeddings[token.text], local_embedding)
                        elif self.pooling == "min":
                            aggregated_embedding = torch.min(self.word_embeddings[token.text], local_embedding)
                        self.word_embeddings[token.text] = aggregated_embedding
                        self.word_count[token.text] += 1
        # add embeddings after updating
        for sentence in sentences:
            for token in sentence.tokens:
                if token.text in self.word_embeddings:
                    # 'mean' divides the running sum by the observation count; other poolings store the result directly
                    base = (
                        self.word_embeddings[token.text] / self.word_count[token.text]
                        if self.pooling == "mean"
                        else self.word_embeddings[token.text]
                    )
                else:
                    base = token._embeddings[self.context_embeddings.name]
                token.set_embedding(self.name, base)
        return sentences
    @property
    def embedding_length(self) -> int:
        """Twice the base embedding length (contextual part plus pooled part)."""
        return self.__embedding_length
    def get_names(self) -> List[str]:
        """Both this embedding's name and the base embedding's name are attached to tokens."""
        return [self.name, self.context_embeddings.name]
    def __setstate__(self, d: Dict[str, Any]):
        super().__setstate__(d)
        # keep the embedding memory on CPU after loading (tensors are explicitly moved via .cpu())
        if flair.device.type != "cpu":
            for key in self.word_embeddings:
                self.word_embeddings[key] = self.word_embeddings[key].cpu()
    @classmethod
    def from_params(cls, params):
        """Rebuild from serialized params; the base embedding is itself deserialized via load_embeddings."""
        return cls(contextual_embeddings=load_embeddings(params.pop("contextual_embeddings")), **params)
    def to_params(self):
        """Serialize configuration; the base embedding is nested as its own saved state."""
        return {
            "pooling": self.pooling,
            "only_capitalized": self.only_capitalized,
            "contextual_embeddings": self.context_embeddings.save_embeddings(use_state_dict=False),
        }
@register_embeddings
class FastTextEmbeddings(TokenEmbeddings):
    """FastText Embeddings with oov functionality."""
    def __init__(
        self, embeddings: str, use_local: bool = True, field: Optional[str] = None, name: Optional[str] = None
    ) -> None:
        """Initializes fasttext word embeddings.
        Constructor downloads required embedding file and stores in cache if use_local is False.
        :param embeddings: path to your embeddings '.bin' file
        :param use_local: set this to False if you are using embeddings from a remote source
        :param field: by default the token text is embedded; set a label field name to embed that label's value instead
        :param name: optional override for this embedding's name
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        cache_dir = Path("embeddings")
        if use_local:
            embeddings_path = Path(embeddings)
            if not embeddings_path.exists():
                raise ValueError(f'The given embeddings "{embeddings}" is not available or is not a valid path.')
        else:
            embeddings_path = cached_path(f"{embeddings}", cache_dir=cache_dir)
        self.embeddings = embeddings_path
        self.name: str = str(embeddings_path)
        self.static_embeddings = True
        # '.bin' files are facebook's native format; anything else is assumed to be a gensim-saved model
        if embeddings_path.suffix == ".bin":
            self.precomputed_word_embeddings: FastTextKeyedVectors = load_facebook_vectors(str(embeddings_path))
        else:
            self.precomputed_word_embeddings = FastTextKeyedVectors.load(str(embeddings_path))
        self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
        self.field = field
        super().__init__()
    @property
    def embedding_length(self) -> int:
        """Vector size of the loaded fasttext model."""
        return self.__embedding_length
    @instance_lru_cache(maxsize=10000, typed=False)
    def get_cached_vec(self, word: str) -> torch.Tensor:
        """Look up a word vector (fasttext handles OOV via subwords) and cache the resulting tensor."""
        word_embedding = self.precomputed_word_embeddings.get_vector(word)
        word_embedding = torch.tensor(word_embedding.tolist(), device=flair.device, dtype=torch.float)
        return word_embedding
    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Attach a fasttext vector to every token (or to the token's label value when self.field is set)."""
        for sentence in sentences:
            for token in sentence.tokens:
                word = token.text if self.field is None else token.get_label(self.field).value
                word_embedding = self.get_cached_vec(word)
                token.set_embedding(self.name, word_embedding)
        return sentences
    def __str__(self) -> str:
        """Return the name of this embedding as its string representation."""
        return self.name
    def extra_repr(self):
        return f"'{self.embeddings}'"
    @classmethod
    def from_params(cls, params):
        """Rebuild from params by writing the serialized model bytes to a temp file and loading from it."""
        fasttext_binary = params.pop("fasttext_binary")
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            out_path = temp_path / "fasttext.model"
            out_path.write_bytes(fasttext_binary)
            return cls(**params, embeddings=str(out_path), use_local=True)
    def to_params(self):
        """Serialize by round-tripping the gensim model through a temp file into raw bytes."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            out_path = temp_path / "fasttext.model"
            self.precomputed_word_embeddings.save(str(out_path))
            return {"name": self.name, "field": self.field, "fasttext_binary": out_path.read_bytes()}
@register_embeddings
class OneHotEmbeddings(TokenEmbeddings):
    """One-hot encoded embeddings."""
    def __init__(
        self,
        vocab_dictionary: Dictionary,
        field: str = "text",
        embedding_length: int = 300,
        stable: bool = False,
    ) -> None:
        """Initializes one-hot encoded word embeddings and a trainable embedding layer.
        :param vocab_dictionary: the vocabulary that will be encoded
        :param field: by default, the 'text' of tokens is embedded, but you can also embed tags such as 'pos'
        :param embedding_length: dimensionality of the trainable embedding layer
        :param stable: set stable=True to use the stable embeddings as described in https://arxiv.org/abs/2110.02861
        """
        super().__init__()
        self.name = f"one-hot-{field}"
        # trainable layer, so embeddings must be recomputed every pass
        self.static_embeddings = False
        self.field = field
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        self.__embedding_length = embedding_length
        self.vocab_dictionary = vocab_dictionary
        log.info(self.vocab_dictionary.idx2item)
        log.info(f"vocabulary size of {len(self.vocab_dictionary)}")
        # model architecture
        self.embedding_layer = nn.Embedding(len(self.vocab_dictionary), self.__embedding_length)
        nn.init.xavier_uniform_(self.embedding_layer.weight)
        # optional layer norm for "stable" embeddings (see paper linked in docstring)
        if stable:
            self.layer_norm: Optional[nn.LayerNorm] = nn.LayerNorm(embedding_length)
        else:
            self.layer_norm = None
        self.to(flair.device)
        self.eval()
    @property
    def embedding_length(self) -> int:
        """Dimensionality of the trainable embedding layer."""
        return self.__embedding_length
    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Index each token (text or label value) into the vocabulary and embed the whole batch at once."""
        tokens = [t for sentence in sentences for t in sentence.tokens]
        if self.field == "text":
            one_hot_sentences = [self.vocab_dictionary.get_idx_for_item(t.text) for t in tokens]
        else:
            one_hot_sentences = [self.vocab_dictionary.get_idx_for_item(t.get_label(self.field).value) for t in tokens]
        one_hot_sentences_tensor = torch.tensor(one_hot_sentences, dtype=torch.long).to(flair.device)
        embedded = self.embedding_layer.forward(one_hot_sentences_tensor)
        if self.layer_norm is not None:
            embedded = self.layer_norm(embedded)
        for emb, token in zip(embedded, tokens):
            token.set_embedding(self.name, emb)
        return sentences
    def __str__(self) -> str:
        """Return the name of this embedding as its string representation."""
        return self.name
    @classmethod
    def from_corpus(cls, corpus: Corpus, field: str = "text", min_freq: int = 3, **kwargs):
        """Build the vocabulary from a corpus' training split, keeping items with frequency >= min_freq."""
        vocab_dictionary = Dictionary()
        assert corpus.train is not None
        tokens = [s.tokens for s in _iter_dataset(corpus.train)]
        tokens = [token for sublist in tokens for token in sublist]
        if field == "text":
            most_common = Counter([t.text for t in tokens]).most_common()
        else:
            most_common = Counter([t.get_label(field).value for t in tokens]).most_common()
        tokens = []
        # most_common is sorted by descending frequency, so we can stop at the first rare item
        for token, freq in most_common:
            if freq < min_freq:
                break
            tokens.append(token)
        for token in tokens:
            vocab_dictionary.add_item(token)
        return cls(vocab_dictionary, field=field, **kwargs)
    @classmethod
    def from_params(cls, params):
        """Rebuild from the dictionary produced by to_params."""
        return cls(**params)
    def to_params(self):
        """Serialize the constructor arguments needed to re-create this embedding."""
        return {
            "vocab_dictionary": self.vocab_dictionary,
            "field": self.field,
            "embedding_length": self.__embedding_length,
            "stable": self.layer_norm is not None,
        }
@register_embeddings
class HashEmbeddings(TokenEmbeddings):
    """Standard embeddings with Hashing Trick."""
    def __init__(self, num_embeddings: int = 1000, embedding_length: int = 300, hash_method="md5") -> None:
        """Initialize hashing-trick embeddings.

        :param num_embeddings: number of rows in the embedding table (hash buckets)
        :param embedding_length: dimensionality of each embedding vector
        :param hash_method: any algorithm name accepted by hashlib.new, used to bucket token texts
        """
        super().__init__()
        self.name = "hash"
        # trainable layer, so embeddings must be recomputed every pass
        self.static_embeddings = False
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        self.__num_embeddings = num_embeddings
        self.__embedding_length = embedding_length
        self.__hash_method = hash_method
        # model architecture
        self.embedding_layer = torch.nn.Embedding(self.__num_embeddings, self.__embedding_length)
        torch.nn.init.xavier_uniform_(self.embedding_layer.weight)
        self.to(flair.device)
        self.eval()
    @property
    def num_embeddings(self) -> int:
        """Number of hash buckets (rows of the embedding table)."""
        return self.__num_embeddings
    @property
    def embedding_length(self) -> int:
        """Dimensionality of each embedding vector."""
        return self.__embedding_length
    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # NOTE(review): unlike sibling classes, this does not `return sentences` — confirm callers ignore the return value
        def get_idx_for_item(text):
            # hash the token text and reduce modulo the table size to pick a bucket
            hash_function = hashlib.new(self.__hash_method)
            hash_function.update(bytes(str(text), "utf-8"))
            return int(hash_function.hexdigest(), 16) % self.__num_embeddings
        context_idxs = [get_idx_for_item(t.text) for sentence in sentences for t in sentence.tokens]
        hash_sentences = torch.tensor(context_idxs, dtype=torch.long).to(flair.device)
        embedded = self.embedding_layer.forward(hash_sentences)
        index = 0
        for sentence in sentences:
            for token in sentence:
                embedding = embedded[index]
                token.set_embedding(self.name, embedding)
                index += 1
    def __str__(self) -> str:
        """Return the name of this embedding as its string representation."""
        return self.name
    @classmethod
    def from_params(cls, params):
        """Rebuild from the dictionary produced by to_params."""
        return cls(**params)
    def to_params(self):
        """Serialize the constructor arguments needed to re-create this embedding."""
        return {
            "num_embeddings": self.num_embeddings,
            "embedding_length": self.embedding_length,
            "hash_method": self.__hash_method,
        }
@register_embeddings
class MuseCrosslingualEmbeddings(TokenEmbeddings):
    """Cross-lingual MUSE word embeddings; per-language models are downloaded lazily on first use."""
    def __init__(
        self,
    ) -> None:
        self.name: str = "muse-crosslingual"
        self.static_embeddings = True
        self.__embedding_length: int = 300
        # language code -> loaded gensim KeyedVectors, filled lazily in _add_embeddings_internal
        self.language_embeddings: Dict[str, Any] = {}
        super().__init__()
        self.eval()
    @instance_lru_cache(maxsize=10000, typed=False)
    def get_cached_vec(self, language_code: str, word: str) -> torch.Tensor:
        """Look up a word with fallbacks (lowercase, digits mapped to '#' or '0'); zeros when fully OOV."""
        current_embedding_model = self.language_embeddings[language_code]
        if word in current_embedding_model:
            word_embedding = current_embedding_model[word]
        elif word.lower() in current_embedding_model:
            word_embedding = current_embedding_model[word.lower()]
        elif re.sub(r"\d", "#", word.lower()) in current_embedding_model:
            word_embedding = current_embedding_model[re.sub(r"\d", "#", word.lower())]
        elif re.sub(r"\d", "0", word.lower()) in current_embedding_model:
            word_embedding = current_embedding_model[re.sub(r"\d", "0", word.lower())]
        else:
            word_embedding = np.zeros(self.embedding_length, dtype="float")
        word_embedding = torch.tensor(word_embedding, device=flair.device, dtype=torch.float)
        return word_embedding
    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Detect each sentence's language, lazily download its MUSE model, and embed every token."""
        for _i, sentence in enumerate(sentences):
            language_code = sentence.get_language_code()
            supported = [
                "en",
                "de",
                "bg",
                "ca",
                "hr",
                "cs",
                "da",
                "nl",
                "et",
                "fi",
                "fr",
                "el",
                "he",
                "hu",
                "id",
                "it",
                "mk",
                "no",
                # "pl",
                "pt",
                "ro",
                "ru",
                "sk",
            ]
            # fall back to English for unsupported languages
            if language_code not in supported:
                language_code = "en"
            if language_code not in self.language_embeddings:
                log.info(f"Loading up MUSE embeddings for '{language_code}'!")
                # download if necessary
                hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/muse"
                cache_dir = Path("embeddings") / "MUSE"
                cached_path(
                    f"{hu_path}/muse.{language_code}.vec.gensim.vectors.npy",
                    cache_dir=cache_dir,
                )
                embeddings_file = cached_path(f"{hu_path}/muse.{language_code}.vec.gensim", cache_dir=cache_dir)
                # load the model
                self.language_embeddings[language_code] = gensim.models.KeyedVectors.load(str(embeddings_file))
            for token, _token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
                word_embedding = self.get_cached_vec(language_code=language_code, word=token.text)
                token.set_embedding(self.name, word_embedding)
        return sentences
    @property
    def embedding_length(self) -> int:
        """MUSE vectors are fixed at 300 dimensions."""
        return self.__embedding_length
    def __str__(self) -> str:
        """Return the name of this embedding as its string representation."""
        return self.name
    @classmethod
    def from_params(cls, params):
        """This embedding has no configuration; ignore params and construct fresh."""
        return cls()
    def to_params(self):
        """No configuration to serialize."""
        return {}
# TODO: keep for backwards compatibility, but remove in future
class BPEmbSerializable(BPEmb):
    """BPEmb subclass whose pickle state carries the sentencepiece model file as raw bytes."""
    def __getstate__(self):
        state = self.__dict__.copy()
        # save the sentence piece model as binary file (not as path which may change)
        with self.model_file.open(mode="rb") as fin:
            state["spm_model_binary"] = fin.read()
        # the loaded sentencepiece processor itself is not picklable; it is rebuilt in __setstate__
        state["spm"] = None
        return state
    def __setstate__(self, state):
        from bpemb.util import sentencepiece_load
        model_file = self.model_tpl.format(lang=state["lang"], vs=state["vs"])
        self.__dict__ = state
        # write out the binary sentence piece model into the expected directory
        self.cache_dir: Path = flair.cache_root / "embeddings"
        if "spm_model_binary" in self.__dict__:
            # if the model was saved as binary and it is not found on disk, write to appropriate path
            if not os.path.exists(self.cache_dir / state["lang"]):
                os.makedirs(self.cache_dir / state["lang"])
            self.model_file = self.cache_dir / model_file
            with open(self.model_file, "wb") as out:
                out.write(self.__dict__["spm_model_binary"])
        else:
            # otherwise, use normal process and potentially trigger another download
            self.model_file = self._load_file(model_file)
        # once the model is there, load it with sentence piece
        state["spm"] = sentencepiece_load(self.model_file)
@register_embeddings
class BytePairEmbeddings(TokenEmbeddings):
    """Byte-pair (subword) embeddings based on the BPEmb library."""
    def __init__(
        self,
        language: Optional[str] = None,
        dim: int = 50,
        syllables: int = 100000,
        cache_dir=None,
        model_file_path: Optional[Path] = None,
        embedding_file_path: Optional[Path] = None,
        name: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Initializes BP embeddings.
        Constructor downloads required files if not there.

        :param language: language code of a pretrained BPEmb model; when None, model_file_path and
            embedding_file_path must both be provided
        :param dim: embedding dimensionality of the pretrained model
        :param syllables: vocabulary size (number of BPE merge symbols) of the pretrained model
        :param cache_dir: where downloaded files are cached (defaults to flair's cache root)
        :param model_file_path: path to a custom sentencepiece model (used when language is None)
        :param embedding_file_path: path to a custom embedding file (used when language is None)
        :param name: optional override for this embedding's name
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        if not cache_dir:
            cache_dir = flair.cache_root / "embeddings"
        if language:
            self.name: str = f"bpe-{language}-{syllables}-{dim}"
        else:
            assert (
                model_file_path is not None and embedding_file_path is not None
            ), "Need to specify model_file_path and embedding_file_path if no language is given in BytePairEmbeddings(...)"
            # for custom files the dimensionality is read from the embedding file itself
            dim = None  # type: ignore[assignment]
        self.embedder = BPEmbSerializable(
            lang=language,
            vs=syllables,
            dim=dim,
            cache_dir=cache_dir,
            model_file=model_file_path,
            emb_file=embedding_file_path,
            **kwargs,
        )
        if not language:
            self.name = f"bpe-custom-{self.embedder.vs}-{self.embedder.dim}"
        if name is not None:
            self.name = name
        self.static_embeddings = True
        # per-token embedding concatenates the first and last subword vectors, hence * 2
        self.__embedding_length: int = self.embedder.emb.vector_size * 2
        super().__init__()
        self.eval()
    @property
    def embedding_length(self) -> int:
        """Twice the subword vector size (first and last subword vectors are concatenated)."""
        return self.__embedding_length
    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Embed each token as the concatenation of its first and last BPE subword vectors."""
        for _i, sentence in enumerate(sentences):
            for token, _token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
                word = token.text
                if word.strip() == "":
                    # empty words get no embedding
                    token.set_embedding(self.name, torch.zeros(self.embedding_length, dtype=torch.float))
                else:
                    # all other words get embedded
                    embeddings = self.embedder.embed(word.lower())
                    embedding = np.concatenate((embeddings[0], embeddings[len(embeddings) - 1]))
                    token.set_embedding(self.name, torch.tensor(embedding, dtype=torch.float))
        return sentences
    def __str__(self) -> str:
        """Return the name of this embedding as its string representation."""
        return self.name
    def extra_repr(self):
        return f"model={self.name}"
    @classmethod
    def from_params(cls, params):
        """Rebuild by writing the serialized sentencepiece and word2vec binaries to temp files."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            model_file_path = temp_path / "model.spm"
            model_file_path.write_bytes(params["spm_model_binary"])
            embedding_file_path = temp_path / "word2vec.bin"
            embedding_file_path.write_bytes(params["word2vec_binary"])
            return cls(name=params["name"], model_file_path=model_file_path, embedding_file_path=embedding_file_path)
    def to_params(self):
        """Serialize name plus the raw sentencepiece and word2vec model bytes."""
        # ensure the embedding file exists in binary word2vec format before reading it back
        if not self.embedder.emb_file.exists():
            self.embedder.emb_file = self.embedder.emb_file.with_suffix(".bin")
            self.embedder.emb.save_word2vec_format(str(self.embedder.emb_file), binary=True)
        return {
            "name": self.name,
            "spm_model_binary": self.embedder.spm.serialized_model_proto(),
            "word2vec_binary": self.embedder.emb_file.read_bytes(),
        }
@register_embeddings
class NILCEmbeddings(WordEmbeddings):
    """Portuguese word embeddings trained by the NILC Lab, wrapped as WordEmbeddings."""
    def __init__(self, embeddings: str, model: str = "skip", size: int = 100) -> None:
        """Initializes portuguese classic word embeddings trained by NILC Lab.
        See: http://www.nilc.icmc.usp.br/embeddings
        Constructor downloads required files if not there.
        :param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'
        :param model: one of: 'skip' or 'cbow'. This is not applicable to glove.
        :param size: one of: 50, 100, 300, 600 or 1000.
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        base_path = "http://143.107.183.175:22980/download.php?file=embeddings/"
        cache_dir = Path("embeddings") / ("nilc-" + embeddings.lower())
        # GLOVE embeddings
        if embeddings.lower() == "glove":
            cached_path(f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir)
            embeddings_path = f"{base_path}{embeddings}/{embeddings}_s{size}.zip"
        elif embeddings.lower() in ["fasttext", "wang2vec", "word2vec"]:
            cached_path(f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir)
            embeddings_path = f"{base_path}{embeddings}/{model}_s{size}.zip"
        elif not Path(embeddings).exists():
            raise ValueError(f'The given embeddings "{embeddings}" is not available or is not a valid path.')
        else:
            # a local path was given directly
            embeddings_path = embeddings
        log.info("Reading embeddings from %s" % embeddings_path)
        # the downloaded archive contains a single embeddings file, which is extracted and loaded
        super().__init__(
            embeddings=str(extract_single_zip_file(embeddings_path, cache_dir=cache_dir)), name="NILC-" + embeddings
        )
    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "WordEmbeddings":
        # no need to recreate as NILCEmbeddings
        return WordEmbeddings(embeddings=None, **params)
def replace_with_language_code(string: str):
    """Replace human-readable language prefixes (e.g. ``german-``) in a model
    name with their two-letter ISO 639-1 code prefixes (e.g. ``de-``).

    Strings without a known language prefix are returned unchanged. Note that
    both ``farsi-`` and ``persian-`` map to ``fa-``.

    :param string: model/embedding name, e.g. ``"german-forward"``
    :return: name with the language prefix normalized, e.g. ``"de-forward"``
    """
    # mapping of language-name prefixes to their ISO 639-1 code prefixes
    language_to_code = {
        "arabic-": "ar-",
        "basque-": "eu-",
        "bulgarian-": "bg-",
        "croatian-": "hr-",
        "czech-": "cs-",
        "danish-": "da-",
        "dutch-": "nl-",
        "farsi-": "fa-",
        "persian-": "fa-",
        "finnish-": "fi-",
        "french-": "fr-",
        "german-": "de-",
        "hebrew-": "he-",
        "hindi-": "hi-",
        "indonesian-": "id-",
        "italian-": "it-",
        "japanese-": "ja-",
        # BUGFIX: was "no" (missing trailing hyphen), which turned
        # "norwegian-forward" into "noforward" instead of "no-forward"
        "norwegian-": "no-",
        "polish-": "pl-",
        "portuguese-": "pt-",
        "slovenian-": "sl-",
        "spanish-": "es-",
        "swedish-": "sv-",
    }
    for prefix, code in language_to_code.items():
        string = string.replace(prefix, code)
    return string
| 64,771 | 40.734536 | 131 | py |
flair | flair-master/flair/embeddings/__init__.py | # Expose base classes
from flair.embeddings.transformer import (
TransformerEmbeddings,
TransformerJitDocumentEmbeddings,
TransformerJitWordEmbeddings,
TransformerOnnxDocumentEmbeddings,
TransformerOnnxWordEmbeddings,
)
from .base import Embeddings, ScalarMix
# Expose document embedding classes
from .document import (
DocumentCNNEmbeddings,
DocumentEmbeddings,
DocumentLMEmbeddings,
DocumentPoolEmbeddings,
DocumentRNNEmbeddings,
DocumentTFIDFEmbeddings,
SentenceTransformerDocumentEmbeddings,
TransformerDocumentEmbeddings,
)
# Expose image embedding classes
from .image import (
ConvTransformNetworkImageEmbeddings,
IdentityImageEmbeddings,
ImageEmbeddings,
NetworkImageEmbeddings,
PrecomputedImageEmbeddings,
)
# Expose legacy embedding classes
from .legacy import (
BertEmbeddings,
CamembertEmbeddings,
CharLMEmbeddings,
DocumentLSTMEmbeddings,
DocumentMeanEmbeddings,
ELMoEmbeddings,
ELMoTransformerEmbeddings,
OpenAIGPT2Embeddings,
OpenAIGPTEmbeddings,
RoBERTaEmbeddings,
XLMEmbeddings,
XLMRobertaEmbeddings,
XLNetEmbeddings,
)
# Expose token embedding classes
from .token import (
BPEmbSerializable,
BytePairEmbeddings,
CharacterEmbeddings,
FastTextEmbeddings,
FlairEmbeddings,
HashEmbeddings,
MuseCrosslingualEmbeddings,
NILCEmbeddings,
OneHotEmbeddings,
PooledFlairEmbeddings,
StackedEmbeddings,
TokenEmbeddings,
TransformerWordEmbeddings,
WordEmbeddings,
)
# Explicit public API of the flair.embeddings package.
# NOTE(review): this list is maintained by hand — when adding a new embedding
# class to the imports above, remember to list it here as well.
__all__ = [
    "Embeddings",
    "ScalarMix",
    "DocumentCNNEmbeddings",
    "DocumentEmbeddings",
    "DocumentLMEmbeddings",
    "DocumentPoolEmbeddings",
    "DocumentRNNEmbeddings",
    "DocumentTFIDFEmbeddings",
    "SentenceTransformerDocumentEmbeddings",
    "TransformerDocumentEmbeddings",
    "ConvTransformNetworkImageEmbeddings",
    "IdentityImageEmbeddings",
    "ImageEmbeddings",
    "NetworkImageEmbeddings",
    "PrecomputedImageEmbeddings",
    "BertEmbeddings",
    "CamembertEmbeddings",
    "CharLMEmbeddings",
    "DocumentLSTMEmbeddings",
    "DocumentMeanEmbeddings",
    "ELMoTransformerEmbeddings",
    "OpenAIGPT2Embeddings",
    "OpenAIGPTEmbeddings",
    "RoBERTaEmbeddings",
    "XLMEmbeddings",
    "XLMRobertaEmbeddings",
    "XLNetEmbeddings",
    "BPEmbSerializable",
    "BytePairEmbeddings",
    "CharacterEmbeddings",
    "ELMoEmbeddings",
    "FastTextEmbeddings",
    "FlairEmbeddings",
    "HashEmbeddings",
    "MuseCrosslingualEmbeddings",
    "NILCEmbeddings",
    "OneHotEmbeddings",
    "PooledFlairEmbeddings",
    "StackedEmbeddings",
    "TokenEmbeddings",
    "TransformerWordEmbeddings",
    "WordEmbeddings",
    "TransformerEmbeddings",
    "TransformerOnnxWordEmbeddings",
    "TransformerOnnxDocumentEmbeddings",
    "TransformerJitWordEmbeddings",
    "TransformerJitDocumentEmbeddings",
]
| 2,905 | 23.837607 | 44 | py |
flair | flair-master/flair/datasets/text_image.py | import json
import logging
import os
import urllib
from pathlib import Path
from typing import List
import numpy as np
import torch.utils.data.dataloader
from torch.utils.data import Dataset
from tqdm import tqdm
from flair.data import Corpus, DataPair, FlairDataset, Image, Sentence
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class FeideggerCorpus(Corpus):
    """FEIDEGGER corpus of fashion images paired with German text descriptions.

    On first use, downloads the release JSON and every referenced image into
    the local cache, then partitions the data by the per-image ``split`` field:
    train = splits 0-7, dev = split 8, test = split 9.
    """

    def __init__(self, **kwargs) -> None:
        dataset = "feidegger"

        # cache Feidegger config file
        json_link = "https://raw.githubusercontent.com/zalandoresearch/feidegger/master/data/FEIDEGGER_release_1.1.json"
        json_local_path = cached_path(json_link, Path("datasets") / dataset)

        with json_local_path.open(encoding="utf-8") as fin:
            dataset_info = json.load(fin)

        # cache Feidegger images next to the JSON file
        images_cache_folder = os.path.join(os.path.dirname(json_local_path), "images")
        if not os.path.isdir(images_cache_folder):
            os.mkdir(images_cache_folder)
        for image_info in tqdm(dataset_info):
            name = os.path.basename(image_info["url"])
            filename = os.path.join(images_cache_folder, name)
            if not os.path.isfile(filename):
                urllib.request.urlretrieve(image_info["url"], filename)
            # replace image URL with local cached file
            image_info["url"] = filename

        feidegger_dataset: Dataset = FeideggerDataset(dataset_info, **kwargs)

        def subset(split_ids):
            # select all data points whose split id is in the given set
            indices = list(np.where(np.in1d(feidegger_dataset.split, split_ids))[0])  # type: ignore[attr-defined]
            return torch.utils.data.dataset.Subset(feidegger_dataset, indices)

        train = subset(list(range(8)))
        dev = subset([8])
        test = subset([9])

        super().__init__(train, dev, test, name="feidegger")
class FeideggerDataset(FlairDataset):
    """In-memory dataset of (caption ``Sentence``, ``Image``) pairs for FEIDEGGER.

    Every textual description of an image becomes one DataPair; the parallel
    ``split`` list records each pair's original split id (0-9).
    """

    def __init__(self, dataset_info, **kwargs) -> None:
        super().__init__()

        self.data_points: List[DataPair] = []
        self.split: List[int] = []

        # optionally lowercase all captions
        def identity(x):
            return x

        preprocessor = str.lower if kwargs.get("lowercase") else identity

        for image_info in dataset_info:
            image = Image(imageURL=image_info["url"])
            split_id = int(image_info["split"])
            for caption in image_info["descriptions"]:
                # append Sentence-Image data point
                sentence = Sentence(preprocessor(caption), use_tokenizer=True)
                self.data_points.append(DataPair(sentence, image))
                self.split.append(split_id)

    def __len__(self) -> int:
        return len(self.data_points)

    def __getitem__(self, index: int = 0) -> DataPair:
        return self.data_points[index]

    def is_in_memory(self) -> bool:
        return True
| 3,092 | 35.821429 | 121 | py |
flair | flair-master/flair/datasets/base.py | import logging
from abc import abstractmethod
from pathlib import Path
from typing import Generic, List, Optional, Union
import torch.utils.data.dataloader
from deprecated import deprecated
from flair.data import DT, FlairDataset, Sentence, Tokenizer
from flair.tokenization import SegtokTokenizer, SpaceTokenizer
log = logging.getLogger("flair")
class DataLoader(torch.utils.data.dataloader.DataLoader):
    """Thin wrapper around :class:`torch.utils.data.DataLoader`.

    Pins ``num_workers`` to 0 and collates every batch into a plain ``list``
    of data points (no tensor stacking), which is the batch shape flair
    models operate on.
    """

    def __init__(
        self,
        dataset,
        batch_size=1,
        shuffle=False,
        sampler=None,
        batch_sampler=None,
        drop_last=False,
        timeout=0,
        worker_init_fn=None,
    ) -> None:
        super().__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=sampler,
            batch_sampler=batch_sampler,
            # flair data points are plain python objects: batches stay lists
            collate_fn=list,
            # single-process loading only
            num_workers=0,
            drop_last=drop_last,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
        )
class FlairDatapointDataset(FlairDataset, Generic[DT]):
    """A simple Dataset object to wrap a List of Datapoints, for example Sentences."""

    def __init__(self, datapoints: Union[DT, List[DT]]) -> None:
        """Instantiate FlairDatapointDataset.

        :param datapoints: DT or List of DT that make up FlairDatapointDataset
        """
        # a single datapoint is wrapped in a singleton list
        self.datapoints = datapoints if isinstance(datapoints, list) else [datapoints]

    def is_in_memory(self) -> bool:
        return True

    def __len__(self) -> int:
        return len(self.datapoints)

    def __getitem__(self, index: int = 0) -> DT:
        return self.datapoints[index]
class SentenceDataset(FlairDatapointDataset):
    """Deprecated alias of :class:`FlairDatapointDataset`, kept for backwards compatibility."""

    @deprecated(version="0.11", reason="The 'SentenceDataset' class was renamed to 'FlairDatapointDataset'")
    def __init__(self, sentences: Union[Sentence, List[Sentence]]) -> None:
        super().__init__(sentences)
class StringDataset(FlairDataset):
    """A Dataset taking string as input and returning Sentence during iteration."""

    def __init__(
        self,
        texts: Union[str, List[str]],
        use_tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
    ) -> None:
        """Instantiate StringDataset.

        :param texts: a string or List of string that make up StringDataset
        :param use_tokenizer: Custom tokenizer to use (default is SpaceTokenizer,
            more advanced options are SegTokTokenizer to use segtok or SpacyTokenizer to use Spacy library models
            if available). Check the code of subclasses of Tokenizer to implement your own (if you need it).
            If instead of providing a function, this parameter is just set to True, SegTokTokenizer will be used.
        """
        # cast to list if necessary
        if isinstance(texts, str):
            texts = [texts]

        self.texts = texts
        self.use_tokenizer = use_tokenizer

    # BUGFIX: this concrete override was decorated with @abstractmethod, which is
    # meaningless on an implemented method and would make the class
    # uninstantiable under an ABC metaclass. The decorator was removed.
    def is_in_memory(self) -> bool:
        return True

    def __len__(self) -> int:
        return len(self.texts)

    def __getitem__(self, index: int = 0) -> Sentence:
        # Sentences are created lazily, one per access, to keep memory low
        text = self.texts[index]
        return Sentence(text, use_tokenizer=self.use_tokenizer)
class MongoDataset(FlairDataset):
    """Dataset backed by a MongoDB collection.

    Every document in the collection yields one :class:`Sentence`, labeled
    with the values of the configured category fields.
    """

    def __init__(
        self,
        query: str,
        host: str,
        port: int,
        database: str,
        collection: str,
        text_field: str,
        categories_field: Optional[List[str]] = None,
        max_tokens_per_doc: int = -1,
        max_chars_per_doc: int = -1,
        tokenizer: Tokenizer = SegtokTokenizer(),
        in_memory: bool = True,
        tag_type: str = "class",
    ) -> None:
        """Reads Mongo collections.

        Each collection should contain one document/text per item.

        Each item should have the following format:
        {
        'Beskrivning': 'Abrahamsby. Gård i Gottröra sn, Långhundra hd, Stockholms län, nära Långsjön.',
        'Län':'Stockholms län',
        'Härad': 'Långhundra',
        'Församling': 'Gottröra',
        'Plats': 'Abrahamsby'
        }

        :param query: Query, e.g. {'Län': 'Stockholms län'}
        :param host: Host, e.g. 'localhost',
        :param port: Port, e.g. 27017
        :param database: Database, e.g. 'rosenberg',
        :param collection: Collection, e.g. 'book',
        :param text_field: Text field, e.g. 'Beskrivning',
        :param categories_field: List of category fields, e.g ['Län', 'Härad', 'Tingslag', 'Församling', 'Plats'],
        :param max_tokens_per_doc: If set, truncates each Sentence to a maximum number of Tokens
        :param max_chars_per_doc: If set, truncates each Sentence to a maximum number of chars
        :param tokenizer: Custom tokenizer to use (default SegtokTokenizer)
        :param in_memory: If True, keeps dataset as Sentences in memory, otherwise only keeps strings
        :param tag_type: label type under which the category values are attached
        :return: list of sentences
        """
        # first, check if pymongo is installed
        try:
            import pymongo
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "pymongo" is not installed!')
            log.warning('To use MongoDataset, please first install with "pip install pymongo"')
            log.warning("-" * 100)
            # BUGFIX: previously execution fell through with `pass` and crashed
            # later with a NameError on `pymongo`; re-raise the real error instead.
            raise

        self.in_memory = in_memory
        self.tokenizer = tokenizer

        if self.in_memory:
            self.sentences = []
        else:
            self.indices = []

        self.total_sentence_count: int = 0
        self.max_chars_per_doc = max_chars_per_doc
        self.max_tokens_per_doc = max_tokens_per_doc

        self.__connection = pymongo.MongoClient(host, port)
        self.__cursor = self.__connection[database][collection]

        self.text = text_field
        self.categories = categories_field if categories_field is not None else []
        self.tag_type = tag_type

        start = 0

        if self.in_memory:
            # eagerly parse every matching document into a Sentence
            for document in self.__cursor.find(filter=query, skip=start, limit=0):
                sentence = self._parse_document_to_sentence(
                    document[self.text],
                    [document[_] if _ in document else "" for _ in self.categories],
                    tokenizer,
                )
                if sentence is not None and len(sentence.tokens) > 0:
                    self.sentences.append(sentence)
                    self.total_sentence_count += 1
        else:
            # lazy mode: only remember the document ids, parse on access
            self.indices = self.__cursor.find().distinct("_id")
            # BUGFIX: pymongo's count_documents() requires a filter argument;
            # count all documents to match the unfiltered find() above.
            self.total_sentence_count = self.__cursor.count_documents({})

    def _parse_document_to_sentence(
        self,
        text: str,
        labels: List[str],
        tokenizer: Union[bool, Tokenizer],
    ):
        """Build a labeled Sentence from raw text, honoring the char/token limits."""
        if self.max_chars_per_doc > 0:
            text = text[: self.max_chars_per_doc]

        if text and labels:
            sentence = Sentence(text, use_tokenizer=tokenizer)

            for label in labels:
                sentence.add_label(self.tag_type, label)

            if self.max_tokens_per_doc > 0:
                sentence.tokens = sentence.tokens[: min(len(sentence), self.max_tokens_per_doc)]

            return sentence
        return None

    def is_in_memory(self) -> bool:
        return self.in_memory

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        if self.in_memory:
            return self.sentences[index]
        else:
            # lazy mode: fetch the document by id and parse it on access
            document = self.__cursor.find_one({"_id": index})
            sentence = self._parse_document_to_sentence(
                document[self.text],
                [document[_] if _ in document else "" for _ in self.categories],
                self.tokenizer,
            )
            return sentence
def find_train_dev_test_files(data_folder, dev_file, test_file, train_file, autofind_splits=True):
    """Resolve the train/dev/test files of a corpus folder.

    Explicitly given file names are resolved relative to ``data_folder``; if
    ``autofind_splits`` is set and no train file was given, the folder is
    scanned for files whose names contain 'train'/'dev'/'test' (plus the
    CoNLL-style 'testa'/'testb' conventions for dev/test).

    :param data_folder: folder that contains the corpus files (str or Path)
    :param dev_file: name of the dev file, or None for auto-detection
    :param test_file: name of the test file, or None for auto-detection
    :param train_file: name of the train file, or None for auto-detection
    :param autofind_splits: whether to scan the folder for missing splits
    :return: tuple (dev_file, test_file, train_file) of resolved Paths (each may be None)
    """
    # BUGFIX (idiom): use isinstance instead of an exact type() comparison,
    # so Path subclasses and str subclasses are handled correctly
    if isinstance(data_folder, str):
        data_folder = Path(data_folder)

    if train_file is not None:
        train_file = data_folder / train_file
    if test_file is not None:
        test_file = data_folder / test_file
    if dev_file is not None:
        dev_file = data_folder / dev_file

    # compressed archives and editor swap files are never corpus splits
    suffixes_to_ignore = {".gz", ".swp"}

    # automatically identify train / test / dev files
    if train_file is None and autofind_splits:
        for file in data_folder.iterdir():
            file_name = file.name
            if not suffixes_to_ignore.isdisjoint(file.suffixes):
                continue
            # NOTE(review): "54019" appears to exclude one specific alternative
            # train file shipped with some corpora — confirm before changing
            if "train" in file_name and "54019" not in file_name:
                train_file = file
            if "dev" in file_name:
                dev_file = file
            if "testa" in file_name:
                dev_file = file
            if "testb" in file_name:
                test_file = file

    # if no test file is found, take any file with 'test' in name
    if test_file is None and autofind_splits:
        for file in data_folder.iterdir():
            file_name = file.name
            if not suffixes_to_ignore.isdisjoint(file.suffixes):
                continue
            if "test" in file_name:
                test_file = file

    log.info(f"Reading data from {data_folder}")
    log.info(f"Train: {train_file}")
    log.info(f"Dev: {dev_file}")
    log.info(f"Test: {test_file}")

    return dev_file, test_file, train_file
| 9,549 | 33.854015 | 128 | py |
flair | flair-master/flair/datasets/biomedical.py | import inspect
import json
import logging
import os
import re
import shutil
import sys
from abc import ABC, abstractmethod
from collections import defaultdict, deque
from copy import copy
from operator import attrgetter
from pathlib import Path
from tarfile import (
CompressionError,
ExtractError,
HeaderError,
ReadError,
StreamError,
TarError,
)
from typing import Dict, Iterable, List, NamedTuple, Optional, Tuple, Union
from zipfile import BadZipFile, LargeZipFile
import ftfy
from deprecated import deprecated
from lxml import etree
from lxml.etree import XMLSyntaxError
import flair
from flair.data import MultiCorpus, Tokenizer
from flair.datasets.sequence_labeling import ColumnCorpus, ColumnDataset
from flair.file_utils import Tqdm, cached_path, unpack_file
from flair.splitter import (
NoSentenceSplitter,
SciSpacySentenceSplitter,
SentenceSplitter,
TagSentenceSplitter,
)
from flair.tokenization import SciSpacyTokenizer, SpaceTokenizer
DISEASE_TAG = "Disease"
CHEMICAL_TAG = "Chemical"
CELL_LINE_TAG = "CellLine"
GENE_TAG = "Gene"
SPECIES_TAG = "Species"
SENTENCE_TAG = "[__SENT__]"
logger = logging.getLogger("flair")
class Entity:
    """Internal class to represent entities while converting biomedical NER corpora to a standardized format.

    Each entity consists of the char span it addresses in the original
    text as well as the type of entity (e.g. Chemical, Gene, and so on).
    """

    def __init__(self, char_span: Tuple[int, int], entity_type: str) -> None:
        start, stop = char_span
        assert start < stop
        self.char_span = range(start, stop)
        self.type = entity_type

    def __str__(self) -> str:
        return f"{self.type}({self.char_span.start},{self.char_span.stop})"

    def __repr__(self) -> str:
        return str(self)

    def is_before(self, other_entity) -> bool:
        """Checks whether this entity is located before the given one.

        :param other_entity: Entity to check
        """
        return self.char_span.stop <= other_entity.char_span.start

    def contains(self, other_entity) -> bool:
        """Checks whether the given entity is fully contained in this entity.

        :param other_entity: Entity to check
        """
        return (
            self.char_span.start <= other_entity.char_span.start
            and other_entity.char_span.stop <= self.char_span.stop
        )

    def overlaps(self, other_entity) -> bool:
        """Checks whether this and the given entity overlap.

        :param other_entity: Entity to check
        """
        starts_inside = self.char_span.start <= other_entity.char_span.start < self.char_span.stop
        ends_inside = self.char_span.start < other_entity.char_span.stop <= self.char_span.stop
        return starts_inside or ends_inside
class InternalBioNerDataset:
    """Internal container holding a corpus' raw documents and their entity annotations.

    ``documents`` maps document ids to the raw document text;
    ``entities_per_document`` maps the same ids to the entities found in that text.
    """

    def __init__(self, documents: Dict[str, str], entities_per_document: Dict[str, List[Entity]]) -> None:
        self.entities_per_document = entities_per_document
        self.documents = documents
class DpEntry(NamedTuple):
    """Entry of the dynamic-programming table used in ``filter_nested_entities``.

    For a prefix of entities (sorted by end offset) it records: the end offset
    of the last chosen entity, how many entities were chosen so far, the summed
    character length of the chosen entities (used as tie-breaker), and the last
    chosen entity itself (``None`` for the sentinel row).
    """

    position_end: int
    entity_count: int
    entity_lengths_sum: int
    last_entity: Optional[Entity]
def merge_datasets(data_sets: Iterable[InternalBioNerDataset]):
    """Merge several corpora into one by unioning their documents and annotations.

    On duplicate document ids, later datasets overwrite earlier ones.

    :param data_sets: corpora to merge
    :return: a single merged :class:`InternalBioNerDataset`
    """
    merged_documents: Dict[str, str] = {}
    merged_entities: Dict[str, List[Entity]] = {}

    for data_set in data_sets:
        merged_documents.update(data_set.documents)
        merged_entities.update(data_set.entities_per_document)

    return InternalBioNerDataset(documents=merged_documents, entities_per_document=merged_entities)
def filter_and_map_entities(
    dataset: InternalBioNerDataset, entity_type_to_canonical: Dict[str, str]
) -> InternalBioNerDataset:
    """Keep only entities with a known type and rename their types to canonical names.

    :param dataset: corpus to filter
    :param entity_type_to_canonical: maps corpus-specific entity types to canonical
        type names; entities whose type is not in the mapping are dropped
    :return: new dataset sharing the documents, with filtered/renamed entities
    """
    mapped_entities_per_document: Dict[str, List[Entity]] = {}

    for document_id, entities in dataset.entities_per_document.items():
        kept_entities = []
        for entity in entities:
            canonical_type = entity_type_to_canonical.get(entity.type)
            if canonical_type is None:
                logging.debug(f"Skip entity type {entity.type}")
                continue
            # copy before renaming so the input dataset stays untouched
            mapped_entity = copy(entity)
            mapped_entity.type = canonical_type
            kept_entities.append(mapped_entity)
        mapped_entities_per_document[document_id] = kept_entities

    return InternalBioNerDataset(documents=dataset.documents, entities_per_document=mapped_entities_per_document)
def filter_nested_entities(dataset: InternalBioNerDataset) -> None:
    """Remove overlapping/nested entity annotations in-place.

    For each document a maximum independent set of entities (no two kept
    entities overlap) is computed by dynamic programming over the entities
    sorted by end offset; ties on entity count are broken in favour of larger
    total character coverage. Logs a warning if any entities were removed.
    """
    num_entities_before = sum([len(x) for x in dataset.entities_per_document.values()])

    for document_id, entities in dataset.entities_per_document.items():
        # Uses dynamic programming approach to calculate maximum independent set in interval graph
        # with sum of all entity lengths as secondary key
        dp_array = [DpEntry(position_end=0, entity_count=0, entity_lengths_sum=0, last_entity=None)]
        for entity in sorted(entities, key=lambda x: x.char_span.stop):
            # find the latest DP row whose last chosen entity ends at or before
            # the start of the current entity (i.e. is compatible with it)
            i = len(dp_array) - 1
            while dp_array[i].position_end > entity.char_span.start:
                i -= 1
            # extend that row with the current entity if it improves the
            # (entity count, total length) objective; otherwise carry forward
            if dp_array[i].entity_count + 1 > dp_array[-1].entity_count or (
                dp_array[i].entity_count + 1 == dp_array[-1].entity_count
                and dp_array[i].entity_lengths_sum + len(entity.char_span) > dp_array[-1].entity_lengths_sum
            ):
                dp_array += [
                    DpEntry(
                        entity.char_span.stop,
                        dp_array[i].entity_count + 1,
                        dp_array[i].entity_lengths_sum + len(entity.char_span),
                        entity,
                    )
                ]
            else:
                dp_array += [dp_array[-1]]

        # backtrack through the DP table to collect the chosen entities
        independent_set = []
        p = dp_array[-1].position_end
        for dp_entry in dp_array[::-1]:
            if dp_entry.last_entity is None:
                # reached the sentinel row: no more chosen entities
                break
            if dp_entry.position_end <= p:
                independent_set += [dp_entry.last_entity]
                p -= len(dp_entry.last_entity.char_span)

        dataset.entities_per_document[document_id] = independent_set

    num_entities_after = sum([len(x) for x in dataset.entities_per_document.values()])
    if num_entities_before != num_entities_after:
        removed = num_entities_before - num_entities_after
        logger.warning(
            f"WARNING: Corpus modified by filtering nested entities. "
            f"Removed {removed} entities. Keep {num_entities_after} entities."
        )
def bioc_to_internal(bioc_file: Path):
    """Helper function to parse corpora that are given in BIOC format.

    See http://bioc.sourceforge.net/ for details.

    All passages of a document are concatenated (space-separated) into a single
    text; annotation offsets, which BioC gives relative to the passage, are
    shifted accordingly. Slightly misaligned annotations are repaired by
    searching for the annotated surface form within a small window.
    """
    tree = etree.parse(str(bioc_file))
    texts_per_document = {}
    entities_per_document = {}
    documents = tree.xpath(".//document")

    all_entities = 0
    non_matching = 0

    for document in Tqdm.tqdm(documents, desc="Converting to internal"):
        document_id = document.xpath("./id")[0].text
        texts: List[str] = []
        entities = []

        for passage in document.xpath("passage"):
            passage_texts = passage.xpath("text/text()")
            if len(passage_texts) == 0:
                # passage without text (e.g. structural element) — nothing to annotate
                continue
            text = passage_texts[0]
            passage_offset = int(passage.xpath("./offset/text()")[0])  # from BioC annotation

            # calculate offset without current text
            # because we stick all passages of a document together
            document_text = " ".join(texts)
            document_offset = len(document_text)
            texts.append(text)
            document_text += " " + text

            for annotation in passage.xpath(".//annotation"):
                # the entity type may be stored under either the "type" or "class" infon
                entity_types = [
                    i.text.replace(" ", "_")
                    for i in annotation.xpath("./infon")
                    if i.attrib["key"] in {"type", "class"}
                ]

                start = int(annotation.xpath("./location")[0].get("offset")) - passage_offset
                # TODO For split entities we also annotate everything inbetween which might be a bad idea?
                final_length = int(annotation.xpath("./location")[-1].get("length"))
                final_offset = int(annotation.xpath("./location")[-1].get("offset")) - passage_offset
                if final_length <= 0:
                    continue
                end = final_offset + final_length

                # shift passage-relative offsets to document-relative offsets
                start += document_offset
                end += document_offset

                true_entity = annotation.xpath(".//text")[0].text
                annotated_entity = " ".join(texts)[start:end]

                # Try to fix incorrect annotations
                if annotated_entity.lower() != true_entity.lower():
                    # search for a prefix of the true surface form within the
                    # annotated span and shift the span by the found offset
                    max_shift = min(3, len(true_entity))
                    for i in range(max_shift):
                        index = annotated_entity.lower().find(true_entity[0 : max_shift - i].lower())
                        if index != -1:
                            start += index
                            end += index
                            break

                annotated_entity = " ".join(texts)[start:end]
                if annotated_entity.lower() != true_entity.lower():
                    non_matching += 1

                all_entities += 1

                for entity_type in entity_types:
                    entities.append(Entity((start, end), entity_type))

        texts_per_document[document_id] = " ".join(texts)
        entities_per_document[document_id] = entities

    # print(
    #     f"Found {non_matching} non-matching entities ({non_matching/all_entities}%) in {bioc_file}"
    # )

    return InternalBioNerDataset(documents=texts_per_document, entities_per_document=entities_per_document)
def brat_to_internal(corpus_dir: Path, ann_file_suffixes=None) -> InternalBioNerDataset:
    """Helper function to parse corpora that are annotated using BRAT.

    See https://brat.nlplab.org/ for details.

    :param corpus_dir: directory containing the *.txt documents; for every text file
        the annotation file(s) with the given suffix(es) are read
    :param ann_file_suffixes: suffixes of the annotation files (default: [".ann"])
    """
    if ann_file_suffixes is None:
        ann_file_suffixes = [".ann"]

    text_files = list(corpus_dir.glob("*.txt"))
    documents = {}
    entities_per_document = defaultdict(list)
    for text_file in text_files:
        with Path(text_file).open(encoding="utf-8") as fin:
            document_text = fin.read().strip()
        document_id = text_file.stem

        for suffix in ann_file_suffixes:
            with open(str(text_file.with_suffix(suffix)), encoding="utf8") as ann_file:
                for line in ann_file:
                    # brat standoff: columns are tab-separated
                    fields = line.strip().split("\t")

                    # Ignore empty lines or relation annotations
                    if not fields or len(fields) <= 2:
                        continue

                    # second column is "<type> <start> <end>"
                    ent_type, char_start, char_end = fields[1].split()
                    start = int(char_start)
                    end = int(char_end)

                    # FIX annotation of whitespaces (necessary for PDR)
                    while document_text[start:end].startswith(" "):
                        start += 1
                    while document_text[start:end].endswith(" "):
                        end -= 1

                    entities_per_document[document_id].append(
                        Entity(
                            char_span=(start, end),
                            entity_type=ent_type,
                        )
                    )

                    # sanity check: the resolved span must match the surface form in column 3
                    assert document_text[start:end].strip() == fields[2].strip()

        documents[document_id] = document_text

    return InternalBioNerDataset(documents=documents, entities_per_document=dict(entities_per_document))
class CoNLLWriter:
    """Utility class for writing `InternalBioNerDataset` to CoNLL files."""

    def __init__(
        self,
        sentence_splitter: SentenceSplitter,
    ) -> None:
        """Initialize CoNLLWriter.

        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which
            segments the text into sentences and tokens
        """
        self.sentence_splitter = sentence_splitter

    def process_dataset(self, datasets: Dict[str, InternalBioNerDataset], out_dir: Path):
        """Write all available splits ('train'/'dev'/'test') of a corpus to ``out_dir``."""
        if "train" in datasets:
            self.write_to_conll(datasets["train"], out_dir / "train.conll")
        if "dev" in datasets:
            self.write_to_conll(datasets["dev"], out_dir / "dev.conll")
        if "test" in datasets:
            self.write_to_conll(datasets["test"], out_dir / "test.conll")

    def write_to_conll(self, dataset: InternalBioNerDataset, output_file: Path):
        """Write one dataset to ``output_file`` in a 3-column CoNLL format.

        Columns are: token, IOB tag (B-/I-<type> or O), and '+'/'-' marking
        whether the token is followed by whitespace. Nested entities are
        removed (in-place) before writing.
        """
        os.makedirs(str(output_file.parent), exist_ok=True)
        filter_nested_entities(dataset)

        with output_file.open("w", encoding="utf8") as f:
            for document_id in Tqdm.tqdm(
                dataset.documents.keys(),
                total=len(dataset.documents),
                desc="Converting to CoNLL",
            ):
                document_text = ftfy.fix_text(dataset.documents[document_id])
                document_text = re.sub(r"[\u2000-\u200B]", " ", document_text)  # replace unicode space characters!
                document_text = document_text.replace("\xa0", " ")  # replace non-break space

                # entities sorted by position; consumed front-to-back while
                # walking the tokens in document order
                entities = deque(
                    sorted(
                        dataset.entities_per_document[document_id],
                        key=attrgetter("char_span.start", "char_span.stop"),
                    )
                )
                current_entity = entities.popleft() if entities else None

                sentences = self.sentence_splitter.split(document_text)

                for sentence in sentences:
                    in_entity = False
                    sentence_had_tokens = False

                    for flair_token in sentence.tokens:
                        token = flair_token.text.strip()
                        assert sentence.start_position is not None
                        assert flair_token.start_position is not None
                        # absolute character offset of the token in the document
                        offset = sentence.start_position + flair_token.start_position

                        if current_entity and offset >= current_entity.char_span.stop:
                            in_entity = False

                            # One token may contain multiple entities -> deque all of them
                            while current_entity and offset >= current_entity.char_span.stop:
                                current_entity = entities.popleft() if entities else None

                        if current_entity and offset in current_entity.char_span:
                            # first token of an entity gets B-, following tokens I-
                            if not in_entity:
                                tag = "B-" + current_entity.type
                                in_entity = True
                            else:
                                tag = "I-" + current_entity.type
                        else:
                            tag = "O"
                            in_entity = False

                        whitespace_after = "+" if flair_token.whitespace_after > 0 else "-"
                        if len(token) > 0:
                            f.write(" ".join([token, tag, whitespace_after]) + "\n")
                            sentence_had_tokens = True
                    # blank line separates sentences; empty sentences are skipped
                    if sentence_had_tokens:
                        f.write("\n")
class HunerDataset(ColumnCorpus, ABC):
    """Base class for HUNER datasets.

    Every subclass has to implement the following methods:
      - `to_internal', which reads the complete data set (incl. train, dev, test) and returns the corpus
        as InternalBioNerDataset
      - `split_url', which returns the base url (i.e. without '.train', '.dev', '.test') to the HUNER split files

    For further information see:
      - Weber et al.: 'HUNER: improving biomedical NER with pretraining'
        https://academic.oup.com/bioinformatics/article-abstract/36/1/295/5523847?redirectedFrom=fulltext
      - HUNER github repository:
        https://github.com/hu-ner/huner
    """

    @abstractmethod
    def to_internal(self, data_folder: Path) -> InternalBioNerDataset:
        """Read the complete corpus (train+dev+test) into an :class:`InternalBioNerDataset`."""
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def split_url() -> str:
        """Return the base url of the HUNER split files (without '.train'/'.dev'/'.test')."""
        raise NotImplementedError

    def get_corpus_sentence_splitter(self) -> Optional[SentenceSplitter]:
        """Return the pre-defined sentence splitter if defined, otherwise return None."""
        return None

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the HUNER corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Custom implementation of :class:`SentenceSplitter` which
            segments the text into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # corpora with a pre-defined sentence splitting keep it; otherwise the
        # user-supplied (or default SciSpacy) splitter is used
        self.sentence_splitter = self.get_corpus_sentence_splitter()
        if not self.sentence_splitter:
            self.sentence_splitter = sentence_splitter if sentence_splitter else SciSpacySentenceSplitter()
        else:
            if sentence_splitter:
                logger.warning(
                    f"WARNING: The corpus {self.__class__.__name__} has a pre-defined sentence splitting, "
                    f"thus just the tokenizer of the given sentence splitter is used"
                )
                self.sentence_splitter.tokenizer = sentence_splitter.tokenizer

        # Create tokenization-dependent CONLL files. This is necessary to prevent
        # from caching issues (e.g. loading the same corpus with different sentence splitters)
        train_file = data_folder / f"{self.sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{self.sentence_splitter.name}_dev.conll"
        test_file = data_folder / f"{self.sentence_splitter.name}_test.conll"

        if not (train_file.exists() and dev_file.exists() and test_file.exists()):
            splits_dir = data_folder / "splits"
            os.makedirs(splits_dir, exist_ok=True)

            writer = CoNLLWriter(sentence_splitter=self.sentence_splitter)
            internal_dataset = self.to_internal(data_folder)

            train_data = self.get_subset(internal_dataset, "train", splits_dir)
            writer.write_to_conll(train_data, train_file)

            dev_data = self.get_subset(internal_dataset, "dev", splits_dir)
            writer.write_to_conll(dev_data, dev_file)

            test_data = self.get_subset(internal_dataset, "test", splits_dir)
            writer.write_to_conll(test_data, test_file)

        super().__init__(
            data_folder=data_folder,
            train_file=train_file.name,
            dev_file=dev_file.name,
            test_file=test_file.name,
            column_format=columns,
            in_memory=in_memory,
        )

    def get_subset(self, dataset: InternalBioNerDataset, split: str, split_dir: Path):
        """Restrict the corpus to the document ids listed in the HUNER file for ``split``."""
        split_file = cached_path(f"{self.split_url()}.{split}", split_dir)

        with split_file.open(encoding="utf8") as f:
            ids = [line.strip() for line in f if line.strip()]
            # keep only ids that actually occur in the corpus, in stable order
            ids = sorted(id_ for id_ in ids if id_ in dataset.documents)

        return InternalBioNerDataset(
            documents={k: dataset.documents[k] for k in ids},
            entities_per_document={k: dataset.entities_per_document[k] for k in ids},
        )
class BIO_INFER(ColumnCorpus):
    """Original BioInfer corpus.

    For further information see Pyysalo et al.:
      BioInfer: a corpus for information extraction in the biomedical domain
      https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-8-50
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
    ) -> None:
        """Initialize the BioInfer corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        train_file = data_folder / "train.conll"
        test_file = data_folder / "test.conll"

        if not (train_file.exists() and test_file.exists()):
            corpus_folder = self.download_dataset(data_folder)
            # each <sentence> is one pre-assembled text: no additional sentence
            # splitting, tokens separated by single spaces
            sentence_splitter = NoSentenceSplitter(tokenizer=SpaceTokenizer())

            train_data = self.parse_dataset(corpus_folder / "BioInfer-train.xml")
            test_data = self.parse_dataset(corpus_folder / "BioInfer-test.xml")

            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(test_data, test_file)

        super().__init__(data_folder, columns, in_memory=in_memory)

    @classmethod
    def download_dataset(cls, data_dir: Path) -> Path:
        """Download and unpack the BioInfer release; returns the folder containing the XML files."""
        data_url = "https://github.com/metalrt/ppi-dataset/archive/refs/heads/master.zip"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)

        return data_dir / "ppi-dataset-master/csv_output"

    @classmethod
    def parse_dataset(cls, original_file: Path):
        """Parse one BioInfer XML file into an :class:`InternalBioNerDataset`.

        Each <sentence> element becomes one document (keyed by its running index).
        Entity char offsets are given as "start-end" tokens, possibly
        comma-separated for split entities; the overall first and last offsets
        are used as the entity span.
        """
        documents: Dict[str, str] = {}
        entities_per_document: Dict[str, List[Entity]] = {}

        tree = etree.parse(str(original_file))
        sentence_elems = tree.xpath("//sentence")
        for s_id, sentence in enumerate(sentence_elems):
            sentence_id = str(s_id)
            documents[sentence_id] = sentence.attrib["text"]
            entities_per_document[sentence_id] = []

            for entity in sentence.xpath(".//entity"):
                char_offsets = re.split("-|,", entity.attrib["charOffset"])
                start_token = int(char_offsets[0])
                end_token = int(char_offsets[-1])
                entities_per_document[sentence_id].append(
                    Entity(
                        char_span=(start_token, end_token),
                        entity_type=entity.attrib["type"],
                    )
                )

        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_GENE_BIO_INFER(HunerDataset):
    """HUNER version of the BioInfer corpus containing only gene/protein annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/bioinfer"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download BioInfer, keep only gene/protein entity types and merge train + test."""
        corpus_folder = BIO_INFER.download_dataset(data_dir)

        # All original entity types that count as gene/protein mentions.
        gene_types = (
            "Individual_protein",
            "Gene/protein/RNA",
            "Gene",
            "DNA_family_or_group",
            "Protein_family_or_group",
        )
        type_mapping = {entity_type: GENE_TAG for entity_type in gene_types}

        splits = []
        for file_name in ("BioInfer-train.xml", "BioInfer-test.xml"):
            split_data = BIO_INFER.parse_dataset(corpus_folder / file_name)
            splits.append(filter_and_map_entities(split_data, type_mapping))

        return merge_datasets(splits)
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class JNLPBA(ColumnCorpus):
    """Original corpus of the JNLPBA shared task.

    For further information see Kim et al.: Introduction to the Bio-
    Entity Recognition Task at JNLPBA
    https://www.aclweb.org/anthology/W04-1213.pdf
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        """Initialize the JNLPBA corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # Two-column CoNLL layout: token and NER tag.
        columns = {0: "text", 1: "ner"}

        data_folder = base_path / self.__class__.__name__.lower()

        train_conll = data_folder / "train.conll"
        test_conll = data_folder / "test.conll"

        if not (train_conll.exists() and test_conll.exists()):
            original_folder = data_folder / "original"
            os.makedirs(original_folder, exist_ok=True)

            # (archive url, IOB2 file inside the archive, CoNLL target file)
            downloads = [
                (
                    "http://www.nactem.ac.uk/GENIA/current/Shared-tasks/JNLPBA/Train/Genia4ERtraining.tar.gz",
                    "Genia4ERtask2.iob2",
                    train_conll,
                ),
                (
                    "http://www.nactem.ac.uk/GENIA/current/Shared-tasks/JNLPBA/Evaluation/Genia4ERtest.tar.gz",
                    "Genia4EReval2.iob2",
                    test_conll,
                ),
            ]
            for archive_url, iob_name, conll_target in downloads:
                archive_path = cached_path(archive_url, original_folder)
                unpack_file(archive_path, original_folder)
                shutil.copy(original_folder / iob_name, conll_target)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            comment_symbol="#",
        )
class HunerJNLPBA:
    """Downloads and parses the JNLPBA IOB2 files for the HUNER corpus variants."""

    @classmethod
    def download_and_prepare_train(cls, data_folder: Path, sentence_tag: str) -> InternalBioNerDataset:
        """Download and unpack the JNLPBA training archive, then parse its IOB2 file.

        :param data_folder: Directory the archive is downloaded to and unpacked in.
        :param sentence_tag: Marker inserted between sentences of a document.
        """
        train_data_url = "http://www.nactem.ac.uk/GENIA/current/Shared-tasks/JNLPBA/Train/Genia4ERtraining.tar.gz"
        train_data_path = cached_path(train_data_url, data_folder)
        unpack_file(train_data_path, data_folder)

        train_input_file = data_folder / "Genia4ERtask2.iob2"
        return cls.read_file(train_input_file, sentence_tag)

    @classmethod
    def download_and_prepare_test(cls, data_folder: Path, sentence_tag: str) -> InternalBioNerDataset:
        """Download and unpack the JNLPBA evaluation archive, then parse its IOB2 file.

        :param data_folder: Directory the archive is downloaded to and unpacked in.
        :param sentence_tag: Marker inserted between sentences of a document.
        """
        test_data_url = "http://www.nactem.ac.uk/GENIA/current/Shared-tasks/JNLPBA/Evaluation/Genia4ERtest.tar.gz"
        test_data_path = cached_path(test_data_url, data_folder)
        unpack_file(test_data_path, data_folder)

        test_input_file = data_folder / "Genia4EReval2.iob2"
        return cls.read_file(test_input_file, sentence_tag)

    @classmethod
    def read_file(cls, input_iob_file: Path, sentence_tag: str) -> InternalBioNerDataset:
        """Parse a JNLPBA IOB2 file into documents with character-offset entities.

        Documents are delimited by "###..." header lines (the id is the part after
        the last ":"). Tokens are re-joined with single spaces and sentences with
        *sentence_tag*; B-/O tags are converted into character-span entities over
        the rebuilt document text.
        """
        documents: Dict[str, str] = {}
        entities_per_document: Dict[str, List[Entity]] = defaultdict(list)

        with open(str(input_iob_file), encoding="utf8") as file_reader:
            document_id: Optional[str] = None
            document_text: Optional[str] = None

            entities: List[Entity] = []
            entity_type: Optional[str] = None
            entity_start = 0

            for line in file_reader:
                line = line.strip()
                if line[:3] == "###":
                    # New document header: flush the previous document first.
                    if not (document_id is None or document_text is None):
                        documents[document_id] = document_text
                        entities_per_document[document_id] = entities

                    document_id = line.split(":")[-1]
                    document_text = None

                    entities = []
                    entity_type = None
                    entity_start = 0

                    # Consume the line that follows each document header.
                    file_reader.__next__()
                    continue

                if line:
                    parts = line.split()
                    token = parts[0].strip()
                    tag = parts[1].strip()
                    if tag.startswith("B-"):
                        # Close a still-open entity before starting the new one.
                        if entity_type is not None and document_text is not None:
                            entities.append(Entity((entity_start, len(document_text)), entity_type))

                        # +1 accounts for the space inserted before this token.
                        entity_start = len(document_text) + 1 if document_text else 0
                        entity_type = tag[2:]

                    elif tag == "O" and entity_type is not None and document_text is not None:
                        # "O" after an open entity closes it; "I-" tags simply
                        # extend the current entity and need no handling here.
                        entities.append(Entity((entity_start, len(document_text)), entity_type))
                        entity_type = None

                    document_text = (document_text + " " + token) if document_text is not None else token

                else:
                    # Empty line marks a sentence boundary.
                    if document_text is not None:
                        document_text += sentence_tag

            # Edge case: last token starts a new entity
            if entity_type is not None:
                entities.append(Entity((entity_start, len(document_text)), entity_type))

            # Last document in file
            if not (document_id is None or document_text is None):
                documents[document_id] = document_text
                entities_per_document[document_id] = entities

        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_GENE_JNLPBA(HunerDataset):
    """HUNER version of the JNLPBA corpus containing gene annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/genia"

    def get_corpus_sentence_splitter(self) -> SentenceSplitter:
        return TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SciSpacyTokenizer())

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download JNLPBA, keep only "protein" annotations and merge train + test."""
        original_folder = data_dir / "original"
        os.makedirs(str(original_folder), exist_ok=True)

        # Use the splitter's tag as separator when sentences are tag-delimited.
        separator = (
            self.sentence_splitter.tag if isinstance(self.sentence_splitter, TagSentenceSplitter) else " "
        )

        mapping = {"protein": GENE_TAG}
        train_split = filter_and_map_entities(
            HunerJNLPBA.download_and_prepare_train(original_folder, separator), mapping
        )
        test_split = filter_and_map_entities(
            HunerJNLPBA.download_and_prepare_test(original_folder, separator), mapping
        )
        return merge_datasets([train_split, test_split])
class HUNER_CELL_LINE_JNLPBA(HunerDataset):
    """HUNER version of the JNLPBA corpus containing cell line annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/genia"

    def get_corpus_sentence_splitter(self) -> SentenceSplitter:
        return TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SciSpacyTokenizer())

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download JNLPBA, keep only "cell_line" annotations and merge train + test."""
        original_folder = data_dir / "original"
        os.makedirs(str(original_folder), exist_ok=True)

        # Use the splitter's tag as separator when sentences are tag-delimited.
        separator = (
            self.sentence_splitter.tag if isinstance(self.sentence_splitter, TagSentenceSplitter) else " "
        )

        mapping = {"cell_line": CELL_LINE_TAG}
        train_split = filter_and_map_entities(
            HunerJNLPBA.download_and_prepare_train(original_folder, separator), mapping
        )
        test_split = filter_and_map_entities(
            HunerJNLPBA.download_and_prepare_test(original_folder, separator), mapping
        )
        return merge_datasets([train_split, test_split])
class CELL_FINDER(ColumnCorpus):
    """Original CellFinder corpus containing cell line, species and gene annotations.

    For futher information see Neves et al.: Annotating and
    evaluating text for stem cell research
    https://pdfs.semanticscholar.org/38e3/75aeeeb1937d03c3c80128a70d8e7a74441f.pdf
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the CellFinder corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Custom implementation of :class:`SentenceSplitter` which segments
            the text into sentences and tokens.
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()

        data_folder = base_path / dataset_name
        # CoNLL output is cached per sentence-splitter configuration.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"

        if not (train_file.exists()):
            train_corpus = self.download_and_prepare(data_folder)

            writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            writer.write_to_conll(train_corpus, train_file)

        super().__init__(data_folder, columns, in_memory=in_memory)

    @classmethod
    def download_and_prepare(cls, data_folder: Path) -> InternalBioNerDataset:
        """Download and unpack the CellFinder brat archive, then parse its contents."""
        data_url = (
            "https://www.informatik.hu-berlin.de/de/forschung/gebiete/wbi/resources/cellfinder/cellfinder1_brat.tar.gz"
        )
        data_path = cached_path(data_url, data_folder)
        unpack_file(data_path, data_folder)

        return cls.read_folder(data_folder)

    @classmethod
    def read_folder(cls, data_folder: Path) -> InternalBioNerDataset:
        """Parse all brat annotation files (.ann) with their .txt text counterparts.

        :param data_folder: Folder containing paired ``*.ann`` / ``*.txt`` files.
        :return: Dataset mapping each file stem to its text and entity spans.
        """
        ann_files = list(data_folder.glob("*.ann"))
        documents = {}
        entities_per_document = defaultdict(list)
        for ann_file in ann_files:
            with ann_file.open(encoding="utf8") as f_ann, ann_file.with_suffix(".txt").open(encoding="utf8") as f_txt:
                document_text = f_txt.read().strip()

                document_id = ann_file.stem
                documents[document_id] = document_text

                for line in f_ann:
                    fields = line.strip().split("\t")
                    # BUGFIX: ``str.split`` always returns a non-empty list, so the
                    # original ``if not fields`` guard could never fire and a blank
                    # line crashed the parser below; test the first field instead.
                    if not fields[0]:
                        continue

                    ent_type, char_start, char_end = fields[1].split()
                    entities_per_document[document_id].append(
                        Entity(
                            char_span=(int(char_start), int(char_end)),
                            entity_type=ent_type,
                        )
                    )

                    # Sanity check: the annotated span must match the surface text.
                    assert document_text[int(char_start) : int(char_end)] == fields[2]

        return InternalBioNerDataset(documents=documents, entities_per_document=dict(entities_per_document))
class HUNER_CELL_LINE_CELL_FINDER(HunerDataset):
    """HUNER version of the CellFinder corpus containing only cell line annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/cellfinder_cellline"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download CellFinder and keep only "CellLine" annotations."""
        full_dataset = CELL_FINDER.download_and_prepare(data_dir)
        return filter_and_map_entities(full_dataset, {"CellLine": CELL_LINE_TAG})
class HUNER_SPECIES_CELL_FINDER(HunerDataset):
    """HUNER version of the CellFinder corpus containing only species annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/cellfinder_species"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download CellFinder and keep only "Species" annotations."""
        full_dataset = CELL_FINDER.download_and_prepare(data_dir)
        return filter_and_map_entities(full_dataset, {"Species": SPECIES_TAG})
class HUNER_GENE_CELL_FINDER(HunerDataset):
    """HUNER version of the CellFinder corpus containing only gene annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/cellfinder_protein"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download CellFinder and keep only "GeneProtein" annotations."""
        full_dataset = CELL_FINDER.download_and_prepare(data_dir)
        return filter_and_map_entities(full_dataset, {"GeneProtein": GENE_TAG})
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class MIRNA(ColumnCorpus):
    """Original miRNA corpus.

    For further information see Bagewadi et al.: Detecting miRNA
    Mentions and Relations in Biomedical Literature
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4602280/
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the miRNA corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Callable that segments a document into sentences,
            defaults to scispacy
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # Without a custom splitter, documents are tag-delimited so the default
        # splitter can recover sentence boundaries later.
        sentence_separator = " "
        if sentence_splitter is None:
            sentence_separator = SENTENCE_TAG
            sentence_splitter = TagSentenceSplitter(tag=sentence_separator, tokenizer=SciSpacyTokenizer())

        # CoNLL output is cached per sentence-splitter configuration.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        test_file = data_folder / f"{sentence_splitter.name}_test.conll"

        if not (train_file.exists() and test_file.exists()):
            download_folder = data_folder / "original"
            os.makedirs(str(download_folder), exist_ok=True)

            writer = CoNLLWriter(sentence_splitter=sentence_splitter)

            train_corpus = self.download_and_prepare_train(download_folder, sentence_separator)
            writer.write_to_conll(train_corpus, train_file)

            test_corpus = self.download_and_prepare_test(download_folder, sentence_separator)
            writer.write_to_conll(test_corpus, test_file)

        super().__init__(data_folder, columns, in_memory=in_memory)

    @classmethod
    def download_and_prepare_train(cls, data_folder: Path, sentence_separator: str):
        """Download the miRNA training XML file and parse it."""
        data_url = (
            "https://www.scai.fraunhofer.de/content/dam/scai/de/downloads/bioinformatik/miRNA/miRNA-Train-Corpus.xml"
        )
        data_path = cached_path(data_url, data_folder)
        return cls.parse_file(data_path, "train", sentence_separator)

    @classmethod
    def download_and_prepare_test(cls, data_folder: Path, sentence_separator):
        """Download the miRNA test XML file and parse it."""
        data_url = (
            "https://www.scai.fraunhofer.de/content/dam/scai/de/downloads/bioinformatik/miRNA/miRNA-Test-Corpus.xml"
        )
        data_path = cached_path(data_url, data_folder)
        return cls.parse_file(data_path, "test", sentence_separator)

    @classmethod
    def parse_file(cls, input_file: Path, split: str, sentence_separator: str) -> InternalBioNerDataset:
        """Parse a miRNA corpus XML file into an InternalBioNerDataset.

        :param input_file: XML file with <document>/<sentence>/<entity> elements.
        :param split: Split name ("train" or "test"), appended to each document id.
        :param sentence_separator: String inserted between sentences of a document.
        """
        tree = etree.parse(str(input_file))

        documents = {}
        entities_per_document = {}

        for document in tree.xpath(".//document"):
            document_id = document.get("id") + "-" + split
            entities = []

            document_text = ""
            for sentence in document.xpath(".//sentence"):
                if document_text:
                    document_text += sentence_separator
                sentence_offset = len(document_text)
                # BUGFIX: the original appended
                # ``sentence.get("text") if document_text else sentence.get("text")``
                # - a no-op conditional whose branches were identical.
                document_text += sentence.get("text")

                for entity in sentence.xpath(".//entity"):
                    start, end = entity.get("charOffset").split("-")
                    entities.append(
                        Entity(
                            (
                                sentence_offset + int(start),
                                # +1: end offsets appear to be inclusive in the
                                # XML - presumably; verify against the corpus.
                                sentence_offset + int(end) + 1,
                            ),
                            entity.get("type"),
                        )
                    )

            documents[document_id] = document_text
            entities_per_document[document_id] = entities

        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HunerMiRNAHelper:
    """Helper routines shared by the HUNER miRNA corpus variants."""

    @staticmethod
    def get_mirna_subset(dataset: InternalBioNerDataset, split_url: str, split_dir: Path):
        """Restrict *dataset* to the document ids listed in a HUNER split file.

        The split file lists plain ids; both the "-train" and "-test" variant of
        every id is considered, and those present in the dataset are kept.
        """
        split_file = cached_path(split_url, split_dir)

        with split_file.open(encoding="utf8") as file_handle:
            base_ids = [stripped for stripped in (raw.strip() for raw in file_handle) if stripped]

        candidates = [base + "-train" for base in base_ids] + [base + "-test" for base in base_ids]
        selected = sorted(doc_id for doc_id in candidates if doc_id in dataset.documents)

        return InternalBioNerDataset(
            documents={doc_id: dataset.documents[doc_id] for doc_id in selected},
            entities_per_document={doc_id: dataset.entities_per_document[doc_id] for doc_id in selected},
        )
class HUNER_GENE_MIRNA(HunerDataset):
    """HUNER version of the miRNA corpus containing protein / gene annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/miRNA"

    def get_subset(self, dataset: InternalBioNerDataset, split: str, split_dir: Path):
        # The huner split files do not record whether an id stems from the train
        # or test part of the original corpus, so both variants are matched.
        return HunerMiRNAHelper.get_mirna_subset(dataset, f"{self.split_url()}.{split}", split_dir)

    def get_corpus_sentence_splitter(self):
        return TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SciSpacyTokenizer())

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download the miRNA corpus and keep only "Genes/Proteins" annotations."""
        download_folder = data_dir / "original"
        os.makedirs(str(download_folder), exist_ok=True)

        # Use the splitter's tag as separator when sentences are tag-delimited.
        separator = (
            self.sentence_splitter.tag if isinstance(self.sentence_splitter, TagSentenceSplitter) else " "
        )

        mapping = {"Genes/Proteins": GENE_TAG}
        splits = [
            filter_and_map_entities(MIRNA.download_and_prepare_train(download_folder, separator), mapping),
            filter_and_map_entities(MIRNA.download_and_prepare_test(download_folder, separator), mapping),
        ]
        return merge_datasets(splits)
class HUNER_SPECIES_MIRNA(HunerDataset):
    """HUNER version of the miRNA corpus containing species annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/miRNA"

    def get_subset(self, dataset: InternalBioNerDataset, split: str, split_dir: Path):
        # The huner split files do not record whether an id stems from the train
        # or test part of the original corpus, so both variants are matched.
        return HunerMiRNAHelper.get_mirna_subset(dataset, f"{self.split_url()}.{split}", split_dir)

    def get_corpus_sentence_splitter(self) -> SentenceSplitter:
        return TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SciSpacyTokenizer())

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download the miRNA corpus and keep only "Species" annotations."""
        download_folder = data_dir / "original"
        os.makedirs(str(download_folder), exist_ok=True)

        # Use the splitter's tag as separator when sentences are tag-delimited.
        separator = (
            self.sentence_splitter.tag if isinstance(self.sentence_splitter, TagSentenceSplitter) else " "
        )

        mapping = {"Species": SPECIES_TAG}
        splits = [
            filter_and_map_entities(MIRNA.download_and_prepare_train(download_folder, separator), mapping),
            filter_and_map_entities(MIRNA.download_and_prepare_test(download_folder, separator), mapping),
        ]
        return merge_datasets(splits)
class HUNER_DISEASE_MIRNA(HunerDataset):
    """HUNER version of the miRNA corpus containing disease annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/miRNA"

    def get_subset(self, dataset: InternalBioNerDataset, split: str, split_dir: Path):
        # The huner split files do not record whether an id stems from the train
        # or test part of the original corpus, so both variants are matched.
        return HunerMiRNAHelper.get_mirna_subset(dataset, f"{self.split_url()}.{split}", split_dir)

    def get_corpus_sentence_splitter(self) -> SentenceSplitter:
        return TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SciSpacyTokenizer())

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download the miRNA corpus and keep only "Diseases" annotations."""
        download_folder = data_dir / "original"
        os.makedirs(str(download_folder), exist_ok=True)

        # Use the splitter's tag as separator when sentences are tag-delimited.
        separator = (
            self.sentence_splitter.tag if isinstance(self.sentence_splitter, TagSentenceSplitter) else " "
        )

        mapping = {"Diseases": DISEASE_TAG}
        splits = [
            filter_and_map_entities(MIRNA.download_and_prepare_train(download_folder, separator), mapping),
            filter_and_map_entities(MIRNA.download_and_prepare_test(download_folder, separator), mapping),
        ]
        return merge_datasets(splits)
class KaewphanCorpusHelper:
    """Helper class for the corpora from Kaewphan et al., i.e. CLL and Gellus."""

    @staticmethod
    def download_cll_dataset(data_folder: Path):
        # Download and unpack the CLL corpus archive into data_folder.
        data_url = "https://github.com/hu-ner/hunflair-corpora/raw/main/cll/CLL_corpus.tar.gz"
        data_path = cached_path(data_url, data_folder)
        unpack_file(data_path, data_folder)

    @staticmethod
    def prepare_and_save_dataset(nersuite_folder: Path, output_file: Path):
        """Convert all .nersuite files in *nersuite_folder* into one CoNLL file.

        Output lines have the form "<token> <tag> <+|->", where the third column
        is "-" when the next token's start offset equals this token's end offset
        (i.e. no whitespace in between) and "+" otherwise. Consecutive blank
        lines are collapsed into a single one.
        """
        with output_file.open("w", encoding="utf-8") as writer:
            out_newline = False  # True while the last written line was blank
            for file in os.listdir(str(nersuite_folder)):
                if not file.endswith(".nersuite"):
                    continue

                annotations = []
                with open(os.path.join(str(nersuite_folder), file), encoding="utf8") as reader:
                    for line in reader.readlines():
                        columns = line.split("\t")
                        # Keep the first four columns: tag, start, end, and a
                        # fourth field used as the written token below.
                        annotations.append(columns[:4])

                num_annotations = len(annotations)
                for i, annotation in enumerate(annotations):
                    if len(annotation) == 1:
                        # Blank input line: emit at most one blank output line.
                        assert annotation[0] == "\n"
                        if not out_newline:
                            writer.write("\n")
                        out_newline = True
                        continue

                    has_whitespace = "+"

                    # Peek at the next non-blank annotation to detect adjacency.
                    next_annotation = (
                        annotations[i + 1] if (i + 1) < num_annotations and len(annotations[i + 1]) > 1 else None
                    )
                    if next_annotation and next_annotation[1] == annotation[2]:
                        has_whitespace = "-"

                    # NOTE(review): column 3 is written as the token here, while
                    # read_dataset below takes the token from column 4 - confirm
                    # against the .nersuite column layout.
                    writer.write(" ".join([annotation[3], annotation[0], has_whitespace]) + "\n")
                    out_newline = False

                if not out_newline:
                    writer.write("\n")
                    out_newline = True

    @staticmethod
    def download_gellus_dataset(data_folder: Path):
        # Download and unpack the Gellus corpus archive into data_folder.
        data_url = "https://github.com/hu-ner/hunflair-corpora/raw/main/gellus/Gellus_corpus.tar.gz"
        data_path = cached_path(data_url, data_folder)
        unpack_file(data_path, data_folder)

    @staticmethod
    def read_dataset(nersuite_folder: Path, sentence_separator: str) -> InternalBioNerDataset:
        """Read all .nersuite files in *nersuite_folder* into an InternalBioNerDataset.

        Tokens are re-joined with single spaces and sentences with
        *sentence_separator*; BIO tags are converted into character-offset
        entity spans over the rebuilt document text.
        """
        documents = {}
        entities_per_document = {}
        for file in os.listdir(str(nersuite_folder)):
            if not file.endswith(".nersuite"):
                continue

            document_id = file.replace(".nersuite", "")

            with open(os.path.join(str(nersuite_folder), file), encoding="utf8") as reader:
                document_text = ""

                entities = []
                entity_start = None
                entity_type = None

                for line in reader.readlines():
                    line = line.strip()
                    if line:
                        # NOTE(review): the token is taken from column 4 here,
                        # while prepare_and_save_dataset above writes column 3 -
                        # confirm against the .nersuite column layout.
                        tag, _, _, _, token = line.split("\t")[:5]
                        if tag.startswith("B-"):
                            # Close a still-open entity before starting a new one.
                            if entity_type is not None and entity_start is not None:
                                entities.append(Entity((entity_start, len(document_text)), entity_type))

                            # +1 accounts for the space inserted before this token.
                            entity_start = len(document_text) + 1 if document_text else 0
                            entity_type = tag[2:]

                        elif tag == "O" and entity_type is not None and entity_start is not None:
                            entities.append(
                                Entity(
                                    (entity_start, len(document_text)),
                                    entity_type,
                                )
                            )
                            entity_type = None

                        document_text = document_text + " " + token if document_text else token
                    else:
                        # Edge case: last token starts a new entity
                        if entity_type is not None and entity_start is not None:
                            entities.append(Entity((entity_start, len(document_text)), entity_type))

                        # Blank line marks a sentence boundary.
                        document_text += sentence_separator

                # Drop a trailing sentence separator, if present.
                if document_text.endswith(sentence_separator):
                    document_text = document_text[: -len(sentence_separator)]

                documents[document_id] = document_text
                entities_per_document[document_id] = entities

        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class CLL(ColumnCorpus):
    """Original CLL corpus containing cell line annotations.

    For further information, see Kaewphan et al.: Cell line name
    recognition in support of the identification of synthetic lethality
    in cancer from text
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4708107/
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        """Initialize the CLL corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training
        """
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # Two-column CoNLL layout: token and NER tag.
        column_layout = {0: "text", 1: "ner"}

        corpus_folder = root / self.__class__.__name__.lower()
        conll_train = corpus_folder / "train.conll"

        if not conll_train.exists():
            KaewphanCorpusHelper.download_cll_dataset(corpus_folder)
            KaewphanCorpusHelper.prepare_and_save_dataset(
                corpus_folder / "CLL-1.0.2" / "nersuite", conll_train
            )

        super().__init__(corpus_folder, column_layout, in_memory=in_memory)
class HUNER_CELL_LINE_CLL(HunerDataset):
    """HUNER version of the CLL corpus containing cell line annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/cll"

    def get_corpus_sentence_splitter(self) -> SentenceSplitter:
        return TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SciSpacyTokenizer())

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download CLL and map its "CL" annotations to the HUNER cell line tag."""
        KaewphanCorpusHelper.download_cll_dataset(data_dir)

        # Use the splitter's tag as separator when sentences are tag-delimited.
        separator = (
            self.sentence_splitter.tag if isinstance(self.sentence_splitter, TagSentenceSplitter) else " "
        )

        raw_dataset = KaewphanCorpusHelper.read_dataset(data_dir / "CLL-1.0.2" / "nersuite", separator)
        return filter_and_map_entities(raw_dataset, {"CL": CELL_LINE_TAG})
class GELLUS(ColumnCorpus):
    """Original Gellus corpus containing cell line annotations.

    For further information, see Kaewphan et al.: Cell line name
    recognition in support of the identification of synthetic lethality
    in cancer from text
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4708107/
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        """Initialize the GELLUS corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training
        """
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # Two-column CoNLL layout: token and NER tag.
        column_layout = {0: "text", 1: "ner"}

        corpus_folder = root / self.__class__.__name__.lower()

        # Map the original nersuite split folders to the CoNLL files we create.
        split_to_file = {
            "train": corpus_folder / "train.conll",
            "devel": corpus_folder / "dev.conll",
            "test": corpus_folder / "test.conll",
        }

        if not all(conll_file.exists() for conll_file in split_to_file.values()):
            KaewphanCorpusHelper.download_gellus_dataset(corpus_folder)
            for split_name, conll_file in split_to_file.items():
                nersuite_folder = corpus_folder / "GELLUS-1.0.3" / "nersuite" / split_name
                KaewphanCorpusHelper.prepare_and_save_dataset(nersuite_folder, conll_file)

        super().__init__(corpus_folder, column_layout, in_memory=in_memory)
class HUNER_CELL_LINE_GELLUS(HunerDataset):
    """HUNER version of the Gellus corpus containing cell line annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/gellus"

    def get_corpus_sentence_splitter(self) -> SentenceSplitter:
        return TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SciSpacyTokenizer())

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download Gellus, merge all three splits and map "Cell-line-name" annotations."""
        KaewphanCorpusHelper.download_gellus_dataset(data_dir)

        # Use the splitter's tag as separator when sentences are tag-delimited.
        separator = (
            self.sentence_splitter.tag if isinstance(self.sentence_splitter, TagSentenceSplitter) else " "
        )

        split_datasets = [
            KaewphanCorpusHelper.read_dataset(data_dir / "GELLUS-1.0.3" / "nersuite" / split_name, separator)
            for split_name in ["train", "devel", "test"]
        ]
        return filter_and_map_entities(merge_datasets(split_datasets), {"Cell-line-name": CELL_LINE_TAG})
class LOCTEXT(ColumnCorpus):
    """Original LOCTEXT corpus containing species annotations.

    For further information see Cejuela et al.:
    LocText: relation extraction of protein localizations to assist database curation
    https://bmcbioinformatics.biomedcentral.com/articles/10.1186/s12859-018-2021-9
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the LOCTEXT corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Custom implementation of :class:`SentenceSplitter`
            that segments a document into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()

        # CoNLL output is cached per sentence-splitter configuration.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"

        if not (train_file.exists()):
            self.download_dataset(data_folder)
            full_dataset = self.parse_dataset(data_folder)

            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(full_dataset, train_file)

        super().__init__(data_folder, columns, in_memory=in_memory)

    @staticmethod
    def download_dataset(data_dir: Path):
        """Download and unpack the LocText PubAnnotation archive into *data_dir*."""
        data_url = "http://pubannotation.org/downloads/LocText-annotations.tgz"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)

    @staticmethod
    def parse_dataset(data_dir: Path) -> InternalBioNerDataset:
        """Parse the LocText JSON files into an InternalBioNerDataset.

        Annotation objects ("denotations") are mapped to "protein" (go/uniprot
        prefixes) or "species" (taxonomy prefix); all others are ignored.
        """
        loctext_json_folder = data_dir / "LocText"

        entity_type_mapping = {
            "go": "protein",
            "uniprot": "protein",
            "taxonomy": "species",
        }

        documents = {}
        entities_per_document = {}

        for file in os.listdir(str(loctext_json_folder)):
            # BUGFIX: the original used ``file.strip(".json")``, which strips the
            # *character set* {'.', 'j', 's', 'o', 'n'} from both ends of the file
            # name and therefore mangles ids starting/ending with those letters.
            document_id = os.path.splitext(file)[0]
            entities = []

            with open(os.path.join(str(loctext_json_folder), file), encoding="utf8") as f_in:
                data = json.load(f_in)
                document_text = data["text"].strip()
                document_text = document_text.replace("\n", " ")

                if "denotations" in data:
                    for ann in data["denotations"]:
                        start = int(ann["span"]["begin"])
                        end = int(ann["span"]["end"])

                        original_entity_type = ann["obj"].split(":")[0]
                        if original_entity_type not in entity_type_mapping:
                            continue

                        entity_type = entity_type_mapping[original_entity_type]
                        entities.append(Entity((start, end), entity_type))

                documents[document_id] = document_text
                entities_per_document[document_id] = entities

        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_SPECIES_LOCTEXT(HunerDataset):
    """HUNER version of the Loctext corpus containing species annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/loctext"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download LocText and keep only "species" annotations."""
        LOCTEXT.download_dataset(data_dir)
        return filter_and_map_entities(LOCTEXT.parse_dataset(data_dir), {"species": SPECIES_TAG})
class HUNER_GENE_LOCTEXT(HunerDataset):
    """HUNER version of the Loctext corpus containing protein annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/loctext"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download LocText and keep only "protein" annotations."""
        LOCTEXT.download_dataset(data_dir)
        return filter_and_map_entities(LOCTEXT.parse_dataset(data_dir), {"protein": GENE_TAG})
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class CHEMDNER(ColumnCorpus):
    """Original corpus of the CHEMDNER shared task.

    For further information see Krallinger et al.: The CHEMDNER corpus
    of chemicals and drugs and its annotation principles
    https://jcheminf.biomedcentral.com/articles/10.1186/1758-2946-7-S1-S2
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the CHEMDNER corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Custom implementation of :class:`SentenceSplitter` which
            segments documents into sentences and tokens
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # default dataset folder is the cache root
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL files are cached per sentence-splitter configuration
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{sentence_splitter.name}_dev.conll"
        test_file = data_folder / f"{sentence_splitter.name}_test.conll"
        # Download and convert the original BioC XML files only once
        if not (train_file.exists() and dev_file.exists() and test_file.exists()):
            download_dir = data_folder / "original"
            os.makedirs(download_dir, exist_ok=True)
            self.download_dataset(download_dir)
            train_data = bioc_to_internal(download_dir / "chemdner_corpus" / "training.bioc.xml")
            dev_data = bioc_to_internal(download_dir / "chemdner_corpus" / "development.bioc.xml")
            test_data = bioc_to_internal(download_dir / "chemdner_corpus" / "evaluation.bioc.xml")
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(dev_data, dev_file)
            conll_writer.write_to_conll(test_data, test_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @staticmethod
    def download_dataset(data_dir: Path):
        """Download and unpack the original CHEMDNER archive into ``data_dir``."""
        data_url = "https://biocreative.bioinformatics.udel.edu/media/store/files/2014/chemdner_corpus.tar.gz"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
class HUNER_CHEMICAL_CHEMDNER(HunerDataset):
    """HUNER version of the CHEMDNER corpus containing chemical annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/chemdner"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        os.makedirs(str(data_dir), exist_ok=True)
        CHEMDNER.download_dataset(data_dir)

        # Merge all three official splits into one dataset.
        corpus_dir = data_dir / "chemdner_corpus"
        split_files = ("training.bioc.xml", "development.bioc.xml", "evaluation.bioc.xml")
        merged = merge_datasets([bioc_to_internal(corpus_dir / name) for name in split_files])

        # Collapse every CHEMDNER mention subtype onto the generic chemical tag.
        chemdner_subtypes = (
            "ABBREVIATION",
            "FAMILY",
            "FORMULA",
            "IDENTIFIER",
            "MULTIPLE",
            "NO_CLASS",
            "SYSTEMATIC",
            "TRIVIAL",
        )
        return filter_and_map_entities(merged, {subtype: CHEMICAL_TAG for subtype in chemdner_subtypes})
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class IEPA(ColumnCorpus):
    """IEPA corpus as provided by http://corpora.informatik.hu-berlin.de/.

    For further information see Ding, Berleant, Nettleton, Wurtele:
    Mining MEDLINE: abstracts, sentences, or phrases?
    https://www.ncbi.nlm.nih.gov/pubmed/11928487
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
    ) -> None:
        """Initialize the IEPA corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        train_file = data_folder / "train.conll"
        test_file = data_folder / "test.conll"
        # Convert the XML corpus to CoNLL only once; reuse cached files afterwards
        if not (train_file.exists() and test_file.exists()):
            corpus_folder = self.download_dataset(data_folder)
            # No additional sentence splitting is applied; tokens are taken as
            # space-separated pieces of the sentence text
            sentence_splitter = NoSentenceSplitter(tokenizer=SpaceTokenizer())
            train_data = self.parse_dataset(corpus_folder / "IEPA-train.xml")
            test_data = self.parse_dataset(corpus_folder / "IEPA-test.xml")
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(test_data, test_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @staticmethod
    def download_dataset(data_dir: Path):
        """Download the ppi-dataset archive; return the folder holding the IEPA XML files."""
        data_url = "https://github.com/metalrt/ppi-dataset/archive/refs/heads/master.zip"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
        return data_dir / "ppi-dataset-master/csv_output"
    @classmethod
    def parse_dataset(cls, original_file: Path):
        """Parse one IEPA XML split into an :class:`InternalBioNerDataset`.

        Document texts are rebuilt by joining the sentence texts with single
        spaces; all annotated entities are typed "Protein".

        NOTE(review): the charOffset values are used as-is against the rebuilt
        document text — confirm against the raw data that they are
        document-relative (not sentence-relative).
        """
        documents: Dict[str, str] = {}
        entities_per_document: Dict[str, List[Entity]] = {}
        tree = etree.parse(str(original_file))
        document_elems = tree.xpath("//document")
        for document in document_elems:
            # Dots in the XML document id are replaced by underscores
            document_id = "_".join(document.attrib["id"].split("."))
            document_text = ""
            entities_per_document[document_id] = []
            sentence_elems = document.xpath(".//sentence")
            for sentence in sentence_elems:
                sentence_text = sentence.attrib["text"]
                if document_text == "":
                    document_text = sentence_text
                else:
                    document_text += " " + sentence_text
                for entity in sentence.xpath(".//entity"):
                    # charOffset may describe a discontinuous span ("a-b,c-d");
                    # the annotation is collapsed to its outermost boundaries
                    char_offsets = re.split("-|,", entity.attrib["charOffset"])
                    start_token = int(char_offsets[0])
                    end_token = int(char_offsets[-1])
                    entities_per_document[document_id].append(
                        Entity(
                            char_span=(start_token, end_token),
                            entity_type="Protein",
                        )
                    )
            documents[document_id] = document_text
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_GENE_IEPA(HunerDataset):
    """HUNER version of the IEPA corpus containing gene annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/iepa"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        corpus_folder = IEPA.download_dataset(data_dir)
        mapping = {"Protein": GENE_TAG}

        # Parse both splits, map "Protein" mentions onto the shared gene tag,
        # and expose everything as one merged dataset.
        splits = []
        for split_file in ("IEPA-train.xml", "IEPA-test.xml"):
            parsed = IEPA.parse_dataset(corpus_folder / split_file)
            splits.append(filter_and_map_entities(parsed, mapping))
        return merge_datasets(splits)
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class LINNEAUS(ColumnCorpus):
    """Original LINNEAUS corpus containing species annotations.

    For further information see Gerner et al.:
    LINNAEUS: a species name identification system for biomedical literature
    https://www.ncbi.nlm.nih.gov/pubmed/20149233
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        tokenizer: Optional[Tokenizer] = None,
    ) -> None:
        """Initialize the LINNEAUS corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param tokenizer: Custom implementation of :class:`Tokenizer` which segments
            sentence into tokens (default :class:`SciSpacyTokenizer`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if tokenizer is None:
            tokenizer = SciSpacyTokenizer()
        # Sentence boundaries are marked with SENTENCE_TAG in the converted text
        sentence_splitter = TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=tokenizer)
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        # Download and convert only once; reuse the cached CoNLL file afterwards
        if not (train_file.exists()):
            dataset = self.download_and_parse_dataset(data_folder)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(dataset, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)

    @staticmethod
    def download_and_parse_dataset(data_dir: Path):
        """Download the LINNAEUS release and parse it into an :class:`InternalBioNerDataset`.

        :raises AssertionError: If an annotation's offsets do not match the
            annotated surface text in the corresponding document.
        """
        data_url = "https://sourceforge.net/projects/linnaeus/files/Corpora/manual-corpus-species-1.0.tar.gz"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
        documents = {}
        entities_per_document = defaultdict(list)
        # Read texts
        texts_directory = data_dir / "manual-corpus-species-1.0" / "txt"
        for filename in os.listdir(str(texts_directory)):
            if not filename.endswith(".txt"):
                continue
            # BUGFIX: the previous code used `filename.strip(".txt")`, which strips
            # the *character set* {'.', 't', 'x'} from both ends and would mangle
            # document ids starting or ending with those characters; splitext
            # removes exactly the extension.
            document_id = os.path.splitext(filename)[0]
            with open(os.path.join(str(texts_directory), filename), encoding="utf8") as file:
                documents[document_id] = file.read().strip()
        # Read annotations
        tag_file = data_dir / "manual-corpus-species-1.0" / "filtered_tags.tsv"
        with open(str(tag_file), encoding="utf8") as file:
            next(file)  # Ignore header row
            for line in file:
                if not line:
                    continue
                document_id, _start, _end, text = line.strip().split("\t")[1:5]
                start, end = int(_start), int(_end)
                entities_per_document[document_id].append(Entity((start, end), SPECIES_TAG))
                document_text = documents[document_id]
                if document_text[start:end] != text:
                    raise AssertionError(
                        f"Annotation mismatch in document {document_id}: "
                        f"expected {text!r}, found {document_text[start:end]!r}"
                    )
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_SPECIES_LINNEAUS(HunerDataset):
    """HUNER flavour of the LINNEAUS corpus (species annotations only)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/linneaus"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        # LINNEAUS already contains only species annotations, so the parsed
        # corpus can be returned without further filtering.
        return LINNEAUS.download_and_parse_dataset(data_dir)
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class CDR(ColumnCorpus):
    """CDR corpus as provided by https://github.com/JHnlp/BioCreative-V-CDR-Corpus.

    For further information see Li et al.: BioCreative V CDR task
    corpus: a resource for chemical disease relation extraction
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4860626/
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the CDR corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments
            documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL files are cached per sentence-splitter configuration
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{sentence_splitter.name}_dev.conll"
        test_file = data_folder / f"{sentence_splitter.name}_test.conll"
        # Download and convert the original BioC XML files only once
        if not (train_file.exists() and dev_file.exists() and test_file.exists()):
            download_dir = data_folder / "original"
            os.makedirs(download_dir, exist_ok=True)
            self.download_dataset(download_dir)
            train_data = bioc_to_internal(download_dir / "CDR_Data" / "CDR.Corpus.v010516" / "CDR_TrainingSet.BioC.xml")
            dev_data = bioc_to_internal(
                download_dir / "CDR_Data" / "CDR.Corpus.v010516" / "CDR_DevelopmentSet.BioC.xml"
            )
            test_data = bioc_to_internal(download_dir / "CDR_Data" / "CDR.Corpus.v010516" / "CDR_TestSet.BioC.xml")
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(dev_data, dev_file)
            conll_writer.write_to_conll(test_data, test_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @staticmethod
    def download_dataset(data_dir: Path):
        """Download and unpack the BioCreative V CDR archive into ``data_dir``."""
        data_url = "https://github.com/JHnlp/BioCreative-V-CDR-Corpus/raw/master/CDR_Data.zip"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
class HUNER_DISEASE_CDR(HunerDataset):
    """HUNER version of the CDR corpus containing disease annotations."""
    # NOTE: the docstring previously misidentified this as the IEPA corpus.

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/CDRDisease"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download CDR, merge all three splits and keep only disease annotations."""
        os.makedirs(str(data_dir), exist_ok=True)
        CDR.download_dataset(data_dir)
        corpus_dir = data_dir / "CDR_Data" / "CDR.Corpus.v010516"
        splits = [
            bioc_to_internal(corpus_dir / split_file)
            for split_file in ("CDR_TrainingSet.BioC.xml", "CDR_DevelopmentSet.BioC.xml", "CDR_TestSet.BioC.xml")
        ]
        all_data = merge_datasets(splits)
        return filter_and_map_entities(all_data, {"Disease": DISEASE_TAG})
class HUNER_CHEMICAL_CDR(HunerDataset):
    """HUNER version of the CDR corpus containing chemical annotations."""
    # NOTE: the docstring previously misidentified this as the IEPA corpus.

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/CDRChem"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download CDR, merge all three splits and keep only chemical annotations."""
        os.makedirs(str(data_dir), exist_ok=True)
        CDR.download_dataset(data_dir)
        corpus_dir = data_dir / "CDR_Data" / "CDR.Corpus.v010516"
        splits = [
            bioc_to_internal(corpus_dir / split_file)
            for split_file in ("CDR_TrainingSet.BioC.xml", "CDR_DevelopmentSet.BioC.xml", "CDR_TestSet.BioC.xml")
        ]
        all_data = merge_datasets(splits)
        return filter_and_map_entities(all_data, {"Chemical": CHEMICAL_TAG})
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class VARIOME(ColumnCorpus):
    """Variome corpus as provided by http://corpora.informatik.hu-berlin.de/corpora/brat2bioc/hvp_bioc.xml.zip.

    For further information see Verspoor et al.: Annotating the
    biomedical literature for the human variome
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3676157/
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the Variome corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments
            documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        # The corpus ships as one file; only a train split is produced
        if not (train_file.exists()):
            download_dir = data_folder / "original"
            os.makedirs(download_dir, exist_ok=True)
            self.download_dataset(download_dir)
            all_data = self.parse_corpus(download_dir / "hvp_bioc.xml")
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(all_data, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @staticmethod
    def download_dataset(data_dir: Path):
        """Download and unpack the Variome BioC archive into ``data_dir``."""
        data_url = "https://github.com/hu-ner/hunflair-corpora/raw/main/variome/hvp_bioc.xml.zip"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
    @staticmethod
    def parse_corpus(corpus_xml: Path) -> InternalBioNerDataset:
        """Parse the BioC XML and strip '** IGNORE LINE **' markers from the texts.

        All entity spans of a document are shifted left by the total number of
        removed characters; an assertion verifies each shifted span still covers
        the same surface text.

        NOTE(review): a single constant shift per document is only valid if the
        ignored lines occur before every annotation — the assertion below would
        fail otherwise; confirm against the raw data.
        """
        corpus = bioc_to_internal(corpus_xml)
        cleaned_documents = {}
        cleaned_entities_per_document = {}
        for id, document_text in corpus.documents.items():
            entities = corpus.entities_per_document[id]
            original_length = len(document_text)
            text_cleaned = document_text.replace("** IGNORE LINE **\n", "")
            # offset == total number of characters removed from this document
            offset = original_length - len(text_cleaned)
            if offset != 0:
                new_entities = []
                for entity in entities:
                    new_start = entity.char_span.start - offset
                    new_end = entity.char_span.stop - offset
                    new_entities.append(Entity((new_start, new_end), entity.type))
                    # Sanity check: the shifted span must select the same text
                    orig_text = document_text[entity.char_span.start : entity.char_span.stop]
                    new_text = text_cleaned[new_start:new_end]
                    assert orig_text == new_text
                entities = new_entities
                document_text = text_cleaned
            cleaned_documents[id] = document_text
            cleaned_entities_per_document[id] = entities
        return InternalBioNerDataset(
            documents=cleaned_documents,
            entities_per_document=cleaned_entities_per_document,
        )
class HUNER_GENE_VARIOME(HunerDataset):
    """HUNER version of the Variome corpus containing gene annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/variome_gene"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        os.makedirs(str(data_dir), exist_ok=True)
        VARIOME.download_dataset(data_dir)
        corpus = VARIOME.parse_corpus(data_dir / "hvp_bioc.xml")
        # Keep only gene mentions, mapped onto the shared gene tag.
        return filter_and_map_entities(corpus, {"gene": GENE_TAG})
class HUNER_DISEASE_VARIOME(HunerDataset):
    """HUNER version of the Variome corpus containing disease annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/variome_disease"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        os.makedirs(str(data_dir), exist_ok=True)
        VARIOME.download_dataset(data_dir)
        corpus = VARIOME.parse_corpus(data_dir / "hvp_bioc.xml")
        # Both "Disorder" and "disease" mentions are collapsed onto the disease tag.
        disease_mapping = {"Disorder": DISEASE_TAG, "disease": DISEASE_TAG}
        return filter_and_map_entities(corpus, disease_mapping)
class HUNER_SPECIES_VARIOME(HunerDataset):
    """HUNER version of the Variome corpus containing species annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/variome_species"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        os.makedirs(str(data_dir), exist_ok=True)
        VARIOME.download_dataset(data_dir)
        corpus = VARIOME.parse_corpus(data_dir / "hvp_bioc.xml")
        # "Living_Beings" mentions are mapped onto the shared species tag.
        return filter_and_map_entities(corpus, {"Living_Beings": SPECIES_TAG})
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class NCBI_DISEASE(ColumnCorpus):
    """Original NCBI disease corpus containing disease annotations.

    For further information see Dogan et al.:
    NCBI disease corpus: a resource for disease name recognition and concept normalization
    https://www.ncbi.nlm.nih.gov/pubmed/24393765
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the NCBI disease corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments
            documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL files are cached per sentence-splitter configuration
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{sentence_splitter.name}_dev.conll"
        test_file = data_folder / f"{sentence_splitter.name}_test.conll"
        if not (train_file.exists() and dev_file.exists() and test_file.exists()):
            orig_folder = self.download_corpus(data_folder)
            train_data = self.parse_input_file(orig_folder / "NCBItrainset_patched.txt")
            dev_data = self.parse_input_file(orig_folder / "NCBIdevelopset_corpus.txt")
            test_data = self.parse_input_file(orig_folder / "NCBItestset_corpus.txt")
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(dev_data, dev_file)
            conll_writer.write_to_conll(test_data, test_file)
        super().__init__(data_folder, columns, in_memory=in_memory)

    @classmethod
    def download_corpus(cls, data_dir: Path) -> Path:
        """Download all three corpus splits and patch the training file.

        :return: Folder holding the unpacked (and patched) corpus files.
        """
        original_folder = data_dir / "original"
        os.makedirs(str(original_folder), exist_ok=True)
        data_urls = [
            "https://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/NCBItrainset_corpus.zip",
            "https://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/NCBIdevelopset_corpus.zip",
            "https://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/NCBItestset_corpus.zip",
        ]
        for url in data_urls:
            data_path = cached_path(url, original_folder)
            unpack_file(data_path, original_folder)
        # We need to apply a patch to correct the original training file
        orig_train_file = original_folder / "NCBItrainset_corpus.txt"
        patched_train_file = original_folder / "NCBItrainset_patched.txt"
        cls.patch_training_file(orig_train_file, patched_train_file)
        return original_folder

    @staticmethod
    def patch_training_file(orig_train_file: Path, patched_file: Path):
        """Copy the training file, replacing the known faulty annotation on line 3249."""
        patch_lines = {
            3249: '10923035\t711\t761\tgeneralized epilepsy and febrile seizures " plus "\tSpecificDisease\tD004829+D003294\n'
        }
        with orig_train_file.open(encoding="utf-8") as input, patched_file.open("w", encoding="utf-8") as output:
            line_no = 1
            for line in input:
                output.write(patch_lines[line_no] if line_no in patch_lines else line)
                line_no += 1

    @staticmethod
    def parse_input_file(input_file: Path):
        """Parse a PubTator-formatted split file into an :class:`InternalBioNerDataset`.

        Each document consists of a "<pmid>|t|<title>" line, a "<pmid>|a|<abstract>"
        line and tab-separated annotation lines; documents are separated by blank lines.
        """
        documents = {}
        entities_per_document = {}
        with open(str(input_file), encoding="utf8") as file:
            document_id = ""
            document_text = ""
            entities: List[Entity] = []
            c = 1
            for line in file:
                line = line.strip()
                if not line:
                    # Blank line closes the current document
                    if document_id and document_text:
                        documents[document_id] = document_text
                        entities_per_document[document_id] = entities
                    document_id, document_text, entities = "", "", []
                    c = 1
                    continue
                if c == 1:
                    # Article title. BUGFIX: split at most twice so that a "|"
                    # inside the title text itself is not truncated.
                    parts = line.split("|", 2)
                    document_id = parts[0]
                    document_text = parts[2] + " "
                elif c == 2:
                    # Article abstract (offsets continue after "<title> ")
                    document_text += line.split("|", 2)[2]
                else:
                    # Annotation line: pmid, start, end, mention text, type, concept id
                    columns = line.split("\t")
                    start = int(columns[1])
                    end = int(columns[2])
                    entity_text = columns[3]
                    assert document_text[start:end] == entity_text
                    entities.append(Entity((start, end), DISEASE_TAG))
                c += 1
            # Flush the last document if the file does not end with a blank line
            if c != 1 and document_id and document_text:
                documents[document_id] = document_text
                entities_per_document[document_id] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_DISEASE_NCBI(HunerDataset):
    """HUNER version of the NCBI corpus containing disease annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/ncbi"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        # Download once, then parse all three official splits (the patched
        # training file, development and test) and merge them.
        corpus_dir = NCBI_DISEASE.download_corpus(data_dir)
        split_files = (
            "NCBItrainset_patched.txt",
            "NCBIdevelopset_corpus.txt",
            "NCBItestset_corpus.txt",
        )
        return merge_datasets([NCBI_DISEASE.parse_input_file(corpus_dir / name) for name in split_files])
class ScaiCorpus(ColumnCorpus):
    """Base class to support the SCAI chemicals and disease corpora."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the SCAI corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments
            documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        # Download and convert only once; reuse the cached CoNLL file afterwards
        if not (train_file.exists()):
            dataset_file = self.download_corpus(data_folder)
            train_data = self.parse_input_file(dataset_file)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)

    def download_corpus(self, data_folder: Path) -> Path:
        """Download the concrete corpus file and return its path (implemented by subclasses)."""
        raise NotImplementedError

    @staticmethod
    def parse_input_file(input_file: Path):
        """Parse a SCAI IOB file into an :class:`InternalBioNerDataset`.

        Lines starting with "###" open a new document; all other lines hold one
        token whose IOB tag is taken from the "|"-separated fifth column.
        Document texts are rebuilt by joining tokens with single spaces, and
        entity offsets refer to this rebuilt text.
        """
        documents: Dict[str, str] = {}
        entities_per_document: Dict[str, List[Entity]] = {}
        with open(str(input_file), encoding="iso-8859-1") as file:
            document_id = None
            document_text = ""
            entities: List[Entity] = []
            entity_type = None
            entity_start = 0
            for line in file:
                line = line.strip()
                if not line:
                    continue
                if line[:3] == "###":
                    # Edge case: last token starts a new entity
                    if entity_type is not None:
                        entities.append(Entity((entity_start, len(document_text)), entity_type))
                        entity_type = None
                    if not (document_id is None or document_text is None):
                        documents[document_id] = document_text
                        entities_per_document[document_id] = entities
                    document_id = line.strip("#").strip()
                    document_text = ""
                    entities = []
                else:
                    columns = line.strip().split("\t")
                    token = columns[0].strip()
                    tag = columns[4].strip().split("|")[1]
                    if tag.startswith("B-"):
                        # A new entity begins; close a still-open one first
                        if entity_type is not None:
                            entities.append(Entity((entity_start, len(document_text)), entity_type))
                        entity_start = len(document_text) + 1 if document_text else 0
                        entity_type = tag[2:]
                    elif tag == "O" and entity_type is not None:
                        entities.append(Entity((entity_start, len(document_text)), entity_type))
                        entity_type = None
                    # BUGFIX: the previous condition was `document_text is not None`,
                    # which is always true (document_text starts as ""), so every
                    # document got a spurious leading space and first-token entity
                    # offsets were off by one.
                    document_text = document_text + " " + token if document_text else token
            # BUGFIX: flush the final document; previously only documents followed
            # by another "###" header were stored, silently dropping the last one.
            if entity_type is not None:
                entities.append(Entity((entity_start, len(document_text)), entity_type))
            if document_id is not None:
                documents[document_id] = document_text
                entities_per_document[document_id] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class SCAI_CHEMICALS(ScaiCorpus):
    """Original SCAI chemicals corpus containing chemical annotations.

    For further information see Kolářik et al.: Chemical Names:
    Terminological Resources and Corpora Annotation
    https://pub.uni-bielefeld.de/record/2603498
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def download_corpus(self, data_dir: Path) -> Path:
        # Delegate to the static helper so the HUNER variant can reuse it.
        return self.perform_corpus_download(data_dir)

    @staticmethod
    def perform_corpus_download(data_dir: Path) -> Path:
        """Download and unpack the SCAI chemicals corpus; return the IOB file path."""
        target_dir = data_dir / "original"
        os.makedirs(str(target_dir), exist_ok=True)
        url = "https://www.scai.fraunhofer.de/content/dam/scai/de/downloads/bioinformatik/Corpora-for-Chemical-Entity-Recognition/chemicals-test-corpus-27-04-2009-v3_iob.gz"
        archive_path = cached_path(url, target_dir)
        iob_file = target_dir / "chemicals-test-corpus-27-04-2009-v3.iob"
        unpack_file(archive_path, iob_file)
        return iob_file
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class SCAI_DISEASE(ScaiCorpus):
    """Original SCAI disease corpus containing disease annotations.

    For further information see Gurulingappa et al.: An Empirical
    Evaluation of Resources for the Identification of Diseases and
    Adverse Effects in Biomedical Literature
    https://pub.uni-bielefeld.de/record/2603398
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def download_corpus(self, data_dir: Path) -> Path:
        # Delegate to the static helper so the HUNER variant can reuse it.
        return self.perform_corpus_download(data_dir)

    @staticmethod
    def perform_corpus_download(data_dir: Path) -> Path:
        """Download the SCAI disease IOB file and return its path."""
        target_dir = data_dir / "original"
        os.makedirs(str(target_dir), exist_ok=True)
        url = "https://www.scai.fraunhofer.de/content/dam/scai/de/downloads/bioinformatik/Disease-ae-corpus.iob"
        return cached_path(url, target_dir)
class HUNER_CHEMICAL_SCAI(HunerDataset):
    """HUNER version of the SCAI chemicals corpus containing chemical annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/scai_chemicals"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        corpus_file = SCAI_CHEMICALS.perform_corpus_download(data_dir)
        parsed_corpus = ScaiCorpus.parse_input_file(corpus_file)

        # Collapse every chemical-related subtype onto the generic chemical tag.
        chemical_subtypes = (
            "FAMILY",
            "TRIVIALVAR",
            "PARTIUPAC",
            "TRIVIAL",
            "ABBREVIATION",
            "IUPAC",
            "MODIFIER",
            "SUM",
        )
        return filter_and_map_entities(parsed_corpus, {name: CHEMICAL_TAG for name in chemical_subtypes})
class HUNER_DISEASE_SCAI(HunerDataset):
    """HUNER version of the SCAI disease corpus containing disease annotations."""
    # NOTE: the docstring previously misdescribed this as the chemicals variant.

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/scai_disease"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download and parse the SCAI disease corpus, mapping all entities to the disease tag."""
        original_file = SCAI_DISEASE.perform_corpus_download(data_dir)
        corpus = ScaiCorpus.parse_input_file(original_file)
        # Map both disease and adverse-effect mentions onto the disease tag
        entity_mapping = {"DISEASE": DISEASE_TAG, "ADVERSE": DISEASE_TAG}
        return filter_and_map_entities(corpus, entity_mapping)
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class OSIRIS(ColumnCorpus):
    """Original OSIRIS corpus containing variation and gene annotations.

    For further information see Furlong et al.: Osiris v1.2: a named
    entity recognition system for sequence variants of genes in
    biomedical literature
    https://www.ncbi.nlm.nih.gov/pubmed/18251998
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        load_original_unfixed_annotation=False,
    ) -> None:
        """Initialize the OSIRIS corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which
            segments documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        :param load_original_unfixed_annotation: The original annotation of Osiris
            erroneously annotates two sentences as a protein. Set to True if you don't
            want the fixed version.
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        # Download and convert only once; reuse the cached CoNLL file afterwards
        if not (train_file.exists()):
            corpus_folder = self.download_dataset(data_folder)
            corpus_data = self.parse_dataset(corpus_folder, fix_annotation=not load_original_unfixed_annotation)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(corpus_data, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @classmethod
    def download_dataset(cls, data_dir: Path) -> Path:
        """Download and unpack the OSIRIS v0.2 archive; returns ``data_dir``."""
        url = "https://github.com/hu-ner/hunflair-corpora/raw/main/osiris/OSIRIScorpusv02.tar"
        data_path = cached_path(url, data_dir)
        unpack_file(data_path, data_dir)
        return data_dir
    @classmethod
    def parse_dataset(cls, corpus_folder: Path, fix_annotation=True):
        """Parse the OSIRIS article/annotation file pairs into an :class:`InternalBioNerDataset`.

        Each article file ("*.txt") holds blank-line separated parts: the first
        is used as the document id, the next two are joined with a single space
        to form the document text. The companion "*.txt.ann" XML file carries
        "start..end" spans relative to the article file; spans are shifted by
        the offset of the second part within that file.

        NOTE(review): the parts are re-joined with a single space although they
        are separated by a blank line in the source file — confirm against the
        raw data that spans in the third part still line up after this join.

        :param fix_annotation: If True, repairs the known over-long annotation
            in article46.txt (span 289..644 is truncated to end at 295).
        """
        documents = {}
        entities_per_document = {}
        input_files = [
            file for file in os.listdir(str(corpus_folder)) if file.endswith(".txt") and not file.startswith("README")
        ]
        for text_file in input_files:
            with open(os.path.join(str(corpus_folder), text_file), encoding="utf8") as text_reader:
                document_text = text_reader.read()
                if not document_text:
                    continue
                article_parts = document_text.split("\n\n")
                document_id = article_parts[0]
                # Annotation spans are relative to the raw file; remember where
                # the retained text begins so they can be re-based below
                text_offset = document_text.find(article_parts[1])
                document_text = (article_parts[1] + " " + article_parts[2]).strip()
            with open(os.path.join(str(corpus_folder), text_file + ".ann"), encoding="utf8") as ann_file:
                entities = []
                tree = etree.parse(ann_file)
                for annotation in tree.xpath(".//Annotation"):
                    entity_type = annotation.get("type")
                    # "file"-typed annotations are metadata, not entity mentions
                    if entity_type == "file":
                        continue
                    start, end = annotation.get("span").split("..")
                    start, end = int(start), int(end)
                    # Repair the known faulty over-long annotation in article46
                    if fix_annotation and text_file == "article46.txt" and start == 289 and end == 644:
                        end = 295
                    entities.append(Entity((start - text_offset, end - text_offset), entity_type))
            documents[document_id] = document_text
            entities_per_document[document_id] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_GENE_OSIRIS(HunerDataset):
    """HUNER variant of the OSIRIS corpus restricted to gene annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/osiris"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        # Fetch and parse the raw corpus, keeping only "ge" entities
        # normalized to GENE_TAG.
        download_root = OSIRIS.download_dataset(data_dir)
        parsed = OSIRIS.parse_dataset(download_root / "OSIRIScorpusv02")
        return filter_and_map_entities(parsed, {"ge": GENE_TAG})
class S800(ColumnCorpus):
    """S800 corpus.

    For further information see
    Pafilis et al.: The SPECIES and ORGANISMS Resources for Fast and Accurate Identification of Taxonomic Names in Text
    http://www.plosone.org/article/info:doi%2F10.1371%2Fjournal.pone.0065390.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the S800 corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments documents
            into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL file is cached per sentence splitter name.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        if not train_file.exists():
            download_dir = data_folder / "original"
            os.makedirs(download_dir, exist_ok=True)
            self.download_dataset(download_dir)
            all_data = self.parse_dataset(download_dir)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(all_data, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)

    @staticmethod
    def download_dataset(data_dir: Path):
        """Download and unpack the S800 archive into ``data_dir``."""
        data_url = "https://species.jensenlab.org/files/S800-1.0.tar.gz"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)

    @staticmethod
    def parse_dataset(data_dir: Path) -> InternalBioNerDataset:
        """Parse ``S800.tsv`` and the per-document abstract files.

        Column 1 of each annotation line is ``<fname>:<pmid>``; columns 2 and 3
        are character offsets into ``abstracts/<fname>.txt``.
        """
        entities_per_document = defaultdict(list)
        texts_per_document = {}
        with (data_dir / "S800.tsv").open(encoding="utf8") as f:
            for line in f:
                # BUGFIX: the previous guard was ``if not fields`` after
                # ``line.strip().split("\t")`` -- but splitting an empty string
                # yields ``[""]`` (truthy), so blank lines were never skipped
                # and crashed with an IndexError on ``fields[1]`` below.
                if not line.strip():
                    continue
                fields = line.strip().split("\t")
                fname, _pmid = fields[1].split(":")
                start, end = int(fields[2]), int(fields[3])
                if start == end:
                    continue  # Illegal annotation (empty span)
                entities_per_document[fname].append(Entity((start, end), "Species"))
        for fname in entities_per_document:
            with (data_dir / "abstracts" / fname).with_suffix(".txt").open(encoding="utf8") as f:
                texts_per_document[fname] = f.read()
        return InternalBioNerDataset(documents=texts_per_document, entities_per_document=entities_per_document)
class HUNER_SPECIES_S800(HunerDataset):
    """HUNER flavour of the S800 corpus (species annotations only)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/s800"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        # Download, parse, then normalize "Species" annotations to SPECIES_TAG.
        S800.download_dataset(data_dir)
        parsed = S800.parse_dataset(data_dir)
        return filter_and_map_entities(parsed, {"Species": SPECIES_TAG})
class GPRO(ColumnCorpus):
    """Original GPRO corpus containing gene annotations.
    For further information see:
    https://biocreative.bioinformatics.udel.edu/tasks/biocreative-v/gpro-detailed-task-description/
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the GPRO corpus.
        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments documents
            into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL files are cached per sentence splitter name.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{sentence_splitter.name}_dev.conll"
        if not (train_file.exists() and dev_file.exists()):
            train_folder = self.download_train_corpus(data_folder)
            train_text_file = train_folder / "chemdner_patents_train_text.txt"
            train_ann_file = train_folder / "chemdner_gpro_gold_standard_train_v02.tsv"
            train_data = self.parse_input_file(train_text_file, train_ann_file)
            dev_folder = self.download_dev_corpus(data_folder)
            dev_text_file = dev_folder / "chemdner_patents_development_text.txt"
            dev_ann_file = dev_folder / "chemdner_gpro_gold_standard_development.tsv"
            dev_data = self.parse_input_file(dev_text_file, dev_ann_file)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(dev_data, dev_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @classmethod
    def download_train_corpus(cls, data_dir: Path) -> Path:
        """Download and unpack the GPRO training set; return the unpacked folder."""
        corpus_dir = data_dir / "original"
        os.makedirs(str(corpus_dir), exist_ok=True)
        train_url = "https://biocreative.bioinformatics.udel.edu/media/store/files/2015/gpro_training_set_v02.tar.gz"
        data_path = cached_path(train_url, corpus_dir)
        unpack_file(data_path, corpus_dir)
        return corpus_dir / "gpro_training_set_v02"
    @classmethod
    def download_dev_corpus(cls, data_dir: Path) -> Path:
        """Download and unpack the GPRO development set; return the unpacked folder."""
        corpus_dir = data_dir / "original"
        os.makedirs(str(corpus_dir), exist_ok=True)
        dev_url = "https://biocreative.bioinformatics.udel.edu/media/store/files/2015/gpro_development_set.tar.gz"
        data_path = cached_path(dev_url, corpus_dir)
        unpack_file(data_path, corpus_dir)
        return corpus_dir / "gpro_development_set"
    @staticmethod
    def parse_input_file(text_file: Path, ann_file: Path) -> InternalBioNerDataset:
        """Parse a GPRO text/annotation file pair into an internal dataset.
        The text file has one document per line (``id<TAB>title<TAB>abstract``);
        the document text is rebuilt as ``title + " " + abstract``.  Annotation
        offsets flagged "A" are re-based past the title (they appear to be
        abstract-relative -- verified by the assert below); others are used as-is.
        """
        documents = {}
        entities_per_document: Dict[str, List[Entity]] = {}
        document_title_length: Dict[str, int] = {}
        with open(str(text_file), encoding="utf8") as text_reader:
            for line in text_reader:
                # NOTE(review): lines read from a file always contain at least
                # "\n", so this guard never triggers for blank lines.
                if not line:
                    continue
                document_id, title, abstract = line.split("\t")
                documents[document_id] = title + " " + abstract
                # +1 accounts for the separating space inserted above.
                document_title_length[document_id] = len(title) + 1
                entities_per_document[document_id] = []
        with open(str(ann_file), encoding="utf8") as ann_reader:
            for line in ann_reader:
                if not line:
                    continue
                columns = line.split("\t")
                document_id = columns[0]
                start, end = int(columns[2]), int(columns[3])
                if columns[1] == "A":
                    start = start + document_title_length[document_id]
                    end = end + document_title_length[document_id]
                entities_per_document[document_id].append(Entity((start, end), GENE_TAG))
                document_text = documents[document_id]
                # Sanity check: the recorded mention must match the span text.
                assert columns[4] == document_text[start:end]
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_GENE_GPRO(HunerDataset):
    """HUNER variant of the GPRO corpus (gene annotations)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/gpro"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        # Parse the train and dev portions the same way, then merge them.
        parts = []
        train_dir = GPRO.download_train_corpus(data_dir)
        parts.append(
            GPRO.parse_input_file(
                train_dir / "chemdner_patents_train_text.txt",
                train_dir / "chemdner_gpro_gold_standard_train_v02.tsv",
            )
        )
        dev_dir = GPRO.download_dev_corpus(data_dir)
        parts.append(
            GPRO.parse_input_file(
                dev_dir / "chemdner_patents_development_text.txt",
                dev_dir / "chemdner_gpro_gold_standard_development.tsv",
            )
        )
        return merge_datasets(parts)
class DECA(ColumnCorpus):
    """Original DECA corpus containing gene annotations.

    For further information see Wang et al.: Disambiguating the
    species of biomedical named entities using natural language parsers
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2828111/
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the DECA corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments
            documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        train_file = data_folder / "train.conll"
        if not train_file.exists():
            corpus_dir = self.download_corpus(data_folder)
            text_dir = corpus_dir / "text"
            gold_file = corpus_dir / "gold.txt"
            corpus_data = self.parse_corpus(text_dir, gold_file)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(corpus_data, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)

    @classmethod
    def download_corpus(cls, data_dir: Path) -> Path:
        """Download and unpack the DECA species corpus; return the unpacked folder."""
        url = "http://www.nactem.ac.uk/deca/species_corpus_0.2.tar.gz"
        data_path = cached_path(url, data_dir)
        unpack_file(data_path, data_dir)
        return data_dir / "species_corpus_0.2"

    @staticmethod
    def _document_id_from_filename(filename: str) -> str:
        """Remove a trailing ``.txt`` extension (and only that) from *filename*.

        BUGFIX: the previous code used ``filename.strip(".txt")``, which strips
        any run of the characters ".", "t", "x" from BOTH ends of the string and
        can therefore mangle document ids that begin or end with those
        characters.  Only the real suffix is removed now; text-file ids and
        gold-file ids go through the same function so they stay consistent.
        """
        return filename[: -len(".txt")] if filename.endswith(".txt") else filename

    @staticmethod
    def parse_corpus(text_dir: Path, gold_file: Path) -> InternalBioNerDataset:
        """Parse document texts and the gold annotation file.

        Each ``gold.txt`` line has the format
        ``<file>.txt<TAB><start><TAB><end><TAB><mention>`` with character
        offsets into the stripped document text; the assert below verifies
        the alignment.
        """
        documents: Dict[str, str] = {}
        entities_per_document: Dict[str, List[Entity]] = {}
        text_files = [file for file in os.listdir(str(text_dir)) if not file.startswith(".")]
        for file in text_files:
            document_id = DECA._document_id_from_filename(file)
            with open(os.path.join(str(text_dir), file), encoding="utf8") as text_file:
                documents[document_id] = text_file.read().strip()
                entities_per_document[document_id] = []
        with open(str(gold_file), encoding="utf8") as gold_reader:
            for line in gold_reader:
                # Lines read from a file always contain at least "\n", so an
                # explicit strip is needed to actually skip blank lines.
                if not line.strip():
                    continue
                columns = line.strip().split("\t")
                document_id = DECA._document_id_from_filename(columns[0])
                start, end = int(columns[1]), int(columns[2])
                entities_per_document[document_id].append(Entity((start, end), GENE_TAG))
                document_text = documents[document_id]
                assert document_text[start:end] == columns[3]
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_GENE_DECA(HunerDataset):
    """HUNER flavour of the DECA corpus (gene annotations)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/deca"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        # Download the corpus and hand the text folder plus gold file to the parser.
        corpus_root = DECA.download_corpus(data_dir)
        return DECA.parse_corpus(corpus_root / "text", corpus_root / "gold.txt")
class FSU(ColumnCorpus):
    """Original FSU corpus containing protein and derived annotations.
    For further information see
    Hahn et al.: A proposal for a configurable silver standard
    https://www.aclweb.org/anthology/W10-1838/
    """
    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        """Initialize the FSU corpus.
        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # Sentences are joined with SENTENCE_TAG during parsing, so a tag-based
        # splitter can recover the original sentence boundaries exactly.
        sentence_splitter = TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SpaceTokenizer())
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        if not train_file.exists():
            corpus_dir = self.download_corpus(data_folder)
            corpus_data = self.parse_corpus(corpus_dir, SENTENCE_TAG)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(corpus_data, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @classmethod
    def download_corpus(cls, data_dir: Path) -> Path:
        """Download and unpack the FSU PRGE release; return the unpacked folder."""
        url = "https://julielab.de/downloads/resources/fsu_prge_release_v1_0.tgz"
        data_path = cached_path(url, data_dir)
        unpack_file(data_path, data_dir, mode="targz")
        return data_dir / "fsu-prge-release-v1.0"
    @staticmethod
    def parse_corpus(corpus_dir: Path, sentence_separator: str) -> InternalBioNerDataset:
        """Parse the MMAX2-style FSU corpus.
        Each document consists of a token list (``Basedata/Basedata.xml``) plus
        sentence spans and protein markables (``Markables/*.xml``) that refer to
        token ids.  Tokens are rejoined with single spaces, sentences are joined
        with ``sentence_separator``, and entity offsets are recomputed onto the
        rebuilt document text.
        """
        documents = {}
        entities_per_document = {}
        for subcorpus in corpus_dir.iterdir():
            if not subcorpus.is_dir():
                continue
            for doc in (subcorpus / "mmax").iterdir():
                if not doc.is_dir():
                    continue
                try:
                    with open(doc / "Basedata" / "Basedata.xml", encoding="utf8") as word_f:
                        word_tree = etree.parse(word_f)
                    with open(doc / "Markables" / "sentence.xml", encoding="utf8") as sentence_f:
                        sentence_tree = etree.parse(sentence_f).getroot()
                    with open(doc / "Markables" / "proteins.xml", encoding="utf8") as protein_f:
                        protein_tree = etree.parse(protein_f).getroot()
                    with open(doc / "Basedata.uri", encoding="utf8") as id_f:
                        document_id = id_f.read().strip()
                except FileNotFoundError:
                    # Incomplete article
                    continue
                except XMLSyntaxError:
                    # Invalid XML syntax
                    continue
                # Map each token id to its index in the token sequence.
                word_to_id = {}
                words = []
                for i, token in enumerate(word_tree.xpath(".//word")):
                    words += [token.text]
                    word_to_id[token.get("id")] = i
                # word_pos[i] = (sentence index, char offset within that sentence).
                word_pos = [(0, 0) for _ in words]
                sentences_id_span = sorted(
                    [(int(sentence.get("id").split("_")[-1]), sentence.get("span")) for sentence in sentence_tree]
                )
                sentences = []
                for j, sentence in enumerate(sentences_id_span):
                    tmp_sentence = []
                    akt_pos = 0
                    # Sentence spans have the form "first_token_id..last_token_id".
                    start = word_to_id[sentence[1].split("..")[0]]
                    end = word_to_id[sentence[1].split("..")[1]]
                    for i in range(start, end + 1):
                        tmp_sentence += [words[i]]
                        word_pos[i] = (j, akt_pos)
                        akt_pos += len(words[i]) + 1  # +1 for the joining space
                    sentences += [tmp_sentence]
                # Collect entities per sentence as (start, end, type) triples
                # using the sentence-local character offsets computed above.
                pre_entities: List[List[Tuple[int, int, str]]] = [[] for _ in sentences]
                for protein in protein_tree:
                    for span in protein.get("span").split(","):
                        start = word_to_id[span.split("..")[0]]
                        end = word_to_id[span.split("..")[-1]]
                        pre_entities[word_pos[start][0]] += [
                            (
                                word_pos[start][1],
                                word_pos[end][1] + len(words[end]),
                                protein.get("proteins"),
                            )
                        ]
                sentence_texts = [" ".join(sentence) for sentence in sentences]
                document = sentence_separator.join(sentence_texts)
                # Re-base the sentence-local offsets onto the joined document.
                entities = []
                sent_offset = 0
                for sent, sent_entities in zip(sentence_texts, pre_entities):
                    entities += [
                        Entity(
                            (start + sent_offset, end + sent_offset),
                            ent_type,
                        )
                        for (start, end, ent_type) in sent_entities
                    ]
                    sent_offset += len(sent) + len(sentence_separator)
                documents[document_id] = document
                entities_per_document[document_id] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_GENE_FSU(HunerDataset):
    """HUNER variant of the FSU corpus restricted to gene annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/fsu"

    def get_corpus_sentence_splitter(self) -> SentenceSplitter:
        return TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=SciSpacyTokenizer())

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        corpus_root = FSU.download_corpus(data_dir)

        # When a tag-based splitter is used, join sentences with its tag so the
        # split boundaries line up with the original sentence boundaries.
        separator = " "
        if isinstance(self.sentence_splitter, TagSentenceSplitter):
            separator = self.sentence_splitter.tag

        parsed = FSU.parse_corpus(corpus_root, separator)

        # All protein-derived annotation types collapse onto GENE_TAG
        # (the misspelled "familiy" key matches the corpus data as-is).
        protein_types = (
            "protein",
            "protein_familiy_or_group",
            "protein_complex",
            "protein_variant",
            "protein_enum",
        )
        return filter_and_map_entities(parsed, {name: GENE_TAG for name in protein_types})
class CRAFT(ColumnCorpus):
    """Original CRAFT corpus (version 2.0) containing all but the coreference and sections/typography annotations.
    For further information see Bada et al.: Concept annotation in the
    craft corpus
    https://bmcbioinformatics.biomedcentral.com/articles/10.1186/1471-2105-13-161
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the CRAFT corpus.
        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments documents
            into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL file is cached per sentence splitter name.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        if not train_file.exists():
            corpus_dir = self.download_corpus(data_folder)
            corpus_data = self.parse_corpus(corpus_dir)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(corpus_data, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @classmethod
    def download_corpus(cls, data_dir: Path) -> Path:
        """Download and unpack CRAFT v2.0; return the unpacked folder."""
        url = "http://sourceforge.net/projects/bionlp-corpora/files/CRAFT/v2.0/craft-2.0.tar.gz/download"
        data_path = cached_path(url, data_dir)
        unpack_file(data_path, data_dir, mode="targz")
        return data_dir / "craft-2.0"
    @staticmethod
    def parse_corpus(corpus_dir: Path) -> InternalBioNerDataset:
        """Parse CRAFT article texts plus per-directory XML annotation files.
        Every annotation directory under ``xml/`` (except sections-and-typography
        and coreference) contributes entities; the directory name is used as the
        entity type.  Spans are character offsets into the raw article text.
        """
        documents = {}
        entities_per_document = {}
        text_dir = corpus_dir / "articles" / "txt"
        document_texts = [doc for doc in text_dir.iterdir() if doc.name[-4:] == ".txt"]
        annotation_dirs = [
            path
            for path in (corpus_dir / "xml").iterdir()
            if path.name not in ["sections-and-typography", "coreference"]
        ]
        for doc in Tqdm.tqdm(document_texts, desc="Converting to internal"):
            document_id = doc.name.split(".")[0]
            with open(doc, encoding="utf8") as f_txt:
                documents[document_id] = f_txt.read()
            entities = []
            for annotation_dir in annotation_dirs:
                with open(
                    annotation_dir / (doc.name + ".annotations.xml"),
                    encoding="utf8",
                ) as f_ann:
                    ann_tree = etree.parse(f_ann)
                    # One annotation may consist of several (discontinuous) spans;
                    # each span becomes its own Entity.
                    for annotation in ann_tree.xpath("//annotation"):
                        for span in annotation.xpath("span"):
                            start = int(span.get("start"))
                            end = int(span.get("end"))
                            entities += [Entity((start, end), annotation_dir.name)]
            entities_per_document[document_id] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class BIOSEMANTICS(ColumnCorpus):
    """Original Biosemantics corpus.
    For further information see Akhondi et al.: Annotated chemical
    patent corpus: a gold standard for text mining
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4182036/
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the Biosemantics corpus.
        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments documents
            into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL file is cached per sentence splitter name.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        if not (train_file.exists()):
            corpus_dir = self.download_dataset(data_folder)
            full_dataset = self.parse_dataset(corpus_dir)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(full_dataset, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @staticmethod
    def download_dataset(data_dir: Path) -> Path:
        """Download and unpack the patent corpus archive; return the unpacked folder."""
        data_url = "http://biosemantics.erasmusmc.nl/PatentCorpus/Patent_Corpus.rar"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
        return data_dir / "Patent_Corpus"
    @staticmethod
    def parse_dataset(data_dir: Path) -> InternalBioNerDataset:
        """Parse the ``Full_set`` text/annotation file pairs.
        Several ``*.txt`` files can share one document id (the prefix before the
        first ``_`` in the file name); their texts are concatenated and the
        annotation offsets of later files are re-based via ``offset``.  Files
        whose annotations cannot be aligned with the text are skipped entirely
        (``dirty_file``).
        """
        base_folder = data_dir / "Full_set"
        dirs = [file for file in os.listdir(str(base_folder)) if os.path.isdir(os.path.join(str(base_folder), file))]
        text_files = []
        for directory in dirs:
            text_files += [
                os.path.join(str(base_folder), directory, file)
                for file in os.listdir(os.path.join(str(base_folder), directory))
                if file[-4:] == ".txt"
            ]
        text_files = sorted(text_files)
        documents: Dict[str, str] = {}
        entities_per_document: Dict[str, List[Entity]] = {}
        for text_file in sorted(text_files):
            document_id = os.path.basename(text_file).split("_")[0]
            with open(text_file, encoding="utf8") as file_reader:
                file_text = file_reader.read().replace("\n", " ")
            # If earlier files already contributed text for this document id,
            # append the new text and shift annotation offsets accordingly.
            offset = 0
            document_text = ""
            if document_id in documents:
                document_text = documents[document_id] + " "
                offset = len(document_text)
            tmp_document_text = document_text + file_text
            entities = []
            dirty_file = False
            with open(text_file[:-4] + ".ann", encoding="utf8") as file_reader:
                for line in file_reader:
                    if line[-1] == "\n":
                        line = line[:-1]
                    if not line:
                        continue
                    # Brat-style line: id <TAB> "type start end" <TAB> mention.
                    columns = line.split("\t")
                    mid = columns[1].split()
                    # if len(mid) != 3:
                    #     continue
                    entity_type, _start, _end = mid[:3]
                    # Discontinuous spans ("start;start2") keep only the first part.
                    start, end = int(_start.split(";")[0]), int(_end.split(";")[0])
                    if start == end:
                        continue
                    # Try to fix entity offsets
                    if tmp_document_text[offset + start : offset + end] != columns[2]:
                        alt_text = tmp_document_text[offset + start : offset + start + len(columns[2])]
                        if alt_text == columns[2]:
                            end = start + len(columns[2])
                    # Still misaligned -> mark the whole file as unusable.
                    if file_text[start:end] != columns[2]:
                        dirty_file = True
                        continue
                    if tmp_document_text[offset + start : offset + end] != columns[2]:
                        dirty_file = True
                        continue
                    entities.append(Entity((offset + start, offset + end), entity_type))
            if not dirty_file:
                documents[document_id] = tmp_document_text
                if document_id in entities_per_document:
                    entities_per_document[document_id] += entities
                else:
                    entities_per_document[document_id] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class BC2GM(ColumnCorpus):
    """Original BioCreative-II-GM corpus containing gene annotations.
    For further information see Smith et al.: Overview of
    BioCreative II gene mention recognition
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2559986/
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the BioCreative-II-GM corpus.
        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments documents
            into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL files are cached per sentence splitter name.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        test_file = data_folder / f"{sentence_splitter.name}_test.conll"
        if not (train_file.exists() and test_file.exists()):
            data_folder = self.download_dataset(data_folder)
            train_data = self.parse_train_dataset(data_folder)
            test_data = self.parse_test_dataset(data_folder)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(test_data, test_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @staticmethod
    def download_dataset(data_dir: Path) -> Path:
        """Download and unpack both the train and the test archive."""
        data_url = "https://biocreative.bioinformatics.udel.edu/media/store/files/2011/bc2GMtrain_1.1.tar.gz"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
        data_url = "https://biocreative.bioinformatics.udel.edu/media/store/files/2011/bc2GMtest_1.0.tar.gz"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
        return data_dir
    @classmethod
    def parse_train_dataset(cls, data_folder: Path) -> InternalBioNerDataset:
        """Parse the training split (unpacked under ``bc2geneMention``)."""
        train_text_file = data_folder / "bc2geneMention" / "train" / "train.in"
        train_ann_file = data_folder / "bc2geneMention" / "train" / "GENE.eval"
        return cls.parse_dataset(train_text_file, train_ann_file)
    @classmethod
    def parse_test_dataset(cls, data_folder: Path) -> InternalBioNerDataset:
        """Parse the test split (unpacked under ``BC2GM``)."""
        test_text_file = data_folder / "BC2GM" / "test" / "test.in"
        test_ann_file = data_folder / "BC2GM" / "test" / "GENE.eval"
        return cls.parse_dataset(test_text_file, test_ann_file)
    @staticmethod
    def parse_dataset(text_file: Path, ann_file: Path) -> InternalBioNerDataset:
        """Parse a sentence file plus its ``GENE.eval`` annotations.
        ``GENE.eval`` offsets count only NON-whitespace characters; the loop
        below walks the sentence once to convert them into true character
        offsets into the sentence text.
        """
        documents = {}
        entities_per_document: Dict[str, List[Entity]] = {}
        with open(str(text_file), encoding="utf8") as text_file_reader:
            for line in text_file_reader:
                line = line.strip()
                # Each line is "<sentence-id> <sentence-text>".
                offset = line.find(" ")
                document_id = line[:offset]
                document_text = line[offset + 1 :]
                documents[document_id] = document_text
                entities_per_document[document_id] = []
        with open(str(ann_file), encoding="utf8") as ann_file_reader:
            for line in ann_file_reader:
                columns = line.strip().split("|")
                document_id = columns[0]
                document_text = documents[document_id]
                start_idx, end_idx = (int(i) for i in columns[1].split())
                # Convert non-whitespace offsets into character offsets by
                # counting non-space characters while scanning the sentence.
                non_whitespaces_chars = 0
                new_start_idx = None
                new_end_idx = None
                for i, char in enumerate(document_text):
                    if char != " ":
                        non_whitespaces_chars += 1
                    if new_start_idx is None and non_whitespaces_chars == start_idx + 1:
                        new_start_idx = i
                    if non_whitespaces_chars == end_idx + 1:
                        new_end_idx = i + 1
                        break
                assert new_start_idx is not None
                assert new_end_idx is not None
                mention_text = document_text[new_start_idx:new_end_idx]
                if mention_text != columns[2] and mention_text.startswith("/"):
                    # There is still one illegal annotation in the file ..
                    new_start_idx += 1
                entities_per_document[document_id].append(Entity((new_start_idx, new_end_idx), GENE_TAG))
                # Sanity check: the remapped span must match the recorded mention.
                assert document_text[new_start_idx:new_end_idx] == columns[2]
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_GENE_BC2GM(HunerDataset):
    """HUNER flavour of the BioCreative-II-GM corpus (gene annotations)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/bc2gm"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        # Download once, then merge the parsed train and test splits.
        root = BC2GM.download_dataset(data_dir)
        return merge_datasets(
            [
                BC2GM.parse_train_dataset(root),
                BC2GM.parse_test_dataset(root),
            ]
        )
class CEMP(ColumnCorpus):
    """Original CEMP corpus containing chemical annotations.
    For further information see:
    https://biocreative.bioinformatics.udel.edu/tasks/biocreative-v/cemp-detailed-task-description/
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the CEMP corpus.
        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments
            documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # Converted CoNLL files are cached per sentence splitter name.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{sentence_splitter.name}_dev.conll"
        if not (train_file.exists() and dev_file.exists()):
            train_folder = self.download_train_corpus(data_folder)
            train_text_file = train_folder / "chemdner_patents_train_text.txt"
            train_ann_file = train_folder / "chemdner_cemp_gold_standard_train.tsv"
            train_data = self.parse_input_file(train_text_file, train_ann_file)
            dev_folder = self.download_dev_corpus(data_folder)
            dev_text_file = dev_folder / "chemdner_patents_development_text.txt"
            dev_ann_file = dev_folder / "chemdner_cemp_gold_standard_development_v03.tsv"
            dev_data = self.parse_input_file(dev_text_file, dev_ann_file)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(dev_data, dev_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @classmethod
    def download_train_corpus(cls, data_dir: Path) -> Path:
        """Download and unpack the CEMP training set; return the unpacked folder."""
        corpus_dir = data_dir / "original"
        os.makedirs(str(corpus_dir), exist_ok=True)
        train_url = "https://biocreative.bioinformatics.udel.edu/media/store/files/2015/cemp_training_set.tar.gz"
        data_path = cached_path(train_url, corpus_dir)
        unpack_file(data_path, corpus_dir)
        return corpus_dir / "cemp_training_set"
    @classmethod
    def download_dev_corpus(cls, data_dir: Path) -> Path:
        """Download and unpack the CEMP development set; return the unpacked folder."""
        corpus_dir = data_dir / "original"
        os.makedirs(str(corpus_dir), exist_ok=True)
        dev_url = "https://biocreative.bioinformatics.udel.edu/media/store/files/2015/cemp_development_set_v03.tar.gz"
        data_path = cached_path(dev_url, corpus_dir)
        unpack_file(data_path, corpus_dir)
        return corpus_dir / "cemp_development_set_v03"
    @staticmethod
    def parse_input_file(text_file: Path, ann_file: Path) -> InternalBioNerDataset:
        """Parse a CEMP text/annotation file pair into an internal dataset.
        The text file has one document per line (``id<TAB>title<TAB>abstract``).
        The raw chemical class from column 5 of the annotation file is kept as
        the entity type.  Offsets flagged "T" are re-based past the abstract
        (they appear to be title-relative -- verified by the assert below).
        """
        documents = {}
        entities_per_document: Dict[str, List[Entity]] = {}
        document_abstract_length: Dict[str, int] = {}
        with open(str(text_file), encoding="utf8") as text_reader:
            for line in text_reader:
                # NOTE(review): lines read from a file always contain at least
                # "\n", so this guard never triggers for blank lines.
                if not line:
                    continue
                document_id, title, abstract = line.split("\t")
                # Abstract first, title second to prevent issues with sentence splitting
                documents[document_id] = abstract + " " + title
                # +1 accounts for the separating space inserted above.
                document_abstract_length[document_id] = len(abstract) + 1
                entities_per_document[document_id] = []
        with open(str(ann_file), encoding="utf8") as ann_reader:
            for line in ann_reader:
                if not line:
                    continue
                columns = line.split("\t")
                document_id = columns[0]
                start, end = int(columns[2]), int(columns[3])
                if columns[1] == "T":
                    start = start + document_abstract_length[document_id]
                    end = end + document_abstract_length[document_id]
                entities_per_document[document_id].append(Entity((start, end), columns[5].strip()))
                document_text = documents[document_id]
                # Sanity check: the recorded mention must match the span text.
                assert columns[4] == document_text[start:end]
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_CHEMICAL_CEMP(HunerDataset):
    """HUNER version of the CEMP corpus containing chemical annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/cemp"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        # Build the corpus from the official train and development releases.
        parsed_splits = []
        for fetch, text_name, ann_name in (
            (
                CEMP.download_train_corpus,
                "chemdner_patents_train_text.txt",
                "chemdner_cemp_gold_standard_train.tsv",
            ),
            (
                CEMP.download_dev_corpus,
                "chemdner_patents_development_text.txt",
                "chemdner_cemp_gold_standard_development_v03.tsv",
            ),
        ):
            release_folder = fetch(data_dir)
            parsed_splits.append(CEMP.parse_input_file(release_folder / text_name, release_folder / ann_name))
        dataset = merge_datasets(parsed_splits)
        # Collapse every chemical-mention subtype onto the generic chemical tag.
        chemical_subtypes = (
            "ABBREVIATION",
            "FAMILY",
            "FORMULA",
            "IDENTIFIERS",
            "MULTIPLE",
            "SYSTEMATIC",
            "TRIVIAL",
        )
        return filter_and_map_entities(dataset, {subtype: CHEMICAL_TAG for subtype in chemical_subtypes})
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class CHEBI(ColumnCorpus):
    """Original CHEBI corpus containing all annotations.

    For further information see Shardlow et al.: A New Corpus to
    Support Text Mining for the Curation of Metabolites in the ChEBI
    Database
    http://www.lrec-conf.org/proceedings/lrec2018/pdf/229.pdf
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        annotator: int = 0,
    ) -> None:
        """Initialize the CHEBI corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments documents
            into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        :param annotator: The abstracts have been annotated by two annotators, which can be
            selected by choosing annotator 1 or 2. If annotator is 0, the union of both annotations is used.
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # The converted CoNLL file is cached per sentence splitter.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        if not (train_file.exists()):
            corpus_dir = self.download_dataset(data_folder)
            full_dataset = self.parse_dataset(corpus_dir, annotator=annotator)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(full_dataset, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)

    @staticmethod
    def download_dataset(data_dir: Path) -> Path:
        """Download and unpack the ChEBI corpus; returns the unpacked folder."""
        data_url = "http://www.nactem.ac.uk/chebi/ChEBI.zip"
        data_path = cached_path(data_url, data_dir)
        unpack_file(data_path, data_dir)
        return data_dir / "ChEBI"

    @staticmethod
    def parse_dataset(data_dir: Path, annotator: int) -> InternalBioNerDataset:
        """Parse abstracts and full papers with their brat-style annotations.

        :param data_dir: Unpacked corpus folder (see :meth:`download_dataset`)
        :param annotator: 1 or 2 selects a single annotator for the abstracts;
            0 takes the union of both annotators' entities.
        :raises ValueError: If annotator is not in {0, 1, 2}.
        """
        abstract_folder = data_dir / "abstracts"
        fulltext_folder = data_dir / "fullpapers"
        if annotator == 0:
            annotation_dirs = ["Annotator1", "Annotator2"]
        elif 1 <= annotator <= 2:
            # FIX: was `annotator <= 2`, which silently accepted negative values
            # and produced a non-existent "Annotator-1" directory.
            annotation_dirs = [f"Annotator{annotator}"]
        else:
            raise ValueError("Invalid value for annotator")
        documents = {}
        entities_per_document = {}
        abstract_ids = [x.name[:-4] for x in (abstract_folder / annotation_dirs[0]).iterdir() if x.name[-4:] == ".txt"]
        fulltext_ids = [x.name[:-4] for x in fulltext_folder.iterdir() if x.name[-4:] == ".txt"]
        for abstract_id in abstract_ids:
            abstract_id_output = abstract_id + "_A"
            with open(
                abstract_folder / annotation_dirs[0] / f"{abstract_id}.txt",
                encoding="utf8",
            ) as f:
                documents[abstract_id_output] = f.read()
            # BUGFIX: previously each annotator's entities *overwrote* the prior
            # ones inside this loop, so annotator=0 returned only Annotator2's
            # annotations instead of the documented union. Accumulate instead.
            entities: List[Entity] = []
            for annotation_dir in annotation_dirs:
                with open(
                    abstract_folder / annotation_dir / f"{abstract_id}.ann",
                    encoding="utf8",
                ) as f:
                    entities.extend(CHEBI.get_entities(f))
            entities_per_document[abstract_id_output] = entities
        for fulltext_id in fulltext_ids:
            fulltext_id_output = fulltext_id + "_F"
            with open(fulltext_folder / f"{fulltext_id}.txt", encoding="utf8") as f:
                documents[fulltext_id_output] = f.read()
            with open(fulltext_folder / f"{fulltext_id}.ann", encoding="utf8") as f:
                entities_per_document[fulltext_id_output] = CHEBI.get_entities(f)
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)

    @staticmethod
    def get_entities(f):
        """Read brat-style text-bound ("T") lines from an open .ann file.

        Discontinuous annotations (offset pairs separated by ';') yield one
        :class:`Entity` per span.
        """
        entities = []
        for line in f:
            # Only text-bound annotations ("T...") carry entity mentions.
            if not line.strip() or line[0] != "T":
                continue
            parts = line.split("\t")[1].split()
            entity_type = parts[0]
            char_offsets = " ".join(parts[1:])
            for start_end in char_offsets.split(";"):
                start, end = start_end.split(" ")
                entities += [Entity((int(start), int(end)), entity_type)]
        return entities
class HUNER_CHEMICAL_CHEBI(HunerDataset):
    """HUNER version of the CHEBI corpus containing chemical annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/chebi_new"

    def to_internal(self, data_dir: Path, annotator: int = 0) -> InternalBioNerDataset:
        # Reuse the full CHEBI download/parsing machinery, then keep chemicals only.
        raw_corpus = CHEBI.parse_dataset(CHEBI.download_dataset(data_dir), annotator=annotator)
        return filter_and_map_entities(raw_corpus, {"Chemical": CHEMICAL_TAG})
class HUNER_GENE_CHEBI(HunerDataset):
    """HUNER version of the CHEBI corpus containing gene annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/chebi_new"

    def to_internal(self, data_dir: Path, annotator: int = 0) -> InternalBioNerDataset:
        # Reuse the full CHEBI download/parsing machinery, then keep proteins only.
        raw_corpus = CHEBI.parse_dataset(CHEBI.download_dataset(data_dir), annotator=annotator)
        return filter_and_map_entities(raw_corpus, {"Protein": GENE_TAG})
class HUNER_SPECIES_CHEBI(HunerDataset):
    """HUNER version of the CHEBI corpus containing species annotations."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def split_url() -> str:
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/chebi_new"

    def to_internal(self, data_dir: Path, annotator: int = 0) -> InternalBioNerDataset:
        # Reuse the full CHEBI download/parsing machinery, then keep species only.
        raw_corpus = CHEBI.parse_dataset(CHEBI.download_dataset(data_dir), annotator=annotator)
        return filter_and_map_entities(raw_corpus, {"Species": SPECIES_TAG})
class BioNLPCorpus(ColumnCorpus):
    """Base class for corpora from BioNLP event extraction shared tasks.

    Subclasses implement :meth:`download_corpus`; this base class handles the
    conversion to CoNLL and the flair corpus construction.

    For further information see:
    http://2013.bionlp-st.org/Intro
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the BioNLP Corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments documents
            into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # CoNLL conversions are cached per sentence splitter so that different
        # splitters do not clash on the same files.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{sentence_splitter.name}_dev.conll"
        test_file = data_folder / f"{sentence_splitter.name}_test.conll"
        if not (train_file.exists() and dev_file.exists() and test_file.exists()):
            # download_corpus is provided by the concrete subclass.
            train_folder, dev_folder, test_folder = self.download_corpus(data_folder / "original")
            train_data = self.parse_input_files(train_folder)
            dev_data = self.parse_input_files(dev_folder)
            test_data = self.parse_input_files(test_folder)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(dev_data, dev_file)
            conll_writer.write_to_conll(test_data, test_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    @staticmethod
    @abstractmethod
    def download_corpus(data_folder: Path) -> Tuple[Path, Path, Path]:
        """Download the corpus and return the (train, dev, test) folders."""
        pass
    @staticmethod
    def parse_input_files(input_folder: Path) -> InternalBioNerDataset:
        """Parse brat-style .txt/.a1 file pairs in *input_folder* into an internal dataset."""
        documents = {}
        entities_per_document = {}
        for txt_file in input_folder.glob("*.txt"):
            name = txt_file.with_suffix("").name
            a1_file = txt_file.with_suffix(".a1")
            with txt_file.open(encoding="utf8") as f:
                documents[name] = f.read()
            with a1_file.open(encoding="utf8") as ann_reader:
                entities = []
                for line in ann_reader:
                    fields = line.strip().split("\t")
                    # Only text-bound annotations ("T...") carry entity mentions.
                    if fields[0].startswith("T"):
                        ann_type, start, end = fields[1].split()
                        entities.append(Entity(char_span=(int(start), int(end)), entity_type=ann_type))
                entities_per_document[name] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class BIONLP2013_PC(BioNLPCorpus):
    """Corpus of the BioNLP'2013 Pathway Curation shared task.

    For further information see Ohta et al. Overview of the pathway
    curation (PC) task of bioNLP shared task 2013.
    https://www.aclweb.org/anthology/W13-2009/
    """

    @staticmethod
    def download_corpus(download_folder: Path) -> Tuple[Path, Path, Path]:
        # Each split ships as its own archive: fetch all three, then unpack all three.
        split_archives = (
            (
                "http://2013.bionlp-st.org/tasks/BioNLP-ST_2013_PC_training_data.tar.gz",
                "BioNLP-ST_2013_PC_training_data",
            ),
            (
                "http://2013.bionlp-st.org/tasks/BioNLP-ST_2013_PC_development_data.tar.gz",
                "BioNLP-ST_2013_PC_development_data",
            ),
            (
                "http://2013.bionlp-st.org/tasks/BioNLP-ST_2013_PC_test_data.tar.gz",
                "BioNLP-ST_2013_PC_test_data",
            ),
        )
        for url, _ in split_archives:
            cached_path(url, download_folder)
        for _, stem in split_archives:
            unpack_file(download_folder / (stem + ".tar.gz"), download_folder, keep=False)
        train_folder, dev_folder, test_folder = (download_folder / stem for _, stem in split_archives)
        return train_folder, dev_folder, test_folder
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class BIONLP2013_CG(BioNLPCorpus):
    """Corpus of the BioNLP'2013 Cancer Genetics shared task.

    For further information see Pyysalo, Ohta & Ananiadou 2013
    Overview of the Cancer Genetics (CG) task of BioNLP Shared Task 2013
    https://www.aclweb.org/anthology/W13-2008/
    """

    @staticmethod
    def download_corpus(download_folder: Path) -> Tuple[Path, Path, Path]:
        # The whole corpus is one GitHub archive containing all three splits.
        cached_path(
            "https://github.com/openbiocorpora/bionlp-st-2013-cg/archive/refs/heads/master.zip",
            download_folder,
        )
        unpack_file(download_folder / "master.zip", download_folder, keep=False)
        return (
            download_folder / "bionlp-st-2013-cg-master/original-data/train",
            download_folder / "bionlp-st-2013-cg-master/original-data/devel",
            download_folder / "bionlp-st-2013-cg-master/original-data/test",
        )
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class ANAT_EM(ColumnCorpus):
    """Corpus for anatomical named entity mention recognition.

    For further information see Pyysalo and Ananiadou: Anatomical
    entity mention recognition at literature scale
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3957068/
    http://nactem.ac.uk/anatomytagger/#AnatEM
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        tokenizer: Optional[Tokenizer] = None,
    ) -> None:
        """Initialize the anatomical named entity mention recognition Corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param tokenizer: Implementation of :class:`Tokenizer` which segments
            sentences into tokens (default :class:`SciSpacyTokenizer`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if tokenizer is None:
            tokenizer = SciSpacyTokenizer()
        # Sentence boundaries are marked with SENTENCE_TAG in the reconstructed
        # documents, so a tag-based splitter recovers them exactly.
        sentence_splitter = TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=tokenizer)
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{sentence_splitter.name}_dev.conll"
        test_file = data_folder / f"{sentence_splitter.name}_test.conll"
        if not (train_file.exists() and dev_file.exists() and test_file.exists()):
            corpus_folder = self.download_corpus(data_folder)
            train_data = self.parse_input_files(corpus_folder / "nersuite" / "train", SENTENCE_TAG)
            dev_data = self.parse_input_files(corpus_folder / "nersuite" / "devel", SENTENCE_TAG)
            test_data = self.parse_input_files(corpus_folder / "nersuite" / "test", SENTENCE_TAG)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(dev_data, dev_file)
            conll_writer.write_to_conll(test_data, test_file)
        super().__init__(data_folder, columns, in_memory=in_memory)

    @staticmethod
    def download_corpus(data_folder: Path):
        """Download and unpack AnatEM 1.0.2; returns the unpacked corpus folder.

        FIX: a spurious ``@abstractmethod`` decorator (copy-paste from
        :class:`BioNLPCorpus`) has been removed — this method is concrete and
        is called directly by ``__init__``.
        """
        corpus_url = "http://nactem.ac.uk/anatomytagger/AnatEM-1.0.2.tar.gz"
        corpus_archive = cached_path(corpus_url, data_folder)
        unpack_file(
            corpus_archive,
            data_folder,
            keep=True,
            mode="targz",
        )
        return data_folder / "AnatEM-1.0.2"

    @staticmethod
    def parse_input_files(input_dir: Path, sentence_separator: str) -> InternalBioNerDataset:
        """Parse nersuite-formatted files (tag, start, end, token, ... per line).

        Blank lines mark sentence boundaries; *sentence_separator* is inserted
        into the reconstructed document text at those positions.
        """
        documents = {}
        entities_per_document = {}
        input_files = [
            file for file in os.listdir(str(input_dir)) if file.endswith(".nersuite") and not file.startswith("._")
        ]
        for input_file in input_files:
            document_id = input_file.replace(".nersuite", "")
            document_text = ""
            entities = []
            entity_type = None
            entity_start = None
            # sent_offset shifts the per-file token offsets by the separators
            # inserted so far; last_offset tracks the end of the previous token.
            sent_offset = 0
            last_offset = 0
            with open(input_dir / input_file, encoding="utf8") as f:
                for line in f.readlines():
                    line = line.strip()
                    if not line:
                        # BUGFIX: close an entity still open at a sentence
                        # boundary. Previously it was closed only when a later
                        # tag appeared, *after* last_offset had been advanced
                        # past the separator, so the entity span wrongly
                        # included the separator text.
                        if entity_type is not None and entity_start is not None:
                            entities.append(Entity((entity_start, last_offset), entity_type))
                            entity_type = None
                        document_text += sentence_separator
                        sent_offset += len(sentence_separator)
                        last_offset += len(sentence_separator)
                        continue
                    tag, _start, _end, word, _, _, _ = line.split("\t")
                    start = int(_start) + sent_offset
                    end = int(_end) + sent_offset
                    # Pad with spaces so the token lands at its stated offset.
                    document_text += " " * (start - last_offset)
                    document_text += word
                    if tag.startswith("B-"):
                        if entity_type is not None:
                            entities.append(Entity((entity_start, last_offset), entity_type))
                        entity_start = start
                        entity_type = tag[2:]
                    elif tag == "O" and entity_type is not None and entity_start is not None:
                        entities.append(Entity((entity_start, last_offset), entity_type))
                        entity_type = None
                    last_offset = end
                    assert word == document_text[start:end]
                # BUGFIX: an entity extending to the end of the file was
                # previously dropped; flush it here.
                if entity_type is not None and entity_start is not None:
                    entities.append(Entity((entity_start, last_offset), entity_type))
            documents[document_id] = document_text
            entities_per_document[document_id] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class BioBertHelper(ColumnCorpus):
    """Helper class to convert corpora and the respective train, dev and test split used by BioBERT.

    For further details see Lee et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    @staticmethod
    def download_corpora(download_dir: Path):
        """Download the pre-processed BioBERT NER datasets archive into *download_dir* and unzip it."""
        from google_drive_downloader import GoogleDriveDownloader as gdd

        gdd.download_file_from_google_drive(
            file_id="1OletxmPYNkz2ltOr9pyT0b0iBtUWxslh",
            dest_path=str(download_dir / "NERdata.zip"),
            unzip=True,
        )

    @staticmethod
    def _convert_tsv_to_conll(tsv_file: Path, conll_file: Path, tag_type: str):
        """Convert one two-column BioBERT tsv split into CoNLL format, appending the entity type to each non-O tag."""
        with tsv_file.open(encoding="utf8") as f_in, conll_file.open("w", encoding="utf8") as f_out:
            for line in f_in:
                if not line.strip():
                    f_out.write("\n")
                    continue
                token, tag = line.strip().split("\t")
                if tag != "O":
                    tag = tag + "-" + tag_type
                f_out.write(f"{token} {tag}\n")

    @staticmethod
    def convert_and_write(download_folder, data_folder, tag_type):
        """Convert the train/devel/test splits of one BioBERT corpus to CoNLL files in *data_folder*.

        Refactored: the three splits previously used three copies of the
        identical conversion loop; they only differ in file names.
        """
        data_folder.mkdir(parents=True, exist_ok=True)
        for src_name, dest_name in (
            ("train.tsv", "train.conll"),
            ("devel.tsv", "dev.conll"),
            ("test.tsv", "test.conll"),
        ):
            BioBertHelper._convert_tsv_to_conll(download_folder / src_name, data_folder / dest_name, tag_type)
class BIOBERT_CHEMICAL_BC4CHEMD(ColumnCorpus):
    """BC4CHEMD corpus with chemical annotations as used in the evaluation of BioBERT.

    For further details regarding BioBERT and it's evaluation, see Lee
    et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        # Two-column layout of the pre-tokenized BioBERT release.
        column_format = {0: "text", 1: "ner"}
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()
        # Regenerate the CoNLL files only if any of the three splits is missing.
        all_present = all((data_folder / f"{split}.conll").exists() for split in ("train", "dev", "test"))
        if not all_present:
            biobert_common = root / "biobert_common"
            if not (biobert_common / "BC4CHEMD").exists():
                BioBertHelper.download_corpora(biobert_common)
            BioBertHelper.convert_and_write(biobert_common / "BC4CHEMD", data_folder, tag_type=CHEMICAL_TAG)
        super().__init__(data_folder, column_format, in_memory=in_memory)
class BIOBERT_GENE_BC2GM(ColumnCorpus):
    """BC2GM corpus with gene annotations as used in the evaluation of BioBERT.

    For further details regarding BioBERT and it's evaluation, see Lee
    et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        # Two-column layout of the pre-tokenized BioBERT release.
        column_format = {0: "text", 1: "ner"}
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()
        # Regenerate the CoNLL files only if any of the three splits is missing.
        all_present = all((data_folder / f"{split}.conll").exists() for split in ("train", "dev", "test"))
        if not all_present:
            biobert_common = root / "biobert_common"
            if not (biobert_common / "BC2GM").exists():
                BioBertHelper.download_corpora(biobert_common)
            BioBertHelper.convert_and_write(biobert_common / "BC2GM", data_folder, tag_type=GENE_TAG)
        super().__init__(data_folder, column_format, in_memory=in_memory)
class BIOBERT_GENE_JNLPBA(ColumnCorpus):
    """JNLPBA corpus with gene annotations as used in the evaluation of BioBERT.

    For further details regarding BioBERT and it's evaluation, see Lee
    et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        # Two-column layout of the pre-tokenized BioBERT release.
        column_format = {0: "text", 1: "ner"}
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()
        # Regenerate the CoNLL files only if any of the three splits is missing.
        all_present = all((data_folder / f"{split}.conll").exists() for split in ("train", "dev", "test"))
        if not all_present:
            biobert_common = root / "biobert_common"
            if not (biobert_common / "JNLPBA").exists():
                BioBertHelper.download_corpora(biobert_common)
            BioBertHelper.convert_and_write(biobert_common / "JNLPBA", data_folder, tag_type=GENE_TAG)
        super().__init__(data_folder, column_format, in_memory=in_memory)
class BIOBERT_CHEMICAL_BC5CDR(ColumnCorpus):
    """BC5CDR corpus with chemical annotations as used in the evaluation of BioBERT.

    For further details regarding BioBERT and it's evaluation, see Lee
    et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        # Two-column layout of the pre-tokenized BioBERT release.
        column_format = {0: "text", 1: "ner"}
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()
        # Regenerate the CoNLL files only if any of the three splits is missing.
        all_present = all((data_folder / f"{split}.conll").exists() for split in ("train", "dev", "test"))
        if not all_present:
            biobert_common = root / "biobert_common"
            if not (biobert_common / "BC5CDR-chem").exists():
                BioBertHelper.download_corpora(biobert_common)
            BioBertHelper.convert_and_write(biobert_common / "BC5CDR-chem", data_folder, tag_type=CHEMICAL_TAG)
        super().__init__(data_folder, column_format, in_memory=in_memory)
class BIOBERT_DISEASE_BC5CDR(ColumnCorpus):
    """BC5CDR corpus with disease annotations as used in the evaluation of BioBERT.

    For further details regarding BioBERT and it's evaluation, see Lee
    et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        # Two-column layout of the pre-tokenized BioBERT release.
        column_format = {0: "text", 1: "ner"}
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()
        # Regenerate the CoNLL files only if any of the three splits is missing.
        all_present = all((data_folder / f"{split}.conll").exists() for split in ("train", "dev", "test"))
        if not all_present:
            biobert_common = root / "biobert_common"
            if not (biobert_common / "BC5CDR-disease").exists():
                BioBertHelper.download_corpora(biobert_common)
            BioBertHelper.convert_and_write(biobert_common / "BC5CDR-disease", data_folder, tag_type=DISEASE_TAG)
        super().__init__(data_folder, column_format, in_memory=in_memory)
class BIOBERT_DISEASE_NCBI(ColumnCorpus):
    """NCBI disease corpus as used in the evaluation of BioBERT.

    For further details regarding BioBERT and it's evaluation, see Lee
    et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        # Two-column layout of the pre-tokenized BioBERT release.
        column_format = {0: "text", 1: "ner"}
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()
        # Regenerate the CoNLL files only if any of the three splits is missing.
        all_present = all((data_folder / f"{split}.conll").exists() for split in ("train", "dev", "test"))
        if not all_present:
            biobert_common = root / "biobert_common"
            if not (biobert_common / "NCBI-disease").exists():
                BioBertHelper.download_corpora(biobert_common)
            BioBertHelper.convert_and_write(biobert_common / "NCBI-disease", data_folder, tag_type=DISEASE_TAG)
        super().__init__(data_folder, column_format, in_memory=in_memory)
class BIOBERT_SPECIES_LINNAEUS(ColumnCorpus):
    """Linneaeus corpus with species annotations as used in the evaluation of BioBERT.

    For further details regarding BioBERT and it's evaluation, see Lee
    et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        # Two-column layout of the pre-tokenized BioBERT release.
        column_format = {0: "text", 1: "ner"}
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()
        # Regenerate the CoNLL files only if any of the three splits is missing.
        all_present = all((data_folder / f"{split}.conll").exists() for split in ("train", "dev", "test"))
        if not all_present:
            biobert_common = root / "biobert_common"
            if not (biobert_common / "linnaeus").exists():
                BioBertHelper.download_corpora(biobert_common)
            BioBertHelper.convert_and_write(biobert_common / "linnaeus", data_folder, tag_type=SPECIES_TAG)
        super().__init__(data_folder, column_format, in_memory=in_memory)
class BIOBERT_SPECIES_S800(ColumnCorpus):
    """S800 corpus with species annotations as used in the evaluation of BioBERT.

    For further details regarding BioBERT and it's evaluation, see Lee
    et al.:
    https://academic.oup.com/bioinformatics/article/36/4/1234/5566506
    https://github.com/dmis-lab/biobert
    """

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True) -> None:
        # Two-column layout of the pre-tokenized BioBERT release.
        column_format = {0: "text", 1: "ner"}
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()
        # Regenerate the CoNLL files only if any of the three splits is missing.
        all_present = all((data_folder / f"{split}.conll").exists() for split in ("train", "dev", "test"))
        if not all_present:
            biobert_common = root / "biobert_common"
            if not (biobert_common / "s800").exists():
                BioBertHelper.download_corpora(biobert_common)
            BioBertHelper.convert_and_write(biobert_common / "s800", data_folder, tag_type=SPECIES_TAG)
        super().__init__(data_folder, column_format, in_memory=in_memory)
class CRAFT_V4(ColumnCorpus):
    """Version 4.0.1 of the CRAFT corpus containing all but the co-reference and structural annotations.

    For further information see:
    https://github.com/UCDenver-ccp/CRAFT
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initializes version 4.0.1 of the CRAFT corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which segments
            documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner"}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        if sentence_splitter is None:
            sentence_splitter = SciSpacySentenceSplitter()
        # CoNLL conversions are cached per sentence splitter.
        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        dev_file = data_folder / f"{sentence_splitter.name}_dev.conll"
        test_file = data_folder / f"{sentence_splitter.name}_test.conll"
        if not (train_file.exists() and dev_file.exists() and test_file.exists()):
            corpus_dir = self.download_corpus(data_folder)
            corpus_data = self.parse_corpus(corpus_dir)
            # Filter for specific entity types, by default no entities will be filtered
            corpus_data = self.filter_entities(corpus_data)
            train_data, dev_data, test_data = self.prepare_splits(data_folder, corpus_data)
            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(train_data, train_file)
            conll_writer.write_to_conll(dev_data, dev_file)
            conll_writer.write_to_conll(test_data, test_file)
        super().__init__(data_folder, columns, in_memory=in_memory)
    def filter_entities(self, corpus: InternalBioNerDataset) -> InternalBioNerDataset:
        # Hook for subclasses: the base corpus keeps all entity types.
        return corpus
    @classmethod
    def download_corpus(cls, data_dir: Path) -> Path:
        """Download and unpack CRAFT v4.0.1; returns the unpacked folder."""
        url = "https://github.com/UCDenver-ccp/CRAFT/archive/v4.0.1.tar.gz"
        data_path = cached_path(url, data_dir)
        unpack_file(data_path, data_dir, mode="targz")
        return data_dir / "CRAFT-4.0.1"
    @staticmethod
    def prepare_splits(
        data_dir: Path, corpus: InternalBioNerDataset
    ) -> Tuple[InternalBioNerDataset, InternalBioNerDataset, InternalBioNerDataset]:
        """Re-bucket documents into train/dev/test based on the HUNER CRAFT (V2) splits.

        V2 train+dev become train, V2 test becomes dev, and documents new in V4
        become the test split.
        """
        splits_dir = data_dir / "splits"
        os.makedirs(str(splits_dir), exist_ok=True)
        # Get original HUNER splits to retrieve a list of all document ids contained in V2
        split_urls = [
            "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/craft.train",
            "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/craft.dev",
            "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/craft.test",
        ]
        splits = {}
        for url in split_urls:
            split_file = cached_path(url, splits_dir)
            with open(str(split_file), encoding="utf8") as split_reader:
                # Key by the URL's file extension: "train" / "dev" / "test".
                splits[url.split(".")[-1]] = [line.strip() for line in split_reader if line.strip()]
        train_documents, train_entities = {}, {}
        dev_documents, dev_entities = {}, {}
        test_documents, test_entities = {}, {}
        for document_id, document_text in corpus.documents.items():
            if document_id in splits["train"] or document_id in splits["dev"]:
                # train and dev split of V2 will be train in V4
                train_documents[document_id] = document_text
                train_entities[document_id] = corpus.entities_per_document[document_id]
            elif document_id in splits["test"]:
                # test split of V2 will be dev in V4
                dev_documents[document_id] = document_text
                dev_entities[document_id] = corpus.entities_per_document[document_id]
            else:
                # New documents in V4 will become test documents
                test_documents[document_id] = document_text
                test_entities[document_id] = corpus.entities_per_document[document_id]
        train_corpus = InternalBioNerDataset(documents=train_documents, entities_per_document=train_entities)
        dev_corpus = InternalBioNerDataset(documents=dev_documents, entities_per_document=dev_entities)
        test_corpus = InternalBioNerDataset(documents=test_documents, entities_per_document=test_entities)
        return train_corpus, dev_corpus, test_corpus
    @staticmethod
    def parse_corpus(corpus_dir: Path) -> InternalBioNerDataset:
        """Parse article texts and per-ontology knowtator XML annotations.

        Each concept-annotation sub-directory (e.g. "chebi", "pr") contributes
        entities whose type is the lower-cased directory name.
        """
        documents = {}
        entities_per_document = {}
        text_dir = corpus_dir / "articles" / "txt"
        document_texts = [doc for doc in text_dir.iterdir() if doc.name[-4:] == ".txt"]
        # Structural/co-reference annotations are deliberately excluded.
        annotation_dirs = [
            path
            for path in (corpus_dir / "concept-annotation").iterdir()
            if path.name not in ["sections-and-typography", "coreference"] and path.is_dir()
        ]
        for doc in Tqdm.tqdm(document_texts, desc="Converting to internal"):
            document_id = doc.name.split(".")[0]
            with open(doc, encoding="utf8") as f_txt:
                documents[document_id] = f_txt.read()
            entities = []
            for annotation_dir in annotation_dirs:
                with open(
                    annotation_dir / annotation_dir.parts[-1] / "knowtator" / (doc.name + ".knowtator.xml"),
                    encoding="utf8",
                ) as f_ann:
                    ann_tree = etree.parse(f_ann)
                # Discontinuous annotations have several <span> elements; each
                # span becomes its own entity.
                for annotation in ann_tree.xpath("//annotation"):
                    for span in annotation.xpath("span"):
                        start = int(span.get("start"))
                        end = int(span.get("end"))
                        entities += [Entity((start, end), annotation_dir.name.lower())]
            entities_per_document[document_id] = entities
        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
class HUNER_CHEMICAL_CRAFT_V4(HunerDataset):
    """HUNER version of the CRAFT corpus containing (only) chemical annotations."""

    # The previous explicit ``__init__`` only delegated ``*args, **kwargs`` to the
    # super class (useless parent delegation), so the inherited constructor is used.

    @staticmethod
    def split_url() -> str:
        """Return the URL of the official HUNER train/dev/test split for CRAFT v4."""
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/craft_v4"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download and parse CRAFT v4, keeping only ChEBI (chemical) annotations."""
        corpus_dir = CRAFT_V4.download_corpus(data_dir)
        corpus = CRAFT_V4.parse_corpus(corpus_dir)
        return filter_and_map_entities(corpus, {"chebi": CHEMICAL_TAG})
class HUNER_GENE_CRAFT_V4(HunerDataset):
    """HUNER version of the CRAFT corpus containing (only) gene annotations."""

    # The previous explicit ``__init__`` only delegated ``*args, **kwargs`` to the
    # super class (useless parent delegation), so the inherited constructor is used.

    @staticmethod
    def split_url() -> str:
        """Return the URL of the official HUNER train/dev/test split for CRAFT v4."""
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/craft_v4"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download and parse CRAFT v4, keeping only Protein-Ontology (gene) annotations."""
        corpus_dir = CRAFT_V4.download_corpus(data_dir)
        corpus = CRAFT_V4.parse_corpus(corpus_dir)
        return filter_and_map_entities(corpus, {"pr": GENE_TAG})
class HUNER_SPECIES_CRAFT_V4(HunerDataset):
    """HUNER version of the CRAFT corpus containing (only) species annotations."""

    # The previous explicit ``__init__`` only delegated ``*args, **kwargs`` to the
    # super class (useless parent delegation), so the inherited constructor is used.

    @staticmethod
    def split_url() -> str:
        """Return the URL of the official HUNER train/dev/test split for CRAFT v4."""
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/craft_v4"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download and parse CRAFT v4, keeping only NCBI-Taxonomy (species) annotations."""
        corpus_dir = CRAFT_V4.download_corpus(data_dir)
        corpus = CRAFT_V4.parse_corpus(corpus_dir)
        return filter_and_map_entities(corpus, {"ncbitaxon": SPECIES_TAG})
class HUNER_CHEMICAL_BIONLP2013_CG(HunerDataset):
    """HUNER version of the BioNLP 2013 Cancer Genetics corpus with chemical annotations only."""

    # The previous explicit ``__init__`` only delegated ``*args, **kwargs`` to the
    # super class (useless parent delegation), so the inherited constructor is used.

    @staticmethod
    def split_url() -> str:
        """Return the URL of the official HUNER train/dev/test split definition."""
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/bionlp2013_cg"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download all splits, merge them and keep only ``Simple_chemical`` annotations."""
        train_dir, dev_dir, test_dir = BIONLP2013_CG.download_corpus(data_dir)
        corpus = merge_datasets(
            [
                BioNLPCorpus.parse_input_files(train_dir),
                BioNLPCorpus.parse_input_files(dev_dir),
                BioNLPCorpus.parse_input_files(test_dir),
            ]
        )
        return filter_and_map_entities(corpus, {"Simple_chemical": CHEMICAL_TAG})
class HUNER_DISEASE_BIONLP2013_CG(HunerDataset):
    """HUNER version of the BioNLP 2013 Cancer Genetics corpus with disease annotations only."""

    # The previous explicit ``__init__`` only delegated ``*args, **kwargs`` to the
    # super class (useless parent delegation), so the inherited constructor is used.

    @staticmethod
    def split_url() -> str:
        """Return the URL of the official HUNER train/dev/test split definition."""
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/bionlp2013_cg"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download all splits, merge them and keep only ``Cancer`` annotations."""
        train_dir, dev_dir, test_dir = BIONLP2013_CG.download_corpus(data_dir)
        corpus = merge_datasets(
            [
                BioNLPCorpus.parse_input_files(train_dir),
                BioNLPCorpus.parse_input_files(dev_dir),
                BioNLPCorpus.parse_input_files(test_dir),
            ]
        )
        return filter_and_map_entities(corpus, {"Cancer": DISEASE_TAG})
class HUNER_GENE_BIONLP2013_CG(HunerDataset):
    """HUNER version of the BioNLP 2013 Cancer Genetics corpus with gene annotations only."""

    # The previous explicit ``__init__`` only delegated ``*args, **kwargs`` to the
    # super class (useless parent delegation), so the inherited constructor is used.

    @staticmethod
    def split_url() -> str:
        """Return the URL of the official HUNER train/dev/test split definition."""
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/bionlp2013_cg"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download all splits, merge them and keep only ``Gene_or_gene_product`` annotations."""
        train_dir, dev_dir, test_dir = BIONLP2013_CG.download_corpus(data_dir)
        corpus = merge_datasets(
            [
                BioNLPCorpus.parse_input_files(train_dir),
                BioNLPCorpus.parse_input_files(dev_dir),
                BioNLPCorpus.parse_input_files(test_dir),
            ]
        )
        return filter_and_map_entities(corpus, {"Gene_or_gene_product": GENE_TAG})
class HUNER_SPECIES_BIONLP2013_CG(HunerDataset):
    """HUNER version of the BioNLP 2013 Cancer Genetics corpus with species annotations only."""

    # The previous explicit ``__init__`` only delegated ``*args, **kwargs`` to the
    # super class (useless parent delegation), so the inherited constructor is used.

    @staticmethod
    def split_url() -> str:
        """Return the URL of the official HUNER train/dev/test split definition."""
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/bionlp2013_cg"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download all splits, merge them and keep only ``Organism`` annotations."""
        train_dir, dev_dir, test_dir = BIONLP2013_CG.download_corpus(data_dir)
        corpus = merge_datasets(
            [
                BioNLPCorpus.parse_input_files(train_dir),
                BioNLPCorpus.parse_input_files(dev_dir),
                BioNLPCorpus.parse_input_files(test_dir),
            ]
        )
        return filter_and_map_entities(corpus, {"Organism": SPECIES_TAG})
class AZDZ(ColumnCorpus):
    """Arizona Disease Corpus from the Biomedical Informatics Lab at Arizona State University.

    For further information see:
      http://diego.asu.edu/index.php
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        tokenizer: Optional[Tokenizer] = None,
    ) -> None:
        """Initializes the Arizona Disease Corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param tokenizer: Implementation of :class:`Tokenizer` which segments sentences
            into tokens (default :class:`SciSpacyTokenizer`)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        if tokenizer is None:
            tokenizer = SciSpacyTokenizer()
        # Sentences are joined with SENTENCE_TAG in parse_corpus; split on it again here.
        sentence_splitter = TagSentenceSplitter(tag=SENTENCE_TAG, tokenizer=tokenizer)

        train_file = data_folder / f"{sentence_splitter.name}_train.conll"
        if not train_file.exists():
            corpus_file = self.download_corpus(data_folder)
            corpus_data = self.parse_corpus(corpus_file)

            conll_writer = CoNLLWriter(sentence_splitter=sentence_splitter)
            conll_writer.write_to_conll(corpus_data, train_file)
        super().__init__(data_folder, columns, in_memory=in_memory)

    @classmethod
    def download_corpus(cls, data_dir: Path) -> Path:
        """Download the raw AZDC tab-separated corpus file and return its local path."""
        url = "http://diego.asu.edu/downloads/AZDC_6-26-2009.txt"
        data_path = cached_path(url, data_dir)
        return data_path

    @staticmethod
    def parse_corpus(input_file: Path) -> InternalBioNerDataset:
        """Parse the AZDC file into the internal dataset format.

        Each non-header line holds: doc id, sentence number, sentence text and
        (1-based, sentence-relative) entity start / end offsets. All sentences of a
        document are concatenated with SENTENCE_TAG; entity offsets are shifted to
        be document-relative.

        Fixes over the previous implementation: ``prev_document_id`` /
        ``prev_sentence_id`` are now actually updated (before, every row flushed
        the running text under the *next* document's id), the final document is
        flushed after the loop, and the sentence offset advances past the full
        preceding text instead of only by ``len(SENTENCE_TAG)``.
        """
        documents: Dict[str, str] = {}
        entities_per_document: Dict[str, List[Entity]] = {}

        with open(str(input_file), encoding="iso-8859-1") as azdz_reader:
            prev_document_id: Optional[str] = None
            prev_sentence_id: Optional[str] = None
            document_text: Optional[str] = None
            entities: List[Entity] = []
            offset: int = 0

            for line in azdz_reader:
                line = line.strip()
                # Skip blank lines and the column header
                if not line or line.startswith("Doc Id"):
                    continue

                pmid, sentence_no, text, entity_start, entity_end = line.split("\t")

                document_id = pmid
                sentence_id = document_id + "_" + sentence_no

                # A new document id: flush the completed previous document.
                if document_id != prev_document_id:
                    if prev_document_id is not None and document_text is not None:
                        documents[prev_document_id] = document_text
                        entities_per_document[prev_document_id] = entities
                    document_text = None
                    entities = []
                    offset = 0

                if sentence_id != prev_sentence_id:
                    # New sentence: append it and move the offset past everything
                    # (text plus separator tag) that precedes it.
                    if document_text is None:
                        offset = 0
                        document_text = text.strip()
                    else:
                        offset = len(document_text) + len(SENTENCE_TAG)
                        document_text = document_text + SENTENCE_TAG + text.strip()

                prev_document_id = document_id
                prev_sentence_id = sentence_id

                try:
                    start = offset + int(entity_start) - 1
                    end = offset + int(entity_end)
                except ValueError:
                    # Rows without parseable offsets only contribute sentence text.
                    continue

                # An end offset of 0 marks sentences without an annotated mention.
                if int(entity_end) == 0:
                    continue

                entities.append(Entity((start, end), DISEASE_TAG))

            # Flush the last document of the file.
            if prev_document_id is not None and document_text is not None:
                documents[prev_document_id] = document_text
                entities_per_document[prev_document_id] = entities

        return InternalBioNerDataset(documents=documents, entities_per_document=entities_per_document)
@deprecated(version="0.13", reason="Please use data set implementation from BigBio instead (see BIGBIO_NER_CORPUS)")
class PDR(ColumnCorpus):
    """Corpus of plant-disease relations.

    For further information see Kim et al.: A corpus of plant-disease
    relations in the biomedical domain
      https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0221582
      http://gcancer.org/pdr/
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
    ) -> None:
        """Initialize the plant-disease relations Corpus.

        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Implementation of :class:`SentenceSplitter` which
            segments documents into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        """
        # Default corpus location is the flair cache root
        root = flair.cache_root / "datasets" if base_path is None else Path(base_path)
        data_folder = root / self.__class__.__name__.lower()

        # Column layout of the generated CoNLL file
        column_layout = {0: "text", 1: "ner", 2: ColumnDataset.SPACE_AFTER_KEY}

        splitter = SciSpacySentenceSplitter() if sentence_splitter is None else sentence_splitter

        conll_file = data_folder / f"{splitter.name}_train.conll"
        if not conll_file.exists():
            # Download + unpack, convert the brat annotations, and write CoNLL once.
            corpus_dir = self.download_corpus(data_folder)
            internal_corpus = brat_to_internal(corpus_dir, ann_file_suffixes=[".ann", ".ann2"])
            CoNLLWriter(sentence_splitter=splitter).write_to_conll(internal_corpus, conll_file)

        super().__init__(data_folder, column_layout, in_memory=in_memory)

    @classmethod
    def download_corpus(cls, data_dir: Path) -> Path:
        """Fetch and unpack the PDR archive; return the extracted corpus folder."""
        archive = cached_path("http://gcancer.org/pdr/Plant-Disease_Corpus.tar.gz", data_dir)
        unpack_file(archive, data_dir)
        return data_dir / "Plant-Disease_Corpus"
class HUNER_DISEASE_PDR(HunerDataset):
    """PDR Dataset with only Disease annotations."""

    # The previous explicit ``__init__`` only delegated ``*args, **kwargs`` to the
    # super class (useless parent delegation), so the inherited constructor is used.

    @staticmethod
    def split_url() -> str:
        """Return the URL of the official HUNER train/dev/test split for PDR."""
        return "https://raw.githubusercontent.com/hu-ner/huner/master/ner_scripts/splits/pdr"

    def to_internal(self, data_dir: Path) -> InternalBioNerDataset:
        """Download PDR, convert the brat annotations and keep only Disease entities."""
        corpus_folder = PDR.download_corpus(data_dir)
        corpus_data = brat_to_internal(corpus_folder, ann_file_suffixes=[".ann", ".ann2"])
        corpus_data = filter_and_map_entities(corpus_data, {"Disease": DISEASE_TAG})
        return corpus_data
class HunerMultiCorpus(MultiCorpus):
    """Base class to build the union of all HUNER data sets considering a particular entity type.

    All classes of this module whose name matches ``HUNER_<entity_type>_*`` are
    instantiated; corpora that fail to download, extract or parse are skipped
    with a logged error instead of aborting the whole union.
    """

    def __init__(self, entity_type: str, sentence_splitter: Optional[SentenceSplitter] = None) -> None:
        self.entity_type = entity_type

        def entity_type_predicate(member):
            # Select the corpus classes of this module for the requested entity type
            return f"HUNER_{entity_type}_" in str(member) and inspect.isclass(member)

        self.huner_corpora_classes = inspect.getmembers(sys.modules[__name__], predicate=entity_type_predicate)
        self.huner_corpora = []
        for name, constructor_func in self.huner_corpora_classes:
            try:
                if not sentence_splitter:
                    corpus = constructor_func()
                else:
                    corpus = constructor_func(sentence_splitter=sentence_splitter)

                self.huner_corpora.append(corpus)
            # TarError is the common base class of CompressionError, ExtractError,
            # HeaderError, ReadError and StreamError, so it covers all of them.
            except TarError:
                logger.exception(
                    f"Error while processing Tar file from corpus {name}:\n{sys.exc_info()[1]}\n\n", exc_info=False
                )
            except (BadZipFile, LargeZipFile):
                logger.exception(
                    f"Error while processing Zip file from corpus {name}:\n{sys.exc_info()[1]}\n\n", exc_info=False
                )
            # shutil.Error is a subclass of OSError and must be handled *before*
            # the generic OSError clause (the previous ordering made this
            # handler unreachable).
            except shutil.Error:
                logger.exception(
                    f"Error while copying data files for corpus {name}:\n{sys.exc_info()[1]}\n\n", exc_info=False
                )
            except OSError:
                # Also covers FileNotFoundError.
                logger.exception(
                    f"Error while downloading data for corpus {name}:\n{sys.exc_info()[1]}\n\n", exc_info=False
                )
            except etree.LxmlError:
                logger.exception(
                    f"Error while processing XML file from corpus {name}:\n{sys.exc_info()[1]}\n\n", exc_info=False
                )
            except json.JSONDecodeError:
                logger.exception(
                    f"Error while processing JSON file from corpus {name}:\n{sys.exc_info()[1]}\n\n", exc_info=False
                )
            except ValueError:
                # FileNotFoundError / OSError are already handled above, so only
                # plain ValueError instances can reach this handler.
                logger.exception(f"Error while preparing corpus {name}:\n{sys.exc_info()[1]}\n\n", exc_info=False)

        super().__init__(corpora=self.huner_corpora, name=f"HUNER-{entity_type}")
class HUNER_CELL_LINE(HunerMultiCorpus):
    """Union of all HUNER cell line data sets."""

    def __init__(self, sentence_splitter: Optional[SentenceSplitter] = None) -> None:
        """Combine every ``HUNER_CELL_LINE_*`` corpus of this module."""
        super().__init__("CELL_LINE", sentence_splitter=sentence_splitter)
class HUNER_CHEMICAL(HunerMultiCorpus):
    """Union of all HUNER chemical data sets."""

    def __init__(self, sentence_splitter: Optional[SentenceSplitter] = None) -> None:
        """Combine every ``HUNER_CHEMICAL_*`` corpus of this module."""
        super().__init__("CHEMICAL", sentence_splitter=sentence_splitter)
class HUNER_DISEASE(HunerMultiCorpus):
    """Union of all HUNER disease data sets."""

    def __init__(self, sentence_splitter: Optional[SentenceSplitter] = None) -> None:
        """Combine every ``HUNER_DISEASE_*`` corpus of this module."""
        super().__init__("DISEASE", sentence_splitter=sentence_splitter)
class HUNER_GENE(HunerMultiCorpus):
    """Union of all HUNER gene data sets."""

    def __init__(self, sentence_splitter: Optional[SentenceSplitter] = None) -> None:
        """Combine every ``HUNER_GENE_*`` corpus of this module."""
        super().__init__("GENE", sentence_splitter=sentence_splitter)
class HUNER_SPECIES(HunerMultiCorpus):
    """Union of all HUNER species data sets."""

    def __init__(self, sentence_splitter: Optional[SentenceSplitter] = None) -> None:
        """Combine every ``HUNER_SPECIES_*`` corpus of this module."""
        super().__init__("SPECIES", sentence_splitter=sentence_splitter)
class BIGBIO_NER_CORPUS(ColumnCorpus):
    """This class implements an adapter to data sets implemented in the BigBio framework.

    see https://github.com/bigscience-workshop/biomedical

    The BigBio framework harmonizes over 120 biomedical data sets and provides a uniform
    programming api to access them. This adapter allows to use all named entity recognition
    data sets by using the bigbio_kb schema.
    """

    def __init__(
        self,
        dataset_name: str,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Initialize the BigBio Corpus.

        :param dataset_name: Name of the dataset in the huggingface hub (e.g. nlmchem or bigbio/nlmchem)
        :param base_path: Path to the corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param sentence_splitter: Custom implementation of :class:`SentenceSplitter` which
            segments the text into sentences and tokens (default :class:`SciSpacySentenceSplitter`)
        :param train_split_name: Name of the training split in bigbio, usually train (default: None)
        :param dev_split_name: Name of the development split in bigbio, usually validation (default: None)
        :param test_split_name: Name of the test split in bigbio, usually test (default: None)
        """
        base_path = flair.cache_root / "datasets" if base_path is None else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner"}

        # build dataset name and full huggingface reference name
        if dataset_name.startswith("/"):  # Absolute path for local BigBio datasets
            full_dataset_name = dataset_name
            # keep only the file stem (e.g. "/path/to/nlmchem.py" -> "nlmchem")
            dataset_name = dataset_name.split("/")[-1]
            dataset_name = dataset_name.split(".")[0]
        elif not dataset_name.startswith("bigbio/"):
            full_dataset_name = "bigbio/" + dataset_name
        else:
            full_dataset_name = dataset_name
            dataset_name = dataset_name.replace("bigbio/", "")

        self.sentence_splitter = sentence_splitter if sentence_splitter else SciSpacySentenceSplitter()

        # cache layout: <base>/<corpus-dir>/<splitter-name>/{train,dev,test}.conll
        dataset_dir_name = self.build_corpus_directory_name(dataset_name)
        data_folder = base_path / dataset_dir_name / self.sentence_splitter.name

        train_file = data_folder / "train.conll"

        # Download data if necessary
        # Some datasets in BigBio only have train or test splits, not both
        # If only test split, assign it to train split
        # If only train split, sample other from it (sample_missing_splits=True)
        if not train_file.exists():
            # imported lazily so the (heavy) datasets package is only needed on first use
            from datasets import load_dataset

            dataset = load_dataset(full_dataset_name, name=dataset_name + "_bigbio_kb")

            # Auto-detect the standard split names; explicit constructor arguments
            # are kept only when the standard names are absent.
            if "train" in dataset:
                train_split_name = "train"
            if "test" in dataset:
                test_split_name = "test"
            if "validation" in dataset:
                dev_split_name = "validation"

            # At least one of train / test must exist to build a corpus at all
            assert not (train_split_name is None and test_split_name is None)

            splits = {}
            # Not every dataset has a dev / validation set!
            if train_split_name is not None:
                splits["train"] = self.to_internal_dataset(dataset, train_split_name)
            if test_split_name is not None:
                splits["test"] = self.to_internal_dataset(dataset, test_split_name)
            if dev_split_name is not None:
                splits["dev"] = self.to_internal_dataset(dataset, dev_split_name)

            # test-only datasets: promote the test split to the train split
            if "train" not in splits and "test" in splits:
                splits["train"] = splits.pop("test")

            # Perform type mapping if necessary
            type_mapping = self.get_entity_type_mapping()
            if type_mapping:
                splits = {split: filter_and_map_entities(dataset, type_mapping) for split, dataset in splits.items()}

            conll_writer = CoNLLWriter(sentence_splitter=self.sentence_splitter)
            conll_writer.process_dataset(splits, data_folder)

        super().__init__(data_folder, columns, in_memory=in_memory, comment_symbol="#", sample_missing_splits=True)

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Return the mapping of entity type given in the dataset to canonical types.

        Note, if a entity type is not present in the map it is discarded.
        Subclasses override this; the default of ``None`` keeps all types unchanged.
        """
        return None

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Builds the directory name for the given data set."""
        return "bigbio-" + dataset_name.lower()

    def to_internal_dataset(self, dataset, split: str) -> InternalBioNerDataset:
        """Converts a dataset given in hugging datasets format to our internal corpus representation.

        Each BigBio passage becomes one "document" keyed by ``<document_id>#<passage_id>``;
        entity offsets are re-based from document-level to passage-level coordinates.
        """
        id_to_text = {}
        id_to_entities: Dict[str, List] = {}
        for document in dataset[split]:
            document_id = document["document_id"]
            passage_offsets = []

            # Collect all texts of the document, each passage will be
            # a text in our internal format
            for passage in document["passages"]:
                passage_id = document_id + "#" + str(passage["id"])
                id_to_text[passage_id] = " ".join(passage["text"])
                passage_offsets.append((passage_id, passage["offsets"]))
                id_to_entities[passage_id] = []

            # Sort passages by start offset (precondition for the binary search below)
            passage_offsets = sorted(passage_offsets, key=lambda e: e[1][0][0])

            # Transform all entity annotations into internal format
            for entity in document["entities"]:
                # Find the passage of the entity (necessary for offset adaption)
                passage_id, passage_offset = self.bin_search_passage(
                    passage_offsets, 0, len(passage_offsets) - 1, entity
                )

                # Adapt entity offsets according to passage offsets
                # (only the first span of a possibly discontinuous mention is used)
                entity_offset = entity["offsets"][0]
                entity_offset = (entity_offset[0] - passage_offset[0], entity_offset[1] - passage_offset[0])

                id_to_entities[passage_id].append(Entity(char_span=entity_offset, entity_type=entity["type"]))

                # FIXME: This is just for debugging purposes
                # passage_text = id_to_text[passage_id]
                # doc_text = passage_text[entity_offset[0] : entity_offset[1]]
                # mention_text = entity["text"][0]
                # if doc_text != mention_text:
                #     print(f"Annotation error ({document['document_id']}) - Doc: {doc_text} vs. Mention: {mention_text}")

        return InternalBioNerDataset(documents=id_to_text, entities_per_document=id_to_entities)

    def bin_search_passage(self, passages: List[Tuple[str, List[Tuple[int, int]]]], low: int, high: int, entity: Dict):
        """Helper methods to find the passage to a given entity mention inclusive offset.

        The implementation uses binary search to find the passage in the ordered sequence passages.
        Returns a tuple of (passage id, first passage offset pair).
        """
        # Check base case
        if low > high:
            # NOTE(review): raised when the entity lies in no passage; a ValueError
            # would be more descriptive than NotImplementedError — confirm callers first.
            raise NotImplementedError("There was a mistake concerning the lower and upper bound.")

        # Get element in the middle
        mid = (high + low) // 2
        first_text_offset = passages[mid][1][0]
        first_mention_offset = entity["offsets"][0]

        # Is the mention with the passage offsets?
        if first_mention_offset[0] >= first_text_offset[0] and first_mention_offset[1] <= first_text_offset[1]:
            return passages[mid][0], first_text_offset

        # If element is smaller than mid, then it can only
        # be present in left subarray
        elif first_text_offset[0] > first_mention_offset[0]:
            return self.bin_search_passage(passages, low, mid - 1, entity)
        else:
            # Else the element can only be present in right subarray
            return self.bin_search_passage(passages, mid + 1, high, entity)
class HUNER_GENE_NLM_GENE(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the NLM-Gene corpus, keeping gene annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``nlm_gene``."""
        super().__init__(
            "nlm_gene",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map every gene-like annotation type of NLM-Gene onto the canonical gene tag."""
        mapping = {"Gene": GENE_TAG, "GENERIF": GENE_TAG, "STARGENE": GENE_TAG, "Domain": GENE_TAG, "Other": GENE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_GENE_DRUGPROT(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the DrugProt corpus, keeping gene annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``drugprot``."""
        super().__init__(
            "drugprot",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map both normalizable and non-normalizable gene mentions onto the gene tag."""
        mapping = {"GENE-N": GENE_TAG, "GENE-Y": GENE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_CHEMICAL_DRUGPROT(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the DrugProt corpus, keeping chemical annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``drugprot``."""
        super().__init__(
            "drugprot",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map DrugProt chemical mentions onto the canonical chemical tag."""
        mapping = {"CHEMICAL": CHEMICAL_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_GENE_BIORED(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the BioRED corpus, keeping gene annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``biored``."""
        super().__init__(
            "biored",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map BioRED gene/gene-product mentions onto the canonical gene tag."""
        mapping = {"GeneOrGeneProduct": GENE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_CHEMICAL_BIORED(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the BioRED corpus, keeping chemical annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``biored``."""
        super().__init__(
            "biored",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map BioRED chemical entities onto the canonical chemical tag."""
        mapping = {"ChemicalEntity": CHEMICAL_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_DISEASE_BIORED(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the BioRED corpus, keeping disease annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``biored``."""
        super().__init__(
            "biored",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map BioRED disease/phenotype entities onto the canonical disease tag."""
        mapping = {"DiseaseOrPhenotypicFeature": DISEASE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_SPECIES_BIORED(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the BioRED corpus, keeping species annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``biored``."""
        super().__init__(
            "biored",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map BioRED organism entities onto the canonical species tag."""
        mapping = {"OrganismTaxon": SPECIES_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_CELL_LINE_BIORED(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the BioRED corpus, keeping cell line annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``biored``."""
        super().__init__(
            "biored",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map BioRED cell line entities onto the canonical cell line tag."""
        mapping = {"CellLine": CELL_LINE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_GENE_CPI(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the CPI corpus, keeping protein/gene annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``cpi``."""
        super().__init__(
            "cpi",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map CPI protein mentions onto the canonical gene tag."""
        mapping = {"protein": GENE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_CHEMICAL_CPI(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for the CPI corpus, keeping compound annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``cpi``."""
        super().__init__(
            "cpi",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map CPI compound mentions onto the canonical chemical tag."""
        mapping = {"compound": CHEMICAL_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_GENE_BIONLP_ST_2013_PC(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for BioNLP-ST 2013 Pathway Curation, gene annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``bionlp_st_2013_pc``."""
        super().__init__(
            "bionlp_st_2013_pc",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map gene products and complexes onto the canonical gene tag."""
        mapping = {"Gene_or_gene_product": GENE_TAG, "Complex": GENE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_CHEMICAL_BIONLP_ST_2013_PC(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for BioNLP-ST 2013 Pathway Curation, chemical annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``bionlp_st_2013_pc``."""
        super().__init__(
            "bionlp_st_2013_pc",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map simple chemicals onto the canonical chemical tag."""
        mapping = {"Simple_chemical": CHEMICAL_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_GENE_BIONLP_ST_2013_GE(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for BioNLP-ST 2013 Genia Event, gene annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``bionlp_st_2013_ge``."""
        super().__init__(
            "bionlp_st_2013_ge",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map protein mentions onto the canonical gene tag."""
        mapping = {"protein": GENE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_GENE_BIONLP_ST_2011_GE(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for BioNLP-ST 2011 Genia Event, gene annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``bionlp_st_2011_ge``."""
        super().__init__(
            "bionlp_st_2011_ge",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map protein mentions onto the canonical gene tag."""
        mapping = {"Protein": GENE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_GENE_BIONLP_ST_2011_ID(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for BioNLP-ST 2011 Infectious Diseases, gene annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``bionlp_st_2011_id``."""
        super().__init__(
            "bionlp_st_2011_id",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map protein mentions onto the canonical gene tag."""
        mapping = {"Protein": GENE_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_CHEMICAL_BIONLP_ST_2011_ID(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for BioNLP-ST 2011 Infectious Diseases, chemical annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``bionlp_st_2011_id``."""
        super().__init__(
            "bionlp_st_2011_id",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map chemical mentions onto the canonical chemical tag."""
        mapping = {"Chemical": CHEMICAL_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_SPECIES_BIONLP_ST_2011_ID(BIGBIO_NER_CORPUS):
    """HUNER-style adapter for BioNLP-ST 2011 Infectious Diseases, species annotations only."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        """Set up the BigBio adapter with the fixed dataset name ``bionlp_st_2011_id``."""
        super().__init__(
            "bionlp_st_2011_id",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        """Map organism mentions onto the canonical species tag."""
        mapping = {"Organism": SPECIES_TAG}
        return mapping

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        """Cache under the lower-cased class name instead of the dataset name."""
        return type(self).__name__.lower()
class HUNER_GENE_BIONLP_ST_2011_REL(BIGBIO_NER_CORPUS):
    """HUNER gene/protein view of the BioNLP Shared Task 2011 REL corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="bionlp_st_2011_rel",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Keep only protein mentions, normalized onto the harmonized gene tag.
        return dict(Protein=GENE_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_GENE_BIONLP_ST_2011_EPI(BIGBIO_NER_CORPUS):
    """HUNER gene/protein view of the BioNLP Shared Task 2011 EPI corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="bionlp_st_2011_epi",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Keep only protein mentions, normalized onto the harmonized gene tag.
        return dict(Protein=GENE_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_SPECIES_BIONLP_ST_2019_BB(BIGBIO_NER_CORPUS):
    """HUNER species view of the BioNLP Shared Task 2019 BB corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="bionlp_st_2019_bb",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # "Microorganism" annotations are normalized onto the harmonized species tag.
        return dict(Microorganism=SPECIES_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_GENE_BIOID(BIGBIO_NER_CORPUS):
    """HUNER gene/protein view of the BioID corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="bioid",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Both gene and protein annotations collapse onto the harmonized gene tag.
        return dict(gene=GENE_TAG, protein=GENE_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_CHEMICAL_BIOID(BIGBIO_NER_CORPUS):
    """HUNER chemical view of the BioID corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="bioid",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Keep only chemical mentions, normalized onto the harmonized chemical tag.
        return dict(chemical=CHEMICAL_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_SPECIES_BIOID(BIGBIO_NER_CORPUS):
    """HUNER species view of the BioID corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="bioid",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Keep only species mentions, normalized onto the harmonized species tag.
        return dict(species=SPECIES_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_CELL_LINE_BIOID(BIGBIO_NER_CORPUS):
    """HUNER cell-line view of the BioID corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="bioid",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # BioID "cell" annotations are normalized onto the harmonized cell-line tag.
        return dict(cell=CELL_LINE_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_GENE_GNORMPLUS(BIGBIO_NER_CORPUS):
    """HUNER gene view of the GNormPlus corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="gnormplus",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Both single-gene and gene-family annotations collapse onto the gene tag.
        return dict(Gene=GENE_TAG, FamilyName=GENE_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_GENE_PROGENE(BIGBIO_NER_CORPUS):
    """HUNER gene view of the ProGene corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # Special case for ProGene: BigBio currently ships the corpus as
        # cross-validation folds, so we always use the fold-0 splits and
        # deliberately ignore any user-supplied split names.
        super().__init__(
            dataset_name="progene",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name="split_0_train",
            dev_split_name="split_0_validation",
            test_split_name="split_0_test",
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # ProGene text annotations are normalized onto the harmonized gene tag.
        return dict(progene_text=GENE_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_CHEMICAL_NLM_CHEM(BIGBIO_NER_CORPUS):
    """HUNER chemical view of the NLM-Chem corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="nlmchem",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Keep only chemical mentions, normalized onto the harmonized chemical tag.
        return dict(Chemical=CHEMICAL_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_GENE_SETH_CORPUS(BIGBIO_NER_CORPUS):
    """HUNER gene view of the SETH corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="seth_corpus",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Keep only gene mentions, normalized onto the harmonized gene tag.
        return dict(Gene=GENE_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_GENE_TMVAR_V3(BIGBIO_NER_CORPUS):
    """HUNER gene view of the tmVar v3 corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="tmvar_v3",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # Keep only gene mentions, normalized onto the harmonized gene tag.
        return dict(Gene=GENE_TAG)

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_SPECIES_TMVAR_V3(BIGBIO_NER_CORPUS):
    """HUNER species view of the tmVar v3 corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="tmvar_v3",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # NOTE(review): the key is the literal string "['Species']" — this looks
        # like a stringified type list as emitted for this dataset; confirm
        # against the BigBio tmvar_v3 loader before "fixing" it.
        return {"['Species']": SPECIES_TAG}

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
class HUNER_CELL_LINE_TMVAR_V3(BIGBIO_NER_CORPUS):
    """HUNER cell-line view of the tmVar v3 corpus (via BigBio)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: Optional[SentenceSplitter] = None,
        train_split_name: Optional[str] = None,
        dev_split_name: Optional[str] = None,
        test_split_name: Optional[str] = None,
    ) -> None:
        # All loading logic lives in the BigBio base class; this subclass only
        # pins the dataset name and the entity-type mapping below.
        super().__init__(
            dataset_name="tmvar_v3",
            base_path=base_path,
            in_memory=in_memory,
            sentence_splitter=sentence_splitter,
            train_split_name=train_split_name,
            dev_split_name=dev_split_name,
            test_split_name=test_split_name,
        )

    def get_entity_type_mapping(self) -> Optional[Dict]:
        # NOTE(review): the key is the literal string "['CellLine']" — this looks
        # like a stringified type list as emitted for this dataset; confirm
        # against the BigBio tmvar_v3 loader before "fixing" it.
        return {"['CellLine']": CELL_LINE_TAG}

    def build_corpus_directory_name(self, dataset_name: str) -> str:
        # The cache directory is named after this concrete class.
        return type(self).__name__.lower()
import logging
import os
from pathlib import Path
from typing import List, Optional, Union
import flair
from flair.data import Corpus, DataPair, FlairDataset, Sentence, TextPair, _iter_dataset
from flair.datasets.base import find_train_dev_test_files
from flair.file_utils import cached_path, unpack_file, unzip_file
log = logging.getLogger("flair")
class ParallelTextCorpus(Corpus):
    def __init__(
        self,
        source_file: Union[str, Path],
        target_file: Union[str, Path],
        name: str,
        use_tokenizer: bool = True,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Instantiates a Corpus from a pair of line-aligned parallel text files.

        :param source_file: file containing one source sentence per line
        :param target_file: file containing the aligned target sentences
        :param name: name of the resulting corpus
        :param use_tokenizer: whether to tokenize the sentences
        :param max_tokens_per_doc: if set, shortens sentences to this maximum number of tokens
        :param max_chars_per_doc: if set, shortens sentences to this maximum number of characters
        :param in_memory: if True, keeps the dataset fully in memory
        :return: a Corpus with annotated train, dev and test data
        """
        self.in_memory = in_memory

        # the whole file pair becomes the training split; missing dev/test
        # splits are handled by the Corpus base class (via corpusargs)
        bilingual_train: FlairDataset = ParallelTextDataset(
            source_file,
            target_file,
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
        )

        super().__init__(bilingual_train, name=name, **corpusargs)

    def is_in_memory(self) -> bool:
        return self.in_memory
class OpusParallelCorpus(ParallelTextCorpus):
    def __init__(
        self,
        dataset: str,
        l1: str,
        l2: str,
        use_tokenizer: bool = True,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Instantiates a Parallel Corpus from OPUS.

        see http://opus.nlpl.eu/
        :param dataset: Name of the dataset (one of "tatoeba", "subtitles")
        :param l1: Language code of first language in pair ("en", "de", etc.)
        :param l2: Language code of second language in pair ("en", "de", etc.)
        :param use_tokenizer: Whether or not to use in-built tokenizer
        :param max_tokens_per_doc: If set, shortens sentences to this maximum number of tokens
        :param max_chars_per_doc: If set, shortens sentences to this maximum number of characters
        :param in_memory: If True, keeps dataset fully in memory
        :raises ValueError: if `dataset` is not one of the supported OPUS datasets
        """
        # OPUS publishes each pair under the alphabetically sorted language codes
        if l1 > l2:
            l1, l2 = l2, l1

        # fail fast on unsupported datasets: previously this only logged an
        # error and then crashed below with a NameError on `link`/`l1_file`
        supported_datasets = ["tatoeba", "subtitles"]
        if dataset not in supported_datasets:
            log.error(f"Dataset must be one of: {supported_datasets}")
            raise ValueError(f"Dataset must be one of: {supported_datasets}")

        # set download link and expected extracted file locations
        if dataset == "tatoeba":
            link = f"https://object.pouta.csc.fi/OPUS-Tatoeba/v20190709/moses/{l1}-{l2}.txt.zip"
            l1_file = flair.cache_root / "datasets" / dataset / f"{l1}-{l2}" / f"Tatoeba.{l1}-{l2}.{l1}"
            l2_file = flair.cache_root / "datasets" / dataset / f"{l1}-{l2}" / f"Tatoeba.{l1}-{l2}.{l2}"
        else:  # dataset == "subtitles"
            link = f"https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2018/moses/{l1}-{l2}.txt.zip"
            l1_file = flair.cache_root / "datasets" / dataset / f"{l1}-{l2}" / f"OpenSubtitles.{l1}-{l2}.{l1}"
            l2_file = flair.cache_root / "datasets" / dataset / f"{l1}-{l2}" / f"OpenSubtitles.{l1}-{l2}.{l2}"

        # download and unzip in file structure if necessary
        if not l1_file.exists():
            path = cached_path(link, Path("datasets") / dataset / f"{l1}-{l2}")
            unzip_file(path, flair.cache_root / Path("datasets") / dataset / f"{l1}-{l2}")

        # instantiate corpus
        super().__init__(
            l1_file,
            l2_file,
            name=f"{dataset}-{l1_file}-{l2_file}",
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            **corpusargs,
        )
class ParallelTextDataset(FlairDataset):
    def __init__(
        self,
        path_to_source: Union[str, Path],
        path_to_target: Union[str, Path],
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
    ) -> None:
        """Dataset of sentence pairs read from two line-aligned text files.

        :param path_to_source: file with one source sentence per line
        :param path_to_target: file with the aligned target sentences
        :param max_tokens_per_doc: if set, shortens sentences to this maximum number of tokens
        :param max_chars_per_doc: if set, shortens lines to this maximum number of characters
        :param use_tokenizer: whether to tokenize the sentences
        :param in_memory: if True, keeps parsed DataPair objects in memory;
            otherwise keeps raw lines and builds pairs lazily in __getitem__
        """
        path_to_source = Path(path_to_source)
        path_to_target = Path(path_to_target)

        assert path_to_source.exists()
        assert path_to_target.exists()

        self.in_memory = in_memory
        self.use_tokenizer = use_tokenizer
        self.max_tokens_per_doc = max_tokens_per_doc

        self.total_sentence_count: int = 0

        if self.in_memory:
            self.bi_sentences: List[DataPair] = []
        else:
            self.source_lines: List[str] = []
            self.target_lines: List[str] = []

        with open(str(path_to_source), encoding="utf-8") as source_file, open(
            str(path_to_target), encoding="utf-8"
        ) as target_file:
            source_line = source_file.readline()
            target_line = target_file.readline()

            # BUGFIX: process the pair that was already read, THEN advance.
            # The previous version advanced at the top of the loop body, which
            # silently discarded the first sentence pair of every file.
            while source_line and target_line:
                # skip pairs where either side is blank
                if source_line.strip() != "" and target_line.strip() != "":
                    if max_chars_per_doc > 0:
                        source_line = source_line[:max_chars_per_doc]
                        target_line = target_line[:max_chars_per_doc]

                    if self.in_memory:
                        bi_sentence = self._make_bi_sentence(source_line, target_line)
                        self.bi_sentences.append(bi_sentence)
                    else:
                        self.source_lines.append(source_line)
                        self.target_lines.append(target_line)

                    self.total_sentence_count += 1

                source_line = source_file.readline()
                target_line = target_file.readline()

    def _make_bi_sentence(self, source_line: str, target_line: str):
        """Build a TextPair of tokenized (and optionally truncated) sentences."""
        source_sentence = Sentence(source_line, use_tokenizer=self.use_tokenizer)
        target_sentence = Sentence(target_line, use_tokenizer=self.use_tokenizer)

        if self.max_tokens_per_doc > 0:
            source_sentence.tokens = source_sentence.tokens[: self.max_tokens_per_doc]
            target_sentence.tokens = target_sentence.tokens[: self.max_tokens_per_doc]

        return TextPair(source_sentence, target_sentence)

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> DataPair:
        if self.in_memory:
            return self.bi_sentences[index]
        else:
            # parse lazily from the stored raw lines
            return self._make_bi_sentence(self.source_lines[index], self.target_lines[index])

    def is_in_memory(self) -> bool:
        return self.in_memory
class DataPairCorpus(Corpus):
    def __init__(
        self,
        data_folder: Union[str, Path],
        columns: List[int] = [0, 1, 2],
        train_file=None,
        test_file=None,
        dev_file=None,
        use_tokenizer: bool = True,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        in_memory: bool = True,
        label_type: Optional[str] = None,
        autofind_splits=True,
        sample_missing_splits: bool = True,
        skip_first_line: bool = False,
        separator: str = "\t",
        encoding: str = "utf-8",
    ) -> None:
        r"""Corpus for tasks involving pairs of sentences or paragraphs.

        Each line of the data files must contain one column for the first
        sentence/paragraph, one for the second, and one for the label,
        separated by `separator` (default: '\t').

        :param data_folder: base folder with the task data
        :param columns: column indices of first sentence, second sentence and label (default [0,1,2])
        :param train_file: the name of the train file
        :param test_file: the name of the test file; if None, sampled from train when sample_missing_splits is true
        :param dev_file: the name of the dev file; if None, sampled from train when sample_missing_splits is true
        :param use_tokenizer: whether to use the in-built tokenizer
        :param max_tokens_per_doc: if set, shortens sentences to this maximum number of tokens
        :param max_chars_per_doc: if set, shortens sentences to this maximum number of characters
        :param in_memory: if True, keeps DataPair objects in memory; otherwise keeps raw strings
        :param label_type: name of the label of the data pairs
        :param autofind_splits: if True, train/test/dev files are identified automatically in data_folder
        :param sample_missing_splits: if True, missing splits are sampled from the available data
        :param skip_first_line: if True, the first line of each data file is ignored (header)
        :param separator: column separator in the data files
        :param encoding: encoding of the data files
        :return: a Corpus with annotated train, dev and test data
        """
        # locate the split files, possibly automatically, inside data_folder
        dev_file, test_file, train_file = find_train_dev_test_files(
            data_folder,
            dev_file,
            test_file,
            train_file,
            autofind_splits=autofind_splits,
        )

        def make_split(split_file) -> Optional[DataPairDataset]:
            # one DataPairDataset per available split file, None otherwise
            if split_file is None:
                return None
            return DataPairDataset(
                split_file,
                columns=columns,
                use_tokenizer=use_tokenizer,
                max_tokens_per_doc=max_tokens_per_doc,
                max_chars_per_doc=max_chars_per_doc,
                in_memory=in_memory,
                label_type=label_type,
                skip_first_line=skip_first_line,
                separator=separator,
                encoding=encoding,
            )

        # preserve the original construction order: train, then test, then dev
        train = make_split(train_file)
        test = make_split(test_file)
        dev = make_split(dev_file)

        super().__init__(
            train,
            dev,
            test,
            sample_missing_splits=sample_missing_splits,
            name=str(data_folder),
        )
class DataPairDataset(FlairDataset):
    def __init__(
        self,
        path_to_data: Union[str, Path],
        columns: List[int] = [0, 1, 2],
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        label_type: Optional[str] = None,
        skip_first_line: bool = False,
        separator: str = "\t",
        encoding: str = "utf-8",
        label: bool = True,
    ) -> None:
        r"""Creates a Dataset for pairs of sentences/paragraphs.

        The file needs to be in a column format,
        where each line has a column for the first sentence/paragraph, the second sentence/paragraph and the label
        seperated by e.g. '\t' (just like in the glue RTE-dataset https://gluebenchmark.com/tasks) .
        For each data pair we create a flair.data.DataPair object.

        :param path_to_data: path to the data file
        :param columns: list of integers that indicate the respective columns. The first entry is the column
        for the first sentence, the second for the second sentence and the third for the label. Default [0,1,2]
        :param max_tokens_per_doc: If set, shortens sentences to this maximum number of tokens
        :param max_chars_per_doc: If set, shortens sentences to this maximum number of characters
        :param use_tokenizer: Whether or not to use in-built tokenizer
        :param in_memory: If True, data will be saved in list of flair.data.DataPair objects, other wise we use lists with simple strings which needs less space
        :param label_type: Name of the label of the data pairs
        :param skip_first_line: If True, first line of data file will be ignored (e.g. a header row)
        :param separator: Separator between columns in the data file
        :param encoding: Encoding of the data file
        :param label: If False, the dataset expects unlabeled data
        """
        path_to_data = Path(path_to_data)

        # stop if file does not exist
        assert path_to_data.exists()

        self.in_memory = in_memory

        self.use_tokenizer = use_tokenizer

        self.max_tokens_per_doc = max_tokens_per_doc

        self.label = label

        # label_type is required even for unlabeled data (used as the typename
        # when labels are attached later, e.g. at prediction time)
        assert label_type is not None
        self.label_type = label_type

        self.total_data_count: int = 0

        # in-memory mode stores parsed DataPair objects; otherwise only the raw
        # column strings are kept and pairs are built lazily in __getitem__
        if self.in_memory:
            self.data_pairs: List[DataPair] = []
        else:
            self.first_elements: List[str] = []
            self.second_elements: List[str] = []
            self.labels: List[Optional[str]] = []

        with open(str(path_to_data), encoding=encoding) as source_file:
            source_line = source_file.readline()

            if skip_first_line:
                source_line = source_file.readline()

            # read-process-advance loop: the line read above is processed first,
            # and the next line is fetched at the end of each iteration
            while source_line:
                source_line_list = source_line.strip().split(separator)

                first_element = source_line_list[columns[0]]
                second_element = source_line_list[columns[1]]

                if self.label:
                    pair_label: Optional[str] = source_line_list[columns[2]]
                else:
                    pair_label = None

                if max_chars_per_doc > 0:
                    first_element = first_element[:max_chars_per_doc]
                    second_element = second_element[:max_chars_per_doc]

                if self.in_memory:
                    data_pair = self._make_data_pair(first_element, second_element, pair_label)
                    self.data_pairs.append(data_pair)
                else:
                    self.first_elements.append(first_element)
                    self.second_elements.append(second_element)
                    if self.label:
                        self.labels.append(pair_label)

                self.total_data_count += 1

                source_line = source_file.readline()

    # create a DataPair object from strings
    def _make_data_pair(self, first_element: str, second_element: str, label: Optional[str] = None):
        first_sentence = Sentence(first_element, use_tokenizer=self.use_tokenizer)
        second_sentence = Sentence(second_element, use_tokenizer=self.use_tokenizer)

        if self.max_tokens_per_doc > 0:
            first_sentence.tokens = first_sentence.tokens[: self.max_tokens_per_doc]
            second_sentence.tokens = second_sentence.tokens[: self.max_tokens_per_doc]

        data_pair = TextPair(first_sentence, second_sentence)

        # only truthy labels are attached; None (unlabeled mode) is skipped
        if label:
            data_pair.add_label(typename=self.label_type, value=label)

        return data_pair

    def is_in_memory(self) -> bool:
        return self.in_memory

    def __len__(self) -> int:
        return self.total_data_count

    # if in_memory is True we return a datapair, otherwise we create one from the lists of strings
    def __getitem__(self, index: int = 0) -> DataPair:
        if self.in_memory:
            return self.data_pairs[index]
        elif self.label:
            return self._make_data_pair(
                self.first_elements[index],
                self.second_elements[index],
                self.labels[index],
            )
        else:
            return self._make_data_pair(self.first_elements[index], self.second_elements[index])
class GLUE_RTE(DataPairCorpus):
    def __init__(
        self,
        label_type="entailment",
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        sample_missing_splits: bool = True,
    ) -> None:
        """Creates a DataPairCorpus for the GLUE Recognizing Textual Entailment (RTE) task.

        See https://gluebenchmark.com/tasks. In addition to the train/dev/test
        splits, ``self.eval_dataset`` holds the unlabeled GLUE test file, which
        can be used to produce a benchmark submission.
        """
        data_folder = (flair.cache_root / "datasets" if not base_path else Path(base_path)) / "glue"
        train_file = data_folder / "RTE/train.tsv"

        # download and unpack the data if it is not cached yet; the unlabeled
        # GLUE test file is renamed so it is not mistaken for a labeled split
        if not train_file.is_file():
            zip_path = cached_path(
                "https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
                Path("datasets") / "glue",
            )
            unpack_file(zip_path, data_folder, mode="zip", keep=False)

            os.rename(
                str(data_folder / "RTE/test.tsv"),
                str(data_folder / "RTE/eval_dataset.tsv"),
            )

        super().__init__(
            data_folder / "RTE",
            label_type=label_type,
            columns=[1, 2, 3],
            skip_first_line=True,
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            sample_missing_splits=sample_missing_splits,
        )

        # unlabeled GLUE test set, for producing submission files
        self.eval_dataset = DataPairDataset(
            data_folder / "RTE/eval_dataset.tsv",
            label_type=label_type,
            columns=[1, 2, 3],
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            skip_first_line=True,
            label=False,
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Write predictions on ``self.eval_dataset`` to ``RTE.tsv`` in the GLUE submission format.

        Run ``classifier.predict(corpus.eval_dataset, label_name='textual_entailment')`` first.
        """
        out_path = Path(folder_path) / "RTE.tsv"

        with open(out_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            datapoint: DataPair
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                prediction = datapoint.get_labels("textual_entailment")[0].value
                tsv_file.write(f"{index}\t{prediction}\n")
class GLUE_MNLI(DataPairCorpus):
    def __init__(
        self,
        label_type="entailment",
        evaluate_on_matched: bool = True,
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        sample_missing_splits: bool = True,
    ) -> None:
        """Creates a DataPairCorpus for the Multi-Genre Natural Language Inference Corpus (MNLI) from GLUE benchmark.

        see https://gluebenchmark.com/tasks
        Entailment annotations are: entailment, contradiction, neutral.
        This corpus includes two dev sets matched/mismatched and two unlabeled test sets: eval_dataset_matched,
        eval_dataset_mismatched. `evaluate_on_matched` selects which dev/test variant this corpus uses.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = "glue"
        data_folder = base_path / dataset_name
        data_file = data_folder / "MNLI/train.tsv"
        # if data is not downloaded yet, download it
        if not data_file.is_file():
            # get the zip file
            zipped_data_path = cached_path(
                "https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
                Path("datasets") / dataset_name,
            )
            unpack_file(zipped_data_path, data_folder, mode="zip", keep=False)
            # reorder dev datasets to have same columns as in train set: 8, 9, and 11
            # dev sets include 5 different annotations but we will only keep the gold label
            for dev_filename in ["dev_matched.tsv", "dev_mismatched.tsv"]:
                # rewrite each dev file via a temp copy: keep columns 0-10 and
                # append the gold label (field 15), dropping the per-annotator votes
                temp_file = str("temp_" + dev_filename)
                os.rename(
                    str(data_folder / "MNLI" / dev_filename),
                    str(data_folder / "MNLI" / temp_file),
                )
                with open(data_folder / "MNLI" / dev_filename, "a", encoding="utf-8") as out_file, open(
                    data_folder / "MNLI" / temp_file, encoding="utf-8"
                ) as in_file:
                    for line in in_file:
                        fields = line.split("\t")
                        reordered_columns = "\t".join(fields[column_id] for column_id in range(11))
                        # field 15 is the last column, so it still carries the line's newline
                        reordered_columns += "\t" + fields[15]
                        out_file.write(reordered_columns)
                os.remove(str(data_folder / "MNLI" / temp_file))
            # rename test file to eval_dataset, since it has no labels
            os.rename(
                str(data_folder / "MNLI/test_matched.tsv"),
                str(data_folder / "MNLI/eval_dataset_matched.tsv"),
            )
            os.rename(
                str(data_folder / "MNLI/test_mismatched.tsv"),
                str(data_folder / "MNLI/eval_dataset_mismatched.tsv"),
            )
        # select the matched or mismatched dev/eval variant
        matched_suffix = "matched" if evaluate_on_matched else "mismatched"
        dev_dataset = "dev_" + matched_suffix + ".tsv"
        eval_dataset = "eval_dataset_" + matched_suffix + ".tsv"
        self.evaluate_on_matched = evaluate_on_matched
        super().__init__(
            data_folder / "MNLI",
            train_file=data_file,
            dev_file=dev_dataset,
            label_type=label_type,
            columns=[8, 9, 11],
            skip_first_line=True,
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            sample_missing_splits=sample_missing_splits,
        )
        # unlabeled GLUE test set of the selected variant, for submission files
        self.eval_dataset = DataPairDataset(
            data_folder / "MNLI" / eval_dataset,
            label_type=label_type,
            columns=[8, 9, 11],
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            skip_first_line=True,
            label=False,
        )
    """
    This function creates a tsv file of the predictions of the eval_dataset (after calling
    classifier.predict(corpus.eval_dataset, label_name='textual_entailment')). The resulting file
    is called MNLI-m.tsv or MNLI-mm.tsv and is in the format required for the Glue Benchmark.
    """
    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        # output filename depends on which dev/test variant this corpus was built for
        folder_path = Path(folder_path)
        glue_eval_tsv = "MNLI-m.tsv" if self.evaluate_on_matched else "MNLI-mm.tsv"
        folder_path = folder_path / glue_eval_tsv
        with open(folder_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            datapoint: DataPair
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                label = datapoint.get_labels("textual_entailment")[0].value
                tsv_file.write(str(index) + "\t" + label + "\n")
class GLUE_MRPC(DataPairCorpus):
    def __init__(
        self,
        label_type="paraphrase",
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        sample_missing_splits: bool = True,
    ) -> None:
        """Creates a DataPairCorpus for the Microsoft Research Paraphrase Corpus (MRPC) from the GLUE benchmark.

        See https://gluebenchmark.com/tasks. MRPC ships annotated train and
        test sets; a dev set is sampled each time this corpus is created.
        """
        data_folder = (flair.cache_root / "datasets" if not base_path else Path(base_path)) / "glue"
        train_file = data_folder / "MRPC/train.tsv"

        # download the two source files and rename them to the train/test
        # split names expected by DataPairCorpus, unless already cached
        if not train_file.is_file():
            mrpc_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/"
            for original_filename in ["msr_paraphrase_train.txt", "msr_paraphrase_test.txt"]:
                cached_path(
                    f"{mrpc_path}{original_filename}",
                    Path("datasets") / "glue" / "MRPC",
                )

            os.rename(
                str(data_folder / "MRPC/msr_paraphrase_train.txt"),
                str(data_folder / "MRPC/train.tsv"),
            )
            os.rename(
                str(data_folder / "MRPC/msr_paraphrase_test.txt"),
                str(data_folder / "MRPC/test.tsv"),
            )

        super().__init__(
            data_folder / "MRPC",
            label_type=label_type,
            columns=[3, 4, 0],
            skip_first_line=True,
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            sample_missing_splits=sample_missing_splits,
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Write predictions on the test split to ``MRPC.tsv`` in the GLUE submission format.

        Run ``classifier.predict(corpus.test, label_name='paraphrase')`` first; the
        evaluation set used here is the same as the test split.
        """
        out_path = Path(folder_path) / "MRPC.tsv"

        with open(out_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            datapoint: DataPair
            for index, datapoint in enumerate(_iter_dataset(self.test)):
                prediction = datapoint.get_labels("paraphrase")[0].value
                tsv_file.write(f"{index}\t{prediction}\n")
class GLUE_QNLI(DataPairCorpus):
    def __init__(
        self,
        label_type="entailment",
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        sample_missing_splits: bool = True,
    ) -> None:
        """Creates a DataPairCorpus for the Question-answering Natural Language Inference dataset (QNLI) from GLUE.

        see https://gluebenchmark.com/tasks
        Additionally, to the Corpus we have an eval_dataset containing the test file of the Glue data.
        This file contains unlabeled test data to evaluate models on the Glue QNLI task.

        Args:
            label_type: Name under which the entailment label is stored.
            base_path: Folder in which the dataset is cached; auto-downloaded into the flair cache if None.
            max_tokens_per_doc: Truncate each text to this many tokens (-1 keeps everything).
            max_chars_per_doc: Truncate each text to this many characters (-1 keeps everything).
            use_tokenizer: Whether the texts are tokenized when loading.
            in_memory: If True, keeps dataset in memory giving speedups in training.
            sample_missing_splits: If True, missing splits are sampled from train.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        dataset_name = "glue"
        data_folder = base_path / dataset_name
        data_file = data_folder / "QNLI/train.tsv"

        # if data is not downloaded yet, download it
        if not data_file.is_file():
            # get the zip file
            zipped_data_path = cached_path(
                "https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
                Path("datasets") / dataset_name,
            )

            unpack_file(zipped_data_path, data_folder, mode="zip", keep=False)

            # rename test file to eval_dataset, since it has no labels
            os.rename(
                str(data_folder / "QNLI/test.tsv"),
                str(data_folder / "QNLI/eval_dataset.tsv"),
            )

        super().__init__(
            data_folder / "QNLI",
            label_type=label_type,
            columns=[1, 2, 3],
            skip_first_line=True,
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            sample_missing_splits=sample_missing_splits,
        )

        self.eval_dataset = DataPairDataset(
            data_folder / "QNLI/eval_dataset.tsv",
            label_type=label_type,
            columns=[1, 2, 3],
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            skip_first_line=True,
            label=False,
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create a tsv file of the predictions of the eval_dataset.

        After calling classifier.predict(corpus.eval_dataset, label_name='textual_entailment'),
        this function writes a file called QNLI.tsv in the format required for submission to
        the Glue Benchmark. (Previously this description lived in a stray module-level string
        before the method, where it was a no-op statement; it is now a proper docstring.)
        """
        folder_path = Path(folder_path)
        folder_path = folder_path / "QNLI.tsv"

        with open(folder_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            datapoint: DataPair
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                label = datapoint.get_labels("textual_entailment")[0].value
                tsv_file.write(str(index) + "\t" + label + "\n")
class GLUE_QQP(DataPairCorpus):
    def __init__(
        self,
        label_type="paraphrase",
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        sample_missing_splits: bool = True,
    ) -> None:
        """Creates a Quora Question Pairs (QQP) Corpus from the Glue benchmark.

        See https://gluebenchmark.com/tasks
        The task is to determine whether a pair of questions are semantically equivalent.
        Additionaly to the Corpus we have a eval_dataset containing the test file of the Glue data.
        This file contains unlabeled test data to evaluate models on the Glue QQP task.

        Args:
            label_type: Name under which the paraphrase label is stored.
            base_path: Folder in which the dataset is cached; auto-downloaded into the flair cache if None.
            max_tokens_per_doc: Truncate each text to this many tokens (-1 keeps everything).
            max_chars_per_doc: Truncate each text to this many characters (-1 keeps everything).
            use_tokenizer: Whether the texts are tokenized when loading.
            in_memory: If True, keeps dataset in memory giving speedups in training.
            sample_missing_splits: If True, missing splits are sampled from train.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        dataset_name = "glue"
        data_folder = base_path / dataset_name
        data_file = data_folder / "QQP/train.tsv"

        # if data is not downloaded yet, download it
        if not data_file.is_file():
            # get the zip file
            zipped_data_path = cached_path(
                "https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
                Path("datasets") / dataset_name,
            )

            unpack_file(zipped_data_path, data_folder, mode="zip", keep=False)

            # rename test file to eval_dataset, since it has no labels
            os.rename(
                str(data_folder / "QQP/test.tsv"),
                str(data_folder / "QQP/eval_dataset.tsv"),
            )

        super().__init__(
            data_folder / "QQP",
            label_type=label_type,
            columns=[3, 4, 5],
            skip_first_line=True,
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            sample_missing_splits=sample_missing_splits,
        )

        self.eval_dataset = DataPairDataset(
            data_folder / "QQP/eval_dataset.tsv",
            label_type=label_type,
            columns=[1, 2, 0],
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            skip_first_line=True,
            label=False,
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create a tsv file of the predictions of the eval_dataset.

        After calling classifier.predict(corpus.eval_dataset, label_name='paraphrase'), this
        function writes a file called QQP.tsv in the format required for submission to the
        Glue Benchmark. (Previously this description lived in a stray module-level string
        before the method, where it was a no-op statement; it is now a proper docstring.)
        """
        folder_path = Path(folder_path)
        folder_path = folder_path / "QQP.tsv"

        with open(folder_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            datapoint: DataPair
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                label = datapoint.get_labels("paraphrase")[0].value
                tsv_file.write(str(index) + "\t" + label + "\n")
class GLUE_WNLI(DataPairCorpus):
    def __init__(
        self,
        label_type="entailment",
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        sample_missing_splits: bool = True,
    ) -> None:
        """Creates a Winograd Schema Challenge Corpus formated as Natural Language Inference task (WNLI).

        The task is to predict if the sentence with the pronoun substituted is entailed by the original sentence.
        Additionaly to the Corpus we have a eval_dataset containing the test file of the Glue data.
        This file contains unlabeled test data to evaluate models on the Glue WNLI task.

        Args:
            label_type: Name under which the entailment label is stored.
            base_path: Folder in which the dataset is cached; auto-downloaded into the flair cache if None.
            max_tokens_per_doc: Truncate each text to this many tokens (-1 keeps everything).
            max_chars_per_doc: Truncate each text to this many characters (-1 keeps everything).
            use_tokenizer: Whether the texts are tokenized when loading.
            in_memory: If True, keeps dataset in memory giving speedups in training.
            sample_missing_splits: If True, missing splits are sampled from train.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        dataset_name = "glue"
        data_folder = base_path / dataset_name
        data_file = data_folder / "WNLI/train.tsv"

        # if data is not downloaded yet, download it
        if not data_file.is_file():
            # get the zip file
            zipped_data_path = cached_path(
                "https://dl.fbaipublicfiles.com/glue/data/WNLI.zip",
                Path("datasets") / dataset_name,
            )

            unpack_file(zipped_data_path, data_folder, mode="zip", keep=False)

            # rename test file to eval_dataset, since it has no labels
            os.rename(
                str(data_folder / "WNLI/test.tsv"),
                str(data_folder / "WNLI/eval_dataset.tsv"),
            )

        super().__init__(
            data_folder / "WNLI",
            label_type=label_type,
            columns=[1, 2, 3],
            skip_first_line=True,
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            sample_missing_splits=sample_missing_splits,
        )

        self.eval_dataset = DataPairDataset(
            data_folder / "WNLI/eval_dataset.tsv",
            label_type=label_type,
            columns=[1, 2, 3],
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            skip_first_line=True,
            label=False,
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create a tsv file of the predictions of the eval_dataset.

        After calling classifier.predict(corpus.eval_dataset, label_name='entailment'), this
        function writes a file called WNLI.tsv in the format required for submission to the
        Glue Benchmark. (The stray module-level string that previously carried this text
        named the label 'textual_entailment', which does not match the "entailment" label
        this method actually reads; the docstring is corrected accordingly.)
        """
        folder_path = Path(folder_path)
        folder_path = folder_path / "WNLI.tsv"

        with open(folder_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            datapoint: DataPair
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                tsv_file.write(str(index) + "\t" + datapoint.get_labels("entailment")[0].value + "\n")
class GLUE_STSB(DataPairCorpus):
    def __init__(
        self,
        label_type="similarity",
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        sample_missing_splits: bool = True,
    ) -> None:
        """Creates a DataPairCorpus for the Semantic Textual Similarity Benchmark (STS-B) from the Glue benchmark.

        See https://gluebenchmark.com/tasks
        Additionally to the Corpus we have an eval_dataset containing the test file of the
        Glue data. This file contains unlabeled test data to evaluate models on the Glue
        STS-B task.

        Args:
            label_type: Name under which the similarity score is stored.
            base_path: Folder in which the dataset is cached; auto-downloaded into the flair cache if None.
            max_tokens_per_doc: Truncate each text to this many tokens (-1 keeps everything).
            max_chars_per_doc: Truncate each text to this many characters (-1 keeps everything).
            use_tokenizer: Whether the texts are tokenized when loading.
            in_memory: If True, keeps dataset in memory giving speedups in training.
            sample_missing_splits: If True, missing splits are sampled from train.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        dataset_name = "glue"
        data_folder = base_path / dataset_name
        data_file = data_folder / "STS-B" / "train.tsv"

        # if data is not downloaded yet, download it
        if not data_file.is_file():
            # get the zip file
            zipped_data_path = cached_path(
                "https://dl.fbaipublicfiles.com/glue/data/STS-B.zip",
                Path("datasets") / dataset_name,
            )

            unpack_file(zipped_data_path, data_folder, mode="zip", keep=False)

            # rename test file to eval_dataset, since it has no labels
            # (bug fix: the path was previously built as data_folder / data_folder / "STS-B",
            # duplicating the folder; it only worked by accident because joining an absolute
            # path on the right discards the left operand)
            os.rename(
                str(data_folder / "STS-B" / "test.tsv"),
                str(data_folder / "STS-B" / "eval_dataset.tsv"),
            )

        super().__init__(
            data_folder / "STS-B",
            label_type=label_type,
            columns=[7, 8, 9],
            skip_first_line=True,
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            sample_missing_splits=sample_missing_splits,
        )

        self.eval_dataset = DataPairDataset(
            data_folder / "STS-B" / "eval_dataset.tsv",
            label_type=label_type,
            columns=[7, 8, 9],
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            skip_first_line=True,
            label=False,
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create a tsv file of the predictions of the eval_dataset.

        After calling classifier.predict(corpus.eval_dataset, label_name='similarity'), this function can be used
        to produce a file called STS-B.tsv suitable for submission to the Glue Benchmark.
        """
        folder_path = Path(folder_path)
        folder_path = folder_path / "STS-B.tsv"

        with open(folder_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            datapoint: DataPair
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                # bug fix: read the 'similarity' label set by predict() (matching label_type
                # above); this previously queried 'entailment', which is never set for STS-B
                tsv_file.write(str(index) + "\t" + datapoint.get_labels("similarity")[0].value + "\n")
class SUPERGLUE_RTE(DataPairCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        use_tokenizer=True,
        in_memory: bool = True,
        sample_missing_splits: bool = True,
    ) -> None:
        """Creates a DataPairCorpus for the SuperGlue Recognizing Textual Entailment (RTE) data.

        See https://super.gluebenchmark.com/tasks
        Additionaly to the Corpus we have a eval_dataset containing the test file of the SuperGlue data.
        This file contains unlabeled test data to evaluate models on the SuperGlue RTE task.

        Args:
            base_path: Folder in which the dataset is cached; auto-downloaded into the flair cache if None.
            max_tokens_per_doc: Truncate each text to this many tokens (-1 keeps everything).
            max_chars_per_doc: Truncate each text to this many characters (-1 keeps everything).
            use_tokenizer: Whether the texts are tokenized when loading.
            in_memory: If True, keeps dataset in memory giving speedups in training.
            sample_missing_splits: If True, missing splits are sampled from train.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        dataset_name = "superglue"
        data_folder = base_path / dataset_name
        data_file = data_folder / "RTE/train.tsv"

        # if data not downloaded yet, download it
        if not data_file.is_file():
            # get the zip file
            zipped_data_path = cached_path(
                "https://dl.fbaipublicfiles.com/glue/superglue/data/v2/RTE.zip",
                Path("datasets") / dataset_name,
            )

            unpack_file(zipped_data_path, data_folder, mode="zip", keep=False)

            # the downloaded files have json format, we transform them to tsv
            rte_jsonl_to_tsv(data_folder / "RTE/train.jsonl", remove=True)
            rte_jsonl_to_tsv(data_folder / "RTE/test.jsonl", remove=True, label=False)
            rte_jsonl_to_tsv(data_folder / "RTE/val.jsonl", remove=True)

            os.rename(str(data_folder / "RTE/val.tsv"), str(data_folder / "RTE/dev.tsv"))
            os.rename(
                str(data_folder / "RTE/test.tsv"),
                str(data_folder / "RTE/eval_dataset.tsv"),
            )

        super().__init__(
            data_folder / "RTE",
            columns=[0, 1, 2],
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            label_type="textual_entailment",
            sample_missing_splits=sample_missing_splits,
        )

        self.eval_dataset = DataPairDataset(
            data_folder / "RTE/eval_dataset.tsv",
            label_type="textual_entailment",
            columns=[0, 1, 2],
            use_tokenizer=use_tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            skip_first_line=False,
            label=False,
        )

    def jsonl_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create a JSONL file of the predictions of the eval_dataset.

        After calling classifier.predict(corpus.eval_dataset, label_name='textual_entailment'),
        this function writes a file called RTE.jsonl in the form required for submission to the
        SuperGlue Benchmark.
        """
        import json

        folder_path = Path(folder_path)
        folder_path = folder_path / "RTE.jsonl"

        with open(folder_path, mode="w") as jsonl_file:
            datapoint: DataPair
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                entry = {
                    "idx": index,
                    "label": datapoint.get_labels("textual_entailment")[0].value,
                }
                # bug fix: serialize with json.dumps -- str(entry) produces Python dict
                # repr with single quotes, which is not valid JSON for a .jsonl submission
                jsonl_file.write(json.dumps(entry) + "\n")
# Helper that converts a Recognizing Textual Entailment JSONL file into tsv format
def rte_jsonl_to_tsv(
    file_path: Union[str, Path],
    label: bool = True,
    remove: bool = False,
    encoding="utf-8",
):
    """Write a .tsv version of the RTE .jsonl file at *file_path* (same name, .tsv suffix).

    Each output row is "premise<TAB>hypothesis", extended by a third column with the
    gold label when *label* is True. When *remove* is True the source .jsonl file is
    deleted after conversion.
    """
    import json

    out_path = os.path.splitext(file_path)[0] + ".tsv"

    with open(file_path, encoding=encoding) as src, open(out_path, "w", encoding=encoding) as dst:
        for raw_line in src:
            record = json.loads(raw_line)
            columns = [record["premise"], record["hypothesis"]]
            if label:
                columns.append(record["label"])
            dst.write("\t".join(columns) + "\n")

    # remove json file
    if remove:
        os.remove(file_path)
| 44,055 | 37.342907 | 169 | py |
flair | flair-master/flair/datasets/entity_linking.py | import csv
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Union
import requests
import flair
from flair.data import Corpus, MultiCorpus, Sentence
from flair.datasets.sequence_labeling import ColumnCorpus, MultiFileColumnCorpus
from flair.file_utils import cached_path, unpack_file
from flair.splitter import SegtokSentenceSplitter, SentenceSplitter
log = logging.getLogger("flair")
class ZELDA(MultiFileColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        column_format={0: "text", 2: "nel"},
        **corpusargs,
    ) -> None:
        """Initialize ZELDA Entity Linking corpus.

        introduced in "ZELDA: A Comprehensive Benchmark for Supervised Entity Disambiguation" (Milich and Akbik, 2023).
        When calling the constructor for the first time, the dataset gets automatically downloaded.

        Parameters
        ----------
        base_path: Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        in_memory: bool
            If True, keeps dataset in memory giving speedups in training.
        column_format: Dict[int, str]
            The column-format to specify which columns correspond to the text or label types.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # corpus lives in a folder named after the lowercased class name inside the cache root
        data_folder = base_path / self.__class__.__name__.lower()

        # download and unpack the archive on first use (presence of the parsed
        # train split is used as the "already downloaded" marker)
        train_split = data_folder / "train_data" / "zelda_train.conll"
        if not train_split.exists():
            archive_path = cached_path(
                "https://nlp.informatik.hu-berlin.de/resources/datasets/zelda/zelda.zip", base_path
            )
            unpack_file(archive_path, base_path, "zip", False)

        # the nine evaluation splits shipped with the corpus, in fixed order
        conll_test_dir = data_folder / "test_data" / "conll"
        split_names = [
            "aida-b",
            "cweb",
            "tweeki",
            "reddit-comments",
            "reddit-posts",
            "shadowlinks-top",
            "shadowlinks-shadow",
            "shadowlinks-tail",
            "wned-wiki",
        ]
        test_split_paths = [conll_test_dir / f"test_{name}.conll" for name in split_names]

        # init corpus
        super().__init__(
            train_files=[train_split],
            test_files=test_split_paths,
            column_format=column_format,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            comment_symbol="# ",
            **corpusargs,
        )
class NEL_ENGLISH_AQUAINT(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        agreement_threshold: float = 0.5,
        sentence_splitter: SentenceSplitter = SegtokSentenceSplitter(),
        **corpusargs,
    ) -> None:
        """Initialize Aquaint Entity Linking corpus.

        introduced in: D. Milne and I. H. Witten. Learning to link with wikipedia
        https://www.cms.waikato.ac.nz/~ihw/papers/08-DNM-IHW-LearningToLinkWithWikipedia.pdf . If you call the constructor the first
        time the dataset gets automatically downloaded and transformed in tab-separated column format (aquaint.txt).

        Parameters
        ----------
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        in_memory: bool
            If True, keeps dataset in memory giving speedups in training.
        agreement_threshold: float
            Some link annotations come with an agreement_score representing the agreement from the human annotators. The score ranges from lowest 0.2
            to highest 1.0. The lower the score, the less "important" is the entity because fewer annotators thought it was worth linking.
            Default is 0.5 which means the majority of annotators must have annoteted the respective entity mention.
        sentence_splitter: `SentenceSplitter`
            The sentencesplitter that is used to split the articles into sentences.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        self.agreement_threshold = agreement_threshold
        # this dataset name (includes the splitter class so differently-split variants are cached separately)
        dataset_name = self.__class__.__name__.lower() + "_" + type(sentence_splitter).__name__
        # default dataset folder is the cache root
        data_folder = base_path / dataset_name
        aquaint_el_path = "https://www.nzdl.org/wikification/data/wikifiedStories.zip"
        corpus_file_name = "aquaint.txt"
        parsed_dataset = data_folder / corpus_file_name
        # download and parse data if necessary
        if not parsed_dataset.exists():
            aquaint_el_zip = cached_path(f"{aquaint_el_path}", Path("datasets") / dataset_name)
            unpack_file(aquaint_el_zip, data_folder, "zip", False)
            # parse all downloaded .htm articles into one two-column (token, label) file
            try:
                with open(parsed_dataset, "w", encoding="utf-8") as txt_out:
                    # iterate over all html files
                    for file in os.listdir(data_folder):
                        if not file.endswith(".htm"):
                            continue
                        with open(str(data_folder / file), encoding="utf-8") as txt_in:
                            text = txt_in.read()
                            # get rid of html syntax, we only need the text
                            strings = text.split("<p> ")
                            strings[0] = strings[0].split('<h1 id="header">')[1][:-7]
                            for i in range(1, len(strings) - 1):
                                strings[i] = strings[i][:-7]
                            strings[-1] = strings[-1][:-23]
                            # between all documents we write a separator symbol
                            txt_out.write("-DOCSTART-\n\n")
                            for string in strings:
                                # skip empty strings
                                if not string:
                                    continue
                                # process the annotation format in the text and collect triples (begin_mention, length_mention, wikiname)
                                # annotations look like [[wikiname]], [[wikiname|surface_form]] or [[wikiname|surface_form|agreement_score]]
                                indices = []
                                lengths = []
                                wikinames = []
                                current_entity = string.find("[[")  # each annotation starts with '[['
                                while current_entity != -1:
                                    wikiname = ""
                                    surface_form = ""
                                    j = current_entity + 2
                                    while string[j] not in ["]", "|"]:
                                        wikiname += string[j]
                                        j += 1
                                    if string[j] == "]":  # entity mention ends, i.e. looks like this [[wikiname]]
                                        surface_form = wikiname  # in this case entity mention = wiki-page name
                                    else:  # string[j] == '|'
                                        j += 1
                                        while string[j] not in ["]", "|"]:
                                            surface_form += string[j]
                                            j += 1
                                        if (
                                            string[j] == "|"
                                        ):  # entity has a score, i.e. looks like this [[wikiname|surface_form|agreement_score]]
                                            agreement_score = float(string[j + 1 : j + 4])
                                            j += 4  # points to first ']' of entity now
                                            if agreement_score < self.agreement_threshold:  # discard entity
                                                string = string[:current_entity] + surface_form + string[j + 2 :]
                                                current_entity = string.find("[[")
                                                continue
                                    # replace [[wikiname|surface_form|score]] by surface_form and save index, length and wikiname of mention
                                    indices.append(current_entity)
                                    lengths.append(len(surface_form))
                                    # normalize to a Wikipedia page title: capitalize first letter, spaces -> underscores
                                    wikinames.append(wikiname[0].upper() + wikiname.replace(" ", "_")[1:])
                                    string = string[:current_entity] + surface_form + string[j + 2 :]
                                    current_entity = string.find("[[")
                                # sentence splitting and tokenization
                                sentences = sentence_splitter.split(string)
                                sentence_offsets = [sentence.start_position or 0 for sentence in sentences]
                                # iterate through all annotations and add to corresponding tokens
                                for mention_start, mention_length, wikiname in zip(indices, lengths, wikinames):
                                    # find sentence to which annotation belongs
                                    sentence_index = 0
                                    for i in range(1, len(sentences)):
                                        if mention_start < sentence_offsets[i]:
                                            break
                                        else:
                                            sentence_index += 1
                                    # position within corresponding sentence
                                    mention_start -= sentence_offsets[sentence_index]
                                    mention_end = mention_start + mention_length
                                    # set annotation for tokens of entity mention (BIO scheme: B- on first token, I- after)
                                    first = True
                                    for token in sentences[sentence_index].tokens:
                                        assert token.start_position is not None
                                        assert token.end_position is not None
                                        if (
                                            token.start_position >= mention_start and token.end_position <= mention_end
                                        ):  # token belongs to entity mention
                                            if first:
                                                token.set_label(typename="nel", value="B-" + wikiname)
                                                first = False
                                            else:
                                                token.set_label(typename="nel", value="I-" + wikiname)
                                # write to out-file in column format
                                for sentence in sentences:
                                    for token in sentence.tokens:
                                        labels = token.get_labels("nel")
                                        if len(labels) == 0:  # no entity
                                            txt_out.write(token.text + "\tO\n")
                                        else:  # annotation
                                            txt_out.write(token.text + "\t" + labels[0].value + "\n")
                                    txt_out.write("\n")  # empty line after each sentence
            except Exception:
                # in case something goes wrong, delete the dataset and raise error
                os.remove(parsed_dataset)
                raise
        super().__init__(
            data_folder,
            column_format={0: "text", 1: "nel"},
            train_file=corpus_file_name,
            in_memory=in_memory,
            **corpusargs,
        )
class NEL_GERMAN_HIPE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        wiki_language: str = "dewiki",
        **corpusargs,
    ) -> None:
        """Initialize a sentence-segmented version of the HIPE entity linking corpus for historical German.

        see description of HIPE at https://impresso.github.io/CLEF-HIPE-2020/.
        This version was segmented by @stefan-it and is hosted at https://github.com/stefan-it/clef-hipe.
        If you call the constructor the first time the dataset gets automatically downloaded and transformed in tab-separated column format.

        Parameters
        ----------
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        in_memory: bool
            If True, keeps dataset in memory giving speedups in training.
        wiki_language: str
            specify the language of the names of the wikipedia pages, i.e. which language version of
            Wikipedia URLs to use. Since the text is in german the default language is German.
        """
        self.wiki_language = wiki_language
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        dev_raw_url = "https://raw.githubusercontent.com/stefan-it/clef-hipe/main/data/future/dev-v1.2/de/HIPE-data-v1.2-dev-de-normalized-manual-eos.tsv"
        test_raw_url = "https://raw.githubusercontent.com/stefan-it/clef-hipe/main/data/future/test-v1.3/de/HIPE-data-v1.3-test-de-normalized-manual-eos.tsv"
        train_raw_url = "https://raw.githubusercontent.com/stefan-it/clef-hipe/main/data/future/training-v1.2/de/HIPE-data-v1.2-train-de-normalized-manual-eos.tsv"
        train_file_name = wiki_language + "_train.tsv"
        parsed_dataset = data_folder / train_file_name
        # download and parse data if necessary
        if not parsed_dataset.exists():
            # from qwikidata.linked_data_interface import get_entity_dict_from_api
            original_train_path = cached_path(f"{train_raw_url}", Path("datasets") / dataset_name)
            original_test_path = cached_path(f"{test_raw_url}", Path("datasets") / dataset_name)
            original_dev_path = cached_path(f"{dev_raw_url}", Path("datasets") / dataset_name)
            # generate qid wikiname dictionaries
            log.info("Get wikinames from wikidata...")
            train_dict = self._get_qid_wikiname_dict(path=original_train_path)
            test_dict = self._get_qid_wikiname_dict(original_test_path)
            dev_dict = self._get_qid_wikiname_dict(original_dev_path)
            log.info("...done!")
            # merge dictionaries
            qid_wikiname_dict = {**train_dict, **test_dict, **dev_dict}
            # rewrite each raw HIPE tsv into the two-column (token, label) format
            for doc_path, file_name in zip(
                [original_train_path, original_test_path, original_dev_path],
                [
                    train_file_name,
                    wiki_language + "_test.tsv",
                    wiki_language + "_dev.tsv",
                ],
            ):
                with open(doc_path, encoding="utf-8") as read, open(
                    data_folder / file_name, "w", encoding="utf-8"
                ) as write:
                    # ignore first line
                    read.readline()
                    line = read.readline()
                    last_eos = True
                    while line:
                        # commented and empty lines
                        if line[0] == "#" or line == "\n":
                            if line[2:13] == "document_id":  # beginning of new document
                                if last_eos:
                                    write.write("-DOCSTART-\n\n")
                                    last_eos = False
                                else:
                                    write.write("\n-DOCSTART-\n\n")
                        else:
                            # column 0 is the token; column 7 the wikidata QID ('_'/'NIL' = no link);
                            # column 1 presumably carries a B-/I- tag whose 2-char prefix is reused here -- TODO confirm against HIPE tsv schema
                            line_list = line.split("\t")
                            if line_list[7] not in ["_", "NIL"]:  # line has wikidata link
                                wikiname = qid_wikiname_dict[line_list[7]]
                                annotation = line_list[1][:2] + wikiname if wikiname != "O" else "O"
                            else:
                                annotation = "O"
                            write.write(line_list[0] + "\t" + annotation + "\n")
                            if line_list[-1][-4:-1] == "EOS":  # end of sentence
                                write.write("\n")
                                last_eos = True
                            else:
                                last_eos = False
                        line = read.readline()
        super().__init__(
            data_folder,
            column_format={0: "text", 1: "nel"},
            train_file=train_file_name,
            dev_file=wiki_language + "_dev.tsv",
            test_file=wiki_language + "_test.tsv",
            in_memory=in_memory,
            **corpusargs,
        )

    def _get_qid_wikiname_dict(self, path):
        """Map each wikidata QID found in the HIPE tsv at *path* to its wikipedia page title.

        Titles are resolved via the wikidata API for the language version given by
        self.wiki_language; QIDs with no page in that language map to "O".
        """
        qid_set = set()
        with open(path, encoding="utf-8") as read:
            # read all Q-IDs
            # ignore first line
            read.readline()
            line = read.readline()
            while line:
                if not (line[0] == "#" or line == "\n"):  # commented or empty lines
                    line_list = line.split("\t")
                    if line_list[7] not in ["_", "NIL"]:  # line has wikidata link
                        qid_set.add(line_list[7])
                line = read.readline()
        base_url = (
            "https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&props=sitelinks&sitefilter="
            + self.wiki_language
            + "&ids="
        )
        qid_list = list(qid_set)
        ids = ""
        length = len(qid_list)
        qid_wikiname_dict = {}
        # accumulate '|'-separated ids and fire one API request per batch of 50
        for i in range(length):
            if (
                i + 1
            ) % 50 == 0 or i == length - 1:  # there is a limit to the number of ids in one request in the wikidata api
                ids += qid_list[i]
                # request
                response_json = requests.get(base_url + ids).json()
                for qid in response_json["entities"]:
                    try:
                        wikiname = response_json["entities"][qid]["sitelinks"][self.wiki_language]["title"].replace(
                            " ", "_"
                        )
                    except KeyError:  # language not available for specific wikiitem
                        wikiname = "O"
                    qid_wikiname_dict[qid] = wikiname
                ids = ""
            else:
                ids += qid_list[i]
                ids += "|"
        return qid_wikiname_dict
class NEL_ENGLISH_AIDA(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        use_ids_and_check_existence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize AIDA CoNLL-YAGO Entity Linking corpus.

        The corpus got introduced here https://www.mpi-inf.mpg.de/departments/databases-and-information-systems/research/ambiverse-nlu/aida/downloads.
        License: https://creativecommons.org/licenses/by-sa/3.0/deed.en_US
        If you call the constructor the first time the dataset gets automatically downloaded.

        Parameters
        ----------
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        in_memory: bool
            If True, keeps dataset in memory giving speedups in training.
        use_ids_and_check_existence: bool
            If True the existence of the given wikipedia ids/pagenames is checked and non existent ids/names will be ignored. This also means that one works with
            current wikipedia-arcticle names and possibly alter some of the out-dated ones in the original dataset
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # this dataset name (cached separately per use_ids_and_check_existence mode)
        dataset_name = self.__class__.__name__.lower()
        dataset_name = dataset_name + "_ids" if use_ids_and_check_existence else dataset_name + "_raw"
        data_folder = base_path / dataset_name
        conll_yago_path = "https://nlp.informatik.hu-berlin.de/resources/datasets/conll_entity_linking/"
        corpus_file_name = "train"
        parsed_dataset = data_folder / corpus_file_name
        if not parsed_dataset.exists():
            testa_unprocessed_path = cached_path(f"{conll_yago_path}aida_conll_testa", Path("datasets") / dataset_name)
            testb_unprocessed_path = cached_path(f"{conll_yago_path}aida_conll_testb", Path("datasets") / dataset_name)
            train_unprocessed_path = cached_path(f"{conll_yago_path}aida_conll_train", Path("datasets") / dataset_name)
            if use_ids_and_check_existence:
                # we use the wikiids in the data instead of directly utilizing the wikipedia urls.
                # like this we can quickly check if the corresponding page exists
                # NOTE: wikipediaapi is an optional third-party dependency, imported only in this mode
                import wikipediaapi

                wiki_wiki = wikipediaapi.Wikipedia(language="en")
                wikiid_wikiname_dict = self._get_wikiid_wikiname_dict(data_folder)
            # rewrite each raw split into the two-column (token, label) format
            for name, path in zip(
                ["train", "testa", "testb"],
                [
                    train_unprocessed_path,
                    testa_unprocessed_path,
                    testb_unprocessed_path,
                ],
            ):
                with open(data_folder / name, "w", encoding="utf-8") as write, open(path, encoding="utf-8") as read:
                    for line in read:
                        line_list = line.split("\t")
                        if len(line_list) <= 4:
                            if line_list[0][:10] == "-DOCSTART-":  # Docstart
                                write.write("-DOCSTART-\n\n")
                            elif line_list[0] == "\n":  # empty line
                                write.write("\n")
                            else:  # text without annotation or marked '--NME--' (no matching entity)
                                if len(line_list) == 1:
                                    write.write(line_list[0][:-1] + "\tO\n")
                                else:
                                    write.write(line_list[0] + "\tO\n")
                        else:  # line with annotation
                            if use_ids_and_check_existence:
                                wikiname = wikiid_wikiname_dict[line_list[5].strip()]
                                if wikiname != "O":
                                    write.write(line_list[0] + "\t" + line_list[1] + "-" + wikiname + "\n")
                                else:
                                    # if there is a bad wikiid we can check if the given url in the data exists using wikipediaapi
                                    wikiname = line_list[4].split("/")[-1]
                                    page = wiki_wiki.page(wikiname)
                                    if page.exists():
                                        write.write(line_list[0] + "\t" + line_list[1] + "-" + wikiname + "\n")
                                    else:  # neither the wikiid nor the url exist
                                        write.write(line_list[0] + "\tO\n")
                            else:  # write wikipedia names as given in the file
                                wikiname = line_list[4].split("/")[-1]
                                write.write(line_list[0] + "\t" + line_list[1] + "-" + wikiname + "\n")
                # delete unprocessed file
                os.remove(path)
        super().__init__(
            data_folder,
            column_format={0: "text", 1: "nel"},
            train_file=corpus_file_name,
            dev_file="testa",
            test_file="testb",
            in_memory=in_memory,
            **corpusargs,
        )

    def _get_wikiid_wikiname_dict(self, base_folder):
        """Map each wikipedia page id found in the raw AIDA files to its current page title.

        Titles are resolved via the wikipedia API in batches; ids the API does not know map to "O".
        """
        # collect all wikiids
        wikiid_set = set()
        for data_file in ["aida_conll_testa", "aida_conll_testb", "aida_conll_train"]:
            with open(base_folder / data_file, encoding="utf-8") as read:
                line = read.readline()
                while line:
                    row = line.split("\t")
                    if len(row) > 4:  # line has a wiki annotation
                        wikiid_set.add(row[5].strip())
                    line = read.readline()
        # create the dictionary
        wikiid_wikiname_dict = {}
        wikiid_list = list(wikiid_set)
        ids = ""
        length = len(wikiid_list)
        # accumulate '|'-separated ids and fire one API request per batch of 50
        for i in range(length):
            if (
                i + 1
            ) % 50 == 0 or i == length - 1:  # there is a limit to the number of ids in one request in the wikimedia api
                ids += wikiid_list[i]
                # request
                resp = requests.get(
                    "https://en.wikipedia.org/w/api.php",
                    params={
                        "action": "query",
                        "prop": "info",
                        "pageids": ids,
                        "format": "json",
                    },
                ).json()
                for wikiid in resp["query"]["pages"]:
                    try:
                        wikiname = resp["query"]["pages"][wikiid]["title"].replace(" ", "_")
                    except KeyError:  # bad wikiid
                        wikiname = "O"
                    wikiid_wikiname_dict[wikiid] = wikiname
                ids = ""
            else:
                ids += wikiid_list[i]
                ids += "|"
        return wikiid_wikiname_dict
class NEL_ENGLISH_IITB(ColumnCorpus):
    """IITB/CSAW entity-linking corpus in two-column (token, nel-tag) format."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        ignore_disagreements: bool = False,
        # NOTE(review): this default instance is created once at def time and shared
        # across all calls — harmless if SegtokSentenceSplitter is stateless; verify.
        sentence_splitter: SentenceSplitter = SegtokSentenceSplitter(),
        **corpusargs,
    ) -> None:
        """Initialize ITTB Entity Linking corpus.

        The corpus got introduced in "Collective Annotation of Wikipedia Entities in Web Text" Sayali Kulkarni, Amit Singh, Ganesh Ramakrishnan, and Soumen Chakrabarti.
        If you call the constructor the first time the dataset gets automatically downloaded.

        Parameters
        ----------
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        in_memory: bool
            If True, keeps dataset in memory giving speedups in training.
        ignore_disagreements: bool
            If True annotations with annotator disagreement will be ignored.
        sentence_splitter: `SentenceSplitter`
            The sentencesplitter that is used to split the articles into sentences.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name (includes splitter class so different splitters cache separately)
        dataset_name = self.__class__.__name__.lower() + "_" + type(sentence_splitter).__name__
        data_folder = base_path / dataset_name

        iitb_el_docs_path = "https://www.cse.iitb.ac.in/~soumen/doc/CSAW/Annot/CSAW_crawledDocs.tar.gz"
        iitb_el_annotations_path = "https://www.cse.iitb.ac.in/~soumen/doc/CSAW/Annot/CSAW_Annotations.xml"
        corpus_file_name = "iitb.txt"
        parsed_dataset = data_folder / corpus_file_name

        if not parsed_dataset.exists():
            docs_zip_path = cached_path(f"{iitb_el_docs_path}", Path("datasets") / dataset_name)
            annotations_xml_path = cached_path(f"{iitb_el_annotations_path}", Path("datasets") / dataset_name)
            unpack_file(docs_zip_path, data_folder, "tar", False)

            import xml.etree.ElementTree as ET

            tree = ET.parse(annotations_xml_path)
            root = tree.getroot()

            # names of raw text documents (first child of each annotation element)
            doc_names = set()
            for elem in root:
                if elem[0].text is not None:
                    doc_names.add(elem[0].text)

            # open output_file
            with open(parsed_dataset, "w", encoding="utf-8") as write:
                # iterate through all documents
                for doc_name in doc_names:
                    with open(data_folder / "crawledDocs" / doc_name, encoding="utf-8") as read:
                        text = read.read()
                        # split sentences and tokenize
                        sentences = sentence_splitter.split(text)
                        sentence_offsets = [sentence.start_position or 0 for sentence in sentences]

                        # iterate through all annotations and add to corresponding tokens
                        for elem in root:
                            if elem[0].text == doc_name and elem[2].text:  # annotation belongs to current document
                                wikiname = elem[2].text.replace(" ", "_")
                                assert elem[3].text is not None
                                assert elem[4].text is not None
                                mention_start = int(elem[3].text)
                                mention_length = int(elem[4].text)
                                # find sentence to which annotation belongs:
                                # advance until the next sentence starts after the mention
                                sentence_index = 0
                                for i in range(1, len(sentences)):
                                    if mention_start < sentence_offsets[i]:
                                        break
                                    else:
                                        sentence_index += 1
                                # position within corresponding sentence
                                mention_start -= sentence_offsets[sentence_index]
                                mention_end = mention_start + mention_length
                                # set annotation for tokens of entity mention (BIO scheme)
                                first = True
                                for token in sentences[sentence_index].tokens:
                                    assert token.start_position is not None
                                    assert token.end_position is not None
                                    if (
                                        token.start_position >= mention_start and token.end_position <= mention_end
                                    ):  # token belongs to entity mention
                                        assert elem[1].text is not None
                                        if first:
                                            token.set_label(
                                                typename=elem[1].text,
                                                value="B-" + wikiname,
                                            )
                                            first = False
                                        else:
                                            token.set_label(
                                                typename=elem[1].text,
                                                value="I-" + wikiname,
                                            )
                        # write to out file
                        write.write("-DOCSTART-\n\n")  # each file is one document
                        for sentence in sentences:
                            for token in sentence.tokens:
                                labels = token.labels
                                if len(labels) == 0:  # no entity
                                    write.write(token.text + "\tO\n")
                                elif len(labels) == 1 or labels[0].value == labels[1].value:
                                    # annotation from one annotator or two agreeing annotators
                                    write.write(token.text + "\t" + labels[0].value + "\n")
                                else:  # annotators disagree: ignore or arbitrarily take first annotation
                                    if ignore_disagreements:
                                        write.write(token.text + "\tO\n")
                                    else:
                                        write.write(token.text + "\t" + labels[0].value + "\n")
                            write.write("\n")  # empty line after each sentence

        super().__init__(
            data_folder,
            column_format={0: "text", 1: "nel"},
            train_file=corpus_file_name,
            in_memory=in_memory,
            **corpusargs,
        )
class NEL_ENGLISH_TWEEKI(ColumnCorpus):
    """Tweeki gold entity-linking corpus in two-column (token, nel-tag) format."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize Tweeki Entity Linking corpus.

        The dataset got introduced in "Tweeki: Linking Named Entities on Twitter
        to a Knowledge Graph" Harandizadeh, Singh. The data consists of tweets
        with manually annotated wikipedia links. If you call the constructor the
        first time the dataset gets automatically downloaded and transformed in
        tab-separated column format.

        Parameters
        ----------
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        in_memory: bool
            If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        source_url = "https://raw.githubusercontent.com/ucinlp/tweeki/main/data/Tweeki_gold/Tweeki_gold"
        corpus_file_name = "tweeki_gold.txt"
        parsed_dataset = data_folder / corpus_file_name

        # download and convert once; the parsed file is reused afterwards
        if not parsed_dataset.exists():
            raw_file_path = cached_path(f"{source_url}", Path("datasets") / dataset_name)
            with open(raw_file_path, encoding="utf-8") as src, open(
                parsed_dataset, "w", encoding="utf-8"
            ) as dst:
                for raw_line in src:
                    if raw_line.startswith("#"):
                        # comment lines are dropped
                        converted = ""
                    elif raw_line == "\n":
                        # a blank line ends a tweet; each tweet becomes one document
                        converted = "\n-DOCSTART-\n\n"
                    else:
                        fields = raw_line.split("\t")
                        if fields[3] == "-\n":  # token carries no wiki annotation
                            converted = fields[1] + "\tO\n"
                        else:
                            # BIO prefix from column 2 plus the first linked wiki name
                            converted = (
                                fields[1] + "\t" + fields[2][:2] + fields[3].split("|")[0].replace(" ", "_") + "\n"
                            )
                    dst.write(converted)
            os.rename(raw_file_path, str(raw_file_path) + "_original")

        super().__init__(
            data_folder,
            column_format={0: "text", 1: "nel"},
            train_file=corpus_file_name,
            in_memory=in_memory,
            **corpusargs,
        )
class NEL_ENGLISH_REDDIT(ColumnCorpus):
    """Reddit gold entity-linking corpus in two-column (token, nel-tag) format."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Reddit Entity Linking corpus containing gold annotations only.

        see https://arxiv.org/abs/2101.01228v2
        The first time you call this constructor it will automatically download the dataset.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download and parse data if necessary
        reddit_el_path = "https://zenodo.org/record/3970806/files/reddit_el.zip"
        corpus_file_name = "reddit_el_gold.txt"
        parsed_dataset = data_folder / corpus_file_name

        if not parsed_dataset.exists():
            reddit_el_zip = cached_path(f"{reddit_el_path}", Path("datasets") / dataset_name)
            unpack_file(reddit_el_zip, data_folder, "zip", False)

            with open(data_folder / corpus_file_name, "w", encoding="utf-8") as txtout:
                # First parse the post titles
                with open(data_folder / "posts.tsv", encoding="utf-8") as tsvin1, open(
                    data_folder / "gold_post_annotations.tsv", encoding="utf-8"
                ) as tsvin2:
                    posts = csv.reader(tsvin1, delimiter="\t")
                    self.post_annotations = csv.reader(tsvin2, delimiter="\t")
                    self.curr_annot = next(self.post_annotations)

                    for row in posts:  # Go through all the post titles
                        txtout.writelines("-DOCSTART-\n\n")  # Start each post with a -DOCSTART- token

                        # Keep track of how many and which entity mentions does a given post title have
                        link_annots = []  # [start pos, end pos, wiki page title] of an entity mention

                        # Check if the current post title has an entity link and parse accordingly
                        if row[0] == self.curr_annot[0]:
                            link_annots.append(
                                (
                                    int(self.curr_annot[4]),
                                    int(self.curr_annot[5]),
                                    self.curr_annot[3],
                                )
                            )
                            link_annots = self._fill_annot_array(link_annots, row[0], post_flag=True)

                            # Post titles with entity mentions (if any) are handled via this function
                            self._text_to_cols(
                                Sentence(row[2], use_tokenizer=True),
                                link_annots,
                                txtout,
                            )
                        else:
                            self._text_to_cols(
                                Sentence(row[2], use_tokenizer=True),
                                link_annots,
                                txtout,
                            )

                # Then parse the comments
                with open(data_folder / "comments.tsv", encoding="utf-8") as tsvin3, open(
                    data_folder / "gold_comment_annotations.tsv", encoding="utf-8"
                ) as tsvin4:
                    self.comments = csv.reader(tsvin3, delimiter="\t")
                    self.comment_annotations = csv.reader(tsvin4, delimiter="\t")
                    self.curr_annot = next(self.comment_annotations)
                    self.curr_row: Optional[List[str]] = next(self.comments)
                    self.stop_iter = False

                    # Iterate over the comments.tsv file, until the end is reached
                    while not self.stop_iter:
                        txtout.writelines("-DOCSTART-\n")  # Start each comment thread with a -DOCSTART- token

                        # Keep track of the current comment thread and its corresponding key, on which the annotations are matched.
                        # Each comment thread is handled as one 'document'.
                        self.curr_comm: str = self.curr_row[4]
                        comm_key = self.curr_row[0]

                        # Python's csv package for some reason fails to correctly parse a handful of rows inside the comments.tsv file.
                        # This if-condition is needed to handle this problem.
                        if comm_key in {"en5rf4c", "es3ia8j", "es3lrmw"}:
                            if comm_key == "en5rf4c":
                                # manually re-split the broken row into its fields
                                self.parsed_row = (r.split("\t") for r in self.curr_row[4].split("\n"))
                                self.curr_comm = next(self.parsed_row)  # type: ignore # noqa: PGH003
                            self._fill_curr_comment(fix_flag=True)
                        # In case we are dealing with properly parsed rows, proceed with a regular parsing procedure
                        else:
                            self._fill_curr_comment(fix_flag=False)

                        link_annots = []  # [start pos, end pos, wiki page title] of an entity mention

                        # Check if the current comment thread has an entity link and parse accordingly, same as with post titles above
                        if comm_key == self.curr_annot[0]:
                            link_annots.append(
                                (
                                    int(self.curr_annot[4]),
                                    int(self.curr_annot[5]),
                                    self.curr_annot[3],
                                )
                            )
                            link_annots = self._fill_annot_array(link_annots, comm_key, post_flag=False)
                            self._text_to_cols(
                                Sentence(self.curr_comm, use_tokenizer=True),
                                link_annots,
                                txtout,
                            )
                        else:
                            # In two of the comment thread a case of capital letter spacing occurs, which the SegtokTokenizer cannot properly handle.
                            # The following if-elif condition handles these two cases and as result writes full capitalized words in each corresponding row,
                            # and not just single letters into single rows.
                            if comm_key == "dv74ybb":
                                # NOTE(review): splitting on a single space makes the inner
                                # replace a no-op — this likely was meant to split on a
                                # double space; verify against the upstream data.
                                self.curr_comm = " ".join(
                                    [word.replace(" ", "") for word in self.curr_comm.split(" ")]
                                )
                            elif comm_key == "eci2lut":
                                # hard-coded slice boundaries collapse the spaced-out capitals
                                self.curr_comm = (
                                    self.curr_comm[:18]
                                    + self.curr_comm[18:27].replace(" ", "")
                                    + self.curr_comm[27:55]
                                    + self.curr_comm[55:68].replace(" ", "")
                                    + self.curr_comm[68:85]
                                    + self.curr_comm[85:92].replace(" ", "")
                                    + self.curr_comm[92:]
                                )
                            self._text_to_cols(
                                Sentence(self.curr_comm, use_tokenizer=True),
                                link_annots,
                                txtout,
                            )

        super().__init__(
            data_folder,
            column_format={0: "text", 1: "nel"},
            train_file=corpus_file_name,
            in_memory=in_memory,
            **corpusargs,
        )

    def _text_to_cols(self, sentence: Sentence, links: list, outfile):
        """Convert a tokenized sentence into column format.

        :param sentence: Flair Sentence object containing a tokenized post title or comment thread
        :param links: array containing information about the starting and ending position of an entity mention, as well
        as its corresponding wiki tag
        :param outfile: file, to which the output is written
        """
        for i in range(0, len(sentence)):
            # If there are annotated entity mentions for given post title or a comment thread
            if links:
                # Keep track which is the correct corresponding entity link, in cases where there is >1 link in a sentence
                link_index = [
                    j
                    for j, v in enumerate(links)
                    if (sentence[i].start_position >= v[0] and sentence[i].end_position <= v[1])
                ]
                # Write the token with a corresponding tag to file (S-/B-/I- depending on span overlap)
                try:
                    if any(
                        sentence[i].start_position == v[0] and sentence[i].end_position == v[1]
                        for j, v in enumerate(links)
                    ):
                        outfile.writelines(sentence[i].text + "\tS-" + links[link_index[0]][2] + "\n")
                    elif any(
                        sentence[i].start_position == v[0] and sentence[i].end_position != v[1]
                        for j, v in enumerate(links)
                    ):
                        outfile.writelines(sentence[i].text + "\tB-" + links[link_index[0]][2] + "\n")
                    elif any(
                        sentence[i].start_position >= v[0] and sentence[i].end_position <= v[1]
                        for j, v in enumerate(links)
                    ):
                        outfile.writelines(sentence[i].text + "\tI-" + links[link_index[0]][2] + "\n")
                    else:
                        outfile.writelines(sentence[i].text + "\tO\n")
                # IndexError is raised in cases when there is exactly one link in a sentence, therefore can be dismissed
                except IndexError:
                    pass
            # If a comment thread or a post title has no entity link, all tokens are assigned the O tag
            else:
                outfile.writelines(sentence[i].text + "\tO\n")
            # Prevent writing empty lines if e.g. a quote comes after a dot or initials are tokenized
            # incorrectly, in order to keep the desired format (empty line as a sentence separator).
            try:
                if (
                    (sentence[i].text in {".", "!", "?", "!*"})
                    and (sentence[i + 1].text not in {'"', "“", "'", "''", "!", "?", ";)", "."})
                    and ("." not in sentence[i - 1].text)
                ):
                    outfile.writelines("\n")
            except IndexError:
                # Thrown when the second check above happens, but the last token of a sentence is reached.
                # Indicates that the EOS punctuaion mark is present, therefore an empty line needs to be written below.
                outfile.writelines("\n")
        # If there is no punctuation mark indicating EOS, an empty line is still needed after the EOS
        if sentence[-1].text not in {".", "!", "?"}:
            outfile.writelines("\n")

    def _fill_annot_array(self, annot_array: list, key: str, post_flag: bool) -> list:
        """Fills the array containing information about the entity mention annotations.

        :param annot_array: array to be filled
        :param key: reddit id, on which the post title/comment thread is matched with its corresponding annotation
        :param post_flag: flag indicating whether the annotations are collected for the post titles (=True)
        or comment threads (=False)
        :return: the filled annotation array; the first non-matching annotation is kept in self.curr_annot
        """
        while True:
            # Check if further annotations belong to the current post title or comment thread as well
            try:
                next_annot = next(self.post_annotations) if post_flag else next(self.comment_annotations)
                if next_annot[0] == key:
                    annot_array.append((int(next_annot[4]), int(next_annot[5]), next_annot[3]))
                else:
                    self.curr_annot = next_annot
                    break
            # Stop when the end of an annotation file is reached
            except StopIteration:
                break
        return annot_array

    def _fill_curr_comment(self, fix_flag: bool):
        """Extends the string containing the current comment thread, which is passed to _text_to_cols method, when the comments are parsed.

        :param fix_flag: flag indicating whether the method is called when the incorrectly imported rows are parsed (=True)
        or regular rows (=False)
        """
        next_row = None
        while True:
            # Check if further annotations belong to the current sentence as well
            try:
                next_row = next(self.comments) if not fix_flag else next(self.parsed_row)
                if len(next_row) < 2:
                    # 'else " "' is needed to keep the proper token positions (for accordance with annotations)
                    self.curr_comm += next_row[0] if any(next_row) else " "
                else:
                    self.curr_row = next_row
                    break
            except StopIteration:  # When the end of the comments.tsv file is reached
                self.curr_row = next_row
                self.stop_iter = not fix_flag
                break
def from_ufsac_to_tsv(
    xml_file: Union[str, Path],
    conll_file: Union[str, Path],
    datasetname: str,
    encoding: str = "utf8",
    cut_multisense: bool = True,
):
    """Convert a UFSAC xml corpus into tab-separated column format in a new file.

    Parameters
    ----------
    xml_file : Union[str, Path]
        Path to the UFSAC xml file to convert.
    conll_file : Union[str, Path]
        Path of the new tsv file to create.
    datasetname : str
        Name of the dataset from UFSAC, needed because of different handling of
        multi-word-spans in the datasets.
    encoding : str, optional
        Encoding used in open function. The default is "utf8".
    cut_multisense : bool, optional
        Determines whether the wn30_key tag should be cut if it contains multiple
        possible senses. If True only the first listed sense is used; otherwise the
        whole ';'-separated list is treated as one new sense. The default is True.
    """
    attr_names = ["surface_form", "lemma", "pos", "wn30_key"]

    def render_line(token: str, prefix: str, attribs: List[str]) -> str:
        # One output row: the token plus one tagged column per attribute ('O' stays bare).
        if cut_multisense:
            attribs = attribs[:-1] + [attribs[-1].split(";")[0]]  # keep only the first sense
        cells = [token]
        for value in attribs:
            cells.append("O" if value == "O" else prefix + value)
        return "\t".join(cells) + "\n"

    def span_chunks(values: List[str]) -> List[str]:
        # Split a multi-word surface form into its words where that is sensible
        # for the given dataset.
        surface = values[0]
        if datasetname in ["trainomatic", "masc"]:  # splitting not sensible here
            return [surface]
        if datasetname == "omsti":
            # only split annotated spans that do not consist solely of '_' (still not 100% clean)
            if values[3] != "O" and surface != "_" and "__" not in surface:
                return surface.split("_")
            return [surface]
        # for all other datasets splitting at '_' is always sensible
        return surface.split("_")

    with Path(conll_file).open(mode="w", encoding=encoding) as sink:
        import xml.etree.ElementTree as ET

        corpus_root = ET.parse(xml_file).getroot()
        # only multi-document corpora get -DOCSTART- separators
        write_docstarts = len(corpus_root.findall("document")) > 1
        for document in corpus_root:
            if write_docstarts:
                sink.write("-DOCSTART-\n\n")
            for paragraph in document:
                for sentence in paragraph:
                    for word in sentence:
                        values = [word.attrib.get(attr, "O") for attr in attr_names]
                        chunks = span_chunks(values)
                        sink.write(render_line(chunks[0], "B-", values[1:]))
                        # remaining words of a multi-word span become I- continuation rows
                        for extra in chunks[1:]:
                            sink.write(render_line(extra, "I-", values[1:]))
                    # empty line after each sentence
                    sink.write("\n")
def determine_tsv_file(filename: str, data_folder: Path, cut_multisense: bool = True):
    """Return the name of the converted .tsv file for a UFSAC dataset, creating it if needed.

    Parameters
    ----------
    filename : str
        Name of the UFSAC dataset (without extension).
    data_folder : Path
        Folder in which the CoNLL file should reside; the raw xml is expected
        under data_folder / 'original_data'.
    cut_multisense : bool, optional
        Determines whether the wn30_key tag should be cut if it contains multiple
        possible senses. If True only the first listed sense is used and the file
        name gets a '_cut' infix; datasets without multiple senses never get it.
        The default is True.
    """
    # these three datasets do not have multiple senses, so cutting is never needed
    single_sense_datasets = ["semeval2007task17", "trainomatic", "wngt"]
    use_cut_name = cut_multisense is True and filename not in single_sense_datasets
    conll_file_name = filename + ("_cut.tsv" if use_cut_name else ".tsv")
    target_path = data_folder / conll_file_name
    if not target_path.exists():
        # convert the raw xml into CoNLL-style columns
        from_ufsac_to_tsv(
            xml_file=Path(data_folder / "original_data" / (filename + ".xml")),
            conll_file=Path(data_folder / conll_file_name),
            datasetname=filename,
            cut_multisense=cut_multisense,
        )
    return conll_file_name
class WSD_UFSAC(MultiCorpus):
    """Custom multi-corpus over any selection of UFSAC word-sense-disambiguation datasets."""

    def __init__(
        self,
        filenames: Optional[Union[str, List[str]]] = None,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        cut_multisense: bool = True,
        columns: Optional[Dict[int, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        sample_missing_splits_in_multicorpus: Union[bool, str] = True,
        sample_missing_splits_in_each_corpus: Union[bool, str] = True,
        use_raganato_ALL_as_test_data: bool = False,
        name: str = "multicorpus",
    ) -> None:
        """Initialize a custom corpus with any Word Sense Disambiguation (WSD) datasets in the UFSAC format.

        see https://github.com/getalp/UFSAC.

        If the constructor is called for the first time the data is automatically downloaded and transformed from xml to a tab separated column format.
        Since only the WordNet 3.0 version for senses is consistently available for all provided datasets we will only consider this version.
        Also we ignore the id annotation used in datasets that were originally created for evaluation tasks

        :param filenames: Here you can pass a single datasetname or a list of datasetnames. The available names are:
            'masc', 'omsti', 'raganato_ALL', 'raganato_semeval2007', 'raganato_semeval2013', 'raganato_semeval2015', 'raganato_senseval2', 'raganato_senseval3',
            'semcor', 'semeval2007task17', 'semeval2007task7', 'semeval2013task12', 'semeval2015task13', 'senseval2', 'senseval2_lexical_sample_test',
            'senseval2_lexical_sample_train', 'senseval3task1', 'senseval3task6_test', 'senseval3task6_train', 'trainomatic', 'wngt'.
            So you can pass for example filenames = ['masc', 'omsti', 'wngt']. By default (None) the two mid-sized datasets 'masc' and 'semcor' are loaded.
        :param base_path: You can override this to point to a specific folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param cut_multisense: Boolean that determines whether or not the wn30_key tag should be cut if it contains
            multiple possible senses. If True only the first listed sense will be used and the
            suffix '_cut' will be added to the name of the CoNLL file. Otherwise the whole list of
            senses will be detected as one new sense. The default is True.
        :param columns: Columns to consider when loading the dataset. Defaults to {0: "text", 3: "sense"}; you can add
            1: "lemma" or 2: "pos" if you want to use additional pos and/or lemma for the words.
        :param banned_sentences: Optionally remove sentences from the corpus. Works only if `in_memory` is true
        :param sample_missing_splits_in_multicorpus: Whether to sample missing splits when loading the multicorpus (this is redundant if
            sample_missing_splits_in_each_corpus is True)
        :param sample_missing_splits_in_each_corpus: Whether to sample missing splits when loading each single corpus given in filenames.
        :param use_raganato_ALL_as_test_data: If True, the raganato_ALL dataset (Raganato et al. "Word Sense Disambiguation: A unified evaluation framework and empirical compariso")
            will be used as test data. Note that the sample_missing_splits parameters are set to 'only_dev' in this case if set to True.
        :param name: Name of your (costum) corpus
        """
        # Avoid the mutable-default-argument pitfall: the former defaults (a shared
        # list and dict) are now created freshly per call; the effective defaults
        # are unchanged.
        if filenames is None:
            filenames = ["masc", "semcor"]
        if columns is None:
            columns = {0: "text", 3: "sense"}

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # check if data there, if not, download the data
        if not original_data_folder.exists():
            # create folder
            data_folder.mkdir(parents=True)

            # download data
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)
            output = data_folder / (dataset_name + ".tar")
            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        # transform data into column format if necessary
        # if no filenames are specified we use all the data
        if not filenames:
            # 'entry' (not 'name') so the 'name' parameter is not shadowed
            filenames = [entry[:-4] for entry in os.listdir(original_data_folder) if "raganato" not in entry]

        if isinstance(filenames, str):
            filenames = [filenames]
        else:
            # work on a copy so a caller-provided list is never mutated (see remove() below)
            filenames = list(filenames)

        corpora: List[Corpus] = []
        log.info("Transforming data into column format and creating corpora...")
        if use_raganato_ALL_as_test_data:
            # in this case no test data should be generated by sampling from train data. But if the sample arguments are set to true, the dev set will be sampled
            if sample_missing_splits_in_each_corpus:
                sample_missing_splits_in_each_corpus = "only_dev"
            if sample_missing_splits_in_multicorpus:
                sample_missing_splits_in_multicorpus = "only_dev"

            # also we remove 'raganato_ALL' from filenames in case its in the list
            if "raganato_ALL" in filenames:
                filenames.remove("raganato_ALL")

            # generate the test file
            test_file = determine_tsv_file(
                filename="raganato_ALL",
                data_folder=data_folder,
                cut_multisense=cut_multisense,
            )
            corpus = ColumnCorpus(
                data_folder=data_folder,
                column_format=columns,
                test_file=test_file,  # corpus only has test data
                in_memory=in_memory,
                column_delimiter="\t",
                document_separator_token="-DOCSTART-",
                banned_sentences=banned_sentences,
                autofind_splits=False,
                sample_missing_splits=sample_missing_splits_in_each_corpus,
            )
            corpora.append(corpus)
        for filename in filenames:
            # make column file and save to data_folder
            new_filename = determine_tsv_file(
                filename=filename,
                data_folder=data_folder,
                cut_multisense=cut_multisense,
            )
            corpus = ColumnCorpus(
                data_folder=data_folder,
                column_format=columns,
                train_file=new_filename,
                in_memory=in_memory,
                column_delimiter="\t",
                document_separator_token="-DOCSTART-",
                banned_sentences=banned_sentences,
                autofind_splits=False,
                sample_missing_splits=sample_missing_splits_in_each_corpus,
            )
            corpora.append(corpus)
        log.info("Done with transforming data into column format and creating corpora...")
        super().__init__(
            corpora,
            sample_missing_splits=sample_missing_splits_in_multicorpus,
            name=name,
        )
class WSD_RAGANATO_ALL(ColumnCorpus):
    """Concatenation of all SensEval/SemEval all-words tasks, loaded from UFSAC."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        columns: Optional[Dict[int, str]] = None,
        label_name_map: Optional[Dict[str, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        sample_missing_splits: bool = True,
        cut_multisense: bool = True,
    ) -> None:
        """Initialize ragnato_ALL (concatenation of all SensEval and SemEval all-words tasks) provided in UFSAC.

        see https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: You can override this to point to a specific folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param columns: Columns to consider when loading the dataset. Defaults to {0: "text", 3: "sense"}.
        :param label_name_map: Passed through to ColumnCorpus.
        :param banned_sentences: Passed through to ColumnCorpus.
        :param sample_missing_splits: Passed through to ColumnCorpus.
        :param cut_multisense: If True only the first sense of a multi-sense annotation is kept. The default is True.
        """
        # avoid a shared mutable default argument; the effective default is unchanged
        if columns is None:
            columns = {0: "text", 3: "sense"}

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        dataset_name = "wsd_ufsac"
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # We check if the the UFSAC data has already been downloaded. If not, we download it.
        # Note that this downloads more datasets than just SemCor. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked)
        if not original_data_folder.exists():
            # create folder
            data_folder.mkdir(parents=True)

            # download data
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)
            output = data_folder / (dataset_name + ".tar")
            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        train_file = determine_tsv_file(
            filename="raganato_ALL",
            data_folder=data_folder,
            cut_multisense=cut_multisense,
        )

        super().__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_SEMCOR(ColumnCorpus):
    """SemCor word-sense-disambiguation corpus, loaded from UFSAC."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        columns: Optional[Dict[int, str]] = None,
        label_name_map: Optional[Dict[str, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        sample_missing_splits: Union[bool, str] = True,
        cut_multisense: bool = True,
        use_raganato_ALL_as_test_data: bool = False,
    ) -> None:
        """Initialize SemCor provided in UFSAC.

        see https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: You can override this to point to a specific folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param columns: Columns to consider when loading the dataset. Defaults to {0: "text", 3: "sense"}.
        :param label_name_map: Passed through to ColumnCorpus.
        :param banned_sentences: Passed through to ColumnCorpus.
        :param sample_missing_splits: Passed through to ColumnCorpus; forced to 'only_dev' when
            use_raganato_ALL_as_test_data is True.
        :param cut_multisense: If True only the first sense of a multi-sense annotation is kept. The default is True.
        :param use_raganato_ALL_as_test_data: If True, the raganato_ALL dataset is used as test data.
        """
        # avoid a shared mutable default argument; the effective default is unchanged
        if columns is None:
            columns = {0: "text", 3: "sense"}

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        dataset_name = "wsd_ufsac"
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # We check if the the UFSAC data has already been downloaded. If not, we download it.
        # Note that this downloads more datasets than just SemCor. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked)
        if not original_data_folder.exists():
            # create folder
            data_folder.mkdir(parents=True)

            # download data
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)
            output = data_folder / (dataset_name + ".tar")
            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # in this case no test data should be generated by sampling from train data. But if sample_missing_splits is true, the dev set will be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(
                filename="raganato_ALL",
                data_folder=data_folder,
                cut_multisense=cut_multisense,
            )
        else:
            test_file = None

        train_file = determine_tsv_file(filename="semcor", data_folder=data_folder, cut_multisense=cut_multisense)

        super().__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_WORDNET_GLOSS_TAGGED(ColumnCorpus):
    """Princeton WordNet Gloss corpus ('wngt'), loaded from UFSAC."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        columns: Optional[Dict[int, str]] = None,
        label_name_map: Optional[Dict[str, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        sample_missing_splits: Union[bool, str] = True,
        use_raganato_ALL_as_test_data: bool = False,
    ) -> None:
        """Initialize Princeton WordNet Gloss Corpus provided in UFSAC.

        see https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: You can override this to point to a specific folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param columns: Columns to consider when loading the dataset. Defaults to {0: "text", 3: "sense"}.
        :param label_name_map: Passed through to ColumnCorpus.
        :param banned_sentences: Passed through to ColumnCorpus.
        :param sample_missing_splits: Passed through to ColumnCorpus; forced to 'only_dev' when
            use_raganato_ALL_as_test_data is True.
        :param use_raganato_ALL_as_test_data: If True, the raganato_ALL dataset is used as test data.
        """
        # avoid a shared mutable default argument; the effective default is unchanged
        if columns is None:
            columns = {0: "text", 3: "sense"}

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        dataset_name = "wsd_ufsac"
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # We check if the the UFSAC data has already been downloaded. If not, we download it.
        # Note that this downloads more datasets than just WordNet Gloss Tagged. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked)
        if not original_data_folder.exists():
            # create folder
            data_folder.mkdir(parents=True)

            # download data
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)
            output = data_folder / (dataset_name + ".tar")
            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # in this case no test data should be generated by sampling from train data. But if sample_missing_splits is true, the dev set will be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(filename="raganato_ALL", data_folder=data_folder, cut_multisense=True)
        else:
            test_file = None

        train_file = determine_tsv_file(
            filename="wngt", data_folder=data_folder, cut_multisense=False
        )  # does not have multisense!

        super().__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_MASC(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        columns: Optional[Dict[int, str]] = None,
        label_name_map: Optional[Dict[str, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        sample_missing_splits: Union[bool, str] = True,
        cut_multisense: bool = True,
        use_raganato_ALL_as_test_data: bool = False,
    ) -> None:
        """Initialize MASC (Manually Annotated Sub-Corpus) provided in UFSAC.

        see https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: alternative cache folder for the dataset (defaults to flair's cache root)
        :param in_memory: if True, keep the full dataset in memory
        :param columns: mapping of TSV column index to annotation name (defaults to {0: "text", 3: "sense"})
        :param label_name_map: optional remapping of label names
        :param banned_sentences: sentences to exclude from the corpus
        :param sample_missing_splits: whether missing splits are sampled from the train split
        :param cut_multisense: if True, keep only the first sense of multi-sense annotations
        :param use_raganato_ALL_as_test_data: use the raganato_ALL dataset as test split instead of sampling
        """
        # avoid a shared mutable default argument; fall back to the standard column layout
        if columns is None:
            columns = {0: "text", 3: "sense"}

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = "wsd_ufsac"

        # default dataset folder is the cache root
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # We check if the UFSAC data has already been downloaded. If not, we download it.
        # Note that this downloads more datasets than just MASC. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked)
        if not original_data_folder.exists():
            # create folder
            data_folder.mkdir(parents=True)

            # lazy import: gdown is only needed for the one-time download
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)
            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # in this case no test data should be generated by sampling from train data. But if sample_missing_splits is true, the dev set will be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(
                filename="raganato_ALL",
                data_folder=data_folder,
                cut_multisense=cut_multisense,
            )
        else:
            test_file = None

        train_file = determine_tsv_file(filename="masc", data_folder=data_folder, cut_multisense=cut_multisense)

        super().__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_OMSTI(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        columns: Optional[Dict[int, str]] = None,
        label_name_map: Optional[Dict[str, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        sample_missing_splits: Union[bool, str] = True,
        cut_multisense: bool = True,
        use_raganato_ALL_as_test_data: bool = False,
    ) -> None:
        """Initialize OMSTI (One Million Sense-Tagged Instances) provided in UFSAC.

        see https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: alternative cache folder for the dataset (defaults to flair's cache root)
        :param in_memory: if True, keep the full dataset in memory
        :param columns: mapping of TSV column index to annotation name (defaults to {0: "text", 3: "sense"})
        :param label_name_map: optional remapping of label names
        :param banned_sentences: sentences to exclude from the corpus
        :param sample_missing_splits: whether missing splits are sampled from the train split
        :param cut_multisense: if True, keep only the first sense of multi-sense annotations
        :param use_raganato_ALL_as_test_data: use the raganato_ALL dataset as test split instead of sampling
        """
        # avoid a shared mutable default argument; fall back to the standard column layout
        if columns is None:
            columns = {0: "text", 3: "sense"}

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = "wsd_ufsac"

        # default dataset folder is the cache root
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # We check if the UFSAC data has already been downloaded. If not, we download it.
        # Note that this downloads more datasets than just OMSTI. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked)
        if not original_data_folder.exists():
            # create folder
            data_folder.mkdir(parents=True)

            # lazy import: gdown is only needed for the one-time download
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)
            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # in this case no test data should be generated by sampling from train data. But if sample_missing_splits is true, the dev set will be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(
                filename="raganato_ALL",
                data_folder=data_folder,
                cut_multisense=cut_multisense,
            )
        else:
            test_file = None

        train_file = determine_tsv_file(filename="omsti", data_folder=data_folder, cut_multisense=cut_multisense)

        super().__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
class WSD_TRAINOMATIC(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        columns: Optional[Dict[int, str]] = None,
        label_name_map: Optional[Dict[str, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        sample_missing_splits: Union[bool, str] = True,
        use_raganato_ALL_as_test_data: bool = False,
    ) -> None:
        """Initialize Train-O-Matic provided in UFSAC.

        see https://github.com/getalp/UFSAC
        When first initializing the corpus the whole UFSAC data is downloaded.

        :param base_path: alternative cache folder for the dataset (defaults to flair's cache root)
        :param in_memory: if True, keep the full dataset in memory
        :param columns: mapping of TSV column index to annotation name (defaults to {0: "text", 3: "sense"})
        :param label_name_map: optional remapping of label names
        :param banned_sentences: sentences to exclude from the corpus
        :param sample_missing_splits: whether missing splits are sampled from the train split
        :param use_raganato_ALL_as_test_data: use the raganato_ALL dataset as test split instead of sampling
        """
        # avoid a shared mutable default argument; fall back to the standard column layout
        if columns is None:
            columns = {0: "text", 3: "sense"}

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = "wsd_ufsac"

        # default dataset folder is the cache root
        data_folder = base_path / dataset_name
        original_data_folder = data_folder / "original_data"

        # We check if the UFSAC data has already been downloaded. If not, we download it.
        # Note that this downloads more datasets than just Train-O-Matic. But the size of the download is only around 190 Mb (around 4.5 Gb unpacked)
        if not original_data_folder.exists():
            # create folder
            data_folder.mkdir(parents=True)

            # lazy import: gdown is only needed for the one-time download
            import gdown

            url = "https://drive.google.com/uc?id=1Oigo3kzRosz2VjyA44vpJZ58tDFyLRMO"
            output = data_folder / (dataset_name + ".tar")
            gdown.download(url, str(output), quiet=False)
            unpack_file(file=output, unpack_to=data_folder, mode="tar", keep=False)
            os.rename(data_folder / "ufsac-public-2.1", original_data_folder)

        if use_raganato_ALL_as_test_data:
            # in this case no test data should be generated by sampling from train data. But if sample_missing_splits is true, the dev set will be sampled.
            if sample_missing_splits:
                sample_missing_splits = "only_dev"

            # generate the test file
            test_file = determine_tsv_file(filename="raganato_ALL", data_folder=data_folder, cut_multisense=True)
        else:
            test_file = None

        train_file = determine_tsv_file(
            filename="trainomatic", data_folder=data_folder, cut_multisense=False
        )  # no multisenses

        super().__init__(
            data_folder=data_folder,
            column_format=columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            column_delimiter="\t",
            autofind_splits=False,
            label_name_map=label_name_map,
            banned_sentences=banned_sentences,
            sample_missing_splits=sample_missing_splits,
        )
| 79,934 | 44.110045 | 181 | py |
flair | flair-master/flair/datasets/treebanks.py | import logging
import re
from pathlib import Path
from typing import List, Optional, Union
import flair
from flair.data import Corpus, FlairDataset, Sentence, Token
from flair.datasets.base import find_train_dev_test_files
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class UniversalDependenciesCorpus(Corpus):
    def __init__(
        self,
        data_folder: Union[str, Path],
        train_file=None,
        test_file=None,
        dev_file=None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Instantiates a Corpus from CoNLL-U column-formatted task data such as the UD corpora.

        :param data_folder: base folder with the task data
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file, if None, dev data is sampled from train
        :param in_memory: If set to True, keeps full dataset in memory, otherwise does disk reads
        :param split_multiwords: If set to True, multiwords are split (default), otherwise kept as single tokens
        :return: a Corpus with annotated train, dev and test data
        """
        # locate any splits that were not passed in explicitly
        dev_file, test_file, train_file = find_train_dev_test_files(data_folder, dev_file, test_file, train_file)

        def _load_split(split_file):
            # dev and test splits are optional; return None when the file is absent
            if split_file is None:
                return None
            return UniversalDependenciesDataset(split_file, in_memory=in_memory, split_multiwords=split_multiwords)

        # the train split is always expected to exist
        train = UniversalDependenciesDataset(train_file, in_memory=in_memory, split_multiwords=split_multiwords)

        super().__init__(train, _load_split(dev_file), _load_split(test_file), name=str(data_folder))
class UniversalDependenciesDataset(FlairDataset):
    def __init__(
        self,
        path_to_conll_file: Union[str, Path],
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Instantiates a column dataset in CoNLL-U format.

        :param path_to_conll_file: Path to the CoNLL-U formatted file
        :param in_memory: If set to True, keeps full dataset in memory, otherwise does disk reads
        :param split_multiwords: If set to True, multiword tokens are split into their component
            words; otherwise the multiword surface form is kept as a single token
        """
        path_to_conll_file = Path(path_to_conll_file)
        assert path_to_conll_file.exists()

        self.in_memory: bool = in_memory
        self.split_multiwords: bool = split_multiwords

        self.path_to_conll_file = path_to_conll_file
        self.total_sentence_count: int = 0

        with open(str(self.path_to_conll_file), encoding="utf-8") as file:
            # option 1: read only sentence boundaries as offset positions
            if not self.in_memory:
                self.indices: List[int] = []

                line = file.readline()
                position = 0
                while line:
                    line = line.strip()
                    if line == "":
                        # an empty line terminates a sentence; remember where the NEXT one starts
                        self.indices.append(position)
                        position = file.tell()
                    line = file.readline()

                self.total_sentence_count = len(self.indices)

            # option 2: keep everything in memory
            if self.in_memory:
                self.sentences: List[Sentence] = []

                # parse sentences until _read_next_sentence returns an empty Sentence (EOF)
                while True:
                    sentence = self._read_next_sentence(file)
                    if not sentence:
                        break
                    self.sentences.append(sentence)

                self.total_sentence_count = len(self.sentences)

    def is_in_memory(self) -> bool:
        # True when sentences were fully parsed at construction time
        return self.in_memory

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        # if in memory, retrieve parsed sentence
        if self.in_memory:
            sentence = self.sentences[index]

        # else skip to position in file where sentence begins
        else:
            with open(str(self.path_to_conll_file), encoding="utf-8") as file:
                file.seek(self.indices[index])
                sentence = self._read_next_sentence(file)

        return sentence

    def _read_next_sentence(self, file) -> Sentence:
        """Parses the next sentence from an open CoNLL-U file handle.

        CoNLL-U column layout as used below: 0=ID, 1=FORM, 2=LEMMA, 3=UPOS,
        4=XPOS, 5=FEATS, 6=HEAD, 7=DEPREL, 9=MISC; columns 10/11 carry frame
        information in extended files. Returns an empty Sentence at EOF.
        """
        line = file.readline()
        tokens: List[Token] = []

        # current token ID
        token_idx = 0

        # handling for the awful UD multiword format
        current_multiword_text = ""
        current_multiword_sequence = ""
        current_multiword_first_token = 0
        current_multiword_last_token = 0

        while line:
            line = line.strip()
            fields: List[str] = re.split("\t+", line)

            # end of sentence
            if line == "":
                if len(tokens) > 0:
                    break

            # comments or ellipsis (empty-node IDs like "8.1" are skipped)
            elif line.startswith("#") or "." in fields[0]:
                line = file.readline()
                continue

            # if token is a multi-word (range ID like "4-5")
            elif "-" in fields[0]:
                line = file.readline()

                current_multiword_first_token = int(fields[0].split("-")[0])
                current_multiword_last_token = int(fields[0].split("-")[1])
                current_multiword_text = fields[1]
                current_multiword_sequence = ""

                if self.split_multiwords:
                    # component words follow on the next lines; parse those instead
                    continue
                else:
                    # keep the surface multiword form as a single token
                    token = Token(fields[1])
                    token.add_label("lemma", str(fields[2]))
                    if len(fields) > 9 and "SpaceAfter=No" in fields[9]:
                        token.whitespace_after = 0
                    tokens.append(token)
                    token_idx += 1

            # normal single-word tokens
            else:
                # if we don't split multiwords, skip over component words
                if not self.split_multiwords and token_idx < current_multiword_last_token:
                    token_idx += 1
                    line = file.readline()
                    continue

                # add token
                token = Token(fields[1], head_id=int(fields[6]))
                token.add_label("lemma", str(fields[2]))
                token.add_label("upos", str(fields[3]))
                token.add_label("pos", str(fields[4]))
                token.add_label("dependency", str(fields[7]))

                if len(fields) > 9 and "SpaceAfter=No" in fields[9]:
                    token.whitespace_after = 0

                # add morphological tags (FEATS column, "Key=Value|Key=Value" pairs)
                for morph in str(fields[5]).split("|"):
                    if "=" not in morph:
                        continue
                    token.add_label(morph.split("=")[0].lower(), morph.split("=")[1])

                if len(fields) > 10 and str(fields[10]) == "Y":
                    token.add_label("frame", str(fields[11]))

                token_idx += 1

                # derive whitespace logic for multiwords
                if token_idx <= current_multiword_last_token:
                    current_multiword_sequence += token.text

                # if multi-word equals component tokens, there should be no whitespace
                if token_idx == current_multiword_last_token and current_multiword_sequence == current_multiword_text:
                    # go through all tokens in subword and set whitespace_after information
                    for i in range(current_multiword_last_token - current_multiword_first_token):
                        tokens[-(i + 1)].whitespace_after = 0
                tokens.append(token)

            line = file.readline()
        return Sentence(tokens)
class UD_ENGLISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """English treebank (UD_English-EWT) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/en_ewt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GALICIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Galician treebank (UD_Galician-TreeGal) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # this treebank ships only test and train splits
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Galician-TreeGal/master"
        for split in ("test", "train"):
            cached_path(f"{web_path}/gl_treegal-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ANCIENT_GREEK(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Ancient Greek treebank (UD_Ancient_Greek-PROIEL) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Ancient_Greek-PROIEL/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/grc_proiel-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_KAZAKH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Kazakh treebank (UD_Kazakh-KTB) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # this treebank ships only test and train splits
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Kazakh-KTB/master"
        for split in ("test", "train"):
            cached_path(f"{web_path}/kk_ktb-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_OLD_CHURCH_SLAVONIC(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Old Church Slavonic treebank (UD_Old_Church_Slavonic-PROIEL) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Old_Church_Slavonic-PROIEL/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/cu_proiel-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ARMENIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Armenian treebank (UD_Armenian-ArmTDP) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary; note: trailing slash removed from web_path so the
        # built URLs contain a single slash, consistent with the other UD corpora
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Armenian-ArmTDP/master"
        cached_path(f"{web_path}/hy_armtdp-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/hy_armtdp-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/hy_armtdp-ud-train.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ESTONIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Estonian treebank (UD_Estonian-EDT) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Estonian-EDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/et_edt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GERMAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """German treebank (UD_German-GSD) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_German-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/de_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GERMAN_HDT(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        split_multiwords: bool = True,
    ) -> None:
        """German HDT treebank (UD_German-HDT) from Universal Dependencies.

        The train split is distributed in four parts which are concatenated
        into a single file on first use. Defaults to in_memory=False because
        of the corpus size.
        """
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_German-HDT/dev"
        cached_path(f"{ud_path}/de_hdt-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/de_hdt-ud-test.conllu", Path("datasets") / dataset_name)

        train_filenames = [
            "de_hdt-ud-train-a-1.conllu",
            "de_hdt-ud-train-a-2.conllu",
            "de_hdt-ud-train-b-1.conllu",
            "de_hdt-ud-train-b-2.conllu",
        ]

        for train_file in train_filenames:
            cached_path(f"{ud_path}/{train_file}", Path("datasets") / dataset_name / "original")

        data_path = flair.cache_root / "datasets" / dataset_name

        new_train_file: Path = data_path / "de_hdt-ud-train-all.conllu"

        # concatenate the four train parts into one file; CoNLL-U is UTF-8, so the
        # encoding is set explicitly (the dataset reader also reads with utf-8)
        if not new_train_file.is_file():
            with open(new_train_file, "w", encoding="utf-8") as f_out:
                for train_filename in train_filenames:
                    with open(data_path / "original" / train_filename, encoding="utf-8") as f_in:
                        f_out.write(f_in.read())

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_DUTCH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Dutch treebank (UD_Dutch-Alpino) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Dutch-Alpino/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/nl_alpino-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_FAROESE(UniversalDependenciesCorpus):
    """This treebank includes the Faroese treebank dataset.

    The data is obtained from the following link:
    https://github.com/UniversalDependencies/UD_Faroese-FarPaHC/tree/master

    Faroese is a small Western Scandinavian language with 60.000-100.000 speakers,
    related to Icelandic and Old Norse.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Faroese-FarPaHC/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{web_path}/fo_farpahc-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_FRENCH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """French treebank (UD_French-GSD) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_French-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/fr_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ITALIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Italian treebank (UD_Italian-ISDT) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Italian-ISDT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/it_isdt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_LATIN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Latin treebank (UD_Latin-LLCT) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary; note: trailing slash removed from web_path so the
        # built URLs contain a single slash, consistent with the other UD corpora
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Latin-LLCT/master"
        cached_path(f"{web_path}/la_llct-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/la_llct-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/la_llct-ud-train.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SPANISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Spanish treebank (UD_Spanish-GSD) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Spanish-GSD/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/es_gsd-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_PORTUGUESE(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Portuguese treebank (UD_Portuguese-Bosque) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Portuguese-Bosque/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/pt_bosque-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ROMANIAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Romanian treebank (UD_Romanian-RRT) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Romanian-RRT/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/ro_rrt-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CATALAN(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Catalan treebank (UD_Catalan-AnCora) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Catalan-AnCora/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/ca_ancora-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_POLISH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Polish treebank (UD_Polish-LFG) from Universal Dependencies."""
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three CoNLL-U splits if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Polish-LFG/master"
        for split in ("dev", "test", "train"):
            cached_path(f"{ud_path}/pl_lfg-ud-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CZECH(UniversalDependenciesCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        split_multiwords: bool = True,
    ) -> None:
        """Czech treebank (UD_Czech-PDT) from Universal Dependencies.

        The train split is distributed in four parts which are concatenated
        into a single file on first use. Defaults to in_memory=False because
        of the corpus size.
        """
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Czech-PDT/master"
        cached_path(f"{ud_path}/cs_pdt-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/cs_pdt-ud-test.conllu", Path("datasets") / dataset_name)

        # the train split comes in four parts; download each into the "original" subfolder
        train_filenames = [
            "cs_pdt-ud-train-c.conllu",
            "cs_pdt-ud-train-l.conllu",
            "cs_pdt-ud-train-m.conllu",
            "cs_pdt-ud-train-v.conllu",
        ]
        for train_filename in train_filenames:
            cached_path(f"{ud_path}/{train_filename}", Path("datasets") / dataset_name / "original")

        data_path = flair.cache_root / "datasets" / dataset_name

        new_train_file: Path = data_path / "cs_pdt-ud-train-all.conllu"

        # concatenate the four train parts into one file; CoNLL-U is UTF-8, so the
        # encoding is set explicitly (the dataset reader also reads with utf-8)
        if not new_train_file.is_file():
            with open(new_train_file, "w", encoding="utf-8") as f_out:
                for train_filename in train_filenames:
                    with open(data_path / "original" / train_filename, encoding="utf-8") as f_in:
                        f_out.write(f_in.read())

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SLOVAK(UniversalDependenciesCorpus):
    """Slovak Universal Dependencies corpus (UD_Slovak-SNK)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Slovak-SNK/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("sk_snk-ud-dev.conllu", "sk_snk-ud-test.conllu", "sk_snk-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SWEDISH(UniversalDependenciesCorpus):
    """Swedish Universal Dependencies corpus (UD_Swedish-Talbanken)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Swedish-Talbanken/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "sv_talbanken-ud-dev.conllu",
            "sv_talbanken-ud-test.conllu",
            "sv_talbanken-ud-train.conllu",
        ):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_DANISH(UniversalDependenciesCorpus):
    """Danish Universal Dependencies corpus (UD_Danish-DDT)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Danish-DDT/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("da_ddt-ud-dev.conllu", "da_ddt-ud-test.conllu", "da_ddt-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_NORWEGIAN(UniversalDependenciesCorpus):
    """Norwegian (Bokmaal) Universal Dependencies corpus (UD_Norwegian-Bokmaal)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Norwegian-Bokmaal/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "no_bokmaal-ud-dev.conllu",
            "no_bokmaal-ud-test.conllu",
            "no_bokmaal-ud-train.conllu",
        ):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_FINNISH(UniversalDependenciesCorpus):
    """Finnish Universal Dependencies corpus (UD_Finnish-TDT)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Finnish-TDT/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("fi_tdt-ud-dev.conllu", "fi_tdt-ud-test.conllu", "fi_tdt-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SLOVENIAN(UniversalDependenciesCorpus):
    """Slovenian Universal Dependencies corpus (UD_Slovenian-SSJ)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Slovenian-SSJ/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("sl_ssj-ud-dev.conllu", "sl_ssj-ud-test.conllu", "sl_ssj-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CROATIAN(UniversalDependenciesCorpus):
    """Croatian Universal Dependencies corpus (UD_Croatian-SET)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Croatian-SET/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("hr_set-ud-dev.conllu", "hr_set-ud-test.conllu", "hr_set-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_SERBIAN(UniversalDependenciesCorpus):
    """Serbian Universal Dependencies corpus (UD_Serbian-SET)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Serbian-SET/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("sr_set-ud-dev.conllu", "sr_set-ud-test.conllu", "sr_set-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_BULGARIAN(UniversalDependenciesCorpus):
    """Bulgarian Universal Dependencies corpus (UD_Bulgarian-BTB)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Bulgarian-BTB/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("bg_btb-ud-dev.conllu", "bg_btb-ud-test.conllu", "bg_btb-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_ARABIC(UniversalDependenciesCorpus):
    """Arabic Universal Dependencies corpus (UD_Arabic-PADT)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Arabic-PADT/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("ar_padt-ud-dev.conllu", "ar_padt-ud-test.conllu", "ar_padt-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_HEBREW(UniversalDependenciesCorpus):
    """Hebrew Universal Dependencies corpus (UD_Hebrew-HTB)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Hebrew-HTB/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("he_htb-ud-dev.conllu", "he_htb-ud-test.conllu", "he_htb-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_TURKISH(UniversalDependenciesCorpus):
    """Turkish Universal Dependencies corpus (UD_Turkish-IMST)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # after this line base_path is always a truthy Path, so the old
        # "if not base_path" cache-root fallback below it was dead code
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Turkish-IMST/master"
        cached_path(f"{ud_path}/tr_imst-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/tr_imst-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/tr_imst-ud-train.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_UKRAINIAN(UniversalDependenciesCorpus):
    """Ukrainian Universal Dependencies corpus (UD_Ukrainian-IU)."""

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True, split_multiwords: bool = True
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Ukrainian-IU/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("uk_iu-ud-dev.conllu", "uk_iu-ud-test.conllu", "uk_iu-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_PERSIAN(UniversalDependenciesCorpus):
    """Persian Universal Dependencies corpus (UD_Persian-Seraji)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # after this line base_path is always a truthy Path, so the old
        # "if not base_path" cache-root fallback below it was dead code
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Persian-Seraji/master"
        cached_path(f"{ud_path}/fa_seraji-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/fa_seraji-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{ud_path}/fa_seraji-ud-train.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_RUSSIAN(UniversalDependenciesCorpus):
    """Russian Universal Dependencies corpus (UD_Russian-SynTagRus)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Russian-SynTagRus/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "ru_syntagrus-ud-dev.conllu",
            "ru_syntagrus-ud-test.conllu",
            "ru_syntagrus-ud-train.conllu",
        ):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_HINDI(UniversalDependenciesCorpus):
    """Hindi Universal Dependencies corpus (UD_Hindi-HDTB)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Hindi-HDTB/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("hi_hdtb-ud-dev.conllu", "hi_hdtb-ud-test.conllu", "hi_hdtb-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_INDONESIAN(UniversalDependenciesCorpus):
    """Indonesian Universal Dependencies corpus (UD_Indonesian-GSD)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Indonesian-GSD/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("id_gsd-ud-dev.conllu", "id_gsd-ud-test.conllu", "id_gsd-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_JAPANESE(UniversalDependenciesCorpus):
    """Japanese Universal Dependencies corpus (UD_Japanese-GSD)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Japanese-GSD/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("ja_gsd-ud-dev.conllu", "ja_gsd-ud-test.conllu", "ja_gsd-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CHINESE(UniversalDependenciesCorpus):
    """Chinese Universal Dependencies corpus (UD_Chinese-GSD)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Chinese-GSD/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("zh_gsd-ud-dev.conllu", "zh_gsd-ud-test.conllu", "zh_gsd-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_KOREAN(UniversalDependenciesCorpus):
    """Korean Universal Dependencies corpus (UD_Korean-Kaist)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Korean-Kaist/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("ko_kaist-ud-dev.conllu", "ko_kaist-ud-test.conllu", "ko_kaist-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_BASQUE(UniversalDependenciesCorpus):
    """Basque Universal Dependencies corpus (UD_Basque-BDT)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        ud_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Basque-BDT/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("eu_bdt-ud-dev.conllu", "eu_bdt-ud-test.conllu", "eu_bdt-ud-train.conllu"):
            cached_path(f"{ud_path}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_CHINESE_KYOTO(UniversalDependenciesCorpus):
    """Classical Chinese Universal Dependencies corpus (UD_Classical_Chinese-Kyoto)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Classical_Chinese-Kyoto/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "lzh_kyoto-ud-dev.conllu",
            "lzh_kyoto-ud-test.conllu",
            "lzh_kyoto-ud-train.conllu",
        ):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GREEK(UniversalDependenciesCorpus):
    """Greek Universal Dependencies corpus (UD_Greek-GDT)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Greek-GDT/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("el_gdt-ud-dev.conllu", "el_gdt-ud-test.conllu", "el_gdt-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_NAIJA(UniversalDependenciesCorpus):
    """Naija (Nigerian Pidgin) Universal Dependencies corpus (UD_Naija-NSC)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        base_path = Path(flair.cache_root) / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary; note: single slash between the base URL
        # and the file name (the previous double slash was inconsistent with
        # every sibling corpus class)
        web_path = "https://raw.githubusercontent.com/UniversalDependencies/UD_Naija-NSC/master"
        cached_path(f"{web_path}/pcm_nsc-ud-dev.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/pcm_nsc-ud-test.conllu", Path("datasets") / dataset_name)
        cached_path(f"{web_path}/pcm_nsc-ud-train.conllu", Path("datasets") / dataset_name)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_LIVVI(UniversalDependenciesCorpus):
    """Livvi (Olonets Karelian) Universal Dependencies corpus (UD_Livvi-KKPP).

    This treebank ships only test and train splits (no dev file).
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each available split (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Livvi-KKPP/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("olo_kkpp-ud-test.conllu", "olo_kkpp-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_BURYAT(UniversalDependenciesCorpus):
    """Buryat Universal Dependencies corpus (UD_Buryat-BDT).

    This treebank ships only test and train splits (no dev file).
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each available split (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Buryat-BDT/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("bxr_bdt-ud-test.conllu", "bxr_bdt-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_NORTH_SAMI(UniversalDependenciesCorpus):
    """North Sami Universal Dependencies corpus (UD_North_Sami-Giella).

    This treebank ships only test and train splits (no dev file).
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each available split (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_North_Sami-Giella/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("sme_giella-ud-test.conllu", "sme_giella-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_MARATHI(UniversalDependenciesCorpus):
    """Marathi Universal Dependencies corpus (UD_Marathi-UFAL)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Marathi-UFAL/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("mr_ufal-ud-dev.conllu", "mr_ufal-ud-test.conllu", "mr_ufal-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_MALTESE(UniversalDependenciesCorpus):
    """Maltese Universal Dependencies corpus (UD_Maltese-MUDT)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Maltese-MUDT/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("mt_mudt-ud-dev.conllu", "mt_mudt-ud-test.conllu", "mt_mudt-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_AFRIKAANS(UniversalDependenciesCorpus):
    """Afrikaans Universal Dependencies corpus (UD_Afrikaans-AfriBooms)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Afrikaans-AfriBooms/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "af_afribooms-ud-dev.conllu",
            "af_afribooms-ud-test.conllu",
            "af_afribooms-ud-train.conllu",
        ):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_GOTHIC(UniversalDependenciesCorpus):
    """Gothic Universal Dependencies corpus (UD_Gothic-PROIEL)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Gothic-PROIEL/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "got_proiel-ud-dev.conllu",
            "got_proiel-ud-test.conllu",
            "got_proiel-ud-train.conllu",
        ):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_OLD_FRENCH(UniversalDependenciesCorpus):
    """Old French Universal Dependencies corpus (UD_Old_French-SRCMF)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Old_French-SRCMF/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "fro_srcmf-ud-dev.conllu",
            "fro_srcmf-ud-test.conllu",
            "fro_srcmf-ud-train.conllu",
        ):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_WOLOF(UniversalDependenciesCorpus):
    """Wolof Universal Dependencies corpus (UD_Wolof-WTB)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Wolof-WTB/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("wo_wtb-ud-dev.conllu", "wo_wtb-ud-test.conllu", "wo_wtb-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_BELARUSIAN(UniversalDependenciesCorpus):
    """Belarusian Universal Dependencies corpus (UD_Belarusian-HSE)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Belarusian-HSE/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("be_hse-ud-dev.conllu", "be_hse-ud-test.conllu", "be_hse-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_COPTIC(UniversalDependenciesCorpus):
    """Coptic Universal Dependencies corpus (UD_Coptic-Scriptorium)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Coptic-Scriptorium/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "cop_scriptorium-ud-dev.conllu",
            "cop_scriptorium-ud-test.conllu",
            "cop_scriptorium-ud-train.conllu",
        ):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_IRISH(UniversalDependenciesCorpus):
    """Irish Universal Dependencies corpus (UD_Irish-IDT)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Irish-IDT/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("ga_idt-ud-dev.conllu", "ga_idt-ud-test.conllu", "ga_idt-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_LATVIAN(UniversalDependenciesCorpus):
    """Latvian Universal Dependencies corpus (UD_Latvian-LVTB)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Latvian-LVTB/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("lv_lvtb-ud-dev.conllu", "lv_lvtb-ud-test.conllu", "lv_lvtb-ud-train.conllu"):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
class UD_LITHUANIAN(UniversalDependenciesCorpus):
    """Lithuanian Universal Dependencies corpus (UD_Lithuanian-ALKSNIS)."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        split_multiwords: bool = True,
    ) -> None:
        """Initialize the corpus, downloading the CoNLL-U splits if necessary.

        :param base_path: optional root folder for datasets (defaults to the flair cache)
        :param in_memory: keep the parsed sentences in memory if True
        :param split_multiwords: split multi-word tokens into separate tokens if True
        """
        # the local dataset folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name

        # fetch each split from the official UD repository (no-op when cached)
        repo_url = "https://raw.githubusercontent.com/UniversalDependencies/UD_Lithuanian-ALKSNIS/master"
        cache_dir = Path("datasets") / dataset_name
        for split_file in (
            "lt_alksnis-ud-dev.conllu",
            "lt_alksnis-ud-test.conllu",
            "lt_alksnis-ud-train.conllu",
        ):
            cached_path(f"{repo_url}/{split_file}", cache_dir)

        super().__init__(data_folder, in_memory=in_memory, split_multiwords=split_multiwords)
| 65,227 | 39.742036 | 118 | py |
flair | flair-master/flair/datasets/ocr.py | import json
from pathlib import Path
from typing import Dict, Optional, Union
import gdown.download_folder
import PIL
from torch.utils.data import Dataset
import flair
from flair.data import BoundingBox, Corpus, FlairDataset, Sentence, get_spans_from_bio
from flair.datasets.base import find_train_dev_test_files
class OcrJsonDataset(FlairDataset):
    """Dataset over paired OCR images and BIO-tagged json annotations.

    Each example is one document: a ``.jpg`` under ``images/`` and a matching
    ``.json`` under ``tagged/`` (matched by file stem). Examples are exposed as
    :class:`Sentence` objects carrying per-token bounding boxes as metadata.
    """

    def __init__(
        self,
        path_to_split_directory: Union[str, Path],
        label_type: str = "ner",
        in_memory: bool = True,
        encoding: str = "utf-8",
        load_images: bool = False,
        normalize_coords_to_thousands: bool = True,
        label_name_map: Optional[Dict[str, str]] = None,
    ) -> None:
        """Instantiates a Dataset from a OCR-Json format.

        The folder is structured with a "images" folder and a "tagged" folder.
        Those folders contain respectively .jpg and .json files with matching file name.
        The json contains 3 fields "words", "bbox", "labels" which are lists of equal length
        "words" is a list of strings, containing the ocr texts,
        "bbox" is a list of int-Tuples, containing left, top, right, bottom
        "labels" is a BIO-tagging of the sentences

        :param path_to_split_directory: base folder with the task data
        :param label_type: the label_type to add the ocr labels to
        :param encoding: the encoding to load the .json files with
        :param normalize_coords_to_thousands: if True, the coordinates will be ranged from 0 to 1000
        :param load_images: if True, the pillow images will be added as metadata
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param label_name_map: Optionally map tag names to different schema.
        :return: a Dataset with Sentences that contain OCR information
        """
        self.in_memory = in_memory
        path_to_split_directory = Path(path_to_split_directory)
        assert path_to_split_directory.exists()
        image_dir = path_to_split_directory / "images"
        tagged_dir = path_to_split_directory / "tagged"
        self.base_path = path_to_split_directory
        assert tagged_dir.exists()
        assert image_dir.exists()
        # keep only stems that have BOTH an image and a tagged json file;
        # sorting makes the example order deterministic
        self.file_names = sorted(
            {p.stem for p in image_dir.iterdir() if p.is_file()} & {p.stem for p in tagged_dir.iterdir() if p.is_file()}
        )
        self.total_sentence_count: int = len(self.file_names)
        self.load_images = load_images
        self.label_type = label_type
        self.encoding = encoding
        self.label_name_map = label_name_map
        self.normalize_coords_to_thousands = normalize_coords_to_thousands
        if in_memory:
            # eagerly parse every example once; __getitem__ then just indexes this list
            self.sentences = [self._load_example(file_name) for file_name in self.file_names]

    def _remap_label(self, tag: str) -> str:
        """Apply the optional ``label_name_map`` to a tag; unmapped tags pass through unchanged."""
        # remap regular tag names
        if self.label_name_map is not None:
            return self.label_name_map.get(tag, tag)  # for example, transforming 'PER' to 'person'
        return tag

    def _load_example(self, file_name: str) -> Sentence:
        """Read one image/json pair and build a Sentence with bbox metadata and span labels."""
        data_path = self.base_path / "tagged" / f"{file_name}.json"
        with data_path.open("r", encoding=self.encoding) as f:
            data = json.load(f)
        sentence = Sentence(text=data["words"])

        img_path = self.base_path / "images" / f"{file_name}.jpg"
        with PIL.Image.open(img_path) as img:
            width, height = img.size
            if self.load_images:
                # force pixel data into memory so the image stays usable
                # after the context manager closes the underlying file
                img.load()
                sentence.add_metadata("image", img.convert("RGB"))
        sentence.add_metadata("img_width", width)
        sentence.add_metadata("img_height", height)

        # attach one bounding box per token, optionally rescaled to a 0-1000 grid
        for token, (left, top, right, bottom) in zip(sentence, data["bbox"]):
            if self.normalize_coords_to_thousands:
                left = int(1000 * left / width)
                top = int(1000 * top / height)
                right = int(1000 * right / width)
                bottom = int(1000 * bottom / height)

            token.add_metadata("bbox", BoundingBox(left=left, top=top, right=right, bottom=bottom))

        # decode the BIO tagging into span-level labels
        for span_indices, score, label in get_spans_from_bio(data["labels"]):
            span = sentence[span_indices[0] : span_indices[-1] + 1]
            value = self._remap_label(label)
            # a remap may collapse a tag to "O", which means "drop this span"
            if value != "O":
                span.add_label(self.label_type, value=value, score=score)
        return sentence

    def is_in_memory(self) -> bool:
        """Return True when all sentences were pre-loaded at construction time."""
        return self.in_memory

    def __len__(self) -> int:
        """Return the number of image/json document pairs in this split."""
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        """Return the Sentence at *index*, loading it lazily when not in memory."""
        if self.in_memory:
            sentence = self.sentences[index]

        # else skip to position in file where sentence begins
        else:
            sentence = self._load_example(self.file_names[index])

            # set sentence context using partials TODO: pointer to dataset is really inefficient
            sentence._has_context = True
            sentence._position_in_dataset = (self, index)
        return sentence
class OcrCorpus(Corpus):
    def __init__(
        self,
        train_path: Optional[Path] = None,
        dev_path: Optional[Path] = None,
        test_path: Optional[Path] = None,
        encoding: str = "utf-8",
        label_type: str = "ner",
        in_memory: bool = True,
        load_images: bool = False,
        normalize_coords_to_thousands: bool = True,
        label_name_map: Optional[Dict[str, str]] = None,
        **corpusargs,
    ) -> None:
        """Instantiates a Corpus from a OCR-Json format.

        Each of the three split folders is optional; splits whose path is None
        are simply omitted from the corpus.

        :param train_path: the folder for the training data
        :param dev_path: the folder for the dev data
        :param test_path: the folder for the test data
        :param label_type: the label_type to add the ocr labels to
        :param encoding: the encoding to load the .json files with
        :param load_images: if True, the pillow images will be added as metadata
        :param normalize_coords_to_thousands: if True, the coordinates will be ranged from 0 to 1000
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param label_name_map: Optionally map tag names to different schema.
        :return: a Corpus with Sentences that contain OCR information
        """

        # all three splits share the same construction arguments, so build
        # them through one local factory instead of repeating the call
        def build_split(split_path: Optional[Path]) -> Optional[Dataset]:
            if split_path is None:
                return None
            return OcrJsonDataset(
                split_path,
                label_type=label_type,
                encoding=encoding,
                in_memory=in_memory,
                load_images=load_images,
                normalize_coords_to_thousands=normalize_coords_to_thousands,
                label_name_map=label_name_map,
            )

        train: Optional[Dataset] = build_split(train_path)
        dev: Optional[Dataset] = build_split(dev_path)
        test: Optional[Dataset] = build_split(test_path)

        super().__init__(train, dev, test, **corpusargs)
class SROIE(OcrCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        encoding: str = "utf-8",
        label_type: str = "ner",
        in_memory: bool = True,
        load_images: bool = False,
        normalize_coords_to_thousands: bool = True,
        label_name_map: Optional[Dict[str, str]] = None,
        **corpusargs,
    ) -> None:
        """Instantiates the SROIE corpus with perfect ocr boxes.

        Downloads and unpacks the dataset on first use, then resolves the
        train/dev/test folders automatically.

        :param base_path: the path to store the dataset or load it from
        :param label_type: the label_type to add the ocr labels to
        :param encoding: the encoding to load the .json files with
        :param load_images: if True, the pillow images will be added as metadata
        :param normalize_coords_to_thousands: if True, the coordinates will be ranged from 0 to 1000
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param label_name_map: Optionally map tag names to different schema.
        :return: a Corpus with Sentences that contain OCR information
        """
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        corpus_dir = root / self.__class__.__name__.lower()

        # download and extract only when the corpus folder is missing
        if not corpus_dir.exists():
            # the url is copied from https://huggingface.co/datasets/darentang/sroie/blob/main/sroie.py#L44
            url = "https://drive.google.com/uc?id=1ZyxAw1d-9UvhgNLGRvsJK4gBCMf0VpGD"
            archive_path = root / "sroie.zip"
            gdown.cached_download(url, str(archive_path), postprocess=gdown.extractall)
            # the zip is no longer needed after extraction
            archive_path.unlink()

        dev_path, test_path, train_path = find_train_dev_test_files(corpus_dir, None, None, None)
        super().__init__(
            train_path,
            dev_path,
            test_path,
            encoding=encoding,
            label_type=label_type,
            in_memory=in_memory,
            load_images=load_images,
            label_name_map=label_name_map,
            normalize_coords_to_thousands=normalize_coords_to_thousands,
            **corpusargs,
        )
| 10,117 | 40.130081 | 120 | py |
flair | flair-master/flair/datasets/relation_extraction.py | import bisect
import io
import json
import logging
import os
import re
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import conllu
import gdown
from conllu.models import Metadata, Token
import flair
from flair.data import Sentence
from flair.datasets.sequence_labeling import ColumnCorpus
from flair.file_utils import cached_path
from flair.splitter import SegtokSentenceSplitter, SentenceSplitter
# Module-level logger shared by the dataset loaders in this module.
log = logging.getLogger("flair")
# PTB bracket escapes -> literal characters. Built once at import time so
# convert_ptb_token does not rebuild the dict on every call.
_PTB_TOKEN_MAP = {
    "-lrb-": "(",
    "-rrb-": ")",
    "-lsb-": "[",
    "-rsb-": "]",
    "-lcb-": "{",
    "-rcb-": "}",
}


def convert_ptb_token(token: str) -> str:
    """Convert PTB tokens to normal tokens.

    PTB bracket escapes such as ``-LRB-`` (matched case-insensitively) are
    mapped to their literal bracket character; any other token is returned
    unchanged.

    :param token: a single token, possibly a PTB bracket escape
    :return: the literal bracket character, or *token* itself
    """
    return _PTB_TOKEN_MAP.get(token.lower(), token)
class RE_ENGLISH_SEMEVAL2010(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        augment_train: bool = False,
        **corpusargs,
    ) -> None:
        """SemEval-2010 Task 8 on Multi-Way Classification of Semantic Relations Between Pairs of Nominals.

        see https://aclanthology.org/S10-1006.pdf

        :param base_path: folder under which the dataset is stored; defaults to the flair cache root
        :param in_memory: if True, keeps the corpus in memory
        :param augment_train: if True, the training split additionally contains the
            inverted relation for every example (directionality augmentation)
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download data if necessary
        semeval_2010_task_8_url = (
            "https://drive.google.com/uc?id=0B_jQiLugGTAkMDQ5ZjZiMTUtMzQ1Yy00YWNmLWJlZDYtOWY1ZDMwY2U4YjFk"
        )
        # augmented and plain training data live in differently-named files
        train_file_name = "semeval2010-task8-train-aug.conllu" if augment_train else "semeval2010-task8-train.conllu"
        data_file = data_folder / train_file_name

        if not data_file.is_file():
            source_data_folder = data_folder / "original"
            source_data_file = source_data_folder / "SemEval2010_task8_all_data.zip"
            os.makedirs(source_data_folder, exist_ok=True)
            gdown.download(semeval_2010_task_8_url, str(source_data_file))
            self.extract_and_convert_to_conllu(
                data_file=source_data_file,
                data_folder=data_folder,
                augment_train=augment_train,
            )

        super().__init__(
            data_folder,
            train_file=train_file_name,
            test_file="semeval2010-task8-test.conllu",
            column_format={1: "text", 2: "ner"},
            comment_symbol="# ",
            in_memory=in_memory,
            **corpusargs,
        )

    def extract_and_convert_to_conllu(self, data_file: Path, data_folder: Path, augment_train: bool):
        """Extract the SemEval zip and convert train/test splits to CoNLL-U Plus files.

        :param data_file: path to the downloaded ``SemEval2010_task8_all_data.zip``
        :param data_folder: destination folder for the converted ``.conllu`` files
        :param augment_train: if True, write inverted relations for the train split
        """
        import zipfile

        source_file_paths = [
            "SemEval2010_task8_all_data/SemEval2010_task8_training/TRAIN_FILE.TXT",
            "SemEval2010_task8_all_data/SemEval2010_task8_testing_keys/TEST_FILE_FULL.TXT",
        ]

        train_filename = "semeval2010-task8-train-aug.conllu" if augment_train else "semeval2010-task8-train.conllu"
        target_filenames = [train_filename, "semeval2010-task8-test.conllu"]

        with zipfile.ZipFile(data_file) as zip_file:
            for source_file_path, target_filename in zip(source_file_paths, target_filenames):
                with zip_file.open(source_file_path, mode="r") as source_file:
                    target_file_path = Path(data_folder) / target_filename
                    with open(target_file_path, mode="w", encoding="utf-8") as target_file:
                        # write CoNLL-U Plus header
                        target_file.write("# global.columns = id form ner\n")

                        # examples are blocks of lines separated by blank lines;
                        # flush the accumulated block on every blank line.
                        # NOTE(review): a final block without a trailing blank line
                        # would be dropped — assumed the source files end with one.
                        raw_lines = []
                        for line in io.TextIOWrapper(source_file, encoding="utf-8"):
                            line = line.strip()

                            if not line:
                                token_list = self._semeval_lines_to_token_list(
                                    raw_lines,
                                    # only the train split is ever augmented
                                    augment_relations=augment_train if "train" in target_filename else False,
                                )
                                target_file.write(token_list.serialize())

                                raw_lines = []
                                continue

                            raw_lines.append(line)

    def _semeval_lines_to_token_list(self, raw_lines, augment_relations):
        """Convert one raw SemEval example block into a conllu TokenList.

        ``raw_lines[0]`` is "<id>\\t<sentence with <e1>/<e2> markers>", ``raw_lines[1]``
        the relation label. Entity markers are stripped from the token sequence and
        turned into BIO tags (E1/E2) plus relation metadata.
        """
        raw_id, raw_text = raw_lines[0].split("\t")
        label = raw_lines[1]
        id_ = int(raw_id)
        raw_text = raw_text.strip('"')

        # Some special cases (e.g., missing spaces before entity marker)
        if id_ in [213, 4612, 6373, 8411, 9867]:
            raw_text = raw_text.replace("<e2>", " <e2>")
        if id_ in [2740, 4219, 4784]:
            raw_text = raw_text.replace("<e1>", " <e1>")
        if id_ == 9256:
            raw_text = raw_text.replace("log- jam", "log-jam")

        # necessary if text should be whitespace tokenizeable
        if id_ in [2609, 7589]:
            raw_text = raw_text.replace("1 1/2", "1-1/2")
        if id_ == 10591:
            raw_text = raw_text.replace("1 1/4", "1-1/4")
        if id_ == 10665:
            raw_text = raw_text.replace("6 1/2", "6-1/2")

        # pad punctuation and entity markers with spaces so that a plain
        # whitespace split yields one marker/word per token
        raw_text = re.sub(r"([.,!?()])$", r" \1", raw_text)
        raw_text = re.sub(r"(e[12]>)([',;:\"\(\)])", r"\1 \2", raw_text)
        raw_text = re.sub(r"([',;:\"\(\)])(</?e[12])", r"\1 \2", raw_text)
        raw_text = raw_text.replace("<e1>", "<e1> ")
        raw_text = raw_text.replace("<e2>", "<e2> ")
        raw_text = raw_text.replace("</e1>", " </e1>")
        raw_text = raw_text.replace("</e2>", " </e2>")

        tokens = raw_text.split(" ")

        # Handle case where tail may occur before the head.
        # The four marker tokens are popped one at a time; each index lookup
        # happens AFTER the previous pop, so the final indices refer to the
        # marker-free token sequence.
        subj_start = tokens.index("<e1>")
        obj_start = tokens.index("<e2>")
        if subj_start < obj_start:
            tokens.pop(subj_start)
            subj_end = tokens.index("</e1>")
            tokens.pop(subj_end)
            obj_start = tokens.index("<e2>")
            tokens.pop(obj_start)
            obj_end = tokens.index("</e2>")
            tokens.pop(obj_end)
        else:
            tokens.pop(obj_start)
            obj_end = tokens.index("</e2>")
            tokens.pop(obj_end)
            subj_start = tokens.index("<e1>")
            tokens.pop(subj_start)
            subj_end = tokens.index("</e1>")
            tokens.pop(subj_end)

        # relation metadata format: "subj_start;subj_end;obj_start;obj_end;label"
        # (1-based start indices)
        relation = ";".join(
            [
                str(subj_start + 1),
                str(subj_end),
                str(obj_start + 1),
                str(obj_end),
                label,
            ]
        )

        if augment_relations:
            # swap the roles of e1 and e2 in the label via a temporary "e3"
            label_inverted = label.replace("e1", "e3")
            label_inverted = label_inverted.replace("e2", "e1")
            label_inverted = label_inverted.replace("e3", "e2")
            relation_inverted = ";".join(
                [
                    str(obj_start + 1),
                    str(obj_end),
                    str(subj_start + 1),
                    str(subj_end),
                    label_inverted,
                ]
            )

        metadata = {
            "text": " ".join(tokens),
            "sentence_id": str(id_),
            "relations": relation + "|" + relation_inverted if augment_relations else relation,
        }

        # emit BIO tags E1/E2 for the subject and object spans
        token_dicts = []
        for idx, token in enumerate(tokens):
            tag = "O"
            prefix = ""

            if subj_start <= idx < subj_end:
                prefix = "B-" if idx == subj_start else "I-"
                tag = "E1"
            elif obj_start <= idx < obj_end:
                prefix = "B-" if idx == obj_start else "I-"
                tag = "E2"

            token_dicts.append(
                {
                    "id": str(idx + 1),
                    "form": token,
                    "ner": prefix + tag,
                }
            )

        return conllu.TokenList(tokens=token_dicts, metadata=metadata)
class RE_ENGLISH_TACRED(ColumnCorpus):
    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True, **corpusargs) -> None:
        """TAC Relation Extraction Dataset.

        with 41 relations from https://nlp.stanford.edu/projects/tacred/.
        Manual download is required for this dataset.

        :param base_path: folder under which the dataset is stored; defaults to the flair cache root
        :param in_memory: if True, keeps the corpus in memory
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name
        data_file = data_folder / "tacred-train.conllu"

        if not data_file.is_file():
            source_data_folder = data_folder / "original"
            # the LDC-licensed zip must be placed here manually
            source_data_file = source_data_folder / "TACRED_LDC.zip"
            os.makedirs(source_data_folder, exist_ok=True)
            self.extract_and_convert_to_conllu(
                data_file=source_data_file,
                data_folder=data_folder,
            )

        super().__init__(
            data_folder,
            column_format={1: "text", 2: "ner"},
            comment_symbol="# ",
            in_memory=in_memory,
            **corpusargs,
        )

    def extract_and_convert_to_conllu(self, data_file: Path, data_folder: Path):
        """Extract the TACRED json splits from *data_file* and write CoNLL-U Plus files.

        :param data_file: path to the manually downloaded ``TACRED_LDC.zip``
        :param data_folder: destination folder for the converted ``.conllu`` files
        """
        import zipfile

        source_file_paths = [
            "tacred/data/json/train.json",
            "tacred/data/json/dev.json",
            "tacred/data/json/test.json",
        ]
        target_filenames = [
            "tacred-train.conllu",
            "tacred-dev.conllu",
            "tacred-test.conllu",
        ]

        with zipfile.ZipFile(data_file) as zip_file:
            for source_file_path, target_filename in zip(source_file_paths, target_filenames):
                with zip_file.open(source_file_path, mode="r") as source_file:
                    target_file_path = Path(data_folder) / target_filename
                    with open(target_file_path, mode="w", encoding="utf-8") as target_file:
                        # write CoNLL-U Plus header
                        target_file.write("# global.columns = id form ner\n")

                        for example in json.load(source_file):
                            token_list = self._tacred_example_to_token_list(example)
                            target_file.write(token_list.serialize())

    def _tacred_example_to_token_list(self, example: Dict[str, Any]) -> conllu.TokenList:
        """Convert one TACRED json example into a conllu TokenList.

        Subject/object spans override the Stanford NER tags; tags are emitted in
        BIO form, and the relation is stored as sentence metadata in the format
        "subj_start;subj_end;obj_start;obj_end;label" (1-based, inclusive).
        """
        id_ = example["id"]
        tokens = example["token"]
        ner = example["stanford_ner"]

        subj_start = example["subj_start"]
        subj_end = example["subj_end"]
        obj_start = example["obj_start"]
        obj_end = example["obj_end"]

        subj_tag = example["subj_type"]
        obj_tag = example["obj_type"]

        label = example["relation"]

        metadata = {
            "text": " ".join(tokens),
            "sentence_id": str(id_),
            "relations": ";".join(
                [
                    str(subj_start + 1),
                    str(subj_end + 1),
                    str(obj_start + 1),
                    str(obj_end + 1),
                    label,
                ]
            ),
        }

        prev_tag = None
        token_dicts = []
        for idx, (token, tag) in enumerate(zip(tokens, ner)):
            # the annotated subject/object spans take precedence over NER tags
            if subj_start <= idx <= subj_end:
                tag = subj_tag
            if obj_start <= idx <= obj_end:
                tag = obj_tag

            prefix = ""
            if tag != "O":
                prefix = "B-" if tag != prev_tag else "I-"

            # BUGFIX: track the previous tag unconditionally (including "O").
            # Previously prev_tag was only updated inside the `tag != "O"` branch,
            # so two same-typed entities separated by "O" tokens were merged into
            # one span via a spurious "I-" continuation instead of a new "B-".
            prev_tag = tag

            token_dicts.append(
                Token(
                    {
                        "id": str(idx + 1),
                        "form": convert_ptb_token(token),
                        "ner": prefix + tag,
                    }
                )
            )

        return conllu.TokenList(tokens=token_dicts, metadata=Metadata(metadata))
class RE_ENGLISH_CONLL04(ColumnCorpus):
    """CoNLL04 joint entity/relation extraction corpus (multihead-repo version)."""

    def __init__(self, base_path: Optional[Union[str, Path]] = None, in_memory: bool = True, **corpusargs) -> None:
        """Download (if needed) and load the CoNLL04 corpus.

        :param base_path: folder under which the dataset is stored; defaults to the flair cache root
        :param in_memory: if True, keeps the corpus in memory
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # TODO: change data source to original CoNLL04 -- this dataset has span formatting errors
        # download data if necessary
        conll04_url = (
            "https://raw.githubusercontent.com/bekou/multihead_joint_entity_relation_extraction/master/data/CoNLL04/"
        )
        data_file = data_folder / "conll04-train.conllu"

        if not data_file.is_file():
            source_data_folder = data_folder / "original"
            cached_path(f"{conll04_url}train.txt", source_data_folder)
            cached_path(f"{conll04_url}dev.txt", source_data_folder)
            cached_path(f"{conll04_url}test.txt", source_data_folder)
            self.convert_to_conllu(
                source_data_folder=source_data_folder,
                data_folder=data_folder,
            )

        super().__init__(
            data_folder,
            in_memory=in_memory,
            column_format={1: "text", 2: "ner"},
            comment_symbol="# ",
            **corpusargs,
        )

    def _parse_incr(self, source_file) -> Iterable[conllu.TokenList]:
        """Incrementally parse the source file, yielding one TokenList per document block.

        A new block starts at every comment line ("#..."); lines are buffered
        until the next comment line (or EOF) and then parsed as one unit.
        """
        fields = ["id", "form", "ner", "relations", "relation_heads"]
        field_parsers = {
            # relations column uses single quotes; normalize to valid JSON first
            "relations": lambda line, i: json.loads(line[i].replace("'", '"')),
            "relation_heads": lambda line, i: json.loads(line[i]),
        }
        metadata_parsers = {"__fallback__": lambda k, v: tuple(k.split())}

        lines: List[str] = []
        for index, line in enumerate(source_file):
            # a comment line after the first line marks the start of a new block:
            # flush and yield the buffered one
            if index > 0 and line.startswith("#"):
                source_str = "".join(lines)
                src_token_list = conllu.parse(
                    source_str,
                    fields=fields,
                    field_parsers=field_parsers,
                    metadata_parsers=metadata_parsers,
                )
                lines = []
                yield src_token_list[0]

            lines.append(line)

        # yield the final buffered block
        source_str = "".join(lines)
        src_token_list = conllu.parse(
            source_str,
            fields=fields,
            field_parsers=field_parsers,
            metadata_parsers=metadata_parsers,
        )
        yield src_token_list[0]

    def convert_to_conllu(self, source_data_folder: Path, data_folder: Path):
        """Convert the downloaded train/dev/test txt files to CoNLL-U Plus files.

        :param source_data_folder: folder containing the raw ``*.txt`` downloads
        :param data_folder: destination folder for the converted ``.conllu`` files
        """
        source_filenames = [
            "train.txt",
            "dev.txt",
            "test.txt",
        ]
        target_filenames = [
            "conll04-train.conllu",
            "conll04-dev.conllu",
            "conll04-test.conllu",
        ]

        for source_filename, target_filename in zip(source_filenames, target_filenames):
            with (source_data_folder / source_filename).open(encoding="utf-8") as source_file, (
                data_folder / target_filename
            ).open("w", encoding="utf-8") as target_file:
                # write CoNLL-U Plus header
                target_file.write("# global.columns = id form ner\n")

                for src_token_list in self._parse_incr(source_file):
                    token_list = self._src_token_list_to_token_list(src_token_list)
                    target_file.write(token_list.serialize())

    def _bio_tags_to_spans(self, tags: List[str]) -> List[Tuple[int, int]]:
        """Decode a BIO tag sequence into a list of inclusive (start, end) token index pairs."""
        spans = []
        span_start = 0
        span_end = 0
        active_conll_tag = None
        for index, tag in enumerate(tags):
            bio_tag = tag[0]
            conll_tag = tag[2:]
            if bio_tag == "O":
                # The span has ended.
                if active_conll_tag is not None:
                    spans.append((span_start, span_end))
                active_conll_tag = None
                continue
            elif bio_tag == "B" or (bio_tag == "I" and conll_tag != active_conll_tag):
                # We are entering a new span; reset indices
                # and active tag to new span.
                if active_conll_tag is not None:
                    spans.append((span_start, span_end))
                active_conll_tag = conll_tag
                span_start = index
                span_end = index
            elif bio_tag == "I" and conll_tag == active_conll_tag:
                # We're inside a span.
                span_end += 1
            else:
                # unreachable given the three cases above
                raise Exception("That should never happen.")

        # Last token might have been a part of a valid span.
        if active_conll_tag is not None:
            spans.append((span_start, span_end))

        return spans

    def _src_token_list_to_token_list(self, src_token_list):
        """Convert a parsed source block into a TokenList with relation metadata.

        Relation head annotations point at the END token index of the target span;
        they are resolved to full spans via the BIO decoding of the NER column.
        """
        tokens = []
        token_dicts = []
        ner_tags = []
        for index, token in enumerate(src_token_list, start=1):
            text = token["form"]
            ner_tag = token["ner"]
            tokens.append(text)
            ner_tags.append(ner_tag)

            token_dicts.append(
                {
                    "id": str(index),
                    "form": text,
                    "ner": ner_tag,
                }
            )

        # map each span's end index to the full (start, end) pair for head lookup
        span_end_to_span = {end: (start, end) for start, end in self._bio_tags_to_spans(ner_tags)}

        relations = []
        for index, token in enumerate(src_token_list):
            for relation, head in zip(token["relations"], token["relation_heads"]):
                # "N" marks "no relation" for this token
                if relation == "N":
                    continue

                subj_start, subj_end = span_end_to_span[index]
                obj_start, obj_end = span_end_to_span[head]
                relations.append((subj_start, subj_end, obj_start, obj_end, relation))

        doc_id = src_token_list.metadata["doc"]

        metadata = {
            "text": " ".join(tokens),
            "sentence_id": doc_id,
            "relations": "|".join(
                [
                    ";".join(
                        [
                            str(subj_start + 1),
                            str(subj_end + 1),
                            str(obj_start + 1),
                            str(obj_end + 1),
                            relation,
                        ]
                    )
                    for subj_start, subj_end, obj_start, obj_end, relation in relations
                ]
            ),
        }

        return conllu.TokenList(tokens=token_dicts, metadata=metadata)
class RE_ENGLISH_DRUGPROT(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        sentence_splitter: SentenceSplitter = SegtokSentenceSplitter(),  # NOTE(review): shared default instance — assumed stateless
        **corpusargs,
    ) -> None:
        """Initialize the DrugProt corpus.

        Biocreative VII Track 1 from https://zenodo.org/record/5119892#.YSdSaVuxU5k/ on drug and chemical-protein
        interactions.

        :param base_path: folder under which the dataset is stored; defaults to the flair cache root
        :param in_memory: if True, keeps the corpus in memory
        :param sentence_splitter: splitter used to segment titles/abstracts into sentences;
            the converted files are cached per splitter type
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        self.sentence_splitter = sentence_splitter

        # this dataset name — includes the splitter type so conversions with
        # different splitters are cached separately
        dataset_name = self.__class__.__name__.lower() + "_" + type(self.sentence_splitter).__name__ + "_v3"

        data_folder = base_path / dataset_name

        drugprot_url = "https://zenodo.org/record/5119892/files/drugprot-training-development-test-background.zip"
        data_file = data_folder / "drugprot-train.conllu"

        if not data_file.is_file():
            source_data_folder = data_folder / "original"
            cached_path(drugprot_url, source_data_folder)
            self.extract_and_convert_to_conllu(
                data_file=source_data_folder / "drugprot-training-development-test-background.zip",
                data_folder=data_folder,
            )

        super().__init__(
            data_folder,
            in_memory=in_memory,
            sample_missing_splits=False,
            column_format={1: "text", 2: "ner", 3: "ner"},
            comment_symbol="# ",
            **corpusargs,
        )

    def extract_and_convert_to_conllu(self, data_file, data_folder):
        """Read entities, relations, and abstracts from the zip and write CoNLL-U Plus splits.

        :param data_file: path to the downloaded DrugProt zip archive
        :param data_folder: destination folder for the converted ``.conllu`` files
        """
        import zipfile

        splits = ["training", "development"]
        target_filenames = ["drugprot-train.conllu", "drugprot-dev.conllu"]

        with zipfile.ZipFile(data_file) as zip_file:
            for split, target_filename in zip(splits, target_filenames):
                # entity annotations: pmid -> {entity_id: (type, char_start, char_end, mention)}
                pmid_to_entities = defaultdict(dict)
                # relation annotations: pmid -> {(relation_type, entity_id_1, entity_id_2)}
                pmid_to_relations = defaultdict(set)

                with zip_file.open(
                    f"drugprot-gs-training-development/{split}/drugprot_{split}_entities.tsv"
                ) as entites_file:
                    for line in io.TextIOWrapper(entites_file, encoding="utf-8"):
                        fields = line.strip().split("\t")
                        pmid, ent_id, ent_type, start, end, mention = fields
                        pmid_to_entities[pmid][ent_id] = (
                            ent_type,
                            int(start),
                            int(end),
                            mention,
                        )

                with zip_file.open(
                    f"drugprot-gs-training-development/{split}/drugprot_{split}_relations.tsv"
                ) as relations_file:
                    for line in io.TextIOWrapper(relations_file, encoding="utf-8"):
                        fields = line.strip().split("\t")
                        pmid, rel_type, arg1, arg2 = fields
                        # arguments look like "Arg1:T1" — keep only the entity id
                        ent1 = arg1.split(":")[1]
                        ent2 = arg2.split(":")[1]

                        pmid_to_relations[pmid].add((rel_type, ent1, ent2))

                tokenlists: List[conllu.TokenList] = []
                with zip_file.open(
                    # "abstracs" spelling matches the file name inside the archive
                    f"drugprot-gs-training-development/{split}/drugprot_{split}_abstracs.tsv"
                ) as abstracts_file:
                    for line in io.TextIOWrapper(abstracts_file, encoding="utf-8"):
                        fields = line.strip().split("\t")
                        pmid, title, abstract = fields

                        title_sentences = self.sentence_splitter.split(title)
                        abstract_sentences = self.sentence_splitter.split(abstract)

                        tokenlists.extend(
                            self.drugprot_document_to_tokenlists(
                                pmid=pmid,
                                title_sentences=title_sentences,
                                abstract_sentences=abstract_sentences,
                                # entity char offsets count from the start of the
                                # title; +1 accounts for the separator character
                                abstract_offset=len(title) + 1,
                                entities=pmid_to_entities[pmid],
                                relations=pmid_to_relations[pmid],
                            )
                        )

                target_file_path = Path(data_folder) / target_filename
                with open(target_file_path, mode="w", encoding="utf-8") as target_file:
                    # write CoNLL-U Plus header
                    target_file.write("# global.columns = id form ner ner-2\n")

                    for tokenlist in tokenlists:
                        target_file.write(tokenlist.serialize())

    def char_spans_to_token_spans(self, char_spans, token_offsets):
        """Map character-level (start, end) spans onto token index spans.

        :param char_spans: list of (char_start, char_end) pairs
        :param token_offsets: list of (char_start, char_end) pairs, one per token, in order
        :return: list of (token_start, token_end) pairs; token_end is exclusive
        """
        token_starts = [s[0] for s in token_offsets]
        token_ends = [s[1] for s in token_offsets]

        token_spans = []
        for char_start, char_end in char_spans:
            # binary search over the sorted token boundaries
            token_start = bisect.bisect_right(token_ends, char_start)
            token_end = bisect.bisect_left(token_starts, char_end)
            token_spans.append((token_start, token_end))

        return token_spans

    def has_overlap(self, a, b):
        """Return True if the half-open intervals *a* and *b* overlap (False if either is None)."""
        if a is None or b is None:
            return False

        return max(0, min(a[1], b[1]) - max(a[0], b[0])) > 0

    def drugprot_document_to_tokenlists(
        self,
        pmid: str,
        title_sentences: List[Sentence],
        abstract_sentences: List[Sentence],
        abstract_offset: int,
        entities: Dict[str, Tuple[str, int, int, str]],
        relations: Set[Tuple[str, str, str]],
    ) -> List[conllu.TokenList]:
        """Convert one DrugProt document into per-sentence TokenLists.

        Entity mentions become BIO tags; because mentions may overlap, two tag
        columns ("ner", "ner-2") are used — longer entities are assigned first
        and an entity moves to the second column when its tokens are already
        tagged in the first. Relations whose both entities fall inside a
        sentence are stored in that sentence's metadata.

        :param pmid: PubMed id of the document
        :param title_sentences: sentences of the title
        :param abstract_sentences: sentences of the abstract
        :param abstract_offset: character offset of the abstract within the document
        :param entities: entity_id -> (type, char_start, char_end, mention)
        :param relations: set of (relation_type, entity_id_1, entity_id_2)
        :return: one TokenList per sentence
        """
        tokenlists: List[conllu.TokenList] = []
        sentence_id = 1
        for offset, sents in [
            (0, title_sentences),
            (abstract_offset, abstract_sentences),
        ]:
            for sent in sents:
                assert sent.start_position is not None
                assert sent.end_position is not None
                sent_char_start = sent.start_position + offset
                sent_char_end = sent.end_position + offset

                # keep only entities fully contained in this sentence
                entities_in_sent = set()
                for entity_id, (_, char_start, char_end, _) in entities.items():
                    if sent_char_start <= char_start and char_end <= sent_char_end:
                        entities_in_sent.add(entity_id)

                entity_char_spans = [(entities[entity_id][1], entities[entity_id][2]) for entity_id in entities_in_sent]

                # document-level character offsets of each token in this sentence
                token_offsets = [
                    (
                        sent.start_position + (token.start_position or 0) + offset,
                        sent.start_position + (token.end_position or 0) + offset,
                    )
                    for token in sent.tokens
                ]
                entity_token_spans = self.char_spans_to_token_spans(entity_char_spans, token_offsets)

                tags_1 = ["O"] * len(sent)
                tags_2 = ["O"] * len(sent)
                entity_id_to_token_idx = {}

                # assign longer entities first so overlapping shorter ones
                # fall through to the second tag row
                ordered_entities = sorted(
                    zip(entities_in_sent, entity_token_spans),
                    key=lambda x: x[1][1] - x[1][0],
                    reverse=True,
                )

                for entity_id, entity_span in ordered_entities:
                    entity_id_to_token_idx[entity_id] = entity_span

                    # check if first tag row is already occupied
                    token_start, token_end = entity_span
                    tag_1_occupied = False
                    for i in range(token_start, token_end):
                        if tags_1[i] != "O":
                            tag_1_occupied = True

                    # if first tag row is occupied, use second tag row
                    tags = tags_2 if tag_1_occupied else tags_1

                    tag = entities[entity_id][0]
                    token_start, token_end = entity_span
                    for i in range(token_start, token_end):
                        prefix = "B-" if i == token_start else "I-"
                        tags[i] = prefix + tag

                token_dicts = []
                for i, (token, tag_1, tag_2) in enumerate(zip(sent, tags_1, tags_2)):
                    # hardcoded mapping TODO: perhaps find nicer solution
                    tag_1 = tag_1.replace("GENE-N", "GENE")
                    tag_1 = tag_1.replace("GENE-Y", "GENE")
                    tag_2 = tag_2.replace("GENE-N", "GENE")
                    tag_2 = tag_2.replace("GENE-Y", "GENE")

                    token_dicts.append(
                        Token(
                            {
                                "id": str(i + 1),
                                "form": token.text,
                                "ner": tag_1,
                                "ner-2": tag_2,
                            }
                        )
                    )

                # keep only relations whose both entities lie in this sentence
                relations_in_sent = []
                for relation, ent1, ent2 in [r for r in relations if {r[1], r[2]} <= entities_in_sent]:
                    subj_start = entity_id_to_token_idx[ent1][0]
                    subj_end = entity_id_to_token_idx[ent1][1]
                    obj_start = entity_id_to_token_idx[ent2][0]
                    obj_end = entity_id_to_token_idx[ent2][1]
                    relations_in_sent.append((subj_start, subj_end, obj_start, obj_end, relation))

                metadata = {
                    "text": sent.to_original_text(),
                    "doc_id": pmid,
                    "sentence_id": str(sentence_id),
                    "relations": "|".join(
                        [
                            ";".join(
                                [
                                    str(subj_start + 1),
                                    str(subj_end),
                                    str(obj_start + 1),
                                    str(obj_end),
                                    relation,
                                ]
                            )
                            for subj_start, subj_end, obj_start, obj_end, relation in relations_in_sent
                        ]
                    ),
                }

                tokenlists.append(conllu.TokenList(tokens=token_dicts, metadata=Metadata(metadata)))

                sentence_id += 1

        return tokenlists
| 29,860 | 37.48067 | 120 | py |
flair | flair-master/flair/datasets/__init__.py | # Expose base classses
from .base import (
DataLoader,
FlairDatapointDataset,
MongoDataset,
SentenceDataset,
StringDataset,
)
# Expose all biomedical data sets used for the evaluation of BioBERT
# -
# -
# -
# -
# Expose all biomedical data sets using the HUNER splits
# Expose all biomedical data sets
from .biomedical import (
ANAT_EM,
AZDZ,
BC2GM,
BIO_INFER,
BIOBERT_CHEMICAL_BC4CHEMD,
BIOBERT_CHEMICAL_BC5CDR,
BIOBERT_DISEASE_BC5CDR,
BIOBERT_DISEASE_NCBI,
BIOBERT_GENE_BC2GM,
BIOBERT_GENE_JNLPBA,
BIOBERT_SPECIES_LINNAEUS,
BIOBERT_SPECIES_S800,
BIONLP2013_CG,
BIONLP2013_PC,
BIOSEMANTICS,
CDR,
CELL_FINDER,
CEMP,
CHEMDNER,
CLL,
CRAFT,
CRAFT_V4,
DECA,
FSU,
GELLUS,
GPRO,
HUNER_CELL_LINE,
HUNER_CELL_LINE_CELL_FINDER,
HUNER_CELL_LINE_CLL,
HUNER_CELL_LINE_GELLUS,
HUNER_CELL_LINE_JNLPBA,
HUNER_CHEMICAL,
HUNER_CHEMICAL_CDR,
HUNER_CHEMICAL_CEMP,
HUNER_CHEMICAL_CHEBI,
HUNER_CHEMICAL_CHEMDNER,
HUNER_CHEMICAL_CRAFT_V4,
HUNER_CHEMICAL_SCAI,
HUNER_DISEASE,
HUNER_DISEASE_CDR,
HUNER_DISEASE_MIRNA,
HUNER_DISEASE_NCBI,
HUNER_DISEASE_PDR,
HUNER_DISEASE_SCAI,
HUNER_DISEASE_VARIOME,
HUNER_GENE,
HUNER_GENE_BC2GM,
HUNER_GENE_BIO_INFER,
HUNER_GENE_CELL_FINDER,
HUNER_GENE_CHEBI,
HUNER_GENE_CRAFT_V4,
HUNER_GENE_DECA,
HUNER_GENE_FSU,
HUNER_GENE_GPRO,
HUNER_GENE_IEPA,
HUNER_GENE_JNLPBA,
HUNER_GENE_LOCTEXT,
HUNER_GENE_MIRNA,
HUNER_GENE_OSIRIS,
HUNER_GENE_VARIOME,
HUNER_SPECIES,
HUNER_SPECIES_CELL_FINDER,
HUNER_SPECIES_CHEBI,
HUNER_SPECIES_CRAFT_V4,
HUNER_SPECIES_LINNEAUS,
HUNER_SPECIES_LOCTEXT,
HUNER_SPECIES_MIRNA,
HUNER_SPECIES_S800,
HUNER_SPECIES_VARIOME,
IEPA,
JNLPBA,
LINNEAUS,
LOCTEXT,
MIRNA,
NCBI_DISEASE,
OSIRIS,
PDR,
S800,
SCAI_CHEMICALS,
SCAI_DISEASE,
VARIOME,
)
# Expose all document classification datasets
from .document_classification import (
AMAZON_REVIEWS,
COMMUNICATIVE_FUNCTIONS,
GERMEVAL_2018_OFFENSIVE_LANGUAGE,
GLUE_COLA,
GLUE_SST2,
GO_EMOTIONS,
IMDB,
NEWSGROUPS,
SENTEVAL_CR,
SENTEVAL_MPQA,
SENTEVAL_MR,
SENTEVAL_SST_BINARY,
SENTEVAL_SST_GRANULAR,
SENTEVAL_SUBJ,
SENTIMENT_140,
STACKOVERFLOW,
TREC_6,
TREC_50,
WASSA_ANGER,
WASSA_FEAR,
WASSA_JOY,
WASSA_SADNESS,
YAHOO_ANSWERS,
ClassificationCorpus,
ClassificationDataset,
CSVClassificationCorpus,
CSVClassificationDataset,
)
# word sense disambiguation
# Expose all entity linking datasets
from .entity_linking import (
NEL_ENGLISH_AIDA,
NEL_ENGLISH_AQUAINT,
NEL_ENGLISH_IITB,
NEL_ENGLISH_REDDIT,
NEL_ENGLISH_TWEEKI,
NEL_GERMAN_HIPE,
WSD_MASC,
WSD_OMSTI,
WSD_RAGANATO_ALL,
WSD_SEMCOR,
WSD_TRAINOMATIC,
WSD_UFSAC,
WSD_WORDNET_GLOSS_TAGGED,
ZELDA,
)
# Expose all relation extraction datasets
from .ocr import SROIE, OcrJsonDataset
from .relation_extraction import (
RE_ENGLISH_CONLL04,
RE_ENGLISH_DRUGPROT,
RE_ENGLISH_SEMEVAL2010,
RE_ENGLISH_TACRED,
)
# universal proposition banks
# keyphrase detection datasets
# other NER datasets
# standard NER datasets
# Expose all sequence labeling datasets
from .sequence_labeling import (
BIOSCOPE,
CONLL_03,
CONLL_03_DUTCH,
CONLL_03_GERMAN,
CONLL_03_SPANISH,
CONLL_2000,
FEWNERD,
KEYPHRASE_INSPEC,
KEYPHRASE_SEMEVAL2010,
KEYPHRASE_SEMEVAL2017,
NER_ARABIC_ANER,
NER_ARABIC_AQMAR,
NER_BASQUE,
NER_CHINESE_WEIBO,
NER_DANISH_DANE,
NER_ENGLISH_MOVIE_COMPLEX,
NER_ENGLISH_MOVIE_SIMPLE,
NER_ENGLISH_PERSON,
NER_ENGLISH_RESTAURANT,
NER_ENGLISH_SEC_FILLINGS,
NER_ENGLISH_STACKOVERFLOW,
NER_ENGLISH_TWITTER,
NER_ENGLISH_WEBPAGES,
NER_ENGLISH_WIKIGOLD,
NER_ENGLISH_WNUT_2020,
NER_FINNISH,
NER_GERMAN_BIOFID,
NER_GERMAN_EUROPARL,
NER_GERMAN_GERMEVAL,
NER_GERMAN_LEGAL,
NER_GERMAN_POLITICS,
NER_HIPE_2022,
NER_HUNGARIAN,
NER_ICDAR_EUROPEANA,
NER_ICELANDIC,
NER_JAPANESE,
NER_MASAKHANE,
NER_MULTI_CONER,
NER_MULTI_CONER_V2,
NER_MULTI_WIKIANN,
NER_MULTI_WIKINER,
NER_MULTI_XTREME,
NER_NERMUD,
NER_SWEDISH,
NER_TURKU,
NER_UKRAINIAN,
ONTONOTES,
UP_CHINESE,
UP_ENGLISH,
UP_FINNISH,
UP_FRENCH,
UP_GERMAN,
UP_ITALIAN,
UP_SPANISH,
UP_SPANISH_ANCORA,
WNUT_17,
ColumnCorpus,
ColumnDataset,
)
# Expose all text-image datasets
from .text_image import FeideggerCorpus, FeideggerDataset
# Expose all text-text datasets
from .text_text import (
GLUE_MNLI,
GLUE_MRPC,
GLUE_QNLI,
GLUE_QQP,
GLUE_RTE,
GLUE_STSB,
GLUE_WNLI,
SUPERGLUE_RTE,
DataPairCorpus,
DataPairDataset,
OpusParallelCorpus,
ParallelTextCorpus,
ParallelTextDataset,
)
# Expose all treebanks
from .treebanks import (
UD_AFRIKAANS,
UD_ANCIENT_GREEK,
UD_ARABIC,
UD_ARMENIAN,
UD_BASQUE,
UD_BELARUSIAN,
UD_BULGARIAN,
UD_CATALAN,
UD_CHINESE,
UD_COPTIC,
UD_CROATIAN,
UD_CZECH,
UD_DANISH,
UD_DUTCH,
UD_ENGLISH,
UD_ESTONIAN,
UD_FAROESE,
UD_FINNISH,
UD_FRENCH,
UD_GALICIAN,
UD_GERMAN,
UD_GERMAN_HDT,
UD_GOTHIC,
UD_GREEK,
UD_HEBREW,
UD_HINDI,
UD_INDONESIAN,
UD_IRISH,
UD_ITALIAN,
UD_JAPANESE,
UD_KAZAKH,
UD_KOREAN,
UD_LATIN,
UD_LATVIAN,
UD_LITHUANIAN,
UD_LIVVI,
UD_MALTESE,
UD_MARATHI,
UD_NORTH_SAMI,
UD_NORWEGIAN,
UD_OLD_CHURCH_SLAVONIC,
UD_OLD_FRENCH,
UD_PERSIAN,
UD_POLISH,
UD_PORTUGUESE,
UD_ROMANIAN,
UD_RUSSIAN,
UD_SERBIAN,
UD_SLOVAK,
UD_SLOVENIAN,
UD_SPANISH,
UD_SWEDISH,
UD_TURKISH,
UD_UKRAINIAN,
UD_WOLOF,
UniversalDependenciesCorpus,
UniversalDependenciesDataset,
)
# Public API of the ``flair.datasets`` package: every name listed here is
# re-exported (``from flair.datasets import *``) and is the supported dataset
# surface. Entries are grouped roughly by dataset family in the same order as
# the imports above (base loaders, biomedical NER, document classification,
# entity linking / WSD, relation extraction, sequence labeling, text-image,
# text-text, universal dependency treebanks).
__all__ = [
    "DataLoader",
    "OcrJsonDataset",
    "SROIE",
    "FlairDatapointDataset",
    "SentenceDataset",
    "MongoDataset",
    "StringDataset",
    # biomedical NER datasets
    "ANAT_EM",
    "AZDZ",
    "BC2GM",
    "BIO_INFER",
    "BIOBERT_CHEMICAL_BC4CHEMD",
    "BIOBERT_CHEMICAL_BC5CDR",
    "BIOBERT_DISEASE_BC5CDR",
    "BIOBERT_DISEASE_NCBI",
    "BIOBERT_GENE_BC2GM",
    "BIOBERT_GENE_JNLPBA",
    "BIOBERT_SPECIES_LINNAEUS",
    "BIOBERT_SPECIES_S800",
    "BIONLP2013_CG",
    "BIONLP2013_PC",
    "BIOSEMANTICS",
    "CDR",
    "CELL_FINDER",
    "CEMP",
    "CHEMDNER",
    "CLL",
    "CRAFT",
    "CRAFT_V4",
    "DECA",
    "FSU",
    "GELLUS",
    "GPRO",
    "HUNER_CELL_LINE",
    "HUNER_CELL_LINE_CELL_FINDER",
    "HUNER_CELL_LINE_CLL",
    "HUNER_CELL_LINE_GELLUS",
    "HUNER_CELL_LINE_JNLPBA",
    "HUNER_CHEMICAL",
    "HUNER_CHEMICAL_CDR",
    "HUNER_CHEMICAL_CEMP",
    "HUNER_CHEMICAL_CHEBI",
    "HUNER_CHEMICAL_CHEMDNER",
    "HUNER_CHEMICAL_CRAFT_V4",
    "HUNER_CHEMICAL_SCAI",
    "HUNER_DISEASE",
    "HUNER_DISEASE_CDR",
    "HUNER_DISEASE_MIRNA",
    "HUNER_DISEASE_NCBI",
    "HUNER_DISEASE_PDR",
    "HUNER_DISEASE_SCAI",
    "HUNER_DISEASE_VARIOME",
    "HUNER_GENE",
    "HUNER_GENE_BC2GM",
    "HUNER_GENE_BIO_INFER",
    "HUNER_GENE_CELL_FINDER",
    "HUNER_GENE_CHEBI",
    "HUNER_GENE_CRAFT_V4",
    "HUNER_GENE_DECA",
    "HUNER_GENE_FSU",
    "HUNER_GENE_GPRO",
    "HUNER_GENE_IEPA",
    "HUNER_GENE_JNLPBA",
    "HUNER_GENE_LOCTEXT",
    "HUNER_GENE_MIRNA",
    "HUNER_GENE_OSIRIS",
    "HUNER_GENE_VARIOME",
    "HUNER_SPECIES",
    "HUNER_SPECIES_CELL_FINDER",
    "HUNER_SPECIES_CHEBI",
    "HUNER_SPECIES_CRAFT_V4",
    "HUNER_SPECIES_LINNEAUS",
    "HUNER_SPECIES_LOCTEXT",
    "HUNER_SPECIES_MIRNA",
    "HUNER_SPECIES_S800",
    "HUNER_SPECIES_VARIOME",
    "IEPA",
    "JNLPBA",
    "LINNEAUS",
    "LOCTEXT",
    "MIRNA",
    "NCBI_DISEASE",
    "ONTONOTES",
    "OSIRIS",
    "PDR",
    "S800",
    "SCAI_CHEMICALS",
    "SCAI_DISEASE",
    "VARIOME",
    # document classification datasets
    "AMAZON_REVIEWS",
    "COMMUNICATIVE_FUNCTIONS",
    "GERMEVAL_2018_OFFENSIVE_LANGUAGE",
    "GLUE_COLA",
    "GO_EMOTIONS",
    "IMDB",
    "NEWSGROUPS",
    "STACKOVERFLOW",
    "SENTEVAL_CR",
    "SENTEVAL_MPQA",
    "SENTEVAL_MR",
    "SENTEVAL_SST_BINARY",
    "SENTEVAL_SST_GRANULAR",
    "SENTEVAL_SUBJ",
    "SENTIMENT_140",
    "TREC_6",
    "TREC_50",
    "WASSA_ANGER",
    "WASSA_FEAR",
    "WASSA_JOY",
    "WASSA_SADNESS",
    "YAHOO_ANSWERS",
    "ClassificationCorpus",
    "ClassificationDataset",
    "CSVClassificationCorpus",
    "CSVClassificationDataset",
    # entity linking / word sense disambiguation datasets
    "NEL_ENGLISH_AIDA",
    "NEL_ENGLISH_AQUAINT",
    "NEL_ENGLISH_IITB",
    "NEL_ENGLISH_REDDIT",
    "NEL_ENGLISH_TWEEKI",
    "NEL_GERMAN_HIPE",
    "WSD_MASC",
    "WSD_OMSTI",
    "WSD_RAGANATO_ALL",
    "WSD_SEMCOR",
    "WSD_TRAINOMATIC",
    "WSD_UFSAC",
    "WSD_WORDNET_GLOSS_TAGGED",
    # relation extraction datasets
    "RE_ENGLISH_CONLL04",
    "RE_ENGLISH_DRUGPROT",
    "RE_ENGLISH_SEMEVAL2010",
    "RE_ENGLISH_TACRED",
    # sequence labeling datasets
    "BIOSCOPE",
    "CONLL_03",
    "CONLL_03_DUTCH",
    "CONLL_03_GERMAN",
    "CONLL_03_SPANISH",
    "CONLL_2000",
    "FEWNERD",
    "KEYPHRASE_INSPEC",
    "KEYPHRASE_SEMEVAL2010",
    "KEYPHRASE_SEMEVAL2017",
    "NER_ARABIC_ANER",
    "NER_ARABIC_AQMAR",
    "NER_BASQUE",
    "NER_CHINESE_WEIBO",
    "NER_DANISH_DANE",
    "NER_ENGLISH_MOVIE_COMPLEX",
    "NER_ENGLISH_MOVIE_SIMPLE",
    "NER_ENGLISH_PERSON",
    "NER_ENGLISH_RESTAURANT",
    "NER_ENGLISH_SEC_FILLINGS",
    "NER_ENGLISH_STACKOVERFLOW",
    "NER_ENGLISH_TWITTER",
    "NER_ENGLISH_WEBPAGES",
    "NER_ENGLISH_WIKIGOLD",
    "NER_ENGLISH_WNUT_2020",
    "NER_FINNISH",
    "NER_GERMAN_BIOFID",
    "NER_GERMAN_EUROPARL",
    "NER_GERMAN_GERMEVAL",
    "NER_GERMAN_LEGAL",
    "NER_GERMAN_POLITICS",
    "NER_HIPE_2022",
    "NER_HUNGARIAN",
    "NER_ICDAR_EUROPEANA",
    "NER_ICELANDIC",
    "NER_JAPANESE",
    "NER_NERMUD",
    "NER_MASAKHANE",
    "NER_MULTI_WIKIANN",
    "NER_MULTI_WIKINER",
    "NER_MULTI_XTREME",
    "NER_SWEDISH",
    "NER_TURKU",
    "NER_UKRAINIAN",
    "UP_CHINESE",
    "UP_ENGLISH",
    "UP_FINNISH",
    "UP_FRENCH",
    "UP_GERMAN",
    "UP_ITALIAN",
    "UP_SPANISH",
    "UP_SPANISH_ANCORA",
    "WNUT_17",
    "ColumnCorpus",
    "ColumnDataset",
    "NER_MULTI_CONER",
    "NER_MULTI_CONER_V2",
    # text-image datasets
    "FeideggerCorpus",
    "FeideggerDataset",
    # text-text datasets
    "GLUE_MNLI",
    "GLUE_MRPC",
    "GLUE_QNLI",
    "GLUE_QQP",
    "GLUE_RTE",
    "GLUE_WNLI",
    "GLUE_SST2",
    "GLUE_STSB",
    "SUPERGLUE_RTE",
    "DataPairCorpus",
    "DataPairDataset",
    "OpusParallelCorpus",
    "ParallelTextCorpus",
    "ParallelTextDataset",
    # universal dependency treebanks
    "UD_AFRIKAANS",
    "UD_ANCIENT_GREEK",
    "UD_ARABIC",
    "UD_ARMENIAN",
    "UD_BASQUE",
    "UD_BELARUSIAN",
    "UD_BULGARIAN",
    "UD_CATALAN",
    "UD_CHINESE",
    "UD_COPTIC",
    "UD_CROATIAN",
    "UD_CZECH",
    "UD_DANISH",
    "UD_DUTCH",
    "UD_ENGLISH",
    "UD_ESTONIAN",
    "UD_FAROESE",
    "UD_FINNISH",
    "UD_FRENCH",
    "UD_GALICIAN",
    "UD_GERMAN",
    "UD_GERMAN_HDT",
    "UD_GOTHIC",
    "UD_GREEK",
    "UD_HEBREW",
    "UD_HINDI",
    "UD_INDONESIAN",
    "UD_IRISH",
    "UD_ITALIAN",
    "UD_JAPANESE",
    "UD_KAZAKH",
    "UD_KOREAN",
    "UD_LATIN",
    "UD_LATVIAN",
    "UD_LITHUANIAN",
    "UD_LIVVI",
    "UD_MALTESE",
    "UD_MARATHI",
    "UD_NORTH_SAMI",
    "UD_NORWEGIAN",
    "UD_OLD_CHURCH_SLAVONIC",
    "UD_OLD_FRENCH",
    "UD_PERSIAN",
    "UD_POLISH",
    "UD_PORTUGUESE",
    "UD_ROMANIAN",
    "UD_RUSSIAN",
    "UD_SERBIAN",
    "UD_SLOVAK",
    "UD_SLOVENIAN",
    "UD_SPANISH",
    "UD_SWEDISH",
    "UD_TURKISH",
    "UD_UKRAINIAN",
    "UD_WOLOF",
    "UniversalDependenciesCorpus",
    "UniversalDependenciesDataset",
    "ZELDA",
]
| 11,771 | 19.58042 | 68 | py |
flair | flair-master/flair/datasets/sequence_labeling.py | import copy
import json
import logging
import os
import re
import shutil
from collections import defaultdict
from pathlib import Path
from typing import (
Any,
DefaultDict,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
Union,
cast,
)
from torch.utils.data import ConcatDataset, Dataset
import flair
from flair.data import (
Corpus,
FlairDataset,
MultiCorpus,
Relation,
Sentence,
Token,
get_spans_from_bio,
)
from flair.datasets.base import find_train_dev_test_files
from flair.file_utils import cached_path, unpack_file
log = logging.getLogger("flair")
class MultiFileJsonlCorpus(Corpus):
    """This class represents a generic Jsonl corpus with multiple train, dev, and test files."""

    def __init__(
        self,
        train_files=None,
        test_files=None,
        dev_files=None,
        encoding: str = "utf-8",
        text_column_name: str = "data",
        label_column_name: str = "label",
        label_type: str = "ner",
        **corpusargs,
    ) -> None:
        """Instantiates a MultiFileJsonlCorpus as, e.g., created with doccano's JSONL export.

        Note that at least one of train_files, test_files, and dev_files must contain one path.
        Otherwise, the initialization will fail.

        :param corpusargs: Additional arguments for Corpus initialization
        :param train_files: the name of the train files
        :param test_files: the name of the test files
        :param dev_files: the name of the dev files, if empty, dev data is sampled from train
        :param encoding: file encoding used when reading all jsonl files
        :param text_column_name: Name of the text column inside the jsonl files.
        :param label_column_name: Name of the label column inside the jsonl files.
        :param label_type: The label type to assign to the parsed annotations (e.g. "ner")
        :raises RuntimeError: If no paths are given
        """

        def make_split(files) -> Optional[Dataset]:
            # One JsonlDataset per file, concatenated; None if no files for this split.
            if not (files and files[0]):
                return None
            return ConcatDataset(
                [
                    JsonlDataset(
                        split_file,
                        text_column_name=text_column_name,
                        label_column_name=label_column_name,
                        label_type=label_type,
                        # FIX: previously only the train split received the
                        # `encoding` argument; dev/test silently read as utf-8.
                        encoding=encoding,
                    )
                    for split_file in files
                ]
            )

        train = make_split(train_files)
        test = make_split(test_files)
        dev = make_split(dev_files)

        super().__init__(train, dev, test, **corpusargs)
class JsonlCorpus(MultiFileJsonlCorpus):
    """Convenience wrapper around :class:`MultiFileJsonlCorpus` for the common
    one-file-per-split layout (train/dev/test inside a single data folder)."""

    def __init__(
        self,
        data_folder: Union[str, Path],
        train_file: Optional[Union[str, Path]] = None,
        test_file: Optional[Union[str, Path]] = None,
        dev_file: Optional[Union[str, Path]] = None,
        encoding: str = "utf-8",
        text_column_name: str = "data",
        label_column_name: str = "label",
        label_type: str = "ner",
        autofind_splits: bool = True,
        name: Optional[str] = None,
        **corpusargs,
    ) -> None:
        """Instantiates a JsonlCorpus with one file per Dataset (train, dev, and test).

        :param data_folder: Path to the folder containing the JSONL corpus
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file, if None, dev data is sampled from train
        :param text_column_name: Name of the text column inside the JSONL file.
        :param label_column_name: Name of the label column inside the JSONL file.
        :param autofind_splits: Whether train, test and dev file should be determined automatically
        :param name: name of the Corpus see flair.data.Corpus
        """
        # Resolve the split files inside the folder (auto-detect if enabled).
        dev_file, test_file, train_file = find_train_dev_test_files(
            data_folder, dev_file, test_file, train_file, autofind_splits
        )

        def wrap(split_file):
            # The multi-file parent expects a (possibly empty) list per split.
            return [split_file] if split_file else []

        corpus_name = name if data_folder is None else str(data_folder)
        super().__init__(
            dev_files=wrap(dev_file),
            train_files=wrap(train_file),
            test_files=wrap(test_file),
            text_column_name=text_column_name,
            label_column_name=label_column_name,
            label_type=label_type,
            name=corpus_name,
            encoding=encoding,
            **corpusargs,
        )
class JsonlDataset(FlairDataset):
    """In-memory dataset over a single JSONL file whose character-level span
    annotations are converted to token-level labels."""

    def __init__(
        self,
        path_to_jsonl_file: Union[str, Path],
        encoding: str = "utf-8",
        text_column_name: str = "data",
        label_column_name: str = "label",
        label_type: str = "ner",
    ) -> None:
        """Instantiates a JsonlDataset and converts all annotated char spans to token tags using the IOB scheme.

        The expected file format is:

        { "<text_column_name>": "<text>", "label_column_name": [[<start_char_index>, <end_char_index>, <label>],...] }

        :param path_to_jsonl_file: File to read
        :param encoding: Encoding used to read the file
        :param text_column_name: Name of the text column
        :param label_column_name: Name of the label column
        :param label_type: Label typename under which span labels are stored (e.g. "ner")
        """
        path_to_json_file = Path(path_to_jsonl_file)

        self.text_column_name = text_column_name
        self.label_column_name = label_column_name
        self.label_type = label_type
        self.path_to_json_file = path_to_json_file

        # Eagerly parse the whole file; all sentences are kept in memory.
        self.sentences: List[Sentence] = []
        with path_to_json_file.open(encoding=encoding) as jsonl_fp:
            for line in jsonl_fp:
                current_line = json.loads(line)
                raw_text = current_line[text_column_name]
                current_labels = current_line[label_column_name]
                current_sentence = Sentence(raw_text)

                self._add_labels_to_sentence(raw_text, current_sentence, current_labels)

                self.sentences.append(current_sentence)

    def _add_labels_to_sentence(self, raw_text: str, sentence: Sentence, labels: List[List[Any]]):
        """Add every [start, end, label] annotation of one line to the sentence."""
        # Add tags for each annotated span
        for label in labels:
            self._add_label_to_sentence(raw_text, sentence, label[0], label[1], label[2])

    def _add_label_to_sentence(self, text: str, sentence: Sentence, start: int, end: int, label: str):
        """Adds a NE label to a given sentence.

        :param text: raw sentence (with all whitespaces etc.). Is used to determine the token indices.
        :param sentence: Tokenized flair Sentence.
        :param start: Start character index of the label.
        :param end: End character index of the label.
        :param label: Label to assign to the given range.
        :return: Nothing. Changes sentence as INOUT-param
        """
        annotated_part = text[start:end]

        # Remove leading and trailing whitespaces from annotated spans
        # (shrinks [start, end) until it begins/ends on a non-space character).
        while re.search(r"^\s", annotated_part):
            start += 1
            annotated_part = text[start:end]

        while re.search(r"\s$", annotated_part):
            end -= 1
            annotated_part = text[start:end]

        # Search start and end token index for current span
        # (token.idx is 1-based, hence the "- 1" to get list positions).
        start_idx = -1
        end_idx = -1
        for token in sentence:
            if token.start_position <= start <= token.end_position and start_idx == -1:
                start_idx = token.idx - 1

            if token.start_position <= end <= token.end_position and end_idx == -1:
                end_idx = token.idx - 1

        # If end index is not found set to last token
        if end_idx == -1:
            end_idx = sentence[-1].idx - 1

        # Throw error if indices are not valid
        if start_idx == -1 or start_idx > end_idx:
            raise ValueError(
                f"Could not create token span from char span.\n\
                    Sen: {sentence}\nStart: {start}, End: {end}, Label: {label}\n\
                        Ann: {annotated_part}\nRaw: {text}\nCo: {start_idx}, {end_idx}"
            )

        # Add span label to the slice covering the annotated tokens.
        sentence[start_idx : end_idx + 1].add_label(self.label_type, label)

    def is_in_memory(self) -> bool:
        # Currently all Jsonl Datasets are stored in Memory
        return True

    def __len__(self) -> int:
        """Number of sentences in the Dataset."""
        return len(self.sentences)

    def __getitem__(self, index: int) -> Sentence:
        """Returns the sentence at a given index."""
        return self.sentences[index]
class MultiFileColumnCorpus(Corpus):
    """Corpus over CoNLL-style column files with multiple files per split."""

    def __init__(
        self,
        column_format: Dict[int, str],
        train_files=None,
        test_files=None,
        dev_files=None,
        column_delimiter: str = r"\s+",
        comment_symbol: Optional[str] = None,
        encoding: str = "utf-8",
        document_separator_token: Optional[str] = None,
        skip_first_line: bool = False,
        in_memory: bool = True,
        label_name_map: Optional[Dict[str, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        default_whitespace_after: int = 1,
        **corpusargs,
    ) -> None:
        r"""Instantiates a Corpus from CoNLL column-formatted task data such as CoNLL03 or CoNLL2000.

        :param column_format: a map specifying the column format
        :param train_files: the name of the train files
        :param test_files: the name of the test files
        :param dev_files: the name of the dev files, if empty, dev data is sampled from train
        :param column_delimiter: default is to split on any separatator, but you can overwrite for instance with "\t"
        to split only on tabs
        :param comment_symbol: if set, lines that begin with this symbol are treated as comments
        :param document_separator_token: If provided, sentences that function as document boundaries are so marked
        :param skip_first_line: set to True if your dataset has a header line
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param label_name_map: Optionally map tag names to different schema.
        :param banned_sentences: Optionally remove sentences from the corpus. Works only if `in_memory` is true
        :return: a Corpus with annotated train, dev and test data
        """

        def make_split(files) -> Optional[Dataset]:
            # One ColumnDataset per file, concatenated; None if the split is absent.
            # All three splits share exactly the same parsing options, so the
            # construction is factored out instead of repeated per split.
            if not (files and files[0]):
                return None
            return ConcatDataset(
                [
                    ColumnDataset(
                        split_file,
                        column_format,
                        encoding=encoding,
                        comment_symbol=comment_symbol,
                        column_delimiter=column_delimiter,
                        banned_sentences=banned_sentences,
                        in_memory=in_memory,
                        document_separator_token=document_separator_token,
                        skip_first_line=skip_first_line,
                        label_name_map=label_name_map,
                        default_whitespace_after=default_whitespace_after,
                    )
                    for split_file in files
                ]
            )

        # get train data / read in test and dev files if they exist
        train = make_split(train_files)
        test = make_split(test_files)
        dev = make_split(dev_files)

        super().__init__(train, dev, test, **corpusargs)
class ColumnCorpus(MultiFileColumnCorpus):
    """Convenience wrapper around :class:`MultiFileColumnCorpus` for the common
    one-file-per-split layout inside a single data folder."""

    def __init__(
        self,
        data_folder: Union[str, Path],
        column_format: Dict[int, str],
        train_file=None,
        test_file=None,
        dev_file=None,
        autofind_splits: bool = True,
        name: Optional[str] = None,
        comment_symbol="# ",
        **corpusargs,
    ) -> None:
        r"""Instantiates a Corpus from CoNLL column-formatted task data such as CoNLL03 or CoNLL2000.

        :param data_folder: base folder with the task data
        :param column_format: a map specifying the column format
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file, if None, dev data is sampled from train
        :param autofind_splits: whether train, test and dev files should be determined automatically
        :param name: name of the Corpus, defaults to the data folder path
        :param comment_symbol: if set, lines that begin with this symbol are treated as comments
        :param corpusargs: all remaining keyword arguments are passed through to
            :class:`MultiFileColumnCorpus` (e.g. ``column_delimiter``, ``in_memory``,
            ``label_name_map``, ``banned_sentences``)
        :return: a Corpus with annotated train, dev and test data
        """
        # Resolve the split files inside the folder (auto-detect if enabled).
        dev_file, test_file, train_file = find_train_dev_test_files(
            data_folder, dev_file, test_file, train_file, autofind_splits
        )

        def wrap(split_file):
            # The multi-file parent expects a (possibly empty) list per split.
            return [split_file] if split_file else []

        corpus_name = name if data_folder is None else str(data_folder)
        super().__init__(
            column_format,
            dev_files=wrap(dev_file),
            train_files=wrap(train_file),
            test_files=wrap(test_file),
            name=corpus_name,
            comment_symbol=comment_symbol,
            **corpusargs,
        )
class ColumnDataset(FlairDataset):
    """Dataset over a single CoNLL-style column file.

    Each non-empty line holds one token; columns carry the token text and its
    labels. Sentences are separated by blank lines. Label columns are
    automatically classified as span-level (BIOES-tagged) or word-level by
    probing the first sentences of the file.
    """

    # special key for space after
    SPACE_AFTER_KEY = "space-after"
    # special key for feature columns
    FEATS = ["feats", "misc"]
    # special key for dependency head id
    HEAD = ["head", "head_id"]

    def __init__(
        self,
        path_to_column_file: Union[str, Path],
        column_name_map: Dict[int, str],
        column_delimiter: str = r"\s+",
        comment_symbol: Optional[str] = None,
        banned_sentences: Optional[List[str]] = None,
        in_memory: bool = True,
        document_separator_token: Optional[str] = None,
        encoding: str = "utf-8",
        skip_first_line: bool = False,
        label_name_map: Optional[Dict[str, str]] = None,
        default_whitespace_after: int = 1,
    ) -> None:
        r"""Instantiates a column dataset.

        :param path_to_column_file: path to the file with the column-formatted data
        :param column_name_map: a map specifying the column format
        :param column_delimiter: default is to split on any separatator, but you can overwrite for instance with "\t"
        to split only on tabs
        :param comment_symbol: if set, lines that begin with this symbol are treated as comments
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param document_separator_token: If provided, sentences that function as document boundaries are so marked
        :param skip_first_line: set to True if your dataset has a header line
        :param label_name_map: Optionally map tag names to different schema.
        :param banned_sentences: Optionally remove sentences from the corpus. Works only if `in_memory` is true
        :return: a dataset with annotated data
        """
        path_to_column_file = Path(path_to_column_file)
        assert path_to_column_file.exists()
        self.path_to_column_file = path_to_column_file
        # compile once; reused for every line split
        self.column_delimiter = re.compile(column_delimiter)
        self.comment_symbol = comment_symbol
        self.document_separator_token = document_separator_token
        self.label_name_map = label_name_map
        self.banned_sentences = banned_sentences
        self.default_whitespace_after = default_whitespace_after

        # store either Sentence objects in memory, or only file offsets
        self.in_memory = in_memory

        self.total_sentence_count: int = 0

        # most data sets have the token text in the first column, if not, pass 'text' as column
        self.text_column: int = 0
        self.head_id_column: Optional[int] = None
        for column in column_name_map:
            if column_name_map[column] == "text":
                self.text_column = column
            if column_name_map[column] in self.HEAD:
                self.head_id_column = column

        # determine encoding of text file
        self.encoding = encoding

        # identify which columns are spans and which are word-level
        # (fills self.span_level_tag_columns / self.word_level_tag_columns)
        self._identify_span_columns(column_name_map, skip_first_line)

        # now load all sentences
        with open(str(self.path_to_column_file), encoding=self.encoding) as file:
            # skip first line if to selected
            if skip_first_line:
                file.readline()

            # option 1: keep Sentence objects in memory
            if self.in_memory:
                self.sentences: List[Sentence] = []

                # pointer to previous
                previous_sentence = None
                while True:
                    # parse next sentence
                    next_sentence = self._read_next_sentence(file)

                    # quit if last sentence reached
                    if len(next_sentence) == 0:
                        break

                    sentence = self._convert_lines_to_sentence(
                        next_sentence,
                        word_level_tag_columns=self.word_level_tag_columns,
                        span_level_tag_columns=self.span_level_tag_columns,
                    )

                    if not sentence:
                        continue

                    # skip banned sentences
                    if self.banned_sentences is not None and any(
                        d in sentence.to_plain_string() for d in self.banned_sentences
                    ):
                        continue

                    # set previous and next sentence for context
                    sentence._previous_sentence = previous_sentence
                    sentence._next_sentence = None
                    if previous_sentence:
                        previous_sentence._next_sentence = sentence

                    # append parsed sentence to list in memory
                    self.sentences.append(sentence)
                    previous_sentence = sentence

                self.total_sentence_count = len(self.sentences)

            # option 2: keep source data in memory
            if not self.in_memory:
                self.sentences_raw: List[List[str]] = []

                while True:
                    # read lines for next sentence, but don't parse
                    sentence_raw = self._read_next_sentence(file)

                    # quit if last sentence reached
                    if len(sentence_raw) == 0:
                        break

                    # append raw lines for each sentence
                    self.sentences_raw.append(sentence_raw)

                self.total_sentence_count = len(self.sentences_raw)

    def _identify_span_columns(self, column_name_map, skip_first_line):
        """Probe the first sentences of the file to classify each label column
        as span-level (BIOES scheme) or word-level."""
        # we make a distinction between word-level tags and span-level tags
        self.span_level_tag_columns = {}
        self.word_level_tag_columns = {self.text_column: "text"}

        # read first sentence to determine which columns are span-labels
        with open(str(self.path_to_column_file), encoding=self.encoding) as file:
            # skip first line if to selected
            if skip_first_line:
                file.readline()

            # check the first 5 sentences
            probe = []
            for _i in range(5):
                next_sentence = self._read_next_sentence(file)
                if len(next_sentence) == 0:
                    break
                sentence = self._convert_lines_to_sentence(next_sentence, word_level_tag_columns=column_name_map)
                if sentence:
                    probe.append(sentence)
                else:
                    break

            # go through all annotations and identify word- and span-level annotations
            # - if a column has at least one BIES we know it's a Span label
            # - if a column has at least one tag that is not BIOES, we know it's a Token label
            # - problem cases are columns for which we see only O - in this case we default to Span
            for sentence in probe:
                for column in column_name_map:
                    # skip assigned columns
                    if (
                        column in self.word_level_tag_columns
                        or column in self.span_level_tag_columns
                        or column == self.head_id_column
                    ):
                        continue
                    layer = column_name_map[column]

                    # the space after key is always word-levels
                    if column_name_map[column] == self.SPACE_AFTER_KEY:
                        self.word_level_tag_columns[column] = layer
                        continue

                    if layer in self.FEATS:
                        self.word_level_tag_columns[column] = layer
                        continue

                    for token in sentence:
                        # if at least one token has a BIES, we know it's a span label
                        if token.get_label(layer).value[0:2] in ["B-", "I-", "E-", "S-"]:
                            self.span_level_tag_columns[column] = layer
                            break

                        # if at least one token has a label other than BIOES, we know it's a token label
                        elif token.get_label(layer, "O").value != "O":
                            self.word_level_tag_columns[column] = layer
                            break

            # all remaining columns that are not word-level are span-level
            for column in column_name_map:
                if column not in self.word_level_tag_columns:
                    self.span_level_tag_columns[column] = column_name_map[column]

        for column in self.span_level_tag_columns:
            log.debug(f"Column {column} ({self.span_level_tag_columns[column]}) is a span-level column.")

        # for column in self.word_level_tag_columns:
        #     log.info(f"Column {column} ({self.word_level_tag_columns[column]}) is a word-level column.")

    def _read_next_sentence(self, file):
        """Read raw lines of the next sentence from the open file handle; an
        empty list signals end of file."""
        lines = []
        line = file.readline()
        while line:
            if not line.isspace():
                lines.append(line)

            # if sentence ends, break
            if len(lines) > 0 and self.__line_completes_sentence(line):
                break

            line = file.readline()
        return lines

    def _convert_lines_to_sentence(
        self, lines, word_level_tag_columns: Dict[int, str], span_level_tag_columns: Optional[Dict[int, str]] = None
    ):
        """Parse the raw lines of one sentence into a :class:`Sentence`,
        attaching word-level labels, span-level labels and any metadata or
        relations found in comment lines. Returns None for empty sentences."""
        token: Optional[Token] = None
        tokens: List[Token] = []
        filtered_lines = []
        comments = []
        for line in lines:
            # parse comments if possible
            if self.comment_symbol is not None and line.startswith(self.comment_symbol):
                comments.append(line)
                continue

            filtered_lines.append(line)

            # otherwise, this line is a token. parse and add to sentence
            token = self._parse_token(line, word_level_tag_columns, token)
            tokens.append(token)

        sentence: Sentence = Sentence(text=tokens)

        # check if this sentence is a document boundary
        if sentence.to_original_text() == self.document_separator_token:
            sentence.is_document_boundary = True

        # add span labels
        if span_level_tag_columns:
            for span_column in span_level_tag_columns:
                try:
                    bioes_tags = [self.column_delimiter.split(line.rstrip())[span_column] for line in filtered_lines]
                    # discard tags from tokens that are not added to the sentence
                    bioes_tags = [tag for tag, token in zip(bioes_tags, tokens) if token._internal_index is not None]
                    predicted_spans = get_spans_from_bio(bioes_tags)
                    for span_indices, score, label in predicted_spans:
                        span = sentence[span_indices[0] : span_indices[-1] + 1]
                        value = self._remap_label(label)
                        if value != "O":
                            span.add_label(span_level_tag_columns[span_column], value=value, score=score)
                except Exception:
                    # NOTE(review): malformed span columns are silently skipped
                    # so other columns can still be parsed — confirm intended.
                    pass

        for comment in comments:
            # parse relations if they are set
            if comment.startswith("# relations = "):
                relations_string = comment.strip().split("# relations = ")[1]
                for relation in relations_string.split("|"):
                    indices = relation.split(";")
                    head_start = int(indices[0])
                    head_end = int(indices[1])
                    tail_start = int(indices[2])
                    tail_end = int(indices[3])
                    label = indices[4]
                    # head and tail span indices are 1-indexed and end index is inclusive
                    relation = Relation(
                        first=sentence[head_start - 1 : head_end], second=sentence[tail_start - 1 : tail_end]
                    )
                    remapped = self._remap_label(label)
                    if remapped != "O":
                        relation.add_label(typename="relation", value=remapped)

            # parse comments such as '# id cd27886d-6895-4d02-a8df-e5fa763fa88f domain=de-orcas'
            # to set the metadata "domain" to "de-orcas"
            for comment_row in comment.split("\t"):
                if "=" in comment_row:
                    key, value = comment_row.split("=", 1)
                    sentence.add_metadata(key, value)

        if len(sentence) > 0:
            return sentence
        return None

    def _parse_token(self, line: str, column_name_map: Dict[int, str], last_token: Optional[Token] = None) -> Token:
        """Parse one column-formatted line into a :class:`Token`, computing its
        character offset from the previous token and attaching word-level labels."""
        # get fields from line
        fields: List[str] = self.column_delimiter.split(line.rstrip())
        field_count = len(fields)
        # get head_id if exists (only in dependency parses)
        head_id = int(fields[self.head_id_column]) if self.head_id_column else None

        # start position follows directly after the previous token and its whitespace
        if last_token is None:
            start = 0
        else:
            assert last_token.end_position is not None
            start = last_token.end_position + last_token.whitespace_after

        # initialize token
        token = Token(
            fields[self.text_column],
            head_id=head_id,
            whitespace_after=self.default_whitespace_after,
            start_position=start,
        )

        # go through all columns
        for column, column_type in column_name_map.items():
            # skip columns missing from this line, the text column and the head column
            if field_count <= column:
                continue
            if column == self.text_column:
                continue
            if column == self.head_id_column:
                continue
            if column_type == self.SPACE_AFTER_KEY:
                if fields[column] == "-":
                    token.whitespace_after = 0
                continue
            # 'feats' and 'misc' column should be split into different fields
            if column_type in self.FEATS:
                for feature in fields[column].split("|"):
                    # special handling for whitespace after
                    if feature == "SpaceAfter=No":
                        token.whitespace_after = 0
                        continue

                    if "=" in feature:
                        # add each other feature as label-value pair
                        label_name, original_label_value = feature.split("=", 1)
                        label_value = self._remap_label(original_label_value)
                        if label_value != "O":
                            token.add_label(label_name, label_value)
            else:
                # get the task name (e.g. 'ner')
                label_name = column_type
                # get the label value
                label_value = self._remap_label(fields[column])
                # add label
                if label_value != "O":
                    token.add_label(label_name, label_value)
        return token

    def _remap_label(self, tag):
        """Apply the optional label_name_map to a raw tag value."""
        # remap regular tag names
        if self.label_name_map and tag in self.label_name_map:
            tag = self.label_name_map[tag]  # for example, transforming 'PER' to 'person'
        return tag

    def __line_completes_sentence(self, line: str) -> bool:
        """A blank line (or EOF empty string) terminates the current sentence."""
        sentence_completed = line.isspace() or line == ""
        return sentence_completed

    def is_in_memory(self) -> bool:
        """True if parsed Sentence objects are kept in memory."""
        return self.in_memory

    def __len__(self) -> int:
        """Number of sentences in the dataset."""
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        """Return the sentence at the given index (parsing lazily when not in memory)."""
        # if in memory, retrieve parsed sentence
        if self.in_memory:
            sentence = self.sentences[index]

        # else skip to position in file where sentence begins
        else:
            sentence = self._convert_lines_to_sentence(
                self.sentences_raw[index],
                word_level_tag_columns=self.word_level_tag_columns,
                span_level_tag_columns=self.span_level_tag_columns,
            )

        # set sentence context using partials TODO: pointer to dataset is really inefficient
        sentence._has_context = True
        sentence._position_in_dataset = (self, index)

        return sentence
class ONTONOTES(MultiFileColumnCorpus):
    """Column corpus over the OntoNotes 5.0 / CoNLL-2012 distribution.

    On first use, the raw conll-2012 archive is downloaded and every split is
    converted into plain three-column (token / POS / NER) files, one file per
    (domain, source) pair. Individual domains and sources can be selected via
    the ``domain`` argument.
    """

    archive_url = "https://data.mendeley.com/public-files/datasets/zmycy7t9h9/files/b078e1c4-f7a4-4427-be7f-9389967831ef/file_downloaded"

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        version: str = "v4",
        language: str = "english",
        domain: Union[None, str, List[str], Dict[str, Union[None, str, List[str]]]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Instantiate the corpus, downloading and preprocessing data if needed.

        :param base_path: cache directory; defaults to the flair cache root.
        :param version: "v4" or "v12" ("v12" exists for English only).
        :param language: "english", "chinese" or "arabic".
        :param domain: None (all domains), a domain name, a list of domain names,
            or a mapping of domain -> None / source name / list of source names.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # v12 is an English-only release; v4 covers all three languages.
        assert version in ["v4", "v12"]
        if version == "v12":
            assert language == "english"
        else:
            assert language in ["english", "chinese", "arabic"]
        column_format = {0: "text", 1: "pos", 2: "ner"}
        processed_data_path = self._ensure_data_processed(base_path, language, version)
        kw = {"version": version, "language": language, "domain": domain, "processed_data_path": processed_data_path}
        dev_files = list(self._get_processed_file_paths(split="development", **kw))
        train_files = list(self._get_processed_file_paths(split="train", **kw))
        test_files = list(self._get_processed_file_paths(split="test", **kw))
        super().__init__(
            dev_files=dev_files,
            train_files=train_files,
            test_files=test_files,
            name="/".join((self.__class__.__name__, language, version)),
            column_format=column_format,
            in_memory=in_memory,
            column_delimiter="\t",
            **corpusargs,
        )

    @classmethod
    def get_available_domains(
        cls,
        base_path: Optional[Union[str, Path]] = None,
        version: str = "v4",
        language: str = "english",
        split: str = "train",
    ) -> List[str]:
        """Return the names of all domains available for a given version/language/split."""
        processed_data_path = cls._ensure_data_processed(base_path=base_path, language=language, version=version)
        processed_split_path = processed_data_path / "splits" / version / language / split
        return [domain_path.name for domain_path in processed_split_path.iterdir()]

    @classmethod
    def _get_processed_file_paths(
        cls,
        processed_data_path: Path,
        split: str = "train",
        version: str = "v4",
        language: str = "english",
        domain: Optional[Union[str, List[str], Dict[str, Union[None, str, List[str]]]]] = None,
    ) -> Iterable[Path]:
        """Yield the processed data files selected by ``domain`` for one split.

        ``domain`` may be None (all domains), one domain name, a list of domain
        names, or a mapping domain -> None / source name / list of source names.
        """
        processed_split_path = processed_data_path / "splits" / version / language / split
        if domain is None:
            # use all domains
            assert processed_split_path.exists(), f"Processed data not found (expected at: {processed_split_path})"
            yield from sorted(filter(os.path.isfile, processed_split_path.rglob("*")))
        elif isinstance(domain, str):
            domain_path = processed_split_path / domain
            assert domain_path.exists(), f"Processed data not found (expected at: {domain_path})"
            yield from sorted(filter(os.path.isfile, domain_path.rglob("*")))
        elif isinstance(domain, list):
            for d in domain:
                domain_path = processed_split_path / d
                assert domain_path.exists(), f"Processed data not found (expected at: {domain_path})"
                yield from sorted(filter(os.path.isfile, domain_path.rglob("*")))
        else:
            assert isinstance(domain, dict)
            for d, sources in domain.items():
                domain_path = processed_split_path / d
                assert domain_path.exists(), f"Processed data not found (expected at: {domain_path})"
                if sources is None:
                    # filter to plain files, consistent with the branches above
                    # (previously this branch could also yield directories)
                    yield from sorted(filter(os.path.isfile, domain_path.rglob("*")))
                elif isinstance(sources, str):
                    source_path = domain_path / sources
                    assert source_path.exists(), f"Processed data not found (expected at: {source_path})"
                    yield source_path
                else:
                    assert isinstance(sources, list)
                    for s in sources:
                        source_path = domain_path / s
                        assert source_path.exists(), f"Processed data not found (expected at: {source_path})"
                        yield source_path

    @classmethod
    def _ensure_data_processed(cls, base_path, language: str, version: str):
        """Download (if necessary) the raw archive and convert each split into tab-separated token/POS/NER files.

        Returns the root of the processed data directory.
        """
        raw_data_path = cls._ensure_data_downloaded(base_path)
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = cls.__name__.lower()
        processed_data_path = base_path / dataset_name
        processed_split_path = processed_data_path / "splits" / version / language
        if not processed_split_path.exists():
            log.info(f"OntoNotes splits for {version}/{language} have not been generated yet, generating it now.")
            for split in ["train", "development", "test"]:
                log.info(f"Generating {split} split for {version}/{language}")
                raw_split_path = raw_data_path / version / "data" / split / "data" / language / "annotations"
                # iter over all domains / sources and create target files
                for raw_domain_path in raw_split_path.iterdir():
                    for raw_source_path in raw_domain_path.iterdir():
                        conll_files = sorted(raw_source_path.rglob("*gold_conll"))
                        processed_source_path = (
                            processed_split_path / split / raw_domain_path.name / raw_source_path.name
                        )
                        processed_source_path.parent.mkdir(parents=True, exist_ok=True)
                        # write UTF-8 explicitly: the reader side opens with utf8, and the
                        # Chinese/Arabic splits are not representable in every locale encoding
                        with open(processed_source_path, "w", encoding="utf-8") as f:
                            for conll_file in conll_files:
                                for sent in cls.sentence_iterator(conll_file):
                                    if language == "arabic":
                                        # Arabic tokens carry "#"-separated extra info; keep only the surface form
                                        trimmed_sentence = [_sent.split("#")[0] for _sent in sent["sentence"]]
                                        sent["sentence"] = trimmed_sentence
                                    for row in zip(sent["sentence"], sent["pos_tags"], sent["named_entities"]):
                                        f.write("\t".join(row) + "\n")
                                    f.write("\n")
        return processed_data_path

    @classmethod
    def _ensure_data_downloaded(cls, base_path: Optional[Union[str, Path]] = None) -> Path:
        """Download and unpack the conll-2012 archive if it is not cached yet; return its folder."""
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / "conll-2012"
        if not data_folder.exists():
            unpack_file(cached_path(cls.archive_url, data_folder), data_folder.parent, "zip", False)
        return data_folder

    @classmethod
    def _process_coref_span_annotations_for_word(
        cls,
        label: str,
        word_index: int,
        clusters: DefaultDict[int, List[Tuple[int, int]]],
        coref_stacks: DefaultDict[int, List[int]],
    ) -> None:
        """For a given coref label, add it to a currently open span(s), complete a span(s) or ignore it, if it is outside of all spans.

        This method mutates the clusters and coref_stacks dictionaries.

        # Parameters

        label : `str`
            The coref label for this word.
        word_index : `int`
            The word index into the sentence.
        clusters : `DefaultDict[int, List[Tuple[int, int]]]`
            A dictionary mapping cluster ids to lists of inclusive spans into the
            sentence.
        coref_stacks : `DefaultDict[int, List[int]]`
            Stacks for each cluster id to hold the start indices of active spans (spans
            which we are inside of when processing a given word). Spans with the same id
            can be nested, which is why we collect these opening spans on a stack, e.g:
            [Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1
        """
        if label != "-":
            for segment in label.split("|"):
                # The conll representation of coref spans allows spans to
                # overlap. If spans end or begin at the same word, they are
                # separated by a "|".
                if segment[0] == "(":
                    # The span begins at this word.
                    if segment[-1] == ")":
                        # The span begins and ends at this word (single word span).
                        cluster_id = int(segment[1:-1])
                        clusters[cluster_id].append((word_index, word_index))
                    else:
                        # The span is starting, so we record the index of the word.
                        cluster_id = int(segment[1:])
                        coref_stacks[cluster_id].append(word_index)
                else:
                    # The span for this id is ending, but didn't start at this word.
                    # Retrieve the start index from the document state and
                    # add the span to the clusters for this id.
                    cluster_id = int(segment[:-1])
                    start = coref_stacks[cluster_id].pop()
                    clusters[cluster_id].append((start, word_index))

    @classmethod
    def _process_span_annotations_for_word(
        cls,
        annotations: List[str],
        span_labels: List[List[str]],
        current_span_labels: List[Optional[str]],
    ) -> None:
        """Given a sequence of different label types for a single word and the current span label we are inside, compute the BIO tag for each label and append to a list.

        # Parameters

        annotations : `List[str]`
            A list of labels to compute BIO tags for.
        span_labels : `List[List[str]]`
            A list of lists, one for each annotation, to incrementally collect
            the BIO tags for a sequence.
        current_span_labels : `List[Optional[str]]`
            The currently open span per annotation type, or `None` if there is no open span.
        """
        for annotation_index, annotation in enumerate(annotations):
            # strip all bracketing information to
            # get the actual propbank label.
            label = annotation.strip("()*")
            if "(" in annotation:
                # Entering into a span for a particular semantic role label.
                # We append the label and set the current span for this annotation.
                bio_label = "B-" + label
                span_labels[annotation_index].append(bio_label)
                current_span_labels[annotation_index] = label
            elif current_span_labels[annotation_index] is not None:
                # If there's no '(' token, but the current_span_label is not None,
                # then we are inside a span.
                bio_label = "I-" + cast(str, current_span_labels[annotation_index])
                span_labels[annotation_index].append(bio_label)
            else:
                # We're outside a span.
                span_labels[annotation_index].append("O")
            # Exiting a span, so we reset the current span label for this annotation.
            if ")" in annotation:
                current_span_labels[annotation_index] = None

    @classmethod
    def _conll_rows_to_sentence(cls, conll_rows: List[str]) -> Dict:
        """Parse the conll rows of one sentence into a dict of aligned annotation lists."""
        document_id: str
        sentence_id: int
        # The words in the sentence.
        sentence: List[str] = []
        # The pos tags of the words in the sentence.
        pos_tags: List[str] = []
        # the pieces of the parse tree.
        parse_pieces: List[Optional[str]] = []
        # The lemmatised form of the words in the sentence which
        # have SRL or word sense information.
        predicate_lemmas: List[Optional[str]] = []
        # The FrameNet ID of the predicate.
        predicate_framenet_ids: List[Optional[str]] = []
        # The sense of the word, if available.
        word_senses: List[Optional[float]] = []
        # The current speaker, if available.
        speakers: List[Optional[str]] = []
        verbal_predicates: List[str] = []
        span_labels: List[List[str]] = []
        current_span_labels: List[Optional[str]] = []
        # Cluster id -> List of (start_index, end_index) spans.
        clusters: DefaultDict[int, List[Tuple[int, int]]] = defaultdict(list)
        # Cluster id -> List of start_indices which are open for this id.
        coref_stacks: DefaultDict[int, List[int]] = defaultdict(list)
        for index, row in enumerate(conll_rows):
            conll_components = row.split()
            document_id = conll_components[0]
            sentence_id = int(conll_components[1])
            word = conll_components[3]
            pos_tag = conll_components[4]
            parse_piece: Optional[str]
            # Replace brackets in text and pos tags
            # with a different token for parse trees.
            if pos_tag != "XX" and word != "XX":
                if word == "(":
                    parse_word = "-LRB-"
                elif word == ")":
                    parse_word = "-RRB-"
                else:
                    parse_word = word
                if pos_tag == "(":
                    pos_tag = "-LRB-"
                if pos_tag == ")":
                    pos_tag = "-RRB-"
                (left_brackets, right_hand_side) = conll_components[5].split("*")
                # only keep ')' if there are nested brackets with nothing in them.
                right_brackets = right_hand_side.count(")") * ")"
                parse_piece = f"{left_brackets} ({pos_tag} {parse_word}) {right_brackets}"
            else:
                # There are some bad annotations in the CONLL data.
                # They contain no information, so to make this explicit,
                # we just set the parse piece to be None which will result
                # in the overall parse tree being None.
                parse_piece = None
            lemmatised_word = conll_components[6]
            framenet_id = conll_components[7]
            word_sense = conll_components[8]
            speaker = conll_components[9]
            if not span_labels:
                # If this is the first word in the sentence, create
                # empty lists to collect the NER and SRL BIO labels.
                # We can't do this upfront, because we don't know how many
                # components we are collecting, as a sentence can have
                # variable numbers of SRL frames.
                span_labels = [[] for _ in conll_components[10:-1]]
                # Create variables representing the current label for each label
                # sequence we are collecting.
                current_span_labels = [None for _ in conll_components[10:-1]]
            cls._process_span_annotations_for_word(conll_components[10:-1], span_labels, current_span_labels)
            # If any annotation marks this word as a verb predicate,
            # we need to record its index. This also has the side effect
            # of ordering the verbal predicates by their location in the
            # sentence, automatically aligning them with the annotations.
            word_is_verbal_predicate = any("(V" in x for x in conll_components[11:-1])
            if word_is_verbal_predicate:
                verbal_predicates.append(word)
            cls._process_coref_span_annotations_for_word(conll_components[-1], index, clusters, coref_stacks)
            sentence.append(word)
            pos_tags.append(pos_tag)
            parse_pieces.append(parse_piece)
            predicate_lemmas.append(lemmatised_word if lemmatised_word != "-" else None)
            predicate_framenet_ids.append(framenet_id if framenet_id != "-" else None)
            word_senses.append(float(word_sense) if word_sense != "-" else None)
            speakers.append(speaker if speaker != "-" else None)
        named_entities = span_labels[0]
        srl_frames = [(predicate, labels) for predicate, labels in zip(verbal_predicates, span_labels[1:])]
        # this would not be reached if parse_pieces contained None, hence the cast
        parse_tree = "".join(cast(List[str], parse_pieces)) if all(parse_pieces) else None
        coref_span_tuples = {(cluster_id, span) for cluster_id, span_list in clusters.items() for span in span_list}
        return {
            "document_id": document_id,
            "sentence_id": sentence_id,
            "sentence": sentence,
            "pos_tags": pos_tags,
            "parse_tree": parse_tree,
            "predicate_lemmas": predicate_lemmas,
            "predicate_framenet_ids": predicate_framenet_ids,
            "word_senses": word_senses,
            "speakers": speakers,
            "named_entities": named_entities,
            "srl_frames": srl_frames,
            "coref_span_tuples": coref_span_tuples,
        }

    @classmethod
    def dataset_document_iterator(cls, file_path: Union[Path, str]) -> Iterator[List]:
        """An iterator over CONLL formatted files which yields documents, regardless of the number of document annotations in a particular file.

        This is useful for conll data which has been preprocessed, such
        as the preprocessing which takes place for the 2012 CONLL
        Coreference Resolution task.
        """
        with open(file_path, encoding="utf8") as open_file:
            conll_rows = []
            document: List = []
            for line in open_file:
                line = line.strip()
                if line != "" and not line.startswith("#"):
                    # Non-empty line. Collect the annotation.
                    conll_rows.append(line)
                else:
                    if conll_rows:
                        document.append(cls._conll_rows_to_sentence(conll_rows))
                        conll_rows = []
                if line.startswith("#end document"):
                    yield document
                    document = []
            if document:
                # Collect any stragglers or files which might not
                # have the '#end document' format for the end of the file.
                yield document

    @classmethod
    def sentence_iterator(cls, file_path: Union[Path, str]) -> Iterator:
        """An iterator over the sentences in an individual CONLL formatted file."""
        for document in cls.dataset_document_iterator(file_path):
            for sentence in document:
                yield sentence
class CONLL_03(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        column_format: Optional[Dict[int, str]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the CoNLL-03 corpus.

        This is only possible if you've manually downloaded it to your machine.
        Obtain the corpus from https://www.clips.uantwerpen.be/conll2003/ner/ and put the eng.testa, .testb, .train
        files in a folder called 'conll_03'. Then set the base_path parameter in the constructor to the path to the
        parent directory where the conll_03 folder resides.
        If using entity linking, the conll03 dateset is reduced by about 20 Documents, which are not part of the yago dataset.
        :param base_path: Path to the CoNLL-03 corpus (i.e. 'conll_03' folder) on your machine
        :param column_format: column map of the data files; defaults to {0: "text", 1: "pos", 3: "ner"}
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        # avoid a shared mutable default argument; the effective default is unchanged
        if column_format is None:
            column_format = {0: "text", 1: "pos", 3: "ner"}
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # check if data there
        if not data_folder.exists():
            log.warning("-" * 100)
            log.warning(f'WARNING: CoNLL-03 dataset not found at "{data_folder}".')
            log.warning(
                'Instructions for obtaining the data can be found here: https://www.clips.uantwerpen.be/conll2003/ner/"'
            )
            log.warning("-" * 100)
        super().__init__(
            data_folder,
            column_format=column_format,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            **corpusargs,
        )
class CONLL_03_GERMAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the German CoNLL-03 corpus from a manual download.

        Obtain the corpus from https://www.clips.uantwerpen.be/conll2003/ner/ and put the respective files
        in a folder called 'conll_03_german'. ``base_path`` must then point at that folder's parent directory.
        :param base_path: Path to the CoNLL-03 corpus (i.e. 'conll_03_german' folder) on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # the files carry token, lemma, POS, chunk and NER columns
        column_map = {0: "text", 1: "lemma", 2: "pos", 3: "np", 4: "ner"}
        dataset_name = type(self).__name__.lower()
        corpus_folder = base_path / dataset_name
        # this corpus cannot be auto-downloaded, so warn if it is missing
        if not corpus_folder.exists():
            log.warning("-" * 100)
            log.warning(f'WARNING: CoNLL-03 dataset not found at "{corpus_folder}".')
            log.warning(
                'Instructions for obtaining the data can be found here: https://www.clips.uantwerpen.be/conll2003/ner/"'
            )
            log.warning("-" * 100)
        super().__init__(
            corpus_folder,
            column_map,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            **corpusargs,
        )
class CONLL_03_DUTCH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Dutch CoNLL-03 corpus, downloading it on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        column_map = {0: "text", 1: "pos", 2: "ner"}
        dataset_name = type(self).__name__.lower()
        corpus_folder = base_path / dataset_name
        # fetch the raw CoNLL-2002 files if they are not cached yet
        conll_02_path = "https://www.clips.uantwerpen.be/conll2002/ner/data/"
        cached_path(f"{conll_02_path}ned.testa", corpus_folder / "raw")
        cached_path(f"{conll_02_path}ned.testb", corpus_folder / "raw")
        cached_path(f"{conll_02_path}ned.train", corpus_folder / "raw")
        # the raw files need extra blank lines after document separators
        if not (corpus_folder / "train.txt").is_file():
            for raw_name, split_name in (
                ("ned.train", "train.txt"),
                ("ned.testa", "dev.txt"),
                ("ned.testb", "test.txt"),
            ):
                self.__offset_docstarts(corpus_folder / "raw" / raw_name, corpus_folder / split_name)
        super().__init__(
            corpus_folder,
            column_map,
            train_file="train.txt",
            dev_file="dev.txt",
            test_file="test.txt",
            encoding="latin-1",
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            **corpusargs,
        )

    @staticmethod
    def __offset_docstarts(file_in: Union[str, Path], file_out: Union[str, Path]):
        """Copy ``file_in`` to ``file_out``, inserting a blank line after every -DOCSTART- marker."""
        with open(file_in, encoding="latin-1") as source:
            content = source.readlines()
        with open(file_out, "w", encoding="latin-1") as sink:
            for row in content:
                sink.write(row)
                if row.startswith("-DOCSTART-"):
                    sink.write("\n")
class CONLL_03_SPANISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Spanish CoNLL-03 corpus, downloading it on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        column_map = {0: "text", 1: "ner"}
        dataset_name = type(self).__name__.lower()
        corpus_folder = base_path / dataset_name
        # fetch the three CoNLL-2002 splits if they are not cached yet
        conll_02_path = "https://www.clips.uantwerpen.be/conll2002/ner/data/"
        for split_name in ("esp.testa", "esp.testb", "esp.train"):
            cached_path(f"{conll_02_path}{split_name}", Path("datasets") / dataset_name)
        super().__init__(
            corpus_folder,
            column_map,
            encoding="latin-1",
            in_memory=in_memory,
            **corpusargs,
        )
class CONLL_2000(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the CoNLL-2000 corpus for English chunking.

        The first time you call this constructor it will automatically download the dataset.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # column format
        columns = {0: "text", 1: "pos", 2: "np"}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # download data if necessary
        conll_2000_path = "https://www.clips.uantwerpen.be/conll2000/chunking/"
        data_file = flair.cache_root / "datasets" / dataset_name / "train.txt"
        if not data_file.is_file():
            import gzip
            import shutil

            # download and decompress both splits
            # (deduplicated from two previously copy-pasted blocks)
            for part in ("train", "test"):
                cached_path(f"{conll_2000_path}{part}.txt.gz", Path("datasets") / dataset_name)
                archive = flair.cache_root / "datasets" / dataset_name / f"{part}.txt.gz"
                target = flair.cache_root / "datasets" / dataset_name / f"{part}.txt"
                with gzip.open(archive, "rb") as f_in, open(target, "wb") as f_out:
                    shutil.copyfileobj(f_in, f_out)
        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class WNUT_17(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the WNUT-17 emerging-entities corpus, downloading it on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        column_map = {0: "text", 1: "ner"}
        dataset_name = type(self).__name__.lower()
        corpus_folder = base_path / dataset_name
        # fetch the three official split files if they are not cached yet
        wnut_path = "https://noisy-text.github.io/2017/files/"
        for remote_name in ("wnut17train.conll", "emerging.dev.conll", "emerging.test.annotated"):
            cached_path(f"{wnut_path}{remote_name}", Path("datasets") / dataset_name)
        super().__init__(
            corpus_folder,
            column_map,
            in_memory=in_memory,
            **corpusargs,
        )
class FEWNERD(ColumnCorpus):
    def __init__(
        self,
        setting: str = "supervised",
        **corpusargs,
    ) -> None:
        """Initialize the FewNERD corpus in one of its three settings.

        The first time you call this constructor it will automatically download the
        dataset and convert it to BIO format.
        :param setting: one of "supervised", "inter" or "intra".
        """
        assert setting in ["supervised", "inter", "intra"]
        base_path = flair.cache_root / "datasets"
        self.dataset_name = self.__class__.__name__.lower()
        self.data_folder = base_path / self.dataset_name / setting
        self.bio_format_data = base_path / self.dataset_name / setting / "bio_format"
        if not self.data_folder.exists():
            self._download(setting=setting)
        if not self.bio_format_data.exists():
            self._generate_splits(setting)
        super().__init__(
            self.bio_format_data,
            column_format={0: "text", 1: "ner"},
            **corpusargs,
        )

    def _download(self, setting):
        """Download and extract the FewNERD archive for the requested setting."""
        _URLs = {
            "supervised": "https://cloud.tsinghua.edu.cn/f/09265750ae6340429827/?dl=1",
            "intra": "https://cloud.tsinghua.edu.cn/f/a0d3efdebddd4412b07c/?dl=1",
            "inter": "https://cloud.tsinghua.edu.cn/f/165693d5e68b43558f9b/?dl=1",
        }
        log.info(f"FewNERD ({setting}) dataset not found, downloading.")
        dl_path = _URLs[setting]
        dl_dir = cached_path(dl_path, Path("datasets") / self.dataset_name / setting)
        if setting not in os.listdir(self.data_folder):
            import zipfile

            from tqdm import tqdm

            log.info("FewNERD dataset has not been extracted yet, extracting it now. This might take a while.")
            with zipfile.ZipFile(dl_dir, "r") as zip_ref:
                for f in tqdm(zip_ref.namelist()):
                    if f.endswith("/"):
                        # exist_ok guards against re-runs after a partial extraction
                        os.makedirs(self.data_folder / f, exist_ok=True)
                    else:
                        zip_ref.extract(f, path=self.data_folder)

    def _generate_splits(self, setting):
        """Convert the raw token<TAB>tag files of each split into BIO-tagged files."""
        log.info(
            f"FewNERD splits for {setting} have not been parsed into BIO format, parsing it now. This might take a while."
        )
        os.mkdir(self.bio_format_data)
        for split in os.listdir(self.data_folder / setting):
            with open(self.data_folder / setting / split, encoding="utf-8") as source, open(
                self.bio_format_data / split, "w", encoding="utf-8"
            ) as target:
                previous_tag = None
                for line in source:
                    if line == "" or line == "\n":
                        target.write("\n")
                        # reset at sentence boundaries: entities do not span sentences,
                        # so a repeated tag in the next sentence must restart with "B-"
                        # (previously it was wrongly continued with "I-")
                        previous_tag = None
                    else:
                        token, tag = line.split("\t")
                        tag = tag.replace("\n", "")
                        if tag == "O":
                            target.write(token + "\t" + tag + "\n")
                        elif previous_tag != tag and tag != "O":
                            target.write(token + "\t" + "B-" + tag + "\n")
                        elif previous_tag == tag and tag != "O":
                            target.write(token + "\t" + "I-" + tag + "\n")
                        previous_tag = tag
class BIOSCOPE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the preprocessed BioScope corpus, downloading it on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        column_map = {0: "text", 1: "tag"}
        dataset_name = type(self).__name__.lower()
        corpus_folder = base_path / dataset_name
        # fetch the single preprocessed file if it is not cached yet
        bioscope_path = (
            "https://raw.githubusercontent.com/whoisjones/BioScopeSequenceLabelingData/master/sequence_labeled/"
        )
        cached_path(f"{bioscope_path}output.txt", Path("datasets") / dataset_name)
        super().__init__(
            corpus_folder,
            column_map,
            in_memory=in_memory,
            train_file="output.txt",
            **corpusargs,
        )
class NER_ARABIC_ANER(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize a preprocessed version of the Arabic Named Entity Recognition Corpus (ANERcorp).

        The dataset is downloaded from http://curtis.ml.cmu.edu/w/courses/index.php/ANERcorp
        Column order is swapped
        The first time you call this constructor it will automatically download the dataset.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner"}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # (a redundant "if not base_path" re-assignment was removed here:
        # base_path is always a truthy Path after the line above)
        data_folder = base_path / dataset_name
        # download data if necessary; only a train split is distributed here
        anercorp_path = "https://megantosh.s3.eu-central-1.amazonaws.com/ANERcorp/"
        cached_path(f"{anercorp_path}train.txt", Path("datasets") / dataset_name)
        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_ARABIC_AQMAR(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize a preprocessed and modified version of the American and Qatari Modeling of Arabic (AQMAR) dataset.

        The dataset is downloaded from http://www.cs.cmu.edu/~ark/AQMAR/
        - Modifications from original dataset: Miscellaneous tags (MIS0, MIS1, MIS2, MIS3) are merged to one tag "MISC" as these categories deviate across the original dataset
        - The 28 original Wikipedia articles are merged into a single file containing the articles in alphabetical order
        The first time you call this constructor it will automatically download the dataset.
        This dataset is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
        please cite: "Behrang Mohit, Nathan Schneider, Rishav Bhowmick, Kemal Oflazer, and Noah A. Smith (2012),
        Recall-Oriented Learning of Named Entities in Arabic Wikipedia. Proceedings of EACL."
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner"}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # (a redundant "if not base_path" re-assignment was removed here:
        # base_path is always a truthy Path after the line above)
        data_folder = base_path / dataset_name
        # download data if necessary; only a train split is distributed here
        aqmar_path = "https://megantosh.s3.eu-central-1.amazonaws.com/AQMAR/"
        cached_path(f"{aqmar_path}train.txt", Path("datasets") / dataset_name)
        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_BASQUE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Basque EIEC NER corpus, downloading and unpacking it on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        column_map = {0: "text", 1: "ner"}
        dataset_name = type(self).__name__.lower()
        corpus_folder = base_path / dataset_name
        # download and unpack the archive if the train file is missing
        ner_basque_path = "http://ixa2.si.ehu.eus/eiec/"
        data_path = flair.cache_root / "datasets" / dataset_name
        if not (data_path / "named_ent_eu.train").is_file():
            cached_path(f"{ner_basque_path}/eiec_v1.0.tgz", Path("datasets") / dataset_name)
            import shutil
            import tarfile

            archive = flair.cache_root / "datasets" / dataset_name / "eiec_v1.0.tgz"
            with tarfile.open(archive, "r:gz") as f_in:
                # extract the two corpus files and move them to the dataset root
                for member in ("eiec_v1.0/named_ent_eu.train", "eiec_v1.0/named_ent_eu.test"):
                    f_in.extract(member, data_path)
                    shutil.move(f"{data_path}/{member}", data_path)
        super().__init__(
            corpus_folder,
            column_map,
            in_memory=in_memory,
            **corpusargs,
        )
class NER_CHINESE_WEIBO(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Chinese Weibo NER corpus, downloading it on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        column_map = {0: "text", 1: "ner"}
        dataset_name = type(self).__name__.lower()
        corpus_folder = base_path / dataset_name
        # fetch the three split files if they are not cached yet
        weiboNER_conll_path = "https://raw.githubusercontent.com/87302380/WEIBO_NER/main/data/"
        for suffix in ("train", "test", "dev"):
            cached_path(
                f"{weiboNER_conll_path}weiboNER_2nd_conll_format.{suffix}",
                Path("datasets") / dataset_name,
            )
        super().__init__(
            corpus_folder,
            column_map,
            encoding="utf-8",
            in_memory=in_memory,
            train_file="weiboNER_2nd_conll_format.train",
            test_file="weiboNER_2nd_conll_format.test",
            dev_file="weiboNER_2nd_conll_format.dev",
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_DANISH_DANE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Danish DaNE corpus (Danish Dependency Treebank with NER annotation).

        The first time you call this constructor it will automatically download the dataset.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # column format
        columns = {1: "text", 3: "pos", 9: "ner"}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # download data if necessary
        data_path = flair.cache_root / "datasets" / dataset_name
        train_data_file = data_path / "ddt.train.conllu"
        if not train_data_file.is_file():
            temp_file = cached_path(
                "https://danlp.alexandra.dk/304bd159d5de/datasets/ddt.zip",
                Path("datasets") / dataset_name,
            )
            from zipfile import ZipFile

            with ZipFile(temp_file, "r") as zip_file:
                zip_file.extractall(path=data_path)
            # Remove CoNLL-U meta information in the last column
            for part in ["train", "dev", "test"]:
                lines = []
                data_file = f"ddt.{part}.conllu"
                # explicit utf-8: DDT contains Danish characters outside ASCII
                with open(data_path / data_file, encoding="utf-8") as file:
                    for line in file:
                        if line.startswith("#") or line == "\n":
                            # keep comments and sentence separators untouched
                            # (the missing "else" previously duplicated these lines)
                            lines.append(line)
                        else:
                            lines.append(line.replace("name=", "").replace("|SpaceAfter=No", ""))
                with open(data_path / data_file, "w", encoding="utf-8") as file:
                    file.writelines(lines)
        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            comment_symbol="#",
            **corpusargs,
        )
class NER_ENGLISH_MOVIE_SIMPLE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the eng corpus of the MIT Movie Corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
            You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # NER tag comes first in this corpus, the token second
        columns = {0: "ner", 1: "text"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # fetch both splits from MIT if they are not cached yet
        download_root = "https://groups.csail.mit.edu/sls/downloads/movie/"
        train_file = "engtrain.bio"
        test_file = "engtest.bio"
        for remote_name in (train_file, test_file):
            cached_path(f"{download_root}{remote_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_MOVIE_COMPLEX(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the trivia10k13 corpus of the MIT Movie Corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
            You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # NER tag comes first in this corpus, the token second
        columns = {0: "ner", 1: "text"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # fetch both splits from MIT if they are not cached yet
        download_root = "https://groups.csail.mit.edu/sls/downloads/movie/"
        train_file = "trivia10k13train.bio"
        test_file = "trivia10k13test.bio"
        for remote_name in (train_file, test_file):
            cached_path(f"{download_root}{remote_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_SEC_FILLINGS(ColumnCorpus):
    """Corpus of SEC filings annotated with English NER tags.

    See paper "Domain Adaption of Named Entity Recognition to Support Credit Risk
    Assessment" by Alvarado et al, 2015: https://aclanthology.org/U15-1010/

    :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        You can override this to point to a different folder but typically this should not be necessary.
    :param in_memory: If True, keeps dataset in memory giving speedups in training.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        # token, POS and NER columns (column 2 is unused)
        columns = {0: "text", 1: "pos", 3: "ner"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # fetch train and test splits if they are not cached yet
        download_root = "https://raw.githubusercontent.com/juand-r/entity-recognition-datasets/master/data/SEC-filings/CONLL-format/data/"
        cached_path(f"{download_root}test/FIN3.txt", Path("datasets") / dataset_name)
        cached_path(f"{download_root}train/FIN5.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            train_file="FIN5.txt",
            test_file="FIN3.txt",
            skip_first_line=True,
            **corpusargs,
        )
class NER_ENGLISH_RESTAURANT(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the MIT Restaurant corpus.

        The corpus will be downloaded from https://groups.csail.mit.edu/sls/downloads/restaurant/.
        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
            You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # token in the first column, NER tag in the second
        columns = {0: "text", 1: "ner"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # fetch both splits if they are not cached yet
        download_root = "https://megantosh.s3.eu-central-1.amazonaws.com/MITRestoCorpus/"
        for split_file in ("test.txt", "train.txt"):
            cached_path(f"{download_root}{split_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="latin-1",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_STACKOVERFLOW(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the STACKOVERFLOW_NER corpus.

        The first time you call this constructor it will automatically download the dataset.

        The data files use a CoNLL-like format: each line is
        ``<word> TAB <NE> TAB <word> TAB <markdown>``, the end of a sentence is
        marked with an empty line, ``<NE>`` is the human-annotated named entity
        and ``<markdown>`` holds the code tags provided by the users who wrote
        the posts.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
            You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # column format: token, entity tag, and (column 3) user-supplied markdown tag
        columns = {0: "word", 1: "ner", 3: "markdown"}

        # collapse fine-grained labels into coarser ones; several rare labels map to "O"
        entity_mapping = {
            "Library_Function": "Function",
            "Function_Name": "Function",
            "Class_Name": "Class",
            "Library_Class": "Class",
            "Organization": "Website",
            "Library_Variable": "Variable",
            "Variable_Name": "Variable",
            "Error_Name": "O",
            "Keyboard_IP": "O",
            "Value": "O",
            "Output_Block": "O",
        }

        # placeholder sentences that must not become training examples
        banned_sentences = [
            "code omitted for annotation",
            "omitted for annotation",
            "CODE_BLOCK :",
            "OP_BLOCK :",
            "Question_URL :",
            "Question_ID :",
        ]

        download_root = "https://raw.githubusercontent.com/jeniyat/StackOverflowNER/master/resources/annotated_ner_data/StackOverflow/"
        for split in ["train", "test", "dev"]:
            cached_path(f"{download_root}{split}.txt", Path("datasets") / dataset_name)
            # count question/answer posts per split, purely for informational logging
            question_count = 0
            answer_count = 0
            with (data_folder / (split + ".txt")).open(encoding="utf-8") as corpus_file:
                for row in corpus_file:
                    if row.startswith("Question_ID"):
                        question_count += 1
                    if row.startswith("Answer_to_Question_ID"):
                        answer_count += 1
            log.info(f"File {split} has {question_count} questions and {answer_count} answers.")

        super().__init__(
            data_folder,
            columns,
            train_file="train.txt",
            test_file="test.txt",
            dev_file="dev.txt",
            encoding="utf-8",
            banned_sentences=banned_sentences,
            in_memory=in_memory,
            label_name_map=entity_mapping,
            **corpusargs,
        )
class NER_ENGLISH_TWITTER(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the twitter_ner corpus.

        The corpus will be downloaded from https://raw.githubusercontent.com/aritter/twitter_nlp/master/data/annotated/ner.txt.
        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # NOTE: a redundant `if not base_path:` fallback was removed here —
        # base_path is always set to a truthy Path by the first line above.
        data_folder = base_path / dataset_name

        # download data if necessary
        twitter_ner_path = "https://raw.githubusercontent.com/aritter/twitter_nlp/master/data/annotated/"
        cached_path(f"{twitter_ner_path}ner.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="latin-1",
            train_file="ner.txt",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_PERSON(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
    ) -> None:
        """Initialize the PERSON_NER corpus for person names.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # NOTE: a redundant `if not base_path:` fallback was removed here —
        # base_path is always set to a truthy Path by the first line above.
        data_folder = base_path / dataset_name

        # download the four source files into data_folder/raw if not present locally
        conll_path = "https://raw.githubusercontent.com/das-sudeshna/genid/master/"
        cached_path(f"{conll_path}conll-g.conll", data_folder / "raw")
        cached_path(f"{conll_path}ieer-g.conll", data_folder / "raw")
        cached_path(f"{conll_path}textbook-g.conll", data_folder / "raw")
        cached_path(f"{conll_path}wiki-g.conll", data_folder / "raw")

        # concatenate the four source files into one training file
        self.__concatAllFiles(data_folder)

        super().__init__(data_folder, columns, in_memory=in_memory, train_file="bigFile.conll")

    @staticmethod
    def __concatAllFiles(data_folder):
        """Concatenate every file in ``data_folder/raw`` into ``data_folder/bigFile.conll``.

        NOTE(review): os.listdir order is platform-dependent, so the resulting file
        order may vary between systems — presumably harmless for training; confirm.
        """
        arr = os.listdir(data_folder / "raw")
        with open(data_folder / "bigFile.conll", "w") as outfile:
            for fname in arr:
                with open(data_folder / "raw" / fname) as infile:
                    outfile.write(infile.read())
class NER_ENGLISH_WEBPAGES(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the WEBPAGES_NER corpus.

        The corpus was introduced in the paper "Design Challenges and Misconceptions in
        Named Entity Recognition" by Ratinov and Roth (2009): https://aclanthology.org/W09-1119/.
        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: NER tag in column 0, token text in column 5
        columns = {0: "ner", 5: "text"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # NOTE: a redundant `if not base_path:` fallback was removed here —
        # base_path is always set to a truthy Path by the first line above.
        data_folder = base_path / dataset_name
        import tarfile

        if not os.path.isfile(data_folder / "webpages_ner.txt"):
            # download the tarball and extract all annotated files
            tar_file = "https://cogcomp.seas.upenn.edu/Data/NERWebpagesColumns.tgz"
            webpages_ner_path = cached_path(tar_file, Path("datasets") / dataset_name)
            # context manager ensures the archive handle is closed even on error
            with tarfile.open(webpages_ner_path) as tf:
                tf.extractall(data_folder)
            outputfile = os.path.abspath(data_folder)

            # merge the extracted files into one file, as the archive contains many
            # NOTE(review): `outputfile / data_folder` joins a str with a Path via
            # Path.__rtruediv__; since data_folder is absolute the result is just
            # data_folder — it works, but the expression is confusing; confirm
            # before simplifying.
            with open(outputfile / data_folder / "webpages_ner.txt", "w+") as outfile:
                for files in os.walk(outputfile):
                    f = files[1]  # sub-directory names of the walked directory
                    ff = os.listdir(outputfile / data_folder / f[-1])
                    for _i, file in enumerate(ff):
                        if file.endswith(".gold"):
                            with open(
                                outputfile / data_folder / f[-1] / file,
                                "r+",
                                errors="replace",
                            ) as infile:
                                content = infile.read()
                            outfile.write(content)
                    break  # only the top-level os.walk() entry is used

        super().__init__(
            data_folder,
            columns,
            train_file="webpages_ner.txt",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_WNUT_2020(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the WNUT_2020_NER corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download data if necessary
        github_url = "https://github.com/jeniyat/WNUT_2020_NER/archive/master.zip"

        for sample in ["train", "test", "dev"]:
            sample_file = data_folder / (sample + ".txt")
            if not sample_file.is_file():
                # NOTE(review): the zip is re-fetched (from cache) and re-unpacked for
                # every missing split because the extracted tree is removed at the end
                # of each iteration — wasteful but functionally correct.
                zip_path = cached_path(f"{github_url}", Path("datasets") / dataset_name)
                # unzip the downloaded repo and merge the train, dev and test datasets
                unpack_file(zip_path, data_folder, "zip", False)  # unzipped folder name: WNUT_2020_NER-master

                # the test split lives in a "_data_2020" directory inside the repo,
                # train/dev in plain "_data" directories
                if sample == "test":
                    file_path = data_folder / Path("WNUT_2020_NER-master/data/" + sample + "_data_2020/Conll_Format/")
                else:
                    file_path = data_folder / Path("WNUT_2020_NER-master/data/" + sample + "_data/Conll_Format/")
                filenames = os.listdir(file_path)
                # concatenate the many per-document CoNLL files into one split file
                with open(data_folder / (sample + ".txt"), "w") as outfile:
                    for fname in filenames:
                        with open(file_path / fname) as infile:
                            lines = infile.read()
                            outfile.write(lines)

                shutil.rmtree(str(data_folder / "WNUT_2020_NER-master"))  # clean up when done

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_ENGLISH_WIKIGOLD(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the wikigold corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
            You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        # token and NER tag columns
        columns = {0: "text", 1: "ner"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # download the single corpus file if it is not cached yet
        download_root = "https://raw.githubusercontent.com/juand-r/entity-recognition-datasets/master/data/wikigold/CONLL-format/data/"
        cached_path(f"{download_root}wikigold.conll.txt", Path("datasets") / dataset_name)

        doc_separator = "-DOCSTART-" if document_as_sequence else None
        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            train_file="wikigold.conll.txt",
            document_separator_token=doc_separator,
            **corpusargs,
        )
class NER_FINNISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the FiNER (digitoday) corpus for Finnish NER.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # NOTE: a redundant `if not base_path:` fallback was removed here —
        # base_path is always set to a truthy Path by the first line above.
        data_folder = base_path / dataset_name

        # download data if necessary
        ner_finnish_path = "https://raw.githubusercontent.com/mpsilfve/finer-data/master/data/digitoday."
        cached_path(f"{ner_finnish_path}2014.train.csv", Path("datasets") / dataset_name)
        cached_path(f"{ner_finnish_path}2014.dev.csv", Path("datasets") / dataset_name)
        cached_path(f"{ner_finnish_path}2015.test.csv", Path("datasets") / dataset_name)

        # the test file contains stray single-token lines without a tag; drop them
        self._remove_lines_without_annotations(data_file=Path(data_folder / "digitoday.2015.test.csv"))

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            skip_first_line=True,
            **corpusargs,
        )

    def _remove_lines_without_annotations(self, data_file: Union[str, Path]):
        """Rewrite *data_file* in place, dropping lines with exactly one whitespace-separated token."""
        with open(data_file) as f:
            lines = f.readlines()
        with open(data_file, "w") as f:
            for line in lines:
                if len(line.split()) != 1:
                    f.write(line)
class NER_GERMAN_BIOFID(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the BIOfid corpus (German biodiversity literature NER).

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
            You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # token, lemma, POS and NER tag columns
        columns = {0: "text", 1: "lemma", 2: "pos", 3: "ner"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # fetch all three splits if they are not cached yet
        download_root = "https://raw.githubusercontent.com/texttechnologylab/BIOfid/master/BIOfid-Dataset-NER/"
        for split_file in ("train.conll", "dev.conll", "test.conll"):
            cached_path(f"{download_root}{split_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class NER_GERMAN_EUROPARL(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the EUROPARL_NER_GERMAN corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token, lemma, POS, noun-phrase chunk, NER tag
        columns = {0: "text", 1: "lemma", 2: "pos", 3: "np", 4: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download data if necessary
        europarl_ner_german_path = "https://nlpado.de/~sebastian/software/ner/"
        cached_path(
            f"{europarl_ner_german_path}ep-96-04-15.conll",
            Path("datasets") / dataset_name,
        )
        cached_path(
            f"{europarl_ner_german_path}ep-96-04-16.conll",
            Path("datasets") / dataset_name,
        )

        # the raw files tag chunks without IOB prefixes; normalize them in place
        self._add_IOB_tags(
            data_file=Path(data_folder / "ep-96-04-15.conll"),
            encoding="latin-1",
            ner_column=4,
        )
        self._add_IOB_tags(
            data_file=Path(data_folder / "ep-96-04-16.conll"),
            encoding="latin-1",
            ner_column=4,
        )

        super().__init__(
            data_folder,
            columns,
            encoding="latin-1",
            in_memory=in_memory,
            train_file="ep-96-04-16.conll",
            test_file="ep-96-04-15.conll",
            **corpusargs,
        )

    def _add_IOB_tags(self, data_file: Union[str, Path], encoding: str = "utf8", ner_column: int = 1):
        """Function that adds IOB tags if only chunk names are provided.

        e.g. words are tagged PER instead of B-PER or I-PER. Replaces '0' with 'O' as the no-chunk tag since ColumnCorpus expects
        the letter 'O'. Additionally it removes lines with no tags in the data file and can also
        be used if the data is only partially IOB tagged.

        BUGFIX: the inner helper previously ignored its ``current_line`` parameter and
        read ``line_list`` from the enclosing scope; it now uses its parameters. The
        two identical branches of the old ``pred == "O"`` check (and the ``pred``
        state they maintained) were dead and have been removed — every un-prefixed
        tag receives an ``I-`` prefix, exactly as before.

        Parameters
        ----------
        data_file : Union[str, Path]
            Path to the data file.
        encoding : str, optional
            Encoding used in open function. The default is "utf8".
        ner_column : int, optional
            Specifies the ner-tagged column. The default is 1 (the second column).
        """

        def rewrite_line(current_line: List[str], ner: int, new_tag: str):
            # write the line back with the NER column replaced by new_tag,
            # columns separated by single spaces
            for i in range(0, len(current_line)):
                if i == 0:
                    f.write(current_line[i])
                elif i == ner:
                    f.write(" " + new_tag)
                else:
                    f.write(" " + current_line[i])
            f.write("\n")

        with open(file=data_file, encoding=encoding) as f:
            lines = f.readlines()
        with open(file=data_file, mode="w", encoding=encoding) as f:
            for line in lines:
                line_list = line.split()
                if len(line_list) > 2:  # word with tags
                    ner_tag = line_list[ner_column]
                    if ner_tag in ["0", "O"]:  # no chunk
                        rewrite_line(line_list, ner_column, "O")
                    elif "-" not in ner_tag:  # no IOB prefix yet: mark as chunk-internal
                        rewrite_line(line_list, ner_column, "I-" + ner_tag)
                    else:  # line already has IOB tag (tag contains '-')
                        f.write(line)
                elif len(line_list) == 0:  # empty line
                    f.write("\n")
                # lines with one or two fields are intentionally dropped (no tags)
class NER_GERMAN_LEGAL(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the LER_GERMAN (Legal Entity Recognition) corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
            You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
        """
        # token and NER tag columns
        columns = {0: "text", 1: "ner"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # download the single corpus file if it is not cached yet
        download_root = "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/"
        cached_path(f"{download_root}ler.conll", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            train_file="ler.conll",
            **corpusargs,
        )
class NER_GERMAN_GERMEVAL(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the GermEval NER corpus for German.

        On first use the three splits are fetched from Google Drive into the corpus
        folder; afterwards the cached copies are reused.

        :param base_path: Default is None, meaning the corpus is stored under the cache root.
            You can override this to point to a different folder.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # token text is in column 1, NER tag in column 2
        columns = {1: "text", 2: "ner"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # download all three splits if the corpus folder does not exist yet
        if not data_folder.exists():
            os.makedirs(data_folder)

            import gdown

            drive_ids = {
                "train.tsv": "1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P",
                "test.tsv": "1u9mb7kNJHWQCWyweMDRMuTFoOHOfeBTH",
                "dev.tsv": "1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm",
            }
            for split_name, drive_id in drive_ids.items():
                gdown.download(
                    url="https://drive.google.com/uc?id={}".format(drive_id),
                    output=str(data_folder / split_name),
                )

        super().__init__(
            data_folder,
            columns,
            comment_symbol="#",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_GERMAN_POLITICS(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        column_delimiter: str = r"\s+",
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize corpus with Named Entity Model for German Politics (NEMGP).

        data from https://www.thomas-zastrow.de/nlp/.
        The first time you call this constructor it will automatically download the
        dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param column_delimiter: Regex used to split the columns of the generated files.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download and parse data if necessary
        german_politics_path = "https://www.thomas-zastrow.de/nlp/nemgp_trainingdata_01.txt.zip"
        corpus_file_name = "nemgp_trainingdata_01.txt"
        parsed_dataset = data_folder / "raw" / corpus_file_name

        if not parsed_dataset.exists():
            german_politics_zip = cached_path(f"{german_politics_path}", Path("datasets") / dataset_name / "raw")
            unpack_file(german_politics_zip, data_folder / "raw", "zip", False)
            # rewrite the raw <START:tag> ... <END> markup into two-column format
            self._convert_to_column_corpus(parsed_dataset)

        # create train test dev if not exist
        train_dataset = data_folder / "train.txt"
        if not train_dataset.exists():
            self._create_datasets(parsed_dataset, data_folder)

        super().__init__(
            data_folder,
            columns,
            column_delimiter=column_delimiter,
            train_file="train.txt",
            dev_file="dev.txt",
            test_file="test.txt",
            encoding="utf-8",
            in_memory=in_memory,
            **corpusargs,
        )

    def _convert_to_column_corpus(self, data_file: Union[str, Path]):
        """Rewrite the raw NEMGP markup file in place into token-per-line format.

        Tokens inside a ``<START:tag> ... <END>`` span are written with B-/I-
        prefixed tags, all other tokens with "O"; a "." token ends a sentence.
        """
        with open(data_file, encoding="utf-8") as f:
            lines = f.readlines()
        with open(data_file, "w", encoding="utf-8") as f:
            tag_bool = False  # currently inside a <START:...> ... <END> span?
            new_sentence = True  # next tagged token starts an entity (gets "B")
            for line in lines:
                # collapse runs of whitespace, then tokenize on single spaces
                line_splits = re.sub(r"\s{2,}", " ", line).strip().split(" ")
                for substr in line_splits:
                    if substr == ".":
                        # sentence boundary: emit an empty line
                        f.write("\n")
                        new_sentence = True
                    elif "<START:" in substr:
                        tag_bool = True
                        # NOTE(review): str.strip removes a *character set*, not a
                        # prefix string — this happens to work for the tag names
                        # present here but is fragile; verify for new tag names.
                        tag = substr.strip("<START:").strip(">")
                        if "loc" in tag:
                            tag_IOB = "-LOC"
                        elif "per" in tag:
                            tag_IOB = "-PER"
                        elif "org" in tag:
                            tag_IOB = "-ORG"
                        elif "misc" in tag:
                            tag_IOB = "-MISC"
                        # NOTE(review): if none of the four known tags match,
                        # tag_IOB stays unbound and the write below would raise —
                        # presumably the corpus only uses these tags; confirm.
                    elif "<END>" in substr:
                        tag_bool = False
                        new_sentence = True
                    else:
                        if tag_bool:
                            # first token of an entity gets "B", later ones "I"
                            if new_sentence is True:
                                start = "B"
                                new_sentence = False
                            else:
                                start = "I"
                            f.write(substr.strip(" ") + " " + start + tag_IOB + "\n")
                        else:
                            f.write(substr.strip(" ") + " " + "O" + "\n")

    def _create_datasets(self, data_file: Union[str, Path], data_folder: Path):
        """Split the parsed corpus line-wise into train (80%), test (10%) and dev (rest)."""
        with open(data_file) as file:
            num_lines = len(file.readlines())
            file.seek(0)

            # line budgets for the three splits
            train_len = round(num_lines * 0.8)
            test_len = round(num_lines * 0.1)

            with (data_folder / "train.txt").open("w", encoding="utf-8") as train, (data_folder / "test.txt").open(
                "w", encoding="utf-8"
            ) as test, (data_folder / "dev.txt").open("w", encoding="utf-8") as dev:
                k = 0
                for line in file.readlines():
                    k += 1
                    if k <= train_len:
                        train.write(line)
                    elif k > train_len and k <= (train_len + test_len):
                        test.write(line)
                    elif k > (train_len + test_len) and k <= num_lines:
                        dev.write(line)
class NER_HUNGARIAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the NER Business corpus for Hungarian.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
            You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        # token and NER tag columns
        columns = {0: "text", 1: "ner"}

        # corpus directory below the cache root (or a user-supplied base path)
        dataset_name = self.__class__.__name__.lower()
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = base_path / dataset_name

        # download and unzip the corpus if the extracted file is not present yet
        if not os.path.isfile(data_folder / "hun_ner_corpus.txt"):
            zipped_corpus = cached_path(
                "https://rgai.sed.hu/sites/rgai.sed.hu/files/business_NER.zip",
                Path("datasets") / dataset_name,
            )
            unpack_file(zipped_corpus, data_folder, mode="zip", keep=True)

        doc_separator = "-DOCSTART-" if document_as_sequence else None
        super().__init__(
            data_folder,
            columns,
            train_file="hun_ner_corpus.txt",
            column_delimiter="\t",
            encoding="latin-1",
            in_memory=in_memory,
            label_name_map={"0": "O"},
            document_separator_token=doc_separator,
            **corpusargs,
        )
class NER_ICELANDIC(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the ICELANDIC_NER corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        if not os.path.isfile(data_folder / "icelandic_ner.txt"):
            # download zip
            icelandic_ner = "https://repository.clarin.is/repository/xmlui/handle/20.500.12537/42/allzip"
            icelandic_ner_path = cached_path(icelandic_ner, Path("datasets") / dataset_name)

            # unpacking the zip
            unpack_file(icelandic_ner_path, data_folder, mode="zip", keep=True)
            outputfile = os.path.abspath(data_folder)

            # merge the files in one as the zip is containing multiples files
            # NOTE(review): `outputfile / data_folder` joins a str with a Path via
            # Path.__rtruediv__; since data_folder is absolute the result is simply
            # data_folder — presumably intentional, but worth confirming.
            # NOTE(review): the output file itself ends in ".txt" and lives in the
            # walked directory, so it can appear in the listing while being merged —
            # likely read while still empty; verify.
            with open(outputfile / data_folder / "icelandic_ner.txt", "wb") as outfile:
                for files in os.walk(outputfile / data_folder):
                    f = files[2]  # file names in the currently walked directory
                    for i in range(len(f)):
                        if f[i].endswith(".txt"):
                            with open(outputfile / data_folder / f[i], "rb") as infile:
                                contents = infile.read()
                            outfile.write(contents)

        super().__init__(
            data_folder,
            columns,
            train_file="icelandic_ner.txt",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_JAPANESE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Hironsan/IOB2 corpus for Japanese.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # two columns: token and NER tag
        columns = {0: "text", 1: "ner"}

        # corpus folder is derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch the raw corpus files (hironsan.txt, ja.wikipedia.conll) from GitHub if not cached
        IOB2_path = "https://raw.githubusercontent.com/Hironsan/IOB2Corpus/master/"
        cached_path(f"{IOB2_path}hironsan.txt", data_folder / "raw")
        cached_path(f"{IOB2_path}ja.wikipedia.conll", data_folder / "raw")

        # the raw files lack blank lines between sentences; build a fixed-up train file once
        train_data_file = data_folder / "train.txt"
        if not train_data_file.is_file():
            self.__prepare_jap_wikinews_corpus(data_folder / "raw" / "hironsan.txt", data_folder / "train.txt")
            self.__prepare_jap_wikipedia_corpus(data_folder / "raw" / "ja.wikipedia.conll", data_folder / "train.txt")

        super().__init__(
            data_folder,
            columns,
            train_file="train.txt",
            in_memory=in_memory,
            default_whitespace_after=0,
            **corpusargs,
        )

    @staticmethod
    def __prepare_jap_wikipedia_corpus(file_in: Union[str, Path], file_out: Union[str, Path]):
        """Append the Wikipedia CoNLL file to *file_out*, adding a blank line after each sentence-final '。'."""
        with open(file_in) as source:
            content = source.readlines()
        with open(file_out, "a") as sink:
            for row in content:
                # drop lines that are already blank
                if row[0] == "\n":
                    continue
                sink.write(row)
                # '。' ends a sentence: terminate it with an empty line
                if row[0] == "。":
                    sink.write("\n")

    @staticmethod
    def __prepare_jap_wikinews_corpus(file_in: Union[str, Path], file_out: Union[str, Path]):
        """Append the wikinews file to *file_out*, keeping only the token and the last (NER) column."""
        with open(file_in) as source:
            content = source.readlines()
        with open(file_out, "a") as sink:
            for row in content:
                fields = row.split("\t")
                sink.write("\n" if fields[0] == "\n" else fields[0] + "\t" + fields[-1])
class NER_MASAKHANE(MultiCorpus):
    def __init__(
        self,
        languages: Union[str, List[str]] = "luo",
        version: str = "v2",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Masakhane corpus available on https://github.com/masakhane-io/masakhane-ner/tree/main/data.

        It consists of ten African languages. Pass a language code or a list of language codes to initialize the corpus
        with the languages you require. If you pass "all", all languages will be initialized.

        :param languages: Language name(s) or code(s), or "all" for every supported language.
        :param version: Specifies version of the dataset. Currently, only "v1" and "v2" are supported, using "v2" as default.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :raises ValueError: If the version or one of the languages is not supported.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # if only one language is given as a plain string, wrap it in a list
        # (isinstance instead of type comparison: also accepts str subclasses)
        if isinstance(languages, str):
            languages = [languages]

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        supported_versions = ["v1", "v2"]

        if version not in supported_versions:
            log.error(f"The specified version '{version}' is not in the list of supported version!")
            log.error(f"Supported versions are '{supported_versions}'!")
            # ValueError (subclass of Exception) instead of a bare Exception, with a message
            raise ValueError(f"Unsupported version '{version}'; supported versions are {supported_versions}")

        data_folder = base_path / dataset_name / version

        # human-readable language names mapped to the codes used in the repository layout
        languages_to_code = {
            "v1": {
                "amharic": "amh",
                "hausa": "hau",
                "igbo": "ibo",
                "kinyarwanda": "kin",
                "luganda": "lug",
                "luo": "luo",
                "naija": "pcm",
                "swahili": "swa",
                "yoruba": "yor",
                "wolof": "wol",
            },
            "v2": {
                "bambara": "bam",
                "ghomala": "bbj",
                "ewe": "ewe",
                "fon": "fon",
                "hausa": "hau",
                "igbo": "ibo",
                "kinyarwanda": "kin",
                "luganda": "lug",
                "mossi": "mos",
                "naija": "pcm",
                "chichewa": "nya",
                "chishona": "sna",
                "kiswahili": "swa",
                "setswana": "tsn",
                "akan_twi": "twi",
                "wolof": "wol",
                "isixhosa": "xho",
                "yoruba": "yor",
                "isizulu": "zul",
            },
        }
        language_to_code = languages_to_code[version]

        data_paths = {
            "v1": "https://raw.githubusercontent.com/masakhane-io/masakhane-ner/main/data",
            "v2": "https://raw.githubusercontent.com/masakhane-io/masakhane-ner/main/MasakhaNER2.0/data",
        }

        # use all languages if explicitly set to "all"
        if languages == ["all"]:
            languages = list(language_to_code.values())

        corpora: List[Corpus] = []
        for language in languages:
            # accept a human-readable language name and translate it to its code
            if language in language_to_code:
                language = language_to_code[language]

            if language not in language_to_code.values():
                log.error(f"Language '{language}' is not in list of supported languages!")
                log.error(f"Supported are '{language_to_code.values()}'!")
                log.error("Instantiate this Corpus for instance like so 'corpus = NER_MASAKHANE(languages='luo')'")
                raise ValueError(f"Language '{language}' is not supported by NER_MASAKHANE {version}")

            language_folder = data_folder / language

            # download data if necessary
            data_path = f"{data_paths[version]}/{language}/"
            cached_path(f"{data_path}dev.txt", language_folder)
            cached_path(f"{data_path}test.txt", language_folder)
            cached_path(f"{data_path}train.txt", language_folder)

            # initialize comlumncorpus and add it to list
            log.info(f"Reading data for language {language}@{version}")
            corp = ColumnCorpus(
                data_folder=language_folder,
                column_format=columns,
                encoding="utf-8",
                in_memory=in_memory,
                name=language,
                **corpusargs,
            )
            corpora.append(corp)

        super().__init__(
            corpora,
            name="masakhane-" + "-".join(languages),
        )
class NER_MULTI_CONER(MultiFileColumnCorpus):
    def __init__(
        self,
        task: str = "multi",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Download and Initialize the MultiCoNer corpus.

        :param task: either 'multi', 'code-switch', or the language code for one of the mono tasks.
        :param base_path: Path to the CoNLL-03 corpus (i.e. 'conll_03' folder) on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :raises ValueError: If *task* is not one of the supported task codes.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # maps each task code to its sub-folder inside the downloaded corpus
        folders = {
            "bn": "BN-Bangla",
            "de": "DE-German",
            "en": "EN-English",
            "es": "ES-Espanish",
            "fa": "FA-Farsi",
            "hi": "HI-Hindi",
            "ko": "KO-Korean",
            "nl": "NL-Dutch",
            "ru": "RU-Russian",
            "tr": "TR-Turkish",
            "zh": "ZH-Chinese",
            "mix": "MIX_Code_mixed",
            "multi": "MULTI_Multilingual",
        }
        possible_tasks = list(folders.keys())

        task = task.lower()
        if task not in possible_tasks:
            raise ValueError(f"task has to be one of {possible_tasks}, but is '{task}'")

        # token in column 0, NER tag in column 3
        columns = {0: "text", 3: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = cached_path("s3://multiconer", base_path / dataset_name) / "multiconer2022"

        task_folder = data_folder / folders[task]
        super().__init__(
            train_files=[task_folder / f"{task}_train.conll"],
            dev_files=[task_folder / f"{task}_dev.conll"],
            test_files=[task_folder / f"{task}_test.conll"],
            column_format=columns,
            comment_symbol="# id ",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_MULTI_CONER_V2(MultiFileColumnCorpus):
    def __init__(
        self,
        task: str = "multi",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        use_dev_as_test: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the MultiCoNer V2 corpus for the Semeval2023 workshop.

        This is only possible if you've applied and downloaded it to your machine.
        Apply for the corpus from here https://multiconer.github.io/dataset and unpack the .zip file's content into
        a folder called 'ner_multi_coner_v2'. Then set the base_path parameter in the constructor to the path to the
        parent directory where the ner_multi_coner_v2 folder resides. You can also create the multiconer in
        the {FLAIR_CACHE_ROOT}/datasets folder to leave the path empty.

        :param task: either 'multi', 'code-switch', or the language code for one of the mono tasks.
        :param base_path: Path to the ner_multi_coner_v2 corpus (i.e. 'ner_multi_coner_v2' folder) on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param use_dev_as_test: If True, it uses the dev set as test set and samples random training data for a dev split.
        :raises ValueError: If *task* is not one of the supported task codes.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # maps each language code to its sub-folder name in the distribution
        folders = {
            "bn": "BN-Bangla",
            "de": "DE-German",
            "en": "EN-English",
            "es": "ES-Espanish",
            "fa": "FA-Farsi",
            "fr": "FR-French",
            "hi": "HI-Hindi",
            "it": "IT-Italian",
            "pt": "PT-Portuguese",
            "sv": "SV-Swedish",
            "uk": "UK-Ukrainian",
            "zh": "ZH-Chinese",
        }
        possible_tasks = [*folders, "multi"]

        task = task.lower()
        if task not in possible_tasks:
            raise ValueError(f"task has to be one of {possible_tasks}, but is '{task}'")

        # token in column 0, NER tag in column 3
        columns = {0: "text", 3: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name / "train_dev"
        if not data_folder.exists():
            # the corpus must be obtained manually; point the user at the instructions
            log.warning("-" * 100)
            log.warning(f'WARNING: MultiCoNerV2 dataset not found at "{data_folder}".')
            log.warning('Instructions for obtaining the data can be found here: https://multiconer.github.io/dataset"')
            log.warning("-" * 100)

        if task == "multi":
            # 'multi' combines the per-language splits of every language
            train_files = list(data_folder.glob("*-train.conll"))
            dev_files = list(data_folder.glob("*-dev.conll"))
        else:
            train_files = [data_folder / f"{task}-train.conll"]
            dev_files = [data_folder / f"{task}-dev.conll"]

        # the real test split is not distributed; optionally repurpose dev as test
        if use_dev_as_test:
            test_files, dev_files = dev_files, []
        else:
            test_files = []

        super().__init__(
            train_files=train_files,
            dev_files=dev_files,
            test_files=test_files,
            column_format=columns,
            comment_symbol="# id ",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_MULTI_WIKIANN(MultiCorpus):
    def __init__(
        self,
        languages: Union[str, List[str]] = "en",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the WkiAnn corpus for cross-lingual NER consisting of datasets from 282 languages that exist in Wikipedia.

        See https://elisa-ie.github.io/wikiann/ for details and for the languages and their
        respective abbreveations, i.e. "en" for english. (license: https://opendatacommons.org/licenses/by/)

        Parameters
        ----------
        languages : Union[str, List[str]]
            Should be an abbreviation of a language ("en", "de",..) or a list of abbreviations.
            The datasets of all passed languages will be saved in one MultiCorpus.
            (Note that, even though listed on https://elisa-ie.github.io/wikiann/ some datasets are empty.
            This includes "aa", "cho", "ho", "hz", "ii", "jam", "kj", "kr", "mus", "olo" and "tcy".)
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
            The data is in bio-format. It will by default (with the string "ner" as value) be transformed
            into the bioes format. If you dont want that set it to None.
        in_memory : bool, optional
            Specify that the dataset should be loaded in memory, which speeds up the training process but takes increases the RAM usage significantly.
        """
        if type(languages) == str:
            languages = [languages]

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # For each language in languages, the file is downloaded if not existent
        # Then a comlumncorpus of that data is created and saved in a list
        # this list is handed to the multicorpus

        # list that contains the columncopora
        corpora: List[Corpus] = []

        google_drive_path = "https://drive.google.com/uc?id="
        # download data if necessary
        first = True
        for language in languages:
            language_folder = data_folder / language
            file_name = "wikiann-" + language + ".bio"

            # if language not downloaded yet, download it
            if not language_folder.exists():
                if first:
                    # lazy imports: only needed when at least one language must be downloaded
                    import tarfile

                    import gdown

                    first = False
                # create folder
                os.makedirs(language_folder)
                # get google drive id from list
                google_id = self._google_drive_id_from_language_name(language)
                url = google_drive_path + google_id

                # download from google drive
                gdown.download(url, str(language_folder / language) + ".tar.gz")

                # unzip
                log.info("Extracting data...")
                tar = tarfile.open(str(language_folder / language) + ".tar.gz", "r:gz")
                # tar.extractall(language_folder,members=[tar.getmember(file_name)])
                tar.extract(file_name, str(language_folder))
                tar.close()
                log.info("...done.")

                # transform data into required format
                # the processed dataset has the additional ending "_new"
                log.info("Processing dataset...")
                self._silver_standard_to_simple_ner_annotation(str(language_folder / file_name))
                # remove the unprocessed dataset
                os.remove(str(language_folder / file_name))
                log.info("...done.")

            # initialize comlumncorpus and add it to list
            log.info(f"Reading data for language {language}")
            corp = ColumnCorpus(
                data_folder=language_folder,
                column_format=columns,
                train_file=file_name + "_new",
                in_memory=in_memory,
                **corpusargs,
            )
            corpora.append(corp)
            log.info("...done.")

        super().__init__(
            corpora,
            name="wikiann",
        )

    def _silver_standard_to_simple_ner_annotation(self, data_file: Union[str, Path]):
        """Reduce the silver-standard file to two columns (token and NER tag).

        Writes the result next to the input with the additional suffix "_new";
        blank lines (sentence separators) are preserved as-is.
        """
        with open(data_file, encoding="utf-8") as f_read, open(
            str(data_file) + "_new", "w+", encoding="utf-8"
        ) as f_write:
            while True:
                line = f_read.readline()
                if line:
                    if line == "\n":
                        f_write.write(line)
                    else:
                        # keep only the first (token) and last (NER tag) column
                        liste = line.split()
                        f_write.write(liste[0] + " " + liste[-1] + "\n")
                else:
                    break

    def _google_drive_id_from_language_name(self, language):
        """Return the Google Drive file id hosting the wikiann dump for *language*.

        Raises KeyError if the language abbreviation is unknown. Entries marked
        "# empty" are listed on the wikiann page but contain no data.
        """
        languages_ids = {
            "aa": "1tDDlydKq7KQQ3_23Ysbtke4HJOe4snIk",  # empty
            "ab": "1hB8REj2XA_0DjI9hdQvNvSDpuBIb8qRf",
            "ace": "1WENJS2ppHcZqaBEXRZyk2zY-PqXkTkgG",
            "ady": "1n6On8WWDHxEoybj7F9K15d_fkGPy6KgO",
            "af": "1CPB-0BD2tg3zIT60D3hmJT0i5O_SKja0",
            "ak": "1l2vlGHnQwvm9XhW5S-403fetwUXhBlZm",
            "als": "196xyYjhbie7sYLHLZHWkkurOwQLi8wK-",
            "am": "1ug1IEoExKD3xWpvfZprAPSQi82YF9Cet",
            "an": "1DNLgPOAOsGZBYd6rC5ddhzvc9_DtWnk2",
            "ang": "1W_0ti7Tl8AkqM91lRCMPWEuUnPOAZroV",
            "ar": "1tyvd32udEQG_cNeVpaD5I2fxvCc6XKIS",
            "arc": "1hSOByStqPmP3b9HfQ39EclUZGo8IKCMb",
            "arz": "1CKW5ZhxTpIHmc8Jt5JLz_5O6Cr8Icsan",
            "as": "12opBoIweBLM8XciMHT4B6-MAaKdYdvpE",
            "ast": "1rp64PxGZBDfcw-tpFBjLg_ddLDElG1II",
            "av": "1hncGUrkG1vwAAQgLtwOf41BWkHkEvdss",
            "ay": "1VmIsWpMTz442b4Mx798ZOgtB9vquKQtf",
            "az": "1FXDXsvBSdqc7GGIDZv0hqBOaaw12Ip2-",
            "azb": "1amVqOuHLEkhjn8rkGUl-mXdZlaACWyNT",
            "ba": "1aLx1d8GagI11VZVYOGQy0BEePeqoT0x3",
            "bar": "1JZ8-k8ZmnpWYI_Yl_cBBgjVdxoM9Daci",
            "bat-smg": "1trxKXDFSeKsygTMKi-ZqXSJs7F90k5a8",
            "bcl": "1Hs0k7KVZ2DPsqroZ4cUKcwZG4HdPV794",
            "be-x-old": "1gaK-spj1m6eGYQ-SsngLxxLUvP1VRk08",
            "be": "1_ttfOSy9BzCRkIT_p3mImT82XRPpEiuH",
            "bg": "1Iug6gYKemb0OrLTUrKDc_c66YGypTfCF",
            "bh": "12OcSFLu940A8tVQLxI8pnxKBpTeZHmrh",
            "bi": "1rftVziS_pqARx4mvLJC0sKLY-OL5ZIjE",
            "bjn": "1n17mkRjPUAOWQk5LQs2C3Tz3ShxK0enZ",
            "bm": "1284dwO_sfdsWE7FR06HhfBRUb8ePesKR",
            "bn": "1K2DM1mT4hkr6NlAIBTj95BeVXcgvpgDm",
            "bo": "1SzGHDVK-OguKdjZ4DXWiOJVrie1iHeWm",
            "bpy": "1m-e5EoruJufvwBEgJLmJtx6jzx64pYN2",
            "br": "1xdaBoJ1DnwI0iEq7gQN1dWcABAs_bM9H",
            "bs": "167dsB01trMYFQl8FshtIdfhjw7IfVKbk",
            "bug": "1yCnevM9_KJzFk27Vxsva_20OacLo4Uam",
            "bxr": "1DlByAX3zB-9UyEAVD4wtX-R7mXC-8xum",
            "ca": "1LuUgbd9sGa-5Ahcsy31EK89a3WOowftY",
            "cbk-zam": "1kgF8xoD-kIOWZET_9kp_4yNX6AAXn6PI",
            "cdo": "14x1y6611G-UAEGq92QEHRpreVkYnoUCw",
            "ce": "1QUUCVKA-fkiCHd3KT3zUWefaWnxzlZLu",
            "ceb": "1DJZE9RfaMoPNXHI73KBXAm4YSe-_YCUk",
            "ch": "1YzAfhmatkmTpkZbAcD6X83epCgzD5S2_",
            "cho": "1ciY0vF3c5a2mTOo_k32A2wMs0klK98Kb",  # empty
            "chr": "1EHaxz1UZHn7v2bbRzCLAhPsNtRzrG3Ae",
            "chy": "1nNWwMAJr1KNdz3bHf6uIn-thZCknlTeB",
            "ckb": "1llpaftcUSiXCZQZMdAqaJSrhwMdcf9IV",
            "co": "1ZP-8oWgMYfW7a6w6ygEFkKDGbN39QnDn",
            "cr": "1ST0xRicLAG4JdCZwGdaY-0pEXooQh7e6",
            "crh": "1Jmpq2XVYUR_XaXU5XNhtOMnz-qkpsgpE",
            "cs": "1Vydyze-jBkK_S1uV5ewV_Y6dbwhXr7lk",
            "csb": "1naUyF74lZPnnopXdOqf5Xor2kT4WoHfS",
            "cu": "1EN5dVTU6jc7YOYPCHq8EYUF31HlMUKs7",
            "cv": "1gEUAlqYSSDI4TrWCqP1LUq2n0X1XEjN3",
            "cy": "1q5g6NJE5GXf65Vc_P4BnUMHQ49Prz-J1",
            "da": "11onAGOLkkqrIwM784siWlg-cewa5WKm8",
            "de": "1f9nWvNkCCy6XWhd9uf4Dq-2--GzSaYAb",
            "diq": "1IkpJaVbEOuOs9qay_KG9rkxRghWZhWPm",
            "dsb": "1hlExWaMth-2eVIQ3i3siJSG-MN_7Z6MY",
            "dv": "1WpCrslO4I7TMb2uaKVQw4U2U8qMs5szi",
            "dz": "10WX52ePq2KfyGliwPvY_54hIjpzW6klV",
            "ee": "1tYEt3oN2KPzBSWrk9jpCqnW3J1KXdhjz",
            "el": "1cxq4NUYmHwWsEn5waYXfFSanlINXWLfM",
            "eml": "17FgGhPZqZNtzbxpTJOf-6nxEuI5oU4Vd",
            "en": "1mqxeCPjxqmO7e8utj1MQv1CICLFVvKa-",
            "eo": "1YeknLymGcqj44ug2yd4P7xQVpSK27HkK",
            "es": "1Dnx3MVR9r5cuoOgeew2gT8bDvWpOKxkU",
            "et": "1Qhb3kYlQnLefWmNimdN_Vykm4mWzbcWy",
            "eu": "1f613wH88UeITYyBSEMZByK-nRNMwLHTs",
            "ext": "1D0nLOZ3aolCM8TShIRyCgF3-_MhWXccN",
            "fa": "1QOG15HU8VfZvJUNKos024xI-OGm0zhEX",
            "ff": "1h5pVjxDYcq70bSus30oqi9KzDmezVNry",
            "fi": "1y3Kf6qYsSvL8_nSEwE1Y6Bf6ninaPvqa",
            "fiu-vro": "1oKUiqG19WgPd3CCl4FGudk5ATmtNfToR",
            "fj": "10xDMuqtoTJlJFp5ghbhKfNWRpLDK3W4d",
            "fo": "1RhjYqgtri1276Be1N9RrNitdBNkpzh0J",
            "fr": "1sK_T_-wzVPJYrnziNqWTriU52rEsXGjn",
            "frp": "1NUm8B2zClBcEa8dHLBb-ZgzEr8phcQyZ",
            "frr": "1FjNqbIUlOW1deJdB8WCuWjaZfUzKqujV",
            "fur": "1oqHZMK7WAV8oHoZLjGR0PfmO38wmR6XY",
            "fy": "1DvnU6iaTJc9bWedmDklHyx8nzKD1s3Ge",
            "ga": "1Ql6rh7absdYQ8l-3hj_MVKcEC3tHKeFB",
            "gag": "1zli-hOl2abuQ2wsDJU45qbb0xuvYwA3a",
            "gan": "1u2dOwy58y-GaS-tCPJS_i9VRDQIPXwCr",
            "gd": "1umsUpngJiwkLdGQbRqYpkgxZju9dWlRz",
            "gl": "141K2IbLjJfXwFTIf-kthmmG0YWdi8liE",
            "glk": "1ZDaxQ6ilXaoivo4_KllagabbvfOuiZ0c",
            "gn": "1hM4MuCaVnZqnL-w-0N-WcWag22ikVLtZ",
            "gom": "1BNOSw75tzPC0wEgLOCKbwu9wg9gcLOzs",
            "got": "1YSHYBtXc1WvUvMIHPz6HHgJvaXKulJUj",
            "gu": "1VdK-B2drqFwKg8KD23c3dKXY-cZgCMgd",
            "gv": "1XZFohYNbKszEFR-V-yDXxx40V41PV9Zm",
            "ha": "18ZG4tUU0owRtQA8Ey3Dl72ALjryEJWMC",
            "hak": "1QQe3WgrCWbvnVH42QXD7KX4kihHURB0Z",
            "haw": "1FLqlK-wpz4jy768XbQAtxd9PhC-9ciP7",
            "he": "18K-Erc2VOgtIdskaQq4D5A3XkVstDmfX",
            "hi": "1lBRapb5tjBqT176gD36K5yb_qsaFeu-k",
            "hif": "153MQ9Ga4NQ-CkK8UiJM3DjKOk09fhCOV",
            "ho": "1c1AoS7yq15iVkTEE-0f3x25NT4F202B8",  # empty
            "hr": "1wS-UtB3sGHuXJQQGR0F5lDegogsgoyif",
            "hsb": "1_3mMLzAE5OmXn2z64rW3OwWbo85Mirbd",
            "ht": "1BwCaF0nfdgkM7Yt7A7d7KyVk0BcuwPGk",
            "hu": "10AkDmTxUWNbOXuYLYZ-ZPbLAdGAGZZ8J",
            "hy": "1Mi2k2alJJquT1ybd3GC3QYDstSagaWdo",
            "hz": "1c1m_-Q92v0Di7Nez6VuaccrN19i8icKV",  # empty
            "ia": "1jPyqTmDuVhEhj89N606Cja5heJEbcMoM",
            "id": "1JWIvIh8fQoMQqk1rPvUThaskxnTs8tsf",
            "ie": "1TaKRlTtB8-Wqu4sfvx6JQKIugAlg0pV-",
            "ig": "15NFAf2Qx6BXSjv_Oun9_3QRBWNn49g86",
            "ii": "1qldGJkMOMKwY13DpcgbxQCbff0K982f9",  # empty
            "ik": "1VoSTou2ZlwVhply26ujowDz6gjwtxmny",
            "ilo": "1-xMuIT6GaM_YeHqgm1OamGkxYfBREiv3",
            "io": "19Zla0wsAcrZm2c0Pw5ghpp4rHjYs26Pp",
            "is": "11i-NCyqS6HbldIbYulsCgQGZFXR8hwoB",
            "it": "1HmjlOaQunHqL2Te7pIkuBWrnjlmdfYo_",
            "iu": "18jKm1S7Ls3l0_pHqQH8MycG3LhoC2pdX",
            "ja": "10dz8UxyK4RIacXE2HcGdrharmp5rwc3r",
            "jam": "1v99CXf9RnbF6aJo669YeTR6mQRTOLZ74",  # empty
            "jbo": "1_LmH9hc6FDGE3F7pyGB1fUEbSwuTYQdD",
            "jv": "1qiSu1uECCLl4IBZS27FBdJIBivkJ7GwE",
            "ka": "172UFuFRBX2V1aWeXlPSpu9TjS-3cxNaD",
            "kaa": "1kh6hMPUdqO-FIxRY6qaIBZothBURXxbY",
            "kab": "1oKjbZI6ZrrALCqnPCYgIjKNrKDA7ehcs",
            "kbd": "1jNbfrboPOwJmlXQBIv053d7n5WXpMRv7",
            "kg": "1iiu5z-sdJ2JLC4Ja9IgDxpRZklIb6nDx",
            "ki": "1GUtt0QI84c5McyLGGxoi5uwjHOq1d6G8",
            "kj": "1nSxXUSGDlXVCIPGlVpcakRc537MwuKZR",  # empty
            "kk": "1ryC3UN0myckc1awrWhhb6RIi17C0LCuS",
            "kl": "1gXtGtX9gcTXms1IExICnqZUHefrlcIFf",
            "km": "1DS5ATxvxyfn1iWvq2G6qmjZv9pv0T6hD",
            "kn": "1ZGLYMxbb5-29MNmuUfg2xFhYUbkJFMJJ",
            "ko": "12r8tIkTnwKhLJxy71qpIcoLrT6NNhQYm",
            "koi": "1EdG_wZ_Qk124EPAZw-w6rdEhYLsgcvIj",
            "kr": "19VNQtnBA-YL_avWuVeHQHxJZ9MZ04WPF",  # empty
            "krc": "1nReV4Mb7Wdj96czpO5regFbdBPu0zZ_y",
            "ks": "1kzh0Pgrv27WRMstR9MpU8mu7p60TcT-X",
            "ksh": "1iHJvrl2HeRaCumlrx3N7CPrHQ2KuLUkt",
            "ku": "1YqJog7Bkk0fHBCSTxJ9heeE-bfbkbkye",
            "kv": "1s91HI4eq8lQYlZwfrJAgaGlCyAtIhvIJ",
            "kw": "16TaIX2nRfqDp8n7zudd4bqf5abN49dvW",
            "ky": "17HPUKFdKWhUjuR1NOp5f3PQYfMlMCxCT",
            "la": "1NiQuBaUIFEERvVXo6CQLwosPraGyiRYw",
            "lad": "1PEmXCWLCqnjLBomMAYHeObM1AmVHtD08",
            "lb": "1nE4g10xoTU23idmDtOQ0w2QCuizZ6QH_",
            "lbe": "1KOm-AdRcCHfSc1-uYBxBA4GjxXjnIlE-",
            "lez": "1cJAXshrLlF1TZlPHJTpDwEvurIOsz4yR",
            "lg": "1Ur0y7iiEpWBgHECrIrT1OyIC8um_y4th",
            "li": "1TikIqfqcZlSDWhOae1JnjJiDko4nj4Dj",
            "lij": "1ro5ItUcF49iP3JdV82lhCQ07MtZn_VjW",
            "lmo": "1W4rhBy2Pi5SuYWyWbNotOVkVY3kYWS_O",
            "ln": "1bLSV6bWx0CgFm7ByKppZLpYCFL8EIAoD",
            "lo": "1C6SSLeKF3QirjZbAZAcpVX_AXYg_TJG3",
            "lrc": "1GUcS28MlJe_OjeQfS2AJ8uczpD8ut60e",
            "lt": "1gAG6TcMTmC128wWK0rCXRlCTsJY9wFQY",
            "ltg": "12ziP8t_fAAS9JqOCEC0kuJObEyuoiOjD",
            "lv": "1MPuAM04u-AtfybXdpHwCqUpFWbe-zD0_",
            "mai": "1d_nUewBkka2QGEmxCc9v3dTfvo7lPATH",
            "map-bms": "1wrNIE-mqp2xb3lrNdwADe6pb7f35NP6V",
            "mdf": "1BmMGUJy7afuKfhfTBMiKxM3D7FY-JrQ2",
            "mg": "105WaMhcWa-46tCztoj8npUyg0aH18nFL",
            "mh": "1Ej7n6yA1cF1cpD5XneftHtL33iHJwntT",
            "mhr": "1CCPIUaFkEYXiHO0HF8_w07UzVyWchrjS",
            "mi": "1F6au9xQjnF-aNBupGJ1PwaMMM6T_PgdQ",
            "min": "1tVK5SHiCy_DaZSDm3nZBgT5bgWThbJt_",
            "mk": "18NpudytGhSWq_LbmycTDw10cSftlSBGS",
            "ml": "1V73UE-EvcE-vV3V1RTvU4sak6QFcP91y",
            "mn": "14jRXicA87oXZOZllWqUjKBMetNpQEUUp",
            "mo": "1YsLGNMsJ7VsekhdcITQeolzOSK4NzE6U",
            "mr": "1vOr1AIHbgkhTO9Ol9Jx5Wh98Qdyh1QKI",
            "mrj": "1dW-YmEW8a9D5KyXz8ojSdIXWGekNzGzN",
            "ms": "1bs-_5WNRiZBjO-DtcNtkcIle-98homf_",
            "mt": "1L7aU3iGjm6SmPIU74k990qRgHFV9hrL0",
            "mus": "1_b7DcRqiKJFEFwp87cUecqf8A5BDbTIJ",  # empty
            "mwl": "1MfP0jba2jQfGVeJOLq26MjI6fYY7xTPu",
            "my": "16wsIGBhNVd2lC2p6n1X8rdMbiaemeiUM",
            "myv": "1KEqHmfx2pfU-a1tdI_7ZxMQAk5NJzJjB",
            "mzn": "1CflvmYEXZnWwpsBmIs2OvG-zDDvLEMDJ",
            "na": "1r0AVjee5wNnrcgJxQmVGPVKg5YWz1irz",
            "nah": "1fx6eu91NegyueZ1i0XaB07CKjUwjHN7H",
            "nap": "1bhT4sXCJvaTchCIV9mwLBtf3a7OprbVB",
            "nds-nl": "1UIFi8eOCuFYJXSAXZ9pCWwkQMlHaY4ye",
            "nds": "1FLgZIXUWa_vekDt4ndY0B5XL7FNLiulr",
            "ne": "1gEoCjSJmzjIH4kdHsbDZzD6ID4_78ekS",
            "new": "1_-p45Ny4w9UvGuhD8uRNSPPeaARYvESH",
            "ng": "11yxPdkmpmnijQUcnFHZ3xcOmLTYJmN_R",
            "nl": "1dqYXg3ilzVOSQ_tz_dF47elSIvSIhgqd",
            "nn": "1pDrtRhQ001z2WUNMWCZQU3RV_M0BqOmv",
            "no": "1zuT8MI96Ivpiu9mEVFNjwbiM8gJlSzY2",
            "nov": "1l38388Rln0NXsSARMZHmTmyfo5C0wYTd",
            "nrm": "10vxPq1Nci7Wpq4XOvx3dtqODskzjdxJQ",
            "nso": "1iaIV8qlT0RDnbeQlnxJ3RehsG3gU5ePK",
            "nv": "1oN31jT0w3wP9aGwAPz91pSdUytnd9B0g",
            "ny": "1eEKH_rUPC560bfEg11kp3kbe8qWm35IG",
            "oc": "1C01cW8G_j8US-DTrsmeal_ENHTtNWn-H",
            "olo": "1vbDwKZKqFq84dusr1SvDx5JbBcPanx9L",  # empty
            "om": "1q3h22VMbWg2kgVFm-OArR-E4y1yBQ1JX",
            "or": "1k8LwCE8nC7lq6neXDaS3zRn0KOrd9RnS",
            "os": "1u81KAB34aEQfet00dLMRIBJsfRwbDTij",
            "pa": "1JDEHL1VcLHBamgTPBom_Ryi8hk6PBpsu",
            "pag": "1k905VUWnRgY8kFb2P2431Kr4dZuolYGF",
            "pam": "1ssugGyJb8ipispC60B3I6kzMsri1WcvC",
            "pap": "1Za0wfwatxYoD7jGclmTtRoBP0uV_qImQ",
            "pcd": "1csJlKgtG04pdIYCUWhsCCZARKIGlEYPx",
            "pdc": "1Xnms4RXZKZ1BBQmQJEPokmkiweTpouUw",
            "pfl": "1tPQfHX7E0uKMdDSlwNw5aGmaS5bUK0rn",
            "pi": "16b-KxNxzbEuyoNSlI3bfe2YXmdSEsPFu",
            "pih": "1vwyihTnS8_PE5BNK7cTISmIBqGWvsVnF",
            "pl": "1fijjS0LbfpKcoPB5V8c8fH08T8AkXRp9",
            "pms": "12ySc7X9ajWWqMlBjyrPiEdc-qVBuIkbA",
            "pnb": "1RB3-wjluhTKbdTGCsk3nag1bM3m4wENb",
            "pnt": "1ZCUzms6fY4on_fW8uVgO7cEs9KHydHY_",
            "ps": "1WKl9Av6Sqz6aHKyUM5kIh90mzFzyVWH9",
            "pt": "13BX-_4_hcTUp59HDyczFDI32qUB94vUY",
            "qu": "1CB_C4ygtRoegkqgcqfXNHr8oQd-UcvDE",
            "rm": "1YRSGgWoxEqSojHXuBHJnY8vAHr1VgLu-",
            "rmy": "1uFcCyvOWBJWKFQxbkYSp373xUXVl4IgF",
            "rn": "1ekyyb2MvupYGY_E8_BhKvV664sLvW4aE",
            "ro": "1YfeNTSoxU-zJMnyQotLk5X8B_6nHryBu",
            "roa-rup": "150s4H4TdQ5nNYVC6j0E416TUAjBE85yy",
            "roa-tara": "1H6emfQsD_a5yohK4RMPQ-GrnHXqqVgr3",
            "ru": "11gP2s-SYcfS3j9MjPp5C3_nFeQB-8x86",
            "rue": "1OuSglZAndja1J5D5IUmdbt_niTTyEgYK",
            "rw": "1NuhHfi0-B-Xlr_BApijnxCw0WMEltttP",
            "sa": "1P2S3gL_zvKgXLKJJxg-Fb4z8XdlVpQik",
            "sah": "1qz0MpKckzUref2FX_FYiNzI2p4BDc5oR",
            "sc": "1oAYj_Fty4FUwjAOBEBaiZt_cY8dtpDfA",
            "scn": "1sDN9zHkXWYoHYx-DUu-GPvsUgB_IRa8S",
            "sco": "1i8W7KQPj6YZQLop89vZBSybJNgNsvXWR",
            "sd": "1vaNqfv3S8Gl5pQmig3vwWQ3cqRTsXmMR",
            "se": "1RT9xhn0Vl90zjWYDTw5V1L_u1Oh16tpP",
            "sg": "1iIh2oXD2Szz_AygUvTt3_ZK8a3RYEGZ_",
            "sh": "1qPwLiAm6t4__G-zVEOrBgYx6VRmgDgiS",
            "si": "1G5ryceID0TP6SAO42e-HAbIlCvYmnUN7",
            "simple": "1FVV49o_RlK6M5Iw_7zeJOEDQoTa5zSbq",
            "sk": "11mkYvbmAWKTInj6t4Ma8BUPxoR5o6irL",
            "sl": "1fsIZS5LgMzMzZ6T7ogStyj-ILEZIBRvO",
            "sm": "1yefECpKX_Y4R7G2tggIxvc_BvJfOAz-t",
            "sn": "1fYeCjMPvRAv94kvZjiKI-ktIDLkbv0Ve",
            "so": "1Uc-eSZnJb36SgeTvRU3GirXZOlGD_NB6",
            "sq": "11u-53n71O_yjpwRiCQSwgL7N2w72ZptX",
            "sr": "1PGLGlQi8Q0Eac6dib-uuCJAAHK6SF5Pz",
            "srn": "1JKiL3TSXqK1-KhPfAwMK0uqw90WEzg7M",
            "ss": "1e0quNEsA1dn57-IbincF4D82dRWgzQlp",
            "st": "1ny-FBzpBqIDgv6jMcsoFev3Ih65FNZFO",
            "stq": "15Fx32ROy2IM6lSqAPUykkr3CITR6Xd7v",
            "su": "1C0FJum7bYZpnyptBvfAgwJb0TX2hggtO",
            "sv": "1YyqzOSXzK5yrAou9zeTDWH_7s569mDcz",
            "sw": "1_bNTj6T8eXlNAIuHaveleWlHB_22alJs",
            "szl": "1_dXEip1snK4CPVGqH8x7lF5O-6FdCNFW",
            "ta": "1ZFTONsxGtSnC9QB6RpWSvgD_MbZwIhHH",
            "tcy": "15R6u7KQs1vmDSm_aSDrQMJ3Q6q3Be0r7",  # empty
            "te": "11Sx-pBAPeZOXGyv48UNSVMD0AH7uf4YN",
            "tet": "11mr2MYLcv9pz7mHhGGNi5iNCOVErYeOt",
            "tg": "16ttF7HWqM9Cnj4qmgf3ZfNniiOJfZ52w",
            "th": "14xhIt-xr5n9nMuvcwayCGM1-zBCFZquW",
            "ti": "123q5e9MStMShp8eESGtHdSBGLDrCKfJU",
            "tk": "1X-JNInt34BNGhg8A8Peyjw2WjsALdXsD",
            "tl": "1WkQHbWd9cqtTnSHAv0DpUThaBnzeSPTJ",
            "tn": "1fHfQHetZn8-fLuRZEu-cvs-kQYwPvjyL",
            "to": "1cHOLaczYJ8h-OqQgxeoH9vMG3izg6muT",
            "tpi": "1YsRjxVu6NYOrXRb8oqMO9FPaicelFEcu",
            "tr": "1J1Zy02IxvtCK0d1Ba2h_Ulit1mVb9UIX",
            "ts": "1pIcfAt3KmtmDkyhOl-SMSeoM8aP8bOpl",
            "tt": "1vsfzCjj-_bMOn5jBai41TF5GjKJM_Ius",
            "tum": "1NWcg65daI2Bt0awyEgU6apUDbBmiqCus",
            "tw": "1WCYKZIqS7AagS76QFSfbteiOgFNBvNne",
            "ty": "1DIqaP1l-N9VXTNokrlr6EuPMGE765o4h",
            "tyv": "1F3qa05OYLBcjT1lXMurAJFDXP_EesCvM",
            "udm": "1T0YMTAPLOk768sstnewy5Jxgx2RPu3Rb",
            "ug": "1fjezvqlysyZhiQMZdazqLGgk72PqtXAw",
            "uk": "1UMJCHtzxkfLDBJE7NtfN5FeMrnnUVwoh",
            "ur": "1WNaD2TuHvdsF-z0k_emQYchwoQQDFmRk",
            "uz": "11wrG2FSTpRJc2jb5MhgvxjkVDYhT8M-l",
            "ve": "1PucJ7pJ4CXGEXZ5p_WleZDs2usNz74to",
            "vec": "1cAVjm_y3ehNteDQIYz9yyoq1EKkqOXZ0",
            "vep": "1K_eqV7O6C7KPJWZtmIuzFMKAagj-0O85",
            "vi": "1yQ6nhm1BmG9lD4_NaG1hE5VV6biEaV5f",
            "vls": "1bpQQW6pKHruKJJaKtuggH5rReMXyeVXp",
            "vo": "1D80QRdTpe7H4mHFKpfugscsjX71kiMJN",
            "wa": "1m4B81QYbf74htpInDU5p7d0n0ot8WLPZ",
            "war": "1EC3jsHtu22tHBv6jX_I4rupC5RwV3OYd",
            "wo": "1vChyqNNLu5xYHdyHpACwwpw4l3ptiKlo",
            "wuu": "1_EIn02xCUBcwLOwYnA-lScjS2Lh2ECw6",
            "xal": "19bKXsL1D2UesbB50JPyc9TpG1lNc2POt",
            "xh": "1pPVcxBG3xsCzEnUzlohc_p89gQ9dSJB3",
            "xmf": "1SM9llku6I_ZuZz05mOBuL2lx-KQXvehr",
            "yi": "1WNWr1oV-Nl7c1Jv8x_MiAj2vxRtyQawu",
            "yo": "1yNVOwMOWeglbOcRoZzgd4uwlN5JMynnY",
            "za": "1i7pg162cD_iU9h8dgtI2An8QCcbzUAjB",
            "zea": "1EWSkiSkPBfbyjWjZK0VuKdpqFnFOpXXQ",
            "zh-classical": "1uUKZamNp08KA7s7794sKPOqPALvo_btl",
            "zh-min-nan": "1oSgz3YBXLGUgI7kl-uMOC_ww6L0FNFmp",
            "zh-yue": "1zhwlUeeiyOAU1QqwqZ8n91yXIRPFA7UE",
            "zh": "1LZ96GUhkVHQU-aj2C3WOrtffOp0U3Z7f",
            "zu": "1FyXl_UK1737XB3drqQFhGXiJrJckiB1W",
        }
        return languages_ids[language]
class NER_MULTI_XTREME(MultiCorpus):
    def __init__(
        self,
        languages: Union[str, List[str]] = "en",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        **corpusargs,
    ) -> None:
        """Xtreme corpus for cross-lingual NER consisting of datasets of a total of 40 languages.

        The data comes from the google research work XTREME https://github.com/google-research/xtreme.
        The data is derived from the wikiann dataset https://elisa-ie.github.io/wikiann/ (license: https://opendatacommons.org/licenses/by/)

        Parameters
        ----------
        languages : Union[str, List[str]], optional
            Specify the languages you want to load. Provide an empty list or string to select all languages.
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this to point to a different folder but typically this should not be necessary.
        in_memory : bool, optional
            Specify that the dataset should be loaded in memory, which speeds up the training process but takes increases the RAM usage significantly.
        """
        # if no languages are given as argument all languages used in XTREME will be loaded
        if not languages:
            languages = [
                "af",
                "ar",
                "bg",
                "bn",
                "de",
                "el",
                "en",
                "es",
                "et",
                "eu",
                "fa",
                "fi",
                "fr",
                "he",
                "hi",
                "hu",
                "id",
                "it",
                "ja",
                "jv",
                "ka",
                "kk",
                "ko",
                "ml",
                "mr",
                "ms",
                "my",
                "nl",
                "pt",
                "ru",
                "sw",
                "ta",
                "te",
                "th",
                "tl",
                "tr",
                "ur",
                "vi",
                "yo",
                "zh",
            ]

        # if only one language is given as a plain string, wrap it in a list
        # (isinstance instead of type comparison: also accepts str subclasses)
        if isinstance(languages, str):
            languages = [languages]

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # For each language in languages, the file is downloaded if not existent.
        # Then a ColumnCorpus of that data is created and collected; the list of
        # corpora is finally handed to the MultiCorpus.
        corpora: List[Corpus] = []

        hu_path = "https://nlp.informatik.hu-berlin.de/resources/datasets/panx_dataset"

        # download data if necessary
        for language in languages:
            language_folder = data_folder / language

            # if language not downloaded yet, download it
            if not language_folder.exists():
                file_name = language + ".tar.gz"
                # create folder
                os.makedirs(language_folder)

                # download from HU Server
                temp_file = cached_path(
                    hu_path + "/" + file_name,
                    Path("datasets") / dataset_name / language,
                )

                # unzip; the context manager guarantees the archive is closed
                # even if extraction raises (the original leaked the handle)
                log.info("Extracting data...")
                import tarfile

                with tarfile.open(str(temp_file), "r:gz") as tar:
                    for part in ["train", "test", "dev"]:
                        tar.extract(part, str(language_folder))
                log.info("...done.")

                # transform data into required format
                log.info("Processing dataset...")
                for part in ["train", "test", "dev"]:
                    self._xtreme_to_simple_ner_annotation(str(language_folder / part))
                log.info("...done.")

            # initialize comlumncorpus and add it to list
            log.info(f"Reading data for language {language}")
            corp = ColumnCorpus(
                data_folder=language_folder,
                column_format=columns,
                in_memory=in_memory,
                **corpusargs,
            )
            corpora.append(corp)

        super().__init__(
            corpora,
            name="xtreme",
        )

    def _xtreme_to_simple_ner_annotation(self, data_file: Union[str, Path]):
        """Strip the "lang:" prefix from the token column, rewriting *data_file* in place.

        Blank lines (sentence separators) are preserved unchanged.
        """
        with open(data_file, encoding="utf-8") as f:
            lines = f.readlines()
        with open(data_file, "w", encoding="utf-8") as f:
            for line in lines:
                if line == "\n":
                    f.write(line)
                else:
                    fields = line.split()
                    # fields[0] is "lang:token"; keep only the token and the tag
                    f.write(fields[0].split(":", 1)[1] + " " + fields[1] + "\n")
class NER_MULTI_WIKINER(MultiCorpus):
    def __init__(
        self,
        languages: Union[str, List[str]] = "en",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the multi-language WikiNER corpus; each requested language is downloaded on first use.

        :param languages: Language abbreviation ("en", "de", ...) or a list of abbreviations.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # a single language abbreviation may be passed as a plain string
        if type(languages) == str:
            languages = [languages]

        # three columns: token, POS tag, NER tag
        columns = {0: "text", 1: "pos", 2: "ner"}

        # corpus folder derived from the class name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        corpora: List[Corpus] = []
        for lang in languages:
            lang_folder = data_folder / lang

            # fetch and unpack the data for this language if needed
            self._download_wikiner(lang, str(lang_folder))

            log.info(f"Read data for language {lang}")
            corpora.append(
                ColumnCorpus(
                    data_folder=lang_folder,
                    column_format=columns,
                    in_memory=in_memory,
                    **corpusargs,
                )
            )

        super().__init__(
            corpora,
            name="wikiner",
        )

    def _download_wikiner(self, language_code: str, dataset_name: str):
        """Download the WikiNER dump for *language_code* and convert it to a column-format train file (once)."""
        wikiner_path = "https://raw.githubusercontent.com/dice-group/FOX/master/input/Wikiner/"
        lc = language_code

        # nothing to do if the converted train file already exists
        if (flair.cache_root / "datasets" / dataset_name / f"aij-wikiner-{lc}-wp3.train").is_file():
            return

        cached_path(
            f"{wikiner_path}aij-wikiner-{lc}-wp3.bz2",
            Path("datasets") / dataset_name,
        )

        import bz2

        # unpack and write out in CoNLL column-like format
        archive = bz2.BZ2File(
            flair.cache_root / "datasets" / dataset_name / f"aij-wikiner-{lc}-wp3.bz2",
            "rb",
        )
        with archive as compressed, open(
            flair.cache_root / "datasets" / dataset_name / f"aij-wikiner-{lc}-wp3.train",
            "w",
            encoding="utf-8",
        ) as out:
            for raw_line in compressed:
                # each space-separated entry is "token|pos|ner"; emit it as tab-separated columns
                for token in raw_line.decode("utf-8").split(" "):
                    out.write("\t".join(token.split("|")) + "\n")
class NER_SWEDISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the NER_SWEDISH corpus for Swedish.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download data if necessary
        ner_spraakbanken_path = "https://raw.githubusercontent.com/klintan/swedish-ner-corpus/master/"
        cached_path(f"{ner_spraakbanken_path}test_corpus.txt", Path("datasets") / dataset_name)
        cached_path(f"{ner_spraakbanken_path}train_corpus.txt", Path("datasets") / dataset_name)

        # data is not in IOB2 format. Thus we transform it to IOB2 (rewrites files in place)
        self._add_IOB2_tags(data_file=Path(data_folder / "test_corpus.txt"))
        self._add_IOB2_tags(data_file=Path(data_folder / "train_corpus.txt"))

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )

    def _add_IOB2_tags(self, data_file: Union[str, Path], encoding: str = "utf8"):
        """Function that adds IOB2 tags if only chunk names are provided.

        e.g. words are tagged PER instead of B-PER or I-PER. Replaces '0' with 'O' as the
        no-chunk tag since ColumnCorpus expects the letter 'O'. Additionally it removes
        lines with no tags in the data file and can also be used if the data is only
        partially IOB tagged.

        Parameters
        ----------
        data_file : Union[str, Path]
            Path to the data file.
        encoding : str, optional
            Encoding used in open function. The default is "utf8".
        """
        with open(file=data_file, encoding=encoding) as f:
            lines = f.readlines()
        # rewrite the same file in place with IOB2-normalized tags
        with open(file=data_file, mode="w", encoding=encoding) as f:
            pred = "O"  # remembers tag of the preceding line
            for line in lines:
                line_list = line.split()
                if len(line_list) == 2:  # word with tag
                    word = line_list[0]
                    tag = line_list[1]
                    if tag in ["0", "O"]:  # no chunk
                        f.write(word + " O\n")
                        pred = "O"
                    elif "-" not in tag:  # no IOB tags
                        if pred == "O":  # found a new chunk
                            f.write(word + " B-" + tag + "\n")
                            pred = tag
                        else:  # found further part of chunk or new chunk directly after old chunk
                            if pred == tag:
                                f.write(word + " I-" + tag + "\n")
                            else:
                                f.write(word + " B-" + tag + "\n")
                            pred = tag
                    else:  # line already has IOB tag (tag contains '-')
                        f.write(line)
                        # remember only the chunk name, i.e. the part after the first dash
                        pred = tag.split("-")[1]
                elif len(line_list) == 0:  # empty line
                    f.write("\n")
                    pred = "O"
                # NOTE(review): lines with exactly one token fall through both branches and
                # are silently dropped — presumably untagged words; confirm this is intended.
class NER_TURKU(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Finnish TurkuNER corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # two columns: token text and NER tag
        columns = {0: "text", 1: "ner"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits from the official repository if not cached yet
        conll_path = "https://raw.githubusercontent.com/TurkuNLP/turku-ner-corpus/master/data/conll"
        split_files = {split: f"{split}.tsv" for split in ("dev", "test", "train")}
        for file_name in split_files.values():
            cached_path(f"{conll_path}/{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            dev_file=split_files["dev"],
            test_file=split_files["test"],
            train_file=split_files["train"],
            column_delimiter="\t",
            encoding="latin-1",
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            **corpusargs,
        )
class NER_UKRAINIAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Ukrainian NER corpus from the lang-uk project.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # two columns: token text and NER tag
        columns = {0: "text", 1: "ner"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch the fixed train/test split from the lang-uk repository if not cached yet
        conll_path = "https://raw.githubusercontent.com/lang-uk/flair-ner/master/fixed-split"
        split_files = {"test": "test.iob", "train": "train.iob"}
        for file_name in split_files.values():
            cached_path(f"{conll_path}/{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            test_file=split_files["test"],
            train_file=split_files["train"],
            column_delimiter=" ",
            encoding="utf-8",
            in_memory=in_memory,
            **corpusargs,
        )
class KEYPHRASE_SEMEVAL2017(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the SemEval-2017 keyphrase-extraction corpus.

        Data is downloaded automatically on first use from the midas-research
        keyphrase-extraction-as-sequence-labeling repository.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # two columns: token text and keyword tag
        columns = {0: "text", 1: "keyword"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        semeval2017_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/SemEval-2017"
        for split_file in ("train.txt", "test.txt", "dev.txt"):
            cached_path(f"{semeval2017_path}/{split_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class KEYPHRASE_INSPEC(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Inspec keyphrase-extraction corpus.

        Data is downloaded automatically on first use from the midas-research
        keyphrase-extraction-as-sequence-labeling repository.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # two columns: token text and keyword tag
        columns = {0: "text", 1: "keyword"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        inspec_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/Inspec"
        cache_dir = Path("datasets") / dataset_name
        for split_file in ("train.txt", "test.txt"):
            cached_path(f"{inspec_path}/{split_file}", cache_dir)

        # the upstream repository calls its development split "valid.txt";
        # fetch it once and rename to the train - test - dev convention
        if "dev.txt" not in os.listdir(data_folder):
            cached_path(f"{inspec_path}/valid.txt", cache_dir)
            os.rename(data_folder / "valid.txt", data_folder / "dev.txt")

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class KEYPHRASE_SEMEVAL2010(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the SemEval-2010 keyphrase-extraction corpus.

        Data is downloaded automatically on first use from the midas-research
        keyphrase-extraction-as-sequence-labeling repository.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # two columns: token text and keyword tag
        columns = {0: "text", 1: "keyword"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # only train and test splits exist for this dataset
        semeval2010_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/processed_semeval-2010"
        for split_file in ("train.txt", "test.txt"):
            cached_path(f"{semeval2010_path}/{split_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class UP_CHINESE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Chinese dataset from the Universal Propositions Bank.

        The dataset is downloaded from https://github.com/System-T/UniversalPropositions

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame label in column 9
        columns = {1: "text", 9: "frame"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits from the Universal Propositions repository if not cached yet
        up_zh_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Chinese/"
        split_files = {split: f"zh-up-{split}.conllu" for split in ("train", "dev", "test")}
        for file_name in split_files.values():
            cached_path(f"{up_zh_path}{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file=split_files["train"],
            test_file=split_files["test"],
            dev_file=split_files["dev"],
            in_memory=in_memory,
            document_separator_token="-DOCSTART-" if document_as_sequence else None,
            comment_symbol="#",
            **corpusargs,
        )
class UP_ENGLISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the English dataset from the Universal Propositions Bank.

        The dataset is downloaded from https://github.com/System-T/UniversalPropositions

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1; unlike the other UP corpora the frame label is in column 10
        columns = {1: "text", 10: "frame"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits (built on the EWT treebank) if not cached yet
        up_en_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_English-EWT/"
        split_files = {split: f"en_ewt-up-{split}.conllu" for split in ("train", "dev", "test")}
        for file_name in split_files.values():
            cached_path(f"{up_en_path}{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file=split_files["train"],
            test_file=split_files["test"],
            dev_file=split_files["dev"],
            in_memory=in_memory,
            document_separator_token="-DOCSTART-" if document_as_sequence else None,
            comment_symbol="#",
            label_name_map={"_": "O"},
            **corpusargs,
        )
class UP_FRENCH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the French dataset from the Universal Propositions Bank.

        The dataset is downloaded from https://github.com/System-T/UniversalPropositions

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame label in column 9
        columns = {1: "text", 9: "frame"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits from the Universal Propositions repository if not cached yet
        up_fr_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_French/"
        split_files = {split: f"fr-up-{split}.conllu" for split in ("train", "dev", "test")}
        for file_name in split_files.values():
            cached_path(f"{up_fr_path}{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file=split_files["train"],
            test_file=split_files["test"],
            dev_file=split_files["dev"],
            in_memory=in_memory,
            document_separator_token="-DOCSTART-" if document_as_sequence else None,
            comment_symbol="#",
            **corpusargs,
        )
class UP_FINNISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Finnish dataset from the Universal Propositions Bank.

        The dataset is downloaded from https://github.com/System-T/UniversalPropositions

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame label in column 9
        columns = {1: "text", 9: "frame"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits from the Universal Propositions repository if not cached yet
        up_fi_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Finnish/"
        split_files = {split: f"fi-up-{split}.conllu" for split in ("train", "dev", "test")}
        for file_name in split_files.values():
            cached_path(f"{up_fi_path}{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file=split_files["train"],
            test_file=split_files["test"],
            dev_file=split_files["dev"],
            in_memory=in_memory,
            document_separator_token="-DOCSTART-" if document_as_sequence else None,
            comment_symbol="#",
            **corpusargs,
        )
class UP_GERMAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the German dataset from the Universal Propositions Bank.

        The dataset is downloaded from https://github.com/System-T/UniversalPropositions

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame label in column 9
        columns = {1: "text", 9: "frame"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits from the Universal Propositions repository if not cached yet
        up_de_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_German/"
        split_files = {split: f"de-up-{split}.conllu" for split in ("train", "dev", "test")}
        for file_name in split_files.values():
            cached_path(f"{up_de_path}{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file=split_files["train"],
            test_file=split_files["test"],
            dev_file=split_files["dev"],
            in_memory=in_memory,
            document_separator_token="-DOCSTART-" if document_as_sequence else None,
            comment_symbol="#",
            **corpusargs,
        )
class UP_ITALIAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Italian dataset from the Universal Propositions Bank.

        The dataset is downloaded from https://github.com/System-T/UniversalPropositions

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame label in column 9
        columns = {1: "text", 9: "frame"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits from the Universal Propositions repository if not cached yet
        up_it_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Italian/"
        split_files = {split: f"it-up-{split}.conllu" for split in ("train", "dev", "test")}
        for file_name in split_files.values():
            cached_path(f"{up_it_path}{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file=split_files["train"],
            test_file=split_files["test"],
            dev_file=split_files["dev"],
            in_memory=in_memory,
            document_separator_token="-DOCSTART-" if document_as_sequence else None,
            comment_symbol="#",
            **corpusargs,
        )
class UP_SPANISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Spanish dataset from the Universal Propositions Bank.

        The dataset is downloaded from https://github.com/System-T/UniversalPropositions

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame label in column 9
        columns = {1: "text", 9: "frame"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits from the Universal Propositions repository if not cached yet
        up_es_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Spanish/"
        split_files = {split: f"es-up-{split}.conllu" for split in ("train", "dev", "test")}
        for file_name in split_files.values():
            cached_path(f"{up_es_path}{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file=split_files["train"],
            test_file=split_files["test"],
            dev_file=split_files["dev"],
            in_memory=in_memory,
            document_separator_token="-DOCSTART-" if document_as_sequence else None,
            comment_symbol="#",
            **corpusargs,
        )
class UP_SPANISH_ANCORA(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Spanish AnCora dataset from the Universal Propositions Bank.

        The dataset is downloaded from https://github.com/System-T/UniversalPropositions

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame label in column 9
        columns = {1: "text", 9: "frame"}

        # cache folder is named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits (built on the AnCora treebank) if not cached yet
        up_es_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Spanish-AnCora/"
        split_files = {split: f"es_ancora-up-{split}.conllu" for split in ("train", "dev", "test")}
        for file_name in split_files.values():
            cached_path(f"{up_es_path}{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file=split_files["train"],
            test_file=split_files["test"],
            dev_file=split_files["dev"],
            in_memory=in_memory,
            document_separator_token="-DOCSTART-" if document_as_sequence else None,
            comment_symbol="#",
            **corpusargs,
        )
class NER_HIPE_2022(ColumnCorpus):
    @staticmethod
    def _prepare_corpus(
        file_in: Path, file_out: Path, eos_marker: str, document_separator: str, add_document_separator: bool
    ) -> None:
        """Rewrite one raw HIPE-2022 TSV split into a ColumnCorpus-readable file.

        Strips each line, inserts a blank line after every sentence-end marker, and
        optionally writes a ``-DOCSTART-`` marker before each document boundary.

        :param file_in: Raw downloaded split file.
        :param file_out: Destination file to write.
        :param eos_marker: Substring that marks the end of a sentence (e.g. "EndOfSentence").
        :param document_separator: Line prefix that marks a new document in the raw file.
        :param add_document_separator: If True, emit "-DOCSTART-" markers at document boundaries.
        """
        with open(file_in, encoding="utf-8") as f_p:
            lines = f_p.readlines()
        with open(file_out, "w", encoding="utf-8") as f_out:
            # Add missing newline after header
            f_out.write(lines[0] + "\n")
            for line in lines[1:]:
                if line.startswith(" \t"):
                    # Workaround for empty tokens
                    continue
                line = line.strip()
                # Add "real" document marker
                if add_document_separator and line.startswith(document_separator):
                    f_out.write("-DOCSTART- O\n\n")
                f_out.write(line + "\n")
                # blank line after each sentence so ColumnCorpus sees sentence boundaries
                if eos_marker in line:
                    f_out.write("\n")

    def __init__(
        self,
        dataset_name: str,
        language: str,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        version: str = "v2.1",
        branch_name: str = "main",
        dev_split_name: str = "dev",
        add_document_separator: bool = False,
        sample_missing_splits: bool = False,
        preproc_fn=None,
        **corpusargs,
    ) -> None:
        """Initialize the CLEF-HIPE 2022 NER dataset.

        The first time you call this constructor it will automatically
        download the specified dataset (by given a language).
        :dataset_name: Supported datasets are: ajmc, hipe2020, letemps, newseye, sonar and topres19th.
        :language: Language for a supported dataset.
        :base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :in_memory: If True, keeps dataset in memory giving speedups in training.
        :version: Version of CLEF-HIPE dataset. Currently only v1.0 is supported and available.
        :branch_name: Defines git branch name of HIPE data repository (main by default).
        :dev_split_name: Defines default name of development split (dev by default). Only the NewsEye dataset has
        currently two development splits: dev and dev2.
        :add_document_separator: If True, a special document separator will be introduced. This is highly
        recommended when using our FLERT approach.
        :sample_missing_splits: If True, data is automatically sampled when certain data splits are None.
        :preproc_fn: Function that is used for dataset preprocessing. If None, default preprocessing will be performed.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # Dataset split mapping: version -> dataset -> language -> available splits
        hipe_available_splits = {
            "v1.0": {
                "ajmc": {"de": ["sample"], "en": ["sample"]},
                "hipe2020": {"de": ["train", "dev"], "en": ["dev"], "fr": ["train", "dev"]},
                "letemps": {"fr": ["train", "dev"]},
                "newseye": {
                    "de": ["train", "dev", "dev2"],
                    "fi": ["train", "dev", "dev2"],
                    "fr": ["train", "dev", "dev2"],
                    "sv": ["train", "dev", "dev2"],
                },
                "sonar": {"de": ["dev"]},
                "topres19th": {"en": ["train", "dev"]},
            }
        }

        # v2.0 only adds new language and splits for AJMC dataset
        hipe_available_splits["v2.0"] = copy.deepcopy(hipe_available_splits["v1.0"])
        hipe_available_splits["v2.0"]["ajmc"] = {"de": ["train", "dev"], "en": ["train", "dev"], "fr": ["train", "dev"]}

        hipe_available_splits["v2.1"] = copy.deepcopy(hipe_available_splits["v2.0"])
        for dataset_name_values in hipe_available_splits["v2.1"].values():
            for splits in dataset_name_values.values():
                splits.append("test")  # test datasets are only available for >= v2.1

        eos_marker = "EndOfSentence"
        document_separator = "# hipe2022:document_id"

        # Special document marker for sample splits in AJMC dataset
        if f"{dataset_name}" == "ajmc":
            document_separator = "# hipe2022:original_source"

        columns = {0: "text", 1: "ner"}

        dataset_base = self.__class__.__name__.lower()
        data_folder = base_path / dataset_base / version / dataset_name / language

        data_url = (
            f"https://github.com/hipe-eval/HIPE-2022-data/raw/{branch_name}/data/{version}/{dataset_name}/{language}"
        )

        # download every available raw split into the "original" subfolder
        dataset_splits = hipe_available_splits[version][dataset_name][language]
        for split in dataset_splits:
            cached_path(
                f"{data_url}/HIPE-2022-{version}-{dataset_name}-{split}-{language}.tsv", data_folder / "original"
            )

        train_file = "train.txt" if "train" in dataset_splits else None
        dev_file = f"{dev_split_name}.txt" if "sample" not in dataset_splits else "sample.txt"
        test_file = "test.txt" if "test" in dataset_splits else None

        new_data_folder = data_folder

        if add_document_separator:
            new_data_folder = new_data_folder / "with_doc_seperator"
            new_data_folder.mkdir(parents=True, exist_ok=True)

        self.preproc_fn = preproc_fn if preproc_fn else self._prepare_corpus

        if not all(  # Only reprocess if some files are not there yet
            split_path.exists()
            for split_path in [new_data_folder / f"{split_file}.txt" for split_file in dataset_splits]
        ):
            for split in dataset_splits:
                original_filename = f"HIPE-2022-{version}-{dataset_name}-{split}-{language}.tsv"

                self.preproc_fn(
                    data_folder / "original" / original_filename,
                    new_data_folder / f"{split}.txt",
                    eos_marker,
                    document_separator,
                    add_document_separator,
                )

        super().__init__(
            new_data_folder,
            columns,
            train_file=train_file,
            dev_file=dev_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            skip_first_line=True,
            column_delimiter="\t",
            comment_symbol="# ",
            sample_missing_splits=sample_missing_splits,
            **corpusargs,
        )
class NER_ICDAR_EUROPEANA(ColumnCorpus):
    def __init__(
        self,
        language: str,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the ICDAR Europeana NER dataset.

        The dataset is based on the French and Dutch Europeana NER corpora
        from the Europeana Newspapers NER dataset (https://lab.kb.nl/dataset/europeana-newspapers-ner), with additional
        preprocessing steps being performed (sentence splitting, punctuation normalizing, training/development/test splits).
        The resulting dataset is released in the "Data Centric Domain Adaptation for Historical Text with OCR Errors" ICDAR paper
        by Luisa März, Stefan Schweter, Nina Poerner, Benjamin Roth and Hinrich Schütze.

        :param language: Language for a supported dataset. Supported languages are "fr" (French) and "nl" (Dutch).
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
        :raises Exception: If ``language`` is not one of the supported languages.
        """
        supported_languages = ["fr", "nl"]

        if language not in supported_languages:
            log.error(f"Language '{language}' is not in list of supported languages!")
            log.error(f"Supported are '{supported_languages}'!")
            # raise with a message so callers see why construction failed (was a bare Exception)
            raise Exception(f"Language '{language}' is not supported; supported languages are {supported_languages}")

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name / language

        # download data if necessary
        github_path = "https://raw.githubusercontent.com/stefan-it/historic-domain-adaptation-icdar/main/data"

        for split in ["train", "dev", "test"]:
            cached_path(f"{github_path}/{language}/{split}.txt", data_folder)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            train_file="train.txt",
            dev_file="dev.txt",
            test_file="test.txt",
            comment_symbol="# ",
            column_delimiter="\t",
            **corpusargs,
        )
class NER_NERMUD(MultiCorpus):
    def __init__(
        self,
        domains: Union[str, List[str]] = "all",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the NERMuD 2023 dataset.

        NERMuD is a task presented at EVALITA 2023 consisting in the extraction and classification
        of named-entities in a document, such as persons, organizations, and locations. NERMuD 2023 will include two different sub-tasks:

        - Domain-agnostic classification (DAC). Participants will be asked to select and classify entities among three categories
        (person, organization, location) in different types of texts (news, fiction, political speeches) using one single general model.

        - Domain-specific classification (DSC). Participants will be asked to deploy a different model for each of the above types,
        trying to increase the accuracy for each considered type.

        :param domains: Domains to be used. Supported are "WN" (Wikinews), "FIC" (fiction), "ADG" (De Gasperi subset) and "all".
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
        :raises Exception: If a requested domain is not supported.
        """
        supported_domains = ["WN", "FIC", "ADG"]

        # "all" selects every supported domain; a single domain may also be given as a plain string
        if isinstance(domains, str):
            domains = supported_domains if domains == "all" else [domains]

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        corpora: List[Corpus] = []

        github_path = "https://raw.githubusercontent.com/dhfbk/KIND/main/evalita-2023"

        for domain in domains:
            if domain not in supported_domains:
                log.error(f"Domain '{domain}' is not in list of supported domains!")
                log.error(f"Supported are '{supported_domains}'!")
                # raise with a message so callers see why construction failed (was a bare Exception)
                raise Exception(f"Domain '{domain}' is not supported; supported domains are {supported_domains}")

            domain_folder = data_folder / domain.lower()

            for split in ["train", "dev"]:
                cached_path(f"{github_path}/{domain}_{split}.tsv", domain_folder)

            corpus = ColumnCorpus(
                data_folder=domain_folder,
                train_file=f"{domain}_train.tsv",
                dev_file=f"{domain}_dev.tsv",
                test_file=None,
                column_format=columns,
                in_memory=in_memory,
                sample_missing_splits=False,  # No test data is available, so do not shrink dev data for shared task preparation!
                **corpusargs,
            )
            corpora.append(corpus)

        super().__init__(
            corpora,
            sample_missing_splits=False,
            name="nermud",
        )
| 197,364 | 40.160584 | 192 | py |
flair | flair-master/flair/datasets/document_classification.py | import csv
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Union
import flair
from flair.data import (
Corpus,
DataPair,
FlairDataset,
Sentence,
Tokenizer,
_iter_dataset,
)
from flair.datasets.base import find_train_dev_test_files
from flair.file_utils import cached_path, unpack_file, unzip_file
from flair.tokenization import SegtokTokenizer, SpaceTokenizer
log = logging.getLogger("flair")
class ClassificationCorpus(Corpus):
    """A classification corpus from FastText-formatted text files."""

    def __init__(
        self,
        data_folder: Union[str, Path],
        label_type: str = "class",
        train_file=None,
        test_file=None,
        dev_file=None,
        truncate_to_max_tokens: int = -1,
        truncate_to_max_chars: int = -1,
        filter_if_longer_than: int = -1,
        tokenizer: Union[bool, Tokenizer] = SegtokTokenizer(),
        memory_mode: str = "partial",
        label_name_map: Optional[Dict[str, str]] = None,
        skip_labels: Optional[List[str]] = None,
        allow_examples_without_labels=False,
        sample_missing_splits: bool = True,
        encoding: str = "utf-8",
    ) -> None:
        """Instantiates a Corpus from text classification-formatted task data.

        :param data_folder: base folder with the task data
        :param label_type: name of the label
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file, if None, dev data is sampled from train
        :param truncate_to_max_tokens: If set, truncates each Sentence to a maximum number of tokens
        :param truncate_to_max_chars: If set, truncates each Sentence to a maximum number of chars
        :param filter_if_longer_than: If set, filters documents that are longer that the specified number of tokens.
        :param tokenizer: Tokenizer for dataset, default is SegtokTokenizer
        :param memory_mode: Set to what degree to keep corpus in memory ('full', 'partial' or 'disk'). Use 'full'
        if full corpus and all embeddings fits into memory for speedups during training. Otherwise use 'partial' and if
        even this is too much for your memory, use 'disk'.
        :param label_name_map: Optionally map label names to different schema.
        :param allow_examples_without_labels: set to True to allow Sentences without label in the corpus.
        :param encoding: Default is 'utf-8' but some datasets are in 'latin-1
        :return: a Corpus with annotated train, dev and test data
        """
        # locate the three split files; dev/test may legitimately be absent
        dev_file, test_file, train_file = find_train_dev_test_files(data_folder, dev_file, test_file, train_file)

        # all three splits are constructed with the exact same configuration
        dataset_kwargs = dict(
            label_type=label_type,
            tokenizer=tokenizer,
            truncate_to_max_tokens=truncate_to_max_tokens,
            truncate_to_max_chars=truncate_to_max_chars,
            filter_if_longer_than=filter_if_longer_than,
            memory_mode=memory_mode,
            label_name_map=label_name_map,
            skip_labels=skip_labels,
            allow_examples_without_labels=allow_examples_without_labels,
            encoding=encoding,
        )

        train: FlairDataset = ClassificationDataset(train_file, **dataset_kwargs)

        # test and dev splits are only instantiated if their files exist
        test = ClassificationDataset(test_file, **dataset_kwargs) if test_file is not None else None
        dev = ClassificationDataset(dev_file, **dataset_kwargs) if dev_file is not None else None

        super().__init__(train, dev, test, name=str(data_folder), sample_missing_splits=sample_missing_splits)

        log.info(f"Initialized corpus {self.name} (label type name is '{label_type}')")
class ClassificationDataset(FlairDataset):
    """Dataset for classification instantiated from a single FastText-formatted file."""

    def __init__(
        self,
        path_to_file: Union[str, Path],
        label_type: str,
        truncate_to_max_tokens=-1,
        truncate_to_max_chars=-1,
        filter_if_longer_than: int = -1,
        tokenizer: Union[bool, Tokenizer] = SegtokTokenizer(),
        memory_mode: str = "partial",
        label_name_map: Optional[Dict[str, str]] = None,
        skip_labels: Optional[List[str]] = None,
        allow_examples_without_labels=False,
        encoding: str = "utf-8",
    ) -> None:
        """Reads a data file for text classification.

        The file should contain one document/text per line.
        The line should have the following format:
        __label__<class_name> <text>
        If you have a multi class task, you can have as many labels as you want at the beginning of the line, e.g.,
        __label__<class_name_1> __label__<class_name_2> <text>

        :param path_to_file: the path to the data file
        :param label_type: name of the label
        :param truncate_to_max_tokens: If set, truncates each Sentence to a maximum number of tokens
        :param truncate_to_max_chars: If set, truncates each Sentence to a maximum number of chars
        :param filter_if_longer_than: If set, filters documents that are longer that the specified number of tokens.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to what degree to keep corpus in memory ('full', 'partial' or 'disk'). Use 'full'
        if full corpus and all embeddings fits into memory for speedups during training. Otherwise use 'partial' and if
        even this is too much for your memory, use 'disk'.
        :param label_name_map: Optionally map label names to different schema.
        :param allow_examples_without_labels: set to True to allow Sentences without label in the Dataset.
        :param encoding: Default is 'utf-8' but some datasets are in 'latin-1
        :return: list of sentences
        """
        path_to_file = Path(path_to_file)
        assert path_to_file.exists()

        self.label_prefix = "__label__"
        self.label_type = label_type
        self.memory_mode = memory_mode
        self.tokenizer = tokenizer

        # storage depends on memory mode: parsed Sentences ('full'),
        # raw text lines ('partial'), or byte offsets into the file ('disk')
        if self.memory_mode == "full":
            self.sentences = []
        if self.memory_mode == "partial":
            self.lines = []
        if self.memory_mode == "disk":
            self.indices = []

        self.total_sentence_count: int = 0
        self.truncate_to_max_chars = truncate_to_max_chars
        self.truncate_to_max_tokens = truncate_to_max_tokens
        self.filter_if_longer_than = filter_if_longer_than
        self.label_name_map = label_name_map
        self.allow_examples_without_labels = allow_examples_without_labels

        self.path_to_file = path_to_file

        with open(str(path_to_file), encoding=encoding) as f:
            line = f.readline()
            # invariant: 'position' is always the byte offset at which 'line' starts
            position = 0
            while line:
                # drop lines without a label (unless allowed) and lines without
                # any whitespace (i.e. no text following the labels)
                if (self.label_prefix not in line and not allow_examples_without_labels) or (
                    " " not in line and "\t" not in line
                ):
                    position = f.tell()
                    line = f.readline()
                    continue

                # drop documents longer than the configured token limit
                if 0 < self.filter_if_longer_than < len(line.split(" ")):
                    position = f.tell()
                    line = f.readline()
                    continue

                # if data point contains black-listed label, do not use
                if skip_labels:
                    skip = False
                    for skip_label in skip_labels:
                        if self.label_prefix + skip_label in line:
                            skip = True
                    if skip:
                        # BUGFIX: advance 'position' here as well. Previously the
                        # offset of the skipped line leaked into self.indices for
                        # the next accepted line, so 'disk' mode returned wrong
                        # examples after any skipped data point.
                        position = f.tell()
                        line = f.readline()
                        continue

                if self.memory_mode == "full":
                    sentence = self._parse_line_to_sentence(line, self.label_prefix, tokenizer)
                    if sentence is not None and len(sentence.tokens) > 0:
                        self.sentences.append(sentence)
                        self.total_sentence_count += 1

                if self.memory_mode == "partial" or self.memory_mode == "disk":
                    # cheap validity check without full tokenization: strip the
                    # leading label tokens and test whether any text remains
                    words = line.split()
                    l_len = 0
                    label = False
                    for i in range(len(words)):
                        if words[i].startswith(self.label_prefix):
                            l_len += len(words[i]) + 1
                            label = True
                        else:
                            break
                    text = line[l_len:].strip()

                    # if so, add to lines/indices
                    if text and (label or allow_examples_without_labels):
                        if self.memory_mode == "partial":
                            self.lines.append(line)
                            self.total_sentence_count += 1

                        if self.memory_mode == "disk":
                            self.indices.append(position)
                            self.total_sentence_count += 1

                position = f.tell()
                line = f.readline()

    def _parse_line_to_sentence(self, line: str, label_prefix: str, tokenizer: Union[bool, Tokenizer]):
        """Parses one FastText-formatted line into a labeled Sentence, or None if invalid."""
        words = line.split()

        labels = []
        l_len = 0

        # consume leading label tokens; l_len counts characters (plus one
        # separator each) to cut off the front of the raw line afterwards
        for i in range(len(words)):
            if words[i].startswith(label_prefix):
                l_len += len(words[i]) + 1
                label = words[i].replace(label_prefix, "")
                if self.label_name_map and label in self.label_name_map:
                    # optionally map label to a different schema
                    label = self.label_name_map[label]
                labels.append(label)
            else:
                break

        text = line[l_len:].strip()

        if self.truncate_to_max_chars > 0:
            text = text[: self.truncate_to_max_chars]

        if text and (labels or self.allow_examples_without_labels):
            sentence = Sentence(text, use_tokenizer=tokenizer)

            for label in labels:
                sentence.add_label(self.label_type, label)

            if sentence is not None and 0 < self.truncate_to_max_tokens < len(sentence):
                sentence.tokens = sentence.tokens[: self.truncate_to_max_tokens]

            return sentence
        return None

    def is_in_memory(self) -> bool:
        # only 'full' mode keeps parsed Sentence objects resident
        if self.memory_mode == "disk":
            return False
        if self.memory_mode == "partial":
            return False
        return True

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        if self.memory_mode == "full":
            return self.sentences[index]

        if self.memory_mode == "partial":
            sentence = self._parse_line_to_sentence(self.lines[index], self.label_prefix, self.tokenizer)
            return sentence

        if self.memory_mode == "disk":
            # seek to the recorded byte offset and parse the line on demand
            with open(str(self.path_to_file), encoding="utf-8") as file:
                file.seek(self.indices[index])
                line = file.readline()
            sentence = self._parse_line_to_sentence(line, self.label_prefix, self.tokenizer)
            return sentence
        raise AssertionError
class CSVClassificationCorpus(Corpus):
    """Classification corpus instantiated from CSV data files."""

    def __init__(
        self,
        data_folder: Union[str, Path],
        column_name_map: Dict[int, str],
        label_type: str,
        name: str = "csv_corpus",
        train_file=None,
        test_file=None,
        dev_file=None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        tokenizer: Tokenizer = SegtokTokenizer(),
        in_memory: bool = False,
        skip_header: bool = False,
        encoding: str = "utf-8",
        no_class_label=None,
        **fmtparams,
    ) -> None:
        """Instantiates a Corpus for text classification from CSV column formatted data.

        :param data_folder: base folder with the task data
        :param column_name_map: a column name map that indicates which column is text and which the label(s)
        :param label_type: name of the label
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file, if None, dev data is sampled from train
        :param max_tokens_per_doc: If set, truncates each Sentence to a maximum number of Tokens
        :param max_chars_per_doc: If set, truncates each Sentence to a maximum number of chars
        :param tokenizer: Tokenizer for dataset, default is SegtokTokenizer
        :param in_memory: If True, keeps dataset as Sentences in memory, otherwise only keeps strings
        :param skip_header: If True, skips first line because it is header
        :param encoding: Default is 'utf-8' but some datasets are in 'latin-1
        :param fmtparams: additional parameters for the CSV file reader
        :return: a Corpus with annotated train, dev and test data
        """
        # locate the split files; dev/test may be absent
        dev_file, test_file, train_file = find_train_dev_test_files(data_folder, dev_file, test_file, train_file)

        # identical dataset configuration for all three splits
        shared_kwargs = dict(
            label_type=label_type,
            tokenizer=tokenizer,
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            in_memory=in_memory,
            skip_header=skip_header,
            encoding=encoding,
            no_class_label=no_class_label,
            **fmtparams,
        )

        train: FlairDataset = CSVClassificationDataset(train_file, column_name_map, **shared_kwargs)

        test = (
            CSVClassificationDataset(test_file, column_name_map, **shared_kwargs)
            if test_file is not None
            else None
        )

        dev = (
            CSVClassificationDataset(dev_file, column_name_map, **shared_kwargs)
            if dev_file is not None
            else None
        )

        super().__init__(train, dev, test, name=name)
class CSVClassificationDataset(FlairDataset):
    """Dataset for text classification from CSV column formatted data."""

    def __init__(
        self,
        path_to_file: Union[str, Path],
        column_name_map: Dict[int, str],
        label_type: str,
        max_tokens_per_doc: int = -1,
        max_chars_per_doc: int = -1,
        tokenizer: Tokenizer = SegtokTokenizer(),
        in_memory: bool = True,
        skip_header: bool = False,
        encoding: str = "utf-8",
        no_class_label=None,
        **fmtparams,
    ) -> None:
        """Instantiates a Dataset for text classification from CSV column formatted data.

        :param path_to_file: path to the file with the CSV data
        :param column_name_map: a column name map that indicates which column is text and which the label(s)
        :param label_type: name of the label
        :param max_tokens_per_doc: If set, truncates each Sentence to a maximum number of Tokens
        :param max_chars_per_doc: If set, truncates each Sentence to a maximum number of chars
        :param tokenizer: Tokenizer for dataset, default is SegTokTokenizer
        :param in_memory: If True, keeps dataset as Sentences in memory, otherwise only keeps strings
        :param skip_header: If True, skips first line because it is header
        :param encoding: Most datasets are 'utf-8' but some are 'latin-1'
        :param fmtparams: additional parameters for the CSV file reader
        :return: a Corpus with annotated train, dev and test data
        """
        path_to_file = Path(path_to_file)
        assert path_to_file.exists()

        # variables
        self.path_to_file = path_to_file
        self.in_memory = in_memory
        self.tokenizer = tokenizer
        self.column_name_map = column_name_map
        self.max_tokens_per_doc = max_tokens_per_doc
        self.max_chars_per_doc = max_chars_per_doc
        self.no_class_label = no_class_label

        self.label_type = label_type

        # different handling of in_memory data than streaming data
        if self.in_memory:
            self.sentences = []
        else:
            self.raw_data = []

        self.total_sentence_count: int = 0

        # most data sets have the token text in the first column, if not, pass 'text' as column
        self.text_columns: List[int] = []
        self.pair_columns: List[int] = []
        for column in column_name_map:
            if column_name_map[column] == "text":
                self.text_columns.append(column)
            if column_name_map[column] == "pair":
                self.pair_columns.append(column)

        with open(self.path_to_file, encoding=encoding) as csv_file:
            csv_reader = csv.reader(csv_file, **fmtparams)

            if skip_header:
                next(csv_reader, None)  # skip the headers

            for row in csv_reader:
                # test if format is OK: every mapped text column must exist in the row
                wrong_format = False
                for text_column in self.text_columns:
                    if text_column >= len(row):
                        wrong_format = True

                if wrong_format:
                    continue

                # test if at least one label given
                has_label = False
                for column in self.column_name_map:
                    if self.column_name_map[column].startswith("label") and row[column]:
                        has_label = True
                        break

                if not has_label:
                    continue

                if self.in_memory:
                    sentence = self._make_labeled_data_point(row)
                    self.sentences.append(sentence)
                else:
                    self.raw_data.append(row)

                self.total_sentence_count += 1

    def _make_labeled_data_point(self, row):
        """Turns one CSV row into a labeled Sentence (or a DataPair if 'pair' columns are mapped)."""
        # make sentence from text (and filter for length)
        text = " ".join([row[text_column] for text_column in self.text_columns])

        if self.max_chars_per_doc > 0:
            text = text[: self.max_chars_per_doc]

        sentence = Sentence(text, use_tokenizer=self.tokenizer)

        if 0 < self.max_tokens_per_doc < len(sentence):
            sentence.tokens = sentence.tokens[: self.max_tokens_per_doc]

        # if a pair column is defined, make a sentence pair object
        if len(self.pair_columns) > 0:
            text = " ".join([row[pair_column] for pair_column in self.pair_columns])

            if self.max_chars_per_doc > 0:
                text = text[: self.max_chars_per_doc]

            pair = Sentence(text, use_tokenizer=self.tokenizer)

            # BUGFIX: truncate based on the pair's own length. The previous check
            # compared against len(sentence), which was already truncated above,
            # so the pair was never actually shortened.
            if 0 < self.max_tokens_per_doc < len(pair):
                pair.tokens = pair.tokens[: self.max_tokens_per_doc]

            data_point = DataPair(first=sentence, second=pair)

        else:
            data_point = sentence

        for column in self.column_name_map:
            column_value = row[column]
            if (
                self.column_name_map[column].startswith("label")
                and column_value
                and column_value != self.no_class_label
            ):
                data_point.add_label(self.label_type, column_value)

        return data_point

    def is_in_memory(self) -> bool:
        return self.in_memory

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        if self.in_memory:
            return self.sentences[index]
        else:
            # parse lazily from the stored raw CSV row
            row = self.raw_data[index]
            sentence = self._make_labeled_data_point(row)
            return sentence
class AMAZON_REVIEWS(ClassificationCorpus):
    """A very large corpus of Amazon reviews with positivity ratings.

    Corpus is downloaded from and documented at
    https://nijianmo.github.io/amazon/index.html.

    We download the 5-core subset which is still tens of millions of
    reviews.
    """

    # noinspection PyDefaultArgument
    def __init__(
        self,
        split_max: int = 30000,
        label_name_map: Dict[str, str] = {
            "1.0": "NEGATIVE",
            "2.0": "NEGATIVE",
            "3.0": "NEGATIVE",
            "4.0": "POSITIVE",
            "5.0": "POSITIVE",
        },
        skip_labels=["3.0", "4.0"],
        fraction_of_5_star_reviews: int = 10,
        tokenizer: Tokenizer = SegtokTokenizer(),
        memory_mode="partial",
        **corpusargs,
    ) -> None:
        """Constructs corpus object.

        Split_max indicates how many data points from each of the 28 splits are used, so
        set this higher or lower to increase/decrease corpus size.

        :param label_name_map: Map label names to different schema. By default, the 5-star rating is mapped onto 3
        classes (POSITIVE, NEGATIVE, NEUTRAL)
        :param split_max: Split_max indicates how many data points from each of the 28 splits are used, so
        set this higher or lower to increase/decrease corpus size.
        :param memory_mode: Set to what degree to keep corpus in memory ('full', 'partial' or 'disk'). Use 'full'
        if full corpus and all embeddings fits into memory for speedups during training. Otherwise use 'partial' and if
        even this is too much for your memory, use 'disk'.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param corpusargs: Arguments for ClassificationCorpus
        """
        # dataset name includes the split size
        dataset_name = self.__class__.__name__.lower() + "_" + str(split_max) + "_" + str(fraction_of_5_star_reviews)

        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name

        # download data if necessary
        if not (data_folder / "train.txt").is_file():
            # one entry per 5-core category file. BUGFIX: the previous version
            # copy-pasted 29 individual calls and listed
            # "Arts_Crafts_and_Sewing_5.json.gz" twice; since the download helper
            # appends to train.txt, that category's reviews were duplicated.
            part_names = [
                "AMAZON_FASHION_5.json.gz",
                "All_Beauty_5.json.gz",
                "Appliances_5.json.gz",
                "Arts_Crafts_and_Sewing_5.json.gz",
                "Automotive_5.json.gz",
                "Books_5.json.gz",
                "CDs_and_Vinyl_5.json.gz",
                "Cell_Phones_and_Accessories_5.json.gz",
                "Clothing_Shoes_and_Jewelry_5.json.gz",
                "Digital_Music_5.json.gz",
                "Electronics_5.json.gz",
                "Gift_Cards_5.json.gz",
                "Grocery_and_Gourmet_Food_5.json.gz",
                "Home_and_Kitchen_5.json.gz",
                "Industrial_and_Scientific_5.json.gz",
                "Kindle_Store_5.json.gz",
                "Luxury_Beauty_5.json.gz",
                "Magazine_Subscriptions_5.json.gz",
                "Movies_and_TV_5.json.gz",
                "Musical_Instruments_5.json.gz",
                "Office_Products_5.json.gz",
                "Patio_Lawn_and_Garden_5.json.gz",
                "Pet_Supplies_5.json.gz",
                "Prime_Pantry_5.json.gz",
                "Software_5.json.gz",
                "Sports_and_Outdoors_5.json.gz",
                "Tools_and_Home_Improvement_5.json.gz",
                "Toys_and_Games_5.json.gz",
                "Video_Games_5.json.gz",
            ]
            for part_name in part_names:
                self.download_and_prepare_amazon_product_file(
                    data_folder, part_name, split_max, fraction_of_5_star_reviews
                )

        super().__init__(
            data_folder,
            label_type="sentiment",
            label_name_map=label_name_map,
            skip_labels=skip_labels,
            tokenizer=tokenizer,
            memory_mode=memory_mode,
            **corpusargs,
        )

    def download_and_prepare_amazon_product_file(
        self, data_folder, part_name, max_data_points=None, fraction_of_5_star_reviews=None
    ):
        """Downloads one category file and appends its reviews to train.txt in FastText format.

        :param data_folder: target folder for the combined train.txt
        :param part_name: file name of the gzipped 5-core category file
        :param max_data_points: stop after this many reviews were written for this category
        :param fraction_of_5_star_reviews: if set, keep only every n-th 5-star review to de-bias the corpus
        """
        amazon__path = "http://deepyeti.ucsd.edu/jianmo/amazon/categoryFilesSmall"
        cached_path(f"{amazon__path}/{part_name}", Path("datasets") / "Amazon_Product_Reviews")

        import gzip

        # create dataset directory if necessary
        if not os.path.exists(data_folder):
            os.makedirs(data_folder)

        # write UTF-8 explicitly: review texts are unicode and the platform
        # default encoding may not be able to represent them
        with open(data_folder / "train.txt", "a", encoding="utf-8") as train_file:
            write_count = 0
            review_5_count = 0

            # download senteval datasets if necessary und unzip
            with gzip.open(flair.cache_root / "datasets" / "Amazon_Product_Reviews" / part_name, "rb") as f_in:
                for line in f_in:
                    parsed_json = json.loads(line)
                    # skip entries without usable review text
                    if "reviewText" not in parsed_json:
                        continue
                    if parsed_json["reviewText"].strip() == "":
                        continue
                    text = parsed_json["reviewText"].replace("\n", "")

                    # keep only every n-th 5-star review to reduce class imbalance
                    if fraction_of_5_star_reviews and str(parsed_json["overall"]) == "5.0":
                        review_5_count += 1
                        if review_5_count != fraction_of_5_star_reviews:
                            continue
                        else:
                            review_5_count = 0

                    train_file.write(f"__label__{parsed_json['overall']} {text}\n")
                    write_count += 1
                    if max_data_points and write_count >= max_data_points:
                        break
class IMDB(ClassificationCorpus):
    """Corpus of IMDB movie reviews labeled by sentiment (POSITIVE, NEGATIVE).

    Downloaded from and documented at http://ai.stanford.edu/~amaas/data/sentiment/.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        rebalance_corpus: bool = True,
        tokenizer: Tokenizer = SegtokTokenizer(),
        memory_mode="partial",
        **corpusargs,
    ) -> None:
        """Initialize the IMDB move review sentiment corpus.

        :param base_path: Provide this only if you store the IMDB corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param rebalance_corpus: Default splits for this corpus have a strange 50/50 train/test split that are impractical.
        With rebalance_corpus=True (default setting), corpus is rebalanced to a 80/10/10 train/dev/test split. If you
        want to use original splits, set this to False.
        :param memory_mode: Set to 'partial' because this is a huge corpus, but you can also set to 'full' for faster
        processing or 'none' for less memory.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower() + "_v4"

        # download data if necessary
        imdb_acl_path = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
        if rebalance_corpus:
            dataset_name = dataset_name + "-rebalanced"
        data_folder = base_path / dataset_name
        data_path = flair.cache_root / "datasets" / dataset_name
        train_data_file = data_path / "train.txt"
        test_data_file = data_path / "test.txt"

        if not train_data_file.is_file() or (not rebalance_corpus and not test_data_file.is_file()):
            # remove partial files left over from an interrupted previous run
            for file_path in [train_data_file, test_data_file]:
                if file_path.is_file():
                    os.remove(file_path)

            cached_path(imdb_acl_path, Path("datasets") / dataset_name)
            import tarfile

            # maps archive folder name to the flair sentiment label
            sentiment_of_label = {"pos": "POSITIVE", "neg": "NEGATIVE"}

            with tarfile.open(flair.cache_root / "datasets" / dataset_name / "aclImdb_v1.tar.gz", "r:gz") as f_in:
                datasets = ["train", "test"]
                labels = ["pos", "neg"]

                for label in labels:
                    for dataset in datasets:
                        # extract only the members of this split/sentiment
                        f_in.extractall(
                            data_path, members=[m for m in f_in.getmembers() if f"{dataset}/{label}" in m.name]
                        )
                        # without rebalancing, the original test split goes to test.txt
                        data_file = train_data_file
                        if not rebalance_corpus and dataset == "test":
                            data_file = test_data_file

                        # BUGFIX: write with explicit UTF-8; reviews are UTF-8 and
                        # the platform default encoding may fail on some characters
                        with open(data_file, "at", encoding="utf-8") as f_p:
                            current_path = data_path / "aclImdb" / dataset / label
                            for file_name in current_path.iterdir():
                                if file_name.is_file() and file_name.name.endswith(".txt"):
                                    # read_text closes the file handle (the previous
                                    # open(...).read() leaked it)
                                    f_p.write(
                                        f"__label__{sentiment_of_label[label]} "
                                        + file_name.read_text(encoding="utf-8")
                                        + "\n"
                                    )

        super().__init__(
            data_folder, label_type="sentiment", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class NEWSGROUPS(ClassificationCorpus):
    """20 newsgroups corpus, classifying news items into one of 20 categories.

    Downloaded from http://qwone.com/~jason/20Newsgroups

    Each data point is a full news article so documents may be very
    long.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Tokenizer = SegtokTokenizer(),
        memory_mode: str = "partial",
        **corpusargs,
    ) -> None:
        """Instantiates 20 newsgroups corpus.

        :param base_path: Provide this only if you store the IMDB corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to 'partial' because this is a big corpus, but you can also set to 'full' for faster
        processing or 'none' for less memory.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        twenty_newsgroups_path = "http://qwone.com/~jason/20Newsgroups/20news-bydate.tar.gz"
        data_path = flair.cache_root / "datasets" / dataset_name
        data_file = data_path / "20news-bydate-train.txt"
        if not data_file.is_file():
            cached_path(twenty_newsgroups_path, Path("datasets") / dataset_name / "original")

            import tarfile

            with tarfile.open(
                flair.cache_root / "datasets" / dataset_name / "original" / "20news-bydate.tar.gz", "r:gz"
            ) as f_in:
                datasets = ["20news-bydate-test", "20news-bydate-train"]
                labels = [
                    "alt.atheism",
                    "comp.graphics",
                    "comp.os.ms-windows.misc",
                    "comp.sys.ibm.pc.hardware",
                    "comp.sys.mac.hardware",
                    "comp.windows.x",
                    "misc.forsale",
                    "rec.autos",
                    "rec.motorcycles",
                    "rec.sport.baseball",
                    "rec.sport.hockey",
                    "sci.crypt",
                    "sci.electronics",
                    "sci.med",
                    "sci.space",
                    "soc.religion.christian",
                    "talk.politics.guns",
                    "talk.politics.mideast",
                    "talk.politics.misc",
                    "talk.religion.misc",
                ]

                for label in labels:
                    for dataset in datasets:
                        # extract only the members of this split/newsgroup
                        f_in.extractall(
                            data_path / "original",
                            members=[m for m in f_in.getmembers() if f"{dataset}/{label}" in m.name],
                        )

                        with open(f"{data_path}/{dataset}.txt", "at", encoding="utf-8") as f_p:
                            current_path = data_path / "original" / dataset / label
                            for file_name in current_path.iterdir():
                                if file_name.is_file():
                                    # BUGFIX: read_text closes the file handle (the
                                    # previous open(...).read() leaked it); newlines
                                    # are flattened so each article is one line
                                    f_p.write(
                                        f"__label__{label} "
                                        + file_name.read_text(encoding="latin1").replace("\n", " <n> ")
                                        + "\n"
                                    )

        super().__init__(data_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class STACKOVERFLOW(ClassificationCorpus):
    """Stackoverflow corpus classifying questions into one of 20 labels.

    The data will be downloaded from "https://github.com/jacoxu/StackOverflow",

    Each data point is a question.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Tokenizer = SegtokTokenizer(),
        memory_mode: str = "partial",
        **corpusargs,
    ) -> None:
        """Instantiates Stackoverflow corpus.

        :param base_path: Provide this only if you store the corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to 'partial' because this is a big corpus, but you can also set to 'full' for faster
        processing or 'none' for less memory.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        stackoverflow_path_data = (
            "https://raw.githubusercontent.com/jacoxu/StackOverflow/master/rawText/title_StackOverflow.txt"
        )
        stackoverflow_path_label = (
            "https://raw.githubusercontent.com/jacoxu/StackOverflow/master/rawText/label_StackOverflow.txt"
        )
        data_path = flair.cache_root / "datasets" / dataset_name
        data_file = data_path / "title_StackOverflow.txt"

        if not data_file.is_file():
            cached_path(stackoverflow_path_data, Path("datasets") / dataset_name / "original")
            cached_path(stackoverflow_path_label, Path("datasets") / dataset_name / "original")

            # class names indexed by the 1-based numeric labels in the label file
            class_names = [
                "wordpress",
                "oracle",
                "svn",
                "apache",
                "excel",
                "matlab",
                "visual-studio",
                "cocoa",
                "osx",
                "bash",
                "spring",
                "hibernate",
                "scala",
                "sharepoint",
                "ajax",
                "qt",
                "drupal",
                "linq",
                "haskell",
                "magento",
            ]

            # resolve each numeric label line to its class name
            with open(data_path / "original" / "label_StackOverflow.txt", encoding="latin1") as label_fp:
                label_list = [class_names[int(raw_label.rstrip()) - 1] for raw_label in label_fp]

            # emit one FastText-formatted line per question title
            with (data_path / "original" / "title_StackOverflow.txt").open(encoding="latin1") as title_fp, (
                data_folder / "train.txt"
            ).open("w", encoding="utf-8") as out_fp:
                for idx, title in enumerate(title_fp):
                    out_fp.write(f"__label__{label_list[idx]} {title.rstrip()}\n")

        super().__init__(data_folder, label_type="class", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class SENTIMENT_140(ClassificationCorpus):
    """Twitter sentiment corpus.

    See http://help.sentiment140.com/for-students

    Two sentiments in train data (POSITIVE, NEGATIVE) and three
    sentiments in test data (POSITIVE, NEGATIVE, NEUTRAL).
    """

    def __init__(
        self, label_name_map=None, tokenizer: Tokenizer = SegtokTokenizer(), memory_mode: str = "partial", **corpusargs
    ) -> None:
        """Instantiates twitter sentiment corpus.

        :param label_name_map: By default, the numeric values are mapped to ('NEGATIVE', 'POSITIVE' and 'NEUTRAL')
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to 'partial' because this is a big corpus, but you can also set to 'full' for faster
            processing or 'none' for less memory.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # by default, map point score to POSITIVE / NEGATIVE / NEUTRAL values
        if label_name_map is None:
            label_name_map = {"0": "NEGATIVE", "2": "NEUTRAL", "4": "POSITIVE"}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name
        # download and convert only when the converted files are missing
        # (this branch was previously `if True:`, which re-downloaded and rebuilt
        # the corpus files on every instantiation)
        if not (data_folder / "train.txt").is_file():
            # download sentiment140 archive if necessary and unzip
            sentiment_url = "https://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip"
            cached_path(sentiment_url, Path("datasets") / dataset_name / "raw")
            senteval_folder = flair.cache_root / "datasets" / dataset_name / "raw"
            unzip_file(senteval_folder / "trainingandtestdata.zip", senteval_folder)
            # create dataset directory if necessary
            if not os.path.exists(data_folder):
                os.makedirs(data_folder)
            # create train.txt file from CSV (columns used: 0 = polarity label, 5 = tweet text);
            # write utf-8 explicitly for consistency with the test split below
            with open(data_folder / "train.txt", "w", encoding="utf-8") as train_file, open(
                senteval_folder / "training.1600000.processed.noemoticon.csv", encoding="latin-1"
            ) as csv_train:
                csv_reader = csv.reader(csv_train)
                for row in csv_reader:
                    label = row[0]
                    text = row[5]
                    train_file.write(f"__label__{label} {text}\n")
            # create test.txt file from CSV
            with (data_folder / "test.txt").open("w", encoding="utf-8") as test_file, (
                senteval_folder / "testdata.manual.2009.06.14.csv"
            ).open(encoding="latin-1") as csv_test:
                csv_reader = csv.reader(csv_test)
                for row in csv_reader:
                    label = row[0]
                    text = row[5]
                    test_file.write(f"__label__{label} {text}\n")
        super().__init__(
            data_folder,
            label_type="sentiment",
            tokenizer=tokenizer,
            memory_mode=memory_mode,
            label_name_map=label_name_map,
            **corpusargs,
        )
class SENTEVAL_CR(ClassificationCorpus):
    """The customer reviews dataset of SentEval, classified into NEGATIVE or POSITIVE sentiment.

    see https://github.com/facebookresearch/SentEval
    """

    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval customer reviews dataset.

        :param corpusargs: Other args for ClassificationCorpus.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer())
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        """
        # corpus is cached in a folder named after this class
        dataset_name = self.__class__.__name__.lower()
        data_folder = flair.cache_root / "datasets" / dataset_name
        # fetch and convert only when the FastText-format train file is missing
        if not (data_folder / "train.txt").is_file():
            # download the shared SentEval archive and unpack it
            senteval_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/datasmall_NB_ACL12.zip"
            cached_path(senteval_path, Path("datasets") / "senteval")
            senteval_folder = flair.cache_root / "datasets" / "senteval"
            unzip_file(senteval_folder / "datasmall_NB_ACL12.zip", senteval_folder)
            # make sure the target directory exists
            if not os.path.exists(data_folder):
                os.makedirs(data_folder)
            # emit one "__label__<POLARITY> <review>" line per source line,
            # positive reviews first, then negative (same order as before)
            source_dir = senteval_folder / "data" / "customerr"
            with open(data_folder / "train.txt", "a") as train_file:
                for source_name, polarity in (("custrev.pos", "POSITIVE"), ("custrev.neg", "NEGATIVE")):
                    with open(source_dir / source_name, encoding="latin1") as source_file:
                        for review_line in source_file:
                            train_file.write(f"__label__{polarity} {review_line}")
        super().__init__(
            data_folder, label_type="sentiment", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class SENTEVAL_MR(ClassificationCorpus):
    """The movie reviews dataset of SentEval, classified into NEGATIVE or POSITIVE sentiment.
    see https://github.com/facebookresearch/SentEval
    """
    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval movie reviews dataset.
        :param corpusargs: Other args for ClassificationCorpus.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        """
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name
        # download data if necessary; train.txt doubles as the "already converted" marker,
        # which also keeps the append-mode write below from duplicating lines on re-runs
        if not (data_folder / "train.txt").is_file():
            # download the shared SentEval archive if necessary and unzip
            senteval_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/datasmall_NB_ACL12.zip"
            cached_path(senteval_path, Path("datasets") / "senteval")
            senteval_folder = flair.cache_root / "datasets" / "senteval"
            unzip_file(senteval_folder / "datasmall_NB_ACL12.zip", senteval_folder)
            # create dataset directory if necessary
            if not os.path.exists(data_folder):
                os.makedirs(data_folder)
            # create train.txt in FastText format by iterating over pos and neg file;
            # each source line keeps its own trailing newline, so no "\n" is added here
            with open(data_folder / "train.txt", "a") as train_file:
                with open(senteval_folder / "data" / "rt10662" / "rt-polarity.pos", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__POSITIVE {line}")
                with open(senteval_folder / "data" / "rt10662" / "rt-polarity.neg", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__NEGATIVE {line}")
        super().__init__(
            data_folder, label_type="sentiment", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class SENTEVAL_SUBJ(ClassificationCorpus):
    """The subjectivity dataset of SentEval, classified into SUBJECTIVE or OBJECTIVE sentiment.
    see https://github.com/facebookresearch/SentEval
    """
    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval subjectivity dataset.
        :param corpusargs: Other args for ClassificationCorpus.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        """
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name
        # download data if necessary; train.txt doubles as the "already converted" marker
        if not (data_folder / "train.txt").is_file():
            # download the shared SentEval archive if necessary and unzip
            senteval_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/datasmall_NB_ACL12.zip"
            cached_path(senteval_path, Path("datasets") / "senteval")
            senteval_folder = flair.cache_root / "datasets" / "senteval"
            unzip_file(senteval_folder / "datasmall_NB_ACL12.zip", senteval_folder)
            # create dataset directory if necessary
            if not os.path.exists(data_folder):
                os.makedirs(data_folder)
            # create train.txt in FastText format by iterating over the subjective
            # and objective source files (source lines keep their own newlines)
            with open(data_folder / "train.txt", "a") as train_file:
                with open(senteval_folder / "data" / "subj" / "subj.subjective", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__SUBJECTIVE {line}")
                with open(senteval_folder / "data" / "subj" / "subj.objective", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__OBJECTIVE {line}")
        super().__init__(
            data_folder, label_type="objectivity", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class SENTEVAL_MPQA(ClassificationCorpus):
    """The opinion-polarity dataset of SentEval, classified into NEGATIVE or POSITIVE polarity.
    see https://github.com/facebookresearch/SentEval
    """
    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval opinion polarity dataset.
        :param corpusargs: Other args for ClassificationCorpus.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        """
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name
        # download data if necessary; train.txt doubles as the "already converted" marker
        if not (data_folder / "train.txt").is_file():
            # download the shared SentEval archive if necessary and unzip
            senteval_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/datasmall_NB_ACL12.zip"
            cached_path(senteval_path, Path("datasets") / "senteval")
            senteval_folder = flair.cache_root / "datasets" / "senteval"
            unzip_file(senteval_folder / "datasmall_NB_ACL12.zip", senteval_folder)
            # create dataset directory if necessary
            if not os.path.exists(data_folder):
                os.makedirs(data_folder)
            # create train.txt in FastText format by iterating over pos and neg file
            # (source lines keep their own newlines)
            with open(data_folder / "train.txt", "a") as train_file:
                with open(senteval_folder / "data" / "mpqa" / "mpqa.pos", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__POSITIVE {line}")
                with open(senteval_folder / "data" / "mpqa" / "mpqa.neg", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__NEGATIVE {line}")
        super().__init__(
            data_folder, label_type="sentiment", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class SENTEVAL_SST_BINARY(ClassificationCorpus):
    """The Stanford sentiment treebank dataset of SentEval, classified into NEGATIVE or POSITIVE sentiment.
    see https://github.com/facebookresearch/SentEval
    """
    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval Stanford sentiment treebank dataset.
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # this dataset name ('_v2' keeps this version separate from any older cached copy)
        dataset_name = self.__class__.__name__.lower() + "_v2"
        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name
        # download data if necessary; train.txt doubles as the "already converted" marker
        if not (data_folder / "train.txt").is_file():
            # download the three raw splits if necessary
            cached_path(
                "https://raw.githubusercontent.com/PrincetonML/SIF/master/data/sentiment-train",
                Path("datasets") / dataset_name / "raw",
            )
            cached_path(
                "https://raw.githubusercontent.com/PrincetonML/SIF/master/data/sentiment-test",
                Path("datasets") / dataset_name / "raw",
            )
            cached_path(
                "https://raw.githubusercontent.com/PrincetonML/SIF/master/data/sentiment-dev",
                Path("datasets") / dataset_name / "raw",
            )
            original_filenames = ["sentiment-train", "sentiment-dev", "sentiment-test"]
            new_filenames = ["train.txt", "dev.txt", "test.txt"]
            # create train, dev and test files in FastText format
            # (each raw line is "<sentence>\t<0-or-1>"; "1" maps to POSITIVE)
            for new_filename, original_filename in zip(new_filenames, original_filenames):
                with open(data_folder / new_filename, "a") as out_file, open(
                    data_folder / "raw" / original_filename
                ) as in_file:
                    for line in in_file:
                        fields = line.split("\t")
                        label = "POSITIVE" if fields[1].rstrip() == "1" else "NEGATIVE"
                        out_file.write(f"__label__{label} {fields[0]}\n")
        # NOTE(review): unlike the sibling SentEval corpora, no label_type is passed here,
        # so the ClassificationCorpus default applies — confirm this is intentional
        super().__init__(data_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class SENTEVAL_SST_GRANULAR(ClassificationCorpus):
    """The Stanford sentiment treebank dataset of SentEval, classified into 5 sentiment classes.
    see https://github.com/facebookresearch/SentEval
    """
    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval Stanford sentiment treebank dataset.
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name
        # download data if necessary; train.txt doubles as the "already converted" marker
        if not (data_folder / "train.txt").is_file():
            # download the three raw stsa.fine splits if necessary
            cached_path(
                "https://raw.githubusercontent.com/AcademiaSinicaNLPLab/sentiment_dataset/master/data/stsa.fine.train",
                Path("datasets") / dataset_name / "raw",
            )
            cached_path(
                "https://raw.githubusercontent.com/AcademiaSinicaNLPLab/sentiment_dataset/master/data/stsa.fine.test",
                Path("datasets") / dataset_name / "raw",
            )
            cached_path(
                "https://raw.githubusercontent.com/AcademiaSinicaNLPLab/sentiment_dataset/master/data/stsa.fine.dev",
                Path("datasets") / dataset_name / "raw",
            )
            # convert to FastText format: each raw line starts with the class digit
            # (line[0]), followed by a separator, with the sentence from line[2] on
            for split in ["train", "dev", "test"]:
                with (data_folder / f"{split}.txt").open("w", encoding="utf-8") as train_file, (
                    data_folder / "raw" / f"stsa.fine.{split}"
                ).open(encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__{line[0]} {line[2:]}")
        super().__init__(data_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class GLUE_COLA(ClassificationCorpus):
    """Corpus of Linguistic Acceptability from GLUE benchmark.

    see https://gluebenchmark.com/tasks

    The task is to predict whether an English sentence is grammatically
    correct. Additionally to the Corpus we have eval_dataset containing
    the unlabeled test data for Glue evaluation.
    """

    def __init__(
        self,
        label_type="acceptability",
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Tokenizer = SegtokTokenizer(),
        **corpusargs,
    ) -> None:
        """Instantiates CoLA dataset.

        :param label_type: Name of the label to use (default 'acceptability').
        :param base_path: Provide this only if you store the COLA corpus in a specific folder.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = "glue"
        data_folder = base_path / dataset_name
        # download data if necessary
        cola_path = "https://dl.fbaipublicfiles.com/glue/data/CoLA.zip"
        data_file = data_folder / "CoLA/train.txt"
        # if data is not downloaded yet, download it
        if not data_file.is_file():
            # get the zip file
            zipped_data_path = cached_path(cola_path, Path("datasets") / dataset_name)
            unpack_file(zipped_data_path, data_folder, mode="zip", keep=False)
            # move original .tsv files to another folder
            Path(data_folder / "CoLA/train.tsv").rename(data_folder / "CoLA/original/train.tsv")
            Path(data_folder / "CoLA/dev.tsv").rename(data_folder / "CoLA/original/dev.tsv")
            Path(data_folder / "CoLA/test.tsv").rename(data_folder / "CoLA/original/test.tsv")
            label_map = {0: "not_grammatical", 1: "grammatical"}
            # create train and dev splits in fasttext format
            # (raw TSV columns: 1 = acceptability label, 3 = sentence)
            for split in ["train", "dev"]:
                with open(data_folder / "CoLA" / (split + ".txt"), "a") as out_file, open(
                    data_folder / "CoLA" / "original" / (split + ".tsv")
                ) as in_file:
                    for line in in_file:
                        fields = line.rstrip().split("\t")
                        label = int(fields[1])
                        sentence = fields[3]
                        out_file.write(f"__label__{label_map[label]} {sentence}\n")
            # create eval_dataset file with no labels (test.tsv column 1 = sentence)
            with open(data_folder / "CoLA" / "eval_dataset.txt", "a") as out_file, open(
                data_folder / "CoLA" / "original" / "test.tsv"
            ) as in_file:
                for line in in_file:
                    fields = line.rstrip().split("\t")
                    sentence = fields[1]
                    out_file.write(f"{sentence}\n")
        super().__init__(data_folder / "CoLA", label_type=label_type, tokenizer=tokenizer, **corpusargs)
        # unlabeled GLUE test split, kept for producing benchmark submissions
        self.eval_dataset = ClassificationDataset(
            data_folder / "CoLA/eval_dataset.txt",
            label_type=label_type,
            allow_examples_without_labels=True,
            tokenizer=tokenizer,
            memory_mode="full",
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create eval prediction file.

        This function creates a tsv file with predictions of the eval_dataset (after calling
        classifier.predict(corpus.eval_dataset, label_name='acceptability')). The resulting file
        is called CoLA.tsv and is in the format required for submission to the Glue Benchmark.
        """
        folder_path = Path(folder_path)
        folder_path = folder_path / "CoLA.tsv"
        # the mapping is loop-invariant, so build it once instead of per datapoint
        # (matches the hoisted pattern used in GLUE_SST2.tsv_from_eval_dataset)
        reverse_label_map = {"grammatical": 1, "not_grammatical": 0}
        with open(folder_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                predicted_label = reverse_label_map[datapoint.get_labels("acceptability")[0].value]
                tsv_file.write(str(index) + "\t" + str(predicted_label) + "\n")
class GLUE_SST2(CSVClassificationCorpus):
    """Binary Stanford Sentiment Treebank corpus (SST-2) from the GLUE benchmark.
    see https://gluebenchmark.com/tasks
    """
    # numeric labels used in the raw TSV -> class names used in predictions
    label_map = {0: "negative", 1: "positive"}
    def __init__(
        self,
        label_type: str = "sentiment",
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        tokenizer: Tokenizer = SegtokTokenizer(),
        in_memory: bool = False,
        encoding: str = "utf-8",
        **datasetargs,
    ) -> None:
        """Instantiates the GLUE SST-2 corpus.
        :param label_type: Name of the label to use (default 'sentiment').
        :param base_path: Provide this only if you store the corpus in a specific folder.
        :param max_tokens_per_doc: Truncate documents to this many tokens (-1 = no limit).
        :param max_chars_per_doc: Truncate documents to this many characters (-1 = no limit).
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param in_memory: If True, keep the dataset in memory.
        :param encoding: Encoding of the TSV files (default utf-8).
        :param datasetargs: Other args passed through to CSVClassificationDataset.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = "SST-2"
        data_folder = base_path / dataset_name
        train_file = data_folder / "train.tsv"
        sst2_url = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"
        if not train_file.is_file():
            # download zip archive
            zipped_data_path = cached_path(sst2_url, data_folder)
            # unpack file in datasets directory (zip archive contains a directory named SST-2)
            unpack_file(zipped_data_path, data_folder.parent, "zip", False)
        # shared dataset options for train/dev and the unlabeled eval split below
        kwargs = dict(
            delimiter="\t",
            max_tokens_per_doc=max_tokens_per_doc,
            max_chars_per_doc=max_chars_per_doc,
            tokenizer=tokenizer,
            in_memory=in_memory,
            encoding=encoding,
            skip_header=True,
            **datasetargs,
        )
        super().__init__(
            name=dataset_name,
            data_folder=data_folder,
            label_type=label_type,
            column_name_map={0: "text", 1: "label"},
            train_file=train_file,
            dev_file=data_folder / "dev.tsv",
            **kwargs,
        )
        # unlabeled GLUE test split; column 0 holds the sentence index (used as a
        # stand-in label via label_type 'sentence_index'), column 1 the sentence
        eval_file = data_folder / "test.tsv"
        log.info("Evaluation (no labels): %s", eval_file)
        self.eval_dataset = CSVClassificationDataset(
            eval_file,
            label_type="sentence_index",
            column_name_map={
                0: "label_index",
                1: "text",
            },
            **kwargs,
        )
    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create eval prediction file.
        Writes SST-2.tsv ("index\\tprediction" header, one numeric prediction per line)
        in the format required for submission to the GLUE benchmark.
        """
        folder_path = Path(folder_path)
        folder_path = folder_path / "SST-2.tsv"
        # invert label_map once: class name -> numeric GLUE label
        reverse_label_map = {label_name: label_numerical for label_numerical, label_name in self.label_map.items()}
        with open(folder_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                predicted_label = reverse_label_map[datapoint.get_labels(self.eval_dataset.label_type)[0].value]
                tsv_file.write(f"{index}\t{predicted_label}\n")
class GO_EMOTIONS(ClassificationCorpus):
    """GoEmotions dataset containing 58k Reddit comments labeled with 27 emotion categories.
    see https://github.com/google-research/google-research/tree/master/goemotions
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SegtokTokenizer(),
        memory_mode: str = "partial",
        **corpusargs,
    ) -> None:
        """Initializes the GoEmotions corpus.
        Parameters
        ----------
        base_path: Union[str, Path]
            Provide this only if you want to store the corpus in a specific folder, otherwise use default.
        tokenizer: Union[bool, Tokenizer]
            Specify which tokenizer to use, the default is SegtokTokenizer().
        memory_mode: str
            Set to what degree to keep corpus in memory ('full', 'partial' or 'disk'). Use 'full'
            if full corpus and all embeddings fits into memory for speedups during training. Otherwise use 'partial' and if
            even this is too much for your memory, use 'disk'.
        """
        # numeric label ids used in the raw TSV files -> emotion names
        label_name_map = {
            "0": "ADMIRATION",
            "1": "AMUSEMENT",
            "2": "ANGER",
            "3": "ANNOYANCE",
            "4": "APPROVAL",
            "5": "CARING",
            "6": "CONFUSION",
            "7": "CURIOSITY",
            "8": "DESIRE",
            "9": "DISAPPOINTMENT",
            "10": "DISAPPROVAL",
            "11": "DISGUST",
            "12": "EMBARRASSMENT",
            "13": "EXCITEMENT",
            "14": "FEAR",
            "15": "GRATITUDE",
            "16": "GRIEF",
            "17": "JOY",
            "18": "LOVE",
            "19": "NERVOUSNESS",
            "20": "OPTIMISM",
            "21": "PRIDE",
            "22": "REALIZATION",
            "23": "RELIEF",
            "24": "REMORSE",
            "25": "SADNESS",
            "26": "SURPRISE",
            "27": "NEUTRAL",
        }
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        # default dataset folder is the cache root
        data_folder = base_path / dataset_name
        # download data if necessary; train.txt doubles as the "already converted" marker
        if not (data_folder / "train.txt").is_file():
            # download datasets if necessary
            goemotions_url = "https://raw.githubusercontent.com/google-research/google-research/master/goemotions/data/"
            for name in ["train.tsv", "test.tsv", "dev.tsv"]:
                cached_path(goemotions_url + name, Path("datasets") / dataset_name / "raw")
            # create dataset directory if necessary
            if not os.path.exists(data_folder):
                os.makedirs(data_folder)
            data_path = flair.cache_root / "datasets" / dataset_name / "raw"
            # create correctly formatted txt files
            # (raw TSV columns used here: 0 = comment text, 1 = comma-separated label ids)
            for name in ["train", "test", "dev"]:
                with (data_folder / (name + ".txt")).open("w", encoding="utf-8") as txt_file, (
                    data_path / (name + ".tsv")
                ).open(encoding="utf-8") as tsv_file:
                    lines = tsv_file.readlines()
                    for line in lines:
                        row = line.split("\t")
                        text = row[0]
                        # multiple labels are possible, so one "__label__<id> " prefix is
                        # emitted per label before the text (FastText multi-label format)
                        labels = row[1].split(",")
                        label_string = ""
                        for label in labels:
                            label_string += "__label__"
                            label_string += label
                            label_string += " "
                        txt_file.write(f"{label_string}{text}\n")
        super().__init__(
            data_folder,
            label_type="emotion",
            tokenizer=tokenizer,
            memory_mode=memory_mode,
            label_name_map=label_name_map,
            **corpusargs,
        )
class TREC_50(ClassificationCorpus):
    """The TREC Question Classification Corpus, classifying questions into 50 fine-grained answer types."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode="full",
        **corpusargs,
    ) -> None:
        """Instantiates TREC Question Classification Corpus with 50 fine-grained classes.

        :param base_path: Provide this only if you store the TREC corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # fetch the raw label files (cached_path is a no-op if already cached)
        trec_path = "https://cogcomp.seas.upenn.edu/Data/QA/QC/"
        original_filenames = ["train_5500.label", "TREC_10.label"]
        new_filenames = ["train.txt", "test.txt"]
        for original_filename in original_filenames:
            cached_path(f"{trec_path}{original_filename}", Path("datasets") / dataset_name / "original")
        # convert to FastText format only if not done yet
        if not (data_folder / new_filenames[0]).is_file():
            for source_name, target_name in zip(original_filenames, new_filenames):
                source_path = data_folder / "original" / source_name
                target_path = data_folder / target_name
                with source_path.open(encoding="latin1") as reader, target_path.open(
                    "w", encoding="utf-8"
                ) as writer:
                    for raw_line in reader:
                        tokens = raw_line.rstrip().split()
                        # first token is the raw label (e.g. NUM:dist), the rest is the
                        # question; TREC-50 keeps the full fine-grained label as-is
                        writer.write(f"__label__{tokens[0]} {' '.join(tokens[1:])}\n")
        super().__init__(data_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class TREC_6(ClassificationCorpus):
    """The TREC Question Classification Corpus, classifying questions into 6 coarse-grained answer types."""
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode="full",
        **corpusargs,
    ) -> None:
        """Instantiates TREC Question Classification Corpus with 6 classes.
        :param base_path: Provide this only if you store the TREC corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # download data if necessary (cached_path is a no-op when already cached)
        trec_path = "https://cogcomp.seas.upenn.edu/Data/QA/QC/"
        original_filenames = ["train_5500.label", "TREC_10.label"]
        new_filenames = ["train.txt", "test.txt"]
        for original_filename in original_filenames:
            cached_path(f"{trec_path}{original_filename}", Path("datasets") / dataset_name / "original")
        data_file = data_folder / new_filenames[0]
        if not data_file.is_file():
            for original_filename, new_filename in zip(original_filenames, new_filenames):
                with (data_folder / "original" / original_filename).open(encoding="latin1") as open_fp, (
                    data_folder / new_filename
                ).open("w", encoding="utf-8") as write_fp:
                    for line in open_fp:
                        line = line.rstrip()
                        fields = line.split()
                        old_label = fields[0]
                        question = " ".join(fields[1:])
                        # Create flair compatible labels
                        # TREC-6 : NUM:dist -> __label__NUM
                        # TREC-50: NUM:dist -> __label__NUM:dist
                        new_label = "__label__"
                        # keep only the coarse part before the colon for TREC-6
                        new_label += old_label.split(":")[0]
                        write_fp.write(f"{new_label} {question}\n")
        super().__init__(
            data_folder, label_type="question_class", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class YAHOO_ANSWERS(ClassificationCorpus):
    """The YAHOO Question Classification Corpus, classifying questions into 10 coarse-grained answer types."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode="partial",
        **corpusargs,
    ) -> None:
        """Instantiates YAHOO Question Classification Corpus with 10 classes.

        :param base_path: Provide this only if you store the YAHOO corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'partial' by default since this is a rather big corpus. Can also be 'full' or 'none'.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # download data if necessary
        url = "https://s3.amazonaws.com/fast-ai-nlp/yahoo_answers_csv.tgz"
        # numeric CSV labels -> human-readable class names
        label_map = {
            "1": "Society_&_Culture",
            "2": "Science_&_Mathematics",
            "3": "Health",
            "4": "Education_&_Reference",
            "5": "Computers_&_Internet",
            "6": "Sports",
            "7": "Business_&_Finance",
            "8": "Entertainment_&_Music",
            "9": "Family_&_Relationships",
            "10": "Politics_&_Government",
        }
        original = flair.cache_root / "datasets" / dataset_name / "original"
        if not (data_folder / "train.txt").is_file():
            cached_path(url, original)
            import tarfile

            # extract only the train/test CSVs; use a context manager so the archive
            # handle is always closed (the previous code leaked the open tarfile)
            with tarfile.open(original / "yahoo_answers_csv.tgz", "r:gz") as tar:
                members = [
                    member
                    for member in tar.getmembers()
                    if "test.csv" in member.name or "train.csv" in member.name
                ]
                tar.extractall(original, members=members)
            # convert each CSV split to FastText format
            # (columns used: 0 = numeric class id, 1 = question title)
            for name in ["train", "test"]:
                with (original / "yahoo_answers_csv" / (name + ".csv")).open(encoding="utf-8") as file, (
                    data_folder / (name + ".txt")
                ).open("w", encoding="utf-8") as writer:
                    reader = csv.reader(file)
                    for row in reader:
                        writer.write("__label__" + label_map[row[0]] + " " + row[1] + "\n")
        super().__init__(
            data_folder, label_type="question_type", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class GERMEVAL_2018_OFFENSIVE_LANGUAGE(ClassificationCorpus):
    """GermEval 2018 corpus for identification of offensive language.
    Classifying German tweets into 2 coarse-grained categories OFFENSIVE
    and OTHER or 4 fine-grained categories ABUSE, INSULT, PROFANITY and
    OTHER.
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SegtokTokenizer(),
        memory_mode: str = "full",
        fine_grained_classes: bool = False,
        **corpusargs,
    ) -> None:
        """Instantiates GermEval 2018 Offensive Language Classification Corpus.
        :param base_path: Provide this only if you store the Offensive Language corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param fine_grained_classes: Set to True to load the dataset with 4 fine-grained classes
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # download data if necessary (cached_path is a no-op when already cached)
        offlang_path = "https://raw.githubusercontent.com/uds-lsv/GermEval-2018-Data/master/"
        original_filenames = ["germeval2018.training.txt", "germeval2018.test.txt"]
        new_filenames = ["train.txt", "test.txt"]
        for original_filename in original_filenames:
            cached_path(f"{offlang_path}{original_filename}", Path("datasets") / dataset_name / "original")
        # coarse- and fine-grained variants get their own subfolders so both can coexist
        task_setting = "coarse_grained"
        if fine_grained_classes:
            task_setting = "fine_grained"
        task_folder = data_folder / task_setting
        data_file = task_folder / new_filenames[0]
        # create a separate directory for different tasks
        if not os.path.exists(task_folder):
            os.makedirs(task_folder)
        if not data_file.is_file():
            for original_filename, new_filename in zip(original_filenames, new_filenames):
                with (data_folder / "original" / original_filename).open(encoding="utf-8") as open_fp, (
                    data_folder / task_setting / new_filename
                ).open("w", encoding="utf-8") as write_fp:
                    for line in open_fp:
                        line = line.rstrip()
                        fields = line.split("\t")
                        # raw columns: 0 = tweet text, 1 = coarse label, 2 = fine label
                        tweet = fields[0]
                        old_label = fields[2] if task_setting == "fine_grained" else fields[1]
                        new_label = "__label__" + old_label
                        write_fp.write(f"{new_label} {tweet}\n")
        super().__init__(data_folder=task_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class COMMUNICATIVE_FUNCTIONS(ClassificationCorpus):
    """The Communicative Functions Classification Corpus.
    Classifying sentences from scientific papers into 39 communicative functions.
    """
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        memory_mode: str = "full",
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        **corpusargs,
    ) -> None:
        """Instantiates Communicative Functions Classification Corpus with 39 classes.
        :param base_path: Provide this only if you store the Communicative Functions data in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        original_filenames = ["background.tsv", "discussion.tsv", "introduction.tsv", "method.tsv", "result.tsv"]
        # download data if necessary (cached_path is a no-op when already cached)
        comm_path = "https://raw.githubusercontent.com/Alab-NII/FECFevalDataset/master/sentences/"
        for original_filename in original_filenames:
            cached_path(f"{comm_path}{original_filename}", Path("datasets") / dataset_name / "original")
        data_file = data_folder / "train.txt"
        if not data_file.is_file():  # check if new file already exists
            # merge all section files into a single FastText-format train file;
            # TSV columns used: 0 = label, 2 = sentence; spaces in labels become "_"
            with open(data_folder / "train.txt", "a+", encoding="utf-8") as write_fp:
                for original_filename in original_filenames[:4]:
                    with open(data_folder / "original" / original_filename, encoding="utf-8") as open_fp:
                        for line in open_fp:
                            liste = line.split("\t")
                            write_fp.write("__label__" + liste[0].replace(" ", "_") + " " + liste[2] + "\n")
                # result.tsv needs special handling: some labels carry a trailing
                # " (again)" marker, which is stripped via [:-8] (8 chars incl. the space)
                with open(data_folder / "original" / "result.tsv", encoding="utf-8") as open_fp:
                    for line in open_fp:
                        liste = line.split("\t")
                        if liste[0].split(" ")[-1] == "(again)":
                            write_fp.write("__label__" + liste[0][:-8].replace(" ", "_") + " " + liste[2] + "\n")
                        else:
                            write_fp.write("__label__" + liste[0].replace(" ", "_") + " " + liste[2] + "\n")
        super().__init__(
            data_folder, label_type="communicative_function", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
def _download_wassa_if_not_there(emotion, data_folder, dataset_name):
    """Download and reformat the WASSA-2017 files for one emotion, unless already present.

    For each of the train/dev/test splits, the raw ratings file is downloaded,
    rewritten into fasttext-style "__label__<label> <text>" lines and the raw
    download is removed again.
    """
    url_templates = {
        "train": "http://saifmohammad.com/WebDocs/EmoInt%20Train%20Data/{}-ratings-0to1.train.txt",
        "dev": "http://saifmohammad.com/WebDocs/EmoInt%20Dev%20Data%20With%20Gold/{}-ratings-0to1.dev.gold.txt",
        "test": "http://saifmohammad.com/WebDocs/EmoInt%20Test%20Gold%20Data/{}-ratings-0to1.test.gold.txt",
    }

    for split, template in url_templates.items():
        data_file = data_folder / f"{emotion}-{split}.txt"
        if data_file.is_file():
            continue

        path = cached_path(template.format(emotion), Path("datasets") / dataset_name)

        with open(path, encoding="UTF-8") as f, open(data_file, "w", encoding="UTF-8") as out:
            next(f)  # skip the header line
            for line in f:
                fields = line.split("\t")
                out.write(f"__label__{fields[3].rstrip()} {fields[1]}\n")

        # the reformatted file is what the corpus reads; drop the raw download
        os.remove(path)
class WASSA_ANGER(ClassificationCorpus):
    """WASSA-2017 anger emotion-intensity corpus.

    see https://saifmohammad.com/WebPages/EmotionIntensity-SharedTask.html.
    """

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, tokenizer: Tokenizer = SegtokTokenizer(), **corpusargs
    ) -> None:
        """Instantiates WASSA-2017 anger emotion-intensity corpus.

        :param base_path: Provide this only if you store the WASSA corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # resolve where the corpus lives (flair cache unless the user supplied a folder)
        dataset_name = self.__class__.__name__.lower()
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = root / dataset_name

        # fetch and reformat the raw WASSA files on first use
        _download_wassa_if_not_there("anger", data_folder, dataset_name)

        super().__init__(data_folder, tokenizer=tokenizer, **corpusargs)
class WASSA_FEAR(ClassificationCorpus):
    """WASSA-2017 fear emotion-intensity corpus.

    see https://saifmohammad.com/WebPages/EmotionIntensity-SharedTask.html.
    """

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, tokenizer: Tokenizer = SegtokTokenizer(), **corpusargs
    ) -> None:
        """Instantiates WASSA-2017 fear emotion-intensity corpus.

        :param base_path: Provide this only if you store the WASSA corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # data lives in the flair cache unless the user supplied a folder
        dataset_name = self.__class__.__name__.lower()
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = root / dataset_name

        # download and reformat the raw WASSA files if not yet present
        _download_wassa_if_not_there("fear", data_folder, dataset_name)

        super().__init__(data_folder, tokenizer=tokenizer, **corpusargs)
class WASSA_JOY(ClassificationCorpus):
    """WASSA-2017 joy emotion-intensity dataset corpus.

    see https://saifmohammad.com/WebPages/EmotionIntensity-SharedTask.html
    """

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, tokenizer: Tokenizer = SegtokTokenizer(), **corpusargs
    ) -> None:
        """Instantiates WASSA-2017 joy emotion-intensity corpus.

        :param base_path: Provide this only if you store the WASSA corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # data lives in the flair cache unless the user supplied a folder
        dataset_name = self.__class__.__name__.lower()
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = root / dataset_name

        # download and reformat the raw WASSA files if not yet present
        _download_wassa_if_not_there("joy", data_folder, dataset_name)

        super().__init__(data_folder, tokenizer=tokenizer, **corpusargs)
class WASSA_SADNESS(ClassificationCorpus):
    """WASSA-2017 sadness emotion-intensity corpus.

    see https://saifmohammad.com/WebPages/EmotionIntensity-SharedTask.html.
    """

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, tokenizer: Tokenizer = SegtokTokenizer(), **corpusargs
    ) -> None:
        """Instantiates WASSA-2017 sadness emotion-intensity dataset.

        :param base_path: Provide this only if you store the WASSA corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # data lives in the flair cache unless the user supplied a folder
        dataset_name = self.__class__.__name__.lower()
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = root / dataset_name

        # download and reformat the raw WASSA files if not yet present
        _download_wassa_if_not_there("sadness", data_folder, dataset_name)

        super().__init__(data_folder, tokenizer=tokenizer, **corpusargs)
| 88,472 | 41.049905 | 134 | py |
flair | flair-master/flair/visual/training_curves.py | import csv
import logging
import math
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Union
import matplotlib.pyplot as plt
import numpy as np
# header for 'weights.txt'
WEIGHT_NAME = 1
WEIGHT_NUMBER = 2
WEIGHT_VALUE = 3
log = logging.getLogger("flair")
class Plotter:
    """Plots training parameters (loss, f-score, and accuracy) and training weights over time.

    Input files are the output files 'loss.tsv' and 'weights.txt' from
    training either a sequence tagger or text classification model.
    """

    @staticmethod
    def _extract_evaluation_data(file_name: Union[str, Path], score: str = "F1") -> dict:
        """Parse a loss.tsv file and collect the per-epoch scores for each split.

        :param file_name: path to the loss.tsv written during training
        :param score: name of the metric column to extract (case-insensitive, e.g. "F1")
        :return: dict mapping split name ("train"/"dev"/"test") to {"loss": [...], "score": [...]}
        """
        file_name = Path(file_name)

        training_curves: Dict[str, Dict[str, List[float]]] = {
            "train": {"loss": [], "score": []},
            "test": {"loss": [], "score": []},
            "dev": {"loss": [], "score": []},
        }

        with open(file_name) as f:
            tsvin = csv.reader(f, delimiter="\t")

            # determine the column index of the requested metric for
            # train, dev and test split from the header row
            row = next(tsvin)

            score = score.upper()

            if f"TEST_{score}" not in row:
                log.warning("-" * 100)
                log.warning(f"WARNING: No {score} found for test split in this data.")
                log.warning(f"Are you sure you want to plot {score} and not another value?")
                log.warning("-" * 100)

            TRAIN_SCORE = row.index(f"TRAIN_{score}") if f"TRAIN_{score}" in row else None
            DEV_SCORE = row.index(f"DEV_{score}") if f"DEV_{score}" in row else None
            TEST_SCORE = row.index(f"TEST_{score}") if f"TEST_{score}" in row else None

            # then get all relevant values from the tsv; "_" marks a missing value
            for row in tsvin:
                if TRAIN_SCORE is not None and row[TRAIN_SCORE] != "_":
                    training_curves["train"]["score"].append(float(row[TRAIN_SCORE]))

                if DEV_SCORE is not None and row[DEV_SCORE] != "_":
                    training_curves["dev"]["score"].append(float(row[DEV_SCORE]))

                if TEST_SCORE is not None and row[TEST_SCORE] != "_":
                    training_curves["test"]["score"].append(float(row[TEST_SCORE]))

        return training_curves

    @staticmethod
    def _extract_weight_data(file_name: Union[str, Path]) -> dict:
        """Parse a weights.txt file into {weight name: {param number: [values over time]}}."""
        if isinstance(file_name, str):
            file_name = Path(file_name)

        weights: Dict[str, Dict[str, List[float]]] = defaultdict(lambda: defaultdict(list))

        with open(file_name) as f:
            tsvin = csv.reader(f, delimiter="\t")

            for row in tsvin:
                name = row[WEIGHT_NAME]
                param = row[WEIGHT_NUMBER]
                value = float(row[WEIGHT_VALUE])

                weights[name][param].append(value)

        return weights

    @staticmethod
    def _extract_learning_rate(file_name: Union[str, Path]):
        """Parse a learning-rate finder tsv and return (learning_rates, train_losses)."""
        if isinstance(file_name, str):
            file_name = Path(file_name)

        lrs = []
        losses = []

        with open(file_name) as f:
            tsvin = csv.reader(f, delimiter="\t")
            row = next(tsvin)
            LEARNING_RATE = row.index("LEARNING_RATE")
            TRAIN_LOSS = row.index("TRAIN_LOSS")

            # then get all relevant values from the tsv; "_" marks a missing value
            for row in tsvin:
                if row[TRAIN_LOSS] != "_":
                    losses.append(float(row[TRAIN_LOSS]))
                if row[LEARNING_RATE] != "_":
                    lrs.append(float(row[LEARNING_RATE]))

        return lrs, losses

    def plot_weights(self, file_name: Union[str, Path]):
        """Plot the per-parameter weight traces from weights.txt as a grid of subplots.

        Saves the plot as weights.png next to the input file.
        """
        file_name = Path(file_name)

        weights = self._extract_weight_data(file_name)

        total = len(weights)
        columns = 2
        rows = max(2, int(math.ceil(total / columns)))

        figsize = (4 * columns, 3 * rows)

        # fix: the previous version created an extra empty figure via plt.figure()
        # and closed *that* one at the end, leaking the actual subplot figure
        fig, axarr = plt.subplots(rows, columns, figsize=figsize)

        c = 0
        r = 0

        for name, values in weights.items():
            # plot all parameter traces of this weight into one subplot
            axarr[r, c].set_title(name, fontsize=6)
            for _, v in values.items():
                axarr[r, c].plot(np.arange(0, len(v)), v, linewidth=0.35)
            axarr[r, c].set_yticks([])
            axarr[r, c].set_xticks([])
            c += 1
            if c == columns:
                c = 0
                r += 1

        # blank out the remaining unused subplots in the grid
        while r != rows and c != columns:
            axarr[r, c].set_yticks([])
            axarr[r, c].set_xticks([])
            c += 1
            if c == columns:
                c = 0
                r += 1

        # save plots
        fig.subplots_adjust(hspace=0.5)
        plt.tight_layout(pad=1.0)
        path = file_name.parent / "weights.png"
        plt.savefig(path, dpi=300)
        log.info(f"Weights plots are saved in {path}")  # to let user know the path of the save plots
        plt.close(fig)

    def plot_training_curves(self, file_name: Union[str, Path], plot_values: Union[List[str], None] = None):
        """Plot one subplot per requested value over training epochs.

        :param file_name: path to the loss.tsv written during training
        :param plot_values: metric names to plot; defaults to ["loss", "F1"]

        Saves the plot as training.png next to the input file.
        """
        # avoid a mutable default argument; None means the standard pair
        if plot_values is None:
            plot_values = ["loss", "F1"]

        file_name = Path(file_name)

        fig = plt.figure(figsize=(15, 10))

        for plot_no, plot_value in enumerate(plot_values):
            training_curves = self._extract_evaluation_data(file_name, plot_value)

            plt.subplot(len(plot_values), 1, plot_no + 1)
            if training_curves["train"]["score"]:
                x = np.arange(0, len(training_curves["train"]["score"]))
                plt.plot(x, training_curves["train"]["score"], label=f"training {plot_value}")
            if training_curves["dev"]["score"]:
                x = np.arange(0, len(training_curves["dev"]["score"]))
                plt.plot(x, training_curves["dev"]["score"], label=f"validation {plot_value}")
            if training_curves["test"]["score"]:
                x = np.arange(0, len(training_curves["test"]["score"]))
                plt.plot(x, training_curves["test"]["score"], label=f"test {plot_value}")
            plt.legend(bbox_to_anchor=(1.04, 0), loc="lower left", borderaxespad=0)
            plt.ylabel(plot_value)
            plt.xlabel("epochs")

        # save plots
        plt.tight_layout(pad=1.0)
        path = file_name.parent / "training.png"
        plt.savefig(path, dpi=300)
        log.info(f"Loss and F1 plots are saved in {path}")  # to let user know the path of the save plots
        plt.show(block=False)  # to have the plots displayed when user run this module
        plt.close(fig)

    def plot_learning_rate(self, file_name: Union[str, Path], skip_first: int = 10, skip_last: int = 5):
        """Plot loss against learning rate (log scale) from a learning-rate finder run.

        :param file_name: path to the tsv containing LEARNING_RATE and TRAIN_LOSS columns
        :param skip_first: number of initial (noisy) data points to drop
        :param skip_last: number of final (diverging) data points to drop

        Saves the plot as learning_rate.png next to the input file.
        """
        file_name = Path(file_name)

        lrs, losses = self._extract_learning_rate(file_name)
        lrs = lrs[skip_first:-skip_last] if skip_last > 0 else lrs[skip_first:]
        losses = losses[skip_first:-skip_last] if skip_last > 0 else losses[skip_first:]

        fig, ax = plt.subplots(1, 1)
        ax.plot(lrs, losses)
        ax.set_ylabel("Loss")
        ax.set_xlabel("Learning Rate")
        ax.set_xscale("log")
        ax.xaxis.set_major_formatter(plt.FormatStrFormatter("%.0e"))

        # save plot
        plt.tight_layout(pad=1.0)
        path = file_name.parent / "learning_rate.png"
        plt.savefig(path, dpi=300)
        log.info(f"Learning_rate plots are saved in {path}")  # to let user know the path of the save plots
        plt.show(block=True)  # to have the plots displayed when user run this module
        plt.close(fig)
| 7,518 | 35.323671 | 107 | py |
flair | flair-master/flair/visual/tree_printer.py | from typing import List
from pptree import print_tree
from flair.data import Sentence, Token
class NodeToken:
    """Tree node wrapping a token for pptree-based dependency-tree printing.

    :param token: the token this node represents
    :param tag_type: label type whose value is shown next to the token text
    """

    def __init__(self, token: Token, tag_type: str) -> None:
        self.token: Token = token
        self.tag_type: str = tag_type
        # child nodes, i.e. tokens whose dependency head is this token
        self.children: List[NodeToken] = []

    def set_head(self, parent):
        """Attach this node as a child of its dependency head *parent*."""
        parent.children.append(self)

    # backward-compatible alias: the method was originally published under this
    # misspelled name and existing callers (e.g. tree_printer) still use it
    set_haed = set_head

    def __str__(self) -> str:
        # renders e.g. " text(LABEL) "; assumes the token carries at least one
        # label of tag_type (IndexError otherwise)
        return f" {self.token.text}({self.token.get_labels(self.tag_type)[0].value}) "
def tree_printer(sentence: Sentence, tag_type: str):
    """Print the dependency tree of a sentence using pptree.

    Assumes the sentence contains a token with head_id == 0 (the root).
    """
    nodes = [NodeToken(token, tag_type) for token in sentence]
    for node in nodes:
        if node.token.head_id == 0:
            # the token pointing at the virtual root becomes the tree root
            root_node = node
        else:
            # attach this node to the node wrapping its syntactic head
            head_token = node.token.get_head()
            for candidate in nodes:
                if candidate.token == head_token:
                    node.set_haed(candidate)
    print_tree(root_node, "children")
| 891 | 26.030303 | 86 | py |
flair | flair-master/flair/visual/activations.py | import numpy
class Highlighter:
    """Renders per-character activation values as HTML with a color gradient."""

    def __init__(self) -> None:
        # 25-step hue wheel from red through green/blue and back to red
        self.color_map = [
            "#ff0000",
            "#ff4000",
            "#ff8000",
            "#ffbf00",
            "#ffff00",
            "#bfff00",
            "#80ff00",
            "#40ff00",
            "#00ff00",
            "#00ff40",
            "#00ff80",
            "#00ffbf",
            "#00ffff",
            "#00bfff",
            "#0080ff",
            "#0040ff",
            "#0000ff",
            "#4000ff",
            "#8000ff",
            "#bf00ff",
            "#ff00ff",
            "#ff00bf",
            "#ff0080",
            "#ff0040",
            "#ff0000",
        ]

    def highlight(self, activation, text):
        """Return HTML coloring each character of *text* by its activation value.

        :param activation: 1-d tensor with one activation value per character of *text*
        :param text: the text to render
        :return: HTML string with one colored <span> per character, line-broken every 100 characters
        """
        activation = activation.detach().cpu().numpy()

        step_size = (max(activation) - min(activation)) / len(self.color_map)

        lookup = numpy.array(list(numpy.arange(min(activation), max(activation), step_size)))

        colors = []
        for _i, act in enumerate(activation):
            try:
                colors.append(self.color_map[numpy.where(act > lookup)[0][-1]])
            except IndexError:
                # act equals the minimum, so no lookup bucket lies below it.
                # fix: the previous code appended the integer index itself,
                # which produced invalid CSS ("background-color: 24") -- append
                # the intended color string instead
                colors.append(self.color_map[-1])

        str_ = "<br><br>"
        for i, (char, color) in enumerate(zip(list(text), colors)):
            str_ += self._render(char, color)
            if i % 100 == 0 and i > 0:
                str_ += "<br>"

        return str_

    def highlight_selection(self, activations, text, file_="resources/data/highlight.html", n=10):
        """Render *n* randomly chosen activation columns of *activations* to an HTML file.

        :param activations: 2-d array of activations, one column per hidden unit
        :param text: the text to render for each chosen column
        :param file_: output HTML file path
        :param n: number of columns to sample
        """
        ix = numpy.random.default_rng().choice(activations.shape[1], size=n)
        rendered = ""
        for i in ix:
            rendered += self.highlight(activations[:, i], text)
        with open(file_, "w") as f:
            f.write(rendered)

    @staticmethod
    def _render(char, color):
        """Wrap a single character in a colored <span>."""
        return f'<span style="background-color: {color}">{char}</span>'
| 1,910 | 25.178082 | 98 | py |
flair | flair-master/flair/visual/manifold.py | import numpy
import tqdm
from sklearn.manifold import TSNE
class _Transform:
def __init__(self) -> None:
pass
def fit(self, X):
return self.transform.fit_transform(X)
class tSNE(_Transform):
    """2-D t-SNE projection with fixed hyperparameters."""

    def __init__(self) -> None:
        super().__init__()

        self.transform = TSNE(
            n_components=2,
            verbose=1,
            perplexity=40,
            n_iter=300,
        )
class Visualizer:
    """Projects word or character embeddings to 2-D with t-SNE and renders an interactive HTML scatter plot."""

    def visualize_word_emeddings(self, embeddings, sentences, output_file):
        """Embed all tokens, reduce them to 2-D and write an interactive plot to output_file."""
        points = self.prepare_word_embeddings(embeddings, sentences)
        hover_texts = self.word_contexts(sentences)

        reduced = tSNE().fit(points)

        self.visualize(reduced, hover_texts, output_file)

    def visualize_char_emeddings(self, embeddings, sentences, output_file):
        """Embed all characters, reduce them to 2-D and write an interactive plot to output_file."""
        points = self.prepare_char_embeddings(embeddings, sentences)
        hover_texts = self.char_contexts(sentences)

        reduced = tSNE().fit(points)

        self.visualize(reduced, hover_texts, output_file)

    @staticmethod
    def prepare_word_embeddings(embeddings, sentences):
        """Run the embedder over all sentences and stack one row per token."""
        rows = []
        for sentence in tqdm.tqdm(sentences):
            embeddings.embed(sentence)

            rows.extend(token.embedding.detach().numpy()[None, :] for token in sentence)

        return numpy.concatenate(rows, 0)

    @staticmethod
    def word_contexts(sentences):
        """Build an HTML snippet per token: the token in red plus up to four words on either side."""
        snippets = []

        for sentence in sentences:
            words = [tok.text for tok in sentence.tokens]

            for idx, word in enumerate(words):
                left = " ".join(words[max(idx - 4, 0) : idx])
                right = " ".join(words[idx + 1 : min(len(words), idx + 5)])
                highlighted = f'<b><font color="red"> {word} </font></b>'
                snippets.append("<p>" + left + highlighted + right + "</p>")

        return snippets

    @staticmethod
    def prepare_char_embeddings(embeddings, sentences):
        """Run the character LM over each sentence and stack its hidden states."""
        rows = []
        for sentence in tqdm.tqdm(sentences):
            text = " ".join(x.text for x in sentence)

            hidden = embeddings.lm.get_representation([text], "", "")
            rows.append(hidden.squeeze().detach().numpy())

        return numpy.concatenate(rows, 0)

    @staticmethod
    def char_contexts(sentences):
        """Build an HTML snippet per character: the character highlighted plus ~30 characters of context."""
        snippets = []

        for sentence in sentences:
            text = " ".join(token.text for token in sentence)

            for idx, ch in enumerate(text):
                left = "".join(text[max(idx - 30, 0) : idx])
                right = "".join(text[idx + 1 : min(len(text), idx + 30)])
                snippets.append(left + f'<span style="background-color: yellow"><b>{ch}</b></span>' + right)

        return snippets

    @staticmethod
    def visualize(X, contexts, file):
        """Render a 2-D scatter plot with per-point HTML tooltips and save it as HTML."""
        import matplotlib.pyplot
        import mpld3

        figure, axes = matplotlib.pyplot.subplots()

        axes.grid(True, alpha=0.3)

        scatter = axes.plot(X[:, 0], X[:, 1], "o", color="b", mec="k", ms=5, mew=1, alpha=0.6)

        axes.set_xlabel("x")
        axes.set_ylabel("y")

        axes.set_title("Hover mouse to reveal context", size=20)

        tooltip = mpld3.plugins.PointHTMLTooltip(scatter[0], contexts, voffset=10, hoffset=10)
        mpld3.plugins.connect(figure, tooltip)

        mpld3.save_html(figure, file)
| 3,350 | 26.694215 | 93 | py |
flair | flair-master/flair/visual/__init__.py | from .activations import Highlighter
from .manifold import Visualizer
__all__ = ["Highlighter", "Visualizer"]
| 111 | 21.4 | 39 | py |
flair | flair-master/flair/visual/ner_html.py | import html
from typing import List, Union
from flair.data import Sentence
TAGGED_ENTITY = """
<mark class="entity" style="background: {color}; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 3; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone">
{entity}
<span style="font-size: 0.8em; font-weight: bold; line-height: 3; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">{label}</span>
</mark>
"""
PARAGRAPH = """<p>{sentence}</p>"""
HTML_PAGE = """
<!DOCTYPE html>
<html lang="en">
<head>
<title>{title}</title>
</head>
<body style="font-size: 16px; font-family: 'Segoe UI'; padding: 4rem 2rem">{text}</body>
</html>
"""
def split_to_spans(s: Sentence, label_name="ner"):
orig = s.to_original_text()
last_idx = 0
spans = []
tagged_ents = s.get_labels(label_name)
for ent in tagged_ents:
if last_idx != ent.data_point.start_position:
spans.append((orig[last_idx : ent.data_point.start_position], None))
spans.append((ent.data_point.text, ent.value))
assert ent.data_point.end_position is not None
last_idx = ent.data_point.end_position
if last_idx < len(orig) - 1:
spans.append((orig[last_idx : len(orig)], None))
return spans
def render_ner_html(
sentences: Union[List[Sentence], Sentence],
title: str = "Flair",
colors={
"PER": "#F7FF53",
"ORG": "#E8902E",
"LOC": "#FF40A3",
"MISC": "#4647EB",
"O": "#ddd",
},
default_color: str = "#ddd",
wrap_page=True,
label_name="ner",
) -> str:
"""Create the html code to visualize some sentences.
:param sentences: single sentence or list of sentences to convert to HTML
:param title: title of the HTML page
:param colors: dict where keys are tags and values are color HTML codes
:param default_color: color to use if colors parameter is missing a tag
:param wrap_page: if True method returns result of processing sentences wrapped by <html> and <body> tags, otherwise - without these tags # noqa: E501
:return: HTML as a string
"""
if isinstance(sentences, Sentence):
sentences = [sentences]
sentences_html = []
for s in sentences:
spans = split_to_spans(s, label_name=label_name)
spans_html = []
for fragment, tag in spans:
escaped_fragment = html.escape(fragment).replace("\n", "<br/>")
if tag:
escaped_fragment = TAGGED_ENTITY.format(
entity=escaped_fragment,
label=tag,
color=colors.get(tag, default_color),
)
spans_html.append(escaped_fragment)
line = PARAGRAPH.format(sentence="".join(spans_html))
sentences_html.append(line)
final_text = "".join(sentences_html)
if wrap_page:
return HTML_PAGE.format(text=final_text, title=title)
else:
return final_text
| 3,042 | 32.811111 | 195 | py |
flair | flair-master/flair/trainers/language_model_trainer.py | import datetime
import logging
import math
import random
import time
from pathlib import Path
from typing import Iterable, Optional, Type, Union
import torch
from torch import cuda
from torch.optim import AdamW, Optimizer
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.sgd import SGD
from torch.utils.data import DataLoader, Dataset
from flair.optim import SGDW, ReduceLRWDOnPlateau
try:
from apex import amp
except ImportError:
amp = None
import flair
from flair.data import Dictionary
from flair.models import LanguageModel
from flair.training_utils import add_file_handler
log = logging.getLogger("flair")
class TextDataset(Dataset):
    """Dataset that lazily reads text files and encodes them as tensors of dictionary ids.

    A directory path is treated as a pre-split corpus (one shard per file);
    a file path yields a single-item dataset.
    """

    def __init__(
        self,
        path: Union[str, Path],
        dictionary: Dictionary,
        expand_vocab: bool = False,
        forward: bool = True,
        split_on_char: bool = True,
        random_case_flip: bool = True,
        document_delimiter: str = "\n",
        shuffle: bool = True,
    ) -> None:
        path = Path(path)
        assert path.exists()

        self.path = path
        self.dictionary = dictionary
        self.split_on_char = split_on_char
        self.forward = forward
        self.random_case_flip = random_case_flip
        self.expand_vocab = expand_vocab
        self.document_delimiter = document_delimiter
        self.shuffle = shuffle

        # one shard per file for directories, otherwise the single file itself
        self.files = sorted(f for f in path.iterdir() if f.exists()) if path.is_dir() else [path]

    def __len__(self) -> int:
        return len(self.files)

    def __getitem__(self, index=0) -> torch.Tensor:
        """Tokenizes a text file on character basis."""
        if type(self.files[index]) is str:
            self.files[index] = Path(self.files[index])
        assert self.files[index].exists()

        with self.files[index].open("r", encoding="utf-8") as fin:
            documents = fin.read().split(self.document_delimiter)
            text_lines: Iterable[str] = (doc + self.document_delimiter for doc in documents if doc)
            if self.random_case_flip:
                text_lines = map(self.random_casechange, text_lines)
            lines = [list(line) if self.split_on_char else line.split() for line in text_lines]

        log.info(f"read text file with {len(lines)} lines")

        if self.shuffle:
            random.shuffle(lines)
            log.info("shuffled")

        # optionally grow the dictionary with all symbols seen in this shard
        if self.expand_vocab:
            for chars in lines:
                for char in chars:
                    self.dictionary.add_item(char)

        token_ids = torch.tensor(
            [self.dictionary.get_idx_for_item(char) for chars in lines for char in chars],
            dtype=torch.long,
        )
        # a backward language model reads the stream in reverse
        if not self.forward:
            token_ids = token_ids.flip(0)
        return token_ids

    @staticmethod
    def random_casechange(line: str) -> str:
        """With probability 1/100 lower-case the line, 1/100 upper-case it, otherwise keep it."""
        roll = random.randint(0, 99)
        if roll == 0:
            return line.lower()
        if roll == 1:
            return line.upper()
        return line
class TextCorpus:
    """Bundles the train/valid/test TextDatasets read from one corpus directory.

    Expects ``path`` to contain a ``train`` directory of shards plus single
    ``valid.txt`` and ``test.txt`` files.
    """

    def __init__(
        self,
        path: Union[Path, str],
        dictionary: Dictionary,
        forward: bool = True,
        character_level: bool = True,
        random_case_flip: bool = True,
        document_delimiter: str = "\n",
    ) -> None:
        self.dictionary: Dictionary = dictionary
        self.forward = forward
        self.split_on_char = character_level
        self.random_case_flip = random_case_flip
        self.document_delimiter: str = document_delimiter

        path = Path(path)

        # the train split is a directory of shards; keep it lazy
        self.train = TextDataset(
            path / "train",
            dictionary,
            expand_vocab=False,
            forward=self.forward,
            split_on_char=self.split_on_char,
            random_case_flip=self.random_case_flip,
            document_delimiter=self.document_delimiter,
            shuffle=True,
        )

        # valid and test are single files, so materialize their only tensor
        self.valid = TextDataset(
            path / "valid.txt",
            dictionary,
            expand_vocab=False,
            forward=self.forward,
            split_on_char=self.split_on_char,
            random_case_flip=self.random_case_flip,
            document_delimiter=document_delimiter,
            shuffle=False,
        )[0]

        self.test = TextDataset(
            path / "test.txt",
            dictionary,
            expand_vocab=False,
            forward=self.forward,
            split_on_char=self.split_on_char,
            random_case_flip=self.random_case_flip,
            document_delimiter=document_delimiter,
            shuffle=False,
        )[0]
class LanguageModelTrainer:
    """Trains a LanguageModel on a TextCorpus.

    Iterates over the training shards of the corpus, evaluates on the
    validation split after each shard, keeps the best model seen so far and
    finally reports the loss on the test split.
    """

    def __init__(
        self,
        model: LanguageModel,
        corpus: TextCorpus,
        optimizer: Type[Optimizer] = SGD,
        test_mode: bool = False,
        epoch: int = 0,
        split: int = 0,
        loss: float = 10000,
        optimizer_state: Optional[dict] = None,
    ) -> None:
        """Create a trainer.

        :param model: the language model to train
        :param corpus: corpus providing train/valid/test splits
        :param optimizer: optimizer class to instantiate (default SGD)
        :param test_mode: stored on the trainer but not otherwise used by this class
        :param epoch: epoch to resume from (used when restoring a checkpoint)
        :param split: training split index to resume from
        :param loss: best validation loss seen so far (used when resuming)
        :param optimizer_state: optimizer state dict to restore, if any
        """
        self.model: LanguageModel = model
        self.optimizer: Type[Optimizer] = optimizer
        self.corpus: TextCorpus = corpus
        self.test_mode: bool = test_mode
        self.loss_function = torch.nn.CrossEntropyLoss()
        # number of batches between progress log lines
        self.log_interval = 100
        self.epoch = epoch
        self.split = split
        self.loss = loss
        self.optimizer_state = optimizer_state

    def train(
        self,
        base_path: Union[Path, str],
        sequence_length: int,
        learning_rate: float = 20,
        mini_batch_size: int = 100,
        anneal_factor: float = 0.25,
        patience: int = 10,
        clip=0.25,
        max_epochs: int = 1000,
        checkpoint: bool = False,
        grow_to_sequence_length: int = 0,
        num_workers: int = 2,
        use_amp: bool = False,
        amp_opt_level: str = "O1",
        **kwargs,
    ):
        """Train the language model.

        :param base_path: directory in which checkpoints, best-lm.pt and loss.txt are written
        :param sequence_length: length of the BPTT sequences fed to the model
        :param learning_rate: initial learning rate passed to the optimizer
        :param mini_batch_size: number of parallel text streams per batch
        :param anneal_factor: factor by which the scheduler shrinks the learning rate
        :param patience: scheduler patience, measured in train splits without improvement
        :param clip: gradient-norm clipping threshold
        :param max_epochs: maximum number of passes over the corpus
        :param checkpoint: if True, write checkpoint.pt after every split
        :param grow_to_sequence_length: if larger than sequence_length, grow the
            sequence length by one per split up to this value
        :param num_workers: number of DataLoader workers for reading shards
        :param use_amp: use NVIDIA apex mixed-precision training if installed
        :param amp_opt_level: apex optimization level
        :param kwargs: additional keyword arguments passed to the optimizer constructor
        """
        if use_amp and amp is None:
            raise RuntimeError(
                "Failed to import apex. Please install apex from "
                "https://www.github.com/nvidia/apex "
                "to enable mixed-precision training."
            )
        # cast string to Path
        base_path = Path(base_path)
        number_of_splits: int = len(self.corpus.train)
        val_data = self._batchify(self.corpus.valid, mini_batch_size)
        # error message if the validation dataset is too small
        if val_data.size(0) == 1:
            raise RuntimeError(
                f"ERROR: Your validation dataset is too small. For your "
                f"mini_batch_size, the data needs to "
                f"consist of at least {mini_batch_size * 2} characters!"
            )
        base_path.mkdir(parents=True, exist_ok=True)
        loss_txt = base_path / "loss.txt"
        savefile = base_path / "best-lm.pt"
        try:
            log_handler = add_file_handler(log, base_path / "training.log")
            best_val_loss = self.loss
            kwargs["lr"] = learning_rate
            optimizer = self.optimizer(self.model.parameters(), **kwargs)
            if self.optimizer_state is not None:
                optimizer.load_state_dict(self.optimizer_state)
            # decoupled-weight-decay optimizers need the matching scheduler
            if isinstance(optimizer, (AdamW, SGDW)):
                scheduler: ReduceLROnPlateau = ReduceLRWDOnPlateau(
                    optimizer, verbose=True, factor=anneal_factor, patience=patience
                )
            else:
                scheduler = ReduceLROnPlateau(optimizer, verbose=True, factor=anneal_factor, patience=patience)
            if use_amp:
                self.model, optimizer = amp.initialize(self.model, optimizer, opt_level=amp_opt_level)
            training_generator = DataLoader(self.corpus.train, shuffle=False, num_workers=num_workers)
            for epoch in range(self.epoch, max_epochs):
                epoch_start_time = time.time()
                # Shuffle training files randomly after serially iterating
                # through corpus one
                if epoch > 0:
                    training_generator = DataLoader(self.corpus.train, shuffle=True, num_workers=num_workers)
                    self.model.save_checkpoint(
                        base_path / f"epoch_{epoch}.pt",
                        optimizer,
                        epoch,
                        0,
                        best_val_loss,
                    )
                # iterate through training data, starting at
                # self.split (for checkpointing)
                for curr_split, train_slice in enumerate(training_generator, self.split):
                    # optionally grow the BPTT window by one per split
                    if sequence_length < grow_to_sequence_length:
                        sequence_length += 1
                    log.info(f"Sequence length is {sequence_length}")
                    split_start_time = time.time()
                    # off by one for printing
                    curr_split += 1
                    train_data = self._batchify(train_slice.flatten(), mini_batch_size)
                    log.info("Split %d" % curr_split + f"\t - ({datetime.datetime.now():%H:%M:%S})")
                    for group in optimizer.param_groups:
                        learning_rate = group["lr"]
                    # go into train mode
                    self.model.train()
                    # reset variables
                    hidden = self.model.init_hidden(mini_batch_size)
                    # not really sure what this does
                    ntokens = len(self.corpus.dictionary)
                    total_loss = torch.zeros(1, device=flair.device)
                    start_time = time.time()
                    for batch, i in enumerate(range(0, train_data.size(0) - 1, sequence_length)):
                        data, targets = self._get_batch(train_data, i, sequence_length)
                        if not data.is_cuda and cuda.is_available():
                            log.info("Batch %d is not on CUDA, training will be very slow" % (batch))
                            raise Exception("data isnt on cuda")
                        self.model.zero_grad()
                        optimizer.zero_grad()
                        # do the forward pass in the model
                        output, rnn_output, hidden = self.model.forward(data, hidden)
                        # try to predict the targets
                        loss = self.loss_function(output.view(-1, ntokens), targets)
                        # Backward
                        if use_amp:
                            with amp.scale_loss(loss, optimizer) as scaled_loss:
                                scaled_loss.backward()
                        else:
                            loss.backward()
                        # `clip_grad_norm` helps prevent the exploding gradient
                        # problem in RNNs / LSTMs.
                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip)
                        optimizer.step()
                        total_loss += loss.data
                        # We detach the hidden state from how it was
                        # previously produced.
                        # If we didn't, the model would try backpropagating
                        # all the way to start of the dataset.
                        hidden = self._repackage_hidden(hidden)
                        # explicitly remove loss to clear up memory
                        del loss, output, rnn_output
                        if batch % self.log_interval == 0 and batch > 0:
                            cur_loss = total_loss.item() / self.log_interval
                            elapsed = time.time() - start_time
                            log.info(
                                f"| split {curr_split:3d}/{number_of_splits:3d} | {batch:5d}/{len(train_data) // sequence_length:5d} batches "
                                f"| ms/batch {elapsed * 1000 / self.log_interval:5.2f} | loss {cur_loss:5.4f} | ppl {math.exp(cur_loss):5.4f}"
                            )
                            total_loss = torch.zeros(1, device=flair.device)
                            start_time = time.time()
                    ##########################################################
                    self.model.eval()
                    val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)
                    # Save the model if the validation loss is the best we've
                    # seen so far.
                    if val_loss < best_val_loss:
                        self.model.save(savefile)
                        best_val_loss = val_loss
                        log.info("best split so far")
                    scheduler.step(val_loss)
                    log.info(f"best loss so far {best_val_loss:5.8f}")
                    log.info(self.model.generate_text())
                    if checkpoint:
                        self.model.save_checkpoint(
                            base_path / "checkpoint.pt",
                            optimizer,
                            epoch,
                            curr_split,
                            best_val_loss,
                        )
                    ##########################################################
                    # print info
                    ##########################################################
                    log.info("-" * 89)
                    summary = (
                        f"| end of split {curr_split:3d} /{number_of_splits:3d} | epoch {epoch + 1:3d} | time: "
                        f"{(time.time() - split_start_time):5.2f}s | valid loss {val_loss:5.4f} | valid ppl "
                        f"{math.exp(val_loss):5.4f} | learning rate {learning_rate:3.4f}"
                    )
                    with open(loss_txt, "a") as myfile:
                        myfile.write("%s\n" % summary)
                    log.info(summary)
                    log.info("-" * 89)
                    log.info("%d seconds for train split %d" % (time.time() - split_start_time, curr_split))
                log.info("Epoch time: %.2f" % (time.time() - epoch_start_time))
        except KeyboardInterrupt:
            log.info("-" * 89)
            log.info("Exiting from training early")
        finally:
            # NOTE(review): if add_file_handler itself raised, log_handler is
            # unbound here and this check raises NameError -- confirm/harden
            if log_handler is not None:
                log_handler.close()
                log.removeHandler(log_handler)
        ###############################################################################
        # final testing
        ###############################################################################
        test_data = self._batchify(self.corpus.test, mini_batch_size)
        test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)
        summary = f"TEST: valid loss {test_loss:5.4f} | valid ppl {math.exp(test_loss):8.4f}"
        with open(loss_txt, "a") as myfile:
            myfile.write("%s\n" % summary)
        log.info(summary)
        log.info("-" * 89)

    def evaluate(self, data_source, eval_batch_size, sequence_length):
        """Return the average loss per position of the model on data_source (no gradients)."""
        # Turn on evaluation mode which disables dropout.
        self.model.eval()
        with torch.no_grad():
            total_loss = 0
            ntokens = len(self.corpus.dictionary)
            hidden = self.model.init_hidden(eval_batch_size)
            for i in range(0, data_source.size(0) - 1, sequence_length):
                data, targets = self._get_batch(data_source, i, sequence_length)
                prediction, rnn_output, hidden = self.model.forward(data, hidden)
                output_flat = prediction.view(-1, ntokens)
                total_loss += len(data) * self.loss_function(output_flat, targets).data
                hidden = self._repackage_hidden(hidden)
            return total_loss.item() / len(data_source)

    @staticmethod
    def _batchify(data, batch_size):
        """Reshape a 1-d id tensor into (positions, batch_size) columns of parallel streams."""
        # Work out how cleanly we can divide the dataset into bsz parts.
        nbatch = data.size(0) // batch_size
        # Trim off any extra elements that wouldn't cleanly fit (remainders).
        data = data.narrow(0, 0, nbatch * batch_size)
        # Evenly divide the data across the bsz batches.
        data = data.view(batch_size, -1).t().contiguous()
        return data

    @staticmethod
    def _get_batch(source, i, sequence_length):
        """Return (input, target) at offset i, where target is input shifted by one position."""
        seq_len = min(sequence_length, len(source) - 1 - i)
        data = source[i : i + seq_len]
        target = source[i + 1 : i + 1 + seq_len].view(-1)
        data = data.to(flair.device)
        target = target.to(flair.device)
        return data, target

    @staticmethod
    def _repackage_hidden(h):
        """Wraps hidden states in new tensors, to detach them from their history."""
        return tuple(v.detach() for v in h)

    @staticmethod
    def load_checkpoint(
        checkpoint_file: Union[str, Path],
        corpus: TextCorpus,
        optimizer: Type[Optimizer] = SGD,
    ):
        """Restore a trainer (model, epoch, split, loss, optimizer state) from a checkpoint file."""
        if type(checkpoint_file) is str:
            checkpoint_file = Path(checkpoint_file)
        checkpoint = LanguageModel.load_checkpoint(checkpoint_file)
        return LanguageModelTrainer(
            checkpoint["model"],
            corpus,
            optimizer,
            epoch=checkpoint["epoch"],
            split=checkpoint["split"],
            loss=checkpoint["loss"],
            optimizer_state=checkpoint["optimizer_state_dict"],
        )
| 17,266 | 35.660297 | 142 | py |
flair | flair-master/flair/trainers/__init__.py | from .language_model_trainer import LanguageModelTrainer, TextCorpus
from .trainer import ModelTrainer
__all__ = ["ModelTrainer", "LanguageModelTrainer", "TextCorpus"]
| 169 | 33 | 68 | py |
flair | flair-master/flair/trainers/trainer.py | import inspect
import logging
import os
import random
import time
import warnings
from inspect import signature
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union
import torch
from torch.optim.sgd import SGD
from torch.utils.data.dataset import ConcatDataset
import flair
import flair.nn
from flair.data import Corpus, Dictionary, _len_dataset
from flair.datasets import DataLoader
from flair.trainers.plugins import (
AnnealingPlugin,
CheckpointPlugin,
LinearSchedulerPlugin,
LogFilePlugin,
LossFilePlugin,
MetricName,
MetricRecord,
Pluggable,
TrainerPlugin,
TrainingInterrupt,
WeightExtractorPlugin,
)
from flair.training_utils import identify_dynamic_embeddings, log_line, store_embeddings
log = logging.getLogger("flair")
class ModelTrainer(Pluggable):
valid_events = {
"after_setup",
"before_training_epoch",
"before_training_batch",
"before_training_optimizer_step",
"after_training_batch",
"after_training_epoch",
"after_evaluation",
"after_training_loop",
"training_interrupt",
"_training_finally",
"_training_exception",
"after_training",
"metric_recorded",
}
    def __init__(self, model: flair.nn.Model, corpus: Corpus) -> None:
        """Initialize a model trainer.

        :param model: The model to train; must implement the flair.nn.Model interface.
        :param corpus: The corpus (train/dev/test splits) used to train the model.
        """
        super().__init__()
        self.model: flair.nn.Model = model
        self.corpus: Corpus = corpus
        # sets self.optimizer / self.mini_batch_size to None and clears return_values
        self.reset_training_attributes()
        self.return_values: dict = {}
def reset_training_attributes(self):
if hasattr(self, "optimizer") and self.optimizer is not None:
self.optimizer.zero_grad(set_to_none=True)
del self.optimizer
self.optimizer = None
self.mini_batch_size = None
self.return_values: dict = {}
@staticmethod
def check_for_and_delete_previous_best_models(base_path):
all_best_model_names = [filename for filename in os.listdir(base_path) if filename.startswith("best-model")]
if len(all_best_model_names) != 0:
warnings.warn(
"There should be no best model saved at epoch 1 except there "
"is a model from previous trainings"
" in your training folder. All previous best models will be deleted."
)
for single_model in all_best_model_names:
previous_best_path = os.path.join(base_path, single_model)
if os.path.exists(previous_best_path):
os.remove(previous_best_path)
@staticmethod
def get_batch_steps(batch, mini_batch_chunk_size):
# if necessary, make batch_steps
if mini_batch_chunk_size is not None and len(batch) > mini_batch_chunk_size:
# break up the batch into slices of size
# mini_batch_chunk_size
return [batch[i : i + mini_batch_chunk_size] for i in range(0, len(batch), mini_batch_chunk_size)]
else:
return [batch]
def _get_train_data(self, train_with_dev, train_with_test):
# if training also uses dev/train data, include in training set
train_data = self.corpus.train
if train_with_dev or train_with_test:
parts = [self.corpus.train]
if train_with_dev and self.corpus.dev:
parts.append(self.corpus.dev)
if train_with_test and self.corpus.test:
parts.append(self.corpus.test)
train_data = ConcatDataset(parts)
return train_data
    def _backward(self, loss):
        """Run the backward pass on ``loss``.

        Kept as a separate method so plugins (e.g. AMP gradient scaling) can
        override or wrap the backward call.
        """
        loss.backward()
    def train(
        self,
        base_path,
        anneal_factor: float = 0.5,
        patience: int = 3,
        min_learning_rate: Union[float, List[float]] = 0.0001,
        initial_extra_patience: int = 0,
        anneal_with_restarts: bool = False,
        learning_rate: float = 0.1,
        decoder_learning_rate: Optional[float] = None,
        mini_batch_size: int = 32,
        eval_batch_size: int = 64,
        mini_batch_chunk_size: Optional[int] = None,
        max_epochs: int = 100,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.SGD,
        train_with_dev: bool = False,
        train_with_test: bool = False,
        # evaluation and monitoring
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        monitor_test: bool = False,
        monitor_train_sample: Union[float, int] = 0.0,
        use_final_model_for_eval: bool = False,
        gold_label_dictionary_for_eval: Optional[Dictionary] = None,
        exclude_labels: List[str] = [],
        # sampling and shuffling
        sampler=None,
        shuffle: bool = True,
        shuffle_first_epoch: bool = True,
        # evaluation and monitoring
        embeddings_storage_mode: str = "cpu",
        epoch: int = 0,
        # when and what to save
        save_final_model: bool = True,
        save_optimizer_state: bool = False,
        save_model_each_k_epochs: int = 0,
        # logging parameters
        create_file_logs: bool = True,
        create_loss_file: bool = True,
        write_weights: bool = False,
        # plugins
        plugins: Optional[List[TrainerPlugin]] = None,
        attach_default_scheduler: bool = True,
        **kwargs,
    ):
        """Classic training loop: train_custom plus anneal-on-plateau scheduling.

        Thin wrapper around :meth:`train_custom` that attaches an
        ``AnnealingPlugin`` (built from the annealing-specific arguments) and
        forwards every other argument unchanged. See :meth:`train_custom` for
        the meaning of the shared parameters.
        """
        if plugins is None:
            plugins = []
        if attach_default_scheduler:
            # activate annealing plugin
            # NOTE(review): this appends to a caller-supplied plugins list in place
            plugins.append(
                AnnealingPlugin(
                    base_path=base_path,
                    anneal_factor=anneal_factor,
                    patience=patience,
                    min_learning_rate=min_learning_rate,
                    initial_extra_patience=initial_extra_patience,
                    anneal_with_restarts=anneal_with_restarts,
                )
            )
        # call self.train_custom with all parameters (minus the ones specific to the AnnealingPlugin)
        # locals() is snapshotted here: at this point it contains exactly the
        # call arguments (with plugins possibly rebound above)
        local_variables = locals()
        for var in [
            "self",
            "anneal_factor",
            "patience",
            "min_learning_rate",
            "initial_extra_patience",
            "anneal_with_restarts",
            "attach_default_scheduler",
            "kwargs",
        ]:
            local_variables.pop(var)
        return self.train_custom(**local_variables, **kwargs)
    def fine_tune(
        self,
        base_path: Union[Path, str],
        # training parameters
        warmup_fraction: float = 0.1,
        learning_rate: float = 5e-5,
        decoder_learning_rate: Optional[float] = None,
        mini_batch_size: int = 4,
        eval_batch_size: int = 16,
        mini_batch_chunk_size: Optional[int] = None,
        max_epochs: int = 10,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.AdamW,
        train_with_dev: bool = False,
        train_with_test: bool = False,
        # evaluation and monitoring
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        monitor_test: bool = False,
        monitor_train_sample: Union[float, int] = 0.0,
        use_final_model_for_eval: bool = True,
        gold_label_dictionary_for_eval: Optional[Dictionary] = None,
        exclude_labels: List[str] = [],
        # sampling and shuffling
        sampler=None,
        shuffle: bool = True,
        shuffle_first_epoch: bool = True,
        # evaluation and monitoring
        embeddings_storage_mode: str = "none",
        epoch: int = 0,
        # when and what to save
        save_final_model: bool = True,
        save_optimizer_state: bool = False,
        save_model_each_k_epochs: int = 0,
        # logging parameters
        create_file_logs: bool = True,
        create_loss_file: bool = True,
        write_weights: bool = False,
        # plugins
        plugins: Optional[List[TrainerPlugin]] = None,
        attach_default_scheduler: bool = True,
        **kwargs,
    ):
        """Fine-tuning loop: train_custom plus a linear warmup/decay scheduler.

        Thin wrapper around :meth:`train_custom` that attaches a
        ``LinearSchedulerPlugin`` (with the given ``warmup_fraction``) and uses
        fine-tuning-friendly defaults: small learning rate, AdamW, small
        batches, no embedding storage, and the final (not best) model for the
        final evaluation. All arguments are forwarded unchanged; see
        :meth:`train_custom` for their meaning.
        """
        # annealing logic
        if plugins is None:
            plugins = []
        if attach_default_scheduler:
            plugins.append(LinearSchedulerPlugin(warmup_fraction=warmup_fraction))
        return self.train_custom(
            base_path=base_path,
            # training parameters
            learning_rate=learning_rate,
            decoder_learning_rate=decoder_learning_rate,
            mini_batch_size=mini_batch_size,
            eval_batch_size=eval_batch_size,
            mini_batch_chunk_size=mini_batch_chunk_size,
            max_epochs=max_epochs,
            optimizer=optimizer,
            train_with_dev=train_with_dev,
            train_with_test=train_with_test,
            # evaluation and monitoring
            main_evaluation_metric=main_evaluation_metric,
            monitor_test=monitor_test,
            monitor_train_sample=monitor_train_sample,
            use_final_model_for_eval=use_final_model_for_eval,
            gold_label_dictionary_for_eval=gold_label_dictionary_for_eval,
            exclude_labels=exclude_labels,
            # sampling and shuffling
            sampler=sampler,
            shuffle=shuffle,
            shuffle_first_epoch=shuffle_first_epoch,
            # evaluation and monitoring
            embeddings_storage_mode=embeddings_storage_mode,
            epoch=epoch,
            # when and what to save
            save_final_model=save_final_model,
            save_optimizer_state=save_optimizer_state,
            save_model_each_k_epochs=save_model_each_k_epochs,
            # logging parameters
            create_file_logs=create_file_logs,
            create_loss_file=create_loss_file,
            write_weights=write_weights,
            # plugins
            plugins=plugins,
            **kwargs,
        )
def train_custom(
self,
base_path: Union[Path, str],
# training parameters
learning_rate: float = 0.1,
decoder_learning_rate: Optional[float] = None,
mini_batch_size: int = 32,
eval_batch_size: int = 64,
mini_batch_chunk_size: Optional[int] = None,
max_epochs: int = 100,
optimizer: Type[torch.optim.Optimizer] = SGD,
train_with_dev: bool = False,
train_with_test: bool = False,
# evaluation and monitoring
main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
monitor_test: bool = False,
monitor_train_sample: Union[float, int] = 0.0,
use_final_model_for_eval: bool = False,
gold_label_dictionary_for_eval: Optional[Dictionary] = None,
exclude_labels: List[str] = [],
# sampling and shuffling
sampler=None,
shuffle: bool = True,
shuffle_first_epoch: bool = True,
# evaluation and monitoring
embeddings_storage_mode: str = "cpu",
epoch: int = 0,
# when and what to save
save_final_model: bool = True,
save_optimizer_state: bool = False,
save_model_each_k_epochs: int = 0,
# logging parameters
create_file_logs: bool = True,
create_loss_file: bool = True,
write_weights: bool = False,
# plugins
plugins: List[TrainerPlugin] = [],
**kwargs,
) -> dict:
"""Trains any class that implements the flair.nn.Model interface.
Parameters
----------
base_path: Main path to which all output during training is logged and models are saved
learning_rate (float): The learning rate of the optimizer
decoder_learning_rate (Optional[float]): Optional, if set, the decoder is trained with a separate learning rate
mini_batch_size (int): Size of mini-batches during training
eval_batch_size (int): Size of mini-batches during evaluation
mini_batch_chunk_size (int): If mini-batches are larger than this number, they get broken down into chunks of
this size for processing purposes
max_epochs (int): Maximum number of epochs to train. Terminates training if this number is surpassed.
optimizer: The optimizer to use (typically SGD or Adam)
train_with_dev (bool): If True, the data from dev split is added to the training data
train_with_test (bool): If True, the data from test split is added to the training data
main_evaluation_metric: The metric to optimize (often micro-average or macro-average F1-score, or accuracy)
monitor_test (bool): If True, test data is evaluated at end of each epoch
monitor_train_sample: Set this to evaluate on a sample of the train data at the end of each epoch.
If you set an int, it will sample this many sentences to evaluate on. If you set a float, it will sample
a percentage of data points from train.
use_final_model_for_eval (bool): If True, the final model is used for the final evaluation. If False, the
model from the best epoch as determined by main_evaluation_metric is used for the final evaluation.
gold_label_dictionary_for_eval: Set to force evaluation to use a particular label dictionary
exclude_labels: Optionally define a list of labels to exclude from the evaluation
sampler: You can pass a data sampler here for special sampling of data.
shuffle: If True, data is shuffled during training
shuffle_first_epoch: If True, data is shuffled during the first epoch of training
embeddings_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed),
'cpu' (embeddings stored on CPU) or 'gpu' (embeddings stored on GPU)
epoch: The starting epoch (normally 0 but could be higher if you continue training model)
save_final_model: If True, the final model is saved at the end of training.
save_optimizer_state (bool): If True, the optimizer state is saved alongside the model
save_model_each_k_epochs: Each k epochs, a model state will be written out. If set to '5', a model will
be saved each 5 epochs. Default is 0 which means no model saving.
create_file_logs (bool): If True, logging output is written to a file
create_loss_file (bool): If True, a loss file logging output is created
write_weights (bool): If True, write weights to weights.txt on each batch logging event.
plugins: Any additional plugins you want to pass to the trainer
**kwargs: Additional arguments, for instance for the optimizer
Returns:
-------
dict: A dictionary with at least the key "test_score" containing the final evaluation score. Some plugins
add additional information to this dictionary, such as the :class:`MetricHistoryPlugin`
"""
# Create output folder
base_path = Path(base_path)
base_path.mkdir(exist_ok=True, parents=True)
# === START BLOCK: ACTIVATE PLUGINS === #
# We first activate all optional plugins. These take care of optional functionality such as various
# logging techniques and checkpointing
for plugin in plugins:
plugin.attach_to(self)
# log file plugin
if create_file_logs:
LogFilePlugin(base_path=base_path).attach_to(self)
# loss file plugin
if create_loss_file:
LossFilePlugin(base_path=base_path, epoch=epoch).attach_to(self)
# plugin for writing weights
if write_weights:
WeightExtractorPlugin(base_path=base_path).attach_to(self)
# plugin for checkpointing
if save_model_each_k_epochs > 0:
CheckpointPlugin(
save_model_each_k_epochs=save_model_each_k_epochs,
save_optimizer_state=save_optimizer_state,
base_path=base_path,
).attach_to(self)
# === END BLOCK: ACTIVATE PLUGINS === #
# derive parameters the function was called with (or defaults)
local_variables = locals()
training_parameters = {
parameter: local_variables[parameter] for parameter in signature(self.train_custom).parameters
}
training_parameters.update(kwargs)
# initialize model card with these parameters
self.model.model_card = self._initialize_model_card(**training_parameters)
# Prepare training data and get dataset size
train_data = self._get_train_data(train_with_dev=train_with_dev, train_with_test=train_with_test)
dataset_size = _len_dataset(train_data)
parameters = {"dataset_size": dataset_size, **training_parameters}
# determine what splits (train, dev, test) to evaluate
evaluation_splits = {}
if not train_with_dev and self.corpus.dev:
evaluation_splits["dev"] = self.corpus.dev
if self.corpus.test and monitor_test:
evaluation_splits["test"] = self.corpus.test
if monitor_train_sample > 0.0:
evaluation_splits["train_sample"] = self._sample_train_split(monitor_train_sample)
# determine how to determine best model and whether to save it
determine_best_epoch_using_dev_score = not train_with_dev and self.corpus.dev
best_epoch_score = 0 if determine_best_epoch_using_dev_score else float("inf")
save_best_model = not train_with_dev and not use_final_model_for_eval
# instantiate the optimizer
kwargs["lr"] = learning_rate
if decoder_learning_rate:
params = [
{
"params": [param for name, param in self.model.named_parameters() if "embeddings" not in name],
"lr": decoder_learning_rate,
},
{
"params": [param for name, param in self.model.named_parameters() if "embeddings" in name],
"lr": learning_rate,
},
]
self.optimizer = optimizer(params=params, **kwargs)
log.info(
f"Modifying learning rate to {decoder_learning_rate} for the following "
f"parameters: {[name for name, param in self.model.named_parameters() if 'embeddings' not in name]}"
)
else:
self.optimizer = optimizer(params=self.model.parameters(), **kwargs)
# initialize sampler if provided
if sampler is not None:
# init with default values if only class is provided
if inspect.isclass(sampler):
sampler = sampler()
# set dataset to sample from
sampler.set_dataset(train_data)
shuffle = False
# this field stores the names of all dynamic embeddings in the model (determined after first forward pass)
dynamic_embeddings = None
# Sanity checks
assert len(train_data) > 0
if epoch >= max_epochs:
log.warning(f"Starting at epoch {epoch + 1}/{max_epochs}. No training will be done.")
if epoch == 0:
self.check_for_and_delete_previous_best_models(base_path)
# -- AmpPlugin -> wraps with AMP
# -- AnnealingPlugin -> initialize schedulers (requires instantiated optimizer)
self.dispatch("after_setup", **parameters)
final_eval_info = (
"model after last epoch (final-model.pt)"
if use_final_model_for_eval
else "model from best epoch (best-model.pt)"
)
log_line(log)
log.info(f'Model: "{self.model}"')
log_line(log)
log.info(f"{self.corpus}")
log_line(log)
log.info(f"Train: {len(train_data)} sentences")
log.info(f" (train_with_dev={train_with_dev}, train_with_test={train_with_test})")
log_line(log)
log.info("Training Params:")
log.info(
f' - learning_rate: "{learning_rate}" '
f'{"(decoder: " + str(decoder_learning_rate) + ")" if decoder_learning_rate else ""}'
)
log.info(f' - mini_batch_size: "{mini_batch_size}"')
log.info(f' - max_epochs: "{max_epochs}"')
log.info(f' - shuffle: "{shuffle}"')
log_line(log)
log.info("Plugins:")
for plugin in plugins:
log.info(" - " + str(plugin))
log_line(log)
log.info(f"Final evaluation on {final_eval_info}")
log.info(f' - metric: "{main_evaluation_metric}"')
log_line(log)
log.info("Computation:")
log.info(f" - compute on device: {flair.device}")
log.info(f" - embedding storage: {embeddings_storage_mode}")
log_line(log)
log.info(f'Model training base path: "{base_path}"')
log_line(log)
# At any point you can hit Ctrl + C to break out of training early.
try:
total_train_samples = 0
for epoch in range(epoch + 1, max_epochs + 1):
log_line(log)
# - SchedulerPlugin -> load state for anneal_with_restarts, batch_growth_annealing, logic for early stopping
# - LossFilePlugin -> get the current epoch for loss file logging
self.dispatch("before_training_epoch", epoch=epoch)
self.model.model_card["training_parameters"]["epoch"] = epoch # type: ignore[index]
lr_info, momentum_info = self._get_current_lr_and_momentum(epoch)
# if shuffle_first_epoch==False, the first epoch is not shuffled
shuffle_data_this_epoch = shuffle
if not shuffle_first_epoch and epoch == 1:
shuffle_data_this_epoch = False
batch_loader = DataLoader(
train_data,
batch_size=mini_batch_size,
shuffle=shuffle_data_this_epoch,
sampler=sampler,
)
self.model.train()
epoch_train_loss: float = 0.0
epoch_train_samples: int = 0
epoch_start_time = time.time()
# log infos on training progress every `log_modulo` batches
log_modulo = max(1, int(len(batch_loader) / 10))
# process mini-batches
for batch_no, batch in enumerate(batch_loader):
# zero the gradients on the model and optimizer
self.model.zero_grad()
self.optimizer.zero_grad()
batch_train_loss = 0.0
batch_train_samples = 0
batch_kw = {
"batch_no": batch_no,
"batch": batch,
"total_number_of_batches": len(batch_loader),
"epoch": epoch,
}
self.dispatch("before_training_batch", **batch_kw)
batch_steps = self.get_batch_steps(batch, mini_batch_chunk_size=mini_batch_chunk_size)
# forward and backward for batch
for batch_step in batch_steps:
# forward pass
loss, datapoint_count = self.model.forward_loss(batch_step)
batch_train_samples += datapoint_count
batch_train_loss += loss.item()
self._backward(loss)
# identify dynamic embeddings (always deleted) on first sentence
if dynamic_embeddings is None:
dynamic_embeddings = identify_dynamic_embeddings(batch)
# depending on memory mode, embeddings are moved to CPU, GPU or deleted
store_embeddings(batch_step, embeddings_storage_mode, dynamic_embeddings)
self.dispatch("before_training_optimizer_step", **batch_kw)
# do the optimizer step
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)
self.optimizer.step()
if batch_train_samples > 0:
train_loss = batch_train_loss / batch_train_samples
self._record(MetricRecord.scalar(("train", "batch_loss"), train_loss, total_train_samples))
epoch_train_loss += batch_train_loss
epoch_train_samples += batch_train_samples
if (batch_no + 1) % log_modulo == 0:
intermittent_loss = (
epoch_train_loss / epoch_train_samples
if epoch_train_samples > 0
else epoch_train_samples / (batch_no + 1)
)
current_time = time.time()
lr_info, momentum_info = self._get_current_lr_and_momentum(epoch)
log.info(
f"epoch {epoch}"
f" - iter {batch_no + 1}/{len(batch_loader)}"
f" - loss {intermittent_loss:.8f}"
f" - time (sec): {(current_time - epoch_start_time):.2f}"
f" - samples/sec: {epoch_train_samples / (current_time - epoch_start_time):.2f}"
f"{lr_info}{momentum_info}"
)
# - SchedulerPlugin -> do the scheduler step if one-cycle or linear decay
# - WeightExtractorPlugin -> extracts weights
self.dispatch("after_training_batch", **batch_kw)
train_loss = epoch_train_loss / epoch_train_samples
self._record(MetricRecord.scalar(("train", "loss"), train_loss, epoch))
total_train_samples += epoch_train_samples
log_line(log)
log.info(f"EPOCH {epoch} done: loss {train_loss:.4f}{lr_info}")
# - CheckpointPlugin -> executes save_model_each_k_epochs
# - SchedulerPlugin -> log bad epochs
self.dispatch("after_training_epoch", epoch=epoch)
self.model.eval()
# Determine if this is the best model or if we need to anneal
current_epoch_has_best_model_so_far = False
validation_scores: tuple
for evaluation_split, evaluation_split_data in evaluation_splits.items():
eval_result = self.model.evaluate(
evaluation_split_data,
out_path=base_path / f"{evaluation_split}.tsv",
mini_batch_size=eval_batch_size,
exclude_labels=exclude_labels,
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
embedding_storage_mode=embeddings_storage_mode,
gold_label_type=self.model.label_type,
gold_label_dictionary_for_eval=gold_label_dictionary_for_eval,
)
# log results
log.info(
f"{evaluation_split.upper()} : loss {eval_result.loss}"
f" - {main_evaluation_metric[1]}"
f" ({main_evaluation_metric[0]})"
f" {round(eval_result.main_score, 4)}"
)
# depending on memory mode, embeddings are moved to CPU, GPU or deleted
store_embeddings(evaluation_split_data, embeddings_storage_mode)
self._publish_eval_result(eval_result, evaluation_split, global_step=epoch)
# use DEV split to determine if this is the best model so far
if determine_best_epoch_using_dev_score and evaluation_split == "dev":
validation_scores = eval_result.main_score, eval_result.loss
if eval_result.main_score > best_epoch_score:
current_epoch_has_best_model_so_far = True
best_epoch_score = eval_result.main_score
# if not using DEV score, determine best model using train loss
if not determine_best_epoch_using_dev_score:
validation_scores = (train_loss,)
if epoch_train_loss < best_epoch_score:
current_epoch_has_best_model_so_far = True
best_epoch_score = train_loss
# - LossFilePlugin -> somehow prints all relevant metrics
# - AnnealPlugin -> scheduler step
self.dispatch(
"after_evaluation",
epoch=epoch,
current_model_is_best=current_epoch_has_best_model_so_far,
validation_scores=validation_scores,
)
if save_best_model and current_epoch_has_best_model_so_far:
log.info("saving best model")
self.model.save(base_path / "best-model.pt", checkpoint=save_optimizer_state)
# - SWAPlugin -> restores SGD weights from SWA
self.dispatch("after_training_loop")
# if we do not use dev data for model selection, save final model
if save_final_model:
self.model.save(base_path / "final-model.pt", checkpoint=save_optimizer_state)
except KeyboardInterrupt:
log_line(log)
log.info("Exiting from training early.")
self.dispatch("training_interrupt") # TODO: no plugin calls this event
log.info("Saving model ...")
self.model.save(base_path / "final-model.pt", checkpoint=save_optimizer_state)
log.info("Done.")
except TrainingInterrupt as exc:
log_line(log)
log.info(str(exc))
log_line(log)
self.dispatch("training_interrupt") # TODO: no plugin calls this event
log.info("Saving model ...")
self.model.save(base_path / "final-model.pt", checkpoint=save_optimizer_state)
log.info("Done.")
except Exception:
self.dispatch("_training_exception")
raise
finally:
# TensorboardLogger -> closes writer
self.dispatch("_training_finally")
# test best model if test data is present
if self.corpus.test and not train_with_test:
log_line(log)
self.model.eval()
if (base_path / "best-model.pt").exists():
log.info("Loading model from best epoch ...")
self.model.load_state_dict(self.model.load(base_path / "best-model.pt").state_dict())
else:
log.info("Testing using last state of model ...")
test_results = self.model.evaluate(
self.corpus.test,
gold_label_type=self.model.label_type,
mini_batch_size=eval_batch_size,
out_path=base_path / "test.tsv",
embedding_storage_mode="none",
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
exclude_labels=exclude_labels,
return_loss=False,
)
log.info(test_results.detailed_results)
log_line(log)
# get and return the final test score of best model
self.return_values["test_score"] = test_results.main_score
else:
self.return_values["test_score"] = 0
log.info("Test data not provided setting final score to 0")
# MetricHistoryPlugin -> stores the loss history in return_values
self.dispatch("after_training")
# Store return values, as they will be erased by reset_training_attributes
return_values = self.return_values
self.reset_training_attributes()
return return_values
def _get_current_lr_and_momentum(self, epoch):
current_learning_rate = [group["lr"] for group in self.optimizer.param_groups]
momentum = [group["momentum"] if "momentum" in group else 0 for group in self.optimizer.param_groups]
lr_info = " - lr: " + ",".join([f"{m:.6f}" for m in current_learning_rate])
momentum_info = " - momentum: " + ",".join([f"{m:.6f}" for m in momentum])
self._record(MetricRecord.scalar_list("learning_rate", current_learning_rate, epoch))
self._record(MetricRecord.scalar_list(("optimizer", "momentum"), momentum, epoch))
return lr_info, momentum_info
def _sample_train_split(self, monitor_train_sample):
train_part_size = 0
if isinstance(monitor_train_sample, float):
train_part_size = int(_len_dataset(self.corpus.train) * monitor_train_sample)
if isinstance(monitor_train_sample, int):
train_part_size = monitor_train_sample
assert train_part_size > 0
# get a random sample of training sentences
train_part_indices = list(range(_len_dataset(self.corpus.train)))
random.shuffle(train_part_indices)
train_part_indices = train_part_indices[:train_part_size]
train_part = torch.utils.data.dataset.Subset(self.corpus.train, train_part_indices)
return train_part
def _flat_dict_items(self, d, composite_key=()):
for key, value in d.items():
key = (*composite_key, key) if isinstance(key, str) else composite_key + tuple(key)
if isinstance(value, dict):
yield from self._flat_dict_items(value, composite_key=key)
else:
yield key, value
    def _publish_eval_result(self, result, prefix=(), **kw):
        """Emit all scores of an evaluation result as metric records.

        Values convertible to float become scalar records; lists, tensors and
        everything else fall back to scalar-list, histogram and string records
        respectively. The main score and detailed results are always emitted.
        """
        for key, value in self._flat_dict_items(result.scores, composite_key=MetricName(prefix)):
            try:
                self._record(MetricRecord.scalar(name=key, value=float(value), **kw))
            except TypeError:
                # value is not a plain number -- pick a record type by inspection
                if isinstance(value, list):
                    self._record(MetricRecord.scalar_list(name=key, value=value, **kw))
                elif isinstance(value, torch.Tensor):
                    self._record(MetricRecord.histogram(name=key, value=value, **kw))
                else:
                    value = str(value)
                    self._record(MetricRecord.string(name=key, value=value, **kw))
        self._record(MetricRecord.string(name=MetricName(prefix) + "score", value=result.main_score, **kw))
        self._record(
            MetricRecord.string(name=MetricName(prefix) + "detailed_result", value=result.detailed_results, **kw)
        )
def _initialize_model_card(self, **training_parameters):
"""Initializes model card with library versions and parameters.
:param training_parameters:
:return:
"""
# create a model card for this model with Flair and PyTorch version
model_card = {
"flair_version": flair.__version__,
"pytorch_version": torch.__version__,
}
# record Transformers version if library is loaded
try:
import transformers
model_card["transformers_version"] = transformers.__version__
except ImportError:
pass
# remember all parameters used in train() call
model_card["training_parameters"] = {
k: str(v) if isinstance(v, Path) else v for k, v in training_parameters.items()
}
plugins = [plugin.__class__ for plugin in model_card["training_parameters"]["plugins"]]
model_card["training_parameters"]["plugins"] = plugins
return model_card
    def _record(self, metric):
        """Forward a metric record to all plugins hooked on "metric_recorded"."""
        self.dispatch("metric_recorded", metric)
| 36,077 | 41.245902 | 124 | py |
flair | flair-master/flair/trainers/plugins/base.py | import logging
from collections import defaultdict
from inspect import isclass, signature
from itertools import count
from queue import Queue
from typing import (
Callable,
Dict,
Iterator,
List,
NewType,
Optional,
Sequence,
Set,
Type,
Union,
cast,
)
log = logging.getLogger("flair")
# A plugin may be passed either as an instance or as a class (instantiated on attach).
PluginArgument = Union["BasePlugin", Type["BasePlugin"]]
# Distinct int type used as the registration key of a hook inside a Pluggable.
HookHandleId = NewType("HookHandleId", int)
# NOTE(review): name is a typo of "EventIdentifier"; kept as-is since it is referenced throughout.
EventIdenifier = str
class TrainingInterrupt(Exception):
    """Allows plugins to interrupt the training loop.

    ModelTrainer catches this exception, saves the final model and ends
    training gracefully instead of propagating the error.
    """
class Pluggable:
    """Dispatches events which attached plugins can react to."""

    # When set on a subclass, restricts which event names may be hooked/dispatched.
    valid_events: Optional[Set[EventIdenifier]] = None

    def __init__(self, *, plugins: Sequence[PluginArgument] = ()) -> None:
        """Initialize a `Pluggable`.

        :param plugins: Plugins (instances or classes) to attach to this `Pluggable`.
            (Default changed from a mutable ``[]`` to ``()``; the sequence is only
            iterated, so behavior is unchanged.)
        """
        self._hook_handles: Dict[EventIdenifier, Dict[HookHandleId, HookHandle]] = defaultdict(dict)
        self._hook_handle_id_counter = count()
        self._plugins: List[BasePlugin] = []
        # This flag tracks whether an event is currently being processed
        # (events dispatched re-entrantly are queued instead of nested).
        self._processing_events = False
        self._event_queue: Queue = Queue()
        for plugin in plugins:
            if isclass(plugin):
                # instantiate plugin with default arguments
                plugin = plugin()
            plugin = cast("BasePlugin", plugin)
            plugin.attach_to(self)

    @property
    def plugins(self):
        return self._plugins

    def append_plugin(self, plugin):
        self._plugins.append(plugin)

    def validate_event(self, *events: EventIdenifier):
        """Check that every given event name is a string and, if this class
        restricts events, that each one is allowed.

        Fix: previously the loop returned after its first iteration, so only
        the FIRST event was validated and later events in a multi-event
        registration bypassed validation entirely.

        :raises RuntimeError: If an event is not in ``valid_events``.
        :return: The first event (preserves the previous return value), or None.
        """
        for event in events:
            assert isinstance(event, EventIdenifier)
            if self.valid_events is not None and event not in self.valid_events:
                raise RuntimeError(f"Event '{event}' not recognized. Available: {', '.join(self.valid_events)}")
        return events[0] if events else None

    def register_hook(self, func: Callable, *events: EventIdenifier):
        """Register a hook.

        :param func: Function to be called when the event is emitted.
        :param *events: List of events to call this function on.
        """
        self.validate_event(*events)
        handle: HookHandle = HookHandle(
            HookHandleId(next(self._hook_handle_id_counter)), events=events, func=func, pluggable=self
        )
        for event in events:
            self._hook_handles[event][handle.id] = handle
        return handle

    def dispatch(self, event: EventIdenifier, *args, **kwargs) -> None:
        """Call all functions hooked to a certain event.

        Events dispatched while another event is being processed are queued
        and run in order once the current one finishes (no nested dispatch).
        """
        self.validate_event(event)
        self._event_queue.put((event, args, kwargs))
        if not self._processing_events:
            try:
                self._processing_events = True
                while not self._event_queue.empty():
                    event, args, kwargs = self._event_queue.get()
                    for hook in self._hook_handles[event].values():
                        hook(*args, **kwargs)
            finally:
                # Reset the flag, since an exception event might be dispatched
                self._processing_events = False

    def remove_hook(self, handle: "HookHandle"):
        """Remove a hook handle from this instance."""
        for event in handle.events:
            del self._hook_handles[event][handle.id]
class HookHandle:
    """Represents the registration information of a hook callback."""
    def __init__(
        self, _id: HookHandleId, *, events: Sequence[EventIdenifier], func: Callable, pluggable: Pluggable
    ) -> None:
        """Initialize `HookHandle`.

        :param _id: Id the callback is stored as in the `Pluggable`.
        :param *events: List of events the callback is registered for.
        :param func: The callback function.
        :param pluggable: The `Pluggable` where the callback is registered.
        """
        pluggable.validate_event(*events)
        self._id = _id
        self._events = events
        self._func = func
        self._pluggable = pluggable
    @property
    def id(self) -> HookHandleId:
        """Return the id of this `HookHandle`."""
        return self._id
    @property
    def func_name(self):
        # qualified name of the wrapped callback, used in error messages
        return self._func.__qualname__
    @property
    def events(self) -> Iterator[EventIdenifier]:
        """Return an iterator over the events this `HookHandle` is registered for."""
        yield from self._events
    def remove(self):
        """Remove a hook from the `Pluggable` it is attached to."""
        self._pluggable.remove_hook(self)
    def __call__(self, *args, **kw):
        """Call the hook this `HookHandle` is associated with.

        On a TypeError, inspects the callback's signature to turn a mismatch
        between dispatched keyword arguments and the callback's parameters
        into a descriptive error; any other TypeError is re-raised unchanged.
        """
        try:
            return self._func(*args, **kw)
        except TypeError as err:
            sig = signature(self._func)
            if not any(p.kind == p.VAR_KEYWORD for p in sig.parameters.values()):
                # If there is no **kw argument in the callback, check if any of the passed kw args is not accepted by
                # the callback
                for name in kw:
                    if name not in sig.parameters:
                        raise TypeError(
                            f"Hook callback {self.func_name}() does not accept keyword argument '{name}'"
                        ) from err
            raise err
class BasePlugin:
    """Base class for all plugins."""
    def __init__(self) -> None:
        """Initialize the base plugin."""
        self._hook_handles: List[HookHandle] = []
        self._pluggable: Optional[Pluggable] = None
    def attach_to(self, pluggable: Pluggable):
        """Attach this plugin to a `Pluggable` and register all marked hooks.

        A plugin can be attached to at most one `Pluggable` at a time.
        """
        assert self._pluggable is None
        assert len(self._hook_handles) == 0
        self._pluggable = pluggable
        pluggable.append_plugin(self)
        # go through all attributes
        for name in dir(self):
            try:
                func = getattr(self, name)
                # get attribute hook events (may raise an AttributeError)
                events = func._plugin_hook_events
                # register function as a hook
                handle = pluggable.register_hook(func, *events)
                self._hook_handles.append(handle)
            except AttributeError:
                # attribute is not a marked hook (or not accessible) -- skip it
                continue
    def detach(self):
        """Detach a plugin from the `Pluggable` it is attached to."""
        assert self._pluggable is not None
        for handle in self._hook_handles:
            handle.remove()
        self._pluggable = None
        self._hook_handles = []
    @classmethod
    def mark_func_as_hook(cls, func: Callable, *events: EventIdenifier) -> Callable:
        """Mark method as a hook triggered by the `Pluggable`.

        With no explicit events, the function's own name is used as the event.
        """
        if len(events) == 0:
            events = (func.__name__,)
        func._plugin_hook_events = events  # type: ignore[attr-defined]
        return func
    @classmethod
    def hook(
        cls,
        first_arg: Optional[Union[Callable, EventIdenifier]] = None,
        *other_args: EventIdenifier,
    ) -> Callable:
        """Convenience decorator wrapping `BasePlugin.mark_func_as_hook`.

        Enables using the `@BasePlugin.hook` syntax.
        Can also be used as:
        `@BasePlugin.hook("some_event", "another_event")`
        """
        if first_arg is None:
            # Decorator was used with parentheses, but no args
            return cls.mark_func_as_hook
        if isinstance(first_arg, EventIdenifier):
            # Decorator was used with args (strings specifying the events)
            def decorator_func(func: Callable):
                return cls.mark_func_as_hook(func, cast(EventIdenifier, first_arg), *other_args)
            return decorator_func
        # Decorator was used without args
        return cls.mark_func_as_hook(first_arg, *other_args)
    @property
    def pluggable(self) -> Optional[Pluggable]:
        return self._pluggable
    def __str__(self) -> str:
        return self.__class__.__name__
class TrainerPlugin(BasePlugin):
    """Base class for plugins attached to a model trainer.

    Adds convenience accessors for the trainer (the `Pluggable` the plugin
    is attached to), its model and its corpus.
    """

    @property
    def trainer(self):
        """Return the trainer this plugin is attached to."""
        return self.pluggable

    @property
    def model(self):
        """Return the model of the attached trainer."""
        return self.trainer.model

    @property
    def corpus(self):
        """Return the corpus of the attached trainer."""
        return self.trainer.corpus
| 8,348 | 29.694853 | 117 | py |
flair | flair-master/flair/trainers/plugins/__init__.py | from .base import BasePlugin, Pluggable, TrainerPlugin, TrainingInterrupt
from .functional.amp import AmpPlugin
from .functional.anneal_on_plateau import AnnealingPlugin
from .functional.checkpoints import CheckpointPlugin
from .functional.linear_scheduler import LinearSchedulerPlugin
from .functional.weight_extractor import WeightExtractorPlugin
from .loggers.log_file import LogFilePlugin
from .loggers.loss_file import LossFilePlugin
from .loggers.metric_history import MetricHistoryPlugin
from .loggers.tensorboard import TensorboardLogger
from .metric_records import MetricName, MetricRecord
__all__ = [
"AmpPlugin",
"AnnealingPlugin",
"CheckpointPlugin",
"LinearSchedulerPlugin",
"WeightExtractorPlugin",
"LogFilePlugin",
"LossFilePlugin",
"MetricHistoryPlugin",
"TensorboardLogger",
"BasePlugin",
"Pluggable",
"TrainerPlugin",
"TrainingInterrupt",
"MetricName",
"MetricRecord",
]
| 950 | 30.7 | 73 | py |
flair | flair-master/flair/trainers/plugins/metric_records.py | import time
from dataclasses import dataclass
from enum import Enum
from typing import Any, Iterable, Iterator, Optional, Tuple, Union
RecordType = Enum("RecordType", ["scalar", "image", "histogram", "string", "scalar_list"])
class MetricName:
    """Hierarchical metric identifier, e.g. ``"dev/loss"`` == ``("dev", "loss")``.

    The name is stored as a tuple of path parts; a string input is split on
    "/". Instances compare equal to equivalent strings, tuples and other
    `MetricName` objects, can be extended with ``+`` on either side, and are
    hashable (usable as dict keys).
    """

    def __init__(self, name) -> None:
        self.parts: Tuple[str, ...]
        # strings are interpreted as "/"-separated paths; any other iterable
        # is taken as the sequence of parts directly
        self.parts = tuple(name.split("/")) if isinstance(name, str) else tuple(name)

    def __str__(self) -> str:
        return "/".join(self.parts)

    def __repr__(self) -> str:
        return str(self)

    def __iter__(self) -> Iterator[str]:
        return iter(self.parts)

    def __getitem__(self, i) -> Union["MetricName", str]:
        selected = self.parts[i]
        # slicing yields a new MetricName; a single index yields the bare part
        return self.__class__(selected) if isinstance(i, slice) else selected

    def __add__(self, other) -> "MetricName":
        if isinstance(other, str):
            combined = (*self.parts, other)
        elif isinstance(other, MetricName):
            combined = self.parts + other.parts
        else:
            combined = self.parts + tuple(other)
        return self.__class__(combined)

    def __radd__(self, other) -> "MetricName":
        # no need to handle MetricName here: its __add__ would have run first
        prefix = (other,) if isinstance(other, str) else tuple(other)
        return self.__class__(prefix + self.parts)

    def __eq__(self, other) -> bool:
        if other is None:
            return False
        if isinstance(other, str):
            return self.parts == tuple(other.split("/"))
        if isinstance(other, MetricName):
            return self.parts == other.parts
        return self.parts == tuple(other)

    def __hash__(self):
        return hash(self.parts)
class MetricRecord:
    """Represents a recorded metric value.

    Note: this class intentionally is NOT a ``@dataclass``. The original
    ``@dataclass`` decorator, applied to a class with no class-level fields,
    generated an ``__eq__`` that compared empty field tuples — making *all*
    records compare equal to each other — and set ``__hash__ = None``, making
    records unhashable. Plain identity semantics are the correct behavior.
    """

    def __init__(
        self,
        name: Union[Iterable[str], str],
        value: Any,
        global_step: int,
        typ: RecordType,
        *,
        walltime: Optional[float] = None,
    ) -> None:
        """Create a metric record.

        :param name: Name of the metric.
        :param value: Value of the metric (can be anything: scalar, tensor,
            image, etc.).
        :param global_step: Step at which the metric was recorded.
        :param typ: Type of metric.
        :param walltime: Time of recording this metric; defaults to the
            current time.
        """
        self.name: MetricName = MetricName(name)
        self.typ: RecordType = typ
        self.value: Any = value
        self.global_step: int = global_step
        self.walltime: float = walltime if walltime is not None else time.time()

    @property
    def joined_name(self) -> str:
        """Return the metric name as a single "/"-joined string."""
        return str(self.name)

    @classmethod
    def scalar(cls, name: Iterable[str], value: Any, global_step: int, *, walltime=None):
        """Create a record of type `RecordType.scalar`."""
        return cls(name=name, value=value, global_step=global_step, typ=RecordType.scalar, walltime=walltime)

    @classmethod
    def scalar_list(cls, name: Iterable[str], value: list, global_step: int, *, walltime=None):
        """Create a record of type `RecordType.scalar_list`."""
        return cls(name=name, value=value, global_step=global_step, typ=RecordType.scalar_list, walltime=walltime)

    @classmethod
    def string(cls, name: Iterable[str], value: str, global_step: int, *, walltime=None):
        """Create a record of type `RecordType.string`."""
        return cls(name=name, value=value, global_step=global_step, typ=RecordType.string, walltime=walltime)

    @classmethod
    def histogram(cls, name: Iterable[str], value: Any, global_step: int, *, walltime=None):
        """Create a record of type `RecordType.histogram`.

        The value annotation was corrected from ``str`` to ``Any``: histogram
        records carry arbitrary data (e.g. tensors), not strings.
        """
        return cls(name=name, value=value, global_step=global_step, typ=RecordType.histogram, walltime=walltime)

    def is_type(self, typ):
        """Return True if this record is of the given `RecordType`."""
        return self.typ == typ

    @property
    def is_scalar(self):
        return self.is_type(RecordType.scalar)

    @property
    def is_scalar_list(self):
        return self.is_type(RecordType.scalar_list)

    @property
    def is_string(self):
        return self.is_type(RecordType.string)

    @property
    def is_histogram(self):
        return self.is_type(RecordType.histogram)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.joined_name} at step {self.global_step}, {self.walltime:.4f})"
| 4,204 | 30.856061 | 114 | py |
flair | flair-master/flair/trainers/plugins/loggers/tensorboard.py | import logging
import os
from flair.trainers.plugins.base import TrainerPlugin
from flair.training_utils import log_line
log = logging.getLogger("flair")
class TensorboardLogger(TrainerPlugin):
    """Plugin that takes care of tensorboard logging."""

    def __init__(self, log_dir=None, comment="", tracked_metrics=()) -> None:
        """Initializes the TensorboardLogger.

        :param log_dir: Directory into which tensorboard log files will be written # noqa: E501
        :param comment: Comment to append to the default tensorboard log directory name
        :param tracked_metrics: List of tuples that specify which metrics (in addition to the main_score) shall be plotted in tensorboard, could be [("macro avg", 'f1-score'), ("macro avg", 'precision')] for example # noqa: E501
        """
        super().__init__()
        self.comment = comment
        self.tracked_metrics = tracked_metrics
        # writer stays None when tensorboard is unavailable; the hooks below
        # degrade gracefully. (The original left `writer` unset in the
        # ImportError branch and then asserted on it in the hooks, which
        # raised AttributeError during training.)
        self.writer = None
        self._warned = False
        try:
            from torch.utils.tensorboard import SummaryWriter

            if log_dir is not None and not os.path.exists(log_dir):
                # makedirs also creates missing parent directories, which
                # plain os.mkdir cannot do
                os.makedirs(log_dir, exist_ok=True)
            self.writer = SummaryWriter(log_dir=log_dir, comment=self.comment)
            log.info(f"tensorboard logging path is {log_dir}")

        except ImportError:
            log_line(log)
            log.warning("ATTENTION! PyTorch >= 1.1.0 and pillow are required for TensorBoard support!")
            log_line(log)

    @TrainerPlugin.hook
    def metric_recorded(self, record):
        """Log a scalar metric record to tensorboard (no-op when unavailable)."""
        if self.writer is None:
            return
        # TODO: check if metric is in tracked metrics
        if record.is_scalar:
            self.writer.add_scalar(str(record.name), record.value, record.global_step, walltime=record.walltime)
        else:
            if not self._warned:
                log.warning("Logging anything other than scalars to TensorBoard is currently not supported.")
                self._warned = True

    @TrainerPlugin.hook
    def _training_finally(self, **kw):
        """Closes the writer at the end of training (no-op when unavailable).

        :param kw:
        :return:
        """
        if self.writer is not None:
            self.writer.close()
| 2,065 | 33.433333 | 229 | py |
flair | flair-master/flair/trainers/plugins/loggers/log_file.py | import logging
from pathlib import Path
from flair.trainers.plugins.base import TrainerPlugin
from flair.training_utils import add_file_handler
log = logging.getLogger("flair")
class LogFilePlugin(TrainerPlugin):
    """Plugin for the training.log file."""

    def __init__(self, base_path) -> None:
        """Attach a file handler that writes to ``<base_path>/training.log``."""
        super().__init__()
        self.log_handler = add_file_handler(log, Path(base_path) / "training.log")

    @TrainerPlugin.hook("_training_exception", "after_training")
    def close_file_handler(self, **kw):
        """Close and detach the file handler once training finishes or fails."""
        handler = self.log_handler
        handler.close()
        log.removeHandler(handler)
| 599 | 26.272727 | 82 | py |
flair | flair-master/flair/trainers/plugins/loggers/loss_file.py | from datetime import datetime
from typing import Dict, Optional, Tuple, Union
from flair.trainers.plugins.base import TrainerPlugin
from flair.trainers.plugins.metric_records import MetricName
from flair.training_utils import init_output_file
class LossFilePlugin(TrainerPlugin):
    """Plugin that manages the loss.tsv file output."""

    def __init__(
        self, base_path, epoch: int, metrics_to_collect: Optional[Dict[Union[Tuple, str], str]] = None
    ) -> None:
        """Set up the loss.tsv file and decide which metric columns to write.

        :param base_path: Directory in which the loss.tsv file is created.
        :param epoch: Epoch the training starts at (non-zero when training is
            resumed); the first logged epoch is ``epoch + 1``.
        :param metrics_to_collect: Optional mapping from metric name (a string
            or a tuple of name parts) to the column header used for it. If
            None, a default set (loss, precision, recall, F1, accuracy) is
            used.
        """
        super().__init__()

        self.first_epoch = epoch + 1

        # prepare loss logging file and set up header
        self.loss_txt = init_output_file(base_path, "loss.tsv")

        # set up all metrics to collect
        self.metrics_to_collect = metrics_to_collect
        if self.metrics_to_collect is not None:
            metrics_to_collect = self.metrics_to_collect
        else:
            metrics_to_collect = {
                "loss": "LOSS",
                ("micro avg", "precision"): "PRECISION",
                ("micro avg", "recall"): "RECALL",
                ("micro avg", "f1-score"): "F1",
                "accuracy": "ACCURACY",
            }

        # set up headers
        self.headers = {
            # name: HEADER
            MetricName("epoch"): "EPOCH",
            MetricName("timestamp"): "TIMESTAMP",
            MetricName("bad_epochs"): "BAD_EPOCHS",
            MetricName("learning_rate"): "LEARNING_RATE",
        }

        # Add all potentially relevant metrics. If a metric is not published
        # after the first epoch (when the header is written), the column is
        # removed at that point.
        for prefix in ["train", "train_sample", "dev", "test"]:
            for name, header in metrics_to_collect.items():
                metric_name = MetricName(name)

                # training metrics other than the loss are published under the
                # "train_eval" prefix rather than plain "train"
                if prefix == "train" and metric_name != "loss":
                    metric_name = "train_eval" + metric_name
                else:
                    metric_name = prefix + metric_name

                self.headers[metric_name] = f"{prefix.upper()}_{header}"

        # initialize the first log line
        self.current_row: Optional[Dict[MetricName, str]] = None

    @TrainerPlugin.hook
    def before_training_epoch(self, epoch, **kw):
        """Get the current epoch for loss file logging.

        Starts a fresh row that subsequent `metric_recorded` calls fill in.

        :param epoch: number of the epoch that is about to start
        :param kw:
        :return:
        """
        self.current_row = {MetricName("epoch"): epoch}

    @TrainerPlugin.hook
    def metric_recorded(self, record):
        """Add the metric of a record to the current row.

        Only metrics that have a column in ``self.headers`` are kept; all
        other records are ignored.

        :param record: the published `MetricRecord`
        :return:
        """
        if record.name in self.headers and self.current_row is not None:
            if record.name == "learning_rate" and not record.is_scalar:
                # record is a list of scalars (one learning rate per
                # optimizer parameter group)
                value = ",".join([f"{lr:.4f}" for lr in record.value])
            elif record.is_scalar and isinstance(record.value, int):
                value = str(record.value)
            else:
                # anything else must be a scalar float-like value
                assert record.is_scalar

                value = f"{record.value:.4f}"

            self.current_row[record.name] = value

    @TrainerPlugin.hook
    def after_evaluation(self, epoch, **kw):
        """This prints all relevant metrics.

        Writes the collected row to loss.tsv (emitting the header line first
        on epoch 1) and resets the row afterwards.

        :param epoch: number of the epoch that was just evaluated
        :param kw:
        :return:
        """
        if self.loss_txt is not None:
            self.current_row[MetricName("timestamp")] = f"{datetime.now():%H:%M:%S}"

            # output log file
            with open(self.loss_txt, "a") as f:
                # remove columns where no value was found on the first epoch (could be != 1 if training was resumed)
                if epoch == self.first_epoch:
                    for k in list(self.headers.keys()):
                        if k not in self.current_row:
                            del self.headers[k]

                # make headers on epoch 1
                if epoch == 1:
                    # write header
                    f.write("\t".join(self.headers.values()) + "\n")

                # every remaining column must have received a value this epoch
                for col in self.headers:
                    assert col in self.current_row, str(col) + " " + str(self.current_row.keys())

                assert all(col in self.current_row for col in self.headers)
                f.write("\t".join([str(self.current_row[col]) for col in self.headers]) + "\n")

            self.current_row = {}
| 4,382 | 34.634146 | 116 | py |
flair | flair-master/flair/trainers/plugins/loggers/wandb.py | import logging
from flair.trainers.plugins.base import TrainerPlugin
log = logging.getLogger("flair")
class WandbLoggingHandler(logging.Handler):
    """A `logging.Handler` that forwards log records as W&B alerts."""

    def __init__(self, wandb, *args, **kwargs) -> None:
        """Create the handler.

        :param wandb: The wandb module (or an object exposing the same
            ``alert`` / ``AlertLevel`` interface).
        """
        super().__init__(*args, **kwargs)
        self.wandb = wandb

    def emit(self, record):
        """Send the log record to W&B as an alert.

        The alert level mirrors the record's severity. Any failure while
        emitting is routed to `logging.Handler.handleError`, as is
        conventional for logging handlers.
        """
        try:
            # adjust alert level; `levelno` is the numeric severity of a
            # LogRecord. (The original read the non-existent `record.level`,
            # which raised AttributeError and silently dropped every alert.)
            if record.levelno >= logging.ERROR:
                level = self.wandb.AlertLevel.ERROR
            elif record.levelno >= logging.WARNING:
                level = self.wandb.AlertLevel.WARN
            else:
                level = self.wandb.AlertLevel.INFO

            self.wandb.alert(
                title=f"Alert from {record.module}:{record.lineno}",
                text=self.format(record),
                level=level,
            )

        except Exception:
            self.handleError(record)
class WandbLogger(TrainerPlugin):
    """Plugin that logs training metrics to Weights & Biases.

    Optionally installs a `WandbLoggingHandler` so that warnings and errors
    from the flair logger are also emitted as W&B alerts.
    """

    def __init__(self, wandb, emit_alerts=True, alert_level=logging.WARNING, **kwargs) -> None:
        """Create the logger plugin.

        :param wandb: The wandb module.
        :param emit_alerts: If True, also forward log records as W&B alerts.
        :param alert_level: Minimum logging level that triggers an alert.
        """
        super().__init__(**kwargs)
        self.wandb = wandb
        self.emit_alerts = emit_alerts
        self.alert_level = alert_level
        self._emitted_record_type_warning = False

    @TrainerPlugin.hook
    def after_training_setup(self, **kw):
        """Install the alert log handler (if alerts are enabled)."""
        if self.emit_alerts:
            self.log_handler = WandbLoggingHandler(self.wandb)
            self.log_handler.setLevel(self.alert_level)

            formatter = logging.Formatter("%(asctime)-15s %(message)s")
            self.log_handler.setFormatter(formatter)
            log.addHandler(self.log_handler)
        else:
            self.log_handler = None

    @TrainerPlugin.hook("_training_exception", "after_teardown")
    def close_file_handler(self, **kw):
        """Close and remove the alert log handler again."""
        if self.emit_alerts:
            self.log_handler.close()
            log.removeHandler(self.log_handler)

    @TrainerPlugin.hook
    def metric_recorded(self, record):
        """Log scalar metric records to W&B; warn once about other types."""
        if record.is_scalar:
            # log under the joined string name: `record.name` is a MetricName
            # object, not a string (same conversion the tensorboard plugin
            # performs via str(record.name))
            self.wandb.log({str(record.name): record.value})
        else:
            if not self._emitted_record_type_warning:
                log.warning("Logging anything other than scalars to W&B is currently not supported.")
                self._emitted_record_type_warning = True

    @TrainerPlugin.hook
    def _training_finally(self, **kw):
        """Finish the wandb run when training ends.

        NOTE(review): the original called ``self.writer.close()``, but this
        class never defines ``writer`` (copy-paste from the tensorboard
        plugin) and would always raise AttributeError here. Finishing the
        wandb run is the intended cleanup.
        """
        self.wandb.finish()
| 2,315 | 30.726027 | 101 | py |
flair | flair-master/flair/trainers/plugins/loggers/metric_history.py | import logging
from typing import Dict, Mapping
from flair.trainers.plugins.base import TrainerPlugin
log = logging.getLogger("flair")
# default mapping: (metric name parts) -> name of the history list returned
# from training
default_metrics_to_collect = {
    ("train", "loss"): "train_loss_history",
    ("dev", "score"): "dev_score_history",
    ("dev", "loss"): "dev_loss_history",
}


class MetricHistoryPlugin(TrainerPlugin):
    """Collects selected metric values during training and returns their
    histories when training ends."""

    def __init__(self, metrics_to_collect: Mapping = default_metrics_to_collect) -> None:
        """:param metrics_to_collect: maps metric names to history-list names."""
        super().__init__()
        self.metrics_to_collect: Mapping = metrics_to_collect
        # one (initially empty) history list per collected metric
        self.metric_history: Dict[str, list] = {target: [] for target in self.metrics_to_collect.values()}

    @TrainerPlugin.hook
    def metric_recorded(self, record):
        """Append the recorded value if its metric is being collected."""
        key = tuple(record.name)
        if key in self.metrics_to_collect:
            self.metric_history[self.metrics_to_collect[key]].append(record.value)

    @TrainerPlugin.hook
    def after_training(self, **kw):
        """Copy the collected histories into the trainer's return values.

        :param kw:
        :return:
        """
        self.trainer.return_values.update(self.metric_history)
| 1,132 | 28.051282 | 89 | py |
flair | flair-master/flair/trainers/plugins/loggers/__init__.py | 0 | 0 | 0 | py | |
flair | flair-master/flair/trainers/plugins/functional/weight_extractor.py | from flair.trainers.plugins.base import TrainerPlugin
from flair.training_utils import WeightExtractor
class WeightExtractorPlugin(TrainerPlugin):
    """Simple Plugin for weight extraction."""

    def __init__(self, base_path) -> None:
        """:param base_path: directory the weight traces are written to."""
        super().__init__()
        self.weight_extractor = WeightExtractor(base_path)

    @TrainerPlugin.hook
    def after_training_batch(self, batch_no, epoch, total_number_of_batches, **kw):
        """Extracts weights roughly ten times per epoch.

        :param batch_no: index of the batch within the epoch
        :param epoch: current epoch number
        :param total_number_of_batches: number of batches per epoch
        :param kw:
        :return:
        """
        interval = max(1, int(total_number_of_batches / 10))
        step = epoch * total_number_of_batches + batch_no
        if (step + 1) % interval == 0:
            self.weight_extractor.extract_weights(self.model.state_dict(), step)
| 851 | 30.555556 | 85 | py |
flair | flair-master/flair/trainers/plugins/functional/checkpoints.py | import logging
from flair.trainers.plugins.base import TrainerPlugin
log = logging.getLogger("flair")
class CheckpointPlugin(TrainerPlugin):
    """Periodically saves the model during training."""

    def __init__(
        self,
        save_model_each_k_epochs,
        save_optimizer_state,
        base_path,
    ) -> None:
        """:param save_model_each_k_epochs: save every k epochs (0 disables saving).
        :param save_optimizer_state: include the optimizer state in checkpoints.
        :param base_path: directory the checkpoint files are written to.
        """
        super().__init__()
        self.save_optimizer_state = save_optimizer_state
        self.save_model_each_k_epochs = save_model_each_k_epochs
        self.base_path = base_path

    @TrainerPlugin.hook
    def after_training_epoch(self, epoch, **kw):
        """Saves the model each k epochs.

        :param epoch: number of the epoch that just finished
        :param kw:
        :return:
        """
        k = self.save_model_each_k_epochs
        if k <= 0 or epoch % k != 0:
            return
        log.info(
            f"Saving model at current epoch since 'save_model_each_k_epochs={k}' "
            f"was set"
        )
        model_name = f"model_epoch_{epoch}.pt"
        self.model.save(self.base_path / model_name, checkpoint=self.save_optimizer_state)
| 1,073 | 29.685714 | 114 | py |
flair | flair-master/flair/trainers/plugins/functional/linear_scheduler.py | import logging
from flair.optim import LinearSchedulerWithWarmup
from flair.trainers.plugins.base import TrainerPlugin
log = logging.getLogger("flair")
class LinearSchedulerPlugin(TrainerPlugin):
    """Plugin for LinearSchedulerWithWarmup."""

    def __init__(self, warmup_fraction: float, **kwargs) -> None:
        """:param warmup_fraction: fraction of all training steps used for
        linear learning-rate warmup.
        """
        # NOTE(review): extra **kwargs are accepted but silently discarded
        super().__init__()

        self.warmup_fraction = warmup_fraction

    def store_learning_rate(self):
        """Snapshot the optimizer's current learning rate(s) and momentum(s)."""
        optimizer = self.trainer.optimizer

        self.current_learning_rate = [group["lr"] for group in optimizer.param_groups]

        # Adam-style optimizers keep the momentum term in "betas", SGD-style
        # ones in "momentum" (0 when neither is present)
        self.current_momentum = [
            group["betas"][0] if "betas" in group else group.get("momentum", 0) for group in optimizer.param_groups
        ]

    @TrainerPlugin.hook
    def after_setup(
        self,
        dataset_size,
        mini_batch_size,
        max_epochs,
        **kw,
    ):
        """Initialize different schedulers, including anneal target for AnnealOnPlateau, batch_growth_annealing, loading schedulers.

        :param dataset_size: number of training samples
        :param mini_batch_size: samples per batch
        :param max_epochs: number of epochs to train for
        :param kw:
        :return:
        """
        # calculate warmup steps
        steps_per_epoch = (dataset_size + mini_batch_size - 1) / mini_batch_size
        num_train_steps = int(steps_per_epoch * max_epochs)
        num_warmup_steps = int(num_train_steps * self.warmup_fraction)

        self.scheduler = LinearSchedulerWithWarmup(
            num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, optimizer=self.trainer.optimizer
        )

        self.store_learning_rate()

    @TrainerPlugin.hook
    def before_training_epoch(self, **kw):
        """Load state for anneal_with_restarts, batch_growth_annealing, logic for early stopping.

        :param kw:
        :return:
        """
        self.store_learning_rate()
        self.previous_learning_rate = self.current_learning_rate

    @TrainerPlugin.hook
    def after_training_batch(self, **kw):
        """Do the scheduler step if one-cycle or linear decay.

        :param kw:
        :return:
        """
        self.scheduler.step()
        self.store_learning_rate()

    def __str__(self) -> str:
        return f"LinearScheduler | warmup_fraction: '{self.warmup_fraction}'"
| 2,261 | 29.16 | 132 | py |
flair | flair-master/flair/trainers/plugins/functional/amp.py | from flair.trainers.plugins.base import TrainerPlugin
class AmpPlugin(TrainerPlugin):
    """Simple plugin for AMP (NVIDIA apex mixed-precision training)."""

    def __init__(self, opt_level) -> None:
        """:param opt_level: apex AMP optimization level (e.g. "O1").

        :raises RuntimeError: if apex is not installed.
        """
        super().__init__()
        self.opt_level = opt_level
        # original trainer.backward function, kept so detach() can restore it
        self.wrapped_backward = None

        try:
            from apex import amp

            self.amp = amp
        except ImportError as exc:
            raise RuntimeError(
                "Failed to import apex. Please install apex from "
                "https://www.github.com/nvidia/apex "
                "to enable mixed-precision training."
            ) from exc

    def detach(self, *args, **kwargs):
        # TODO: what does this do?
        super().detach(*args, **kwargs)

        # unwrap trainer backward function (restore the function that
        # after_setup replaced)
        self.trainer.backward = self.wrapped_backward
        self.wrapped_backward = None

    def backward(self, loss):
        """Replacement for trainer.backward that scales the loss via AMP."""
        assert self.amp is not None
        optimizer = self.trainer.optimizer

        with self.amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()

    @TrainerPlugin.hook
    def after_setup(self, **kw):
        """Wraps with AMP.

        Initializes model and optimizer through apex and swaps the trainer's
        backward function for the loss-scaling variant above.

        :param kw:
        :return:
        """
        optimizer = self.trainer.optimizer

        self.trainer.model, self.trainer.optimizer = self.amp.initialize(
            self.model, optimizer, opt_level=self.opt_level
        )

        # replace trainers backward function
        self.wrapped_backward = self.trainer.backward

        self.trainer.backward = self.backward
| 1,537 | 25.982456 | 73 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.