repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
liualexiang/liualexiang.github.io | generate_front_matter.py | import os,re
def get_docs_list(root="."):
    """Recursively collect the paths of all Markdown (.md) files under *root*.

    root -- directory to search (defaults to the current directory,
            preserving the original call signature).
    Returns a list of file paths joined onto *root*.
    """
    docs_list = []
    for path, subdirs, files in os.walk(root):
        for name in files:
            # endswith is more robust than splitting on '.' -- the original
            # required exactly one dot, so 'notes.v2.md' was silently skipped.
            if name.endswith(".md"):
                docs_list.append(os.path.join(path, name))
    return docs_list
def get_doc_title(file):
    """Return the text of the first Markdown heading ('# ...') in *file*.

    Returns None when the file contains no heading. The file is only read,
    so it is opened in plain read mode (the original used 'r+' needlessly).
    """
    title_regex = re.compile(r"#+ (.*)")
    with open(file, "r", encoding="utf-8") as f:
        for line in f:
            # rstrip the newline so a heading on the very last line of the
            # file (no trailing '\n') still matches; the original pattern
            # required a literal '\n' and missed that case.
            match = title_regex.fullmatch(line.rstrip("\n"))
            if match:
                return match.group(1)
    return None
def add_front_matter(file):
    """Prepend a YAML front-matter block (author + title) to *file* in place.

    The title comes from the file's first Markdown heading (may be None
    when the file has no heading).
    """
    with open(file, "r+", encoding="utf-8") as f:
        content = f.read()
        title = get_doc_title(file)
        f.seek(0)
        # BUG FIX: the original wrote "title:{title}" with no space after the
        # colon, which is not valid YAML front matter.
        f.write("---\nauthor: liualexiang\ntitle: {title}\n---\n\n".format(title=title) + content)
def do_add_front_matter(file):
    """Add front matter to *file* unless it already starts with a '---' fence.

    Empty files are treated as having no front matter (the original crashed
    on them with IndexError from readlines()[0]).
    """
    with open(file, "r", encoding="utf-8") as f:
        first_line = f.readline()
    # Require the actual '---' fence; the original pattern '-+' also fired
    # on any line merely starting with a dash (e.g. a Markdown list item).
    if not re.match(r"---", first_line):
        add_front_matter(file)
# Script entry point: walk the current directory and add YAML front matter
# to every Markdown file that does not already have it.
if __name__ == "__main__":
    docs_list = get_docs_list()
    for doc in docs_list:
        do_add_front_matter(doc)
crangelsmith/synthetic-data-tutorial | tutorial/synthesise.py | '''
This generates synthetic data from the hospital_ae_data_deidentify.csv
file. It generates three types of synthetic data and saves them in
different files.
'''
import random
import os
import time
import pandas as pd
import numpy as np
import filepaths
from DataDescriber import DataDescriber
from DataGenerator import DataGenerator
from ModelInspector import ModelInspector
from lib.utils import read_json_file
# Maps each column of the de-identified A&E dataset to the DataSynthesizer
# datatype used when describing it.
attribute_to_datatype = {
    'Time in A&E (mins)': 'Integer',
    'Treatment': 'String',
    'Gender': 'String',
    'Index of Multiple Deprivation Decile': 'Integer',
    'Hospital ID': 'String',
    'Arrival Date': 'String',
    'Arrival hour range': 'String',
    'Age bracket': 'String'
}

# Whether each column is treated as categorical (True) by the describer.
attribute_is_categorical = {
    'Time in A&E (mins)': False,
    'Treatment': True,
    'Gender': True,
    'Index of Multiple Deprivation Decile': True,
    'Hospital ID': True,
    'Arrival Date': True,
    'Arrival hour range': True,
    'Age bracket': True
}

# For each synthesis mode, where the dataset description JSON and the
# generated synthetic data CSV are written.
mode_filepaths = {
    'random': {
        'description': filepaths.hospital_ae_description_random,
        'data': filepaths.hospital_ae_data_synthetic_random
    },
    'independent': {
        'description': filepaths.hospital_ae_description_independent,
        'data': filepaths.hospital_ae_data_synthetic_independent
    },
    'correlated': {
        'description': filepaths.hospital_ae_description_correlated,
        'data': filepaths.hospital_ae_data_synthetic_correlated
    }
}
def main():
    """Run describe/generate/compare for each of the three synthesis modes."""
    start = time.time()

    # "_df" is the usual way people refer to a Pandas DataFrame object
    hospital_ae_df = pd.read_csv(filepaths.hospital_ae_data_deidentify)
    # generate the same number of rows as the original data (not required)
    num_rows = len(hospital_ae_df)

    for mode in ['random', 'independent', 'correlated']:
        description_path = mode_filepaths[mode]['description']
        data_path = mode_filepaths[mode]['data']

        print('describing synthetic data for', mode, 'mode...')
        describe_synthetic_data(mode, description_path)

        print('generating synthetic data for', mode, 'mode...')
        generate_synthetic_data(mode, num_rows, description_path, data_path)

        print('comparing histograms for', mode, 'mode...')
        compare_histograms(mode, hospital_ae_df, description_path, data_path)

        print('comparing pairwise mutual information for', mode, 'mode...')
        compare_pairwise_mutual_information(
            mode, hospital_ae_df, description_path, data_path)

    elapsed = round(time.time() - start, 2)
    print('done in ' + str(elapsed) + ' seconds.')
def describe_synthetic_data(mode: str, description_filepath: str):
    """Describe the de-identified dataset and save the description JSON.

    Keyword arguments:
    mode -- what type of synthetic data ('random', 'independent', 'correlated')
    description_filepath -- filepath to the data description
    """
    data_describer = DataDescriber()
    source_file = filepaths.hospital_ae_data_deidentify

    if mode == 'random':
        data_describer.describe_dataset_in_random_mode(
            source_file,
            attribute_to_datatype=attribute_to_datatype,
            attribute_to_is_categorical=attribute_is_categorical)
    elif mode == 'independent':
        data_describer.describe_dataset_in_independent_attribute_mode(
            source_file,
            attribute_to_datatype=attribute_to_datatype,
            attribute_to_is_categorical=attribute_is_categorical)
    elif mode == 'correlated':
        # We're not using differential privacy in this tutorial, so set
        # epsilon=0 to turn it off (larger epsilon = less injected noise).
        epsilon = 0
        # Maximum number of parents (incoming edges) per node in the
        # Bayesian network.
        degree_of_bayesian_network = 1
        data_describer.describe_dataset_in_correlated_attribute_mode(
            dataset_file=source_file,
            epsilon=epsilon,
            k=degree_of_bayesian_network,
            attribute_to_datatype=attribute_to_datatype,
            attribute_to_is_categorical=attribute_is_categorical)

    data_describer.save_dataset_description_to_file(description_filepath)
def generate_synthetic_data(
        mode: str,
        num_rows: int,
        description_filepath: str,
        synthetic_data_filepath: str
):
    """Generate the synthetic data and save it to the data/ directory.

    Keyword arguments:
    mode -- what type of synthetic data
    num_rows -- number of rows in the synthetic dataset
    description_filepath -- filepath to the data description
    synthetic_data_filepath -- filepath to where synthetic data written
    """
    generator = DataGenerator()
    # dispatch table instead of an if/elif chain; unknown modes fall through
    # to just saving (same as the original behaviour)
    generate_by_mode = {
        'random': generator.generate_dataset_in_random_mode,
        'independent': generator.generate_dataset_in_independent_mode,
        'correlated': generator.generate_dataset_in_correlated_attribute_mode,
    }
    if mode in generate_by_mode:
        generate_by_mode[mode](num_rows, description_filepath)
    generator.save_synthetic_data(synthetic_data_filepath)
def compare_histograms(
        mode: str,
        hospital_ae_df: pd.DataFrame,
        description_filepath: str,
        synthetic_data_filepath: str
):
    """Plot per-column histogram comparisons of original vs synthetic data.

    Keyword arguments:
    mode -- what type of synthetic data
    hospital_ae_df -- DataFrame of the original dataset
    description_filepath -- filepath to the data description
    synthetic_data_filepath -- filepath to where synthetic data written
    """
    synthetic_df = pd.read_csv(synthetic_data_filepath)

    # attribute descriptions come from the saved dataset description file
    attribute_description = read_json_file(
        description_filepath)['attribute_description']

    inspector = ModelInspector(
        hospital_ae_df, synthetic_df, attribute_description)

    for attribute in synthetic_df.columns:
        plot_filename = mode + '_' + attribute + '.png'
        figure_filepath = os.path.join(filepaths.plots_dir, plot_filename)
        # need to replace whitespace in filepath for Markdown reference
        inspector.compare_histograms(
            attribute, figure_filepath.replace(' ', '_'))
def compare_pairwise_mutual_information(
        mode: str,
        hospital_ae_df: pd.DataFrame,
        description_filepath: str,
        synthetic_data_filepath: str
):
    """Produce a heatmap comparing pairwise mutual information of attributes.

    Keyword arguments:
    mode -- what type of synthetic data
    hospital_ae_df -- DataFrame of the original dataset
    description_filepath -- filepath to the data description
    synthetic_data_filepath -- filepath to where synthetic data written
    """
    synthetic_df = pd.read_csv(synthetic_data_filepath)
    attribute_description = read_json_file(
        description_filepath)['attribute_description']

    inspector = ModelInspector(
        hospital_ae_df, synthetic_df, attribute_description)

    heatmap_filename = 'mutual_information_heatmap_' + mode + '.png'
    figure_filepath = os.path.join(filepaths.plots_dir, heatmap_filename)
    inspector.mutual_information_heatmap(figure_filepath)
# Script entry point: run the full synthesis pipeline.
if __name__ == "__main__":
    main()
|
crangelsmith/synthetic-data-tutorial | DataSynthesizer/datatypes/utils/AttributeLoader.py | from pandas import Series
from datatypes.DateTimeAttribute import DateTimeAttribute
from datatypes.FloatAttribute import FloatAttribute
from datatypes.IntegerAttribute import IntegerAttribute
from datatypes.SocialSecurityNumberAttribute import SocialSecurityNumberAttribute
from datatypes.StringAttribute import StringAttribute
from datatypes.utils.DataType import DataType
def parse_json(attribute_in_json):
    """Reconstruct an attribute object from its JSON description.

    Dispatches on the 'data_type' field to the matching Attribute class,
    then restores the distribution metadata onto the instance.
    Raises Exception for unknown data types.
    """
    name = attribute_in_json['name']
    data_type = DataType(attribute_in_json['data_type'])
    is_candidate_key = attribute_in_json['is_candidate_key']
    is_categorical = attribute_in_json['is_categorical']
    histogram_size = len(attribute_in_json['distribution_bins'])
    if data_type is DataType.INTEGER:
        attribute = IntegerAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
    elif data_type is DataType.FLOAT:
        attribute = FloatAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
    elif data_type is DataType.DATETIME:
        attribute = DateTimeAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
    elif data_type is DataType.STRING:
        attribute = StringAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
    elif data_type is DataType.SOCIAL_SECURITY_NUMBER:
        # BUG FIX: was `data_type.SOCIAL_SECURITY_NUMBER` -- member lookup via
        # the enum *instance*, which is disallowed on newer Python enums.
        attribute = SocialSecurityNumberAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
    else:
        raise Exception('Data type {} is unknown.'.format(data_type.value))
    attribute.missing_rate = attribute_in_json['missing_rate']
    attribute.min = attribute_in_json['min']
    attribute.max = attribute_in_json['max']
    attribute.distribution_bins = attribute_in_json['distribution_bins']
    attribute.distribution_probabilities = attribute_in_json['distribution_probabilities']
    return attribute
|
crangelsmith/synthetic-data-tutorial | DataSynthesizer/datatypes/IntegerAttribute.py | <gh_stars>10-100
from typing import Union
from pandas import Series
from datatypes.AbstractAttribute import AbstractAttribute
from datatypes.utils.DataType import DataType
class IntegerAttribute(AbstractAttribute):
    """Attribute whose values are integers (numerical, histogram-binned).

    Redundant overrides that only delegated to super() (infer_distribution,
    generate_values_as_candidate_key) were removed; inheritance provides
    identical behaviour.
    """

    def __init__(self, name: str, is_candidate_key, is_categorical, histogram_size: Union[int, str], data: Series):
        super().__init__(name, is_candidate_key, is_categorical, histogram_size, data)
        self.is_numerical = True
        self.data_type = DataType.INTEGER

    def infer_domain(self, categorical_domain=None, numerical_range=None):
        """Infer min/max via the base class, then coerce the bounds to int."""
        super().infer_domain(categorical_domain, numerical_range)
        self.min = int(self.min)
        self.max = int(self.max)

    def sample_values_from_binning_indices(self, binning_indices):
        """Sample concrete values, casting all non-null entries to int."""
        column = super().sample_values_from_binning_indices(binning_indices)
        column[~column.isnull()] = column[~column.isnull()].astype(int)
        return column
|
crangelsmith/synthetic-data-tutorial | tutorial/generate.py | """
Script that generates hospital A&E data to use in the synthetic data tutorial.
Columns of data inpired by NHS+ODI Leeds blog post:
https://odileeds.org/blog/2019-01-24-exploring-methods-for-creating-synthetic-a-e-data
"""
import os
import random
from datetime import datetime, timedelta
import uuid
import random, string
import time
import pandas as pd
import numpy as np
import scipy.stats as stats
from scipy.linalg import eigh, cholesky
from scipy.stats import norm
import statsmodels.api as sm
import filepaths
# TODO: give hospitals different average waiting times
# Number of admission rows generated for the mock dataset.
num_of_rows = 10000
def main():
    """Entry point: generate the fake hospital A&E dataset and write it out."""
    generate_fake_dataset()
def generate_fake_dataset():
    """Build the full fake A&E dataset column by column, write it to CSV and
    return it as a DataFrame."""
    print('generating data...')
    start = time.time()

    dataset = {}

    print('generating Health Service ID numbers...')
    dataset['Health Service ID'] = generate_health_service_id_numbers()

    print('generating patient ages and times in A&E...')
    ages, times_in_ae = generate_ages_times_in_age_kde()
    dataset['Age'] = ages
    dataset['Time in A&E (mins)'] = times_in_ae

    print('generating hospital instances...')
    dataset['Hospital'] = generate_hospitals()

    print('generating arrival times...')
    dataset['Arrival Time'] = generate_arrival_times()

    print('generating A&E treaments...')
    dataset['Treatment'] = generate_treatments()

    print('generating patient gender instances...')
    dataset['Gender'] = generate_genders()

    print('generating patient postcodes...')
    dataset['Postcode'] = generate_postcodes()

    write_out_dataset(dataset, filepaths.hospital_ae_data)

    elapsed = round(time.time() - start, 2)
    print('done in ' + str(elapsed) + ' seconds.')
    return pd.DataFrame(dataset)
def generate_ages_times_in_age() -> (list, list):
    """
    Generates correlated ages and waiting times and returns them as lists.
    Obviously normally distributed ages is not very true to real life but is
    fine for our mock data.
    Correlated random data generation code based on:
    https://realpython.com/python-random/
    """
    # Start with a correlation matrix and standard deviations.
    # 0.95 is the correlation between ages and waiting times; the
    # correlation of a variable with itself is 1.
    # (FIX: the old comment said 0.9, which disagreed with the code.)
    correlations = np.array([[1, 0.95], [0.95, 1]])
    # Standard deviations/means of ages and waiting times, respectively
    stdev = np.array([20, 20])
    mean = np.array([41, 60])
    cov = corr2cov(correlations, stdev)
    # (FIX: removed leftover debug print(cov).)
    data = np.random.multivariate_normal(mean=mean, cov=cov, size=num_of_rows)
    data = np.array(data, dtype=int)
    # negative ages or waiting times wouldn't make sense
    # so set any negative values to 0 and 1 respectively
    data[np.nonzero(data[:, 0] < 1)[0], 0] = 0
    data[np.nonzero(data[:, 1] < 1)[0], 1] = 1
    ages = data[:, 0].tolist()
    times_in_ae = data[:, 1].tolist()
    return (ages, times_in_ae)
def generate_ages_times_in_age_kde() -> (list, list):
    """
    Generates correlated ages and waiting times (via a 2-D KDE fitted to
    synthetic seed data) and returns them as lists.

    Returns (ages, times_in_ae), each a list of num_of_rows floats.
    """
    # Seed samples: waiting times ~ N(3, 1); ages from a two-component
    # Gaussian mixture.
    times_in_ae_init = np.random.normal(3,1,size=(1,num_of_rows))[0]
    ages_init = make_gaussian_mixture_data(num_of_rows)
    # NOTE(review): bandwidths of [100, 100] are enormous relative to the
    # seed data's spread -- the KDE is almost pure smoothing; confirm the
    # values are intended.
    dens_time_age = sm.nonparametric.KDEMultivariate(data=[times_in_ae_init, ages_init],var_type = 'cc', bw = [100,100])
    # resample() returns an array of shape (2, num_of_rows)
    data = resample(dens_time_age,num_of_rows)
    # negative ages or waiting times wouldn't make sense
    # so set any negative values to 0 and 1 respectively
    # NOTE(review): these clamp column j based on column i (indices are
    # crossed), yet the reads below use row indexing (data[1, :]) on the
    # transposed result -- looks inconsistent with the clamping above;
    # verify against the intended row/column layout.
    data[np.nonzero(data[:, 0] < 1)[0], 1] = 0
    data[np.nonzero(data[:, 1] < 1)[0], 0] = 1
    ages = data[1, :].tolist()
    times_in_ae = data[0, :].tolist()
    # NOTE(review): leftover debug output of the bandwidth-derived
    # covariance (which resample() does not actually use).
    cov = np.diag(dens_time_age.bw) ** 2
    print (cov)
    return (ages, times_in_ae)
def resample(kde, size):
    """Draw *size* samples from a fitted 2-D KDE by perturbing randomly
    chosen data points with multivariate-normal noise.

    Returns an array of shape (n_dims, size).
    """
    n, d = kde.data.shape
    indices = np.random.randint(0, n, size)
    # NOTE(review): the covariance is hard-coded rather than derived from
    # the KDE bandwidth (np.diag(kde.bw) ** 2, which the original computed
    # and immediately overwrote -- dead code, now removed). Confirm the
    # hard-coded values are intended.
    cov = [[100., 50.], [50., 100.]]
    means = kde.data[indices, :]
    # renamed from `norm`, which shadowed scipy.stats.norm imported above
    noise = np.random.multivariate_normal(np.zeros(d), cov, size)
    return np.transpose(means + noise)
def make_gaussian_mixture_data(N, f=0.7, rseed=1):
    """Generate N samples from a two-component Gaussian mixture.

    A fraction *f* of the samples is drawn around 60 (sd 5); the remaining
    (1 - f) are shifted down by 55, i.e. centred around 5.

    N -- number of samples to generate
    f -- fraction of samples left in the first (unshifted) component
    rseed -- seed for the random number generator

    BUG FIX: the original created a seeded RandomState but then sampled from
    the global np.random (so rseed was ignored), and sized the sample with
    the module-level num_of_rows instead of N.
    """
    rand = np.random.RandomState(rseed)
    x = rand.normal(60, 5, size=N)
    x[int(f * N):] += -55
    return x
def corr2cov(correlations: np.ndarray, stdev: np.ndarray) -> np.ndarray:
    """Build a covariance matrix from a correlation matrix and per-variable
    standard deviations: cov = D @ corr @ D where D = diag(stdev)."""
    scale = np.diag(stdev)
    return scale @ correlations @ scale
def generate_admission_ids(n: int = None) -> list:
    """Generate a random 10-digit ID string for every admission record.

    n -- how many IDs to generate; defaults to the module-level num_of_rows
         (backward compatible with the original zero-argument call).
    Note: IDs are random, not guaranteed unique.
    """
    count = num_of_rows if n is None else n
    return [
        ''.join(random.choice(string.digits) for _ in range(10))
        for _ in range(count)
    ]
def generate_health_service_id_numbers(n: int = None) -> list:
    """Generate dummy Health Service ID numbers similar to NHS 10 digit
    format (three dash-separated groups of 3, 3 and 4 digits).
    See: https://www.nhs.uk/using-the-nhs/about-the-nhs/what-is-an-nhs-number/

    n -- how many IDs to generate; defaults to the module-level num_of_rows
         (backward compatible with the original zero-argument call).
    """
    def random_digits(k):
        # one group of k random digits
        return ''.join(random.choice(string.digits) for _ in range(k))

    count = num_of_rows if n is None else n
    return [
        '-'.join((random_digits(3), random_digits(3), random_digits(4)))
        for _ in range(count)
    ]
def generate_postcodes() -> list:
    """Read a .csv with info on every London postcode and return a random
    sample (with replacement) of the postcodes currently in use.

    List of London postcodes from https://www.doogal.co.uk/PostcodeDownloads.php
    """
    postcodes_df = pd.read_csv(filepaths.postcodes_london)
    # BUG FIX: the docstring and the variable name both say postcodes *in
    # use*, but the original filtered on 'In Use?' == "No".
    postcodes_in_use = list(postcodes_df[postcodes_df['In Use?'] == "Yes"]['Postcode'])
    return random.choices(postcodes_in_use, k=num_of_rows)
def generate_hospitals() -> list:
    """Read data/hospitals_london.txt and draw a weighted random sample of
    hospital names for the dataset.

    List of London hospitals loosely based on
    https://en.wikipedia.org/wiki/Category:NHS_hospitals_in_London
    """
    with open(filepaths.hospitals_london, 'r') as file_in:
        hospital_names = [line.strip() for line in file_in.readlines()]
    # random per-hospital weights so some hospitals appear more often
    hospital_weights = random.choices(range(1, 100), k=len(hospital_names))
    return random.choices(hospital_names, k=num_of_rows, weights=hospital_weights)
def generate_arrival_times() -> list:
    """Generate arrival times as 'YYYY-MM-DD HH:MM:SS' strings.

    Times are hardcoded to the first week of April 2019, weighted slightly
    towards the weekend, with a per-day random peak time.
    """
    # first 7 days in April 2019
    days_dates = [1, 2, 3, 4, 5, 6, 7]
    # have more people come in at the weekend - higher weights
    day_weights = [0.5, 0.6, 0.7, 0.8, 0.9, 1, 1]
    days = random.choices(days_dates, day_weights, k=num_of_rows)
    # this is just so each day has a different peak time
    days_time_modes = {day: random.random() for day in days_dates}

    arrival_times = []
    for day in days:
        day_start = datetime(2019, 4, day, 00, 00, 00)
        day_end = datetime(2019, 4, day, 23, 59, 59)
        # triangular distribution peaks at the day's mode
        fraction = random.triangular(0, 1, days_time_modes[day])
        moment = day_start + (day_end - day_start) * fraction
        arrival_times.append(moment.strftime('%Y-%m-%d %H:%M:%S'))
    return arrival_times
def generate_genders() -> list:
    """Draw a weighted random sample of NHS gender codes for every row.

    National codes for gender in NHS data:
    https://www.datadictionary.nhs.uk/data_dictionary/attributes/p/person/person_gender_code_de.asp?shownav=1
    """
    genders = pd.read_csv(filepaths.nhs_ae_gender_codes)['Gender'].tolist()
    # these weights are just dummy values. please don't take them as accurate.
    dummy_weights = [0.005, 0.495, 0.495, 0.005]
    return random.choices(genders, k=num_of_rows, weights=dummy_weights)
def generate_treatments() -> list:
    """Draw a weighted random sample of A&E treatments from
    data/treatment_codes_nhs_ae.csv.

    NHS treatment codes:
    https://www.datadictionary.nhs.uk/web_site_content/supporting_information/clinical_coding/accident_and_emergency_treatment_tables.asp?shownav=1
    """
    treatments = pd.read_csv(filepaths.nhs_ae_treatment_codes)['Treatment'].tolist()
    # likelihood of each of the treatments - make some more common
    treatment_weights = random.choices(range(1, 100), k=len(treatments))
    return random.choices(treatments, k=num_of_rows, weights=treatment_weights)
def write_out_dataset(dataset: dict, filepath: str):
    """Write *dataset* (a dict of column name -> values) to a .csv file.

    Keyword arguments:
    dataset -- the dataset to be written to disk
    filepath -- path to write the file out to
    """
    pd.DataFrame.from_dict(dataset).to_csv(filepath, index=False)
# Script entry point: build the fake dataset and write it to CSV.
if __name__ == "__main__":
    main()
|
crangelsmith/synthetic-data-tutorial | DataSynthesizer/ModelInspector.py | from typing import List
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from lib.utils import pairwise_attributes_mutual_information, normalize_given_distribution
matplotlib.rc('xtick', labelsize=20)
matplotlib.rc('ytick', labelsize=20)
sns.set()
class ModelInspector(object):
    """Compare a private (original) DataFrame against a synthetic one.

    On construction, columns that look like candidate keys (every value
    unique in the synthetic data) are dropped in place from BOTH frames,
    since per-value comparison of unique identifiers is meaningless.
    """

    def __init__(self, private_df: pd.DataFrame, synthetic_df: pd.DataFrame, attribute_description):
        self.private_df = private_df
        self.synthetic_df = synthetic_df
        self.attribute_description = attribute_description

        self.candidate_keys = set()
        for attr in synthetic_df:
            if synthetic_df[attr].unique().size == synthetic_df.shape[0]:
                self.candidate_keys.add(attr)
        # NOTE: in-place drop mutates the caller's DataFrames.
        self.private_df.drop(columns=self.candidate_keys, inplace=True)
        self.synthetic_df.drop(columns=self.candidate_keys, inplace=True)

    def compare_histograms(self, attribute, figure_filepath):
        """Save side-by-side (private vs synthetic) histograms for one attribute.

        DateTime attributes, non-categorical strings and candidate keys are
        skipped without producing a plot.
        """
        datatype = self.attribute_description[attribute]['data_type']
        is_categorical = self.attribute_description[attribute]['is_categorical']

        # ignore datetime attributes, since they are converted into timestamps
        if datatype == 'DateTime':
            return
        # ignore non-categorical string attributes
        elif datatype == 'String' and not is_categorical:
            return
        elif attribute in self.candidate_keys:
            return
        else:
            fig = plt.figure(figsize=(25, 12), dpi=120)
            ax1 = fig.add_subplot(121)
            ax2 = fig.add_subplot(122)

        if is_categorical:
            dist_priv = self.private_df[attribute].value_counts()
            dist_synt = self.synthetic_df[attribute].value_counts()

            # Align both distributions on the union of their categories.
            # BUG FIX: Series.iteritems() was removed in pandas 2.0;
            # Series.items() is the supported equivalent.
            for idx, number in dist_priv.items():
                if idx not in dist_synt.index:
                    dist_synt.loc[idx] = 0
            for idx, number in dist_synt.items():
                if idx not in dist_priv.index:
                    dist_priv.loc[idx] = 0

            # stringify and sort so both axes share the same ordering
            dist_priv.index = [str(i) for i in dist_priv.index]
            dist_synt.index = [str(i) for i in dist_synt.index]
            dist_priv.sort_index(inplace=True)
            dist_synt.sort_index(inplace=True)
            pos_priv = list(range(len(dist_priv)))
            pos_synt = list(range(len(dist_synt)))

            ax1.bar(pos_priv, normalize_given_distribution(dist_priv.values), align='center', width=0.8)
            ax2.bar(pos_synt, normalize_given_distribution(dist_synt.values), align='center', width=0.8)
            ax1.set_xticks(np.arange(min(pos_priv), max(pos_priv) + 1, 1.0))
            ax2.set_xticks(np.arange(min(pos_synt), max(pos_synt) + 1, 1.0))
            ax1.set_xticklabels(dist_priv.index.tolist(), fontsize=10)
            ax2.set_xticklabels(dist_synt.index.tolist(), fontsize=10)
        # the rest are non-categorical numeric attributes.
        else:
            ax1.hist(self.private_df[attribute].dropna(), bins=15, align='left', density=True)
            ax2.hist(self.synthetic_df[attribute].dropna(), bins=15, align='left', density=True)

        # give both subplots identical axis limits so they compare visually
        ax1_x_min, ax1_x_max = ax1.get_xlim()
        ax2_x_min, ax2_x_max = ax2.get_xlim()
        ax1_y_min, ax1_y_max = ax1.get_ylim()
        ax2_y_min, ax2_y_max = ax2.get_ylim()
        x_min = min(ax1_x_min, ax2_x_min)
        x_max = max(ax1_x_max, ax2_x_max)
        y_min = min(ax1_y_min, ax2_y_min)
        y_max = max(ax1_y_max, ax2_y_max)
        ax1.set_xlim([x_min, x_max])
        ax1.set_ylim([y_min, y_max])
        ax2.set_xlim([x_min, x_max])
        ax2.set_ylim([y_min, y_max])

        fig.autofmt_xdate()
        plt.savefig(figure_filepath, bbox_inches='tight')
        plt.close()

    def mutual_information_heatmap(self, figure_filepath, attributes: List = None):
        """Save a heatmap comparing pairwise mutual information of the
        private and synthetic data (optionally restricted to *attributes*)."""
        if attributes:
            private_df = self.private_df[attributes]
            synthetic_df = self.synthetic_df[attributes]
        else:
            private_df = self.private_df
            synthetic_df = self.synthetic_df

        private_mi = pairwise_attributes_mutual_information(private_df)
        synthetic_mi = pairwise_attributes_mutual_information(synthetic_df)

        fig = plt.figure(figsize=(15, 6), dpi=120)
        fig.suptitle('Pairwise Mutual Information Comparison (Private vs Synthetic)', fontsize=20)
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        sns.heatmap(private_mi, ax=ax1, cmap="GnBu")
        sns.heatmap(synthetic_mi, ax=ax2, cmap="GnBu")
        ax1.set_title('Private, max=1', fontsize=15)
        ax2.set_title('Synthetic, max=1', fontsize=15)
        fig.autofmt_xdate()
        fig.tight_layout()
        plt.subplots_adjust(top=0.83)
        plt.savefig(figure_filepath, bbox_inches='tight')
        plt.close()
if __name__ == '__main__':
    # Directories of input and output files
    # NOTE(review): only input_dataset_file is used below; the description
    # and synthetic-data paths look like leftovers from an older demo.
    input_dataset_file = '../datasets/AdultIncomeData/adult.csv'
    dataset_description_file = '../output/description/AdultIncomeData_description.txt'
    synthetic_dataset_file = '../output/synthetic_data/AdultIncomeData_synthetic.csv'

    df = pd.read_csv(input_dataset_file)
    print(df.head(5))
|
crangelsmith/synthetic-data-tutorial | DataSynthesizer/DataGenerator.py | import numpy as np
import pandas as pd
from datatypes.utils.AttributeLoader import parse_json
from lib.utils import set_random_seed, read_json_file, generate_random_string
class DataGenerator(object):
    """Generate synthetic datasets from a saved DataSynthesizer description.

    Three modes: random (ignores learned distributions), independent
    (samples each attribute from its own distribution) and correlated
    (ancestral sampling of a learned Bayesian network).
    """

    def __init__(self):
        self.n = 0                      # number of rows to generate
        self.synthetic_dataset = None   # resulting DataFrame
        self.description = {}           # parsed description JSON
        self.encoded_dataset = None     # bin-index encoded data (correlated mode)

    def generate_dataset_in_random_mode(self, n, description_file, seed=0, minimum=0, maximum=100):
        """Generate n rows ignoring learned distributions.

        Categorical attributes are drawn uniformly from their bins;
        integers/floats uniformly from [minimum, maximum]; string columns
        get random content of a single random length.
        """
        set_random_seed(seed)
        description = read_json_file(description_file)

        self.synthetic_dataset = pd.DataFrame()
        for attr in description['attribute_description'].keys():
            attr_info = description['attribute_description'][attr]
            datatype = attr_info['data_type']
            is_categorical = attr_info['is_categorical']
            is_candidate_key = attr_info['is_candidate_key']
            if is_candidate_key:
                self.synthetic_dataset[attr] = parse_json(attr_info).generate_values_as_candidate_key(n)
            elif is_categorical:
                self.synthetic_dataset[attr] = np.random.choice(attr_info['distribution_bins'], n)
            elif datatype == 'String':
                # one random length shared by the whole column
                length = np.random.randint(attr_info['min'], attr_info['max'])
                # NOTE(review): assigning a scalar to a column of a still-empty
                # DataFrame produces an empty column -- confirm a string
                # attribute can never be the first column generated.
                self.synthetic_dataset[attr] = length
                self.synthetic_dataset[attr] = self.synthetic_dataset[attr].map(lambda x: generate_random_string(x))
            else:
                if datatype == 'Integer':
                    self.synthetic_dataset[attr] = np.random.randint(minimum, maximum + 1, n)
                else:
                    self.synthetic_dataset[attr] = np.random.uniform(minimum, maximum, n)

    def generate_dataset_in_independent_mode(self, n, description_file, seed=0):
        """Generate n rows sampling every attribute from its own distribution."""
        set_random_seed(seed)
        self.description = read_json_file(description_file)
        all_attributes = self.description['meta']['all_attributes']
        candidate_keys = set(self.description['meta']['candidate_keys'])
        self.synthetic_dataset = pd.DataFrame(columns=all_attributes)
        for attr in all_attributes:
            attr_info = self.description['attribute_description'][attr]
            column = parse_json(attr_info)
            if attr in candidate_keys:
                self.synthetic_dataset[attr] = column.generate_values_as_candidate_key(n)
            else:
                binning_indices = column.sample_binning_indices_in_independent_attribute_mode(n)
                self.synthetic_dataset[attr] = column.sample_values_from_binning_indices(binning_indices)

    def generate_dataset_in_correlated_attribute_mode(self, n, description_file, seed=0):
        """Generate n rows by sampling the learned Bayesian network, falling
        back to independent sampling for attributes outside the network."""
        set_random_seed(seed)
        self.n = n
        self.description = read_json_file(description_file)
        all_attributes = self.description['meta']['all_attributes']
        candidate_keys = set(self.description['meta']['candidate_keys'])
        self.encoded_dataset = DataGenerator.generate_encoded_dataset(self.n, self.description)
        self.synthetic_dataset = pd.DataFrame(columns=all_attributes)
        for attr in all_attributes:
            attr_info = self.description['attribute_description'][attr]
            column = parse_json(attr_info)
            if attr in self.encoded_dataset:
                self.synthetic_dataset[attr] = column.sample_values_from_binning_indices(self.encoded_dataset[attr])
            elif attr in candidate_keys:
                self.synthetic_dataset[attr] = column.generate_values_as_candidate_key(n)
            else:
                # for attributes not in BN or candidate keys, use independent attribute mode.
                binning_indices = column.sample_binning_indices_in_independent_attribute_mode(n)
                self.synthetic_dataset[attr] = column.sample_values_from_binning_indices(binning_indices)

    @staticmethod
    def get_sampling_order(bn):
        """Return attribute names in sampling order: the network root first,
        then each child in the stored edge order."""
        order = [bn[0][1][0]]
        for child, _ in bn:
            order.append(child)
        return order

    @staticmethod
    def generate_encoded_dataset(n, description):
        """Sample n rows of bin indices by ancestral sampling of the Bayesian
        network stored in *description*."""
        bn = description['bayesian_network']
        bn_root_attr = bn[0][1][0]
        root_attr_dist = description['conditional_probabilities'][bn_root_attr]
        encoded_df = pd.DataFrame(columns=DataGenerator.get_sampling_order(bn))
        encoded_df[bn_root_attr] = np.random.choice(len(root_attr_dist), size=n, p=root_attr_dist)

        for child, parents in bn:
            child_conditional_distributions = description['conditional_probabilities'][child]
            for parents_instance in child_conditional_distributions.keys():
                dist = child_conditional_distributions[parents_instance]
                # HACK: parent instantiations are stored as stringified tuples
                # and the row filter is assembled as a Python expression, so
                # eval() is used twice here. This is only acceptable because
                # the description file is produced by DataDescriber -- never
                # feed untrusted descriptions through this path.
                parents_instance = list(eval(parents_instance))

                filter_condition = ''
                for parent, value in zip(parents, parents_instance):
                    filter_condition += f"(encoded_df['{parent}']=={value})&"
                filter_condition = eval(filter_condition[:-1])

                size = encoded_df[filter_condition].shape[0]
                if size:
                    encoded_df.loc[filter_condition, child] = np.random.choice(len(dist), size=size, p=dist)

            # rows whose parent combination had no conditional distribution
            # fall back to the child's unconditioned distribution
            unconditioned_distribution = description['attribute_description'][child]['distribution_probabilities']
            encoded_df.loc[encoded_df[child].isnull(), child] = np.random.choice(len(unconditioned_distribution),
                                                                                 size=encoded_df[child].isnull().sum(),
                                                                                 p=unconditioned_distribution)
        encoded_df[encoded_df.columns] = encoded_df[encoded_df.columns].astype(int)
        return encoded_df

    def save_synthetic_data(self, to_file):
        """Write the generated dataset to a CSV file (no index column)."""
        self.synthetic_dataset.to_csv(to_file, index=False)
if __name__ == '__main__':
    from time import time
    # NOTE(review): the first assignment is immediately overwritten by a
    # developer-specific absolute path -- leftover debug code.
    dataset_description_file = '../out/AdultIncome/description_test.txt'
    dataset_description_file = '/home/haoyue/GitLab/data-responsibly-webUI/dataResponsiblyUI/static/intermediatedata/1498175138.8088856_description.txt'

    generator = DataGenerator()
    t = time()
    generator.generate_dataset_in_correlated_attribute_mode(51, dataset_description_file)
    print('running time: {} s'.format(time() - t))
    print(generator.synthetic_dataset.loc[:50])
|
crangelsmith/synthetic-data-tutorial | tutorial/filepaths.py | <reponame>crangelsmith/synthetic-data-tutorial
import os
import sys
from pathlib import Path
# Absolute path of this file; parents[1] is the project root (one level up).
this_filepath = Path(os.path.realpath(__file__))
project_root = str(this_filepath.parents[1])
data_dir = os.path.join(project_root, 'data/')

# add the DataSynthesizer repo to the pythonpath
# (import-time side effect: makes DataDescriber/DataGenerator importable)
data_synthesizer_dir = os.path.join(project_root, 'DataSynthesizer/')
sys.path.append(data_synthesizer_dir)

plots_dir = os.path.join(project_root, 'plots/')

# raw input data files
postcodes_london = os.path.join(data_dir, 'London postcodes.csv')
hospitals_london = os.path.join(data_dir, 'hospitals_london.txt')
nhs_ae_gender_codes = os.path.join(data_dir, 'nhs_ae_gender_codes.csv')
nhs_ae_treatment_codes = os.path.join(data_dir, 'nhs_ae_treatment_codes.csv')
age_population_london = os.path.join(data_dir, 'age_population_london.csv')

# generated and derived datasets
hospital_ae_data = os.path.join(data_dir, 'hospital_ae_data.csv')
hospital_ae_data_deidentify = os.path.join(data_dir, 'hospital_ae_data_deidentify.csv')
hospital_ae_data_synthetic_random = os.path.join(
    data_dir, 'hospital_ae_data_synthetic_random.csv')
hospital_ae_data_synthetic_independent = os.path.join(
    data_dir, 'hospital_ae_data_synthetic_independent.csv')
hospital_ae_data_synthetic_correlated = os.path.join(
    data_dir, 'hospital_ae_data_synthetic_correlated.csv')

# dataset description JSON files written by DataSynthesizer
hospital_ae_description_random = os.path.join(
    data_dir, 'hospital_ae_description_random.json')
hospital_ae_description_independent = os.path.join(
    data_dir, 'hospital_ae_description_independent.json')
hospital_ae_description_correlated = os.path.join(
    data_dir, 'hospital_ae_description_correlated.json')
|
chickenjohn/bit-rnn | bit_utils.py | import functools
import tensorflow as tf
from tensorflow.python.ops import variable_scope
# Original tf.get_variable, kept so it can be wrapped and later restored.
_origin_get_variable = tf.get_variable
# Stack of active TFVariableReplaceHelper contexts (innermost last).
_object_stack = []


def _new_get_variable(*args, **kwargs):
    """Drop-in replacement for tf.get_variable that post-processes the
    created variable with the innermost active helper's function
    (e.g. a weight quantizer)."""
    v = _origin_get_variable(*args, **kwargs)
    if len(_object_stack) != 0:
        # apply the transformation of the innermost context
        return _object_stack[-1]._fn(v)
    else:
        return v
class TFVariableReplaceHelper(object):
    """Context manager that monkey-patches tf.get_variable so every variable
    created inside the with-block is passed through *fn*."""

    def __init__(self, fn):
        self._old_get_variable = None   # saved tf.get_variable for restore
        self._fn = fn                   # transformation applied to new variables

    def __enter__(self):
        global _object_stack
        _object_stack.append(self)
        self._old_get_variable = tf.get_variable
        tf.get_variable = _new_get_variable
        variable_scope.get_variable = _new_get_variable

    def __exit__(self, *args):
        global _object_stack
        _object_stack.pop()
        # restore both patched entry points to the saved function
        tf.get_variable = self._old_get_variable
        variable_scope.get_variable = self._old_get_variable
def replace_variable(fn):
    """Return a context manager under which every tf.get_variable result is
    transformed by *fn* (see TFVariableReplaceHelper)."""
    return TFVariableReplaceHelper(fn)
def round_bit(x, bit):
    """Quantize x to 2**bit - 1 uniform levels.

    bit == 32 is treated as full precision and returned unchanged. The
    Round op's gradient is overridden with Identity, i.e. a
    straight-through estimator.
    """
    if bit == 32:
        return x
    g = tf.get_default_graph()
    k = 2**bit - 1
    with g.gradient_override_map({'Round': 'Identity'}):
        return tf.round(x * k) / k
# Guard so the custom gradient is registered only once; TensorFlow raises
# if the same gradient name is registered twice.
# BUG FIX: the original never set _grad_defined to True, so the guard was
# inert and re-executing this block would re-register and fail.
_grad_defined = False
if not _grad_defined:
    @tf.RegisterGradient("IdentityMaxMinGrad")
    def _identigy_max_min_grad(op, grad):
        # pass the gradient straight through to the first input; the second
        # input (the clip bound) gets no gradient
        return grad, None
    _grad_defined = True
def quantize_w(x, bit):
    """Quantize weights ``x`` to ``bit`` bits.

    The input is divided by 2.5x its mean absolute value (the scale is
    excluded from the gradient), clipped to [-0.5, 0.5], shifted to [0, 1],
    rounded to 2**bit levels and mapped back. The clip ops' gradients are
    overridden to pass straight through. ``bit == 32`` is a no-op.
    """
    if bit == 32:
        return x
    graph = tf.get_default_graph()
    # do not compute gradient with respect to scale
    scale = tf.stop_gradient(tf.reduce_mean(tf.abs(x)) * 2.5)
    # A single map with both ops is equivalent to the two nested maps used
    # before (the key sets are disjoint).
    overrides = {'Minimum': 'IdentityMaxMinGrad', 'Maximum': 'IdentityMaxMinGrad'}
    with graph.gradient_override_map(overrides):
        shifted = tf.clip_by_value(x / scale, -0.5, 0.5) + 0.5
        return (round_bit(shifted, bit=bit) - 0.5) * scale
# Pre-bound convenience variants: fixed bit-width versions of round_bit()
# and quantize_w() for direct use as activation / weight quantizers.
round_bit_1bit = functools.partial(round_bit, bit=1)
round_bit_2bit = functools.partial(round_bit, bit=2)
round_bit_3bit = functools.partial(round_bit, bit=3)
round_bit_4bit = functools.partial(round_bit, bit=4)
quantize_w_1bit = functools.partial(quantize_w, bit=1)
quantize_w_2bit = functools.partial(quantize_w, bit=2)
quantize_w_3bit = functools.partial(quantize_w, bit=3)
quantize_w_4bit = functools.partial(quantize_w, bit=4)
|
chickenjohn/bit-rnn | config/gru.py | <filename>config/gru.py<gh_stars>10-100
class Config(object):
    """Hyper-parameters for training the 2-bit GRU PTB language model."""

    # Optimisation schedule.
    learning_rate = 1e-3
    max_grad_norm = 10
    max_epoch = 100
    nr_epoch_first_stage = 40   # epochs at the base learning rate
    nr_epoch_second_stage = 80  # epochs before the final LR drop

    # Model architecture.
    cell_type = 'gru'
    num_layers = 1
    num_steps = 20
    hidden_size = 300
    keep_prob = 0.5
    batch_size = 20
    vocab_size = 10000

    # Quantisation bit-widths: weights / activations.
    w_bit = 2
    f_bit = 2
|
chickenjohn/bit-rnn | train.py | import time
import functools
import importlib
import numpy as np
import tensorflow as tf
import reader
import bit_utils
from bit_rnn_cell import BitGRUCell
from model import PTBModel
# Command-line interface: --data_path points at the PTB corpus directory and
# --config names an importable module that provides a Config class.
flags = tf.flags
logging = tf.logging
flags.DEFINE_string('data_path', None, 'data_path')
flags.DEFINE_string('config', None, 'config')
FLAGS = flags.FLAGS
def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data.

    Args:
        session: active tf.Session.
        m: PTBModel instance to run.
        data: flat list of token ids (PTB format).
        eval_op: op executed alongside the loss (train op or tf.no_op()).
        verbose: if True, print running perplexity roughly 10x per epoch.

    Returns:
        Perplexity over the whole epoch.
    """
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    # Bug fix: with fewer than 10 steps per epoch the original
    # `step % (epoch_size // 10)` divided by zero; clamp the interval to 1.
    log_interval = max(epoch_size // 10, 1)
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = m.initial_state.eval()
    for step, (x, y) in enumerate(
            reader.ptb_iterator(data, m.batch_size, m.num_steps)):
        # Feed the previous final state back in so the recurrent state is
        # carried across minibatches.
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if verbose and step % log_interval == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
    return np.exp(costs / iters)
def get_config():
    """Instantiate the Config class from the module named by --config."""
    config_module = importlib.import_module(FLAGS.config)
    return config_module.Config()
def main(_):
    # Entry point invoked by tf.app.run(): builds train/valid/test models
    # sharing one set of variables and runs the staged-LR training schedule.
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    config = get_config()
    # Final evaluation processes one token at a time so the recurrent state
    # is carried across the whole corpus.
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.initializers.variance_scaling(distribution='uniform')
        # All three models live in the same "model" variable scope, so the
        # validation/test models reuse the training weights.
        with tf.variable_scope("model", reuse=tf.AUTO_REUSE, initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)
        tf.global_variables_initializer().run()

        def get_learning_rate(epoch, config):
            # Piecewise-constant schedule: base LR, then /10, then /100.
            base_lr = config.learning_rate
            if epoch <= config.nr_epoch_first_stage:
                return base_lr
            elif epoch <= config.nr_epoch_second_stage:
                return base_lr * 0.1
            else:
                return base_lr * 0.01

        for i in range(config.max_epoch):
            m.assign_lr(session, get_learning_rate(i, config))
            print("Epoch: %d Learning rate: %f"
                  % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(
                session, m, train_data, m.train_op, verbose=True)
            print("Epoch: %d Train Perplexity: %.3f"
                  % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(
                session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f"
                  % (i + 1, valid_perplexity))
        test_perplexity = run_epoch(
            session, mtest, test_data, tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)
# Script entry point: tf.app.run() parses command-line flags, then calls main().
if __name__ == "__main__":
    tf.app.run()
|
chickenjohn/bit-rnn | model.py | <gh_stars>10-100
import time
import functools
import numpy as np
import tensorflow as tf
import reader
import bit_utils
from bit_rnn_cell import BitGRUCell, BitLSTMCell
class PTBModel(object):
    """The PTB model.

    Bit-quantized recurrent language model: a (possibly multi-layer)
    BitGRU/BitLSTM over word embeddings, with activations quantized to
    ``config.f_bit`` bits and softmax weights quantized to ``config.w_bit``
    bits. The training graph (loss, clipped-Adam train op) is only built
    when ``is_training`` is True.
    """

    def __init__(self, is_training, config):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size
        # Token-id inputs and shifted targets, both [batch, time].
        self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
        # Recurrent cell: GRU by default, LSTM when configured.
        if 'cell_type' not in dir(config) or config.cell_type == 'gru':
            cell = BitGRUCell(size, w_bit=config.w_bit, f_bit=config.f_bit)
        elif config.cell_type == 'lstm':
            cell = BitLSTMCell(size, w_bit=config.w_bit,
                               f_bit=config.f_bit, state_is_tuple=False)
        if is_training and config.keep_prob < 1:
            cell = tf.nn.rnn_cell.DropoutWrapper(
                cell, output_keep_prob=config.keep_prob)
        cell = tf.nn.rnn_cell.MultiRNNCell(
            [cell] * config.num_layers, state_is_tuple=False)
        self._initial_state = cell.zero_state(batch_size, tf.float32)
        # Quantize the initial state to the activation bit-width so it
        # matches the states produced by the bit cells.
        self._initial_state = bit_utils.round_bit(
            tf.sigmoid(self._initial_state), bit=config.f_bit)
        embedding = tf.get_variable(
            "embedding",
            [vocab_size, size],
            initializer=tf.random_uniform_initializer())
        inputs = tf.nn.embedding_lookup(embedding, self._input_data)
        # Input activations are quantized after ReLU.
        inputs = bit_utils.round_bit(tf.nn.relu(inputs), bit=config.f_bit)
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)
        # Unstack the time dimension for the static RNN API.
        inputs = [tf.squeeze(input_, [1])
                  for input_ in tf.split(value=inputs, num_or_size_splits=num_steps, axis=1)]
        outputs, state = tf.contrib.rnn.static_rnn(cell, inputs,
                                                   initial_state=self._initial_state)
        output = tf.reshape(tf.concat(values=outputs, axis=1), [-1, size])
        # Softmax weights are quantized (after a tanh squash) at creation
        # time via the get_variable replacement hook.
        with bit_utils.replace_variable(
                lambda x: bit_utils.quantize_w(tf.tanh(x), bit=config.w_bit)):
            softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        logits = tf.matmul(output, softmax_w) + softmax_b
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(self._targets, [-1])],
            [tf.ones([batch_size * num_steps])])
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = state
        if not is_training:
            return
        # Learning rate is a variable so it can be re-assigned between
        # epochs (see assign_lr); gradients are globally norm-clipped.
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(
            tf.gradients(cost, tvars), config.max_grad_norm)
        optimizer = tf.train.AdamOptimizer(self.lr)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))

    def assign_lr(self, session, lr_value):
        # Set the learning-rate variable inside the given session.
        session.run(tf.assign(self.lr, lr_value))

    # Read-only accessors for the graph tensors built above.
    @property
    def input_data(self):
        return self._input_data

    @property
    def targets(self):
        return self._targets

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
|
openvinotoolkit/mmaction2 | mmaction/datasets/samplers/balanced_distributed_sampler.py | <filename>mmaction/datasets/samplers/balanced_distributed_sampler.py
import copy
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class BalancedDistributedSampler(_DistributedSampler):
    """Distributed sampler that yields same-label groups of samples.

    The wrapped dataset must expose ``clustered_ids``: a two-level mapping
    whose leaf values are lists of record indices sharing the same label.
    Records are emitted in contiguous groups of ``num_instances`` same-label
    samples; lists whose length is not a multiple of ``num_instances`` are
    padded by repeating some of their own records.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, num_instances=1):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle
        self.num_instances = num_instances
        assert self.num_instances > 0
        # Count the complete groups an epoch yields, mirroring the padding
        # logic in __iter__.
        # NOTE(review): the padding amount is `len % num_instances` rather
        # than `(num_instances - len % num_instances) % num_instances`; e.g.
        # 7 records with num_instances=3 are padded to 8 and form only 2
        # groups. This is self-consistent with __iter__, but looks like it
        # may undercount — confirm the intended padding rule.
        num_groups = 0
        for dataset_items in dataset.clustered_ids.values():
            for record_ids in dataset_items.values():
                num_same_label_records = len(record_ids)
                num_extra_records = len(record_ids) % self.num_instances
                num_groups += (num_same_label_records + num_extra_records) // self.num_instances
        self.num_samples = math.ceil(num_groups * num_instances / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        clustered_ids = self.dataset.clustered_ids
        if self.shuffle:
            # Epoch-seeded generator: every replica builds the same permutation.
            g = torch.Generator()
            g.manual_seed(self.epoch)
        # Build same-label groups of exactly `num_instances` records.
        grouped_records = []
        for dataset_items in clustered_ids.values():
            for record_ids in dataset_items.values():
                same_label_records = copy.deepcopy(record_ids)
                num_extra_records = len(record_ids) % self.num_instances
                if num_extra_records > 0:
                    # Pad with repeated records (randomly chosen when shuffling).
                    if self.shuffle:
                        rand_ids = torch.randperm(len(record_ids), generator=g).tolist()
                        extra_record_ids = [record_ids[ii] for ii in rand_ids[:num_extra_records]]
                    else:
                        extra_record_ids = record_ids[:num_extra_records]
                    same_label_records.extend(extra_record_ids)
                num_groups = len(same_label_records) // self.num_instances
                for group_ind in range(num_groups):
                    start_pos = group_ind * self.num_instances
                    end_pos = (group_ind + 1) * self.num_instances
                    grouped_records.append(same_label_records[start_pos:end_pos])
        if self.shuffle:
            # Shuffle whole groups so same-label instances stay adjacent.
            group_inds = torch.randperm(len(grouped_records), generator=g).tolist()
            grouped_records = [grouped_records[group_ind] for group_ind in group_inds]
        # Flatten the groups into a single index list.
        indices = []
        for group in grouped_records:
            indices.extend(group)
        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size
        # subsample
        samples_start_pos = self.rank * self.num_samples
        samples_end_pos = (self.rank + 1) * self.num_samples
        indices = indices[samples_start_pos:samples_end_pos]
        assert len(indices) == self.num_samples
        return iter(indices)
|
openvinotoolkit/mmaction2 | mmaction/core/ops/conv.py | import torch.nn as nn
from .conv3d import Conv3d
def conv_kxkxk_bn(in_planes, out_planes, spatial_k=3, temporal_k=3,
                  spatial_stride=1, temporal_stride=1, groups=1,
                  as_list=True, norm='none', center_weight=None):
    """Full 3D (t x k x k) conv with "same" padding, followed by BatchNorm3d.

    Returns a [conv, bn] list when ``as_list`` is True, otherwise an
    nn.Sequential of the two.
    """
    padding = ((temporal_k - 1) // 2, (spatial_k - 1) // 2, (spatial_k - 1) // 2)
    conv = Conv3d(in_planes, out_planes, bias=False, groups=groups,
                  kernel_size=(temporal_k, spatial_k, spatial_k),
                  padding=padding,
                  stride=(temporal_stride, spatial_stride, spatial_stride),
                  normalization=norm, center_weight=center_weight)
    modules = [conv, nn.BatchNorm3d(out_planes)]
    return modules if as_list else nn.Sequential(*modules)
def conv_1xkxk_bn(in_planes, out_planes, k=3, spatial_stride=1, groups=1,
                  as_list=True, norm='none', center_weight=None):
    """Spatial-only (1 x k x k) 3D conv followed by BatchNorm3d.

    Returns a [conv, bn] list or an nn.Sequential, depending on ``as_list``.
    """
    conv = Conv3d(in_planes, out_planes, bias=False, groups=groups,
                  kernel_size=(1, k, k), padding=(0, (k - 1) // 2, (k - 1) // 2),
                  stride=(1, spatial_stride, spatial_stride),
                  normalization=norm, center_weight=center_weight)
    modules = [conv, nn.BatchNorm3d(out_planes)]
    return modules if as_list else nn.Sequential(*modules)
def conv_kx1x1_bn(in_planes, out_planes, k, temporal_stride=1, groups=1,
                  as_list=True, norm='none', center_weight=None):
    """Temporal-only (k x 1 x 1) 3D conv followed by BatchNorm3d.

    Returns a [conv, bn] list or an nn.Sequential, depending on ``as_list``.
    """
    conv = Conv3d(in_planes, out_planes, bias=False, groups=groups,
                  kernel_size=(k, 1, 1), padding=((k - 1) // 2, 0, 0),
                  stride=(temporal_stride, 1, 1),
                  normalization=norm, center_weight=center_weight)
    modules = [conv, nn.BatchNorm3d(out_planes)]
    return modules if as_list else nn.Sequential(*modules)
def conv_1x1x1_bn(in_planes, out_planes, as_list=True, norm='none', center_weight=None, bias=False, bn=True):
    """Pointwise (1 x 1 x 1) 3D conv, optionally followed by BatchNorm3d.

    Returns the bare conv module when ``bn`` is False; otherwise a
    [conv, bn] list or an nn.Sequential, depending on ``as_list``.
    """
    conv_layer = Conv3d(
        in_planes, out_planes, bias=bias, kernel_size=1,
        padding=0, stride=1,
        normalization=norm, center_weight=center_weight
    )
    if not bn:
        return conv_layer
    modules = [conv_layer, nn.BatchNorm3d(out_planes)]
    return modules if as_list else nn.Sequential(*modules)
|
openvinotoolkit/mmaction2 | mmaction/integration/nncf/utils.py | <filename>mmaction/integration/nncf/utils.py
import contextlib
from collections import OrderedDict
from contextlib import contextmanager

import torch
try:
import nncf
_is_nncf_enabled = True
except ImportError:
_is_nncf_enabled = False
except RuntimeError as _e:
_is_nncf_enabled = False
print('Attention: RuntimeError happened when tried to import nncf')
print(' The reason may be in absent CUDA devices')
print(' RuntimeError:')
print(' ' + str(_e), flush=True)
def is_nncf_enabled():
    """Return True if the optional NNCF package imported successfully."""
    return _is_nncf_enabled
def check_nncf_is_enabled():
    """Raise RuntimeError when NNCF functionality is required but the
    package is unavailable."""
    if is_nncf_enabled():
        return
    raise RuntimeError('Tried to use NNCF, but NNCF is not installed')
def get_nncf_version():
    """Return the installed NNCF version string, or None if NNCF is absent."""
    return nncf.__version__ if is_nncf_enabled() else None
def load_checkpoint(model, filename, map_location=None, strict=False):
    """Load a checkpoint and push its state_dict into ``model`` via NNCF.

    Args:
        model (Module): module to receive the weights.
        filename (str): filepath, URL or modelzoo://xxxxxxx URI.
        map_location (str): forwarded to :func:`torch.load`.
        strict (bool): whether param mismatches between the model and the
            checkpoint are an error.

    Returns:
        dict or OrderedDict: the checkpoint exactly as loaded from disk.
    """
    from nncf.torch.checkpoint_loading import load_state

    checkpoint = torch.load(filename, map_location=map_location)
    # A bare OrderedDict *is* the state_dict; otherwise expect it nested
    # under the conventional 'state_dict' key.
    if isinstance(checkpoint, OrderedDict):
        state_dict = checkpoint
    elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        raise RuntimeError(
            'No state_dict found in checkpoint file {}'.format(filename))
    _ = load_state(model, state_dict, strict)
    return checkpoint
def nullcontext():
    """
    Context which does nothing

    Delegates to the stdlib implementation (contextlib.nullcontext,
    Python 3.7+) instead of the previous hand-rolled @contextmanager
    generator; entering still yields None and exiting does nothing.
    """
    return contextlib.nullcontext()
def no_nncf_trace():
    """
    Wrapper for original NNCF no_nncf_trace() context

    Falls back to a do-nothing context when NNCF is unavailable, so callers
    may use it unconditionally.
    """
    if not is_nncf_enabled():
        return nullcontext()
    from nncf.torch.dynamic_graph.context import \
        no_nncf_trace as original_no_nncf_trace
    return original_no_nncf_trace()
def is_in_nncf_tracing():
    """Return whether an NNCF dynamic-graph tracing context is active."""
    if not is_nncf_enabled():
        return False
    from nncf.torch.dynamic_graph.context import get_current_context

    ctx = get_current_context()
    return ctx is not None and ctx.is_tracing
|
openvinotoolkit/mmaction2 | mmaction/datasets/video_dataset.py | import os.path as osp
from .recognition_dataset import RecognitionDataset
from .registry import DATASETS
@DATASETS.register_module()
class VideoDataset(RecognitionDataset):
    """Action-recognition dataset backed by raw video files.

    Each line of the annotation file describes one sample: a video filepath
    and a label separated by whitespace, e.g.::

        some/path/000.mp4 1
        some/path/001.mp4 1
        some/path/002.mp4 2
        some/path/003.mp4 2
        some/path/004.mp4 3
        some/path/005.mp4 3

    The configured pipeline loads each video and returns a dict containing
    the frame tensors and related meta information.
    """

    def __init__(self, start_index=0, **kwargs):
        super().__init__(start_index=start_index, **kwargs)

    def _parse_data_source(self, data_source, data_prefix):
        """Resolve the (possibly prefix-relative) video path into a record dict."""
        if data_prefix is None:
            filename = data_source
        else:
            filename = osp.join(data_prefix, data_source)
        return {'filename': filename}
|
openvinotoolkit/mmaction2 | mmaction/models/necks/__init__.py | <reponame>openvinotoolkit/mmaction2
from .video_aligner import VideoAligner
from .self_feature_regularizer import SelfFeatureRegularizer
from .emd_regularizer import EMDRegularizer
# Public API of mmaction.models.necks: the names re-exported on
# `from ... import *`.
__all__ = [
    'VideoAligner',
    'SelfFeatureRegularizer',
    'EMDRegularizer',
]
|
openvinotoolkit/mmaction2 | mmaction/core/ops/__init__.py | from .conv2d import Conv2d
from .conv3d import Conv3d
from .conv import conv_kxkxk_bn, conv_1xkxk_bn, conv_kx1x1_bn, conv_1x1x1_bn
from .linear import AngleMultipleLinear, SymmetricalLayer
from .kernelzed import KernelizedClassifier, kernel_prod
from .nonlinearities import HSigmoid, HSwish
from .dropout import Dropout, info_dropout
from .gumbel_sigmoid import gumbel_sigmoid
from .math import normalize, Normalize
from .losses import (CrossEntropy, NormalizedCrossEntropy, build_classification_loss, entropy, focal_loss,
MaxEntropyLoss)
from .domain_generalization import rsc, RSC
from .temporal_samplers import SimilarityGuidedSampling
from .pooling import AdaptivePool3D
from .regularizers import NormRegularizer
from .normalizers import balance_losses
from .soft_dtw import soft_dtw
from .labels import PRISM
# Public API of mmaction.core.ops: the names re-exported on
# `from ... import *`, grouped to mirror the imports above.
__all__ = ['Conv2d', 'Conv3d',
           'conv_kxkxk_bn', 'conv_1xkxk_bn', 'conv_kx1x1_bn', 'conv_1x1x1_bn',
           'AngleMultipleLinear', 'SymmetricalLayer',
           'KernelizedClassifier', 'kernel_prod',
           'HSigmoid', 'HSwish',
           'Dropout', 'info_dropout',
           'gumbel_sigmoid',
           'normalize', 'Normalize',
           'CrossEntropy', 'NormalizedCrossEntropy', 'build_classification_loss',
           'MaxEntropyLoss', 'entropy', 'focal_loss',
           'rsc', 'RSC',
           'SimilarityGuidedSampling',
           'AdaptivePool3D',
           'NormRegularizer',
           'balance_losses',
           'soft_dtw',
           'PRISM',
           ]
|
openvinotoolkit/mmaction2 | mmaction/core/ops/linear.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from .math import normalize
class AngleMultipleLinear(nn.Module):
    """Based on SoftTriplet loss: https://arxiv.org/pdf/1909.05235.pdf

    Cosine-similarity classifier with ``num_centers`` prototypes per class.
    Inputs are expected to be L2-normalized; the weight columns are
    normalized in forward(), so the raw scores are cosines. With multiple
    centers, each class score is a softmax-weighted mixture (temperature
    ``scale``) of its centers' cosines. loss() provides an optional
    regularizer over the prototypes.
    """

    def __init__(self, in_features, num_classes, num_centers=1, scale=10.0, reg_weight=0.2, reg_threshold=0.2):
        super(AngleMultipleLinear, self).__init__()
        self.in_features = in_features
        assert in_features > 0
        self.num_classes = num_classes
        assert num_classes >= 2
        self.num_centers = num_centers
        assert num_centers >= 1
        self.scale = scale
        assert scale > 0.0
        # Weight layout: [D, C] for a single center, [D, C, K] for K centers.
        weight_shape = [in_features, num_classes, num_centers] if num_centers > 1 else [in_features, num_classes]
        self.weight = Parameter(torch.Tensor(*weight_shape))
        # Normal init, then renorm/scale toward unit-norm columns.
        self.weight.data.normal_().renorm_(2, 1, 1e-5).mul_(1e5)
        self.enable_regularization = reg_weight is not None and reg_weight > 0.0
        if self.enable_regularization:
            self.reg_weight = reg_weight
            if num_centers == 1:
                # Single center: penalize class-prototype pairs whose cosine
                # exceeds reg_threshold. Upper-triangular mask selects each
                # unordered pair once.
                self.reg_threshold = reg_threshold
                assert self.reg_threshold >= 0.0
                reg_valid_mask = np.triu(np.ones((num_classes, num_classes), dtype=np.float32), k=1)
            else:
                # Multiple centers: regularize center pairs within each class;
                # the weight is normalized by the number of pairs involved.
                self.reg_weight /= num_classes
                if num_centers > 2:
                    self.reg_weight /= (num_centers - 1) * (num_centers - 2)
                reg_valid_mask = np.tile(np.triu(np.ones((1, num_centers, num_centers), dtype=np.float32), k=1),
                                         (num_classes, 1, 1))
            self.register_buffer('reg_mask', torch.from_numpy(reg_valid_mask))
        else:
            self.reg_weight = None
            self.reg_mask = None

    def forward(self, normalized_x):
        """Return cosine-based class scores for L2-normalized inputs."""
        normalized_x = normalized_x.view(-1, self.in_features)
        normalized_weights = normalize(self.weight.view(self.in_features, -1), dim=0)
        prod = normalized_x.mm(normalized_weights)
        if not torch.onnx.is_in_onnx_export():
            # Numerical safety only; the clamp is skipped during ONNX export.
            prod = prod.clamp(-1.0, 1.0)
        if self.num_centers > 1:
            # Softmax over each class's centers, then the expected cosine.
            prod = prod.view(-1, self.num_classes, self.num_centers)
            prod_weights = F.softmax(self.scale * prod, dim=-1)
            scores = torch.sum(prod_weights * prod, dim=-1)
        else:
            scores = prod
        return scores

    def loss(self, name):
        """Return the prototype-regularization losses, keyed with ``name``."""
        out_losses = dict()
        if self.enable_regularization:
            normalized_weights = F.normalize(self.weight, dim=0)
            if self.num_centers == 1:
                # Push apart class prototypes closer than the threshold.
                all_pairwise_scores = normalized_weights.permute(1, 0).matmul(normalized_weights)
                valid_pairwise_scores = all_pairwise_scores[self.reg_mask > 0.0]
                losses = valid_pairwise_scores[valid_pairwise_scores > self.reg_threshold] - self.reg_threshold
                out_losses['loss/cpush' + name] =\
                    self.reg_weight * losses.mean() if losses.numel() > 0 else losses.sum()
            else:
                # Pull together centers belonging to the same class.
                all_pairwise_scores = normalized_weights.permute(1, 2, 0).matmul(normalized_weights.permute(1, 0, 2))
                valid_pairwise_scores = all_pairwise_scores[self.reg_mask > 0.0]
                losses = 1.0 - valid_pairwise_scores
                out_losses['loss/st_reg' + name] = self.reg_weight * losses.sum()
        return out_losses
class SymmetricalLayer(nn.Module):
    """
    Init version: https://github.com/IoannisKansizoglou/Symmetrical-Feature-Space

    Classifier whose class centers are generated rather than learned
    independently: two learnable vectors define a 2D plane, and the
    ``num_classes`` centers are rotations of the first (orthonormalized)
    basis vector by equal angle steps of 2*pi/num_classes within that plane.
    """

    def __init__(self, in_features, num_classes):
        super().__init__()
        self.in_features = in_features
        assert in_features > 0
        self.num_classes = num_classes
        assert num_classes > 1
        # Two vectors spanning the rotation plane.
        self.weight = nn.Parameter(torch.FloatTensor(2, self.in_features))
        self.weight.data.normal_()
        # Rotation angles: 0, 2*pi/C, ..., 2*pi*(C-1)/C.
        steps = torch.arange(self.num_classes, dtype=torch.float32)
        thetas = 2.0 * math.pi / float(self.num_classes) * steps
        self.register_buffer('thetas', thetas)
        eye_matrix = torch.eye(self.in_features)
        self.register_buffer('eye_matrix', eye_matrix)

    def _generate_centers(self, v1, v2):
        # Gram-Schmidt: orthonormal basis (n1, n2) of the plane spanned by v1, v2.
        n1 = normalize(v1, dim=0, p=2)
        n2 = normalize(v2, dim=0, p=2)
        n2 = normalize(n2 - torch.dot(n1, n2) * n1, dim=0, p=2)
        # Planar rotation matrices for each theta:
        # R = I + (n2 n1^T - n1 n2^T) sin(t) + (n1 n1^T + n2 n2^T) (cos(t) - 1)
        ger_sub = torch.outer(n2, n1) - torch.outer(n1, n2)
        ger_add = torch.outer(n1, n1) + torch.outer(n2, n2)
        sin_thetas = torch.unsqueeze(torch.unsqueeze(torch.sin(self.thetas), dim=-1), dim=-1)
        cos_thetas = torch.unsqueeze(torch.unsqueeze(torch.cos(self.thetas) - 1, dim=-1), dim=-1)
        R = self.eye_matrix + ger_sub * sin_thetas + ger_add * cos_thetas
        # Rotate n1 by every angle: one center per class, shape [C, D].
        return torch.einsum('bij,j->bi', R, n1)

    def forward(self, normalized_x):
        """Score L2-normalized inputs against the generated class centers."""
        normalized_x = normalized_x.view(-1, self.in_features)
        centers = self._generate_centers(self.weight[0], self.weight[1])
        scores = F.linear(normalized_x, centers)
        return scores
|
openvinotoolkit/mmaction2 | mmaction/datasets/rawframe_dataset.py | import os.path as osp
from .recognition_dataset import RecognitionDataset
from .registry import DATASETS
@DATASETS.register_module()
class RawframeDataset(RecognitionDataset):
    """Action-recognition dataset backed by directories of extracted frames.

    Every annotation line describes one clip via whitespace-separated
    fields. Supported layouts:

    * full::

        frame_dir label clip_start clip_end video_start video_end fps

    * simple (``frame_dir total_frames label``)::

        some/directory-1 163 1
        some/directory-2 122 1

    * multi-class (``frame_dir total_frames label1 [label2 ...]``)::

        some/directory-1 163 1 3 5
        some/directory-4 234 2 4 6 8

    * with_offset, for clips cut from long videos
      (``frame_dir offset total_frames label``)::

        some/directory-1 12 163 3
        some/directory-2 213 122 4

    Args:
        ann_file (str): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transforms.
        test_mode (bool): Store True when building test or validation
            dataset. Default: False.
        filename_tmpl (str): Template for each frame filename.
            Default: 'img_{:05}.jpg'.
        multi_class (bool): Determines whether it is a multi-class
            recognition dataset. Default: False.
        num_classes (int): Number of classes in the dataset. Default: None.
        modality (str): Modality of data. Support 'RGB', 'Flow'.
            Default: 'RGB'.
    """

    def __init__(self, start_index=1, filename_tmpl='img_{:05}.jpg', **kwargs):
        # Set the template before delegating to the base class, which
        # presumably consumes it while parsing annotations — kept in the
        # original order for that reason.
        self.filename_tmpl = filename_tmpl
        super().__init__(start_index=start_index, **kwargs)

    def _parse_data_source(self, data_source, data_prefix):
        """Turn a frame-directory path into a partial record dict."""
        rel_frame_dir = data_source
        if data_prefix is None:
            frame_dir = rel_frame_dir
        else:
            frame_dir = osp.join(data_prefix, rel_frame_dir)
        return {
            'filename_tmpl': self.filename_tmpl,
            'rel_frame_dir': rel_frame_dir,
            'frame_dir': frame_dir,
        }
|
openvinotoolkit/mmaction2 | mmaction/models/heads/cls_head.py | <reponame>openvinotoolkit/mmaction2
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init, kaiming_init
from .base import BaseHead
from ..registry import HEADS
from ...core.ops import (conv_1x1x1_bn, normalize, AngleMultipleLinear, KernelizedClassifier,
SymmetricalLayer, PRISM)
@HEADS.register_module()
class ClsHead(BaseHead):
    def __init__(self,
                 spatial_type=None,
                 temporal_size=1,
                 spatial_size=7,
                 init_std=0.01,
                 embedding=False,
                 enable_rebalance=False,
                 rebalance_num_groups=3,
                 rebalance_alpha=0.9,
                 classification_layer='linear',
                 embd_size=128,
                 num_centers=1,
                 st_scale=5.0,
                 reg_threshold=0.1,
                 enable_sampling=False,
                 adaptive_sampling=False,
                 sampling_angle_std=None,
                 reg_weight=1.0,
                 enable_class_mixing=False,
                 class_mixing_alpha=0.1,
                 label_cleaning_cfg=None,
                 enable_bias=False,
                 enable_bn=True,
                 **kwargs):
        """Classification head with optional metric-learning extras.

        Main switches:
            embedding: use a normalized-embedding (cosine) classifier
                (linear / symmetric / kernel, see ``classification_layer``)
                instead of a plain linear layer on pooled features.
            enable_rebalance: split classes into frequency groups and average
                the scores of one sub-head per group (long-tail handling).
            enable_sampling / enable_class_mixing: training-time
                embedding-space augmentations.
            label_cleaning_cfg: config for the PRISM noisy-label cleaner.
        """
        super(ClsHead, self).__init__(**kwargs)
        self.embd_size = embd_size
        self.temporal_feature_size = temporal_size
        self.spatial_feature_size = \
            spatial_size \
            if not isinstance(spatial_size, int) \
            else (spatial_size, spatial_size)
        self.init_std = init_std
        self.avg_pool = None
        if spatial_type == 'avg':
            self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.enable_rebalance = enable_rebalance and rebalance_num_groups > 1
        if self.enable_rebalance:
            assert classification_layer == 'linear', 'Re-balancing supports linear head only'
            assert self.class_sizes is not None, 'Re-balancing requires class_sizes'
            self.rebalance_alpha = rebalance_alpha
            assert 0.0 <= self.rebalance_alpha <= 1.0
            rebalance_zero_mask, imbalance_ratios = self._build_rebalance_masks(
                self.class_sizes, rebalance_num_groups
            )
            np.set_printoptions(precision=3)
            print(f'[INFO] Balance ratios for dataset with {self.num_classes} '
                  f'classes : {imbalance_ratios}')
            self.register_buffer('rebalance_zero_mask', torch.from_numpy(rebalance_zero_mask))
            # For each class, weight is spread uniformly over the groups that
            # cover it (1 / number-of-covering-groups), zero elsewhere.
            rebalance_weights = np.where(rebalance_zero_mask > 0.0,
                                         1.0 / np.sum(rebalance_zero_mask, axis=1, keepdims=True),
                                         np.zeros_like(rebalance_zero_mask))
            self.register_buffer('rebalance_weights', torch.from_numpy(rebalance_weights))
        self.with_embedding = embedding and self.embd_size > 0
        if self.with_embedding:
            if self.enable_rebalance:
                assert not enable_sampling, 'Re-balancing does not support embd sampling'
                assert not enable_class_mixing, 'Re-balancing does not support embd mixing'
                # One 1x1x1 conv embedding branch per frequency group.
                self.fc_pre_angular = nn.ModuleList([
                    conv_1x1x1_bn(self.in_channels, self.embd_size,
                                  as_list=False, bias=enable_bias, bn=enable_bn)
                    for _ in range(rebalance_num_groups)
                ])
            else:
                self.fc_pre_angular = conv_1x1x1_bn(self.in_channels, self.embd_size,
                                                    as_list=False, bias=enable_bias, bn=enable_bn)
            # Angular classifier on top of the normalized embedding.
            if classification_layer == 'linear':
                self.fc_angular = AngleMultipleLinear(self.embd_size, self.num_classes, num_centers,
                                                      st_scale, reg_weight, reg_threshold)
            elif classification_layer == 'symmetric':
                assert num_centers == 1, 'Symmetric classifier does not support num_centers > 1'
                assert not enable_class_mixing, 'Symmetric classifier does not support class mixing'
                self.fc_angular = SymmetricalLayer(self.embd_size, self.num_classes)
            elif classification_layer == 'kernel':
                assert not enable_class_mixing, 'Kernelized classifier does not support class mixing'
                self.fc_angular = KernelizedClassifier(self.embd_size, self.num_classes, num_centers)
            else:
                raise ValueError(f'Unknown classification layer: {classification_layer}')
        else:
            if self.enable_rebalance:
                self.internal_num_channels = int(1.3 * self.in_channels)
                self.fc_pre_cls = nn.ModuleList([
                    conv_1x1x1_bn(self.in_channels, self.internal_num_channels,
                                  as_list=False, bias=enable_bias, bn=enable_bn)
                    for _ in range(rebalance_num_groups)
                ])
                self.fc_cls_out = nn.Linear(self.internal_num_channels, self.num_classes)
            else:
                self.fc_pre_cls = None
                self.fc_cls_out = nn.Linear(self.in_channels, self.num_classes)
        # Gaussian-angle embedding sampling; the adaptive variant scales the
        # per-class std by class frequency (counts ** -1/4).
        self.enable_sampling = (self.with_embedding and
                                enable_sampling and
                                sampling_angle_std is not None and
                                sampling_angle_std > 0.0)
        self.adaptive_sampling = (self.enable_sampling and
                                  adaptive_sampling and
                                  self.class_sizes is not None)
        if self.enable_sampling:
            assert sampling_angle_std < 0.5 * np.pi
            if self.adaptive_sampling:
                counts = np.ones([self.num_classes], dtype=np.float32)
                for class_id, class_size in self.class_sizes.items():
                    counts[class_id] = class_size
                class_angle_std = sampling_angle_std * np.power(counts, -1. / 4.)
                self.register_buffer('sampling_angle_std', torch.from_numpy(class_angle_std))
            else:
                self.sampling_angle_std = sampling_angle_std
        self.enable_class_mixing = enable_class_mixing
        self.alpha_class_mixing = class_mixing_alpha
        self.label_cleaner = None
        if label_cleaning_cfg is not None:
            self.label_cleaner = PRISM(num_classes=self.num_classes, feature_length=self.embd_size,
                                       **label_cleaning_cfg)
def init_weights(self):
if self.with_embedding:
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d):
constant_init(m, 1.0, 0.0)
else:
nn.init.normal_(self.fc_cls_out.weight, 0, self.init_std)
nn.init.constant_(self.fc_cls_out.bias, 0)
    @staticmethod
    def _build_rebalance_masks(classes_meta, num_groups):
        """Partition classes into ``num_groups`` frequency bands.

        Classes are sorted by descending size; every possible placement of
        band borders is scored by the spread of the per-band head/tail size
        ratios, and the most even split wins.

        Returns:
            mask of shape [1, num_groups, num_classes], where group g covers
            its own band plus every rarer band (nested tails), and the
            per-band imbalance ratios of the chosen split.
        """
        assert 1 < num_groups <= 3
        assert len(classes_meta) >= num_groups
        num_borders = num_groups - 1
        ordered_class_sizes = list(sorted(classes_meta.items(), key=lambda tup: -tup[1]))
        class_ids = np.array([class_id for class_id, _ in ordered_class_sizes], dtype=np.int32)
        class_sizes = np.array([class_size for _, class_size in ordered_class_sizes], dtype=np.float32)
        # Enumerate every placement of borders between adjacent sorted classes.
        all_border_combinations = itertools.combinations(range(1, len(ordered_class_sizes)), num_borders)
        all_border_combinations = np.array(list(all_border_combinations))
        # Within-band imbalance: largest / smallest class size of each band.
        ratios = [class_sizes[0] / class_sizes[all_border_combinations[:, 0] - 1]]
        for ii in range(num_borders - 1):
            starts = all_border_combinations[:, ii]
            ends = all_border_combinations[:, ii + 1] - 1
            ratios.append(class_sizes[starts] / class_sizes[ends])
        ratios.append(class_sizes[all_border_combinations[:, -1]] / class_sizes[-1])
        ratios = np.stack(ratios, axis=1)
        # Choose the border set whose band ratios are most uniform.
        costs = np.max(ratios, axis=1) - np.min(ratios, axis=1)
        best_match_idx = np.argmin(costs)
        best_border_combination = all_border_combinations[best_match_idx]
        best_ratios = ratios[best_match_idx]
        groups = [class_ids[:best_border_combination[0]]]
        for ii in range(num_borders - 1):
            groups.append(class_ids[best_border_combination[ii]:best_border_combination[ii + 1]])
        groups.append(class_ids[best_border_combination[-1]:])
        # Group g "sees" its own band and all rarer bands below it.
        num_classes = max(classes_meta.keys()) + 1
        mask = np.zeros([num_groups, num_classes], dtype=np.float32)
        for group_id in range(num_groups):
            for ii in range(group_id, num_groups):
                mask[group_id, groups[ii]] = 1.0
        return mask.reshape([1, num_groups, num_classes]), best_ratios
def _squash_features(self, x):
if x.ndimension() == 4:
x = x.unsqueeze(2)
if self.avg_pool is not None:
x = self.avg_pool(x)
return x
@staticmethod
def _mix_embd(norm_embd, labels, norm_centers, num_classes, alpha_class_mixing):
with torch.no_grad():
sampled_ids = torch.randint_like(labels, 0, num_classes - 1)
sampled_neg_ids = torch.where(sampled_ids < labels, sampled_ids, sampled_ids + 1)
random_centers = norm_centers[sampled_neg_ids]
alpha = alpha_class_mixing * torch.rand_like(labels, dtype=norm_embd.dtype)
mixed_embd = (1.0 - alpha.view(-1, 1)) * norm_embd + alpha.view(-1, 1) * random_centers
norm_embd = normalize(mixed_embd, dim=1)
return norm_embd
@staticmethod
def _sample_embd(norm_embd, labels, batch_size, adaptive_sampling, sampling_angle_std):
with torch.no_grad():
unit_directions = F.normalize(torch.randn_like(norm_embd), dim=1)
dot_prod = torch.sum(norm_embd * unit_directions, dim=1, keepdim=True)
orthogonal_directions = unit_directions - dot_prod * norm_embd
if adaptive_sampling and labels is not None:
all_angle_std = sampling_angle_std.expand(batch_size, -1)
class_indices = torch.arange(batch_size, device=labels.device)
angle_std = all_angle_std[class_indices, labels].view(-1, 1)
else:
angle_std = sampling_angle_std
angles = angle_std * torch.randn_like(dot_prod)
alpha = torch.clamp_max(torch.where(angles > 0.0, angles, torch.neg(angles)), 0.5 * np.pi)
cos_alpha = torch.cos(alpha)
sin_alpha = torch.sin(alpha)
out_norm_embd = cos_alpha * norm_embd + sin_alpha * orthogonal_directions
return out_norm_embd
def forward(self, x, labels=None, return_extra_data=False, **kwargs):
    """Classification head forward pass.

    Depending on configuration the head either projects features to
    L2-normalized embeddings fed to an angular classifier
    (`with_embedding`) or feeds raw features to a plain linear classifier.
    With `enable_rebalance`, several parallel branches are fused via
    `rebalance_weights`.

    Args:
        x: input feature tensor (4-D or 5-D).
        labels: ground-truth labels; read during training when class
            mixing or embedding sampling is enabled.
        return_extra_data: if True, also return the normalized embeddings
            and the per-branch scores.
    """
    x = self._squash_features(x)
    if self.dropout is not None:
        x = self.dropout(x)
    if self.with_embedding:
        if self.enable_rebalance:
            # One embedding projection per rebalance branch; all branches
            # share the same angular classifier.
            unnorm_embd = [module(x) for module in self.fc_pre_angular]
            norm_embd = [normalize(embd.view(-1, self.embd_size), dim=1) for embd in unnorm_embd]
            split_scores = [self.fc_angular(embd) for embd in norm_embd]
            all_scores = torch.cat([score.unsqueeze(1) for score in split_scores], dim=1)
            main_cls_score = torch.sum(all_scores * self.rebalance_weights, dim=1)
            extra_cls_score = split_scores
        else:
            unnorm_embd = self.fc_pre_angular(x)
            norm_embd = normalize(unnorm_embd.view(-1, self.embd_size), dim=1)
            if self.training:
                # Train-time embedding augmentations assume no negative
                # (unlabeled, label < 0) samples in the batch.
                neg_samples_mask = labels.view(-1) < 0
                if neg_samples_mask.sum() > 0 and (self.enable_class_mixing or self.enable_sampling):
                    raise NotImplementedError
                if self.enable_class_mixing:
                    # Class centers = rows of the angular classifier weight,
                    # transposed and re-normalized.
                    norm_class_centers = normalize(self.fc_angular.weight.permute(1, 0), dim=1)
                    norm_embd = self._mix_embd(
                        norm_embd, labels, norm_class_centers, self.num_classes, self.alpha_class_mixing
                    )
                if self.enable_sampling:
                    norm_embd = self._sample_embd(
                        norm_embd, labels, x.shape[0], self.adaptive_sampling, self.sampling_angle_std
                    )
            main_cls_score = self.fc_angular(norm_embd)
            extra_cls_score = None
    else:
        norm_embd = None
        if self.enable_rebalance:
            unnorm_embd = [module(x).view(-1, self.internal_num_channels) for module in self.fc_pre_cls]
            split_scores = [self.fc_cls_out(embd) for embd in unnorm_embd]
            all_scores = torch.cat([score.unsqueeze(1) for score in split_scores], dim=1)
            main_cls_score = torch.sum(all_scores * self.rebalance_weights, dim=1)
            extra_cls_score = split_scores
        else:
            extra_cls_score = None
            main_cls_score = self.fc_cls_out(x.view(-1, self.in_channels))
    if return_extra_data:
        return main_cls_score, norm_embd, extra_cls_score
    else:
        return main_cls_score
def loss(self, main_cls_score, labels, norm_embd, name, extra_cls_score, **kwargs):
    """Compute the head losses.

    Negative labels (< 0) are treated as unlabeled and excluded from the
    main classification loss. With rebalancing enabled, per-group losses
    over masked logits are mixed with the main loss via `rebalance_alpha`.

    Returns:
        dict: losses and scalar log values keyed by '<metric>/cls<name>'.
    """
    losses = dict()
    if self.label_cleaner is not None:
        # Optionally clean/relabel samples using the current embeddings.
        scale = self.head_loss.last_scale if hasattr(self.head_loss, 'last_scale') else None
        labels = self.label_cleaner(norm_embd, labels, scale)
    pos_samples_mask = labels.view(-1) >= 0
    pos_labels = labels.view(-1)[pos_samples_mask]
    pos_main_cls_score = main_cls_score[pos_samples_mask]
    main_cls_loss = self.head_loss(pos_main_cls_score, pos_labels)
    if hasattr(self.head_loss, 'last_scale'):
        losses['scale/cls' + name] = self.head_loss.last_scale
    if self.enable_rebalance:
        if pos_main_cls_score.size(0) < main_cls_score.size(0):
            # NOTE(review): 'thr' looks like a typo for 'the' in this message.
            raise NotImplementedError('Negative mining is not implemented for thr rebalance loss')
        with torch.no_grad():
            # One-hot label masks restricted to each rebalance group.
            all_indexed_labels_mask = torch.zeros_like(main_cls_score, dtype=torch.float32)\
                .scatter_(1, labels.view(-1, 1), 1)
            indexed_labels_mask = all_indexed_labels_mask.unsqueeze(1) * self.rebalance_zero_mask
            valid_samples_mask = indexed_labels_mask.sum(dim=2) > 0.0
        group_losses = []
        for group_id, group_cls_score in enumerate(extra_cls_score):
        	with torch.no_grad():
        		group_samples_mask = valid_samples_mask[:, group_id]
        		if torch.sum(group_samples_mask) == 0:
        			# No samples of this group in the batch.
        			continue
        		group_labels_mask = indexed_labels_mask[:, group_id]
        		group_logits_mask = self.rebalance_zero_mask[:, group_id].view(-1) > 0.0
        		group_labels = group_labels_mask[group_samples_mask][:, group_logits_mask]
        		group_targets = torch.argmax(group_labels, dim=1)
        	group_cls_score = group_cls_score[group_samples_mask][:, group_logits_mask]
        	# increment_step=False presumably avoids advancing the loss-scale
        	# schedule twice per iteration -- confirm against head_loss.
        	group_loss_args = dict(increment_step=False) if self.with_embedding else dict()
        	group_loss = self.head_loss(group_cls_score, group_targets, **group_loss_args)
        	group_losses.append(group_loss)
        fused_group_loss = sum(group_losses) / float(len(group_losses))
        losses['loss/cls' + name] = \
            (1.0 - self.rebalance_alpha) * main_cls_loss + self.rebalance_alpha * fused_group_loss
    else:
        losses['loss/cls' + name] = main_cls_loss
    if self.losses_extra is not None and not self.enable_rebalance:
        for extra_loss_name, extra_loss in self.losses_extra.items():
            losses[extra_loss_name.replace('_', '/') + name] = extra_loss(
                norm_embd, main_cls_score, labels)
    if self.with_embedding and hasattr(self.fc_angular, 'loss'):
        losses.update(self.fc_angular.loss(name))
    return losses
@property
def last_scale(self):
    """Scale factor reported by the head loss, or None if it has none."""
    return getattr(self.head_loss, 'last_scale', None)
|
openvinotoolkit/mmaction2 | mmaction/models/spatial_temporal_modules/__init__.py | from .aggregator_spatial_temporal_module import AggregatorSpatialTemporalModule
from .average_spatial_temporal_module import AverageSpatialTemporalModule
from .trg_spatial_temporal_module import TRGSpatialTemporalModule
from .non_local import NonLocalModule
from .bert import BERTSpatialTemporalModule
__all__ = [
'AggregatorSpatialTemporalModule',
'AverageSpatialTemporalModule',
'TRGSpatialTemporalModule',
'NonLocalModule',
'BERTSpatialTemporalModule',
]
|
openvinotoolkit/mmaction2 | mmaction/core/data/sample_info_aggregator.py | import torch
import torch.distributed as dist
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class SampleInfoAggregatorHook(Hook):
    """Hook that gathers per-sample training metadata across all ranks and
    feeds it back to the dataset for sample filtering.

    Filtering stays disabled for the first `warmup_epochs` epochs.
    """

    def __init__(self, warmup_epochs=0):
        self.warmup_epochs = int(warmup_epochs)
        assert self.warmup_epochs >= 0

    def after_train_iter(self, runner):
        if runner.epoch < self.warmup_epochs:
            # Still warming up -- no filtering yet.
            return
        aggregated = {
            meta_name: self._sync(meta_data, runner.rank, runner.world_size)
            for meta_name, meta_data in runner.model.module.train_meta.items()
        }
        dataset = runner.data_loader.dataset
        dataset.enable_sample_filtering = True
        dataset.update_meta_info(**aggregated)
        runner.log_buffer.update(
            {'filter_active_samples': dataset.get_filter_active_samples_ratio()}
        )

    @staticmethod
    def _sync(data, rank, world_size):
        """All-gather `data` from every rank (via zero-padded all_reduce);
        returns a CPU numpy array."""
        if not (dist.is_available() and dist.is_initialized()):
            return data.cpu().numpy()
        batch = data.size(0)
        gathered = torch.zeros([world_size * batch] + list(data.shape[1:]),
                               dtype=data.dtype, device=data.device)
        gathered[rank * batch:(rank + 1) * batch] = data
        dist.all_reduce(gathered, dist.ReduceOp.SUM)
        return gathered.cpu().numpy()
|
openvinotoolkit/mmaction2 | tools/export.py | <filename>tools/export.py<gh_stars>1-10
import sys
import argparse
import json
from os import makedirs
from os.path import exists, dirname, basename, splitext, join
from subprocess import run, CalledProcessError, DEVNULL
import torch
import onnx
import mmcv
from mmcv.runner import set_random_seed
from mmaction.apis import get_fake_input, init_recognizer
from mmaction.integration.nncf import (check_nncf_is_enabled,
get_nncf_config_from_meta,
is_checkpoint_nncf,
wrap_nncf_model)
from mmaction.models import build_recognizer
from mmaction.core import load_checkpoint
from mmaction.utils import ExtendedDictAction
def convert_to_onnx(net, input_size, output_file_path, opset, check=True):
    """Export `net` to ONNX with dynamic batch/shape axes and optionally
    validate the produced graph with the ONNX checker."""
    sample_input = torch.randn((1, *input_size))
    dynamic_axes = {
        'input': {0: 'batch_size', 1: 'channels', 2: 'length', 3: 'height', 4: 'width'},
        'output': {0: 'batch_size', 1: 'scores'},
    }
    net = net.cpu()
    with torch.no_grad():
        torch.onnx.export(
            net,
            sample_input,
            output_file_path,
            verbose=False,
            opset_version=opset,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes=dynamic_axes,
            operator_export_type=torch.onnx.OperatorExportTypes.ONNX
        )
    reloaded = onnx.load(output_file_path)
    if check:
        try:
            onnx.checker.check_model(reloaded)
            print('ONNX check passed.')
        except onnx.onnx_cpp2py_export.checker.ValidationError as ex:
            print('ONNX check failed: {}.'.format(ex))
def export_to_openvino(cfg, onnx_model_path, output_dir_path, input_shape=None, input_format='rgb'):
    """Run the OpenVINO Model Optimizer on an ONNX model.

    Mean/std are taken from the test pipeline's Normalize op; channel order
    is reversed when the requested input format differs from the one the
    model was trained with.
    """
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    onnx_model = onnx.load(onnx_model_path)
    output_names = set(out.name for out in onnx_model.graph.output)
    # Clear names of the nodes that produce network's output blobs.
    for node in onnx_model.graph.node:
        if output_names.intersection(node.output):
            node.ClearField('name')
    onnx.save(onnx_model, onnx_model_path)
    output_names = ','.join(output_names)
    normalize = [v for v in cfg.data.test.pipeline if v['type'] == 'Normalize'][0]
    mean_values = normalize['mean']
    scale_values = normalize['std']
    command_parts = [
        f'mo.py --input_model="{onnx_model_path}"',
        f'--mean_values="{mean_values}"',
        f'--scale_values="{scale_values}"',
        f'--output_dir="{output_dir_path}"',
        f'--output="{output_names}"',
    ]
    assert input_format.lower() in ['bgr', 'rgb']
    if input_shape is not None:
        command_parts.append(f'--input_shape="{input_shape}"')
    # Swap channels when trained order and requested order disagree.
    if bool(normalize['to_bgr']) != (input_format.lower() == 'bgr'):
        command_parts.append('--reverse_input_channels')
    command_line = ' '.join(command_parts)
    try:
        run('mo.py -h', stdout=DEVNULL, stderr=DEVNULL, shell=True, check=True)
    except CalledProcessError:
        print('OpenVINO Model Optimizer not found, please source '
              'openvino/bin/setupvars.sh before running this script.')
        return
    print(command_line)
    run(command_line, shell=True, check=True)
def parse_args():
    """Parse command-line arguments for the export script."""
    main_parser = argparse.ArgumentParser(description='Export model')
    main_parser.add_argument('config', help='test config file path')
    main_parser.add_argument('checkpoint', help="path to file with model's weights")
    main_parser.add_argument('output_dir', help='path to directory to save exported models in')
    main_parser.add_argument('meta_info', help='path to file to save meta info in')
    main_parser.add_argument('--opset', type=int, default=10, help='ONNX opset')
    main_parser.add_argument('--update_config', nargs='+', action=ExtendedDictAction,
                             help='Update configuration file by parameters specified here.')
    # One sub-command per export target; the target choice is mandatory.
    target_parsers = main_parser.add_subparsers(title='target', dest='target',
                                                help='target model format')
    target_parsers.required = True
    target_parsers.add_parser('onnx', help='export to ONNX')
    openvino_parser = target_parsers.add_parser('openvino', help='export to OpenVINO')
    openvino_parser.add_argument('--input_format', choices=['BGR', 'RGB'], default='RGB',
                                 help='Input image format for exported model.')
    return main_parser.parse_args()
def main(args):
    """Load a recognizer checkpoint and export it to ONNX (and optionally
    OpenVINO IR); dump the model's class list to `args.meta_info`."""
    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    # Export always uses batch size 1.
    cfg.data.videos_per_gpu = 1
    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)
    class_maps = None
    if cfg.get('classes'):
        class_maps = {0: {k: v for k, v in enumerate(sorted(cfg.classes))}}
    model = build_recognizer(
        cfg.model,
        train_cfg=None,
        test_cfg=cfg.test_cfg,
        class_maps=class_maps
    )
    model.eval()
    load_checkpoint(model, args.checkpoint, force_matching=True)
    if hasattr(model, 'forward_inference'):
        # Export the inference-only forward path.
        model.forward = model.forward_inference
    input_time_size = cfg.input_clip_length
    input_image_size = (tuple(cfg.input_img_size)
                        if isinstance(cfg.input_img_size, (list, tuple))
                        else (cfg.input_img_size, cfg.input_img_size))
    input_size = (3, input_time_size) + input_image_size
    # BEGIN nncf part
    was_model_compressed = is_checkpoint_nncf(args.checkpoint)
    cfg_contains_nncf = cfg.get('nncf_config')
    if cfg_contains_nncf and not was_model_compressed:
        raise RuntimeError('Trying to make export with NNCF compression '
                           'a model snapshot that was NOT trained with NNCF')
    if was_model_compressed and not cfg_contains_nncf:
        # reading NNCF config from checkpoint
        nncf_part = get_nncf_config_from_meta(args.checkpoint)
        for k, v, in nncf_part.items():
            cfg[k] = v
    if cfg.get('nncf_config'):
        if torch.cuda.is_available():
            model.cuda()
        check_nncf_is_enabled()
        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        compression_ctrl, model = wrap_nncf_model(model, cfg, None, get_fake_input, export=True)
        compression_ctrl.prepare_for_export()
    # END nncf part
    onnx_model_path = join(args.output_dir, splitext(basename(args.config))[0] + '.onnx')
    base_output_dir = dirname(onnx_model_path)
    if not exists(base_output_dir):
        makedirs(base_output_dir)
    convert_to_onnx(model, input_size, onnx_model_path, opset=args.opset, check=True)
    if args.target == 'openvino':
        input_shape = (1,) + input_size
        export_to_openvino(cfg, onnx_model_path, args.output_dir, input_shape, args.input_format)
    # Persist the class list next to the exported model.
    meta = {'model_classes': model.CLASSES[0]}
    with open(args.meta_info, 'w') as output_meta_stream:
        json.dump(meta, output_meta_stream)
# Script entry point: propagate main()'s return code to the shell.
if __name__ == '__main__':
    args = parse_args()
    sys.exit(main(args) or 0)
|
openvinotoolkit/mmaction2 | mmaction/models/losses/metric_learning_base.py | <reponame>openvinotoolkit/mmaction2<filename>mmaction/models/losses/metric_learning_base.py
from abc import abstractmethod
import numpy as np
import torch
import torch.nn.functional as F
from .base import BaseWeightedLoss
from .. import builder
from ...core.ops import entropy
class BaseMetricLearningLoss(BaseWeightedLoss):
    """Base class for scale-scheduled metric-learning losses.

    On top of the subclass-provided `_calculate` it optionally applies:
    the PR-product reparametrization, an entropy-based confidence penalty,
    per-class loss re-weighting, and loss filtering (positives-only or
    hardest top-k).
    """

    # Allowed values of the `filter_type` argument.
    _loss_filter_types = ['positives', 'top_k']

    def __init__(self, scale_cfg, pr_product=False, conf_penalty_weight=None,
                 filter_type=None, top_k=None, class_sizes=None,
                 enable_class_weighting=False, **kwargs):
        super(BaseMetricLearningLoss, self).__init__(**kwargs)
        self._enable_pr_product = pr_product
        self._conf_penalty_weight = conf_penalty_weight
        self._filter_type = filter_type
        self._top_k = top_k
        if self._filter_type == 'top_k':
            assert self._top_k is not None and self._top_k >= 1
        # The scale factor follows an externally configured schedule.
        self.scale_scheduler = builder.build_scheduler(scale_cfg)
        self._last_scale = 0.0
        self.class_sizes = class_sizes
        if enable_class_weighting and self.class_sizes is not None:
            self.num_classes = max(list(self.class_sizes.keys())) + 1
            weights = self._estimate_class_weights(self.class_sizes, self.num_classes)
            self.register_buffer('class_weights', torch.from_numpy(weights))
        else:
            self.num_classes = None
            self.class_weights = None

    @property
    def with_regularization(self):
        """True when the confidence (entropy) penalty is enabled."""
        return self._conf_penalty_weight is not None and self._conf_penalty_weight > 0.0

    @property
    def with_class_weighting(self):
        """True when per-class loss weights were built."""
        return self.class_weights is not None

    @property
    def with_filtering(self):
        """True when a recognized loss-filtering mode is configured."""
        return self._filter_type is not None and self._filter_type in self._loss_filter_types

    @property
    def with_pr_product(self):
        """True when the PR-product reparametrization is enabled."""
        return self._enable_pr_product

    @property
    def last_scale(self):
        """Scale factor used by the most recent forward call."""
        return self._last_scale

    def update_state(self, num_iters_per_epoch):
        """Tell the scale scheduler how many iterations one epoch has."""
        assert num_iters_per_epoch > 0
        self.scale_scheduler.iters_per_epoch = num_iters_per_epoch

    @staticmethod
    def _estimate_class_weights(class_sizes, num_classes, num_steps=1000, num_samples=14, scale=1.0, eps=1e-4):
        """Estimate inverse-frequency class weights.

        Raw inverse frequencies are normalized so that the median of
        Monte-Carlo sampled weight averages maps to `scale`. Classes
        absent from `class_sizes` get weight 0.
        """
        class_ids = np.array(list(class_sizes.keys()), dtype=np.int32)
        counts = np.array(list(class_sizes.values()), dtype=np.float32)
        frequencies = counts / np.sum(counts)
        init_weights = np.reciprocal(frequencies + eps)
        average_weights = list()
        for _ in range(num_steps):
            ids = np.random.choice(class_ids, num_samples, p=frequencies)
            values = init_weights[ids]
            average_weights.append(np.mean(values))
        weights = scale / np.median(average_weights) * init_weights
        out_weights = np.zeros([num_classes], dtype=np.float32)
        for class_id, class_weight in zip(class_ids, weights):
            out_weights[class_id] = class_weight
        return out_weights

    @staticmethod
    def _pr_product(prod):
        """Reparametrize cosine `prod` keeping its value but altering its
        gradient: out = detach(alpha)*prod + detach(prod)*(1 - alpha),
        where alpha = sqrt(1 - prod^2)."""
        alpha = torch.sqrt(1.0 - prod.pow(2.0))
        out_prod = alpha.detach() * prod + prod.detach() * (1.0 - alpha)
        return out_prod

    def _regularization(self, cos_theta, scale):
        """Confidence penalty: negative weighted entropy of the scaled softmax."""
        probs = F.softmax(scale * cos_theta, dim=-1)
        entropy_values = entropy(probs, dim=-1)
        out_values = np.negative(self._conf_penalty_weight) * entropy_values
        return out_values

    def _reweight(self, losses, labels):
        """Scale per-sample losses by the weight of their class."""
        with torch.no_grad():
            # Weight lookup carries no gradient.
            loss_weights = torch.gather(self.class_weights, 0, labels.view(-1))
        weighted_losses = loss_weights * losses
        return weighted_losses

    def _filter_losses(self, losses):
        """Drop non-positive losses; with 'top_k', additionally keep only
        the k hardest (largest) ones."""
        if self._filter_type == 'positives':
            losses = losses[losses > 0.0]
        elif self._filter_type == 'top_k':
            valid_losses = losses[losses > 0.0]
            if valid_losses.numel() > 0:
                num_top_k = int(min(valid_losses.numel(), self._top_k))
                losses, _ = torch.topk(valid_losses, k=num_top_k)
            else:
                losses = valid_losses.new_zeros((0,))
        return losses

    def _forward(self, output, labels, increment_step=True):
        """Full loss pipeline: scale -> PR-product -> subclass loss ->
        regularization -> class weighting -> filtering -> reduction."""
        if increment_step:
            self._last_scale = self.scale_scheduler.get_scale_and_increment_step()
        else:
            self._last_scale = self.scale_scheduler.get_scale()
        if self.with_pr_product:
            output = self._pr_product(output)
        losses = self._calculate(output, labels, self._last_scale)
        if self.with_regularization:
            losses += self._regularization(output, self._last_scale)
        if self.with_class_weighting:
            losses = self._reweight(losses, labels)
        if self.with_filtering:
            losses = self._filter_losses(losses)
        # Empty loss tensor (everything filtered out) reduces to zero via sum().
        return losses.mean() if losses.numel() > 0 else losses.sum()

    @abstractmethod
    def _calculate(self, output, labels, scale):
        """Return per-sample losses given scaled model outputs."""
        pass
|
openvinotoolkit/mmaction2 | mmaction/datasets/builder.py | import platform
import random
from functools import partial
from copy import deepcopy
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import build_from_cfg
from torch.utils.data import DataLoader
from .dataset_wrappers import RepeatDataset
from .registry import DATASETS
from .samplers import DistributedSampler, BalancedDistributedSampler
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    # DataLoader workers can exhaust the per-process file-descriptor limit;
    # raise the soft limit (up to 4096, never above the hard limit).
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
def _to_list(value, num=None):
    """Normalize `value` to a sequence.

    Tuples/lists are length-checked against `num` (when given) and
    returned unchanged; scalars are replicated `num` times (once when
    `num` is None).
    """
    if not isinstance(value, (tuple, list)):
        repeats = 1 if num is None else num
        return [value] * repeats
    if num is not None:
        assert len(value) == num, f'Invalid len of argument: {len(value)} but expected {num}'
    return value
def build_dataset(cfg, target, default_args=None):
    """Build a (possibly concatenated) dataset from a config dict.

    Args:
        cfg (dict): Config dict. It should at least contain the key "type".
        target (str): Target name. One of: "train", "val", "test".
        default_args (dict, optional): Default initialization arguments.
            Default: None.

    Returns:
        Dataset: The constructed dataset (sum of one dataset per source,
            wrapped in RepeatDataset when requested).
    """
    source_cfg = cfg['dataset'] if hasattr(cfg, 'type') and cfg['type'] == 'RepeatDataset' else cfg
    target_cfg = source_cfg[target]
    assert 'root_dir' in source_cfg, 'Data config does not contain \'root_dir\' field'
    # Bug fix: the message previously said 'sources' while the checked key is 'source'.
    assert 'source' in target_cfg, 'Data config does not contain \'source\' field'
    assert 'ann_file' in target_cfg, 'Data config does not contain \'ann_file\' field'
    sources = _to_list(target_cfg['source'])
    num_sources = len(sources)
    ann_files = _to_list(target_cfg['ann_file'], num_sources)
    # 'shared' entries are broadcast so every source gets its own value.
    shared_info = {k: _to_list(v, num_sources) for k, v in source_cfg.get('shared', dict()).items()}
    datasets = []
    for dataset_id in range(num_sources):
        dataset_cfg = deepcopy(target_cfg)
        dataset_cfg['root_dir'] = source_cfg['root_dir']
        dataset_cfg['source'] = sources[dataset_id]
        dataset_cfg['ann_file'] = ann_files[dataset_id]
        for shared_key, shared_value in shared_info.items():
            dataset_cfg[shared_key] = shared_value[dataset_id]
        datasets.append(build_from_cfg(dataset_cfg, DATASETS, default_args))
    # Dataset classes are expected to support '+' for concatenation.
    dataset = sum(datasets)
    if hasattr(cfg, 'type') and cfg['type'] == 'RepeatDataset':
        dataset = RepeatDataset(dataset, cfg['times'])
    return dataset
def build_dataloader(dataset,
                     videos_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     num_instances_per_batch=None,
                     **kwargs):
    """Build a PyTorch DataLoader.

    In distributed training each GPU/process gets its own dataloader with a
    (possibly class-balanced) distributed sampler; otherwise one dataloader
    serves all GPUs.

    Args:
        dataset (Dataset): dataset to load from.
        videos_per_gpu (int): batch size per GPU.
        workers_per_gpu (int): worker subprocess count per GPU.
        num_gpus (int): number of GPUs (non-distributed mode only). Default: 1.
        dist (bool): distributed training/test or not. Default: True.
        shuffle (bool): shuffle the data every epoch. Default: True.
        seed (int | None): base seed for worker RNGs. Default: None.
        drop_last (bool): drop the last incomplete batch. Default: False.
        pin_memory (bool): pass-through to DataLoader. Default: True.
        num_instances_per_batch (int | None): if > 1, use the balanced
            sampler packing this many instances per class.
        kwargs (dict, optional): extra DataLoader keyword arguments.

    Returns:
        DataLoader: the configured dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        use_balanced = num_instances_per_batch is not None and num_instances_per_batch > 1
        if use_balanced:
            sampler = BalancedDistributedSampler(dataset, world_size, rank, shuffle=shuffle,
                                                 num_instances=num_instances_per_batch)
        else:
            sampler = DistributedSampler(dataset, world_size, rank, shuffle=shuffle)
        # The sampler owns shuffling in distributed mode.
        shuffle = False
        batch_size, num_workers = videos_per_gpu, workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * videos_per_gpu
        num_workers = num_gpus * workers_per_gpu
    if seed is not None:
        init_fn = partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None
    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=videos_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed `random` and `numpy` deterministically for a dataloader worker.

    The seed is unique per (rank, worker) pair so workers never produce
    identical augmentation streams.
    """
    worker_seed = seed + rank * num_workers + worker_id
    np.random.seed(worker_seed)
    random.seed(worker_seed)
|
openvinotoolkit/mmaction2 | mmaction/models/backbones/mobilenetv3_lgd.py | <gh_stars>1-10
import torch
import torch.nn as nn
from ...core.ops import HSwish, conv_1x1x1_bn, Dropout
from ..registry import BACKBONES
from .mobilenetv3_s3d import MobileNetV3_S3D
class ContextPool3d(nn.Module):
    """Attention-weighted global pooling over the (T, H, W) dimensions."""

    def __init__(self, in_channels):
        super(ContextPool3d, self).__init__()
        # Single-channel key map used to score each spatio-temporal position.
        self.encoder = conv_1x1x1_bn(in_channels, 1, as_list=False)

    def forward(self, x):
        _, c, t, h, w = x.size()
        num_positions = t * h * w
        # Softmax over all positions yields the attention distribution.
        attention = torch.softmax(self.encoder(x).view(-1, num_positions, 1), dim=1)
        pooled = torch.matmul(x.view(-1, c, num_positions), attention)
        return pooled.view(-1, c, 1, 1, 1)
class PoolingBlock(nn.Module):
    """Global pooling (average or attention) followed by a 1x1x1 bottleneck MLP."""

    modes = ['average', 'attention']

    def __init__(self, in_planes, out_planes, factor=3, norm='none', mode='average'):
        super(PoolingBlock, self).__init__()
        assert mode in self.modes
        hidden_dim = int(factor * in_planes)
        pooling = nn.AdaptiveAvgPool3d((1, 1, 1)) if mode == 'average' else ContextPool3d(in_planes)
        self.conv = nn.Sequential(
            pooling,
            *conv_1x1x1_bn(in_planes, hidden_dim, norm=norm),
            HSwish(),
            *conv_1x1x1_bn(hidden_dim, out_planes, norm=norm),
        )

    def forward(self, x):
        return self.conv(x)
class UpsampleBlock(nn.Module):
    """Two-layer 1x1x1 bottleneck whose last stage is zero-initialized.

    The zero init makes the block start as a no-op contribution when its
    output is added to another branch.
    """

    def __init__(self, in_planes, out_planes, factor=3, norm='none'):
        super(UpsampleBlock, self).__init__()
        hidden_dim = int(factor * in_planes)
        layers = [
            *conv_1x1x1_bn(in_planes, hidden_dim, norm=norm),
            HSwish(),
            *conv_1x1x1_bn(hidden_dim, out_planes, norm=norm),
        ]
        self.conv = nn.Sequential(*layers)
        self._reset_weights()

    def forward(self, x):
        return self.conv(x)

    def _reset_weights(self):
        """Zero the last stage's weight/bias so the block initially outputs zeros."""
        # Idiomatic negative index instead of `self.conv[len(self.conv) - 1]`.
        last_stage = self.conv[-1]
        if getattr(last_stage, 'weight', None) is not None:
            last_stage.weight.data.zero_()
        if getattr(last_stage, 'bias', None) is not None:
            last_stage.bias.data.zero_()
class GlobBlock(nn.Module):
    """Residual 1x1x1 bottleneck for the global pathway with optional dropout."""

    def __init__(self, in_planes, out_planes, factor=3,
                 dropout_cfg=None, internal_dropout=True,
                 norm='none'):
        super(GlobBlock, self).__init__()
        self.identity = in_planes == out_planes
        self.internal_dropout = internal_dropout
        hidden_dim = int(factor * in_planes)
        self.conv = nn.Sequential(
            *conv_1x1x1_bn(in_planes, hidden_dim, norm=norm),
            HSwish(),
            *conv_1x1x1_bn(hidden_dim, out_planes, norm=norm),
        )
        # Dropout is only instantiated on the residual (identity) path.
        if dropout_cfg is not None and self.identity:
            self.dropout = Dropout(**dropout_cfg)
        else:
            self.dropout = None

    def forward(self, x):
        branch = self.conv(x)
        if self.dropout is not None and self.internal_dropout:
            branch = self.dropout(branch, x)
        out = x + branch if self.identity else branch
        if self.dropout is not None and not self.internal_dropout:
            out = self.dropout(out)
        return out
@BACKBONES.register_module()
class MobileNetV3_LGD(MobileNetV3_S3D):
    """MobileNetV3-S3D backbone with a local/global dual-stream pathway.

    A pooled global stream runs alongside the local one: selected stages
    pool local features into the global stream (`lgd_pool`), transform it
    (`lgd_glob`) and inject it back into the local stream (`lgd_upsample`).
    """

    def __init__(self,
                 mix_paths,
                 pool_method='average',
                 channel_factor=3,
                 **kwargs):
        super(MobileNetV3_LGD, self).__init__(**kwargs)
        # mix_paths is one flag per backbone stage; stage 0 cannot mix.
        assert len(mix_paths) == len(self.cfg)
        assert mix_paths[0] == 0
        mix_idx = [idx for idx in range(len(self.cfg)) if mix_paths[idx] > 0]
        assert len(mix_idx) > 0
        # Stages that receive the global stream back into the local one.
        self.glob_to_local_idx = mix_idx
        # Stages after which the local stream is pooled into the global one.
        self.local_to_glob_idx = [mix_idx[0] - 1] + mix_idx[:-1]
        # Stages at which the global stream itself is transformed.
        self.glob_idx = mix_idx[:-1]
        self.glob_channels_num = [self.channels_num[idx] for idx in self.local_to_glob_idx]
        self.channel_factor = channel_factor
        self.lgd_upsample = nn.ModuleDict({
            f'upsample_{idx}': UpsampleBlock(
                glob_channels,
                self.channels_num[idx],
                factor=self.channel_factor,
                norm=self.weight_norm
            )
            for idx, glob_channels in zip(self.glob_to_local_idx, self.glob_channels_num)
        })
        self.lgd_pool = nn.ModuleDict({
            f'pooling_{idx}': PoolingBlock(
                self.channels_num[idx],
                self.channels_num[idx],
                factor=self.channel_factor,
                norm=self.weight_norm,
                mode=pool_method
            )
            for idx in self.local_to_glob_idx
        })
        self.lgd_glob = nn.ModuleDict({
            f'glob_{idx}': GlobBlock(
                glob_channels,
                self.channels_num[idx],
                factor=self.channel_factor,
                dropout_cfg=self.dropout_cfg if self.use_dropout[idx] > 0 else None,
                internal_dropout=self.internal_dropout,
                norm=self.weight_norm
            )
            for idx, glob_channels in zip(self.glob_idx, self.glob_channels_num[:-1])
        })

    def forward(self, x, return_extra_data=False, enable_extra_modules=True):
        """Run the dual-stream forward pass.

        Returns local feature maps from the `self.out_ids` stages (after
        the out conv); optionally also feature/attention/SGS debug data.
        """
        y = self._norm_input(x)
        local_y = y
        glob_y = None
        local_outs = []
        feature_data, att_data, sgs_data = dict(), dict(), dict()
        for module_idx in range(len(self.features)):
            local_y = self._infer_module(
                local_y, module_idx, return_extra_data, enable_extra_modules, feature_data, att_data
            )
            if self.sgs_modules is not None and module_idx in self.sgs_idx:
                sgs_module_name = 'sgs_{}'.format(module_idx)
                sgs_module = self.sgs_modules[sgs_module_name]
                if self.enable_sgs_loss:
                    local_y, sgs_extra_data = sgs_module(local_y, return_extra_data=True)
                    sgs_data[sgs_module_name] = sgs_extra_data
                else:
                    local_y = sgs_module(local_y)
            if module_idx in self.glob_to_local_idx:
                # Inject the global stream back into the local stream.
                assert glob_y is not None
                upsample_module = self.lgd_upsample[f'upsample_{module_idx}']
                local_y = upsample_module(glob_y) + local_y
            if module_idx in self.local_to_glob_idx:
                # Pool the local stream into the global one.
                pooling_module = self.lgd_pool[f'pooling_{module_idx}']
                pooled_local_y = pooling_module(local_y)
                if glob_y is not None:
                    glob_module = self.lgd_glob[f'glob_{module_idx}']
                    glob_y = glob_module(glob_y) + pooled_local_y
                else:
                    glob_y = pooled_local_y
            if module_idx in self.out_ids:
                local_outs.append(local_y)
        assert len(local_outs) > 0
        local_outs = self._out_conv(local_outs, return_extra_data, enable_extra_modules, att_data)
        if return_extra_data:
            return local_outs, dict(feature_data=feature_data, att_data=att_data, sgs_data=sgs_data)
        else:
            return local_outs
|
openvinotoolkit/mmaction2 | tools/data/custom/prepare_annot.py | <filename>tools/data/custom/prepare_annot.py<gh_stars>1-10
import json
from os import makedirs
from os.path import exists, join
from argparse import ArgumentParser
from collections import defaultdict
from random import shuffle
# Maps raw annotation label strings to contiguous class ids; several
# visually equivalent gesture variants are collapsed into a single id.
CLASS_MAP = {
    'digit_0': 0,
    'digit_1': 1,
    'digit_1_hand_to_the_camera': 1,
    'digit_2': 2,
    'digit_2_hand_to_the_camera': 2,
    'digit_3': 3,
    'digit_3_hand_to_the_camera': 3,
    'digit_3_middle_fingers': 3,
    'digit_3_middle_fingers_hand_to_the_camera': 3,
    'digit_3_with_big_finger': 3,
    'digit_3_with_big_finger_hand_to_the_camera': 3,
    'digit_4': 4,
    'digit_4_hand_to_the_camera': 4,
    'digit_5': 5,
    'digit_5_hand_to_the_camera': 5,
    'thumb_up': 6,
    'thumb_down': 7,
    'sliding_two_fingers_up': 8,
    'sliding_two_fingers_down': 9,
    'sliding_two_fingers_left': 10,
    'sliding_two_fingers_right': 11,
}
def load_videos_info(file_path):
    """Load the videos-info JSON and lower-case every video name (key)."""
    with open(file_path) as input_stream:
        raw = json.load(input_stream)
    return {name.lower(): info for name, info in raw.items()}
def update_samples_info(records, file_path, class_map):
    """Merge per-sample annotation lines into `records` (mutated in place).

    Each valid line carries 7 space-separated fields; only the name and
    fps are consumed here. Records without clip annotations are dropped.
    Returns the list of completed records with labels mapped through
    `class_map`.
    """
    out_records = []
    with open(file_path) as input_stream:
        for line in input_stream:
            line_parts = line.strip().split(' ')
            if len(line_parts) != 7:
                # Skip malformed lines silently.
                continue
            name, _, _, _, _, _, fps = line_parts
            name = name.lower()
            # Clamp FPS to the [5, 30] range.
            fps = max(5.0, min(30.0, float(fps)))
            assert name in records, f'Cannot find \"{name}\" in records'
            record = records[name]
            video_annot = record['annot']
            if video_annot is None:
                # No clip annotation -> sample is unusable.
                continue
            # The clip must lie inside the video span.
            assert video_annot['clip_start'] >= video_annot['video_start']
            assert video_annot['clip_end'] <= video_annot['video_end']
            record['video_start'] = video_annot['video_start']
            record['video_end'] = video_annot['video_end']
            record['clip_start'] = video_annot['clip_start']
            record['clip_end'] = video_annot['clip_end']
            record['fps'] = fps
            del record['annot']
            label = record['label']
            assert label in class_map, f'Cannot find {label} in class_map'
            record['label'] = class_map[label]
            record['name'] = name
            out_records.append(record)
    return out_records
def split_train_val(records, test_ratio):
    """Split records by user id so that no user appears in both subsets.

    A `test_ratio` fraction of users (at least one, never all) is drawn at
    random for the test subset.
    """
    per_user = defaultdict(list)
    for record in records:
        per_user[record['user_id']].append(record)
    num_users = len(per_user)
    num_test_users = max(1, int(test_ratio * float(num_users)))
    assert 0 < num_test_users < num_users
    user_ids = list(per_user.keys())
    shuffle(user_ids)
    held_out = set(user_ids[:num_test_users])
    train_records, test_records = [], []
    for user_id, user_records in per_user.items():
        bucket = test_records if user_id in held_out else train_records
        bucket.extend(user_records)
    return train_records, test_records
def dump_annot(records, out_path):
    """Write records to `out_path`, one space-separated annotation line each."""
    with open(out_path, 'w') as output_stream:
        for record in records:
            fields = (record['name'], record['label'],
                      record['clip_start'], record['clip_end'],
                      record['video_start'], record['video_end'],
                      record['fps'])
            output_stream.write('{} {} {} {} {} {} {}\n'.format(*fields))
def main():
    """CLI entry point: merge video/sample info, split train/test by user
    and dump train.txt/test.txt annotation files."""
    parser = ArgumentParser()
    parser.add_argument('--videos_info', '-iv', type=str, required=True)
    parser.add_argument('--samples_info', '-is', type=str, required=True)
    parser.add_argument('--output_dir', '-o', type=str, required=True)
    parser.add_argument('--test_ratio', '-t', type=float, required=False, default=0.2)
    args = parser.parse_args()
    assert exists(args.videos_info)
    assert exists(args.samples_info)
    if not exists(args.output_dir):
        makedirs(args.output_dir)
    records = load_videos_info(args.videos_info)
    print(f'Loaded {len(records)} video records')
    records = update_samples_info(records, args.samples_info, CLASS_MAP)
    print(f'Merged {len(records)} final records')
    train_records, test_records = split_train_val(records, test_ratio=args.test_ratio)
    print(f'Split on {len(train_records)} train and {len(test_records)} test records')
    train_out_path = join(args.output_dir, 'train.txt')
    dump_annot(train_records, train_out_path)
    print(f'Train annotation is dumped to: {train_out_path}')
    test_out_path = join(args.output_dir, 'test.txt')
    dump_annot(test_records, test_out_path)
    print(f'Test annotation is dumped to: {test_out_path}')
if __name__ == '__main__':
main() |
openvinotoolkit/mmaction2 | tools/collect_data_stat.py | # Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import sys
import argparse
import numpy as np
import mmcv
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.utils import ExtendedDictAction
from mmaction.core.utils import propagate_root_dir
from mmcv.runner import set_random_seed
def update_config(cfg, args):
    """Prepare the loaded config for raw-statistics collection.

    Applies the optional worker-count override, forces test mode and
    neutralizes the Normalize transform (zero mean, unit std, RGB order)
    so that un-normalized pixel statistics are gathered.
    """
    workers = args.num_workers
    if workers is not None and workers > 0:
        cfg.data.workers_per_gpu = workers
    cfg.data.test.test_mode = True
    # Locate the first Normalize step; raises IndexError if it is missing,
    # matching the pipeline contract.
    norm_steps = [step for step in cfg.data.test.pipeline if step['type'] == 'Normalize']
    norm_step = norm_steps[0]
    norm_step['mean'] = [0.0, 0.0, 0.0]
    norm_step['std'] = [1.0, 1.0, 1.0]
    norm_step['to_bgr'] = False
    return cfg
def merge_configs(cfg1, cfg2):
    """Merge ``cfg2`` into a copy of ``cfg1``.

    Keys present in both are overwritten by ``cfg2``; entries whose value
    is ``None`` in ``cfg2`` are ignored.  Either argument may be ``None``.

    Returns:
        dict: the merged configuration (``cfg1`` is not modified).
    """
    cfg1 = {} if cfg1 is None else cfg1.copy()
    cfg2 = {} if cfg2 is None else cfg2
    for k, v in cfg2.items():
        # Bug fix: only a literal None means "no override".  The previous
        # `if v:` also silently dropped valid falsy settings such as
        # 0, False or '' — contradicting the documented behavior.
        if v is not None:
            cfg1[k] = v
    return cfg1
def collect_stat(data_loader):
    """Iterate over the loader and gather per-clip channel statistics.

    Returns:
        tuple: two arrays of shape (num_samples, num_channels) holding the
        per-sample channel-wise means and standard deviations.
    """
    means, stds = [], []
    progress_bar = mmcv.ProgressBar(len(data_loader.dataset))
    for batch in data_loader:
        clips = batch['imgs'].detach().squeeze().cpu().numpy()
        # Reduce over time and the two spatial axes; keep batch & channel.
        means.append(np.mean(clips, axis=(2, 3, 4)))
        stds.append(np.std(clips, axis=(2, 3, 4)))
        for _ in range(len(clips)):
            progress_bar.update()
    return np.concatenate(means, axis=0), np.concatenate(stds, axis=0)
def filter_stat(mean_data, std_data, min_value=1.0):
    """Drop samples whose mean or std is not strictly above ``min_value``
    in every channel (filters out near-constant / black clips)."""
    keep = np.logical_and(np.all(mean_data > min_value, axis=1),
                          np.all(std_data > min_value, axis=1))
    return mean_data[keep], std_data[keep]
def dump_stat(mean_data, std_data, out_filepath):
    """Write one line per sample to ``out_filepath``: comma-joined channel
    means, a single space, then comma-joined channel stds."""
    assert mean_data.shape == std_data.shape
    lines = []
    for mean_row, std_row in zip(mean_data, std_data):
        mean_text = ','.join(str(v) for v in mean_row)
        std_text = ','.join(str(v) for v in std_row)
        lines.append(f'{mean_text} {std_text}\n')
    with open(out_filepath, 'w') as output_stream:
        output_stream.writelines(lines)
def parse_args():
    """Parse command-line arguments for the stat-collection script."""
    # NOTE(review): the description looks copy-pasted from the ONNX/OpenVINO
    # test script — this tool collects dataset statistics; confirm and fix.
    parser = argparse.ArgumentParser(description='Test model deployed to ONNX or OpenVINO')
    parser.add_argument('config', help='path to configuration file')
    parser.add_argument('out', help='path to save stat')
    parser.add_argument('--data_dir', type=str,
                        help='the dir with dataset')
    parser.add_argument('--num_workers', type=int,
                        help='number of CPU workers per GPU')
    parser.add_argument('--update_config', nargs='+', action=ExtendedDictAction,
                        help='Update configuration file by parameters specified here.')
    args = parser.parse_args()
    return args
def main(args):
    """Collect per-sample channel mean/std statistics over the test split
    and dump them to ``args.out``."""
    # load config
    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)
    cfg = propagate_root_dir(cfg, args.data_dir)

    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)

    # build the dataset
    dataset = build_dataset(cfg.data, 'test', dict(test_mode=True))
    print(f'Test datasets:\n{str(dataset)}')

    # build the dataloader
    data_loader = build_dataloader(
        dataset,
        videos_per_gpu=20,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False
    )

    # collect results
    mean_data, std_data = collect_stat(data_loader)

    # filter out degenerate samples (near-constant / black clips)
    mean_data, std_data = filter_stat(mean_data, std_data, min_value=1.0)

    # dump stat
    dump_stat(mean_data, std_data, args.out)
# Script entry point; propagate main()'s return value as the exit code
# (0 when main returns None).
if __name__ == '__main__':
    args = parse_args()
    sys.exit(main(args) or 0)
openvinotoolkit/mmaction2 | mmaction/models/spatial_temporal_modules/bert.py | """
Original repo: https://github.com/artest08/LateTemporalModeling3DCNN
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import SPATIAL_TEMPORAL_MODULES
from ...core.ops import conv_1x1x1_bn
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable
    per-feature scale (``a_2``) and shift (``b_2``)."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normed = (x - mu) / (sigma + self.eps)
        return self.a_2 * normed + self.b_2
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: ``x + dropout(sublayer(norm(x)))``."""

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        residual = self.dropout(sublayer(self.norm(x)))
        return x + residual
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise MLP with a tanh-approximated GELU and
    dropout, as used in the original BERT implementation."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self._init_weights()

    def _init_weights(self):
        # BERT-style init: N(0, 0.02) weights, zero biases.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                module.weight.data.normal_(mean=0, std=0.02)
                if hasattr(module, 'bias') and module.bias is not None:
                    module.bias.data.zero_()

    @staticmethod
    def _gelu_activation(x):
        # tanh approximation of GELU.
        inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
        return 0.5 * x * (1 + torch.tanh(inner))

    def forward(self, x):
        hidden = self._gelu_activation(self.w_1(x))
        return self.w_2(self.dropout(hidden))
class MultiHeadedAttention(nn.Module):
    """Standard multi-head scaled dot-product attention.

    Args:
        h (int): number of attention heads.
        d_model (int): model width; must be divisible by ``h``.
        dropout (float): dropout on the attention weights.
    """

    def __init__(self, h, d_model, dropout=0.1):
        super().__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k
        self.d_k = d_model // h
        self.h = h
        # Q, K and V projections (in that order), plus the output projection.
        self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
        self.output_linear = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(p=dropout)
        self._init_weights()

    def _init_weights(self):
        # BERT-style init: N(0, 0.02) weights, zero biases.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                module.weight.data.normal_(mean=0, std=0.02)
                if hasattr(module, 'bias') and module.bias is not None:
                    module.bias.data.zero_()

    @staticmethod
    def _attention(query, key, value, mask=None, dropout=None):
        """Scaled dot-product attention; positions where ``mask == 0`` are
        suppressed with a large negative score before the softmax."""
        scale = 1.0 / math.sqrt(query.size(-1))
        scores = scale * torch.matmul(query, key.transpose(-2, -1))
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)
        p_attn = F.softmax(scores, dim=-1)
        if dropout is not None:
            p_attn = dropout(p_attn)
        return torch.matmul(p_attn, value)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)
        # 1) Do all the linear projections in batch from d_model => h x d_k
        query, key, value = [l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
                             for l, x in zip(self.linear_layers, (query, key, value))]
        # 2) Apply attention on all the projected vectors in batch.
        x = self._attention(query, key, value, mask=mask, dropout=self.dropout)
        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
        return self.output_linear(x)
class TransformerBlock(nn.Module):
    """One transformer encoder layer: pre-norm self-attention and a
    position-wise feed-forward block, each wrapped in a residual
    connection, followed by dropout.

    Args:
        hidden (int): model width.
        attn_heads (int): number of attention heads.
        feed_forward_hidden (int): inner width of the feed-forward block.
        dropout (float): dropout probability.
    """

    def __init__(self, hidden, attn_heads, feed_forward_hidden, dropout):
        super().__init__()
        self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
        self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)
        self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, mask):
        # Self-attention sub-layer (residual handled by SublayerConnection).
        y = self.input_sublayer(x, lambda _x: self.attention(_x, _x, _x, mask=mask))
        # Feed-forward sub-layer.
        y = self.output_sublayer(y, self.feed_forward)
        y = self.dropout(y)
        return y
class BERTEmbedding(nn.Module):
    """Adds a learned positional encoding (one vector per position) to the
    input sequence, followed by dropout."""

    def __init__(self, input_dim, max_len, dropout=0.1):
        super().__init__()
        self.max_len = max_len
        # Learned (not sinusoidal) positional table of shape (1, L, D).
        self.pe = nn.Parameter(torch.Tensor(1, max_len, input_dim))
        self.pe.data.normal_(std=0.02)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, sequence):
        return self.dropout(sequence + self.pe)
class BERT(nn.Module):
    """Minimal BERT-style transformer encoder over a sequence of feature
    vectors, with a learnable CLS token prepended to every sequence.

    During training, non-CLS tokens are randomly excluded from attention
    with keep-probability ``mask_prob``; at eval time all tokens attend.

    Args:
        input_dim (int): dimensionality of the input feature vectors.
        max_len (int): number of input tokens (excluding the CLS token).
        hidden (int): transformer width.
        n_layers (int): number of transformer blocks.
        attn_heads (int): attention heads per block.
        dropout (float): dropout probability.
        mask_prob (float): per-token keep probability during training.
    """

    def __init__(self, input_dim, max_len, hidden=768, n_layers=12, attn_heads=12, dropout=0.1, mask_prob=0.8):
        super().__init__()
        self.hidden = hidden
        self.n_layers = n_layers
        self.attn_heads = attn_heads
        self.max_len = max_len
        # NOTE(review): input_dim appears to be assumed equal to hidden —
        # blocks are built with `hidden` while the embedding and CLS token
        # use `input_dim`; confirm with callers.
        self.input_dim = input_dim
        self.mask_prob = mask_prob
        # Learnable classification token prepended to every sequence.
        self.cls_token = nn.Parameter(torch.Tensor(1, 1, self.input_dim))
        self.cls_token.data.normal_(std=0.02)
        # Keep-probabilities per token: the CLS slot (prob 1.0) is never
        # dropped; the remaining max_len tokens are kept with mask_prob.
        token_drop_probs = torch.cat((torch.ones([1]), torch.full([self.max_len], self.mask_prob)), dim=0)
        self.register_buffer('token_drop_probs', token_drop_probs)
        # paper noted they used 4*hidden_size for ff_network_hidden_size
        self.feed_forward_hidden = hidden * 4
        # embedding for BERT, sum of positional, segment, token embeddings
        self.embedding = BERTEmbedding(input_dim=input_dim, max_len=max_len + 1)
        # multi-layers transformer blocks, deep network
        self.transformer_blocks = nn.ModuleList([
            TransformerBlock(hidden, attn_heads, self.feed_forward_hidden, dropout)
            for _ in range(n_layers)
        ])

    def forward(self, input_vectors):
        # attention masking for padded token
        batch_size = input_vectors.shape[0]
        if self.training:
            # Sample a random keep-mask per token and broadcast it to the
            # (batch, 1, seq, seq) shape expected by the attention scores.
            token_drop_probs = self.token_drop_probs.unsqueeze(0).repeat(batch_size, 1)
            rand_matrix = torch.rand([batch_size, self.max_len + 1],
                                     dtype=input_vectors.dtype, device=input_vectors.device)
            mask = (rand_matrix < token_drop_probs).unsqueeze(1).repeat(1, self.max_len + 1, 1).unsqueeze(1)
        else:
            # Evaluation: attend to every token.
            mask = torch.ones(batch_size, 1, self.max_len + 1, self.max_len + 1,
                              dtype=input_vectors.dtype, device=input_vectors.device)
        # embedding the indexed sequence to sequence of vectors
        y = torch.cat((self.cls_token.repeat(batch_size, 1, 1), input_vectors), 1)
        y = self.embedding(y)
        # running over multiple transformer blocks
        for transformer in self.transformer_blocks:
            y = transformer(y, mask)
        return y
@SPATIAL_TEMPORAL_MODULES.register_module()
class BERTSpatialTemporalModule(nn.Module):
    """Spatio-temporal aggregation head: spatially average-pools a 5-D
    feature map, optionally projects channels to ``hidden_size``, runs a
    small BERT over the temporal tokens and returns the CLS embedding as
    a (N, hidden_size, 1, 1, 1) tensor."""

    def __init__(self, in_channels, spatial_size=7, temporal_size=1, hidden_size=256, num_layers=1, num_heads=8):
        super().__init__()
        self.in_channels = in_channels
        # Normalize spatial_size to an (H, W) tuple.
        self.spatial_size = spatial_size if not isinstance(spatial_size, int) else (spatial_size, spatial_size)
        self.temporal_size = temporal_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        # 1x1x1 conv projection, only needed when channel widths differ.
        self.mapper = None
        if self.in_channels != self.hidden_size:
            self.mapper = conv_1x1x1_bn(self.in_channels, self.hidden_size, as_list=False)
        self.spatial_pool = nn.AvgPool3d((1,) + self.spatial_size, stride=1, padding=0)
        self.bert = BERT(self.hidden_size, self.temporal_size,
                         hidden=self.hidden_size, n_layers=self.num_layers, attn_heads=self.num_heads)

    def init_weights(self):
        # Parameters are initialized inside the submodules' constructors.
        pass

    def forward(self, x, return_extra_data=False):
        # Collapse the spatial dimensions, keeping the temporal axis.
        y = self.spatial_pool(x)
        if self.mapper is not None:
            y = self.mapper(y)
        # (N, C, T, 1, 1) -> (N, T, C): one token per temporal step.
        input_vectors = y.view(-1, self.hidden_size, self.temporal_size).transpose(1, 2)
        outputs = self.bert(input_vectors)
        # Token 0 is the CLS embedding.
        output = outputs[:, 0].view(-1, self.hidden_size, 1, 1, 1)
        if return_extra_data:
            return output, dict()
        else:
            return output
|
openvinotoolkit/mmaction2 | tools/data/dump_frames.py | import subprocess
from os import makedirs, listdir, walk, popen
from os.path import exists, join, isfile, abspath
from shutil import rmtree
from argparse import ArgumentParser
from tqdm import tqdm
# File extensions (lower-case, without the dot) recognized as video inputs.
VIDEO_EXTENSIONS = 'avi', 'mp4', 'mov', 'webm', 'mkv'
def create_dirs(dir_path, override=False):
    """Ensure ``dir_path`` exists.

    With ``override=True`` any existing directory is removed first, giving
    a fresh, empty directory; otherwise existing contents are preserved.
    """
    if override and exists(dir_path):
        rmtree(dir_path)
    if not exists(dir_path):
        makedirs(dir_path)
def parse_relative_paths(data_dir, extensions):
    """Return relative paths of all leaf directories under ``data_dir``
    that directly contain at least one file with an allowed extension."""
    data_dir = abspath(data_dir)
    prefix_len = len(data_dir) + 1
    relative_paths = []
    for root, sub_dirs, files in walk(data_dir):
        # Only leaf directories (no sub-dirs) holding files are candidates.
        if sub_dirs or not files:
            continue
        if any(f.split('.')[-1].lower() in extensions for f in files):
            relative_paths.append(root[prefix_len:])
    return relative_paths
def prepare_tasks(relative_paths, input_dir, output_dir, extensions):
    """Build (input_video_path, output_frames_dir, relative_name) tasks for
    videos that still need their frames dumped.

    An existing output directory is kept (and the task skipped) only when
    it already holds a complete, contiguous 1..N frame sequence; otherwise
    it is wiped so the video gets re-processed.
    """
    out_tasks = []
    for relative_path in relative_paths:
        input_videos_dir = join(input_dir, relative_path)
        assert exists(input_videos_dir)
        input_video_files = [f for f in listdir(input_videos_dir)
                             if isfile(join(input_videos_dir, f)) and f.split('.')[-1].lower() in extensions]
        for input_video_file in input_video_files:
            input_video_path = join(input_videos_dir, input_video_file)
            video_name = input_video_file.split('.')[0].lower()
            output_video_dir = join(output_dir, relative_path, video_name)
            if exists(output_video_dir):
                existed_files = [f for f in listdir(output_video_dir) if isfile(join(output_video_dir, f))]
                existed_frame_ids = [int(f.split('.')[0]) for f in existed_files]
                # Bug fix: an existing but EMPTY directory used to crash on
                # min()/max() of an empty sequence — treat it as incomplete.
                if (not existed_frame_ids
                        or min(existed_frame_ids) != 1
                        or max(existed_frame_ids) != len(existed_frame_ids)):
                    rmtree(output_video_dir)
                else:
                    # Complete frame sequence already present — skip.
                    continue
            out_tasks.append((input_video_path, output_video_dir, join(relative_path, video_name)))
    return out_tasks
def extract_properties(video_path):
    """Query a video's resolution with ffprobe.

    Returns:
        dict: {'width': int, 'height': int} of the first video stream.
    """
    # NOTE(review): video_path is interpolated into a shell command without
    # quoting — paths containing spaces or shell metacharacters will break.
    result = popen(
        f'ffprobe -hide_banner '
        f'-loglevel error '
        f'-select_streams v:0 '
        f'-show_entries stream=width,height '
        f'-of csv=p=0 {video_path}'
    )
    # ffprobe prints a single 'width,height' CSV line.
    video_width, video_height = result.readline().rstrip().split(',')
    video_width = int(video_width)
    video_height = int(video_height)
    return dict(
        width=video_width,
        height=video_height
    )
def dump_frames(video_path, video_info, out_dir, image_name_template, max_image_size):
    """Run ffmpeg to dump all frames of a video as images, limiting the
    longer side to ``max_image_size`` while preserving aspect ratio.

    Returns:
        str or None: the decoded ffmpeg log on success, None on failure.
    """
    # Scale the longer side down to max_image_size; '-2' makes ffmpeg pick
    # an even value for the other side, preserving the aspect ratio.
    if video_info['width'] > video_info['height']:
        scale_filter = '"scale={}:-2"'.format(max_image_size)
    else:
        scale_filter = '"scale=-2:{}"'.format(max_image_size)
    command = ' '.join(['ffmpeg',
                        '-i', '"{}"'.format(video_path),
                        '-vsync', '0',
                        '-vf', scale_filter,
                        '-q:v', '5',
                        '"{}"'.format(join(out_dir, image_name_template)),
                        '-y'])
    try:
        log = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return None
    return str(log, 'utf-8')
def parse_video_info(log_message):
    """Parse ffmpeg progress output and return (num_frames, fps) derived
    from the last reported 'frame=... time=...' progress entry."""
    all_entries = []
    for line in log_message.split('\n'):
        if not line.startswith('frame='):
            continue
        # Keep only the last in-place progress update ('\r'-separated),
        # collapse repeated spaces and normalize 'key= value' to 'key=value'.
        line = line.strip().split('\rframe=')[-1].strip()
        line = ' '.join(part for part in line.split(' ') if part)
        line = line.replace('= ', '=')
        fields = line.split(' ')
        num_frames = int(fields[0].split('=')[-1])
        assert num_frames > 0
        time_parts = [float(t) for t in fields[4].split('=')[-1].split(':')]
        assert len(time_parts) == 3
        duration = time_parts[0] * 3600.0 + time_parts[1] * 60.0 + time_parts[2]
        assert duration > 0.0
        all_entries.append((num_frames, num_frames / duration))
    assert all_entries
    return all_entries[-1]
def process_task(input_video_path, output_video_dir, relative_path, image_name_template, max_image_size):
    """Dump the frames of one video and parse the resulting ffmpeg log.

    Returns:
        tuple or None: (relative_path, num_frames, fps) on success;
        None when ffmpeg failed (the partial output dir is removed).
    """
    create_dirs(output_video_dir)
    video_info = extract_properties(input_video_path)
    log_message = dump_frames(input_video_path, video_info,
                              output_video_dir, image_name_template,
                              max_image_size)
    if log_message is None:
        # ffmpeg failed — clean up partial output so the next run retries.
        rmtree(output_video_dir)
        return None
    video_num_frames, video_fps = parse_video_info(log_message)
    return relative_path, video_num_frames, video_fps
def dump_records(records, out_path):
    """Write the annotation file, one space-separated line per valid record
    in the format: 'rel_path -1 0 num_frames 0 num_frames fps'.

    None records (failed videos) and records with no frames are skipped.
    """
    with open(out_path, 'w') as output_stream:
        for record in records:
            if record is None:
                continue
            rel_path, num_frames, fps = record
            if num_frames <= 0:
                continue
            fields = (rel_path, -1, 0, num_frames, 0, num_frames, fps)
            output_stream.write(' '.join(str(f) for f in fields) + '\n')
def main():
    """CLI entry point: dump frames for every video under ``input_dir``
    and write an annotation file one level above ``output_dir``."""
    parser = ArgumentParser()
    parser.add_argument('--input_dir', '-i', type=str, required=True)
    parser.add_argument('--output_dir', '-o', type=str, required=True)
    parser.add_argument('--out_extension', '-ie', type=str, required=False, default='jpg')
    parser.add_argument('--max_image_size', '-ms', type=int, required=False, default=720)
    parser.add_argument('--override', action='store_true', required=False)
    args = parser.parse_args()

    assert exists(args.input_dir)
    create_dirs(args.output_dir, override=args.override)

    print('\nPreparing tasks ...')
    relative_paths = parse_relative_paths(args.input_dir, VIDEO_EXTENSIONS)
    tasks = prepare_tasks(relative_paths, args.input_dir, args.output_dir, VIDEO_EXTENSIONS)
    print(f'Prepared {len(tasks)} tasks.')

    print('\nDumping frames ...')
    # ffmpeg writes frames as 00001.<ext>, 00002.<ext>, ...
    image_name_template = f'%05d.{args.out_extension}'
    records = []
    for task in tqdm(tasks, leave=False):
        record = process_task(*task,
                              image_name_template=image_name_template,
                              max_image_size=args.max_image_size)
        records.append(record)
    print('Finished.')

    # The annotation file lives one level above the frames directory.
    out_annot_path = abspath('{}/../annot.txt'.format(args.output_dir))
    dump_records(records, out_annot_path)
    print('\nAnnotated has been stored at: {}'.format(out_annot_path))
# Script entry point.
if __name__ == '__main__':
    main()
|
openvinotoolkit/mmaction2 | tools/rename_layers.py | from os.path import exists
from argparse import ArgumentParser
import torch
def main():
    """Load a checkpoint, rename legacy 'fc_pre_cls' layers to
    'fc_pre_angular', and save the converted checkpoint.

    Command line:
        --input/-i   path to the source checkpoint (must exist)
        --output/-o  path for the converted checkpoint
    """
    parser = ArgumentParser()
    parser.add_argument('--input', '-i', type=str, required=True)
    parser.add_argument('--output', '-o', type=str, required=True)
    args = parser.parse_args()

    assert exists(args.input)

    # map_location='cpu' so the conversion works on GPU-less machines.
    model_checkpoint = torch.load(args.input, map_location='cpu')
    assert 'state_dict' in model_checkpoint

    replacements = _rename_state_dict_keys(model_checkpoint['state_dict'])
    torch.save(model_checkpoint, args.output)

    if len(replacements) > 0:
        print(f'Replacements ({len(replacements)}):')
        for old_key, new_key in replacements:
            print(f'  {old_key} --> {new_key}')


def _rename_state_dict_keys(state_dict, old_part='fc_pre_cls', new_part='fc_pre_angular'):
    """Rename in place all state-dict keys containing ``old_part``.

    Returns:
        list: (old_key, new_key) pairs that were renamed.
    """
    replacements = []
    # Iterate over a snapshot of the keys since the dict is mutated.
    for old_key in list(state_dict.keys()):
        if old_part in old_key:
            new_key = old_key.replace(old_part, new_part)
            state_dict[new_key] = state_dict.pop(old_key)
            replacements.append((old_key, new_key))
    return replacements
# Script entry point.
if __name__ == '__main__':
    main()
|
openvinotoolkit/mmaction2 | mmaction/models/necks/self_feature_regularizer.py | <filename>mmaction/models/necks/self_feature_regularizer.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init, kaiming_init
from ..registry import NECKS
from ...core.ops import conv_1x1x1_bn, HSwish
class ChannelReducer(nn.Module):
    """Averages the input tensor along a fixed dimension
    (by default: mean over the channel axis)."""

    def __init__(self, dim=1, keepdim=False):
        super().__init__()
        self.dim = dim
        self.keepdim = keepdim

    def forward(self, x):
        return x.mean(dim=self.dim, keepdim=self.keepdim)
@NECKS.register_module()
class SelfFeatureRegularizer(nn.Module):
    """ Based on the paper: https://arxiv.org/abs/2103.07350

    Self-distillation neck: during training, intermediate ("student")
    feature maps are regularized towards the deepest ("teacher") feature
    map, with per-level weights produced by a key/token attention.  The
    input features are passed through unchanged.
    """

    def __init__(self, in_channels, spatial_size=7, temporal_size=1, hidden_size=256, loss_weight=1.0):
        super().__init__()

        self.loss_weight = float(loss_weight)
        assert self.loss_weight > 0.0
        self.hidden_size = int(hidden_size)
        assert self.hidden_size > 0
        # Dot-product attention scale (1/sqrt(d)).
        self.scale = self.hidden_size ** (-0.5)

        # Normalize all size arguments to per-input lists of equal length.
        self.in_channels = in_channels if isinstance(in_channels, (tuple, list)) else [in_channels]
        num_inputs = len(self.in_channels)
        assert num_inputs > 1
        self.temporal_size = temporal_size if isinstance(temporal_size, (tuple, list)) else [temporal_size]
        assert len(self.temporal_size) == num_inputs
        spatial_size = spatial_size if isinstance(spatial_size, (tuple, list)) else [spatial_size]
        self.spatial_size = [ss if isinstance(ss, (tuple, list)) else (ss, ss) for ss in spatial_size]
        assert len(self.spatial_size) == num_inputs

        # Per-level key encoders: global average pool -> two 1x1x1 convs.
        self.keys = nn.ModuleList([
            nn.Sequential(
                nn.AvgPool3d((self.temporal_size[input_id],) + self.spatial_size[input_id], stride=1, padding=0),
                conv_1x1x1_bn(self.in_channels[input_id], self.hidden_size, as_list=False),
                HSwish(),
                conv_1x1x1_bn(self.hidden_size, self.hidden_size, as_list=False),
            )
            for input_id in range(num_inputs)
        ])

        # Learnable tokens: one per student level and one for the teacher.
        self.student_tokens = nn.Parameter(torch.Tensor(1, num_inputs - 1, self.hidden_size))
        self.student_tokens.data.normal_(std=0.02)
        self.teacher_token = nn.Parameter(torch.Tensor(1, 1, self.hidden_size))
        self.teacher_token.data.normal_(std=0.02)

        # Channel-mean reducers bring student/teacher maps to one channel;
        # the teacher map is then resized to each student's resolution.
        self.student_mappers = nn.ModuleList([
            ChannelReducer(dim=1, keepdim=True)
            for _ in range(num_inputs - 1)
        ])
        self.teacher_reducer = ChannelReducer(dim=1, keepdim=True)
        self.teacher_mappers = nn.ModuleList([
            nn.Upsample(size=(self.temporal_size[input_id],) + self.spatial_size[input_id],
                        mode='trilinear', align_corners=False)
            for input_id in range(num_inputs - 1)
        ])

    def init_weights(self):
        """Initialize conv/BN weights (tokens keep their N(0, 0.02) init)."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                kaiming_init(m)
            elif isinstance(m, nn.BatchNorm3d):
                constant_init(m, 1.0, 0.0)
            elif isinstance(m, nn.Parameter):
                m.data.normal_()

    def forward(self, inputs, return_extra_data=False):
        assert len(inputs) == len(self.keys) + 1
        # NOTE(review): the second-to-last feature map is dropped here so
        # that len(inputs) matches len(self.keys) — presumably an auxiliary
        # head output not used for distillation; confirm with callers.
        inputs = inputs[:-2] + [inputs[-1]]

        attention, student_features, teacher_features = None, None, None
        if self.training:
            # Per-level keys, flattened to (N, 1, hidden).
            keys = [
                key_module(input_feature).view(-1, 1, self.hidden_size)
                for input_feature, key_module in zip(inputs, self.keys)
            ]
            student_keys = torch.cat(keys[:-1], dim=1)
            teacher_key = keys[-1].view(-1, self.hidden_size, 1)

            # Attention over student levels = softmax of scaled
            # (key similarity + token similarity).
            key_prod = torch.matmul(student_keys, teacher_key).squeeze(2)
            token_prod = torch.sum(self.teacher_token * self.student_tokens, dim=-1)
            attention = F.softmax(self.scale * (key_prod + token_prod), dim=-1)

            # Single-channel student maps and teacher maps resized to match.
            student_features = [mapper(x_) for x_, mapper in zip(inputs[:-1], self.student_mappers)]
            teacher_feature = self.teacher_reducer(inputs[-1])
            teacher_features = [mapper(teacher_feature) for mapper in self.teacher_mappers]

        # returns the input unchanged
        if return_extra_data:
            return inputs, dict(attention=attention,
                                student_features=student_features,
                                teacher_features=teacher_features)
        else:
            return inputs

    def loss(self, attention=None, student_features=None, teacher_features=None, **kwargs):
        """Attention-weighted MSE between student and teacher features;
        returns an empty dict outside training (no extra data)."""
        losses = dict()
        if attention is None or student_features is None or teacher_features is None:
            return losses

        all_losses = [
            self._reg_loss(student_feature, teacher_feature).view(-1, 1)
            for student_feature, teacher_feature in zip(student_features, teacher_features)
        ]
        weighted_losses = attention * torch.cat(all_losses, dim=1)
        losses['loss/sfr'] = self.loss_weight * torch.mean(torch.sum(weighted_losses, dim=1))

        return losses

    @staticmethod
    def _reg_loss(x, y):
        # Per-sample mean squared error over all non-batch dimensions.
        sqr_diff = (x - y) ** 2
        return torch.mean(sqr_diff, dim=(1, 2, 3, 4))
|
openvinotoolkit/mmaction2 | mmaction/integration/nncf/compression_hooks.py | <filename>mmaction/integration/nncf/compression_hooks.py
from mmcv.runner.hooks.hook import HOOKS, Hook
from mmcv.runner.dist_utils import master_only
@HOOKS.register_module()
class CompressionHook(Hook):
    """Training hook that drives the NNCF compression scheduler and logs
    compression statistics at the start of training."""

    def __init__(self, compression_ctrl=None):
        # NNCF compression controller created by wrap_nncf_model.
        self.compression_ctrl = compression_ctrl

    def after_train_iter(self, runner):
        # Per-iteration scheduler update.
        self.compression_ctrl.scheduler.step()

    def after_train_epoch(self, runner):
        # Per-epoch scheduler update.
        self.compression_ctrl.scheduler.epoch_step()

    def before_run(self, runner):
        # Only the master process logs the statistics table.
        if runner.rank == 0:
            print_statistics(self.compression_ctrl.statistics(), runner.logger)
@HOOKS.register_module()
class CheckpointHookBeforeTraining(Hook):
    """Save a checkpoint once, before training starts.

    Args:
        save_optimizer (bool): Whether to save optimizer state_dict in the
            checkpoint. It is usually used for resuming experiments.
            Default: True.
        out_dir (str, optional): The directory to save checkpoints. If not
            specified, ``runner.work_dir`` will be used by default.
    """

    def __init__(self,
                 save_optimizer=True,
                 out_dir=None,
                 **kwargs):
        self.save_optimizer = save_optimizer
        self.out_dir = out_dir
        # Extra keyword arguments are forwarded to runner.save_checkpoint.
        self.args = kwargs

    @master_only
    def before_run(self, runner):
        # Fix: the message had an f-string prefix with no placeholders.
        runner.logger.info('Saving checkpoint before training')
        if not self.out_dir:
            self.out_dir = runner.work_dir
        runner.save_checkpoint(
            self.out_dir, filename_tmpl='before_training.pth', save_optimizer=self.save_optimizer, **self.args
        )
def print_statistics(stats, logger):
    """Log NNCF compression statistics.

    Texttable values are rendered with ``draw()`` when the optional
    texttable package is installed; everything else is logged as
    'key: value'.
    """
    try:
        from texttable import Texttable
    except ImportError:
        Texttable = None
    for key, val in stats:
        if Texttable is not None and isinstance(val, Texttable):
            logger.info(key)
            logger.info(val.draw())
        else:
            logger.info('{}: {}'.format(key, val))
|
openvinotoolkit/mmaction2 | mmaction/integration/nncf/compression.py | <reponame>openvinotoolkit/mmaction2
import os
import pathlib
import tempfile
from functools import partial
import mmcv
import torch
from mmaction.utils import get_root_logger
from .utils import (check_nncf_is_enabled, get_nncf_version, is_nncf_enabled,
load_checkpoint, no_nncf_trace)
def get_nncf_metadata():
    """
    The function returns NNCF metadata that should be stored into a checkpoint.
    The metadata is used to check in wrap_nncf_model if the checkpoint should be used
    to resume NNCF training or initialize NNCF fields of NNCF-wrapped model.

    Fails when NNCF is not installed (see check_nncf_is_enabled).
    """
    check_nncf_is_enabled()
    return dict(nncf_enable_compression=True, nncf_version=get_nncf_version())
def is_checkpoint_nncf(path):
    """Return True when the checkpoint at ``path`` was produced by
    training an NNCF-compressed model (see ``get_nncf_metadata``);
    False for missing files or checkpoints without the NNCF marker."""
    if not os.path.exists(path):
        return False
    meta = torch.load(path, map_location='cpu').get('meta', {})
    return bool(meta.get('nncf_enable_compression', False))
def get_nncf_config_from_meta(path):
    """
    The function uses metadata stored in a checkpoint to restore the nncf
    part of the model config.
    """
    logger = get_root_logger()
    checkpoint = torch.load(path, map_location='cpu')
    meta = checkpoint.get('meta', {})

    nncf_enable_compression = meta.get('nncf_enable_compression', False)
    assert nncf_enable_compression, \
        'get_nncf_config_from_meta should be run for NNCF-compressed checkpoints only'

    # The full training config text is stored in the checkpoint meta;
    # write it to a temporary .py file so mmcv can parse it back.
    config_text = meta['config']
    with tempfile.NamedTemporaryFile(prefix='config_', suffix='.py',
                                     mode='w', delete=False) as f_tmp:
        f_tmp.write(config_text)
        tmp_name = f_tmp.name
    cfg = mmcv.Config.fromfile(tmp_name)
    os.unlink(tmp_name)

    nncf_config = cfg.get('nncf_config')
    assert isinstance(nncf_config, dict), (
        f'Wrong nncf_config part of the config saved in the metainfo'
        f' of the snapshot {path}:'
        f' nncf_config={nncf_config}')

    nncf_config_part = {
        'nncf_config': nncf_config,
        'find_unused_parameters': True
    }
    # Redirect NNCF logs to a fresh temporary directory so the restored run
    # does not write into the original training run's log dir.
    if nncf_config_part['nncf_config'].get('log_dir'):
        log_dir = tempfile.mkdtemp(prefix='nncf_output_')
        nncf_config_part['nncf_config']['log_dir'] = log_dir

    logger.info(f'Read nncf config from meta nncf_config_part={nncf_config_part}')
    return nncf_config_part
def wrap_nncf_model(model,
                    cfg,
                    data_loader_for_init=None,
                    get_fake_input_func=None,
                    export=False):
    """
    The function wraps mmaction model by NNCF
    Note that the parameter `get_fake_input_func` should be the function `get_fake_input`
    -- cannot import this function here explicitly

    Returns:
        tuple: (compression_ctrl, wrapped_model).
    """
    check_nncf_is_enabled()

    from nncf.config import NNCFConfig
    from nncf.torch import (create_compressed_model,
                            register_default_init_args)
    from nncf.torch.dynamic_graph.io_handling import nncf_model_input
    from nncf.torch.dynamic_graph.trace_tensor import TracedTensor
    from nncf.torch.initialization import DefaultInitializingDataLoader

    class MMInitializeDataLoader(DefaultInitializingDataLoader):
        # Adapter for NNCF range/BN initialization: mmaction batches are
        # dicts passed to the model as keyword arguments.
        def get_inputs(self, dataloader_output):
            return (), dataloader_output

    pathlib.Path(cfg.work_dir).mkdir(parents=True, exist_ok=True)
    nncf_config = NNCFConfig(cfg.nncf_config)
    logger = get_root_logger(cfg.log_level)

    if data_loader_for_init:
        wrapped_loader = MMInitializeDataLoader(data_loader_for_init)
        nncf_config = register_default_init_args(nncf_config, wrapped_loader, device=next(model.parameters()).device)

    # Resolve which checkpoint (if any) provides the NNCF compression state.
    if cfg.get('resume_from'):
        checkpoint_path = cfg.get('resume_from')
        assert is_checkpoint_nncf(checkpoint_path), (
            'It is possible to resume training with NNCF compression from NNCF checkpoints only. '
            'Use "load_from" with non-compressed model for further compression by NNCF.')
    elif cfg.get('load_from'):
        checkpoint_path = cfg.get('load_from')
        if not is_checkpoint_nncf(checkpoint_path):
            checkpoint_path = None
            logger.info('Received non-NNCF checkpoint to start training '
                        '-- initialization of NNCF fields will be done')
    else:
        checkpoint_path = None

    if not data_loader_for_init and not checkpoint_path:
        raise RuntimeError('Either data_loader_for_init or NNCF pre-trained '
                           'model checkpoint should be set')

    if checkpoint_path:
        logger.info(f'Loading NNCF checkpoint from {checkpoint_path}')
        logger.info(
            'Please, note that this first loading is made before addition of '
            'NNCF FakeQuantize nodes to the model, so there may be some '
            'warnings on unexpected keys')
        resuming_state_dict = load_checkpoint(model, checkpoint_path)
        logger.info(f'Loaded NNCF checkpoint from {checkpoint_path}')
    else:
        resuming_state_dict = None

    if "nncf_compress_postprocessing" in cfg:
        # NB: This parameter is used to choose if we should try to make NNCF compression
        # for a whole model graph including postprocessing (`nncf_compress_postprocessing=True`),
        # or make NNCF compression of the part of the model without postprocessing
        # (`nncf_compress_postprocessing=False`).
        # Our primary goal is to make NNCF compression of such big part of the model as
        # possible, so `nncf_compress_postprocessing=True` is our primary choice, whereas
        # `nncf_compress_postprocessing=False` is our fallback decision.
        # When we manage to enable NNCF compression for sufficiently many models,
        # we should keep one choice only.
        nncf_compress_postprocessing = cfg.get('nncf_compress_postprocessing')
        logger.debug('set should_compress_postprocessing='f'{nncf_compress_postprocessing}')
    else:
        nncf_compress_postprocessing = True
    # NOTE(review): nncf_compress_postprocessing is computed but not used
    # below in this function.

    def _get_fake_data_for_forward(cfg, nncf_config, get_fake_input_func):
        # Build a dummy input batch matching nncf_config's input_info so
        # the model graph can be traced without real data.
        input_size = nncf_config.get("input_info").get('sample_size')
        assert get_fake_input_func is not None
        assert len(input_size) == 4 and input_size[0] == 1
        H, W, C = input_size[2], input_size[3], input_size[1]
        device = next(model.parameters()).device
        with no_nncf_trace():
            return get_fake_input_func(cfg, orig_img_shape=tuple([H, W, C]), device=device)

    def dummy_forward(model):
        # Forward pass on fake data, used by NNCF to trace the model graph.
        fake_data = _get_fake_data_for_forward(cfg, nncf_config, get_fake_input_func)
        img = fake_data["imgs"]
        img = nncf_model_input(img)
        if export:
            img, _, _ = model.reshape_input(imgs=img)
            model(imgs=img)
        else:
            model(imgs=img, return_loss=False)

    def wrap_inputs(args, kwargs):
        # Mark the image tensor(s) as NNCF model inputs (idempotent: skips
        # tensors that are already TracedTensor).
        # during dummy_forward
        if not len(kwargs):
            if not isinstance(args[0][0], TracedTensor):
                args[0][0] = nncf_model_input(args[0][0])
            return args, kwargs
        # during building original graph
        if not kwargs.get('return_loss') and kwargs.get('forward_export'):
            return args, kwargs
        # during model's forward
        assert 'imgs' in kwargs, 'During model forward imgs must be in kwargs'
        img = kwargs['imgs']
        if isinstance(img, list):
            assert len(img) == 1, 'Input list must have a length 1'
            assert torch.is_tensor(img[0]), 'Input for a model must be a tensor'
            if not isinstance(img[0], TracedTensor):
                img[0] = nncf_model_input(img[0])
        else:
            assert torch.is_tensor(img), 'Input for a model must be a tensor'
            if not isinstance(img, TracedTensor):
                img = nncf_model_input(img)
        kwargs['imgs'] = img
        return args, kwargs

    model.dummy_forward_fn = dummy_forward

    if 'log_dir' in nncf_config:
        os.makedirs(nncf_config['log_dir'], exist_ok=True)

    compression_ctrl, model = create_compressed_model(model,
                                                      nncf_config,
                                                      dummy_forward_fn=dummy_forward,
                                                      wrap_inputs_fn=wrap_inputs,
                                                      compression_state=resuming_state_dict)

    return compression_ctrl, model
def change_export_func_first_conv(model):
    """ To avoid saturation issue
    At the moment works only for mobilenet
    """
    def run_hacked_export_quantization(self, x):
        # Replacement for the quantizer's run_export_quantization: in
        # FAKE_QUANTIZE mode it halves the input and doubles the output
        # range, mitigating the int8 saturation issue on AVX2/AVX512.
        from nncf.quantization.layers import (
            ExportQuantizeToFakeQuantize, ExportQuantizeToONNXQuantDequant,
            QuantizerExportMode, get_scale_zp_from_input_low_input_high)
        from nncf.utils import no_jit_trace
        with no_jit_trace():
            input_range = abs(self.scale) + self.eps
            input_low = input_range * self.level_low / self.level_high
            input_high = input_range
            if self._export_mode == QuantizerExportMode.ONNX_QUANTIZE_DEQUANTIZE_PAIRS:
                y_scale, y_zero_point = get_scale_zp_from_input_low_input_high(self.level_low,
                                                                               self.level_high,
                                                                               input_low,
                                                                               input_high)
        if self._export_mode == QuantizerExportMode.ONNX_QUANTIZE_DEQUANTIZE_PAIRS:
            return ExportQuantizeToONNXQuantDequant.apply(x, y_scale, y_zero_point)
        if self._export_mode == QuantizerExportMode.FAKE_QUANTIZE:
            x = x / 2.0
            return ExportQuantizeToFakeQuantize.apply(x, self.levels, input_low, input_high, input_low * 2,
                                                      input_high * 2)
        raise RuntimeError

    logger = get_root_logger()
    orig_model = model.get_nncf_wrapped_model()
    try:
        # pylint: disable=protected-access
        # Mobilenet-specific path to the first conv's quantizer pre-op.
        module_ = orig_model.backbone.features.init_block.conv.pre_ops._modules['0']
    except (AttributeError, KeyError) as e:
        # Not a mobilenet-like backbone — leave the model untouched.
        logger.info(f'Cannot change an export function for the first Conv due {e}')
        return model
    module_.op.run_export_quantization = partial(run_hacked_export_quantization, module_.op)
    logger.info('Change an export function for the first Conv to avoid saturation issue on AVX2, AVX512')
    return model
def get_uncompressed_model(module):
    """Unwrap an NNCF-wrapped model; return the module unchanged when NNCF
    is unavailable or the module is not wrapped."""
    if not is_nncf_enabled():
        return module
    from nncf.torch.nncf_network import NNCFNetwork
    return module.get_nncf_wrapped_model() if isinstance(module, NNCFNetwork) else module
|
openvinotoolkit/mmaction2 | tools/data/copy_data.py | <reponame>openvinotoolkit/mmaction2
from os import makedirs
from os.path import exists, join, isfile
from shutil import copyfile, copytree
from argparse import ArgumentParser
from tqdm import tqdm
def load_sources(file_path, root_dir):
    """Parse an annotation file and resolve the listed data paths.

    Each valid annotation line has exactly 7 space-separated fields, the
    first being a path relative to ``root_dir``. Malformed lines are
    silently skipped.

    Args:
        file_path: Path to the annotation file.
        root_dir: Directory the relative paths are resolved against.

    Returns:
        List of ``(absolute_path, relative_path)`` tuples.

    Raises:
        FileNotFoundError: If a referenced path does not exist on disk.
    """
    sources = []
    with open(file_path) as input_stream:
        for line in input_stream:
            parts = line.strip().split(' ')
            if len(parts) != 7:
                # Skip headers / malformed records rather than failing hard.
                continue

            rel_path = parts[0]
            abs_path = join(root_dir, rel_path)
            # Explicit exception instead of `assert`: validation must not
            # disappear when Python runs with the -O flag.
            if not exists(abs_path):
                raise FileNotFoundError(f'Annotated path does not exist: {abs_path}')

            sources.append((abs_path, rel_path))

    return sources
def copy_data(sources, out_dir):
    """Copy every (src, rel) record into out_dir, preserving relative paths."""
    for source_path, relative_path in tqdm(sources, leave=False):
        destination_path = join(out_dir, relative_path)
        copier = copyfile if isfile(source_path) else copytree
        copier(source_path, destination_path)
def main():
    """CLI entry point: copy dataset records listed in an annotation file."""
    parser = ArgumentParser()
    parser.add_argument('--annot', '-a', type=str, required=True)
    parser.add_argument('--input_dir', '-i', type=str, required=True)
    parser.add_argument('--output_dir', '-o', type=str, required=True)
    args = parser.parse_args()

    # Report bad paths through the parser instead of bare asserts: asserts
    # are stripped under `python -O` and give the user no usable message.
    if not exists(args.annot):
        parser.error(f'Annotation file not found: {args.annot}')
    if not exists(args.input_dir):
        parser.error(f'Input directory not found: {args.input_dir}')
    if not exists(args.output_dir):
        makedirs(args.output_dir)

    sources = load_sources(args.annot, args.input_dir)
    print(f'Loaded {len(sources)} records')

    copy_data(sources, args.output_dir)
    print('Done')


if __name__ == '__main__':
    main()
|
openvinotoolkit/mmaction2 | mmaction/models/params/freeze_layers_hook.py | <reponame>openvinotoolkit/mmaction2<filename>mmaction/models/params/freeze_layers_hook.py<gh_stars>1-10
from mmcv.runner.hooks import Hook
import torch
from ..registry import PARAMS_MANAGERS
@PARAMS_MANAGERS.register_module()
class FreezeLayers(Hook):
    """Hook that keeps only selected layers trainable for the first epochs.

    For the first `epochs` training epochs only modules whose qualified name
    contains one of the `open_layers` substrings are trained; afterwards the
    whole model is unfrozen.
    """

    # dtypes that can never require gradients (e.g. quantized/index tensors)
    _NON_FLOAT_DTYPES = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64, torch.bool)

    def __init__(self, epochs=0, open_layers=None, **kwargs):
        super(FreezeLayers, self).__init__(**kwargs)

        self.epochs = epochs
        self.open_layers = open_layers
        if isinstance(self.open_layers, str):
            self.open_layers = [self.open_layers]

        self.enable = self.epochs > 0 and self.open_layers is not None and len(self.open_layers) > 0

    def before_train_epoch(self, runner):
        cur_epoch = runner.epoch
        model = runner.model.module
        if self.enable and cur_epoch < self.epochs:
            runner.logger.info('* Only train {} (epoch: {}/{})'.format(self.open_layers, cur_epoch + 1, self.epochs))
            self.open_specified_layers(model, self.open_layers)
        else:
            self.open_all_layers(model)

    @staticmethod
    def _set_requires_grad(module, requires_grad):
        """Toggle requires_grad on all float parameters of `module`."""
        for p in module.parameters():
            if p.dtype in FreezeLayers._NON_FLOAT_DTYPES:
                # only Tensors of floating point dtype can require gradients
                continue
            p.requires_grad = requires_grad

    @staticmethod
    def open_all_layers(model):
        """Set the whole model to train mode with gradients enabled."""
        model.train()
        FreezeLayers._set_requires_grad(model, True)

    @staticmethod
    def open_specified_layers(model, open_layers):
        """Train only modules whose name matches one of `open_layers`.

        Uses the same non-float dtype guard as `open_all_layers` so that
        integer parameters (if any) do not raise on `requires_grad = True`.
        """
        for name, module in model.named_modules():
            if any(open_substring in name for open_substring in open_layers):
                module.train()
                FreezeLayers._set_requires_grad(module, True)
            else:
                module.eval()
                FreezeLayers._set_requires_grad(module, False)
openvinotoolkit/mmaction2 | mmaction/models/necks/video_aligner.py | <filename>mmaction/models/necks/video_aligner.py<gh_stars>1-10
import torch
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from ..registry import NECKS
from ...core.ops import conv_1x1x1_bn, HSwish, Normalize, soft_dtw
@NECKS.register_module()
class VideoAligner(nn.Module):
    """
    Implementation of the paper: https://arxiv.org/abs/2103.17260

    Neck that adds a temporal-embedding branch on top of backbone features
    and a soft-DTW alignment loss between clips of the same class.  At
    inference time `forward` passes its inputs through unchanged.
    """

    # supported ways of fusing per-input hidden features
    merge_modes = ['concat', 'sum']

    def __init__(self, in_channels, spatial_size=7, temporal_size=1, hidden_size=512, embedding_size=256,
                 smoothness=0.1, margin=2, window_size=1, reg_weight=1.0, merge_mode='concat'):
        super().__init__()

        # gamma of the soft-DTW relaxation used in `loss`
        self.smoothness = float(smoothness)
        assert self.smoothness > 0.0
        # margin of the contrastive IDM regularizer
        self.margin = float(margin)
        assert self.margin >= 0.0
        # temporal neighbourhood radius treated as "close" by the regularizer
        self.window_size = int(window_size)
        assert self.window_size > 0
        # weight of the regularization term relative to the alignment loss
        self.reg_weight = float(reg_weight)
        assert self.reg_weight > 0.0
        self.hidden_size = int(hidden_size)
        assert self.hidden_size > 0
        self.embd_size = int(embedding_size)
        assert self.embd_size > 0
        self.merge_mode = merge_mode
        assert self.merge_mode in self.merge_modes

        # normalize scalar/sequence arguments to aligned per-input lists
        self.in_channels = in_channels if isinstance(in_channels, (tuple, list)) else [in_channels]
        self.temporal_size = temporal_size if isinstance(temporal_size, (tuple, list)) else [temporal_size]
        assert len(self.in_channels) == len(self.temporal_size)

        spatial_size = spatial_size if isinstance(spatial_size, (tuple, list)) else [spatial_size]
        self.spatial_size = [ss if isinstance(ss, (tuple, list)) else (ss, ss) for ss in spatial_size]
        assert len(self.spatial_size) == len(self.temporal_size)

        # every input is temporally upsampled to the longest temporal length
        self.trg_temporal_size = max(self.temporal_size)

        # per-input head: spatial average pooling -> 1x1x1 conv+BN ->
        # optional temporal upsampling -> HSwish activation
        self.mapper = nn.ModuleList([
            nn.Sequential(
                nn.AvgPool3d((1,) + self.spatial_size[input_id], stride=1, padding=0),
                conv_1x1x1_bn(self.in_channels[input_id], self.hidden_size, as_list=False),
                nn.Upsample(size=(self.trg_temporal_size, 1, 1), mode='trilinear', align_corners=False)
                if self.temporal_size[input_id] < self.trg_temporal_size else nn.Sequential(),
                HSwish()
            )
            for input_id in range(len(self.in_channels))
        ])

        merged_channels = self.hidden_size
        if self.merge_mode == 'concat':
            merged_channels *= len(self.in_channels)

        # projection into the L2-normalized alignment embedding space
        self.embedding = nn.Sequential(
            conv_1x1x1_bn(merged_channels, self.embd_size, as_list=False),
            Normalize(dim=1, p=2)
        )

    def init_weights(self):
        """Standard Kaiming/constant initialization for convs and BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                kaiming_init(m)
            elif isinstance(m, nn.BatchNorm3d):
                constant_init(m, 1.0, 0.0)
            elif isinstance(m, nn.Parameter):
                m.data.normal_()

    def forward(self, inputs, return_extra_data=False):
        """Pass-through forward.

        In training mode additionally computes the temporal embedding used by
        `loss`; the embedding is returned via `return_extra_data`.
        """
        temporal_embd = None
        if self.training:
            assert len(inputs) >= len(self.mapper)

            internal_outs = [
                self.mapper[input_id](inputs[input_id])
                for input_id in range(len(self.mapper))
            ]

            if self.merge_mode == 'concat':
                y = torch.cat(internal_outs, dim=1)
            else:
                y = sum(internal_outs)

            temporal_embd = self.embedding(y)

        # returns the input unchanged
        if return_extra_data:
            return inputs, dict(temporal_embd=temporal_embd)
        else:
            return inputs

    def loss(self, temporal_embd=None, labels=None, dataset_id=None, num_clips=1, **kwargs):
        """Soft-DTW alignment loss between randomly paired same-class clips.

        Returns a dict with 'loss/align' (soft-DTW over the pair distance
        matrix) and 'loss/align_reg' (contrastive IDM self-regularizer), or
        an empty dict when no embedding/labels are provided.
        """
        losses = dict()
        if temporal_embd is None or labels is None:
            return losses

        with torch.no_grad():
            # Build a boolean matrix of valid (anchor, partner) pairs:
            # upper-triangular, same class, optionally same dataset and
            # belonging to different instances when num_clips > 1.
            batch_size = temporal_embd.size(0)
            batch_range = torch.arange(batch_size, device=labels.device)
            top_diagonal_pairs = batch_range.view(-1, 1) < batch_range.view(1, -1)
            same_class_pairs = labels.view(-1, 1) == labels.view(1, -1)
            valid_pairs = same_class_pairs * top_diagonal_pairs

            if dataset_id is not None:
                same_dataset_pairs = dataset_id.view(-1, 1) == dataset_id.view(1, -1)
                valid_pairs = same_dataset_pairs * valid_pairs

            if num_clips > 1:
                # assumes clips of one instance are contiguous in the batch
                # (stride num_clips) - TODO(review): confirm against sampler
                instances_range = torch.arange(0, batch_size, num_clips, device=labels.device)
                instances_batch_range = instances_range.view(-1, 1).repeat(1, 2)
                different_instance_pairs = batch_range.view(-1, 1) < instances_batch_range.view(1, -1)
                valid_pairs = different_instance_pairs * valid_pairs

            valid_samples_mask = torch.any(valid_pairs, dim=-1)
            num_valid_pairs = torch.sum(valid_samples_mask, dim=0).item()
            if num_valid_pairs == 0:
                # no usable pairs in this batch - emit zero losses
                losses['loss/align'] = torch.zeros([], dtype=temporal_embd.dtype, device=temporal_embd.device)
                losses['loss/align_reg'] = torch.zeros([], dtype=temporal_embd.dtype, device=temporal_embd.device)
                return losses

            # pick one random valid partner per anchor
            valid_pairs_subset = valid_pairs[valid_samples_mask].float()
            rand_weights = 1.0 + torch.rand_like(valid_pairs_subset)
            valid_pairs_subset_weights = valid_pairs_subset * rand_weights
            valid_pairs_ids = torch.argmax(valid_pairs_subset_weights, dim=-1)

        temporal_embd = temporal_embd.view(-1, self.embd_size, self.trg_temporal_size)
        left_embd = temporal_embd[valid_samples_mask]
        right_embd = temporal_embd[valid_pairs_ids]

        # cosine distance matrices (embeddings are L2-normalized)
        pair_distances = (1.0 - torch.matmul(left_embd.transpose(1, 2), right_embd)).clamp_min(0.0)
        left_distances = (1.0 - torch.matmul(left_embd.transpose(1, 2), left_embd)).clamp_min(0.0)
        right_distances = (1.0 - torch.matmul(right_embd.transpose(1, 2), right_embd)).clamp_min(0.0)

        # small offset keeps the soft-DTW cost strictly positive
        main_losses = soft_dtw(1e-2 + pair_distances, self.smoothness, 0)
        losses['loss/align'] = main_losses.mean()

        left_reg_loss = self._contrastive_idm_loss(left_distances, self.margin, self.window_size)
        right_reg_loss = self._contrastive_idm_loss(right_distances, self.margin, self.window_size)
        reg_loss = (self.reg_weight / float(num_valid_pairs)) * (left_reg_loss + right_reg_loss)
        losses['loss/align_reg'] = reg_loss

        return losses

    # @staticmethod
    # def compute_dtw(cost_matrix):
    #     B = cost_matrix.shape[0]
    #     N = cost_matrix.shape[1]
    #     M = cost_matrix.shape[2]
    #
    #     directions = np.full([B, N + 1, M + 1], -1)
    #     R = np.full([B, N + 1, M + 1], np.inf)
    #     R[:, 0, 0] = 0
    #
    #     traces = []
    #     for b in range(B):
    #         for j in range(1, M + 1):
    #             for i in range(1, N + 1):
    #                 r0 = R[b, i - 1, j - 1]
    #                 r1 = R[b, i - 1, j]
    #                 r2 = R[b, i, j - 1]
    #                 r_values = np.array([r0, r1, r2])
    #
    #                 R[b, i, j] = cost_matrix[b, i - 1, j - 1] + np.min(r_values)
    #                 directions[b, i, j] = np.argmin(r_values)
    #
    #         i, j = N, M
    #         trace = [(i, j)]
    #         while i > 0 or j > 0:
    #             direction = directions[b, i, j]
    #             if direction == 0:
    #                 i -= 1
    #                 j -= 1
    #             elif direction == 1:
    #                 i -= 1
    #             else:
    #                 j -= 1
    #
    #             trace.append((i, j))
    #
    #         traces.append(trace)
    #
    #     return traces

    @staticmethod
    def _contrastive_idm_loss(dist_matrix, margin, window_size):
        """Contrastive IDM regularizer over a self-distance matrix.

        Frames within `window_size` of each other are pulled together
        (inverse-quadratic weights); frames further apart are pushed beyond
        `margin` (quadratic weights).
        """
        with torch.no_grad():
            temporal_size = dist_matrix.size(1)
            temporal_range = torch.arange(temporal_size, device=dist_matrix.device)
            range_diff = temporal_range.view(-1, 1) - temporal_range.view(1, -1)
            # True where the temporal gap exceeds the window ("far" pairs)
            mode_mask = torch.abs(range_diff) > window_size

            outer_weights = range_diff ** 2 + 1.0
            inner_weights = torch.reciprocal(outer_weights)

        inner_losses = inner_weights.unsqueeze(0) * dist_matrix
        outer_losses = outer_weights.unsqueeze(0) * (margin - dist_matrix).clamp_min(0.0)
        losses = torch.where(mode_mask.unsqueeze(0), outer_losses, inner_losses)

        weight = 1.0 / float(temporal_size * temporal_size)
        loss = weight * torch.sum(losses)

        return loss
|
openvinotoolkit/mmaction2 | tools/data/dump_skeletons.py | import json
import math
import argparse
from os import makedirs, listdir
from os.path import exists, join, isfile, isdir
from collections import OrderedDict, defaultdict
from operator import itemgetter
import cv2
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
TRG_KPT_IDS = [4, 7]
NET_STRIDE = 8
NET_UPSAMPLE_RATIO = 4
def conv(in_channels, out_channels, kernel_size=3, padding=1, bn=True, dilation=1, stride=1, relu=True, bias=True):
    """Build a Conv2d block, optionally followed by BatchNorm2d and ReLU."""
    layers = [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)]
    if bn:
        layers += [nn.BatchNorm2d(out_channels)]
    if relu:
        layers += [nn.ReLU(inplace=True)]
    return nn.Sequential(*layers)
def conv_dw(in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1):
    """Depthwise-separable block: depthwise conv + BN + ReLU, then pointwise conv + BN + ReLU."""
    depthwise = [
        nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding,
                  dilation=dilation, groups=in_channels, bias=False),
        nn.BatchNorm2d(in_channels),
        nn.ReLU(inplace=True),
    ]
    pointwise = [
        nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*(depthwise + pointwise))
def conv_dw_no_bn(in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1):
    """Depthwise-separable block without batch norm, using ELU activations."""
    depthwise = [
        nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding,
                  dilation=dilation, groups=in_channels, bias=False),
        nn.ELU(inplace=True),
    ]
    pointwise = [
        nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
        nn.ELU(inplace=True),
    ]
    return nn.Sequential(*(depthwise + pointwise))
class Cpm(nn.Module):
    """Context module: 1x1 align conv, residual depthwise trunk, 3x3 fuse conv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.align = conv(in_channels, out_channels, kernel_size=1, padding=0, bn=False)
        self.trunk = nn.Sequential(
            *[conv_dw_no_bn(out_channels, out_channels) for _ in range(3)]
        )
        self.conv = conv(out_channels, out_channels, bn=False)

    def forward(self, x):
        aligned = self.align(x)
        # residual connection around the depthwise trunk
        return self.conv(aligned + self.trunk(aligned))
class InitialStage(nn.Module):
    """First prediction stage: shared trunk feeding heatmap and PAF heads."""

    def __init__(self, num_channels, num_heatmaps, num_pafs):
        super().__init__()
        self.trunk = nn.Sequential(
            *[conv(num_channels, num_channels, bn=False) for _ in range(3)]
        )
        self.heatmaps = self._make_head(num_channels, num_heatmaps)
        self.pafs = self._make_head(num_channels, num_pafs)

    @staticmethod
    def _make_head(num_channels, num_outputs):
        # 1x1 bottleneck followed by a linear 1x1 projection
        return nn.Sequential(
            conv(num_channels, 512, kernel_size=1, padding=0, bn=False),
            conv(512, num_outputs, kernel_size=1, padding=0, bn=False, relu=False)
        )

    def forward(self, x):
        shared = self.trunk(x)
        return [self.heatmaps(shared), self.pafs(shared)]
class RefinementStageBlock(nn.Module):
    """Residual block used inside refinement stages."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.initial = conv(in_channels, out_channels, kernel_size=1, padding=0, bn=False)
        self.trunk = nn.Sequential(
            conv(out_channels, out_channels),
            conv(out_channels, out_channels, dilation=2, padding=2)
        )

    def forward(self, x):
        skip = self.initial(x)
        return skip + self.trunk(skip)
class RefinementStage(nn.Module):
    """Refinement stage: five residual blocks plus heatmap and PAF heads."""

    def __init__(self, in_channels, out_channels, num_heatmaps, num_pafs):
        super().__init__()
        blocks = [RefinementStageBlock(in_channels, out_channels)]
        blocks += [RefinementStageBlock(out_channels, out_channels) for _ in range(4)]
        self.trunk = nn.Sequential(*blocks)
        self.heatmaps = nn.Sequential(
            conv(out_channels, out_channels, kernel_size=1, padding=0, bn=False),
            conv(out_channels, num_heatmaps, kernel_size=1, padding=0, bn=False, relu=False)
        )
        self.pafs = nn.Sequential(
            conv(out_channels, out_channels, kernel_size=1, padding=0, bn=False),
            conv(out_channels, num_pafs, kernel_size=1, padding=0, bn=False, relu=False)
        )

    def forward(self, x):
        shared = self.trunk(x)
        return [self.heatmaps(shared), self.pafs(shared)]
class PoseEstimationWithMobileNet(nn.Module):
    """Lightweight OpenPose: MobileNet backbone, CPM, initial + refinement stages."""

    def __init__(self, num_refinement_stages=1, num_channels=128, num_heatmaps=19, num_pafs=38):
        super().__init__()
        self.model = nn.Sequential(
            conv(3, 32, stride=2, bias=False),
            conv_dw(32, 64),
            conv_dw(64, 128, stride=2),
            conv_dw(128, 128),
            conv_dw(128, 256, stride=2),
            conv_dw(256, 256),
            conv_dw(256, 512),   # conv4_2
            conv_dw(512, 512, dilation=2, padding=2),
            conv_dw(512, 512),
            conv_dw(512, 512),
            conv_dw(512, 512),
            conv_dw(512, 512)    # conv5_5
        )
        self.cpm = Cpm(512, num_channels)
        self.initial_stage = InitialStage(num_channels, num_heatmaps, num_pafs)
        # each refinement stage consumes backbone features + previous outputs
        refinement_in = num_channels + num_heatmaps + num_pafs
        self.refinement_stages = nn.ModuleList(
            RefinementStage(refinement_in, num_channels, num_heatmaps, num_pafs)
            for _ in range(num_refinement_stages)
        )

    def forward(self, x):
        features = self.cpm(self.model(x))
        outputs = self.initial_stage(features)
        for stage in self.refinement_stages:
            stacked = torch.cat([features, outputs[-2], outputs[-1]], dim=1)
            outputs.extend(stage(stacked))
        return outputs
def create_network(weights, use_cuda):
    """Load a PoseEstimationWithMobileNet checkpoint and prepare it for inference."""
    net = PoseEstimationWithMobileNet()
    load_state(net, torch.load(weights, map_location='cpu'))

    if use_cuda:
        net = net.cuda()
        net = torch.nn.DataParallel(net)
        torch.backends.cudnn.benchmark = False

    net.eval()
    return net
def load_state(net, checkpoint):
    """Copy size-matching parameters from checkpoint['state_dict'] into net.

    Parameters missing from the checkpoint (or with a different shape) keep
    their current values; a warning is printed for each of them.
    """
    source_state = checkpoint['state_dict']
    merged_state = OrderedDict()
    for key, current_value in net.state_dict().items():
        candidate = source_state.get(key)
        if candidate is not None and candidate.size() == current_value.size():
            merged_state[key] = candidate
        else:
            merged_state[key] = current_value
            print('[WARNING] Not found pre-trained parameters for {}'.format(key))
    net.load_state_dict(merged_state)
def normalize(img, img_mean, img_scale):
    """Shift the image by img_mean and scale it by img_scale, as float input."""
    as_float = np.array(img, dtype=np.float32)
    return (as_float - img_mean) * img_scale
def pad_width(img, stride, pad_value, min_dims):
    """Pad `img` so its sides are stride-multiples of at least `min_dims`.

    Returns the padded image and the applied pad as [top, left, bottom, right].
    Note: `min_dims` is modified in place.
    """
    h, w, _ = img.shape
    h = min(min_dims[0], h)
    min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
    min_dims[1] = max(min_dims[1], w)
    min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride

    top = int(math.floor((min_dims[0] - h) / 2.0))
    left = int(math.floor((min_dims[1] - w) / 2.0))
    bottom = int(min_dims[0] - h - top)
    right = int(min_dims[1] - w - left)
    pad = [top, left, bottom, right]

    padded_img = cv2.copyMakeBorder(img, top, bottom, left, right,
                                    cv2.BORDER_CONSTANT, value=pad_value)
    return padded_img, pad
def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, use_cuda,
               pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256):
    """Run the pose network on one image; return heatmaps, pafs, scale and pad."""
    height, width, _ = img.shape
    scale = net_input_height_size / height

    scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    scaled_img = normalize(scaled_img, img_mean, img_scale)
    min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
    padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)

    tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
    if use_cuda:
        tensor_img = tensor_img.cuda()

    stages_output = net(tensor_img)

    def _postprocess(stage_out):
        # CHW tensor -> HWC numpy map, upsampled back towards input resolution
        raw_map = np.transpose(stage_out.squeeze().cpu().data.numpy(), (1, 2, 0))
        return cv2.resize(raw_map, (0, 0), fx=upsample_ratio, fy=upsample_ratio,
                          interpolation=cv2.INTER_CUBIC)

    heatmaps = _postprocess(stages_output[-2])
    pafs = _postprocess(stages_output[-1])

    return heatmaps, pafs, scale, pad
def extract_keypoints(heatmap, min_conf, threshold):
    """Find local maxima of a single-channel heatmap and apply distance NMS.

    Args:
        heatmap: 2D float map; values below `min_conf` are zeroed in place.
        min_conf: Minimum confidence for a candidate peak.
        threshold: Minimum pixel distance between two kept peaks.

    Returns:
        List of (x, y, confidence) tuples, sorted by x.
    """
    heatmap[heatmap < min_conf] = 0.0
    heatmap_with_borders = np.pad(heatmap, [(2, 2), (2, 2)], mode='constant')
    heatmap_center = heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 1:heatmap_with_borders.shape[1]-1]
    heatmap_left = heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 2:heatmap_with_borders.shape[1]]
    heatmap_right = heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 0:heatmap_with_borders.shape[1]-2]
    heatmap_up = heatmap_with_borders[2:heatmap_with_borders.shape[0], 1:heatmap_with_borders.shape[1]-1]
    heatmap_down = heatmap_with_borders[0:heatmap_with_borders.shape[0]-2, 1:heatmap_with_borders.shape[1]-1]

    # strict local maxima in the 4-neighbourhood
    heatmap_peaks = (heatmap_center > heatmap_left) &\
                    (heatmap_center > heatmap_right) &\
                    (heatmap_center > heatmap_up) &\
                    (heatmap_center > heatmap_down)
    heatmap_peaks = heatmap_peaks[1:heatmap_center.shape[0]-1, 1:heatmap_center.shape[1]-1]

    keypoints = list(zip(np.nonzero(heatmap_peaks)[1], np.nonzero(heatmap_peaks)[0]))
    keypoints = sorted(keypoints, key=itemgetter(0))

    out_keypoints = []
    # builtin `bool`: the `np.bool` alias was removed in NumPy 1.24
    suppressed = np.zeros(len(keypoints), bool)
    for i in range(len(keypoints)):
        if suppressed[i]:
            continue
        for j in range(i+1, len(keypoints)):
            # accumulate with |= so a later, farther candidate cannot
            # un-suppress a point already rejected by a closer one
            suppressed[j] |= math.sqrt((keypoints[i][0] - keypoints[j][0]) ** 2 +
                                       (keypoints[i][1] - keypoints[j][1]) ** 2) < threshold
        out_keypoints.append((keypoints[i][0], keypoints[i][1], heatmap[keypoints[i][1], keypoints[i][0]]))
    return out_keypoints
def scale_keypoints(kpts, stride, upsample_ratio, pad, scale):
    """Map (x, y, conf) keypoints from net-output coordinates back to the original image."""
    return [
        ((x * stride / upsample_ratio - pad[1]) / scale,
         (y * stride / upsample_ratio - pad[0]) / scale,
         conf)
        for x, y, conf in kpts
    ]
class Track:
    """Single-person trajectory: parallel lists of frame ids and keypoints."""

    def __init__(self, frame_id=-1, kpt=None):
        self.frame_ids = []
        self.kpts = []
        # seed the track only when a valid first observation is supplied
        if frame_id >= 0 and kpt is not None:
            self.add(frame_id, kpt)

    def add(self, frame_id, kpt):
        """Append one observation to the track."""
        self.frame_ids.append(frame_id)
        self.kpts.append(kpt)

    @property
    def last_kpt(self):
        """Most recently added keypoint (track must be non-empty)."""
        assert len(self.kpts) > 0
        return self.kpts[-1]

    @property
    def length(self):
        """Number of observations in the track."""
        return len(self.frame_ids)

    @property
    def data(self):
        """Track as a {frame_id: keypoint} mapping."""
        return dict(zip(self.frame_ids, self.kpts))
def select_single_person_track(kpts):
    """Greedily link per-frame candidate keypoints into one person's track.

    The first frame picks the most confident candidate; every later frame
    picks the candidate nearest to the previous position.  Returns the track
    as a {frame_id: (x, y)} dict, or None when no frame has candidates.
    """
    track = None
    for frame_id, candidates in kpts.items():
        if len(candidates) == 0:
            continue

        points = [(c[0], c[1]) for c in candidates]
        scores = [c[2] for c in candidates]

        if track is None:
            # bootstrap from the most confident detection
            track = Track(frame_id, points[np.argmax(scores)])
            continue

        previous = np.array([track.last_kpt], dtype=np.float32).reshape([-1, 2])
        current = np.array(points, dtype=np.float32).reshape([-1, 2])
        squared = (previous[:, 0].reshape([-1, 1]) - current[:, 0].reshape([1, -1])) ** 2 + \
                  (previous[:, 1].reshape([-1, 1]) - current[:, 1].reshape([1, -1])) ** 2
        nearest = np.argmin(np.sqrt(squared), axis=1)[0]

        track.add(frame_id, points[nearest])

    return track.data if track is not None else None
def ensure_dir_exists(dir_path):
    """Create `dir_path` (including parents) if it does not exist yet."""
    if not exists(dir_path):
        # exist_ok guards against the race where another process creates the
        # directory between the check and the makedirs call
        makedirs(dir_path, exist_ok=True)
def parse_images(root_dir, extension):
    """Index frame images as {video_name: {frame_id: path}}.

    Frame ids are taken from the numeric file stem and must form a contiguous
    range per video. Returns the index and the total number of images.
    """
    video_names = [entry for entry in listdir(root_dir) if isdir(join(root_dir, entry))]

    out_paths = dict()
    total_num_images = 0
    for video_name in tqdm(video_names, desc='Parsing images'):
        frames_dir = join(root_dir, video_name)
        frames = {}
        for file_name in listdir(frames_dir):
            file_path = join(frames_dir, file_name)
            if isfile(file_path) and file_name.endswith(extension):
                frames[int(file_name.split('.')[0])] = file_path

        frame_ids = list(frames.keys())
        start_frame_id, end_frame_id = np.min(frame_ids), np.max(frame_ids) + 1
        # frame ids must be gap-free for downstream sequential processing
        assert len(frame_ids) == end_frame_id - start_frame_id

        out_paths[video_name] = frames
        total_num_images += len(frames)

    return out_paths, total_num_images
def main():
    """CLI entry point: run the pose network over extracted video frames and
    dump one JSON file of single-person keypoint tracks per video."""
    def _str2bool(v):
        # permissive boolean parsing for the --cuda flag
        return v.lower() in ("yes", "y", "true", "t", "1")

    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', '-i', type=str, required=True)
    parser.add_argument('--output_dir', '-o', type=str, required=True)
    parser.add_argument('--model', '-m', type=str, required=True)
    parser.add_argument('--height_size', type=int, default=256, required=False)
    parser.add_argument('--cuda', default=True, type=_str2bool, required=False)
    parser.add_argument('--override', action='store_true')
    args = parser.parse_args()

    # NOTE(review): these asserts disappear under `python -O`
    assert exists(args.input_dir)
    assert exists(args.model)

    ensure_dir_exists(args.output_dir)

    net = create_network(args.model, args.cuda)

    all_paths, total_num_images = parse_images(args.input_dir, 'jpg')
    print('Found {} images.'.format(total_num_images))

    num_valid_videos = 0
    pbar = tqdm(total=total_num_images, desc='Processing')
    for video_name, frames in all_paths.items():
        frame_ids = list(frames)
        frame_ids.sort()

        out_data_path = join(args.output_dir, '{}.json'.format(video_name))
        if not args.override and exists(out_data_path):
            # results already dumped for this video - skip, keep the bar in sync
            pbar.update(len(frame_ids))
            continue

        # per-keypoint, per-frame candidate lists: {kpt_idx: {frame_id: kpts}}
        video_kpts = defaultdict(dict)
        for frame_id in frame_ids:
            frame_path = frames[frame_id]
            frame = cv2.imread(frame_path)

            heatmaps, _, scale, pad = infer_fast(
                net, frame, args.height_size, NET_STRIDE, NET_UPSAMPLE_RATIO, args.cuda
            )

            # only the target keypoint channels (wrists) are extracted
            for kpt_idx in TRG_KPT_IDS:
                extracted_kpts = extract_keypoints(heatmaps[:, :, kpt_idx], min_conf=0.1, threshold=6)
                scaled_kpts = scale_keypoints(extracted_kpts, NET_STRIDE, NET_UPSAMPLE_RATIO, pad, scale)
                video_kpts[kpt_idx][frame_id] = scaled_kpts

            pbar.update(1)

        # reduce candidates to one track per target keypoint
        out_tracks = dict()
        for kpt_idx in video_kpts.keys():
            track = select_single_person_track(video_kpts[kpt_idx])
            if track is not None:
                out_tracks[kpt_idx] = track

        if len(out_tracks) > 0:
            num_valid_videos += 1
            with open(out_data_path, 'w') as output_stream:
                json.dump(out_tracks, output_stream)

    pbar.close()
    print('Finished: {} / {} valid videos.'.format(num_valid_videos, len(all_paths)))


if __name__ == '__main__':
    main()
|
openvinotoolkit/mmaction2 | mmaction/models/losses/local_push_loss.py | <reponame>openvinotoolkit/mmaction2<filename>mmaction/models/losses/local_push_loss.py
import torch
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class LocalPushLoss(BaseWeightedLoss):
    """Pushes apart embeddings of different classes that are too similar.

    For every labeled sample, penalizes its similarity to any sample of a
    different class when that similarity exceeds a threshold.  With
    `smart_margin` the threshold adapts per sample to its own class-center
    similarity minus `margin`; otherwise the fixed `margin` is used.
    """

    def __init__(self, margin=0.1, smart_margin=True, **kwargs):
        super(LocalPushLoss, self).__init__(**kwargs)

        self.margin = margin
        assert self.margin >= 0.0
        self.smart_margin = smart_margin

    def _forward(self, all_norm_embd, cos_theta, labels):
        # samples with negative labels are ignored
        pos_samples_mask = labels.view(-1) >= 0
        if torch.sum(pos_samples_mask) == 0:
            return torch.zeros([], dtype=all_norm_embd.dtype, device=all_norm_embd.device)

        pos_labels = labels.view(-1)[pos_samples_mask]
        pos_norm_embd = all_norm_embd[pos_samples_mask]
        pos_cos_theta = cos_theta[pos_samples_mask]

        # cosine similarity of each labeled sample to the whole batch
        # (embeddings are assumed L2-normalized - TODO(review): confirm)
        similarity = pos_norm_embd.matmul(all_norm_embd.permute(1, 0))
        with torch.no_grad():
            # True where the pair spans two different classes
            pairs_mask = pos_labels.view(-1, 1) != labels.view(1, -1)

            if self.smart_margin:
                # per-sample threshold: similarity to own class center minus margin
                batch_inds = torch.arange(pos_cos_theta.size(0), device=pos_labels.device)
                center_similarity = pos_cos_theta[batch_inds, pos_labels]
                threshold = center_similarity.clamp(min=self.margin).view(-1, 1) - self.margin
            else:
                threshold = self.margin
            similarity_mask = similarity > threshold

            mask = pairs_mask & similarity_mask

        # keep only violating pairs; take the worst violation per sample
        filtered_similarity = torch.where(mask, similarity - threshold, torch.zeros_like(similarity))
        losses = filtered_similarity.max(dim=-1)[0]

        return losses.mean()
|
openvinotoolkit/mmaction2 | mmaction/core/ops/soft_dtw.py | """
The original repo: https://github.com/Maghoumi/pytorch-softdtw-cuda
"""
import numpy as np
import torch
def compute_softdtw(D, gamma, bandwidth):
    """Forward pass of soft-DTW on the CPU.

    Args:
        D: (B, N, M) pairwise cost matrices.
        gamma: Smoothing parameter of the soft-min.
        bandwidth: Sakoe-Chiba band width; non-positive disables pruning.

    Returns:
        The full (B, N+2, M+2) accumulated cost table R; the total alignment
        cost of batch element b is R[b, -2, -2].
    """
    batch, rows, cols = D.shape[0], D.shape[1], D.shape[2]
    scale = -1.0 / gamma

    R = np.full((batch, rows + 2, cols + 2), np.inf)
    R[:, 0, 0] = 0
    for b in range(batch):
        for j in range(1, cols + 1):
            for i in range(1, rows + 1):
                # band pruning (skipped entirely when bandwidth <= 0)
                if 0 < bandwidth < np.abs(i - j):
                    continue

                # soft-min over the three predecessors via log-sum-exp
                r0 = scale * R[b, i - 1, j - 1]
                r1 = scale * R[b, i - 1, j]
                r2 = scale * R[b, i, j - 1]
                r_max = max(r0, r1, r2)
                r_sum = np.exp(r0 - r_max) + np.exp(r1 - r_max) + np.exp(r2 - r_max)
                soft_min = - gamma * (np.log(r_sum) + r_max)
                R[b, i, j] = D[b, i - 1, j - 1] + soft_min
    return R
def compute_softdtw_backward(D_, R, gamma, bandwidth):
    """Backward pass of soft-DTW on the CPU.

    Given the cost matrices `D_` and the accumulated table `R` from the
    forward pass, returns the (B, N, M) gradient of the alignment cost
    w.r.t. `D_`.  Note: the borders of `R` are mutated in place.
    """
    batch, rows, cols = D_.shape[0], D_.shape[1], D_.shape[2]

    padded_dist = np.zeros((batch, rows + 2, cols + 2))
    grad = np.zeros((batch, rows + 2, cols + 2))
    padded_dist[:, 1:rows + 1, 1:cols + 1] = D_

    grad[:, -1, -1] = 1
    R[:, :, -1] = -np.inf
    R[:, -1, :] = -np.inf
    R[:, -1, -1] = R[:, -2, -2]

    for k in range(batch):
        for j in range(cols, 0, -1):
            for i in range(rows, 0, -1):
                if np.isinf(R[k, i, j]):
                    R[k, i, j] = -np.inf

                # band pruning (skipped entirely when bandwidth <= 0)
                if 0 < bandwidth < np.abs(i - j):
                    continue

                # transition weights to the three successor cells
                w_down = np.exp((R[k, i + 1, j] - R[k, i, j] - padded_dist[k, i + 1, j]) / gamma)
                w_right = np.exp((R[k, i, j + 1] - R[k, i, j] - padded_dist[k, i, j + 1]) / gamma)
                w_diag = np.exp((R[k, i + 1, j + 1] - R[k, i, j] - padded_dist[k, i + 1, j + 1]) / gamma)
                grad[k, i, j] = grad[k, i + 1, j] * w_down + \
                                grad[k, i, j + 1] * w_right + \
                                grad[k, i + 1, j + 1] * w_diag

    return grad[:, 1:rows + 1, 1:cols + 1]
class SoftDTW(torch.autograd.Function):
    """
    CPU implementation based on https://github.com/Sleepwalking/pytorch-softdtw
    """

    @staticmethod
    def forward(ctx, cost_matrix, gamma, bandwidth):
        device, dtype = cost_matrix.device, cost_matrix.dtype
        ctx.gamma = gamma
        ctx.bandwidth = bandwidth

        # the DP recursion runs in numpy on the CPU
        cost_np = cost_matrix.detach().cpu().numpy()
        R = torch.Tensor(compute_softdtw(cost_np, gamma, bandwidth)).to(device).type(dtype)

        ctx.save_for_backward(cost_matrix, R)
        # total alignment cost sits in the (-2, -2) cell of the table
        return R[:, -2, -2]

    @staticmethod
    def backward(ctx, grad_output):
        device, dtype = grad_output.device, grad_output.dtype
        cost_matrix, R = ctx.saved_tensors

        E = torch.Tensor(
            compute_softdtw_backward(cost_matrix.detach().cpu().numpy(),
                                     R.detach().cpu().numpy(),
                                     ctx.gamma, ctx.bandwidth)
        ).to(device).type(dtype)

        # no gradients for gamma and bandwidth
        return grad_output.view(-1, 1, 1).expand_as(E) * E, None, None


soft_dtw = SoftDTW.apply
|
openvinotoolkit/mmaction2 | mmaction/apis/fake_input.py | <filename>mmaction/apis/fake_input.py
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
#
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from ..datasets.pipelines import Compose
def get_fake_data(orig_img_shape, stream_sample_frames):
    """Build a dummy pipeline input of black frames matching the sampler settings."""
    clip_len = stream_sample_frames.clip_len
    return {
        'clip_len': clip_len,
        'num_clips': stream_sample_frames.num_clips,
        'imgs': [np.zeros(orig_img_shape, dtype=np.uint8)] * clip_len,
        'modality': 'RGB',
    }
def get_fake_input(cfg, orig_img_shape=(128, 128, 3), device='cuda'):
    """Produce one preprocessed dummy sample (for model tracing/export)."""
    # skip the first two pipeline steps: frames are generated directly
    pipeline = Compose(cfg.data.test.pipeline[2:])
    data = get_fake_data(orig_img_shape, stream_sample_frames=cfg.data.test.pipeline[0])
    data = pipeline(data)
    batch = collate([data], samples_per_gpu=1)
    return scatter(batch, [device])[0]
|
openvinotoolkit/mmaction2 | mmaction/models/necks/emd_regularizer.py | <reponame>openvinotoolkit/mmaction2
import cv2
import torch
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from ..registry import NECKS
from ...core.ops import conv_1x1x1_bn, normalize
@NECKS.register_module()
class EMDRegularizer(nn.Module):
""" Based on the paper: https://arxiv.org/abs/2103.07350
"""
modes = ['pairs', 'classmates', 'random']
def __init__(self, in_channels, mode='pairs', hidden_size=256, loss_weight=1.0):
super().__init__()
self.mode = mode
assert self.mode in self.modes
self.loss_weight = float(loss_weight)
assert self.loss_weight > 0.0
self.hidden_size = int(hidden_size)
assert self.hidden_size > 0
self.scale = self.hidden_size ** (-0.5)
self.in_channels = in_channels if isinstance(in_channels, (tuple, list)) else [in_channels]
num_inputs = len(self.in_channels)
assert num_inputs > 0
self.mappers = nn.ModuleList([
nn.Sequential(
conv_1x1x1_bn(self.in_channels[input_id], self.hidden_size, as_list=False),
nn.AvgPool3d(kernel_size=(3, 3, 3), stride=1, padding=1, count_include_pad=False)
)
for input_id in range(num_inputs)
])
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm3d):
constant_init(m, 1.0, 0.0)
elif isinstance(m, nn.Parameter):
m.data.normal_()
def forward(self, inputs, return_extra_data=False):
# assert len(inputs) == len(self.mappers) + 1
# filtered_inputs = inputs[:-1]
filtered_inputs = [inputs[-1]]
assert len(filtered_inputs) == len(self.mappers)
features = None
if self.training:
features = []
for input_feature, mapper in zip(filtered_inputs, self.mappers):
output_feature = mapper(input_feature)
features.append(output_feature.view(output_feature.size(0), self.hidden_size, -1))
features = torch.cat(features, dim=2)
# returns the input unchanged
if return_extra_data:
return inputs, dict(features=features)
else:
return inputs
def loss(self, features=None, **kwargs):
losses = dict()
if features is None:
return losses
features_a, features_b = self._split_features(features, self.mode, **kwargs)
if features_a is None or features_b is None:
losses['loss/emd_sfr'] = torch.zeros([], dtype=features.dtype, device=features.device)
return losses
assert features_a.size(0) == features_b.size(0)
num_pairs = features_a.size(0)
assert features_a.size(2) == features_b.size(2)
num_nodes = features_a.size(2)
cost_scale = 1.0 / float(num_nodes)
cost_matrix = self._get_cost_matrix(features_a, features_b)
weights_a = self._get_weights(features_a, features_b)
weights_b = self._get_weights(features_b, features_a)
pair_losses = []
for pair_id in range(num_pairs):
local_weights_a = weights_a[pair_id]
local_weights_b = weights_b[pair_id]
if torch.sum(local_weights_a > 0.0) == 0 or torch.sum(local_weights_b > 0.0) == 0:
continue
flow = self._solve_emd(cost_matrix[pair_id], local_weights_a, local_weights_b)
cost = torch.sum(flow * cost_matrix[pair_id])
pair_losses.append(cost_scale * cost)
if len(pair_losses) > 0:
loss_weight = self.loss_weight / float(len(pair_losses))
losses['loss/emd_sfr'] = loss_weight * sum(pair_losses)
else:
losses['loss/emd_sfr'] = torch.zeros([], dtype=features.dtype, device=features.device)
return losses
    @staticmethod
    def _split_features(features, mode, labels=None, dataset_id=None, num_clips=1, **kwargs):
        """Split the batch of features into two matched groups.

        Modes:
            * 'pairs' — consecutive clips of the same video (requires
              ``num_clips == 2``): even indices vs odd indices.
            * 'classmates' — for each sample, pick a random other sample with
              the same label (and, if given, the same dataset), excluding
              clips of the same video instance.
            * anything else — a random half/half split of the batch.

        Returns:
            Tuple ``(features_a, features_b)``; both None when no valid
            classmate pairs exist.
        """
        if mode == 'pairs':
            assert num_clips == 2
            features_a = features[::num_clips]
            features_b = features[1::num_clips]
        elif mode == 'classmates':
            with torch.no_grad():
                batch_size = features.size(0)
                batch_range = torch.arange(batch_size, device=labels.device)
                # Keep only (i, j) with i < j to avoid duplicate pairs.
                top_diagonal_pairs = batch_range.view(-1, 1) < batch_range.view(1, -1)
                same_class_pairs = labels.view(-1, 1) == labels.view(1, -1)
                valid_pairs = same_class_pairs * top_diagonal_pairs
                if dataset_id is not None:
                    # Only allow pairs coming from the same data source.
                    same_dataset_pairs = dataset_id.view(-1, 1) == dataset_id.view(1, -1)
                    valid_pairs = same_dataset_pairs * valid_pairs
                if num_clips > 1:
                    # Exclude pairs made of clips from the same video instance.
                    # NOTE(review): repeat(1, 2) looks hard-wired to
                    # num_clips == 2 — confirm before using other clip counts.
                    instances_range = torch.arange(0, batch_size, num_clips, device=labels.device)
                    instances_batch_range = instances_range.view(-1, 1).repeat(1, 2)
                    different_instance_pairs = batch_range.view(-1, 1) < instances_batch_range.view(1, -1)
                    valid_pairs = different_instance_pairs * valid_pairs
                valid_samples_mask = torch.any(valid_pairs, dim=-1)
                num_valid_pairs = torch.sum(valid_samples_mask, dim=0).item()
                if num_valid_pairs > 0:
                    # Randomly choose one partner per valid sample by giving
                    # each candidate a random weight in [1, 2) and taking
                    # the argmax (invalid candidates keep weight 0).
                    valid_pairs_subset = valid_pairs[valid_samples_mask].float()
                    rand_weights = 1.0 + torch.rand_like(valid_pairs_subset)
                    valid_pairs_subset_weights = valid_pairs_subset * rand_weights
                    valid_pairs_ids = torch.argmax(valid_pairs_subset_weights, dim=-1)
            if num_valid_pairs > 0:
                features_a = features[valid_samples_mask]
                features_b = features[valid_pairs_ids]
            else:
                features_a, features_b = None, None
        else:
            batch_size = features.size(0)
            assert batch_size % 2 == 0
            idx = torch.randperm(batch_size, device=features.device)
            features_a = features[idx[:(batch_size // 2)]]
            features_b = features[idx[(batch_size // 2):]]
        return features_a, features_b
@staticmethod
def _get_cost_matrix(features_a, features_b):
norm_a = normalize(features_a, dim=1, p=2)
norm_b = normalize(features_b, dim=1, p=2)
dist_matrix = 1.0 - torch.matmul(norm_a.transpose(1, 2), norm_b)
return dist_matrix.clamp_min(0.0)
    @staticmethod
    def _get_weights(ref, trg):
        """Node weights for ``ref`` given the opposite feature set ``trg``.

        A node's weight is its (clipped) similarity to the mean of the target
        set, normalized so the weights of a sample sum to ``num_nodes``.
        Computed without gradients — weights act as constants in the loss.
        """
        with torch.no_grad():
            # Unit-norm mean feature of the target set: (batch, hidden, 1).
            mean_trg = normalize(trg.mean(dim=2, keepdim=True), dim=1, p=2)
            weights = torch.sum(ref * mean_trg, dim=1).clamp_min(0.0)
            sum_weights = torch.sum(weights, dim=1, keepdim=True)
            # Guard against division by zero when every weight was clipped.
            scales = torch.where(sum_weights > 0.0,
                                 torch.reciprocal(sum_weights),
                                 torch.ones_like(sum_weights))
            num_nodes = weights.size(1)
            norm_weights = (float(num_nodes) * scales) * weights
        return norm_weights
    @staticmethod
    def _solve_emd(cost_matrix, weights_a, weights_b):
        """Solve the transport problem with OpenCV's EMD solver.

        Runs on CPU/NumPy (cv2.EMD with a user-supplied cost matrix) and
        returns the flow matrix moved back to the source device/dtype.
        The returned flow carries no gradient.
        """
        data_type = cost_matrix.dtype
        device = cost_matrix.device
        cost_matrix = cost_matrix.detach().cpu().numpy()
        weights_a = weights_a.detach().cpu().numpy()
        weights_b = weights_b.detach().cpu().numpy()
        # cv2.EMD returns (work, lower_bound, flow); only the flow is needed.
        _, _, flow = cv2.EMD(weights_a, weights_b, cv2.DIST_USER, cost_matrix)
        return torch.from_numpy(flow).to(device).type(data_type)
|
openvinotoolkit/mmaction2 | mmaction/datasets/recognition_dataset.py | from abc import ABCMeta, abstractmethod
from collections import defaultdict
from os.path import exists, join
import torch
from mmcv.utils import print_log
from ..core import (mean_class_accuracy, top_k_accuracy, mean_top_k_accuracy,
mean_average_precision, ranking_mean_average_precision,
invalid_pred_info, confusion_matrix)
from .base import BaseDataset
# Number of whitespace-separated fields in an annotation line:
# simple format is "path label"; detailed format is
# "path label clip_start clip_end video_start video_end fps".
SIMPLE_RECORD_SIZE = 2
DETAILED_RECORD_SIZE = 7
class RecognitionDataset(BaseDataset, metaclass=ABCMeta):
    """Base class for action recognition datasets.

    Args:
        action_type_file (dict | None): Optional mapping from data-source
            name to a text file with ``label:action_type`` lines; used to
            annotate each record with its action type.
        filter_min_fraction (float): Minimal fraction of a video's frames
            that must be covered by predictions before the record is marked
            ready for filtering (see ``update_meta_info``).
        **kwargs: Forwarded to ``BaseDataset``.
    """

    allowed_metrics = [
        'top_k_accuracy', 'mean_top_k_accuracy', 'mean_class_accuracy',
        'mean_average_precision', 'ranking_mean_average_precision',
        'confusion_matrix', 'invalid_info'
    ]

    def __init__(self,
                 action_type_file=None,
                 filter_min_fraction=0.8,
                 **kwargs):
        self.filter_min_fraction = filter_min_fraction
        super().__init__(**kwargs)

        if action_type_file is not None:
            assert isinstance(action_type_file, dict)
            # Action types are only supported for single-source datasets.
            assert len(self.dataset_ids_map) == 1
            source_name = self.dataset_ids_map[0]
            if source_name in action_type_file:
                action_type_file = join(self.root_dir, action_type_file[source_name])
                action_type_map = self._load_action_type_map(action_type_file)
                if action_type_map is not None:
                    self.records = self._update_action_type_info(self.records, action_type_map)

    @staticmethod
    def _load_action_type_map(file_path):
        """Parse ``label:action_type`` lines; returns None if file is absent."""
        if not exists(file_path):
            return None

        action_type_map = dict()
        with open(file_path) as input_stream:
            for line in input_stream:
                line_parts = line.strip().split(':')
                if len(line_parts) != 2:
                    continue
                action_type_map[int(line_parts[0])] = line_parts[1]

        return action_type_map

    @staticmethod
    def _update_action_type_info(records, action_type_map):
        """Attach the action type of each record's label.

        NOTE(review): raises KeyError if a label is missing from the map —
        confirm the map is expected to be exhaustive.
        """
        for record in records:
            label = record['label']
            record['action_type'] = action_type_map[label]

        return records

    def _load_annotations(self, ann_file, data_prefix=None):
        """Load annotation file to get video information."""
        if ann_file.endswith('.json'):
            return self.load_json_annotations()

        video_infos = []
        with open(ann_file, 'r') as input_stream:
            for line in input_stream:
                line_split = line.strip().split()

                # Choose the record format by dataset flags first, then by
                # the number of fields; unknown formats are skipped silently.
                if self.multi_class or self.with_offset:
                    record = self._parse_original_record(line_split[1:], self.multi_class,
                                                         self.with_offset, self.num_classes)
                elif len(line_split) == SIMPLE_RECORD_SIZE:
                    record = self._parse_simple_record(line_split[1:])
                elif len(line_split) == DETAILED_RECORD_SIZE:
                    record = self._parse_detailed_record(line_split[1:])
                else:
                    continue

                record.update(self._parse_data_source(line_split[0], data_prefix))
                record.update(self._get_extra_info())

                video_infos.append(record)

        return video_infos

    @staticmethod
    def _parse_original_record(line_splits, multi_class, with_offset, num_classes):
        """Parse the mmaction-style record: [offset] total_frames label[s]."""
        record = dict()

        idx = 0
        if with_offset:
            # idx for offset and total_frames
            record['offset'] = int(line_splits[idx])
            record['total_frames'] = int(line_splits[idx + 1])
            idx += 2
        else:
            # idx for total_frames
            record['total_frames'] = int(line_splits[idx])
            idx += 1

        # Clip/video bounds default to the full frame range.
        record['clip_start'] = 0
        record['clip_end'] = record['total_frames']
        record['video_start'] = 0
        record['video_end'] = record['total_frames']
        # NOTE(review): fps is hard-coded; confirm 30 fps is valid for all
        # sources using this record format.
        record['fps'] = 30.0

        # idx for label[s]
        label = [int(x) for x in line_splits[idx:]]
        assert len(label), 'missing label'
        if multi_class:
            assert num_classes is not None
            label_vector = torch.zeros(num_classes)
            label_vector[label] = 1.0
            record['label'] = label_vector
        else:
            assert len(label) == 1
            record['label'] = int(label[0])

        return record

    @staticmethod
    def _parse_simple_record(line_splits):
        """Parse the "path label" format (path already stripped)."""
        record = dict(
            label=int(line_splits[0]),
        )

        return record

    @staticmethod
    def _parse_detailed_record(line_splits):
        """Parse the detailed format with explicit clip/video bounds and fps."""
        record = dict(
            label=int(line_splits[0]),
            clip_start=int(line_splits[1]),
            clip_end=int(line_splits[2]),
            video_start=int(line_splits[3]),
            video_end=int(line_splits[4]),
            fps=float(line_splits[5]),
        )

        record['clip_len'] = record['clip_end'] - record['clip_start']
        assert record['clip_len'] > 0
        record['video_len'] = record['video_end'] - record['video_start']
        assert record['video_len'] > 0

        record['total_frames'] = record['clip_len']

        return record

    @staticmethod
    def _get_extra_info():
        """Per-record mutable state used by the self-filtering machinery."""
        return dict(
            matched_weights=defaultdict(float),
            filter_ready=False,
            action_type='dynamic',
        )

    @abstractmethod
    def _parse_data_source(self, data_source, data_prefix):
        pass

    def _evaluate(self, results, metrics='top_k_accuracy', topk=(1, 5), logger=None):
        """Evaluation in action recognition dataset.

        Args:
            results (list): Output results.
            metrics (str | sequence[str]): Metrics to be performed.
                Defaults: 'top_k_accuracy'.
            topk (int | tuple[int]): K value(s) for top_k_accuracy metric.
                Defaults: (1, 5).
            logger (logging.Logger | None): Logger for recording.
                Default: None.

        Return:
            dict: Evaluation results dict.
        """
        if isinstance(topk, int):
            topk = (topk,)
        elif not isinstance(topk, tuple):
            raise TypeError(f'topk must be int or tuple of int, but got {type(topk)}')

        # A single metric may be passed as a plain string (this is also the
        # default). Without this normalization the per-character iteration
        # in _evaluate_dataset would silently match no metric at all.
        if isinstance(metrics, str):
            metrics = [metrics]

        all_gt_labels = [ann['label'] for ann in self.records]
        all_dataset_ids = [ann['dataset_id'] for ann in self.records]

        # Evaluate each data source independently.
        split_results, split_gt_labels = defaultdict(list), defaultdict(list)
        for ind, result in enumerate(results):
            dataset_id = all_dataset_ids[ind]
            dataset_name = self.dataset_ids_map[dataset_id]
            split_results[dataset_name].append(result.reshape([-1]))
            split_gt_labels[dataset_name].append(all_gt_labels[ind])

        eval_results = dict()
        for dataset_name in split_results.keys():
            dataset_results = split_results[dataset_name]
            dataset_gt_labels = split_gt_labels[dataset_name]

            dataset_results = self._evaluate_dataset(
                dataset_results, dataset_gt_labels, dataset_name, metrics, topk, logger
            )
            eval_results.update(dataset_results)

        return eval_results

    @staticmethod
    def _evaluate_dataset(results, gt_labels, name, metrics, topk, logger=None):
        """Compute the requested metrics for one data source.

        Unknown metric names are ignored silently.
        """
        eval_results = dict()
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'top_k_accuracy':
                top_k_acc = top_k_accuracy(results, gt_labels, topk)
                log_msg = []
                for k, acc in zip(topk, top_k_acc):
                    eval_results[f'val/{name}/top{k}_acc'] = acc
                    log_msg.append(f'\n{name}/top{k}_acc\t{acc:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric == 'mean_top_k_accuracy':
                log_msg = []
                for k in topk:
                    acc = mean_top_k_accuracy(results, gt_labels, k)
                    eval_results[f'val/{name}/mean_top{k}_acc'] = acc
                    log_msg.append(f'\n{name}/mean_top{k}_acc\t{acc:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric == 'mean_class_accuracy':
                mean_acc = mean_class_accuracy(results, gt_labels)
                eval_results[f'val/{name}/mean_class_accuracy'] = mean_acc
                log_msg = f'\n{name}/mean_acc\t{mean_acc:.4f}'
                print_log(log_msg, logger=logger)
                continue

            if metric == 'mean_average_precision':
                mAP = mean_average_precision(results, gt_labels)
                eval_results[f'val/{name}/mAP'] = mAP
                log_msg = f'\n{name}/mAP\t{mAP:.4f}'
                print_log(log_msg, logger=logger)
                continue

            if metric == 'ranking_mean_average_precision':
                mAP = ranking_mean_average_precision(results, gt_labels)
                eval_results[f'val/{name}/rank_mAP'] = mAP
                log_msg = f'\n{name}/rank_mAP\t{mAP:.4f}'
                print_log(log_msg, logger=logger)
                continue

            if metric == 'confusion_matrix':
                cm = confusion_matrix(results, gt_labels)
                eval_results[f'val/{name}/conf_matrix'] = cm
                log_msg = f'\n{name}/conf_matrix evaluated'
                print_log(log_msg, logger=logger)
                continue

            if metric == 'invalid_info':
                invalid_ids, invalid_conf, invalid_pred = invalid_pred_info(results, gt_labels, k=1)
                eval_results[f'val/{name}/invalid_info'] = \
                    dict(ids=invalid_ids, conf=invalid_conf, pred=invalid_pred)
                log_msg = f'\n{name}/invalid is collected'
                print_log(log_msg, logger=logger)
                continue

        return eval_results

    def update_meta_info(self, pred_labels, pred_conf, sample_idx, clip_starts, clip_ends, total_frames):
        """Accumulate per-frame prediction evidence for sample filtering.

        Frames of a correctly predicted clip receive positive weight; frames
        of a mispredicted clip receive negative weight. A record becomes
        ``filter_ready`` once enough distinct frames have been touched.
        """
        for idx, pred_label, pred_weight, start, end, num_frames in \
                zip(sample_idx, pred_labels, pred_conf, clip_starts, clip_ends, total_frames):
            video_info = self.records[idx]
            video_label = video_info['label']
            video_matched_weights = video_info['matched_weights']

            weight = pred_weight if video_label == pred_label else -pred_weight
            for ii in range(start, end):
                video_matched_weights[ii] += weight

            # len() counts distinct frames ever touched (defaultdict keys).
            filter_ready = float(len(video_matched_weights)) / float(num_frames) > self.filter_min_fraction
            video_info['filter_ready'] = filter_ready

    def get_filter_active_samples_ratio(self):
        """Fraction of records that have collected enough filter evidence."""
        num_active_samples = len([True for record in self.records if record['filter_ready']])
        return float(num_active_samples) / float(len(self.records))
|
openvinotoolkit/mmaction2 | mmaction/core/ops/labels.py | <gh_stars>1-10
import torch
import torch.nn as nn
from .math import normalize
class PRISM(nn.Module):
    """Filters labels according to the distance to set of class centers.

    Keeps a per-class ring buffer of recent "clear" features; once every
    class has received enough updates, samples whose softmax probability of
    belonging to their own class center falls below a margin are relabeled
    with -1 (i.e. marked as noisy).

    The original paper: https://arxiv.org/abs/2103.16047
    """

    def __init__(self, num_classes, feature_length, buffer_size=1, min_num_updates=10,
                 clear_prob_margin=0.5, default_scale=10.0):
        super().__init__()

        self.num_classes = int(num_classes)
        assert self.num_classes > 0
        self.feature_length = int(feature_length)
        assert self.feature_length > 0
        self.buffer_size = int(buffer_size)
        assert self.buffer_size > 0
        # Cleaning only starts after this many stored features per class.
        self.min_num_updates = max(int(min_num_updates), self.buffer_size)
        assert self.min_num_updates > 0
        self.clear_prob_margin = float(clear_prob_margin)
        assert self.clear_prob_margin >= 0.0
        # Temperature applied to cosine similarities before softmax.
        self.scale = float(default_scale)
        assert self.scale >= 0.0

        # NOTE: local name shadows self.buffer_size — here it is the shape
        # of the per-class feature ring buffer.
        buffer_size = [self.num_classes, self.buffer_size, self.feature_length]
        self.register_buffer('feature_buffer', torch.zeros(buffer_size))
        # Per-class ring-buffer write positions and total update counters
        # (plain Python state: not saved with the module's buffers).
        self.start_pos = [0] * self.num_classes
        self.num_updates = [0] * self.num_classes

    def forward(self, features, labels, scale=None):
        """Return labels with suspected-noisy samples replaced by -1.

        Also pushes the (clean) features of each present class into its
        ring buffer as a side effect.
        """
        features = features.view(-1, self.feature_length)
        labels = labels.view(-1)

        fixed_labels = torch.full_like(labels, -1)

        # Only clean once every class has accumulated enough features.
        enable_cleaning = all(n_ >= self.min_num_updates for n_ in self.num_updates)

        unique_labels = torch.unique(labels).detach().cpu().numpy()
        for class_id in unique_labels:
            if class_id < 0:
                continue

            class_mask = labels == class_id
            class_features = features[class_mask]

            if enable_cleaning:
                clear_mask = self._estimate_clear_features(class_features, class_id, scale)
                clear_features = class_features[clear_mask]

                # Keep the class label only for samples deemed clean.
                local_labels = torch.full_like(clear_mask, -1, dtype=labels.dtype)
                local_labels[clear_mask] = class_id
                fixed_labels[class_mask] = local_labels
            else:
                clear_features = class_features
                fixed_labels[class_mask] = class_id

            self._store_features(clear_features, class_id)

        return fixed_labels

    def _store_features(self, new_features, class_id):
        """Write features into the class ring buffer with wraparound."""
        num_features = new_features.size(0)
        if num_features == 0:
            return
        elif num_features > self.buffer_size:
            raise ValueError(f'Num features ({num_features}) is bigger than '
                             f'tbe buffer size ({self.buffer_size})')

        start_pos = self.start_pos[class_id]
        num_free_lines = self.buffer_size - start_pos
        if num_free_lines >= num_features:
            # Fits without wrapping.
            with torch.no_grad():
                self.feature_buffer[class_id, start_pos:(start_pos + num_features)] = new_features

            start_pos += num_features
        else:
            # Wrap around: fill the tail, then the head of the buffer.
            num_extra_lines = num_features - num_free_lines
            with torch.no_grad():
                self.feature_buffer[class_id, start_pos:] = new_features[:num_free_lines]
                self.feature_buffer[class_id, :num_extra_lines] = new_features[num_free_lines:]

            start_pos = num_extra_lines

        self.start_pos[class_id] = start_pos % self.buffer_size
        self.num_updates[class_id] += num_features

    def _estimate_clear_features(self, features, class_id, scale=None):
        """Boolean mask of features confidently belonging to ``class_id``.

        Probability is a softmax over cosine similarities to all class
        centers (mean of each class ring buffer), scaled by ``scale``.
        """
        scale = scale if scale is not None else self.scale

        with torch.no_grad():
            class_centers = torch.mean(self.feature_buffer, dim=1)
            norm_class_centers = normalize(class_centers, dim=1, p=2)

            set_similarities = torch.matmul(norm_class_centers, torch.transpose(features, 0, 1))
            set_probs = torch.softmax(scale * set_similarities, dim=0)

            clear_prob = set_probs[class_id].view(-1)
            clear_mask = clear_prob > self.clear_prob_margin

        return clear_mask
|
openvinotoolkit/mmaction2 | mmaction/integration/nncf/__init__.py | <reponame>openvinotoolkit/mmaction2
from .compression import (check_nncf_is_enabled, get_nncf_config_from_meta,
get_nncf_metadata, get_uncompressed_model,
is_checkpoint_nncf, wrap_nncf_model)
from .compression_hooks import CompressionHook
from .utils import get_nncf_version, is_in_nncf_tracing, no_nncf_trace, nullcontext
# Public API of the NNCF integration package. Every entry must end with a
# comma: the original list missed one after 'is_in_nncf_tracing', which
# implicitly concatenated two adjacent string literals into the single
# bogus name 'is_in_nncf_tracingno_nncf_trace'.
__all__ = [
    'check_nncf_is_enabled',
    'CompressionHook',
    'get_nncf_config_from_meta',
    'get_nncf_metadata',
    'get_nncf_version',
    'get_uncompressed_model',
    'is_checkpoint_nncf',
    'is_in_nncf_tracing',
    'no_nncf_trace',
    'wrap_nncf_model',
    'nullcontext',
]
|
openvinotoolkit/mmaction2 | tools/test.py | <gh_stars>1-10
import argparse
import os
import shutil
import mmcv
import torch
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, set_random_seed
from mmcv.runner.fp16_utils import wrap_fp16_model
from mmaction.apis import multi_gpu_test, single_gpu_test, get_fake_input
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.models import build_model
from mmaction.utils import ExtendedDictAction
from mmaction.core.utils import propagate_root_dir, load_checkpoint
from mmaction.integration.nncf import (check_nncf_is_enabled,
get_nncf_config_from_meta,
is_checkpoint_nncf, wrap_nncf_model)
def parse_args():
    """Parse command-line arguments for model testing/evaluation.

    Also mirrors ``--local_rank`` into the LOCAL_RANK environment variable
    for the distributed launchers that read it from the environment.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    parser = argparse.ArgumentParser(description='MMAction2 test (and eval) a model')
    parser.add_argument('config',
                        help='test config file path')
    parser.add_argument('checkpoint',
                        help='checkpoint file')
    parser.add_argument('--data_dir', type=str,
                        help='the dir with dataset')
    parser.add_argument('--out', default=None,
                        help='output result file in pickle format')
    parser.add_argument('--out_invalid', default=None,
                        help='output mismatched samples')
    parser.add_argument('--fuse_conv_bn', action='store_true',
                        help='Whether to fuse conv and bn, this will slightly increase the inference speed')
    parser.add_argument('--eval', type=str, nargs='+',
                        help='evaluation metrics, which depends on the dataset, e.g.,'
                             ' "top_k_accuracy", "mean_class_accuracy" for video dataset')
    parser.add_argument('--gpu_collect', action='store_true',
                        help='whether to use gpu to collect results')
    parser.add_argument('--tmpdir',
                        help='tmp directory used for collecting results from multiple '
                             'workers, available when gpu-collect is not specified')
    parser.add_argument('--options', nargs='+', help='custom options')
    parser.add_argument('--average_clips', choices=['score', 'prob'], default='score',
                        help='average type when averaging test clips')
    parser.add_argument('--num_workers', type=int,
                        help='number of CPU workers per GPU')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--update_config', nargs='+', action=ExtendedDictAction,
                        help='Update configuration file by parameters specified here.')
    args = parser.parse_args()

    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    return args
def update_config(cfg, args):
    """Apply command-line overrides to the loaded config (in place).

    Forces every data split into test mode and disables train transforms,
    since this script only runs inference/evaluation.

    Args:
        cfg (mmcv.Config): Loaded configuration.
        args (argparse.Namespace): Parsed command-line arguments.

    Returns:
        mmcv.Config: The (mutated) config.
    """
    # set cudnn benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    if args.num_workers is not None and args.num_workers > 0:
        cfg.data.workers_per_gpu = args.num_workers

    if cfg.test_cfg is None:
        cfg.test_cfg = dict(average_clips=args.average_clips)
    else:
        cfg.test_cfg.average_clips = args.average_clips

    cfg.data.train.test_mode = True
    cfg.data.val.test_mode = True
    cfg.data.test.test_mode = True

    cfg.data.train.transforms = None

    return cfg
def merge_configs(cfg1, cfg2):
    """Merge ``cfg2`` into a copy of ``cfg1``.

    Values from ``cfg2`` overwrite those in ``cfg1``; a ``None`` value means
    "not specified" and is ignored. Either argument may be ``None``.

    Returns:
        dict: The merged mapping (inputs are not mutated).
    """
    merged = {} if cfg1 is None else dict(cfg1)
    cfg2 = {} if cfg2 is None else cfg2
    for key, value in cfg2.items():
        # Only None means "not set": falsy-but-meaningful values such as
        # 0, False or '' must still override (the previous `if value:`
        # check silently dropped them, contradicting the contract above).
        if value is not None:
            merged[key] = value
    return merged
def main():
    """Entry point: build the dataset and model, run inference, then
    optionally dump results, evaluate metrics and export mismatched samples.

    Flow: parse args -> patch config -> (optional) init distributed env ->
    build dataset/dataloader/model -> (optional) wrap with NNCF compression
    or fp16 -> run single- or multi-GPU test -> on rank 0: dump/evaluate and
    copy invalid samples to --out_invalid.
    """
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    if args.update_config is not None:
        cfg.merge_from_dict(args.update_config)
    cfg = update_config(cfg, args)
    cfg = propagate_root_dir(cfg, args.data_dir)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    # Overwrite output_config from args.out
    output_config = merge_configs(output_config, dict(out=args.out))

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    # Overwrite eval_config from args.eval
    eval_config = merge_configs(eval_config, dict(metrics=args.eval))
    # Add options from args.option
    eval_config = merge_configs(eval_config, args.options)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    # init distributed env first, since logger depends on the dist info.
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)

    # get rank
    rank, _ = get_dist_info()

    if cfg.get('seed'):
        print(f'Set random seed to {cfg.seed}')
        set_random_seed(cfg.seed)

    # build the dataset
    dataset = build_dataset(cfg.data, 'test', dict(test_mode=True))
    if cfg.get('classes'):
        dataset = dataset.filter(cfg.classes)
    if rank == 0:
        print(f'Test datasets:\n{str(dataset)}')

    # build the dataloader
    data_loader = build_dataloader(
        dataset,
        videos_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False
    )

    # build the model and load checkpoint
    model = build_model(
        cfg.model,
        train_cfg=None,
        test_cfg=cfg.test_cfg,
        class_sizes=dataset.class_sizes,
        class_maps=dataset.class_maps
    )

    # nncf model wrapper
    if is_checkpoint_nncf(args.checkpoint) and not cfg.get('nncf_config'):
        # reading NNCF config from checkpoint
        nncf_part = get_nncf_config_from_meta(args.checkpoint)
        for k, v in nncf_part.items():
            cfg[k] = v

    if cfg.get('nncf_config'):
        check_nncf_is_enabled()
        if not is_checkpoint_nncf(args.checkpoint):
            raise RuntimeError('Trying to make testing with NNCF compression a model snapshot that was NOT trained with NNCF')

        cfg.load_from = args.checkpoint
        cfg.resume_from = None
        if torch.cuda.is_available():
            model = model.cuda()
        # NNCF wrapping loads the checkpoint itself (via cfg.load_from).
        _, model = wrap_nncf_model(model, cfg, None, get_fake_input)
    else:
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)

        # load model weights
        load_checkpoint(model, args.checkpoint, map_location='cpu', force_matching=True)
        if args.fuse_conv_bn:
            model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)

    # Only rank 0 reports/dumps results.
    if rank == 0:
        if output_config:
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)

        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)

            print('\nFinal metrics:')
            for name, val in eval_res.items():
                if 'invalid_info' in name:
                    continue

                if isinstance(val, float):
                    print(f'{name}: {val:.04f}')
                elif isinstance(val, str):
                    print(f'{name}:\n{val}')
                else:
                    print(f'{name}: {val}')

            # Export mispredicted samples (videos or frame dirs) for review.
            invalid_info = {name: val for name, val in eval_res.items() if 'invalid_info' in name}
            if len(invalid_info) > 0:
                assert args.out_invalid is not None and args.out_invalid != ''
                if os.path.exists(args.out_invalid):
                    shutil.rmtree(args.out_invalid)
                if not os.path.exists(args.out_invalid):
                    os.makedirs(args.out_invalid)

                for name, invalid_record in invalid_info.items():
                    out_invalid_dir = os.path.join(args.out_invalid, name)

                    item_gen = zip(invalid_record['ids'], invalid_record['conf'], invalid_record['pred'])
                    for invalid_idx, pred_conf, pred_label in item_gen:
                        record_info = dataset.get_info(invalid_idx)
                        gt_label = record_info['label']

                        if 'filename' in record_info:
                            # Video file: copy it with gt/pred info in the name.
                            src_data_path = record_info['filename']

                            in_record_name, record_extension = os.path.basename(src_data_path).split('.')
                            out_record_name = f'{in_record_name}_gt{gt_label}_pred{pred_label}_conf{pred_conf:.3f}'
                            trg_data_path = os.path.join(out_invalid_dir, f'{out_record_name}.{record_extension}')

                            shutil.copyfile(src_data_path, trg_data_path)
                        else:
                            # Raw-frames record: copy the clip's frame range.
                            src_data_path = record_info['frame_dir']

                            in_record_name = os.path.basename(src_data_path)
                            out_record_name = f'{in_record_name}_gt{gt_label}_pred{pred_label}_conf{pred_conf:.3f}'
                            trg_data_path = os.path.join(out_invalid_dir, out_record_name)
                            os.makedirs(trg_data_path)

                            start_frame_id = record_info['clip_start'] + dataset.start_index
                            end_frame_id = record_info['clip_end'] + dataset.start_index
                            for frame_id in range(start_frame_id, end_frame_id):
                                img_name = f'{frame_id:05}.jpg'
                                shutil.copyfile(os.path.join(src_data_path, img_name),
                                                os.path.join(trg_data_path, img_name))


if __name__ == '__main__':
    main()
|
openvinotoolkit/mmaction2 | mmaction/datasets/samplers/__init__.py | <filename>mmaction/datasets/samplers/__init__.py
from .distributed_sampler import DistributedSampler
from .balanced_distributed_sampler import BalancedDistributedSampler
# Public sampler API of this subpackage.
__all__ = [
    'DistributedSampler',
    'BalancedDistributedSampler',
]
|
openvinotoolkit/mmaction2 | mmaction/models/losses/clip_mixing_loss.py | import torch
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class ClipMixingLoss(BaseWeightedLoss):
    """Consistency loss between clips sampled from the same video.

    Consecutive groups of ``num_clips`` samples are treated as clips of one
    video; a group is valid only when all its clips share the same label.
    Depending on ``mode``, the loss either pulls together the normalized
    clip embeddings ('embd') or makes the clip class distributions agree
    ('logits').

    Args:
        mode (str): One of ``MODES`` ('embd' or 'logits').
        default_scale (float): Logit temperature used when no per-call
            ``scale`` is provided ('logits' mode only).
        num_clips (int): Clips per video in the batch; must be > 1.
        reweight_targets (bool): In 'logits' mode, weight each clip's
            contribution to the target distribution by how well it already
            predicts the ground-truth label.
        **kwargs: Forwarded to ``BaseWeightedLoss``.
    """

    MODES = 'embd', 'logits'

    def __init__(self, mode='', default_scale=10.0, num_clips=2, reweight_targets=False, **kwargs):
        super(ClipMixingLoss, self).__init__(**kwargs)

        assert mode in self.MODES
        self.mode = mode
        self.default_scale = default_scale
        self.reweight_targets = reweight_targets
        self.num_clips = num_clips
        assert self.num_clips > 1

    def _forward(self, all_logits, labels, all_norm_embd, scale=None):
        with torch.no_grad():
            # Group consecutive samples into tuples of clips; a tuple is
            # valid only when all of its clips carry the same label.
            num_tuples = labels.view(-1).size(0) // self.num_clips
            labels = labels.view(num_tuples, self.num_clips)

            valid_tuples_mask = torch.all(labels == labels[:, 0].view(-1, 1), dim=1)

        if valid_tuples_mask.sum() == 0:
            return torch.zeros([], dtype=all_logits.dtype, device=all_logits.device)

        if self.mode == 'embd':
            all_norm_embd = all_norm_embd.view(all_norm_embd.size(0) // self.num_clips, self.num_clips, -1)
            norm_embd = all_norm_embd[valid_tuples_mask]

            # Pairwise cosine similarities between clips of the same tuple;
            # the loss is (1 - similarity) over the upper triangle.
            similarity = torch.matmul(norm_embd, norm_embd.permute(0, 2, 1))
            losses = 1.0 - similarity

            ind_range = torch.arange(self.num_clips, dtype=torch.int64, device=norm_embd.device)
            mask = ind_range.view(-1, 1) < ind_range.view(1, -1)
            mask = mask.view(-1, self.num_clips, self.num_clips).repeat(norm_embd.size(0), 1, 1)

            valid_losses = losses[mask]
        else:
            scale = scale if scale is not None else self.default_scale

            all_logits = all_logits.view(all_logits.size(0) // self.num_clips, self.num_clips, -1)
            filtered_labels = labels[valid_tuples_mask]
            logits = scale * all_logits[valid_tuples_mask]

            # Number of tuples that survived the validity filter. The flat
            # views below must use this count: the previous code used the
            # unfiltered ``num_tuples``, which crashes with a shape mismatch
            # whenever at least one tuple was filtered out.
            num_valid_tuples = logits.size(0)

            log_probs = torch.log_softmax(logits, dim=2)
            with torch.no_grad():
                probs = torch.softmax(logits, dim=2)
                if self.reweight_targets:
                    # Weight each clip by how confidently it predicts the
                    # ground-truth label; the target distribution is the
                    # weighted mixture of the clip distributions.
                    flat_log_probs = log_probs.view(num_valid_tuples * self.num_clips, -1)
                    flat_labels = filtered_labels.view(-1)
                    flat_batch_idx = torch.arange(num_valid_tuples * self.num_clips, device=flat_log_probs.device)
                    neg_base_losses = flat_log_probs[flat_batch_idx, flat_labels]
                    trg_weights = torch.softmax(neg_base_losses.view(num_valid_tuples, -1), dim=1)
                    trg_probs = torch.sum(trg_weights.unsqueeze(2) * probs, dim=1, keepdim=True)
                else:
                    trg_probs = probs.mean(dim=1, keepdim=True)

            # Cross-entropy of every clip against the (detached) target mix.
            valid_losses = torch.sum(trg_probs * log_probs, dim=2).neg()

        return torch.mean(valid_losses)
|
openvinotoolkit/mmaction2 | tools/data/custom/prepare_data.py | import json
from os import walk, makedirs
from os.path import exists, join, basename
from shutil import copyfile
from collections import defaultdict
from argparse import ArgumentParser
from lxml import etree
from tqdm import tqdm
NUM_CLASSES = 12  # NOTE(review): not referenced in the visible code — confirm before removing
# Accepted video container extensions. Kept as a tuple so the membership
# test below matches whole extensions: with the previous plain string
# 'webm', `'m' in VIDEO_FILE_EXTENSIONS` was a substring check, so files
# like 'x.m' or 'x.web' were wrongly treated as videos.
VIDEO_FILE_EXTENSIONS = ('webm',)
XML_FRAGMENT_TEMPLATE = '<annotations>\n{}</annotations>'


def load_annot(input_dir):
    """Collect video/annotation pairs from a directory tree.

    Files sharing a base name are grouped; ``.xml`` files are repaired and
    parsed into clip annotations, video files contribute label/user/path
    info parsed from the 5-part dash-separated file name. Only records that
    have both a video and an annotation entry are returned.

    Args:
        input_dir (str): Root directory to walk.

    Returns:
        dict: base file name -> record dict.
    """
    out_data = dict()
    for root, _, files in walk(input_dir):
        if len(files) == 0:
            continue

        local_data = defaultdict(dict)
        for f in files:
            file_name_parts = f.split('.')
            file_name = '.'.join(file_name_parts[:-1])
            file_extension = file_name_parts[-1]

            record = local_data[file_name]
            if file_extension == 'xml':
                try:
                    annot_str = repair_annot(join(root, f))
                    annot = parse_annot(annot_str)
                except Exception:
                    # Malformed XML dumps are expected; mark them invalid
                    # instead of aborting the whole scan.
                    annot = None

                record['annot'] = annot
                if record['annot'] is None:
                    print(f' * invalid: {basename(root)}/{f}')
            elif file_extension in VIDEO_FILE_EXTENSIONS:
                name_components = file_name.split('-')
                assert len(name_components) == 5, f'Incorrect naming: {file_name}'

                record['label'] = name_components[4]
                record['user_id'] = name_components[3]
                record['video_name'] = f
                record['video_path'] = join(root, f)

        # Keep only complete records: a video with a (possibly None) annot.
        filtered_data = {k: v for k, v in local_data.items() if 'video_path' in v and 'annot' in v}
        out_data.update(filtered_data)

    return out_data
def repair_annot(file_path):
    """Extract the first person track from a (possibly malformed) CVAT-style
    XML dump and wrap it in a minimal ``<annotations>`` document.

    Collection starts at the hard-coded track header (inclusive) and stops
    right after the first closing ``</track>`` tag.
    """
    track_header = '<track id="0" label="person" source="manual">'
    collected = []
    collecting = False
    with open(file_path, encoding='unicode_escape') as input_stream:
        for line in input_stream:
            if track_header in line:
                collecting = True
            elif '</track>' in line:
                collected.append(line)
                break
            if collecting:
                collected.append(line)
    return XML_FRAGMENT_TEMPLATE.format(''.join(collected))
def parse_annot(xml_fragment):
    """Parse a repaired XML fragment into clip/video frame bounds.

    A frame is "valid" when its ``sign_action`` attribute equals ``'yes'``.
    The clip range spans the valid frames; the video range spans all
    annotated frames.

    Args:
        xml_fragment (str): Output of ``repair_annot``.

    Returns:
        dict | None: Bounds dict for the single track, or None when no
        track contains valid frames.
    """
    root = etree.XML(xml_fragment.encode('utf-8'))
    tracks = []
    for element in root:
        if element.tag != 'track':
            continue

        all_frame_ids, valid_frame_ids = [], []
        for bbox in element:
            frame_id = int(bbox.attrib['frame'])
            all_frame_ids.append(frame_id)

            actions = []
            for action in bbox:
                if action.tag == 'attribute' and action.attrib['name'] == 'sign_action':
                    actions.append(action.text)
            assert len(actions) == 1,\
                f'Expected single action per frame but got {len(actions)} actions'
            action = actions[0]

            valid_frame = action == 'yes'
            if valid_frame:
                valid_frame_ids.append(frame_id)

        if len(valid_frame_ids) > 0:
            # End bounds are exclusive, hence the +1.
            tracks.append(dict(
                video_start=min(all_frame_ids),
                video_end=max(all_frame_ids) + 1,
                clip_start=min(valid_frame_ids),
                clip_end=max(valid_frame_ids) + 1,
            ))

    if len(tracks) == 0:
        return None
    else:
        assert len(tracks) == 1, f'Expected single track per video but got {len(tracks)} tracks'
        return tracks[0]
def dump_annot(annot, out_path):
    """Serialize *annot* to *out_path* as compact JSON."""
    with open(out_path, 'w') as output_stream:
        output_stream.write(json.dumps(annot))
def copy_videos(annot, out_dir):
    """Copy every record's video file into *out_dir* (skipping existing ones).

    Args:
        annot (iterable[dict]): Records with 'video_path' and 'video_name'.
        out_dir (str): Existing destination directory.
    """
    for record in tqdm(annot, desc='Copying videos', leave=False):
        input_file_path = record['video_path']
        output_file_path = join(out_dir, record['video_name'])
        # Idempotent: re-running the script does not re-copy files.
        if not exists(output_file_path):
            copyfile(input_file_path, output_file_path)
def main():
    """Entry point: scan --input_dir for video/annotation pairs, then write
    the merged annotation JSON and copy the videos into --output_dir."""
    parser = ArgumentParser()
    parser.add_argument('--input_dir', '-i', type=str, required=True)
    parser.add_argument('--output_dir', '-o', type=str, required=True)
    args = parser.parse_args()

    assert exists(args.input_dir)
    if not exists(args.output_dir):
        makedirs(args.output_dir)

    data = load_annot(args.input_dir)
    user_ids = set([record['user_id'] for record in data.values()])
    print(f'Loaded {len(data)} records ({len(user_ids)} unique users).')

    out_annot_path = join(args.output_dir, 'videos_info.json')
    dump_annot(data, out_annot_path)
    print(f'Annotation has been dumped to {out_annot_path}')

    out_videos_dir = join(args.output_dir, 'videos')
    if not exists(out_videos_dir):
        makedirs(out_videos_dir)
    copy_videos(data.values(), out_videos_dir)
    print(f'Videos have been copied to {out_videos_dir}')


if __name__ == '__main__':
    main()
|
brockar1999/E02a-Control-Structures | main10.py | #!/usr/bin/env python3
import sys, random
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
print('Greetings!')

# Pool of colors the program may pick its "favorite" from.
COLOR_CHOICES = ['red', 'orange', 'yellow', 'green', 'blue', 'violet', 'purple']

play_again = ''
best_count = sys.maxsize  # fewer guesses is better; start above any real score

# Keep playing rounds until the user answers 'n' or 'no'.
while play_again not in ('n', 'no'):
    secret = random.choice(COLOR_CHOICES)
    count = 0
    guess = ''

    # Ask until the user hits the secret color; input is case/space tolerant.
    while guess != secret:
        guess = input("\nWhat is my favorite color? ").lower().strip()
        count += 1
        if guess == secret:
            print('Correct!')
        else:
            print(f'Sorry, try again. You have guessed {count} times.')

    print(f'\nYou guessed it in {count} tries!')
    if count < best_count:
        print('This was your best guess so far!')
        best_count = count

    play_again = input("\nWould you like to play again (yes or no)? ").lower().strip()

print('Thanks for playing!')
ryanwang520/graphql-core | src/graphql/utilities/value_from_ast_untyped.py | from math import nan
from typing import Any, Callable, Dict, Optional, Union
from ..language import (
ValueNode,
BooleanValueNode,
EnumValueNode,
FloatValueNode,
IntValueNode,
ListValueNode,
NullValueNode,
ObjectValueNode,
StringValueNode,
VariableNode,
)
from ..pyutils import inspect, Undefined
__all__ = ["value_from_ast_untyped"]
def value_from_ast_untyped(
    value_node: ValueNode, variables: Optional[Dict[str, Any]] = None
) -> Any:
    """Produce a Python value given a GraphQL Value AST.

    Unlike :func:`~graphql.value_from_ast`, no type is provided.
    The resulting Python value will reflect the provided GraphQL value AST.

    =================== ============== ================
    GraphQL Value       JSON Value     Python Value
    =================== ============== ================
    Input Object        Object         dict
    List                Array          list
    Boolean             Boolean        bool
    String / Enum       String         str
    Int / Float         Number         int / float
    Null                null           None
    =================== ============== ================
    """
    handler = _value_from_kind_functions.get(value_node.kind)
    if handler is None:
        # Not reachable: every possible value-node kind is registered.
        raise TypeError(  # pragma: no cover
            f"Unexpected value node: {inspect(value_node)}."
        )
    return handler(value_node, variables)
def value_from_null(_value_node: NullValueNode, _variables: Any) -> Any:
    """Convert a GraphQL null literal to Python ``None`` (arguments ignored)."""
    return None
def value_from_int(value_node: IntValueNode, _variables: Any) -> Any:
    """Convert a GraphQL int literal to a Python int, ``nan`` if malformed."""
    try:
        parsed = int(value_node.value)
    except ValueError:
        return nan
    return parsed
def value_from_float(value_node: FloatValueNode, _variables: Any) -> Any:
    """Convert a GraphQL float literal to a Python float, ``nan`` if malformed."""
    try:
        parsed = float(value_node.value)
    except ValueError:
        return nan
    return parsed
def value_from_string(
    value_node: Union[BooleanValueNode, EnumValueNode, StringValueNode], _variables: Any
) -> Any:
    """Return the literal value carried by a boolean, enum or string node."""
    return value_node.value
def value_from_list(
    value_node: ListValueNode, variables: Optional[Dict[str, Any]]
) -> Any:
    """Convert a GraphQL list literal by converting each element recursively."""
    converted = []
    for item_node in value_node.values:
        converted.append(value_from_ast_untyped(item_node, variables))
    return converted
def value_from_object(
    value_node: ObjectValueNode, variables: Optional[Dict[str, Any]]
) -> Any:
    """Convert a GraphQL input-object literal to a plain dict, field by field."""
    result: Dict[str, Any] = {}
    for field in value_node.fields:
        result[field.name.value] = value_from_ast_untyped(field.value, variables)
    return result
def value_from_variable(
    value_node: VariableNode, variables: Optional[Dict[str, Any]]
) -> Any:
    """Resolve a variable node from the ``variables`` mapping.

    A missing mapping or a missing key resolves to ``Undefined``.
    """
    if variables:
        return variables.get(value_node.name.value, Undefined)
    return Undefined
# Dispatch table: AST node ``kind`` string -> converter function.
# ``value_from_ast_untyped`` looks handlers up here, so every possible
# value-node kind must have an entry.
_value_from_kind_functions: Dict[str, Callable] = {
    "null_value": value_from_null,
    "int_value": value_from_int,
    "float_value": value_from_float,
    "string_value": value_from_string,
    "enum_value": value_from_string,
    "boolean_value": value_from_string,
    "list_value": value_from_list,
    "object_value": value_from_object,
    "variable": value_from_variable,
}
|
dbrgn/superblock | superblock.py | #!/usr/bin/env python2
"""
Analyze superblock in ext2/ext3 filesystem.
Author: <NAME> <<EMAIL>>
License: MIT License
"""
import sys
import string
from binascii import hexlify
from datetime import datetime
BLOCKSIZE = 512
def dump(filename):
    """Hex/ASCII dump of the superblock (bytes 1024-1535) of *filename*.

    Prints 32 rows of 16 bytes each (Python 2 code).
    """
    def nonprintable_replace(char):
        # Replace non-printable characters (and layout whitespace) with a dot
        # so the ASCII column stays aligned.
        if char not in string.printable:
            return '.'
        if char in '\n\r\t\x0b\x0c':
            return '.'
        return char

    with open(filename, 'rb') as f:
        f.seek(2 * BLOCKSIZE)  # the superblock starts at byte 1024
        for i in xrange(BLOCKSIZE / 16):  # 512 bytes, 16 per row
            row = f.read(4), f.read(4), f.read(4), f.read(4)
            hex_string = ' '.join(map(hexlify, row))
            ascii_string = ''.join(map(nonprintable_replace, ''.join(row)))
            print '{0:2}: {1} {2}'.format(i + 1, hex_string, ascii_string)
def analyze(filename):
    """Decode and print the ext2/ext3 superblock fields of *filename*.

    Reads the superblock (bytes 1024-1535) strictly sequentially; every
    ``f.read(n)`` below advances the offset, so the order of statements
    encodes the on-disk layout and must not be changed (Python 2 code).
    """
    # Binary conversion functions
    def lsb2hex(b_string):
        """Take a binary string (from ``file.read()``) and convert it into a
        hex string by reversing the little-endian byte order."""
        msb_string = hexlify(b_string)
        # Split into byte pairs and reverse them: little-endian -> big-endian.
        lsb_string = ''.join([msb_string[x:x + 2] for x in range(0, len(msb_string), 2)][::-1])
        return lsb_string

    def lsb2ascii(b_string):
        """Take a binary string (from ``file.read()``) and convert it to an
        ascii string."""
        msb_string = hexlify(b_string)
        pairs = (msb_string[x:x + 2] for x in range(0, len(msb_string), 2))
        values = (int(x, 16) for x in pairs)
        return ''.join(map(chr, values))

    def lsb2int(b_string):
        """Take a binary string (from ``file.read()``) and convert it into an
        integer, interpreting it as little-endian."""
        lsb_string = lsb2hex(b_string)
        return int(lsb_string, 16)

    # Formatting functions
    def uuid(h_string):
        """Format a hex string like an UUID (8-4-4-4-12 groups)."""
        split = lambda x: [x[:8], x[8:12], x[12:16], x[16:20], x[20:]]
        return '-'.join(split(h_string))

    def timestamp(seconds):
        # Unix epoch seconds -> datetime (local time).
        return datetime.fromtimestamp(seconds)

    def map_bitmap(value, mapping):
        """Map a bitmap to the corresponding human readable strings."""
        return ' '.join([t[1] for t in mapping if value & t[0]]) or 'none'

    # Process superblock
    with open(filename, 'rb') as f:
        f.seek(2 * BLOCKSIZE)  # superblock starts at byte 1024

        # Bytes 0-15
        inodes_total = lsb2int(f.read(4))
        print 'Total number of inodes: {0:d}'.format(inodes_total)
        print 'Filesystem size in blocks: {0:d}'.format(lsb2int(f.read(4)))
        print 'Number of reserved blocks: {0:d}'.format(lsb2int(f.read(4)))
        print 'Free blocks counter: {0:d}'.format(lsb2int(f.read(4)))

        # Bytes 16-31
        print 'Free inodes counter: {0:d}'.format(lsb2int(f.read(4)))
        print 'Number of first block: {0:d}'.format(lsb2int(f.read(4)))
        val = lsb2int(f.read(4))
        # Block size is stored as a shift count: size = 1024 << val
        print 'Block size: {0:d} ({1:d} Byte)'.format(val, 1024 * 2 ** val)
        print 'Fragment size: {0:d}'.format(lsb2int(f.read(4)))

        # Bytes 32-47
        print 'Number blocks per group: {0:d}'.format(lsb2int(f.read(4)))
        print 'Number fragments per group: {0:d}'.format(lsb2int(f.read(4)))
        inodes_per_group = lsb2int(f.read(4))
        print 'Number inodes per group: {0:d}'.format(inodes_per_group)
        # Derived value, not a stored field (Python 2 integer division).
        print 'Number of block groups: {0:d}'.format(inodes_total / inodes_per_group)
        mtime = lsb2int(f.read(4))
        print 'Time of last mount: {0:d} ({1:%Y-%m-%d %H:%M:%S})'.format(mtime, timestamp(mtime))

        # Bytes 48-63
        wtime = lsb2int(f.read(4))
        print 'Time of last write: {0:d} ({1:%Y-%m-%d %H:%M:%S})'.format(wtime, timestamp(wtime))
        print 'Mount operations counter: {0:d}'.format(lsb2int(f.read(2)))
        print 'Number of mount operations before check: {0:d}'.format(lsb2int(f.read(2)))
        print 'Magic signature: {0:#X}'.format(lsb2int(f.read(2)))
        print 'Status flag: {0:d}'.format(lsb2int(f.read(2)))
        print 'Behavior when detecting errors: {0:d}'.format(lsb2int(f.read(2)))
        print 'Minor revision level: {0:d}'.format(lsb2int(f.read(2)))

        # Bytes 64-79
        lastcheck = lsb2int(f.read(4))
        print 'Time of last check: {0} ({1:%Y-%m-%d %H:%M:%S})'.format(lastcheck, timestamp(lastcheck))
        checkinterval = lsb2int(f.read(4))
        print 'Time between checks: {0:d}'.format(checkinterval)
        print 'OS Filesystem created: {0:d}'.format(lsb2int(f.read(4)))
        print 'Revision level: {0:d}'.format(lsb2int(f.read(4)))

        # Bytes 80-95
        print 'Default user ID for reserved blocks: {0:d}'.format(lsb2int(f.read(2)))
        print 'Default group ID for reserved blocks: {0:d}'.format(lsb2int(f.read(2)))
        print 'Number first nonreserved inode: {0:d}'.format(lsb2int(f.read(4)))
        print 'Size of on-disk inode structure: {0:d}'.format(lsb2int(f.read(2)))
        print 'Block group number of this superblock: {0:d}'.format(lsb2int(f.read(2)))
        feature_compat = lsb2int(f.read(4))
        feature_compat_s = map_bitmap(feature_compat, (
            (0x1, 'dir_prealloc'),
            (0x2, 'imagic_inodes'),
            (0x4, 'has_journal'),
            (0x8, 'ext_attr'),
            (0x10, 'resize_ino'),
            (0x20, 'dir_index'),
        ))
        print 'Compatible features bitmap: {0:06b} ({1})'.format(feature_compat, feature_compat_s)

        # Bytes 96-103
        feature_incompat = lsb2int(f.read(4))
        feature_incompat_s = map_bitmap(feature_incompat, (
            (0x1, 'compression'),
            (0x2, 'filetype'),
            (0x4, 'recover'),
            (0x8, 'journal_dev'),
            (0x10, 'meta_bg'),
        ))
        print 'Incompatible features bitmap: {0:05b} ({1})'.format(feature_incompat, feature_incompat_s)
        feature_ro_compat = lsb2int(f.read(4))
        feature_ro_compat_s = map_bitmap(feature_ro_compat, (
            (0x1, 'sparse_super'),
            (0x2, 'large_file'),
            (0x4, 'btree_dir'),
        ))
        print 'Read-only features bitmap: {0:03b} ({1})'.format(feature_ro_compat, feature_ro_compat_s)

        # Bytes 104-119
        print '128-bit filesystem identifier: {0}'.format(uuid(hexlify(f.read(16))))

        # Bytes 120-135
        print 'Volume name: {0}'.format(lsb2ascii(f.read(16)))

        # Bytes 136-199
        print 'Path of last mount point: {0}'.format(lsb2ascii(f.read(64)))

        # Bytes 200-205
        algo_bitmap = lsb2int(f.read(4))
        algo_bitmap_s = map_bitmap(algo_bitmap, (
            (0x1, 'lzv1'),
            (0x2, 'lzrw3a'),
            (0x4, 'gzip'),
            (0x8, 'bzip3'),
            (0x10, 'lzo'),
        ))
        print 'Compression Algorithm: {0:05b} ({1})'.format(algo_bitmap, algo_bitmap_s)
        print 'Number of blocks to preallocate: {0:d}'.format(lsb2int(f.read(1)))
        print 'Number of blocks to preallocate for directories: {0:d}'.format(lsb2int(f.read(1)))

        # Bytes 208-235
        f.read(2)  # Padding
        print 'Journal UUID: {0}'.format(uuid(hexlify(f.read(16))))
        print 'Journal inode number: {0:d}'.format(lsb2int(f.read(4)))
        print 'Journal device number: {0:d}'.format(lsb2int(f.read(4)))
        print 'Journal last orphan: {0:d}'.format(lsb2int(f.read(4)))

        # Bytes 236-255
        # Trailing commas keep the four hash-seed words on one output line.
        print 'Hash seed: {0:d}'.format(lsb2int(f.read(4))),
        print '{0:d}'.format(lsb2int(f.read(4))),
        print '{0:d}'.format(lsb2int(f.read(4))),
        print '{0:d}'.format(lsb2int(f.read(4)))
        print 'Hash version: {0:d}'.format(lsb2int(f.read(1)))
        f.read(3)  # Padding

        # Bytes 256-263
        defm_options = lsb2int(f.read(4))
        defm_options_s = map_bitmap(defm_options, (
            (0x1, 'debug'),
            (0x2, 'bsdgroups'),
            (0x4, 'xattr_user'),
            (0x8, 'acl'),
            (0x10, 'uid16'),
        ))
        print 'Default mount options: {0:05b} ({1})'.format(defm_options, defm_options_s)
        print 'First meta block group ID: {0:d}'.format(lsb2int(f.read(4)))
def run():
    """Command-line entry point: ``superblock.py [-h] [dump|analyze] <file>``.

    Exits with status 1 on bad usage (Python 2 code).
    """
    if '-h' in sys.argv or '--help' in sys.argv:
        print 'This is a script to analyze the superblock of an ext2/ext3 formatted file.\n'
        print 'Such a file can be created as follows:\n'
        print ' $ dd count=4096 if=/dev/zero of=filesystem.ext3'
        print ' $ sudo mkfs.ext3 filesystem.ext3\n'
        print 'It can be mounted with :\n'
        print ' $ sudo mount -t ext3 -o loop filesystem.ext3 /mnt/mountpoint\n'
    # Require exactly an action and a filename; the action must be known.
    if len(sys.argv) < 3 or sys.argv[1] not in ['dump', 'analyze']:
        print 'Usage: superblock.py [-h|--help] [dump|analyze] <filename>'
        sys.exit(1)
    action = sys.argv[1]
    filename = sys.argv[2]
    if action == 'dump':
        print '\nPrinting superblock (bytes 1024-1535) of file %s.\n' % filename
        print ' ' * 5 + 'HEX'.center(35) + ' ' + 'ASCII'.center(16)
        dump(filename)
    elif action == 'analyze':
        print '\nAnalyzing superblock (bytes 1024-1535) of file %s.\n' % filename
        analyze(filename)
# Script entry point (also exposed as the `superblock` console script).
if __name__ == '__main__':
    run()
|
dbrgn/superblock | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata; executed at import/install time by setuptools.
setup(name='superblock',
      version='0.2.1',
      description='A script written in Python 2 to analyze the superblock of an ' + \
                  'ext2/ext3 formatted file.',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/dbrgn/superblock',
      py_modules=['superblock'],
      license='MIT',
      keywords='filesystem superblock ext2 ext3 extfs analyze',
      # Long description is read straight from the README at build time.
      long_description=open('README.rst').read(),
      entry_points={
          # Installs a `superblock` console command mapped to superblock.run().
          'console_scripts': [
              'superblock = superblock:run',
          ]
      },
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: MacOS',
          'Operating System :: POSIX',
          'Programming Language :: Python :: 2',
          'Topic :: Education',
          'Topic :: System :: Filesystems',
          'Topic :: Utilities',
      ],
      )
|
sisyphe-re/traces | python_src/nodes.py | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Node:
    """
    Represent a node in the log file.

    Attributes
    ----------
    name : string
        the node's name
    list_rx : list
        list of received messages (MessageRx)
    list_tx : list
        list of transmitted messages (MessageTx)
    pdr : float
        the packet delivery ratio of this node, in percent
    latency : float
        the average transmission latency of this node, in milliseconds
    success : int
        total number of received copies of the messages sent by this node

    Notes
    -----
    Packet delivery ratio = ∑ Number of packet received / ∑ Number of packet send
    """

    def __init__(self, name):
        self.name = name
        self.list_rx = []
        self.list_tx = []
        self.pdr = 0
        self.latency = 0
        self.success = 0

    def __str__(self):
        # BUGFIX: ``self.latency`` used to be passed twice to format(), so the
        # "Send success" column silently printed the latency instead of
        # ``self.success``.
        return ("Name : {0},Nb rx : {1},Nb tx : {2},Pdr : {3},"
                "Average transmission latency : {4},Send success : {5}").format(
                    self.name, len(self.list_rx), len(self.list_tx),
                    self.pdr, self.latency, self.success)

    def getIdsRx(self):
        """Return the set of ids of this node's *received* messages.

        Return
        ------
        set
            ids of the messages in ``list_rx``
        """
        return {rx.msg_id for rx in self.list_rx}

    def getIdsTx(self):
        """Return the set of ids of this node's *transmitted* messages.

        Return
        ------
        set
            ids of the messages in ``list_tx``
        """
        return {tx.msg_id for tx in self.list_tx}
class Message:
    """
    A single message entry parsed from the log file.

    Attributes
    ----------
    timestamp : float
        timestamp of the message (milliseconds)
    msg_id : int
        identifier of this message
    node_name : string
        name of the node that received or transmitted this message
    """

    def __init__(self, timestamp, msg_id, node_name):
        self.timestamp = timestamp
        self.msg_id = msg_id
        self.node_name = node_name
class MessageRx(Message):
    """
    A *received* message entry.

    Extends ``Message`` with the reception latency (the time the message
    took to arrive, in milliseconds), filled in later by the analysis.
    """

    def __init__(self, timestamp, msg_id, node_name):
        super().__init__(timestamp, msg_id, node_name)
        self.latency = 0

    def __str__(self):
        return "received;{0};{1};{2};{3}".format(
            self.node_name, self.timestamp, self.msg_id, self.latency)
class MessageTx(Message):
    """
    A *transmitted* message entry.

    Extends ``Message`` with a success counter (number of nodes that
    received this message), filled in later by the analysis.
    """

    def __init__(self, timestamp, msg_id, node_name):
        super().__init__(timestamp, msg_id, node_name)
        self.success = 0

    def __str__(self):
        return "transmitted;{0};{1};{2};{3}".format(
            self.node_name, self.timestamp, self.msg_id, self.success)
def packetDeliveryRatio(nodes_list):
    """Compute and store the packet delivery ratio (``pdr``, %) on each node.

    Parameters
    ----------
    nodes_list
        a list of Node

    Notes
    -----
    Packet delivery ratio = ∑ Number of packet received / ∑ Number of packet send.
    For each node the denominator is the total transmitted by *other* nodes;
    when no other node transmitted anything the ratio is undefined and is
    reported as 0 (previously this raised ZeroDivisionError).
    """
    # Total of transmitted messages (over all nodes)
    total_tx = sum(len(node.list_tx) for node in nodes_list)
    for node in nodes_list:
        others_tx = total_tx - len(node.list_tx)
        if others_tx == 0:
            node.pdr = 0  # nothing was sent by other nodes: ratio undefined
        else:
            # Node pdr (%): received by this node / transmitted by the others
            node.pdr = (len(node.list_rx) / others_tx) * 100
def averageTransmissionLatency(nodes_list):
    """Compute per-node average transmission latency (milliseconds).

    For every message a node transmitted, each reception of the same
    ``msg_id`` on any node counts as one delivery success; the delivery
    latency is reception time minus transmission time.

    Parameters
    ----------
    nodes_list
        a list of Node

    Notes
    -----
    Side effects on each node ``n``: resets then fills ``n.success``,
    ``n.latency`` and ``n.pdr``; also increments ``tx.success`` on the
    transmitted messages and sets ``rx.latency`` on matched receptions.
    """
    for n in nodes_list:
        # BUGFIX: was ``n.sucess = 0`` (typo) — the success counter was
        # written to a dead attribute and never actually reset.
        n.success = 0
        n.pdr = 0
        latency_sum = 0  # total latency over all deliveries of n's messages
        # For every transmitted msg of node n, look for matching receptions
        # on every node.
        for tx in n.list_tx:
            for other in nodes_list:
                for rx in other.list_rx:
                    if tx.msg_id == rx.msg_id:
                        n.success += 1
                        tx.success += 1
                        # Latency between the transmission and the reception.
                        rx.latency = rx.timestamp - tx.timestamp
                        latency_sum += rx.latency
        if n.success:
            # Average transmission latency for this node.
            n.latency = latency_sum / n.success
            # NOTE(review): this ratio looks inverted (tx count / successes,
            # which exceeds 100% only when deliveries are lost); kept as-is
            # to preserve existing behavior — verify the intended formula.
            n.pdr = len(n.list_tx) / n.success * 100
|
sisyphe-re/traces | python_src/main.py | <filename>python_src/main.py<gh_stars>0
#!/usr/bin/python3
#
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import matplotlib.pyplot as plt
from shutil import copyfile
from nodes import Node, MessageTx, MessageRx
from nodes import averageTransmissionLatency, packetDeliveryRatio
def parseLog(log_filename):
    """Parse an experiment log file and return the nodes found in it.

    Only lines containing "Received" or "Sending" are kept. Expected line
    format: ``<seconds>;<node name>;<message>;<msg id>`` (msg id decimal or
    hexadecimal).

    Parameters
    ----------
    log_filename
        the name of the file containing the experiment's log

    Returns
    -------
    a view of Node objects (one per node name seen in the log); exits the
    process with status 3 if the file cannot be opened
    """
    try:
        log_file = open(log_filename, "r")
    except IOError:
        print(sys.argv[0] + ": " + log_filename + ": cannot open file")
        sys.exit(3)
    # BUGFIX: the file handle used to be leaked; `with` guarantees closing.
    with log_file:
        lines = log_file.readlines()
    nodes_dict = {}
    for line in lines:
        receiver = "Received" in line
        sender = "Sending" in line
        if not sender and not receiver:
            continue
        try:
            split = line.split(';')
            timestamp = float(split[0]) * 1000  # convert seconds -> milliseconds
            name = split[1]
            try:
                msg_id = int(split[3])
            except ValueError:  # was a bare except; only a non-decimal id is expected
                msg_id = int(split[3], 16)  # some logs store the id in hex
            # First time we see this node: register it.
            if name not in nodes_dict:
                nodes_dict[name] = Node(name)
            if receiver:
                nodes_dict[name].list_rx.append(MessageRx(timestamp, msg_id, name))
            else:  # sender
                nodes_dict[name].list_tx.append(MessageTx(timestamp, msg_id, name))
        except IndexError as e:
            print("Bad formatted line : {}".format(e))
    return nodes_dict.values()
def beautifyData(exp_name, nodes_list):
    """Write the raw data to files in a more readable way.

    Creates a directory named after the experiment containing
    ``<exp>Tx.data`` (transmitted messages) and ``<exp>Rx.data``
    (received messages).

    Format of the Tx file:
        transmitted;<node name>;<timestamp>;<message id>;<transmission success>
    Format of the Rx file:
        received;<node name>;<timestamp>;<message id>;<reception delay>

    where <transmission success> is the number of nodes that received the
    message and <reception delay> is the travel time in milliseconds.

    Parameters
    ----------
    exp_name
        the experience's name, used to name and store the beautified data
    nodes_list
        a list of Node
    """
    if not os.path.exists(exp_name):
        os.makedirs(exp_name)
    rx_path = "{}/{}Rx.data".format(exp_name, exp_name)
    tx_path = "{}/{}Tx.data".format(exp_name, exp_name)
    with open(rx_path, "w") as rx_file, open(tx_path, "w") as tx_file:
        tx_file.write("transmitted;node name;timestamp;message id;transmission success\n")
        rx_file.write("received;node name;timestamp;message id;reception delay\n")
        for node in nodes_list:
            tx_file.writelines("{}\n".format(msg) for msg in node.list_tx)
            rx_file.writelines("{}\n".format(msg) for msg in node.list_rx)
def generatePlots(exp_name, nodes_list):
    """Generate and save bar charts for the node statistics.

    Saves two PNG files in the experiment directory: the packet delivery
    ratio per node and the average transmission latency per node (ms).

    Parameters
    ----------
    exp_name
        the experiment's name; charts are stored in that directory
    nodes_list
        a list of Node

    Notes
    -----
    Packet delivery ratio = ∑ Number of packet received / ∑ Number of packet send
    """
    if not os.path.exists(exp_name):
        os.makedirs(exp_name)
    plt.rcdefaults()
    node_names = [node.name for node in nodes_list]
    pdr_values = [node.pdr for node in nodes_list]
    latencies = [node.latency for node in nodes_list]

    # Packet delivery ratio chart.
    plt.figure(figsize=(15, 15))
    plt.xticks(rotation='vertical')
    plt.bar(node_names, pdr_values, width=0.3)
    plt.xlabel("Node ID")
    plt.ylabel("PDR (%)")
    plt.title("Packet Delivery Ratio (PDR) per node")
    plt.savefig(exp_name + "/PacketDeliveryRatio.png")

    # Average latency chart.
    plt.figure(figsize=(14, 15))
    plt.xticks(rotation='vertical')
    plt.bar(node_names, latencies, align="edge", width=0.3)
    plt.xlabel("Node ID")
    plt.ylabel("Average Latency (ms)")
    plt.title("Average packet transmission latency per node")
    plt.savefig(exp_name + "/AverageTransmissionLatency.png")
# Command line: main.py <log filename> <exp name>
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: "+sys.argv[0]+" <log filename> <exp name>")
        sys.exit(2)
    log_filename = sys.argv[1]
    exp_name = sys.argv[2]
    nodes_list = parseLog(log_filename)
    #packetDeliveryRatio(nodes_list)
    # averageTransmissionLatency also fills the per-node pdr used by the plots.
    averageTransmissionLatency(nodes_list)
    beautifyData(exp_name, nodes_list)
    generatePlots(exp_name, nodes_list)
    # Just copy the rawdata in the same directory as beautified data
    # (the directory normally already exists — beautifyData created it).
    if not os.path.exists(exp_name):
        os.makedirs(exp_name)
    copyfile(log_filename, "{}/{}.rawdata".format(exp_name, exp_name))
|
datahappy1/czech_language_sentiment_analyzer | utils/utilities.py | """
utilities module
"""
import os
import re
import functools
from itertools import groupby, product
from data_preparation import czech_stemmer
CZECH_STOPWORDS_FILE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'data_preparation', 'czech_stopwords.txt'))
MARKDOWN_FILE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'README.md'))
class ProjectCommon:
    """
    Project-wide text-normalization helpers.

    All methods are static. The per-character replacement maps are
    pre-compiled into ``str.translate`` tables once at class-definition
    time instead of chaining one ``str.replace`` call per character on
    every invocation (single pass, same results).
    """

    # Single-pass table for punctuation/whitespace cleanup.
    # NOTE(review): the historical replace-dict literal contained two
    # space-like keys; the second is assumed to be a non-breaking space
    # (U+00A0) normalized to a plain space — confirm against the original.
    _NON_ALPHA_TABLE = str.maketrans(
        {'"': '', '.': '', '(': '', ')': '', ',': '',
         '-': '', '?': '', '!': '', ':': '', '/': '', '„': '',
         ' ': ' ', '\xa0': ' ', '%': '', '“': '', '*': '', '+': ''})

    # Single-pass table mapping Czech accented characters to ASCII.
    _DIACRITICS_TABLE = str.maketrans(
        {'ě': 'e', 'š': 's', 'č': 'c', 'ř': 'r', 'ž': 'z', 'ý': 'y',
         'á': 'a', 'í': 'i', 'é': 'e', 'ů': 'u', 'ú': 'u'})

    # Non-greedy HTML tag matcher, compiled once (was rebuilt per call).
    _HTML_TAG_RE = re.compile('<.*?>')

    @staticmethod
    @functools.lru_cache()
    def read_czech_stopwords(czech_stopwords_file_path) -> list:
        """
        Read the Czech stopwords file into a list (cached per path).

        :param czech_stopwords_file_path: path to the stopwords file
        :return: list of stopwords, one per file line
        """
        with open(czech_stopwords_file_path, 'r', encoding='utf8') as stop_word_file:
            # NOTE(review): ``line[:-1]`` strips the newline but would also chop
            # the last character of a final line with no trailing newline —
            # preserved as historical behavior.
            return [line[:-1] for line in stop_word_file]

    @staticmethod
    def remove_czech_stopwords(text) -> str:
        """
        Remove Czech stopwords from the input text.

        :param text: space-separated input text
        :return: the text with stopword tokens removed
        """
        stopwords = set(ProjectCommon.read_czech_stopwords(CZECH_STOPWORDS_FILE_PATH))
        return ' '.join(w for w in text.split(' ') if w not in stopwords)

    @staticmethod
    def remove_html(raw_text) -> str:
        """
        Strip HTML tags (non-greedy ``<...>`` spans) from the input string.

        :param raw_text: text possibly containing HTML markup
        :return: the text without tags
        """
        return ProjectCommon._HTML_TAG_RE.sub('', raw_text)

    @staticmethod
    def remove_non_alpha_chars(text) -> str:
        """
        Remove/normalize punctuation characters in a single translate pass.

        :param text: input text
        :return: the cleaned text
        """
        return text.translate(ProjectCommon._NON_ALPHA_TABLE)

    @staticmethod
    def remove_diacritics(text) -> str:
        """
        Replace Czech diacritics with their ASCII equivalents.

        :param text: input text
        :return: the text without diacritics
        """
        return text.translate(ProjectCommon._DIACRITICS_TABLE)

    @staticmethod
    def trimmer(text) -> str:
        """
        Remove leading and trailing space characters.

        :param text: input text
        :return: the trimmed text
        """
        return text.lstrip(' ').rstrip(' ')

    @staticmethod
    def remove_non_alpha_chars_and_html(text) -> str:
        """
        Trim, strip HTML and remove non-alpha characters, in that order.

        :param text: input text
        :return: the cleaned text
        """
        trimmed = ProjectCommon.trimmer(text)
        without_html = ProjectCommon.remove_html(trimmed)
        return ProjectCommon.remove_non_alpha_chars(without_html)

    @staticmethod
    def remove_all(text) -> str:
        """
        Run the full normalization pipeline:
        trim -> strip HTML -> strip punctuation -> drop stopwords ->
        stem -> strip diacritics.

        :param text: input text
        :return: the fully normalized text
        """
        cleaned = ProjectCommon.remove_non_alpha_chars_and_html(text)
        without_stopwords = ProjectCommon.remove_czech_stopwords(cleaned)
        stemmed = czech_stemmer.stemmer(without_stopwords)
        return ProjectCommon.remove_diacritics(stemmed)
class Webapp:
    """
    Helper routines used by the Flask web-application layer.
    """

    @staticmethod
    def input_string_preparator(input_string) -> list:
        """
        Split raw user input on common delimiters and normalize each token.

        :param input_string: raw text typed into the web form
        :return: list of fully normalized, non-empty tokens
        """
        raw_tokens = re.split(';|,|[ ]|-|[?]|[!]|\n', input_string)
        return [ProjectCommon.remove_all(token) for token in raw_tokens if token != '']

    @staticmethod
    def chart_data_preparator(input_data_set) -> dict:
        """
        Transform raw SQL rows of (date, sentiment) into the data
        structures expected by the Charts.js front end.

        :param input_data_set: iterable of (date, sentiment) tuples
        :return: dict with 'pie_by_sentiment' and 'time_series' chart data
        """
        sentiments = ['negative', 'positive', 'uncertain']
        observed_dates = list({row[0] for row in input_data_set})

        # Weight every real row with 1, then pad with an all-zero cartesian
        # product of sentiment/date pairs so empty groups still sum to 0.
        counted_rows = [row + (1,) for row in input_data_set]
        counted_rows += [(day, mood, 0)
                         for mood, day in product(sentiments, observed_dates)]

        # Pie chart: group by sentiment only and sum the weights.
        by_sentiment = sorted(counted_rows, key=lambda row: row[1])
        pie_data = [
            (sum(row[2] for row in grouped), mood)
            for mood, grouped in groupby(by_sentiment, lambda row: row[1])
        ]

        # Time series: group by (date, sentiment) and sum the weights.
        by_date_sentiment = sorted(counted_rows, key=lambda row: (row[0], row[1]))
        series_data = [
            (sum(row[2] for row in grouped), key[0], key[1])
            for key, grouped in groupby(by_date_sentiment,
                                        lambda row: (row[0], row[1]))
        ]

        return {
            'pie_by_sentiment': {'group_keys': sorted(sentiments),
                                 'output_data_set': pie_data},
            'time_series': {'group_keys': sorted(observed_dates),
                            'output_data_set': series_data},
        }

    @staticmethod
    def markdown_reader():
        """
        Read the project README markdown file and return its contents.

        :return: the markdown text
        """
        with open(MARKDOWN_FILE_PATH, "r") as markdown_file:
            return markdown_file.read()
|
datahappy1/czech_language_sentiment_analyzer | ml_models/logistic_regression/data_processor_logistic_regression.py | """
data processor for logistic regression
"""
import random
import pickle
import os
from langdetect import detect, lang_detect_exception
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import model_selection, linear_model
from utils.utilities import ProjectCommon
CZECH_STOPWORDS_FILE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'data_preparation', 'czech_stopwords.txt'))
TEMP_FILE_PATH = '../../data_preparation/reviews_with_ranks.csv'
PERSIST_MODEL_TO_FILE = True
def _read_temp_file_generator():
    """
    Yield ``(review_text, "neg"/"pos")`` tuples from the temp CSV file.

    Rows without a second column yield the ``'#NA'`` marker string instead
    (callers filter on ``len(item) == 2``, which the 3-char marker fails).

    :return: generator of tuples (or '#NA' markers)
    """
    # BUGFIX: the file handle used to be leaked; `with` guarantees closing
    # once the generator is exhausted or closed.
    with open(TEMP_FILE_PATH, encoding="utf8") as temp_file:
        for row in temp_file:
            try:
                columns = row.split(',')
                yield (columns[0].replace('"', ''),
                       "neg" if int(columns[1]) < 0 else "pos")
            except IndexError:
                yield '#NA'
def logistic_regression(persist_model_to_file):
    """
    Train and evaluate a logistic-regression sentiment model.

    Reads labeled reviews from the temp CSV, keeps only Czech-language
    rows, balances the classes (11500 "pos" / 11500 "neg"), vectorizes
    with bigram counts and grid-searches the regularization strength.

    :param persist_model_to_file: when True, pickle the fitted vectorizer
        and model to 'vectorizer.pkl' / 'model.pkl' in the cwd
    :return: accuracy score of the best estimator on the held-out test set
    """
    temp_file_reviews_work = []
    temp_file_gen = _read_temp_file_generator()
    for tfg in temp_file_gen:
        # Only well-formed (text, label) tuples; '#NA' markers have len 3.
        if len(tfg) == 2:
            try:
                _detected_lang = detect(ProjectCommon.remove_non_alpha_chars_and_html(tfg[0]))
            except lang_detect_exception.LangDetectException:
                # langdetect cannot classify the text (too short/ambiguous): skip it.
                continue
            if _detected_lang == 'cs':
                temp_file_reviews_work.append((ProjectCommon.remove_all(tfg[0]), tfg[1]))
    # Balance the dataset: cap both classes at 11500 samples each.
    temp_file_reviews_work = [x for x in temp_file_reviews_work if x[1] == "pos"][:11500] + \
                             [x for x in temp_file_reviews_work if x[1] == "neg"][:11500]
    random.shuffle(temp_file_reviews_work)
    # 80/20 train/test split on (texts, labels).
    Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split([x[0] for x in temp_file_reviews_work],
                                                                        [x[1] for x in temp_file_reviews_work],
                                                                        test_size=0.2)
    # Bigram bag-of-words; tokens must appear in at least 5 documents.
    vect = CountVectorizer(min_df=5, ngram_range=(2, 2))
    Train_X = vect.fit(Train_X).transform(Train_X)
    Test_X = vect.transform(Test_X)
    # Grid-search the inverse regularization strength C with 5-fold CV.
    param_grid = {'C': [0.001, 0.01, 0.1, 1, 10]}
    grid = model_selection.GridSearchCV(linear_model.LogisticRegression(max_iter=1000), param_grid, cv=5)
    grid.fit(Train_X, Train_Y)
    lr = grid.best_estimator_
    lr.fit(Train_X, Train_Y)
    if persist_model_to_file:
        pickle.dump(vect, open('vectorizer.pkl', 'wb'))
        pickle.dump(lr, open('model.pkl', 'wb'))
    # # accuracy score calculation: 0.821
    # lr.predict(Test_X)
    # print("Score: {:.2f}".format(lr.score(Test_X, Test_Y)))
    # # adhoc input prediction:
    # input_string = input_string[0]
    # input_string = [x for x in input_string.split()]
    # print(input_string)
    # print("prediction: {}". format(lr.predict(vect.transform(input_string))))
    # return accuracy score
    return lr.score(Test_X, Test_Y)
# Train, evaluate and (optionally) persist the model when run as a script.
if __name__ == "__main__":
    print(logistic_regression(PERSIST_MODEL_TO_FILE))
|
datahappy1/czech_language_sentiment_analyzer | ml_models/naive_bayes/data_processor_naive_bayes.py | """
data processor for logistic regression
"""
import random
import pickle
import os
from langdetect import detect, lang_detect_exception
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics, model_selection
from utils.utilities import ProjectCommon
CZECH_STOPWORDS_FILE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'data_preparation', 'czech_stopwords.txt'))
TEMP_FILE_PATH = '../../data_preparation/reviews_with_ranks.csv'
PERSIST_MODEL_TO_FILE = True
def _read_temp_file_generator():
    """
    Yield ``(review_text, 0/1)`` tuples from the temp CSV file
    (0 = negative, 1 = positive).

    Rows without a second column yield the ``'#NA'`` marker string instead
    (callers filter on ``len(item) == 2``, which the 3-char marker fails).

    :return: generator of tuples (or '#NA' markers)
    """
    # BUGFIX: the file handle used to be leaked; `with` guarantees closing
    # once the generator is exhausted or closed.
    with open(TEMP_FILE_PATH, encoding="utf8") as temp_file:
        for row in temp_file:
            try:
                columns = row.split(',')
                yield (columns[0].replace('"', ''),
                       0 if int(columns[1]) < 0 else 1)
            except IndexError:
                yield '#NA'
def naive_bayes(persist_model_to_file):
    """
    Train and evaluate a multinomial naive Bayes sentiment model.

    Reads labeled reviews from the temp CSV, keeps only Czech-language
    rows, balances the classes (11500 of label 0 / 11500 of label 1),
    vectorizes with unigram counts and fits MultinomialNB.

    :param persist_model_to_file: when True, pickle the fitted vectorizer
        and model to 'vectorizer.pkl' / 'model.pkl' in the cwd
    :return: AUC of the fitted model on the held-out test set
    """
    temp_file_reviews_work = []
    temp_file_gen = _read_temp_file_generator()
    for tfg in temp_file_gen:
        # Only well-formed (text, label) tuples; '#NA' markers have len 3.
        if len(tfg) == 2:
            try:
                _detected_lang = detect(ProjectCommon.remove_non_alpha_chars_and_html(tfg[0]))
            except lang_detect_exception.LangDetectException:
                # langdetect cannot classify the text (too short/ambiguous): skip it.
                continue
            if _detected_lang == 'cs':
                temp_file_reviews_work.append((ProjectCommon.remove_all(tfg[0]), tfg[1]))
    # Balance the dataset: cap both classes at 11500 samples each.
    temp_file_reviews_work = [x for x in temp_file_reviews_work if x[1] == 0][:11500] + \
                             [x for x in temp_file_reviews_work if x[1] == 1][:11500]
    random.shuffle(temp_file_reviews_work)
    # 80/20 train/test split on (texts, labels).
    Train_X, Test_X, Train_Y, Test_Y = model_selection. \
        train_test_split([x[0] for x in temp_file_reviews_work],
                         [x[1] for x in temp_file_reviews_work],
                         test_size=0.2)
    vect = CountVectorizer()
    Train_X = vect.fit_transform([x for x in Train_X])
    Test_X = vect.transform([x for x in Test_X])
    nb = MultinomialNB()
    nb.fit(Train_X, Train_Y)
    if persist_model_to_file:
        pickle.dump(vect, open('vectorizer.pkl', 'wb'))
        pickle.dump(nb, open('model.pkl', 'wb'))
    # # accuracy score calculation: 0.903
    predictions = nb.predict(Test_X)
    # ROC curve with "positive" (label 1) as the positive class.
    fpr, tpr, thresholds = metrics.roc_curve([x for x in Test_Y], predictions, pos_label=1)
    # print("Multinomial naive bayes AUC: {0}".format(metrics.auc(fpr, tpr)))
    # # adhoc input prediction:
    # input_string = input_string[0]
    # input_string = [x for x in input_string.split()]
    # print(input_string)
    # print("prediction: {}". format(nb.predict(vect.transform(input_string))))
    # return accuracy score
    return metrics.auc(fpr, tpr)
# Train, evaluate and (optionally) persist the model when run as a script.
if __name__ == "__main__":
    print(naive_bayes(PERSIST_MODEL_TO_FILE))
|
datahappy1/czech_language_sentiment_analyzer | ml_models/webapp_interface.py | """
ml models interface for Flask web application
"""
import os
import pickle
def _pickle_load(model_type, file_name):
    """
    Load a pickled model artefact stored next to this module.

    :param model_type: sub-directory name holding the artefact
                       (e.g. 'naive_bayes')
    :param file_name: pickle file name (e.g. 'model.pkl')
    :return: the unpickled object
    """
    model_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), model_type, file_name))
    # BUG FIX: context manager closes the file handle deterministically
    # (previously pickle.load(open(...)) leaked it)
    with open(model_path, 'rb') as model_file:
        return pickle.load(model_file)
# load the pre-trained pickled models at import time; the SVM pipeline
# embeds its own vectorizer, so only NB and LR need a separate one
VECTOR_NB = _pickle_load('naive_bayes', 'vectorizer.pkl')
MODEL_NB = _pickle_load('naive_bayes', 'model.pkl')
VECTOR_LR = _pickle_load('logistic_regression', 'vectorizer.pkl')
MODEL_LR = _pickle_load('logistic_regression', 'model.pkl')
MODEL_SVM = _pickle_load('support_vector_machine', 'model.pkl')
# hard-coded per-model precision scores (measured offline) used to weight
# each model's probability in the blended verdict
PRECISION_NB = 0.886
PRECISION_LR = 0.837
PRECISION_SVM = 0.846
PRECISION_SUM = PRECISION_NB + PRECISION_LR + PRECISION_SVM
PRECISION_NB_WEIGHT_AVG = PRECISION_NB / PRECISION_SUM
PRECISION_LR_WEIGHT_AVG = PRECISION_LR / PRECISION_SUM
PRECISION_SVM_WEIGHT_AVG = PRECISION_SVM / PRECISION_SUM
def ml_model_evaluator(input_string):
    """
    Blend the three model probabilities into one weighted sentiment verdict.

    :param input_string: list with one preprocessed text to score
    :return: dict with an 'overall_sentiment' entry holding the
             sentiment label and the blended probability
    """
    # probability of the first class from each model
    # (low values map to 'positive' below, high to 'negative')
    nb_prob = MODEL_NB.predict_proba(VECTOR_NB.transform(input_string))[0][0]
    lr_prob = MODEL_LR.predict_proba(VECTOR_LR.transform(input_string))[0][0]
    svm_prob = MODEL_SVM.predict_proba(input_string)[0][0]
    # precision-weighted average of the three probabilities
    blended_proba = round(
        nb_prob * PRECISION_NB_WEIGHT_AVG
        + lr_prob * PRECISION_LR_WEIGHT_AVG
        + svm_prob * PRECISION_SVM_WEIGHT_AVG, 2)
    if blended_proba <= 0.45:
        verdict = 'positive'
    elif blended_proba >= 0.55:
        verdict = 'negative'
    else:
        # probabilities between the two thresholds are inconclusive
        verdict = 'uncertain'
    prediction_output = dict()
    prediction_output['overall_sentiment'] = {'sentiment': verdict,
                                              'probability': blended_proba}
    return prediction_output
|
datahappy1/czech_language_sentiment_analyzer | tests/webapp_test.py | """
Flask app Pytest testing suite
"""
from flask_webapp.app import APP
API_PREFIX = APP.config['api_prefix']
def test_main_get():
    """GET / renders the index page with the expected title."""
    APP.testing = True
    response = APP.test_client().get('/')
    assert response.status_code == 200
    assert b'<title>Czech sentiment analyzer Datahappy \xc2\xa92019</title>' in response.data
def test_main_post_no_input_text():
    """POST / without a form body shows the not-enough-words error."""
    APP.testing = True
    response = APP.test_client().post('/')
    assert response.status_code == 200
    assert b'<div id="error_message" class="alert alert-danger" style="display:block;">' \
           b'Sorry, need to submit at least 3 non stop-words</div>' in response.data
def test_main_post_invalid_input_text_too_few_words():
    """POST / with fewer than 3 non stop-words shows an error banner."""
    response = APP.test_client().post('/', data=dict(Input_Text='a jsi'),
                                      follow_redirects=True)
    assert response.status_code == 200
    assert b'<div id="error_message" class="alert alert-danger" style="display:block;">' \
           b'Sorry, need to submit at least 3 non stop-words</div>' in response.data
def test_main_post_invalid_input_text_too_short_words():
    """POST / with only 1-2 character words shows the word-length error."""
    response = APP.test_client().post('/', data=dict(Input_Text='a b c d'),
                                      follow_redirects=True)
    assert response.status_code == 200
    assert b'<div id="error_message" class="alert alert-danger" style="display:block;">' \
           b'Sorry, need to submit at least 1 word with 3 and more characters</div>' in response.data
def test_main_post_invalid_input_text_not_czech_language():
    """POST / with non-Czech text shows the language error banner."""
    response = APP.test_client().post('/', data=dict(Input_Text='ein zwei polizei'),
                                      follow_redirects=True)
    assert response.status_code == 200
    assert b'<div id="error_message" class="alert alert-danger" style="display:block;">' \
           b'Sorry, need to submit text written in Czech</div>' in response.data
def test_main_post_valid_input_text_positive():
    """POST / with clearly positive Czech text renders a positive verdict."""
    response = APP.test_client().post('/', data=dict(Input_Text='Skvělé funkcionální testy'),
                                      follow_redirects=True)
    assert response.status_code == 200
    assert b'overall_sentiment : <b>positive</b>' in response.data
def test_main_post_valid_input_text_negative():
    """POST / with clearly negative Czech text renders a negative verdict."""
    response = APP.test_client().post('/', data=dict(Input_Text='Hrozné funkcionální testy'),
                                      follow_redirects=True)
    assert response.status_code == 200
    assert b'overall_sentiment : <b>negative</b>' in response.data
def test_api_get():
    """GET on the POST-only api endpoint returns a 405 JSON error."""
    APP.testing = True
    response = APP.test_client().get(API_PREFIX)
    assert response.status_code == 405
    assert b'{"error":"405 Method Not Allowed: The method is not allowed for the requested URL.",' \
           b'"mimetype":"application/json","status":405}' in response.data
def test_api_post_no_input_text():
    """POST to the api without a body returns a 400 JSON error."""
    APP.testing = True
    response = APP.test_client().post(API_PREFIX)
    assert response.status_code == 400
    assert b'{"error":"Sorry, need to submit at least 3 non stop-words",' \
           b'"mimetype":"application/json","status":400}' in response.data
def test_api_post_invalid_input_text_too_few_words():
    """POST to the api with under 3 non stop-words returns a 400 JSON error."""
    response = APP.test_client().post(API_PREFIX, data=dict(Input_Text='a jsi'),
                                      follow_redirects=True)
    assert response.status_code == 400
    assert b'{"error":"Sorry, need to submit at least 3 non stop-words",' \
           b'"mimetype":"application/json","status":400}' in response.data
def test_api_post_invalid_input_text_too_short_words():
    """POST to the api with only short words returns the word-length 400 error."""
    response = APP.test_client().post(API_PREFIX, data=dict(Input_Text='a b c d'),
                                      follow_redirects=True)
    assert response.status_code == 400
    assert b'{"error":"Sorry, need to submit at least 1 word with 3 and more characters",' \
           b'"mimetype":"application/json","status":400' in response.data
def test_api_post_invalid_input_text_not_czech_language():
    """POST to the api with non-Czech text returns the language 400 error."""
    response = APP.test_client().post(API_PREFIX, data=dict(Input_Text='ein zwei polizei'),
                                      follow_redirects=True)
    assert response.status_code == 400
    assert b'{"error":"Sorry, need to submit text written in Czech",' \
           b'"mimetype":"application/json","status":400}' in response.data
def test_api_post_valid_input_text_positive():
    """POST to the api with positive Czech text returns a positive verdict."""
    response = APP.test_client().post(API_PREFIX, data=dict(Input_Text='Skvělé funkcionální testy'),
                                      follow_redirects=True)
    assert response.status_code == 200
    assert b'"sentiment":"positive"' in response.data
def test_api_post_valid_input_text_negative():
    """POST to the api with negative Czech text returns a negative verdict."""
    response = APP.test_client().post(API_PREFIX, data=dict(Input_Text='Hrozné funkcionální testy'),
                                      follow_redirects=True)
    assert response.status_code == 200
    assert b'"sentiment":"negative"' in response.data
|
datahappy1/czech_language_sentiment_analyzer | data_preparation/data_collector_movie_review_scraper.py | <reponame>datahappy1/czech_language_sentiment_analyzer
"""
data collector
"""
from random import randint
from time import sleep
import csv
import re
import concurrent.futures
import datetime
from bs4 import BeautifulSoup
import urllib3
import requests
from utils.utilities import ProjectCommon
# csv file receiving one (review, rank) tuple per line
OUTPUT_FILE_PATH = 'reviews_with_ranks.csv'
# filled by movie_review_scraper worker threads with (review, rank) tuples
SCRAPER_FINAL_OUTPUT = []
# filled by movie_review_url_collector with review-page urls to scrape
MOVIE_REVIEW_URLS = []
class Anonymize:
    """
    Scraper traffic-masking helpers: a pool of rotating User-Agent
    headers and randomized delays between requests.
    """
    def __init__(self):
        # pool of User-Agent headers; one is picked at random per request
        self.headers = [{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5)'
                                       ' AppleWebKit/537.36 (KHTML, like Gecko) '
                                       'Chrome/50.0.2661.102 Safari/537.36'},
                        {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; '
                                       'rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 '
                                       '(.NET CLR 3.5.30729)'},
                        {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                                       'MyAppName/1.0.0 (<EMAIL>)'}]
    @staticmethod
    def sleeper():
        """Pause for a random 2-5 second interval between requests."""
        sleep(randint(2, 5))
    def randomize_request_headers(self):
        """Return one of the pooled User-Agent headers, chosen at random."""
        last_index = len(self.headers) - 1
        return self.headers[randint(0, last_index)]
def movie_review_url_collector():
    """
    Fill the module-level MOVIE_REVIEW_URLS list with review-page urls for
    the top 300 worst and top 300 best movies on csfd.cz (review pages 2-7
    of each movie).

    :return: 0 on completion
    """
    start_page_urls = ['https://www.csfd.cz/zebricky/nejhorsi-filmy/?show=complete',
                       'https://www.csfd.cz/zebricky/nejlepsi-filmy/?show=complete']
    anonymize = Anonymize()
    for start_page in start_page_urls:
        page = requests.get(start_page, headers=anonymize.randomize_request_headers())
        soup = BeautifulSoup(page.content, 'html.parser')
        movie_review_url = soup.find_all('td', attrs={'class': 'film'})
        for url_item in movie_review_url[:300]:
            children = url_item.findChildren("a", recursive=False)
            # the movie url slug is the 3rd '/'-separated segment of the
            # stringified <a> element -- relies on csfd.cz link markup
            movie_name = str(children).split("/")[2]
            for random_index in ([2, 3, 4, 5, 6, 7]):
                review_page = str(random_index)
                MOVIE_REVIEW_URLS.append('https://www.csfd.cz/film/{}/komentare/strana-{}'.
                                         format(movie_name, review_page))
    return 0
def movie_review_scraper(url_to_scrape):
    """
    Request one csfd.cz review page and scrape (review, rank) tuples from it.

    Appends results to the module-level SCRAPER_FINAL_OUTPUT list. The rank
    is derived from the star-count in the <img alt="*..."> tag, or from the
    one-word "odpad!" ("junk") badge in a <strong> tag:
    "odpad!"/* -> -2, ** -> -1, *** -> 1, **** / ***** -> 2.
    The review text sits in <p class="post">, terminated by
    <span class="date desc">.

    :param url_to_scrape: url of the review page to scrape
    :return: None
    """
    anonymize = Anonymize()
    print(f'{datetime.datetime.now()} started scraping {url_to_scrape}')
    try:
        anonymize.sleeper()
        # BUG FIX: previously requested the module-global `url` (assigned only
        # in the __main__ result loop, so racy/undefined inside worker
        # threads) instead of the argument passed by the executor
        page = requests.get(url_to_scrape, headers=anonymize.randomize_request_headers())
        if page.status_code == 200:
            soup = BeautifulSoup(page.content, 'html.parser')
            _l_substring_to_trim_from = '<p class="post">'
            _r_substring_to_trim_to = '<span class="date desc">'
            for soup_item in soup.find_all("li", {"id": re.compile(r"comment-*")}):
                scraper_temp_output = []
                img = soup_item.findChildren("img",
                                             attrs={'class': 'rating'})
                strong = soup_item.findChildren(["strong", "p"],
                                                attrs={'class': ['rating', 'post']})
                if strong and str(strong).startswith('[<strong class="rating">odpad!</strong>'):
                    # variation #1: one-word "odpad!" rating in a <strong> tag
                    _r_trim = len(str(strong)) - str(strong).rfind(_r_substring_to_trim_to)
                    _l_trim = str(strong).rfind(_l_substring_to_trim_from) + len(_l_substring_to_trim_from)
                    scraper_temp_output.append({'rank': -2,
                                                'review': str(strong)[_l_trim:-_r_trim]})
                else:
                    # variation #2: star-count rank in the img alt text; the
                    # closing quote in each prefix makes the matches exact
                    _r_trim = len(str(img)) - str(img).rfind(_r_substring_to_trim_to)
                    _l_trim = str(img).rfind(_l_substring_to_trim_from) + len(_l_substring_to_trim_from)
                    if img and str(img).startswith('[<img alt="*"'):
                        scraper_temp_output.append({'rank': -2,
                                                    'review': str(img)[_l_trim:-_r_trim]})
                    elif img and str(img).startswith('[<img alt="**"'):
                        scraper_temp_output.append({'rank': -1,
                                                    'review': str(img)[_l_trim:-_r_trim]})
                    elif img and str(img).startswith('[<img alt="***"'):
                        scraper_temp_output.append({'rank': 1,
                                                    'review': str(img)[_l_trim:-_r_trim]})
                    elif img and str(img).startswith('[<img alt="****"'):
                        scraper_temp_output.append({'rank': 2,
                                                    'review': str(img)[_l_trim:-_r_trim]})
                    elif img and str(img).startswith('[<img alt="*****"'):
                        scraper_temp_output.append({'rank': 2,
                                                    'review': str(img)[_l_trim:-_r_trim]})
                for item in scraper_temp_output:
                    raw_review = item.get('review')
                    review = ProjectCommon.remove_html(str(raw_review).lower())
                    rank = item.get('rank')
                    SCRAPER_FINAL_OUTPUT.append((review, rank))
            print(f'{datetime.datetime.now()} finished scraping {url_to_scrape}')
        else:
            print(f'{datetime.datetime.now()} Invalid request status code '
                  f'{str(page.status_code)} for {url_to_scrape}')
    except urllib3.exceptions.ConnectionError as connerr:
        print(str(connerr))
    except Exception as exc:
        # best-effort scraper: log and move on so one bad page cannot
        # kill the worker pool
        print(str(exc))
if __name__ == "__main__":
    # fill the module-level list with urls used for movie data scraping
    movie_review_url_collector()
    # scrape the collected urls in a 3-worker thread pool; each worker
    # appends its findings to SCRAPER_FINAL_OUTPUT
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
        FUTURE_TO_URL = {executor.submit(movie_review_scraper, url):
                             url for url in MOVIE_REVIEW_URLS}
        for future in concurrent.futures.as_completed(FUTURE_TO_URL):
            url = FUTURE_TO_URL[future]
            try:
                # result() is discarded; calling it surfaces worker exceptions
                data = future.result()
            except Exception as exc:
                print('%r generated an exception: %s' % (url, exc))
    # write the scraped (review, rank) tuples to the csv output file
    with open(OUTPUT_FILE_PATH, 'w', encoding='utf8', newline='\n') as fw:
        writer = csv.writer(fw, escapechar='/', quoting=csv.QUOTE_NONNUMERIC)
        writer.writerows(SCRAPER_FINAL_OUTPUT)
    print("Movie review data collection phase complete.")
|
datahappy1/czech_language_sentiment_analyzer | flask_webapp/database/conn_remote_postgres.py | """
remote postgres db connection module
"""
import os
import urllib.parse as urlparse
import psycopg2
class Connect:
    """
    Postgres connection factory reading its connection info from the
    DATABASE_URL environment variable (Heroku-style url).
    """
    def __init__(self):
        """
        Capture the database url from the environment.

        The url is parsed lazily in connect(); a missing variable
        yields db_url = None.
        """
        self.db_url = os.environ.get('DATABASE_URL')
    def __repr__(self):
        return str(self.db_url)
    def connect(self):
        """
        Open and return a psycopg2 connection parsed from db_url.

        :return: psycopg2 connection (sslmode=require)
        :raises psycopg2.Error: when the connection cannot be established
        """
        db_url_parsed = urlparse.urlparse(self.db_url)
        db_name = db_url_parsed.path[1:]
        user = db_url_parsed.username
        # BUG FIX: restore the password extraction -- the line had been
        # corrupted to a 'db_<PASSWORD>' placeholder (a syntax error)
        password = db_url_parsed.password
        host = db_url_parsed.hostname
        port = db_url_parsed.port
        try:
            conn = psycopg2.connect(
                dbname=db_name,
                user=user,
                password=password,
                host=host,
                port=port,
                sslmode='require'
            )
        except psycopg2.Error as psycopg2_err:
            raise psycopg2_err
        return conn
|
datahappy1/czech_language_sentiment_analyzer | flask_webapp/app.py | <gh_stars>1-10
"""
__main__.py
"""
import os
from datetime import date, datetime, timedelta
from flask import Flask, render_template, send_from_directory, request, jsonify, g
from flaskext.markdown import Markdown
from flask_caching import Cache
from waitress import serve
from langdetect import detect
from flask_webapp.exceptions import NotEnoughNonStopWordsException, NotEnoughWordsLengthException, \
InvalidDetectedLanguageException, EXCEPTION_TYPE_RESPONSE_MESSAGE_MAP
from flask_webapp.database import __env__
from flask_webapp.database.database_interface import Database
from utils.utilities import Webapp
from ml_models import webapp_interface
def create_app():
    """
    Application factory building the Flask instance.

    :return: a fresh Flask app bound to this module's name
    """
    return Flask(__name__)
APP = create_app()
# bind the flask-markdown extension to the app
Markdown(APP)
# pre-load the markdown file content served by the /methodology route
APP.config['md_content'] = Webapp.markdown_reader()
# base url prefix shared by the api route and the error handlers
APP.config['api_prefix'] = '/api/v1/prediction/'
# in-memory cache backend for flask-caching
APP.config['CACHE_TYPE'] = 'simple'
# register the cache instance and bind it to the app
APP.cache = Cache(APP)
# language detection module allowed languages
# Slovak, Slovenian, Croatian allowed because langdetect module,
# when submitting Czech text without the diacritics detects one of these
APP.config['acceptable_detected_language_codes'] = ['cs', 'sk', 'sl', 'hr']
# run the build DB script on app startup, instantiate the Db object
# if __env__ is local ( env. variable DATABASE_URL not set ) -> Sqlite3
# if __env__ is remote ( env. variable DATABASE_URL configured for Heroku Postgres) -> Postgres
DB_OBJ = Database(__env__)
DB_OBJ.db_builder()
def get_db():
    """
    Return the per-request database connection, opening it lazily.

    The connection is cached on flask's application-context global ``g``
    so repeated calls within one request share a single connection.

    :return: open DB connection
    """
    if getattr(g, '_database', None) is None:
        g._database = Database.connect(DB_OBJ)
    return g._database
def write_stats_to_table(sentiment_result):
    """
    Insert one (timestamp, sentiment) stats row into the stats table.

    :param sentiment_result: sentiment label string to record
    :return: None
    """
    cur = get_db().cursor()
    data_tuple = (datetime.now(), sentiment_result)
    cur.execute(DB_OBJ.db_insert_stats_query, data_tuple)
    get_db().commit()
def process_input_text(input_text):
    """
    Validate the submitted text and run it through the sentiment models.

    Validation order: non-empty check, stop-word filtering + word count,
    word length, then language detection on the raw text.

    :param input_text: raw text submitted by the user
    :return: prediction_output dict from ml_model_evaluator
    :raises NotEnoughNonStopWordsException: empty input or < 3 non stop-words
    :raises NotEnoughWordsLengthException: no word of 3+ characters
    :raises InvalidDetectedLanguageException: detected language not accepted
    """
    def _validate_input_text(_input_text):
        # empty/None input maps to the same error as too few words
        if not _input_text:
            raise NotEnoughNonStopWordsException
    def _validate_detected_language(detected_language):
        if detected_language not in APP.config['acceptable_detected_language_codes']:
            raise InvalidDetectedLanguageException
    def _create_input_text_lowered_list(_input_text):
        # lower-case and strip stop-words; stop-words come back as ''
        return Webapp.input_string_preparator(_input_text.lower())
    def _is_invalid_non_stop_word_count(_input_text_lowered_list):
        # count non-empty entries without building a throwaway list
        if sum(1 for i in _input_text_lowered_list if i != '') < 3:
            raise NotEnoughNonStopWordsException
    def _is_invalid_word_length_count(_input_text_lowered_list):
        # generator form: short-circuits on the first 3+ character word
        if all(len(i) < 3 for i in _input_text_lowered_list):
            raise NotEnoughWordsLengthException
    def _get_sentiment_result(_input_text_lowered_list):
        return webapp_interface.ml_model_evaluator([' '.join(_input_text_lowered_list)])
    _validate_input_text(input_text)
    input_text_lowered_list = _create_input_text_lowered_list(input_text)
    _is_invalid_non_stop_word_count(input_text_lowered_list)
    _is_invalid_word_length_count(input_text_lowered_list)
    # language detection runs on the original (not lowered/filtered) text
    _validate_detected_language(detect(input_text))
    return _get_sentiment_result(input_text_lowered_list)
def post_request_exception_handler(handler_type, input_text, exception_type):
    """
    Build the error response for a failed sentiment request.

    :param handler_type: 'main' for the HTML page, 'api' for the JSON api
    :param input_text: text the user submitted (echoed back on the page)
    :param exception_type: key into EXCEPTION_TYPE_RESPONSE_MESSAGE_MAP
    :return: rendered template or JSON 400 response;
             None for an unknown handler_type
    """
    if handler_type == "main":
        return render_template(
            'index.html',
            template_input_string=input_text,
            template_error_message=EXCEPTION_TYPE_RESPONSE_MESSAGE_MAP[exception_type])
    if handler_type == "api":
        payload = {
            'status': 400,
            'error': EXCEPTION_TYPE_RESPONSE_MESSAGE_MAP[exception_type],
            'mimetype': 'application/json'
        }
        response = jsonify(payload)
        response.status_code = 400
        return response
def post_request_success_handler(handler_type, input_text, sentiment_result):
    """
    Build the success response for a completed sentiment request.

    :param handler_type: 'main' for the HTML page, 'api' for the JSON api
    :param input_text: text the user submitted (echoed back on the page)
    :param sentiment_result: prediction dict from the model evaluator
    :return: rendered template or JSON 200 response;
             None for an unknown handler_type
    """
    if handler_type == "main":
        return render_template(
            'index.html',
            template_input_string=input_text,
            template_sentiment_result=sentiment_result)
    if handler_type == "api":
        payload = {
            'status': 200,
            'sentiment_result': sentiment_result,
            'mimetype': 'application/json'
        }
        response = jsonify(payload)
        response.status_code = 200
        return response
def post_request_sentiment_analyzer_handler(handler_type, input_text):
    """
    Run sentiment analysis on the submitted text and dispatch the response.

    :param handler_type: 'main' for the HTML page, 'api' for the JSON api
    :param input_text: raw text submitted by the user
    :return: success or error response from the respective handler
    """
    try:
        sentiment_result = process_input_text(input_text)
    except (NotEnoughNonStopWordsException,
            NotEnoughWordsLengthException,
            InvalidDetectedLanguageException) as validation_err:
        # the message-map keys equal the exception class names, so one
        # handler covers all three validation failures
        return post_request_exception_handler(
            handler_type, input_text, type(validation_err).__name__
        )
    except Exception:
        # anything unexpected maps to the generic error message
        return post_request_exception_handler(handler_type, input_text, "GenericException")
    # record the verdict for the /stats views before responding
    write_stats_to_table(
        sentiment_result=sentiment_result.get('overall_sentiment').get('sentiment')
    )
    return post_request_success_handler(handler_type, input_text, sentiment_result)
@APP.teardown_appcontext
def close_connection(exception):
    """
    Close the per-request database connection, if one was opened.

    :param exception: passed in by Flask's teardown hook (unused here)
    :return: None
    """
    database = getattr(g, '_database', None)
    if database is not None:
        database.close()
@APP.route('/favicon.ico')
def favicon():
    """
    Serve the favicon from the static directory with the proper mimetype.

    :return: static file response
    """
    return send_from_directory(os.path.join(APP.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
@APP.errorhandler(400)
def bad_request(error):
    """
    400 handler: JSON for api-prefixed paths, HTML error page otherwise.

    :param error: error raised by Flask
    :return: error html page or api response
    """
    if request.path.startswith(APP.config["api_prefix"]):
        response = jsonify({
            'status': 400,
            'error': str(error),
            'mimetype': 'application/json'
        })
        response.status_code = 400
        return response
    return render_template('error_page.html', template_error_message=error)
@APP.errorhandler(405)
def not_allowed(error):
    """
    405 handler: JSON for api-prefixed paths, HTML error page otherwise.

    :param error: error raised by Flask
    :return: error html page or api response
    """
    if request.path.startswith(APP.config["api_prefix"]):
        response = jsonify({
            'status': 405,
            'error': str(error),
            'mimetype': 'application/json'
        })
        response.status_code = 405
        return response
    return render_template('error_page.html', template_error_message=error)
@APP.errorhandler(404)
def not_found(error):
    """
    404 handler: JSON for api-prefixed paths, HTML error page otherwise.

    :param error: error raised by Flask
    :return: error html page or api response
    """
    if request.path.startswith(APP.config["api_prefix"]):
        response = jsonify({
            'status': 404,
            'error': str(error),
            'mimetype': 'application/json'
        })
        response.status_code = 404
        return response
    return render_template('error_page.html', template_error_message=error)
@APP.route('/', methods=['GET', 'POST'])
def main():
    """
    Main route rendering index.html; POSTs run the sentiment analyzer.

    :return: rendered page (Flask rejects other methods with 405 before
             this function is reached)
    """
    if request.method == 'GET':
        return render_template('index.html')
    if request.method == 'POST':
        return post_request_sentiment_analyzer_handler(
            handler_type='main', input_text=request.form.get('Input_Text')
        )
@APP.route(APP.config["api_prefix"], methods=['POST'])
def api():
    """
    JSON prediction api (POST only).

    CURL POST example:
    curl -X POST -F Input_Text="your text for analysis" http://127.0.0.1:5000/api/v1/prediction/
    :return: JSON response from the sentiment analyzer handler
    """
    # the method check is defensive; the route only accepts POST anyway
    if request.method == 'POST':
        return post_request_sentiment_analyzer_handler(
            handler_type='api', input_text=request.form.get('Input_Text')
        )
@APP.route('/api_docs', methods=['GET'])
def api_docs():
    """
    Render the static API documentation page.

    :return: rendered api_docs.html
    """
    return render_template('api_docs.html')
@APP.route('/methodology', methods=['GET'])
def methodology():
    """
    Render the methodology page from the markdown content pre-loaded
    into APP.config at startup (the repo README.md).

    :return: rendered methodology.html
    """
    return render_template('methodology.html', text=APP.config['md_content'])
@APP.route('/stats/<string:period>/', methods=['GET'])
@APP.cache.cached(timeout=60)  # cache this view for 1 minute
def stats(period="day"):
    """
    Render usage stats charts for the requested period.

    :param period: 'day', 'week' or 'month'; anything else falls back
                   to the last 24 hours
    :return: rendered stats.html with pie-chart and time-series data
    """
    # translate the period name into a lower bound for the stats query
    if period == "day":
        period_from = date.today() - timedelta(days=1)
    elif period == "week":
        period_from = date.today() - timedelta(weeks=1)
    elif period == "month":
        period_from = date.today() - timedelta(weeks=4)
    # falls back to last 24 hours of stats
    else:
        period_from = date.today() - timedelta(days=1)
    # fetch the raw stats rows from the DB and shape them for the charts
    cur = get_db().cursor()
    cur.execute(DB_OBJ.db_select_stats_query_all, [period_from])
    raw_data = cur.fetchall()
    chart_data = Webapp.chart_data_preparator(raw_data)
    return render_template('stats.html',
                           template_period=period,
                           template_pie_chart_data_sentiment=
                           [x[0] for x in
                            chart_data.get('pie_by_sentiment').get('output_data_set')],
                           template_pie_chart_labels_sentiment=
                           chart_data.get('pie_by_sentiment').get('group_keys'),
                           template_time_series_data_positive=
                           [x[0] for x in
                            chart_data.get('time_series').get('output_data_set')
                            if x[2] == "positive"],
                           template_time_series_data_negative=
                           [x[0] for x in
                            chart_data.get('time_series').get('output_data_set')
                            if x[2] == "negative"],
                           template_time_series_data_uncertain=
                           [x[0] for x in
                            chart_data.get('time_series').get('output_data_set')
                            if x[2] == "uncertain"],
                           template_time_series_labels
                           =chart_data.get('time_series').get('group_keys')
                           )
# serve via waitress when run directly (development entry point)
if __name__ == "__main__":
    serve(APP, host='127.0.0.1', port=5000)
|
datahappy1/czech_language_sentiment_analyzer | flask_webapp/database/__init__.py | <filename>flask_webapp/database/__init__.py
"""
init module
"""
import os
# pick the database environment: a configured DATABASE_URL (Heroku
# Postgres) means 'remote', otherwise fall back to the local sqlite db
__env__ = 'remote' if os.environ.get('DATABASE_URL') else 'local'
|
datahappy1/czech_language_sentiment_analyzer | flask_webapp/database/conn_local_sqlite.py | <filename>flask_webapp/database/conn_local_sqlite.py
"""
local sqlite db connection module
"""
import os
import sqlite3
class Connect:
    """
    Sqlite3 connection factory pointing at the stats.db file that
    lives next to this module.
    """
    def __init__(self):
        """
        Resolve the absolute path of the sqlite database file.
        """
        self.db_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'stats.db'))
    def __repr__(self):
        return str(self.db_file)
    def connect(self):
        """
        Open and return a sqlite3 connection to db_file.

        :return: sqlite3 Connection object
        :raises sqlite3.Error: when the database cannot be opened
        """
        try:
            return sqlite3.connect(self.db_file)
        except sqlite3.Error as general_err:
            raise general_err
|
datahappy1/czech_language_sentiment_analyzer | flask_webapp/database/database_interface.py | """
database interface module
"""
from flask_webapp.database import conn_local_sqlite, conn_remote_postgres
def _connect_from_environment(environment):
    """
    Map an environment name to its database connection class.

    :param environment: 'local' (sqlite) or 'remote' (postgres)
    :return: dict holding the Connect class under the 'conn' key
    :raises Exception: for any other environment value
    """
    try:
        return {
            "local": {"conn": conn_local_sqlite.Connect},
            "remote": {"conn": conn_remote_postgres.Connect},
        }[environment]
    except KeyError:
        raise Exception("Invalid environment value, valid values are remote | local")
def _get_query_from_environment(environment, query_name):
    """
    Resolve a named SQL query for the given environment.

    Dialect-neutral queries come from QueryCommon; the rest are picked
    from the sqlite (QueryLocal) or postgres (QueryRemote) variants.

    :param environment: 'local' (sqlite) or 'remote' (postgres)
    :param query_name: logical query identifier
    :return: SQL query string
    :raises Exception: for an unknown environment or query name
    """
    _env_query_classes = {
        "local": QueryLocal,
        "remote": QueryRemote,
    }
    try:
        query_cls = _env_query_classes[environment]
        queries = {
            "drop_table": QueryCommon.DB_DROP_TABLE,
            "select_count_rows_query": QueryCommon.DB_SELECT_COUNT_ROWS_QUERY,
            "create_table": query_cls.DB_CREATE_TABLE,
            "insert_stats_query": query_cls.DB_INSERT_STATS_QUERY,
            "check_table_exists": query_cls.DB_CHECK_TABLE_EXISTS,
            "select_stats_query_all": query_cls.DB_SELECT_RAW_STATS_DATA,
        }
        return queries[query_name]
    except KeyError:
        raise Exception("Invalid environment value, valid values are remote | local")
class QueryRemote:
    """
    Postgres-dialect SQL statements for the stats table.
    """
    # create the stats table
    DB_CREATE_TABLE = """
    CREATE TABLE IF NOT EXISTS stats(
    id SMALLSERIAL,
    request_datetime TIMESTAMP NOT NULL,
    sentiment_prediction VARCHAR NOT NULL); """
    # check if table exists (via information_schema)
    DB_CHECK_TABLE_EXISTS = """
    SELECT 1 FROM information_schema.tables 
    WHERE table_name = 'stats'; """
    # insert into stats query (%s placeholders: psycopg2 paramstyle)
    DB_INSERT_STATS_QUERY = """
    INSERT INTO stats("request_datetime", "sentiment_prediction") VALUES (%s, %s); """
    # select raw stats data since a given timestamp
    DB_SELECT_RAW_STATS_DATA = """
    SELECT to_char("request_datetime", 'YYYY-MM-DD'), sentiment_prediction 
    FROM stats 
    WHERE request_datetime::timestamp >= %s; """
class QueryLocal:
    """
    Sqlite3-dialect SQL statements for the stats table.
    """
    # create the stats table
    DB_CREATE_TABLE = """
    CREATE TABLE IF NOT EXISTS stats (
    id integer PRIMARY KEY AUTOINCREMENT,
    request_datetime timestamp NOT NULL,
    sentiment_prediction string NOT NULL); """
    # check if table exists (via sqlite_master)
    DB_CHECK_TABLE_EXISTS = """
    SELECT 1 FROM sqlite_master 
    WHERE type='table' AND name='stats'; """
    # insert into stats query (? placeholders: sqlite3 paramstyle)
    DB_INSERT_STATS_QUERY = """
    INSERT INTO 'stats'('request_datetime', 'sentiment_prediction') VALUES (?, ?); """
    # select raw stats data since a given timestamp
    DB_SELECT_RAW_STATS_DATA = """
    SELECT date(request_datetime) as 'DATE()', sentiment_prediction 
    FROM stats 
    WHERE request_datetime >= ?; """
class QueryCommon:
    """
    Dialect-neutral SQL statements shared by both environments.
    """
    # drop the stats table
    DB_DROP_TABLE = """
    DROP TABLE IF EXISTS stats; """
    # count all rows in the stats table
    DB_SELECT_COUNT_ROWS_QUERY = """
    SELECT count(*) FROM stats;"""
class Database:
    """
    Environment-agnostic database facade: resolves the SQL dialect and
    connection class for 'local' (sqlite) or 'remote' (postgres) and
    builds/maintains the stats table.
    """
    def __init__(self, env):
        self.environment = env
        # resolve each named query for this environment into an attribute
        _query_attr_names = {
            'db_drop_table': 'drop_table',
            'db_select_count_rows_query': 'select_count_rows_query',
            'db_create_table': 'create_table',
            'db_insert_stats_query': 'insert_stats_query',
            'db_check_table_exists': 'check_table_exists',
            'db_select_stats_query_all': 'select_stats_query_all',
        }
        for attr, query_name in _query_attr_names.items():
            setattr(self, attr,
                    _get_query_from_environment(self.environment, query_name=query_name))
    def connect(self):
        """
        Open and return a connection via the environment-mapped Connect class.

        :return: open DB connection
        """
        connection_cls = _connect_from_environment(self.environment)["conn"]
        return connection_cls().connect()
    def db_builder(self):
        """
        Ensure the stats table exists; drop and recreate it once it
        outgrows 7000 rows (Heroku Postgres free-tier limitation).

        :return: 0 on completion
        """
        conn = self.connect()
        with conn:
            cur = conn.cursor()
            cur.execute(self.db_check_table_exists)
            table_exists_query_result = cur.fetchone()
            if table_exists_query_result and table_exists_query_result[0] == 1:
                # table present: enforce the row-count cap
                cur.execute(self.db_select_count_rows_query)
                rowcount = cur.fetchone()[0]
                if rowcount > 7000:
                    cur.execute(self.db_drop_table)
                    print(f"Dropped the stats table, row count: {rowcount}")
            # (re)create the stats table when missing or just dropped
            cur.execute(self.db_create_table)
        return 0
|
datahappy1/czech_language_sentiment_analyzer | flask_webapp/exceptions.py | <filename>flask_webapp/exceptions.py<gh_stars>1-10
"""
exceptions.py
"""
class NotEnoughNonStopWordsException(Exception):
    """
    Raised when the submitted text contains fewer than 3 non stop-words.
    """
class NotEnoughWordsLengthException(Exception):
    """
    Raised when no submitted word has 3 or more characters.
    """
class InvalidDetectedLanguageException(Exception):
    """
    Raised when the detected language of the submitted text is not accepted.
    """
class GenericException(Exception):
    """
    Catch-all error; referenced by name in the response message map.
    """
# maps exception class names (keys match the classes above) to the
# user-facing error messages returned by the web/app handlers
EXCEPTION_TYPE_RESPONSE_MESSAGE_MAP = {
    "NotEnoughNonStopWordsException":
        "Sorry, need to submit at least 3 non stop-words",
    "NotEnoughWordsLengthException":
        "Sorry, need to submit at least 1 word with 3 and more characters",
    "InvalidDetectedLanguageException":
        "Sorry, need to submit text written in Czech",
    "GenericException":
        "Sorry, something went wrong"
}
|
datahappy1/czech_language_sentiment_analyzer | ml_models/support_vector_machine/data_processor_support_vector_machine.py | <gh_stars>1-10
"""
data processor for logistic regression
"""
import random
import pickle
import numpy as np
import os
from langdetect import detect, lang_detect_exception
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn import metrics, model_selection
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from utils.utilities import ProjectCommon
# stop-word list shared with the data_preparation package
CZECH_STOPWORDS_FILE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
                                                         'data_preparation', 'czech_stopwords.txt'))
# scraped (review, rank) csv produced by the data collector
TEMP_FILE_PATH = '../../data_preparation/reviews_with_ranks.csv'
# when True, the grid-searched model is pickled to model.pkl
PERSIST_MODEL_TO_FILE = True
def _read_temp_file_generator():
    """
    Stream labelled reviews from the scraped csv file.

    Yields (review_text, 'neg'|'pos') tuples; rows without a rank
    column yield the '#NA' marker string instead.

    :return: generator of tuples (or '#NA' for malformed rows)
    """
    # BUG FIX: context manager closes the file handle when the generator
    # is exhausted (previously the open() handle was never closed)
    with open(TEMP_FILE_PATH, encoding="utf8") as temp_file:
        for row in temp_file:
            try:
                yield (row.split(',')[0].replace('"', ''),
                       'neg' if int(row.split(',')[1]) < 0 else 'pos')
            except IndexError:
                yield '#NA'
def support_vector_machine(persist_model_to_file):
    """
    Train and evaluate the SVM-style (SGD with log loss) sentiment classifier.

    Keeps only reviews detected as Czech, balances the classes at 11500
    'neg' + 11500 'pos' samples, trains a CountVectorizer/Tfidf/SGDClassifier
    pipeline, grid-searches its hyper-parameters and optionally persists
    the tuned model.

    :param persist_model_to_file: when True, pickle the grid-searched model
                                  to model.pkl in the working directory
    :return: accuracy (float) of the un-tuned pipeline on the held-out
             20% split (~0.847 observed)
    """
    temp_file_reviews_work = []
    for tfg in _read_temp_file_generator():
        # malformed rows yield the '#NA' marker (len 3), so only 2-tuples pass
        if len(tfg) == 2:
            try:
                _detected_lang = detect(ProjectCommon.remove_non_alpha_chars_and_html(tfg[0]))
            except lang_detect_exception.LangDetectException:
                # undetectable text -> skip the row
                continue
            if _detected_lang == 'cs':
                temp_file_reviews_work.append((ProjectCommon.remove_all(tfg[0]), tfg[1]))
    # balance the classes at 11500 samples each
    temp_file_reviews_work = [x for x in temp_file_reviews_work if x[1] == 'neg'][:11500] + \
                             [x for x in temp_file_reviews_work if x[1] == 'pos'][:11500]
    random.shuffle(temp_file_reviews_work)
    Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split(
        [x[0] for x in temp_file_reviews_work],
        [x[1] for x in temp_file_reviews_work],
        test_size=0.2)
    text_clf = Pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', SGDClassifier(loss='log', penalty='l2',
                              alpha=1e-3, random_state=42,
                              max_iter=5, tol=None)),
    ])
    text_clf.fit(Train_X, Train_Y)
    # accuracy is reported for the plain pipeline, before grid search
    predicted = text_clf.predict(Test_X)
    # hyper-parameter grid search over the same pipeline; the tuned model
    # is the one that gets persisted
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'tfidf__use_idf': (True, False),
        'clf__alpha': (1e-2, 1e-3),
    }
    gs_clf = GridSearchCV(text_clf, parameters, cv=5, n_jobs=-1)
    gs_clf = gs_clf.fit(Train_X, Train_Y)
    if persist_model_to_file:
        # BUG FIX: use a context manager so the pickle file handle is closed
        # (previously pickle.dump(..., open(...)) leaked the handle)
        with open('model.pkl', 'wb') as model_file:
            pickle.dump(gs_clf, model_file)
    return np.mean(predicted == Test_Y)
# Script entry point: train, optionally persist the model, and print accuracy.
if __name__ == "__main__":
    print(support_vector_machine(PERSIST_MODEL_TO_FILE))
|
shc558/wwyfcs | wwyfcs/utils/create_examples.py | <filename>wwyfcs/utils/create_examples.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import argparse
import os
import pandas as pd
from sklearn.model_selection import train_test_split
# load data and tag lines with source characters' names
def load_data(args):
    """Read the raw CSV and add an 'id:data' column tagging each line with its speaker."""
    frame = pd.read_csv(args.file_path)
    frame['id:data'] = frame[args.id_colname] + ':' + frame[args.data_colname]
    return frame
# create response sets where each row includes n previous responses as context
def extract_dialogues(df, args):
    """Build (response, context, context/0, ...) rows from sequential dialogue lines.

    Each row holds a response plus the args.len_context previous lines, most
    recent first. If args.character is set, only responses spoken by that
    character are kept (context lines may come from any speaker). Rows with
    missing values are dropped.

    :param df: DataFrame with args.data_colname (lines) and args.id_colname (speakers)
    :param args: namespace with len_context, character, data_colname, id_colname
    :return: DataFrame with 1 + len_context columns per row
    """
    n = args.len_context
    lines = df[args.data_colname]
    dialogue_chains = []
    for i in range(n, len(lines)):
        # FIX: the original duplicated the chain-building code in both branches
        # of the character check; a guard + single loop is equivalent.
        if args.character and df[args.id_colname][i] != args.character:
            continue
        # Current response plus the n lines before it, newest first.
        dialogue_chains.append([lines[j] for j in range(i, i - n - 1, -1)])
    columns = ['response', 'context'] + ['context/' + str(k) for k in range(n - 1)]
    out = pd.DataFrame.from_records(dialogue_chains, columns=columns)
    return out.dropna().reset_index(drop=True)
def main():
    """CLI entry point: parse args, build dialogue examples, write train/eval CSVs."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_path', type=str,
                        help='Path to row data.')
    parser.add_argument('--data_colname', type=str,
                        help='Name of the data field.')
    parser.add_argument('--id_colname', type=str,
                        help='Name of the ID field.')
    parser.add_argument('--output_dir', type=str,
                        default=None, help='Dir to output data')
    parser.add_argument('--character', type=str,
                        default=None, help='Name of the character to extract.')
    parser.add_argument('--len_context', type=int,
                        default=9, help='Number of previous lines to use as context')
    parser.add_argument('--eval_size', type=float,
                        default=0.1, help='fraction to use as evaluation set')
    args = parser.parse_args()

    extracted = extract_dialogues(load_data(args), args)
    # NOTE(review): the [:100] slice caps the dataset at 100 examples and looks
    # like leftover debug code — confirm and remove if full output is intended.
    # FIX: renamed the local `eval` so it no longer shadows the builtin.
    train, eval_set = train_test_split(extracted[:100], test_size=args.eval_size, random_state=42)
    # FIX: both output branches were identical except for the directory; compute
    # the directory once (defaults to the current working directory).
    out_dir = args.output_dir if args.output_dir else os.getcwd()
    train.to_csv(os.path.join(out_dir, 'train_examples.csv'), index=False)
    eval_set.to_csv(os.path.join(out_dir, 'eval_examples.csv'), index=False)


if __name__ == "__main__":
    main()
|
shc558/wwyfcs | wwyfcs/app/generate_conversations.py | # -*- coding: utf-8 -*-
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import streamlit as st
from PIL import Image
# Shared tokenizer; both the stock and fine-tuned models derive from DialoGPT-small.
tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small')

st.title('WWYFCS')
st.header('What would your favorite character say?')

# Sidebar controls: number of user/bot exchanges and sampling randomness (top-k;
# 0 disables sampling — see do_sample=top_k > 0 below).
total_steps = st.sidebar.slider(label='Turns of conversation', min_value=1, max_value=10, value=5)
top_k = st.sidebar.slider(label='Level of randomness', min_value=0, max_value=30, value=0)

# '<NAME>' entries are anonymized character names from the source dataset.
character = st.selectbox('Select a character', options=[
    '',
    'DialoGPT',
    '<NAME>',
    '<NAME>',
    '<NAME>',
    '<NAME>',
    '<NAME>',
    'Missandei',
    'Hodor'
])
@st.cache
def load_model(model_name):
    """Load a causal LM; st.cache avoids reloading the model on every rerun."""
    return AutoModelForCausalLM.from_pretrained(model_name)
if character == '':
    st.write('Please select a character to begin.')
elif character == 'DialoGPT':
    # Baseline branch: stock DialoGPT with no persona prompt.
    model_name = 'microsoft/DialoGPT-small'
    prompt_ids = ''
    model = load_model(model_name)
    # Let's chat
    for step in range(total_steps):
        # encode the new user input, add the eos_token and return a tensor in Pytorch
        # NOTE(review): input() reads from the server console, not the browser —
        # in a Streamlit app this likely needs st.text_input(); confirm.
        new_user_input = input(">> User:")
        st.write('User:', new_user_input)
        new_user_input_ids = tokenizer.encode(new_user_input + tokenizer.eos_token, return_tensors='pt')
        # append the new user input tokens to the chat history
        # (chat_history_ids is only defined after the first iteration, hence the step guard)
        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids
        # generated a response while limiting the total chat history to 1500 tokens
        chat_history_ids = model.generate(
            bot_input_ids,
            max_length=1500,
            do_sample=top_k > 0,
            top_p=0.95,
            top_k=top_k,
            pad_token_id=tokenizer.eos_token_id
        )
        # pretty print last output tokens from bot (everything past the prompt)
        st.write("{}: {}".format(character, tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
else:
    # Fine-tuned persona branch: show the character image and append a
    # "<name>:" prompt so the model answers in character.
    image = Image.open(os.path.join(os.getcwd(), 'app', 'images', '{}.jpg'.format(character)))
    st.image(image, width=100, use_column_width=False)
    model_name = os.path.join(os.getcwd(), 'app', 'model_n4')
    prompt_ids = tokenizer.encode(character.lower() + ':', return_tensors='pt')
    model = load_model(model_name)
    # Let's chat
    for step in range(total_steps):
        # encode the new user input, add the eos_token and return a tensor in Pytorch
        # NOTE(review): same input()-in-Streamlit concern as above.
        new_user_input = input(">> User:")
        st.write('User:', new_user_input)
        new_user_input_ids = tokenizer.encode(new_user_input + tokenizer.eos_token, return_tensors='pt')
        # append the new user input tokens to the chat history, ending with the persona prompt
        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids, prompt_ids], dim=-1) if step > 0 else torch.cat([new_user_input_ids, prompt_ids], dim=-1)
        # generated a response while limiting the total chat history to 1500 tokens
        chat_history_ids = model.generate(
            bot_input_ids,
            max_length=1500,
            do_sample=top_k > 0,
            top_p=0.95,
            top_k=top_k,
            pad_token_id=tokenizer.eos_token_id
        )
        # pretty print last output tokens from bot
        st.write("{}: {}".format(character, tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
|
shc558/wwyfcs | flaskapp/app.py | import numpy as np
import uuid
from flask import Flask, request, make_response
from flask_cors import cross_origin
import json
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from datetime import datetime
# Use the application default credentials
cred = credentials.ApplicationDefault()
firebase_admin.initialize_app(cred, {
    # BUG(review): `project_id` is never defined in this module, so this raises
    # NameError at import time — replace with your GCP project-id string.
    'projectId': project_id,
})
db = firestore.client()  # Firestore client used to persist chat history
app = Flask(__name__)
# One collection per server process; all conversations share this session id.
session_id = uuid.uuid4().hex
tokenizer = AutoTokenizer.from_pretrained('./model_n4')
model = AutoModelForCausalLM.from_pretrained('./model_n4')
# Persona prompt appended to every input; can be changed to another character
# e.g. 'arya stark'.
prompt_ids = tokenizer.encode('jon snow'+':', return_tensors='pt')
# getting and sending response to dialogflow
@app.route('/webhook', methods=['POST'])
@cross_origin()
def webhook():
    """Receive a DialogFlow webhook POST and return the fulfillment JSON."""
    payload = request.get_json(silent=True, force=True)
    fulfillment = processRequest(payload)
    body = json.dumps(fulfillment, indent=4)
    response = make_response(body)
    response.headers['Content-Type'] = 'application/json'
    return response
# processing the request from dialogflow
def processRequest(req):
    """Process a DialogFlow webhook payload.

    Encodes the user's query, prepends stored chat history from Firestore (if
    any) plus the persona prompt, generates a model reply for the 'UserInput'
    intent, persists the new history, and returns the fulfillment text.
    Returns None for any other intent.
    """
    result = req.get("queryResult")
    # Fetching the data points
    convo_id = req.get("responseId")
    new_user_input = result.get("queryText")
    new_user_input_ids = tokenizer.encode(new_user_input + tokenizer.eos_token, return_tensors='pt')
    doc_ref = db.collection(session_id)
    docs = doc_ref.get()
    # Retrieving chat history if it exists
    if docs:
        # Most recent history document for this session.
        query = doc_ref.order_by(u'time_stamp', direction=firestore.Query.DESCENDING).limit(1)
        doc = [item for item in query.stream()][0]
        chat_history_ids = tokenizer.encode(doc.to_dict()['chat_history'], return_tensors='pt')
        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids, prompt_ids], dim=-1)
    else:
        bot_input_ids = torch.cat([new_user_input_ids, prompt_ids], dim=-1)
    # BUG FIX: the original `bot_input_ids[:np.min([bot_input_ids.size()[-1], 1024])]`
    # sliced the *batch* dimension of a (1, seq_len) tensor — a no-op. Truncate
    # the token dimension instead, keeping the most recent 1024 tokens.
    bot_input_ids = bot_input_ids[:, -1024:]
    # Getting the intent which has fulfillment enabled
    intent = result.get("intent").get('displayName')
    if intent == 'UserInput':
        chat_history_ids = model.generate(
            bot_input_ids,
            max_length=1000,
            pad_token_id=tokenizer.eos_token_id
        )
        chat_history = tokenizer.decode(chat_history_ids[0])
        # Reply is everything the model generated past the prompt.
        output = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
        # Persist the full history for the next turn in this session.
        doc_ref = db.collection(session_id).document(convo_id)
        doc_ref.set({
            u'chat_history': chat_history,
            u'time_stamp': datetime.now()
        })
        # Returning the fulfillment text back to DialogFlow
        return {
            "fulfillmentText": output
        }
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run()
|
shc558/wwyfcs | wwyfcs/trainer/train_language_model.py | # -*- coding: utf-8 -*-
"""
Fine-tuning DialoGPT using language modeling based on
Huggingface transformers run_language_modeling.py &
tutorial from <NAME>
"""
import pandas as pd
import logging
import math
import os
import pickle
from dataclasses import dataclass, field
from typing import Optional, Dict, List, Tuple, Union
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    WEIGHTS_NAME,
    AdamW,
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    HfArgumentParser,
    PreTrainedModel,
    PreTrainedTokenizer,
    get_linear_schedule_with_warmup,
    set_seed,
    TrainingArguments,
    Trainer,
    BatchEncoding
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataCollatorForLanguageModeling:
    """
    Data collator used for language modeling.

    - collates batches of tensors, honoring the tokenizer's pad token
    - with ``mlm=True`` it would preprocess batches for masked language
      modeling, but NOTE(review): ``mask_tokens`` is not defined on this class,
      so that path raises AttributeError. This script only instantiates the
      collator with ``mlm=data_args.mlm`` (default False); confirm before
      enabling MLM.
    """
    tokenizer: PreTrainedTokenizer
    mlm: bool = True
    mlm_probability: float = 0.15

    def __call__(
        self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
    ) -> Dict[str, torch.Tensor]:
        # Accept pre-tokenized dict/BatchEncoding inputs as well as raw id lists.
        if isinstance(examples[0], (dict, BatchEncoding)):
            examples = [e["input_ids"] for e in examples]
        batch = self._tensorize_batch(examples)
        if self.mlm:
            inputs, labels = self.mask_tokens(batch)  # see NOTE(review) in class docstring
            return {"input_ids": inputs, "labels": labels}
        else:
            # Causal LM: labels mirror the inputs, with pad positions set to
            # -100 so they are ignored by the cross-entropy loss.
            labels = batch.clone().detach()
            if self.tokenizer.pad_token_id is not None:
                labels[labels == self.tokenizer.pad_token_id] = -100
            return {"input_ids": batch, "labels": labels}

    def _tensorize_batch(
        self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
    ) -> torch.Tensor:
        """Stack (or pad and stack) a list of example id sequences into one tensor.

        FIX: a stray, unreachable ``return inputs, labels`` (leftover from a
        deleted ``mask_tokens`` implementation, referencing undefined names)
        was removed from the end of this method.
        """
        # In order to accept both lists of lists and lists of Tensors
        if isinstance(examples[0], (list, tuple)):
            examples = [torch.tensor(e, dtype=torch.long) for e in examples]
        length_of_first = examples[0].size(0)
        are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
        if are_tensors_same_length:
            return torch.stack(examples, dim=0)
        else:
            if self.tokenizer._pad_token is None:
                # Tokenizer has no pad token: fall back to pad_sequence's
                # default padding value (0) rather than raising.
                return pad_sequence(examples, batch_first=True)
            return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    model_name_or_path: str = field(
        default='microsoft/DialoGPT-small',
        metadata={
            "help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
        },
    )
    model_type: str = field(
        default='gpt2',
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: str = field(
        default='microsoft/DialoGPT-small',
        metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: str = field(
        default='microsoft/DialoGPT-small',
        metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    # Also used by ConversationDataset as the feature-cache directory.
    cache_dir: str = field(
        default='data/cached',
        metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
@dataclass
class DataArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    train_data_file: str = field(
        # default=None,
        metadata={"help": "The input training data file (a csv file)."}
    )
    eval_data_file: str = field(
        # default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a csv file)."}
    )
    overwrite_cache: bool = field(
        default=True,
        metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    # Consumed by ConversationDataset, after subtracting room for special tokens.
    block_size: int = field(
        default=512,
        metadata={
            "help": "Optional input sequence length after tokenization."
            "The training dataset will be truncated in block of this size for training."
            "Default to the model max input length for single sentence inputs (take into account special tokens)."
        }
    )
    # Fed to DataCollatorForLanguageModeling; this script keeps it False.
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
def construct_conv(row, tokenizer, eos=True):
    """Encode a (response, context, ...) row into one flat token-id sequence.

    The row is stored newest-first, so it is reversed to chronological order;
    each utterance is terminated with the tokenizer's EOS id. (The `eos` flag
    is accepted for compatibility but, as in the original, never consulted.)
    """
    encoded = [tokenizer.encode(text) + [tokenizer.eos_token_id] for text in row]
    flat = []
    for utterance in reversed(encoded):
        flat.extend(utterance)
    return flat
class ConversationDataset(Dataset):
    """Tokenized conversation dataset with on-disk pickling of the examples."""

    def __init__(self, tokenizer, model_args, data_args, df):
        # Leave room for the special tokens the tokenizer adds.
        block_size = data_args.block_size - (tokenizer.model_max_length - tokenizer.max_len_single_sentence)
        cache_path = os.path.join(
            model_args.cache_dir, model_args.model_type + "_cached_lm_" + str(block_size)
        )
        if os.path.exists(cache_path) and not data_args.overwrite_cache:
            logger.info("Loading features from cached file %s", cache_path)
            with open(cache_path, "rb") as handle:
                self.examples = pickle.load(handle)
            return
        logger.info("Creating features from dataset file")
        self.examples = [construct_conv(row, tokenizer) for _, row in df.iterrows()]
        logger.info("Saving features into cached file %s", cache_path)
        with open(cache_path, "wb") as handle:
            pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, item):
        return torch.tensor(self.examples[item], dtype=torch.long)
def get_dataset(model_args, data_args, tokenizer, evaluate=False):
    """Load the train or eval CSV and wrap it in a ConversationDataset."""
    source = data_args.eval_data_file if evaluate else data_args.train_data_file
    return ConversationDataset(
        tokenizer=tokenizer,
        model_args=model_args,
        data_args=data_args,
        df=pd.read_csv(source),
    )
def main():
    """Fine-tune (or train from scratch) a causal LM on conversation CSVs.

    Parses ModelArguments/DataArguments/TrainingArguments from the command
    line, builds model, tokenizer and datasets, trains if --do_train, and
    evaluates perplexity if --do_eval. Returns the evaluation results dict.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    print('===========')
    print(data_args)
    print('===========')

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, training_steps: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
        training_args.max_steps
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        # FIX: CONFIG_MAPPING was referenced here but never imported; it is now
        # imported from transformers at the top of the file.
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
            "and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForCausalLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # FIX: `tokenizer.max_len` is a deprecated alias removed in newer
    # transformers releases; use `model_max_length` (already used by
    # ConversationDataset) for consistency.
    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model.
        data_args.block_size = tokenizer.model_max_length
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.model_max_length)

    # Get datasets
    train_dataset = (
        get_dataset(model_args=model_args, data_args=data_args, tokenizer=tokenizer)
    )
    eval_dataset = (
        get_dataset(model_args=model_args, data_args=data_args, tokenizer=tokenizer, evaluate=True)
        if training_args.do_eval
        else None
    )
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=data_args.mlm
    )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)

    return results
# Allow running this module directly as a training script.
if __name__ == "__main__":
    main()
|
tuseef123/Django-CRUD-OPERATIONS | crud/crudapp/urls.py | <reponame>tuseef123/Django-CRUD-OPERATIONS
from django.contrib import admin
from django.urls import path,include
from .import views
# Route table for the crudapp CRUD views.
urlpatterns = [
    path('',views.index,name="index"),            # landing page
    path('record',views.records,name="record"),   # create a new record
    path('show/',views.show,name="show"),         # list all records
    path('edit/<int:pk>/',views.update,name="edit"),     # update one record
    path('delete/<int:id>/',views.delete,name="delete"), # delete one record
]
tuseef123/Django-CRUD-OPERATIONS | crud/crudapp/migrations/0003_auto_20210112_1816.py | <filename>crud/crudapp/migrations/0003_auto_20210112_1816.py
# Generated by Django 3.1.5 on 2021-01-12 13:16
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename record.name to record.namess."""

    dependencies = [
        ('crudapp', '0002_remove_record_date'),
    ]

    operations = [
        migrations.RenameField(
            model_name='record',
            old_name='name',
            new_name='namess',
        ),
    ]
|
tuseef123/Django-CRUD-OPERATIONS | crud/crudapp/admin.py | <reponame>tuseef123/Django-CRUD-OPERATIONS
from django.contrib import admin
from crudapp.models import record
# Register your models here.
# Expose the record model in the Django admin site.
admin.site.register(record)
tuseef123/Django-CRUD-OPERATIONS | message/messages_show/apps.py | from django.apps import AppConfig
class MessagesShowConfig(AppConfig):
    """App configuration for the messages_show Django app."""
    name = 'messages_show'
|
tuseef123/Django-CRUD-OPERATIONS | crud/crudapp/models.py | from django.db import models
# Create your models here.
class record(models.Model):
    """Inventory record for a product.

    NOTE(review): the lowercase class name breaks the PascalCase convention,
    but renaming would touch views/forms/admin — left as-is.
    """
    p_name = models.CharField(max_length=40)    # product name (used as str repr)
    catagory = models.CharField(max_length=40)  # sic: this spelling is used app-wide
    quantity = models.IntegerField()
    quality = models.CharField(max_length=50)
    room = models.IntegerField()

    def __str__(self):
        return self.p_name
|
tuseef123/Django-CRUD-OPERATIONS | crud/crudapp/migrations/0004_auto_20210112_1820.py | <filename>crud/crudapp/migrations/0004_auto_20210112_1820.py
# Generated by Django 3.1.5 on 2021-01-12 13:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename record.namess back to record.p_name."""

    dependencies = [
        ('crudapp', '0003_auto_20210112_1816'),
    ]

    operations = [
        migrations.RenameField(
            model_name='record',
            old_name='namess',
            new_name='p_name',
        ),
    ]
|
tuseef123/Django-CRUD-OPERATIONS | message/messages_show/views.py | from django.shortcuts import render,redirect
from django.contrib import messages
from .models import Profile
from .forms import AddData
# Create your views here.
def home(request):
    """List every Profile and handle creation of a new one via AddData."""
    all_profiles = Profile.objects.all()
    if request.method == 'POST':
        creation_form = AddData(request.POST)
        if creation_form.is_valid():
            creation_form.save()
            messages.add_message(request, messages.SUCCESS, 'Record Added Successfully')
            return redirect('home')
        # Invalid submission: fall through and re-render with the bound form.
    else:
        creation_form = AddData()
    return render(request, 'message_show/home.html', {'profiles': all_profiles, 'form': creation_form})
def edit(request, pk):
    """Update the Profile with primary key `pk` via AddData."""
    target = Profile.objects.get(pk=pk)
    form = AddData(instance=target)
    if request.method == 'POST':
        form = AddData(request.POST, instance=target)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, 'Record updated Successfully')
            return redirect('home')
    return render(request, 'message_show/edit.html', {'form': form})
def delete(request, pk):
    """Remove the Profile with primary key `pk`, then return to the listing."""
    Profile.objects.get(pk=pk).delete()
    messages.add_message(request, messages.SUCCESS, 'Record Deleted Successfully')
    return redirect('home')
tuseef123/Django-CRUD-OPERATIONS | crud/crudapp/forms.py | from crudapp.models import record
from django import forms
class colform(forms.ModelForm):
    """ModelForm exposing every field of the record model."""
    class Meta:
        model = record
        fields = '__all__'
tuseef123/Django-CRUD-OPERATIONS | message/messages_show/models.py | from django.db import models
# Create your models here.
class Profile(models.Model):
    """A simple person profile; __str__ returns the handle."""
    firstname = models.CharField(max_length=50)
    lastname = models.CharField(max_length=60)
    handle = models.CharField(max_length=50)

    def __str__(self):
        return self.handle
tuseef123/Django-CRUD-OPERATIONS | crud/crudapp/views.py | from django.shortcuts import render,redirect
from crudapp.models import record
from .forms import colform
from django.contrib import messages
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def records(request):
    """On POST, save a new record from the submitted fields; otherwise show the form."""
    if request.method != "POST":
        return render(request, 'record.html')
    new_record = record(
        p_name=request.POST["p_name"],
        catagory=request.POST["catagory"],
        quantity=request.POST["quantity"],
        quality=request.POST["quality"],
        room=request.POST["room"],
    )
    new_record.save()
    return redirect('index')
def show(request):
    """Render a table of all stored records."""
    data = record.objects.all()
    return render(request, 'recordshow.html', {'data': data})
def update(request, pk):
    """Edit the record with primary key `pk` via colform.

    NOTE(review): after a successful save this falls through and re-renders
    edit.html (the redirect to 'show' is commented out), and the bound form
    is never passed to the template — confirm whether that is intended.
    """
    fetch_data = record.objects.get(pk=pk)
    form = colform(instance=fetch_data)
    if request.method == 'POST':
        form = colform(request.POST, instance=fetch_data)
        if form.is_valid():
            form.save()
            # return redirect('show')
            messages.add_message(request, messages.SUCCESS, 'Data has been updated Successfully!')
    # print(data_fetch)
    return render(request, 'edit.html', {'data': fetch_data})
def delete(request, id):
    """Delete the record with the given id and go back to the listing."""
    record.objects.get(id=id).delete()
    return redirect('show')
tuseef123/Django-CRUD-OPERATIONS | message/messages_show/migrations/0001_initial.py | <gh_stars>0
# Generated by Django 3.1.5 on 2021-01-14 15:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema: create the Profile table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(max_length=50)),
                ('lastname', models.CharField(max_length=60)),
                ('handle', models.CharField(max_length=50)),
            ],
        ),
    ]
|
tuseef123/Django-CRUD-OPERATIONS | message/messages_show/forms.py | from django.forms import ModelForm
from .models import Profile
class AddData(ModelForm):
    """ModelForm exposing every field of the Profile model."""
    class Meta:
        model = Profile
        fields = '__all__'
tuseef123/Django-CRUD-OPERATIONS | message/messages_show/urls.py | <reponame>tuseef123/Django-CRUD-OPERATIONS<gh_stars>0
from django.urls import path,include
from . import views
# Route table for the messages_show app.
urlpatterns = [
    path('', views.home, name='home'),                   # list + create
    path('edit/<int:pk>/', views.edit, name='edit'),     # update one profile
    path('delete/<int:pk>/', views.delete, name='delete'),  # delete one profile
]
conormuldoon/unison | back-end/src/main/python/switch_model.py | <filename>back-end/src/main/python/switch_model.py
import switch_list_items
if __name__ == "__main__":
    # Toggle the commented/uncommented state of these API-model settings in
    # application.properties (delegates to switch_list_items.switchitems).
    matchlist = ["api.timezone", "api.uri", "api.fog"]
    switch_list_items.switchitems(matchlist)
|
conormuldoon/unison | back-end/src/main/python/switch_list_items.py | import shutil
# Paths are relative to the script's working directory (back-end/src/main/python).
CURRENT = "../resources/application.properties"
SWITCHED = "../resources/application.tmppy"


def switchitems(matchlist, current=None, switched=None):
    """Toggle the comment state of property lines matching any item in matchlist.

    A matching line containing '#' is uncommented by dropping everything up to
    and including the first '#'; a matching line without '#' gets one prepended.
    All other lines are copied unchanged, and the rewritten file replaces the
    original via shutil.move.

    :param matchlist: substrings identifying the property lines to toggle
    :param current: properties file to rewrite (defaults to CURRENT; parameter
        added for testability, default behavior unchanged)
    :param switched: temporary output path (defaults to SWITCHED)
    """
    src = CURRENT if current is None else current
    tmp = SWITCHED if switched is None else switched
    # FIX: the original opened both files without `with`, leaking the handles
    # on any exception; the with-block guarantees they are closed.
    with open(src, "r") as infile, open(tmp, "w") as outfile:
        for line in infile:
            for matchitem in matchlist:
                if matchitem in line:
                    # idx > 0 means a '#' was found: uncomment; otherwise comment.
                    idx = line.find("#") + 1
                    outfile.write(line[idx:] if idx > 0 else "#" + line)
                    break
            else:
                # No match: copy the line through unchanged.
                outfile.write(line)
    shutil.move(tmp, src)
|
conormuldoon/unison | back-end/src/main/python/switch_db_config.py | <reponame>conormuldoon/unison
import switch_list_items
if __name__ == "__main__":
    # Toggle the commented/uncommented state of the database-related settings
    # in application.properties (delegates to switch_list_items.switchitems).
    matchlist = ["driverClassName", "hibernate.dialect",
                 "datasource.url", "hibernate.ddl"]
    switch_list_items.switchitems(matchlist)
|
conormuldoon/unison | back-end/src/main/python/change_db_password.py | import shutil
import sys
# Paths are relative to the script's working directory (back-end/src/main/python).
CURRENT = "../resources/application.properties"
SWITCHED = "../resources/application.tmppy"
DS_PASSWORD = "<PASSWORD>"


def changepassword(password, current=None, switched=None):
    """Rewrite the datasource-password line of the properties file.

    Lines starting with DS_PASSWORD are replaced by 'DS_PASSWORD=<password>';
    every other line is copied unchanged. The rewritten file replaces the
    original via shutil.move.

    :param password: the new password value to write
    :param current: properties file to rewrite (defaults to CURRENT; parameter
        added for testability, default behavior unchanged)
    :param switched: temporary output path (defaults to SWITCHED)
    """
    src = CURRENT if current is None else current
    tmp = SWITCHED if switched is None else switched
    # FIX: the original opened both files without `with`, leaking the handles
    # on any exception; the with-block guarantees they are closed.
    with open(src, "r") as infile, open(tmp, "w") as outfile:
        for line in infile:
            if line.startswith(DS_PASSWORD):
                outfile.write(DS_PASSWORD + "=" + password + "\n")
            else:
                outfile.write(line)
    shutil.move(tmp, src)


# FIX: the original public name was misspelled ('changepassord'); keep it as a
# backward-compatible alias so existing callers continue to work.
changepassord = changepassword

# The new password is passed as an argument to main.
if __name__ == "__main__":
    changepassword(sys.argv[1])
|
rootrl/Flask-demo-with-mysql | wsgi.py | from app import create_app
# WSGI entry point; `application` is the name conventional WSGI servers look for.
application = app = create_app('default')
|
rootrl/Flask-demo-with-mysql | migrations/versions/e8f7ab64277e_.py | """empty message
Revision ID: e8f7ab64277e
Revises: <KEY>
Create Date: 2019-12-21 16:23:23.140105
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'e8f7ab64277e'
down_revision = '<KEY>'  # placeholder left by source anonymization
branch_labels = None
depends_on = None


def upgrade():
    """Add article.serial, make article_id nullable, drop the ext_field columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('article', sa.Column('serial', sa.Integer(), nullable=True))
    op.alter_column('article', 'article_id',
                    existing_type=mysql.INTEGER(display_width=11),
                    nullable=True)
    op.drop_column('article', 'ext_field_1')
    op.drop_column('article', 'ext_field_2')
    # ### end Alembic commands ###


def downgrade():
    """Reverse upgrade(): restore ext fields, re-require article_id, drop serial."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('article', sa.Column('ext_field_2', mysql.VARCHAR(length=1024), nullable=False))
    op.add_column('article', sa.Column('ext_field_1', mysql.VARCHAR(length=1024), nullable=False))
    op.alter_column('article', 'article_id',
                    existing_type=mysql.INTEGER(display_width=11),
                    nullable=False)
    op.drop_column('article', 'serial')
    # ### end Alembic commands ###
|
rootrl/Flask-demo-with-mysql | app/models.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask import current_app
from . import db
# can't set a default value
class Article(db.Model):
    """Article table (a server-side column default could not be set; see the
    commented-out article_id line)."""
    id = db.Column(db.Integer, primary_key=True)
    # article_id = db.Column(db.Integer, nullable=False, server_default=0) # can't work
    article_id = db.Column(db.Integer)  # external article id (nullable)
    serial = db.Column(db.Integer)
    title = db.Column(db.String(120), nullable=False)
    content = db.Column(db.Text, nullable=False)

    def __repr__(self):
        return '<Article %r>' % self.title
|
rootrl/Flask-demo-with-mysql | migrations/versions/e28f15c8bc0f_.py | <filename>migrations/versions/e28f15c8bc0f_.py
"""empty message
Revision ID: e28f15c8bc0f
Revises: <PASSWORD>
Create Date: 2019-12-21 16:04:54.553769
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('article',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('article_id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=120), nullable=False),
sa.Column('ext_field_1', sa.String(length=1024), nullable=True),
sa.Column('ext_field_2', sa.String(length=1024), nullable=True),
sa.Column('content', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.drop_index('email', table_name='user')
op.drop_index('username', table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
def downgrade():
    """Revert this revision: recreate the ``user`` table with its unique
    indexes and drop ``article``.

    MySQL-dialect types are used so the downgrade reproduces the
    pre-migration schema exactly.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
    sa.Column('username', mysql.VARCHAR(length=80), nullable=False),
    sa.Column('email', mysql.VARCHAR(length=120), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    mysql_default_charset='utf8',
    mysql_engine='InnoDB'
    )
    op.create_index('username', 'user', ['username'], unique=True)
    op.create_index('email', 'user', ['email'], unique=True)
    op.drop_table('article')
    # ### end Alembic commands ###
|
rootrl/Flask-demo-with-mysql | app/__init__.py | <reponame>rootrl/Flask-demo-with-mysql
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
db = SQLAlchemy()
def create_app(config_name):
    """Application factory: build and configure a Flask instance.

    :param config_name: key into the module-level ``config`` mapping
        (e.g. ``'development'``).
    :return: the configured Flask application.
    """
    app = Flask(__name__, instance_relative_config=True)

    # Load the selected environment's settings, then let that config
    # class perform any app-level initialisation of its own.
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    # Bind extensions to this application instance.
    db.init_app(app)

    # TODO:: just import frontend and backend, and the router defined in modules's __init__.py
    from .frontend import article
    app.register_blueprint(article.bp)

    return app
|
rootrl/Flask-demo-with-mysql | app/frontend/__init__.py | <filename>app/frontend/__init__.py
# from app import app
# from . import article
# app.register_blueprint(article.bp)
|
rootrl/Flask-demo-with-mysql | app/frontend/article.py | import functools
import random
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from app import create_app, db
from app.models import Article
bp = Blueprint('article', __name__, url_prefix='/article')
@bp.route('/show', methods=["GET"])
def show():
    """Render a single article selected by serial id and article id.

    Query args:
        sid: serial id of the article series (defaults to 2).
        id:  article id; a random id in [1, 100] is used when omitted.
    """
    serial_id = request.args.get('sid', 2)
    # Renamed from ``id`` to avoid shadowing the builtin.
    article_id = request.args.get('id', random.randint(1, 100))
    article = Article.query.filter_by(serial=serial_id, article_id=article_id).first()
    # NOTE(review): ``article`` may be None when no row matches -- the
    # template must handle that case.
    return render_template('frontend/article/show.html', article=article)
@bp.route('/del', methods=["GET"])
def delete():
    """Delete an article by primary key, then redirect to the show page.

    Bug fixes vs. the original:
    - the view returned ``None``, which makes Flask raise
      "view did not return a valid response";
    - a missing article crashed with ``db.session.delete(None)``.

    NOTE(review): performing a destructive action on GET is unsafe
    (CSRF / prefetching) -- consider switching this route to POST.
    """
    pk = request.args.get('id')
    article = Article.query.get(pk)
    if article is not None:
        db.session.delete(article)
        db.session.commit()
        flash("删除成功")
    return redirect(url_for('article.show'))
@bp.route('/add', methods=("POST", "GET"))
def create():
    """Show the article form (GET) and persist a submitted article (POST)."""
    if request.method == 'POST':
        form = request.form
        article_id, serial_id = form['id'], form['sid']
        title, content = form['title'], form['content']

        if title and content:
            record = Article(article_id=article_id, title=title,
                             content=content, serial=serial_id)
            db.session.add(record)
            db.session.commit()
            flash("发布成功!")
        else:
            flash("Title and Content is required")

    # GET requests and both POST outcomes land back on the form page.
    return render_template('frontend/article/add.html')
|
rootrl/Flask-demo-with-mysql | config.py | # -*- coding: utf-8 -*-
# @Author: Rootrl
# @Date: 2019-12-21 13:42:41
import os
from time import strftime
import logging
log_name = os.path.join(
os.getenv('HOME'), 'log/flask/log_{}.log'.format(strftime('%Y%m%d')))
FLASK_LOG_FILE = os.getenv('FLASK_LOG_FILE') or log_name
if not os.path.exists(os.path.dirname(FLASK_LOG_FILE)):
os.makedirs(os.path.dirname(FLASK_LOG_FILE))
def get_handler():
    """Build a DEBUG-level file handler that writes to ``FLASK_LOG_FILE``."""
    fmt = logging.Formatter(
        '[%(asctime)s %(filename)s:%(lineno)s] - %(message)s')
    handler = logging.FileHandler(FLASK_LOG_FILE, encoding='utf-8')
    handler.setFormatter(fmt)
    handler.setLevel(logging.DEBUG)
    return handler
# Absolute path of the directory containing this config module.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all environments.

    Subclasses override ``SQLALCHEMY_DATABASE_URI``; ``init_app`` is a hook
    for per-environment application setup.
    """
    FLATPAGES_AUTO_RELOAD = True
    FLATPAGES_EXTENSION = '.md'
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'can you guess it'
    DEBUG = True
    # sqlalchemy format: mysql+pymysql://username:password@host/database
    # Read from the environment first (consistent with SECRET_KEY above) so
    # real credentials need not live in source control; falls back to the
    # original hard-coded local URI.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'mysql+pymysql://root:root123@localhost/flask_project?charset=utf8'
    # Auto-commit the session when the request teardown runs.
    # NOTE(review): this option is deprecated/removed in recent
    # Flask-SQLAlchemy releases -- confirm the installed version honours it.
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    # Track object modifications (adds overhead; only needed for signals).
    SQLALCHEMY_TRACK_MODIFICATIONS = True

    @staticmethod
    def init_app(app):
        """Per-environment initialisation hook; the base class does nothing."""
        # app.logger.setLevel(logging.DEBUG)
        # app.logger.addHandler(get_handler)
        pass
class DevelopmentConfig(Config):
    """Configuration used for local development."""
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root123@localhost/flask_project?charset=utf8'
class TestConfig(Config):
    """Configuration used when running the test suite."""
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root123@localhost/flask_project?charset=utf8'
class ProductionConfig(Config):
    """Configuration for the production environment.

    NOTE(review): this still points at the same local development database
    and credentials as the other environments -- replace before deploying.
    """
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root123@localhost/flask_project?charset=utf8'
# Map environment names to their configuration classes; 'default' is used
# when no environment is specified.
config = {
    'production': ProductionConfig,
    'development': DevelopmentConfig,
    'test': TestConfig,
    'default': DevelopmentConfig
}
|
bAmpT/muzero-pytorch | core/test.py | <filename>core/test.py
import os
import torch
from .mcts import MCTS, Node
from .utils import select_action
import multiprocessing
def _test(config, model, ep_i, device, render, save_video, save_path, ep_data):
    """Play one evaluation episode and record its total reward.

    The reward is stored under ``ep_data[ep_i]`` so the parent process can
    aggregate results across worker processes.
    """
    with torch.no_grad():
        env = config.new_game(save_video=save_video, save_path=save_path,
                              video_callable=lambda episode_id: True, uid=ep_i)
        total_reward = 0
        obs = env.reset()
        done = False
        while not done:
            if render:
                env.render()
            # Fresh search tree rooted at the current observation.
            root = Node(0)
            state = torch.FloatTensor(obs).to(device).unsqueeze(0)
            root.expand(env.to_play(), env.legal_actions(), model.initial_inference(state))
            MCTS(config).run(root, env.action_history(), model)
            # Greedy (deterministic) action selection for evaluation.
            action, _ = select_action(root, temperature=1, deterministic=True)
            obs, reward, done, info = env.step(action.index)
            total_reward += reward
        env.close()
        ep_data[ep_i] = total_reward
def test(config, model, episodes, device, render, save_video=False):
    """Evaluate ``model`` over ``episodes`` episodes run in parallel.

    One worker process is spawned per episode; rewards are collected through
    a manager dict and the mean episode reward is returned.
    """
    model.to(device)
    model.eval()

    save_path = os.path.join(config.exp_path, 'recordings')

    manager = multiprocessing.Manager()
    ep_data = manager.dict()

    workers = []
    for ep_i in range(episodes):
        worker = multiprocessing.Process(
            target=_test,
            args=(config, model, ep_i, device, render, save_video, save_path, ep_data))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()

    # Mean reward across all episodes.
    return sum(ep_data.values()) / episodes
|
bAmpT/muzero-pytorch | scripts/aggregate_summaries.py | <gh_stars>100-1000
import os
from tensorboard.backend.event_processing import event_accumulator as ea
from torch.utils.tensorboard import SummaryWriter
def aggregate_summaries(logdir: str, exp_path: str, ):
    """Average scalar summaries from every tfevents file under ``logdir``.

    For each (tag, step) pair found across runs, the mean value is written
    to a fresh summary in ``exp_path``. Only scalar summaries are supported.
    """
    # tag -> {'data': {step: [one value per run]}}
    scalars_info = {}
    for root, dirs, files in os.walk(logdir):
        for event_file in (name for name in files if 'tfevents' in name):
            acc = ea.EventAccumulator(os.path.join(root, event_file))
            acc.Reload()
            # only support scalar now
            for tag in acc.Tags()['scalars']:
                tag_data = scalars_info.setdefault(tag, {'data': {}})['data']
                for s in acc.Scalars(tag):
                    tag_data.setdefault(s.step, []).append(s.value)

    summary_writer = SummaryWriter(exp_path, flush_secs=10)
    for tag, info in scalars_info.items():
        for step in sorted(info['data']):
            values = info['data'][step]
            summary_writer.add_scalar(tag, sum(values) / len(values), step)
    summary_writer.flush()
    summary_writer.close()
if __name__ == '__main__':
    """Aggregates multiple runs of the each configuration"""
    result_path = '../results'
    base_aggregate_path = '../aggregate_results'
    # A directory whose subdirectories are per-seed runs marks one
    # experiment configuration to aggregate.
    for root, dirs, files in os.walk(result_path):
        if len(dirs) > 0 and 'seed' in dirs[0]:
            print(root, dirs, files)
            # Mirror the experiment's relative path under the aggregate root.
            aggregate_path = base_aggregate_path + root.split(result_path)[1]
            os.makedirs(aggregate_path, exist_ok=True)
            aggregate_summaries(root, aggregate_path)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.