repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/mutation_options.py | import argparse
import numpy as np
import random
import copy
import tequila as tq
from typing import Union
from collections import Counter
from time import time
from vqe_utils import convert_PQH_to_tq_QH, convert_tq_QH_to_PQH,\
fold_unitary_into_hamiltonian
from energy_optimization import minimize_energy
global_seed = 1
class Instructions:
'''
A mutable set of (mostly Clifford) gate instructions together with the
simulated-annealing state used to mutate it.

Gates are stored as string labels in self.gates and their qubit
assignments in the parallel list self.positions. Non-Clifford gates
(UCC2/UCC4) are budgeted by max_non_cliffords.
'''
def __init__(self, n_qubits, mu=2.0, sigma=0.4, alpha=0.9, T_0=1.0,
beta=0.5, patience=10, max_non_cliffords=0,
reference_energy=0., number=None):
'''
n_qubits: number of qubits gates may act on.
mu, sigma: log-normal parameters for the number of gates drawn
in get_random_gates.
alpha: multiplicative cooling factor for the annealing temperature.
T_0: initial annealing temperature.
beta: mixing weight for the temperature reset after patience runs
out (see update_T).
patience: non-improving steps tolerated before reset_to_best.
max_non_cliffords: upper bound on non-Clifford (UCC2/UCC4) gates.
reference_energy: reference used in the acceptance probability.
number: if given, fixes the number of initially generated gates.
'''
# hardcoded values for now
self.num_non_cliffords = 0
self.max_non_cliffords = max_non_cliffords
# ------------------------
self.starting_patience = patience
self.patience = patience
self.mu = mu
self.sigma = sigma
self.alpha = alpha
self.beta = beta
self.T_0 = T_0
# current annealing temperature (mutated by update_T)
self.T = T_0
# gates are drawn first; positions are sampled for that gate list
self.gates = self.get_random_gates(number=number)
self.n_qubits = n_qubits
self.positions = self.get_random_positions()
self.best_previous_instructions = {}
self.reference_energy = reference_energy
self.best_reference_wfn = None
self.noncliff_replacements = {}
def _str(self):
print(self.gates)
print(self.positions)
def set_reference_wfn(self, reference_wfn):
# Store the wavefunction used as warm start for later energy optimizations.
self.best_reference_wfn = reference_wfn
def update_T(self, update_type: str = 'regular', best_temp=None):
# Regular update
if update_type.lower() == 'regular':
self.T = self.alpha * self.T
# Temperature update if patience ran out
elif update_type.lower() == 'patience':
self.T = self.beta*best_temp + (1-self.beta)*self.T_0
def get_random_gates(self, number=None):
    '''
    Randomly generate a list of gate labels.

    number: if given, the exact number of gates to generate; otherwise
    the count is drawn from a log-normal distribution with parameters
    (self.mu, self.sigma).

    Non-Clifford gates (UCC2/UCC4) are only offered while the budget
    self.max_non_cliffords allows it; excess non-Clifford draws are
    replaced by random Clifford gates. UCC-type placeholders are then
    expanded into concrete basis-change/mid-gate strings.
    '''
    full_options = ['X', 'Y', 'Z', 'S', 'H', 'CX', 'CY', 'CZ', 'SWAP',
                    'UCC2c', 'UCC4c', 'UCC2', 'UCC4']
    clifford_options = ['X', 'Y', 'Z', 'S', 'H', 'CX', 'CY', 'CZ', 'SWAP',
                        'UCC2c', 'UCC4c']
    # Gate distribution, selecting the number of gates to add.
    # np.int was removed in NumPy 1.20 -- use the builtin int() instead.
    k = int(np.random.lognormal(self.mu, self.sigma))
    if number is not None:
        k = number
    # Selecting gate types (sampling with replacement).
    if self.num_non_cliffords < self.max_non_cliffords:
        gates = random.choices(full_options, k=k)
    else:
        gates = random.choices(clifford_options, k=k)
    counts = Counter(gates)
    new_num_non_cliffords = counts["UCC2"] + counts["UCC4"]
    if (new_num_non_cliffords + self.num_non_cliffords) <= self.max_non_cliffords:
        self.num_non_cliffords += new_num_non_cliffords
    else:
        # Too many non-Cliffords drawn: replace the excess by Cliffords,
        # preferring to replace UCC4 gates first.
        extra_cliffords = (new_num_non_cliffords + self.num_non_cliffords) - self.max_non_cliffords
        assert extra_cliffords >= 0
        for replacement in random.choices(clifford_options, k=extra_cliffords):
            try:
                gates[gates.index("UCC4")] = replacement
            except ValueError:
                # no UCC4 left -- replace a UCC2 instead
                gates[gates.index("UCC2")] = replacement
        self.num_non_cliffords = self.max_non_cliffords
    # Expand UCC placeholders into concrete gate strings. (The original
    # had a dead `k == 1` branch that compared the list itself against a
    # string, so single-gate lists were never expanded; the loop below
    # covers every length.)
    for ind, gate in enumerate(gates):
        if gate in ("UCC2c", "UCC4c"):
            gates[ind] = get_string_Cliff_ucc(gate)
        elif gate in ("UCC2", "UCC4"):
            gates[ind] = get_string_ucc(gate)
    return gates
def get_random_positions(self, gates=None):
'''
Randomly assign gates to qubits.
'''
if gates is None:
gates = self.gates
n_qubits = self.n_qubits
single_qubit = ['X','Y','Z','S','H']
# two_qubit = ['CX','CY','CZ', 'SWAP']
two_qubit = ['CX', 'CY', 'CZ', 'SWAP', 'UCC2c', 'UCC2']
four_qubit = ['UCC4c', 'UCC4']
qubits = list(range(0, n_qubits))
q_positions = []
for gate in gates:
if gate in four_qubit:
p = random.sample(qubits, k=4)
if gate in two_qubit:
p = random.sample(qubits, k=2)
if gate in single_qubit:
p = random.sample(qubits, k=1)
if "UCC2" in gate:
p = random.sample(qubits, k=2)
if "UCC4" in gate:
p = random.sample(qubits, k=4)
q_positions.append(p)
return q_positions
def delete(self, number=None):
'''
Randomly drops some gates from a clifford instruction set
if not specified, the number of gates to drop is sampled from a uniform distribution over all the gates
'''
gates = copy.deepcopy(self.gates)
positions = copy.deepcopy(self.positions)
n_qubits = self.n_qubits
if number is not None:
num_to_drop = number
else:
num_to_drop = random.sample(range(1,len(gates)-1), k=1)[0]
action_indices = random.sample(range(0,len(gates)-1), k=num_to_drop)
for index in sorted(action_indices, reverse=True):
if "UCC2_" in str(gates[index]) or "UCC4_" in str(gates[index]):
self.num_non_cliffords -= 1
del gates[index]
del positions[index]
self.gates = gates
self.positions = positions
#print ('deleted {} gates'.format(num_to_drop))
def add(self, number=None):
    '''
    Append randomly drawn gates to the end of the instruction set.

    number: how many gates to append; if None, the count is drawn from
    the log-normal distribution used by get_random_gates.
    '''
    gates = copy.deepcopy(self.gates)
    positions = copy.deepcopy(self.positions)
    n_qubits = self.n_qubits
    fresh = self.get_new_instructions(number=number)
    gates += fresh['gates']
    positions += fresh['positions']
    self.gates = gates
    self.positions = positions
def change(self, number=None):
    '''
    Replace a random subset of gates (and their qubit positions) by
    freshly drawn ones.

    number: how many gates to replace; if None, the count is drawn
    uniformly from 1 .. len(gates)-1.
    '''
    gates = copy.deepcopy(self.gates)
    positions = copy.deepcopy(self.positions)
    n_qubits = self.n_qubits
    if number is not None:
        num_to_change = number
    else:
        num_to_change = random.sample(range(1, len(gates)), k=1)[0]
    action_indices = random.sample(range(0, len(gates) - 1), k=num_to_change)
    replacement = self.get_new_instructions(number=num_to_change)
    for slot, index in enumerate(action_indices):
        gates[index] = replacement['gates'][slot]
        positions[index] = replacement['positions'][slot]
    self.gates = gates
    self.positions = positions
# TODO to be debugged!
def prune(self):
'''
Prune instructions to remove redundant operations:
--> first gate should go beyond subsystems (this assumes expressible enough subsystem-circuits
#TODO later -> this needs subsystem information in here!
--> 2 subsequent gates that are their respective inverse can be removed
#TODO this might change the number of qubits acted on in theory?

NOTE(review): currently a no-op; the draft below is kept commented out
(popping while scanning by index would skip/shift entries).
'''
pass
#print ("DEBUG PRUNE FUNCTION!")
# gates = copy.deepcopy(self.gates)
# positions = copy.deepcopy(self.positions)
# for g_index in range(len(gates)-1):
# if (gates[g_index] == gates[g_index+1] and not 'S' in gates[g_index])\
# or (gates[g_index] == 'S' and gates[g_index+1] == 'S-dag')\
# or (gates[g_index] == 'S-dag' and gates[g_index+1] == 'S'):
# print(len(gates))
# if positions[g_index] == positions[g_index+1]:
# self.gates.pop(g_index)
# self.positions.pop(g_index)
def update_by_action(self, action: str):
'''
Updates instruction dictionary
-> Either adds, deletes or changes gates
'''
if action == 'delete':
try:
self.delete()
# In case there are too few gates to delete
except:
pass
elif action == 'add':
self.add()
elif action == 'change':
self.change()
else:
raise Exception("Unknown action type " + action + ".")
self.prune()
def update_best_previous_instructions(self):
''' Overwrites the best previous instructions with the current ones. '''
self.best_previous_instructions['gates'] = copy.deepcopy(self.gates)
self.best_previous_instructions['positions'] = copy.deepcopy(self.positions)
self.best_previous_instructions['T'] = copy.deepcopy(self.T)
def reset_to_best(self):
''' Overwrites the current instructions with best previous ones. '''
#print ('Patience ran out... resetting to best previous instructions.')
self.gates = copy.deepcopy(self.best_previous_instructions['gates'])
self.positions = copy.deepcopy(self.best_previous_instructions['positions'])
self.patience = copy.deepcopy(self.starting_patience)
self.update_T(update_type='patience', best_temp=copy.deepcopy(self.best_previous_instructions['T']))
def get_new_instructions(self, number=None):
    '''
    Draw a fresh instruction set.

    Returns a dictionary with keys 'gates' and 'positions' holding
    parallel lists of equal length.
    '''
    mu = self.mu
    sigma = self.sigma
    n_qubits = self.n_qubits
    gates = self.get_random_gates(number=number)
    q_positions = self.get_random_positions(gates)
    assert len(q_positions) == len(gates)
    return {'gates': gates, 'positions': q_positions}
def replace_cg_w_ncg(self, gate_id):
''' replaces a set of Clifford gates
with corresponding non-Cliffords
'''
print("gates before", self.gates, flush=True)
gate = self.gates[gate_id]
if gate == 'X':
gate = "Rx"
elif gate == 'Y':
gate = "Ry"
elif gate == 'Z':
gate = "Rz"
elif gate == 'S':
gate = "S_nc"
#gate = "Rz"
elif gate == 'H':
gate = "H_nc"
# this does not work???????
# gate = "Ry"
elif gate == 'CX':
gate = "CRx"
elif gate == 'CY':
gate = "CRy"
elif gate == 'CZ':
gate = "CRz"
elif gate == 'SWAP':#find a way to change this as well
pass
# gate = "SWAP"
elif "UCC2c" in str(gate):
pre_gate = gate.split("_")[0]
mid_gate = gate.split("_")[-1]
gate = pre_gate + "_" + "UCC2" + "_" +mid_gate
elif "UCC4c" in str(gate):
pre_gate = gate.split("_")[0]
mid_gate = gate.split("_")[-1]
gate = pre_gate + "_" + "UCC4" + "_" + mid_gate
self.gates[gate_id] = gate
print("gates after", self.gates, flush=True)
def build_circuit(instructions):
'''
Construct a tequila circuit from a Clifford instruction set.

Returns a tuple (circuit, init_angles) where init_angles maps the
fresh variable names created for non-Clifford gates to their initial
values (0.0 for local rotations, pi for controlled rotations).
'''
gates = instructions.gates
q_positions = instructions.positions
init_angles = {}
clifford_circuit = tq.QCircuit()
# for i in range(1, len(gates)):
# TODO len(q_positions) not == len(gates)
for i in range(len(gates)):
# Unpack the qubit assignment for this gate. NOTE(review): in the
# one-qubit case q1 stays a 1-element list (not a scalar) -- tequila
# gate targets appear to accept lists; confirm.
if len(q_positions[i]) == 2:
q1, q2 = q_positions[i]
elif len(q_positions[i]) == 1:
q1 = q_positions[i]
q2 = None
elif not len(q_positions[i]) == 4:
raise Exception("q_positions[i] must have length 1, 2 or 4...")
if gates[i] == 'X':
clifford_circuit += tq.gates.X(q1)
if gates[i] == 'Y':
clifford_circuit += tq.gates.Y(q1)
if gates[i] == 'Z':
clifford_circuit += tq.gates.Z(q1)
if gates[i] == 'S':
clifford_circuit += tq.gates.S(q1)
if gates[i] == 'H':
clifford_circuit += tq.gates.H(q1)
if gates[i] == 'CX':
clifford_circuit += tq.gates.CX(q1, q2)
# CY and CZ are decomposed into basis changes around a CX.
if gates[i] == 'CY': #using generators
clifford_circuit += tq.gates.S(q2)
clifford_circuit += tq.gates.CX(q1, q2)
clifford_circuit += tq.gates.S(q2).dagger()
if gates[i] == 'CZ': #using generators
clifford_circuit += tq.gates.H(q2)
clifford_circuit += tq.gates.CX(q1, q2)
clifford_circuit += tq.gates.H(q2)
# SWAP as three alternating CNOTs.
if gates[i] == 'SWAP':
clifford_circuit += tq.gates.CX(q1, q2)
clifford_circuit += tq.gates.CX(q2, q1)
clifford_circuit += tq.gates.CX(q1, q2)
if "UCC2c" in str(gates[i]) or "UCC4c" in str(gates[i]):
clifford_circuit += get_clifford_UCC_circuit(gates[i], q_positions[i])
# NON-CLIFFORD STUFF FROM HERE ON
# Each non-Clifford gate mints a fresh pseudo-random variable name;
# global_seed makes the sequence of names reproducible.
global global_seed
if gates[i] == "S_nc":
np.random.seed(global_seed)
global_seed += 1
var_name = "var"+str(np.random.rand())
init_angles[var_name] = 0.0
clifford_circuit += tq.gates.S(q1)
clifford_circuit += tq.gates.Rz(angle=var_name, target=q1)
if gates[i] == "H_nc":
np.random.seed(global_seed)
global_seed += 1
var_name = "var"+str(np.random.rand())
init_angles[var_name] = 0.0
clifford_circuit += tq.gates.H(q1)
clifford_circuit += tq.gates.Ry(angle=var_name, target=q1)
if gates[i] == "Rx":
np.random.seed(global_seed)
global_seed += 1
var_name = "var"+str(np.random.rand())
init_angles[var_name] = 0.0
clifford_circuit += tq.gates.X(q1)
clifford_circuit += tq.gates.Rx(angle=var_name, target=q1)
if gates[i] == "Ry":
np.random.seed(global_seed)
global_seed += 1
var_name = "var"+str(np.random.rand())
init_angles[var_name] = 0.0
clifford_circuit += tq.gates.Y(q1)
clifford_circuit += tq.gates.Ry(angle=var_name, target=q1)
if gates[i] == "Rz":
# NOTE(review): unlike every other branch this one does not call
# np.random.seed(global_seed) before drawing the variable name --
# looks like an oversight; confirm whether it is intentional.
global_seed += 1
var_name = "var"+str(np.random.rand())
init_angles[var_name] = 0
clifford_circuit += tq.gates.Z(q1)
clifford_circuit += tq.gates.Rz(angle=var_name, target=q1)
if gates[i] == "CRx":
np.random.seed(global_seed)
global_seed += 1
var_name = "var"+str(np.random.rand())
init_angles[var_name] = np.pi
clifford_circuit += tq.gates.Rx(angle=var_name, target=q2, control=q1)
if gates[i] == "CRy":
np.random.seed(global_seed)
global_seed += 1
var_name = "var"+str(np.random.rand())
init_angles[var_name] = np.pi
clifford_circuit += tq.gates.Ry(angle=var_name, target=q2, control=q1)
if gates[i] == "CRz":
np.random.seed(global_seed)
global_seed += 1
var_name = "var"+str(np.random.rand())
init_angles[var_name] = np.pi
clifford_circuit += tq.gates.Rz(angle=var_name, target=q2, control=q1)
# Helper re-defined on every loop iteration (harmless but wasteful):
# initial angle for a non-Clifford UCC gate, keyed on its mid gate.
def get_ucc_init_angles(gate):
angle = None
pre_gate = gate.split("_")[0]
mid_gate = gate.split("_")[-1]
if mid_gate == 'Z':
angle = 0.
elif mid_gate == 'S':
angle = 0.
elif mid_gate == 'S-dag':
angle = 0.
else:
raise Exception("This should not happen -- center/mid gate should be Z,S,S_dag.")
return angle
if "UCC2_" in str(gates[i]) or "UCC4_" in str(gates[i]):
uccc_circuit = get_non_clifford_UCC_circuit(gates[i], q_positions[i])
clifford_circuit += uccc_circuit
try:
var_name = uccc_circuit.extract_variables()[0]
init_angles[var_name]= get_ucc_init_angles(gates[i])
except:
# NOTE(review): this wipes ALL previously collected init_angles,
# not just the entry for the failing gate -- likely a bug; confirm.
init_angles = {}
return clifford_circuit, init_angles
def get_non_clifford_UCC_circuit(gate, positions):
    """
    Build the non-Clifford (parametrized) UCC-style circuit encoded in
    `gate`.

    gate: encoded string "<pre-gates joined by #>_<UCC label>_<mid gate>",
          e.g. "X#H_UCC2_S".
    positions: qubit indices the circuit acts on (one per pre-gate).
    Returns pre * mid * pre^dagger as a tequila QCircuit, where mid is
    the encoded S / S-dag / Z gate followed by an Rz on a fresh variable.
    """
    pre_cir_dic = {"X": tq.gates.X, "Y": tq.gates.Y, "H": tq.gates.H, "I": None}
    pre_gate = gate.split("_")[0]
    # The original spelled this split(*"#"), which is just split("#").
    pre_gates = pre_gate.split("#")
    pre_circuit = tq.QCircuit()
    for i, pos in enumerate(positions):
        try:
            pre_circuit += pre_cir_dic[pre_gates[i]](pos)
        except (TypeError, KeyError, IndexError):
            # "I" maps to None (no basis change); unknown/missing labels
            # are skipped, as the original bare except did.
            pass
    # entangling CX ladder
    for i, pos in enumerate(positions[:-1]):
        pre_circuit += tq.gates.CX(pos, positions[i + 1])
    global global_seed
    mid_gate = gate.split("_")[-1]
    mid_circuit = tq.QCircuit()
    if mid_gate in ("S", "S-dag", "Z"):
        # fresh pseudo-random variable name, reproducible via global_seed
        np.random.seed(global_seed)
        global_seed += 1
        var_name = "var" + str(np.random.rand())
        if mid_gate == "S":
            mid_circuit += tq.gates.S(positions[-1])
        elif mid_gate == "S-dag":
            mid_circuit += tq.gates.S(positions[-1]).dagger()
        else:
            mid_circuit += tq.gates.Z(positions[-1])
        mid_circuit += tq.gates.Rz(angle=tq.Variable(var_name), target=positions[-1])
    return pre_circuit + mid_circuit + pre_circuit.dagger()
def get_string_Cliff_ucc(gate):
    """
    Attach randomly sampled basis-change and mid-circuit elements to a
    Clifford UCC gate label, e.g. "UCC2c" -> "X#H_UCC2c_S".
    """
    basis_choices = ["X", "Y", "H", "I"]
    mid_choices = ["S", "S-dag", "Z"]
    if "UCC2c" in gate:
        picked = random.sample(basis_choices, k=2)
    elif "UCC4c" in gate:
        picked = random.sample(basis_choices, k=4)
    else:
        picked = None
    prefix = "#".join(str(item) for item in picked)
    middle = random.sample(mid_choices, k=1)[0]
    return "{}_{}_{}".format(prefix, gate, middle)
def get_string_ucc(gate):
    """
    Attach randomly sampled basis-change gates and a random rotation
    angle in [0, 2*pi) to a non-Clifford UCC gate label,
    e.g. "UCC2" -> "X#H_UCC2_3.14...".
    """
    basis_choices = ["X", "Y", "H", "I"]
    if "UCC2" in gate:
        picked = random.sample(basis_choices, k=2)
    elif "UCC4" in gate:
        picked = random.sample(basis_choices, k=4)
    else:
        picked = None
    prefix = "#".join(str(item) for item in picked)
    angle = str(random.random() * 2 * np.pi)
    return "{}_{}_{}".format(prefix, gate, angle)
def get_clifford_UCC_circuit(gate, positions):
"""
This function creates an approximate UCC excitation circuit using only
clifford Gates.

gate: encoded string "<pre-gates joined by #>_<UCC label>_<mid gate>".
positions: qubit indices (one per pre-gate).
Returns pre * mid * pre^dagger, where mid is the encoded S/S-dag/Z gate.
"""
#pre_circ_comp = ["X", "Y", "H", "I"]
pre_cir_dic = {"X":tq.gates.X, "Y":tq.gates.Y, "H":tq.gates.H, "I":None}
pre_gate = gate.split("_")[0]
# NOTE(review): split(*"#") is just a confusing spelling of split("#").
pre_gates = pre_gate.split(*"#")
#pre_gates = []
#if gate == "UCC2":
# pre_gates = random.choices(pre_circ_comp, k=2)
#if gate == "UCC4":
# pre_gates = random.choices(pre_circ_comp, k=4)
pre_circuit = tq.QCircuit()
for i, pos in enumerate(positions):
try:
# "I" maps to None -> calling None raises, which the except swallows
pre_circuit += pre_cir_dic[pre_gates[i]](pos)
except:
pass
# entangling CX ladder
for i, pos in enumerate(positions[:-1]):
pre_circuit += tq.gates.CX(pos, positions[i+1])
#mid_circ_comp = ["S", "S-dag", "Z"]
#mid_gate = random.sample(mid_circ_comp, k=1)[0]
mid_gate = gate.split("_")[-1]
mid_circuit = tq.QCircuit()
if mid_gate == "S":
mid_circuit += tq.gates.S(positions[-1])
elif mid_gate == "S-dag":
mid_circuit += tq.gates.S(positions[-1]).dagger()
elif mid_gate == "Z":
mid_circuit += tq.gates.Z(positions[-1])
return pre_circuit + mid_circuit + pre_circuit.dagger()
def get_UCC_circuit(gate, positions):
    """
    Build the fully parametrized UCC excitation circuit encoded in `gate`.

    gate: encoded string "<pre-gates joined by #>_<UCC label>_<angle>".
    positions: qubit indices the circuit acts on (one per pre-gate).
    Returns pre * Rz(var) * pre^dagger as a tequila QCircuit; a fresh
    variable name is generated from the module-level global_seed.
    """
    pre_cir_dic = {"X": tq.gates.X, "Y": tq.gates.Y, "H": tq.gates.H, "I": None}
    # The original spelled this split(*"#"), which is just split("#").
    pre_gates = gate.split("_")[0].split("#")
    pre_circuit = tq.QCircuit()
    for i, pos in enumerate(positions):
        try:
            pre_circuit += pre_cir_dic[pre_gates[i]](pos)
        except (TypeError, KeyError, IndexError):
            # "I" maps to None (no basis change); unknown/missing labels
            # are skipped, as the original bare except did.
            pass
    # entangling CX ladder
    for i, pos in enumerate(positions[:-1]):
        pre_circuit += tq.gates.CX(pos, positions[i + 1])
    # NOTE: the angle encoded in gate.split("_")[-1] is not used here --
    # the rotation always gets a fresh variable (initialized elsewhere).
    global global_seed
    np.random.seed(global_seed)
    global_seed += 1
    var_name = "var" + str(np.random.rand())
    mid_circuit = tq.gates.Rz(target=positions[-1], angle=tq.Variable(var_name))
    return pre_circuit + mid_circuit + pre_circuit.dagger()
def schedule_actions_ratio(epoch: int, action_options: list = ['delete', 'change', 'add'],
                           decay: int = 30,
                           steady_ratio: Union[tuple, list] = [0.2, 0.6, 0.2]) -> list:
    '''
    Anneal the mutation-action probabilities over epochs.

    Early epochs favour 'add' (ratio close to 1); as epoch grows the
    ratios approach steady_ratio = (delete, change, add).
    '''
    delete, change, add = tuple(steady_ratio)
    decay_term = np.exp(-1 * epoch / decay)
    ramp = 1 - decay_term
    actions_ratio = []
    for action in action_options:
        if action == 'delete':
            actions_ratio.append(delete * ramp)
        elif action == 'change':
            actions_ratio.append(change * ramp)
        elif action == 'add':
            actions_ratio.append((1 - add) * decay_term + add)
        else:
            print('Action type ', action, ' not defined!')
    # unnecessary for current schedule
    # if not np.isclose(np.sum(actions_ratio), 1.0):
    # actions_ratio /= np.sum(actions_ratio)
    return actions_ratio
def get_action(ratio: list = [0.20, 0.60, 0.20]):
    '''
    Randomly draw one mutation action ('delete', 'change' or 'add')
    from a multinomial distribution with the given probabilities.
    '''
    draw = np.random.multinomial(n=1, pvals=ratio, size=1)[0]
    # the draw is one-hot, so argmax picks the chosen action index
    chosen = int(np.argmax(draw))
    return ['delete', 'change', 'add'][chosen]
def get_prob_acceptance(E_curr, E_prev, T, reference_energy):
    '''
    Metropolis-style acceptance probability for a proposed move.

    Returns 1 for a strict improvement that beats the reference energy,
    exp(-delta_E / T) for a non-improving move that still beats the
    reference, and 0 otherwise.
    '''
    # Fallback consumed only if the subtraction fails (e.g. None input);
    # kept from the original implementation.
    delta_E = 12
    try:
        delta_E = E_curr - E_prev
    except TypeError:
        # The original also printed E_curr - E_prev here, which re-raised
        # the very error being handled; only log the inputs.
        print("Ecurr is", E_curr)
        print("Eprev is", E_prev)
    if delta_E < 0 and E_curr < reference_energy:
        return 1
    if E_curr < reference_energy:
        return np.exp(-delta_E / T)
    return 0
def perform_folding(hamiltonian, circuit):
    '''
    Fold every gate of `circuit` into `hamiltonian` (conjugation by the
    circuit), traversing the gate list back-to-front.

    Returns the folded hamiltonian as a ParamQubitHamiltonian.
    NOTE: the original called circuit.gates.reverse(), permanently
    reversing the caller's circuit as a side effect; iterating a
    reversed view preserves the same fold order without mutation.
    '''
    # QubitHamiltonian -> ParamQubitHamiltonian
    param_hamiltonian = convert_tq_QH_to_PQH(hamiltonian)
    for gate in reversed(circuit.gates):
        param_hamiltonian = fold_unitary_into_hamiltonian(gate, param_hamiltonian)
    return param_hamiltonian
def evaluate_fitness(instructions, hamiltonian: tq.QubitHamiltonian, type_energy_eval: str, cluster_circuit: tq.QCircuit=None) -> tuple:
'''
Evaluates fitness=objective=energy given a system
for a set of instructions.

Builds the circuit encoded by `instructions`, folds it into the
Hamiltonian, then minimizes the energy of the folded Hamiltonian.
Returns (E_new, optimal_state) as produced by minimize_energy.
'''
#print ("evaluating fitness")
n_qubits = instructions.n_qubits
clifford_circuit, init_angles = build_circuit(instructions)
# tq.draw(clifford_circuit, backend="cirq")
## TODO check if cluster_circuit is parametrized
t0 = time()
t1 = None
folded_hamiltonian = perform_folding(hamiltonian, clifford_circuit)
t1 = time()
#print ("\tfolding took ", t1-t0)
# Any extractable variable means the circuit carries non-Clifford gates.
parametrized = len(clifford_circuit.extract_variables()) > 0
initial_guess = None
if not parametrized:
# purely Clifford: the folded Hamiltonian can be evaluated directly
folded_hamiltonian = (convert_PQH_to_tq_QH(folded_hamiltonian))()
elif parametrized:
# TODO clifford_circuit is both ref + rest; so nomenclature is shit here
variables = [gate.extract_variables() for gate in clifford_circuit.gates\
if gate.extract_variables()]
variables = np.array(variables).flatten().tolist()
# TODO this initial_guess is absolutely useless rn and is just causing problems if called
initial_guess = { k: 0.1 for k in variables }
# warm start from the best previously seen reference wavefunction, if any
if instructions.best_reference_wfn is not None:
initial_guess = instructions.best_reference_wfn
E_new, optimal_state = minimize_energy(hamiltonian=folded_hamiltonian, n_qubits=n_qubits, type_energy_eval=type_energy_eval, cluster_circuit=cluster_circuit, initial_guess=initial_guess,\
initial_mixed_angles=init_angles)
t2 = time()
#print ("evaluated fitness; comp was ", t2-t1)
#print ("current fitness: ", E_new)
return E_new, optimal_state
| 26,780 | 33.962141 | 191 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/hacked_openfermion_qubit_operator.py | import tequila as tq
import sympy
import copy
#from param_hamiltonian import get_geometry, generate_ucc_ansatz
from hacked_openfermion_symbolic_operator import SymbolicOperator
# Define products of all Pauli operators for symbolic multiplication.
# (P, Q) -> (phase, R) such that P * Q = phase * R for single-qubit Paulis.
_PAULI_OPERATOR_PRODUCTS = {
('I', 'I'): (1., 'I'),
('I', 'X'): (1., 'X'),
('X', 'I'): (1., 'X'),
('I', 'Y'): (1., 'Y'),
('Y', 'I'): (1., 'Y'),
('I', 'Z'): (1., 'Z'),
('Z', 'I'): (1., 'Z'),
('X', 'X'): (1., 'I'),
('Y', 'Y'): (1., 'I'),
('Z', 'Z'): (1., 'I'),
('X', 'Y'): (1.j, 'Z'),
('X', 'Z'): (-1.j, 'Y'),
('Y', 'X'): (-1.j, 'Z'),
('Y', 'Z'): (1.j, 'X'),
('Z', 'X'): (1.j, 'Y'),
('Z', 'Y'): (-1.j, 'X')
}
# Single-qubit Clifford conjugation tables: P -> (phase, U P U^dagger)
# for U = H, S, S^dagger respectively.
_clifford_h_products = {
('I') : (1., 'I'),
('X') : (1., 'Z'),
('Y') : (-1., 'Y'),
('Z') : (1., 'X')
}
_clifford_s_products = {
('I') : (1., 'I'),
('X') : (-1., 'Y'),
('Y') : (1., 'X'),
('Z') : (1., 'Z')
}
_clifford_s_dag_products = {
('I') : (1., 'I'),
('X') : (1., 'Y'),
('Y') : (-1., 'X'),
('Z') : (1., 'Z')
}
# Two-qubit Clifford conjugation tables:
# (P_control, P_target) -> (phase, P_control', P_target') under CX/CY/CZ.
_clifford_cx_products = {
('I', 'I'): (1., 'I', 'I'),
('I', 'X'): (1., 'I', 'X'),
('I', 'Y'): (1., 'Z', 'Y'),
('I', 'Z'): (1., 'Z', 'Z'),
('X', 'I'): (1., 'X', 'X'),
('X', 'X'): (1., 'X', 'I'),
('X', 'Y'): (1., 'Y', 'Z'),
('X', 'Z'): (-1., 'Y', 'Y'),
('Y', 'I'): (1., 'Y', 'X'),
('Y', 'X'): (1., 'Y', 'I'),
('Y', 'Y'): (-1., 'X', 'Z'),
('Y', 'Z'): (1., 'X', 'Y'),
('Z', 'I'): (1., 'Z', 'I'),
('Z', 'X'): (1., 'Z', 'X'),
('Z', 'Y'): (1., 'I', 'Y'),
('Z', 'Z'): (1., 'I', 'Z'),
}
_clifford_cy_products = {
('I', 'I'): (1., 'I', 'I'),
('I', 'X'): (1., 'Z', 'X'),
('I', 'Y'): (1., 'I', 'Y'),
('I', 'Z'): (1., 'Z', 'Z'),
('X', 'I'): (1., 'X', 'Y'),
('X', 'X'): (-1., 'Y', 'Z'),
('X', 'Y'): (1., 'X', 'I'),
('X', 'Z'): (-1., 'Y', 'X'),
('Y', 'I'): (1., 'Y', 'Y'),
('Y', 'X'): (1., 'X', 'Z'),
('Y', 'Y'): (1., 'Y', 'I'),
('Y', 'Z'): (-1., 'X', 'X'),
('Z', 'I'): (1., 'Z', 'I'),
('Z', 'X'): (1., 'I', 'X'),
('Z', 'Y'): (1., 'Z', 'Y'),
('Z', 'Z'): (1., 'I', 'Z'),
}
_clifford_cz_products = {
('I', 'I'): (1., 'I', 'I'),
('I', 'X'): (1., 'Z', 'X'),
('I', 'Y'): (1., 'Z', 'Y'),
('I', 'Z'): (1., 'I', 'Z'),
('X', 'I'): (1., 'X', 'Z'),
('X', 'X'): (-1., 'Y', 'Y'),
('X', 'Y'): (-1., 'Y', 'X'),
('X', 'Z'): (1., 'X', 'I'),
('Y', 'I'): (1., 'Y', 'Z'),
('Y', 'X'): (-1., 'X', 'Y'),
('Y', 'Y'): (1., 'X', 'X'),
('Y', 'Z'): (1., 'Y', 'I'),
('Z', 'I'): (1., 'Z', 'I'),
('Z', 'X'): (1., 'I', 'X'),
('Z', 'Y'): (1., 'I', 'Y'),
('Z', 'Z'): (1., 'Z', 'Z'),
}
# Types accepted as term coefficients (numbers or symbolic parameters).
COEFFICIENT_TYPES = (int, float, complex, sympy.Expr, tq.Variable)
class ParamQubitHamiltonian(SymbolicOperator):
# A qubit operator whose term coefficients may be symbolic / parametrized
# (tq.Variable, sympy expressions) instead of plain numbers, with helpers
# to conjugate ("fold") Clifford gates through the operator term-by-term.
@property
def actions(self):
"""The allowed actions."""
return ('X', 'Y', 'Z')
@property
def action_strings(self):
"""The string representations of the allowed actions."""
return ('X', 'Y', 'Z')
@property
def action_before_index(self):
"""Whether action comes before index in string representations."""
return True
@property
def different_indices_commute(self):
"""Whether factors acting on different indices commute."""
return True
def renormalize(self):
    """
    Fix the induced 2-norm (trace norm) of the operator to 1, in place.

    Raises ZeroDivisionError for an empty or zero operator.
    """
    # Local import: this module never imported numpy at the top level,
    # so the original `numpy.isclose(...)` raised NameError when called.
    import numpy
    norm = self.induced_norm(2)
    if numpy.isclose(norm, 0.0):
        raise ZeroDivisionError('Cannot renormalize empty or zero operator')
    self /= norm
def _simplify(self, term, coefficient=1.0):
    """Simplify a term using commutator and anti-commutator relations."""
    if not term:
        return coefficient, term
    # Different qubits commute: order factors by qubit index, then merge
    # neighbouring factors on the same qubit via the Pauli product table.
    ordered = sorted(term, key=lambda factor: factor[0])
    simplified = []
    current = ordered[0]
    for upcoming in ordered[1:]:
        qubit, action = current
        next_qubit, next_action = upcoming
        if qubit == next_qubit:
            # same qubit: multiply the Paulis and keep merging
            phase, merged_action = _PAULI_OPERATOR_PRODUCTS[action, next_action]
            coefficient *= phase
            current = (qubit, merged_action)
        else:
            # moved on to a new qubit: flush the merged factor (drop identities)
            if action != 'I':
                simplified.append(current)
            current = upcoming
    # flush the final merged factor
    if current[1] != 'I':
        simplified.append(current)
    return coefficient, tuple(simplified)
def _conjugate_single_qubit(self, qubit, product_table):
    """Conjugate every term's Pauli on `qubit` through `product_table`
    (one of the single-qubit Clifford lookup tables), in place.
    Returns self for chaining."""
    fold_ham = {}
    for term in self.terms:
        new_term = []
        coeff = 1.0
        for index, pauli in term:
            if index == qubit:
                coeff, new_pauli = product_table[pauli]
                new_term.append((index, new_pauli))
            else:
                new_term.append((index, pauli))
        fold_ham[tuple(new_term)] = coeff * self.terms[term]
    self.terms = fold_ham
    return self

def _clifford_simplify_h(self, qubit):
    """Fold an H gate on `qubit` through the Hamiltonian (H P H)."""
    return self._conjugate_single_qubit(qubit, _clifford_h_products)

def _clifford_simplify_s(self, qubit):
    """Fold an S gate on `qubit` through the Hamiltonian (S P S^dagger)."""
    return self._conjugate_single_qubit(qubit, _clifford_s_products)

def _clifford_simplify_s_dag(self, qubit):
    """Fold an S^dagger gate on `qubit` through the Hamiltonian."""
    return self._conjugate_single_qubit(qubit, _clifford_s_dag_products)
def _clifford_simplify_control_g(self, axis, control_q, target_q):
"""simplifying the Hamiltonian using the clifford group property

Fold a controlled gate (CX/CY/CZ, selected by `axis` in {"X","Y","Z"})
through every term: the Pauli pair on (control_q, target_q) is replaced
via the corresponding two-qubit lookup table, in place. Returns self.
"""
fold_ham = {}
for term in self.terms:
#there should be a better way to do this
new_term = []
coeff = 1.0
# extract the Paulis acting on the control/target qubits; the rest
# of the term is carried over untouched
target = "I"
control = "I"
for left, right in term:
if left == control_q:
control = right
elif left == target_q:
target = right
else:
new_term.append(tuple((left,right)))
new_c = "I"
new_t = "I"
# identity on both qubits commutes with the gate -> nothing to do
if not (target == "I" and control == "I"):
if axis == "X":
coeff, new_c, new_t = _clifford_cx_products[control, target]
if axis == "Y":
coeff, new_c, new_t = _clifford_cy_products[control, target]
if axis == "Z":
coeff, new_c, new_t = _clifford_cz_products[control, target]
# re-insert the transformed factors, dropping identities
if new_c != "I":
new_term.append(tuple((control_q, new_c)))
if new_t != "I":
new_term.append(tuple((target_q, new_t)))
# keep factors sorted by qubit index (canonical term ordering)
new_term = sorted(new_term, key=lambda factor: factor[0])
fold_ham[tuple(new_term)] = coeff*self.terms[term]
self.terms = fold_ham
return self
if __name__ == "__main__":
# Ad-hoc smoke test: builds two single-term ParamQubitHamiltonians with
# tq.Variable coefficients, multiplies them and prints/evaluates the
# resulting symbolic coefficients. (The string literal below is an old
# UCC/Hamiltonian experiment kept for reference.)
"""geometry = get_geometry("H2", 0.714)
print(geometry)
basis_set = 'sto-3g'
ref_anz, uccsd_anz, ham = generate_ucc_ansatz(geometry, basis_set)
print(ham)
b_ham = tq.grouping.binary_rep.BinaryHamiltonian.init_from_qubit_hamiltonian(ham)
c_ham = tq.grouping.binary_rep.BinaryHamiltonian.init_from_qubit_hamiltonian(ham)
print(b_ham)
print(b_ham.get_binary())
print(b_ham.get_coeff())
param = uccsd_anz.extract_variables()
print(param)
for term in b_ham.binary_terms:
print(term.coeff)
term.set_coeff(param[0])
print(term.coeff)
print(b_ham.get_coeff())
d_ham = c_ham.to_qubit_hamiltonian() + b_ham.to_qubit_hamiltonian()
"""
term = [(2,'X'), (0,'Y'), (3, 'Z')]
coeff = tq.Variable("a")
coeff = coeff *2.j
print(coeff)
print(type(coeff))
print(coeff({"a":1}))
ham = ParamQubitHamiltonian(term= term, coefficient=coeff)
print(ham.terms)
print(str(ham))
for term in ham.terms:
print(ham.terms[term]({"a":1,"b":2}))
term = [(2,'X'), (0,'Z'), (3, 'Z')]
coeff = tq.Variable("b")
print(coeff({"b":1}))
b_ham = ParamQubitHamiltonian(term= term, coefficient=coeff)
print(b_ham.terms)
print(str(b_ham))
for term in b_ham.terms:
print(b_ham.terms[term]({"a":1,"b":2}))
coeff = tq.Variable("a")*tq.Variable("b")
print(coeff)
print(coeff({"a":1,"b":2}))
# in-place product: coefficients multiply symbolically, Paulis via tables
ham *= b_ham
print(ham.terms)
print(str(ham))
for term in ham.terms:
coeff = (ham.terms[term])
print(coeff)
print(coeff({"a":1,"b":2}))
ham = ham*2.
print(ham.terms)
print(str(ham))
| 9,918 | 29.614198 | 85 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/HEA.py | import tequila as tq
import numpy as np
from tequila import gates as tq_g
from tequila.objective.objective import Variable
def generate_HEA(num_qubits, circuit_id=11, num_layers=1):
    """
    This function generates different types of hardware efficient
    circuits as in this paper
    https://onlinelibrary.wiley.com/doi/full/10.1002/qute.201900070

    param: num_qubits (int) -> the number of qubits in the circuit
    param: circuit_id (int) -> the type of hardware efficient circuit (1-19,
                               matching the circuit numbering of the paper)
    param: num_layers (int) -> the number of layers of the HEA

    returns: ansatz (tq.QCircuit()) -> the parameterized hardware-efficient
             circuit; rotation angles are tq Variables named by a gate-type
             prefix ("x"/"y"/"z"/"cx"/"cz") plus a running counter.

    raises: ValueError if circuit_id is not an integer between 1 and 19.

    NOTE: the even-index "brick" entangler loops (circuits 6, 8, 11, 12, 16,
    17) iterate only up to the second-last qubit so that odd values of
    num_qubits no longer raise IndexError; for even num_qubits the produced
    circuit is unchanged.
    """
    circuit = tq.QCircuit()
    qubits = [i for i in range(num_qubits)]
    if circuit_id == 1:
        # Rx+Rz on every qubit, no entanglers.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
        return circuit
    elif circuit_id == 2:
        # Rx+Rz layer followed by a linear CNOT ladder.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                circuit += tq_g.CNOT(target=qubit, control=qubits[ind])
        return circuit
    elif circuit_id == 3:
        # Rx+Rz layer followed by a ladder of parameterized controlled-Rz.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                variable = Variable("cz"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit, control=qubits[ind])
                count += 1
        return circuit
    elif circuit_id == 4:
        # Rx+Rz layer followed by a ladder of parameterized controlled-Rx.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                variable = Variable("cx"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit, control=qubits[ind])
                count += 1
        return circuit
    elif circuit_id == 5:
        # Rx+Rz layer, all-to-all controlled-Rz, then a second Rx+Rz layer.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, control in enumerate(qubits):
                for target in qubits:
                    if control != target:
                        variable = Variable("cz"+str(count))
                        circuit += tq_g.Rz(angle=variable, target=target, control=control)
                        count += 1
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
        return circuit
    elif circuit_id == 6:
        # Rx+Rz layer, even-pair controlled-Rz, Rx+Rz layer, odd-pair controlled-Rz.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            # bug fix: stop at second-last qubit (odd num_qubits used to IndexError)
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 == 0:
                    variable = Variable("cz"+str(count))
                    circuit += tq_g.Rz(angle=variable, target=qubits[ind+1], control=control)
                    count += 1
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 != 0:
                    variable = Variable("cz"+str(count))
                    circuit += tq_g.Rz(angle=variable, target=qubits[ind+1], control=control)
                    count += 1
        return circuit
    elif circuit_id == 7:
        # Like circuit 5 but with controlled-Rx entanglers.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, control in enumerate(qubits):
                for target in qubits:
                    if control != target:
                        variable = Variable("cx"+str(count))
                        circuit += tq_g.Rx(angle=variable, target=target, control=control)
                        count += 1
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
        return circuit
    elif circuit_id == 8:
        # Like circuit 6 but with controlled-Rx entanglers.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            # bug fix: stop at second-last qubit (odd num_qubits used to IndexError)
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 == 0:
                    variable = Variable("cx"+str(count))
                    circuit += tq_g.Rx(angle=variable, target=qubits[ind+1], control=control)
                    count += 1
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 != 0:
                    variable = Variable("cx"+str(count))
                    circuit += tq_g.Rx(angle=variable, target=qubits[ind+1], control=control)
                    count += 1
        return circuit
    elif circuit_id == 9:
        # Hadamards, CZ ladder, then a parameterized Rx layer.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                circuit += tq_g.H(qubit)
            for ind, qubit in enumerate(qubits[1:]):
                circuit += tq_g.Z(target=qubit, control=qubits[ind])
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                count += 1
        return circuit
    elif circuit_id == 10:
        # Ry layer, ring of CZ gates, second Ry layer.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                circuit += tq_g.Z(target=qubit, control=qubits[ind])
            circuit += tq_g.Z(target=qubits[0], control=qubits[-1])
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                count += 1
        return circuit
    elif circuit_id == 11:
        # Ry+Rz layer, even-pair CNOTs, inner Ry+Rz layer, odd-pair CNOTs.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            # bug fix: stop at second-last qubit (odd num_qubits used to IndexError)
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 == 0:
                    circuit += tq_g.X(target=qubits[ind+1], control=control)
            for ind, qubit in enumerate(qubits[1:-1]):
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 != 0:
                    circuit += tq_g.X(target=qubits[ind+1], control=control)
        return circuit
    elif circuit_id == 12:
        # Like circuit 11 but with CZ entanglers.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            # bug fix: stop at second-last qubit (odd num_qubits used to IndexError)
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 == 0:
                    circuit += tq_g.Z(target=qubits[ind+1], control=control)
            for ind, control in enumerate(qubits[1:-1]):
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=control)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=control)
                count += 1
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 != 0:
                    circuit += tq_g.Z(target=qubits[ind+1], control=control)
        return circuit
    elif circuit_id == 13:
        # Ry layer, ring of controlled-Rz, Ry layer, reversed ring of controlled-Rz.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit, control=qubits[ind])
                count += 1
            variable = Variable("z"+str(count))
            circuit += tq_g.Rz(angle=variable, target=qubits[0], control=qubits[-1])
            count += 1
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, control=qubit, target=qubits[ind])
                count += 1
            variable = Variable("z"+str(count))
            circuit += tq_g.Rz(angle=variable, control=qubits[0], target=qubits[-1])
            count += 1
        return circuit
    elif circuit_id == 14:
        # Like circuit 13 but with controlled-Rx entanglers.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit, control=qubits[ind])
                count += 1
            variable = Variable("x"+str(count))
            circuit += tq_g.Rx(angle=variable, target=qubits[0], control=qubits[-1])
            count += 1
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, control=qubit, target=qubits[ind])
                count += 1
            variable = Variable("x"+str(count))
            circuit += tq_g.Rx(angle=variable, control=qubits[0], target=qubits[-1])
            count += 1
        return circuit
    elif circuit_id == 15:
        # Ry layer, CNOT ring, Ry layer, reversed CNOT ring.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                circuit += tq_g.X(target=qubit, control=qubits[ind])
            circuit += tq_g.X(control=qubits[-1], target=qubits[0])
            for qubit in qubits:
                variable = Variable("y"+str(count))
                circuit += tq_g.Ry(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[1:]):
                circuit += tq_g.X(control=qubit, target=qubits[ind])
            circuit += tq_g.X(target=qubits[-1], control=qubits[0])
        return circuit
    elif circuit_id == 16:
        # Rx+Rz layer, brick pattern of controlled-Rz (even then odd pairs).
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            # bug fix: stop at second-last qubit (odd num_qubits used to IndexError)
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 == 0:
                    variable = Variable("z"+str(count))
                    circuit += tq_g.Rz(angle=variable, control=control, target=qubits[ind+1])
                    count += 1
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 != 0:
                    variable = Variable("z"+str(count))
                    circuit += tq_g.Rz(angle=variable, control=control, target=qubits[ind+1])
                    count += 1
        return circuit
    elif circuit_id == 17:
        # Like circuit 16 but with controlled-Rx entanglers.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            # bug fix: stop at second-last qubit (odd num_qubits used to IndexError)
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 == 0:
                    variable = Variable("x"+str(count))
                    circuit += tq_g.Rx(angle=variable, control=control, target=qubits[ind+1])
                    count += 1
            for ind, control in enumerate(qubits[:-1]):
                if ind % 2 != 0:
                    variable = Variable("x"+str(count))
                    circuit += tq_g.Rx(angle=variable, control=control, target=qubits[ind+1])
                    count += 1
        return circuit
    elif circuit_id == 18:
        # Rx+Rz layer, ring of controlled-Rz with control on the higher qubit.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[:-1]):
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit, control=qubits[ind+1])
                count += 1
            variable = Variable("z"+str(count))
            circuit += tq_g.Rz(angle=variable, target=qubits[-1], control=qubits[0])
            count += 1
        return circuit
    elif circuit_id == 19:
        # Like circuit 18 but with controlled-Rx entanglers.
        count = 0
        for _ in range(num_layers):
            for qubit in qubits:
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit)
                variable = Variable("z"+str(count))
                circuit += tq_g.Rz(angle=variable, target=qubit)
                count += 1
            for ind, qubit in enumerate(qubits[:-1]):
                variable = Variable("x"+str(count))
                circuit += tq_g.Rx(angle=variable, target=qubit, control=qubits[ind+1])
                count += 1
            variable = Variable("x"+str(count))
            circuit += tq_g.Rx(angle=variable, target=qubits[-1], control=qubits[0])
            count += 1
        return circuit
    else:
        # bug fix: previously fell through and returned None silently
        raise ValueError("circuit_id must be an integer between 1 and 19, got {}".format(circuit_id))
| 18,425 | 45.530303 | 172 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    """Differentiate an Objective (or QTensor of Objectives) analytically.

    :param objective: the structure to differentiate (Objective, QTensor,
        ExpectationValue, or anything compilable to one).
    :param variable: the Variable to differentiate with respect to.
        When None, a total gradient is taken: a dict mapping every variable
        of the objective to its derivative Objective is returned.
    :param no_compile: skip the compilation pass (objective assumed compiled).
    :return: an Objective encoding the derivative, or a dict of them when
        variable is None.
    """
    if variable is None:
        # total gradient: recurse once per variable of the objective
        all_variables = objective.extract_variables()
        if len(all_variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for var in all_variables:
            assert (var is not None)
            gradients[var] = grad(objective, var, no_compile=no_compile)
        return gradients
    variable = assign_variable(variable)

    # QTensor: differentiate entry-wise
    if isinstance(objective, QTensor):
        elementwise = lambda entry: grad(objective=entry, variable=variable, *args, **kwargs)
        return vectorize(elementwise)(objective)

    # no dependence on the variable -> derivative is the empty Objective
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        compiled = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch on the (original or compiled) structure
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Apply the chain rule over the arguments of a transformed Objective.

    For each argument the outer derivative of the transformation (via
    jax/autograd) is multiplied with the inner derivative of the argument
    with respect to `variable`, and the contributions are summed.
    """
    args = objective.args
    transformation = objective.transformation
    total = None
    # cache inner derivatives of expectation values to avoid recomputation
    ev_cache = {}
    for position, argument in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # outer derivative is trivially 1 for the identity transformation
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(argument, "U"):
            if argument in ev_cache:
                inner = ev_cache[argument]
            else:
                inner = __grad_inner(arg=argument, variable=variable)
                ev_cache[argument] = inner
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)

        if inner == 0.0:
            # skip vanishing contributions
            continue
        contribution = outer * inner
        total = contribution if total is None else total + contribution

    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """Derivative of a single argument with respect to `variable`.

    Recurses all the way down to raw variables: a Variable differentiates
    to 1.0 against itself and 0.0 otherwise; expectation values and nested
    objectives are delegated to their dedicated gradient routines.
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract form,
        # then recompile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """Analytic partial derivative of an expectation value.

    Sums the shift-rule contributions of every gate in the unitary that is
    parameterized by `variable`.

    :param E: the expectation value (Hamiltonian + unitary) to differentiate
    :param variable: the Variable to differentiate with respect to
    :return: an Objective for dE/d(variable), or 0.0 when E is independent
        of the variable
    """
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))

    # fast return if possible
    if variable not in U.extract_variables():
        return 0.0

    accumulated = Objective()
    for position, gate in U._parameter_map[variable]:
        accumulated += __grad_shift_rule(U, gate, position, variable, H)
    assert accumulated is not None
    return accumulated
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """Shift-rule gradient of one directly differentiable gate.

    :param unitary: QCircuit containing the gate to differentiate
    :param g: the parameterized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the Variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against the unitary
    :return: an Objective whose evaluation yields the gradient of g
        with respect to `variable`
    """
    # custom gate constructions may override this hook
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/vqe_utils.py | import tequila as tq
import numpy as np
from hacked_openfermion_qubit_operator import ParamQubitHamiltonian
from openfermion import QubitOperator
from HEA import *
def get_ansatz_circuit(ansatz_type, geometry, basis_set=None, trotter_steps = 1, name=None, circuit_id=None,num_layers=1):
    """
    This function generates the ansatz for the molecule and the Hamiltonian

    param: ansatz_type (str) -> the type of the ansatz ('UCCSD', 'UCCS',
                                'UpCCGSD', 'SPA', 'HEA')
    param: geometry (str) -> the geometry of the molecule,
                             e.g. "H 0.0 0.0 0.0\\nH 0.0 0.0 0.714"
    param: basis_set (str) -> the basis set for which the ansatz is generated
                              (used by 'UCCSD', 'UCCS' and 'HEA')
    param: trotter_steps (int) -> the number of trotter steps used in the
                                  trotter decomposition (UCCSD only)
    param: name (str) -> the name for the madness molecule
                         (used by 'UpCCGSD' and 'SPA')
    param: circuit_id (int) -> the type of hardware efficient ansatz (HEA only)
    param: num_layers (int) -> the number of layers of the HEA (HEA only)

    returns:
        ansatz (tq.QCircuit()) -> the ansatz circuit
        ham (tq.QubitHamiltonian()) -> the qubit Hamiltonian of the molecule
        fci_ener (float) -> the FCI reference energy

    raises: ValueError if ansatz_type is not one of the supported strings
    """
    if ansatz_type == "UCCSD":
        molecule = tq.Molecule(geometry=geometry, basis_set=basis_set)
        ansatz = molecule.make_uccsd_ansatz(trotter_steps)
        ham = molecule.make_hamiltonian()
        fci_ener = molecule.compute_energy(method="fci")
    elif ansatz_type == "UCCS":
        molecule = tq.Molecule(geometry=geometry, basis_set=basis_set, backend='psi4')
        ham = molecule.make_hamiltonian()
        fci_ener = molecule.compute_energy("fci")
        # singles-only generalized excitations
        indices = molecule.make_upccgsd_indices(key='UCCS')
        print("indices are:", indices)
        ansatz = molecule.make_upccgsd_layer(indices=indices, include_singles=True, include_doubles=False)
    elif ansatz_type == "UpCCGSD":
        molecule = tq.Molecule(name=name, geometry=geometry, n_pno=None)
        ham = molecule.make_hamiltonian()
        fci_ener = molecule.compute_energy("fci")
        ansatz = molecule.make_upccgsd_ansatz()
    elif ansatz_type == "SPA":
        molecule = tq.Molecule(name=name, geometry=geometry, n_pno=None)
        ham = molecule.make_hamiltonian()
        fci_ener = molecule.compute_energy("fci")
        ansatz = molecule.make_upccgsd_ansatz(name="SPA")
    elif ansatz_type == "HEA":
        molecule = tq.Molecule(geometry=geometry, basis_set=basis_set)
        ham = molecule.make_hamiltonian()
        fci_ener = molecule.compute_energy(method="fci")
        # bug fix: num_layers was accepted but never forwarded before
        ansatz = generate_HEA(molecule.n_orbitals * 2, circuit_id, num_layers)
    else:
        # bug fix: raise a specific exception type instead of bare Exception
        raise ValueError("not implemented any other ansatz, please choose from 'UCCSD, UpCCGSD, SPA, HEA'")
    return ansatz, ham, fci_ener
def get_generator_for_gates(unitary):
    """
    This function takes a unitary gate and returns the generator of
    the gate so that it can be padded to the Hamiltonian

    param: unitary (tq.QGateImpl()) -> the unitary circuit element that has to be
                                       converted to a paulistring

    e.g.:
    input:
        unitary -> a FermionicGateImpl object as the one printed below
                FermionicExcitation(target=(0, 1, 2, 3), control=(), parameter=Objective with 0 unique expectation values
                total measurements = 0
                variables = [(1, 0, 1, 0)]
                types = [])

    returns:
        parameter (tq.Variable()) -> (1, 0, 1, 0)
        generator (tq.QubitHamiltonian()) -> -0.1250Y(0)Y(1)Y(2)X(3)+0.1250Y(0)X(1)Y(2)Y(3)
                                             +0.1250X(0)X(1)Y(2)X(3)+0.1250X(0)Y(1)Y(2)Y(3)
                                             -0.1250Y(0)X(1)X(2)X(3)-0.1250Y(0)Y(1)X(2)Y(3)
                                             -0.1250X(0)Y(1)X(2)X(3)+0.1250X(0)X(1)X(2)Y(3)
        null_proj (tq.QubitHamiltonian()) -> -0.1250Z(0)Z(1)+0.1250Z(1)Z(3)+0.1250Z(0)Z(3)
                                             +0.1250Z(1)Z(2)+0.1250Z(0)Z(2)-0.1250Z(2)Z(3)
                                             -0.1250Z(0)Z(1)Z(2)Z(3)

    NOTE(review): any exception inside the body is caught, printed, and
    swallowed, making the function implicitly return None instead of the
    3-tuple — callers that unpack the result will then fail with a
    TypeError. Kept as-is to preserve the best-effort behavior.
    """
    try:
        parameter = None
        generator = None
        null_proj = None
        # Fermionic excitation gates carry generator and null-space projector
        # as attributes directly.
        if isinstance(unitary, tq.quantumchemistry.qc_base.FermionicGateImpl):
            parameter = unitary.extract_variables()
            generator = unitary.generator
            null_proj = unitary.p0
        else:
            #getting parameter
            if unitary.is_parametrized():
                parameter = unitary.extract_variables()
            else:
                # non-parameterized gate: placeholder so parameter[0] is None
                parameter = [None]
            # make_generator is preferred; older gate classes only expose
            # a generator() method (hence the bare except fallback).
            try:
                generator = unitary.make_generator(include_controls=True)
            except:
                generator = unitary.generator()
            """if len(parameter) == 0:
                parameter = [tq.objective.objective.assign_variable(unitary.parameter)]"""
        # null_proj stays None for non-fermionic gates
        return parameter[0], generator, null_proj
    except Exception as e:
        # NOTE(review): silent-swallow — see docstring note above.
        print("An Exception happened, details :",e)
        pass
def fold_unitary_into_hamiltonian(unitary, Hamiltonian):
"""
This function return a list of the resulting Hamiltonian terms after folding the paulistring
correspondig to the unitary into the Hamiltonian
param: unitary (tq.QGateImpl()) -> the unitary to be folded into the Hamiltonian
param: Hamiltonian (ParamQubitHamiltonian()) -> the Hamiltonian of the system
e.g.:
input:
unitary -> a FermionicGateImpl object as the one printed below
FermionicExcitation(target=(0, 1, 2, 3), control=(), parameter=Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [])
Hamiltonian -> -0.06214952615456104 [] + -0.044941923860490916 [X0 X1 Y2 Y3] +
0.044941923860490916 [X0 Y1 Y2 X3] + 0.044941923860490916 [Y0 X1 X2 Y3] +
-0.044941923860490916 [Y0 Y1 X2 X3] + 0.17547360045040505 [Z0] +
0.16992958569230643 [Z0 Z1] + 0.12212314332112947 [Z0 Z2] +
0.1670650671816204 [Z0 Z3] + 0.17547360045040508 [Z1] +
0.1670650671816204 [Z1 Z2] + 0.12212314332112947 [Z1 Z3] +
-0.23578915712819945 [Z2] + 0.17561918557144712 [Z2 Z3] +
-0.23578915712819945 [Z3]
returns:
folded_hamiltonian (ParamQubitHamiltonian()) -> Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [X0 X1 X2 X3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [X0 X1 Y2 Y3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [X0 Y1 X2 Y3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [X0 Y1 Y2 X3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Y0 X1 X2 Y3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Y0 X1 Y2 X3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Y0 Y1 X2 X3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Y0 Y1 Y2 Y3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z0] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z0 Z1] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z0 Z1 Z2] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z0 Z1 Z2 Z3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z0 Z1 Z3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z0 Z2] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z0 Z2 Z3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z0 Z3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z1] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z1 Z2] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z1 Z2 Z3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z1 Z3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z2] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z2 Z3] +
Objective with 0 unique expectation values
total measurements = 0
variables = [(1, 0, 1, 0)]
types = [] [Z3]
"""
folded_hamiltonian = ParamQubitHamiltonian()
if isinstance(unitary, tq.circuit._gates_impl.DifferentiableGateImpl) and not isinstance(unitary, tq.circuit._gates_impl.PhaseGateImpl):
variable, generator, null_proj = get_generator_for_gates(unitary)
# print(generator)
# print(null_proj)
#converting into ParamQubitHamiltonian()
c_generator = convert_tq_QH_to_PQH(generator)
"""c_null_proj = convert_tq_QH_to_PQH(null_proj)
print(variable)
prod1 = (null_proj*ham*generator - generator*ham*null_proj)
print(prod1)
#print((Hamiltonian*c_generator))
prod2 = convert_PQH_to_tq_QH(c_null_proj*Hamiltonian*c_generator - c_generator*Hamiltonian*c_null_proj)({var:0.1 for var in variable.extract_variables()})
print(prod2)
assert prod1 ==prod2
raise Exception("testing")"""
#print("starting", flush=True)
#handling the parameterize gates
if variable is not None:
#print("folding generator")
# adding the term: cos^2(\theta)*H
temp_ham = ParamQubitHamiltonian().identity()
temp_ham *= Hamiltonian
temp_ham *= (variable.apply(tq.numpy.cos)**2)
#print(convert_PQH_to_tq_QH(temp_ham)({var:1. for var in variable.extract_variables()}))
folded_hamiltonian += temp_ham
#print("step1 done", flush=True)
# adding the term: sin^2(\theta)*G*H*G
temp_ham = ParamQubitHamiltonian().identity()
temp_ham *= c_generator
temp_ham *= Hamiltonian
temp_ham *= c_generator
temp_ham *= (variable.apply(tq.numpy.sin)**2)
#print(convert_PQH_to_tq_QH(temp_ham)({var:1. for var in variable.extract_variables()}))
folded_hamiltonian += temp_ham
#print("step2 done", flush=True)
# adding the term: i*cos^2(\theta)8sin^2(\theta)*(G*H -H*G)
temp_ham1 = ParamQubitHamiltonian().identity()
temp_ham1 *= c_generator
temp_ham1 *= Hamiltonian
temp_ham2 = ParamQubitHamiltonian().identity()
temp_ham2 *= Hamiltonian
temp_ham2 *= c_generator
temp_ham = temp_ham1 - temp_ham2
temp_ham *= 1.0j
temp_ham *= variable.apply(tq.numpy.sin) * variable.apply(tq.numpy.cos)
#print(convert_PQH_to_tq_QH(temp_ham)({var:1. for var in variable.extract_variables()}))
folded_hamiltonian += temp_ham
#print("step3 done", flush=True)
#handling the non-paramterized gates
else:
raise Exception("This function is not implemented yet")
# adding the term: G*H*G
folded_hamiltonian += c_generator*Hamiltonian*c_generator
#print("Halfway there", flush=True)
#handle the FermionicGateImpl gates
if null_proj is not None:
print("folding null projector")
c_null_proj = convert_tq_QH_to_PQH(null_proj)
#print("step4 done", flush=True)
# adding the term: (1-cos(\theta))^2*P0*H*P0
temp_ham = ParamQubitHamiltonian().identity()
temp_ham *= c_null_proj
temp_ham *= Hamiltonian
temp_ham *= c_null_proj
temp_ham *= ((1-variable.apply(tq.numpy.cos))**2)
folded_hamiltonian += temp_ham
#print("step5 done", flush=True)
# adding the term: 2*cos(\theta)*(1-cos(\theta))*(P0*H +H*P0)
temp_ham1 = ParamQubitHamiltonian().identity()
temp_ham1 *= c_null_proj
temp_ham1 *= Hamiltonian
temp_ham2 = ParamQubitHamiltonian().identity()
temp_ham2 *= Hamiltonian
temp_ham2 *= c_null_proj
temp_ham = temp_ham1 + temp_ham2
temp_ham *= (variable.apply(tq.numpy.cos)*(1-variable.apply(tq.numpy.cos)))
folded_hamiltonian += temp_ham
#print("step6 done", flush=True)
# adding the term: i*sin(\theta)*(1-cos(\theta))*(G*H*P0 - P0*H*G)
temp_ham1 = ParamQubitHamiltonian().identity()
temp_ham1 *= c_generator
temp_ham1 *= Hamiltonian
temp_ham1 *= c_null_proj
temp_ham2 = ParamQubitHamiltonian().identity()
temp_ham2 *= c_null_proj
temp_ham2 *= Hamiltonian
temp_ham2 *= c_generator
temp_ham = temp_ham1 - temp_ham2
temp_ham *= 1.0j
temp_ham *= (variable.apply(tq.numpy.sin)*(1-variable.apply(tq.numpy.cos)))
folded_hamiltonian += temp_ham
#print("step7 done", flush=True)
elif isinstance(unitary, tq.circuit._gates_impl.PhaseGateImpl):
if np.isclose(unitary.parameter, np.pi/2.0):
return Hamiltonian._clifford_simplify_s(unitary.qubits[0])
elif np.isclose(unitary.parameter, -1.*np.pi/2.0):
return Hamiltonian._clifford_simplify_s_dag(unitary.qubits[0])
else:
raise Exception("Only DifferentiableGateImpl, PhaseGateImpl(S), Pauligate(X,Y,Z), Controlled(X,Y,Z) and Hadamrd(H) implemented yet")
elif isinstance(unitary, tq.circuit._gates_impl.QGateImpl):
if unitary.is_controlled():
if unitary.name == "X":
return Hamiltonian._clifford_simplify_control_g("X", unitary.control[0], unitary.target[0])
elif unitary.name == "Y":
return Hamiltonian._clifford_simplify_control_g("Y", unitary.control[0], unitary.target[0])
elif unitary.name == "Z":
return Hamiltonian._clifford_simplify_control_g("Z", unitary.control[0], unitary.target[0])
else:
raise Exception("Only DifferentiableGateImpl, PhaseGateImpl(S), Pauligate(X,Y,Z), Controlled(X,Y,Z) and Hadamrd(H) implemented yet")
else:
if unitary.name == "X":
gate = convert_tq_QH_to_PQH(tq.paulis.X(unitary.qubits[0]))
return gate*Hamiltonian*gate
elif unitary.name == "Y":
gate = convert_tq_QH_to_PQH(tq.paulis.Y(unitary.qubits[0]))
return gate*Hamiltonian*gate
elif unitary.name == "Z":
gate = convert_tq_QH_to_PQH(tq.paulis.Z(unitary.qubits[0]))
return gate*Hamiltonian*gate
elif unitary.name == "H":
return Hamiltonian._clifford_simplify_h(unitary.qubits[0])
else:
raise Exception("Only DifferentiableGateImpl, PhaseGateImpl(S), Pauligate(X,Y,Z), Controlled(X,Y,Z) and Hadamrd(H) implemented yet")
else:
raise Exception("Only DifferentiableGateImpl, PhaseGateImpl(S), Pauligate(X,Y,Z), Controlled(X,Y,Z) and Hadamrd(H) implemented yet")
return folded_hamiltonian
def convert_tq_QH_to_PQH(Hamiltonian):
    """
    Convert a tequila QubitHamiltonian into a ParamQubitHamiltonian.

    The input is first lowered to its openfermion representation; every
    (pauli-term, coefficient) pair is then re-wrapped as a one-term
    ParamQubitHamiltonian and accumulated into the result.

    param: Hamiltonian (tq.QubitHamiltonian()) -> the Hamiltonian to be converted
    returns:
        param_hamiltonian (ParamQubitHamiltonian()) with the same terms and
        coefficients as the input, e.g.
        -0.0621+0.1755Z(0)+... becomes -0.0621 [] + 0.1755 [Z0] + ...
    """
    of_operator = Hamiltonian.to_openfermion()
    converted = ParamQubitHamiltonian()
    for pauli_term, weight in of_operator.terms.items():
        converted += ParamQubitHamiltonian(term=pauli_term, coefficient=weight)
    return converted
class convert_PQH_to_tq_QH:
    # Callable converter: wraps a ParamQubitHamiltonian and, when called with a
    # dictionary of variable values, returns the equivalent tq.QubitHamiltonian.
    def __init__(self, Hamiltonian):
        # Hamiltonian: ParamQubitHamiltonian whose coefficients may be numbers
        # or symbolic tq.Variable / tq.Objective objects.
        self.param_hamiltonian = Hamiltonian
    def __call__(self,variables=None):
        """
        This function takes the ParamQubitHamiltonian object and converts into a
        tequila QubitHamiltonian object.
        param: param_hamiltonian (ParamQubitHamiltonian()) -> the Hamiltonian to be converted
        param: variables (dict) -> a dictionary with the values of the variables in the
                                   Hamiltonian coefficient
        e.g:
            input:
                param_hamiltonian -> a [Y0 X2 Z3] + b [Z0 X2 Z3]
                variables -> {"a":1,"b":2}
            returns:
                Hamiltonian (tq.QubitHamiltonian()) -> +1.0000Y(0)X(2)Z(3)+2.0000Z(0)X(2)Z(3)
        """
        Hamiltonian = tq.QubitHamiltonian()
        for term in self.param_hamiltonian.terms:
            val = self.param_hamiltonian.terms[term]# + self.param_hamiltonian.imag_terms[term]*1.0j
            if isinstance(val, tq.Variable) or isinstance(val, tq.objective.objective.Objective):
                try:
                    # NOTE(review): this loop rewrites each entry onto itself and
                    # has no visible effect; it does raise AttributeError when
                    # variables is None, which routes into the except below.
                    for key in variables.keys():
                        variables[key] = variables[key]
                    #print(variables)
                    # Evaluate the symbolic coefficient at the supplied values.
                    val = val(variables)
                    #print(val)
                except Exception as e:
                    print(e)
                    raise Exception("You forgot to pass the dictionary with the values of the variables")
            # QubitOperator is assumed to be imported at module level (openfermion).
            Hamiltonian += tq.QubitHamiltonian(QubitOperator(term=term, coefficient=val))
        return Hamiltonian
    def _construct_derivatives(self, variables=None):
        """
        Build the derivative of the Hamiltonian with respect to every
        tq.Variable appearing in its coefficients.
        Returns:
            variable_names (list): the variables found in the coefficients.
            derivatives (dict): variable -> ParamQubitHamiltonian whose
                coefficients are the gradients of the original coefficients.
        """
        derivatives = {}
        variable_names = []
        for term in self.param_hamiltonian.terms:
            val = self.param_hamiltonian.terms[term]# + self.param_hamiltonian.imag_terms[term]*1.0j
            #print(val)
            if isinstance(val, tq.Variable) or isinstance(val, tq.objective.objective.Objective):
                variable = val.extract_variables()
                for var in list(variable):
                    # Local import -- presumably to avoid a circular dependency
                    # at module load time; TODO confirm.
                    from grad_hacked import grad
                    derivative = ParamQubitHamiltonian(term = term, coefficient = grad(val, var))
                    if var not in variable_names:
                        variable_names.append(var)
                    if var not in list(derivatives.keys()):
                        derivatives[var] = derivative
                    else:
                        derivatives[var] += derivative
        return variable_names, derivatives
def get_geometry(name, b_l):
    """
    Utility function that generates the geometry string of a molecule.

    param: name (str) -> name of the molecule; one of "LiH", "H2", "BeH2",
                         "N2" or "H4"
    param: b_l (float) -> the bond length of the molecule

    Returns the geometry string, or None when the molecule name is unknown.

    e.g.:
        input:
            name -> "H2"
            b_l -> 0.714
        returns:
            geo_str (str) -> "H 0.0 0.0 0.0\nH 0.0 0.0 0.714"
    """
    # Template string per molecule plus the bond-length multiples that fill it
    # (BeH2/H4 place atoms symmetrically around the origin on the z-axis).
    templates = {
        "LiH": ("H 0.0 0.0 0.0\nLi 0.0 0.0 {0}", (b_l,)),
        "H2": ("H 0.0 0.0 0.0\nH 0.0 0.0 {0}", (b_l,)),
        "BeH2": ("H 0.0 0.0 {0}\nH 0.0 0.0 {1}\nBe 0.0 0.0 0.0", (b_l, -1 * b_l)),
        "N2": ("N 0.0 0.0 0.0\nN 0.0 0.0 {0}", (b_l,)),
        "H4": ("H 0.0 0.0 0.0\nH 0.0 0.0 {0}\nH 0.0 0.0 {1}\nH 0.0 0.0 {2}",
               (b_l, -1 * b_l, 2 * b_l)),
    }
    if name not in templates:
        # Preserve the original behavior: unknown names yield None.
        return None
    template, coords = templates[name]
    return template.format(*coords)
| 27,934 | 51.807183 | 162 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/n2/n2_serial_bl_2.25/hacked_openfermion_symbolic_operator.py | import abc
import copy
import itertools
import re
import warnings
import sympy
import tequila as tq
from tequila.objective.objective import Objective, Variable
from openfermion.config import EQ_TOLERANCE
# Types accepted as term coefficients: plain numbers, sympy expressions, and
# tequila Variable/Objective objects (symbolic coefficients evaluated later).
COEFFICIENT_TYPES = (int, float, complex, sympy.Expr, Variable, Objective)
class SymbolicOperator(metaclass=abc.ABCMeta):
"""Base class for FermionOperator and QubitOperator.
A SymbolicOperator stores an object which represents a weighted
sum of terms; each term is a product of individual factors
of the form (`index`, `action`), where `index` is a nonnegative integer
and the possible values for `action` are determined by the subclass.
For instance, for the subclass FermionOperator, `action` can be 1 or 0,
indicating raising or lowering, and for QubitOperator, `action` is from
the set {'X', 'Y', 'Z'}.
The coefficients of the terms are stored in a dictionary whose
keys are the terms.
SymbolicOperators of the same type can be added or multiplied together.
Note:
Adding SymbolicOperators is faster using += (as this
is done by in-place addition). Specifying the coefficient
during initialization is faster than multiplying a SymbolicOperator
with a scalar.
Attributes:
actions (tuple): A tuple of objects representing the possible actions.
e.g. for FermionOperator, this is (1, 0).
action_strings (tuple): A tuple of string representations of actions.
These should be in one-to-one correspondence with actions and
listed in the same order.
e.g. for FermionOperator, this is ('^', '').
action_before_index (bool): A boolean indicating whether in string
representations, the action should come before the index.
different_indices_commute (bool): A boolean indicating whether
factors acting on different indices commute.
terms (dict):
**key** (tuple of tuples): A dictionary storing the coefficients
of the terms in the operator. The keys are the terms.
A term is a product of individual factors; each factor is
represented by a tuple of the form (`index`, `action`), and
these tuples are collected into a larger tuple which represents
the term as the product of its factors.
"""
    @staticmethod
    def _issmall(val, tol=EQ_TOLERANCE):
        '''Checks whether a value is near-zero
        Parses the allowed coefficients above for near-zero tests.
        Args:
            val (COEFFICIENT_TYPES) -- the value to be tested
            tol (float) -- tolerance for inequality
        '''
        if isinstance(val, sympy.Expr):
            # sympy comparisons may stay symbolic; only an explicit True
            # after simplification counts as "small".
            if sympy.simplify(abs(val) < tol) == True:
                return True
            return False
        if isinstance(val, tq.Variable) or isinstance(val, tq.objective.objective.Objective):
            # Symbolic tequila coefficients have no fixed magnitude yet,
            # so they are never considered small.
            return False
        if abs(val) < tol:
            return True
        return False
@abc.abstractproperty
def actions(self):
"""The allowed actions.
Returns a tuple of objects representing the possible actions.
"""
pass
@abc.abstractproperty
def action_strings(self):
"""The string representations of the allowed actions.
Returns a tuple containing string representations of the possible
actions, in the same order as the `actions` property.
"""
pass
@abc.abstractproperty
def action_before_index(self):
"""Whether action comes before index in string representations.
Example: For QubitOperator, the actions are ('X', 'Y', 'Z') and
the string representations look something like 'X0 Z2 Y3'. So the
action comes before the index, and this function should return True.
For FermionOperator, the string representations look like
'0^ 1 2^ 3'. The action comes after the index, so this function
should return False.
"""
pass
@abc.abstractproperty
def different_indices_commute(self):
"""Whether factors acting on different indices commute."""
pass
__hash__ = None
    def __init__(self, term=None, coefficient=1.):
        # NOTE: despite the message text, COEFFICIENT_TYPES also admits
        # symbolic coefficients (sympy.Expr, tq Variable/Objective).
        if not isinstance(coefficient, COEFFICIENT_TYPES):
            raise ValueError('Coefficient must be a numeric type.')
        # Initialize the terms dictionary
        self.terms = {}
        # Detect if the input is the string representation of a sum of terms;
        # if so, initialization needs to be handled differently
        if isinstance(term, str) and '[' in term:
            self._long_string_init(term, coefficient)
            return
        # Zero operator: leave the terms dictionary empty
        if term is None:
            return
        # Parse the term
        # Sequence input
        if isinstance(term, (list, tuple)):
            term = self._parse_sequence(term)
        # String input
        elif isinstance(term, str):
            term = self._parse_string(term)
        # Invalid input type
        else:
            raise ValueError('term specified incorrectly.')
        # Simplify the term (canonical factor order when indices commute)
        coefficient, term = self._simplify(term, coefficient=coefficient)
        # Add the term to the dictionary
        self.terms[term] = coefficient
def _long_string_init(self, long_string, coefficient):
r"""
Initialization from a long string representation.
e.g. For FermionOperator:
'1.5 [2^ 3] + 1.4 [3^ 0]'
"""
pattern = r'(.*?)\[(.*?)\]' # regex for a term
for match in re.findall(pattern, long_string, flags=re.DOTALL):
# Determine the coefficient for this term
coef_string = re.sub(r"\s+", "", match[0])
if coef_string and coef_string[0] == '+':
coef_string = coef_string[1:].strip()
if coef_string == '':
coef = 1.0
elif coef_string == '-':
coef = -1.0
else:
try:
if 'j' in coef_string:
if coef_string[0] == '-':
coef = -complex(coef_string[1:])
else:
coef = complex(coef_string)
else:
coef = float(coef_string)
except ValueError:
raise ValueError(
'Invalid coefficient {}.'.format(coef_string))
coef *= coefficient
# Parse the term, simpify it and add to the dict
term = self._parse_string(match[1])
coef, term = self._simplify(term, coefficient=coef)
if term not in self.terms:
self.terms[term] = coef
else:
self.terms[term] += coef
def _validate_factor(self, factor):
"""Check that a factor of a term is valid."""
if len(factor) != 2:
raise ValueError('Invalid factor {}.'.format(factor))
index, action = factor
if action not in self.actions:
raise ValueError('Invalid action in factor {}. '
'Valid actions are: {}'.format(
factor, self.actions))
if not isinstance(index, int) or index < 0:
raise ValueError('Invalid index in factor {}. '
'The index should be a non-negative '
'integer.'.format(factor))
def _simplify(self, term, coefficient=1.0):
"""Simplifies a term."""
if self.different_indices_commute:
term = sorted(term, key=lambda factor: factor[0])
return coefficient, tuple(term)
def _parse_sequence(self, term):
"""Parse a term given as a sequence type (i.e., list, tuple, etc.).
e.g. For QubitOperator:
[('X', 2), ('Y', 0), ('Z', 3)] -> (('Y', 0), ('X', 2), ('Z', 3))
"""
if not term:
# Empty sequence
return ()
elif isinstance(term[0], int):
# Single factor
self._validate_factor(term)
return (tuple(term),)
else:
# Check that all factors in the term are valid
for factor in term:
self._validate_factor(factor)
# Return a tuple
return tuple(term)
def _parse_string(self, term):
"""Parse a term given as a string.
e.g. For FermionOperator:
"2^ 3" -> ((2, 1), (3, 0))
"""
factors = term.split()
# Convert the string representations of the factors to tuples
processed_term = []
for factor in factors:
# Get the index and action string
if self.action_before_index:
# The index is at the end of the string; find where it starts.
if not factor[-1].isdigit():
raise ValueError('Invalid factor {}.'.format(factor))
index_start = len(factor) - 1
while index_start > 0 and factor[index_start - 1].isdigit():
index_start -= 1
if factor[index_start - 1] == '-':
raise ValueError('Invalid index in factor {}. '
'The index should be a non-negative '
'integer.'.format(factor))
index = int(factor[index_start:])
action_string = factor[:index_start]
else:
# The index is at the beginning of the string; find where
# it ends
if factor[0] == '-':
raise ValueError('Invalid index in factor {}. '
'The index should be a non-negative '
'integer.'.format(factor))
if not factor[0].isdigit():
raise ValueError('Invalid factor {}.'.format(factor))
index_end = 1
while (index_end <= len(factor) - 1 and
factor[index_end].isdigit()):
index_end += 1
index = int(factor[:index_end])
action_string = factor[index_end:]
# Convert the action string to an action
if action_string in self.action_strings:
action = self.actions[self.action_strings.index(action_string)]
else:
raise ValueError('Invalid action in factor {}. '
'Valid actions are: {}'.format(
factor, self.action_strings))
# Add the factor to the list as a tuple
processed_term.append((index, action))
# Return a tuple
return tuple(processed_term)
@property
def constant(self):
"""The value of the constant term."""
return self.terms.get((), 0.0)
@constant.setter
def constant(self, value):
self.terms[()] = value
@classmethod
def zero(cls):
"""
Returns:
additive_identity (SymbolicOperator):
A symbolic operator o with the property that o+x = x+o = x for
all operators x of the same class.
"""
return cls(term=None)
@classmethod
def identity(cls):
"""
Returns:
multiplicative_identity (SymbolicOperator):
A symbolic operator u with the property that u*x = x*u = x for
all operators x of the same class.
"""
return cls(term=())
def __str__(self):
"""Return an easy-to-read string representation."""
if not self.terms:
return '0'
string_rep = ''
for term, coeff in sorted(self.terms.items()):
if self._issmall(coeff):
continue
tmp_string = '{} ['.format(coeff)
for factor in term:
index, action = factor
action_string = self.action_strings[self.actions.index(action)]
if self.action_before_index:
tmp_string += '{}{} '.format(action_string, index)
else:
tmp_string += '{}{} '.format(index, action_string)
string_rep += '{}] +\n'.format(tmp_string.strip())
return string_rep[:-3]
def __repr__(self):
return str(self)
def __imul__(self, multiplier):
"""In-place multiply (*=) with scalar or operator of the same type.
Default implementation is to multiply coefficients and
concatenate terms.
Args:
multiplier(complex float, or SymbolicOperator): multiplier
Returns:
product (SymbolicOperator): Mutated self.
"""
# Handle scalars.
if isinstance(multiplier, COEFFICIENT_TYPES):
for term in self.terms:
self.terms[term] *= multiplier
return self
# Handle operator of the same type
elif isinstance(multiplier, self.__class__):
result_terms = dict()
for left_term in self.terms:
for right_term in multiplier.terms:
left_coefficient = self.terms[left_term]
right_coefficient = multiplier.terms[right_term]
new_coefficient = left_coefficient * right_coefficient
new_term = left_term + right_term
new_coefficient, new_term = self._simplify(
new_term, coefficient=new_coefficient)
# Update result dict.
if new_term in result_terms:
result_terms[new_term] += new_coefficient
else:
result_terms[new_term] = new_coefficient
self.terms = result_terms
return self
# Invalid multiplier type
else:
raise TypeError('Cannot multiply {} with {}'.format(
self.__class__.__name__, multiplier.__class__.__name__))
def __mul__(self, multiplier):
"""Return self * multiplier for a scalar, or a SymbolicOperator.
Args:
multiplier: A scalar, or a SymbolicOperator.
Returns:
product (SymbolicOperator)
Raises:
TypeError: Invalid type cannot be multiply with SymbolicOperator.
"""
if isinstance(multiplier, COEFFICIENT_TYPES + (type(self),)):
product = copy.deepcopy(self)
product *= multiplier
return product
else:
raise TypeError('Object of invalid type cannot multiply with ' +
type(self) + '.')
def __iadd__(self, addend):
"""In-place method for += addition of SymbolicOperator.
Args:
addend (SymbolicOperator, or scalar): The operator to add.
If scalar, adds to the constant term
Returns:
sum (SymbolicOperator): Mutated self.
Raises:
TypeError: Cannot add invalid type.
"""
if isinstance(addend, type(self)):
for term in addend.terms:
self.terms[term] = (self.terms.get(term, 0.0) +
addend.terms[term])
if self._issmall(self.terms[term]):
del self.terms[term]
elif isinstance(addend, COEFFICIENT_TYPES):
self.constant += addend
else:
raise TypeError('Cannot add invalid type to {}.'.format(type(self)))
return self
def __add__(self, addend):
"""
Args:
addend (SymbolicOperator): The operator to add.
Returns:
sum (SymbolicOperator)
"""
summand = copy.deepcopy(self)
summand += addend
return summand
def __radd__(self, addend):
"""
Args:
addend (SymbolicOperator): The operator to add.
Returns:
sum (SymbolicOperator)
"""
return self + addend
def __isub__(self, subtrahend):
"""In-place method for -= subtraction of SymbolicOperator.
Args:
subtrahend (A SymbolicOperator, or scalar): The operator to subtract
if scalar, subtracts from the constant term.
Returns:
difference (SymbolicOperator): Mutated self.
Raises:
TypeError: Cannot subtract invalid type.
"""
if isinstance(subtrahend, type(self)):
for term in subtrahend.terms:
self.terms[term] = (self.terms.get(term, 0.0) -
subtrahend.terms[term])
if self._issmall(self.terms[term]):
del self.terms[term]
elif isinstance(subtrahend, COEFFICIENT_TYPES):
self.constant -= subtrahend
else:
raise TypeError('Cannot subtract invalid type from {}.'.format(
type(self)))
return self
def __sub__(self, subtrahend):
"""
Args:
subtrahend (SymbolicOperator): The operator to subtract.
Returns:
difference (SymbolicOperator)
"""
minuend = copy.deepcopy(self)
minuend -= subtrahend
return minuend
def __rsub__(self, subtrahend):
"""
Args:
subtrahend (SymbolicOperator): The operator to subtract.
Returns:
difference (SymbolicOperator)
"""
return -1 * self + subtrahend
def __rmul__(self, multiplier):
"""
Return multiplier * self for a scalar.
We only define __rmul__ for scalars because the left multiply
exist for SymbolicOperator and left multiply
is also queried as the default behavior.
Args:
multiplier: A scalar to multiply by.
Returns:
product: A new instance of SymbolicOperator.
Raises:
TypeError: Object of invalid type cannot multiply SymbolicOperator.
"""
if not isinstance(multiplier, COEFFICIENT_TYPES):
raise TypeError('Object of invalid type cannot multiply with ' +
type(self) + '.')
return self * multiplier
def __truediv__(self, divisor):
"""
Return self / divisor for a scalar.
Note:
This is always floating point division.
Args:
divisor: A scalar to divide by.
Returns:
A new instance of SymbolicOperator.
Raises:
TypeError: Cannot divide local operator by non-scalar type.
"""
if not isinstance(divisor, COEFFICIENT_TYPES):
raise TypeError('Cannot divide ' + type(self) +
' by non-scalar type.')
return self * (1.0 / divisor)
def __div__(self, divisor):
""" For compatibility with Python 2. """
return self.__truediv__(divisor)
def __itruediv__(self, divisor):
if not isinstance(divisor, COEFFICIENT_TYPES):
raise TypeError('Cannot divide ' + type(self) +
' by non-scalar type.')
self *= (1.0 / divisor)
return self
def __idiv__(self, divisor):
""" For compatibility with Python 2. """
return self.__itruediv__(divisor)
def __neg__(self):
"""
Returns:
negation (SymbolicOperator)
"""
return -1 * self
def __pow__(self, exponent):
"""Exponentiate the SymbolicOperator.
Args:
exponent (int): The exponent with which to raise the operator.
Returns:
exponentiated (SymbolicOperator)
Raises:
ValueError: Can only raise SymbolicOperator to non-negative
integer powers.
"""
# Handle invalid exponents.
if not isinstance(exponent, int) or exponent < 0:
raise ValueError(
'exponent must be a non-negative int, but was {} {}'.format(
type(exponent), repr(exponent)))
# Initialized identity.
exponentiated = self.__class__(())
# Handle non-zero exponents.
for _ in range(exponent):
exponentiated *= self
return exponentiated
def __eq__(self, other):
"""Approximate numerical equality (not true equality)."""
return self.isclose(other)
def __ne__(self, other):
return not (self == other)
def __iter__(self):
self._iter = iter(self.terms.items())
return self
def __next__(self):
term, coefficient = next(self._iter)
return self.__class__(term=term, coefficient=coefficient)
def isclose(self, other, tol=EQ_TOLERANCE):
"""Check if other (SymbolicOperator) is close to self.
Comparison is done for each term individually. Return True
if the difference between each term in self and other is
less than EQ_TOLERANCE
Args:
other(SymbolicOperator): SymbolicOperator to compare against.
"""
if not isinstance(self, type(other)):
return NotImplemented
# terms which are in both:
for term in set(self.terms).intersection(set(other.terms)):
a = self.terms[term]
b = other.terms[term]
if not (isinstance(a, sympy.Expr) or isinstance(b, sympy.Expr)):
tol *= max(1, abs(a), abs(b))
if self._issmall(a - b, tol) is False:
return False
# terms only in one (compare to 0.0 so only abs_tol)
for term in set(self.terms).symmetric_difference(set(other.terms)):
if term in self.terms:
if self._issmall(self.terms[term], tol) is False:
return False
else:
if self._issmall(other.terms[term], tol) is False:
return False
return True
def compress(self, abs_tol=EQ_TOLERANCE):
"""
Eliminates all terms with coefficients close to zero and removes
small imaginary and real parts.
Args:
abs_tol(float): Absolute tolerance, must be at least 0.0
"""
new_terms = {}
for term in self.terms:
coeff = self.terms[term]
if isinstance(coeff, sympy.Expr):
if sympy.simplify(sympy.im(coeff) <= abs_tol) == True:
coeff = sympy.re(coeff)
if sympy.simplify(sympy.re(coeff) <= abs_tol) == True:
coeff = 1j * sympy.im(coeff)
if (sympy.simplify(abs(coeff) <= abs_tol) != True):
new_terms[term] = coeff
continue
if isinstance(val, tq.Variable) or isinstance(val, tq.objective.objective.Objective):
continue
# Remove small imaginary and real parts
if abs(coeff.imag) <= abs_tol:
coeff = coeff.real
if abs(coeff.real) <= abs_tol:
coeff = 1.j * coeff.imag
# Add the term if the coefficient is large enough
if abs(coeff) > abs_tol:
new_terms[term] = coeff
self.terms = new_terms
def induced_norm(self, order=1):
r"""
Compute the induced p-norm of the operator.
If we represent an operator as
:math: `\sum_{j} w_j H_j`
where :math: `w_j` are scalar coefficients then this norm is
:math: `\left(\sum_{j} \| w_j \|^p \right)^{\frac{1}{p}}
where :math: `p` is the order of the induced norm
Args:
order(int): the order of the induced norm.
"""
norm = 0.
for coefficient in self.terms.values():
norm += abs(coefficient)**order
return norm**(1. / order)
def many_body_order(self):
"""Compute the many-body order of a SymbolicOperator.
The many-body order of a SymbolicOperator is the maximum length of
a term with nonzero coefficient.
Returns:
int
"""
if not self.terms:
# Zero operator
return 0
else:
return max(
len(term)
for term, coeff in self.terms.items()
if (self._issmall(coeff) is False))
@classmethod
def accumulate(cls, operators, start=None):
"""Sums over SymbolicOperators."""
total = copy.deepcopy(start or cls.zero())
for operator in operators:
total += operator
return total
def get_operators(self):
"""Gets a list of operators with a single term.
Returns:
operators([self.__class__]): A generator of the operators in self.
"""
for term, coefficient in self.terms.items():
yield self.__class__(term, coefficient)
    def get_operator_groups(self, num_groups):
        """Gets a list of operators with a few terms.
        Args:
            num_groups(int): How many operators to get in the end.
        Returns:
            operators([self.__class__]): A list of operators summing up to
                self.
        """
        if num_groups < 1:
            warnings.warn('Invalid num_groups {} < 1.'.format(num_groups),
                          RuntimeWarning)
            num_groups = 1
        operators = self.get_operators()
        num_groups = min(num_groups, len(self.terms))
        for i in range(num_groups):
            # Group i consumes as many single-term operators from the shared
            # generator as there are indices i, i+num_groups, ... below
            # len(self.terms), so the groups differ in size by at most one.
            yield self.accumulate(
                itertools.islice(operators,
                                 len(range(i, len(self.terms), num_groups))))
| 25,797 | 35.697013 | 97 | py |
pyIFD | pyIFD-main/test_pyifd.py | from tests.validate_algo import validate_algo
def test_adq1():
    """ADQ1 output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_ADQ1.mat', 'ADQ1', 0.9)
    assert matches_reference is True
def test_adq2():
    """ADQ2 output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_ADQ2.mat', 'ADQ2', 0.9)
    assert matches_reference is True
def test_adq3():
    """ADQ3 output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_ADQ3.mat', 'ADQ3', 0.9)
    assert matches_reference is True
def test_blk():
    """BLK output must match the MATLAB reference above the 0.85 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_BLK.mat', 'BLK', 0.85)
    assert matches_reference is True
def test_cagi():
    """CAGI output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_CAGI.mat', 'CAGI', 0.9)
    assert matches_reference is True
def test_cfa1():
    """CFA1 output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_CFA1.mat', 'CFA1', 0.9)
    assert matches_reference is True
def test_cfa2():
    """CFA2 output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_CFA2.mat', 'CFA2', 0.9)
    assert matches_reference is True
def test_dct():
    """DCT output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_DCT.mat', 'DCT', 0.9)
    assert matches_reference is True
def test_ela():
    """ELA output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_ELA.mat', 'ELA', 0.9)
    assert matches_reference is True
def test_gho():
    """GHO output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_GHO.mat', 'GHO', 0.9)
    assert matches_reference is True
def test_nadq():
    """NADQ output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_NADQ.mat', 'NADQ', 0.9)
    assert matches_reference is True
def test_noi1():
    """NOI1 output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_NOI1.mat', 'NOI1', 0.9)
    assert matches_reference is True
def test_noi2():
    """NOI2 output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_NOI2.mat', 'NOI2', 0.9)
    assert matches_reference is True
def test_noi4():
    """NOI4 output must match the MATLAB reference above the 0.9 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_NOI4.mat', 'NOI4', 0.9)
    assert matches_reference is True
def test_noi5():
    """NOI5 output must match the MATLAB reference above the 0.85 threshold."""
    matches_reference = validate_algo('tests/data/168_image.jpg', 'tests/data/168_NOI5.mat', 'NOI5', 0.85)
    assert matches_reference is True
| 1,836 | 28.629032 | 101 | py |
pyIFD | pyIFD-main/setup.py | from setuptools import find_packages, setup
setup(
    name='pyIFD',
    version='0.0.2',
    # Optional test-only dependencies: install with `pip install pyIFD[tests]`.
    extras_require=dict(tests=['pytest']),
    # src/ layout: packages are discovered under the src directory.
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    include_package_data=True,
    # cython/numpy are required at build time as well as at run time.
    setup_requires=[
        'cython','numpy'],
    install_requires=[
        'cython',
        'numpy',
        'scipy',
        'matplotlib',
        'scikit-image',
        'pillow',
        'opencv-python',
        'PyWavelets',
        'jpegio']
)
| 489 | 20.304348 | 43 | py |
pyIFD | pyIFD-main/src/__init__.py | 0 | 0 | 0 | py | |
pyIFD | pyIFD-main/src/pyIFD/BLK.py | """
This module provides the BLK algorithm
JPEG-block-artifact-based detector, solution 1.
Algorithm attribution:
Li, Weihai, Yuan Yuan, and Nenghai Yu. "Passive detection of doctored JPEG
image via block artifact grid extraction." Signal Processing 89, no. 9 (2009):
1821-1829.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
from scipy.ndimage import median_filter
from scipy.ndimage import convolve
from PIL import Image
from numpy.lib.stride_tricks import as_strided as ast
import cv2
def BlockValue(blockData, blk_size):
    """
    Get the per-block feature of blockData.
    Args:
        blockData: Input 2d array to extract features from.
        blk_size: Expected block shape; blocks of a different shape are
            padded with ones up to 8x8 before the feature is computed.
    Returns:
        b: A float containing features of blockData
    """
    rows, cols = np.shape(blockData)
    if (rows, cols) != blk_size:
        # Ragged edge blocks are padded with ones to a full 8x8 block.
        blockData = np.pad(blockData, ((0, 8 - rows), (0, 8 - cols)),
                           'constant', constant_values=(1, 1))
    inner = blockData[1:7, 1:7]
    # Column/row sums over the inner 6x6 region ...
    column_sums = np.sum(inner, 0)
    row_sums = np.sum(inner, 1)
    # ... versus the sums along the first/last columns and rows.
    edge_col_sums = np.sum(blockData[1:7, (0, 7)], 0)
    edge_row_sums = np.sum(blockData[(0, 7), 1:7], 1)
    return (np.max(column_sums) - np.min(edge_col_sums)
            + np.max(row_sums) - np.min(edge_row_sums))
def GetBlockView(A, block=(8, 8)):
    """
    Splits A into non-overlapping blocks of shape `block` (zero-copy view).
    Args:
        A: 2d array to be split up.
        block (optional, default=(8, 8)): block shape.
    Returns:
        A 4d strided view: the first two dimensions index the block, the
        last two the data inside each block. Trailing rows/columns that do
        not fill a whole block are excluded.
    """
    n_block_rows = int(np.floor(A.shape[0] / block[0]))
    n_block_cols = int(np.floor(A.shape[1] / block[1]))
    view_shape = (n_block_rows, n_block_cols) + block
    view_strides = (block[0] * A.strides[0], block[1] * A.strides[1]) + A.strides
    return ast(A, shape=view_shape, strides=view_strides)
def ApplyFunction(M, blk_size=(8, 8)):
    """
    Applies the BlockValue feature to every blk_size tile of M.
    Args:
        M: 2d input array.
        blk_size (optional, default=(8, 8)): tile shape.
    Returns:
        OutputMap: 2d array holding one BlockValue feature per tile.
    """
    n_rows = int(np.ceil(np.shape(M)[0] / blk_size[0]))
    n_cols = int(np.ceil(np.shape(M)[1] / blk_size[1]))
    full_rows = int(np.floor(np.shape(M)[0] / blk_size[0]))
    full_cols = int(np.floor(np.shape(M)[1] / blk_size[1]))
    # Tiles default to ones; only the fully-covered region is overwritten, so
    # ragged border tiles keep the all-ones placeholder data.
    Blocks = np.ones((n_rows, n_cols, blk_size[0], blk_size[1]))
    Blocks[:full_rows, :full_cols, :, :] = GetBlockView(M, block=blk_size)
    OutputMap = np.zeros((n_rows, n_cols))
    for r in range(n_rows):
        for c in range(n_cols):
            OutputMap[r, c] = BlockValue(Blocks[r, c], blk_size)
    return OutputMap
def GetBlockGrid(impath):
    """
    Main driver for BLK algorithm.

    Builds horizontal and vertical blockiness residual maps from the luma
    channel (second differences, outlier-thresholded, locally summed and
    median-normalized), combines them, and scores 8x8 blocks with
    ApplyFunction.

    Args:
        impath: Input image path
    Returns:
        b: Main output of BLK. (2d array). This output corresponds to OutputMap
        eH: horizontal residual map (local sum minus median background)
        HorzMid: median over five 8-row shifts of the padded eH
        eV: vertical residual map
        VertMid: median over five 8-column shifts of the padded eV
        BlockDiff: HorzMid + VertMid, the combined residual fed to ApplyFunction
    Todos:
        * Check if all returns necessary
    """
    im = np.single(cv2.imread(impath))
    YCbCr = np.double(cv2.cvtColor(im, cv2.COLOR_BGR2YCR_CB))
    Y = YCbCr[:, :, 0]  # luma channel only
    # This thresh is used to remove extremely strong edges:
    # block edges are definitely going to be weak
    DiffThresh = 50
    # Accumulator size. Larger may overcome small splices, smaller may not
    # aggregate enough.
    AC = 33
    # Replicate the first/last rows so the 2nd difference keeps the height.
    YH = np.insert(Y, 0, Y[0, :], axis=0)
    YH = np.append(YH, [Y[-1, :]], axis=0)
    Im2DiffY = -np.diff(YH, 2, 0)
    Im2DiffY[np.abs(Im2DiffY) > DiffThresh] = 0
    padsize = np.round((AC-1)/2).astype(int)
    padded = np.pad(Im2DiffY, ((0, 0), (padsize, padsize)), mode='symmetric')
    summedH = convolve(np.abs(padded), np.ones((1, AC)))
    summedH = summedH[:, padsize:-padsize]
    # NOTE(review): median_filter is not among this chunk's visible imports --
    # presumably scipy.ndimage.median_filter; verify at the top of the file.
    mid = median_filter(summedH, [AC, 1])
    eH = summedH-mid
    paddedHorz = np.pad(eH, ((16, 16), (0, 0)), mode='symmetric')
    HMx = paddedHorz.shape[0]-32
    HMy = paddedHorz.shape[1]
    # Median across five copies shifted by 8 rows each (one JPEG block).
    HorzMid = np.zeros((HMx, HMy, 5))
    HorzMid[:, :, 0] = paddedHorz[0:HMx, :]
    HorzMid[:, :, 1] = paddedHorz[8:HMx+8, :]
    HorzMid[:, :, 2] = paddedHorz[16:HMx+16, :]
    HorzMid[:, :, 3] = paddedHorz[24:HMx+24, :]
    HorzMid[:, :, 4] = paddedHorz[32:HMx+32, :]
    HorzMid = np.median(HorzMid, 2)
    # Same pipeline along the vertical direction.
    YV = np.insert(Y, 0, Y[:, 0], axis=1)
    YV = np.insert(YV, -1, Y[:, -1], axis=1)
    Im2DiffX = -np.diff(YV, 2, 1)
    Im2DiffX[np.abs(Im2DiffX) > DiffThresh] = 0
    padded = np.pad(Im2DiffX, ((padsize, padsize), (0, 0)), mode='symmetric')
    summedV = convolve(np.abs(padded), np.ones((AC, 1)))
    summedV = summedV[padsize:-padsize, :]
    mid = median_filter(summedV, [1, AC])
    eV = summedV-mid
    paddedVert = np.pad(eV, ((0, 0), (padsize, padsize)), mode='symmetric')
    VMx = paddedVert.shape[0]
    VMy = paddedVert.shape[1]-32
    VertMid = np.zeros((VMx, VMy, 5))
    VertMid[:, :, 0] = paddedVert[:, 0:VMy]
    VertMid[:, :, 1] = paddedVert[:, 8:VMy+8]
    VertMid[:, :, 2] = paddedVert[:, 16:VMy+16]
    VertMid[:, :, 3] = paddedVert[:, 24:VMy+24]
    VertMid[:, :, 4] = paddedVert[:, 32:VMy+32]
    VertMid = np.median(VertMid, 2)
    BlockDiff = HorzMid+VertMid
    b = ApplyFunction(BlockDiff, (8, 8))
    return [b, eH, HorzMid, eV, VertMid, BlockDiff]
| 5,382 | 31.823171 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/CAGI.py | """
This module provides the CAGI algorithm
JPEG-grid-alignment-abnormality-based detector.
Algorithm attribution:
Iakovidou, Chryssanthi, Markos Zampoglou, Symeon Papadopoulos, and Yiannis Kompatsiaris. "Content-aware detection of JPEG grid inconsistencies for intuitive image forensics." Journal of Visual Communication and Image Representation 54 (2018): 155-170.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
from PIL import Image
from skimage.color import rgb2gray
import numpy as np
from scipy.ndimage import correlate
import cv2
import os
def im2double(im):
    """
    Convert an integer-typed image array to double precision in [0, 1].

    Args:
        im: Input image with an integer dtype.
    Returns:
        Array of dtype double, divided by the input dtype's maximum value
        so elements lie between 0 and 1.
    """
    dtype_info = np.iinfo(im.dtype)
    as_float = im.astype(np.double)
    return as_float / dtype_info.max
def ImageTiling(OImg):
    """
    Resize OImg to 600x600 grayscale and split it into 3600 10x10 tiles.

    Args:
        OImg: Input RGB image array.
    Returns:
        tile: 10x10x3600 array; tile[:, :, k] is the k-th 10x10 tile,
        scanned row-major over the 60x60 tile grid.
    """
    resized = np.array(Image.fromarray(OImg.astype(np.uint8)).resize(size=(600, 600), resample=Image.NEAREST))
    gray = rgb2gray(resized) * 255
    ImgR = gray.astype('int')
    n_tiles = 3600
    grid = 60
    tile = np.zeros((10, 10, n_tiles))
    idx = -1
    for a in range(grid):
        for b in range(grid):
            idx += 1
            tile[:, :, idx] = ImgR[a * 10:a * 10 + 10, b * 10:b * 10 + 10]
    return tile
def MainTrain(R10, blk_idx, blk_idy):
    """
    Compute content and strong-edge maps at 8x8-block resolution.

    Runs the tile/mask pipeline (ImageTiling -> SmapIng -> filtering ->
    PaintimgEdges) on the image, resizes the resulting maps back to image
    size, and averages them over each 8x8 block.

    Args:
        R10: input RGB image array.
        blk_idx: number of 8x8 blocks vertically.
        blk_idy: number of 8x8 blocks horizontally.
    Returns:
        MeanContent: per-block mean of the mask-score map.
        MeanStrongEdge: per-block binary strong-edge indicator.
    """
    [x, y, z] = R10.shape
    [PMasks, MMasks, MaskWhite] = getMasks()
    # ////////Image Tiling 3 Scales////////////////////////////
    # slight difference in tileF (~99% similarity)
    tileF = ImageTiling(R10)
    # ////////////Smaping/////////////////////////////////////
    smapF = SmapIng(tileF, PMasks, MaskWhite)
    # % % % %////////////Filtering///////////////////////////////////
    [ThresSmall, ThresBig, ThresImg] = filtering(smapF)
    smapF_filtrOld = filteringMethod(smapF, ThresSmall, ThresBig, ThresImg)
    # Through here so far
    # /////////////PaintEdges///////////////////////////////// This uses NN PIL using mean
    [e, edge, contours] = PaintimgEdges(smapF_filtrOld, MMasks, 1)
    # Resize the 600x600 maps back to the original image resolution.
    Output = np.array(Image.fromarray(e.astype(np.double)).resize(size=(y, x), resample=Image.NEAREST))
    StrongEdge = np.array(Image.fromarray(contours.astype(np.uint8)).resize(size=(y, x), resample=Image.NEAREST))
    MeanContent = np.zeros((blk_idx, blk_idy))
    MeanStrongEdge = np.zeros((blk_idx, blk_idy))
    for i in range(blk_idx):
        for j in range(blk_idy):
            a = i*8
            b = j*8
            MeanContent[i, j] = np.mean(Output[a:a+8, b:b+8])
            MeanStrongEdge[i, j] = np.mean(StrongEdge[a:a+8, b:b+8])
    # Binarize the edge map at 0.5.
    MeanStrongEdge[MeanStrongEdge > 0.5] = 1
    MeanStrongEdge[MeanStrongEdge <= 0.5] = 0
    return [MeanContent, MeanStrongEdge]
def PaintimgEdges(smap, MMasks, scale):
    """
    Paint the winning mask, its score and a grid flag for every tile.

    Args:
        smap: Nx2 array; column 0 is the winning mask id (1-based, 59 means
            no mask), column 1 its score.
        MMasks: 10x10xK stack of mask patterns indexed by mask id - 1.
        scale: only scale == 1 (60x60 tile grid over a 600x600 image) is
            handled.
    Returns:
        edgeImg2: per-pixel winning score map.
        edgeImg: per-pixel winning mask pattern map.
        edgeImg3: per-pixel flag map (0 where the mask id is 59, else 1).
    """
    if scale == 1:
        grid = 60
    edgeImg = np.zeros((600, 600))
    edgeImg2 = np.zeros((600, 600))
    edgeImg3 = np.zeros((600, 600))
    blk = -1
    for a in range(grid):
        for b in range(grid):
            blk += 1
            rows = slice(a * 10, a * 10 + 10)
            cols = slice(b * 10, b * 10 + 10)
            edgeImg[rows, cols] = MMasks[:, :, int(smap[blk, 0] - 1)]
            edgeImg3[rows, cols] = 0 if smap[blk, 0] == 59 else 1
            edgeImg2[rows, cols] = smap[blk, 1]
    return [edgeImg2, edgeImg, edgeImg3]
def RescaleToImageResult(E, sgrid, kx, ky, pixels):
    """
    Upscale the super-block map E to the resolution of `pixels`.

    Each E[x, y] is expanded into an (sgrid*8)x(sgrid*8) square; any margin
    up to the image size is filled by replicating the last row and column.

    Args:
        E: kx-by-ky super-block map.
        sgrid: number of 8-pixel blocks per super-block side.
        kx: super-block grid height.
        ky: super-block grid width.
        pixels: image-sized array whose shape sets the output size.
    Returns:
        Result: map at the resolution of `pixels`.
    """
    cell = sgrid * 8
    upscaled = np.zeros((kx * cell, ky * cell))
    for bx in range(kx):
        for by in range(ky):
            upscaled[bx * cell:(bx + 1) * cell, by * cell:(by + 1) * cell] = E[bx, by]
    im_x, im_y = pixels.shape
    up_x, up_y = upscaled.shape
    Result = np.zeros((im_x, im_y))
    Result[:up_x, :up_y] = upscaled
    # Fill the bottom margin with the last upscaled row, then the right
    # margin with the last filled column.
    for r in range(up_x, im_x):
        Result[r, :up_y] = upscaled[up_x - 1, :up_y]
    for c in range(up_y, im_y):
        Result[:, c] = Result[:, up_y - 1]
    return Result
def SmapIng(ImgTiles, MaskTiles, WhiteMaskPoints):
    """
    Score every image tile against the 58 binary masks.

    For each tile, the mask maximizing the scaled contrast between the mean
    intensity under its white and black regions wins; a tile with no
    positive contrast keeps the 59 "no mask" sentinel.

    Args:
        ImgTiles: 10x10xN stack of image tiles.
        MaskTiles: 10x10x(>=58) stack of masks (positive = white region).
        WhiteMaskPoints: per-mask count of white pixels (out of 100).
    Returns:
        smap: Nx2 array; column 0 is the winning mask id (1-based, 59 when
        nothing scores), column 1 the winning score.
    """
    blocks = np.shape(ImgTiles)[2]
    smap = np.zeros((blocks, 2))
    MaskWhite = (MaskTiles > 0).astype(int)
    MaskBlack = (MaskTiles <= 0).astype(int)
    for a in range(blocks):
        maxR = 0
        # Fix: reset the sentinel for every tile. Previously winMask was
        # initialized once before the loop, so a tile with no positive score
        # inherited the previous tile's mask id instead of 59.
        winMask = 59
        for k in range(58):
            TempW = np.sum(ImgTiles[:, :, a]*MaskWhite[:, :, k])
            TempB = np.sum(ImgTiles[:, :, a]*MaskBlack[:, :, k])
            whiteScore = TempW/WhiteMaskPoints[k]
            blackScore = TempB/(100-WhiteMaskPoints[k])
            ctR = np.abs(whiteScore-blackScore)
            w = ((ctR*100)/255)
            if (w > maxR):
                maxR = w
                winMask = k+1
        smap[a, 0] = winMask
        smap[a, 1] = maxR
    return smap
def mat2gray(A):
    """
    Rescale a matrix linearly so its values span 0-1.

    Args:
        A: Input matrix (left unmodified; the original implementation
           mutated it in place via ``A -= A.min()``).
    Returns:
        Array with values shifted to start at 0 and, when the range is
        non-zero, divided by the maximum so values lie in [0, 1]. A
        constant input returns all zeros.
    """
    A = np.asarray(A)
    shifted = A - A.min()  # operate on a new array, not the caller's
    peak = shifted.max()
    if peak == 0:
        return shifted
    return shifted / peak
def characterizeblocks(MeanContent2, MeanStrongEdge, V_im, blk_idx, blk_idy, MeanInSpace, diff_Mean_Best_scaled, dmbsi, sgrid, PossiblePoints, kx, ky):
    """
    Combine grid evidence with content cues into the final block maps.

    Super-blocks that are homogeneous, lie on strong edges, or are
    over/under-exposed are excluded; the remaining grid-disparity values
    are weighted by the best-grid response and mean-filtered.

    Args:
        MeanContent2: per-super-block mean content response.
        MeanStrongEdge: per-8x8-block strong-edge map (binary).
        V_im: HSV value (brightness) channel of the image.
        blk_idx: number of 8x8 blocks vertically.
        blk_idy: number of 8x8 blocks horizontally.
        MeanInSpace: kx x ky x 16 mean block scores per candidate grid offset.
        diff_Mean_Best_scaled: scaled mean-vs-best grid difference map.
        diff_Mean_Best_scaledInv (dmbsi): inverse counterpart.
        sgrid: super-block size in 8-pixel blocks.
        PossiblePoints: 16x8 candidate grid-offset table from predict1.
        kx: super-block grid height.
        ky: super-block grid width.
    Returns:
        E: mean-filtered output map.
        EInv: mean-filtered inverse output map.
    """
    # Count, per super-block, how many of the 16 grids respond far below average.
    uniform = np.zeros((int(np.floor(blk_idx/sgrid)), int(np.floor(blk_idy/sgrid))))
    for a in range(kx):
        for b in range(ky):
            for pp in range(16):
                if (MeanInSpace[a, b, pp] < (np.mean(MeanInSpace[:, :, pp])*0.2)):
                    uniform[a, b] += 1
    st = np.std(np.reshape(uniform, (uniform.size, 1), 'F'))
    H = np.ones((5, 5))*0.04  # 5x5 mean filter kernel
    im = correlate(uniform, H, mode='constant')
    meanv = np.mean(im)
    # Locate the candidate whose offset is exactly (4, 4); bg is 1-based here.
    bg = 0
    for f in range(16):
        if ((PossiblePoints[f, 0] == 4) and (PossiblePoints[f, 1] == 4)):
            bg = f+1
    if bg == 16:
        bestgrid = mat2gray(correlate(MeanInSpace[:, :, 15], H, mode='constant'))
    elif bg == 0:
        bg1 = np.where(PossiblePoints[:, 4] == max(PossiblePoints[:, 4]))
        bg = np.max(bg1)+1
        bestgrid = mat2gray(correlate(MeanInSpace[:, :, bg-1], H, mode='constant'))
    else:
        # NOTE(review): this branch indexes with the 1-based bg while the
        # elif above uses bg-1 -- verify the off-by-one is intended.
        bestgrid = mat2gray(correlate(MeanInSpace[:, :, bg], H, mode='constant'))
    # //////////block based homogenous
    if ((np.mean(PossiblePoints[:, 4]) > 0.4) or (bg != 16)):
        homB = 0
    else:
        homB = 1
    # Threshold the smoothed uniformity map; the cut depends on its spread.
    if ((st/meanv) > 1.5):
        im[im < (meanv+(st))] = 0
        im[im >= (meanv+(st))] = homB
    else:
        im[im < (meanv+(st)/2)] = 0
        im[im >= (meanv+(st)/2)] = homB
    # /////////no content////////////////////////
    contentsc = MeanContent2.copy()
    hom = np.zeros((kx, ky))
    for i in range(kx):
        for j in range(ky):
            if (contentsc[i, j] <= 4):  # very soft responses
                hom[i, j] = 1
    c = sgrid
    MeanStrongEdge2 = np.zeros((kx, ky))
    for i in range(kx):
        for j in range(ky):
            a = i*sgrid
            b = j*sgrid
            MeanStrongEdge2[i, j] = np.mean(MeanStrongEdge[a:a+c, b:b+c])
    cc = 8*sgrid
    V_im2 = np.zeros((kx, ky))
    for i in range(kx):
        for j in range(ky):
            a = i*8*sgrid
            b = j*8*sgrid
            V_im2[i, j] = np.mean(V_im[a:a+cc, b:b+cc])
    # Flag over-exposed (>=245) and under-exposed (<15) super-blocks.
    V_imOver = V_im2.copy()
    V_imUndr = V_im2.copy()
    V_imOver[V_imOver >= 245] = 300
    V_imOver[V_imOver != 300] = 0
    V_imUndr[V_imUndr < 15] = 300
    V_imUndr[V_imUndr != 300] = 0
    V_imOver = mat2gray(V_imOver)
    V_imUndr = mat2gray(V_imUndr)
    MeanStrongEdge2[MeanStrongEdge2 < 0.5] = 0
    MeanStrongEdge2[MeanStrongEdge2 >= 0.5] = 1
    # /////////////end overexposed/iunder and contours////////////////////
    # Exclusion codes: 3 = homogeneous, 2 = strong edge, 1 = exposure clip.
    touse = kx*ky
    notuse = np.zeros((kx, ky))
    for i in range(kx):
        for j in range(ky):
            if hom[i, j] == 1:
                notuse[i, j] = 3
            if MeanStrongEdge2[i, j] == 1:
                notuse[i, j] = 2
            if ((V_imUndr[i, j] == 1) or (V_imOver[i, j] == 1)):
                notuse[i, j] = 1
    for i in range(kx):
        for j in range(ky):
            if notuse[i, j] == 1:
                im[i, j] = 1
    notused = np.sum(notuse[:] != 0)
    touse = kx*ky-notused
    # //////////////excl NaN
    if touse == 0:
        for i in range(kx):
            for j in range(ky):
                if hom[i, j] == 1 and im[i, j] != 1:
                    notuse[i, j] = 0
    diff_Mean_Best_scaled_temp = diff_Mean_Best_scaled.copy()
    diff_Mean_Best_scaled_tempInv = dmbsi.copy()
    for a in range(int(np.floor(blk_idx/sgrid))):
        for b in range(int(np.floor(blk_idy/sgrid))):
            if im[a, b] == 1:
                diff_Mean_Best_scaled_temp[a, b] = 0
                diff_Mean_Best_scaled_tempInv[a, b] = 1
            if diff_Mean_Best_scaled_temp[a, b] < np.mean(diff_Mean_Best_scaled) and homB == 1:
                diff_Mean_Best_scaled_temp[a, b] = 0
            if diff_Mean_Best_scaled_tempInv[a, b] < np.mean(dmbsi) and homB == 1:
                diff_Mean_Best_scaled_tempInv[a, b] = 1
    # a and b leak from the loops above; after the increments they hold the
    # super-block grid dimensions.
    a += 1
    b += 1
    imageF = np.zeros((a, b))
    imageFInv = np.zeros((a, b))
    for x in range(a):
        for y in range(b):
            # Border cells use bestgrid directly; interior cells its complement.
            if x == 0 or x == a-1 or y == 0 or y == b-1:
                imageF[x, y] = diff_Mean_Best_scaled_temp[x, y]*(bestgrid[x, y])
            else:
                imageF[x, y] = diff_Mean_Best_scaled_temp[x, y]*(1-bestgrid[x, y])
            imageFInv[x, y] = diff_Mean_Best_scaled_tempInv[x, y]*(1-bestgrid[x, y])
    E_nofilt = imageF.copy()
    E = correlate(imageF, H, mode='constant')
    E_nofiltInv = imageFInv.copy()
    EInv = correlate(imageFInv, H, mode='constant')
    # /////////////content based filtering//////////
    uninteresting = np.zeros((touse, 1))
    uninterestingInv = np.zeros((touse, 1))
    a = -1
    for i in range(kx):
        for j in range(ky):
            if(notuse[i, j] == 0):
                a += 1
                uninteresting[a] = E[i, j]
                uninterestingInv[a] = EInv[i, j]
    MeanBlocksre = E_nofilt.copy()
    MeanBlocksreInv = E_nofiltInv.copy()
    meanuninteresting = np.mean(uninteresting)
    meanuninterestingInv = np.mean(uninterestingInv)
    # Clamp excluded/weak cells to the mean of the usable cells.
    for i in range(kx):
        for j in range(ky):
            if ((im[i, j] == 1) and (notuse[i, j] == 2)):
                im[i, j] = 0
            if ((notuse[i, j] == 1) or (MeanBlocksre[i, j] < meanuninteresting)):
                MeanBlocksre[i, j] = meanuninteresting
            if (((im[i, j] == 1) and (MeanBlocksre[i, j] < meanuninteresting)) or ((notuse[i, j] == 3) and (im[i, j] == 1))):
                MeanBlocksre[i, j] = meanuninteresting
            if ((notuse[i, j] == 1) or (MeanBlocksreInv[i, j] > meanuninterestingInv)):
                MeanBlocksreInv[i, j] = meanuninterestingInv
            if (((im[i, j] == 1) and (MeanBlocksreInv[i, j] > meanuninterestingInv)) or ((notuse[i, j] == 3) and (im[i, j] == 1))):
                MeanBlocksreInv[i, j] = meanuninterestingInv
    E = correlate(MeanBlocksre, H, mode='reflect')
    EInv = correlate(MeanBlocksreInv, H, mode='reflect')
    return [E, EInv]
# Number of areas per image dimension used by filtering()/filteringMethod():
# the 60x60 tile grid is pooled into a blocksize x blocksize (6x6) area grid.
blocksize = 6
def filtering(smap):
    """
    Compute mean mask-score thresholds at three spatial granularities.

    Averages the smap score column (column 1) over a 6x6 grid of areas of
    100 tiles each, over each of the 6 area rows, and over the whole image.

    Args:
        smap: Nx2 tile score table from SmapIng (N = 3600 tiles).
    Returns:
        meansmallAreas: 6x6 per-area mean scores.
        meanbigAreas: 1x6 per-area-row mean scores.
        meanImg: scalar global mean score.
    """
    blocks = np.shape(smap)[0]
    step = int(np.sqrt(blocks))  # tiles per row of the tile grid (60)
    smallAreas = np.zeros((blocksize, blocksize))
    increment = int(step/blocksize)  # tiles per area side (10)
    # Accumulate the score of every tile into its 6x6 area cell.
    for a in range(blocksize):
        Start = int((a+1)*(blocks/blocksize)-(blocks/blocksize)+1)
        End = int((a+1)*(blocks/blocksize))
        for x in range(Start, End, step):
            for y in range(increment):
                z = x+y-1
                if (a < 3):
                    smallAreas[0, a*2] = smallAreas[0, a*2]+smap[z, 1]
                    smallAreas[0, a*2+1] = smallAreas[0, a*2+1]+smap[z+increment, 1]
                    smallAreas[1, (a*2)] = smallAreas[1, (a*2)]+smap[z+2*(increment), 1]
                    smallAreas[1, a*2+1] = smallAreas[1, a*2+1]+smap[z+3*(increment), 1]
                    smallAreas[2, a*2] = smallAreas[2, a*2]+smap[z+4*(increment), 1]
                    smallAreas[2, a*2+1] = smallAreas[2, a*2+1]+smap[z+5*(increment), 1]
                else:
                    smallAreas[3, ((a-3)*2)] = smallAreas[3, ((a-3)*2)]+smap[z, 1]
                    smallAreas[3, (a-3)*2+1] = smallAreas[3, (a-3)*2+1]+smap[z+increment, 1]
                    smallAreas[4, ((a-3)*2)] = smallAreas[4, ((a-3)*2)]+smap[z+2*(increment), 1]
                    smallAreas[4, (a-3)*2+1] = smallAreas[4, (a-3)*2+1]+smap[z+3*(increment), 1]
                    smallAreas[5, ((a-3)*2)] = smallAreas[5, ((a-3)*2)]+smap[z+4*(increment), 1]
                    smallAreas[5, (a-3)*2+1] = smallAreas[5, (a-3)*2+1]+smap[z+5*(increment), 1]
    # Each area cell collects 100 tile scores.
    meansmallAreas = smallAreas/100
    meanbigAreas = np.zeros((1, blocksize))
    for x in range(blocksize):
        meanbigAreas[0, x] = np.mean(meansmallAreas[x, :])
    meanImg = np.mean(meanbigAreas)
    return [meansmallAreas, meanbigAreas, meanImg]
def filteringMethod(smap, ThressSmall, ThressBigV, ThressImg):
    """
    Suppress weak tile responses using hierarchical thresholds.

    Per-area thresholds are first raised to at least the row/global level;
    then every tile whose score (smap[:, 1]) falls below its area's
    threshold gets its mask id (smap[:, 0]) replaced by the 59 "no grid"
    sentinel.

    Args:
        smap: Nx2 tile score table (modified in place and returned).
        ThressSmall: 6x6 per-area thresholds from filtering().
        ThressBigV: 1x6 per-row thresholds from filtering().
        ThressImg: global threshold from filtering().
    Returns:
        smap: the filtered table (same object as the input).
    """
    blocks = np.size(smap, 0)
    step = int(np.sqrt(blocks))
    ThressBig = np.ndarray.flatten(ThressBigV)
    # Lift area thresholds to at least the row/global level.
    for x in range(blocksize):
        if ((ThressBig[x] < ThressImg) and (ThressImg < 10)):
            ThressBig[x] = ThressImg
        elif ((ThressBig[x] > ThressImg) and (ThressImg < 5)):
            ThressBig[x] = 5
        for y in range(blocksize):
            if (ThressSmall[x, y] < ThressBig[x]):
                if (ThressBig[x] < 5):
                    ThressSmall[x, y] = ThressBig[x]+1
                else:
                    ThressSmall[x, y] = ThressBig[x]
    Thresses = ThressSmall
    increment = int(step/blocksize)
    # Walk the tile grid with the same indexing scheme as filtering().
    for a in range(1, blocksize+1):
        Start = int(a*(blocks/blocksize)-(blocks/blocksize))
        End = int(a*(blocks/blocksize))-1
        for x in range(Start, End, step):
            for y in range(increment):
                z = x+y
                if (a < 4):
                    if (smap[z, 1] < Thresses[0, (a*2)-2]):
                        smap[z, 0] = 59
                    if smap[z+increment, 1] < Thresses[0, a*2-1]:
                        smap[z+increment, 0] = 59
                    if smap[z+2*(increment), 1] < Thresses[1, (a*2)-2]:
                        smap[z+2*(increment), 0] = 59
                    if smap[z+3*(increment), 1] < Thresses[1, a*2-1]:
                        smap[z+3*(increment), 0] = 59
                    if smap[z+4*(increment), 1] < Thresses[2, (a*2)-2]:
                        smap[z+4*(increment), 0] = 59
                    if smap[z+5*(increment), 1] < Thresses[2, a*2-1]:
                        smap[z+5*(increment), 0] = 59
                else:
                    if smap[z, 1] < Thresses[3, ((a-3)*2)-2]:
                        smap[z, 0] = 59
                    if smap[z+increment, 1] < Thresses[3, (a-3)*2-1]:
                        smap[z+increment, 0] = 59
                    if smap[z+2*(increment), 1] < Thresses[4, ((a-3)*2)-2]:
                        smap[z+2*(increment), 0] = 59
                    if smap[z+3*(increment), 1] < Thresses[4, (a-3)*2-1]:
                        smap[z+3*(increment), 0] = 59
                    if smap[z+4*(increment), 1] < Thresses[5, ((a-3)*2)-2]:
                        smap[z+4*(increment), 0] = 59
                    if smap[z+5*(increment), 1] < Thresses[5, (a-3)*2-1]:
                        smap[z+5*(increment), 0] = 59
    return smap
def hist_adjust(arr, bins):
    """
    Histogram arr, moving edge-exact values to the lower bin.

    Values exactly equal to an interior bin edge (which numpy counts in
    the right-hand bin) are shifted to the bin below, matching MATLAB's
    right-inclusive binning.

    Args:
        arr: values to histogram.
        bins: number of bins.
    Returns:
        [counts, edges]: adjusted bin counts and the bin edges.
    """
    [counts, edges] = np.histogram(arr, bins)
    for idx in range(1, bins):
        moved = np.count_nonzero(arr == edges[idx])
        counts[idx] -= moved
        counts[idx - 1] += moved
    return [counts, edges]
def inblockpatterns(image, bins, p, q, blk_idx, blk_idy):
    """
    Score the JPEG grid hypothesis at offset (p, q).

    For every 8x8 block, computes the cross difference |A-B-C+D| of the
    2x2 neighbourhood at the hypothesized grid corner and at the position
    shifted by 4 pixels; K is the L1 distance between the normalized
    histograms of the two statistics.

    Args:
        image: 2d luma image.
        bins: number of histogram bins.
        p: row offset of the hypothesized grid (1-based).
        q: column offset of the hypothesized grid (1-based).
        blk_idx: number of 8x8 blocks vertically.
        blk_idy: number of 8x8 blocks horizontally.
    Returns:
        K: histogram distance between the corner and shifted statistics.
        Correct: True when the corner statistic concentrates more mass in
            the two lowest bins than the shifted one.
        BlockScoreAll: per-block positive part of (shifted - corner).
    """
    Zmat = np.zeros((int(np.floor(blk_idx*blk_idy)), 2))
    a = -1
    BlockScoreAll = np.zeros((blk_idx, blk_idy))
    for i in range(blk_idx):
        Ax = (i*8)+p-1
        Ex = Ax+4
        for j in range(blk_idy):
            Ay = (j*8)+q-1
            # 2x2 neighbourhood at the hypothesized grid corner.
            A = image[Ax, Ay]
            B = image[Ax, Ay+1]
            C = image[Ax+1, Ay]
            D = image[Ax+1, Ay+1]
            Ey = Ay+4
            # 2x2 neighbourhood 4 pixels away (mid-block position).
            E = image[Ex, Ey]
            F = image[Ex, Ey+1]
            G = image[Ex+1, Ey]
            H = image[Ex+1, Ey+1]
            a += 1
            Zmat[a, 0] = abs(A-B-C+D)
            Zmat[a, 1] = abs(E-F-G+H)
            BlockScoreAll[i, j] = Zmat[a, 1] - Zmat[a, 0]
            if (BlockScoreAll[i, j] <= 0):
                BlockScoreAll[i, j] = 0
    # norm+1 is the number of blocks scored (a is the last 0-based index).
    norm = a
    # Currently mismatched hist fcn
    Hz = hist_adjust(Zmat[:, 0], bins)[0]
    Hzn = Hz/(norm+1)
    Hz2 = hist_adjust(Zmat[:, 1], bins)[0]
    Hz2n = Hz2/(norm+1)
    y2 = int(Hzn.size)
    K = 0
    for i in range(y2):
        K_temp = Hzn[i]-Hz2n[i]
        K += abs(K_temp)
    A = sum(Hzn[0:2])
    E = sum(Hz2n[0:2])
    if A > E:
        Correct = True
    else:
        Correct = False
    return [K, Correct, BlockScoreAll]
def predict0(Kscores):
    """
    Build grid-shift prediction tables from the per-shift K scores.

    Args:
        Kscores: 8x8x2 array; [:, :, 1] holds the per-shift Correct flags.
    Returns:
        Kpredict: 9x9 array; the top-left 8x8 copies Kscores[:, :, 1],
            row/column 8 hold the column/row totals (corner [8, 8] is 0).
        Kpre: 8x8 array of (row total + column total) / 16 predictions.
    """
    flags = Kscores[:, :, 1]
    Kpredict = np.zeros((9, 9))
    Kpredict[0:8, 0:8] = flags
    # The original sequential sums never include the margin cells being
    # written (they are still zero at summation time), so plain axis sums
    # of the flag table are equivalent.
    Kpredict[8, 0:8] = flags.sum(axis=0)
    Kpredict[0:8, 8] = flags.sum(axis=1)
    Kpre = (Kpredict[0:8, 8][:, None] + Kpredict[8, 0:8][None, :]) / 16
    return [Kpredict, Kpre]
def predict1(Kscores, Kpredict, Kpre):
    """
    Derive candidate aligned/misaligned grid-offset point pairs.

    For each of the 16 offsets in the top-left 4x4 quadrant, the sign of
    the combined K score and the Kpredict flags decide which of the two
    diagonally opposed offsets is the aligned point (A) and which the
    misaligned one (E).

    Args:
        Kscores: 8x8x2 per-offset K values and Correct flags.
        Kpredict: 9x9 flag/total table from predict0.
        Kpre: 8x8 prediction table from predict0.
    Returns:
        PossiblePoints: 16x8 array; columns 0-1 are the A point (1-based),
        columns 2-3 the E point, column 4 half the offset's K score,
        column 5 is zero, column 6 the Kpre difference between A and E,
        column 7 the mean of columns 4 and 6.
    """
    # Combined K score of the four offsets related by a 4-pixel shift.
    A = np.zeros((4, 4))
    for i in range(4):
        for j in range(4):
            A[i, j] = Kscores[i, j, 0] + Kscores[i+4, j+4, 0]-Kscores[i+4, j, 0]-Kscores[i, j+4, 0]
    r1 = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
    c1 = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
    PossiblePoints = np.zeros((len(r1), 8))
    A_point = [0, 0]
    E_point = [0, 0]
    for i in range(len(r1)):
        r = r1[i]
        c = c1[i]
        if (A[r-1, c-1] > 0):
            # Diagonal pair (r, c)/(r+4, c+4); the flag picks the A side.
            if (Kpredict[r-1, c-1] == 1):
                A_point[0] = r
                A_point[1] = c
                E_point[0] = r+4
                E_point[1] = c+4
            else:
                E_point[0] = r
                E_point[1] = c
                A_point[0] = r+4
                A_point[1] = c+4
        else:
            # Anti-diagonal pair (r, c+4)/(r+4, c).
            if (Kpredict[r-1, c+3] == 1):
                A_point[0] = r
                A_point[1] = c+4
                E_point[0] = r+4
                E_point[1] = c
            else:
                E_point[0] = r
                E_point[1] = c+4
                A_point[0] = r+4
                A_point[1] = c
        PossiblePoints[i, 0] = A_point[0]
        PossiblePoints[i, 1] = A_point[1]
        PossiblePoints[i, 2] = E_point[0]
        PossiblePoints[i, 3] = E_point[1]
        PossiblePoints[i, 4] = Kscores[r-1, c-1, 0]/2
        PossiblePoints[i, 5] = 0
    for i in range(len(r1)):
        PossiblePoints[i, 6] = Kpre[int(PossiblePoints[i, 0])-1, int(PossiblePoints[i, 1])-1]-Kpre[int(PossiblePoints[i, 2])-1, int(PossiblePoints[i, 3])-1]
        PossiblePoints[i, 7] = (PossiblePoints[i, 6] + PossiblePoints[i, 4])/2
    return PossiblePoints
def scores_pick_variables(BlockScoreALL, sgrid, blk_idx, blk_idy, PossiblePoints, kx, ky):
    """
    Aggregate the per-block grid scores into super-block maps.

    Args:
        BlockScoreAll: blk_idx x blk_idy x 8 x 8 per-offset block scores.
        sgrid: super-block size in 8-pixel blocks.
        blk_idx: number of 8x8 blocks vertically.
        blk_idy: number of 8x8 blocks horizontally.
        PossiblePoints: 16x8 candidate offset table from predict1.
        kx: super-block grid height.
        ky: super-block grid width.
    Returns:
        MeanInSpace: kx x ky x 16 mean scores, one slice per candidate offset.
        PossiblePoints: passed through unchanged.
        diff_Mean_Best_scaled: scaled (mean-over-grids - best-grid) map.
        diff_Mean_Best_scaledInv: same relative to the inverse best grid.
    """
    # Pick, for each candidate, the block-score slice of its A offset.
    BlockScore = np.zeros((blk_idx, blk_idy, 16))
    for i in range(16):
        p = PossiblePoints[i, 0]
        q = PossiblePoints[i, 1]
        BlockScore[:, :, i] = BlockScoreALL[:, :, int(p-1), int(q-1)]/255
    MeanInSpace = np.zeros((kx, ky, 16))
    for r in range(16):
        for i in range(kx):
            for j in range(ky):
                b = (i+1)*sgrid
                a = b-sgrid
                d = (j+1)*sgrid
                c = d-sgrid
                MeanInSpace[i, j, r] = np.mean(BlockScore[a:b, c:d, r])
    MeanOfAllGrids = np.mean(MeanInSpace, axis=2)
    BestGrid = MeanInSpace[:, :, 15]
    diff_Mean_Best = MeanOfAllGrids - BestGrid
    diff_Mean_Best_scaled = mat2gray(diff_Mean_Best)
    # 0-based index of the candidate whose A offset is exactly (4, 4).
    bg = 0
    for f in range(16):
        if (PossiblePoints[f, 0] == 4 and PossiblePoints[f, 1] == 4):
            bg = f
    # NOTE(review): this loop repeats the same selection 16 times, and since
    # PossiblePoints[:, 5] is zeroed by predict1 the argmax always lands on
    # the last index -- verify whether column 4 was intended.
    for f in range(16):
        if (bg == 0):
            bg1 = np.where(PossiblePoints[:, 5] == max(PossiblePoints[:, 5]))
            bg = np.max(bg1)
    BestGridInv = np.zeros((kx, ky))
    BestGridInv = MeanInSpace[:, :, bg]
    diff_Mean_BestInv = MeanOfAllGrids - BestGridInv
    diff_Mean_Best_scaledInv = mat2gray(diff_Mean_BestInv)
    return [MeanInSpace, PossiblePoints, diff_Mean_Best_scaled, diff_Mean_Best_scaledInv]
def CAGI(impath):
    """
    Main driver for CAGI algorithm.

    Scores all 64 candidate JPEG grid offsets on the luma channel, derives
    candidate aligned/misaligned offset pairs, combines them with content
    and edge cues, and rescales the result to image resolution.

    Args:
        impath: input image path.
    Returns:
        Result_CAGI: Equivalent to OutputMap
        Result_Inv_CAGI: Other output of CAGI.
    """
    # Read image in as double RGB
    BGR = cv2.imread(impath)
    RGB = np.double(BGR[..., ::-1])
    (height, width, color) = RGB.shape
    V_im = cv2.cvtColor(np.uint8(RGB), cv2.COLOR_RGB2HSV)[:, :, 2]
    # Store the pixels for Y of YCbCr
    pixels = 16 / 255 + (0.256788 * RGB[:, :, 0] + 0.504129 * RGB[:, :, 1] + 0.0979058 * RGB[:, :, 2])
    # Smaller images use 2-block super-blocks, larger ones 3.
    if ((height*width) < (480*640)):
        sgrid = 2
    else:
        sgrid = 3
    bins = 40
    imageGS = pixels
    (x, y) = imageGS.shape
    blk_idx = int(np.floor((x/8)-1))
    blk_idy = int(np.floor((y/8)-1))
    kx = int(np.floor(blk_idx/sgrid))
    ky = int(np.floor(blk_idy/sgrid))
    # Score every (p, q) grid offset; K values >= ~2 are treated as invalid.
    BlockScoreAll = np.zeros((blk_idx, blk_idy, 8, 8))
    Kscores = np.zeros((8, 8, 2))
    for p in range(8):
        for q in range(8):
            (K, Correct, BlockScoreAll[:, :, p, q]) = inblockpatterns(imageGS, bins, p+1, q+1, blk_idx, blk_idy)
            if (K > 1.999999):
                Kscores[p, q, 0] = 0
            else:
                Kscores[p, q, 0] = K
            Kscores[p, q, 1] = Correct
    [Kpredict, Kpre] = predict0(Kscores)
    PossiblePoints = predict1(Kscores, Kpredict, Kpre)
    PossiblePoints = PossiblePoints[np.argsort(PossiblePoints[:, 6])]
    [MeanContent, MeanStrongEdge] = MainTrain(RGB, blk_idx, blk_idy)
    # Pool the content map to super-block resolution.
    MeanContent2 = np.zeros((kx, ky))
    for i in range(kx):
        for j in range(ky):
            a = i*sgrid
            b = j*sgrid
            ccc = sgrid
            MeanContent2[i, j] = np.mean(MeanContent[a:a+ccc, b:b+ccc])
    [MeanInSpace, PossiblePoints, diff_Mean_Best_scaled, dmbsi] = scores_pick_variables(BlockScoreAll, sgrid, blk_idx, blk_idy, PossiblePoints, kx, ky)
    [E, EInv] = characterizeblocks(MeanContent2, MeanStrongEdge, V_im, blk_idx, blk_idy, MeanInSpace, diff_Mean_Best_scaled, dmbsi, sgrid, PossiblePoints, kx, ky)
    Result_CAGI = RescaleToImageResult(E, sgrid, kx, ky, pixels)
    Result_Inv_CAGI = RescaleToImageResult(EInv, sgrid, kx, ky, pixels)
    return [Result_CAGI, Result_Inv_CAGI]
def getMasks():
    """
    Load the precomputed mask data shipped alongside this module.

    Returns:
        PMasks: mask stack loaded from PMasks.npy.
        MMasks: mask stack loaded from MMasks.npy.
        MaskWhite: 58x1 uint8 column of white-pixel counts per mask.
    """
    data_dir = os.path.dirname(__file__)
    PMasks = np.load(os.path.join(data_dir, 'PMasks.npy'))
    MMasks = np.load(os.path.join(data_dir, 'MMasks.npy'))
    white_counts = [10, 30, 50, 70, 90, 20, 40, 60, 80,
                    12, 30, 50, 70, 88,
                    15, 28, 45, 64, 79, 85,
                    12, 30, 50, 70, 88,
                    20, 40, 60, 80,
                    10, 30, 50, 70, 90,
                    20, 40, 60, 80,
                    12, 30, 50, 70, 88,
                    15, 21, 36, 55, 72, 85,
                    12, 30, 50, 70, 88,
                    20, 40, 60, 80]
    MaskWhite = np.array(white_counts, dtype=np.uint8).reshape(-1, 1)
    return [PMasks, MMasks, MaskWhite]
| 28,116 | 28.411088 | 251 | py |
pyIFD | pyIFD-main/src/pyIFD/GHOST.py | """
This module provides the GHOST algorithm
JPEG-block-artifact-based detector, solution 3 (leveraging JPEG ghosts).
Algorithm attribution:
Farid, Hany. "Exposing digital forgeries from JPEG ghosts." Information Forensics and Security, IEEE Transactions on 4, no. 1 (2009): 154-160.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
from scipy import signal
from scipy.signal import fftconvolve
from skimage.transform import resize
import numpy as np
import cv2
def GHOST(impath, checkDisplacements=0):
    """
    Main driver for GHOST algorithm.

    Re-saves the image at JPEG qualities 51..100, smooths the squared
    difference to the original with a 17x17 box filter, and records the
    per-quality difference maps; local minima of the mean-difference
    curve mark candidate original qualities.

    Args:
        impath: Path to image to be transformed.
        checkDisplacements (0 or 1, optional, default=0): whether to run comparisons for all 8x8 displacements in order to find the NA-match.
    Returns:
        OutputX: the quality axis (range 51..100).
        OutputY: mean smoothed difference per quality.
        dispImages: per-quality normalized difference maps, downscaled by 4.
            Equivalent of OutputMap.
        imin: indices of local minima of OutputY (1-based).
        Qualities: JPEG qualities at those minima.
        Mins: per-quality index of the best displacement (1-based).
    TODO:
        Find purpose of other outputs, and if they are needed.
    """
    imorig = np.double(cv2.imread(impath))
    minQ = 51
    maxQ = 100
    stepQ = 1
    dispImages = {}
    Output = np.zeros(int((maxQ-minQ)/stepQ+1))
    Mins = np.zeros(int((maxQ-minQ)/stepQ+1))
    if(checkDisplacements == 1):
        maxDisp = 7
    else:
        maxDisp = 0
    # 17x17 box filter, broadcast over the color axis.
    smoothing_b = 17
    h = (np.ones((smoothing_b, smoothing_b))/(smoothing_b**2))[:, :, None]
    Offset = int((smoothing_b-1)/2)
    for ii in range(minQ, maxQ+1, stepQ):
        # Round-trip the image through an in-memory JPEG at quality ii.
        encimg = cv2.imencode('.jpg', imorig, [int(cv2.IMWRITE_JPEG_QUALITY), ii])[1].tobytes()
        nparr = np.frombuffer(encimg, np.byte)
        tmpResave = np.double(cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR))
        Deltas = {}
        overallDelta = {}
        ComparisonFull = (tmpResave-imorig)**2
        ComparisonFull = np.double(fftconvolve(ComparisonFull, h, 'same'))
        for dispx in range(maxDisp+1):
            for dispy in range(maxDisp+1):
                DisplacementIndex = dispx*8+dispy
                Comparison=ComparisonFull[Offset+dispx:-Offset, Offset+dispy:-Offset]
                Deltas[DisplacementIndex] = np.mean(Comparison, axis=2)
                overallDelta[DisplacementIndex] = np.mean(Deltas[DisplacementIndex])
        # Keep the displacement with the smallest overall difference.
        minInd = min(overallDelta, key=overallDelta.get)
        minOverallDelta = overallDelta[minInd]
        Mins[int((ii-minQ)/stepQ)] = minInd+1  # Add 1 to acct for matlab starting idx counting at 1
        Output[int((ii-minQ)/stepQ)] = minOverallDelta
        delta = Deltas[minInd]
        delta = (delta-delta.min())/(delta.max()-delta.min())
        # NOTE(review): newSize is a generator expression consumed by
        # skimage's resize as the output shape -- confirm this stays
        # supported; a tuple would be safer.
        newSize = (round((delta.shape[i])/4) for i in range(2))
        dispImages[int((ii-minQ)/stepQ)] = resize(np.float32(delta), newSize)
    OutputX = range(minQ, maxQ+1, stepQ)
    OutputY = Output
    imin = signal.argrelextrema(OutputY, np.less)[0]+1  # Add one to acct for matlab starting idx counting at 1.
    if(OutputY[-1] < OutputY[-2]):  # Check last point
        imin = np.append(imin, len(OutputY))
    if(OutputY[0] < OutputY[1]):  # Check first point
        imin = np.insert(imin, 0, 1)
    Qualities = imin*stepQ+minQ-1
    return [OutputX, OutputY, dispImages, imin, Qualities, Mins]
pyIFD | pyIFD-main/src/pyIFD/NOI5.py | """
This module provides the NOI5 algorithm
Noise-variance-inconsistency detector, solution 5 (leveraging Principal Component Analysis).
Algorithm attribution:
H. Zeng, Y. Zhan, X. Kang, X. Lin, Image splicing localization using PCA-based
noise level estimation, Multimedia Tools & Applications, 2017.76(4):4783
http://www.escience.cn/people/Zenghui/index.html
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
from numpy.linalg import eigh
import cv2
from scipy.ndimage import median_filter as medfilt
def KMeans(data, N):
    """
    Cluster 1-d data into N bins (the two-means used by PCANoise).

    Args:
        data: 1-d array of values to sort into bins. The caller's array is
            left unmodified (a copy is clipped internally).
        N: number of bins; the initialization is written for N == 2.
    Returns:
        u: Nx1 array of bin centers.
        re: len(data)x2 array; column 0 holds the (possibly clipped)
            value, column 1 its 1-based bin assignment. Labels are
            flipped when bin 1 would cover fewer than half the samples,
            so that bin 1 is the majority bin.
    """
    # Fix: work on a copy -- the original implementation clipped the
    # caller's array in place via data[data > umax] = umax.
    data = np.asarray(data).copy()
    m = data.size
    u = np.zeros((N, 1))
    Sdata = np.sort(data)
    # Initialize the two centers from the upper and lower quartiles.
    u[0] = np.mean(Sdata[-round(m/4)-1:])
    u[1] = np.mean(Sdata[:round(m/4)])
    # Clip outliers above the median of the top decile.
    umax = np.median(Sdata[-round(m/10)-1:])
    data[data > umax] = umax
    for _ in range(200):
        pre_u = u.copy()  # centers of the previous iteration
        tmp = np.zeros((N, m))
        for i in range(N):
            tmp[i, :] = data-u[i]
        tmp = np.abs(tmp)
        junk = np.min(tmp, axis=0)  # distance to the nearest center
        index = np.argmin(tmp, axis=0)  # nearest-center assignment
        quan = np.zeros((m, N))
        for i in range(m):
            quan[i, index[i]] = junk[i]
        # Distance-weighted center update (kept as in the reference code).
        for i in range(N):
            if (np.sum(quan[:, i]) > 0.01):
                u[i] = np.sum(quan[:, i]*data)/np.sum(quan[:, i])
        if (np.linalg.norm(pre_u-u) < 0.02):
            break
    re = np.zeros((m, 2))
    for i in range(m):
        dists = np.zeros((N, 1))
        for j in range(N):
            dists[j] = np.linalg.norm(data[i]-u[j])
        index = np.argmin(dists, axis=0)
        re[i, 0] = data[i]
        re[i, 1] = index+1
    # the tampered area is less than half of the whole image:
    # flip labels so that label 1 is the majority bin.
    label = re[:, 1]
    if list(label).count(1) < int(m/2):
        re[:, 1] = 3-label
    return [u, re]
def PCANoiseLevelEstimator(image, Bsize):
"""
Summary please.
Args:
image: Image to process
Bsize:
Returns:
label:
variance:
"""
UpperBoundLevel = 0.0005
UpperBoundFactor = 3.1
M1 = Bsize
M2 = Bsize
M = M1 * M2
EigenValueCount = 7
EigenValueDiffThreshold = 49.0
LevelStep = 0.05
MinLevel = 0.06
MaxClippedPixelCount = round(np.nextafter(0.1*M, 0.1*M+1))
# ==========================================================================
def Clamp(x, a, b):
"""
Limit input value to a range.
Args:
x: value to clamp
a: minimum value
b: maximum value
Returns:
y: clamped value
"""
y = x
if x < a:
y = a
if x > b:
y = b
return y
# ==========================================================================
def ComputeBlockInfo( image ):
"""
Summary please.
Args:
image:
Returns:
block_info
"""
sums=np.zeros((np.shape(image)[0]-M1,np.shape(image)[1]))
block_info = np.zeros((np.shape(image)[0]*np.shape(image)[1],3))
image2=image**2
sums2=np.zeros(np.shape(sums))
clipped=np.zeros(np.shape(sums))
for x in range(np.shape(image)[0]-M2):
for y in range(np.shape(image)[1]):
if x == 0:
sums[0,y] = np.sum(image[:M2,y])
sums2[0,y] = np.sum(image2[:M2,y])
clipped[0,y]= np.count_nonzero((image[:M2,y]==0) | (image[:M2,y]==255))
else:
sums[x,y] = sums[x-1,y]-image[x-1,y]+image[x+M2-1, y]
sums2[x, y] = sums2[x-1, y] - image2[x-1,y]+image2[x+M2-1, y]
clipped[x, y] = clipped[x-1, y]
if image[x-1, y] in [0,255]:
clipped[x,y]-=1
if image[x+M2-1, y] in [0,255]:
clipped[x,y]+=1
prevsum1=-1
prevsum2=-1
prevclipped=-1
block_count=0
for y in range(np.shape(image)[1]-M1):
for x in range(np.shape(image)[0]-M2):
if x == 0:
sum1=np.sum(sums[y,:M2])
sum2=np.sum(sums2[y,:M2])
clipped_pixel_count=np.sum(clipped[y,:M2])
else:
sum1=prevsum1-sums[y,x-1]+sums[y,x+M2-1]
sum2=prevsum2-sums2[y,x-1]+sums2[y,x+M2-1]
clipped_pixel_count=prevclipped-clipped[y,x-1]+clipped[y,x+M2-1]
prevsum1=sum1
prevsum2=sum2
prevclipped=clipped_pixel_count
if clipped_pixel_count <= MaxClippedPixelCount:
block_info[block_count,0] = (sum2 - sum1*sum1/M) / M
block_info[block_count,1] = x+1
block_info[block_count,2] = y+1
block_count += 1
block_info=np.delete(block_info,slice(block_count,np.shape(image)[0]*np.shape(image)[1]),0)
return block_info
# ==========================================================================
def ComputeStatistics(image, block_info):
"""
Summary please.
Args:
image:
block_info:
Returns:
sum1:
sum2:
subset_size:
"""
loop_iters = len(np.arange(1, MinLevel, -0.05))
sum1 = np.zeros((M, 1, loop_iters))
sum2 = np.zeros((M, M, loop_iters))
subset_size = np.zeros((loop_iters, 1))
subset_count = 0
max_index = np.shape(block_info)[0]-1
for p in np.arange(1, MinLevel, -LevelStep):
q = 0
if p - LevelStep > MinLevel:
q = p - LevelStep
beg_index = Clamp(round(q*max_index+LevelStep/2) + 1, 1, max_index+1)
end_index = Clamp(round(p*max_index+LevelStep/2) + 1, 1, max_index+1)
curr_sum1 = np.zeros((M, 1))
curr_sum2 = np.zeros((M, M))
for k in range(int(beg_index)-1, int(end_index)-1):
curr_x = int(block_info[k, 1])
curr_y = int(block_info[k, 2])
block = np.reshape(image[curr_y-1:curr_y+M2-1, curr_x-1:curr_x+M1-1], (M, 1), order='F').astype("double")
curr_sum1 += block
curr_sum2 += block * block.T
subset_count += 1
sum1[:, :, subset_count-1] = curr_sum1.copy()
sum2[:, :, subset_count-1] = curr_sum2.copy()
subset_size[subset_count-1] = end_index - beg_index
for i in range(len(subset_size)-1, 0, -1):
sum1[:, :, i-1] += sum1[:, :, i]
sum2[:, :, i-1] += sum2[:, :, i]
subset_size[i-1] += subset_size[i]
return [sum1, sum2, subset_size]
# ==========================================================================
def ComputeUpperBound(block_info):
"""
Summary please.
Args:
block_info:
Returns:
upper_bound:
"""
max_index = np.shape(block_info)[0] - 1
zero_idx = np.where(block_info[:, 0] == 0)[0]
if zero_idx.size == 0:
nozeroindex = round(UpperBoundLevel*max_index)
else:
nozeroindex = min(np.max(np.where(block_info[:, 0] == 0)[0])+1, np.shape(block_info)[0]-1)
index = Clamp(round(UpperBoundLevel*max_index) + 1, nozeroindex, np.shape(block_info)[0]-1)
upper_bound = UpperBoundFactor * block_info[index, 0]
return upper_bound
# ==========================================================================
def ApplyPCA(sum1, sum2, subset_size):
    """
    Eigen-decompose the covariance implied by accumulated moments.

    Args:
        sum1: (M, 1) accumulated sum of block vectors.
        sum2: (M, M) accumulated sum of outer products.
        subset_size: number of blocks folded into the sums.
    Returns:
        Eigenvalues of the covariance matrix, in ascending order.
    """
    mean_vec = sum1 / subset_size
    second_moment = sum2 / subset_size
    # (M,1) * (1,M) broadcast yields the mean outer product.
    covariance = second_moment - mean_vec * mean_vec.T
    eigenvalues, _ = eigh(covariance)
    return eigenvalues
# ==========================================================================
def GetNextEstimate(sum1, sum2, subset_size, prev_estimate, upper_bound):
    """
    One fixed-point iteration of the noise-variance estimate.

    Walks the nested subsets and stops at the first whose eigenvalue spread
    is flat enough (or whose smallest eigenvalue is essentially zero),
    returning that subset's smallest PCA eigenvalue.

    Args:
        sum1: (M, 1, S) stacked first-moment sums per subset.
        sum2: (M, M, S) stacked second-moment sums per subset.
        subset_size: per-subset block counts.
        prev_estimate: variance estimate from the previous iteration.
        upper_bound: cap on an acceptable variance estimate.
    Returns:
        variance: smallest eigenvalue of the chosen subset.
    """
    variance = 0
    for idx in range(len(subset_size)):
        spectrum = ApplyPCA(sum1[:, :, idx], sum2[:, :, idx], subset_size[idx])
        variance = spectrum[0]
        # A (near-)zero smallest eigenvalue cannot shrink further; stop here.
        if variance < 0.00001:
            break
        spread = spectrum[EigenValueCount-1] - spectrum[0]
        flatness_limit = EigenValueDiffThreshold * prev_estimate / subset_size[idx]**0.5
        # Accept this subset when its spectrum is flat and the value plausible.
        if spread < flatness_limit and variance < upper_bound:
            break
    return variance
# ==========================================================================
label = 0
block_info = ComputeBlockInfo(image)
if np.min(np.shape(block_info)) == 0:
label = 1
variance = np.var(image)
else:
idx = np.lexsort((block_info[:, 2], block_info[:, 0]))
block_info = np.asarray([block_info[i, :] for i in idx])
[sum1, sum2, subset_size] = ComputeStatistics(image, block_info)
if subset_size[-1] == 0:
label = 1
variance = np.var(image)
else:
upper_bound = ComputeUpperBound(block_info)
prev_variance = 0
variance = upper_bound
for iter in range(10):
if(np.abs(prev_variance - variance) < 0.00001):
break
prev_variance = variance
variance = GetNextEstimate(sum1, sum2, subset_size, variance, upper_bound)
if variance < 0:
label = 1
variance = np.var(image)
variance = np.sqrt(variance)
return [label, variance]
def PCANoise(impath):
    """
    Main driver for NOI5 algorithm.

    Estimates per-block noise levels at two scales (64x64 and 32x32),
    blends them, and clusters the blend into a tamper map.

    Args:
        impath: input image path.
    Returns:
        Noise_mix2: OutputMap
        bwpp: OutputMap (Quantized)
    """
    B = 64
    imin = cv2.cvtColor(cv2.imread(impath), cv2.COLOR_BGR2GRAY).astype("double")
    [M, N] = np.shape(imin)
    # Crop so both dimensions are multiples of the block size.
    imin = np.array(imin[:int(np.floor(M/B)*B), :int(np.floor(N/B)*B)])
    [M, N] = np.shape(imin)
    irange = int(np.floor(M/B))
    jrange = int(np.floor(N/B))
    Ib = np.zeros((irange, jrange))
    Noise_64 = np.zeros((irange, jrange))
    # Noise estimate for every 64x64 block.
    for i in range(irange):
        for j in range(jrange):
            Ib = imin[i*B:(i+1)*B, j*B:(j+1)*B]
            Noise_64[i, j] = PCANoiseLevelEstimator(Ib, 5)[1]
    # Two-way clustering of block noise levels (column-major flatten).
    [u, re] = KMeans(Noise_64.flatten(order='F'), 2)
    result4 = np.reshape(re[:, 1], np.shape(Noise_64), order='F')
    B = 32
    irange = int(np.floor(M/B))
    jrange = int(np.floor(N/B))
    label32 = np.zeros((irange, jrange))
    Noise_32 = np.zeros((irange, jrange))
    # Noise estimate for every 32x32 block, keeping the failure labels.
    for i in range(irange):
        for j in range(jrange):
            Ib = imin[i*B:(i+1)*B, j*B:(j+1)*B]
            [label32[i, j], Noise_32[i, j]] = PCANoiseLevelEstimator(Ib, 5)
    # Where estimation failed (label == 1), use a median-filtered fallback.
    MEDNoise_32 = medfilt(Noise_32, [5, 5])
    Noise_32[label32 == 1] = MEDNoise_32[label32 == 1]
    [u, re] = KMeans(Noise_32.flatten(order='F'), 2)
    irange = int(M/64)
    jrange = int(N/64)
    # Replicate each 64x64 cell onto the 2x-finer 32x32 grid.
    Noise_mix = np.zeros((irange*2, jrange*2))
    initialdetected = np.zeros((irange*2, jrange*2))
    for i in range(irange):
        for j in range(jrange):
            Noise_mix[2*i:2*(i+1), 2*j:2*(j+1)] = Noise_64[i, j]
            initialdetected[2*i:2*(i+1), 2*j:2*(j+1)] = result4[i, j]
    # Blend scales. NOTE: deliberately reuses i, j with their final loop values.
    Noise_mix = 0.8*Noise_mix+0.2*Noise_32[:2*(i+1), :2*(j+1)]
    Noise_mix2 = Noise_mix.copy()
    # Boundaries between the two initial clusters (4-neighbour differences).
    DL = initialdetected[1:-1, :-2] - initialdetected[1:-1, 1:-1]
    DR = initialdetected[1:-1, 1:-1] - initialdetected[1:-1, 2:]
    DU = initialdetected[:-2, 1:-1] - initialdetected[1:-1, 1:-1]
    DD = initialdetected[1:-1, 1:-1] - initialdetected[2:, 1:-1]
    Edge = np.zeros(np.shape(initialdetected))
    Edge[1:-1, 1:-1] = np.abs(DL)+np.abs(DR)+np.abs(DU)+np.abs(DD)
    # On boundary cells prefer the finer 32x32 estimate.
    g = [Edge > 0]
    Noise_mix2[tuple(g)] = Noise_32[tuple(g)]
    [u, re] = KMeans(Noise_mix2.flatten(order='F'), 2)
    result4 = np.reshape(re[:, 1], np.shape(Noise_mix2), order='F')
    # Drop connected components smaller than 4 cells, then relabel.
    labels = cv2.connectedComponentsWithStats(np.uint8(result4-1))
    bwpp = labels[1]
    area = labels[2][:, 4]
    for num in range(1, len(area)):
        if (area[num] < 4):
            result4[bwpp == num] = 1
    bwpp = cv2.connectedComponents(np.uint8(result4-1))[1]
    return [Noise_mix2, bwpp.astype("uint8")]
| 13,151 | 33.25 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/NADQ.py | """
This module provides the NADQ algorithm
Aligned- and Non-aligned-double-JPEG-compression-based detector.
Algorithm attribution:
T.Bianchi, A.Piva, "Image Forgery Localization via Block-Grained
Analysis of JPEG Artifacts", IEEE Transactions on Information Forensics &
Security, vol. 7, no. 3, June 2012, pp. 1003 - 1017.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
from pyIFD.util import ibdct, jpeg_rec, bdct, dequantize, ibdct
from scipy.signal import convolve2d
from scipy.ndimage import correlate
from scipy.fft import idct
import numpy as np
import jpegio as jio
import math
from scipy.signal import convolve2d
from scipy.ndimage import correlate
from scipy.fft import idct
from scipy.signal import fftconvolve
def NADQ(impath):
    """
    Main driver for NADQ algorithm

    Args:
        impath: Input image path (only .jpg is supported).
    Returns:
        OutputMap: likelihood map of double-compression artifacts, or None
        when the file is not a JPEG or when reading/analysis fails.
    """
    if impath[-4:] != ".jpg":
        # Bug fix: the original fell through to `return OutputMap` with the
        # variable unbound, raising UnboundLocalError instead of returning.
        print('Only .jpg supported')
        return None
    try:
        OutputMap = getJmapNA_EM(jio.read(impath))
    except Exception as e:
        print('JPEGIO exception: ' + str(e))
        return None
    return OutputMap
# JPEG_QTABLE Generate standard JPEG quantization tables
#
# T=JPEG_QTABLE(QUALITY,TNUM,FORCE_BASELINE)
#
# Returns a quantization table T given in JPEG spec, section K.1 and scaled
# using a quality factor. The scaling method used is the same as that used
# by the IJG (Independent JPEG Group) code library.
#
# QUALITY values should range from 1 (terrible) to 100 (very good), the
# scale recommended by IJG. Default is 50, which represents the tables
# defined by the standard used without scaling.
#
# TNUM should be a valid table number, either 0 (used primarily for
# luminance channels), or 1 (used for chromatic channels). Default is 0.
#
# FORCE_BASELINE clamps the quantization table entries to have values
# between 1..255 to ensure baseline compatibility with all JPEG decoders.
# By default, values are clamped to a range between 1..32767. These are
# the same ranges used by the IJG code library for generating standard
# quantization tables.
def jpeg_qtable(quality=50, tnum=0, force_baseline=0):
    """
    Generate a standard JPEG quantization table (JPEG spec, section K.1),
    scaled by a quality factor using the IJG (Independent JPEG Group) method.

    Args:
        quality: 1 (terrible) .. 100 (very good); values outside are clamped.
            50 returns the standard tables unscaled.
        tnum: table number; 0 = luminance, 1 = chrominance.
        force_baseline: when truthy, clamp entries to 1..255 for baseline
            JPEG decoder compatibility; otherwise entries lie in 1..32767.
    Returns:
        8x8 numpy array of quantizer step sizes.
    Raises:
        ValueError: if tnum is not 0 or 1.
    """
    # Convert to the IJG linear quality scale.
    if (quality <= 0):
        quality = 1
    if (quality > 100):
        quality = 100
    if (quality < 50):
        quality = 5000 / quality
    else:
        quality = 200 - quality*2
    if tnum == 0:
        # This is table 0 (the luminance table):
        t = [16, 11, 10, 16, 24, 40, 51, 61,
             12, 12, 14, 19, 26, 58, 60, 55,
             14, 13, 16, 24, 40, 57, 69, 56,
             14, 17, 22, 29, 51, 87, 80, 62,
             18, 22, 37, 56, 68, 109, 103, 77,
             24, 35, 55, 64, 81, 104, 113, 92,
             49, 64, 78, 87, 103, 121, 120, 101,
             72, 92, 95, 98, 112, 100, 103, 99]
    elif tnum == 1:
        # This is table 1 (the chrominance table):
        t = [17, 18, 24, 47, 99, 99, 99, 99,
             18, 21, 26, 66, 99, 99, 99, 99,
             24, 26, 56, 99, 99, 99, 99, 99,
             47, 66, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99]
    else:
        # Bug fix: previously an invalid tnum left t unbound and the reshape
        # below raised UnboundLocalError; fail explicitly instead.
        raise ValueError("tnum must be 0 (luminance) or 1 (chrominance)")
    # The flat list is row-major; reshape 'F' + transpose reproduces that.
    t = np.reshape(t, (8, 8), order='F').T
    t = np.floor((t * quality + 50)/100)
    t[t < 1] = 1
    t[t > 32767] = 32767  # max quantizer needed for 12 bits
    if (force_baseline):
        t[t > 255] = 255
    return t
def LLR(x, nz, Q, phase, sig):
binHist=range(-2**11, 2**11)
center=2**11
# Finished review
w = int(np.ceil(3*sig))
k = list(range(-w,w+1))
g = np.array([math.exp(-kk**2/sig**2/2) for kk in k])
g = g/np.sum(g)
N = np.size(x) / np.size(binHist)
bppm = np.zeros(np.shape(binHist))
bppm[center + phase::Q] = Q
bppm[center + phase::-Q] = Q
bppm = np.convolve(g, bppm)
bppm = bppm[w:-w]
bppm = (bppm*N + 1)
LLRmap = np.log(bppm / np.mean(bppm))
LLRmap[center] = nz * LLRmap[center]
x=np.round(x).astype("int")+center
def lmap(xx):
return LLRmap[xx]
vlmap=np.vectorize(lmap)
L = vlmap(x)
return L
def EMperiod(x, Qmin, Qmax, alpha0, h0, dLmin, maxIter, hcal, bias, sig):
    """
    Expectation-maximization search for the first-compression step Q.

    For each candidate step in [Qmin, Qmax], fits a two-component mixture of
    the untampered model h0 and the double-quantization model h1period(x, Q),
    and returns the candidate with the highest mixture log-likelihood.

    Args:
        x: flattened DCT coefficients of one frequency.
        Qmin, Qmax: inclusive search range for the quantization step.
        alpha0: initial mixture weight of the h0 component.
        h0: per-coefficient probabilities under the untampered model.
        dLmin: stop once the best per-candidate improvement drops below this.
        maxIter: maximum number of EM sweeps.
        hcal: calibration histogram forwarded to h1period.
        bias: rounding-error mean forwarded to h1period.
        sig: rounding-error std dev forwarded to h1period.
    Returns:
        [Q, alpha, Lmax]: best step, its mixture weight, its log-likelihood.
    """
    # Finished review
    Qvec = list(range(int(Qmin),int(Qmax)+1))
    alphavec = alpha0*np.ones(np.shape(Qvec))
    # Precompute the h1 model once per candidate step.
    h1mat = np.zeros((len(Qvec), len(x)))
    for k in range(len(Qvec)):
        h1mat[k,:] = h1period(x, Qvec[k], hcal, bias, sig)
    Lvec = np.ones(np.shape(Qvec))*float('-inf')
    Lmax = float('-inf')
    delta_L = float('inf')
    ii = 0
    # Markos: for cases where the if clause is never activated
    Q=Qvec[0]
    alpha=alphavec[0]
    while delta_L > dLmin and ii < maxIter:
        ii +=1
        for k in range(len(Qvec)):
            # expectation
            beta0 = h0*alphavec[k] / (h0*alphavec[k] + h1mat[k,:]*(1 - alphavec[k]))
            # maximization
            alphavec[k] = np.mean(beta0)
            # compute true log-likelihood of mixture
            L = np.sum(np.log(alphavec[k]*h0 + (1-alphavec[k])*h1mat[k,:]))
            if (L > Lmax):
                Lmax = L
                Q = Qvec[k]
                alpha = alphavec[k]
            # Track the smallest per-candidate improvement this sweep.
            if (L - Lvec[k] < delta_L):
                delta_L = L - Lvec[k]
            Lvec[k] = L
    return [Q, alpha, Lmax]
def h1period(x, Q, hcal, bias, sig):
    """
    Per-coefficient probabilities under the double-quantization model.

    Simulates re-quantization of the calibration histogram with step Q,
    dequantization back onto the full bin grid, and rounding/truncation noise
    (Gaussian, mean `bias`, std `sig`), then evaluates the normalized result
    at the values in x.

    Args:
        x: DCT coefficient values to evaluate.
        Q: candidate quantization step.
        hcal: calibration histogram over the bin range [-2^11, 2^11).
        bias: mean of the rounding/truncation error.
        sig: std dev of the rounding/truncation error.
    Returns:
        p1: model probability for each entry of x.
    """
    #Check h1 period first
    binHist=range(-2**11,2**11)
    center=2**11
    #Finished review
    N = np.sum(hcal)
    # simulate quantization
    if Q % 2 == 0:
        # Even step: half-weight the two boundary bins of the averaging window.
        hs = np.ones(Q-1)
        hs=np.append(hs,0.5)
        hs=np.insert(hs,0, 0.5)
        ws = int(Q/2)
    else:
        hs = np.ones(Q)
        ws = int((Q-1)/2)
    h2 = np.convolve(hcal,hs)
    # simulate dequantization
    h1 = np.zeros(np.shape(binHist))
    h1[center::Q] = h2[center + ws:-ws:Q]
    h1[center::-Q] = h2[center + ws:ws-1:-Q]
    # simulate rounding/truncation
    w = int(np.ceil(3*sig))
    k = range(-w,w+1)
    g = [math.exp(-(kk+bias)**2/sig**2/2) for kk in k]
    h1 = np.convolve(h1, g)
    h1 = h1[w:-w]
    # normalize probability and use Laplace correction to avoid p1 = 0
    h1 /= sum(h1)
    h1 = (h1*N+1)/(N+np.size(binHist))
    x=np.array(x)
    # nextafter nudges exact .5 values so rounding matches MATLAB's behavior.
    p1=np.take(h1,np.round(np.nextafter(x,x+1)).astype("int")+center)
    return p1
def getJmapNA_EM(image, ncomp=1, c2=6):
    """
    Detects and localizes tampered areas in double compressed JPEG images.

    Estimates, per DCT frequency, the most likely first-compression grid
    shift and quantization step via EM, accumulates per-block log-likelihood
    maps, and smooths their sum into the output map.

    Args:
        image: JPEG object as returned by jpegio.read.
        ncomp: index of color component (1 = Y, 2 = Cb, 3 = Cr).
        c2: number of DCT coefficients to consider (1 <= c2 <= 64).
    Returns:
        OutputMap: 3x3-smoothed sum over frequencies of LLRmap, where
        LLRmap[:, :, c] is the estimated likelihood of each 8x8 block being
        doubly compressed under the standard model for the c-th frequency
        (zig-zag order).
    """
    coeffArray = image.coef_arrays[ncomp-1]
    qtable = image.quant_tables[image.comp_info[ncomp-1].quant_tbl_no]
    q1table = np.ones((8,8))
    # Search range for the first quantization step, per frequency.
    minQ = np.maximum(2,np.floor(qtable/np.sqrt(3)))
    maxQ = np.maximum(jpeg_qtable(50),qtable)
    # estimate rounding and truncation error
    Im = jpeg_rec(image)[0]
    ImTmp = Im.copy()
    ImTmp=np.maximum(0,ImTmp)
    ImTmp[ImTmp > 255] = 255
    E = Im - np.round(ImTmp)
    Edct = bdct(0.299 * E[:, :, 0] + 0.587 * E[:, :, 1] + 0.114 * E[:, :, 2])
    # compute DCT coeffs of decompressed image
    Im = ibdct(dequantize(coeffArray, qtable))
    # JPEG zig-zag order of the 64 frequencies (1-based).
    coeff = [1, 9, 2, 3, 10, 17, 25, 18, 11, 4, 5, 12, 19, 26, 33, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 57, 50, 43, 36, 29, 22, 15, 8, 16, 23, 30, 37, 44, 51, 58, 59, 52,
             45, 38, 31, 24, 32, 39, 46, 53, 60, 61, 54, 47, 40, 48, 55, 62, 63, 56, 64]
    center = 2**11
    # DC response at every pixel shift (8x8 mean filter).
    B = np.ones((8,8))/8
    DC = np.rot90(convolve2d(np.rot90(Im, 2), np.rot90(B, 2)), 2)
    DC = DC[7:, 7:]
    EDC = Edct[::8, ::8]
    varE = np.var(EDC)
    bias = np.mean(EDC)
    # Combined noise: uniform quantization error plus rounding/truncation.
    sig = np.sqrt(qtable[0, 0]**2 / 12 + varE)
    alphatable = np.ones((8,8))
    Ims=np.shape(Im)
    LLRmap = np.zeros((int(Ims[0]/8), int(Ims[1]/8), c2))
    LLRmap_s = np.zeros((int(Ims[0]/8), int(Ims[1]/8), c2))
    k1e = 1
    k2e = 1
    Lmax = -np.inf
    # estimate shift of first compression
    for k1 in range(8):
        for k2 in range(8):
            binHist = range(-2**11, 2**11)
            if (k1 + 1 > 1 or k2 + 1 > 1):
                DCpoly = DC[k1::8, k2::8]
                # choose shift for estimating unquantized distribution through
                # calibration
                if k1 < 4:
                    k1cal = k1 + 2
                else:
                    k1cal = k1
                if k2 < 4:
                    k2cal = k2 + 2
                else:
                    k2cal = k2
                DCcal = DC[k1cal-1::8, k2cal-1::8]
                # Bin edges at half-integers, widened to cover outliers.
                binHist = np.arange(-2**11, 2**11-1)+0.5
                binHist = np.append(binHist, max(2**11, np.max(DCcal)))
                binHist = np.insert(binHist, 0, min(-2**11, np.min(DCcal)))
                hcal = np.histogram(DCcal, binHist)[0]
                hcalnorm = (hcal+1)/(np.size(DCcal)+np.size(binHist)-1)
                # define mixture components
                h0=np.array(np.take(hcalnorm,np.round(np.ndarray.flatten(DCpoly,order='F')).astype("int")+center))
                # estimate parameters of first compression
                [Q, alpha, L] = EMperiod(np.ndarray.flatten(DCpoly,order='F'), minQ[0, 0], maxQ[0, 0], 0.95, h0, 5, 20, hcal, bias, sig)
                if L > Lmax:
                    # simplified model
                    nz = np.count_nonzero(DCpoly)/np.size(DCpoly)
                    LLRmap_s[:, :, 0] = LLR(DCpoly, nz, Q, int(np.round(bias)), sig)
                    # standard model
                    ppu = np.log(np.divide(h1period(range(-2**11,2**11), Q, hcal, bias, sig),np.take(hcalnorm,range(2**12))))
                    DCpoly=np.round(DCpoly).astype("int")+center
                    def pmap(xx):
                        return ppu[xx]
                    vpmap=np.vectorize(pmap)
                    LLRmap[:, :, 0]=vpmap(DCpoly)
                    q1table[0, 0] = Q
                    alphatable[0, 0] = alpha
                    k1e = k1+1
                    k2e = k2+1
                    Lmax = L
    # Remaining AC frequencies, using the shift (k1e, k2e) found above.
    for index in range(1, c2):
        binHist=range(-2**11,2**11)
        coe = coeff[index]
        ic1 = int(np.ceil(coe/8))
        ic2 = coe % 8
        if ic2 == 0:
            ic2 = 8
        # Basis image of this frequency, used as a sliding filter.
        A = np.zeros((8,8))
        A[ic1-1, ic2-1] = 1
        B = idct(idct(A.T, norm='ortho').T, norm='ortho')
        AC = np.rot90(fftconvolve(np.rot90(Im, 2), np.rot90(B, 2)), 2) # This part is slow. Maybe look into cv2 replacement
        AC = AC[7:, 7:]
        ACpoly = AC[k1e-1::8, k2e-1::8]
        # choose shift for estimating unquantized distribution through
        # calibration
        if k1e < 5:
            k1cal = k1e + 1
        else:
            k1cal = k1e - 1
        if k2e < 5:
            k2cal = k2e + 1
        else:
            k2cal = k2e - 1
        ACcal = AC[k1cal-1::8, k2cal-1::8]
        binHist = np.arange(-2**11, 2**11-1)+0.5
        binHist = np.append(binHist, max(2**11, np.max(ACcal)))
        binHist = np.insert(binHist, 0, min(-2**11, np.min(ACcal)))
        hcal = np.histogram(ACcal, binHist)[0]
        hcalnorm = (hcal+1)/(np.size(ACcal)+np.size(binHist)-1)
        # estimate std dev of quantization error on DCT coeffs (quantization of
        # second compression plus rounding/truncation between first and second
        # compression)
        EAC = Edct[ic1-1::8, ic2-1::8]
        varE = np.var(EAC)
        if index == 1:
            bias = np.mean(EAC)
        else:
            bias = 0
        sig = np.sqrt(qtable[ic1-1, ic2-1]**2 / 12 + varE)
        h0=np.array(np.take(hcalnorm,np.round(np.ndarray.flatten(ACpoly,order='F')).astype("int")+center))
        # estimate parameters of first compression
        [Q, alpha] = EMperiod(np.ndarray.flatten(ACpoly,order='F'), minQ[ic1-1, ic2-1], maxQ[ic1-1, ic2-1], 0.95, h0, 5, 20, hcal, bias, sig)[:2]
        q1table[ic1-1, ic2-1] = Q
        alphatable[ic1-1, ic2-1] = alpha
        # simplified model
        nz = np.count_nonzero(ACpoly)/np.size(ACpoly)
        LLRmap_s[:, :, index] = LLR(ACpoly, nz, Q, int(np.round(bias)), sig)
        # standard model
        ppu = np.log(np.divide(h1period(range(-2**11,2**11), Q, hcal, bias, sig),np.take(hcalnorm,range(2**12))))
        ACpoly=np.round(ACpoly).astype("int")+center
        # vpmap (from the shift loop above) closes over ppu, so it uses the
        # ppu recomputed on the previous line.
        LLRmap[:, :, index] = vpmap(ACpoly)
    OutputMap=correlate(np.sum(LLRmap,2),np.ones((3,3)),mode='reflect')
    return OutputMap
| 13,218 | 36.028011 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/DCT.py | """
This module provides the DCT algorithm
JPEG-block-artifact-based detector, solution 2 (leveraging Discrete Cosine Transforms).
Algorithm attribution:
Ye, Shuiming, Qibin Sun, and Ee-Chien Chang. "Detecting digital image forgeries
by measuring inconsistencies of blocking artifact." In Multimedia and Expo, 2007
IEEE International Conference on, pp. 12-15. IEEE, 2007.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
import jpegio as jio
from pyIFD.util import dequantize, extrema, bdct
import cv2
def matlab_style_gauss2D(shape=(3, 3), sigma=0.5):
    """
    Build a 2-D Gaussian kernel equivalent to MATLAB's
    fspecial('gaussian', shape, sigma).

    Args:
        shape: (rows, cols) of the kernel.
        sigma: standard deviation of the Gaussian.
    Returns:
        Kernel of the requested shape, normalized to sum to 1 (when non-zero).
    """
    half_r = (shape[0] - 1.) / 2.
    half_c = (shape[1] - 1.) / 2.
    rows, cols = np.ogrid[-half_r:half_r + 1, -half_c:half_c + 1]
    kernel = np.exp(-(cols * cols + rows * rows) / (2. * sigma * sigma))
    # Zero the tails below machine precision, as MATLAB does.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    total = kernel.sum()
    if total != 0:
        kernel /= total
    return kernel
def hist3d(arr, bins):
    """
    Histogram each (i, j) fiber of a 3-D array along its last axis.

    The first len(bins)-1 slots hold np.histogram counts over the given bin
    edges; the final slot counts values exactly equal to the last edge.

    Args:
        arr: 3-D array; histograms are taken over arr[i, j, :].
        bins: sequence of bin edges.
    Returns:
        Array of shape (arr.shape[0], arr.shape[1], len(bins)).
    """
    n_rows, n_cols = np.shape(arr)[:2]
    counts = np.zeros((n_rows, n_cols, len(bins)))
    for i in range(n_rows):
        for j in range(n_cols):
            fiber = arr[i, j, :]
            counts[i, j, :-1] = np.histogram(fiber, bins)[0]
            counts[i, j, -1] = np.count_nonzero(fiber == bins[-1])
    return counts
def DCT(impath):
    """
    Main driver for DCT algorithm.

    JPEG files are analysed through their stored coefficients (via jpegio);
    any other format is decoded with OpenCV and transformed on the fly.

    Args:
        impath: Input image path.
    Returns:
        OutputMap, or None when reading the JPEG fails.
    """
    if impath[-4:] != ".jpg":
        return GetDCTArtifact(cv2.imread(impath), png=True)
    try:
        return GetDCTArtifact(jio.read(impath))
    except Exception as e:
        print('JPEGIO exception: ' + str(e))
        return
def GetDCTArtifact(im, png=False):
    """
    Determines DCT artifacts.

    Estimates, per DCT frequency, a quantization measure from the periodicity
    of the coefficient histogram (peaks in its FFT power spectrum), then
    scores every 8x8 block by how far its coefficients lie from multiples of
    those estimates.

    Args:
        im: Input image (jpegio struct when png is False, pixel array otherwise).
        png: True when im is a decoded pixel array rather than a JPEG struct.
    Returns:
        BMat: OutputMap
    """
    MaxCoeffs = 32
    # JPEG zig-zag scan order of the 64 DCT frequencies (1-based).
    coeff = [1, 9, 2, 3, 10, 17, 25, 18, 11, 4, 5, 12, 19, 26, 33, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 57, 50, 43, 36, 29, 22, 15, 8, 16,
             23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 24, 32, 39, 46, 53, 60, 61, 54, 47, 40, 48, 55, 62, 63, 56, 64]
    # Depending on whether im was created using jpeg_read (and thus is a struct)
    # or CleanUpImage(/imread), run a different code block for DCT block
    # extraction
    if png:
        im = np.double(im)
        Y=0.299*im[:, :, 0]+0.587*im[:, :, 1]+0.114*im[:, :, 2]
        Y = Y[:int(np.floor(np.shape(Y)[0]/8)*8), :int(np.floor(np.shape(Y)[1]/8)*8)]
        Y -= 128
        YDCT=np.round(bdct(Y,8))
        imSize=np.shape(Y)
    else:
        Q = im.quant_tables[0]
        YDCT = im.coef_arrays[0]
        YDCT = dequantize(YDCT, Q)
        imSize = np.shape(YDCT)
    # Gather all 8x8 blocks into an (8, 8, nBlocks) stack.
    YDCT_Block = np.reshape(YDCT, (8, round(imSize[0]/8), 8, round(imSize[1]/8)), order='F')
    YDCT_Block = np.transpose(YDCT_Block, [0, 2, 1, 3])
    YDCT_Block = np.reshape(YDCT_Block, (8, 8, round(imSize[0]*imSize[1]/64)), order='F')
    # Per-frequency coefficient histograms over [-257, 257], edges trimmed.
    DCTHists = hist3d(YDCT_Block, np.arange(-257, 258))
    DCTHists = DCTHists[:, :, 1:-1]
    QEst = np.zeros((8, 8))
    # skip the DC term
    for coeffIndex in range(1, MaxCoeffs):
        NoPeaks = False
        coe = coeff[coeffIndex]
        startY = coe % 8
        if startY == 0:
            startY = 8
        startX = int(np.ceil(coe/8))
        DCTHist = np.ndarray.flatten(DCTHists[startY-1, startX-1, :], order='F')
        # Histogram periodicity shows up as peaks in the FFT power spectrum.
        HistFFT = np.fft.fft(DCTHist)-1
        Power = abs(HistFFT)
        PowerFilterSize = 3
        g = matlab_style_gauss2D([1, 51], PowerFilterSize)
        PowerFilt = np.convolve(Power, np.ravel(g), 'same')
        # Walk past the DC lobe of the smoothed spectrum to its first valley.
        Valley = 1
        while (PowerFilt[Valley-1] <= PowerFilt[Valley]):
            Valley += 1
        Valley += 1
        while (Valley < len(PowerFilt)-1) and (PowerFilt[Valley-1] >= PowerFilt[Valley]):
            Valley = Valley+1
        if Valley*2 < len(Power)*0.8:
            Power = Power[Valley-1:-Valley]
        else:
            NoPeaks = True
        # Count deep minima of the spectrum's 2nd difference; that count is
        # used below as the quantization estimate for this frequency.
        Diff2 = np.diff(Power, 2)
        if len(Diff2) == 0:
            Diff2 = 0
        g = matlab_style_gauss2D([1, 51], 5)
        yfilt = np.convolve(Diff2, np.ravel(g), 'same')
        yfilt[yfilt > (min(yfilt)/5)] = 0
        imin = extrema(yfilt)
        if NoPeaks is True:
            imin = []
        QEst[startY-1, startX-1] = len(imin)
    # Score each block by its distance from the estimated quantization grid.
    D = np.tile(QEst[:, :, None], [1, 1, np.shape(YDCT_Block)[2]])
    with np.errstate(invalid='ignore', divide='ignore'):
        BMat = abs(YDCT_Block-np.round(YDCT_Block/D)*D)
    BMat[np.isnan(BMat)] = 0
    BMat = np.sum(np.sum(BMat, 0), 0)
    BMat = np.reshape(BMat, (int(imSize[0]/8), int(imSize[1]/8)), order='F')
    return BMat.astype("uint8")
| 4,867 | 31.891892 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/NOI2.py | """
This module provides the NOI2 algorithm
Noise-variance-inconsistency detector, solution 2.
Algorithm attribution:
Lyu, Siwei, Xunyu Pan, and Xing Zhang. "Exposing region splicing forgeries
with blind local noise estimation." International Journal of Computer Vision
110, no. 2 (2014): 202-221.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
import cv2
from PIL import Image
from scipy.signal import convolve2d
def conv2(x, y, mode='same'):
    """
    2-D convolution matching MATLAB's conv2.

    scipy's convolve2d anchors 'same' output differently from MATLAB, so both
    inputs are rotated 180 degrees before the call and the result rotated back.

    Args:
        x: 2-D array.
        y: 2-D kernel.
        mode: 'same', 'full' or 'valid'.
    Returns:
        Convolution of x with y.
    """
    # np.flip with no axis flips both axes, i.e. a 180-degree rotation.
    flipped = convolve2d(np.flip(x), np.flip(y), mode=mode)
    return np.flip(flipped)
def GetNoiseMaps_hdd(im, filter_type, filter_size, block_rad):
    """
    Estimate the local noise-variance map of a BGR image (disk-friendly path).

    The image is converted to a luminance channel, lightly blurred with a box
    filter, passed to the kurtosis-based local variance estimator, and the
    result is downscaled by 4 with box resampling.

    Args:
        im: BGR image array (as returned by cv2.imread).
        filter_type: band-pass filter family: 'haar', 'dct' or 'rand'.
        filter_size: support size of the filter.
        block_rad: radius of the local estimation blocks.
    Returns:
        estV: downscaled map of estimated local noise variance.
    """
    # BT.601-style luma weights; channels are indexed BGR here.
    weights = [65.481/255, 128.553/255, 24.966/255]
    luma = weights[0]*im[:, :, 2] + weights[1]*im[:, :, 1] + weights[2]*im[:, :, 0] + 16
    luma = np.round(luma)
    box = np.ones((filter_size, 1))
    box = (box * box.T) / (filter_size**2)
    smoothed = conv2(luma, box, 'same')
    var_full = localNoiVarEstimate_hdd(smoothed, filter_type, filter_size, block_rad)
    target = tuple(np.round((np.array(np.shape(var_full)) + 0.1) / 4))
    # PIL expects (width, height), hence the flip of the (rows, cols) target.
    estV = np.array(Image.fromarray(var_full).resize(np.flip(target).astype(int), resample=Image.BOX))
    # Guard against degenerate (near-zero) estimates.
    estV[estV <= 0.001] = np.mean(estV)
    return estV
def GetNoiseMaps_ram(im, filter_type, filter_size, block_rad):
    """
    Estimate the local noise-variance map of a BGR image (in-memory path).
    Mirrors GetNoiseMaps_hdd.

    Args:
        im: BGR image array (as returned by cv2.imread).
        filter_type: band-pass filter family: 'haar', 'dct' or 'rand'.
        filter_size: support size of the filter.
        block_rad: radius of the local estimation blocks.
    Returns:
        estV: downscaled map of estimated local noise variance.
    """
    origT = [65.481/255, 128.553/255, 24.966/255]
    Y = origT[0]*im[:, :, 2]+origT[1]*im[:, :, 1]+origT[2]*im[:, :, 0]+16
    im = np.round(Y)
    flt = np.ones((filter_size, 1))
    flt = (flt*np.transpose(flt))/(filter_size**2)
    noiIm = conv2(im, flt, 'same')
    estV_tmp = localNoiVarEstimate_hdd(noiIm, filter_type, filter_size, block_rad)
    # Bug fix: np.imresize does not exist (the original call raised
    # AttributeError at runtime). Downscale by 4 with PIL box resampling,
    # exactly as GetNoiseMaps_hdd does.
    estVSize = tuple(np.round((np.array(np.shape(estV_tmp)) + 0.1) / 4))
    estV = np.array(Image.fromarray(estV_tmp).resize(np.flip(estVSize).astype(int), resample=Image.BOX))
    estV[estV <= 0.001] = np.mean(estV)
    return estV
def block_avg(X, d, pad='zero'):
    """
    Computes the avg of elements for all overlapping wd x wd windows in data X,
    where wd = 2*d+1, via an integral image.
    Args:
        X: an [nx,ny,ns] array as a stack of ns images of size [nx,ny]
        d: radius of the sliding window, i.e., window size = (2*d+1)*(2*d+1)
        pad (optional, default='zero'): padding pattern, 'zero' or 'mirror'
    Returns:
        Y: average of elements for all overlapping windows, or None when d is
        invalid or the padding mode is unknown.
    """
    [nx, ny, ns] = np.shape(X)
    # Invalid radius: silently return None (mirrors the original MATLAB guard).
    if d < 0 or d != np.floor(d) or d >= min(nx, ny):
        return
    wd = 2*d+1 # size of the sliding window
    # Embed X into a zero border wide enough for the window and the
    # integral-image offset row/column.
    Y = np.zeros((nx+wd, ny+wd, ns), 'single')
    Y[d+1:nx+d+1, d+1:ny+d+1, :] = X
    # padding boundary
    if pad[0:2] != 'ze':
        # padding by mirroring
        if pad[0:2] == 'mi':
            # mirroring top
            Y[1:d+1, :, :] = np.flip(Y[d+2:wd+1, :, :], axis=0)
            # mirroring bottom
            Y[nx+d+1:, :, :] = np.flip(Y[nx:nx+d, :, :], axis=0)
            # mirroring left
            Y[:, 1:d+1, :] = np.flip(Y[:, d+2:wd+1, :], axis=1)
            # mirroring right
            Y[:, ny+d+1:, :] = np.flip(Y[:, ny:ny+d, :], axis=1)
        else:
            # Unknown padding mode: return None.
            return
    # forming integral image
    Y = np.cumsum(np.cumsum(Y, 0), 1)
    # computing block sums
    Y = Y[wd:, wd:, :]+Y[:-wd, :-wd, :] - Y[wd:, :-wd, :]-Y[:-wd, wd:, :]
    Y /= (wd*wd)
    return Y
def dct2mtx(n, order):
    """
    Build the n x n 2-D DCT basis images.

    Args:
        n: basis size (each basis image is n x n).
        order: ordering of the n^2 basis images; strings starting with 'gr'
            give column-major (grid) order, strings starting with 'sn' give a
            frequency-sorted (snake-like) order.
    Returns:
        mtx: array of shape (n, n, n*n); mtx[:, :, k] is the k-th basis image.
    """
    cols, rows = np.meshgrid(range(n), range(n))
    # Orthonormal 1-D DCT-II rows.
    basis_1d = np.sqrt(2 / n) * np.cos(np.pi * (2*cols + 1) * rows / (2 * n))
    basis_1d[0, :] /= np.sqrt(2)
    if order[:2] == 'gr':
        placement = np.reshape(range(n**2), (n, n), order='F')
    elif order[:2] == 'sn':  # not exactly snake code, but close
        diag = cols + rows
        placement = np.reshape(np.argsort(np.ndarray.flatten(diag)), (n, n), order='F')
    else:
        placement = order
    mtx = np.zeros((n, n, n*n))
    for i in range(n):
        for j in range(n):
            mtx[:, :, placement[i, j]] = np.outer(basis_1d[i, :], basis_1d[j, :])
    return mtx
def haar2mtx(n):
    """
    Build the n x n 2-D Haar basis images.

    Args:
        n: basis size; must be a power of 2.
    Returns:
        mtx: array of shape (n, n, n*n) of basis images, or None (with a
        message) when n is not a power of 2.
    """
    levels = int(np.log2(n))
    if 2**levels < n:
        print("input parameter has to be the power of 2")
        return
    # Build the 1-D Haar matrix by the recursive Kronecker construction.
    haar_1d = np.ones((1, 1))
    norm = 1/np.sqrt(2)  # normalization constant
    low = [1, 1]
    high = [1, -1]
    for _ in range(levels):
        upper = np.kron(haar_1d, low)
        lower = np.kron(np.eye(np.shape(haar_1d)[0], np.shape(haar_1d)[1]), high)
        haar_1d = norm * np.concatenate((upper, lower))
    # One basis image per ordered pair of 1-D rows.
    mtx = np.zeros((n, n, n*n))
    slot = 0
    for i in range(n):
        for j in range(n):
            mtx[:, :, slot] = np.outer(haar_1d[i, :], haar_1d[j, :])
            slot += 1
    return mtx
def localNoiVarEstimate_hdd(noi, ft, fz, br):
    """
    Computes local noise variance estimation using kurtosis.
    Args:
        noi: input noisy image
        ft: the type of band-pass filter used; supported: "dct", "haar", "rand"
        fz: the size of the support of the filter
        br: the radius of the local blocks
    Returns:
        V: estimated local noise variance map (0 for an unknown filter type)
    """
    if ft == 'dct':
        fltrs = dct2mtx(fz, 'snake')
    elif ft == 'haar':
        fltrs = haar2mtx(fz)
    elif ft == 'rand':
        fltrs = rnd2mtx(fz)
    else:
        return 0
    # decompose into channels
    # Channel 0 (the DC basis) is skipped; only band-pass responses are kept.
    ch = np.zeros([np.shape(noi)[0], np.shape(noi)[1], fz*fz-1], 'single')
    for k in range(1, fz**2):
        ch[:, :, k-1] = conv2(noi, fltrs[:, :, k], 'same')
    # collect raw moments
    mu1 = block_avg(ch, br, 'mi')
    mu2 = block_avg(ch**2, br, 'mi')
    mu3 = block_avg(ch**3, br, 'mi')
    mu4 = block_avg(ch**4, br, 'mi')
    Factor34 = mu4 - 4*mu1*mu3
    # Local variance and (clipped) excess kurtosis per channel.
    noiV = mu2 - mu1**2
    with np.errstate(invalid='ignore', divide='ignore', over='ignore'):
        noiK = (Factor34 + 6*mu1**2*mu2 - 3*mu1**4)/(noiV**2)-3
    noiK[noiK < 0] = 0
    # Per-pixel statistics across the band-pass channels.
    a = np.mean(np.sqrt(noiK), 2)
    b = np.mean(1/noiV, 2)
    c = np.mean(1/noiV**2, 2)
    d = np.mean(np.sqrt(noiK)/noiV, 2)
    sqrtK = (a*c - b*d)/(c-b*b)
    V = (1-a/sqrtK)/b
    V = V.astype("single")
    # Fall back to the harmonic-mean variance where the kurtosis fit is
    # unreliable or yields a negative estimate.
    idx = sqrtK < np.median(sqrtK)
    V[idx] = 1/b[idx]
    idx = V < 0
    V[idx] = 1/b[idx]
    return V
def rnd2mtx(n):
    """
    Build n^2 random 2-D basis images from a random orthonormal-like transform.

    Columns of a random Gaussian matrix are centered and scaled to unit norm;
    every ordered pair (i, j) of columns yields one outer-product basis image.

    Args:
        n: size of the basis (each image is n x n).
    Returns:
        mtx: array of shape (n, n, n*n).
    """
    base = np.random.randn(n, n)
    # Zero-mean, unit-norm columns.
    base = base - np.tile(np.mean(base, 0), (n, 1))
    base = base / np.tile(np.sqrt(np.sum(base**2, 0)), (n, 1))
    mtx = np.zeros((n, n, n*n))
    slot = 0
    for i in range(n):
        for j in range(n):
            mtx[:, :, slot] = np.outer(base[:, i], base[:, j])
            slot += 1
    return mtx
def GetNoiseMaps(impath, sizeThreshold=55*(2**5), filter_type='rand', filter_size=4, block_rad=8):
    """
    Main driver for the NOI2 algorithm.

    Args:
        impath: path of the image to analyse.
        sizeThreshold: element-count cutoff between the hdd and ram code paths.
        filter_type: band-pass filter family: 'haar', 'dct' or 'rand'.
        filter_size: support size of the filter.
        block_rad: radius of the local estimation blocks.
    Returns:
        estV: local noise-variance map (the OutputMap), NaN/inf replaced by 0.
    """
    image = cv2.imread(impath)
    n_values = np.prod(np.shape(image))
    estimator = GetNoiseMaps_hdd if n_values > sizeThreshold else GetNoiseMaps_ram
    noise_map = estimator(image, filter_type, filter_size, block_rad)
    # Clean up NaN/inf left by degenerate local statistics.
    return np.nan_to_num(noise_map, posinf=0, neginf=0)
| 8,753 | 27.891089 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/NOI1.py | """
This module provides the NOI1 algorithm
Noise-variance-inconsistency detector, solution 1.
Algorithm attribution:
Mahdian, Babak, and Stanislav Saic. "Using noise inconsistencies for blind
image forensics." Image and Vision Computing 27, no. 10 (2009): 1497-1503.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
from skimage.color import rgb2ycbcr
from PIL import Image
from pywt import dwt2
import cv2
def GetNoiseMap(impath, BlockSize=8):
    """
    Main driver for the NOI1 algorithm.

    Estimates local noise standard deviation per block from the diagonal
    wavelet detail band via the median absolute deviation (MAD / 0.6745).

    Args:
        impath: path of the image to be processed.
        BlockSize: block size for noise variance estimation. Too small reduces
            quality, too large reduces localization accuracy.
    Returns:
        OutputMap: per-block noise estimate map.
    """
    bgr = cv2.imread(impath)
    ycbcr = np.double(cv2.cvtColor(bgr, cv2.COLOR_BGR2YCR_CB))
    luma = np.round(ycbcr[:, :, 0])
    # Diagonal detail band of a single-level db8 wavelet decomposition.
    cD = dwt2(luma, 'db8')[1][2]
    rows = int(np.floor(np.size(cD, 0)/BlockSize)*BlockSize)
    cols = int(np.floor(np.size(cD, 1)/BlockSize)*BlockSize)
    cD = cD[:rows, :cols]
    Block = np.zeros((rows//BlockSize, cols//BlockSize, BlockSize**2))
    for ii in range(0, np.size(cD, 0)-1, BlockSize):
        for jj in range(0, np.size(cD, 1)-1, BlockSize):
            tile = cD[ii:ii+BlockSize, jj:jj+BlockSize]
            Block[ii//BlockSize, jj//BlockSize, :] = np.reshape(tile, (1, 1, np.size(tile)))
    return np.median(np.abs(Block), 2)/0.6745
| 1,717 | 33.36 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/ADQ1.py | """
This module provides the ADQ1 module.
Aligned-double-JPEG-compression-based detector, solution 1.
Algorithm attribution:
Lin, Zhouchen, Junfeng He, Xiaoou Tang, and Chi-Keung Tang. "Fast, automatic
and fine-grained tampered JPEG image detection via DCT coefficient analysis."
Pattern Recognition 42, no. 11 (2009): 2492-2501.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
import jpegio as jio
from scipy.signal import medfilt2d
from pyIFD.util import bdct
import matplotlib.image as mpimg
def ExtractYDCT(im):
    """
    Compute rounded blockwise DCT coefficients of the luminance channel.

    Args:
        im: 3-channel image array; assumes RGB channel order — TODO confirm
            against the caller's image reader.
    Returns:
        YDCT: integer 8x8 block-DCT coefficients of the level-shifted
        luminance, cropped to a multiple of 8 in both dimensions.
    """
    pixels = np.double(im)
    luma = 0.299*pixels[:, :, 0] + 0.587*pixels[:, :, 1] + 0.114*pixels[:, :, 2]
    crop_h = int(np.floor(np.shape(luma)[0]/8)*8)
    crop_w = int(np.floor(np.shape(luma)[1]/8)*8)
    # JPEG-style level shift before the block transform.
    luma = luma[:crop_h, :crop_w] - 128
    return np.round(bdct(luma, 8)).astype("int")
def detectDQ_JPEG(im):
    """
    Determines the DQ (double-quantization) probability map for a JPEG image.

    Args:
        im: Input image as read in by JPEGIO

    Returns:
        [OutputMap, Feature_Vector, coeffArray]:
            OutputMap: per-8x8-block probability of tampering (heatmap).
            Feature_Vector: global features [Topt, s, s0+s1, K_0].
            coeffArray: the (possibly cropped) Y-channel DCT coefficient array.
    """
    # How many DCT coeffs to take into account
    MaxCoeffs = 15
    # JPEG zig-zag sequence
    coeff = [1, 9, 2, 3, 10, 17, 25, 18, 11, 4, 5, 12, 19, 26, 33, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 57, 50, 43, 36, 29, 22,
             15, 8, 16, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 24, 32, 39, 46, 53, 60, 61, 54, 47, 40, 48, 55, 62, 63, 56, 64]
    # Which channel to take: always keep Y only
    channel = 1
    coeffArray = im.coef_arrays[channel-1]
    # Drop the partially-filled trailing block row/column, if any.
    if im.image_height % 8 != 0:
        coeffArray = coeffArray[:-8, :]
    if im.image_width % 8 != 0:
        coeffArray = coeffArray[:, :-8]
    FFT_Out = {}
    FFT_smoothed = {}
    p_h_fft = np.zeros((MaxCoeffs, 1))
    p_final = np.zeros((MaxCoeffs, 1))
    s_0_Out = np.zeros((MaxCoeffs, 1))
    P_tampered = np.zeros((int(np.shape(coeffArray)[0]/8), int(np.shape(coeffArray)[1]/8), MaxCoeffs))
    P_untampered = np.zeros(np.shape(P_tampered))
    # numOverall = np.zeros(np.shape(P_tampered))
    # denomOverall = np.zeros(np.shape(P_tampered))
    for coeffIndex in range(MaxCoeffs):
        coe = coeff[coeffIndex]
        startY = int(coe % 8)
        if startY == 0:
            startY = 8
        startX = int(np.ceil(coe/8))
        selectedCoeffs = coeffArray[startX-1::8, startY-1::8]
        coeffList = np.reshape(selectedCoeffs, (np.size(selectedCoeffs), 1), order='F')
        minHistValue = int(min(coeffList)-1)
        maxHistValue = int(max(coeffList)+2)
        coeffHist = np.histogram(coeffList, list(range(minHistValue, maxHistValue+1)))[0]
        if(np.size(coeffHist) > 0):
            s_0_Out[coeffIndex] = np.argmax(coeffHist)+1
            # Good through coeffHist
            # Find period by max peak in the FFT minus DC term
            FFT = abs(np.fft.fft(coeffHist))
            FFT_Out[coeffIndex] = FFT
            if np.size(FFT) != 0:
                DC = FFT[0]
                # Find first local minimum, to remove DC peak
                FreqValley = 1
                while (FreqValley < len(FFT)-1) and (FFT[FreqValley-1] >= FFT[FreqValley]):
                    FreqValley += 1
                FFT = FFT[FreqValley-1:int(np.floor(len(FFT)/2))]
                FFT_smoothed[coeffIndex] = FFT
                if(np.size(FFT) != 0):
                    FFTPeak = np.argmax(FFT)+1
                    maxPeak = FFT[FFTPeak-1]
                    FFTPeak += FreqValley-1-1  # -1 because FreqValley appears twice, and -1 for the 0-freq DC term
                if np.size(FFT) == 0 or maxPeak < DC/5 or min(FFT)/maxPeak > 0.9:  # threshold at 1/5 the DC and 90% the remaining lowest to only retain significant peaks
                    p_h_fft[coeffIndex] = 1
                else:
                    p_h_fft[coeffIndex] = round(len(coeffHist)/FFTPeak)
            else:
                p_h_fft[coeffIndex] = 1
        # period is the minimum of the two methods
        p_final[coeffIndex] = p_h_fft[coeffIndex]
        # calculate per-block probabilities
        if p_final[coeffIndex] != 1:
            adjustedCoeffs = selectedCoeffs-minHistValue+1
            period_start = adjustedCoeffs-(np.fmod(adjustedCoeffs-s_0_Out[coeffIndex], p_final[coeffIndex]))
            num = np.zeros(np.shape(period_start))
            denom = np.zeros(np.shape(period_start))
            for kk in range(np.shape(period_start)[0]):
                for ll in range(np.shape(period_start)[1]):
                    if period_start[kk, ll] >= s_0_Out[coeffIndex]:
                        period = list(range(int(period_start[kk, ll]), int(period_start[kk, ll]+p_final[coeffIndex])))
                        if period_start[kk, ll]+p_final[coeffIndex]-1 > len(coeffHist):
                            idx = [i for i, x in enumerate(period) if x > len(coeffHist)]
                            for i in idx:
                                period[i] -= p_final[coeffIndex]
                        num[kk, ll] = coeffHist[adjustedCoeffs[kk, ll]-1]
                        denom[kk, ll] = sum([coeffHist[int(p-1)] for p in period])
                    else:
                        period = list(range(int(period_start[kk, ll]), int(period_start[kk, ll]-p_final[coeffIndex]), -1))
                        if period_start[kk, ll]-p_final[coeffIndex]+1 <= 0:
                            idx = [i for i, x in enumerate(period) if x <= 0]
                            for i in idx:
                                period[i] += p_final[coeffIndex]
                        num[kk, ll] = coeffHist[adjustedCoeffs[kk, ll]-1]
                        denom[kk, ll] = sum([coeffHist[int(p - 1)] for p in period])
            P_u = num/denom
            P_t = 1/p_final[coeffIndex]
            P_tampered[:, :, coeffIndex] = P_t/(P_u+P_t)
            P_untampered[:, :, coeffIndex] = P_u/(P_u+P_t)
        else:
            P_tampered[:, :, coeffIndex] = np.ones((int(np.ceil(np.shape(coeffArray)[0]/8)), int(np.ceil(np.shape(coeffArray)[1]/8))))*0.5
            P_untampered[:, :, coeffIndex] = 1-P_tampered[:, :, coeffIndex]
    P_tampered_Overall = np.prod(P_tampered, axis=2)/(np.prod(P_tampered, axis=2)+np.prod(P_untampered, axis=2))
    P_tampered_Overall[np.isnan(P_tampered_Overall)] = 0
    OutputMap = P_tampered_Overall.copy()
    s = np.var(np.reshape(P_tampered_Overall, (np.size(P_tampered_Overall), 1)))
    Teval = np.zeros((99, 1))
    for S in range(1, 100):
        T = S/100
        Class0 = P_tampered_Overall < T
        Class1 = P_tampered_Overall >= T
        # Fixed: the original guards tested `np.all(ClassX) is False`, which
        # compares a numpy bool to the Python False singleton by identity and
        # is never true, so np.var() could run on an empty selection and
        # produce NaN. Guard on emptiness of the class instead.
        if not np.any(Class0):
            s0 = 0
        else:
            s0 = np.var(P_tampered_Overall[Class0])
        if not np.any(Class1):
            s1 = 0
        else:
            s1 = np.var(P_tampered_Overall[Class1])
        if s0 == 0 and s1 == 0:
            Teval[S-1] = 0
        else:
            Teval[S-1] = s/(s0+s1)
    Topt = np.argmax(Teval)+1
    Topt = Topt/100-0.01
    Class0 = P_tampered_Overall < Topt
    Class1 = P_tampered_Overall >= Topt
    # Same empty-class guard fix as above.
    if not np.any(Class0):
        s0 = 0
    else:
        s0 = np.var(P_tampered_Overall[Class0])
    if not np.any(Class1):
        s1 = 0
    else:
        s1 = np.var(P_tampered_Overall[Class1])
    Class1_filt = medfilt2d(np.array(Class1, dtype="uint8"), [3, 3])
    Class0_filt = medfilt2d(np.array(Class0, dtype="uint8"), [3, 3])
    # e_i: for each interior tampered block, how many of its 4-neighbours are
    # untampered (connectivity feature).
    e_i = (Class0_filt[:-2, 1:-1]+Class0_filt[1:-1, :-2]+Class0_filt[2:, 1:-1]+Class0_filt[1:-1, 2:])*Class1_filt[1:-1, 1:-1]
    e_i = e_i.astype("double")
    if np.sum(Class0) > 0 and np.sum(Class0) < np.size(Class0):
        K_0 = np.sum(np.maximum(e_i-2, 0))/np.sum(Class0)
    else:
        K_0 = 1
        s0 = 0
        s1 = 0
    Feature_Vector = [Topt, s, s0+s1, K_0]
    return [OutputMap, Feature_Vector, coeffArray]
def detectDQ_NonJPEG(im):
    """
    Determines the DQ (double-quantization) probability map for a non-JPEG image.

    The image is converted to Y-channel block-DCT coefficients first (so a
    single compression is simulated), then analysed like the JPEG variant.

    Args:
        im: RGB image array (H x W x 3).

    Returns:
        OutputMap: Heatmap values for detected areas (per-8x8-block
            probability of tampering).
    """
    # How many DCT coeffs to take into account
    MaxCoeffs = 15
    # JPEG zig-zag sequence
    coeff = [1, 9, 2, 3, 10, 17, 25, 18, 11, 4, 5, 12, 19, 26, 33, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 57, 50, 43, 36, 29, 22,
             15, 8, 16, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 24, 32, 39, 46, 53, 60, 61, 54, 47, 40, 48, 55, 62, 63, 56, 64]
    # Which channel to take: always keep Y only
    # channel = 1
    coeffArray = ExtractYDCT(im)
    FFT_Out = {}
    FFT_smoothed = {}
    p_h_fft = np.zeros((MaxCoeffs, 1))
    p_final = np.zeros((MaxCoeffs, 1))
    s_0_Out = np.zeros((MaxCoeffs, 1))
    P_tampered = np.zeros((int(np.shape(coeffArray)[0]/8), int(np.shape(coeffArray)[1]/8), MaxCoeffs))
    P_untampered = np.zeros(np.shape(P_tampered))
    # numOverall = np.zeros(np.shape(P_tampered))
    # denomOverall = np.zeros(np.shape(P_tampered))
    for coeffIndex in range(MaxCoeffs):
        # Locate the (row, col) of this zig-zag coefficient inside each 8x8 block.
        coe = coeff[coeffIndex]
        startY = int(coe % 8)
        if startY == 0:
            startY = 8
        startX = int(np.ceil(coe/8))
        selectedCoeffs = coeffArray[startX-1::8, startY-1::8]
        coeffList = np.reshape(selectedCoeffs, (np.size(selectedCoeffs), 1), order='F')
        minHistValue = int(min(coeffList)-1)
        maxHistValue = int(max(coeffList)+2)
        coeffHist = np.histogram(coeffList, list(range(minHistValue, maxHistValue+1)))[0]
        if(np.size(coeffHist) > 0):
            s_0_Out[coeffIndex] = np.argmax(coeffHist)+1
            # Good through coeffHist
            # Find period by max peak in the FFT minus DC term
            FFT = abs(np.fft.fft(coeffHist))
            FFT_Out[coeffIndex] = FFT
            if np.size(FFT) != 0:
                DC = FFT[0]
                # Find first local minimum, to remove DC peak
                FreqValley = 1
                while (FreqValley < len(FFT)-1) and (FFT[FreqValley-1] >= FFT[FreqValley]):
                    FreqValley += 1
                FFT = FFT[FreqValley-1:int(np.floor(len(FFT)/2))]
                FFT_smoothed[coeffIndex] = FFT
                # NOTE(review): unlike detectDQ_JPEG, there is no emptiness
                # check on FFT here before argmax -- confirm FFT cannot be
                # empty on this path.
                FFTPeak = np.argmax(FFT)+1
                maxPeak = FFT[FFTPeak-1]
                FFTPeak += FreqValley-1-1  # -1 because FreqValley appears twice, and -1 for the 0-freq DC term
                if maxPeak < DC/5 or min(FFT)/maxPeak > 0.9:  # threshold at 1/5 the DC and 90% the remaining lowest to only retain significant peaks
                    p_h_fft[coeffIndex] = 1
                else:
                    p_h_fft[coeffIndex] = round(len(coeffHist)/FFTPeak)
            else:
                p_h_fft[coeffIndex] = 1
        # period is the minimum of the two methods
        p_final[coeffIndex] = p_h_fft[coeffIndex]
        # calculate per-block probabilities
        if p_final[coeffIndex] != 1:
            adjustedCoeffs = selectedCoeffs-minHistValue+1
            period_start = adjustedCoeffs-(np.fmod(adjustedCoeffs-s_0_Out[coeffIndex], p_final[coeffIndex]))
            num = np.zeros(np.shape(period_start))
            denom = np.zeros(np.shape(period_start))
            for kk in range(np.shape(period_start)[0]):
                for ll in range(np.shape(period_start)[1]):
                    if period_start[kk, ll] >= s_0_Out[coeffIndex]:
                        # Walk one quantization period forward, wrapping
                        # indices that fall past the histogram's end.
                        period = list(range(int(period_start[kk, ll]), int(period_start[kk, ll]+p_final[coeffIndex])))
                        if period_start[kk, ll]+p_final[coeffIndex]-1 > len(coeffHist):
                            idx = [i for i, x in enumerate(period) if x > len(coeffHist)]
                            for i in idx:
                                period[i] -= p_final[coeffIndex]
                        num[kk, ll] = coeffHist[adjustedCoeffs[kk, ll]-1]
                        denom[kk, ll] = sum([coeffHist[int(p-1)] for p in period])
                    else:
                        # Walk one period backward, wrapping indices below 1.
                        period = list(range(int(period_start[kk, ll]), int(period_start[kk, ll]-p_final[coeffIndex]), -1))
                        if period_start[kk, ll]-p_final[coeffIndex]+1 <= 0:
                            idx = [i for i, x in enumerate(period) if x <= 0]
                            for i in idx:
                                period[i] += p_final[coeffIndex]
                        num[kk, ll] = coeffHist[adjustedCoeffs[kk, ll]-1]
                        denom[kk, ll] = sum([coeffHist[int(p-1)] for p in period])
            P_u = num/denom
            P_t = 1/p_final[coeffIndex]
            P_tampered[:, :, coeffIndex] = P_t/(P_u+P_t)
            P_untampered[:, :, coeffIndex] = P_u/(P_u+P_t)
        else:
            # No periodicity found for this coefficient: non-informative (0.5).
            P_tampered[:, :, coeffIndex] = np.ones((int(np.ceil(np.shape(coeffArray)[0]/8)), int(np.ceil(np.shape(coeffArray)[1]/8))))*0.5
            P_untampered[:, :, coeffIndex] = 1-P_tampered[:, :, coeffIndex]
    P_tampered_Overall = np.prod(P_tampered, axis=2)/(np.prod(P_tampered, axis=2)+np.prod(P_untampered, axis=2))
    P_tampered_Overall[np.isnan(P_tampered_Overall)] = 0
    OutputMap = P_tampered_Overall.copy()
    s = np.var(np.reshape(P_tampered_Overall, (np.size(P_tampered_Overall), 1)))
    Teval = np.zeros((99, 1))
    # Sweep thresholds to find the one best separating the two classes.
    # NOTE(review): unlike detectDQ_JPEG there is no guard against an empty
    # class here, so np.var may warn and return NaN -- confirm intended.
    for S in range(1, 100):
        T = S/100
        Class0 = P_tampered_Overall < T
        Class1 = P_tampered_Overall >= T
        s0 = np.var(P_tampered_Overall[Class0])
        s1 = np.var(P_tampered_Overall[Class1])
        Teval[S-1] = s/(s0+s1)
    Topt = np.argmax(Teval)+1
    Topt = Topt/100-0.01
    Class0 = P_tampered_Overall < Topt
    Class1 = P_tampered_Overall >= Topt
    s0 = np.var(P_tampered_Overall[Class0])
    s1 = np.var(P_tampered_Overall[Class1])
    Class1_filt = medfilt2d(np.array(Class1, dtype="uint8"), [3, 3])
    Class0_filt = medfilt2d(np.array(Class0, dtype="uint8"), [3, 3])
    # Count untampered 4-neighbours of interior tampered blocks; computed but
    # unused here (the JPEG variant folds it into its feature vector).
    e_i = (Class0_filt[:-2, 1:-1]+Class0_filt[1:-1, :-2]+Class0_filt[2:, 1:-1]+Class0_filt[1:-1, 2:])*Class1_filt[1:-1, 1:-1]
    e_i = e_i.astype("double")
    return OutputMap
def detectDQ(impath):
    """
    Main driver for ADQ1 algorithm

    Args:
        impath: Input image path. JPEG files are analysed through their stored
            DCT coefficients; any other format is recompressed in memory.

    Returns:
        OutputMap: For JPEG input, [OutputMap, Feature_Vector, coeffArray];
            otherwise the heatmap alone. None when the JPEG cannot be parsed.
    """
    # Accept both common JPEG extensions, case-insensitively (the original
    # check only matched a lowercase ".jpg", routing "X.JPG"/"X.jpeg" through
    # the slower, less accurate non-JPEG path).
    if impath.lower().endswith((".jpg", ".jpeg")):
        try:
            OutputMap = detectDQ_JPEG(jio.read(impath))
        except Exception as e:
            print('JPEGIO exception: ' + str(e))
            return
    else:
        im = mpimg.imread(impath)
        # mpimg returns floats in [0, 1] for PNG; rescale to 8-bit range.
        im = np.round(im*255)
        OutputMap = detectDQ_NonJPEG(im)
    return OutputMap
| 14,409 | 39.706215 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/ELA.py | """
This module provides the ELA algorithm
Error-level-analysis-based detector.
Algorithm attribution:
Krawets, Neil. "A Picture's Worth: Digital Image Analysis and Forensics". Online
article on http://www.google.gr/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&uact=8&ved=0ahUKEwiDg5_c07PLAhVpnXIKHUp8B5QQFgggMAA&url=http%3A%2F%2Fwww.hackerfactor.com%2Fpapers%2Fbh-usa-07-krawetz-wp.pdf&usg=AFQjCNFuUo7D6kGBAP9jAEmSgmY6RtWZ4w&sig2=Xw9SdzUHLYJ6dfPVzUmFLw
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
import cv2
import os
def ELA(impath, Quality=90, Multiplier=15, Flatten=True):
    """
    Main driver for ELA algorithm.

    Args:
        impath: Path to image to be transformed.
        Quality (optional, default=90): the quality in which to recompress the image. (0-100 integer).
        Multiplier (optional, default=15): value with which to multiply the residual to make it more visible. (Float).
        Flatten (optional, default=True): Boolean. Describes whether to flatten OutputMap.

    Returns:
        OutputMap: Output of ELA algorithm (RGB residual map, or its
            channel-mean when Flatten is True).
    """
    import tempfile

    ImIn = np.double(cv2.imread(impath))
    # Resave through a private temporary file instead of a fixed name in the
    # current directory, so concurrent calls cannot clobber each other's
    # intermediate file; the finally-block guarantees cleanup on error too.
    fd, tmp_path = tempfile.mkstemp(suffix='.jpg')
    os.close(fd)
    try:
        cv2.imwrite(tmp_path, ImIn, [cv2.IMWRITE_JPEG_QUALITY, Quality])
        ImJPG = np.double(cv2.imread(tmp_path))
    finally:
        os.remove(tmp_path)
    OutputMap = (np.abs(ImIn-ImJPG))*Multiplier
    # Swap the B and R planes so the residual is returned in RGB order.
    OutputMap[:, :, [0, 2]] = OutputMap[:, :, [2, 0]]
    if Flatten is True:
        OutputMap = np.mean(OutputMap, 2)
    return OutputMap
| 1,647 | 36.454545 | 273 | py |
pyIFD | pyIFD-main/src/pyIFD/util.py | """
This file provides utility functions for pyIFD modules.
"""
import numpy as np
import math
import cv2
from scipy import signal
def minmaxpercent(o, p=0.05):
    """
    Robust (percentile-clipped) minimum and maximum of the finite values of o.

    Args:
        o: array of values; non-finite entries (NaN, +/-inf) are ignored.
        p: fraction of values to clip from each tail (0 <= p < 0.5).

    Returns:
        [a, b]: values near the p-th and (1-p)-th quantiles, or [0, 1] when
            o contains no finite values.
    """
    o = o[np.isfinite(o)]
    if o.size == 0:
        return [0, 1]
    o = np.sort(o)
    # Fixed: the parameter p was silently overwritten with a hard-coded 0.01,
    # so callers could never change the clipping fraction. The (MATLAB-style)
    # tail indices are also clamped into range so tiny arrays cannot index
    # out of bounds.
    lo = min(int(max(np.ceil(np.size(o)*p), 1)), o.size - 1)
    hi = min(int(max(np.floor(np.size(o)*(1-p)), 1)), o.size - 1)
    return [o[lo], o[hi]]
def postprocessing(input_map):
    """
    Normalize a raw detector output map to integer levels 1..64.

    Clips the map at its robust min/max (1% tails), rescales to [0, 1] and
    quantizes to 64 levels; NaNs map to level 1.

    Args:
        input_map: array of raw detector scores.

    Returns:
        out_map: array of the same shape with values in {1, ..., 64}.
    """
    # Fixed: `out_map = input_map` aliased the argument and mutated the
    # caller's array in place; work on a float copy instead.
    out_map = np.array(input_map, dtype=float)
    [minimum, maximum] = minmaxpercent(out_map, 0.01)
    out_map[out_map < minimum] = minimum
    out_map[out_map > maximum] = maximum
    out_map = out_map - np.min(out_map)
    out_map = out_map / np.max(out_map)
    out_map = np.round(out_map*63)+1
    out_map[np.isnan(out_map)] = 1
    return out_map
def vec2im(v, padsize=[0, 0], bsize=None, rows=None, cols=None):
    """
    Reassemble a matrix of block-vectors into a 2-D image (inverse of im2vec).

    Args:
        v: (bsize[0]*bsize[1]) x nblocks array; each column holds one block in
            column-major (Fortran) order.
        padsize (optional, default=[0, 0]): Must be non-negative integers in a 1x2 array. Padsize dictates the amount of zeros padded for each of the two dimensions.
        bsize (optional, default=None): Block size. It's dimensions must multiply to the number of elements in v.
        rows (optional, default=None): Number of rows for output
        cols (optional, default=None): Number of cols for output

    Returns:
        im: Output image (2d numpy array)

    Raises:
        Exception: if padsize is negative or bsize does not match v.
    """
    [m, n] = np.shape(v)
    padsize = padsize+np.zeros((1, 2), dtype=int)[0]
    # Fixed: the old test `padsize.any() < 0` compared a boolean to zero and
    # could never fire; check the elements themselves.
    if (padsize < 0).any():
        raise Exception("Pad size must not be negative")
    if bsize is None:
        bsize = math.floor(math.sqrt(m))
    bsize = bsize+np.zeros((1, 2), dtype=int)[0]
    if(np.prod(bsize) != m):
        raise Exception("Block size does not match size of input vectors.")
    if rows is None:
        rows = math.floor(math.sqrt(n))
    if cols is None:
        cols = math.ceil(n/rows)
    # make image: place each block in the top-left of a (block+pad) cell,
    # then fold the cell stack into the final row x col grid.
    y = bsize[0]+padsize[0]
    x = bsize[1]+padsize[1]
    t = np.zeros((y, x, rows*cols))
    t[:bsize[0], :bsize[1], :n] = np.reshape(v, (bsize[0], bsize[1], n), order='F')
    t = np.reshape(t, (y, x, rows, cols), order='F')
    t = np.reshape(np.transpose(t, [0, 2, 1, 3]), (y*rows, x*cols), order='F')
    im = t[:y*rows-padsize[0], :x*cols-padsize[1]]
    return im
def im2vec(im, bsize, padsize=0):
    """
    Converts image to a matrix of block-vectors (inverse of vec2im).

    Args:
        im: Input image to be converted to a vector.
        bsize: Size of block of im to be converted to vec. Must be 1x2 non-negative int array.
        padsize (optional, default=0): Must be non-negative integers in a 1x2 array. Amount of zeros padded on each
            dimension between blocks.

    Returns:
        v: Output vector matrix; one column per block, column-major order.
        rows: Number of block rows of im after bsize and padsize are applied.
        cols: Number of block cols of im after bsize and padsize are applied.

    Raises:
        Exception: if padsize is negative.
    """
    bsize = bsize+np.zeros((1, 2), dtype=int)[0]
    padsize = padsize+np.zeros((1, 2), dtype=int)[0]
    # Fixed: the old test `padsize.any() < 0` compared a boolean to zero and
    # could never fire; check the elements themselves.
    if (padsize < 0).any():
        raise Exception("Pad size must not be negative")
    imsize = np.shape(im)
    y = bsize[0]+padsize[0]
    x = bsize[1]+padsize[1]
    rows = math.floor((imsize[0]+padsize[0])/y)
    cols = math.floor((imsize[1]+padsize[1])/x)
    t = np.zeros((y*rows, x*cols))
    imy = y*rows-padsize[0]
    imx = x*cols-padsize[1]
    t[:imy, :imx] = im[:imy, :imx]
    t = np.reshape(t, (y, rows, x, cols), order='F')
    t = np.reshape(np.transpose(t, [0, 2, 1, 3]), (y, x, rows*cols), order='F')
    v = t[:bsize[0], :bsize[1], :rows*cols]
    # Fixed: the final reshape used (y*x, ...), which only equals the block
    # element count when padsize is 0 and crashed otherwise; use the actual
    # block size so non-zero padding works.
    v = np.reshape(v, (bsize[0]*bsize[1], rows*cols), order='F')
    return [v, rows, cols]
def bdctmtx(n):
    """
    Build the (n*n) x (n*n) orthonormal 2-D block-DCT matrix.

    Args:
        n: Size of the (square) block.

    Returns:
        m: (n*n) x (n*n) array; m @ vec(block) (column-major vectorization)
            gives the 2-D DCT-II coefficients of an nxn block.
    """
    # Fixed: the index grids were hard-coded to range(8), which silently
    # produced wrong results for any n != 8.
    [c, r] = np.meshgrid(range(n), range(n))
    [c0, r0] = np.meshgrid(r, r)
    [c1, r1] = np.meshgrid(c, c)
    # 1-D DCT-II basis: x[frequency, sample].
    x = np.zeros(np.shape(c))
    for i in range(n):
        for j in range(n):
            x[i, j] = math.sqrt(2/n)*math.cos(math.pi*(2*c[i, j]+1)*r[i, j]/(2*n))
    # The DC row needs the extra 1/sqrt(2) normalization for orthonormality.
    x[0, :] = x[0, :]/math.sqrt(2)
    x = x.flatten('F')
    # 2-D transform is the Kronecker product of the 1-D basis with itself.
    m = np.zeros(np.shape(r0))
    for i in range(n**2):
        for j in range(n**2):
            m[i, j] = x[r0[i, j]+c0[i, j]*n]*x[r1[i, j]+c1[i, j]*n]
    return m
def bdct(a, n=8):
    """
    Apply the forward 2-D DCT independently to each nxn block of a.

    Args:
        a: 2-D array to transform.
        n (optional, default=8): Size of the blocks.

    Returns:
        b: Per-block DCT coefficients, laid out like a.
    """
    transform = bdctmtx(n)
    blocks, nrows, ncols = im2vec(a, n)
    return vec2im(transform @ blocks, 0, n, nrows, ncols)
def dequantize(qcoef, qtable):
    """
    Multiply quantized block-DCT coefficients by their quantization table.

    Args:
        qcoef: Quantized coefficient array (blocked layout).
        qtable: Quantization step sizes; one block's worth, same block size
            as qcoef's blocks.

    Returns:
        coef: Dequantized coefficient array, same shape as qcoef.
    """
    blksz = np.shape(qtable)
    blocks, nrows, ncols = im2vec(qcoef, blksz)
    # One column per block: scale every block by the same step-size vector.
    steps = np.array(qtable).flatten('F')
    scaled = blocks*np.tile(steps, (np.shape(blocks)[1], 1)).T
    return vec2im(scaled, 0, blksz, nrows, ncols)
def extrema(x):
    """
    Find the local minima of a 1-D series, endpoints included.

    Indices are 1-based (to match the original MatLab code) and are returned
    ordered by increasing minimum value.

    Args:
        x: time series vector

    Returns:
        imin: 1-based indices of the local minima, sorted by their values.
    """
    series = np.asarray(x)
    # Interior strict minima.
    idx = signal.argrelextrema(series, np.less)[0]
    # Endpoints count as minima when they sit below their only neighbour.
    if series[-1] < series[-2]:
        idx = np.append(idx, len(series)-1)
    if series[0] < series[1]:
        idx = np.insert(idx, 0, 0)
    # Order the minima by value, smallest first.
    idx = idx[np.argsort(series[idx])]
    return idx+1
def ibdct(a, n=8):
    """
    Apply the inverse 2-D DCT independently to each nxn block of a.

    Args:
        a: 2-D array of per-block DCT coefficients.
        n (optional, default=8): Size of the blocks.

    Returns:
        b: Reconstructed 2-D array.
    """
    transform = bdctmtx(n)
    blocks, nrows, ncols = im2vec(a, n)
    # The DCT matrix is orthonormal, so its transpose is its inverse.
    return vec2im(transform.T @ blocks, 0, n, nrows, ncols)
def jpeg_rec(image):
    """
    Simulate decompressed JPEG image from JPEG object.

    Args:
        image: JPEG object. (jpegio struct).

    Returns:
        IRecon: Reconstructed BGR image
        YCbCr: YCbCr image
    """
    Y = ibdct(dequantize(image.coef_arrays[0], image.quant_tables[0]))
    Y += 128
    if(image.image_components == 3):
        if(len(image.quant_tables) == 1):
            image.quant_tables[1] = image.quant_tables[0]
            image.quant_tables[2] = image.quant_tables[0]
        Cb = ibdct(dequantize(image.coef_arrays[1], image.quant_tables[1]))
        Cr = ibdct(dequantize(image.coef_arrays[2], image.quant_tables[1]))
        [r, c] = np.shape(Y)
        [rC, cC] = np.shape(Cb)
        # Upsample the chroma planes according to the detected subsampling.
        if(math.ceil(r/rC) == 2) and (math.ceil(c/cC) == 2):  # 4:2:0
            kronMat = np.ones((2, 2))
        elif(math.ceil(r/rC) == 1) and (math.ceil(c/cC) == 4):  # 4:1:1
            kronMat = np.ones((1, 4))
        elif(math.ceil(r/rC) == 1) and (math.ceil(c/cC) == 2):  # 4:2:2
            # Fixed: chroma is half-width here, so each sample expands to
            # 1x2; the old 1x4 factor produced a misaligned chroma plane.
            kronMat = np.ones((1, 2))
        elif(math.ceil(r/rC) == 1) and (math.ceil(c/cC) == 1):  # 4:4:4
            kronMat = np.ones((1, 1))
        elif(math.ceil(r/rC) == 2) and (math.ceil(c/cC) == 1):  # 4:4:0
            kronMat = np.ones((2, 1))
        else:
            raise Exception("Subsampling method not recognized: "+str(np.shape(Y))+" "+str(np.shape(Cr)))
        Cb = np.kron(Cb, kronMat)+128
        Cr = np.kron(Cr, kronMat)+128
        Cb = Cb[:r, :c]
        Cr = Cr[:r, :c]
        IRecon = np.zeros((r, c, 3))
        # BT.601 YCbCr -> RGB conversion, stored as three planes.
        IRecon[:, :, 0] = (Y+1.402*(Cr-128))
        IRecon[:, :, 1] = (Y-0.34414*(Cb-128)-0.71414*(Cr-128))
        IRecon[:, :, 2] = (Y+1.772*(Cb-128))
        YCbCr = np.concatenate((Y, Cb, Cr), axis=1)
    else:
        # Fixed: np.tile(Y, [1, 1, 3]) prepends an axis, yielding shape
        # (1, r, 3c); replicate the luma into a proper (r, c, 3) stack.
        IRecon = np.tile(Y[:, :, None], (1, 1, 3))
        # NOTE(review): cv2.cvtColor may reject float64 input -- confirm
        # grayscale JPEGs actually exercise this path.
        YCbCr = cv2.cvtColor(IRecon, cv2.COLOR_BGR2YCR_CB)
    return [IRecon, YCbCr]
| 8,099 | 29.451128 | 164 | py |
pyIFD | pyIFD-main/src/pyIFD/CFA2.py | """
This module provides the CFA2 algorithm
Color-filter-array-artifact-based detector, solution 2.
Algorithm attribution:
Dirik, Ahmet Emir, and Nasir D. Memon. "Image tamper detection based on
demosaicing artifacts." In ICIP, pp. 1497-1500. 2009.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
from scipy.ndimage import correlate
from numpy.lib.stride_tricks import as_strided as ast
import cv2
def bilinInterp(CFAIm, BinFilter, CFA):  # Possible this is provided in skimage or similar
    """
    Bilinear demosaicing of a CFA-sampled image.

    Args:
        CFAIm: image with only the CFA-sampled values retained per channel.
        BinFilter: binary mask (H x W x 3) of the sampled positions.
        CFA: 2x2 Bayer pattern (1 = R, 2 = G, 3 = B).

    Returns:
        Out_Im_Int: integer interpolated image (MATLAB-style rounding).
    """
    # Interpolation stencils: minority channels use the full bilinear mask,
    # the majority channel only its 4 direct neighbours.
    MaskMin = np.array([[0.25, 0.5, 0.25], [0.5, 1.0, 0.5], [0.25, 0.5, 0.25]])
    MaskMaj = np.array([[0.0, 0.25, 0.0], [0.25, 1.0, 0.25], [0.0, 0.25, 0.0]])
    # If the majority channel repeats along a pattern row or column, each
    # pixel sees only two sampled neighbours, so the weights are doubled.
    repeats_down = np.argwhere(np.diff(CFA, axis=0) == 0).size > 0
    repeats_across = np.argwhere(np.diff(np.transpose(CFA), axis=0) == 0).size > 0
    if repeats_down or repeats_across:
        MaskMaj = MaskMaj*2
    Mask = np.tile(MaskMin[:, :, None], (1, 1, 3))
    majority = np.argmax(np.sum(np.sum(BinFilter, 0), 0))
    Mask[:, :, majority] = MaskMaj
    Out_Im = np.zeros(np.shape(CFAIm))
    for channel in range(3):
        sampled = CFAIm[:, :, channel]
        interpolated = correlate(sampled, Mask[:, :, channel], mode='constant')
        # Keep acquired samples; fill the gaps with the interpolation.
        chan_mask = BinFilter[:, :, channel]
        Out_Im[:, :, channel] = np.where(chan_mask == 1, sampled, interpolated)
    # nextafter nudges exact .5 values upward so np.round matches MATLAB's
    # round-half-up behaviour.
    Out_Im_Int = np.round(np.nextafter(Out_Im, Out_Im+1)).astype(int)
    return Out_Im_Int
def eval_block(block):  # Just more blockproc? Can this go in util?
    """
    Per-block CFA feature vector.

    Args:
        block: H x W x 6 array; channels 0-2 hold the original RGB values,
            channels 3-5 the re-interpolated ones.

    Returns:
        Out: length-6 vector: per-channel MSE between original and
            re-interpolated values (entries 0-2) followed by the per-channel
            sample standard deviation of the originals (entries 3-5).
    """
    Out = np.zeros(6)
    for ch in range(3):
        orig = np.double(block[:, :, ch])
        interp = np.double(block[:, :, ch + 3])
        Out[ch] = np.mean((orig - interp)**2)
        Out[ch + 3] = np.std(np.ndarray.flatten(block[:, :, ch], order='F'), ddof=1)
    return Out
def GetBlockView(A, block=(16, 16)):
    """
    Non-overlapping block view of a 2-D array (no data copy).

    Args:
        A: 2-D array to tile.
        block (optional, default=(16, 16)): block shape.

    Returns:
        4-D strided view; the first two axes index the block, the last two
        axes address the data inside that block.
    """
    nblocks = (int(np.floor(A.shape[0] / block[0])), int(np.floor(A.shape[1] / block[1])))
    shape = nblocks + block
    # Stride a whole block per step along the first two axes.
    strides = (block[0]*A.strides[0], block[1]*A.strides[1]) + A.strides
    return ast(A, shape=shape, strides=strides)
def ApplyFunction(M, blk_size=(16, 16)):
    """
    Applies the eval_block function to blocks of input.

    Full blocks are evaluated through a strided view; the ragged right/bottom
    edge blocks (when M's size is not a multiple of blk_size) are evaluated
    separately on direct slices.

    Args:
        M: 3d array (H x W x 6), as assembled by CFATamperDetection_F1.
        blk_size (optional, default=(16, 16)): tile size.

    Returns:
        OutputMap: ceil(H/blk) x ceil(W/blk) x 6 array of per-block features.
    """
    Blocks = np.zeros((int(np.floor(np.shape(M)[0]/blk_size[0])), int(np.floor(np.shape(M)[1]/blk_size[1])), blk_size[0], blk_size[1], 6))
    # Sizes of the partial edge blocks (0 means the dimension tiles exactly).
    edges = np.mod(np.shape(M)[:2], blk_size)
    for i in range(6):
        Blocks[:, :, :, :, i] = GetBlockView(M[:, :, i])
    OutputMap = np.zeros((int(np.ceil(np.shape(M)[0]/blk_size[0])), int(np.ceil(np.shape(M)[1]/blk_size[1])), 6))
    for x in range(Blocks.shape[0]):
        for y in range(Blocks.shape[1]):
            OutputMap[x, y, :] = eval_block(Blocks[x, y])
    # Partial bottom row of blocks.
    if edges[0] != 0:
        for y in range(Blocks.shape[1]):
            OutputMap[-1, y, :] = eval_block(M[-edges[0]:, y*blk_size[1]:(y+1)*blk_size[1], :])
    # Partial right column of blocks.
    if edges[1] != 0:
        for x in range(Blocks.shape[0]):
            OutputMap[x, -1, :] = eval_block(M[x*blk_size[0]:(x+1)*blk_size[0]:, -edges[1]:, :])
    # Bottom-right corner block, partial in both dimensions.
    if edges[0] != 0 and edges[1] != 0:
        OutputMap[-1, -1, :] = eval_block(M[-edges[0]:, -edges[1]:, :])
    return OutputMap
def CFATamperDetection_F1(im):
    """
    Estimate the CFA (Bayer) pattern of an image and compute the F1 feature map.

    Each of the four candidate Bayer layouts is tried: the image is re-sampled
    with that layout, bilinearly re-interpolated, and the per-block error
    between original and re-interpolated pixels is measured. The layout with
    the lowest mean error on non-smooth blocks wins.

    Args:
        im: RGB image (H x W x 3, even dimensions, double).

    Returns:
        [F1Map, CFAOut]:
            F1Map: per-16x16-block normalized green-channel error map for the
                best-fitting pattern.
            CFAOut: boolean 2x2 mask of the green positions of that pattern.
    """
    StdThresh = 5
    Depth = 3
    # Crop so both dimensions are multiples of 2**Depth.
    im = im[:np.round(np.floor(np.shape(im)[0]/(2**Depth))*(2**Depth)).astype(np.uint), :np.round(np.floor(np.shape(im)[1]/(2**Depth))*(2**Depth)).astype(np.uint), :]
    # The four possible 2x2 Bayer layouts (1 = R, 2 = G, 3 = B).
    CFAList = np.array([[[2, 1], [3, 2]], [[2, 3], [1, 2]], [[3, 2], [2, 1]], [[1, 2], [2, 3]]])
    W1 = 16
    if np.shape(im)[0] < W1 or np.shape(im)[1] < W1:
        F1Map = np.zeros((np.shape(im)[0], np.shape(im)[1]))
        return F1Map
    # NOTE: np.size(CFAList) is 16, so only the first 4 entries are ever
    # written; the rest stay +inf and never win the argmin below.
    MeanError = np.ones(np.size(CFAList))*np.inf
    Diffs = np.zeros((np.shape(CFAList)[0], int(np.ceil(np.shape(im)[0]/W1)*np.ceil(np.shape(im)[1]/W1))))
    F1Maps = np.zeros((np.shape(CFAList)[0], int(np.ceil(np.shape(im)[0]/W1)), int(np.ceil(np.shape(im)[1]/W1))))
    for TestArray in range(np.shape(CFAList)[0]):
        BinFilter = np.zeros((np.shape(im)[0], np.shape(im)[1], 3))
        ProcIm = np.zeros((np.shape(im)[0], np.shape(im)[1], 6))
        CFA = CFAList[TestArray]
        R = CFA == 1
        G = CFA == 2
        B = CFA == 3
        # Expand the 2x2 pattern to a full-image sampling mask per channel.
        BinFilter[:, :, 0] = np.tile(R, (int(np.shape(im)[0]/2), int(np.shape(im)[1]/2)))
        BinFilter[:, :, 1] = np.tile(G, (int(np.shape(im)[0]/2), int(np.shape(im)[1]/2)))
        BinFilter[:, :, 2] = np.tile(B, (int(np.shape(im)[0]/2), int(np.shape(im)[1]/2)))
        CFAIm = im*BinFilter
        BilinIm = bilinInterp(CFAIm, BinFilter, CFA)
        # Stack original (0-2) and re-interpolated (3-5) channels for the
        # per-block evaluation.
        ProcIm[:, :, 0:3] = im
        ProcIm[:, :, 3:6] = np.double(BilinIm)
        ProcIm = np.double(ProcIm)
        # BlockResult = blockproc(ProcIm, [W1 W1], @eval_block)
        BlockResult = ApplyFunction(ProcIm, (W1, W1))
        Stds = BlockResult[:, :, 3:6]
        BlockDiffs = BlockResult[:, :, :3]
        # Flat blocks carry no CFA evidence; score only textured ones.
        NonSmooth = Stds > StdThresh
        MeanError[TestArray] = np.mean(BlockDiffs[NonSmooth])
        with np.errstate(invalid='ignore'):
            BlockDiffs /= np.tile(np.sum(BlockDiffs, 2)[:, :, None], (1, 1, 3))
        Diffs[TestArray, :] = np.ndarray.flatten(BlockDiffs[:, :, 1], order='F')
        F1Maps[TestArray, :, :] = BlockDiffs[:, :, 1]
    Diffs[np.isnan(Diffs)] = 0
    val = int(np.argmin(MeanError))
    F1Map = F1Maps[val, :, :]
    F1Map[np.isnan(F1Map)] = 0
    # Green-position mask of the winning pattern.
    CFAOut = CFAList[val] == 2
    return [F1Map,CFAOut]
def CFA2(impath):
    """
    Main driver for the CFA2 detector.

    Args:
        impath: path to the image to analyse.

    Returns:
        OutputMap: per-block CFA-artifact feature map.
    """
    bgr = cv2.imread(impath)
    rgb = np.double(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
    # The Bayer analysis requires even dimensions; trim odd edges.
    trim = np.mod(np.shape(rgb), 2)
    if trim[0] != 0:
        rgb = rgb[:-trim[0], :, :]
    if trim[1] != 0:
        rgb = rgb[:, :-trim[1], :]
    return CFATamperDetection_F1(rgb)[0]
| 6,932 | 36.679348 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/ADQ2.py | """
This module provides the ADQ2 Algorithm
Aligned-double-JPEG-compression-based detector, solution 2.
Algorithm attribution:
T. Bianchi, A. De Rosa, and A. Piva, "IMPROVED DCT COEFFICIENT ANALYSIS
FOR FORGERY LOCALIZATION IN JPEG IMAGES", ICASSP 2011, Prague, Czech Republic,
2011, pp. 2444-2447.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
from scipy.signal import medfilt2d
import jpegio as jio
import math
import cv2
from pyIFD.util import bdctmtx, im2vec, vec2im, dequantize, bdct
def ibdct(a, n=8):
    """
    Apply the inverse 2-D DCT independently to each nxn block of a.

    Args:
        a: 2-D array of per-block DCT coefficients.
        n (optional, default=8): Size of the blocks.

    Returns:
        b: Reconstructed 2-D array.
    """
    transform = bdctmtx(n)
    blocks, nrows, ncols = im2vec(a, n)
    # The DCT matrix is orthonormal, so its transpose is its inverse.
    return vec2im(transform.T @ blocks, 0, n, nrows, ncols)
def jpeg_rec(image):
    """
    Simulate decompressed JPEG image from JPEG object.

    Args:
        image: JPEG object. (jpegio struct).

    Returns:
        IRecon: Reconstructed BGR image
        YCbCr: YCbCr image
    """
    Y = ibdct(dequantize(image.coef_arrays[0], image.quant_tables[0]))
    Y += 128
    if(image.image_components == 3):
        if(len(image.quant_tables) == 1):
            image.quant_tables[1] = image.quant_tables[0]
            image.quant_tables[2] = image.quant_tables[0]
        Cb = ibdct(dequantize(image.coef_arrays[1], image.quant_tables[1]))
        Cr = ibdct(dequantize(image.coef_arrays[2], image.quant_tables[1]))
        [r, c] = np.shape(Y)
        [rC, cC] = np.shape(Cb)
        # Upsample the chroma planes according to the detected subsampling.
        if(math.ceil(r/rC) == 2) and (math.ceil(c/cC) == 2):  # 4:2:0
            kronMat = np.ones((2, 2))
        elif(math.ceil(r/rC) == 1) and (math.ceil(c/cC) == 4):  # 4:1:1
            kronMat = np.ones((1, 4))
        elif(math.ceil(r/rC) == 1) and (math.ceil(c/cC) == 2):  # 4:2:2
            # Fixed: chroma is half-width here, so each sample expands to
            # 1x2; the old 1x4 factor produced a misaligned chroma plane.
            kronMat = np.ones((1, 2))
        elif(math.ceil(r/rC) == 1) and (math.ceil(c/cC) == 1):  # 4:4:4
            kronMat = np.ones((1, 1))
        elif(math.ceil(r/rC) == 2) and (math.ceil(c/cC) == 1):  # 4:4:0
            kronMat = np.ones((2, 1))
        else:
            raise Exception("Subsampling method not recognized: "+str(np.shape(Y))+" "+str(np.shape(Cr)))
        Cb = np.kron(Cb, kronMat)+128
        Cr = np.kron(Cr, kronMat)+128
        Cb = Cb[:r, :c]
        Cr = Cr[:r, :c]
        IRecon = np.zeros((r, c, 3))
        # BT.601 YCbCr -> RGB conversion, stored as three planes.
        IRecon[:, :, 0] = (Y+1.402*(Cr-128))
        IRecon[:, :, 1] = (Y-0.34414*(Cb-128)-0.71414*(Cr-128))
        IRecon[:, :, 2] = (Y+1.772*(Cb-128))
        YCbCr = np.concatenate((Y, Cb, Cr), axis=1)
    else:
        # Fixed: np.tile(Y, [1, 1, 3]) prepends an axis, yielding shape
        # (1, r, 3c); replicate the luma into a proper (r, c, 3) stack.
        IRecon = np.tile(Y[:, :, None], (1, 1, 3))
        # NOTE(review): cv2.cvtColor may reject float64 input -- confirm
        # grayscale JPEGs actually exercise this path.
        YCbCr = cv2.cvtColor(IRecon, cv2.COLOR_BGR2YCR_CB)
    return [IRecon, YCbCr]
def floor2(x1):
    """
    Floor each element of x1, mapping near-integer values to the integer
    minus 0.5.

    Args:
        x1: Input vector.

    Returns:
        x2: Element-wise adjusted floor of x1.
    """
    tol = 1e-12
    x2 = np.floor(x1)
    # Push values sitting (numerically) on an integer down half a step so
    # later histogram binning never lands exactly on a bin edge.
    near_int = np.where(np.absolute(x1-x2) < tol)
    x2[near_int] = x1[near_int]-0.5
    return x2
def ceil2(x1):
    """
    Ceil each element of x1, mapping near-integer values to the integer
    plus 0.5.

    Args:
        x1: Input vector.

    Returns:
        x2: Element-wise adjusted ceiling of x1.
    """
    tol = 1e-12
    x2 = np.ceil(x1)
    # Push values sitting (numerically) on an integer up half a step so
    # later histogram binning never lands exactly on a bin edge.
    near_int = np.where(np.absolute(x1-x2) < tol)
    x2[near_int] = x1[near_int] + 0.5
    return x2
def getJmap(impath, ncomp=1, c1=1, c2=15):
    """
    Main driver for ADQ2 algorithm.

    Fits, per DCT frequency, a mixture of a doubly-quantized and a singly-
    quantized coefficient model, estimates the primary quantization step, and
    accumulates per-block tampering probabilities.

    Args:
        impath: Input image path, required to be JPEG with extension .jpg
        ncomp: index of color component (1 = Y, 2 = Cb, 3 = Cr)
        c1: first DCT coefficient to consider (1 <= c1 <= 64)
        c2: last DCT coefficient to consider (1 <= c2 <= 64)

    Returns:
        maskTampered: estimated probability of being tampered for each 8x8 image block. Equivalent of OutputMap
        q1table: estimated quantization table of primary compression
        alphatable: mixture parameter for each DCT frequency
        Returns None when the file is not a .jpg or cannot be parsed.
    """
    if impath[-4:] == ".jpg":
        try:
            image = jio.read(impath)
        except Exception as e:
            print('JPEGIO exception: ' + str(e))
            return
    else:
        print("Only .jpg accepted")
        return
    ncomp -= 1  # indexing
    coeffArray = image.coef_arrays[ncomp]
    qtable = image.quant_tables[image.comp_info[ncomp].quant_tbl_no]
    # estimate rounding and truncation error
    ImIn = jpeg_rec(image)[0]
    Iint = ImIn.copy()
    Iint[Iint < 0] = 0
    Iint[Iint > 255] = 255
    E = ImIn-np.double(np.uint8(Iint+0.5))
    Edct = bdct(0.299*E[:, :, 0]+0.587*E[:, :, 1]+0.114*E[:, :, 2])
    Edct2 = np.reshape(Edct, (1, np.size(Edct)), order='F').copy()
    varE = np.var(Edct2)
    # simulate coefficients without DQ effect: shift the grid by one pixel so
    # the re-taken DCT loses the double-quantization periodicity.
    Y = ibdct(dequantize(coeffArray, qtable))
    coeffArrayS = bdct(Y[1:, 1:])
    sizeCA = np.shape(coeffArray)
    sizeCAS = np.shape(coeffArrayS)
    # JPEG zig-zag order of the 64 in-block DCT positions.
    coeff = [1, 9, 2, 3, 10, 17, 25, 18, 11, 4, 5, 12, 19, 26, 33, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 57, 50, 43, 36, 29, 22, 15, 8,
             16, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 24, 32, 39, 46, 53, 60, 61, 54, 47, 40, 48, 55, 62, 63, 56, 64]
    coeffFreq = np.zeros((int(np.size(coeffArray)/64), 1))
    coeffSmooth = np.zeros((int(np.size(coeffArrayS)/64), 1))
    errFreq = np.zeros((int(np.size(Edct)/64), 1))
    # Per-block probability accumulators (products over frequencies).
    bppm = 0.5*np.ones((int(np.size(coeffArray)/64), 1))
    bppmTampered = 0.5*np.ones((int(np.size(coeffArray)/64), 1))
    q1table = 100*np.ones(np.shape(qtable))
    alphatable = np.ones(np.shape(qtable))
    # Upper bound of the primary-quantizer search per zig-zag position.
    Q1up = np.concatenate((20*np.ones(10), 30*np.ones(5), 40*np.ones(6), 64*np.ones(7), 80*np.ones(8), 88*np.ones(28)))
    rangeC = np.arange(c1-1, c2)
    for index in rangeC:
        coe = coeff[index]
        # load DCT coefficients at position index
        k = 0
        start = coe % 8
        if(start == 0):
            start = 8
        rangeL = np.arange(start-1, sizeCA[1], 8)
        rangeI = np.arange(math.ceil(coe/8)-1, sizeCA[0], 8)
        for rl in rangeL:
            for i in rangeI:
                coeffFreq[k] = coeffArray[i, rl]
                errFreq[k] = Edct[i, rl]
                k += 1
        k = 0
        rangeL = np.arange(start-1, sizeCAS[1], 8)
        rangeI = np.arange(math.ceil(coe/8)-1, sizeCAS[0], 8)
        for rl in rangeL:
            for i in rangeI:
                coeffSmooth[k] = coeffArrayS[i, rl]
                k += 1
        # get histogram of DCT coefficients
        binHist = np.arange(-2**11, 2**11-1)+0.5
        binHist = np.append(binHist, max(2**11, coeffFreq.max()))
        binHist = np.insert(binHist, 0, min(-2**11, coeffFreq.min()))
        num4Bin = np.histogram(coeffFreq, binHist)[0]
        # get histogram of DCT coeffs w/o DQ effect (prior model for
        # uncompressed image
        Q2 = qtable[math.floor((coe-1) / 8), (coe-1) % 8]
        binHist = np.arange(-2**11, 2**11-1)+0.5
        binHist *= Q2
        binHist = np.append(binHist, max(Q2*(2**11), coeffSmooth.max()))
        binHist = np.insert(binHist, 0, min(Q2*(-2**11), coeffSmooth.min()))
        hsmooth = np.histogram(coeffSmooth, binHist)[0]
        # get estimate of rounding/truncation error
        biasE = np.mean(errFreq)
        # kernel for histogram smoothing
        sig = math.sqrt(varE)/Q2
        f = math.ceil(6*sig)
        p = np.arange(-f, f+1)
        g = np.exp(-p**2/sig**2/2)
        g = g/sum(g)
        binHist = np.arange(-2**11, 2**11)
        # Mask of the zero bin, excluded from the fit below.
        lidx = np.invert([binHist[i] != 0 for i in range(len(binHist))])
        hweight = 0.5*np.ones((1, 2**12))[0]
        E = float('inf')
        Etmp = np.ones((1, 99))[0]*float('inf')
        alphaest = 1
        Q1est = 1
        biasest = 0
        if(index == 0):
            bias = biasE
        else:
            bias = 0
        # estimate Q-factor of first compression
        rangeQ = np.arange(1, Q1up[index]+1)
        for Q1 in rangeQ:
            for b in [bias]:
                alpha = 1
                if(Q2 % Q1 == 0):
                    # Q1 divides Q2: no DQ artifact is observable; score the
                    # plain single-compression model.
                    diff = np.square(hweight * (hsmooth-num4Bin))
                else:
                    # nhist * hsmooth = prior model for doubly compressed coefficient
                    nhist = Q1/Q2*(floor2((Q2/Q1)*(binHist+b/Q2+0.5))-ceil2((Q2/Q1)*(binHist+b/Q2-0.5))+1)
                    nhist = np.convolve(g, nhist)
                    nhist = nhist[f:-f]
                    a1 = np.multiply(hweight, np.multiply(nhist, hsmooth)-hsmooth)
                    a2 = np.multiply(hweight, hsmooth-num4Bin)
                    # Exclude zero bin from fitting
                    la1 = np.ma.masked_array(a1, lidx).filled(0)
                    la2 = np.ma.masked_array(a2, lidx).filled(0)
                    # Least-squares mixture weight, capped at 1.
                    alpha = (-(la1 @ la2.T))/(la1 @ la1.T)
                    alpha = min(alpha, 1)
                    diff = (hweight*(alpha*a1+a2))**2
                KLD = sum(np.ma.masked_array(diff, lidx).filled(0))
                if KLD < E and alpha > 0.25:
                    E = KLD.copy()
                    Q1est = Q1.copy()
                    alphaest = alpha
                if KLD < Etmp[int(Q1) - 1]:
                    Etmp[int(Q1) - 1] = KLD
                    biasest = b
        Q1 = Q1est.copy()
        # NOTE(review): `alpha` here is the value left over from the last
        # search iteration; it is only replaced by alphaest two lines below.
        # Presumably intentional in the original port -- confirm.
        nhist = Q1 / Q2 * (floor2((Q2 / Q1) * (binHist + biasest / Q2 + 0.5)) - ceil2((Q2 / Q1) * (binHist + biasest / Q2 - 0.5)) + 1)
        nhist = np.convolve(g, nhist)
        nhist = nhist[f:-f]
        nhist = alpha * nhist + 1 - alpha
        ppt = np.mean(nhist) / (nhist + np.mean(nhist))
        alpha = alphaest
        q1table[math.floor((coe - 1) / 8), (coe - 1) % 8] = Q1est
        alphatable[math.floor((coe - 1) / 8), (coe - 1) % 8] = alpha
        # compute probabilities if DQ effect is present
        if(Q2 % Q1est > 0):
            # index
            nhist = Q1est / Q2 * (floor2((Q2 / Q1est) * (binHist + biasest / Q2 + 0.5)) - ceil2((Q2 / Q1est) * (binHist + biasest / Q2 - 0.5)) + 1)
            # histogram smoothing (avoids false alarms)
            nhist = np.convolve(g, nhist)
            nhist = nhist[f:-f]
            nhist = alpha * nhist + 1 - alpha
            ppu = nhist / (nhist + np.mean(nhist))
            ppt = np.mean(nhist) / (nhist + np.mean(nhist))
            # set zeroed coefficients as non-informative
            ppu[2**11] = 0.5
            ppt[2**11] = 0.5
            idx = np.floor(coeffFreq+2**11).astype(int)
            bppm = bppm * ppu[idx]
            bppmTampered = bppmTampered * ppt[idx]
    maskTampered = bppmTampered / (bppm + bppmTampered)
    maskTampered = np.reshape(maskTampered, (int(sizeCA[0] / 8), int(sizeCA[1] / 8)), order='F')
    # apply median filter to highlight connected regions
    maskTampered = medfilt2d(maskTampered, [5, 5])
    return [maskTampered, q1table, alphatable]
| 11,148 | 34.733974 | 191 | py |
pyIFD | pyIFD-main/src/pyIFD/CFA1.py | """
This module provides the CFA1 Algorithm
Color-filter-array-artifact-based detector, solution 1.
Algorithm attribution:
P. Ferrara, T. Bianchi, A. De Rosa and P. Piva,
"Image Forgery Localization via Fine-Grained Analysis of CFA Artifacts",
IEEE Transactions on Information Forensics & Security, vol. 7, no. 5,
Oct. 2012 (published online June 2012), pp. 1566-1577.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import cv2
import numpy as np
from pyIFD.CFA2 import CFATamperDetection_F1
def CFA1(impath):
    """
    Main driver of CFA1

    Args:
        impath: path to image

    Returns:
        OutputMap: CFA1 main output (per-block tampering probability map)
    """
    # cv2 loads BGR; the detector works on RGB doubles.
    rgb = np.double(cv2.cvtColor(cv2.imread(impath), cv2.COLOR_BGR2RGB))
    # Crop to even height/width so the 2x2 Bayer pattern tiles exactly.
    parity = np.mod(np.shape(rgb), 2)
    if parity[0] != 0:
        rgb = rgb[:-parity[0], :, :]
    if parity[1] != 0:
        rgb = rgb[:, :-parity[1], :]
    # Estimate the most likely Bayer configuration, then localize.
    estimated_bayer = CFATamperDetection_F1(rgb)[1]
    return CFAloc(rgb, estimated_bayer)
from numpy.ma import masked_array as ma
def Feature(sigma, pattern):
    """Ratio between the products of the two pixel-class variances.

    `pattern` is a 0/1 mask: entries where pattern == 1 contribute to the
    numerator product, entries where pattern == 0 to the denominator
    (masked_array excludes entries whose mask value is nonzero from np.prod).
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        numerator = np.prod(ma(sigma, (1 - pattern)))
        denominator = np.prod(ma(sigma, pattern))
        return numerator / denominator
def ApplyFunction(M, pattern, blk_size=(8, 8)):
    """
    Evaluates Feature() on every blk_size block of M.

    Args:
        M: 2d array.
        pattern: 0/1 mask forwarded to Feature().
        blk_size (optional, default=(8,8)): block dimensions.

    Returns:
        OutputMap: per-block feature values; cells of the ceil-sized grid
            that have no complete block underneath stay NaN.
    """
    block_view = GetBlockView(M, block=blk_size)
    n_rows = int(np.ceil(np.shape(M)[0] / blk_size[0]))
    n_cols = int(np.ceil(np.shape(M)[1] / blk_size[1]))
    OutputMap = np.full((n_rows, n_cols), np.NaN)
    for r in range(block_view.shape[0]):
        for c in range(block_view.shape[1]):
            OutputMap[r, c] = Feature(block_view[r, c], pattern)
    return OutputMap
from scipy.ndimage import convolve
import math
from numpy.ma import masked_invalid
from scipy.ndimage import median_filter
def prediction(im):
    """
    Predictor with a bilinear kernel.

    Returns the residual of each pixel against the mean of its 4-neighbours
    (the kernel is the negated predictor, applied in one convolution pass).
    """
    kernel = np.array([[0, 0.25, 0], [0.25, -1, 0.25], [0, 0.25, 0]],
                      dtype="double")
    # 'nearest' replicates border pixels so the residual is defined everywhere.
    return convolve(np.double(im), kernel, mode='nearest')
def getVarianceMap(im, Bayer, dim):
    """Local variance map of the prediction error, estimated separately for
    acquired and interpolated pixels under the given Bayer pattern.

    Args:
        im: 2d prediction-error array (green-channel residual).
        Bayer: 2x2 binary array marking acquired (1) vs interpolated (0) pixels.
        dim: [height, width] of `im`; both assumed even.

    Returns:
        var_map: 2d array combining the two per-class variance estimates.
    """
    # extend pattern over all image
    pattern = np.kron(np.ones((int(dim[0]/2),int(dim[1]/2))), Bayer)
    # separate acquired and interpolate pixels for a 7x7 window
    # (checkerboard: within a window only same-class pixels contribute)
    mask = np.array([[1, 0, 1, 0, 1, 0, 1],
                     [0, 1, 0, 1, 0, 1, 0],
                     [1, 0, 1, 0, 1, 0, 1],
                     [0, 1, 0, 1, 0, 1, 0],
                     [1, 0, 1, 0, 1, 0, 1],
                     [0, 1, 0, 1, 0, 1, 0],
                     [1, 0, 1, 0, 1, 0, 1]])
    # gaussian window for mean and variance
    N_window = 7
    sigma = 1
    sigrange = np.arange(-np.ceil(sigma*2),np.ceil(sigma*2)+4*sigma/(N_window-1),4*sigma/(N_window-1))
    x = np.tile(sigrange, (len(sigrange), 1))
    y = np.tile(sigrange[:, None], (1, len(sigrange)))
    gaussian_window = (1/(2*math.pi*sigma**2))*np.exp(-0.5*(x**2+y**2)/sigma**2)
    window = gaussian_window*mask
    # mc / vc: normalization constants for the weighted mean and variance
    mc = np.sum(window)
    vc = 1 - (np.sum(window**2))
    window_mean = window/mc
    # local variance of acquired pixels
    acquired = im*pattern
    mean_map_acquired = convolve(acquired, window_mean, mode='nearest')*pattern
    sqmean_map_acquired = convolve(acquired**2, window_mean, mode='nearest')*pattern
    var_map_acquired = (sqmean_map_acquired - (mean_map_acquired**2))/vc
    # local variance of interpolated pixels
    interpolated = im*(1-pattern)
    mean_map_interpolated = convolve(interpolated, window_mean, mode='nearest')*(1-pattern)
    sqmean_map_interpolated = convolve(interpolated**2, window_mean, mode='nearest')*(1-pattern)
    var_map_interpolated = (sqmean_map_interpolated - (mean_map_interpolated**2))/vc
    # The two maps have disjoint support (complementary patterns), so the sum
    # simply interleaves them into one full-resolution variance map.
    var_map = var_map_acquired + var_map_interpolated
    return var_map
def getFeature(inmap, Bayer, Nb):
    """Per-block CFA feature: tile the 2x2 Bayer mask over an Nb x Nb block,
    evaluate the variance-ratio statistic, and sanitize NaN/Inf entries."""
    tiled_pattern = np.kron(np.ones((int(Nb / 2), int(Nb / 2))), Bayer)
    statistics = ApplyFunction(inmap, tiled_pattern, (Nb, Nb))
    # NaN blocks become neutral (ratio 1); infinite ratios are clamped to 0.
    statistics[np.isnan(statistics)] = 1
    statistics[np.isinf(statistics)] = 0
    return statistics
def EMGaussianZM(x, tol, max_iter):
    """Fit a two-component Gaussian mixture to `x` via EM, with the first
    component forced to zero mean.

    Model: x ~ alpha * N(0, v1) + (1 - alpha) * N(mu2, v2).

    Args:
        x: 1d data array.
        tol: convergence threshold on the change of alpha per iteration.
        max_iter: iteration cap.

    Returns:
        [alpha, v1, mu2, v2]: estimated mixture weight and moments.
    """
    #
    # estimate Gaussian mixture parameters from data x with EM algorithm
    # assume x distributed as alpha * N(0,v1) + (1 - alpha) * N(mu2, v2)
    # initial guess
    alpha = 0.5
    mu2 = np.mean(x)
    v2 = np.var(x)
    v1 = v2/10
    alpha_old = 1
    k = 1
    while abs(alpha - alpha_old) > tol and k < max_iter:
        alpha_old = alpha
        k += 1
        # expectation: per-point component responsibilities (unnormalized
        # densities f1/f2, then normalized weights alpha1/alpha2)
        f1 = alpha * np.exp(-x**2/2/v1)/math.sqrt(v1)
        f2 = (1 - alpha) * np.exp(-(x - mu2)**2/2/v2)/math.sqrt(v2)
        alpha1 = f1 / (f1 + f2)
        alpha2 = f2 / (f1 + f2)
        # maximization: responsibility-weighted moment updates
        alpha = np.mean(alpha1)
        v1 = np.sum(alpha1 * x**2) / np.sum(alpha1)
        mu2 = np.sum(alpha2 * x) / np.sum(alpha2)
        v2 = np.sum(alpha2 * (x - mu2)**2) / np.sum(alpha2)
    # if abs(alpha - alpha_old) > tol:
    #     display('warning: EM algorithm: number of iterations > max_iter');
    return [alpha, v1, mu2, v2]
def MoGEstimationZM(statistics):
    """Estimate mixture-of-Gaussians parameters of log(statistics).

    Note: mutates `statistics` in place (NaN -> 1, negatives -> 0) before
    taking the logarithm.

    Returns:
        [mu, sigma] with mu = [mu2, 0] and sigma = [sqrt(v2), sqrt(v1)],
        i.e. the non-zero-mean component first.
    """
    # Expectation Maximization Algorithm with Zero-Mean forced first component
    # E/M algorithm parameters initialization
    tol = 1e-3
    max_iter = 500
    # NaN and Inf management
    statistics[np.isnan(statistics)] = 1
    statistics[statistics < 0] = 0
    with np.errstate(divide='ignore'):
        data = np.log(np.ndarray.flatten(statistics), order='F')
    # drop -inf entries produced by log(0) before fitting
    data=masked_invalid(data).compressed()
    # E/M algorithm
    [alpha, v1, mu2, v2] = EMGaussianZM(data, tol, max_iter)
    # Estimated model parameters
    mu = [mu2, 0]
    sigma = np.sqrt([v2, v1])
    return [mu, sigma]
def loglikelihood(statistics, mu, sigma):
    """Per-element log-likelihood ratio map between the two Gaussian mixture
    components fitted by MoGEstimationZM.

    Args:
        statistics: feature map; modified in place (zeros are clamped to a
            tiny positive value, then negatives are zeroed).
        mu: [mu2, mu1] component means (mu1 is the zero-mean component).
        sigma: [sigma2, sigma1] component standard deviations.

    Returns:
        LogLikelihood: array of the same shape as `statistics`.
    """
    # Smallest positive double that still has a finite logarithm; clamping
    # zeros here keeps np.log below out of -inf for them. (Renamed from
    # `min`, which shadowed the builtin; the unused `max = 1e304` bound from
    # the original was dropped.)
    log_floor = 1e-320
    statistics[statistics == 0] = log_floor
    # Negatives are zeroed *after* the clamp above, reproducing the reference
    # implementation's ordering.
    statistics[statistics < 0] = 0
    mu1 = mu[1]
    mu2 = mu[0]
    sigma1 = sigma[1]
    sigma2 = sigma[0]
    # log likelihood ratio of component 2 vs component 1 in log-domain
    logstat = np.log(statistics)
    LogLikelihood = math.log(sigma1) - math.log(sigma2) - 0.5 * (
        (((logstat - mu2)**2) / sigma2**2) - (((logstat - mu1)**2) / sigma1**2))
    return LogLikelihood
def CFAloc(image, Bayer, Nb=8, Ns=1):
    """Localize CFA-artifact inconsistencies on the green channel.

    Args:
        image: RGB image as doubles, even height and width.
        Bayer: 2x2 binary acquired/interpolated pattern.
        Nb: analysis block size (default 8).
        Ns: unused here; kept for interface compatibility.

    Returns:
        probMap: per-block tampering probability map (or a zero uint8 map
        when the mixture fit degenerates).
    """
    # parameters
    Nm = 5 # dimension of map filtering
    # green channel extraction
    im = image[:, :, 1]
    [h, w] = np.shape(im)
    dim = [h, w]
    # prediction error
    pred_error = prediction(im)
    # local variance of acquired and interpolated pixels
    var_map = getVarianceMap(pred_error, Bayer, dim)
    # proposed feature
    stat = getFeature(var_map, Bayer, Nb)
    # GMM parameters estimation
    [mu, sigma] = MoGEstimationZM(stat)
    # Degenerate fit (zero variance in either component): no decision possible.
    if sigma[0] == 0 or sigma[1] == 0:
        return np.zeros(np.shape(stat), dtype='uint8')
    # likelihood map
    loglikelihood_map = loglikelihood(stat, mu, sigma)
    # filtered and cumulated log-likelihood map
    mapLog = median_filter(loglikelihood_map, [Nm, Nm])
    # Logistic squashing of the (filtered) log-likelihood into [0, 1].
    with np.errstate(over='ignore'):
        expMap = np.exp(mapLog)
    probMap = 1/(expMap+1)
    return probMap
from numpy.lib.stride_tricks import as_strided as ast
def GetBlockView(A, block=(8, 8)):
    """Return a zero-copy 4-D view of A tiled into `block`-sized sub-arrays.

    Result shape is (rows, cols) + block, where rows/cols are the number of
    complete blocks fitting A; trailing partial rows/columns are dropped.
    """
    rows = int(np.floor(A.shape[0] / block[0]))
    cols = int(np.floor(A.shape[1] / block[1]))
    view_shape = (rows, cols) + block
    view_strides = (block[0] * A.strides[0], block[1] * A.strides[1]) + A.strides
    return ast(A, shape=view_shape, strides=view_strides)
| 7,738 | 28.765385 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/__init__.py | 0 | 0 | 0 | py | |
pyIFD | pyIFD-main/src/pyIFD/ADQ3.py | """
This module provides the ADQ3 algorithm
Aligned-double-JPEG-compression-based detector, solution 3.
Algorithm attribution:
Amerini, Irene, Rudy Becarelli, Roberto Caldelli, and Andrea Del Mastio.
"Splicing forgeries localization through the use of first digit features."
In Information Forensics and Security (WIFS), 2014 IEEE International
Workshop on, pp. 143-148. IEEE, 2014.
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
import jpegio as jio
import os
from pyIFD.util import dequantize
SupportVector = np.load(os.path.join(os.path.dirname(__file__), 'SupportVector.npy'), allow_pickle=True)
AlphaHat = np.load(os.path.join(os.path.dirname(__file__), 'AlphaHat.npy'), allow_pickle=True)
bias = np.array([0.10431149, -0.25288239, -0.2689174, 0.39425104, -1.11269764, -1.15730589, -1.18658372, -0.9444815, -3.46445309, -2.9434976])
def BenfordDQ(impath):
    """
    Main driver for ADQ3 algorithm.

    Slides a 64x64 window (step 8) over the luma DCT coefficients, extracts a
    first-digit histogram feature per window, and scores it with an SVM chosen
    by the estimated JPEG quality.

    Args:
        impath: Input image path, required to be JPEG with extension .jpg

    Returns:
        OutputMap: Output of ADQ3 algorithm (2D array); None on read failure
        or non-JPEG input, 0 if the image is smaller than one block.
    """
    if impath[-4:] == '.jpg':
        try:
            im = jio.read(impath)
        except Exception as e:
            print('Exception in JPEGIO read: ' + str(e))
            return
    else:
        print("Only .jpg accepted.")
        return
    # Map estimated quality to one of 10 SVM models (50..100 in steps of 5).
    Quality = EstimateJPEGQuality(im)
    QualityInd = int(np.round((Quality-50)/5+1))
    if QualityInd > 10:
        QualityInd = 10
    elif QualityInd < 1:
        QualityInd = 1
    # c1..c2: range of zig-zag DCT coefficients used for the feature.
    c1 = 2
    c2 = 10
    ncomp = 1
    digitBinsToKeep = [2, 5, 7]
    # NOTE: `block` aliases `im`, so the per-window assignment to
    # block.coef_arrays below also rewrites im.coef_arrays in place.
    block = im
    YCoef = im.coef_arrays[ncomp-1]
    Step = 8
    BlockSize = 64
    maxX = np.shape(YCoef)[0]+1-BlockSize
    maxY = np.shape(YCoef)[1]+1-BlockSize
    # NOTE(review): np.ceil wraps only (maxX-1)/(maxY-1), not the division by
    # Step — possibly intended np.ceil((maxX-1)/Step); verify vs reference.
    OutputMap = np.zeros((int(np.ceil(maxX-1)/Step+1), int(np.ceil(maxY-1)/Step+1)))
    if np.shape(im.coef_arrays[0])[0] < BlockSize:
        return 0
    for X in range(1, np.shape(YCoef)[0]+1, Step):
        # Clamp the window start so the last window stays inside the image.
        StartX = min(X, np.shape(YCoef)[0]-BlockSize+1)
        for Y in range(1, np.shape(YCoef)[1]+1, Step):
            StartY = min(Y, np.shape(YCoef)[1]-BlockSize+1)
            block.coef_arrays[ncomp-1] = YCoef[StartX-1:StartX+BlockSize-1, StartY-1:StartY+BlockSize-1]
            Features = ExtractFeatures(block, c1, c2, ncomp, digitBinsToKeep)
            # Normalize histogram counts by the number of 8x8 blocks in a window.
            Features /= 64
            Dist = svmdecision(Features, QualityInd-1)
            OutputMap[int(np.ceil((StartX-1)/Step)), int(np.ceil((StartY-1)/Step))] = Dist
    # Pad the top and left edges by replicating the first row/column so the
    # map aligns with the image center of each window.
    OutputMap = np.concatenate((np.tile(OutputMap[0, :], (int(np.ceil(BlockSize / 2 / Step)), 1)), OutputMap), axis=0)
    OutputMap = np.concatenate((np.tile(OutputMap[:, 0], (int(np.ceil(BlockSize / 2 / Step)), 1)).T, OutputMap), axis=1)
    return OutputMap
def EstimateJPEGQuality(imIn):
    """
    Estimates the quality of a JPEG object from its quantization tables.

    Args:
        imIn: jpegio struct; only `quant_tables` (list of 8x8 arrays) is
            used. When only a luma table is present (grayscale JPEG), the
            list is extended in place with that table so the chroma estimate
            is defined.

    Returns:
        Quality: estimated quality factor (float, roughly 0-100)
    """
    if(len(imIn.quant_tables) == 1):
        # The original `imIn.quant_tables[1] = imIn.quant_tables[0]` raises
        # IndexError on a length-1 Python list; append instead so grayscale
        # JPEGs reuse the luma table for the chroma estimate.
        imIn.quant_tables.append(imIn.quant_tables[0])
    YQuality = 100-(np.sum(imIn.quant_tables[0])-imIn.quant_tables[0][0][0])/63
    # NOTE(review): the reference subtracts the *luma* DC term here even though
    # the sum runs over the chroma table; kept as-is to match upstream output.
    CrCbQuality = 100-(np.sum(imIn.quant_tables[1])-imIn.quant_tables[0][0][0])/63
    Diff = abs(YQuality-CrCbQuality)*0.98
    Quality = (YQuality+2*CrCbQuality)/3+Diff
    return Quality
def ExtractFeatures(im, c1, c2, ncomp, digitBinsToKeep):
    """
    This function extracts a descriptor feature based on the first-digit distribution of DCT coefficients of an image. It is needed by BenfordDQ.

    Args:
        im: jpegio struct providing coef_arrays, quant_tables and comp_info
        c1: first DCT coefficient to be taken into account, DC term included
        c2: final DCT coefficient to be taken into account, DC term included
        ncomp: component from which to extract the feature (1 corresponds to the Y component)
        digitBinsToKeep: digits for which to keep their frequency

    Returns:
        output: Flattened feature vector
    """
    coeffArray = im.coef_arrays[ncomp-1]
    qtable = im.quant_tables[im.comp_info[ncomp].quant_tbl_no-1]
    Y = dequantize(coeffArray, qtable)
    # Zig-zag order of the 64 DCT coefficients within an 8x8 block
    # (1-based positions in row-major layout).
    coeff = [1, 9, 2, 3, 10, 17, 25, 18, 11, 4, 5, 12, 19, 26, 33, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 57, 50, 43, 36, 29, 22, 15, 8, 16,
             23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 24, 32, 39, 46, 53, 60, 61, 54, 47, 40, 48, 55, 62, 63, 56, 64]
    sizeCA = np.shape(coeffArray)
    digitHist = np.zeros((c2-c1+1, 10))
    for index in range(c1, c2+1):
        coe = coeff[index-1]
        # Column (1-based) of this coefficient inside the 8x8 block.
        start = coe % 8
        if start == 0:
            start = 8
        # Gather this coefficient from every 8x8 block (strided slicing).
        coeffFreq=np.ndarray.flatten(Y[int(np.ceil(coe/8))-1:sizeCA[0]-1:8, start-1:sizeCA[1]:8], order='F')
        # First significant (decimal) digit of each coefficient magnitude.
        NumOfDigits = (np.floor(np.log10(abs(coeffFreq) + 0.5)) + 1)
        tmp = [10**(i-1) for i in np.array(NumOfDigits)]
        FirstDigit = np.floor(np.divide(abs(coeffFreq), tmp)).astype("uint8")
        # Histogram over digits 0..9 with open-ended outer bins.
        binHist = list(np.arange(0.5, 9.5, 1))
        binHist.insert(0, -float('Inf'))
        binHist.append(float('Inf'))
        digitHist[index-c1, :] = np.histogram(FirstDigit, binHist)[0]
    HistToKeep = digitHist[:, digitBinsToKeep]
    return np.ndarray.flatten(HistToKeep)
def svmdecision(Xnew, index):
    """
    Uses the SVM model at the given index to classify Xnew.

    Args:
        Xnew: Feature vector/array to be classified
        index: Index of the pre-trained SVM (one per JPEG-quality bracket)

    Returns:
        f: decision value(s); sign/magnitude indicate class and confidence.
    """
    # Sigmoid (tanh) kernel decision function using the module-level
    # SupportVector / AlphaHat / bias arrays loaded from disk.
    f = np.dot(np.tanh(SupportVector[index] @ np.transpose(Xnew)-1), AlphaHat[index]) + bias[index]
return f | 5,636 | 37.609589 | 187 | py |
pyIFD | pyIFD-main/src/pyIFD/NOI4.py | """
This module provides the NOI4 algorithm
Noise-variance-inconsistency detector, solution 4 (leveraging median filters).
Algorithm attribution:
https://29a.ch/2015/08/21/noise-analysis-for-image-forensics
Based on code from:
Zampoglou, M., Papadopoulos, S., & Kompatsiaris, Y. (2017). Large-scale evaluation of splicing localization algorithms for web images. Multimedia Tools and Applications, 76(4), 4801–4834.
"""
import numpy as np
from scipy.signal import medfilt
from PIL import Image
def MedFiltForensics(impath, NSize=3, Multiplier=10, Flatten=True):
    """
    Main driver for NOI4: scaled residual against a per-channel median filter.

    Args:
        impath: input image path
        NSize (optional, default=3): size of the square median-filter kernel
        Multiplier: number to scale the residual by
        Flatten: whether to average the channels into one map (False/True)

    Returns:
        OutputMap: residual map as uint16 (2-D when flattened, else one
        channel per input channel)
    """
    Im = Image.open(impath)
    ImIn = np.array(Im, dtype=np.double)
    if ImIn.ndim == 2:
        # Grayscale input: promote to a single-channel volume so the
        # 3-way unpack and per-channel loop below cannot fail.
        ImIn = ImIn[:, :, np.newaxis]
    [x, y, channels] = ImIn.shape
    ImMed = np.zeros((x, y, channels))
    for Channel in range(channels):
        ImMed[:, :, Channel] = medfilt(ImIn[:, :, Channel], [NSize, NSize])
    # Residual between the image and its median-filtered version, amplified.
    OutputMap = (np.abs(ImIn-ImMed))*Multiplier
    if Flatten is True:
        OutputMap = np.mean(OutputMap, 2)
    return OutputMap.astype("uint16")
| 1,286 | 27.6 | 187 | py |
pyIFD | pyIFD-main/tests/validate_algo.py | from pyIFD.ADQ1 import detectDQ
from pyIFD.ADQ2 import getJmap
from pyIFD.ADQ3 import BenfordDQ
from pyIFD.BLK import GetBlockGrid
from pyIFD.CAGI import CAGI
from pyIFD.CFA1 import CFA1
from pyIFD.CFA2 import CFA2
from pyIFD.DCT import DCT
from pyIFD.ELA import ELA
from pyIFD.GHOST import GHOST
from pyIFD.NADQ import NADQ
from pyIFD.NOI1 import GetNoiseMap
from pyIFD.NOI2 import GetNoiseMaps
from pyIFD.NOI4 import MedFiltForensics
from pyIFD.NOI5 import PCANoise
import numpy as np
import scipy.io as spio
from skimage.metrics import structural_similarity as comp
import sys
import os
import argparse
import logging
def _similarity(reference, candidate, **comp_kwargs):
    """SSIM between a ground-truth map and an algorithm output.

    Returns None (after printing the error) when the arrays cannot be
    compared, e.g. on a shape mismatch.
    """
    try:
        return comp(reference, candidate, **comp_kwargs)
    except ValueError as e:
        print(e)
        return None


def _report(label, sim, criteria):
    """Print a PASS/FAIL line for `label`; return True iff sim >= criteria."""
    if sim < criteria:
        print(label + ': FAIL Similarity: ' + str(sim))
        return False
    print(label + ': PASS')
    return True


def validate_algo(infilename, matfilename, algoname, criteria=0.99):
    """Run one detector on `infilename` and compare its output against the
    MATLAB ground truth stored in `matfilename`.

    Args:
        infilename: path to the input image.
        matfilename: path to the .mat file holding the reference 'OutputMap'
            (plus 'OutputMap_Inverse' for CAGI).
        algoname: one of the supported algorithm identifiers.
        criteria: minimum SSIM similarity that counts as a PASS.

    Returns:
        True on PASS, False on FAIL/unknown algorithm, and — preserved quirk
        of the original implementation — the int 1 when a JPEG-only
        algorithm is given a non-.jpg input.
    """
    retVal = False
    # These detectors operate on DCT coefficients and require JPEG input.
    if algoname in ('ADQ2', 'ADQ3', 'NADQ') and infilename[-4:] != '.jpg':
        print(algoname + ' only takes .jpg inputs')
        return 1
    # Detectors whose result is a single map comparable to the ground-truth
    # 'OutputMap'; each entry maps the name to a callable producing that map.
    single_map = {
        'ADQ1': lambda f: detectDQ(f)[0],
        'ADQ2': lambda f: getJmap(f)[0],
        'ADQ3': BenfordDQ,
        'BLK': lambda f: GetBlockGrid(f)[0],
        'CFA1': CFA1,
        'CFA2': CFA2,
        'DCT': DCT,
        'ELA': lambda f: ELA(f).astype(np.uint8),
        'NADQ': NADQ,
        'NOI1': GetNoiseMap,
        'NOI2': lambda f: GetNoiseMaps(f, filter_type='haar'),
        'NOI4': lambda f: MedFiltForensics(f, Flatten=False),
    }
    if algoname in single_map:
        candidate = single_map[algoname](infilename)
        reference = spio.loadmat(matfilename)['OutputMap']
        # NOI4 keeps its colour channels, so SSIM must run multichannel.
        comp_kwargs = {'multichannel': True} if algoname == 'NOI4' else {}
        sim = _similarity(reference, candidate, **comp_kwargs)
        if sim is None:
            return retVal
        return _report(algoname, sim, criteria)
    if algoname == 'CAGI':
        cagitest = CAGI(infilename)
        cagimat = spio.loadmat(matfilename)
        sim = _similarity(cagimat['OutputMap'], cagitest[0])
        if sim is None:
            return retVal
        retVal = _report('CAGI', sim, criteria)
        # The inverse map is validated too; a failure here revokes the PASS.
        sim = _similarity(cagimat['OutputMap_Inverse'], cagitest[1])
        if sim is None:
            return retVal
        if not _report('CAGI INVERSE', sim, criteria):
            retVal = False
        return retVal
    if algoname == 'GHO':
        ghosttest = GHOST(infilename)
        ghostmat = spio.loadmat(matfilename)
        matDispImages = ghostmat['OutputMap'][0]
        pyDispImages = ghosttest[2]
        # GHOST yields one map per quality level; score their mean SSIM.
        similarity = []
        for i in range(len(matDispImages)):
            sim = _similarity(matDispImages[i], pyDispImages[i])
            if sim is None:
                return retVal
            similarity.append(sim)
        return _report('GHOST', np.mean(similarity), criteria)
    if algoname == 'NOI5':
        try:
            noi5test = PCANoise(infilename)
        except Exception:
            # Best-effort: PCANoise is known to fail on some inputs.
            print('NOI5: ALGO FAILED')
            return retVal
        reference = spio.loadmat(matfilename)['OutputMap']
        sim = _similarity(reference, noi5test[0])
        if sim is None:
            return retVal
        return _report('NOI5 OutputMap', sim, criteria)
    print('Unknown algorithm: ' + algoname)
    return retVal
def main(args):
    """Dispatch validation: walk a whole image tree, or check a single file."""
    if args.rootdircorrect is True:
        # Directory mode: every .jpg under the image root is validated against
        # ground-truth files under a same-named folder in the truth root.
        for root, dirs, files in os.walk(args.imagefilesrootdir):
            dirs.sort()
            for basefilename in sorted(files):
                stem, ext = os.path.splitext(basefilename)
                if ext != '.jpg':
                    continue
                imagefilename = os.path.join(root, basefilename)
                matfiledir = args.groundtruthfilesrootdir + '/' + stem
                for algorithm in args.algorithms:
                    matfilename = matfiledir + '/' + stem + '_' + algorithm + '.mat'
                    print('Validating image ' + basefilename + ' for algorithm ' + algorithm)
                    validate_algo(imagefilename, matfilename, algorithm)
    elif args.singlefilecorrect is True:
        # Single-file mode: the truth file shares the image's base name
        # (portion before the first underscore) with an algorithm suffix.
        basefilename = os.path.splitext(os.path.realpath(args.imagefilename))[0].split('_')[0]
        for algorithm in args.algorithms:
            print('Validating image ' + args.imagefilename + ' for algorithm ' + algorithm)
            groundtruthfilename = basefilename + '_' + algorithm + '.mat'
            validate_algo(args.imagefilename, groundtruthfilename, algorithm, args.simcriteria)
def get_arg(env, default):
    """Return the value of environment variable `env`, falling back to
    `default` when the variable is unset or empty."""
    value = os.getenv(env, "")
    return value if value != "" else default
def parse_args(parser):
    """Parse CLI args, overlay environment-variable overrides, and validate
    that either a single file or both root directories were supplied.

    Returns the argparse namespace, or None when the selection is invalid."""
    args = parser.parse_args()
    # Environment variables take precedence over CLI defaults.
    overrides = [
        ('PYIFD_ALGORITHMS', 'algorithms'),
        ('PYIFD_IMAGE_FILENAME', 'imagefilename'),
        ('PYIFD_IMAGE_ROOTDIR', 'imagefilesrootdir'),
        ('PYIFD_GROUND_TRUTH_ROOTDIR', 'groundtruthfilesrootdir'),
    ]
    for env_name, attr in overrides:
        setattr(args, attr, get_arg(env_name, getattr(args, attr)))
    args.algorithms = args.algorithms.split(',')
    args.simcriteria = float(get_arg('PYIFD_SIM_CRITERIA', args.simcriteria))
    args.singlefilecorrect = args.imagefilename is not None
    args.rootdircorrect = (args.imagefilesrootdir is not None) and (args.groundtruthfilesrootdir is not None)
    if args.singlefilecorrect and args.rootdircorrect:
        logging.warning('Both single file and image/ground truth rootdirs defined. Defaulting to rootdirs')
    elif not (args.singlefilecorrect or args.rootdircorrect):
        logging.error('Either imagefilename must be defined or imagefilesrootdir and groundtruthfilesrootdir must be defined')
        args = None
    return args
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    logging.info('Starting pyIFD validation')
    parser = argparse.ArgumentParser(description='Get algorithm list, image filename/root dir, ground truth filename/root dir, for each algorithm process each image and compare with ground truth')
    # (flag, help text, default) triples for every supported CLI option.
    cli_options = [
        ('--algorithms',
         'Comma separated list of algorithms to run, env variable PYIFD_ALGORITHMS',
         'All'),
        ('--imagefilename',
         'Input image filename, env variable PYIFD_IMAGE_FILENAME',
         None),
        ('--groundtruthfilename',
         'Input image ground truth filename, env variable PYIFD_GROUND_TRUTH_FILENAME',
         None),
        ('--imagefilesrootdir',
         'Input images root dir which will be searched for images, processing each, env variable PYIFD_IMAGE_ROOTDIR',
         None),
        ('--groundtruthfilesrootdir',
         'Input image ground truth root dir, env variable PYIFD_GROUND_TRUTH_ROOTDIR',
         None),
        ('--simcriteria',
         'Algorithm similarity criteria, env variable PYIFD_SIM_CRITERIA',
         0.99),
    ]
    for flag, help_text, default in cli_options:
        parser.add_argument(flag, help=help_text, default=default)
    cmdline_args = parse_args(parser)
    if cmdline_args is not None:
        logging.info('Starting validation')
        main(cmdline_args)
    logging.info('Exiting validation')
| 12,335 | 29.309582 | 196 | py |
mt3 | mt3-main/setup.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install mt3."""
import os
import sys
import setuptools
# To enable importing version.py directly, we add its path to sys.path.
# (This must happen before the `from version import ...` below.)
version_path = os.path.join(os.path.dirname(__file__), 'mt3')
sys.path.append(version_path)
from version import __version__  # pylint: disable=g-import-not-at-top

setuptools.setup(
    name='mt3',
    version=__version__,
    description='Multi-Task Multitrack Music Transcription',
    author='Google Inc.',
    author_email='no-reply@google.com',
    url='http://github.com/magenta/mt3',
    license='Apache 2.0',
    packages=setuptools.find_packages(),
    # Ship gin configuration files alongside the Python modules.
    package_data={
        '': ['*.gin'],
    },
    scripts=[],
    install_requires=[
        'absl-py',
        'flax @ git+https://github.com/google/flax#egg=flax',
        'gin-config',
        'immutabledict',
        'librosa',
        'mir_eval',
        'note_seq',
        'numpy',
        'pretty_midi',
        'scikit-learn',
        'scipy',
        'seqio @ git+https://github.com/google/seqio#egg=seqio',
        't5',
        't5x @ git+https://github.com/google-research/t5x#egg=t5x',
        'tensorflow',
        'tensorflow-datasets',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
    tests_require=['pytest'],
    setup_requires=['pytest-runner'],
    keywords='music transcription machinelearning audio',
)
| 2,153 | 30.676471 | 74 | py |
mt3 | mt3-main/mt3/inference.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for MT3 inference."""
import functools
import json
from typing import Any, Optional, Sequence
import gin
from mt3 import metrics_utils
from mt3 import note_sequences
from mt3 import tasks
from mt3 import vocabularies
import note_seq
import seqio
import tensorflow as tf
def write_inferences_to_file(
    path: str,
    inferences: Sequence[Any],
    task_ds: tf.data.Dataset,
    mode: str,
    vocabulary: Optional[seqio.Vocabulary] = None,
    vocab_config=gin.REQUIRED,
    onsets_only=gin.REQUIRED,
    use_ties=gin.REQUIRED) -> None:
  """Writes model predictions, ground truth transcriptions, and input audio.

  For now this only works for transcription tasks with ties.

  Args:
    path: File path to write to.
    inferences: Model inferences, output of predict_batch.
    task_ds: Original task dataset.
    mode: Prediction mode; must be 'predict' as 'score' is not supported.
    vocabulary: Task output vocabulary.
    vocab_config: Vocabulary config object.
    onsets_only: If True, only predict onsets.
    use_ties: If True, use "tie" representation.

  Raises:
    ValueError: On 'score' mode, missing vocabulary, or onsets+ties combo.
  """
  if mode == 'score':
    raise ValueError('`score` mode currently not supported in MT3')
  if not vocabulary:
    raise ValueError('`vocabulary` parameter required in `predict` mode')
  if onsets_only and use_ties:
    raise ValueError('ties not compatible with onset-only transcription')
  # Pick the event-encoding spec matching the transcription variant.
  if onsets_only:
    encoding_spec = note_sequences.NoteOnsetEncodingSpec
  elif not use_ties:
    encoding_spec = note_sequences.NoteEncodingSpec
  else:
    encoding_spec = note_sequences.NoteEncodingWithTiesSpec
  codec = vocabularies.build_codec(vocab_config)
  # Collect per-segment targets and predictions, keyed later by unique_id.
  targets = []
  predictions = []
  for inp, output in zip(task_ds.as_numpy_iterator(), inferences):
    tokens = tasks.trim_eos(vocabulary.decode_tf(output).numpy())
    start_time = inp['input_times'][0]
    # Round down to nearest symbolic token step.
    start_time -= start_time % (1 / codec.steps_per_second)
    targets.append({
        'unique_id': inp['unique_id'][0],
        'ref_ns': inp['sequence'][0] if inp['sequence'][0] else None,
    })
    predictions.append({
        'unique_id': inp['unique_id'][0],
        'est_tokens': tokens,
        'start_time': start_time,
        # Input audio is not part of the "prediction" but the below call to
        # metrics_utils.event_predictions_to_ns handles the concatenation.
        'raw_inputs': inp['raw_inputs']
    })
  # The first target for each full example contains the NoteSequence; just
  # organize by ID.
  full_targets = {}
  for target in targets:
    if target['ref_ns']:
      full_targets[target['unique_id']] = {
          'ref_ns': note_seq.NoteSequence.FromString(target['ref_ns'])
      }
  # Stitch the per-segment token predictions back into one NoteSequence per id.
  full_predictions = metrics_utils.combine_predictions_by_id(
      predictions=predictions,
      combine_predictions_fn=functools.partial(
          metrics_utils.event_predictions_to_ns,
          codec=codec,
          encoding_spec=encoding_spec))
  assert sorted(full_targets.keys()) == sorted(full_predictions.keys())
  full_target_prediction_pairs = [
      (full_targets[id], full_predictions[id])
      for id in sorted(full_targets.keys())
  ]
  def note_to_dict(note):
    # JSON-serializable view of a NoteSequence note.
    return {
        'start_time': note.start_time,
        'end_time': note.end_time,
        'pitch': note.pitch,
        'velocity': note.velocity,
        'program': note.program,
        'is_drum': note.is_drum
    }
  # One JSON object per example, one line each (JSONL).
  with tf.io.gfile.GFile(path, 'w') as f:
    for target, prediction in full_target_prediction_pairs:
      json_dict = {
          'id': target['ref_ns'].id,
          'est_notes':
              [note_to_dict(note) for note in prediction['est_ns'].notes]
      }
      json_str = json.dumps(json_dict, cls=seqio.TensorAndNumpyEncoder)
      f.write(json_str + '\n')
| 4,386 | 30.561151 | 76 | py |
mt3 | mt3-main/mt3/vocabularies_test.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vocabularies."""
from absl.testing import absltest
from mt3 import vocabularies
import numpy as np
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_eager_execution()
class VocabulariesTest(absltest.TestCase):
  """Tests for velocity binning and the generic token vocabulary."""

  def test_velocity_quantization(self):
    # Velocity 0 maps to bin 0 and back, regardless of bin count.
    for bins in (1, 127):
      self.assertEqual(0, vocabularies.velocity_to_bin(0, num_velocity_bins=bins))
      self.assertEqual(0, vocabularies.bin_to_velocity(0, num_velocity_bins=bins))
    # bin -> velocity -> bin round-trips to the identity.
    self.assertEqual(
        1,
        vocabularies.velocity_to_bin(
            vocabularies.bin_to_velocity(1, num_velocity_bins=1),
            num_velocity_bins=1))
    for velocity_bin in range(1, 128):
      self.assertEqual(
          velocity_bin,
          vocabularies.velocity_to_bin(
              vocabularies.bin_to_velocity(velocity_bin, num_velocity_bins=127),
              num_velocity_bins=127))

  def test_encode_decode(self):
    vocab = vocabularies.GenericTokenVocabulary(32)
    raw_tokens = [1, 2, 3]
    shifted_ids = [4, 5, 6]  # raw ids offset by the special-token count
    # Encoding, pure-Python and TF paths.
    self.assertSequenceEqual(vocab.encode(raw_tokens), shifted_ids)
    np.testing.assert_array_equal(
        vocab.encode_tf(tf.convert_to_tensor(raw_tokens)).numpy(),
        shifted_ids)
    # Decoding inverts encoding on both paths.
    self.assertSequenceEqual(vocab.decode(shifted_ids), raw_tokens)
    np.testing.assert_array_equal(
        vocab.decode_tf(tf.convert_to_tensor(shifted_ids)).numpy(),
        raw_tokens)

  def test_decode_invalid_ids(self):
    vocab = vocabularies.GenericTokenVocabulary(32, extra_ids=4)
    token_ids = [0, 2, 3, 4, 34, 35]
    # Special/extra ids decode to -2; regular ids shift down.
    expected = [-2, -2, 0, 1, 31, -2]
    self.assertSequenceEqual(vocab.decode(token_ids), expected)
    np.testing.assert_array_equal(
        vocab.decode_tf(tf.convert_to_tensor(token_ids)).numpy(),
        expected)

  def test_decode_eos(self):
    vocab = vocabularies.GenericTokenVocabulary(32)
    token_ids = [0, 2, 3, 4, 1, 0, 1, 0]
    # Python decode function truncates everything after first EOS.
    self.assertSequenceEqual(vocab.decode(token_ids), [-2, -2, 0, 1, -1])
    # TF decode function preserves array length, padding with EOS markers.
    np.testing.assert_array_equal(
        vocab.decode_tf(tf.convert_to_tensor(token_ids)).numpy(),
        [-2, -2, 0, 1, -1, -1, -1, -1])

  def test_encode_invalid_id(self):
    vocab = vocabularies.GenericTokenVocabulary(32)
    in_range = [0, 15, 31]
    # Valid ids must not raise on either path.
    vocab.encode(in_range)
    vocab.encode_tf(tf.convert_to_tensor(in_range))
    # Out-of-range ids (below 0 or >= vocab size) raise on both paths.
    for out_of_range in ([-1, 15, 31], [0, 15, 32]):
      with self.assertRaises(ValueError):
        vocab.encode(out_of_range)
      with self.assertRaises(tf.errors.InvalidArgumentError):
        vocab.encode_tf(tf.convert_to_tensor(out_of_range))

  def test_encode_dtypes(self):
    vocab = vocabularies.GenericTokenVocabulary(32)
    sample = [0, 15, 31]
    # Encoding preserves the input tensor dtype.
    for dtype in (tf.int32, tf.int64):
      encoded = vocab.encode_tf(tf.convert_to_tensor(sample, dtype))
      self.assertEqual(dtype, encoded.dtype)
# Allow running the tests directly, e.g. `python vocabularies_test.py`.
if __name__ == '__main__':
  absltest.main()
| 4,164 | 35.217391 | 80 | py |
mt3 | mt3-main/mt3/run_length_encoding_test.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for run_length_encoding."""
from mt3 import event_codec
from mt3 import run_length_encoding
import note_seq
import numpy as np
import seqio
import tensorflow as tf
# Shorthand for seqio's dataset-comparison helper used throughout these tests.
assert_dataset = seqio.test_utils.assert_dataset

# Shared codec fixture: 100 time-shift steps followed by pitch, velocity,
# drum, program, and tie event ranges.
codec = event_codec.Codec(
    max_shift_steps=100,
    steps_per_second=100,
    event_ranges=[
        event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
                               note_seq.MAX_MIDI_PITCH),
        event_codec.EventRange('velocity', 0, 127),
        event_codec.EventRange('drum', note_seq.MIN_MIDI_PITCH,
                               note_seq.MAX_MIDI_PITCH),
        event_codec.EventRange('program', note_seq.MIN_MIDI_PROGRAM,
                               note_seq.MAX_MIDI_PROGRAM),
        event_codec.EventRange('tie', 0, 0)
    ])

# Preprocessor under test, bound to the shared codec.
run_length_encode_shifts = run_length_encoding.run_length_encode_shifts_fn(
    codec=codec)
class RunLengthEncodingTest(tf.test.TestCase):
  """Tests for run-length encoding/merging of event-token targets."""

  def test_remove_redundant_state_changes(self):
    # Repeated velocity/program state-change tokens are dropped; only the
    # first occurrence of each state change is kept.
    og_dataset = tf.data.Dataset.from_tensors({
        'targets': [3, 525, 356, 161, 2, 525, 356, 161, 355, 394]
    })
    assert_dataset(
        run_length_encoding.remove_redundant_state_changes_fn(
            codec=codec,
            state_change_event_types=['velocity', 'program'])(og_dataset),
        {
            'targets': [3, 525, 356, 161, 2, 161, 355, 394],
        })

  def test_run_length_encode_shifts(self):
    # Runs of single-step shift tokens (1) collapse into one token whose
    # value is the run length.
    og_dataset = tf.data.Dataset.from_tensors({
        'targets': [1, 1, 1, 161, 1, 1, 1, 162, 1, 1, 1]
    })
    assert_dataset(
        run_length_encode_shifts(og_dataset),
        {
            'targets': [3, 161, 6, 162],
        })

  def test_run_length_encode_shifts_beyond_max_length(self):
    # A run longer than max_shift_steps (100) is emitted as multiple
    # maximal shift tokens plus the remainder.
    og_dataset = tf.data.Dataset.from_tensors({
        'targets': [1] * 202 + [161, 1, 1, 1]
    })
    assert_dataset(
        run_length_encode_shifts(og_dataset),
        {
            'targets': [100, 100, 2, 161],
        })

  def test_run_length_encode_shifts_simultaneous(self):
    # Events at the same time step share a single preceding shift token.
    og_dataset = tf.data.Dataset.from_tensors({
        'targets': [1, 1, 1, 161, 162, 1, 1, 1]
    })
    assert_dataset(
        run_length_encode_shifts(og_dataset),
        {
            'targets': [3, 161, 162],
        })

  def test_merge_run_length_encoded_targets(self):
    # Two encoded streams merge into one, interleaved by absolute time.
    # pylint: disable=bad-whitespace
    targets = np.array([
        [  3, 161, 162,   5, 163],
        [160, 164,   3, 165,   0]
    ])
    # pylint: enable=bad-whitespace
    merged_targets = run_length_encoding.merge_run_length_encoded_targets(
        targets=targets, codec=codec)
    expected_merged_targets = [
        160, 164, 3, 161, 162, 165, 5, 163
    ]
    np.testing.assert_array_equal(expected_merged_targets, merged_targets)
# Run the test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 3,362 | 30.138889 | 75 | py |
mt3 | mt3-main/mt3/spectral_ops.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forked from DDSP spectral_ops.py (just for compute_logmel)."""
import gin
import tensorflow.compat.v2 as tf
def tf_float32(x):
  """Coerce `x` to a float32 tf.Tensor (a no-op cast for float32 tensors)."""
  if not isinstance(x, tf.Tensor):
    return tf.convert_to_tensor(x, tf.float32)
  return tf.cast(x, dtype=tf.float32)
def safe_log(x, eps=1e-5):
  """Natural log with non-positive inputs clamped to `eps` first."""
  clamped = tf.where(x <= 0.0, eps, x)
  return tf.math.log(clamped)
def stft(audio, frame_size=2048, overlap=0.75, pad_end=True):
  """Differentiable stft in tensorflow, computed in batch."""
  audio = tf_float32(audio)
  # Drop a trailing channel dim if present so tf.signal.stft sees
  # [batch, time].
  if len(audio.shape) == 3:
    audio = tf.squeeze(audio, axis=-1)
  hop_size = int(frame_size * (1.0 - overlap))
  return tf.signal.stft(
      signals=audio,
      frame_length=int(frame_size),
      frame_step=hop_size,
      fft_length=None,  # Use enclosing power of 2.
      pad_end=pad_end)
@gin.register
def compute_mag(audio, size=2048, overlap=0.75, pad_end=True):
  """Magnitude spectrogram: |STFT|, returned as float32."""
  spectrum = stft(audio, frame_size=size, overlap=overlap, pad_end=pad_end)
  return tf_float32(tf.abs(spectrum))
@gin.register
def compute_mel(audio,
                lo_hz=0.0,
                hi_hz=8000.0,
                bins=64,
                fft_size=2048,
                overlap=0.75,
                pad_end=True,
                sample_rate=16000):
  """Calculate Mel Spectrogram."""
  mag = compute_mag(audio, fft_size, overlap, pad_end)
  n_spectrogram_bins = int(mag.shape[-1])
  mel_matrix = tf.signal.linear_to_mel_weight_matrix(
      bins, n_spectrogram_bins, sample_rate, lo_hz, hi_hz)
  mel = tf.tensordot(mag, mel_matrix, 1)
  # tensordot loses static shape information; restore it explicitly.
  mel.set_shape(mag.shape[:-1].concatenate(mel_matrix.shape[-1:]))
  return mel
@gin.register
def compute_logmel(audio,
                   lo_hz=80.0,
                   hi_hz=7600.0,
                   bins=64,
                   fft_size=2048,
                   overlap=0.75,
                   pad_end=True,
                   sample_rate=16000):
  """Logarithmic amplitude of mel-scaled spectrogram."""
  return safe_log(
      compute_mel(audio, lo_hz, hi_hz, bins, fft_size, overlap, pad_end,
                  sample_rate))
| 2,845 | 30.977528 | 78 | py |
mt3 | mt3-main/mt3/network.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5.1.1 Transformer model."""
from typing import Any, Sequence
from flax import linen as nn
from flax import struct
import jax.numpy as jnp
from mt3 import layers
@struct.dataclass
class T5Config:
  """Global hyperparameters used to minimize obnoxious kwarg plumbing."""
  # Size of the output token vocabulary (decoder logits dimension).
  vocab_size: int
  # Activation dtypes.
  dtype: Any = jnp.float32
  # Embedding / model width.
  emb_dim: int = 512
  num_heads: int = 8
  num_encoder_layers: int = 6
  num_decoder_layers: int = 6
  # Per-head attention dimension.
  head_dim: int = 64
  # Hidden size of the feed-forward (MLP) blocks.
  mlp_dim: int = 2048
  # Activation functions are retrieved from Flax.
  mlp_activations: Sequence[str] = ('relu',)
  dropout_rate: float = 0.1
  # If `True`, the embedding weights are used in the decoder output layer.
  logits_via_embedding: bool = False
class EncoderLayer(nn.Module):
  """Transformer encoder layer: self-attention block + MLP block.

  Both blocks use pre-layer-norm and residual connections. Submodule names
  ('attention', 'mlp', ...) determine the parameter tree layout, so they must
  not change.
  """
  config: T5Config

  @nn.compact
  def __call__(self, inputs, encoder_mask=None, deterministic=False):
    cfg = self.config

    # Attention block.
    assert inputs.ndim == 3
    x = layers.LayerNorm(
        dtype=cfg.dtype, name='pre_attention_layer_norm')(
            inputs)
    # [batch, length, emb_dim] -> [batch, length, emb_dim]
    x = layers.MultiHeadDotProductAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        head_dim=cfg.head_dim,
        dropout_rate=cfg.dropout_rate,
        name='attention')(
            x, x, encoder_mask, deterministic=deterministic)
    # Dropout is broadcast along the length dimension (-2).
    x = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            x, deterministic=deterministic)
    x = x + inputs  # residual connection

    # MLP block.
    y = layers.LayerNorm(dtype=cfg.dtype, name='pre_mlp_layer_norm')(x)
    # [batch, length, emb_dim] -> [batch, length, emb_dim]
    y = layers.MlpBlock(
        intermediate_dim=cfg.mlp_dim,
        activations=cfg.mlp_activations,
        intermediate_dropout_rate=cfg.dropout_rate,
        dtype=cfg.dtype,
        name='mlp',
    )(y, deterministic=deterministic)
    y = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            y, deterministic=deterministic)
    y = y + x  # residual connection
    return y
class DecoderLayer(nn.Module):
  """Transformer decoder layer that attends to the encoder.

  Three pre-layer-norm residual blocks: causal self-attention,
  encoder-decoder cross-attention, and an MLP.
  """
  config: T5Config

  @nn.compact
  def __call__(self,
               inputs,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None,
               deterministic=False,
               decode=False,
               max_decode_length=None):
    cfg = self.config

    # inputs: embedded inputs to the decoder with shape [batch, length, emb_dim]
    x = layers.LayerNorm(
        dtype=cfg.dtype, name='pre_self_attention_layer_norm')(
            inputs)

    # Self-attention block
    x = layers.MultiHeadDotProductAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        head_dim=cfg.head_dim,
        dropout_rate=cfg.dropout_rate,
        name='self_attention')(
            x,
            x,
            decoder_mask,
            deterministic=deterministic,
            decode=decode)  # `decode` enables the autoregressive cache
    x = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            x, deterministic=deterministic)
    x = x + inputs  # residual connection

    # Encoder-Decoder block.
    y = layers.LayerNorm(
        dtype=cfg.dtype, name='pre_cross_attention_layer_norm')(
            x)
    y = layers.MultiHeadDotProductAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        head_dim=cfg.head_dim,
        dropout_rate=cfg.dropout_rate,
        name='encoder_decoder_attention')(
            y, encoded, encoder_decoder_mask, deterministic=deterministic)
    y = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            y, deterministic=deterministic)
    y = y + x  # residual connection

    # MLP block.
    z = layers.LayerNorm(dtype=cfg.dtype, name='pre_mlp_layer_norm')(y)
    z = layers.MlpBlock(
        intermediate_dim=cfg.mlp_dim,
        activations=cfg.mlp_activations,
        intermediate_dropout_rate=cfg.dropout_rate,
        dtype=cfg.dtype,
        name='mlp',
    )(z, deterministic=deterministic)
    z = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            z, deterministic=deterministic)
    z = z + y  # residual connection
    return z
class Encoder(nn.Module):
  """A stack of encoder layers.

  Unlike a text encoder, the input here is continuous (e.g. spectrogram
  frames), so a dense projection replaces token embedding.
  """
  config: T5Config

  @nn.compact
  def __call__(self,
               encoder_input_tokens,
               encoder_mask=None,
               deterministic=False):
    cfg = self.config
    assert encoder_input_tokens.ndim == 3  # [batch, length, depth]

    seq_length = encoder_input_tokens.shape[-2]
    inputs_positions = jnp.arange(seq_length)[None, :]

    # [batch, length, depth] -> [batch, length, emb_dim]
    x = layers.DenseGeneral(  # pytype: disable=wrong-arg-types  # jax-types
        cfg.emb_dim,
        dtype=cfg.dtype,
        kernel_init=nn.linear.default_kernel_init,
        kernel_axes=('vocab', 'embed'),
        name='continuous_inputs_projection')(encoder_input_tokens)

    # Add fixed (non-learned) position embeddings.
    x = x + layers.FixedEmbed(features=cfg.emb_dim)(inputs_positions)
    x = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            x, deterministic=deterministic)
    x = x.astype(cfg.dtype)

    for lyr in range(cfg.num_encoder_layers):
      # [batch, length, emb_dim] -> [batch, length, emb_dim]
      x = EncoderLayer(
          config=cfg,
          name=f'layers_{lyr}')(x, encoder_mask, deterministic)

    x = layers.LayerNorm(dtype=cfg.dtype, name='encoder_norm')(x)
    return nn.Dropout(rate=cfg.dropout_rate)(x, deterministic=deterministic)
class Decoder(nn.Module):
  """A stack of decoder layers as a part of an encoder-decoder architecture."""
  config: T5Config

  @nn.compact
  def __call__(self,
               encoded,
               decoder_input_tokens,
               decoder_positions=None,
               decoder_mask=None,
               encoder_decoder_mask=None,
               deterministic=False,
               decode=False,
               max_decode_length=None):
    cfg = self.config
    assert decoder_input_tokens.ndim == 2  # [batch, len]

    # NOTE(review): the `decoder_positions` argument is unconditionally
    # overwritten here, so caller-supplied positions are ignored — confirm
    # this is intentional (e.g. packing is never used with this decoder).
    seq_length = decoder_input_tokens.shape[-1]
    decoder_positions = jnp.arange(seq_length)[None, :]

    # [batch, length] -> [batch, length, emb_dim]
    y = layers.Embed(  # pytype: disable=wrong-arg-types  # jax-types
        num_embeddings=cfg.vocab_size,
        features=cfg.emb_dim,
        dtype=cfg.dtype,
        attend_dtype=jnp.float32,  # for logit training stability
        embedding_init=nn.initializers.normal(stddev=1.0),
        one_hot=True,
        name='token_embedder')(decoder_input_tokens.astype('int32'))

    # Fixed position embeddings; `decode` makes FixedEmbed track the
    # current step during autoregressive decoding.
    y = y + layers.FixedEmbed(features=cfg.emb_dim)(
        decoder_positions, decode=decode)
    y = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            y, deterministic=deterministic)
    y = y.astype(cfg.dtype)

    for lyr in range(cfg.num_decoder_layers):
      # [batch, length, emb_dim] -> [batch, length, emb_dim]
      y = DecoderLayer(
          config=cfg, name=f'layers_{lyr}')(
              y,
              encoded,
              decoder_mask=decoder_mask,
              encoder_decoder_mask=encoder_decoder_mask,
              deterministic=deterministic,
              decode=decode,
              max_decode_length=max_decode_length)

    y = layers.LayerNorm(dtype=cfg.dtype, name='decoder_norm')(y)
    y = nn.Dropout(
        rate=cfg.dropout_rate, broadcast_dims=(-2,))(
            y, deterministic=deterministic)

    # [batch, length, emb_dim] -> [batch, length, vocab_size]
    if cfg.logits_via_embedding:
      # NOTE(review): `self.shared_embedding` is not defined anywhere in this
      # module, so this branch would raise AttributeError if
      # `logits_via_embedding=True` — verify before enabling that option.
      # Use the transpose of embedding matrix for logit transform.
      logits = self.shared_embedding.attend(y)
      # Correctly normalize pre-softmax logits for this shared case.
      logits = logits / jnp.sqrt(y.shape[-1])
    else:
      logits = layers.DenseGeneral(
          cfg.vocab_size,
          dtype=jnp.float32,  # Use float32 for stability.
          kernel_axes=('embed', 'vocab'),
          name='logits_dense')(
              y)
    return logits
class Transformer(nn.Module):
  """An encoder-decoder Transformer model."""
  config: T5Config

  def setup(self):
    cfg = self.config
    self.encoder = Encoder(config=cfg)
    self.decoder = Decoder(config=cfg)

  def encode(self,
             encoder_input_tokens,
             encoder_segment_ids=None,
             enable_dropout=True):
    """Applies Transformer encoder-branch on the inputs."""
    cfg = self.config
    assert encoder_input_tokens.ndim == 3  # (batch, length, depth)

    # Make padding attention mask; we don't actually mask out any input
    # positions, letting the model potentially attend to the zero vector used as
    # padding.
    encoder_mask = layers.make_attention_mask(
        jnp.ones(encoder_input_tokens.shape[:-1]),
        jnp.ones(encoder_input_tokens.shape[:-1]),
        dtype=cfg.dtype)
    # Add segmentation block-diagonal attention mask if using segmented data.
    if encoder_segment_ids is not None:
      encoder_mask = layers.combine_masks(
          encoder_mask,
          layers.make_attention_mask(
              encoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=cfg.dtype))

    return self.encoder(
        encoder_input_tokens, encoder_mask, deterministic=not enable_dropout)

  def decode(
      self,
      encoded,
      encoder_input_tokens,  # only needed for masks
      decoder_input_tokens,
      decoder_target_tokens,
      encoder_segment_ids=None,
      decoder_segment_ids=None,
      decoder_positions=None,
      enable_dropout=True,
      decode=False,
      max_decode_length=None):
    """Applies Transformer decoder-branch on encoded-input and target."""
    cfg = self.config

    # Make padding attention masks.
    if decode:
      # Do not mask decoder attention based on targets padding at
      # decoding/inference time.
      decoder_mask = None
      encoder_decoder_mask = layers.make_attention_mask(
          jnp.ones_like(decoder_target_tokens),
          jnp.ones(encoder_input_tokens.shape[:-1]),
          dtype=cfg.dtype)
    else:
      # Training: causal mask (plus padding/segment masks) over the targets.
      decoder_mask = layers.make_decoder_mask(
          decoder_target_tokens=decoder_target_tokens,
          dtype=cfg.dtype,
          decoder_segment_ids=decoder_segment_ids)
      encoder_decoder_mask = layers.make_attention_mask(
          decoder_target_tokens > 0,
          jnp.ones(encoder_input_tokens.shape[:-1]),
          dtype=cfg.dtype)

    # Add segmentation block-diagonal attention masks if using segmented data.
    if encoder_segment_ids is not None:
      if decode:
        raise ValueError(
            'During decoding, packing should not be used but '
            '`encoder_segment_ids` was passed to `Transformer.decode`.')

      encoder_decoder_mask = layers.combine_masks(
          encoder_decoder_mask,
          layers.make_attention_mask(
              decoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=cfg.dtype))

    logits = self.decoder(
        encoded,
        decoder_input_tokens=decoder_input_tokens,
        decoder_positions=decoder_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        deterministic=not enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length)
    return logits.astype(self.config.dtype)

  def __call__(self,
               encoder_input_tokens,
               decoder_input_tokens,
               decoder_target_tokens,
               encoder_segment_ids=None,
               decoder_segment_ids=None,
               encoder_positions=None,
               decoder_positions=None,
               *,
               enable_dropout: bool = True,
               decode: bool = False):
    """Applies Transformer model on the inputs.

    This method requires both decoder_target_tokens and decoder_input_tokens,
    which is a shifted version of the former. For a packed dataset, it usually
    has additional processing applied. For example, the first element of each
    sequence has id 0 instead of the shifted EOS id from the previous sequence.

    Args:
      encoder_input_tokens: input data to the encoder.
      decoder_input_tokens: input token to the decoder.
      decoder_target_tokens: target token to the decoder.
      encoder_segment_ids: encoder segmentation info for packed examples.
      decoder_segment_ids: decoder segmentation info for packed examples.
      encoder_positions: encoder subsequence positions for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.

    Returns:
      logits array from full transformer.
    """
    encoded = self.encode(
        encoder_input_tokens,
        encoder_segment_ids=encoder_segment_ids,
        enable_dropout=enable_dropout)

    return self.decode(
        encoded,
        encoder_input_tokens,  # only used for masks
        decoder_input_tokens,
        decoder_target_tokens,
        encoder_segment_ids=encoder_segment_ids,
        decoder_segment_ids=decoder_segment_ids,
        decoder_positions=decoder_positions,
        enable_dropout=enable_dropout,
        decode=decode)
| 13,894 | 32.890244 | 80 | py |
mt3 | mt3-main/mt3/mixing.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for mixing (in the audio sense) multiple transcription examples."""
from typing import Callable, Optional, Sequence
import gin
from mt3 import event_codec
from mt3 import run_length_encoding
import numpy as np
import seqio
import tensorflow as tf
@gin.configurable
def mix_transcription_examples(
    ds: tf.data.Dataset,
    sequence_length: seqio.preprocessors.SequenceLengthType,
    output_features: seqio.preprocessors.OutputFeaturesType,
    codec: event_codec.Codec,
    inputs_feature_key: str = 'inputs',
    targets_feature_keys: Sequence[str] = ('targets',),
    max_examples_per_mix: Optional[int] = None,
    shuffle_buffer_size: int = seqio.SHUFFLE_BUFFER_SIZE
) -> Callable[..., tf.data.Dataset]:
  """Preprocessor that mixes together "batches" of transcription examples.

  Args:
    ds: Dataset of individual transcription examples, each of which should
      have an 'inputs' field containing 1D audio samples (currently only
      audio encoders that use raw samples as an intermediate representation
      are supported), and a 'targets' field containing run-length encoded
      note events.
    sequence_length: Dictionary mapping feature key to length.
    output_features: Dictionary mapping feature key to spec.
    codec: An event_codec.Codec used to interpret the target events.
    inputs_feature_key: Feature key for inputs which will be mixed as audio.
    targets_feature_keys: List of feature keys for targets, each of which will
      be merged (separately) as run-length encoded note events.
    max_examples_per_mix: Maximum number of individual examples to mix together.
    shuffle_buffer_size: Size of shuffle buffer to use for shuffle prior to
      mixing.

  Returns:
    Dataset containing mixed examples.
  """
  if max_examples_per_mix is None:
    # Mixing disabled: pass the dataset through unchanged.
    return ds

  # Sample uniformly among datasets batched with sizes 1..max_examples_per_mix,
  # so each output example mixes a variable number of inputs.
  # TODO(iansimon): is there a way to use seqio's seed?
  ds = tf.data.Dataset.sample_from_datasets([
      ds.shuffle(
          buffer_size=shuffle_buffer_size // max_examples_per_mix
      ).padded_batch(batch_size=i) for i in range(1, max_examples_per_mix + 1)
  ])

  def mix_inputs(ex):
    # Sum the audio across the batch and normalize by the infinity norm to
    # keep samples within range.
    samples = tf.reduce_sum(ex[inputs_feature_key], axis=0)
    norm = tf.linalg.norm(samples, ord=np.inf)
    ex[inputs_feature_key] = tf.math.divide_no_nan(samples, norm)
    return ex
  ds = ds.map(mix_inputs, num_parallel_calls=tf.data.experimental.AUTOTUNE)

  # NOTE(review): `max_tokens` is computed here but never used below;
  # possibly it was intended to cap the merged target length — confirm.
  max_tokens = sequence_length['targets']
  if output_features['targets'].add_eos:
    # Leave room to insert an EOS token.
    max_tokens -= 1

  def mix_targets(ex):
    # Merge the per-example run-length encoded event streams into one.
    for k in targets_feature_keys:
      ex[k] = run_length_encoding.merge_run_length_encoded_targets(
          targets=ex[k],
          codec=codec)
    return ex
  ds = ds.map(mix_targets, num_parallel_calls=tf.data.experimental.AUTOTUNE)

  return ds
| 3,396 | 35.923913 | 80 | py |
mt3 | mt3-main/mt3/metrics_utils.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for transcription metrics."""
import collections
import functools
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple, TypeVar
from mt3 import event_codec
from mt3 import note_sequences
from mt3 import run_length_encoding
import note_seq
import numpy as np
import pretty_midi
import sklearn
S = TypeVar('S')  # decoding-state type used by decode_and_combine_predictions
T = TypeVar('T')  # result type produced by flushing the decoding state

# A function that combines several predicted examples into one.
CombineExamplesFunctionType = Callable[[Sequence[Mapping[str, Any]]],
                                       Mapping[str, Any]]
def _group_predictions_by_id(
predictions: Sequence[Mapping[str, T]]
) -> Mapping[str, Sequence[T]]:
predictions_by_id = collections.defaultdict(list)
for pred in predictions:
predictions_by_id[pred['unique_id']].append(pred)
return predictions_by_id
def combine_predictions_by_id(
    predictions: Sequence[Mapping[str, Any]],
    combine_predictions_fn: CombineExamplesFunctionType
) -> Mapping[str, Mapping[str, Any]]:
  """Combine predicted examples, grouping by their 'unique_id' field.

  Args:
    predictions: Predicted examples, each with a 'unique_id' field.
    combine_predictions_fn: Function that combines all predictions sharing a
      unique ID into a single example (e.g. concatenating by time).

  Returns:
    Mapping from unique ID to the combined example for that ID.
  """
  predictions_by_id = _group_predictions_by_id(predictions)
  # Name the key `unique_id` rather than `id` to avoid shadowing the builtin.
  return {
      unique_id: combine_predictions_fn(preds)
      for unique_id, preds in predictions_by_id.items()
  }
def decode_and_combine_predictions(
    predictions: Sequence[Mapping[str, Any]],
    init_state_fn: Callable[[], S],
    begin_segment_fn: Callable[[S], None],
    decode_tokens_fn: Callable[[S, Sequence[int], int, Optional[int]],
                               Tuple[int, int]],
    flush_state_fn: Callable[[S], T]
) -> Tuple[T, int, int]:
  """Decode per-segment predictions and combine them into one result.

  Segments are processed in order of 'start_time'. For each segment the
  decoding state is updated via `begin_segment_fn` and `decode_tokens_fn`;
  the combined result is produced by `flush_state_fn` at the end.

  Args:
    predictions: Predictions, each a dict with 'est_tokens' and 'start_time'.
    init_state_fn: Returns a fresh decoding state.
    begin_segment_fn: Updates the state at the start of each segment.
    decode_tokens_fn: Processes one segment's tokens given (state, tokens,
      start_time, max_time), mutating the state in place; returns the counts
      of invalid and dropped event tokens for that segment.
    flush_state_fn: Converts the final state into the combined result.

  Returns:
    Tuple of (combined result, total invalid events, total dropped events).
  """
  ordered = sorted(predictions, key=lambda p: p['start_time'])
  state = init_state_fn()
  invalid_total = 0
  dropped_total = 0
  for idx, prediction in enumerate(ordered):
    begin_segment_fn(state)
    # Audio tokens from adjacent segments can overlap in time; cap decoding
    # at the next segment's start time so no time period is decoded twice.
    if idx + 1 < len(ordered):
      next_start = ordered[idx + 1]['start_time']
    else:
      next_start = None
    n_invalid, n_dropped = decode_tokens_fn(
        state, prediction['est_tokens'], prediction['start_time'], next_start)
    invalid_total += n_invalid
    dropped_total += n_dropped
  return flush_state_fn(state), invalid_total, dropped_total
def event_predictions_to_ns(
    predictions: Sequence[Mapping[str, Any]], codec: event_codec.Codec,
    encoding_spec: note_sequences.NoteEncodingSpecType
) -> Mapping[str, Any]:
  """Convert a sequence of predictions to a combined NoteSequence."""
  decode_fn = functools.partial(
      run_length_encoding.decode_events,
      codec=codec,
      decode_event_fn=encoding_spec.decode_event_fn)
  ns, n_invalid, n_dropped = decode_and_combine_predictions(
      predictions=predictions,
      init_state_fn=encoding_spec.init_decoding_state_fn,
      begin_segment_fn=encoding_spec.begin_decoding_segment_fn,
      decode_tokens_fn=decode_fn,
      flush_state_fn=encoding_spec.flush_decoding_state_fn)

  # Concatenate the raw audio from all predictions, ordered by segment start.
  ordered = sorted(predictions, key=lambda p: p['start_time'])
  raw_inputs = np.concatenate([p['raw_inputs'] for p in ordered], axis=0)

  return {
      'raw_inputs': raw_inputs,
      'start_times': [p['start_time'] for p in ordered],
      'est_ns': ns,
      'est_invalid_events': n_invalid,
      'est_dropped_events': n_dropped,
  }
def get_prettymidi_pianoroll(ns: note_seq.NoteSequence, fps: float,
                             is_drum: bool):
  """Convert NoteSequence to pianoroll through pretty_midi.

  NOTE(review): this mutates `ns` in place (note end times are rewritten
  below) — callers should pass a copy if they need the original intact.
  """
  for note in ns.notes:
    if is_drum or note.end_time - note.start_time < 0.05:
      # Give all drum notes a fixed length, and all others a min length
      note.end_time = note.start_time + 0.05

  pm = note_seq.note_sequence_to_pretty_midi(ns)
  end_time = pm.get_end_time()
  # Append "all sound off" / "all notes off" control changes at the end so
  # the pianoroll does not extend past the final note.
  cc = [
      # all sound off
      pretty_midi.ControlChange(number=120, value=0, time=end_time),
      # all notes off
      pretty_midi.ControlChange(number=123, value=0, time=end_time)
  ]
  pm.instruments[0].control_changes = cc
  if is_drum:
    # If inst.is_drum is set, pretty_midi will return an all zero pianoroll.
    for inst in pm.instruments:
      inst.is_drum = False
  pianoroll = pm.get_piano_roll(fs=fps)
  return pianoroll
def frame_metrics(ref_pianoroll: np.ndarray,
                  est_pianoroll: np.ndarray,
                  velocity_threshold: int) -> Tuple[float, float, float]:
  """Frame Precision, Recall, and F1."""
  # Zero-pad the shorter pianoroll along the time axis so both align.
  ref_len = ref_pianoroll.shape[1]
  est_len = est_pianoroll.shape[1]
  if ref_len > est_len:
    est_pianoroll = np.pad(
        est_pianoroll, [(0, 0), (0, ref_len - est_len)], mode='constant')
  elif est_len > ref_len:
    ref_pianoroll = np.pad(
        ref_pianoroll, [(0, 0), (0, est_len - ref_len)], mode='constant')

  # For ref, remove any notes that are too quiet (consistent with Cerberus.)
  ref_frames_bool = ref_pianoroll > velocity_threshold
  # For est, keep all predicted notes.
  est_frames_bool = est_pianoroll > 0

  precision, recall, f1, _ = sklearn.metrics.precision_recall_fscore_support(
      ref_frames_bool.flatten(),
      est_frames_bool.flatten(),
      labels=[True, False])

  return precision[0], recall[0], f1[0]
| 7,437 | 36.756345 | 82 | py |
mt3 | mt3-main/mt3/spectrograms.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audio spectrogram functions."""
import dataclasses
from mt3 import spectral_ops
import tensorflow as tf
# defaults for spectrogram config
DEFAULT_SAMPLE_RATE = 16000  # audio samples per second
DEFAULT_HOP_WIDTH = 128  # samples between successive frames
DEFAULT_NUM_MEL_BINS = 512
# fixed constants; add these to SpectrogramConfig before changing
FFT_SIZE = 2048
MEL_LO_HZ = 20.0
@dataclasses.dataclass
class SpectrogramConfig:
  """Spectrogram configuration parameters."""
  sample_rate: int = DEFAULT_SAMPLE_RATE
  hop_width: int = DEFAULT_HOP_WIDTH
  num_mel_bins: int = DEFAULT_NUM_MEL_BINS

  @property
  def abbrev_str(self):
    """Short identifier string; empty when all fields have default values."""
    parts = []
    if self.sample_rate != DEFAULT_SAMPLE_RATE:
      parts.append('sr%d' % self.sample_rate)
    if self.hop_width != DEFAULT_HOP_WIDTH:
      parts.append('hw%d' % self.hop_width)
    if self.num_mel_bins != DEFAULT_NUM_MEL_BINS:
      parts.append('mb%d' % self.num_mel_bins)
    return ''.join(parts)

  @property
  def frames_per_second(self):
    """Frame rate implied by the hop width."""
    return self.sample_rate / self.hop_width
def split_audio(samples, spectrogram_config):
  """Split audio into non-overlapping frames of `hop_width` samples each."""
  hop = spectrogram_config.hop_width
  return tf.signal.frame(
      samples, frame_length=hop, frame_step=hop, pad_end=True)
def compute_spectrogram(samples, spectrogram_config):
  """Compute a mel spectrogram."""
  # Choose the STFT overlap so the effective hop equals the configured
  # hop width.
  frame_overlap = 1 - (spectrogram_config.hop_width / FFT_SIZE)
  return spectral_ops.compute_logmel(
      samples,
      bins=spectrogram_config.num_mel_bins,
      lo_hz=MEL_LO_HZ,
      overlap=frame_overlap,
      fft_size=FFT_SIZE,
      sample_rate=spectrogram_config.sample_rate)
def flatten_frames(frames):
  """Convert frames back into a flat array of samples (inverse of framing)."""
  return tf.reshape(frames, [-1])
def input_depth(spectrogram_config):
  """Number of values per input frame (the mel bin count)."""
  return spectrogram_config.num_mel_bins
| 2,330 | 27.084337 | 74 | py |
mt3 | mt3-main/mt3/layers.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dense attention classes and mask/weighting functions."""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
import dataclasses
import functools
import operator
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as np
# from flax.linen.partitioning import param_with_axes, with_sharding_constraint
# Re-exported partitioning helpers used throughout this module.
param_with_axes = nn_partitioning.param_with_axes
with_sharding_constraint = nn_partitioning.with_sharding_constraint

# Type annotations
Array = jnp.ndarray
DType = jnp.dtype
PRNGKey = jnp.ndarray
Shape = Sequence[int]
Activation = Callable[..., Array]
# Parameter initializers.
Initializer = Callable[[PRNGKey, Shape, DType], Array]
# Default initializer for embedding tables (fan-in variance scaling).
default_embed_init = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal', out_axis=0)
def sinusoidal(min_scale: float = 1.0,
               max_scale: float = 10000.0,
               dtype: DType = jnp.float32) -> Initializer:
  """Creates 1D Sinusoidal Position Embedding Initializer.

  Args:
    min_scale: Minimum frequency-scale in sine grating.
    max_scale: Maximum frequency-scale in sine grating.
    dtype: The DType of the returned values.

  Returns:
    The sinusoidal initialization function.
  """

  def init(key: PRNGKey, shape: Shape, dtype: DType = dtype) -> Array:
    """Sinusoidal init."""
    del key
    if dtype != np.float32:
      raise ValueError('The sinusoidal initializer only supports float32.')
    if len(list(shape)) != 2:
      raise ValueError(
          f'Expected a 2D shape (max_len, features), but got {shape}.')
    max_len, features = shape
    half = features // 2
    # Geometric progression of frequencies from min_scale down by max/min.
    decay = -np.log(max_scale / min_scale) / (half - 1)
    frequencies = min_scale * np.exp(np.arange(0, half) * decay)
    angles = np.arange(0, max_len)[:, np.newaxis] * frequencies
    # First half of the features holds sines, second half cosines.
    table = np.zeros((max_len, features), dtype=dtype)
    table[:, :half] = np.sin(angles)
    table[:, half:2 * half] = np.cos(angles)
    return jnp.array(table)

  return init
def dot_product_attention(query: Array,
                          key: Array,
                          value: Array,
                          bias: Optional[Array] = None,
                          dropout_rng: Optional[PRNGKey] = None,
                          dropout_rate: float = 0.,
                          deterministic: bool = False,
                          dtype: DType = jnp.float32,
                          float32_logits: bool = False):
  """Computes dot-product attention given query, key, and value.
  Core attention from https://arxiv.org/abs/1706.03762: attention weights are
  computed from `query` and `key`, optionally biased and dropped out, and then
  used to form a weighted combination of `value`.
  Args:
    query: queries, `[batch, q_length, num_heads, qk_depth_per_head]`.
    key: keys, `[batch, kv_length, num_heads, qk_depth_per_head]`.
    value: values, `[batch, kv_length, num_heads, v_depth_per_head]`.
    bias: optional additive bias broadcastable to `[batch, num_heads,
      q_length, kv_length]`; used for causal/padding masks, proximity bias,
      etc.
    dropout_rng: JAX PRNGKey used when dropout is active.
    dropout_rate: dropout rate applied to the attention weights.
    deterministic: if True, disables dropout.
    dtype: the dtype of the computation (default: float32).
    float32_logits: if True, compute logits in float32 to avoid numerical
      issues with bfloat16.
  Returns:
    Output of shape `[batch, length, num_heads, v_depth_per_head]`.
  """
  # Shape sanity checks; all messages kept identical to the original.
  assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
  assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], (
      'q, k, v batch dims must match.')
  assert query.shape[-2] == key.shape[-2] == value.shape[-2], (
      'q, k, v num_heads must match.')
  assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'
  # Optionally compute the logits in float32 for model stability.
  if float32_logits:
    query = query.astype(jnp.float32)
    key = key.astype(jnp.float32)
  # `logits`: [batch, num_heads, q_length, kv_length]
  logits = jnp.einsum('bqhd,bkhd->bhqk', query, key)
  # Additive bias implements masking, proximity bias, etc.
  if bias is not None:
    logits = logits + bias.astype(logits.dtype)
  # Normalize across the `kv_length` dimension.
  weights = jax.nn.softmax(logits).astype(dtype)
  # Attention dropout, broadcast along the query-length dim (as in T5).
  if dropout_rate > 0. and not deterministic:
    keep_prob = 1.0 - dropout_rate
    drop_shape = list(weights.shape)
    drop_shape[-2] = 1
    keep = random.bernoulli(dropout_rng, keep_prob, drop_shape)
    keep = jnp.broadcast_to(keep, weights.shape)
    scale = keep.astype(weights.dtype) / jnp.asarray(keep_prob, dtype=dtype)
    weights = weights * scale
  # Weighted combination of the values.
  return jnp.einsum('bhqk,bkhd->bqhd', weights, value)
# Batched `lax.dynamic_slice_in_dim`: maps over a batch of start indices
# (axis 0) while the operand, slice size, and axis arguments are shared
# (in_axes=None). Used below to slice per-step relative-attention bias.
dynamic_vector_slice_in_dim = jax.vmap(
    lax.dynamic_slice_in_dim, in_axes=(None, 0, None, None))
class MultiHeadDotProductAttention(nn.Module):
  """Multi-head dot-product attention.
    Attributes:
      num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
        should be divisible by the number of heads.
      head_dim: dimension of each head.
      dtype: the dtype of the computation.
      dropout_rate: dropout rate
      kernel_init: initializer for the kernel of the Dense layers.
      float32_logits: bool, if True then compute logits in float32 to avoid
        numerical issues with bfloat16.
  """
  num_heads: int
  head_dim: int
  dtype: DType = jnp.float32
  dropout_rate: float = 0.
  kernel_init: Initializer = nn.initializers.variance_scaling(
      1.0, 'fan_in', 'normal')
  float32_logits: bool = False  # computes logits in float32 for stability.
  @nn.compact
  def __call__(self,
               inputs_q: Array,
               inputs_kv: Array,
               mask: Optional[Array] = None,
               bias: Optional[Array] = None,
               *,
               decode: bool = False,
               deterministic: bool = False) -> Array:
    """Applies multi-head dot product attention on the input data.
    Projects the inputs into multi-headed query, key, and value vectors,
    applies dot-product attention and project the results to an output vector.
    There are two modes: decoding and non-decoding (e.g., training). The mode is
    determined by `decode` argument. For decoding, this method is called twice,
    first to initialize the cache and then for an actual decoding process. The
    two calls are differentiated by the presence of 'cached_key' in the variable
    dict. In the cache initialization stage, the cache variables are initialized
    as zeros and will be filled in the subsequent decoding process.
    In the cache initialization call, `inputs_q` has a shape [batch, length,
    q_features] and `inputs_kv`: [batch, length, kv_features]. During the
    incremental decoding stage, query, key and value all have the shape [batch,
    1, qkv_features] corresponding to a single step.
    Args:
      inputs_q: input queries of shape `[batch, q_length, q_features]`.
      inputs_kv: key/values of shape `[batch, kv_length, kv_features]`.
      mask: attention mask of shape `[batch, num_heads, q_length, kv_length]`.
      bias: attention bias of shape `[batch, num_heads, q_length, kv_length]`.
      decode: Whether to prepare and use an autoregressive cache.
      deterministic: Disables dropout if set to True.
    Returns:
      output of shape `[batch, length, q_features]`.
    """
    # Shared projection factory for q/k/v: maps the feature axis to
    # [num_heads, head_dim].
    projection = functools.partial(
        DenseGeneral,
        axis=-1,
        features=(self.num_heads, self.head_dim),
        kernel_axes=('embed', 'joined_kv'),
        dtype=self.dtype)
    # NOTE: T5 does not explicitly rescale the attention logits by
    # 1/sqrt(depth_kq)! This is folded into the initializers of the
    # linear transformations, which is equivalent under Adafactor.
    depth_scaling = jnp.sqrt(self.head_dim).astype(self.dtype)
    query_init = lambda *args: self.kernel_init(*args) / depth_scaling
    # Project inputs_q to multi-headed q/k/v
    # dimensions are then [batch, length, num_heads, head_dim]
    query = projection(kernel_init=query_init, name='query')(inputs_q)
    key = projection(kernel_init=self.kernel_init, name='key')(inputs_kv)
    value = projection(kernel_init=self.kernel_init, name='value')(inputs_kv)
    query = with_sharding_constraint(query, ('batch', 'length', 'heads', 'kv'))
    key = with_sharding_constraint(key, ('batch', 'length', 'heads', 'kv'))
    value = with_sharding_constraint(value, ('batch', 'length', 'heads', 'kv'))
    if decode:
      # Detect if we're initializing by absence of existing cache data.
      is_initialized = self.has_variable('cache', 'cached_key')
      # The key and value have dimension [batch, length, num_heads, head_dim],
      # but we cache them as [batch, num_heads, head_dim, length] as a TPU
      # fusion optimization. This also enables the "scatter via one-hot
      # broadcast" trick, which means we do a one-hot broadcast instead of a
      # scatter/gather operations, resulting in a 3-4x speedup in practice.
      swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3])
      cached_key = self.variable('cache', 'cached_key', jnp.zeros,
                                 swap_dims(key.shape), key.dtype)
      cached_value = self.variable('cache', 'cached_value', jnp.zeros,
                                   swap_dims(value.shape), value.dtype)
      cache_index = self.variable('cache', 'cache_index',
                                  lambda: jnp.array(0, dtype=jnp.int32))
      if is_initialized:
        batch, num_heads, head_dim, length = (cached_key.value.shape)
        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        # Sanity shape check of cached key against input query.
        expected_shape = (batch, 1, num_heads, head_dim)
        if expected_shape != query.shape:
          raise ValueError('Autoregressive cache shape error, '
                           'expected query shape %s instead got %s.' %
                           (expected_shape, query.shape))
        # Create a OHE of the current index. NOTE: the index is increased below.
        cur_index = cache_index.value
        one_hot_indices = jax.nn.one_hot(cur_index, length, dtype=key.dtype)
        # In order to update the key, value caches with the current key and
        # value, we move the length axis to the back, similar to what we did for
        # the cached ones above.
        # Note these are currently the key and value of a single position, since
        # we feed one position at a time.
        one_token_key = jnp.moveaxis(key, -3, -1)
        one_token_value = jnp.moveaxis(value, -3, -1)
        # Update key, value caches with our new 1d spatial slices.
        # We implement an efficient scatter into the cache via one-hot
        # broadcast and addition.
        key = cached_key.value + one_token_key * one_hot_indices
        value = cached_value.value + one_token_value * one_hot_indices
        cached_key.value = key
        cached_value.value = value
        cache_index.value = cache_index.value + 1
        # Move the keys and values back to their original shapes.
        key = jnp.moveaxis(key, -1, -3)
        value = jnp.moveaxis(value, -1, -3)
        # Causal mask for cached decoder self-attention: our single query
        # position should only attend to those key positions that have already
        # been generated and cached, not the remaining zero elements.
        mask = combine_masks(
            mask,
            jnp.broadcast_to(
                jnp.arange(length) <= cur_index,
                # (1, 1, length) represent (head dim, query length, key length)
                # query length is 1 because during decoding we deal with one
                # index.
                # The same mask is applied to all batch elements and heads.
                (batch, 1, 1, length)))
        # Grab the correct relative attention bias during decoding. This is
        # only required during single step decoding.
        if bias is not None:
          # The bias is a full attention matrix, but during decoding we only
          # have to take a slice of it.
          # This is equivalent to bias[..., cur_index:cur_index+1, :].
          bias = dynamic_vector_slice_in_dim(
              jnp.squeeze(bias, axis=0), jnp.reshape(cur_index, (-1)), 1, -2)
    # Convert the boolean attention mask to an additive attention bias:
    # 0 where attention is allowed, -1e10 where it is masked out.
    if mask is not None:
      # attention mask in the form of attention bias
      attention_bias = lax.select(
          mask > 0,
          jnp.full(mask.shape, 0.).astype(self.dtype),
          jnp.full(mask.shape, -1e10).astype(self.dtype))
    else:
      attention_bias = None
    # Add provided bias term (e.g. relative position embedding).
    if bias is not None:
      attention_bias = combine_biases(attention_bias, bias)
    dropout_rng = None
    if not deterministic and self.dropout_rate > 0.:
      dropout_rng = self.make_rng('dropout')
    # Apply attention.
    x = dot_product_attention(
        query,
        key,
        value,
        bias=attention_bias,
        dropout_rng=dropout_rng,
        dropout_rate=self.dropout_rate,
        deterministic=deterministic,
        dtype=self.dtype,
        float32_logits=self.float32_logits)
    # Back to the original inputs dimensions.
    out = DenseGeneral(
        features=inputs_q.shape[-1],  # output dim is set to the input dim.
        axis=(-2, -1),
        kernel_init=self.kernel_init,
        kernel_axes=('joined_kv', 'embed'),
        dtype=self.dtype,
        name='out')(
            x)
    return out
def _normalize_axes(axes: Iterable[int], ndim: int) -> Tuple[int]:
# A tuple by convention. len(axes_tuple) then also gives the rank efficiently.
return tuple([ax if ax >= 0 else ndim + ax for ax in axes])
def _canonicalize_tuple(x):
if isinstance(x, Iterable):
return tuple(x)
else:
return (x,)
#------------------------------------------------------------------------------
# DenseGeneral for attention layers.
#------------------------------------------------------------------------------
class DenseGeneral(nn.Module):
  """A bias-free linear transformation over an arbitrary set of input axes.
  Attributes:
    features: tuple with numbers of output features.
    axis: tuple with axes to apply the transformation on.
    dtype: the dtype of the computation (default: float32).
    kernel_init: initializer function for the weight matrix.
  """
  features: Union[Iterable[int], int]
  axis: Union[Iterable[int], int] = -1
  dtype: DType = jnp.float32
  kernel_init: Initializer = nn.initializers.variance_scaling(
      1.0, 'fan_in', 'truncated_normal')
  kernel_axes: Tuple[str, ...] = ()
  @nn.compact
  def __call__(self, inputs: Array) -> Array:
    """Contracts `inputs` with the kernel along the configured axes.
    Args:
      inputs: The nd-array to be transformed.
    Returns:
      The transformed input.
    """
    out_features = _canonicalize_tuple(self.features)
    inputs = jnp.asarray(inputs, self.dtype)
    contract_axes = _normalize_axes(
        _canonicalize_tuple(self.axis), inputs.ndim)
    in_dims = tuple(inputs.shape[ax] for ax in contract_axes)
    # The parameter is stored as a flat 2D matrix (for partitioning) and
    # reshaped to its logical n-d form before the contraction.
    flat_kernel = param_with_axes(
        'kernel',
        self.kernel_init,
        (np.prod(in_dims), np.prod(out_features)),
        jnp.float32,
        axes=self.kernel_axes)
    kernel = jnp.reshape(jnp.asarray(flat_kernel, self.dtype),
                         in_dims + out_features)
    contract_ind = tuple(range(0, len(contract_axes)))
    return lax.dot_general(inputs, kernel,
                           ((contract_axes, contract_ind), ((), ())))
def _convert_to_activation_function(
fn_or_string: Union[str, Callable]) -> Callable:
"""Convert a string to an activation function."""
if fn_or_string == 'linear':
return lambda x: x
elif isinstance(fn_or_string, str):
return getattr(nn, fn_or_string)
elif callable(fn_or_string):
return fn_or_string
else:
raise ValueError("don't know how to convert %s to an activation function" %
(fn_or_string,))
class MlpBlock(nn.Module):
  """Transformer MLP / feed-forward block (optionally gated).
  Attributes:
    intermediate_dim: Shared dimension of hidden layers.
    activations: Type of activations for each layer. Each element is either
      'linear', a string function name in flax.linen, or a function.
    kernel_init: Kernel function, passed to the dense layers.
    deterministic: Whether the dropout layers should be deterministic.
    intermediate_dropout_rate: Dropout rate used after the intermediate layers.
    dtype: Type for the dense layer.
  """
  intermediate_dim: int = 2048
  activations: Sequence[Union[str, Callable]] = ('relu',)
  kernel_init: Initializer = nn.initializers.variance_scaling(
      1.0, 'fan_in', 'truncated_normal')
  intermediate_dropout_rate: float = 0.1
  dtype: Any = jnp.float32
  @nn.compact
  def __call__(self, inputs, decode: bool = False, deterministic: bool = False):
    """Applies Transformer MlpBlock module."""
    # One input projection per activation; their elementwise product
    # implements gated variants, e.g. ('gelu', 'linear') for gated-gelu.
    intermediates = []
    for index, act_spec in enumerate(self.activations):
      dense_name = 'wi' if len(self.activations) == 1 else f'wi_{index}'
      hidden = DenseGeneral(
          self.intermediate_dim,
          dtype=self.dtype,
          kernel_init=self.kernel_init,
          kernel_axes=('embed', 'mlp'),
          name=dense_name)(
              inputs)
      intermediates.append(_convert_to_activation_function(act_spec)(hidden))
    combined = functools.reduce(operator.mul, intermediates)
    # Dropout is broadcast along the length dimension.
    combined = nn.Dropout(
        rate=self.intermediate_dropout_rate, broadcast_dims=(-2,))(
            combined, deterministic=deterministic)
    combined = with_sharding_constraint(combined, ('batch', 'length', 'mlp'))
    # Project back to the model dimension.
    return DenseGeneral(
        inputs.shape[-1],
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        kernel_axes=('mlp', 'embed'),
        name='wo')(
            combined)
class Embed(nn.Module):
  """A parameterized function from integers [0, n) to d-dimensional vectors.
  Attributes:
    num_embeddings: number of embeddings.
    features: number of feature dimensions for each embedding.
    dtype: the dtype of the embedding vectors (default: float32).
    embedding_init: embedding initializer.
    one_hot: performs the gather with a one-hot contraction rather than a true
      gather. This is currently needed for SPMD partitioning.
  """
  num_embeddings: int
  features: int
  cast_input_dtype: Optional[DType] = None
  dtype: DType = jnp.float32
  attend_dtype: Optional[DType] = None
  embedding_init: Initializer = default_embed_init
  one_hot: bool = False
  embedding: Array = dataclasses.field(init=False)
  def setup(self):
    # Embedding table is always stored in float32 with (vocab, embed) axes.
    self.embedding = param_with_axes(
        'embedding',
        self.embedding_init, (self.num_embeddings, self.features),
        jnp.float32,
        axes=('vocab', 'embed'))
  def __call__(self, inputs: Array) -> Array:
    """Embeds integer ids along the last dimension.
    Args:
      inputs: input data, all dimensions are considered batch dimensions.
    Returns:
      The embedded input; output shape is the input shape with an extra
      trailing `features` dimension.
    """
    if self.cast_input_dtype:
      inputs = inputs.astype(self.cast_input_dtype)
    if not jnp.issubdtype(inputs.dtype, jnp.integer):
      raise ValueError('Input type must be an integer or unsigned integer.')
    table = jnp.asarray(self.embedding, self.dtype)
    if self.one_hot:
      # One-hot matmul gather: required for SPMD partitioning.
      vocab_ids = lax.iota(jnp.int32, self.num_embeddings)
      onehot = jnp.array(inputs[..., jnp.newaxis] == vocab_ids,
                         dtype=self.dtype)
      embedded = jnp.dot(onehot, table)
    else:
      embedded = table[inputs]
    return with_sharding_constraint(embedded, ('batch', 'length', 'embed'))
  def attend(self, query: Array) -> Array:
    """Attend over the embedding using a query array.
    Args:
      query: array with last dimension equal the feature depth `features` of
        the embedding.
    Returns:
      An array with final dim `num_embeddings`: the batched inner product of
      the query vectors against each embedding. Commonly used for
      weight-sharing between embeddings and the logit transform in NLP models.
    """
    if self.attend_dtype is not None:
      dtype = self.attend_dtype
    else:
      dtype = self.dtype
    return jnp.dot(query, jnp.asarray(self.embedding, dtype).T)
class FixedEmbed(nn.Module):
  """Fixed (not learnable) embeddings specified by the initializer function.
  Attributes:
    init_fn: The initializer function that defines the embeddings.
    max_length: The maximum supported length.
    dtype: The DType to use for the embeddings.
  """
  features: int
  max_length: int = 2048
  embedding_init: Initializer = sinusoidal()
  dtype: jnp.dtype = jnp.float32
  def setup(self):
    # The key is set to None because sinusoid init is deterministic.
    shape = (self.max_length, self.features)
    self.embedding = self.embedding_init(None, shape, self.dtype)  # pylint: disable=too-many-function-args # pytype: disable=wrong-arg-types # jax-ndarray
  @nn.compact
  def __call__(self,
               inputs,
               *,
               decode: bool = False):
    """Returns the fixed position embeddings specified by the initializer.
    Args:
      inputs: <int>[batch_size, seq_len] input position indices.
      decode: True if running in single-position autoregressive decode mode.
    Returns:
      The fixed position embeddings <float32>[batch_size, seq_len, features].
    """
    # We use a cache position index for tracking decoding position.
    if decode:
      position_embedder_index = self.variable(
          'cache', 'position_embedder_index',
          lambda: jnp.array(-1, dtype=jnp.uint32))
      i = position_embedder_index.value
      position_embedder_index.value = i + 1
      # NOTE(review): the index starts at -1 (which wraps as uint32), so the
      # first call — presumably the cache-initialization pass whose output is
      # discarded — slices at a clamped/wrapped position. Confirm with callers.
      return jax.lax.dynamic_slice(self.embedding, jnp.array((i, 0)),
                                   np.array((1, self.features)))
    # Non-decoding path: plain gather of the requested positions.
    return jnp.take(self.embedding, inputs, axis=0)
#------------------------------------------------------------------------------
# T5 Layernorm - no subtraction of mean or bias.
#------------------------------------------------------------------------------
class LayerNorm(nn.Module):
  """T5 Layer normalization operating on the last axis of the input data."""
  epsilon: float = 1e-6
  dtype: Any = jnp.float32
  scale_init: Initializer = nn.initializers.ones
  @nn.compact
  def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
    """Applies RMS-style layer normalization (no mean subtraction, no bias)."""
    # Statistics are computed in float32 for numerical stability.
    x = jnp.asarray(x, jnp.float32)
    mean_sq = jnp.mean(lax.square(x), axis=-1, keepdims=True)
    normed = jnp.asarray(x * lax.rsqrt(mean_sq + self.epsilon), self.dtype)
    gamma = param_with_axes(
        'scale', self.scale_init, (x.shape[-1],), jnp.float32, axes=('embed',))
    return normed * jnp.asarray(gamma, self.dtype)
#------------------------------------------------------------------------------
# Mask-making utility functions.
#------------------------------------------------------------------------------
def make_attention_mask(query_input: Array,
                        key_input: Array,
                        pairwise_fn: Callable = jnp.multiply,
                        extra_batch_dims: int = 0,
                        dtype: DType = jnp.float32) -> Array:
  """Mask-making helper for attention weights.
  Given 1d inputs (`[batch, len_q]`, `[batch, len_kv]`) this produces a
  `[batch, 1, len_q, len_kv]` mask, matching attention weights of shape
  `[batch, heads, len_q, len_kv]` via broadcasting over the heads axis.
  Args:
    query_input: a batched, flat input of query_length size
    key_input: a batched, flat input of key_length size
    pairwise_fn: broadcasting elementwise comparison function
    extra_batch_dims: number of extra batch dims to add singleton axes for,
      none by default
    dtype: mask return dtype
  Returns:
    A `[batch, 1, len_q, len_kv]` shaped mask for 1d attention.
  """
  # [batch, len_q, 1] (op) [batch, 1, len_kv] -> [batch, len_q, len_kv]
  query_cols = jnp.expand_dims(query_input, axis=-1)
  key_rows = jnp.expand_dims(key_input, axis=-2)
  mask = pairwise_fn(query_cols, key_rows)
  # Insert the singleton heads axis: [batch, 1, len_q, len_kv].
  mask = jnp.expand_dims(mask, axis=-3)
  # Optionally prepend extra singleton batch axes (no-op when zero).
  mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims)))
  return mask.astype(dtype)
def make_causal_mask(x: Array,
                     extra_batch_dims: int = 0,
                     dtype: DType = jnp.float32) -> Array:
  """Make a causal mask for self-attention.
  For 1d inputs of shape `[batch, len]` the result has shape
  `[batch, 1, len, len]`, broadcasting over attention heads. Only the shape of
  `x` matters — padding positions are not treated specially here.
  Args:
    x: input array of shape `[batch, len]`
    extra_batch_dims: number of batch dims to add singleton axes for, none by
      default
    dtype: mask return dtype
  Returns:
    A `[batch, 1, len, len]` shaped causal mask for 1d attention.
  """
  positions = jnp.broadcast_to(
      jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)
  # Query position i may attend to key position j iff i >= j.
  return make_attention_mask(
      positions,
      positions,
      jnp.greater_equal,
      extra_batch_dims=extra_batch_dims,
      dtype=dtype)
def combine_masks(*masks: Optional[Array], dtype: DType = jnp.float32):
  """Combine attention masks by logical-and.
  Args:
    *masks: set of attention mask arguments to combine, some can be None.
    dtype: final mask dtype
  Returns:
    Combined mask, reduced by logical and, returns None if no masks given.
  """
  present = [m for m in masks if m is not None]
  if not present:
    return None
  assert all(map(lambda x: x.ndim == present[0].ndim, present)), (
      f'masks must have same rank: {tuple(map(lambda x: x.ndim, present))}')
  combined = functools.reduce(jnp.logical_and, present)
  return combined.astype(dtype)
def combine_biases(*masks: Optional[Array]):
  """Combine attention biases by summation.
  Args:
    *masks: set of attention bias arguments to combine, some can be None.
  Returns:
    Combined mask, reduced by summation, returns None if no masks given.
  """
  present = [m for m in masks if m is not None]
  if not present:
    return None
  assert all(map(lambda x: x.ndim == present[0].ndim, present)), (
      f'masks must have same rank: {tuple(map(lambda x: x.ndim, present))}')
  return functools.reduce(operator.add, present)
def make_decoder_mask(decoder_target_tokens: Array,
                      dtype: DType,
                      decoder_causal_attention: Optional[Array] = None,
                      decoder_segment_ids: Optional[Array] = None) -> Array:
  """Compute the self-attention mask for a decoder.
  The decoder mask is the logical-and of up to three components:
  1. A causal mask — optionally relaxed to bidirectional attention within the
     "inputs" region of a prefix LM, as indicated by positions where
     `decoder_causal_attention` is 1. In a prefix LM, "inputs" and "targets"
     are concatenated into one target sequence, and inputs tokens may attend
     to each other regardless of order.
  2. A padding mask derived from `decoder_target_tokens` (positions with token
     id 0 are treated as padding).
  3. An optional packing mask that blocks attention across packed examples:
     positions attend to each other only when their `decoder_segment_ids`
     match.
  For example, with a packed pair of examples
    decoder_target_tokens     = [[6, 7, 8, 3, 4, 5, 0]]
    decoder_segment_ids       = [[1, 1, 1, 2, 2, 2, 0]]
    decoder_causal_attention  = [[1, 1, 0, 1, 1, 0, 0]]
  the result is
    mask = [[[[1, 1, 0, 0, 0, 0, 0],
              [1, 1, 0, 0, 0, 0, 0],
              [1, 1, 1, 0, 0, 0, 0],
              [0, 0, 0, 1, 1, 0, 0],
              [0, 0, 0, 1, 1, 0, 0],
              [0, 0, 0, 1, 1, 1, 0],
              [0, 0, 0, 0, 0, 0, 0]]]]
  The returned mask has shape [batch, 1, length, length]; the singleton axis
  broadcasts over attention heads. mask[b, 0, i, j] == 1 means the query at
  position i may attend to the key at position j in batch element b.
  Args:
    decoder_target_tokens: decoder output tokens. [batch, length]
    dtype: dtype of the output mask.
    decoder_causal_attention: a binary mask where 1 marks "inputs" positions
      that may attend bidirectionally among themselves; all other positions
      attend causally. [batch, length]
    decoder_segment_ids: decoder segmentation info for packed examples.
      [batch, length]
  Returns:
    the combined decoder mask.
  """
  # [batch, 1, length, length]; the heads dim is 1 and broadcasts.
  causal = make_causal_mask(decoder_target_tokens, dtype=dtype)
  if decoder_causal_attention is not None:
    # Let "inputs" positions attend to each other regardless of order while
    # everything else stays causal.
    bidirectional = make_attention_mask(
        decoder_causal_attention,
        decoder_causal_attention,
        jnp.logical_and,
        dtype=dtype)
    attention_component = jnp.logical_or(causal, bidirectional).astype(dtype)
  else:
    attention_component = causal
  # Padding mask: token id 0 is padding.
  padding_component = make_attention_mask(
      decoder_target_tokens > 0, decoder_target_tokens > 0, dtype=dtype)
  components = [attention_component, padding_component]
  # Packing mask: only same-segment positions may attend to each other.
  if decoder_segment_ids is not None:
    components.append(
        make_attention_mask(
            decoder_segment_ids, decoder_segment_ids, jnp.equal, dtype=dtype))
  return combine_masks(*components, dtype=dtype)  # pytype: disable=bad-return-type  # jax-ndarray
| 32,586 | 38.2142 | 157 | py |
mt3 | mt3-main/mt3/run_length_encoding.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for run length encoding."""
import dataclasses
from typing import Any, Callable, Mapping, MutableMapping, Tuple, Optional, Sequence, TypeVar
from absl import logging
from mt3 import event_codec
import numpy as np
import seqio
import tensorflow as tf
Event = event_codec.Event
# These should be type variables, but unfortunately those are incompatible with
# dataclasses.
EventData = Any
EncodingState = Any
DecodingState = Any
DecodeResult = Any
T = TypeVar('T', bound=EventData)
ES = TypeVar('ES', bound=EncodingState)
DS = TypeVar('DS', bound=DecodingState)
@dataclasses.dataclass
class EventEncodingSpec:
  """Spec for encoding events.
  Bundles the callbacks that define how EventData values are converted to and
  from event_codec events, threading explicit encoding/decoding state through
  each step.
  """
  # initialize encoding state
  init_encoding_state_fn: Callable[[], EncodingState]
  # convert EventData into zero or more events, updating encoding state
  encode_event_fn: Callable[[EncodingState, EventData, event_codec.Codec],
                            Sequence[event_codec.Event]]
  # convert encoding state (at beginning of segment) into events; None if
  # state events are not used
  encoding_state_to_events_fn: Optional[Callable[[EncodingState],
                                                 Sequence[event_codec.Event]]]
  # create empty decoding state
  init_decoding_state_fn: Callable[[], DecodingState]
  # update decoding state when entering new segment
  begin_decoding_segment_fn: Callable[[DecodingState], None]
  # consume time and Event and update decoding state
  decode_event_fn: Callable[
      [DecodingState, float, event_codec.Event, event_codec.Codec], None]
  # flush decoding state into result
  flush_decoding_state_fn: Callable[[DecodingState], DecodeResult]
def encode_and_index_events(
    state: ES,
    event_times: Sequence[float],
    event_values: Sequence[T],
    encode_event_fn: Callable[[ES, T, event_codec.Codec],
                              Sequence[event_codec.Event]],
    codec: event_codec.Codec,
    frame_times: Sequence[float],
    encoding_state_to_events_fn: Optional[
        Callable[[ES], Sequence[event_codec.Event]]] = None,
) -> Tuple[Sequence[int], Sequence[int], Sequence[int],
           Sequence[int], Sequence[int]]:
  """Encode a sequence of timed events and index to audio frame times.
  Encodes time shifts as repeated single step shifts for later run length
  encoding.
  Optionally, also encodes a sequence of "state events", keeping track of the
  current encoding state at each audio frame. This can be used e.g. to prepend
  events representing the current state to a targets segment.
  Args:
    state: Initial event encoding state.
    event_times: Sequence of event times.
    event_values: Sequence of event values.
    encode_event_fn: Function that transforms event value into a sequence of one
      or more event_codec.Event objects.
    codec: An event_codec.Codec object that maps Event objects to indices.
    frame_times: Time for every audio frame.
    encoding_state_to_events_fn: Function that transforms encoding state into a
      sequence of one or more event_codec.Event objects.
  Returns:
    events: Encoded events and shifts.
    event_start_indices: Corresponding start event index for every audio frame.
      Note: one event can correspond to multiple audio indices due to sampling
      rate differences. This makes splitting sequences tricky because the same
      event can appear at the end of one sequence and the beginning of
      another.
    event_end_indices: Corresponding end event index for every audio frame. Used
      to ensure when slicing that one chunk ends where the next begins. Should
      always be true that event_end_indices[i] = event_start_indices[i + 1].
    state_events: Encoded "state" events representing the encoding state before
      each event.
    state_event_indices: Corresponding state event index for every audio frame.
  """
  # Stable sort keeps simultaneous events in their original relative order.
  indices = np.argsort(event_times, kind='stable')
  event_steps = [round(event_times[i] * codec.steps_per_second)
                 for i in indices]
  event_values = [event_values[i] for i in indices]
  events = []
  state_events = []
  event_start_indices = []
  state_event_indices = []
  cur_step = 0
  cur_event_idx = 0
  cur_state_event_idx = 0
  # Appends current indices for every frame whose time is before cur_step
  # (closure over the mutable lists and counters above).
  def fill_event_start_indices_to_cur_step():
    while(len(event_start_indices) < len(frame_times) and
          frame_times[len(event_start_indices)] <
          cur_step / codec.steps_per_second):
      event_start_indices.append(cur_event_idx)
      state_event_indices.append(cur_state_event_idx)
  for event_step, event_value in zip(event_steps, event_values):
    # Emit one single-step shift per step until we reach the event's step.
    while event_step > cur_step:
      events.append(codec.encode_event(Event(type='shift', value=1)))
      cur_step += 1
      fill_event_start_indices_to_cur_step()
      cur_event_idx = len(events)
      cur_state_event_idx = len(state_events)
    if encoding_state_to_events_fn:
      # Dump state to state events *before* processing the next event, because
      # we want to capture the state prior to the occurrence of the event.
      for e in encoding_state_to_events_fn(state):
        state_events.append(codec.encode_event(e))
    for e in encode_event_fn(state, event_value, codec):
      events.append(codec.encode_event(e))
  # After the last event, continue filling out the event_start_indices array.
  # The inequality is not strict because if our current step lines up exactly
  # with (the start of) an audio frame, we need to add an additional shift event
  # to "cover" that frame.
  # NOTE(review): assumes frame_times is non-empty; frame_times[-1] would
  # raise on empty input — confirm upstream guarantees.
  while cur_step / codec.steps_per_second <= frame_times[-1]:
    events.append(codec.encode_event(Event(type='shift', value=1)))
    cur_step += 1
    fill_event_start_indices_to_cur_step()
    cur_event_idx = len(events)
  # Now fill in event_end_indices. We need this extra array to make sure that
  # when we slice events, each slice ends exactly where the subsequent slice
  # begins.
  event_end_indices = event_start_indices[1:] + [len(events)]
  events = np.array(events)
  state_events = np.array(state_events)
  event_start_indices = np.array(event_start_indices)
  event_end_indices = np.array(event_end_indices)
  state_event_indices = np.array(state_event_indices)
  return (events, event_start_indices, event_end_indices,
          state_events, state_event_indices)
@seqio.map_over_dataset
def extract_target_sequence_with_indices(features, state_events_end_token=None):
  """Extract target sequence corresponding to audio token segment.

  Args:
    features: Dict of features; must contain 'targets' and the
        'input_event_start_indices' / 'input_event_end_indices' arrays that
        map audio frames to event positions (and, if state_events_end_token is
        set, 'state_events' and 'input_state_event_indices' as well).
    state_events_end_token: Optional token value marking the end of a run of
        state events; if provided, the state events at the segment start are
        prepended to 'targets'.

  Returns:
    The features dict with 'targets' replaced by the segment's event slice.
  """
  target_start_idx = features['input_event_start_indices'][0]
  target_end_idx = features['input_event_end_indices'][-1]

  features['targets'] = features['targets'][target_start_idx:target_end_idx]

  if state_events_end_token is not None:
    # Extract the state events corresponding to the audio start token, and
    # prepend them to the targets array.
    state_event_start_idx = features['input_state_event_indices'][0]
    state_event_end_idx = state_event_start_idx + 1
    # Scan forward until (and including) the end token.
    while features['state_events'][
        state_event_end_idx - 1] != state_events_end_token:
      state_event_end_idx += 1
    features['targets'] = tf.concat([
        features['state_events'][state_event_start_idx:state_event_end_idx],
        features['targets']
    ], axis=0)

  return features
def remove_redundant_state_changes_fn(
    codec: event_codec.Codec,
    feature_key: str = 'targets',
    state_change_event_types: Sequence[str] = ()
) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]:
  """Return preprocessing function that removes redundant state change events.

  Args:
    codec: The event_codec.Codec used to interpret the events.
    feature_key: The feature key for which to remove redundant state changes.
    state_change_event_types: A list of event types that represent state
      changes; tokens corresponding to these event types will be interpreted
      as state changes and redundant ones will be removed.

  Returns:
    A preprocessing function that removes redundant state change events.
  """
  # Precompute the (min, max) token index range for each state change type.
  state_change_event_ranges = [codec.event_type_range(event_type)
                               for event_type in state_change_event_types]

  def remove_redundant_state_changes(
      features: MutableMapping[str, Any],
  ) -> Mapping[str, Any]:
    """Remove redundant tokens e.g. duplicate velocity changes from sequence."""
    # current_state[i] holds the last-seen token for state change type i
    # (zero initially, meaning "no state seen yet").
    current_state = tf.zeros(len(state_change_event_ranges), dtype=tf.int32)
    output = tf.constant([], dtype=tf.int32)

    for event in features[feature_key]:
      # Let autograph know that the shape of 'output' will change during the
      # loop.
      tf.autograph.experimental.set_loop_options(
          shape_invariants=[(output, tf.TensorShape([None]))])
      is_redundant = False
      for i, (min_index, max_index) in enumerate(state_change_event_ranges):
        if (min_index <= event) and (event <= max_index):
          # Token falls in this state change range; drop it if the state is
          # unchanged, but always record it as the current state.
          if current_state[i] == event:
            is_redundant = True
          current_state = tf.tensor_scatter_nd_update(
              current_state, indices=[[i]], updates=[event])
      if not is_redundant:
        output = tf.concat([output, [event]], axis=0)

    features[feature_key] = output
    return features

  return seqio.map_over_dataset(remove_redundant_state_changes)
def run_length_encode_shifts_fn(
    codec: event_codec.Codec,
    feature_key: str = 'targets'
) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]:
  """Return a function that run-length encodes shifts for a given codec.

  Args:
    codec: The Codec to use for shift events.
    feature_key: The feature key for which to run-length encode shifts.

  Returns:
    A preprocessing function that run-length encodes single-step shifts.
  """
  def run_length_encode_shifts(
      features: MutableMapping[str, Any]
  ) -> Mapping[str, Any]:
    """Combine leading/interior shifts, trim trailing shifts.

    Args:
      features: Dict of features to process.

    Returns:
      A dict of features.
    """
    events = features[feature_key]

    # shift_steps counts shifts since the last non-shift event;
    # total_shift_steps counts shifts since the start of the sequence.
    shift_steps = 0
    total_shift_steps = 0
    output = tf.constant([], dtype=tf.int32)

    for event in events:
      # Let autograph know that the shape of 'output' will change during the
      # loop.
      tf.autograph.experimental.set_loop_options(
          shape_invariants=[(output, tf.TensorShape([None]))])
      if codec.is_shift_event_index(event):
        shift_steps += 1
        total_shift_steps += 1
      else:
        # Once we've reached a non-shift event, RLE all previous shift events
        # before outputting the non-shift event.
        if shift_steps > 0:
          # Emitted shift values sum to total_shift_steps, i.e. they encode
          # steps since the start of the sequence (in max_shift_steps chunks).
          shift_steps = total_shift_steps
          while shift_steps > 0:
            output_steps = tf.minimum(codec.max_shift_steps, shift_steps)
            output = tf.concat([output, [output_steps]], axis=0)
            shift_steps -= output_steps
        output = tf.concat([output, [event]], axis=0)

    # Trailing shifts are never emitted, trimming them implicitly.
    features[feature_key] = output
    return features

  return seqio.map_over_dataset(run_length_encode_shifts)
def merge_run_length_encoded_targets(
    targets: np.ndarray,
    codec: event_codec.Codec
) -> Sequence[int]:
  """Merge multiple tracks of target events into a single stream.

  Shift event values are interpreted as absolute step numbers within the
  segment (as produced by run-length encoding), so tracks can be interleaved
  by repeatedly taking the track with the earliest next step.

  Args:
    targets: A 2D array (# tracks by # events) of integer event values.
    codec: The event_codec.Codec used to interpret the events.

  Returns:
    A 1D array of merged events.
  """
  num_tracks = tf.shape(targets)[0]
  targets_length = tf.shape(targets)[1]

  current_step = 0
  # Per-track read position into `targets`.
  current_offsets = tf.zeros(num_tracks, dtype=tf.int32)

  output = tf.constant([], dtype=tf.int32)
  done = tf.constant(False)

  while not done:
    # Let autograph know that the shape of 'output' will change during the loop.
    tf.autograph.experimental.set_loop_options(
        shape_invariants=[(output, tf.TensorShape([None]))])

    # Determine which targets track has the earliest next step.
    next_step = codec.max_shift_steps + 1
    next_track = -1
    for i in range(num_tracks):
      if (current_offsets[i] == targets_length or
          targets[i][current_offsets[i]] == 0):
        # Already reached the end of this targets track.
        # (Zero is technically a valid shift event but we never actually use it;
        # it is always padding.)
        continue
      if not codec.is_shift_event_index(targets[i][current_offsets[i]]):
        # The only way we would be at a non-shift event is if we have not yet
        # reached the first shift event, which means we're at step zero.
        next_step = 0
        next_track = i
      elif targets[i][current_offsets[i]] < next_step:
        next_step = targets[i][current_offsets[i]]
        next_track = i

    if next_track == -1:
      # We've already merged all of the target tracks in their entirety.
      done = tf.constant(True)
      break

    if next_step == current_step and next_step > 0:
      # We don't need to include the shift event itself as it's the same step as
      # the previous shift.
      start_offset = current_offsets[next_track] + 1
    else:
      start_offset = current_offsets[next_track]

    # Merge in events up to but not including the next shift.
    end_offset = start_offset + 1
    while end_offset < targets_length and not codec.is_shift_event_index(
        targets[next_track][end_offset]):
      end_offset += 1

    output = tf.concat(
        [output, targets[next_track][start_offset:end_offset]], axis=0)

    current_step = next_step
    current_offsets = tf.tensor_scatter_nd_update(
        current_offsets, indices=[[next_track]], updates=[end_offset])

  return output
def decode_events(
    state: DS,
    tokens: np.ndarray,
    start_time: int,
    max_time: Optional[int],
    codec: event_codec.Codec,
    decode_event_fn: Callable[[DS, float, event_codec.Event, event_codec.Codec],
                              None],
) -> Tuple[int, int]:
  """Decode a series of tokens, maintaining a decoding state object.

  Args:
    state: Decoding state object; will be modified in-place.
    tokens: event tokens to convert.
    start_time: offset start time if decoding in the middle of a sequence.
    max_time: Events at or beyond this time will be dropped.
    codec: An event_codec.Codec object that maps indices to Event objects.
    decode_event_fn: Function that consumes an Event (and the current time) and
      updates the decoding state.

  Returns:
    invalid_events: number of events that could not be decoded.
    dropped_events: number of events dropped due to max_time restriction.
  """
  num_invalid = 0
  num_dropped = 0
  steps_elapsed = 0
  event_time = start_time
  idx = 0
  for idx, token in enumerate(tokens):
    try:
      event = codec.decode_event_index(token)
    except ValueError:
      # Token doesn't correspond to any known event; count it and move on.
      num_invalid += 1
      continue
    if event.type != 'shift':
      # Non-shift events reset the step accumulator and are handed to the
      # caller-supplied decoder at the current time.
      steps_elapsed = 0
      try:
        decode_event_fn(state, event_time, event, codec)
      except ValueError:
        num_invalid += 1
        logging.info(
            'Got invalid event when decoding event %s at time %f. '
            'Invalid event counter now at %d.',
            event, event_time, num_invalid, exc_info=True)
    else:
      # Shift events advance the clock; stop early once past max_time.
      steps_elapsed += event.value
      event_time = start_time + steps_elapsed / codec.steps_per_second
      if max_time and event_time > max_time:
        num_dropped = len(tokens) - idx
        break
  return num_invalid, num_dropped
| 15,922 | 36.554245 | 93 | py |
mt3 | mt3-main/mt3/metrics_utils_test.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics_utils."""
from mt3 import event_codec
from mt3 import metrics_utils
from mt3 import note_sequences
import note_seq
import numpy as np
import tensorflow as tf
class MetricsUtilsTest(tf.test.TestCase):
  """Tests for stitching per-segment event predictions into NoteSequences."""

  def test_event_predictions_to_ns(self):
    # Onsets-only codec: token 20 is a 0.2s shift, 160+ are pitch events.
    predictions = [
        {
            'raw_inputs': [0, 0],
            'start_time': 0.0,
            'est_tokens': [20, 160],
        },
        {
            'raw_inputs': [1, 1],
            'start_time': 0.4,
            # These last 2 events should be dropped.
            'est_tokens': [20, 161, 50, 162],
        },
        {
            'raw_inputs': [2, 2],
            'start_time': 0.8,
            'est_tokens': [163, 20, 164]
        },
    ]

    # Expected notes after decoding and stitching all segments.
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=59,
        velocity=100,
        start_time=0.20,
        end_time=0.21)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.60,
        end_time=0.61)
    expected_ns.notes.add(
        pitch=62,
        velocity=100,
        start_time=0.80,
        end_time=0.81)
    expected_ns.notes.add(
        pitch=63,
        velocity=100,
        start_time=1.00,
        end_time=1.01)
    expected_ns.total_time = 1.01

    codec = event_codec.Codec(
        max_shift_steps=100,
        steps_per_second=100,
        event_ranges=[
            event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
                                   note_seq.MAX_MIDI_PITCH)])
    res = metrics_utils.event_predictions_to_ns(
        predictions, codec=codec,
        encoding_spec=note_sequences.NoteOnsetEncodingSpec)
    self.assertProtoEquals(expected_ns, res['est_ns'])
    self.assertEqual(0, res['est_invalid_events'])
    self.assertEqual(2, res['est_dropped_events'])
    np.testing.assert_array_equal([0, 0, 1, 1, 2, 2], res['raw_inputs'])

  def test_event_predictions_to_ns_with_offsets(self):
    # Codec with velocity events: velocity tokens set the velocity for
    # subsequent pitch tokens (zero velocity meaning note-off).
    predictions = [
        {
            'raw_inputs': [0, 0],
            'start_time': 0.0,
            'est_tokens': [20, 356, 160],
        },
        {
            'raw_inputs': [1, 1],
            'start_time': 0.4,
            'est_tokens': [20, 292, 161],
        },
        {
            'raw_inputs': [2, 2],
            'start_time': 0.8,
            'est_tokens': [20, 229, 160, 161]
        },
    ]

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=59,
        velocity=127,
        start_time=0.20,
        end_time=1.00)
    expected_ns.notes.add(
        pitch=60,
        velocity=63,
        start_time=0.60,
        end_time=1.00)
    expected_ns.total_time = 1.00

    codec = event_codec.Codec(
        max_shift_steps=100,
        steps_per_second=100,
        event_ranges=[
            event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
                                   note_seq.MAX_MIDI_PITCH),
            event_codec.EventRange('velocity', 0, 127)
        ])
    res = metrics_utils.event_predictions_to_ns(
        predictions, codec=codec, encoding_spec=note_sequences.NoteEncodingSpec)
    self.assertProtoEquals(expected_ns, res['est_ns'])
    self.assertEqual(0, res['est_invalid_events'])
    self.assertEqual(0, res['est_dropped_events'])
    np.testing.assert_array_equal([0, 0, 1, 1, 2, 2], res['raw_inputs'])

  def test_event_predictions_to_ns_multitrack(self):
    # Codec additionally includes drum and program events.
    predictions = [
        {
            'raw_inputs': [0, 0],
            'start_time': 0.0,
            'est_tokens': [20, 517, 356, 160],
        },
        {
            'raw_inputs': [1, 1],
            'start_time': 0.4,
            'est_tokens': [20, 356, 399],
        },
        {
            'raw_inputs': [2, 2],
            'start_time': 0.8,
            'est_tokens': [20, 517, 229, 160]
        },
    ]

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=42,
        velocity=127,
        start_time=0.60,
        end_time=0.61,
        is_drum=True,
        instrument=9)
    expected_ns.notes.add(
        pitch=59,
        velocity=127,
        start_time=0.20,
        end_time=1.00,
        program=32)
    expected_ns.total_time = 1.00

    codec = event_codec.Codec(
        max_shift_steps=100,
        steps_per_second=100,
        event_ranges=[
            event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
                                   note_seq.MAX_MIDI_PITCH),
            event_codec.EventRange('velocity', 0, 127),
            event_codec.EventRange('drum', note_seq.MIN_MIDI_PITCH,
                                   note_seq.MAX_MIDI_PITCH),
            event_codec.EventRange('program', note_seq.MIN_MIDI_PROGRAM,
                                   note_seq.MAX_MIDI_PROGRAM)
        ])
    res = metrics_utils.event_predictions_to_ns(
        predictions, codec=codec, encoding_spec=note_sequences.NoteEncodingSpec)
    self.assertProtoEquals(expected_ns, res['est_ns'])
    self.assertEqual(0, res['est_invalid_events'])
    self.assertEqual(0, res['est_dropped_events'])
    np.testing.assert_array_equal([0, 0, 1, 1, 2, 2], res['raw_inputs'])

  def test_event_predictions_to_ns_multitrack_ties(self):
    # Codec with a 'tie' event (613); each segment begins with a tie section
    # declaring which previously-active notes continue.
    predictions = [
        {
            'raw_inputs': [0, 0],
            'start_time': 0.0,
            'est_tokens': [613,  # no tied notes
                           20, 517, 356, 160],
        },
        {
            'raw_inputs': [1, 1],
            'start_time': 0.4,
            'est_tokens': [517, 160, 613,  # tied note
                           20, 356, 399],
        },
        {
            'raw_inputs': [2, 2],
            'start_time': 0.8,
            'est_tokens': [613]  # no tied notes, causing active note to end
        },
    ]

    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=42,
        velocity=127,
        start_time=0.60,
        end_time=0.61,
        is_drum=True,
        instrument=9)
    expected_ns.notes.add(
        pitch=59,
        velocity=127,
        start_time=0.20,
        end_time=0.80,
        program=32)
    expected_ns.total_time = 0.80

    codec = event_codec.Codec(
        max_shift_steps=100,
        steps_per_second=100,
        event_ranges=[
            event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
                                   note_seq.MAX_MIDI_PITCH),
            event_codec.EventRange('velocity', 0, 127),
            event_codec.EventRange('drum', note_seq.MIN_MIDI_PITCH,
                                   note_seq.MAX_MIDI_PITCH),
            event_codec.EventRange('program', note_seq.MIN_MIDI_PROGRAM,
                                   note_seq.MAX_MIDI_PROGRAM),
            event_codec.EventRange('tie', 0, 0)
        ])
    res = metrics_utils.event_predictions_to_ns(
        predictions, codec=codec,
        encoding_spec=note_sequences.NoteEncodingWithTiesSpec)
    self.assertProtoEquals(expected_ns, res['est_ns'])
    self.assertEqual(0, res['est_invalid_events'])
    self.assertEqual(0, res['est_dropped_events'])
    np.testing.assert_array_equal([0, 0, 1, 1, 2, 2], res['raw_inputs'])

  def test_frame_metrics(self):
    ref = np.zeros(shape=(128, 5))
    est = np.zeros(shape=(128, 5))

    # one overlapping note, two false positives, two false negatives
    ref[10, 0] = 127
    ref[10, 1] = 127
    ref[10, 2] = 127

    est[10, 2] = 127
    est[10, 3] = 127
    est[10, 4] = 127

    prec, rec, _ = metrics_utils.frame_metrics(ref, est, velocity_threshold=1)
    np.testing.assert_approx_equal(prec, 1/3)
    np.testing.assert_approx_equal(rec, 1/3)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  tf.test.main()
| 8,271 | 30.815385 | 80 | py |
mt3 | mt3-main/mt3/event_codec_test.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for event_codec."""
from absl.testing import absltest
from mt3 import event_codec
Event = event_codec.Event
EventRange = event_codec.EventRange
class EventCodecTest(absltest.TestCase):
  """Tests for Codec event encoding/decoding and shift-index queries."""

  def _make_codec(self):
    # Both tests use the same small codec: 100 shift steps plus a full
    # MIDI pitch range.
    return event_codec.Codec(
        max_shift_steps=100,
        steps_per_second=100,
        event_ranges=[EventRange('pitch', min_value=0, max_value=127)])

  def test_encode_decode(self):
    codec = self._make_codec()
    original = [
        Event(type='pitch', value=60),
        Event(type='shift', value=5),
        Event(type='pitch', value=62),
    ]
    # Pitch events follow the shift range, so pitch 60 encodes to 101 + 60.
    indices = [codec.encode_event(ev) for ev in original]
    self.assertSequenceEqual([161, 5, 163], indices)
    # Decoding the indices should give back the original events.
    round_tripped = [codec.decode_event_index(i) for i in indices]
    self.assertSequenceEqual(original, round_tripped)

  def test_shift_steps(self):
    codec = self._make_codec()
    self.assertEqual(100, codec.max_shift_steps)
    # Shift indices span 0..max_shift_steps inclusive.
    self.assertFalse(codec.is_shift_event_index(-1))
    self.assertTrue(codec.is_shift_event_index(0))
    self.assertTrue(codec.is_shift_event_index(100))
    self.assertFalse(codec.is_shift_event_index(101))
# Allow running this test module directly as a script.
if __name__ == '__main__':
  absltest.main()
| 1,803 | 31.214286 | 74 | py |
mt3 | mt3-main/mt3/version.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MT3 version."""
# Version string for the mt3 package.
__version__ = '0.0.1'
| 622 | 35.647059 | 74 | py |
mt3 | mt3-main/mt3/note_sequences.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions that operate on NoteSequence protos."""
import dataclasses
import itertools
from typing import MutableMapping, MutableSet, Optional, Sequence, Tuple
from mt3 import event_codec
from mt3 import run_length_encoding
from mt3 import vocabularies
import note_seq
# Velocity assigned when decoding events that carry no velocity of their own.
DEFAULT_VELOCITY = 100
# Duration (in seconds) used for notes decoded without an explicit offset.
DEFAULT_NOTE_DURATION = 0.01

# Quantization can result in zero-length notes; enforce a minimum duration.
MIN_NOTE_DURATION = 0.01
@dataclasses.dataclass
class TrackSpec:
  """Specification of a track: a name plus its MIDI program and drum flag."""
  name: str
  program: int = 0
  is_drum: bool = False
def extract_track(ns, program, is_drum):
  """Return a new NoteSequence with only the notes matching program/is_drum."""
  track = note_seq.NoteSequence(ticks_per_quarter=220)
  for note in ns.notes:
    if note.program == program and note.is_drum == is_drum:
      track.notes.append(note)
  # total_time is the latest offset among the selected notes (0 if none).
  if track.notes:
    track.total_time = max(note.end_time for note in track.notes)
  else:
    track.total_time = 0.0
  return track
def trim_overlapping_notes(ns: note_seq.NoteSequence) -> note_seq.NoteSequence:
  """Trim overlapping notes from a NoteSequence, dropping zero-length notes."""
  # Work on a copy; the input sequence is left untouched.
  ns_trimmed = note_seq.NoteSequence()
  ns_trimmed.CopyFrom(ns)
  # Overlap is only resolved between notes on the same "channel", i.e. same
  # pitch, program, and drum flag.
  channels = set((note.pitch, note.program, note.is_drum)
                 for note in ns_trimmed.notes)
  for pitch, program, is_drum in channels:
    notes = [note for note in ns_trimmed.notes if note.pitch == pitch
             and note.program == program and note.is_drum == is_drum]
    sorted_notes = sorted(notes, key=lambda note: note.start_time)
    # Truncate each note at the onset of the next note on the same channel.
    for i in range(1, len(sorted_notes)):
      if sorted_notes[i - 1].end_time > sorted_notes[i].start_time:
        sorted_notes[i - 1].end_time = sorted_notes[i].start_time
  # Trimming can produce zero-length (or inverted) notes; drop them.
  valid_notes = [note for note in ns_trimmed.notes
                 if note.start_time < note.end_time]
  del ns_trimmed.notes[:]
  ns_trimmed.notes.extend(valid_notes)
  return ns_trimmed
def assign_instruments(ns: note_seq.NoteSequence) -> None:
  """Assign instrument numbers to notes; modifies NoteSequence in place."""
  # Maps each non-drum program to the instrument number it was assigned.
  instrument_by_program = {}
  for note in ns.notes:
    if note.is_drum:
      # All drums go on the MIDI percussion channel.
      note.instrument = 9
      continue
    if note.program in instrument_by_program:
      note.instrument = instrument_by_program[note.program]
      continue
    # First occurrence of this program: hand out the next instrument number,
    # skipping 9 (reserved for drums above).
    next_instrument = len(instrument_by_program)
    if next_instrument >= 9:
      next_instrument += 1
    note.instrument = next_instrument
    instrument_by_program[note.program] = next_instrument
def validate_note_sequence(ns: note_seq.NoteSequence) -> None:
  """Raise ValueError if NoteSequence contains invalid notes."""
  for note in ns.notes:
    start, end = note.start_time, note.end_time
    # Every note must span a positive amount of time...
    if start >= end:
      raise ValueError('note has start time >= end time: %f >= %f' %
                       (start, end))
    # ...and must have a nonzero velocity.
    if note.velocity == 0:
      raise ValueError('note has zero velocity')
def note_arrays_to_note_sequence(
    onset_times: Sequence[float],
    pitches: Sequence[int],
    offset_times: Optional[Sequence[float]] = None,
    velocities: Optional[Sequence[int]] = None,
    programs: Optional[Sequence[int]] = None,
    is_drums: Optional[Sequence[bool]] = None
) -> note_seq.NoteSequence:
  """Convert note onset / offset / pitch / velocity arrays to NoteSequence.

  Args:
    onset_times: Note onset times in seconds.
    pitches: MIDI pitch for each note.
    offset_times: Optional offset times; missing entries default to
        onset + DEFAULT_NOTE_DURATION.
    velocities: Optional velocities; missing entries default to
        DEFAULT_VELOCITY.
    programs: Optional MIDI programs; missing entries default to 0.
    is_drums: Optional drum flags; missing entries default to False.

  Returns:
    A NoteSequence containing the specified notes, with instrument numbers
    assigned.
  """
  ns = note_seq.NoteSequence(ticks_per_quarter=220)
  # zip_longest pads the shorter (or absent) arrays with None, which is then
  # replaced by the per-field default below.
  for onset_time, offset_time, pitch, velocity, program, is_drum in itertools.zip_longest(
      onset_times, [] if offset_times is None else offset_times,
      pitches, [] if velocities is None else velocities,
      [] if programs is None else programs,
      [] if is_drums is None else is_drums):
    if offset_time is None:
      offset_time = onset_time + DEFAULT_NOTE_DURATION
    if velocity is None:
      velocity = DEFAULT_VELOCITY
    if program is None:
      program = 0
    if is_drum is None:
      is_drum = False
    ns.notes.add(
        start_time=onset_time,
        end_time=offset_time,
        pitch=pitch,
        velocity=velocity,
        program=program,
        is_drum=is_drum)
    ns.total_time = max(ns.total_time, offset_time)
  assign_instruments(ns)
  return ns
@dataclasses.dataclass
class NoteEventData:
  """Data for a single note event; optional fields depend on the task."""
  pitch: int
  velocity: Optional[int] = None
  program: Optional[int] = None
  is_drum: Optional[bool] = None
  instrument: Optional[int] = None
def note_sequence_to_onsets(
    ns: note_seq.NoteSequence
) -> Tuple[Sequence[float], Sequence[NoteEventData]]:
  """Extract note onsets and pitches from NoteSequence proto."""
  # Sort by pitch to use as a tiebreaker for subsequent stable sort.
  pitch_sorted = sorted(ns.notes, key=lambda n: n.pitch)
  onset_times = [n.start_time for n in pitch_sorted]
  event_data = [NoteEventData(pitch=n.pitch) for n in pitch_sorted]
  return onset_times, event_data
def note_sequence_to_onsets_and_offsets(
    ns: note_seq.NoteSequence,
) -> Tuple[Sequence[float], Sequence[NoteEventData]]:
  """Extract onset & offset times and pitches from a NoteSequence proto.

  The onset & offset times will not necessarily be in sorted order.

  Args:
    ns: NoteSequence from which to extract onsets and offsets.

  Returns:
    times: A list of note onset and offset times.
    values: A list of NoteEventData objects where velocity is zero for note
      offsets.
  """
  # Sort by pitch and put offsets before onsets as a tiebreaker for subsequent
  # stable sort.
  pitch_sorted = sorted(ns.notes, key=lambda n: n.pitch)
  offset_times = [n.end_time for n in pitch_sorted]
  onset_times = [n.start_time for n in pitch_sorted]
  # Offsets are encoded as zero-velocity events; onsets keep their velocity.
  offset_values = [NoteEventData(pitch=n.pitch, velocity=0)
                   for n in pitch_sorted]
  onset_values = [NoteEventData(pitch=n.pitch, velocity=n.velocity)
                  for n in pitch_sorted]
  return offset_times + onset_times, offset_values + onset_values
def note_sequence_to_onsets_and_offsets_and_programs(
    ns: note_seq.NoteSequence,
) -> Tuple[Sequence[float], Sequence[NoteEventData]]:
  """Extract onset & offset times and pitches & programs from a NoteSequence.

  The onset & offset times will not necessarily be in sorted order.

  Args:
    ns: NoteSequence from which to extract onsets and offsets.

  Returns:
    times: A list of note onset and offset times.
    values: A list of NoteEventData objects where velocity is zero for note
        offsets.
  """
  # Sort by program and pitch and put offsets before onsets as a tiebreaker for
  # subsequent stable sort.
  notes = sorted(ns.notes,
                 key=lambda note: (note.is_drum, note.program, note.pitch))
  # Drum notes contribute no offsets; only non-drum notes appear in the
  # offset (zero-velocity) portion of the output.
  times = ([note.end_time for note in notes if not note.is_drum] +
           [note.start_time for note in notes])
  values = ([NoteEventData(pitch=note.pitch, velocity=0,
                           program=note.program, is_drum=False)
             for note in notes if not note.is_drum] +
            [NoteEventData(pitch=note.pitch, velocity=note.velocity,
                           program=note.program, is_drum=note.is_drum)
             for note in notes])
  return times, values
@dataclasses.dataclass
class NoteEncodingState:
  """Encoding state for note transcription, keeping track of active pitches."""
  # Velocity bin for each active (pitch, program) pair.
  active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(
      default_factory=dict)
def note_event_data_to_events(
    state: Optional[NoteEncodingState],
    value: NoteEventData,
    codec: event_codec.Codec,
) -> Sequence[event_codec.Event]:
  """Convert note event data to a sequence of events.

  Args:
    state: Optional encoding state; when present, (pitch, program) pairs are
        recorded with their velocity bin so ties can be emitted later.
    value: Note data; which optional fields are set determines the event
        vocabulary used (onsets only, +velocity, +program, drums).
    codec: Codec used to determine the number of velocity bins.

  Returns:
    A sequence of events encoding the note.
  """
  if value.velocity is None:
    # onsets only, no program or velocity
    return [event_codec.Event('pitch', value.pitch)]
  else:
    num_velocity_bins = vocabularies.num_velocity_bins_from_codec(codec)
    velocity_bin = vocabularies.velocity_to_bin(
        value.velocity, num_velocity_bins)
    if value.program is None:
      # onsets + offsets + velocities only, no programs
      if state is not None:
        state.active_pitches[(value.pitch, 0)] = velocity_bin
      return [event_codec.Event('velocity', velocity_bin),
              event_codec.Event('pitch', value.pitch)]
    else:
      if value.is_drum:
        # drum events use a separate vocabulary
        return [event_codec.Event('velocity', velocity_bin),
                event_codec.Event('drum', value.pitch)]
      else:
        # program + velocity + pitch
        if state is not None:
          state.active_pitches[(value.pitch, value.program)] = velocity_bin
        return [event_codec.Event('program', value.program),
                event_codec.Event('velocity', velocity_bin),
                event_codec.Event('pitch', value.pitch)]
def note_encoding_state_to_events(
    state: NoteEncodingState
) -> Sequence[event_codec.Event]:
  """Output program and pitch events for active notes plus a final tie event."""
  tie_events = []
  # Order active notes by (program, pitch); k[::-1] reverses each
  # (pitch, program) key so program becomes the primary sort key.
  for pitch, program in sorted(state.active_pitches, key=lambda k: k[::-1]):
    velocity_bin = state.active_pitches[(pitch, program)]
    # Only re-declare notes with a nonzero velocity bin.
    if velocity_bin:
      tie_events.append(event_codec.Event('program', program))
      tie_events.append(event_codec.Event('pitch', pitch))
  # Terminate the tie section.
  tie_events.append(event_codec.Event('tie', 0))
  return tie_events
@dataclasses.dataclass
class NoteDecodingState:
  """Decoding state for note transcription.

  Tracks the running time, velocity, and program plus the active and tied
  notes while converting an event stream back into a NoteSequence.
  """
  current_time: float = 0.0
  # velocity to apply to subsequent pitch events (zero for note-off)
  current_velocity: int = DEFAULT_VELOCITY
  # program to apply to subsequent pitch events
  current_program: int = 0
  # onset time and velocity for active pitches and programs,
  # keyed by (pitch, program)
  active_pitches: MutableMapping[Tuple[int, int],
                                 Tuple[float, int]] = dataclasses.field(
                                     default_factory=dict)
  # pitches (with programs) to continue from previous segment
  tied_pitches: MutableSet[Tuple[int, int]] = dataclasses.field(
      default_factory=set)
  # whether or not we are in the tie section at the beginning of a segment
  is_tie_section: bool = False
  # partially-decoded NoteSequence
  note_sequence: note_seq.NoteSequence = dataclasses.field(
      default_factory=lambda: note_seq.NoteSequence(ticks_per_quarter=220))
def decode_note_onset_event(
    state: NoteDecodingState,
    time: float,
    event: event_codec.Event,
    codec: event_codec.Codec,
) -> None:
  """Process note onset event and update decoding state."""
  if event.type != 'pitch':
    raise ValueError('unexpected event type: %s' % event.type)
  # Onsets-only decoding: every pitch event becomes a fixed-length note at
  # the default velocity.
  offset_time = time + DEFAULT_NOTE_DURATION
  state.note_sequence.notes.add(
      start_time=time, end_time=offset_time,
      pitch=event.value, velocity=DEFAULT_VELOCITY)
  if offset_time > state.note_sequence.total_time:
    state.note_sequence.total_time = offset_time
def _add_note_to_sequence(
    ns: note_seq.NoteSequence,
    start_time: float, end_time: float, pitch: int, velocity: int,
    program: int = 0, is_drum: bool = False
) -> None:
  """Append one note to `ns`, enforcing a minimum duration; updates total_time."""
  # Never emit a zero-length (or negative-length) note.
  clamped_end = max(end_time, start_time + MIN_NOTE_DURATION)
  ns.notes.add(
      start_time=start_time, end_time=clamped_end,
      pitch=pitch, velocity=velocity, program=program, is_drum=is_drum)
  if clamped_end > ns.total_time:
    ns.total_time = clamped_end
def decode_note_event(
    state: NoteDecodingState,
    time: float,
    event: event_codec.Event,
    codec: event_codec.Codec
) -> None:
  """Process note event and update decoding state.

  Args:
    state: Decoding state, updated in place.
    time: Time of the event in seconds; must be monotonically nondecreasing.
    event: The event to process ('pitch', 'drum', 'velocity', 'program', or
        'tie').
    codec: Codec used to determine the number of velocity bins.

  Raises:
    ValueError: If time moves backwards, the event is inconsistent with the
        current state, or the event type is unknown.
  """
  if time < state.current_time:
    raise ValueError('event time < current time, %f < %f' % (
        time, state.current_time))
  state.current_time = time
  if event.type == 'pitch':
    pitch = event.value
    if state.is_tie_section:
      # "tied" pitch: declares that a note from the previous segment is still
      # sounding; it must already be active and not yet declared tied.
      if (pitch, state.current_program) not in state.active_pitches:
        raise ValueError('inactive pitch/program in tie section: %d/%d' %
                         (pitch, state.current_program))
      if (pitch, state.current_program) in state.tied_pitches:
        raise ValueError('pitch/program is already tied: %d/%d' %
                         (pitch, state.current_program))
      state.tied_pitches.add((pitch, state.current_program))
    elif state.current_velocity == 0:
      # note offset (zero velocity): finalize the active note.
      if (pitch, state.current_program) not in state.active_pitches:
        raise ValueError('note-off for inactive pitch/program: %d/%d' %
                         (pitch, state.current_program))
      onset_time, onset_velocity = state.active_pitches.pop(
          (pitch, state.current_program))
      _add_note_to_sequence(
          state.note_sequence, start_time=onset_time, end_time=time,
          pitch=pitch, velocity=onset_velocity, program=state.current_program)
    else:
      # note onset
      if (pitch, state.current_program) in state.active_pitches:
        # The pitch is already active; this shouldn't really happen but we'll
        # try to handle it gracefully by ending the previous note and starting a
        # new one.
        onset_time, onset_velocity = state.active_pitches.pop(
            (pitch, state.current_program))
        _add_note_to_sequence(
            state.note_sequence, start_time=onset_time, end_time=time,
            pitch=pitch, velocity=onset_velocity, program=state.current_program)
      state.active_pitches[(pitch, state.current_program)] = (
          time, state.current_velocity)
  elif event.type == 'drum':
    # drum onset (drums have no offset); emit a fixed-duration note.
    if state.current_velocity == 0:
      raise ValueError('velocity cannot be zero for drum event')
    offset_time = time + DEFAULT_NOTE_DURATION
    _add_note_to_sequence(
        state.note_sequence, start_time=time, end_time=offset_time,
        pitch=event.value, velocity=state.current_velocity, is_drum=True)
  elif event.type == 'velocity':
    # velocity change: applies to all subsequent pitch events.
    num_velocity_bins = vocabularies.num_velocity_bins_from_codec(codec)
    velocity = vocabularies.bin_to_velocity(event.value, num_velocity_bins)
    state.current_velocity = velocity
  elif event.type == 'program':
    # program change: applies to all subsequent pitch events.
    state.current_program = event.value
  elif event.type == 'tie':
    # end of tie section; end active notes that weren't declared tied
    if not state.is_tie_section:
      raise ValueError('tie section end event when not in tie section')
    for (pitch, program) in list(state.active_pitches.keys()):
      if (pitch, program) not in state.tied_pitches:
        onset_time, onset_velocity = state.active_pitches.pop((pitch, program))
        _add_note_to_sequence(
            state.note_sequence,
            start_time=onset_time, end_time=state.current_time,
            pitch=pitch, velocity=onset_velocity, program=program)
    state.is_tie_section = False
  else:
    raise ValueError('unexpected event type: %s' % event.type)
def begin_tied_pitches_section(state: NoteDecodingState) -> None:
  """Begin the tied pitches section at the start of a segment."""
  # Entering the tie section: mark it active and forget any previously
  # declared tied pitches.
  state.is_tie_section = True
  state.tied_pitches = set()
def flush_note_decoding_state(
    state: NoteDecodingState
) -> note_seq.NoteSequence:
  """End all active notes and return resulting NoteSequence."""
  # Advance the clock so that every still-active note gets at least the
  # minimum duration when it is finalized below.
  for onset_time, _ in state.active_pitches.values():
    state.current_time = max(state.current_time, onset_time + MIN_NOTE_DURATION)
  # Finalize every remaining active note at the current time.
  for (pitch, program) in list(state.active_pitches.keys()):
    onset_time, onset_velocity = state.active_pitches.pop((pitch, program))
    _add_note_to_sequence(
        state.note_sequence, start_time=onset_time, end_time=state.current_time,
        pitch=pitch, velocity=onset_velocity, program=program)
  assign_instruments(state.note_sequence)
  return state.note_sequence
class NoteEncodingSpecType(run_length_encoding.EventEncodingSpec):
  # Marker subclass for note-specific encoding specs; adds no behavior beyond
  # run_length_encoding.EventEncodingSpec.
  pass
# Encoding spec for modeling note onsets only; decoding uses
# decode_note_onset_event and ignores offsets.
NoteOnsetEncodingSpec = NoteEncodingSpecType(
    init_encoding_state_fn=lambda: None,
    encode_event_fn=note_event_data_to_events,
    encoding_state_to_events_fn=None,
    init_decoding_state_fn=NoteDecodingState,
    begin_decoding_segment_fn=lambda state: None,
    decode_event_fn=decode_note_onset_event,
    flush_decoding_state_fn=lambda state: state.note_sequence)

# Encoding spec for modeling both onsets and offsets; active notes remaining
# at the end of decoding are closed by flush_note_decoding_state.
NoteEncodingSpec = NoteEncodingSpecType(
    init_encoding_state_fn=lambda: None,
    encode_event_fn=note_event_data_to_events,
    encoding_state_to_events_fn=None,
    init_decoding_state_fn=NoteDecodingState,
    begin_decoding_segment_fn=lambda state: None,
    decode_event_fn=decode_note_event,
    flush_decoding_state_fn=flush_note_decoding_state)

# Encoding spec for modeling onsets and offsets, with a "tie" section at the
# beginning of each segment listing already-active notes.
NoteEncodingWithTiesSpec = NoteEncodingSpecType(
    init_encoding_state_fn=NoteEncodingState,
    encode_event_fn=note_event_data_to_events,
    encoding_state_to_events_fn=note_encoding_state_to_events,
    init_decoding_state_fn=NoteDecodingState,
    begin_decoding_segment_fn=begin_tied_pitches_section,
    decode_event_fn=decode_note_event,
    flush_decoding_state_fn=flush_note_decoding_state)
| 17,423 | 37.979866 | 90 | py |
mt3 | mt3-main/mt3/datasets.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset configurations."""
import dataclasses
from typing import Mapping, Sequence, Union
from mt3 import note_sequences
import tensorflow as tf
@dataclasses.dataclass
class InferEvalSplit:
  """Specification of one dataset split evaluated via inference."""
  # key in dictionary containing all dataset splits
  name: str
  # task name suffix (each eval split is a separate task)
  suffix: str
  # whether or not to include in the mixture of all eval tasks
  include_in_mixture: bool = True
@dataclasses.dataclass
class DatasetConfig:
  """Configuration for a transcription dataset (paths, features, splits)."""
  # dataset name
  name: str
  # mapping from split name to path (file pattern, possibly sharded)
  paths: Mapping[str, str]
  # mapping from feature name to feature, used to parse the TFRecord examples
  features: Mapping[str, Union[tf.io.FixedLenFeature,
                               tf.io.FixedLenSequenceFeature]]
  # training split name
  train_split: str
  # training eval split name
  train_eval_split: str
  # list of infer eval split specs
  infer_eval_splits: Sequence[InferEvalSplit]
  # list of track specs to be used for metrics (empty by default)
  track_specs: Sequence[note_sequences.TrackSpec] = dataclasses.field(
      default_factory=list)
# MAESTRO v1.0.0 dataset configuration (audio plus serialized note sequences).
MAESTROV1_CONFIG = DatasetConfig(
    name='maestrov1',
    paths={
        'train':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-?????-of-00010',
        'train_subset':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-00002-of-00010',
        'validation':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-?????-of-00010',
        'validation_subset':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-0000[06]-of-00010',
        'test':
            'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_test.tfrecord-?????-of-00010'
    },
    features={
        'audio': tf.io.FixedLenFeature([], dtype=tf.string),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
        'id': tf.io.FixedLenFeature([], dtype=tf.string)
    },
    train_split='train',
    train_eval_split='validation_subset',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train_full',
                       include_in_mixture=False),
        InferEvalSplit(name='train_subset', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation_full',
                       include_in_mixture=False),
        InferEvalSplit(name='validation_subset', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])

# MAESTRO v3.0.0 dataset configuration; same feature schema as v1 but
# different shard counts and subset shards.
MAESTROV3_CONFIG = DatasetConfig(
    name='maestrov3',
    paths={
        'train':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-?????-of-00025',
        'train_subset':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-00004-of-00025',
        'validation':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-?????-of-00025',
        'validation_subset':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-0002?-of-00025',
        'test':
            'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_test.tfrecord-?????-of-00025'
    },
    features={
        'audio': tf.io.FixedLenFeature([], dtype=tf.string),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
        'id': tf.io.FixedLenFeature([], dtype=tf.string)
    },
    train_split='train',
    train_eval_split='validation_subset',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train_full',
                       include_in_mixture=False),
        InferEvalSplit(name='train_subset', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation_full',
                       include_in_mixture=False),
        InferEvalSplit(name='validation_subset', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])
# GuitarSet dataset configuration (train/validation only; no test split).
GUITARSET_CONFIG = DatasetConfig(
    name='guitarset',
    paths={
        'train':
            'gs://mt3/data/datasets/guitarset/train.tfrecord-?????-of-00019',
        'validation':
            'gs://mt3/data/datasets/guitarset/validation.tfrecord-?????-of-00006',
    },
    features={
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
        'audio': tf.io.FixedLenFeature([], dtype=tf.string),
        'velocity_range': tf.io.FixedLenFeature([], dtype=tf.string),
        'id': tf.io.FixedLenFeature([], dtype=tf.string),
    },
    train_split='train',
    train_eval_split='validation',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation'),
    ])

# URMP dataset configuration; includes per-track instrument names and
# per-instrument note sequences in addition to the mixed sequence.
URMP_CONFIG = DatasetConfig(
    name='urmp',
    paths={
        'train': 'gs://mt3/data/datasets/urmp/train.tfrecord',
        'validation': 'gs://mt3/data/datasets/urmp/validation.tfrecord',
    },
    features={
        'id': tf.io.FixedLenFeature([], dtype=tf.string),
        'tracks': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'inst_names': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
        'audio': tf.io.FixedLenFeature([], dtype=tf.string),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
        'instrument_sequences': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
    },
    train_split='train',
    train_eval_split='validation',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation')
    ])
# MusicNet dataset configuration; audio is stored as raw float samples with
# an explicit sample rate rather than an encoded string.
MUSICNET_CONFIG = DatasetConfig(
    name='musicnet',
    paths={
        'train':
            'gs://mt3/data/datasets/musicnet/musicnet-train.tfrecord-?????-of-00036',
        'validation':
            'gs://mt3/data/datasets/musicnet/musicnet-validation.tfrecord-?????-of-00005',
        'test':
            'gs://mt3/data/datasets/musicnet/musicnet-test.tfrecord-?????-of-00003'
    },
    features={
        'id': tf.io.FixedLenFeature([], dtype=tf.string),
        'sample_rate': tf.io.FixedLenFeature([], dtype=tf.float32),
        'audio': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.float32, allow_missing=True),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string)
    },
    train_split='train',
    train_eval_split='validation',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])

# MusicNet "EM" variant configuration; same feature schema as MUSICNET_CONFIG
# but different shards/paths.
MUSICNET_EM_CONFIG = DatasetConfig(
    name='musicnet_em',
    paths={
        'train':
            'gs://mt3/data/datasets/musicnet_em/train.tfrecord-?????-of-00103',
        'validation':
            'gs://mt3/data/datasets/musicnet_em/validation.tfrecord-?????-of-00005',
        'test':
            'gs://mt3/data/datasets/musicnet_em/test.tfrecord-?????-of-00006'
    },
    features={
        'id': tf.io.FixedLenFeature([], dtype=tf.string),
        'sample_rate': tf.io.FixedLenFeature([], dtype=tf.float32),
        'audio': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.float32, allow_missing=True),
        'sequence': tf.io.FixedLenFeature([], dtype=tf.string)
    },
    train_split='train',
    train_eval_split='validation',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])
# Slakh-derived "Cerberus" 4-instrument configuration (bass/drums/guitar/
# piano); supplies track_specs so per-track metrics can be computed.
CERBERUS4_CONFIG = DatasetConfig(
    name='cerberus4',
    paths={
        'train':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-?????-of-00286',
        'train_subset':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-00000-of-00286',
        'validation':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-?????-of-00212',
        'validation_subset':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-0000?-of-00212',
        'test':
            'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_test_bass:drums:guitar:piano.tfrecord-?????-of-00106'
    },
    features={
        'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64),
        'inst_names': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
        'midi_class': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'mix': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.float32, allow_missing=True),
        'note_sequences': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
        'plugin_name': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'program_num': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'slakh_class': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.int64, allow_missing=True),
        'src_ids': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.string, allow_missing=True),
        'stems': tf.io.FixedLenSequenceFeature(
            [], dtype=tf.float32, allow_missing=True),
        'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64),
        'target_type': tf.io.FixedLenFeature([], dtype=tf.string),
        'track_id': tf.io.FixedLenFeature([], dtype=tf.string),
    },
    train_split='train',
    train_eval_split='validation_subset',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train_full',
                       include_in_mixture=False),
        InferEvalSplit(name='train_subset', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation_full',
                       include_in_mixture=False),
        InferEvalSplit(name='validation_subset', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ],
    track_specs=[
        note_sequences.TrackSpec('bass', program=32),
        note_sequences.TrackSpec('drums', is_drum=True),
        note_sequences.TrackSpec('guitar', program=24),
        note_sequences.TrackSpec('piano', program=0)
    ])
# Full Slakh configuration with all instruments; same feature schema as
# CERBERUS4_CONFIG but no fixed track_specs.
SLAKH_CONFIG = DatasetConfig(
    name='slakh',
    paths={
        'train':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-?????-of-02307',
        'train_subset':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-00000-of-02307',
        'validation':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-?????-of-00168',
        'validation_subset':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-0000?-of-00168',
        'test':
            'gs://mt3/data/datasets/slakh/slakh_multi_full_test_all_inst.tfrecord-?????-of-00109'
    },
    features={
        'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64),
        'inst_names': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
                                                    allow_missing=True),
        'midi_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
                                                    allow_missing=True),
        'mix': tf.io.FixedLenSequenceFeature([], dtype=tf.float32,
                                             allow_missing=True),
        'note_sequences': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
                                                        allow_missing=True),
        'plugin_name': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
                                                     allow_missing=True),
        'program_num': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
                                                     allow_missing=True),
        'slakh_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,
                                                     allow_missing=True),
        'src_ids': tf.io.FixedLenSequenceFeature([], dtype=tf.string,
                                                 allow_missing=True),
        'stems': tf.io.FixedLenSequenceFeature([], dtype=tf.float32,
                                               allow_missing=True),
        'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64),
        'target_type': tf.io.FixedLenFeature([], dtype=tf.string),
        'track_id': tf.io.FixedLenFeature([], dtype=tf.string),
    },
    train_split='train',
    train_eval_split='validation_subset',
    infer_eval_splits=[
        InferEvalSplit(name='train', suffix='eval_train_full',
                       include_in_mixture=False),
        InferEvalSplit(name='train_subset', suffix='eval_train'),
        InferEvalSplit(name='validation', suffix='validation_full',
                       include_in_mixture=False),
        InferEvalSplit(name='validation_subset', suffix='validation'),
        InferEvalSplit(name='test', suffix='test', include_in_mixture=False)
    ])
| 14,018 | 42.003067 | 127 | py |
mt3 | mt3-main/mt3/vocabularies.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model vocabulary."""
import dataclasses
import math
from typing import Callable, Optional, Sequence
from mt3 import event_codec
import note_seq
import seqio
import t5.data
import tensorflow as tf
DECODED_EOS_ID = -1
DECODED_INVALID_ID = -2
# defaults for vocabulary config
DEFAULT_STEPS_PER_SECOND = 100
DEFAULT_MAX_SHIFT_SECONDS = 10
DEFAULT_NUM_VELOCITY_BINS = 127
@dataclasses.dataclass
class VocabularyConfig:
  """Vocabulary configuration parameters."""
  steps_per_second: int = DEFAULT_STEPS_PER_SECOND
  max_shift_seconds: int = DEFAULT_MAX_SHIFT_SECONDS
  num_velocity_bins: int = DEFAULT_NUM_VELOCITY_BINS

  @property
  def abbrev_str(self):
    """Compact string naming only the settings that differ from defaults."""
    parts = []
    if self.steps_per_second != DEFAULT_STEPS_PER_SECOND:
      parts.append('ss%d' % self.steps_per_second)
    if self.max_shift_seconds != DEFAULT_MAX_SHIFT_SECONDS:
      parts.append('ms%d' % self.max_shift_seconds)
    if self.num_velocity_bins != DEFAULT_NUM_VELOCITY_BINS:
      parts.append('vb%d' % self.num_velocity_bins)
    # Empty string for an all-default config, matching the original behavior.
    return ''.join(parts)
def num_velocity_bins_from_codec(codec: event_codec.Codec):
  """Get number of velocity bins from event codec.

  Bin 0 means note-off, so the count is the range width (hi - lo).
  """
  low, high = codec.event_type_range('velocity')
  return high - low
def velocity_to_bin(velocity, num_velocity_bins):
  """Quantize a MIDI velocity into a bin index; velocity 0 maps to bin 0."""
  if velocity == 0:
    return 0
  # Ceiling so that any nonzero velocity lands in bin >= 1.
  return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY)
def bin_to_velocity(velocity_bin, num_velocity_bins):
  """Map a velocity bin index back to a MIDI velocity; bin 0 maps to 0."""
  if velocity_bin == 0:
    return 0
  # Truncating int conversion, as in the original implementation.
  return int(note_seq.MAX_MIDI_VELOCITY * velocity_bin / num_velocity_bins)
def drop_programs(tokens, codec: event_codec.Codec):
  """Drops program change events from a token sequence."""
  lo, hi = codec.event_type_range('program')
  # Keep only tokens strictly outside the program-event id range.
  outside_program_range = (tokens < lo) | (tokens > hi)
  return tokens[outside_program_range]
def programs_to_midi_classes(tokens, codec):
  """Modifies program events to be the first program in the MIDI class."""
  lo, hi = codec.event_type_range('program')
  is_program = (tokens >= lo) & (tokens <= hi)
  # Snap each program token down to the first program of its 8-wide class.
  midi_class_start = lo + 8 * ((tokens - lo) // 8)
  return tf.where(is_program, midi_class_start, tokens)
@dataclasses.dataclass
class ProgramGranularity:
  """Pair of mapping functions defining a program granularity level."""
  # both tokens_map_fn and program_map_fn should be idempotent
  tokens_map_fn: Callable[[Sequence[int], event_codec.Codec], Sequence[int]]
  program_map_fn: Callable[[int], int]
# Registry of supported program granularity levels, keyed by name.
PROGRAM_GRANULARITIES = {
    # "flat" granularity; drop program change tokens and set NoteSequence
    # programs to zero
    'flat': ProgramGranularity(
        tokens_map_fn=drop_programs,
        program_map_fn=lambda program: 0),

    # map each program to the first program in its MIDI class
    'midi_class': ProgramGranularity(
        tokens_map_fn=programs_to_midi_classes,
        program_map_fn=lambda program: 8 * (program // 8)),

    # leave programs as is
    'full': ProgramGranularity(
        tokens_map_fn=lambda tokens, codec: tokens,
        program_map_fn=lambda program: program)
}
def build_codec(vocab_config: VocabularyConfig):
  """Build event codec.

  Args:
    vocab_config: vocabulary parameters (velocity bins, shift resolution).

  Returns:
    An event_codec.Codec covering pitch, velocity, tie, program, and drum
    events, with max_shift_steps derived from the config.
  """
  event_ranges = [
      event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
                             note_seq.MAX_MIDI_PITCH),
      # velocity bin 0 is used for note-off
      event_codec.EventRange('velocity', 0, vocab_config.num_velocity_bins),
      # used to indicate that a pitch is present at the beginning of a segment
      # (only has an "off" event as when using ties all pitch events until the
      # "tie" event belong to the tie section)
      event_codec.EventRange('tie', 0, 0),
      event_codec.EventRange('program', note_seq.MIN_MIDI_PROGRAM,
                             note_seq.MAX_MIDI_PROGRAM),
      event_codec.EventRange('drum', note_seq.MIN_MIDI_PITCH,
                             note_seq.MAX_MIDI_PITCH),
  ]
  return event_codec.Codec(
      max_shift_steps=(vocab_config.steps_per_second *
                       vocab_config.max_shift_seconds),
      steps_per_second=vocab_config.steps_per_second,
      event_ranges=event_ranges)
def vocabulary_from_codec(codec: event_codec.Codec) -> seqio.Vocabulary:
  """Build a token vocabulary sized to the codec's event classes."""
  num_regular_tokens = codec.num_classes
  return GenericTokenVocabulary(
      num_regular_tokens, extra_ids=t5.data.DEFAULT_EXTRA_IDS)
class GenericTokenVocabulary(seqio.Vocabulary):
  """Vocabulary with pass-through encoding of tokens.

  Token ids are not looked up in a table; they are simply shifted by the
  number of special tokens on encode and shifted back on decode.
  """

  def __init__(self, regular_ids: int, extra_ids: int = 0):
    # The special tokens: 0=PAD, 1=EOS, and 2=UNK
    self._num_special_tokens = 3
    self._num_regular_tokens = regular_ids
    super().__init__(extra_ids=extra_ids)

  @property
  def eos_id(self) -> Optional[int]:
    return 1

  @property
  def unk_id(self) -> Optional[int]:
    return 2

  @property
  def _base_vocab_size(self) -> int:
    """Number of ids.

    Returns:
      an integer, the vocabulary size
    """
    return self._num_special_tokens + self._num_regular_tokens

  def _encode(self, token_ids: Sequence[int]) -> Sequence[int]:
    """Encode a list of tokens ids as a list of integers.

    To keep the first few ids for special tokens, increase ids by the number
    of special tokens.

    Args:
      token_ids: array of token ids.

    Returns:
      a list of integers (not terminated by EOS)

    Raises:
      ValueError: if any id falls outside [0, num_regular_tokens).
    """
    encoded = []
    for token_id in token_ids:
      if not 0 <= token_id < self._num_regular_tokens:
        raise ValueError(
            f'token_id {token_id} does not fall within valid range of '
            f'[0, {self._num_regular_tokens})')
      encoded.append(token_id + self._num_special_tokens)
    return encoded

  def _decode(self, ids: Sequence[int]) -> Sequence[int]:
    """Decode a list of integers to a list of token ids.

    The special tokens of PAD and UNK as well as extra_ids will be
    replaced with DECODED_INVALID_ID in the output. Any EOS token is
    represented by DECODED_EOS_ID.

    NOTE(review): unlike _decode_tf, this method does not truncate at the
    first EOS; tokens after a mid-sequence EOS are still decoded.

    Args:
      ids: a list of integers

    Returns:
      a list of token ids.
    """
    # convert all the extra ids to INVALID_ID
    def _decode_id(encoded_id):
      if encoded_id == self.eos_id:
        return DECODED_EOS_ID
      elif encoded_id < self._num_special_tokens:
        return DECODED_INVALID_ID
      elif encoded_id >= self._base_vocab_size:
        return DECODED_INVALID_ID
      else:
        return encoded_id - self._num_special_tokens
    ids = [_decode_id(int(i)) for i in ids]
    return ids

  def _encode_tf(self, token_ids: tf.Tensor) -> tf.Tensor:
    """Encode a list of tokens to a tf.Tensor.

    Args:
      token_ids: array of audio token ids.

    Returns:
      a 1d tf.Tensor with dtype tf.int32
    """
    with tf.control_dependencies(
        [tf.debugging.assert_less(
            token_ids, tf.cast(self._num_regular_tokens, token_ids.dtype)),
         tf.debugging.assert_greater_equal(
             token_ids, tf.cast(0, token_ids.dtype))
        ]):
      tf_ids = token_ids + self._num_special_tokens
    return tf_ids

  def _decode_tf(self, ids: tf.Tensor) -> tf.Tensor:
    """Decode in TensorFlow.

    The special tokens of PAD and UNK as well as extra_ids will be
    replaced with DECODED_INVALID_ID in the output. If EOS is present, it and
    all following tokens in the decoded output will be represented by
    DECODED_EOS_ID.

    Args:
      ids: a 1d tf.Tensor with dtype tf.int32

    Returns:
      a 1d tf.Tensor with dtype tf.int32
    """
    # Create a mask that is true from the first EOS position onward.
    # First, create an array that is True whenever there is an EOS, then cumsum
    # that array so that every position after and including the first True is
    # >1, then cast back to bool for the final mask.
    eos_and_after = tf.cumsum(
        tf.cast(tf.equal(ids, self.eos_id), tf.int32), exclusive=False, axis=-1)
    eos_and_after = tf.cast(eos_and_after, tf.bool)

    return tf.where(
        eos_and_after,
        DECODED_EOS_ID,
        tf.where(
            tf.logical_and(
                tf.greater_equal(ids, self._num_special_tokens),
                tf.less(ids, self._base_vocab_size)),
            ids - self._num_special_tokens,
            DECODED_INVALID_ID))

  def __eq__(self, other):
    # NOTE(review): assumes `other` is also a GenericTokenVocabulary; a
    # non-vocabulary argument raises AttributeError rather than returning
    # NotImplemented.
    their_extra_ids = other.extra_ids
    their_num_regular_tokens = other._num_regular_tokens
    return (self.extra_ids == their_extra_ids and
            self._num_regular_tokens == their_num_regular_tokens)
def num_embeddings(vocabulary: GenericTokenVocabulary) -> int:
  """Vocabulary size as a multiple of 128 for TPU efficiency."""
  # Integer ceiling-division form of: 128 * ceil(vocab_size / 128).
  return 128 * ((vocabulary.vocab_size + 127) // 128)
| 9,175 | 31.424028 | 80 | py |
mt3 | mt3-main/mt3/layers_test.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attention classes."""
import dataclasses
from typing import Optional
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
from flax.core import freeze
from flax.linen import partitioning as nn_partitioning
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
from mt3 import layers
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
Array = jnp.ndarray
AxisMetadata = nn_partitioning.AxisMetadata # pylint: disable=invalid-name
class SelfAttention(layers.MultiHeadDotProductAttention):
  """Self-attention special case of multi-head dot-product attention."""

  @nn.compact
  def __call__(self,
               inputs_q: Array,
               mask: Optional[Array] = None,
               bias: Optional[Array] = None,
               deterministic: bool = False):
    # Self-attention: keys/values are the same tensor as the queries.
    return super().__call__(
        inputs_q, inputs_q, mask, bias, deterministic=deterministic)
@dataclasses.dataclass(frozen=True)
class SelfAttentionArgs:
  """Bundle of constructor and call arguments for self-attention tests."""
  num_heads: int = 1
  batch_size: int = 2
  head_dim: int = 3
  q_len: int = 5
  features: int = 6
  dropout_rate: float = 0.1
  deterministic: bool = False
  decode: bool = False
  float32_logits: bool = False

  def __post_init__(self):
    # Autoregressive decoding feeds one position at a time, so the query
    # length must be 1 whenever decode is enabled.
    assert not self.decode or self.q_len == 1

  def init_args(self):
    """Keyword arguments for constructing the attention module."""
    return {
        'num_heads': self.num_heads,
        'head_dim': self.head_dim,
        'dropout_rate': self.dropout_rate,
        'float32_logits': self.float32_logits,
    }

  def apply_args(self):
    """Keyword arguments for calling the attention module."""
    b, q, h = self.batch_size, self.q_len, self.num_heads
    return {
        'inputs_q': jnp.ones((b, q, self.features)),
        'mask': jnp.ones((b, h, q, q)),
        'bias': jnp.ones((b, h, q, q)),
        'deterministic': self.deterministic,
    }
class AttentionTest(parameterized.TestCase):
def test_dot_product_attention_shape(self):
# This test only checks for shape but tries to make sure all code paths are
# reached.
dropout_rng = random.PRNGKey(0)
batch_size, num_heads, q_len, kv_len, qk_depth, v_depth = 1, 2, 3, 4, 5, 6
query = jnp.ones((batch_size, q_len, num_heads, qk_depth))
key = jnp.ones((batch_size, kv_len, num_heads, qk_depth))
value = jnp.ones((batch_size, kv_len, num_heads, v_depth))
bias = jnp.ones((batch_size, num_heads, q_len, kv_len))
args = dict(
query=query,
key=key,
value=value,
bias=bias,
dropout_rng=dropout_rng,
dropout_rate=0.5,
deterministic=False,
)
output = layers.dot_product_attention(**args)
self.assertEqual(output.shape, (batch_size, q_len, num_heads, v_depth))
  def test_make_attention_mask_multiply_pairwise_fn(self):
    # Default pairwise_fn (multiply): padding positions (token 0) mask out
    # both the corresponding rows and columns.
    decoder_target_tokens = jnp.array([[7, 0, 0], [8, 5, 0]])
    attention_mask = layers.make_attention_mask(
        decoder_target_tokens > 0, decoder_target_tokens > 0, dtype=jnp.int32)
    expected0 = jnp.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
    expected1 = jnp.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])
    self.assertEqual(attention_mask.shape, (2, 1, 3, 3))
    np.testing.assert_array_equal(attention_mask[0, 0], expected0)
    np.testing.assert_array_equal(attention_mask[1, 0], expected1)

  def test_make_attention_mask_equal_pairwise_fn(self):
    # pairwise_fn=jnp.equal: positions attend only within the same segment.
    segment_ids = jnp.array([[1, 1, 2, 2, 2, 0], [1, 1, 1, 2, 0, 0]])
    attention_mask = layers.make_attention_mask(
        segment_ids, segment_ids, pairwise_fn=jnp.equal, dtype=jnp.int32)
    # Padding is not treated in a special way. So they need to be zeroed out
    # separately.
    expected0 = jnp.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
                           [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0],
                           [0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1]])
    expected1 = jnp.array([[1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0],
                           [1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1]])
    self.assertEqual(attention_mask.shape, (2, 1, 6, 6))
    np.testing.assert_array_equal(attention_mask[0, 0], expected0)
    np.testing.assert_array_equal(attention_mask[1, 0], expected1)
  def test_make_causal_mask_with_padding(self):
    x = jnp.array([[7, 0, 0], [8, 5, 0]])
    y = layers.make_causal_mask(x)
    self.assertEqual(y.shape, (2, 1, 3, 3))
    # Padding is not treated in a special way. So they need to be zeroed out
    # separately; the causal (lower-triangular) structure is identical for
    # every batch element.
    expected_y = jnp.array([[[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]]],
                           jnp.float32)
    np.testing.assert_allclose(y[0], expected_y)
    np.testing.assert_allclose(y[1], expected_y)

  def test_make_causal_mask_extra_batch_dims(self):
    # extra_batch_dims=2 prepends two singleton batch dimensions to the mask.
    x = jnp.ones((3, 3, 5))
    y = layers.make_causal_mask(x, extra_batch_dims=2)
    self.assertEqual(y.shape, (1, 1, 3, 3, 1, 5, 5))

  def test_make_causal_mask(self):
    x = jnp.ones((1, 3))
    y = layers.make_causal_mask(x)
    self.assertEqual(y.shape, (1, 1, 3, 3))
    expected_y = jnp.array([[[[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]]]],
                           jnp.float32)
    np.testing.assert_allclose(y, expected_y)
  def test_combine_masks(self):
    # None entries are ignored; remaining masks combine with logical AND.
    masks = [
        jnp.array([0, 1, 0, 1], jnp.float32), None,
        jnp.array([1, 1, 1, 1], jnp.float32),
        jnp.array([1, 1, 1, 0], jnp.float32)
    ]
    y = layers.combine_masks(*masks)
    np.testing.assert_allclose(y, jnp.array([0, 1, 0, 0], jnp.float32))

  def test_combine_biases(self):
    # None entries are ignored; remaining biases combine by summation.
    masks = [
        jnp.array([0, 1, 0, 1], jnp.float32), None,
        jnp.array([0, 1, 1, 1], jnp.float32),
        jnp.array([0, 1, 1, 0], jnp.float32)
    ]
    y = layers.combine_biases(*masks)
    np.testing.assert_allclose(y, jnp.array([0, 3, 2, 2], jnp.float32))
  def test_make_decoder_mask_lm_unpacked(self):
    # Plain causal LM mask: lower-triangular, with padding rows zeroed.
    decoder_target_tokens = jnp.array([6, 7, 3, 0])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens, dtype=jnp.float32)
    expected_mask = jnp.array([[[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0],
                                [0, 0, 0, 0]]])
    np.testing.assert_array_equal(mask, expected_mask)

  def test_make_decoder_mask_lm_packed(self):
    # Packed causal LM: attention additionally restricted to the same segment.
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 5, 0]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 2, 2, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_segment_ids=decoder_segment_ids)
    expected_mask = jnp.array([[[[1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
                                 [1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0],
                                 [0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0]]]])
    np.testing.assert_array_equal(mask, expected_mask)

  def test_make_decoder_mask_prefix_lm_unpacked(self):
    # Prefix LM: positions flagged by decoder_causal_attention attend
    # bidirectionally; the rest remain causal.
    decoder_target_tokens = jnp.array([[5, 6, 7, 3, 4, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 1, 0, 0, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask = jnp.array(
        [[[[1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0],
           [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0]]]],
        dtype=jnp.float32)
    np.testing.assert_array_equal(mask, expected_mask)

  def test_make_decoder_mask_prefix_lm_packed(self):
    # Prefix LM with packing: bidirectional prefixes, still segment-limited.
    decoder_target_tokens = jnp.array([[5, 6, 7, 8, 3, 4, 0]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 2, 2, 2, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 1, 1, 0, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
        decoder_segment_ids=decoder_segment_ids)
    expected_mask = jnp.array([[[[1, 1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0],
                                 [1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0],
                                 [0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0],
                                 [0, 0, 0, 0, 0, 0, 0]]]])
    np.testing.assert_array_equal(mask, expected_mask)

  def test_make_decoder_mask_prefix_lm_unpacked_multiple_elements(self):
    # Each batch element gets its own prefix length.
    decoder_target_tokens = jnp.array([[6, 7, 3, 0], [4, 5, 0, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0], [1, 0, 0, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask0 = jnp.array([[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0],
                                [0, 0, 0, 0]])
    expected_mask1 = jnp.array([[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0],
                                [0, 0, 0, 0]])
    self.assertEqual(mask.shape, (2, 1, 4, 4))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
    np.testing.assert_array_equal(mask[1, 0], expected_mask1)

  def test_make_decoder_mask_composite_causal_attention(self):
    # Non-contiguous causal-attention flags: multiple bidirectional regions.
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 8, 9, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0, 1, 1, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention)
    expected_mask0 = jnp.array([[1, 1, 0, 0, 1, 1, 0], [1, 1, 0, 0, 1, 1, 0],
                                [1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0]])
    self.assertEqual(mask.shape, (1, 1, 7, 7))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)

  def test_make_decoder_mask_composite_causal_attention_packed(self):
    # Composite causal attention combined with packing.
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 8, 9, 2, 3, 4]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 1, 1, 1, 2, 2, 2]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0, 1, 1, 1, 1, 0]])
    mask = layers.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
        decoder_segment_ids=decoder_segment_ids)
    expected_mask0 = jnp.array([[1, 1, 0, 0, 1, 1, 0, 0, 0],
                                [1, 1, 0, 0, 1, 1, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0, 0],
                                [1, 1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 1, 1, 1]])
    self.assertEqual(mask.shape, (1, 1, 9, 9))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
  @parameterized.parameters({'f': 20}, {'f': 22})
  def test_multihead_dot_product_attention(self, f):
    # b: batch, f: emb_dim, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6
    base_args = SelfAttentionArgs(num_heads=h, head_dim=d, dropout_rate=0)
    args = base_args.init_args()

    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    inputs_kv = np.random.randn(b, k, f)

    # Projection: [b, q, f] -> [b, q, h, d]
    # So the kernels have to be [f, h, d]
    query_kernel = np.random.randn(f, h, d)
    key_kernel = np.random.randn(f, h, d)
    value_kernel = np.random.randn(f, h, d)
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.random.randn(h, d, f)

    # The module stores each projection as a flattened 2-D kernel, so reshape
    # the reference kernels accordingly before injecting them as params.
    params = {
        'query': {
            'kernel': query_kernel.reshape(f, -1)
        },
        'key': {
            'kernel': key_kernel.reshape(f, -1)
        },
        'value': {
            'kernel': value_kernel.reshape(f, -1)
        },
        'out': {
            'kernel': out_kernel.reshape(-1, f)
        }
    }
    y = layers.MultiHeadDotProductAttention(**args).apply(
        {'params': freeze(params)}, inputs_q, inputs_kv)

    # Reference computation with explicit einsums, mirroring the attention
    # math step by step (projections, logits, softmax, weighted sum, output).
    query = np.einsum('bqf,fhd->bqhd', inputs_q, query_kernel)
    key = np.einsum('bkf,fhd->bkhd', inputs_kv, key_kernel)
    value = np.einsum('bkf,fhd->bkhd', inputs_kv, value_kernel)
    logits = np.einsum('bqhd,bkhd->bhqk', query, key)
    weights = nn.softmax(logits, axis=-1)
    combined_value = np.einsum('bhqk,bkhd->bqhd', weights, value)
    y_expected = np.einsum('bqhd,hdf->bqf', combined_value, out_kernel)
    np.testing.assert_allclose(y, y_expected, rtol=1e-5, atol=1e-5)
  def test_multihead_dot_product_attention_caching(self):
    """Checks that single-step decoding updates the key/value cache."""
    # b: batch, f: qkv_features, k: kv_len, h: num_head, d: head_dim
    b, h, d, k = 2, 3, 4, 5
    f = h * d
    base_args = SelfAttentionArgs(num_heads=h, head_dim=d, dropout_rate=0)
    args = base_args.init_args()
    # Empty cache with room for k decoding steps, starting at index 0.
    cache = {
        'cached_key': np.zeros((b, h, d, k)),
        'cached_value': np.zeros((b, h, d, k)),
        'cache_index': np.array(0)
    }
    inputs_q = np.random.randn(b, 1, f)
    inputs_kv = np.random.randn(b, 1, f)
    # Mock dense general such that q, k, v projections are replaced by simple
    # reshaping.
    def mock_dense_general(self, x, **kwargs):  # pylint: disable=unused-argument
      return x.reshape(b, -1, h, d)
    with mock.patch.object(
        layers.DenseGeneral, '__call__', new=mock_dense_general):
      _, mutated = layers.MultiHeadDotProductAttention(**args).apply(
          {'cache': freeze(cache)},
          inputs_q,
          inputs_kv,
          decode=True,
          mutable=['cache'])
      updated_cache = mutated['cache']
    # Perform the same mocked projection to generate the expected cache.
    # (key|value): [b, 1, h, d]
    key = mock_dense_general(None, inputs_kv)
    value = mock_dense_general(None, inputs_kv)
    # cached_(key|value): [b, h, d, k]
    cache['cached_key'][:, :, :, 0] = key[:, 0, :, :]
    cache['cached_value'][:, :, :, 0] = value[:, 0, :, :]
    cache['cache_index'] = np.array(1)
    # The module must have written time step 0 and advanced the index to 1.
    for name, array in cache.items():
      np.testing.assert_allclose(array, updated_cache[name])
def test_dot_product_attention(self):
# b: batch, f: emb_dim, q: q_len, k: kv_len, h: num_head, d: head_dim
b, q, h, d, k = 2, 3, 4, 5, 6
np.random.seed(0)
query = np.random.randn(b, q, h, d)
key = np.random.randn(b, k, h, d)
value = np.random.randn(b, k, h, d)
bias = np.random.randn(b, h, q, k)
attn_out = layers.dot_product_attention(query, key, value, bias=bias)
logits = np.einsum('bqhd,bkhd->bhqk', query, key)
weights = jax.nn.softmax(logits + bias, axis=-1)
expected = np.einsum('bhqk,bkhd->bqhd', weights, value)
np.testing.assert_allclose(attn_out, expected, atol=1e-6)
class EmbeddingTest(parameterized.TestCase):
  """Tests for the Embed module."""

  def test_embedder_raises_exception_for_incorrect_input_type(self):
    """Tests that inputs are integers and that an exception is raised if not."""
    embed = layers.Embed(num_embeddings=10, features=5)
    inputs = np.expand_dims(np.arange(5, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    # Float inputs must be rejected: embedding lookup requires integer IDs.
    bad_inputs = inputs.astype(np.float32)
    with self.assertRaisesRegex(
        ValueError, 'Input type must be an integer or unsigned integer.'):
      _ = embed.apply(variables, bad_inputs)

  @parameterized.named_parameters(
      {
          'testcase_name': 'with_ones',
          'init_fn': jax.nn.initializers.ones,
          'num_embeddings': 10,
          'features': 5,
          'matrix_sum': 5 * 10,
      }, {
          'testcase_name': 'with_zeros',
          'init_fn': jax.nn.initializers.zeros,
          'num_embeddings': 10,
          'features': 5,
          'matrix_sum': 0,
      })
  def test_embedding_initializes_correctly(self, init_fn, num_embeddings,
                                           features, matrix_sum):
    """Tests if the Embed class initializes with the requested initializer."""
    embed = layers.Embed(
        num_embeddings=num_embeddings,
        features=features,
        embedding_init=init_fn)
    inputs = np.expand_dims(np.arange(5, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    embedding_matrix = variables['params']['embedding']
    # The total sum distinguishes the all-ones from the all-zeros initializer.
    self.assertEqual(int(np.sum(embedding_matrix)), matrix_sum)

  def test_embedding_matrix_shape(self):
    """Tests that the embedding matrix has the right shape."""
    num_embeddings = 10
    features = 5
    embed = layers.Embed(num_embeddings=num_embeddings, features=features)
    inputs = np.expand_dims(np.arange(features, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    embedding_matrix = variables['params']['embedding']
    self.assertEqual((num_embeddings, features), embedding_matrix.shape)

  def test_embedding_attend(self):
    """Tests that attending with ones returns sum of embedding vectors."""
    features = 5
    embed = layers.Embed(num_embeddings=10, features=features)
    inputs = np.array([[1]], dtype=np.int64)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    query = np.ones(features, dtype=np.float32)
    result = embed.apply(variables, query, method=embed.attend)
    # An all-ones query dotted with each embedding row equals that row's sum.
    expected = np.sum(variables['params']['embedding'], -1)
    np.testing.assert_array_almost_equal(result, expected)
class DenseTest(parameterized.TestCase):
  """Tests for DenseGeneral and MlpBlock."""

  def test_dense_general_no_bias(self):
    """With an all-ones kernel the output equals the sum of input features."""
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 3))
    model = layers.DenseGeneral(
        features=4,
        kernel_init=initializers.ones,
    )
    y, _ = model.init_with_output(rng, x)
    self.assertEqual(y.shape, (1, 4))
    np.testing.assert_allclose(y, np.full((1, 4), 3.))

  def test_dense_general_two_features(self):
    """`features` may be a tuple, yielding multiple output dimensions."""
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 3))
    model = layers.DenseGeneral(
        features=(2, 2),
        kernel_init=initializers.ones,
    )
    y, _ = model.init_with_output(rng, x)
    # We transform the last input dimension to two output dimensions (2, 2).
    np.testing.assert_allclose(y, np.full((1, 2, 2), 3.))

  def test_dense_general_two_axes(self):
    """Multiple input axes can be contracted into one output axis."""
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 2, 2))
    model = layers.DenseGeneral(
        features=3,
        axis=(-2, 2),  # Note: this is the same as (1, 2).
        kernel_init=initializers.ones,
    )
    y, _ = model.init_with_output(rng, x)
    # We transform the last two input dimensions (2, 2) to one output dimension.
    np.testing.assert_allclose(y, np.full((1, 3), 4.))

  def test_mlp_same_out_dim(self):
    """Golden-value test pinning MlpBlock params and outputs for PRNGKey(0)."""
    module = layers.MlpBlock(
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        dtype=jnp.float32,
    )
    inputs = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32)
    params = module.init(random.PRNGKey(0), inputs, deterministic=True)
    # Pins both the exact initializer output and the parameter/axis-metadata
    # tree structure.
    self.assertEqual(
        jax.tree_map(lambda a: a.tolist(), params), {
            'params': {
                'wi': {
                    'kernel': [[
                        -0.8675811290740967, 0.08417510986328125,
                        0.022586345672607422, -0.9124102592468262
                    ],
                               [
                                   -0.19464373588562012, 0.49809837341308594,
                                   0.7808468341827393, 0.9267289638519287
                               ]],
                },
                'wo': {
                    'kernel': [[0.01154780387878418, 0.1397249698638916],
                               [0.974980354309082, 0.5903260707855225],
                               [-0.05997943878173828, 0.616570234298706],
                               [0.2934272289276123, 0.8181164264678955]],
                },
            },
            'params_axes': {
                'wi': {
                    'kernel_axes': AxisMetadata(names=('embed', 'mlp')),
                },
                'wo': {
                    'kernel_axes': AxisMetadata(names=('mlp', 'embed')),
                },
            },
        })
    result = module.apply(params, inputs, deterministic=True)
    np.testing.assert_allclose(
        result.tolist(),
        [[[0.5237172245979309, 0.8508185744285583],
          [0.5237172245979309, 0.8508185744285583],
          [1.2344461679458618, 2.3844780921936035]],
         [[1.0474344491958618, 1.7016371488571167],
          [0.6809444427490234, 0.9663378596305847],
          [1.0474344491958618, 1.7016371488571167]]],
        rtol=1e-6,
    )
# Run all tests when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 21,675 | 38.699634 | 81 | py |
mt3 | mt3-main/mt3/models.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature converter and model for continuous inputs."""
from typing import Mapping
import seqio
from t5x import decoding
from t5x import models
import tensorflow as tf
class ContinuousInputsEncDecFeatureConverter(seqio.FeatureConverter):
  """Feature converter for an encoder-decoder with continuous inputs."""

  # "inputs" are rank-2 float features (one vector per frame) rather than the
  # usual rank-1 integer token IDs; "targets" are ordinary integer tokens.
  TASK_FEATURES = {
      "inputs": seqio.FeatureConverter.FeatureSpec(dtype=tf.float32, rank=2),
      "targets": seqio.FeatureConverter.FeatureSpec(dtype=tf.int32),
  }
  MODEL_FEATURES = {
      "encoder_input_tokens":
          seqio.FeatureConverter.FeatureSpec(dtype=tf.float32, rank=2),
      "decoder_target_tokens":
          seqio.FeatureConverter.FeatureSpec(dtype=tf.int32),
      "decoder_input_tokens":
          seqio.FeatureConverter.FeatureSpec(dtype=tf.int32),
      "decoder_loss_weights":
          seqio.FeatureConverter.FeatureSpec(dtype=tf.int32),
  }
  # Extra features emitted only when packing is enabled (self.pack).
  PACKING_FEATURE_DTYPES = {
      "encoder_segment_ids": tf.int32,
      "decoder_segment_ids": tf.int32,
      "encoder_positions": tf.int32,
      "decoder_positions": tf.int32
  }

  def _convert_features(
      self, ds: tf.data.Dataset,
      task_feature_lengths: Mapping[str, int]) -> tf.data.Dataset:
    """Convert the dataset to be fed to the encoder-decoder model.

    The conversion process involves two steps

    1. Each feature in the `task_feature_lengths` is trimmed/padded and
       optionally packed depending on the value of self.pack.
    2. "inputs" fields are mapped to the encoder input and "targets" are mapped
       to decoder input (after being shifted) and target.

    All the keys in the `task_feature_lengths` should be present in the input
    dataset, which may contain some extra features that are not in the
    `task_feature_lengths`. They will not be included in the output dataset.
    One common scenario is the "inputs_pretokenized" and "targets_pretokenized"
    fields.

    Args:
      ds: an input tf.data.Dataset to be converted.
      task_feature_lengths: a mapping from feature to its length.

    Returns:
      ds: the converted dataset.
    """

    def convert_example(
        features: Mapping[str, tf.Tensor]) -> Mapping[str, tf.Tensor]:
      # targets_segment_id is present only for a packed dataset.
      decoder_input_tokens = seqio.autoregressive_inputs(
          features["targets"],
          sequence_id=features.get("targets_segment_ids", None))

      d = {"encoder_input_tokens": features["inputs"],
           "decoder_target_tokens": features["targets"],
           "decoder_input_tokens": decoder_input_tokens,
           # Loss is computed for all but the padding positions.
           "decoder_loss_weights":
               seqio.non_padding_position(features["targets"])}

      if self.pack:
        d["encoder_segment_ids"] = features["inputs_segment_ids"]
        d["decoder_segment_ids"] = features["targets_segment_ids"]
        d["encoder_positions"] = features["inputs_positions"]
        d["decoder_positions"] = features["targets_positions"]

      return d

    ds = self._pack_or_pad(ds, task_feature_lengths)
    return ds.map(
        convert_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)

  def get_model_feature_lengths(
      self, task_feature_lengths: Mapping[str, int]) -> Mapping[str, int]:
    """Define the length relationship between input and output features."""
    encoder_length = task_feature_lengths["inputs"]
    decoder_length = task_feature_lengths["targets"]

    # All decoder features share the target length; the encoder feature
    # shares the input length.
    model_feature_lengths = {
        "encoder_input_tokens": encoder_length,
        "decoder_target_tokens": decoder_length,
        "decoder_input_tokens": decoder_length,
        "decoder_loss_weights": decoder_length
    }
    if self.pack:
      model_feature_lengths["encoder_segment_ids"] = encoder_length
      model_feature_lengths["decoder_segment_ids"] = decoder_length
      model_feature_lengths["encoder_positions"] = encoder_length
      model_feature_lengths["decoder_positions"] = decoder_length

    return model_feature_lengths
class ContinuousInputsEncoderDecoderModel(models.EncoderDecoderModel):
  """Encoder-decoder model with continuous inputs."""

  FEATURE_CONVERTER_CLS = ContinuousInputsEncDecFeatureConverter

  def __init__(self, module, input_vocabulary, output_vocabulary, optimizer_def,
               input_depth, decode_fn=decoding.beam_search, label_smoothing=0.0,
               z_loss=0.0, loss_normalizing_factor=None):
    # Size of the final (depth) axis of each continuous input frame.
    self._input_depth = input_depth
    super().__init__(
        module=module,
        input_vocabulary=input_vocabulary,
        output_vocabulary=output_vocabulary,
        optimizer_def=optimizer_def,
        decode_fn=decode_fn,
        label_smoothing=label_smoothing,
        z_loss=z_loss,
        loss_normalizing_factor=loss_normalizing_factor)

  def get_initial_variables(self, rng, input_shapes, input_types=None):
    """Hacky override to bypass eval/infer inability to handle rank-3 inputs."""
    encoder_shape = input_shapes["encoder_input_tokens"]
    if len(encoder_shape) == 2:
      # A rank-2 [batch, length] shape was supplied; append the input depth so
      # the module is initialized with the rank-3 shape it actually receives.
      other_shapes = {k: v for k, v in input_shapes.items()
                      if k != "encoder_input_tokens"}
      input_shapes = {
          "encoder_input_tokens": (*encoder_shape, self._input_depth),
          **other_shapes,
      }
    else:
      assert encoder_shape[-1] == self._input_depth
    return super().get_initial_variables(
        rng=rng, input_shapes=input_shapes, input_types=input_types)
| 5,990 | 38.156863 | 80 | py |
mt3 | mt3-main/mt3/metrics.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transcription metrics."""
import collections
import copy
import functools
from typing import Any, Iterable, Mapping, Optional, Sequence
import mir_eval
from mt3 import event_codec
from mt3 import metrics_utils
from mt3 import note_sequences
from mt3 import spectrograms
from mt3 import summaries
from mt3 import vocabularies
import note_seq
import numpy as np
import seqio
def _program_aware_note_scores(
    ref_ns: note_seq.NoteSequence,
    est_ns: note_seq.NoteSequence,
    granularity_type: str
) -> Mapping[str, float]:
  """Compute precision/recall/F1 for notes taking program into account.

  For non-drum tracks, uses onsets and offsets. For drum tracks, uses onsets
  only. Applies MIDI program map of specified granularity type.

  Args:
    ref_ns: Reference NoteSequence with ground truth labels.
    est_ns: Estimated NoteSequence.
    granularity_type: String key in vocabularies.PROGRAM_GRANULARITIES dict.

  Returns:
    A dictionary containing precision, recall, and F1 score.
  """
  program_map_fn = vocabularies.PROGRAM_GRANULARITIES[
      granularity_type].program_map_fn

  # Remap programs on (copies of) both sequences; drum notes keep their
  # original program value.
  ref_ns = copy.deepcopy(ref_ns)
  for note in ref_ns.notes:
    if not note.is_drum:
      note.program = program_map_fn(note.program)

  est_ns = copy.deepcopy(est_ns)
  for note in est_ns.notes:
    if not note.is_drum:
      note.program = program_map_fn(note.program)

  # Union of every (program, is_drum) track present in either sequence, so
  # spurious and missing tracks both count against the scores.
  program_and_is_drum_tuples = (
      set((note.program, note.is_drum) for note in ref_ns.notes) |
      set((note.program, note.is_drum) for note in est_ns.notes)
  )

  # Precision is weighted by estimated-note count, recall by reference-note
  # count; drum and non-drum tracks are accumulated separately.
  drum_precision_sum = 0.0
  drum_precision_count = 0
  drum_recall_sum = 0.0
  drum_recall_count = 0

  nondrum_precision_sum = 0.0
  nondrum_precision_count = 0
  nondrum_recall_sum = 0.0
  nondrum_recall_count = 0

  for program, is_drum in program_and_is_drum_tuples:
    est_track = note_sequences.extract_track(est_ns, program, is_drum)
    ref_track = note_sequences.extract_track(ref_ns, program, is_drum)

    est_intervals, est_pitches, unused_est_velocities = (
        note_seq.sequences_lib.sequence_to_valued_intervals(est_track))
    ref_intervals, ref_pitches, unused_ref_velocities = (
        note_seq.sequences_lib.sequence_to_valued_intervals(ref_track))

    args = {
        'ref_intervals': ref_intervals, 'ref_pitches': ref_pitches,
        'est_intervals': est_intervals, 'est_pitches': est_pitches
    }
    if is_drum:
      # Drum tracks are scored on onsets only.
      args['offset_ratio'] = None

    precision, recall, unused_f_measure, unused_avg_overlap_ratio = (
        mir_eval.transcription.precision_recall_f1_overlap(**args))

    if is_drum:
      drum_precision_sum += precision * len(est_intervals)
      drum_precision_count += len(est_intervals)
      drum_recall_sum += recall * len(ref_intervals)
      drum_recall_count += len(ref_intervals)
    else:
      nondrum_precision_sum += precision * len(est_intervals)
      nondrum_precision_count += len(est_intervals)
      nondrum_recall_sum += recall * len(ref_intervals)
      nondrum_recall_count += len(ref_intervals)

  # Combined (drum + non-drum) weighted averages; empty counts yield 0.
  precision_sum = drum_precision_sum + nondrum_precision_sum
  precision_count = drum_precision_count + nondrum_precision_count
  recall_sum = drum_recall_sum + nondrum_recall_sum
  recall_count = drum_recall_count + nondrum_recall_count

  precision = (precision_sum / precision_count) if precision_count else 0
  recall = (recall_sum / recall_count) if recall_count else 0
  f_measure = mir_eval.util.f_measure(precision, recall)

  drum_precision = ((drum_precision_sum / drum_precision_count)
                    if drum_precision_count else 0)
  drum_recall = ((drum_recall_sum / drum_recall_count)
                 if drum_recall_count else 0)
  drum_f_measure = mir_eval.util.f_measure(drum_precision, drum_recall)

  nondrum_precision = ((nondrum_precision_sum / nondrum_precision_count)
                       if nondrum_precision_count else 0)
  nondrum_recall = ((nondrum_recall_sum / nondrum_recall_count)
                    if nondrum_recall_count else 0)
  nondrum_f_measure = mir_eval.util.f_measure(nondrum_precision, nondrum_recall)

  return {
      f'Onset + offset + program precision ({granularity_type})': precision,
      f'Onset + offset + program recall ({granularity_type})': recall,
      f'Onset + offset + program F1 ({granularity_type})': f_measure,
      f'Drum onset precision ({granularity_type})': drum_precision,
      f'Drum onset recall ({granularity_type})': drum_recall,
      f'Drum onset F1 ({granularity_type})': drum_f_measure,
      f'Nondrum onset + offset + program precision ({granularity_type})':
          nondrum_precision,
      f'Nondrum onset + offset + program recall ({granularity_type})':
          nondrum_recall,
      f'Nondrum onset + offset + program F1 ({granularity_type})':
          nondrum_f_measure
  }
def _note_onset_tolerance_sweep(
    ref_ns: note_seq.NoteSequence, est_ns: note_seq.NoteSequence,
    tolerances: Iterable[float] = (0.01, 0.02, 0.05, 0.1, 0.2, 0.5)
) -> Mapping[str, float]:
  """Compute note precision/recall/F1 across a range of tolerances.

  Both onset and offset tolerances are swept together; the tolerance value is
  embedded in each returned metric name.
  """
  est_intervals, est_pitches, _ = (
      note_seq.sequences_lib.sequence_to_valued_intervals(est_ns))
  ref_intervals, ref_pitches, _ = (
      note_seq.sequences_lib.sequence_to_valued_intervals(ref_ns))

  results = {}
  for tolerance in tolerances:
    p, r, f1, _ = mir_eval.transcription.precision_recall_f1_overlap(
        ref_intervals=ref_intervals, ref_pitches=ref_pitches,
        est_intervals=est_intervals, est_pitches=est_pitches,
        onset_tolerance=tolerance, offset_min_tolerance=tolerance)
    results.update({
        f'Onset + offset precision ({tolerance})': p,
        f'Onset + offset recall ({tolerance})': r,
        f'Onset + offset F1 ({tolerance})': f1,
    })
  return results
def transcription_metrics(
    targets: Sequence[Mapping[str, Any]],
    predictions: Sequence[Mapping[str, Any]],
    codec: event_codec.Codec,
    spectrogram_config: spectrograms.SpectrogramConfig,
    onsets_only: bool,
    use_ties: bool,
    track_specs: Optional[Sequence[note_sequences.TrackSpec]] = None,
    num_summary_examples: int = 5,
    frame_fps: float = 62.5,
    frame_velocity_threshold: int = 30,
) -> Mapping[str, seqio.metrics.MetricValue]:
  """Compute mir_eval transcription metrics.

  Args:
    targets: List of target dicts; the first target for each full example
      carries the reference NoteSequence under 'ref_ns'.
    predictions: List of per-segment prediction dicts, combined by unique ID.
    codec: Event codec used to decode predicted events.
    spectrogram_config: Spectrogram config used for audio summaries.
    onsets_only: If True, score onsets only (no offsets or velocities).
    use_ties: If True, use the tie-aware note encoding; incompatible with
      `onsets_only`.
    track_specs: Optional per-track specs for separate per-instrument metrics.
    num_summary_examples: Number of examples to include in summaries.
    frame_fps: Frame rate for framewise (pianoroll) metrics.
    frame_velocity_threshold: Velocity cutoff for framewise metrics.

  Returns:
    Mapping from metric name to mean score, histogram, or summary.

  Raises:
    ValueError: If both `onsets_only` and `use_ties` are set.
  """
  if onsets_only and use_ties:
    raise ValueError('Ties not compatible with onset-only transcription.')
  if onsets_only:
    encoding_spec = note_sequences.NoteOnsetEncodingSpec
  elif not use_ties:
    encoding_spec = note_sequences.NoteEncodingSpec
  else:
    encoding_spec = note_sequences.NoteEncodingWithTiesSpec

  # The first target for each full example contains the NoteSequence; just
  # organize by ID.
  full_targets = {}
  for target in targets:
    if target['ref_ns']:
      full_targets[target['unique_id']] = {'ref_ns': target['ref_ns']}

  # Gather all predictions for the same ID and concatenate them in time order,
  # to construct full-length predictions.
  full_predictions = metrics_utils.combine_predictions_by_id(
      predictions=predictions,
      combine_predictions_fn=functools.partial(
          metrics_utils.event_predictions_to_ns,
          codec=codec,
          encoding_spec=encoding_spec))

  assert sorted(full_targets.keys()) == sorted(full_predictions.keys())

  # (Renamed from `id`, which shadowed the builtin.)
  full_target_prediction_pairs = [
      (full_targets[example_id], full_predictions[example_id])
      for example_id in sorted(full_targets.keys())
  ]

  def remove_drums(ns):
    # Return a copy of `ns` with all drum notes removed.
    ns_drumless = note_seq.NoteSequence()
    ns_drumless.CopyFrom(ns)
    del ns_drumless.notes[:]
    ns_drumless.notes.extend([note for note in ns.notes if not note.is_drum])
    return ns_drumless

  scores = collections.defaultdict(list)
  all_track_pianorolls = collections.defaultdict(list)
  for target, prediction in full_target_prediction_pairs:
    scores['Invalid events'].append(prediction['est_invalid_events'])
    scores['Dropped events'].append(prediction['est_dropped_events'])

    est_ns_drumless = remove_drums(prediction['est_ns'])
    ref_ns_drumless = remove_drums(target['ref_ns'])

    # Whether or not there are separate tracks, compute metrics for the full
    # NoteSequence minus drums.
    est_tracks = [est_ns_drumless]
    ref_tracks = [ref_ns_drumless]
    use_track_offsets = [not onsets_only]
    use_track_velocities = [not onsets_only]
    track_instrument_names = ['']

    if track_specs is not None:
      # Compute transcription metrics separately for each track.
      for spec in track_specs:
        est_tracks.append(note_sequences.extract_track(
            prediction['est_ns'], spec.program, spec.is_drum))
        ref_tracks.append(note_sequences.extract_track(
            target['ref_ns'], spec.program, spec.is_drum))
        use_track_offsets.append(not onsets_only and not spec.is_drum)
        use_track_velocities.append(not onsets_only)
        track_instrument_names.append(spec.name)

    for est_ns, ref_ns, use_offsets, use_velocities, instrument_name in zip(
        est_tracks, ref_tracks, use_track_offsets, use_track_velocities,
        track_instrument_names):
      track_scores = {}

      est_intervals, est_pitches, est_velocities = (
          note_seq.sequences_lib.sequence_to_valued_intervals(est_ns))
      ref_intervals, ref_pitches, ref_velocities = (
          note_seq.sequences_lib.sequence_to_valued_intervals(ref_ns))

      # Precision / recall / F1 using onsets (and pitches) only.
      precision, recall, f_measure, avg_overlap_ratio = (
          mir_eval.transcription.precision_recall_f1_overlap(
              ref_intervals=ref_intervals,
              ref_pitches=ref_pitches,
              est_intervals=est_intervals,
              est_pitches=est_pitches,
              offset_ratio=None))
      del avg_overlap_ratio
      track_scores['Onset precision'] = precision
      track_scores['Onset recall'] = recall
      track_scores['Onset F1'] = f_measure

      if use_offsets:
        # Precision / recall / F1 using onsets and offsets.
        precision, recall, f_measure, avg_overlap_ratio = (
            mir_eval.transcription.precision_recall_f1_overlap(
                ref_intervals=ref_intervals,
                ref_pitches=ref_pitches,
                est_intervals=est_intervals,
                est_pitches=est_pitches))
        del avg_overlap_ratio
        track_scores['Onset + offset precision'] = precision
        track_scores['Onset + offset recall'] = recall
        track_scores['Onset + offset F1'] = f_measure

      if use_velocities:
        # Precision / recall / F1 using onsets and velocities (no offsets).
        precision, recall, f_measure, avg_overlap_ratio = (
            mir_eval.transcription_velocity.precision_recall_f1_overlap(
                ref_intervals=ref_intervals,
                ref_pitches=ref_pitches,
                ref_velocities=ref_velocities,
                est_intervals=est_intervals,
                est_pitches=est_pitches,
                est_velocities=est_velocities,
                offset_ratio=None))
        track_scores['Onset + velocity precision'] = precision
        track_scores['Onset + velocity recall'] = recall
        track_scores['Onset + velocity F1'] = f_measure

      if use_offsets and use_velocities:
        # Precision / recall / F1 using onsets, offsets, and velocities.
        precision, recall, f_measure, avg_overlap_ratio = (
            mir_eval.transcription_velocity.precision_recall_f1_overlap(
                ref_intervals=ref_intervals,
                ref_pitches=ref_pitches,
                ref_velocities=ref_velocities,
                est_intervals=est_intervals,
                est_pitches=est_pitches,
                est_velocities=est_velocities))
        track_scores['Onset + offset + velocity precision'] = precision
        track_scores['Onset + offset + velocity recall'] = recall
        track_scores['Onset + offset + velocity F1'] = f_measure

      # Calculate framewise metrics.
      is_drum = all(n.is_drum for n in ref_ns.notes)
      ref_pr = metrics_utils.get_prettymidi_pianoroll(
          ref_ns, frame_fps, is_drum=is_drum)
      est_pr = metrics_utils.get_prettymidi_pianoroll(
          est_ns, frame_fps, is_drum=is_drum)
      all_track_pianorolls[instrument_name].append((est_pr, ref_pr))
      frame_precision, frame_recall, frame_f1 = metrics_utils.frame_metrics(
          ref_pr, est_pr, velocity_threshold=frame_velocity_threshold)
      track_scores['Frame Precision'] = frame_precision
      track_scores['Frame Recall'] = frame_recall
      track_scores['Frame F1'] = frame_f1

      for metric_name, metric_value in track_scores.items():
        if instrument_name:
          scores[f'{instrument_name}/{metric_name}'].append(metric_value)
        else:
          scores[metric_name].append(metric_value)

    # Add program-aware note metrics for all program granularities.
    # Note that this interacts with the training program granularity; in
    # particular granularities *higher* than the training granularity are
    # likely to have poor metrics.
    for granularity_type in vocabularies.PROGRAM_GRANULARITIES:
      for name, score in _program_aware_note_scores(
          target['ref_ns'], prediction['est_ns'],
          granularity_type=granularity_type).items():
        scores[name].append(score)

    # Add (non-program-aware) note metrics across a range of onset/offset
    # tolerances.
    for name, score in _note_onset_tolerance_sweep(
        ref_ns=ref_ns_drumless, est_ns=est_ns_drumless).items():
      scores[name].append(score)

  mean_scores = {k: np.mean(v) for k, v in scores.items()}
  score_histograms = {'%s (hist)' % k: seqio.metrics.Histogram(np.array(v))
                      for k, v in scores.items()}

  # Pick several examples to summarize.
  targets_to_summarize, predictions_to_summarize = zip(
      *full_target_prediction_pairs[:num_summary_examples])

  # Compute audio summaries.
  audio_summaries = summaries.audio_summaries(
      targets=targets_to_summarize,
      predictions=predictions_to_summarize,
      spectrogram_config=spectrogram_config)

  # Compute transcription summaries.
  transcription_summaries = summaries.transcription_summaries(
      targets=targets_to_summarize,
      predictions=predictions_to_summarize,
      spectrogram_config=spectrogram_config,
      ns_feature_suffix='ns',
      track_specs=track_specs)

  pianorolls_to_summarize = {
      k: v[:num_summary_examples] for k, v in all_track_pianorolls.items()
  }
  prettymidi_pianoroll_summaries = summaries.prettymidi_pianoroll(
      pianorolls_to_summarize, fps=frame_fps)

  return {
      **mean_scores,
      **score_histograms,
      **audio_summaries,
      **transcription_summaries,
      **prettymidi_pianoroll_summaries,
  }
| 15,414 | 38.223919 | 80 | py |
mt3 | mt3-main/mt3/__init__.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base module for MT3."""
from mt3 import datasets
from mt3 import event_codec
from mt3 import inference
from mt3 import layers
from mt3 import metrics
from mt3 import metrics_utils
from mt3 import models
from mt3 import network
from mt3 import note_sequences
from mt3 import preprocessors
from mt3 import run_length_encoding
from mt3 import spectrograms
from mt3 import summaries
from mt3 import tasks
from mt3 import vocabularies
from mt3.version import __version__
| 1,052 | 29.970588 | 74 | py |
mt3 | mt3-main/mt3/summaries.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorBoard summaries and utilities."""
from typing import Any, Mapping, Optional, Sequence, Tuple
import librosa
from mt3 import note_sequences
from mt3 import spectrograms
import note_seq
from note_seq import midi_synth
from note_seq import sequences_lib
from note_seq.protobuf import music_pb2
import numpy as np
import seqio
# Amount of audio (in seconds) to include in each audio summary.
_DEFAULT_AUDIO_SECONDS = 30.0
# Frame rate used for pianoroll summaries.
_DEFAULT_PIANOROLL_FRAMES_PER_SECOND = 15

# TODO(iansimon): pick a SoundFont; for some reason the default is all organ
def _extract_example_audio(
examples: Sequence[Mapping[str, Any]],
sample_rate: float,
num_seconds: float,
audio_key: str = 'raw_inputs'
) -> np.ndarray:
"""Extract audio from examples.
Args:
examples: List of examples containing raw audio.
sample_rate: Number of samples per second.
num_seconds: Number of seconds of audio to include.
audio_key: Dictionary key for the raw audio.
Returns:
An n-by-num_samples numpy array of samples.
"""
n = len(examples)
num_samples = round(num_seconds * sample_rate)
all_samples = np.zeros([n, num_samples])
for i, ex in enumerate(examples):
samples = ex[audio_key][:num_samples]
all_samples[i, :len(samples)] = samples
return all_samples
def _example_to_note_sequence(
    example: Mapping[str, Sequence[float]],
    ns_feature_name: str,
    note_onset_feature_name: str,
    note_offset_feature_name: str,
    note_frequency_feature_name: str,
    note_confidence_feature_name: str,
    num_seconds: float
) -> music_pb2.NoteSequence:
  """Extract NoteSequence from example.

  Either reads a ready-made NoteSequence feature or reconstructs one from
  parallel note arrays (onset times, frequencies, optional offsets and
  confidences). The result is trimmed to `num_seconds`.

  Args:
    example: Mapping of feature name to feature values.
    ns_feature_name: Name of a NoteSequence feature; if set, used directly.
    note_onset_feature_name: Name of note onset times feature.
    note_offset_feature_name: Name of note offset times feature (optional).
    note_frequency_feature_name: Name of note frequencies (Hz) feature.
    note_confidence_feature_name: Name of note confidences feature (optional).
    num_seconds: Trim the resulting NoteSequence to this many seconds.

  Returns:
    The extracted (trimmed) NoteSequence.
  """
  if ns_feature_name:
    ns = example[ns_feature_name]
  else:
    onset_times = np.array(example[note_onset_feature_name])
    # Frequencies are in Hz; convert to (rounded) MIDI pitch numbers.
    pitches = librosa.hz_to_midi(
        example[note_frequency_feature_name]).round().astype(int)
    assert len(onset_times) == len(pitches)
    if note_offset_feature_name or note_confidence_feature_name:
      # Missing offsets fall back to a fixed default note duration.
      offset_times = (
          example[note_offset_feature_name]
          if note_offset_feature_name
          else onset_times + note_sequences.DEFAULT_NOTE_DURATION
      )
      assert len(onset_times) == len(offset_times)
      confidences = (np.array(example[note_confidence_feature_name])
                     if note_confidence_feature_name else None)
      # Confidences scale MIDI velocity; otherwise use the default velocity.
      velocities = np.ceil(
          note_seq.MAX_MIDI_VELOCITY * confidences if confidences is not None
          else note_sequences.DEFAULT_VELOCITY * np.ones_like(onset_times)
      ).astype(int)
      assert len(onset_times) == len(velocities)
      ns = note_sequences.note_arrays_to_note_sequence(
          onset_times=onset_times, offset_times=offset_times,
          pitches=pitches, velocities=velocities)
    else:
      ns = note_sequences.note_arrays_to_note_sequence(
          onset_times=onset_times, pitches=pitches)
  return sequences_lib.trim_note_sequence(ns, 0, num_seconds)
def _synthesize_example_notes(
    examples: Sequence[Mapping[str, Sequence[float]]],
    ns_feature_name: str,
    note_onset_feature_name: str,
    note_offset_feature_name: str,
    note_frequency_feature_name: str,
    note_confidence_feature_name: str,
    sample_rate: float,
    num_seconds: float,
) -> np.ndarray:
  """Render the notes of each example to audio via FluidSynth.

  Exactly one of `ns_feature_name` and `note_onset_feature_name` must be
  specified; it determines how each example is converted to a NoteSequence
  before synthesis.

  Args:
    examples: Example dictionaries containing either serialized NoteSequence
      protos or note onset times and pitches.
    ns_feature_name: Name of the NoteSequence feature, or None.
    note_onset_feature_name: Name of the note onset times feature, or None.
    note_offset_feature_name: Name of the note offset times feature.
    note_frequency_feature_name: Name of the note frequencies feature.
    note_confidence_feature_name: Name of the note confidences (velocities)
      feature.
    sample_rate: Sample rate at which to synthesize.
    num_seconds: Seconds synthesized per example; longer audio is cropped,
      shorter audio leaves trailing zeros.

  Returns:
    An array of shape [len(examples), num_samples] of audio samples.

  Raises:
    ValueError: If both or neither of the NoteSequence and onset features
      are specified.
  """
  if (ns_feature_name is not None) == (note_onset_feature_name is not None):
    raise ValueError(
        'must specify exactly one of NoteSequence feature and onset feature')
  num_samples = round(num_seconds * sample_rate)
  synthesized = np.zeros([len(examples), num_samples])
  for row, example in enumerate(examples):
    note_sequence = _example_to_note_sequence(
        example,
        ns_feature_name=ns_feature_name,
        note_onset_feature_name=note_onset_feature_name,
        note_offset_feature_name=note_offset_feature_name,
        note_frequency_feature_name=note_frequency_feature_name,
        note_confidence_feature_name=note_confidence_feature_name,
        num_seconds=num_seconds)
    # Crop (never pad) the rendered audio to the fixed sample count.
    audio = midi_synth.fluidsynth(
        note_sequence, sample_rate=sample_rate)[:num_samples]
    synthesized[row, :len(audio)] = audio
  return synthesized
def _examples_to_pianorolls(
    targets: Sequence[Mapping[str, Sequence[float]]],
    predictions: Sequence[Mapping[str, Sequence[float]]],
    ns_feature_suffix: Optional[str],
    note_onset_feature_suffix: Optional[str],
    note_offset_feature_suffix: Optional[str],
    note_frequency_feature_suffix: Optional[str],
    note_confidence_feature_suffix: Optional[str],
    track_specs: Optional[Sequence[note_sequences.TrackSpec]],
    num_seconds: float,
    frames_per_second: float
) -> Tuple[np.ndarray, np.ndarray]:
  """Generate pianoroll images from example notes.

  Targets are read from features prefixed 'ref_' and predictions from
  features prefixed 'est_'. In each RGB image, channel 0 carries
  frame-boundary marker lines (and track separators), channel 1 the
  target notes, and channel 2 the predicted notes.

  Args:
    targets: List of target dictionaries, containing either serialized
      NoteSequence protos or note onset times and pitches.
    predictions: List of prediction dictionaries, containing either serialized
      NoteSequence protos or note onset times and pitches.
    ns_feature_suffix: Suffix of serialized NoteSequence feature.
    note_onset_feature_suffix: Suffix of note onset times feature.
    note_offset_feature_suffix: Suffix of note offset times feature.
    note_frequency_feature_suffix: Suffix of note frequencies feature.
    note_confidence_feature_suffix: Suffix of note confidences (velocities)
      feature.
    track_specs: Optional list of TrackSpec objects to indicate a set of tracks
      into which each NoteSequence should be split. Tracks will be stacked
      vertically in the pianorolls
    num_seconds: Number of seconds to show for each example.
    frames_per_second: Number of pianoroll frames per second.

  Returns:
    onset_pianorolls: An n-by-pianoroll_height-by-num_frames-by-3 numpy array
      of pianoroll images showing only onsets.
    full_pianorolls: An n-by-pianoroll_height-by-num_frames-by-3 numpy array
      of pianoroll images showing full note durations.

  Raises:
    ValueError: If both or neither of the NoteSequence and onset feature
      suffixes are specified.
  """
  if (ns_feature_suffix is not None) == (note_onset_feature_suffix is not None):
    raise ValueError(
        'must specify exactly one of NoteSequence feature and onset feature')
  def ex_to_ns(example, prefix):
    # Prepend the ref_/est_ prefix to each configured feature suffix.
    return _example_to_note_sequence(
        example=example,
        ns_feature_name=(prefix + ns_feature_suffix
                         if ns_feature_suffix else None),
        note_onset_feature_name=(prefix + note_onset_feature_suffix
                                 if note_onset_feature_suffix else None),
        note_offset_feature_name=(prefix + note_offset_feature_suffix
                                  if note_offset_feature_suffix else None),
        note_frequency_feature_name=(
            prefix + note_frequency_feature_suffix
            if note_frequency_feature_suffix else None),
        note_confidence_feature_name=(
            prefix + note_confidence_feature_suffix
            if note_confidence_feature_suffix else None),
        num_seconds=num_seconds)
  n = len(targets)
  num_pitches = note_seq.MAX_MIDI_PITCH - note_seq.MIN_MIDI_PITCH + 1
  num_frames = round(num_seconds * frames_per_second)
  num_tracks = len(track_specs) if track_specs else 1
  # One pitch band per track plus a one-pixel separator row between tracks.
  pianoroll_height = num_tracks * num_pitches + (num_tracks - 1)
  onset_images = np.zeros([n, pianoroll_height, num_frames, 3])
  full_images = np.zeros([n, pianoroll_height, num_frames, 3])
  for i, (target, pred) in enumerate(zip(targets, predictions)):
    target_ns, pred_ns = [
        ex_to_ns(ex, prefix)
        for (ex, prefix) in [(target, 'ref_'), (pred, 'est_')]
    ]
    # Show lines at frame boundaries. To ensure that these lines are drawn with
    # the same downsampling and frame selection logic as the real NoteSequences,
    # use this hack to draw the lines with a NoteSequence that contains notes
    # across all pitches at all frame start times.
    start_times_ns = note_seq.NoteSequence()
    start_times_ns.CopyFrom(target_ns)
    del start_times_ns.notes[:]
    for start_time in pred['start_times']:
      if start_time < target_ns.total_time:
        for pitch in range(
            note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH + 1):
          start_times_ns.notes.add(
              pitch=pitch,
              velocity=100,
              start_time=start_time,
              end_time=start_time + (1 / frames_per_second))
    start_time_roll = sequences_lib.sequence_to_pianoroll(
        start_times_ns,
        frames_per_second=frames_per_second,
        min_pitch=note_seq.MIN_MIDI_PITCH,
        max_pitch=note_seq.MAX_MIDI_PITCH,
        onset_mode='length_ms')
    num_start_time_frames = min(len(start_time_roll.onsets), num_frames)
    if track_specs is not None:
      target_tracks = [note_sequences.extract_track(target_ns,
                                                    spec.program, spec.is_drum)
                       for spec in track_specs]
      pred_tracks = [note_sequences.extract_track(pred_ns,
                                                  spec.program, spec.is_drum)
                     for spec in track_specs]
    else:
      target_tracks = [target_ns]
      pred_tracks = [pred_ns]
    # Reverse track order so the first spec ends up on top after the final
    # vertical flip of the images.
    for j, (target_track, pred_track) in enumerate(zip(target_tracks[::-1],
                                                       pred_tracks[::-1])):
      target_roll = sequences_lib.sequence_to_pianoroll(
          target_track,
          frames_per_second=frames_per_second,
          min_pitch=note_seq.MIN_MIDI_PITCH,
          max_pitch=note_seq.MAX_MIDI_PITCH,
          onset_mode='length_ms')
      pred_roll = sequences_lib.sequence_to_pianoroll(
          pred_track,
          frames_per_second=frames_per_second,
          min_pitch=note_seq.MIN_MIDI_PITCH,
          max_pitch=note_seq.MAX_MIDI_PITCH,
          onset_mode='length_ms')
      num_target_frames = min(len(target_roll.onsets), num_frames)
      num_pred_frames = min(len(pred_roll.onsets), num_frames)
      start_offset = j * (num_pitches + 1)
      end_offset = (j + 1) * (num_pitches + 1) - 1
      # Onsets
      onset_images[
          i, start_offset:end_offset, :num_start_time_frames, 0
      ] = start_time_roll.onsets[:num_start_time_frames, :].T
      onset_images[
          i, start_offset:end_offset, :num_target_frames, 1
      ] = target_roll.onsets[:num_target_frames, :].T
      onset_images[
          i, start_offset:end_offset, :num_pred_frames, 2
      ] = pred_roll.onsets[:num_pred_frames, :].T
      # Full notes
      full_images[
          i, start_offset:end_offset, :num_start_time_frames, 0
      ] = start_time_roll.onsets[:num_start_time_frames, :].T
      full_images[
          i, start_offset:end_offset, :num_target_frames, 1
      ] = target_roll.active[:num_target_frames, :].T
      full_images[
          i, start_offset:end_offset, :num_pred_frames, 2
      ] = pred_roll.active[:num_pred_frames, :].T
      # Add separator between tracks.
      if j < num_tracks - 1:
        onset_images[i, end_offset, :, 0] = 1
        full_images[i, end_offset, :, 0] = 1
  # Flip vertically so low pitches are at the bottom of the image.
  return onset_images[:, ::-1, :, :], full_images[:, ::-1, :, :]
def prettymidi_pianoroll(
    track_pianorolls: Mapping[str, Sequence[Tuple[np.ndarray, np.ndarray]]],
    fps: float,
    num_seconds=_DEFAULT_AUDIO_SECONDS
) -> Mapping[str, seqio.metrics.MetricValue]:
  """Build per-instrument pianoroll image summaries.

  Each (estimated, reference) pianoroll pair is cropped to `num_seconds`
  and drawn into one 128-pitch RGB image: reference notes on the green
  channel, estimated notes on the blue channel.
  """
  num_frames = int(num_seconds * fps)
  summaries = {}
  for inst_name, pianoroll_pairs in track_pianorolls.items():
    est_rolls, ref_rolls = zip(*pianoroll_pairs)
    batch_size = len(ref_rolls)
    image_batch = np.zeros(shape=(batch_size, 128, num_frames, 3))
    for idx, (est_roll, ref_roll) in enumerate(zip(est_rolls, ref_rolls)):
      est_crop = est_roll[:, :num_frames]
      ref_crop = ref_roll[:, :num_frames]
      image_batch[idx, :, :est_crop.shape[1], 2] = est_crop
      image_batch[idx, :, :ref_crop.shape[1], 1] = ref_crop
    if not inst_name:
      inst_name = 'all instruments'
    summaries[f'{inst_name} pretty_midi pianoroll'] = seqio.metrics.Image(
        image=image_batch, max_outputs=batch_size)
  return summaries
def audio_summaries(
    targets: Sequence[Mapping[str, Sequence[float]]],
    predictions: Sequence[Mapping[str, Sequence[float]]],
    spectrogram_config: spectrograms.SpectrogramConfig,
    num_seconds: float = _DEFAULT_AUDIO_SECONDS
) -> Mapping[str, seqio.metrics.MetricValue]:
  """Build an audio summary from the raw input audio of each prediction.

  Args:
    targets: Unused; the input audio tokens are carried on the predictions.
    predictions: Prediction dictionaries containing raw input audio.
    spectrogram_config: Provides the audio sample rate.
    num_seconds: Seconds of audio per example; longer audio is cropped
      (from the beginning), shorter audio is padded with silence (at the
      end).

  Returns:
    A single-entry mapping from 'audio' to an audio summary.
  """
  del targets  # Unused.
  audio = _extract_example_audio(
      examples=predictions,
      sample_rate=spectrogram_config.sample_rate,
      num_seconds=num_seconds)
  summary = seqio.metrics.Audio(
      audiodata=audio[:, :, np.newaxis],
      sample_rate=spectrogram_config.sample_rate,
      max_outputs=audio.shape[0])
  return {'audio': summary}
def transcription_summaries(
    targets: Sequence[Mapping[str, Sequence[float]]],
    predictions: Sequence[Mapping[str, Sequence[float]]],
    spectrogram_config: spectrograms.SpectrogramConfig,
    ns_feature_suffix: Optional[str] = None,
    note_onset_feature_suffix: Optional[str] = None,
    note_offset_feature_suffix: Optional[str] = None,
    note_frequency_feature_suffix: Optional[str] = None,
    note_confidence_feature_suffix: Optional[str] = None,
    track_specs: Optional[Sequence[note_sequences.TrackSpec]] = None,
    num_seconds: float = _DEFAULT_AUDIO_SECONDS,
    pianoroll_frames_per_second: float = _DEFAULT_PIANOROLL_FRAMES_PER_SECOND,
) -> Mapping[str, seqio.metrics.MetricValue]:
  """Compute note transcription summaries for multiple examples.

  Produces an audio summary pairing each example's input audio with a
  FluidSynth rendering of its predicted notes, plus full and onset-only
  pianoroll images comparing targets against predictions.

  Args:
    targets: List of targets containing ground truth.
    predictions: List of predictions, including raw input audio.
    spectrogram_config: The spectrogram configuration.
    ns_feature_suffix: Suffix of serialized NoteSequence feature.
    note_onset_feature_suffix: Suffix of note onset times feature.
    note_offset_feature_suffix: Suffix of note offset times feature.
    note_frequency_feature_suffix: Suffix of note frequencies feature.
    note_confidence_feature_suffix: Suffix of note confidences (velocities)
      feature.
    track_specs: Optional list of TrackSpec objects to indicate a set of tracks
      into which each NoteSequence should be split.
    num_seconds: Number of seconds of audio to include in the summaries.
      Longer audio will be cropped (from the beginning), shorter audio will be
      padded with silence (at the end).
    pianoroll_frames_per_second: Temporal resolution of pianoroll images.

  Returns:
    A dictionary of input, ground truth, and transcription summaries.
  """
  audio_samples = _extract_example_audio(
      examples=predictions,
      sample_rate=spectrogram_config.sample_rate,
      num_seconds=num_seconds)
  def synthesize(examples, prefix):
    # Render notes from the features carrying the given ref_/est_ prefix.
    return _synthesize_example_notes(
        examples=examples,
        ns_feature_name=(prefix + ns_feature_suffix
                         if ns_feature_suffix else None),
        note_onset_feature_name=(prefix + note_onset_feature_suffix
                                 if note_onset_feature_suffix else None),
        note_offset_feature_name=(prefix + note_offset_feature_suffix
                                  if note_offset_feature_suffix else None),
        note_frequency_feature_name=(
            prefix + note_frequency_feature_suffix
            if note_frequency_feature_suffix else None),
        note_confidence_feature_name=(
            prefix + note_confidence_feature_suffix
            if note_confidence_feature_suffix else None),
        sample_rate=spectrogram_config.sample_rate,
        num_seconds=num_seconds)
  # Only predictions are synthesized; targets appear in the pianorolls below.
  synthesized_predictions = synthesize(predictions, 'est_')
  onset_pianoroll_images, full_pianoroll_images = _examples_to_pianorolls(
      targets=targets,
      predictions=predictions,
      ns_feature_suffix=ns_feature_suffix,
      note_onset_feature_suffix=note_onset_feature_suffix,
      note_offset_feature_suffix=note_offset_feature_suffix,
      note_frequency_feature_suffix=note_frequency_feature_suffix,
      note_confidence_feature_suffix=note_confidence_feature_suffix,
      track_specs=track_specs,
      num_seconds=num_seconds,
      frames_per_second=pianoroll_frames_per_second)
  return {
      # Two-channel audio: input on one channel, transcription on the other.
      'input_with_transcription': seqio.metrics.Audio(
          audiodata=np.stack([audio_samples, synthesized_predictions], axis=2),
          sample_rate=spectrogram_config.sample_rate,
          max_outputs=audio_samples.shape[0]),
      'pianoroll': seqio.metrics.Image(
          image=full_pianoroll_images,
          max_outputs=full_pianoroll_images.shape[0]),
      'onset_pianoroll': seqio.metrics.Image(
          image=onset_pianoroll_images,
          max_outputs=onset_pianoroll_images.shape[0]),
  }
| 18,358 | 37.896186 | 80 | py |
mt3 | mt3-main/mt3/tasks.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transcription task definitions."""
import functools
from typing import Optional, Sequence
from mt3 import datasets
from mt3 import event_codec
from mt3 import metrics
from mt3 import mixing
from mt3 import preprocessors
from mt3 import run_length_encoding
from mt3 import spectrograms
from mt3 import vocabularies
import note_seq
import numpy as np
import seqio
import t5
import tensorflow as tf
# Split audio frame sequences into this length before the cache placeholder.
MAX_NUM_CACHED_FRAMES = 2000
# Directory of preprocessed (cached) task data read by seqio.
seqio.add_global_cache_dirs(['gs://mt3/data/cache_tasks/'])
def construct_task_name(
    task_prefix: str,
    spectrogram_config=spectrograms.SpectrogramConfig(),
    vocab_config=vocabularies.VocabularyConfig(),
    task_suffix: Optional[str] = None
) -> str:
  """Construct task name from prefix, config, and optional suffix.

  Joins the prefix, the non-empty config abbreviation strings, and the
  optional suffix with underscores.
  """
  name_parts = [task_prefix]
  for optional_part in (spectrogram_config.abbrev_str,
                        vocab_config.abbrev_str,
                        task_suffix):
    if optional_part:
      name_parts.append(optional_part)
  return '_'.join(name_parts)
def trim_eos(tokens: Sequence[int]) -> np.ndarray:
  """Truncate at the first decoded EOS token, dropping it and what follows."""
  token_array = np.array(tokens, np.int32)
  is_eos = token_array == vocabularies.DECODED_EOS_ID
  if is_eos.any():
    token_array = token_array[:np.argmax(is_eos)]
  return token_array
def postprocess(tokens, example, is_target, codec):
  """Transcription postprocessing function.

  Trims predicted/reference tokens at EOS and repackages the example into
  the dictionary shape expected by the transcription metrics.
  """
  trimmed_tokens = trim_eos(tokens)
  unique_id = example['unique_id'][0]
  if is_target:
    serialized_ns = example['sequence'][0]
    ref_ns = (note_seq.NoteSequence.FromString(serialized_ns)
              if serialized_ns else None)
    return {
        'unique_id': unique_id,
        'ref_ns': ref_ns,
        'ref_tokens': trimmed_tokens,
    }
  # Round down to nearest symbolic token step.
  step_duration = 1 / codec.steps_per_second
  start_time = example['input_times'][0]
  start_time -= start_time % step_duration
  return {
      'unique_id': unique_id,
      'raw_inputs': example['raw_inputs'],
      'est_tokens': trimmed_tokens,
      'start_time': start_time
  }
def add_transcription_task_to_registry(
    dataset_config: datasets.DatasetConfig,
    spectrogram_config: spectrograms.SpectrogramConfig,
    vocab_config: vocabularies.VocabularyConfig,
    tokenize_fn,  # TODO(iansimon): add type signature
    onsets_only: bool,
    include_ties: bool,
    skip_too_long: bool = False
) -> None:
  """Add note transcription task to seqio.TaskRegistry.

  Registers one training task, one eval task per infer-eval split, and a
  mixture over the eval tasks flagged for inclusion.

  Args:
    dataset_config: Dataset name, split-to-filepattern paths, feature
      descriptions, and optional track specs.
    spectrogram_config: Spectrogram configuration for the input audio.
    vocab_config: Vocabulary configuration used to build the event codec
      and target vocabulary.
    tokenize_fn: Dataset-specific preprocessor that tokenizes raw examples.
    onsets_only: If True, name the task 'onsets' instead of 'notes'.
    include_ties: If True, append '_ties' to the task name and pass the tie
      token as the state-events end token during target extraction.
    skip_too_long: Forwarded to preprocessors.handle_too_long for the
      training task (eval tasks always use skip=False).
  """
  codec = vocabularies.build_codec(vocab_config)
  vocabulary = vocabularies.vocabulary_from_codec(codec)
  output_features = {
      'targets': seqio.Feature(vocabulary=vocabulary),
      'inputs': seqio.ContinuousFeature(dtype=tf.float32, rank=2)
  }
  task_name = 'onsets' if onsets_only else 'notes'
  if include_ties:
    task_name += '_ties'
  task_prefix = f'{dataset_config.name}_{task_name}'
  train_task_name = construct_task_name(
      task_prefix=task_prefix,
      spectrogram_config=spectrogram_config,
      vocab_config=vocab_config,
      task_suffix='train')
  mixture_task_names = []
  tie_token = codec.encode_event(event_codec.Event('tie', 0))
  track_specs = (dataset_config.track_specs
                 if dataset_config.track_specs else None)
  # Add transcription training task.
  seqio.TaskRegistry.add(
      train_task_name,
      source=seqio.TFExampleDataSource(
          split_to_filepattern={
              'train': dataset_config.paths[dataset_config.train_split],
              'eval': dataset_config.paths[dataset_config.train_eval_split]
          },
          feature_description=dataset_config.features),
      output_features=output_features,
      preprocessors=[
          functools.partial(
              tokenize_fn,
              spectrogram_config=spectrogram_config, codec=codec,
              is_training_data=True, onsets_only=onsets_only,
              include_ties=include_ties),
          # Pre-cache: split long frame sequences so cached examples are
          # bounded in size.
          functools.partial(
              t5.data.preprocessors.split_tokens,
              max_tokens_per_segment=MAX_NUM_CACHED_FRAMES,
              feature_key='inputs',
              additional_feature_keys=[
                  'input_event_start_indices', 'input_event_end_indices',
                  'input_state_event_indices'
              ],
              passthrough_feature_keys=['targets', 'state_events']),
          seqio.CacheDatasetPlaceholder(),
          functools.partial(
              t5.data.preprocessors.select_random_chunk,
              feature_key='inputs',
              additional_feature_keys=[
                  'input_event_start_indices', 'input_event_end_indices',
                  'input_state_event_indices'
              ],
              passthrough_feature_keys=['targets', 'state_events'],
              uniform_random_start=True),
          functools.partial(
              run_length_encoding.extract_target_sequence_with_indices,
              state_events_end_token=tie_token if include_ties else None),
          functools.partial(preprocessors.map_midi_programs, codec=codec),
          run_length_encoding.run_length_encode_shifts_fn(
              codec,
              feature_key='targets'),
          functools.partial(
              mixing.mix_transcription_examples,
              codec=codec,
              targets_feature_keys=['targets']),
          run_length_encoding.remove_redundant_state_changes_fn(
              feature_key='targets', codec=codec,
              state_change_event_types=['velocity', 'program']),
          functools.partial(
              preprocessors.compute_spectrograms,
              spectrogram_config=spectrogram_config),
          functools.partial(preprocessors.handle_too_long, skip=skip_too_long),
          functools.partial(
              seqio.preprocessors.tokenize_and_append_eos,
              copy_pretokenized=False)
      ],
      postprocess_fn=None,
      metric_fns=[],
  )
  # Add transcription eval tasks.
  for split in dataset_config.infer_eval_splits:
    eval_task_name = construct_task_name(
        task_prefix=task_prefix,
        spectrogram_config=spectrogram_config,
        vocab_config=vocab_config,
        task_suffix=split.suffix)
    if split.include_in_mixture:
      mixture_task_names.append(eval_task_name)
    seqio.TaskRegistry.add(
        eval_task_name,
        source=seqio.TFExampleDataSource(
            split_to_filepattern={'eval': dataset_config.paths[split.name]},
            feature_description=dataset_config.features),
        output_features=output_features,
        preprocessors=[
            functools.partial(
                tokenize_fn,
                spectrogram_config=spectrogram_config, codec=codec,
                is_training_data='train' in split.name, onsets_only=onsets_only,
                include_ties=include_ties),
            seqio.CacheDatasetPlaceholder(),
            preprocessors.add_unique_id,
            preprocessors.pad_notesequence_array,
            functools.partial(
                t5.data.preprocessors.split_tokens_to_inputs_length,
                feature_key='inputs',
                additional_feature_keys=['input_times', 'sequence'],
                passthrough_feature_keys=['unique_id']),
            # Add dummy targets as they are dropped during the above split to
            # avoid memory blowups, but expected to be present by seqio; the
            # evaluation metrics currently only use the target NoteSequence.
            preprocessors.add_dummy_targets,
            functools.partial(
                preprocessors.compute_spectrograms,
                spectrogram_config=spectrogram_config),
            functools.partial(preprocessors.handle_too_long, skip=False),
            functools.partial(
                seqio.preprocessors.tokenize_and_append_eos,
                copy_pretokenized=False)
        ],
        postprocess_fn=functools.partial(postprocess, codec=codec),
        metric_fns=[
            functools.partial(
                metrics.transcription_metrics,
                codec=codec,
                spectrogram_config=spectrogram_config,
                onsets_only=onsets_only,
                use_ties=include_ties,
                track_specs=track_specs)
        ],
    )
  # Mixture covering the eval splits flagged with include_in_mixture.
  seqio.MixtureRegistry.add(
      construct_task_name(
          task_prefix=task_prefix, spectrogram_config=spectrogram_config,
          vocab_config=vocab_config, task_suffix='eval'),
      mixture_task_names,
      default_rate=1)
# Just use default spectrogram config.
SPECTROGRAM_CONFIG = spectrograms.SpectrogramConfig()
# Create two vocabulary configs, one default and one with only on-off velocity.
VOCAB_CONFIG_FULL = vocabularies.VocabularyConfig()
VOCAB_CONFIG_NOVELOCITY = vocabularies.VocabularyConfig(num_velocity_bins=1)
# Transcribe MAESTRO v1.
add_transcription_task_to_registry(
    dataset_config=datasets.MAESTROV1_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_FULL,
    tokenize_fn=functools.partial(
        preprocessors.tokenize_transcription_example,
        audio_is_samples=False,
        id_feature_key='id'),
    onsets_only=False,
    include_ties=False)
# Transcribe MAESTRO v3.
add_transcription_task_to_registry(
    dataset_config=datasets.MAESTROV3_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_FULL,
    tokenize_fn=functools.partial(
        preprocessors.tokenize_transcription_example,
        audio_is_samples=False,
        id_feature_key='id'),
    onsets_only=False,
    include_ties=False)
# Transcribe MAESTRO v3 without velocities, with ties.
add_transcription_task_to_registry(
    dataset_config=datasets.MAESTROV3_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_NOVELOCITY,
    tokenize_fn=functools.partial(
        preprocessors.tokenize_transcription_example,
        audio_is_samples=False,
        id_feature_key='id'),
    onsets_only=False,
    include_ties=True)
# Transcribe GuitarSet, with ties.
add_transcription_task_to_registry(
    dataset_config=datasets.GUITARSET_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_NOVELOCITY,
    tokenize_fn=preprocessors.tokenize_guitarset_example,
    onsets_only=False,
    include_ties=True)
# Transcribe URMP mixes, with ties.
add_transcription_task_to_registry(
    dataset_config=datasets.URMP_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_NOVELOCITY,
    tokenize_fn=functools.partial(
        preprocessors.tokenize_example_with_program_lookup,
        inst_name_to_program_fn=preprocessors.urmp_instrument_to_program,
        id_feature_key='id'),
    onsets_only=False,
    include_ties=True)
# Transcribe MusicNet, with ties.
add_transcription_task_to_registry(
    dataset_config=datasets.MUSICNET_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_NOVELOCITY,
    tokenize_fn=functools.partial(
        preprocessors.tokenize_transcription_example,
        audio_is_samples=True,
        id_feature_key='id'),
    onsets_only=False,
    include_ties=True)
# Transcribe MusicNetEM, with ties.
add_transcription_task_to_registry(
    dataset_config=datasets.MUSICNET_EM_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_NOVELOCITY,
    tokenize_fn=functools.partial(
        preprocessors.tokenize_transcription_example,
        audio_is_samples=True,
        id_feature_key='id'),
    onsets_only=False,
    include_ties=True)
# Transcribe Cerberus4 (piano-guitar-bass-drums quartets), with ties.
add_transcription_task_to_registry(
    dataset_config=datasets.CERBERUS4_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_NOVELOCITY,
    tokenize_fn=functools.partial(
        preprocessors.tokenize_slakh_example,
        track_specs=datasets.CERBERUS4_CONFIG.track_specs,
        ignore_pitch_bends=True),
    onsets_only=False,
    include_ties=True)
# Transcribe 10 random sub-mixes of each song from Slakh, with ties.
add_transcription_task_to_registry(
    dataset_config=datasets.SLAKH_CONFIG,
    spectrogram_config=SPECTROGRAM_CONFIG,
    vocab_config=VOCAB_CONFIG_NOVELOCITY,
    tokenize_fn=functools.partial(
        preprocessors.tokenize_slakh_example,
        track_specs=None,
        ignore_pitch_bends=True),
    onsets_only=False,
    include_ties=True)
# Construct task names to include in transcription mixture.
MIXTURE_DATASET_NAMES = [
    'maestrov3', 'guitarset', 'urmp', 'musicnet_em', 'cerberus4', 'slakh'
]
MIXTURE_TRAIN_TASK_NAMES = []
MIXTURE_EVAL_TASK_NAMES = []
# NOTE(review): MIXTURE_TEST_TASK_NAMES is never populated in this module;
# confirm whether it is filled elsewhere or is dead.
MIXTURE_TEST_TASK_NAMES = []
for dataset_name in MIXTURE_DATASET_NAMES:
  MIXTURE_TRAIN_TASK_NAMES.append(
      construct_task_name(task_prefix=f'{dataset_name}_notes_ties',
                          spectrogram_config=SPECTROGRAM_CONFIG,
                          vocab_config=VOCAB_CONFIG_NOVELOCITY,
                          task_suffix='train'))
  MIXTURE_EVAL_TASK_NAMES.append(
      construct_task_name(task_prefix=f'{dataset_name}_notes_ties',
                          spectrogram_config=SPECTROGRAM_CONFIG,
                          vocab_config=VOCAB_CONFIG_NOVELOCITY,
                          task_suffix='validation'))
MIXING_TEMPERATURE = 10 / 3
# Add the mixture of all transcription tasks, with ties.
seqio.MixtureRegistry.add(
    construct_task_name(
        task_prefix='mega_notes_ties',
        spectrogram_config=SPECTROGRAM_CONFIG,
        vocab_config=VOCAB_CONFIG_NOVELOCITY,
        task_suffix='train'),
    MIXTURE_TRAIN_TASK_NAMES,
    default_rate=functools.partial(
        seqio.mixing_rate_num_examples,
        temperature=MIXING_TEMPERATURE))
# Corresponding eval mixture over the validation splits.
seqio.MixtureRegistry.add(
    construct_task_name(
        task_prefix='mega_notes_ties',
        spectrogram_config=SPECTROGRAM_CONFIG,
        vocab_config=VOCAB_CONFIG_NOVELOCITY,
        task_suffix='eval'),
    MIXTURE_EVAL_TASK_NAMES,
    default_rate=functools.partial(
        seqio.mixing_rate_num_examples,
        temperature=MIXING_TEMPERATURE))
| 14,610 | 35.255583 | 80 | py |
mt3 | mt3-main/mt3/note_sequences_test.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for note_sequences."""
from mt3 import event_codec
from mt3 import note_sequences
from mt3 import run_length_encoding
import note_seq
import numpy as np
import tensorflow as tf
# Shared test codec: 100 steps/second with pitch, velocity, drum, program,
# and tie event ranges.
codec = event_codec.Codec(
    max_shift_steps=100,
    steps_per_second=100,
    event_ranges=[
        event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
                               note_seq.MAX_MIDI_PITCH),
        event_codec.EventRange('velocity', 0, 127),
        event_codec.EventRange('drum', note_seq.MIN_MIDI_PITCH,
                               note_seq.MAX_MIDI_PITCH),
        event_codec.EventRange('program', note_seq.MIN_MIDI_PROGRAM,
                               note_seq.MAX_MIDI_PROGRAM),
        event_codec.EventRange('tie', 0, 0)
    ])
class RunLengthEncodingTest(tf.test.TestCase):
def test_encode_and_index_note_sequence(self):
ns = note_seq.NoteSequence()
ns.notes.add(start_time=1.0,
end_time=1.1,
pitch=61,
velocity=100)
ns.notes.add(start_time=2.0,
end_time=2.1,
pitch=62,
velocity=100)
ns.notes.add(start_time=3.0,
end_time=3.1,
pitch=63,
velocity=100)
ns.total_time = ns.notes[-1].end_time
frame_times = np.arange(0, 4, step=.001)
event_times, event_values = note_sequences.note_sequence_to_onsets(ns)
events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
state=None, event_times=event_times, event_values=event_values,
encode_event_fn=note_sequences.note_event_data_to_events,
codec=codec, frame_times=frame_times)
self.assertEqual(len(frame_times), len(event_start_indices))
self.assertEqual(len(frame_times), len(event_end_indices))
self.assertLen(events, 403)
expected_events = ([1] * 100 +
[162] +
[1] * 100 +
[163] +
[1] * 100 +
[164] +
[1] * 100)
np.testing.assert_array_equal(expected_events, events)
self.assertEqual(event_start_indices[0], 0)
self.assertEqual(event_end_indices[0], 0)
self.assertEqual(162, events[100])
self.assertEqual(1.0, frame_times[1000])
self.assertEqual(event_start_indices[1000], 100)
self.assertEqual(event_end_indices[1000], 100)
self.assertEqual(163, events[201])
self.assertEqual(2.0, frame_times[2000])
self.assertEqual(event_start_indices[2000], 201)
self.assertEqual(event_end_indices[2000], 201)
self.assertEqual(164, events[302])
self.assertEqual(3.0, frame_times[3000])
self.assertEqual(event_start_indices[3000], 302)
self.assertEqual(event_end_indices[3000], 302)
self.assertEqual(1, events[-1])
self.assertEqual(3.999, frame_times[-1])
self.assertEqual(event_start_indices[-1], 402)
self.assertEqual(event_end_indices[-1], len(expected_events))
def test_encode_and_index_note_sequence_velocity(self):
ns = note_seq.NoteSequence()
ns.notes.add(start_time=1.0,
end_time=3.0,
pitch=61,
velocity=1)
ns.notes.add(start_time=2.0,
end_time=4.0,
pitch=62,
velocity=127)
ns.total_time = ns.notes[-1].end_time
frame_times = np.arange(0, 4, step=.001)
event_times, event_values = (
note_sequences.note_sequence_to_onsets_and_offsets(ns))
events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
state=None, event_times=event_times, event_values=event_values,
encode_event_fn=note_sequences.note_event_data_to_events,
codec=codec, frame_times=frame_times)
self.assertEqual(len(frame_times), len(event_start_indices))
self.assertEqual(len(frame_times), len(event_end_indices))
self.assertLen(events, 408)
expected_events = ([1] * 100 +
[230, 162] +
[1] * 100 +
[356, 163] +
[1] * 100 +
[229, 162] +
[1] * 100 +
[229, 163])
np.testing.assert_array_equal(expected_events, events)
self.assertEqual(event_start_indices[0], 0)
self.assertEqual(event_end_indices[0], 0)
self.assertEqual(230, events[100])
self.assertEqual(162, events[101])
self.assertEqual(1.0, frame_times[1000])
self.assertEqual(event_start_indices[1000], 100)
self.assertEqual(event_end_indices[1000], 100)
self.assertEqual(356, events[202])
self.assertEqual(163, events[203])
self.assertEqual(2.0, frame_times[2000])
self.assertEqual(event_start_indices[2000], 202)
self.assertEqual(event_end_indices[2000], 202)
self.assertEqual(229, events[304])
self.assertEqual(162, events[305])
self.assertEqual(3.0, frame_times[3000])
self.assertEqual(event_start_indices[3000], 304)
self.assertEqual(event_end_indices[3000], 304)
self.assertEqual(229, events[406])
self.assertEqual(163, events[407])
self.assertEqual(3.999, frame_times[-1])
self.assertEqual(event_start_indices[-1], 405)
self.assertEqual(event_end_indices[-1], len(expected_events))
def test_encode_and_index_note_sequence_multitrack(self):
ns = note_seq.NoteSequence()
ns.notes.add(start_time=0.0,
end_time=1.0,
pitch=37,
velocity=127,
is_drum=True)
ns.notes.add(start_time=1.0,
end_time=3.0,
pitch=61,
velocity=127,
program=0)
ns.notes.add(start_time=2.0,
end_time=4.0,
pitch=62,
velocity=127,
program=40)
ns.total_time = ns.notes[-1].end_time
frame_times = np.arange(0, 4, step=.001)
event_times, event_values = (
note_sequences.note_sequence_to_onsets_and_offsets_and_programs(ns))
(tokens, event_start_indices, event_end_indices, state_tokens,
state_event_indices) = run_length_encoding.encode_and_index_events(
state=note_sequences.NoteEncodingState(),
event_times=event_times, event_values=event_values,
encode_event_fn=note_sequences.note_event_data_to_events,
codec=codec, frame_times=frame_times,
encoding_state_to_events_fn=(
note_sequences.note_encoding_state_to_events))
self.assertEqual(len(frame_times), len(event_start_indices))
self.assertEqual(len(frame_times), len(event_end_indices))
self.assertEqual(len(frame_times), len(state_event_indices))
self.assertLen(tokens, 414)
expected_events = (
[event_codec.Event('velocity', 127), event_codec.Event('drum', 37)] +
[event_codec.Event('shift', 1)] * 100 +
[event_codec.Event('program', 0),
event_codec.Event('velocity', 127), event_codec.Event('pitch', 61)] +
[event_codec.Event('shift', 1)] * 100 +
[event_codec.Event('program', 40),
event_codec.Event('velocity', 127), event_codec.Event('pitch', 62)] +
[event_codec.Event('shift', 1)] * 100 +
[event_codec.Event('program', 0),
event_codec.Event('velocity', 0), event_codec.Event('pitch', 61)] +
[event_codec.Event('shift', 1)] * 100 +
[event_codec.Event('program', 40),
event_codec.Event('velocity', 0), event_codec.Event('pitch', 62)])
expected_tokens = [codec.encode_event(e) for e in expected_events]
np.testing.assert_array_equal(expected_tokens, tokens)
expected_state_events = [
event_codec.Event('tie', 0), # state prior to first drum
event_codec.Event('tie', 0), # state prior to first onset
event_codec.Event('program', 0), # state prior to second onset
event_codec.Event('pitch', 61), # |
event_codec.Event('tie', 0), # |
event_codec.Event('program', 0), # state prior to first offset
event_codec.Event('pitch', 61), # |
event_codec.Event('program', 40), # |
event_codec.Event('pitch', 62), # |
event_codec.Event('tie', 0), # |
event_codec.Event('program', 40), # state prior to second offset
event_codec.Event('pitch', 62), # |
event_codec.Event('tie', 0) # |
]
expected_state_tokens = [codec.encode_event(e)
for e in expected_state_events]
np.testing.assert_array_equal(expected_state_tokens, state_tokens)
self.assertEqual(event_start_indices[0], 0)
self.assertEqual(event_end_indices[0], 0)
self.assertEqual(state_event_indices[0], 0)
self.assertEqual(1.0, frame_times[1000])
self.assertEqual(event_start_indices[1000], 102)
self.assertEqual(event_end_indices[1000], 102)
self.assertEqual(state_event_indices[1000], 1)
self.assertEqual(2.0, frame_times[2000])
self.assertEqual(event_start_indices[2000], 205)
self.assertEqual(event_end_indices[2000], 205)
self.assertEqual(state_event_indices[2000], 2)
self.assertEqual(3.0, frame_times[3000])
self.assertEqual(event_start_indices[3000], 308)
self.assertEqual(event_end_indices[3000], 308)
self.assertEqual(state_event_indices[3000], 5)
self.assertEqual(3.999, frame_times[-1])
self.assertEqual(event_start_indices[-1], 410)
self.assertEqual(event_end_indices[-1], len(expected_events))
self.assertEqual(state_event_indices[-1], 10)
  def test_encode_and_index_note_sequence_last_token_alignment(self):
    # A single 0.1s note, but frame_times extend to ~1.0s; verifies that the
    # final frame's end index covers the full event list (onset + trailing
    # shift events), i.e. the last token aligns with the last frame.
    ns = note_seq.NoteSequence()
    ns.notes.add(start_time=0.0,
                 end_time=0.1,
                 pitch=60,
                 velocity=100)
    ns.total_time = ns.notes[-1].end_time
    frame_times = np.arange(0, 1.008, step=.008)
    event_times, event_values = note_sequences.note_sequence_to_onsets(ns)
    events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
        state=None,
        event_times=event_times,
        event_values=event_values,
        encode_event_fn=note_sequences.note_event_data_to_events,
        codec=codec,
        frame_times=frame_times)
    self.assertEqual(len(frame_times), len(event_start_indices))
    self.assertEqual(len(frame_times), len(event_end_indices))
    self.assertLen(events, 102)
    # Token 161 appears to be the pitch-60 onset and token 1 a single shift
    # step under the module-level `codec` fixture — TODO confirm against codec.
    expected_events = [161] + [1] * 101
    np.testing.assert_array_equal(expected_events, events)
    self.assertEqual(event_start_indices[0], 0)
    self.assertEqual(event_end_indices[0], 0)
    self.assertEqual(event_start_indices[125], 101)
    self.assertEqual(event_end_indices[125], 102)
  def test_decode_note_sequence_events(self):
    # Two shift/onset pairs decode into two short notes at 0.25s and 0.50s.
    # Tokens 25/50 look like shift steps (10ms each) and 161/162 like onsets
    # of pitches 60/61 under the module-level codec fixture — TODO confirm.
    events = [25, 161, 50, 162]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.25,
        end_time=0.26)
    expected_ns.notes.add(
        pitch=61,
        velocity=100,
        start_time=0.50,
        end_time=0.51)
    expected_ns.total_time = 0.51
    self.assertProtoEquals(expected_ns, ns)
  def test_decode_note_sequence_events_onsets_only(self):
    # Onsets-only decoding: each decoded onset becomes a fixed-length (0.01s)
    # note since no offset events are present in the token stream.
    events = [5, 161, 25, 162]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.05,
        end_time=0.06)
    expected_ns.notes.add(
        pitch=61,
        velocity=100,
        start_time=0.25,
        end_time=0.26)
    expected_ns.total_time = 0.26
    self.assertProtoEquals(expected_ns, ns)
  def test_decode_note_sequence_events_velocity(self):
    # Full event decoding: velocity token (356, presumably max velocity bin)
    # precedes the onset; token 229 appears to be velocity 0 (i.e. an offset),
    # ending the note at 0.25s — TODO confirm token meanings against codec.
    events = [5, 356, 161, 25, 229, 161]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=127,
        start_time=0.05,
        end_time=0.25)
    expected_ns.total_time = 0.25
    self.assertProtoEquals(expected_ns, ns)
  def test_decode_note_sequence_events_missing_offset(self):
    # A second onset of the same pitch with no intervening offset implicitly
    # ends the first note at the second onset's start time (0.10s).
    events = [5, 356, 161, 10, 161, 25, 229, 161]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=127,
        start_time=0.05,
        end_time=0.10)
    expected_ns.notes.add(
        pitch=60,
        velocity=127,
        start_time=0.10,
        end_time=0.25)
    expected_ns.total_time = 0.25
    self.assertProtoEquals(expected_ns, ns)
  def test_decode_note_sequence_events_multitrack(self):
    # Mixed drum + pitched decoding: token 525 appears to select a program or
    # the drum channel and 394 a drum hit — TODO confirm against codec. The
    # drum lands on instrument 9 with is_drum=True; the pitched note gets
    # program 40.
    events = [5, 525, 356, 161, 15, 356, 394, 25, 525, 229, 161]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=37,
        velocity=127,
        start_time=0.15,
        end_time=0.16,
        instrument=9,
        is_drum=True)
    expected_ns.notes.add(
        pitch=60,
        velocity=127,
        start_time=0.05,
        end_time=0.25,
        program=40)
    expected_ns.total_time = 0.25
    self.assertProtoEquals(expected_ns, ns)
  def test_decode_note_sequence_events_invalid_tokens(self):
    # Out-of-vocabulary token IDs (-1, -2, 9999) are counted as invalid and
    # skipped; the remaining valid tokens still decode into two notes.
    events = [5, -1, 161, -2, 25, 162, 9999]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_events, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(3, invalid_events)
    self.assertEqual(0, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.05,
        end_time=0.06)
    expected_ns.notes.add(
        pitch=61,
        velocity=100,
        start_time=0.25,
        end_time=0.26)
    expected_ns.total_time = 0.26
    self.assertProtoEquals(expected_ns, ns)
  def test_decode_note_sequence_events_allow_event_at_exactly_max_time(self):
    # An event whose time equals max_time exactly (1.0 + 25 shifts = 1.25)
    # must be kept, not dropped; the boundary is inclusive.
    events = [161, 25, 162]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=1.0, max_time=1.25,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(0, invalid_ids)
    self.assertEqual(0, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=1.00,
        end_time=1.01)
    expected_ns.notes.add(
        pitch=61,
        velocity=100,
        start_time=1.25,
        end_time=1.26)
    expected_ns.total_time = 1.26
    self.assertProtoEquals(expected_ns, ns)
  def test_decode_note_sequence_events_dropped_events(self):
    # The second shift (30 steps → 1.30s) moves past max_time=1.25, so the
    # shift and the following onset are both counted as dropped (2 events).
    events = [5, 161, 30, 162]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=1.0, max_time=1.25,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(0, invalid_ids)
    self.assertEqual(2, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=1.05,
        end_time=1.06)
    expected_ns.total_time = 1.06
    self.assertProtoEquals(expected_ns, ns)
  def test_decode_note_sequence_events_invalid_events(self):
    # Token 230 is in-vocabulary but invalid for onset-only decoding
    # (presumably a velocity event); it is counted as one invalid event and
    # skipped, leaving a single decoded note.
    events = [25, 230, 50, 161]
    decoding_state = note_sequences.NoteDecodingState()
    invalid_ids, dropped_events = run_length_encoding.decode_events(
        state=decoding_state, tokens=events, start_time=0, max_time=None,
        codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
    ns = note_sequences.flush_note_decoding_state(decoding_state)
    self.assertEqual(1, invalid_ids)
    self.assertEqual(0, dropped_events)
    expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
    expected_ns.notes.add(
        pitch=60,
        velocity=100,
        start_time=0.50,
        end_time=0.51)
    expected_ns.total_time = 0.51
    self.assertProtoEquals(expected_ns, ns)
if __name__ == '__main__':
  # Discover and run all test cases in this module.
  tf.test.main()
| 19,150 | 36.847826 | 103 | py |
mt3 | mt3-main/mt3/preprocessors.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transcription preprocessors."""
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple
from absl import logging
import gin
from immutabledict import immutabledict
import librosa
from mt3 import event_codec
from mt3 import note_sequences
from mt3 import run_length_encoding
from mt3 import spectrograms
from mt3 import vocabularies
import note_seq
import numpy as np
import seqio
import tensorflow as tf
def add_unique_id(ds: tf.data.Dataset) -> tf.data.Dataset:
  """Add unique integer ID to each example in a dataset.

  The ID is the example's position in the dataset, stored as a length-1 list
  under the 'unique_id' key.
  """
  def _attach_id(idx, example):
    example['unique_id'] = [idx]
    return example
  return ds.enumerate().map(
      _attach_id, num_parallel_calls=tf.data.experimental.AUTOTUNE)
@seqio.map_over_dataset
def pad_notesequence_array(ex):
  """Pad the NoteSequence array so that it can later be "split"."""
  # Expand the scalar serialized NoteSequence to a length-1 vector and pad it
  # to len(ex['input_times']) entries, so a later per-frame "split" sees one
  # element per input time (only the first element carries the sequence).
  ex['sequence'] = tf.pad(tf.expand_dims(ex['sequence'], 0),
                          [[0, len(ex['input_times']) - 1]])
  return ex
@seqio.map_over_dataset
def add_dummy_targets(ex):
  """Add dummy targets; used in eval when targets are not actually used."""
  # Empty int32 vector keeps the 'targets' feature present with correct dtype.
  ex['targets'] = np.array([], dtype=np.int32)
  return ex
def _audio_to_frames(
    samples: Sequence[float],
    spectrogram_config: spectrograms.SpectrogramConfig,
) -> Tuple[Sequence[Sequence[int]], np.ndarray]:
  """Convert audio samples to non-overlapping frames and frame times."""
  frame_size = spectrogram_config.hop_width
  logging.info('Padding %d samples to multiple of %d', len(samples), frame_size)
  # NOTE(review): when len(samples) is already a multiple of frame_size, the
  # pad amount below equals frame_size, appending one full extra frame of
  # zeros. Confirm downstream alignment depends on this rather than the
  # zero-pad form (-len(samples)) % frame_size.
  samples = np.pad(samples,
                   [0, frame_size - len(samples) % frame_size],
                   mode='constant')
  frames = spectrograms.split_audio(samples, spectrogram_config)
  num_frames = len(samples) // frame_size
  logging.info('Encoded %d samples to %d frames (%d samples each)',
               len(samples), num_frames, frame_size)
  # Frame i starts at time i / frames_per_second.
  times = np.arange(num_frames) / spectrogram_config.frames_per_second
  return frames, times
def _include_inputs(ds, input_record, fields_to_omit=('audio',)):
  """Include fields from input record (other than audio) in dataset records."""
  def _merge_fields(record):
    # Copy over any input fields the output record doesn't already have.
    missing = set(input_record.keys()) - set(record.keys())
    for name in missing:
      record[name] = input_record[name]
    # Then drop fields (e.g. raw audio) we don't want to carry forward.
    for name in fields_to_omit:
      del record[name]
    return record
  return ds.map(_merge_fields,
                num_parallel_calls=tf.data.experimental.AUTOTUNE)
def tokenize_transcription_example(
    ds: tf.data.Dataset, spectrogram_config: spectrograms.SpectrogramConfig,
    codec: event_codec.Codec, is_training_data: bool,
    onsets_only: bool, include_ties: bool, audio_is_samples: bool,
    id_feature_key: Optional[str] = None
) -> tf.data.Dataset:
  """Tokenize a note transcription example for run-length encoding.
  Outputs include:
    inputs: audio sample frames, num_frames-by-frame_size
    input_time: timestamp for each frame
    targets: symbolic sequence of note-related events
    input_event_start_indices: start target index for every input index
    input_event_end_indices: end target index for every input index
  Args:
    ds: Input dataset.
    spectrogram_config: Spectrogram configuration.
    codec: Event vocabulary codec.
    is_training_data: Unused.
    onsets_only: If True, include only onset events (not offset, velocity, or
      program).
    include_ties: If True, also write state events containing active notes to
      support a "tie" section after run-length encoding.
    audio_is_samples: If True, audio is floating-point samples instead of
      serialized WAV.
    id_feature_key: If not None, replace sequence ID with specified key field
      from the dataset.
  Returns:
    Dataset with the outputs described above.
  """
  # Accepted only for interface parity with sibling tokenizers.
  del is_training_data
  if onsets_only and include_ties:
    raise ValueError('Ties not supported when only modeling onsets.')
  def tokenize(sequence, audio, sample_rate, example_id=None):
    ns = note_seq.NoteSequence.FromString(sequence)
    note_sequences.validate_note_sequence(ns)
    if example_id is not None:
      ns.id = example_id
    if audio_is_samples:
      samples = audio
      if sample_rate != spectrogram_config.sample_rate:
        # NOTE(review): positional librosa.resample(y, orig_sr, target_sr)
        # args became keyword-only in librosa 0.10 — confirm pinned version.
        samples = librosa.resample(
            samples, sample_rate, spectrogram_config.sample_rate)
    else:
      samples = note_seq.audio_io.wav_data_to_samples_librosa(
          audio, sample_rate=spectrogram_config.sample_rate)
    logging.info('Got samples for %s::%s with length %d',
                 ns.id, ns.filename, len(samples))
    frames, frame_times = _audio_to_frames(samples, spectrogram_config)
    if onsets_only:
      times, values = note_sequences.note_sequence_to_onsets(ns)
    else:
      # Sustain pedal is folded into note durations before extracting events.
      ns = note_seq.apply_sustain_control_changes(ns)
      times, values = (
          note_sequences.note_sequence_to_onsets_and_offsets_and_programs(ns))
    # The original NoteSequence can have a lot of control changes we don't need;
    # delete them.
    del ns.control_changes[:]
    (events, event_start_indices, event_end_indices,
     state_events, state_event_indices) = (
         run_length_encoding.encode_and_index_events(
             state=note_sequences.NoteEncodingState() if include_ties else None,
             event_times=times,
             event_values=values,
             encode_event_fn=note_sequences.note_event_data_to_events,
             codec=codec,
             frame_times=frame_times,
             encoding_state_to_events_fn=(
                 note_sequences.note_encoding_state_to_events
                 if include_ties else None)))
    yield {
        'inputs': frames,
        'input_times': frame_times,
        'targets': events,
        'input_event_start_indices': event_start_indices,
        'input_event_end_indices': event_end_indices,
        'state_events': state_events,
        'input_state_event_indices': state_event_indices,
        'sequence': ns.SerializeToString()
    }
  def process_record(input_record):
    if audio_is_samples and 'sample_rate' not in input_record:
      raise ValueError('Must provide sample rate when audio is samples.')
    args = [
        input_record['sequence'],
        input_record['audio'],
        input_record['sample_rate'] if 'sample_rate' in input_record else 0
    ]
    if id_feature_key is not None:
      args.append(input_record[id_feature_key])
    # Wrap the generator so each input record expands into (up to) one
    # tokenized record with a fully specified output signature.
    ds = tf.data.Dataset.from_generator(
        tokenize,
        output_signature={
            'inputs':
                tf.TensorSpec(
                    shape=(None, spectrogram_config.hop_width),
                    dtype=tf.float32),
            'input_times':
                tf.TensorSpec(shape=(None,), dtype=tf.float32),
            'targets':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_event_start_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_event_end_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'state_events':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_state_event_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'sequence':
                tf.TensorSpec(shape=(), dtype=tf.string)
        },
        args=args)
    ds = _include_inputs(ds, input_record)
    return ds
  tokenized_records = ds.flat_map(process_record)
  return tokenized_records
def tokenize_guitarset_example(
    ds: tf.data.Dataset, spectrogram_config: spectrograms.SpectrogramConfig,
    codec: event_codec.Codec, is_training_data: bool,
    onsets_only: bool, include_ties: bool
) -> tf.data.Dataset:
  """Tokenize a GuitarSet transcription example.

  GuitarSet examples are single-track; this tags every example with the
  'Clean Guitar' instrument name and defers to the generic program-lookup
  tokenizer.
  """
  def _add_instrument_fields(ex, name):
    assert 'inst_names' not in ex, 'Key `inst_names` is already populated.'
    ex['inst_names'] = [name]
    ex['instrument_sequences'] = [ex.pop('sequence')]
    return ex
  ds = ds.map(
      lambda x: _add_instrument_fields(x, 'Clean Guitar'),
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return tokenize_example_with_program_lookup(
      ds,
      spectrogram_config=spectrogram_config,
      codec=codec,
      is_training_data=is_training_data,
      inst_name_to_program_fn=guitarset_instrument_to_program,
      onsets_only=onsets_only,
      include_ties=include_ties,
      id_feature_key='id')
def guitarset_instrument_to_program(instrument: str) -> int:
  """GuitarSet is all guitar, return the first MIDI guitar program."""
  if instrument != 'Clean Guitar':
    raise ValueError('Unknown GuitarSet instrument: %s' % instrument)
  # MIDI program 24 is the first guitar program (Acoustic Guitar, nylon).
  return 24
def tokenize_example_with_program_lookup(
    ds: tf.data.Dataset,
    spectrogram_config: spectrograms.SpectrogramConfig,
    codec: event_codec.Codec,
    is_training_data: bool,
    onsets_only: bool,
    include_ties: bool,
    inst_name_to_program_fn: Callable[[str], int],
    id_feature_key: Optional[str] = None
) -> tf.data.Dataset:
  """Tokenize an example, optionally looking up and assigning program numbers.
  This can be used by any dataset where a mapping function can be used to
  map from the inst_names feature to a set of program numbers.
  Args:
    ds: Input dataset.
    spectrogram_config: Spectrogram configuration.
    codec: Event vocabulary codec.
    is_training_data: Unused.
    onsets_only: If True, include only onset events (not offset & velocity).
    include_ties: If True, include tie events.
    inst_name_to_program_fn: A function used to map the instrument names
      in the `inst_names` feature of each example to a MIDI program number.
    id_feature_key: If not None, replace sequence ID with specified key field
      from the dataset.
  Returns:
    Dataset with the outputs described above.
  """
  # Accepted only for interface parity with sibling tokenizers.
  del is_training_data
  def tokenize(sequences, inst_names, audio, example_id=None):
    # Add all the notes from the tracks to a single NoteSequence.
    ns = note_seq.NoteSequence(ticks_per_quarter=220)
    tracks = [note_seq.NoteSequence.FromString(seq) for seq in sequences]
    assert len(tracks) == len(inst_names)
    for track, inst_name in zip(tracks, inst_names):
      program = inst_name_to_program_fn(
          inst_name.decode())
      # Note that there are no pitch bends in URMP data; the below block will
      # raise PitchBendError if one is encountered.
      add_track_to_notesequence(ns, track, program=program, is_drum=False,
                                ignore_pitch_bends=False)
    note_sequences.assign_instruments(ns)
    note_sequences.validate_note_sequence(ns)
    if example_id is not None:
      ns.id = example_id
    samples = note_seq.audio_io.wav_data_to_samples_librosa(
        audio, sample_rate=spectrogram_config.sample_rate)
    logging.info('Got samples for %s::%s with length %d',
                 ns.id, ns.filename, len(samples))
    frames, frame_times = _audio_to_frames(samples, spectrogram_config)
    if onsets_only:
      times, values = note_sequences.note_sequence_to_onsets(ns)
    else:
      times, values = (
          note_sequences.note_sequence_to_onsets_and_offsets_and_programs(ns))
    # The original NoteSequence can have a lot of control changes we don't need;
    # delete them.
    del ns.control_changes[:]
    (events, event_start_indices, event_end_indices,
     state_events, state_event_indices) = (
         run_length_encoding.encode_and_index_events(
             state=note_sequences.NoteEncodingState() if include_ties else None,
             event_times=times,
             event_values=values,
             encode_event_fn=note_sequences.note_event_data_to_events,
             codec=codec,
             frame_times=frame_times,
             encoding_state_to_events_fn=(
                 note_sequences.note_encoding_state_to_events
                 if include_ties else None)))
    yield {
        'inputs': frames,
        'input_times': frame_times,
        'targets': events,
        'input_event_start_indices': event_start_indices,
        'input_event_end_indices': event_end_indices,
        'state_events': state_events,
        'input_state_event_indices': state_event_indices,
        'sequence': ns.SerializeToString()
    }
  def process_record(input_record):
    args = [
        input_record['instrument_sequences'],
        input_record['inst_names'],
        input_record['audio'],
    ]
    if id_feature_key is not None:
      args.append(input_record[id_feature_key])
    ds = tf.data.Dataset.from_generator(
        tokenize,
        output_signature={
            'inputs':
                tf.TensorSpec(
                    shape=(None, spectrogram_config.hop_width),
                    dtype=tf.float32),
            'input_times':
                tf.TensorSpec(shape=(None,), dtype=tf.float32),
            'targets':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_event_start_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_event_end_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'state_events':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_state_event_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'sequence':
                tf.TensorSpec(shape=(), dtype=tf.string)
        },
        args=args)
    ds = _include_inputs(ds, input_record)
    return ds
  tokenized_records = ds.flat_map(process_record)
  return tokenized_records
# Mapping from URMP instrument code to General MIDI program number.
_URMP_INSTRUMENT_PROGRAMS = immutabledict({
    'vn': 40,   # violin
    'va': 41,   # viola
    'vc': 42,   # cello
    'db': 43,   # double bass
    'tpt': 56,  # trumpet
    'tbn': 57,  # trombone
    'tba': 58,  # tuba
    'hn': 60,   # French horn
    'sax': 64,  # saxophone
    'ob': 68,   # oboe
    'bn': 70,   # bassoon
    'cl': 71,   # clarinet
    'fl': 73    # flute
})
def urmp_instrument_to_program(urmp_instrument: str) -> int:
  """Fetch the program number associated with a given URMP instrument code."""
  program = _URMP_INSTRUMENT_PROGRAMS.get(urmp_instrument)
  if program is None:
    raise ValueError('unknown URMP instrument: %s' % urmp_instrument)
  return program
# Mapping from Slakh instrument class name to a representative General MIDI
# program number ('Drums' is handled separately; see
# slakh_class_to_program_and_is_drum).
_SLAKH_CLASS_PROGRAMS = immutabledict({
    'Acoustic Piano': 0,
    'Electric Piano': 4,
    'Chromatic Percussion': 8,
    'Organ': 16,
    'Acoustic Guitar': 24,
    'Clean Electric Guitar': 26,
    'Distorted Electric Guitar': 29,
    'Acoustic Bass': 32,
    'Electric Bass': 33,
    'Violin': 40,
    'Viola': 41,
    'Cello': 42,
    'Contrabass': 43,
    'Orchestral Harp': 46,
    'Timpani': 47,
    'String Ensemble': 48,
    'Synth Strings': 50,
    'Choir and Voice': 52,
    'Orchestral Hit': 55,
    'Trumpet': 56,
    'Trombone': 57,
    'Tuba': 58,
    'French Horn': 60,
    'Brass Section': 61,
    'Soprano/Alto Sax': 64,
    'Tenor Sax': 66,
    'Baritone Sax': 67,
    'Oboe': 68,
    'English Horn': 69,
    'Bassoon': 70,
    'Clarinet': 71,
    'Pipe': 73,
    'Synth Lead': 80,
    'Synth Pad': 88
})
def slakh_class_to_program_and_is_drum(slakh_class: str) -> Tuple[int, bool]:
  """Map Slakh class string to program number and boolean indicating drums."""
  # Drums have no melodic program; report program 0 with the drum flag set.
  if slakh_class == 'Drums':
    return 0, True
  if slakh_class not in _SLAKH_CLASS_PROGRAMS:
    raise ValueError('unknown Slakh class: %s' % slakh_class)
  return _SLAKH_CLASS_PROGRAMS[slakh_class], False
class PitchBendError(Exception):
  """Raised when a track contains pitch bends and they are not being ignored."""
  pass
def add_track_to_notesequence(ns: note_seq.NoteSequence,
                              track: note_seq.NoteSequence,
                              program: int, is_drum: bool,
                              ignore_pitch_bends: bool):
  """Add a track to a NoteSequence.

  Mutates `ns` in place: appends all of `track`'s notes (after applying
  sustain pedal) with the given program and drum flag, and extends
  `ns.total_time` as needed.

  Raises:
    PitchBendError: if `track` contains pitch bends and `ignore_pitch_bends`
      is False.
  """
  if track.pitch_bends and not ignore_pitch_bends:
    raise PitchBendError
  track_sus = note_seq.apply_sustain_control_changes(track)
  for note in track_sus.notes:
    note.program = program
    note.is_drum = is_drum
    # extend([note]) copies the note into the repeated proto field.
    ns.notes.extend([note])
    ns.total_time = max(ns.total_time, note.end_time)
def tokenize_slakh_example(
    ds: tf.data.Dataset,
    spectrogram_config: spectrograms.SpectrogramConfig,
    codec: event_codec.Codec,
    is_training_data: bool,
    onsets_only: bool,
    include_ties: bool,
    track_specs: Optional[Sequence[note_sequences.TrackSpec]],
    ignore_pitch_bends: bool
) -> tf.data.Dataset:
  """Tokenize a Slakh multitrack note transcription example.

  Unlike the generic tokenizers, program/drum assignment comes either from
  explicit `track_specs` or from the Slakh class name of each track, and
  examples whose tracks contain pitch bends (when not ignored) are silently
  skipped.
  """
  def tokenize(sequences, samples, sample_rate, inst_names, example_id):
    if sample_rate != spectrogram_config.sample_rate:
      # NOTE(review): positional librosa.resample(y, orig_sr, target_sr) args
      # became keyword-only in librosa 0.10 — confirm pinned version.
      samples = librosa.resample(
          samples, sample_rate, spectrogram_config.sample_rate)
    frames, frame_times = _audio_to_frames(samples, spectrogram_config)
    # Add all the notes from the tracks to a single NoteSequence.
    ns = note_seq.NoteSequence(ticks_per_quarter=220)
    tracks = [note_seq.NoteSequence.FromString(seq) for seq in sequences]
    assert len(tracks) == len(inst_names)
    if track_specs:
      # Specific tracks expected.
      assert len(tracks) == len(track_specs)
      for track, spec, inst_name in zip(tracks, track_specs, inst_names):
        # Make sure the instrument name matches what we expect.
        assert inst_name.decode() == spec.name
        try:
          add_track_to_notesequence(ns, track,
                                    program=spec.program, is_drum=spec.is_drum,
                                    ignore_pitch_bends=ignore_pitch_bends)
        except PitchBendError:
          # TODO(iansimon): is there a way to count these?
          # Returning from the generator drops this example entirely.
          return
    else:
      for track, inst_name in zip(tracks, inst_names):
        # Instrument name should be Slakh class.
        program, is_drum = slakh_class_to_program_and_is_drum(
            inst_name.decode())
        try:
          add_track_to_notesequence(ns, track, program=program, is_drum=is_drum,
                                    ignore_pitch_bends=ignore_pitch_bends)
        except PitchBendError:
          # TODO(iansimon): is there a way to count these?
          return
    note_sequences.assign_instruments(ns)
    note_sequences.validate_note_sequence(ns)
    if is_training_data:
      # Trim overlapping notes in training (as our event vocabulary cannot
      # represent them), but preserve original NoteSequence for eval.
      ns = note_sequences.trim_overlapping_notes(ns)
    ns.id = example_id
    if onsets_only:
      times, values = note_sequences.note_sequence_to_onsets(ns)
    else:
      times, values = (
          note_sequences.note_sequence_to_onsets_and_offsets_and_programs(ns))
    (events, event_start_indices, event_end_indices,
     state_events, state_event_indices) = (
         run_length_encoding.encode_and_index_events(
             state=note_sequences.NoteEncodingState() if include_ties else None,
             event_times=times,
             event_values=values,
             encode_event_fn=note_sequences.note_event_data_to_events,
             codec=codec,
             frame_times=frame_times,
             encoding_state_to_events_fn=(
                 note_sequences.note_encoding_state_to_events
                 if include_ties else None)))
    yield {
        'inputs': frames,
        'input_times': frame_times,
        'targets': events,
        'input_event_start_indices': event_start_indices,
        'input_event_end_indices': event_end_indices,
        'state_events': state_events,
        'input_state_event_indices': state_event_indices,
        'sequence': ns.SerializeToString()
    }
  def process_record(input_record):
    ds = tf.data.Dataset.from_generator(
        tokenize,
        output_signature={
            'inputs':
                tf.TensorSpec(
                    shape=(None, spectrogram_config.hop_width),
                    dtype=tf.float32),
            'input_times':
                tf.TensorSpec(shape=(None,), dtype=tf.float32),
            'targets':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_event_start_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_event_end_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'state_events':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'input_state_event_indices':
                tf.TensorSpec(shape=(None,), dtype=tf.int32),
            'sequence':
                tf.TensorSpec(shape=(), dtype=tf.string)
        },
        args=[
            input_record['note_sequences'], input_record['mix'],
            input_record['audio_sample_rate'], input_record['inst_names'],
            input_record['track_id']
        ])
    ds = _include_inputs(ds, input_record, fields_to_omit=['mix', 'stems'])
    return ds
  tokenized_records = ds.flat_map(process_record)
  return tokenized_records
@seqio.map_over_dataset
def compute_spectrograms(ex, spectrogram_config):
  """Flatten framed audio and replace 'inputs' with its spectrogram."""
  samples = spectrograms.flatten_frames(ex['inputs'])
  ex['inputs'] = spectrograms.compute_spectrogram(samples, spectrogram_config)
  # Keep the raw (flattened) samples around for downstream use/debugging.
  ex['raw_inputs'] = samples
  return ex
def handle_too_long(dataset: tf.data.Dataset,
                    output_features: seqio.preprocessors.OutputFeaturesType,
                    sequence_length: seqio.preprocessors.SequenceLengthType,
                    skip: bool = False) -> tf.data.Dataset:
  """Handle sequences that are too long, by either failing or skipping them."""
  def max_length_for_key(key):
    # Reserve one position for EOS if the feature will get one appended.
    max_length = sequence_length[key]
    if output_features[key].add_eos:
      max_length -= 1
    return max_length
  if skip:
    # Drop examples where one of the features is longer than its maximum
    # sequence length.
    def is_not_too_long(ex):
      return not tf.reduce_any(
          [k in output_features and len(v) > max_length_for_key(k)
           for k, v in ex.items()])
    dataset = dataset.filter(is_not_too_long)
  def assert_not_too_long(key: str, value: tf.Tensor) -> tf.Tensor:
    if key in output_features:
      max_length = max_length_for_key(key)
      tf.debugging.assert_less_equal(
          tf.shape(value)[0], max_length,
          f'Value for "{key}" field exceeds maximum length')
    return value
  # Assert that no examples have features longer than their maximum sequence
  # length.  (When skip=True the filter above already removed them, so the
  # assertion is a no-op safety net.)
  return dataset.map(
      lambda ex: {k: assert_not_too_long(k, v) for k, v in ex.items()},
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
@gin.configurable
def map_midi_programs(
    ds: tf.data.Dataset,
    codec: event_codec.Codec,
    granularity_type: str = 'full',
    feature_key: str = 'targets'
) -> tf.data.Dataset:
  """Apply MIDI program map to token sequences.

  Fix: the return annotation previously claimed Mapping[str, Any], but the
  function returns the mapped tf.data.Dataset (consistent with the other
  dataset transforms in this module).

  Args:
    ds: Input dataset of tokenized examples.
    codec: Event vocabulary codec used by the granularity's token map.
    granularity_type: Key into vocabularies.PROGRAM_GRANULARITIES selecting
      how program tokens are remapped.
    feature_key: Name of the feature whose tokens are remapped.

  Returns:
    Dataset with program tokens in `feature_key` remapped.
  """
  granularity = vocabularies.PROGRAM_GRANULARITIES[granularity_type]
  def _map_program_tokens(ex):
    ex[feature_key] = granularity.tokens_map_fn(ex[feature_key], codec)
    return ex
  return ds.map(_map_program_tokens,
                num_parallel_calls=tf.data.experimental.AUTOTUNE)
| 23,632 | 34.273134 | 80 | py |
mt3 | mt3-main/mt3/event_codec.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encode and decode events."""
import dataclasses
from typing import List, Tuple
@dataclasses.dataclass
class EventRange:
  """Inclusive value range for a named event type."""
  type: str
  min_value: int
  max_value: int


@dataclasses.dataclass
class Event:
  """A single typed event with an integer value."""
  type: str
  value: int


class Codec:
  """Encode and decode events.

  Maps typed events onto a contiguous integer vocabulary and back. Intended
  to be used from Python before encoding or after decoding with
  GenericTokenVocabulary; it is deliberately lightweight and has no notion of
  EOS or UNK tokens.

  'shift' events are required, always form the first block of the vocabulary,
  and start at index 0; their range is specified separately from the other
  event types.
  """

  def __init__(self, max_shift_steps: int, steps_per_second: float,
               event_ranges: List[EventRange]):
    """Define Codec.

    Args:
      max_shift_steps: Maximum number of shift steps that can be encoded.
      steps_per_second: Shift steps will be interpreted as having a duration
        of 1 / steps_per_second.
      event_ranges: Other supported event types and their ranges.
    """
    self.steps_per_second = steps_per_second
    self._shift_range = EventRange(
        type='shift', min_value=0, max_value=max_shift_steps)
    self._event_ranges = [self._shift_range] + event_ranges
    # Event type names must be unique across all ranges.
    type_names = [rng.type for rng in self._event_ranges]
    assert len(type_names) == len(set(type_names))

  @property
  def num_classes(self) -> int:
    """Total vocabulary size across all event ranges."""
    total = 0
    for rng in self._event_ranges:
      total += rng.max_value - rng.min_value + 1
    return total

  # The next couple methods are simplified special cases just for shift
  # events, intended to be usable from within autograph functions.
  def is_shift_event_index(self, index: int) -> bool:
    return (self._shift_range.min_value <= index) and (
        index <= self._shift_range.max_value)

  @property
  def max_shift_steps(self) -> int:
    return self._shift_range.max_value

  def encode_event(self, event: Event) -> int:
    """Encode an event to an index."""
    offset = 0
    for rng in self._event_ranges:
      if event.type != rng.type:
        offset += rng.max_value - rng.min_value + 1
        continue
      if not rng.min_value <= event.value <= rng.max_value:
        raise ValueError(
            f'Event value {event.value} is not within valid range '
            f'[{rng.min_value}, {rng.max_value}] for type {event.type}')
      return offset + event.value - rng.min_value
    raise ValueError(f'Unknown event type: {event.type}')

  def event_type_range(self, event_type: str) -> Tuple[int, int]:
    """Return [min_id, max_id] for an event type."""
    offset = 0
    for rng in self._event_ranges:
      if event_type == rng.type:
        return offset, offset + (rng.max_value - rng.min_value)
      offset += rng.max_value - rng.min_value + 1
    raise ValueError(f'Unknown event type: {event_type}')

  def decode_event_index(self, index: int) -> Event:
    """Decode an event index to an Event."""
    offset = 0
    for rng in self._event_ranges:
      span = rng.max_value - rng.min_value
      if offset <= index <= offset + span:
        return Event(type=rng.type, value=rng.min_value + index - offset)
      offset += span + 1
    raise ValueError(f'Unknown event index: {index}')
| 3,898 | 33.504425 | 80 | py |
mt3 | mt3-main/mt3/scripts/extract_monophonic_examples.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detect monophonic tracks and extract notes."""
import collections
import os
from absl import app
from absl import flags
from absl import logging
import ddsp
import librosa
import note_seq
import numpy as np
import scipy
import tensorflow as tf
# Command-line flags: where to read WAV files and where to write examples.
_INPUT_DIR = flags.DEFINE_string(
    'input_dir', None,
    'Input directory containing WAV files.')
_OUTPUT_TFRECORD_PATH = flags.DEFINE_string(
    'output_tfrecord_path', None,
    'Path to the output TFRecord containing tf.train.Example protos with '
    'monophonic tracks and inferred NoteSequence protos.')
# Audio sample rate (Hz) and f0 frame rate (frames/sec) used with CREPE.
CREPE_SAMPLE_RATE = 16000
CREPE_FRAME_RATE = 100
MONOPHONIC_CONFIDENCE_THRESHOLD = 0.95  # confidence must be at least this
MONOPHONIC_CONFIDENCE_FRAC = 0.2  # for this fraction of frames
# split input audio into clips
CLIP_LENGTH_SECONDS = 5
def is_monophonic_heuristic(f0_confidence):
  """Heuristic to check for monophonicity using f0 confidence.

  A clip is considered monophonic when at least MONOPHONIC_CONFIDENCE_FRAC of
  its frames have f0 confidence >= MONOPHONIC_CONFIDENCE_THRESHOLD.
  """
  confident_frames = f0_confidence >= MONOPHONIC_CONFIDENCE_THRESHOLD
  confident_frac = np.sum(confident_frames) / len(f0_confidence)
  return confident_frac >= MONOPHONIC_CONFIDENCE_FRAC
# HMM parameters for modeling notes and F0 tracks.
F0_MIDI_SIGMA = 0.2  # stddev (MIDI semitones) of observed F0 around the note pitch
OCTAVE_ERROR_PROB = 0.05  # probability mass given to +/- one octave errors
NOTES_PER_SECOND = 2  # expected note rate, converted to a per-frame change prob below
NOTE_CHANGE_PROB = NOTES_PER_SECOND / CREPE_FRAME_RATE
F0_CONFIDENCE_EXP = 7.5  # exponent applied to confidence before treating it as P(not rest)
def f0_hmm_matrices(f0_hz, f0_confidence):
  """Observation and transition matrices for hidden Markov model of F0.

  States 0..127 are MIDI pitches; state 128 is "rest".

  Args:
    f0_hz: per-frame F0 estimates in Hz.
    f0_confidence: per-frame CREPE confidence values in [0, 1].

  Returns:
    (obs_loglik, trans_loglik): [n_frames, 129] observation matrix and
    [129, 129] transition matrix.
  """
  f0_midi = librosa.hz_to_midi(f0_hz)
  # [n_frames, 128] distance of each frame's F0 from each MIDI pitch.
  f0_midi_diff = f0_midi[:, np.newaxis] - np.arange(128)[np.newaxis, :]
  # Compute the probability of each pitch at each frame, taking octave errors
  # into account.
  f0_midi_prob_octave_correct = scipy.stats.norm.pdf(
      f0_midi_diff, scale=F0_MIDI_SIGMA)
  f0_midi_prob_octave_low = scipy.stats.norm.pdf(
      f0_midi_diff + 12, scale=F0_MIDI_SIGMA)
  f0_midi_prob_octave_high = scipy.stats.norm.pdf(
      f0_midi_diff - 12, scale=F0_MIDI_SIGMA)
  # distribution of pitch values given note
  # NOTE(review): despite its name, this is a (non-log) pdf mixture; it is
  # later *added* to log terms below — confirm this mixing of probability and
  # log-probability is intended.
  f0_midi_loglik = ((1 - OCTAVE_ERROR_PROB) * f0_midi_prob_octave_correct +
                    0.5 * OCTAVE_ERROR_PROB * f0_midi_prob_octave_low +
                    0.5 * OCTAVE_ERROR_PROB * f0_midi_prob_octave_high)
  # (uniform) distribution of pitch values given rest
  f0_midi_rest_loglik = -np.log(128)
  # Here we interpret confidence, after adjusting by exponent, as P(not rest).
  f0_confidence_prob = np.power(f0_confidence, F0_CONFIDENCE_EXP)[:, np.newaxis]
  obs_loglik = np.concatenate([
      # probability of note (normalized by number of possible notes)
      f0_midi_loglik + np.log(f0_confidence_prob) - np.log(128),
      # probability of rest
      f0_midi_rest_loglik + np.log(1.0 - f0_confidence_prob)
  ], axis=1)
  # Normalize to adjust P(confidence | note) by uniform P(note).
  # TODO(iansimon): Not sure how correct this is but it doesn't affect the path.
  obs_loglik += np.log(129)
  # Self-transitions dominate; the remaining NOTE_CHANGE_PROB mass is spread
  # uniformly over the other states.
  trans_prob = ((NOTE_CHANGE_PROB / 128) * np.ones(129) +
                (1 - NOTE_CHANGE_PROB - NOTE_CHANGE_PROB / 128) * np.eye(129))
  trans_loglik = np.log(trans_prob)
  return obs_loglik, trans_loglik
def hmm_forward(obs_loglik, trans_loglik):
  """Forward algorithm for a hidden Markov model.

  Args:
    obs_loglik: [n, k] per-frame observation log likelihoods.
    trans_loglik: [k, k] transition log likelihoods.

  Returns:
    Total log likelihood of the observations (uniform prior over states).
  """
  num_frames, num_states = obs_loglik.shape
  trans_prob = np.exp(trans_loglik)
  # Uniform prior over states at the first frame.
  frame_loglik = obs_loglik[0] - np.log(num_states)
  norm = scipy.special.logsumexp(frame_loglik)
  total_loglik = norm
  for frame in range(1, num_frames):
    # Renormalize to probabilities, propagate through transitions, observe.
    state_prob = np.exp(frame_loglik - norm)
    frame_loglik = np.log(state_prob @ trans_prob) + obs_loglik[frame]
    norm = scipy.special.logsumexp(frame_loglik)
    total_loglik += norm
  return total_loglik
def hmm_viterbi(obs_loglik, trans_loglik):
  """Viterbi algorithm for a hidden Markov model.

  Generalized to any number of states k (previously hardcoded to 129): the
  state count is taken from the observation matrix, and the final state
  (index k - 1) is interpreted as "rest" and decoded as None. Behavior is
  unchanged for the k=129 matrices produced by f0_hmm_matrices.

  Args:
    obs_loglik: [n, k] per-frame observation log likelihoods.
    trans_loglik: [k, k] transition log likelihoods.

  Returns:
    Length-n list: integer state index per frame, or None for rest frames.
  """
  n, k = obs_loglik.shape
  loglik_matrix = np.zeros_like(obs_loglik)
  path_matrix = np.zeros_like(obs_loglik, dtype=np.int32)
  # Uniform prior over states at the first frame.
  loglik_matrix[0, :] = obs_loglik[0, :] - np.log(k)
  for i in range(1, n):
    # mat[s, t] = best log likelihood of ending in state s at frame i-1 and
    # transitioning s -> t. Broadcasting replaces the previous np.tile copy.
    mat = loglik_matrix[i - 1][:, np.newaxis] + trans_loglik
    path_matrix[i, :] = mat.argmax(axis=0)
    loglik_matrix[i, :] = mat[path_matrix[i, :], range(k)] + obs_loglik[i]
  # Backtrack from the most likely final state.
  path = [np.argmax(loglik_matrix[-1])]
  for i in range(n, 1, -1):
    path.append(path_matrix[i - 1, path[-1]])
  return [(pitch if pitch < k - 1 else None) for pitch in path[::-1]]
def pitches_to_notesequence(pitches):
  """Convert sequence of pitches output by Viterbi to NoteSequence proto.

  Consecutive frames with the same pitch are merged into a single note;
  None entries (rests) terminate the active note without emitting one.
  """
  ns = note_seq.NoteSequence(ticks_per_quarter=220)
  def _emit(pitch, onset, offset):
    # Append one note to the sequence; rests are never passed here.
    ns.notes.add(pitch=pitch, velocity=100, start_time=onset, end_time=offset)
  active_pitch = None
  active_onset = None
  for frame_idx, frame_pitch in enumerate(pitches):
    frame_time = frame_idx / CREPE_FRAME_RATE
    if frame_pitch == active_pitch:
      continue
    if active_pitch is not None:
      _emit(active_pitch, active_onset, frame_time)
    active_pitch = frame_pitch
    active_onset = frame_time
  if active_pitch is not None:
    # Close out the final note at the end of the clip.
    _emit(active_pitch, active_onset, len(pitches) / CREPE_FRAME_RATE)
  if ns.notes:
    ns.total_time = ns.notes[-1].end_time
  return ns
# Per-frame log likelihood threshold below which an F0 track will be discarded.
# Note that this is dependent on the HMM parameters specified above, so if those
# change then this threshold should also change.
PER_FRAME_LOGLIK_THRESHOLD = 0.3
def extract_note_sequence(crepe, samples, counters):
  """Use CREPE to attempt to extract a monophonic NoteSequence from audio.

  Args:
    crepe: pretrained CREPE model exposing predict_f0_and_confidence.
    samples: 1-D array of audio samples for a single clip.
    counters: mutable mapping of counter name -> int; incremented in place.

  Returns:
    A NoteSequence on success, or None if the clip fails the monophonicity
    heuristic or the HMM likelihood threshold.
  """
  f0_hz, f0_confidence = crepe.predict_f0_and_confidence(
      samples[np.newaxis, :], viterbi=False)
  # Strip the leading batch dimension added above.
  f0_hz = f0_hz[0].numpy()
  f0_confidence = f0_confidence[0].numpy()
  if not is_monophonic_heuristic(f0_confidence):
    counters['not_monophonic'] += 1
    return None
  obs_loglik, trans_loglik = f0_hmm_matrices(f0_hz, f0_confidence)
  loglik = hmm_forward(obs_loglik, trans_loglik)
  # Discard tracks the HMM fits poorly (per-frame average log likelihood).
  if loglik / len(obs_loglik) < PER_FRAME_LOGLIK_THRESHOLD:
    counters['low_likelihood'] += 1
    return None
  pitches = hmm_viterbi(obs_loglik, trans_loglik)
  ns = pitches_to_notesequence(pitches)
  counters['extracted_monophonic_sequence'] += 1
  return ns
def process_wav_file(wav_filename, crepe, counters):
  """Extract monophonic transcription examples from a WAV file.

  Splits the file into fixed-length clips (the last clip is zero-padded),
  runs extract_note_sequence on each, and yields one tf.train.Example per
  clip that passed the monophonicity checks.

  Args:
    wav_filename: path to the input WAV file.
    crepe: pretrained CREPE model.
    counters: mutable mapping of counter name -> int; incremented in place.

  Yields:
    tf.train.Example protos with audio, filename, offset, sampling_rate and
    serialized NoteSequence features.
  """
  wav_data = tf.io.gfile.GFile(wav_filename, 'rb').read()
  samples = note_seq.audio_io.wav_data_to_samples_librosa(
      wav_data, sample_rate=CREPE_SAMPLE_RATE)
  clip_length_samples = int(CREPE_SAMPLE_RATE * CLIP_LENGTH_SECONDS)
  for start_sample in range(0, len(samples), clip_length_samples):
    clip_samples = samples[start_sample:start_sample + clip_length_samples]
    if len(clip_samples) < clip_length_samples:
      # Zero-pad the final partial clip to the full clip length.
      clip_samples = np.pad(
          clip_samples, [(0, clip_length_samples - len(clip_samples))])
    ns = extract_note_sequence(crepe, clip_samples, counters)
    if ns:
      feature = {
          'audio': tf.train.Feature(
              float_list=tf.train.FloatList(value=clip_samples.tolist())),
          'filename': tf.train.Feature(
              bytes_list=tf.train.BytesList(value=[wav_filename.encode()])),
          'offset': tf.train.Feature(
              int64_list=tf.train.Int64List(value=[start_sample])),
          'sampling_rate': tf.train.Feature(
              float_list=tf.train.FloatList(value=[CREPE_SAMPLE_RATE])),
          'sequence': tf.train.Feature(
              bytes_list=tf.train.BytesList(value=[ns.SerializeToString()]))
      }
      yield tf.train.Example(features=tf.train.Features(feature=feature))
def main(unused_argv):
  """Process every WAV file in --input_dir into --output_tfrecord_path."""
  flags.mark_flags_as_required(['input_dir', 'output_tfrecord_path'])
  crepe = ddsp.spectral_ops.PretrainedCREPE('full')
  counters = collections.defaultdict(int)
  with tf.io.TFRecordWriter(_OUTPUT_TFRECORD_PATH.value) as writer:
    for filename in tf.io.gfile.listdir(_INPUT_DIR.value):
      if not filename.endswith('.wav'):
        logging.info('skipping %s...', filename)
        counters['non_wav_files_skipped'] += 1
        continue
      logging.info('processing %s...', filename)
      for ex in process_wav_file(
          os.path.join(_INPUT_DIR.value, filename), crepe, counters):
        writer.write(ex.SerializeToString())
      counters['wav_files_processed'] += 1
  # Summarize per-outcome counts accumulated across all files.
  for k, v in counters.items():
    logging.info('COUNTER: %s = %d', k, v)
if __name__ == '__main__':
  # absl entry point: parses flags, then invokes main.
  app.run(main)
| 8,754 | 33.742063 | 80 | py |
mt3 | mt3-main/mt3/scripts/dump_task.py | # Copyright 2023 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple debugging utility for printing out task contents."""
import re
from absl import app
from absl import flags
import mt3.tasks # pylint: disable=unused-import
import seqio
import tensorflow as tf
FLAGS = flags.FLAGS
# Flags selecting the task, cache location, and how to render examples.
flags.DEFINE_string("task", None, "A registered Task.")
flags.DEFINE_string("task_cache_dir", None, "Directory to use for task cache.")
flags.DEFINE_integer("max_examples", 10,
                     "Maximum number of examples (-1 for no limit).")
flags.DEFINE_string("format_string", "targets = {targets}",
                    "Format for printing examples.")
flags.DEFINE_string("split", "train",
                    "Which split of the dataset, e.g. train or validation.")
flags.DEFINE_integer("sequence_length_inputs", 256,
                     "Sequence length for inputs.")
flags.DEFINE_integer("sequence_length_targets", 1024,
                     "Sequence length for targets.")
def main(_):
  """Print shapes and decoded contents of examples from a seqio task."""
  if FLAGS.task_cache_dir:
    seqio.add_global_cache_dirs([FLAGS.task_cache_dir])
  task = seqio.get_mixture_or_task(FLAGS.task)
  ds = task.get_dataset(
      sequence_length={
          "inputs": FLAGS.sequence_length_inputs,
          "targets": FLAGS.sequence_length_targets,
      },
      split=FLAGS.split,
      use_cached=bool(FLAGS.task_cache_dir),
      shuffle=False)
  # Feature keys referenced by --format_string, e.g. "{targets}" -> "targets".
  keys = re.findall(r"{([\w+]+)}", FLAGS.format_string)
  def _example_to_string(ex):
    # Decode each referenced feature with the task vocabulary; missing keys
    # render as empty strings.
    key_to_string = {}
    for k in keys:
      if k in ex:
        v = ex[k].numpy().tolist()
        key_to_string[k] = task.output_features[k].vocabulary.decode(v)
      else:
        key_to_string[k] = ""
    return FLAGS.format_string.format(**key_to_string)
  for ex in ds.take(FLAGS.max_examples):
    for k, v in ex.items():
      print(f"{k}: {tf.shape(v)}")
    print(_example_to_string(ex))
    print()
if __name__ == "__main__":
  # --task is mandatory; fail fast before building the dataset.
  flags.mark_flags_as_required(["task"])
  app.run(main)
| 2,485 | 29.691358 | 79 | py |
FairAC | FairAC-main/src/utils.py | #%%
import numpy as np
import scipy.sparse as sp
import torch
import os
import pandas as pd
import dgl
def encode_onehot(labels):
    """One-hot encode a 1-D sequence of class labels.

    Args:
        labels: iterable of hashable, orderable class labels (e.g. strings).

    Returns:
        int32 array of shape (len(labels), num_classes).

    Classes are mapped to columns in sorted order so the encoding is
    deterministic across interpreter runs; iterating a plain `set` (as the
    previous version did) varies under string-hash randomization. A single
    identity matrix is built once instead of once per class.
    """
    classes = sorted(set(labels))
    identity = np.identity(len(classes))
    classes_dict = {c: identity[i, :] for i, c in enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)),
                             dtype=np.int32)
    return labels_onehot
#%%
#%%
def load_data(path="../dataset/cora/", dataset="cora"):
    """Load citation network dataset (cora only for now).

    Reads <path><dataset>.content (node id, features, label) and
    <path><dataset>.cites (edge list), builds a symmetric, normalized
    adjacency matrix, and returns fixed train/val/test index splits.

    Returns:
        adj (torch sparse tensor), features (FloatTensor), labels
        (LongTensor of class indices), idx_train, idx_val, idx_test.
    """
    print('Loading {} dataset...'.format(dataset))
    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
                                        dtype=np.dtype(str))
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    labels = encode_onehot(idx_features_labels[:, -1])
    print(labels)
    # build graph
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    # Map raw node ids to contiguous row indices.
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
                                    dtype=np.int32)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # features = normalize(features)
    # Add self-loops, then row-normalize.
    adj = normalize(adj + sp.eye(adj.shape[0]))
    # Fixed split indices (cora convention).
    idx_train = range(140)
    idx_val = range(200, 500)
    idx_test = range(500, 1500)
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(np.where(labels)[1])
    adj = sparse_mx_to_torch_sparse_tensor(adj)
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return adj, features, labels, idx_train, idx_val, idx_test
def load_pokec(dataset,sens_attr,predict_attr, path="../dataset/pokec/", label_number=1000,sens_number=500,seed=19,test_idx=False):
    """Load a Pokec-style dataset with a sensitive attribute.

    Reads <path>/<dataset>.csv (node features) and
    <path>/<dataset>_relationship.txt (edge list). Nodes with label >= 0 are
    split 50/25/25 into train/val/test (train capped at label_number); the
    sensitive-attribute training set is drawn from nodes with sens >= 0 that
    are outside val/test.

    Args:
        dataset: base filename of the csv / relationship files.
        sens_attr: column name of the sensitive attribute.
        predict_attr: column name of the prediction target.
        path: dataset directory.
        label_number: max number of labeled training nodes.
        sens_number: number of nodes with known sensitive attribute for training.
        seed: RNG seed for the shuffles below (reproducible splits).
        test_idx: if True, use all labeled nodes beyond label_number as test
            and alias val to test.

    Returns:
        adj (scipy sparse, symmetric, with self-loops), features, labels,
        idx_train, idx_val, idx_test, sens, idx_sens_train.
    """
    print('Loading {} dataset from {}'.format(dataset,path))
    idx_features_labels = pd.read_csv(os.path.join(path,"{}.csv".format(dataset)))
    header = list(idx_features_labels.columns)
    header.remove("user_id")
    header.remove(sens_attr)
    header.remove(predict_attr)
    features = sp.csr_matrix(idx_features_labels[header], dtype=np.float32)
    labels = idx_features_labels[predict_attr].values
    # build graph
    idx = np.array(idx_features_labels["user_id"], dtype=int)
    # Map raw user ids to contiguous row indices.
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt(os.path.join(path,"{}_relationship.txt".format(dataset)), dtype=int)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=int).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # features = normalize(features)
    adj = adj + sp.eye(adj.shape[0])
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(labels)
    # adj = sparse_mx_to_torch_sparse_tensor(adj)
    import random
    random.seed(seed)
    # Only nodes with a non-negative label participate in the splits.
    label_idx = np.where(labels>=0)[0]
    random.shuffle(label_idx)
    idx_train = label_idx[:min(int(0.5 * len(label_idx)),label_number)]
    idx_val = label_idx[int(0.5 * len(label_idx)):int(0.75 * len(label_idx))]
    if test_idx:
        idx_test = label_idx[label_number:]
        idx_val = idx_test
    else:
        idx_test = label_idx[int(0.75 * len(label_idx)):]
    sens = idx_features_labels[sens_attr].values
    sens_idx = set(np.where(sens >= 0)[0])
    # Keep only test nodes whose sensitive attribute is known.
    idx_test = np.asarray(list(sens_idx & set(idx_test)))
    sens = torch.FloatTensor(sens)
    idx_sens_train = list(sens_idx - set(idx_val) - set(idx_test))
    random.seed(seed)
    random.shuffle(idx_sens_train)
    idx_sens_train = torch.LongTensor(idx_sens_train[:sens_number])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    # random.shuffle(sens_idx)
    return adj, features, labels, idx_train, idx_val, idx_test, sens,idx_sens_train
def normalize(mx):
    """Row-normalize sparse matrix (rows summing to zero are left as zero)."""
    row_sums = np.asarray(mx.sum(1)).flatten()
    inv_row_sums = np.power(row_sums, -1)
    # Zero rows produce inf; map those back to zero so the row stays empty.
    inv_row_sums[np.isinf(inv_row_sums)] = 0.
    return sp.diags(inv_row_sums).dot(mx)
def feature_norm(features):
    """Min-max scale each column of a 2-D tensor into [-1, 1]."""
    col_min, _ = features.min(axis=0)
    col_max, _ = features.max(axis=0)
    span = col_max - col_min
    return (features - col_min).div(span).mul(2).sub(1)
def accuracy(output, labels):
    """Binary accuracy: logits > 0 count as the positive class."""
    preds = (output.squeeze() > 0).type_as(labels)
    num_correct = preds.eq(labels).double().sum()
    return num_correct / len(labels)
def accuracy_softmax(output, labels):
    """Multi-class accuracy: predicted class is the argmax over scores."""
    preds = output.max(1)[1].type_as(labels)
    num_correct = preds.eq(labels).double().sum()
    return num_correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Args:
        sparse_mx: any scipy sparse matrix (converted to COO, float32).

    Returns:
        torch sparse COO tensor with the same shape and values.
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor is the deprecated legacy constructor;
    # torch.sparse_coo_tensor is the supported equivalent.
    return torch.sparse_coo_tensor(indices, values, shape)
#%%
#%%
def load_pokec_emb(dataset,sens_attr,predict_attr, path="../dataset/pokec/", label_number=1000,sens_number=500,seed=19,test_idx=False):
    """Load a Pokec-style dataset with precomputed graph embeddings appended.

    Same pipeline as load_pokec, but first left-joins per-node embeddings
    from <path>/<dataset>.embedding onto the csv features (missing rows are
    zero-filled), so the returned feature matrix contains raw attributes
    plus embedding columns.

    Returns:
        adj (scipy sparse, symmetric, with self-loops), features, labels,
        idx_train, idx_val, idx_test, sens, idx_sens_train.
    """
    print('Loading {} dataset from {}'.format(dataset,path))
    graph_embedding = np.genfromtxt(
        os.path.join(path,"{}.embedding".format(dataset)),
        skip_header=1,
        dtype=float
    )
    embedding_df = pd.DataFrame(graph_embedding)
    # Column 0 of the embedding file holds the node (user) id.
    embedding_df[0] = embedding_df[0].astype(int)
    embedding_df = embedding_df.rename(index=int, columns={0:"user_id"})
    idx_features_labels = pd.read_csv(os.path.join(path,"{}.csv".format(dataset)))
    idx_features_labels = pd.merge(idx_features_labels,embedding_df,how="left",on="user_id")
    # Nodes without an embedding row get zeros.
    idx_features_labels = idx_features_labels.fillna(0)
    #%%
    header = list(idx_features_labels.columns)
    header.remove("user_id")
    header.remove(sens_attr)
    header.remove(predict_attr)
    features = sp.csr_matrix(idx_features_labels[header], dtype=np.float32)
    labels = idx_features_labels[predict_attr].values
    #%%
    # build graph
    idx = np.array(idx_features_labels["user_id"], dtype=int)
    # Map raw user ids to contiguous row indices.
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt(os.path.join(path,"{}_relationship.txt".format(dataset)), dtype=int)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    adj = adj + sp.eye(adj.shape[0])
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(labels)
    import random
    random.seed(seed)
    # Only nodes with a non-negative label participate in the splits.
    label_idx = np.where(labels>=0)[0]
    random.shuffle(label_idx)
    idx_train = label_idx[:min(int(0.5 * len(label_idx)),label_number)]
    idx_val = label_idx[int(0.5 * len(label_idx)):int(0.75 * len(label_idx))]
    if test_idx:
        idx_test = label_idx[label_number:]
    else:
        idx_test = label_idx[int(0.75 * len(label_idx)):]
    sens = idx_features_labels[sens_attr].values
    sens_idx = set(np.where(sens >= 0)[0])
    # Keep only test nodes whose sensitive attribute is known.
    idx_test = np.asarray(list(sens_idx & set(idx_test)))
    sens = torch.FloatTensor(sens)
    idx_sens_train = list(sens_idx - set(idx_val) - set(idx_test))
    random.seed(seed)
    random.shuffle(idx_sens_train)
    idx_sens_train = torch.LongTensor(idx_sens_train[:sens_number])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return adj, features, labels, idx_train, idx_val, idx_test, sens, idx_sens_train
FairAC | FairAC-main/src/train_fairAC_GNN_report.py | import time
import argparse
import dgl
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn.functional as F
from utils import accuracy, load_pokec
from models.FairAC import FairAC2, GNN
def parser_args():
    """Parse command-line arguments for FairAC/GNN training.

    Uses parse_known_args so unrecognized flags are ignored, and derives
    args.cuda from --no-cuda and hardware availability.
    """
    # Training settings
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='Disables CUDA training.')
    parser.add_argument('--seed', type=int, default=42, help='Random seed.')
    parser.add_argument('--epochs', type=int, default=2000,
                        help='Number of epochs to train.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Initial learning rate.')
    parser.add_argument('--weight_decay', type=float, default=1e-5,
                        help='Weight decay (L2 loss on parameters).')
    parser.add_argument('--hidden', type=int, default=128,
                        help='Number of hidden units of the sensitive attribute estimator')
    parser.add_argument('--dropout', type=float, default=.5,
                        help='Dropout rate (1 - keep probability).')
    parser.add_argument('--lambda1', type=float, default=1.,
                        help='The hyperparameter of loss Lc')
    parser.add_argument('--lambda2', type=float, default=1.,
                        help='The hyperparameter of loss Lt, i.e. beta in paper')
    parser.add_argument('--model', type=str, default="GAT",
                        help='the type of model GCN/GAT')
    parser.add_argument('--dataset', type=str, default='pokec_n',
                        choices=['pokec_z', 'pokec_n', 'nba'])
    parser.add_argument('--num-hidden', type=int, default=64,
                        help='Number of hidden units of classifier.')
    parser.add_argument("--num-heads", type=int, default=1,
                        help="number of hidden attention heads")
    parser.add_argument("--num-out-heads", type=int, default=1,
                        help="number of output attention heads")
    parser.add_argument("--num-layers", type=int, default=1,
                        help="number of hidden layers")
    parser.add_argument("--residual", action="store_true", default=False,
                        help="use residual connection")
    parser.add_argument("--attn-drop", type=float, default=.0,
                        help="attention dropout")
    parser.add_argument('--negative-slope', type=float, default=0.2,
                        help="the negative slope of leaky relu")
    parser.add_argument('--acc', type=float, default=0.688,
                        help='the selected FairGNN accuracy on val would be at least this high')
    parser.add_argument('--roc', type=float, default=0.745,
                        help='the selected FairGNN ROC score on val would be at least this high')
    parser.add_argument('--sens_number', type=int, default=200,
                        help="the number of sensitive attributes")
    parser.add_argument('--label_number', type=int, default=500,
                        help="the number of labels")
    parser.add_argument('--attn_vec_dim', type=int, default=128,
                        help="attention vector dim")
    parser.add_argument('--num_heads', type=int, default=1,
                        help="the number of attention heads")
    parser.add_argument('--feat_drop_rate', type=float, default=0.3,
                        help="feature dropout rate")
    parser.add_argument('--num_sen_class', type=int, default=1,
                        help="number of sensitive classes")
    parser.add_argument('--transformed_feature_dim', type=int, default=128,
                        help="transformed feature dimensions")
    parser.add_argument('--sample_number', type=int, default=1000,
                        help="the number of samples for training")
    # NOTE(review): argparse `type=bool` converts any non-empty string
    # (including "False") to True — confirm this is the intended behavior
    # for --load.
    parser.add_argument('--load', type=bool, default=False,
                        help="load AC model, use with AC_model_path")
    parser.add_argument('--AC_model_path', type=str, default="./AC_model",
                        help="AC_model_path")
    parser.add_argument('--GNN_model_path', type=str, default="./GNN_model",
                        help="GNN_model_path")
    args = parser.parse_known_args()[0]
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    print(args)
    return args
def fair_metric(output, idx, labels, sens):
    """Statistical parity and equal opportunity gaps over the nodes in idx.

    Parity is the absolute difference in positive prediction rate between
    the two sensitive groups; equality is the same difference restricted to
    nodes whose true label is 1.
    """
    idx_np = idx.cpu().numpy()
    y_true = labels[idx].cpu().numpy()
    sens_np = sens.cpu().numpy()[idx_np]
    group0 = sens_np == 0
    group1 = sens_np == 1
    group0_pos = group0 & (y_true == 1)
    group1_pos = group1 & (y_true == 1)
    # Binary predictions: logits > 0 count as positive.
    y_pred = (output[idx].squeeze() > 0).type_as(labels).cpu().numpy()
    rate = lambda mask: sum(y_pred[mask]) / sum(mask)
    parity = abs(rate(group0) - rate(group1))
    equality = abs(rate(group0_pos) - rate(group1_pos))
    return parity, equality
def main():
    """Train FairAC (attribute completion + fairness) and evaluate a GNN.

    Pipeline: load dataset/embeddings, exclude test nodes from the training
    graph, pretrain the AC model on reconstruction, then alternate adversarial
    sensitive-attribute training; periodically complete features for the whole
    graph, train a fresh GNN classifier on them, and track the best
    accuracy/AUC/fairness trade-offs on the test set.
    """
    args = parser_args()
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    # Load data
    print(args.dataset)
    if args.dataset != 'nba':
        if args.dataset == 'pokec_z':
            dataset = 'region_job'
            embedding = np.load('pokec_z_embedding10.npy')  # embeding is produced by Deep Walk
            embedding = torch.tensor(embedding)
            sens_attr = "region"
        else:
            dataset = 'region_job_2'
            embedding = np.load('pokec_n_embedding10.npy')  # embeding is produced by Deep Walk
            embedding = torch.tensor(embedding)
            sens_attr = "region"
        predict_attr = "I_am_working_in_field"
        label_number = args.label_number
        sens_number = args.sens_number
        seed = 20
        path = "../dataset/pokec/"
        test_idx = False
    else:
        dataset = 'nba'
        sens_attr = "country"
        predict_attr = "SALARY"
        label_number = 100
        sens_number = 50
        seed = 42
        path = "../dataset/NBA"
        test_idx = True
        embedding = np.load('nba_embedding10.npy')  # embeding is produced by Deep Walk
        embedding = torch.tensor(embedding)
    print(dataset)
    adj, features, labels, idx_train, _, idx_test, sens, _ = load_pokec(dataset,
                                                                        sens_attr,
                                                                        predict_attr,
                                                                        path=path,
                                                                        label_number=label_number,
                                                                        sens_number=sens_number,
                                                                        seed=seed, test_idx=test_idx)
    # remove idx_test adj, features
    exclude_test = torch.ones(adj.shape[1]).bool()  # indices after removing idx_test
    exclude_test[idx_test] = False
    sub_adj = adj[exclude_test][:, exclude_test]
    # Build a mapping from full-graph row index to row index in the
    # test-excluded subgraph.
    indices = []
    counter = 0
    for e in exclude_test:
        indices.append(counter)
        if e:
            counter += 1
    indices = torch.LongTensor(indices)
    y_idx = indices[idx_train]
    # ################ modification on dataset idx######################
    print(len(idx_test))
    from utils import feature_norm
    # G = dgl.DGLGraph()
    G = dgl.from_scipy(adj, device='cuda:0')
    subG = dgl.from_scipy(sub_adj, device='cuda:0')
    if dataset == 'nba':
        features = feature_norm(features)
    # Binarize labels and sensitive attribute.
    labels[labels > 1] = 1
    if sens_attr:
        sens[sens > 0] = 1
    # Model and optimizer
    adj_mat = adj.toarray()
    adjTensor = torch.FloatTensor(adj_mat)
    # Split the graph into 4 node groups processed separately during
    # attribute completion.
    sub_nodes = np.array_split(range(features.shape[0]), 4)
    sub_nodes = [torch.tensor(s).cuda() for s in sub_nodes]
    transformed_feature_dim = args.transformed_feature_dim
    GNNmodel = GNN(nfeat=transformed_feature_dim, args=args)
    ACmodel = FairAC2(feature_dim=features.shape[1],transformed_feature_dim=transformed_feature_dim, emb_dim=embedding.shape[1], args=args)
    if args.load:
        ACmodel = torch.load(args.AC_model_path)
        GNNmodel = torch.load(args.GNN_model_path)
    # mdotodel.estimator.load_state_dict(torch.load("./checkpoint/GCN_sens_{}_ns_{}".format(dataset, sens_number)))
    if args.cuda:
        GNNmodel.cuda()
        ACmodel.cuda()
        embedding = embedding.cuda()
        features = features.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_test = idx_test.cuda()
        sens = sens.cuda()
    # fair sub graph adj for all graph
    # For each node group, pick which nodes keep their features (sources) and
    # which have them dropped (to be completed).
    subgraph_adj_list = []
    feat_keep_idx_sub_list = []
    feat_drop_idx_sub_list = []
    for sub_node in sub_nodes:
        feat_keep_idx_sub, feat_drop_idx_sub = train_test_split(np.arange(len(sub_node)),
                                                                test_size=args.feat_drop_rate)
        feat_keep_idx_sub_list.append(feat_keep_idx_sub)
        feat_drop_idx_sub_list.append(feat_drop_idx_sub)
        subgraph_adj = adjTensor[sub_node][:, sub_node][:, feat_keep_idx_sub]
        subgraph_adj_list.append(subgraph_adj)
    from sklearn.metrics import roc_auc_score
    # Train model
    t_total = time.time()
    best_result = {}
    best_fair = 100
    best_acc = 0
    best_auc = 0
    best_ar = 0
    best_ars_result = {}
    features_embedding = torch.zeros((features.shape[0], transformed_feature_dim)).cuda()
    for epoch in range(args.epochs):
        t = time.time()
        GNNmodel.train()
        ACmodel.train()
        GNNmodel.optimizer_G.zero_grad()
        ACmodel.optimizer_AC.zero_grad()
        ACmodel.optimizer_S.zero_grad()
        if epoch < args.epochs and not args.load:
            # define train dataset, using the sub_nodes[0][feat_keep_idx_sub], which are fully labeled
            ac_train_idx = sub_nodes[0][feat_keep_idx_sub_list[0]][:args.sample_number]
            # ac_train_idx = sub_nodes[epoch%len(sub_nodes)][feat_keep_idx_sub_list[epoch%len(sub_nodes)]][:1000]
            feat_keep_idx, feat_drop_idx = train_test_split(np.arange(ac_train_idx.shape[0]),
                                                            test_size=args.feat_drop_rate)
            features_train = features[ac_train_idx]
            sens_train = sens[ac_train_idx]
            training_adj = adjTensor[ac_train_idx][:, ac_train_idx][:, feat_keep_idx].cuda()
            feature_src_re2, features_hat, transformed_feature = ACmodel(training_adj, embedding[ac_train_idx], embedding[ac_train_idx][feat_keep_idx],
                                                                        features_train[feat_keep_idx])
            loss_ac = ACmodel.loss(features_train[feat_drop_idx], feature_src_re2[feat_drop_idx, :])
            loss_reconstruction = F.pairwise_distance(features_hat, features_train[feat_keep_idx],2).mean()
            # base AC finished###############
            # pretrain AC model
            if epoch < 200:
                # ###############pretrain AC model ##########################
                print("Epoch: {:04d}, loss_ac: {:.4f},loss_reconstruction: {:.4f}"
                      .format(epoch, loss_ac.item(), loss_reconstruction.item()))
                AC_loss = loss_reconstruction + loss_ac
                AC_loss.backward()
                ACmodel.optimizer_AC.step()
                continue
            # mitigate unfairness loss
            transformed_feature_detach = transformed_feature.detach()
            sens_prediction_detach = ACmodel.sensitive_pred(transformed_feature_detach)
            criterion = torch.nn.BCEWithLogitsLoss()
            # only update sensitive classifier
            Csen_loss = criterion(sens_prediction_detach, sens_train[feat_keep_idx].unsqueeze(1).float())
            # sensitive optimizer.step
            Csen_loss.backward()
            ACmodel.optimizer_S.step()
            feature_src_re2[feat_keep_idx] = transformed_feature
            sens_prediction = ACmodel.sensitive_pred(feature_src_re2[feat_drop_idx])
            # Adversarial target: push sensitive predictions toward 0.5
            # (maximum confusion).
            sens_confusion = torch.ones(sens_prediction.shape, device=sens_prediction.device, dtype=torch.float32) / 2
            Csen_adv_loss = criterion(sens_prediction, sens_confusion)
            sens_prediction_keep = ACmodel.sensitive_pred(transformed_feature)
            Csen_loss = criterion(sens_prediction_keep, sens_train[feat_keep_idx].unsqueeze(1).float())
            # sensitive optimizer.step
            # AC optimizer.step
            AC_loss = args.lambda2*(Csen_adv_loss -Csen_loss)+loss_reconstruction + args.lambda1*loss_ac
            AC_loss.backward()
            ACmodel.optimizer_AC.step()
        if epoch < args.epochs and epoch % 100 == 0:
            print("Epoch: {:04d}, loss_ac: {:.4f}, loss_reconstruction: {:.4f}, Csen_loss: {:.4}, Csen_adv_loss: {:.4f}"
                  .format(epoch, loss_ac.item(), loss_reconstruction.item(), Csen_loss.item(), Csen_adv_loss.item()
                          ))
        if epoch > 1000 and epoch % 200 == 0 or epoch == args.epochs-1:
            with torch.no_grad():
                # ############# Attribute completion over graph######################
                for i, sub_node in enumerate(sub_nodes):
                    feat_keep_idx_sub = feat_keep_idx_sub_list[i]
                    feat_drop_idx_sub = feat_drop_idx_sub_list[i]
                    feature_src_AC, features_hat, transformed_feature = ACmodel(subgraph_adj_list[i].cuda(),
                                                                                embedding[sub_node],
                                                                                embedding[sub_node][
                                                                                    feat_keep_idx_sub],
                                                                                features[sub_node][
                                                                                    feat_keep_idx_sub])
                    features_embedding[sub_node[feat_drop_idx_sub]] = feature_src_AC[feat_drop_idx_sub]
                    features_embedding[sub_node[feat_keep_idx_sub]] = transformed_feature
            # Train a fresh GNN classifier on the completed features
            # (test nodes excluded from the training graph).
            GNNmodel_inside = GNN(nfeat=transformed_feature_dim, args=args).cuda()
            GNNmodel_inside.train()
            for sub_epoch in range(1000):
                features_embedding_exclude_test = features_embedding[exclude_test].detach()
                feat_emb, y = GNNmodel_inside(subG, features_embedding_exclude_test)
                Cy_loss = GNNmodel_inside.criterion(y[y_idx], labels[idx_train].unsqueeze(1).float())
                GNNmodel_inside.optimizer_G.zero_grad()
                Cy_loss.backward()
                GNNmodel_inside.optimizer_G.step()
                if args.load:
                    loss_ac = torch.zeros(1)
                    loss_reconstruction = torch.zeros(1)
                    Csen_loss = torch.zeros(1)
                    Csen_adv_loss = torch.zeros(1)
                if sub_epoch % 100 == 0:
                    print(
                        "Epoch: {:04d}, sub_epoch: {:04d}, loss_ac: {:.4f}, loss_reconstruction: {:.4f}, Csen_loss: {:.4}, Csen_adv_loss: {:.4f}, Cy_loss: {:.4f}"
                        .format(epoch, sub_epoch, loss_ac.item(), loss_reconstruction.item(), Csen_loss.item(),
                                Csen_adv_loss.item(),
                                Cy_loss.item()))
            ##################### training finished ###################################
            cls_loss = Cy_loss
            GNNmodel_inside.eval()
            ACmodel.eval()
            with torch.no_grad():
                _, output = GNNmodel_inside(G, features_embedding)
                acc_test = accuracy(output[idx_test], labels[idx_test])
                roc_test = roc_auc_score(labels[idx_test].cpu().numpy(),
                                         output[idx_test].detach().cpu().numpy())
                parity, equality = fair_metric(output, idx_test, labels, sens)
            # if acc_val > args.acc and roc_val > args.roc:
            if best_acc <= acc_test:
                best_acc = acc_test
                best_acc_result = {}
                best_acc_result['acc'] = acc_test.item()
                best_acc_result['roc'] = roc_test
                best_acc_result['parity'] = parity
                best_acc_result['equality'] = equality
                best_ars_result['best_acc_result'] = best_acc_result
            if best_auc <= roc_test:
                best_auc = roc_test
                best_auc_result = {}
                best_auc_result['acc'] = acc_test.item()
                best_auc_result['roc'] = roc_test
                best_auc_result['parity'] = parity
                best_auc_result['equality'] = equality
                best_ars_result['best_auc_result'] = best_auc_result
            if best_ar <= roc_test + acc_test:
                best_ar = roc_test + acc_test
                best_ar_result = {}
                best_ar_result['acc'] = acc_test.item()
                best_ar_result['roc'] = roc_test
                best_ar_result['parity'] = parity
                best_ar_result['equality'] = equality
                best_ars_result['best_ar_result'] = best_ar_result
            # Among evaluations clearing the accuracy/AUC thresholds, keep
            # the one with the smallest parity + equality.
            if acc_test > args.acc and roc_test > args.roc:
                if best_fair > parity + equality:
                    best_fair = parity + equality
                    best_result['acc'] = acc_test.item()
                    best_result['roc'] = roc_test
                    best_result['parity'] = parity
                    best_result['equality'] = equality
                    torch.save(GNNmodel_inside, "GNNinside_epoch{:04d}_acc{:.4f}_roc{:.4f}_par{:.4f}_eq_{:.4f}".format(epoch,
                                                                                                                       acc_test.item(),
                                                                                                                       roc_test
                                                                                                                       ,
                                                                                                                       parity,
                                                                                                                       equality))
                    torch.save(ACmodel,
                               "ACmodelinside_epoch{:04d}_acc{:.4f}_roc{:.4f}_par{:.4f}_eq_{:.4f}".format(epoch,
                                                                                                          acc_test.item(),
                                                                                                          roc_test
                                                                                                          , parity,
                                                                                                          equality))
            print("=================================")
            log = "Epoch: {:04d}, loss_ac: {:.4f}, loss_reconstruction: {:.4f}, Csen_loss: {:.4}, Csen_adv_loss: {:.4f}, cls: {:.4f}" \
                .format(epoch, loss_ac.item(), loss_reconstruction.item(), Csen_loss.item(),
                        Csen_adv_loss.item(), cls_loss.item())
            with open('log.txt', 'a') as f:
                f.write(log)
            print("Test:",
                  "accuracy: {:.4f}".format(acc_test.item()),
                  "roc: {:.4f}".format(roc_test),
                  "parity: {:.4f}".format(parity),
                  "equality: {:.4f}".format(equality))
            log = 'Test: accuracy: {:.4f} roc: {:.4f} parity: {:.4f} equality: {:.4f}\n' \
                .format(acc_test.item(), roc_test, parity, equality)
            with open('log.txt', 'a') as f:
                f.write(log)
    print("Optimization Finished!")
    print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
    print('============performace on test set=============')
    print(best_ars_result)
    with open('log.txt', 'a') as f:
        f.write(str(best_ars_result))
    if len(best_result) > 0:
        log = "Test: accuracy: {:.4f}, roc: {:.4f}, parity: {:.4f}, equality: {:.4f}"\
            .format(best_result['acc'],best_result['roc'], best_result['parity'],best_result['equality'])
        with open('log.txt', 'a') as f:
            f.write(log)
        print("Test:",
              "accuracy: {:.4f}".format(best_result['acc']),
              "roc: {:.4f}".format(best_result['roc']),
              "parity: {:.4f}".format(best_result['parity']),
              "equality: {:.4f}".format(best_result['equality']))
    else:
        print("Please set smaller acc/roc thresholds")
FairAC | FairAC-main/src/models/HGNN_AC.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class HGNN_AC(nn.Module):
    """Multi-head attribute-completion layer: averages `num_heads` attention heads."""

    def __init__(self, in_dim, hidden_dim, dropout, activation, num_heads, cuda=False):
        super(HGNN_AC, self).__init__()
        self.dropout = dropout
        self.attentions = [AttentionLayer(in_dim, hidden_dim, dropout, activation, cuda)
                           for _ in range(num_heads)]
        # Register each head as a submodule so its parameters are tracked/optimized.
        for head_idx, head in enumerate(self.attentions):
            self.add_module('attention_{}'.format(head_idx), head)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        # Randomly drop entries of the bias/adjacency matrix during training.
        masked_bias = F.dropout(bias, self.dropout, training=self.training)
        head_outputs = []
        for head in self.attentions:
            head_outputs.append(head(masked_bias, emb_dest, emb_src, feature_src).unsqueeze(0))
        stacked = torch.cat(head_outputs, dim=0)
        # Average over the head dimension.
        return torch.mean(stacked, dim=0, keepdim=False)
class AttentionLayer(nn.Module):
    """Single attention head: attends destination nodes over source nodes and
    aggregates source features accordingly."""

    def __init__(self, in_dim, hidden_dim, dropout, activation, cuda=False):
        super(AttentionLayer, self).__init__()
        self.dropout = dropout
        self.activation = activation
        self.is_cuda = cuda
        # Projection applied to both source and destination embeddings.
        self.W = nn.Parameter(nn.init.xavier_normal_(
            torch.Tensor(in_dim, hidden_dim).type(torch.cuda.FloatTensor if cuda else torch.FloatTensor),
            gain=np.sqrt(2.0)), requires_grad=True)
        # Bilinear form used to score (dest, src) pairs.
        self.W2 = nn.Parameter(nn.init.xavier_normal_(torch.Tensor(hidden_dim, hidden_dim).type(
            torch.cuda.FloatTensor if cuda else torch.FloatTensor), gain=np.sqrt(2.0)),
                               requires_grad=True)
        self.leakyrelu = nn.LeakyReLU(0.2)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        h_1 = torch.mm(emb_src, self.W)
        h_2 = torch.mm(emb_dest, self.W)
        # Raw pairwise scores: leakyrelu(h_dest @ W2 @ h_src^T), shape (dest, src).
        e = self.leakyrelu(torch.mm(torch.mm(h_2, self.W2), h_1.t()))
        # Mask out pairs with no edge (bias <= 0) with a large negative value
        # so they vanish after the softmax.
        zero_vec = -9e15 * torch.ones_like(e)
        attention = torch.where(bias > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        # Weighted aggregation of source features per destination node.
        h_prime = torch.matmul(attention, feature_src)
        return self.activation(h_prime)
| 2,074 | 38.903846 | 115 | py |
FairAC | FairAC-main/src/models/GCN.py | import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GraphConv
class GCN(nn.Module):
    """GCN body followed by a linear classification head."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.body = GCN_Body(nfeat, nhid, dropout)
        self.fc = nn.Linear(nhid, nclass)

    def forward(self, g, x):
        hidden = self.body(g, x)
        return self.fc(hidden)
# def GCN(nn.Module):
class GCN_Body(nn.Module):
    """Two GraphConv layers; ReLU + dropout are applied after the first."""

    def __init__(self, nfeat, nhid, dropout):
        super(GCN_Body, self).__init__()
        self.gc1 = GraphConv(nfeat, nhid)
        self.gc2 = GraphConv(nhid, nhid)
        self.dropout = nn.Dropout(dropout)

    def forward(self, g, x):
        hidden = self.dropout(F.relu(self.gc1(g, x)))
        return self.gc2(g, hidden)
| 830 | 22.742857 | 53 | py |
FairAC | FairAC-main/src/models/FairGNN.py | import random
import torch.nn as nn
from .GCN import GCN,GCN_Body
from .GAT import GAT,GAT_body
from .SAGE import SAGE_Body
from .HGNN_AC import HGNN_AC
import torch
import torch.nn.functional as F
import numpy as np
def get_model(nfeat, args):
    """Build the GNN backbone selected by ``args.model``.

    Args:
        nfeat: input feature dimension.
        args: namespace of hyper-parameters (num_hidden, dropout, ...).

    Returns:
        The backbone module (``GCN_Body``, ``GAT_body`` or ``SAGE_Body``).

    Raises:
        ValueError: if ``args.model`` names an unsupported architecture.
    """
    if args.model == "GCN":
        model = GCN_Body(nfeat, args.num_hidden, args.dropout)
    elif args.model == "GAT":
        heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
        model = GAT_body(args.num_layers, nfeat, args.num_hidden, heads, args.dropout,
                         args.attn_drop, args.negative_slope, args.residual)
    elif args.model == "SAGE":
        model = SAGE_Body(nfeat, args.num_hidden, args.dropout)
    else:
        # Fail fast: previously this printed "Model not implement" and returned
        # None, deferring the failure to a confusing AttributeError downstream.
        raise ValueError("Unsupported model type: {!r}".format(args.model))
    return model
class FairGNN(nn.Module):
    """FairGNN: GNN classifier trained adversarially against a sensitive-attribute
    discriminator, with a GCN estimator predicting missing sensitive attributes."""

    def __init__(self, nfeat, args):
        super(FairGNN,self).__init__()
        nhid = args.num_hidden
        dropout = args.dropout
        # NOTE(review): the estimator uses args.hidden while other modules use
        # args.num_hidden — confirm both attributes exist on args.
        self.estimator = GCN(nfeat,args.hidden,1,dropout)
        self.GNN = get_model(nfeat,args)
        self.classifier = nn.Linear(nhid,1)
        self.adv = nn.Linear(nhid,1)
        # Generator optimizer covers backbone, classifier and estimator; the
        # adversary gets its own optimizer for the alternating updates below.
        G_params = list(self.GNN.parameters()) + list(self.classifier.parameters()) + list(self.estimator.parameters())
        self.optimizer_G = torch.optim.Adam(G_params, lr = args.lr, weight_decay = args.weight_decay)
        self.optimizer_A = torch.optim.Adam(self.adv.parameters(), lr = args.lr, weight_decay = args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()
        self.G_loss = 0
        self.A_loss = 0

    def forward(self,g,x):
        # Returns (label logits, estimated sensitive-attribute logits).
        s = self.estimator(g,x)
        z = self.GNN(g,x)
        y = self.classifier(z)
        return y,s

    def optimize(self,g,x,labels,idx_train,sens,idx_sens_train):
        """One alternating training step: update generator (E, G), then adversary."""
        self.train()
        ### update E, G
        # Freeze the adversary while the generator is updated.
        self.adv.requires_grad_(False)
        self.optimizer_G.zero_grad()
        s = self.estimator(g,x)
        h = self.GNN(g,x)
        y = self.classifier(h)
        s_g = self.adv(h)
        # Estimator output is detached: known sensitive labels overwrite the
        # estimate for nodes in idx_sens_train.
        s_score = torch.sigmoid(s.detach())
        # s_score = (s_score > 0.5).float()
        s_score[idx_sens_train]=sens[idx_sens_train].unsqueeze(1).float()
        y_score = torch.sigmoid(y)
        # Covariance between predicted labels and sensitive scores — a fairness
        # penalty weighted by args.alpha.
        self.cov = torch.abs(torch.mean((s_score - torch.mean(s_score)) * (y_score - torch.mean(y_score))))
        self.cls_loss = self.criterion(y[idx_train],labels[idx_train].unsqueeze(1).float())
        self.adv_loss = self.criterion(s_g,s_score)
        # Generator minimizes classification loss + covariance, and MAXIMIZES
        # the adversary's loss (hence the minus sign).
        self.G_loss = self.cls_loss + self.args.alpha * self.cov - self.args.beta * self.adv_loss
        self.G_loss.backward()
        self.optimizer_G.step()
        ## update Adv
        self.adv.requires_grad_(True)
        self.optimizer_A.zero_grad()
        # h is detached so the adversary update does not touch the backbone.
        s_g = self.adv(h.detach())
        self.A_loss = self.criterion(s_g,s_score)
        self.A_loss.backward()
        self.optimizer_A.step()
class FairGnn(nn.Module):
    """GNN backbone with a label head and an auxiliary sensitive-attribute head."""

    def __init__(self, nfeat, args):
        super(FairGnn, self).__init__()
        hidden = args.num_hidden
        self.GNN = get_model(nfeat, args)
        self.classifier = nn.Linear(hidden, 1)
        self.classifierSen = nn.Linear(hidden, 1)
        # Backbone + label head share one optimizer; the sensitive head gets its own.
        backbone_params = list(self.GNN.parameters()) + list(self.classifier.parameters())
        self.optimizer_G = torch.optim.Adam(backbone_params, lr=args.lr,
                                            weight_decay=args.weight_decay)
        self.optimizer_S = torch.optim.Adam(self.classifierSen.parameters(), lr=args.lr,
                                            weight_decay=args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()
        self.G_loss = 0
        self.A_loss = 0

    def forward(self, g, x):
        embedding = self.GNN(g, x)
        label_logit = self.classifier(embedding)
        sens_logit = self.classifierSen(embedding)
        return embedding, label_logit, sens_logit
# Attention-only attribute completion model (no autoencoder).
class ClassicAC(nn.Module):
    """Attention-only attribute completion (no autoencoder)."""

    def __init__(self, emb_dim, args):
        super(ClassicAC, self).__init__()
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim,
                               dropout=args.dropout, activation=F.elu,
                               num_heads=args.num_heads, cuda=args.cuda)
        self.optimizer_AC = torch.optim.Adam(self.hgnn_ac.parameters(), lr=args.lr,
                                             weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        completed = self.hgnn_ac(bias, emb_dest, emb_src, feature_src)
        # No autoencoder: the reconstruction slot of the pair is always None.
        return completed, None

    def loss(self, origin_feature, AC_feature):
        # Mean L2 distance between original and completed features.
        return F.pairwise_distance(origin_feature, AC_feature, 2).mean()
# BaseAC: attribute completion with an autoencoder to improve performance.
class BaseAC(nn.Module):
    """Attribute completion wrapped in a linear autoencoder: features are
    encoded, completed by attention in the encoded space, and decoded."""

    def __init__(self, feature_dim, transformed_feature_dim, emb_dim, args):
        super(BaseAC, self).__init__()
        self.fc = torch.nn.Linear(feature_dim, transformed_feature_dim)
        nn.init.xavier_normal_(self.fc.weight, gain=1.414)
        self.fcdecoder = torch.nn.Linear(transformed_feature_dim, feature_dim)
        nn.init.xavier_normal_(self.fcdecoder.weight, gain=1.414)
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim,
                               dropout=args.dropout, activation=F.elu,
                               num_heads=args.num_heads, cuda=args.cuda)
        ac_params = (list(self.fc.parameters()) + list(self.fcdecoder.parameters())
                     + list(self.hgnn_ac.parameters()))
        self.optimizer_AC = torch.optim.Adam(ac_params, lr=args.lr,
                                             weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        encoded = self.fc(feature_src)
        completed = self.hgnn_ac(bias, emb_dest, emb_src, encoded)
        reconstructed = self.fcdecoder(encoded)
        return completed, reconstructed

    def feature_transform(self, features):
        return self.fc(features)

    def feature_decoder(self, transformed_features):
        return self.fcdecoder(transformed_features)

    def loss(self, origin_feature, AC_feature):
        # Distance is measured in the encoded space.
        return F.pairwise_distance(self.fc(origin_feature), AC_feature, 2).mean()
# FairSelectAC: attribute completion using the fair-select approach.
class FairSelectAC(nn.Module):
    """BaseAC variant that first rebalances each node's neighbourhood so every
    sensitive class contributes the same number of edges ("fair select")."""

    def __init__(self, feature_dim, transformed_feature_dim, emb_dim, args):
        super(FairSelectAC, self).__init__()
        self.fc = torch.nn.Linear(feature_dim, transformed_feature_dim)
        nn.init.xavier_normal_(self.fc.weight, gain=1.414)
        self.fcdecoder = torch.nn.Linear(transformed_feature_dim, feature_dim)
        nn.init.xavier_normal_(self.fcdecoder.weight, gain=1.414)
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim, dropout=args.dropout,
                               activation=F.elu, num_heads=args.num_heads, cuda=args.cuda)
        AC_params = list(self.fc.parameters()) + list(self.fcdecoder.parameters()) + list(self.hgnn_ac.parameters())
        self.optimizer_AC = torch.optim.Adam(AC_params, lr=args.lr, weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src, fairadj = False):
        # fairadj=True means `bias` is already fairness-balanced; otherwise
        # rebalance it here (fair_select mutates `bias` in place — see below).
        if not fairadj:
            fair_adj = self.fair_select(bias,feature_src)
        else:
            fair_adj = bias
        transformed_features = self.fc(feature_src)
        feature_src_re = self.hgnn_ac(fair_adj,
                                      emb_dest, emb_src,
                                      transformed_features)
        feature_hat = self.fcdecoder(transformed_features)
        return feature_src_re, feature_hat

    def fair_select(self, adj, feature_with_sens):
        """Randomly drop edges so each row of `adj` keeps an equal number of
        neighbours per sensitive class.

        NOTE(review): mutates and returns the SAME `adj` tensor (in-place edit);
        callers must not rely on the original adjacency afterwards.
        Assumes the sensitive attribute is the last feature column — confirm.
        """
        sens = feature_with_sens[:,-1] + 1 # covert 0 to 1, 1 to 2. in case adj is 0 which cause wrong counter.
        sens_num_class = len(torch.unique(sens))
        for idx,row in enumerate(adj):
            sens_counter = [0] * (sens_num_class+1)
            # sen_row[j] = sensitive class (1-based) of neighbour j, 0 for non-edges.
            sen_row = (row*sens).long()
            sen_row_array = np.array(sen_row.cpu())
            for i in range(sens_num_class+1):
                sens_counter[i] = np.count_nonzero(sen_row_array == i)
            # for i in sen_row:
            #     sens_counter[i] += 1
            # list.remove deletes the first element equal to sens_counter[0],
            # which is index 0 itself — i.e. drop the non-edge count.
            sens_counter.remove(sens_counter[0]) # ignore 0, which means the number of no edges nodes pairs
            # fint the min sens_counter that greater than 0
            least_num_sens_class = max(sens_counter)
            for counter in sens_counter:
                if counter > 0 and counter < least_num_sens_class:
                    least_num_sens_class = counter
            remove_number = [max(counter - least_num_sens_class,0) for counter in sens_counter] # number of edges per class that need to remove to keep fair
            for i,number in enumerate(remove_number):
                if(number > 0):
                    sen_class = i+1
                    sens_idx = np.where(sen_row.cpu() == sen_class)[0]
                    # Randomly pick `number` edges of this class to delete.
                    drop_idx = torch.tensor(random.sample(list(sens_idx), number)).long()
                    adj[idx][drop_idx] = 0
        return adj

    def feature_transform(self, features):
        return self.fc(features)

    def feature_decoder(self, transformed_features):
        return self.fcdecoder(transformed_features)

    def loss(self, origin_feature, AC_feature):
        # Distance is measured in the encoded space.
        return F.pairwise_distance(self.fc(origin_feature), AC_feature, 2).mean()
class FairAC_GNN(nn.Module):
    """GNN classifier bundled with a BaseAC attribute-completion module and an
    auxiliary sensitive-attribute head."""

    def __init__(self, nfeat, transformed_feature_dim, emb_dim, args):
        super(FairAC_GNN, self).__init__()
        hidden = args.num_hidden
        self.GNN = get_model(nfeat, args)
        self.classifier = nn.Linear(hidden, 1)
        self.classifierSen = nn.Linear(hidden, 1)
        self.ACmodel = BaseAC(nfeat, transformed_feature_dim, emb_dim, args)
        # AC module, backbone and label head are optimized jointly.
        joint_params = (list(self.ACmodel.parameters()) + list(self.GNN.parameters())
                        + list(self.classifier.parameters()))
        self.optimizer_G = torch.optim.Adam(joint_params, lr=args.lr,
                                            weight_decay=args.weight_decay)
        self.optimizer_S = torch.optim.Adam(self.classifierSen.parameters(), lr=args.lr,
                                            weight_decay=args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()
        self.G_loss = 0
        self.A_loss = 0

    def forward(self, g, x):
        embedding = self.GNN(g, x)
        label_logit = self.classifier(embedding)
        sens_logit = self.classifierSen(embedding)
        return embedding, label_logit, sens_logit

    def feature_transform(self, features):
        return self.ACmodel.feature_transform(features)

    def feature_decoder(self, transformed_features):
        return self.ACmodel.feature_decoder(transformed_features)
| 10,512 | 39.279693 | 159 | py |
FairAC | FairAC-main/src/models/FairAC.py | import random
import torch.nn as nn
from .GCN import GCN,GCN_Body
from .GAT import GAT,GAT_body
from .SAGE import SAGE_Body
from .HGNN_AC import HGNN_AC
import torch
import torch.nn.functional as F
import numpy as np
def get_model(nfeat, args):
    """Build the GNN backbone selected by ``args.model``.

    Args:
        nfeat: input feature dimension.
        args: namespace of hyper-parameters (num_hidden, dropout, ...).

    Returns:
        The backbone module (``GCN_Body``, ``GAT_body`` or ``SAGE_Body``).

    Raises:
        ValueError: if ``args.model`` names an unsupported architecture.
    """
    if args.model == "GCN":
        model = GCN_Body(nfeat, args.num_hidden, args.dropout)
    elif args.model == "GAT":
        heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
        model = GAT_body(args.num_layers, nfeat, args.num_hidden, heads, args.dropout,
                         args.attn_drop, args.negative_slope, args.residual)
    elif args.model == "SAGE":
        model = SAGE_Body(nfeat, args.num_hidden, args.dropout)
    else:
        # Fail fast instead of printing and returning None (which previously
        # surfaced later as a confusing AttributeError at the call site).
        raise ValueError("Unsupported model type: {!r}".format(args.model))
    return model
class GNN(nn.Module):
    """Plain GNN backbone plus a single-logit classification head."""

    def __init__(self, nfeat, args):
        super(GNN, self).__init__()
        hidden = args.num_hidden
        self.GNN = get_model(nfeat, args)
        self.classifier = nn.Linear(hidden, 1)
        params = list(self.GNN.parameters()) + list(self.classifier.parameters())
        self.optimizer_G = torch.optim.Adam(params, lr=args.lr,
                                            weight_decay=args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()

    def forward(self, g, x):
        embedding = self.GNN(g, x)
        return embedding, self.classifier(embedding)
class FairGnn(nn.Module):
    """GNN backbone with a label head and an auxiliary sensitive-attribute head.

    NOTE: duplicates the class of the same name in models/FairGNN.py.
    """

    def __init__(self, nfeat, args):
        super(FairGnn, self).__init__()
        hidden = args.num_hidden
        self.GNN = get_model(nfeat, args)
        self.classifier = nn.Linear(hidden, 1)
        self.classifierSen = nn.Linear(hidden, 1)
        backbone_params = list(self.GNN.parameters()) + list(self.classifier.parameters())
        self.optimizer_G = torch.optim.Adam(backbone_params, lr=args.lr,
                                            weight_decay=args.weight_decay)
        self.optimizer_S = torch.optim.Adam(self.classifierSen.parameters(), lr=args.lr,
                                            weight_decay=args.weight_decay)
        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()
        self.G_loss = 0
        self.A_loss = 0

    def forward(self, g, x):
        embedding = self.GNN(g, x)
        label_logit = self.classifier(embedding)
        sens_logit = self.classifierSen(embedding)
        return embedding, label_logit, sens_logit
# BaseAC: attribute completion with an autoencoder to improve performance.
class BaseAC(nn.Module):
    """Attribute completion wrapped in a linear autoencoder.

    NOTE: identical to ``BaseAC`` in models/FairGNN.py.
    """

    def __init__(self, feature_dim, transformed_feature_dim, emb_dim, args):
        super(BaseAC, self).__init__()
        # Linear encoder/decoder pair around the attention-based completion.
        self.fc = torch.nn.Linear(feature_dim, transformed_feature_dim)
        nn.init.xavier_normal_(self.fc.weight, gain=1.414)
        self.fcdecoder = torch.nn.Linear(transformed_feature_dim, feature_dim)
        nn.init.xavier_normal_(self.fcdecoder.weight, gain=1.414)
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim, dropout=args.dropout,
                               activation=F.elu, num_heads=args.num_heads, cuda=args.cuda)
        AC_params = list(self.fc.parameters()) + list(self.fcdecoder.parameters()) + list(self.hgnn_ac.parameters())
        self.optimizer_AC = torch.optim.Adam(AC_params, lr=args.lr, weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        # Encode, complete in the encoded space, and decode for reconstruction.
        transformed_features = self.fc(feature_src)
        feature_src_re = self.hgnn_ac(bias,
                                      emb_dest, emb_src,
                                      transformed_features)
        feature_hat = self.fcdecoder(transformed_features)
        return feature_src_re, feature_hat

    def feature_transform(self, features):
        return self.fc(features)

    def feature_decoder(self, transformed_features):
        return self.fcdecoder(transformed_features)

    def loss(self, origin_feature, AC_feature):
        # Distance is measured in the encoded space.
        return F.pairwise_distance(self.fc(origin_feature), AC_feature, 2).mean()
class FairAC2(nn.Module):
    """FairAC with a two-layer MLP autoencoder, a sensitive-attribute classifier
    on the encoded features, and split optimizers (joint / AE-only / AC-only)."""

    def __init__(self, feature_dim, transformed_feature_dim, emb_dim, args):
        super(FairAC2, self).__init__()
        # Encoder: feature_dim -> 2*d -> d (d = transformed_feature_dim).
        self.fc = torch.nn.Linear(feature_dim, 2*transformed_feature_dim)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(2*transformed_feature_dim, transformed_feature_dim)
        nn.init.xavier_normal_(self.fc.weight, gain=1.414)
        nn.init.xavier_normal_(self.fc2.weight, gain=1.414)
        self.encoder = torch.nn.Sequential(self.fc, self.relu, self.fc2)
        # Decoder mirrors the encoder: d -> 2*d -> feature_dim.
        self.fcdecoder = torch.nn.Linear(transformed_feature_dim, transformed_feature_dim*2)
        self.relu2 = torch.nn.ReLU()
        self.fcdecoder2 = torch.nn.Linear(transformed_feature_dim*2, feature_dim)
        nn.init.xavier_normal_(self.fcdecoder.weight, gain=1.414)
        nn.init.xavier_normal_(self.fcdecoder2.weight, gain=1.414)
        self.decoder = torch.nn.Sequential(self.fcdecoder, self.relu2, self.fcdecoder2)
        self.hgnn_ac = HGNN_AC(in_dim=emb_dim, hidden_dim=args.attn_vec_dim, dropout=args.dropout,
                               activation=F.elu, num_heads=args.num_heads, cuda=args.cuda)
        AC_params = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(self.hgnn_ac.parameters())
        self.optimizer_AC = torch.optim.Adam(AC_params, lr=args.lr, weight_decay=args.weight_decay)
        # divide AC_params into two parts.
        AE_params = list(self.encoder.parameters()) + list(self.decoder.parameters())
        self.optimizer_AE = torch.optim.Adam(AE_params, lr=args.lr, weight_decay=args.weight_decay)
        self.optimizer_AConly = torch.optim.Adam(self.hgnn_ac.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        # Adversarial head predicting the sensitive class from encoded features.
        self.classifierSen = nn.Linear(transformed_feature_dim, args.num_sen_class)
        self.optimizer_S = torch.optim.Adam(self.classifierSen.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    def forward(self, bias, emb_dest, emb_src, feature_src):
        # Returns (completed features, reconstruction, encoded features).
        transformed_features = self.encoder(feature_src)
        feature_src_re = self.hgnn_ac(bias,
                                      emb_dest, emb_src,
                                      transformed_features)
        feature_hat = self.decoder(transformed_features)
        return feature_src_re, feature_hat, transformed_features

    def sensitive_pred(self, transformed_features):
        return self.classifierSen(transformed_features)

    def feature_transform(self, features):
        return self.encoder(features)

    def feature_decoder(self, transformed_features):
        return self.decoder(transformed_features)

    def loss(self, origin_feature, AC_feature):
        # The encoded target is detached: only the completion side is trained here.
        return F.pairwise_distance(self.encoder(origin_feature).detach(), AC_feature, 2).mean()
class AverageAC(nn.Module):
    """Attribute-completion baseline: each node gets the mean of its
    neighbours' source features (degree clamped to >= 1)."""

    def __init__(self):
        super(AverageAC, self).__init__()

    def forward(self, adj, feature_src):
        # Row-normalize the adjacency by degree; max(1, .) avoids divide-by-zero
        # for isolated nodes (they simply receive a zero vector).
        row_sums = [max(1, adj[r].sum().item()) for r in range(adj.shape[0])]
        norm_rows = [adj[r] / row_sums[r] for r in range(adj.shape[0])]
        return torch.stack(norm_rows).matmul(feature_src)
| 6,883 | 40.97561 | 131 | py |
FairAC | FairAC-main/src/models/SAGE.py | import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import SAGEConv
class SAGE(nn.Module):
    """GraphSAGE body with a linear classification head."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(SAGE, self).__init__()
        self.body = SAGE_Body(nfeat, nhid, dropout)
        self.fc = nn.Linear(nhid, nclass)

    def forward(self, g, x):
        hidden = self.body(g, x)
        return self.fc(hidden)
# def GCN(nn.Module):
class SAGE_Body(nn.Module):
    """Two mean-aggregator SAGEConv layers; ReLU + dropout after the first."""

    def __init__(self, nfeat, nhid, dropout):
        super(SAGE_Body, self).__init__()
        self.gc1 = SAGEConv(nfeat, nhid, 'mean')
        self.gc2 = SAGEConv(nhid, nhid, 'mean')
        self.dropout = nn.Dropout(dropout)

    def forward(self, g, x):
        hidden = self.dropout(F.relu(self.gc1(g, x)))
        return self.gc2(g, hidden)
| 848 | 23.257143 | 53 | py |
FairAC | FairAC-main/src/models/__init__.py | from .GCN import *
from .GAT import *
from .HGNN_AC import *
from .FairGNN import *
from .SAGE import * | 103 | 19.8 | 22 | py |
FairAC | FairAC-main/src/models/GAT.py | import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GATConv
class GAT_body(nn.Module):
    """Stack of GATConv layers: input projection, (num_layers - 1) hidden
    layers, and a head-averaged output projection."""

    def __init__(self,
                 num_layers,
                 in_dim,
                 num_hidden,
                 heads,
                 feat_drop,
                 attn_drop,
                 negative_slope,
                 residual):
        super(GAT_body, self).__init__()
        self.num_layers = num_layers
        self.gat_layers = nn.ModuleList()
        self.activation = F.elu
        # input projection (no residual)
        self.gat_layers.append(GATConv(
            in_dim, num_hidden, heads[0],
            feat_drop, attn_drop, negative_slope, False, self.activation))
        # hidden layers
        for l in range(1, num_layers):
            # due to multi-head, the in_dim = num_hidden * num_heads
            self.gat_layers.append(GATConv(
                num_hidden * heads[l-1], num_hidden, heads[l],
                feat_drop, attn_drop, negative_slope, residual, self.activation))
        # output projection
        self.gat_layers.append(GATConv(
            num_hidden * heads[-2], num_hidden, heads[-1],
            feat_drop, attn_drop, negative_slope, residual, None))

    def forward(self, g, inputs):
        h = inputs
        # Intermediate layers concatenate heads via flatten(1).
        for l in range(self.num_layers):
            h = self.gat_layers[l](g, h).flatten(1)
        # output projection: average over heads instead of concatenating.
        logits = self.gat_layers[-1](g, h).mean(1)
        return logits
class GAT(nn.Module):
    """GAT body followed by a linear classification head."""

    def __init__(self, num_layers, in_dim, num_hidden, num_classes, heads,
                 feat_drop, attn_drop, negative_slope, residual):
        super(GAT, self).__init__()
        self.body = GAT_body(num_layers, in_dim, num_hidden, heads, feat_drop,
                             attn_drop, negative_slope, residual)
        self.fc = nn.Linear(num_hidden, num_classes)

    def forward(self, g, inputs):
        hidden = self.body(g, inputs)
        return self.fc(hidden)
Pinyin2Hanzi | Pinyin2Hanzi-master/setup.py | # -*- coding: utf-8 -*-
from distutils.core import setup
# Long description shown on PyPI.
LONGDOC = """
Engine of Chinese Input Method.
Please go to https://github.com/someus/Pinyin2Hanzi for more info.
具体使用请移步 https://github.com/someus/Pinyin2Hanzi 。
"""

# Packaging metadata for the Pinyin2Hanzi library.
setup(
    name='Pinyin2Hanzi',
    version='0.1.1',
    description='拼音转汉字, Engine of Chinese Input Method',
    long_description=LONGDOC,
    author='Letian Sun',
    author_email='sunlt1699@gmail.com',
    url='https://github.com/someus/Pinyin2Hanzi',
    license="MIT",
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Natural Language :: Chinese (Simplified)',
        'Natural Language :: Chinese (Traditional)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Text Processing',
    ],
    keywords='NLP,Chinese,Pinyin',
    packages=['Pinyin2Hanzi'],
    package_dir={'Pinyin2Hanzi':'Pinyin2Hanzi'},
    # Ship the JSON model tables bundled under Pinyin2Hanzi/data/.
    package_data={'Pinyin2Hanzi':['*.*','data/*']}
)
Pinyin2Hanzi | Pinyin2Hanzi-master/Pinyin2Hanzi/dag.py | # coding: utf-8
from __future__ import (print_function, unicode_literals, absolute_import)
from .interface import AbstractDagParams
from .priorityset import PrioritySet
from .util import xrange
import math
def dag(dag_params, pinyin_list, path_num=6, log=False):
    """Segment `pinyin_list` into words by dynamic programming over a DAG.

    D[i] holds (up to `path_num`) best-scoring word paths covering
    pinyin_list[0..i]. With log=True scores are summed log-probabilities,
    otherwise multiplied probabilities. Returns Items sorted by score desc.
    """
    assert( isinstance(dag_params, AbstractDagParams) )
    pinyin_num = len(pinyin_list)
    if pinyin_num == 0:
        return []
    D = [PrioritySet(path_num) for _ in xrange(pinyin_num)]
    ## idx is 1
    # Base case: single words starting at position 0.
    for from_idx in xrange(0, 1):
        for to_idx in xrange(from_idx, pinyin_num):
            kvs = dag_params.get_phrase(pinyin_list[from_idx:to_idx+1], num=path_num)
            for item in kvs:
                word = [item[0]]
                if log:
                    score = math.log(item[1])
                else:
                    score = item[1]
                D[to_idx].put(score, word)
    # Extend every best path ending at from_idx-1 by a word spanning
    # [from_idx, to_idx].
    for from_idx in xrange(1, pinyin_num):
        prev_paths = D[from_idx-1]
        for to_idx in xrange(from_idx, pinyin_num):
            kvs = dag_params.get_phrase(pinyin_list[from_idx:to_idx+1], num=path_num)
            for prev_item in prev_paths:
                for item in kvs:
                    word = prev_item.path + [ item[0] ]
                    if log:
                        score = prev_item.score + math.log(item[1])
                    else:
                        score = prev_item.score * item[1]
                    D[to_idx].put(score, word)
    result = [ item for item in D[-1] ]
    return sorted(result, key=lambda item: item.score, reverse=True)
| 1,575 | 27.654545 | 85 | py |
Pinyin2Hanzi | Pinyin2Hanzi-master/Pinyin2Hanzi/implement.py | # coding: utf-8
from __future__ import (print_function, unicode_literals, absolute_import)
from .interface import AbstractHmmParams, AbstractDagParams
from .util import as_text
import os
import json
DATA = 'data'
DEFAULT = 'default'
class DefaultHmmParams(AbstractHmmParams):
    """HMM parameters backed by the bundled JSON tables (py2hz / start /
    emission / transition) under Pinyin2Hanzi/data/."""

    def __init__(self,):
        data_dir = os.path.join(self.pwd(), 'data')
        self.py2hz_dict = self.readjson(os.path.join(data_dir, 'hmm_py2hz.json'))
        self.start_dict = self.readjson(os.path.join(data_dir, 'hmm_start.json'))
        self.emission_dict = self.readjson(os.path.join(data_dir, 'hmm_emission.json'))
        self.transition_dict = self.readjson(os.path.join(data_dir, 'hmm_transition.json'))

    def readjson(self, filename):
        """Load a JSON data file, always as UTF-8."""
        import io  # local import keeps the module's dependency surface unchanged
        # Explicit UTF-8: the data files contain Chinese text and must not
        # depend on the platform default encoding (e.g. cp936/cp1252 on Windows).
        with io.open(filename, encoding='utf-8') as fp:
            return json.load(fp)

    def pwd(self):
        """Directory of this module (where the data files live)."""
        return os.path.dirname(os.path.abspath(__file__))

    def start(self, state):
        """Initial probability of hanzi `state` (global default if unseen)."""
        state = as_text(state)
        data = self.start_dict[DATA]
        return float(data.get(state, self.start_dict[DEFAULT]))

    def emission(self, state, observation):
        """P(pinyin `observation` | hanzi `state`), with a global default."""
        pinyin = as_text(observation)
        hanzi = as_text(state)
        default = self.emission_dict[DEFAULT]
        prob_dict = self.emission_dict[DATA].get(hanzi)
        if prob_dict is None:
            return float(default)
        return float(prob_dict.get(pinyin, default))

    def transition(self, from_state, to_state):
        """P(`to_state` | `from_state`); falls back to the row's default, then
        the global default."""
        from_state = as_text(from_state)
        to_state = as_text(to_state)
        data = self.transition_dict[DATA]
        default = self.transition_dict[DEFAULT]
        if from_state not in data:
            return float(default)
        prob_dict = data[from_state]
        if to_state in prob_dict:
            return float(prob_dict[to_state])
        if DEFAULT in prob_dict:
            return float(prob_dict[DEFAULT])
        return float(default)

    def get_states(self, observation):
        """All hanzi that can produce pinyin `observation`."""
        return list(self.py2hz_dict[observation])
class DefaultDagParams(AbstractDagParams):
    """DAG parameters backed by the bundled char/phrase frequency tables."""

    def __init__(self,):
        data_dir = os.path.join(self.pwd(), 'data')
        self.char_dict = self.readjson(os.path.join(data_dir, 'dag_char.json'))
        self.phrase_dict = self.readjson(os.path.join(data_dir, 'dag_phrase.json'))

    def readjson(self, filename):
        """Load a JSON data file, always as UTF-8."""
        import io  # local import keeps the module's dependency surface unchanged
        # Explicit UTF-8: the data files contain Chinese text and must not
        # depend on the platform default encoding.
        with io.open(filename, encoding='utf-8') as fp:
            return json.load(fp)

    def pwd(self,):
        """Directory of this module (where the data files live)."""
        return os.path.dirname(os.path.abspath(__file__))

    def get_phrase(self, pinyin_list, num=6):
        """Top-`num` (word, score) candidates for a pinyin sequence,
        e.g. ['yi', 'ge']. Single syllables hit the char table; longer
        sequences hit the phrase table. Unknown keys yield []."""
        if not pinyin_list:
            return []
        table = self.char_dict if len(pinyin_list) == 1 else self.phrase_dict
        key = ','.join(pinyin_list)
        return table.get(key, [])[:num]
| 3,400 | 27.341667 | 102 | py |
Pinyin2Hanzi | Pinyin2Hanzi-master/Pinyin2Hanzi/priorityset.py | # coding: utf-8
import heapq
class Item(object):
    """A (score, path) pair; all comparisons are by score only."""

    def __init__(self, score, path):
        self._score = score
        self._path = path

    @property
    def score(self):
        return self._score

    @property
    def path(self):
        return self._path

    def __lt__(self, other):
        return self._score < other.score

    def __le__(self, other):
        return self._score <= other.score

    def __eq__(self, other):
        return self._score == other.score

    def __ne__(self, other):
        return self._score != other.score

    def __gt__(self, other):
        return self._score > other.score

    def __ge__(self, other):
        return self._score >= other.score

    def __str__(self):
        return '< score={0}, path={1} >'.format(self._score, self._path)

    def __repr__(self):
        return str(self)
class PrioritySet(object):
    """Bounded min-heap keeping only the `capacity` highest-scoring paths."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.data = []

    def put(self, score, path):
        assert isinstance(path, list)
        heapq.heappush(self.data, [score, Item(score, path)])
        # Evict the lowest-scoring entries once over capacity.
        while len(self.data) > self.capacity:
            heapq.heappop(self.data)

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        # Yield Items in heap (arbitrary) order.
        return (entry[1] for entry in self.data)

    def __str__(self):
        lines = ['\t' + str(entry[1]) for entry in self.data]
        return '[ \n' + '\n'.join(lines) + ('\n' if lines else '') + ']'

    def __repr__(self):
        return str(self)
| 1,553 | 20.583333 | 74 | py |
Pinyin2Hanzi | Pinyin2Hanzi-master/Pinyin2Hanzi/viterbi.py | # coding: utf-8
from __future__ import (print_function, unicode_literals, absolute_import)
from .interface import AbstractHmmParams
from .priorityset import PrioritySet
import math
def viterbi(hmm_params, observations, path_num=6, log=False, min_prob=3.14e-200):
    """Beam-style Viterbi decoding: keep the best `path_num` paths per state.

    Probabilities are clamped below by `min_prob` to avoid log(0)/underflow.
    V is a sliding two-slot window: V[0] = previous step, V[1] = current.
    Returns Items sorted by score descending.
    """
    assert( isinstance(hmm_params, AbstractHmmParams) )
    V = [{}]
    t = 0
    cur_obs = observations[t]
    # Initialize base cases (t == 0)
    prev_states = cur_states = hmm_params.get_states(cur_obs) # wordset
    for state in cur_states:
        if log:
            __score = math.log(max(hmm_params.start(state), min_prob)) + \
                      math.log(max(hmm_params.emission(state, cur_obs), min_prob))
        else:
            __score = max(hmm_params.start(state), min_prob) * \
                      max(hmm_params.emission(state, cur_obs), min_prob)
        __path = [state]
        V[0].setdefault(state, PrioritySet(path_num))
        V[0][state].put(__score, __path)
    # Run Viterbi for t > 0
    for t in range(1, len(observations)):
        cur_obs = observations[t]
        # Slide the window: drop the oldest time step, keep only the latest.
        if len(V) == 2:
            V = [V[-1]]
        V.append({})
        prev_states = cur_states
        cur_states = hmm_params.get_states(cur_obs)
        for y in cur_states:
            V[1].setdefault( y, PrioritySet(path_num) )
            max_item = None  # NOTE(review): unused leftover variable.
            for y0 in prev_states: # from y0(t-1) to y(t)
                for item in V[0][y0]:
                    if log:
                        _s = item.score + \
                             math.log(max(hmm_params.transition(y0, y), min_prob)) + \
                             math.log(max(hmm_params.emission(y, cur_obs), min_prob))
                    else:
                        _s = item.score * \
                             max(hmm_params.transition(y0, y), min_prob) * \
                             max(hmm_params.emission(y, cur_obs), min_prob)
                    _p = item.path + [y]
                    V[1][y].put(_s, _p)
    # Merge the per-state beams of the final step into one global beam.
    result = PrioritySet(path_num)
    for last_state in V[-1]:
        for item in V[-1][last_state]:
            result.put(item.score, item.path)
    result = [item for item in result]
    return sorted(result, key=lambda item: item.score, reverse=True)
Pinyin2Hanzi | Pinyin2Hanzi-master/Pinyin2Hanzi/util.py | # coding: utf-8
from __future__ import (print_function, unicode_literals, absolute_import)
import os
import sys
# Python 2 only: force the default string encoding to UTF-8. On Python 3
# reload/setdefaultencoding do not exist, so this silently does nothing.
try:
    reload(sys)
    sys.setdefaultencoding('utf-8')
except:
    pass

PY2 = sys.version_info[0] == 2

if not PY2:
    # Python 3.x and up
    xrange = range
    def as_text(v): ## coerce to a unicode text string
        if v is None:
            return None
        elif isinstance(v, bytes):
            return v.decode('utf-8', errors='ignore')
        elif isinstance(v, str):
            return v
        else:
            raise ValueError('Unknown type %r' % type(v))
    def is_text(v):
        return isinstance(v, str)
else:
    # Python 2.x
    xrange = xrange
    def as_text(v):
        # Decode byte strings to unicode; pass unicode through unchanged.
        if v is None:
            return None
        elif isinstance(v, unicode):
            return v
        elif isinstance(v, str):
            return v.decode('utf-8', errors='ignore')
        else:
            raise ValueError('Invalid type %r' % type(v))
    def is_text(v):
        return isinstance(v, unicode)
def is_chinese(v):
    """Return True when every character of *v* is a CJK ideograph (or '〇').

    Raises ValueError for non-text input; an empty string yields False.
    """
    if not is_text(v):
        raise ValueError('Invalid type %r' % type(v))
    if not v:
        return False
    return all(u'\u4e00' <= ch <= u'\u9fff' or ch == u'〇' for ch in v)
def current_dir():
    """Absolute directory containing this module (symlinks resolved)."""
    here = os.path.realpath(__file__)
    return os.path.dirname(here)
# Mapping from toned pinyin characters to their toneless ASCII equivalents.
__removetone_dict = {
    'ā': 'a',
    'á': 'a',
    'ǎ': 'a',
    'à': 'a',
    'ē': 'e',
    'é': 'e',
    'ě': 'e',
    'è': 'e',
    'ī': 'i',
    'í': 'i',
    'ǐ': 'i',
    'ì': 'i',
    'ō': 'o',
    'ó': 'o',
    'ǒ': 'o',
    'ò': 'o',
    'ū': 'u',
    'ú': 'u',
    'ǔ': 'u',
    'ù': 'u',
    'ü': 'v',
    'ǖ': 'v',
    'ǘ': 'v',
    'ǚ': 'v',
    'ǜ': 'v',
    'ń': 'n',
    'ň': 'n',
    # NOTE(review): this key appears mojibake-damaged (likely a toned 'm') —
    # verify against the upstream source. Harmless at runtime: iteration over a
    # string never yields an empty-string character.
    '': 'm',
}

def remove_tone(one_py):
    """Strip tone marks from a pinyin syllable, e.g. ``lǔ`` -> ``lu``."""
    one_py = as_text(one_py)
    # Single join instead of repeated string concatenation (avoids O(n^2)).
    return ''.join(__removetone_dict.get(c, c) for c in one_py)
def normlize_pinyin(one_py):
    """Normalize a toneless pinyin syllable: 'ue' -> 've'; bare 'ng' -> 'en'."""
    if 'ue' in one_py:
        return one_py.replace('ue', 've')
    if one_py == 'ng':  # the interjection 嗯
        return 'en'
    return one_py
def simplify_pinyin(one_py):
    """Lower-case a syllable, strip its tone marks, and normalize it."""
    return normlize_pinyin( remove_tone(one_py.lower()) )
# Full inventory of valid toneless pinyin syllables.
__pinyin = set(['gu','qiao','qian','qve','ge','gang','ga','lian','liao','rou','zong',\
'tu','seng','yve','ti','te','jve','ta','nong','zhang','fan','ma','gua','die','gui',\
'guo','gun','sang','diu','zi','ze','za','chen','zu','ba','dian','diao','nei','suo',\
'sun','zhao','sui','kuo','kun','kui','cao','zuan','kua','den','lei','neng','men',\
'mei','tiao','geng','chang','cha','che','fen','chi','fei','chu','shui','me','tuan',\
'mo','mi','mu','dei','cai','zhan','zhai','can','ning','wang','pie','beng','zhuang',\
'tan','tao','tai','song','ping','hou','cuan','lan','lao','fu','fa','jiong','mai',\
'xiang','mao','man','a','jiang','zun','bing','su','si','sa','se','ding','xuan',\
'zei','zen','kong','pang','jie','jia','jin','lo','lai','li','peng','jiu','yi','yo',\
'ya','cen','dan','dao','ye','dai','zhen','bang','nou','yu','weng','en','ei','kang',\
'dia','er','ru','keng','re','ren','gou','ri','tian','qi','shua','shun','shuo','qun',\
'yun','xun','fiao','zan','zao','rang','xi','yong','zai','guan','guai','dong','kuai',\
'ying','kuan','xu','xia','xie','yin','rong','xin','tou','nian','niao','xiu','fo',\
'kou','niang','hua','hun','huo','hui','shuan','quan','shuai','chong','bei','ben',\
'kuang','dang','sai','ang','sao','san','reng','ran','rao','ming','null','lie','lia',\
'min','pa','lin','mian','mie','liu','zou','miu','nen','kai','kao','kan','ka','ke',\
'yang','ku','deng','dou','shou','chuang','nang','feng','meng','cheng','di','de','da',\
'bao','gei','du','gen','qu','shu','sha','she','ban','shi','bai','nun','nuo','sen','lve',\
'kei','fang','teng','xve','lun','luo','ken','wa','wo','ju','tui','wu','le','ji','huang',\
'tuo','cou','la','mang','ci','tun','tong','ca','pou','ce','gong','cu','lv','dun','pu',\
'ting','qie','yao','lu','pi','po','suan','chua','chun','chan','chui','gao','gan','zeng',\
'gai','xiong','tang','pian','piao','cang','heng','xian','xiao','bian','biao','zhua','duan',\
'cong','zhui','zhuo','zhun','hong','shuang','juan','zhei','pai','shai','shan','shao','pan',\
'pao','nin','hang','nie','zhuai','zhuan','yuan','niu','na','miao','guang','ne','hai','han',\
'hao','wei','wen','ruan','cuo','cun','cui','bin','bie','mou','nve','shen','shei','fou','xing',\
'qiang','nuan','pen','pei','rui','run','ruo','sheng','dui','bo','bi','bu','chuan','qing',\
'chuai','duo','o','chou','ou','zui','luan','zuo','jian','jiao','sou','wan','jing','qiong',\
'wai','long','yan','liang','lou','huan','hen','hei','huai','shang','jun','hu','ling','ha','he',\
'zhu','ceng','zha','zhe','zhi','qin','pin','ai','chai','qia','chao','ao','an','qiu','ni','zhong',\
'zang','nai','nan','nao','chuo','tie','you','nu','nv','zheng','leng','zhou','lang','e',])
# Pinyin initials (shengmu).
__shengmu = set(['b','p','m','f','d','t','n','l','g','k','h','j','q','x','zh','ch','sh','r','z','c','s',])
# Pinyin finals (yunmu): single and compound.
__single_yunmu = set(['a', 'o', 'e', 'i', 'u', 'v'])
__complex_yunmu = set(['ai','ei','ui','ao','ou','iu','ie','ve','er','an','en','in','un','ang','eng','ing','ong',])
def is_pinyin(v):
    """Return True when *v* is a complete pinyin syllable known to this module."""
    return v in __pinyin
def all_pinyin():
    """Yield every known pinyin syllable (iteration order is unspecified)."""
    for syllable in __pinyin:
        yield syllable
def is_shengmu(v):
    """Return True when *v* is a pinyin initial (shengmu)."""
    return v in __shengmu
def is_single_yunmu(v):
    """Return True when *v* is a single-vowel pinyin final."""
    return v in __single_yunmu
def is_complex_yunmu(v):
    """Return True when *v* is a compound pinyin final."""
    return v in __complex_yunmu
def is_yunmu(v):
    """Return True when *v* is any pinyin final (single or compound)."""
    if is_single_yunmu(v):
        return True
    return is_complex_yunmu(v)
def get_shengmu(one_py):
    """Return the initial (shengmu) prefix of *one_py*, or None when absent.

    Two-letter initials (zh/ch/sh) are preferred over one-letter ones, matching
    the longest-prefix behavior of the original branching implementation.
    """
    for size in (2, 1):
        prefix = one_py[:size]
        if prefix and is_shengmu(prefix):
            return prefix
    return None
Pinyin2Hanzi | Pinyin2Hanzi-master/Pinyin2Hanzi/__init__.py | from __future__ import absolute_import
from .interface import AbstractHmmParams, AbstractDagParams
from .implement import DefaultHmmParams, DefaultDagParams
from .priorityset import Item, PrioritySet
from .util import is_chinese, remove_tone, normlize_pinyin, simplify_pinyin, is_pinyin, all_pinyin
from .dag import dag
from .viterbi import viterbi | 351 | 34.2 | 98 | py |
Pinyin2Hanzi | Pinyin2Hanzi-master/Pinyin2Hanzi/interface.py | # coding: utf-8
class AbstractHmmParams(object):
    """Interface for HMM parameters consumed by the viterbi decoder.

    States are hanzi (Chinese characters); observations are pinyin
    syllables. Concrete subclasses provide start, emission and transition
    probabilities plus the candidate states for an observation.
    """

    def start(self, state):
        """Return the start probability of *state* (a hanzi)."""
        pass

    def emission(self, state, observation):
        """Return P(observation | state): hanzi -> pinyin probability."""
        pass

    def transition(self, from_state, to_state):
        """Return the state -> state transition probability."""
        pass

    def get_states(self, observation):
        """Return the states (hanzi) that can produce the given observation."""
        pass
class AbstractDagParams(object):
    """Interface for the phrase lookup used by the DAG decoder."""

    def get_phrase(self, pinyin_list, num):
        """Return up to *num* (phrase, score) candidates for *pinyin_list*."""
        pass
Pinyin2Hanzi | Pinyin2Hanzi-master/train/dag/gen_phrase.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
import json
sys.path = ['../..'] + sys.path
from Pinyin2Hanzi import util
from ChineseTone import PinyinHelper, PinyinFormat
import jieba
def cut(s):
    """Segment sentence *s* into words with jieba (accurate mode)."""
    return jieba.cut(s, cut_all=False)
def writejson2file(obj, filename):
    """Serialize *obj* as pretty-printed, key-sorted JSON into *filename*."""
    with open(filename, 'w') as outfile:
        json.dump(obj, outfile, indent=4, sort_keys=True)
def readdatafromfile(filename):
    """Load and return the JSON document stored in *filename*."""
    with open(filename) as infile:
        return json.load(infile)
# Aggregate phrase frequencies keyed by their comma-joined pinyin, tracking the
# global max/min counts so the later weighting step can normalize.
result = {}
max_num = 0.
min_num = 100000000000000.

# word.txt lines look like "<word>=<frequency>".
for line in open('./word.txt'):
    line = util.as_text(line.strip())
    if '=' not in line:
        continue
    word, num = line.split('=')
    num = float(num)
    pinyin_list = PinyinHelper.convertToPinyinFromSentence(word, segment=cut)
    pinyins = ','.join(pinyin_list)
    pinyins = util.simplify_pinyin(pinyins)
    result.setdefault(pinyins, {})
    result[pinyins].setdefault(word, 0)
    result[pinyins][word] += num
    max_num = max(max_num, result[pinyins][word])
    min_num = min(min_num, result[pinyins][word])

# phrase.txt entries carry no frequency; each occurrence counts as 1.
for line in open('./phrase.txt'):
    line = util.as_text(line.strip())
    if '=' not in line:
        continue
    word, _ = line.split('=')
    num = 1.
    pinyin_list = PinyinHelper.convertToPinyinFromSentence(word, segment=cut)
    pinyins = ','.join(pinyin_list)
    pinyins = util.simplify_pinyin(pinyins)
    result.setdefault(pinyins, {})
    result[pinyins].setdefault(word, 0)
    result[pinyins][word] += num
    max_num = max(max_num, result[pinyins][word])
    min_num = min(min_num, result[pinyins][word])

# Stash the extrema alongside the data; gen_finally.py reads them back.
result['max_num'] = max_num
result['min_num'] = min_num

writejson2file(result, './result/dag_phrase.json')

# with open('./result/dag_phrase.txt', 'w') as output:
#     s = ''
#     for pinyin in result:
#         for word in result[pinyin]:
#             num = result[pinyin][word]
#             s = s + pinyin + '=' + word + '=' + str(num) + '\n'
#     output.write(s)
Pinyin2Hanzi | Pinyin2Hanzi-master/train/dag/gen_finally.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
import json
sys.path = ['../..'] + sys.path
from Pinyin2Hanzi import util
def writejson2file(obj, filename):
    """Write *obj* to *filename* as indented, key-sorted JSON."""
    with open(filename, 'w') as outfile:
        json.dump(obj, outfile, indent=4, sort_keys=True)
def readdatafromfile(filename):
    """Return the JSON payload parsed from *filename*."""
    with open(filename) as infile:
        return json.load(infile)
def _filter(s):
s = s.replace(chr(0), '')
return s
def get_weight(raw_value, raw_min, raw_max, weight_min, weigth_max):
    """Linearly map *raw_value* from [raw_min, raw_max] onto
    [weight_min, weigth_max] and return the mapped weight as a float.

    Note: ``weigth_max`` keeps its (misspelled) public name so existing
    keyword callers are not broken.
    """
    raw_value = float(raw_value)
    raw_min = float(raw_min)
    raw_max = float(raw_max)
    weight_min = float(weight_min)
    weigth_max = float(weigth_max)
    # Fraction of the raw range covered by raw_value.  (The original code
    # also computed an unused ``weight_diff`` local, removed here.)
    ratio = (raw_value - raw_min) / (raw_max - raw_min)
    return weight_min + ratio * (weigth_max - weight_min)
# print( get_weight(23, 1, 23, 1, 230) )
#
# Single-character weights are mapped into [0.1, 0.2].
def gen_dag_char():
    """Rescale raw single-character counts into [0.1, 0.2] weights and write
    the sorted per-pinyin candidate lists to the package's dag_char.json."""
    data = readdatafromfile('./result/dag_char.json')
    result = {}
    max_num = data['max_num']
    min_num = data['min_num']
    for pinyin in data:
        # The extrema were stored alongside the real entries; skip them.
        if pinyin == 'max_num' or pinyin == 'min_num':
            continue
        for hanzi in data[pinyin]:
            hanzi = _filter(hanzi)
            # NOTE(review): _filter() reassigns the loop variables; if it ever
            # changes a key, the data[pinyin][hanzi] lookup below could
            # KeyError — confirm NULs never appear in keys.
            pinyin = _filter(pinyin)
            num = data[pinyin][hanzi]
            result.setdefault(pinyin, [])
            weight = get_weight(num, min_num, max_num, 0.1, 0.2)
            result[pinyin].append((hanzi, weight))
        # Highest-weight candidates first.
        result[pinyin] = sorted(result[pinyin], key = lambda item: item[1], reverse=True)
    writejson2file(result, '../../Pinyin2Hanzi/data/dag_char.json')
# Phrase weights are mapped into [0.2, 1.0].
def gen_dag_phrase():
    """Rescale raw phrase counts into [0.2, 1.0] weights and write the sorted
    per-pinyin candidate lists to the package's dag_phrase.json."""
    data = readdatafromfile('./result/dag_phrase.json')
    result = {}
    max_num = data['max_num']
    min_num = data['min_num']
    for pinyin in data:
        # Skip the bookkeeping extrema entries.
        if pinyin == 'max_num' or pinyin == 'min_num':
            continue
        for phrase in data[pinyin]:
            phrase = _filter(phrase)
            pinyin = _filter(pinyin)
            num = data[pinyin][phrase]
            result.setdefault(pinyin, [])
            weight = get_weight(num, min_num, max_num, 0.2, 1.0)
            result[pinyin].append( (phrase, weight) )
        # Highest-weight candidates first.
        result[pinyin] = sorted(result[pinyin], key = lambda item: item[1], reverse=True)
    writejson2file(result, '../../Pinyin2Hanzi/data/dag_phrase.json')
## Regenerate both DAG data files.
gen_dag_char()
gen_dag_phrase()
Pinyin2Hanzi | Pinyin2Hanzi-master/train/dag/gen_char.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
import json
sys.path = ['../..'] + sys.path
from Pinyin2Hanzi import util
# Inputs produced by the HMM training step; output consumed by gen_finally.py.
pinyin2hanzi_file = '../hmm/result/pinyin2hanzi.txt'
base_emission_file = '../hmm/result/base_emission.json'
output_file = './result/dag_char.json'
def writejson2file(obj, filename):
    """Dump *obj* to *filename* as indented, key-sorted JSON."""
    with open(filename, 'w') as outfile:
        json.dump(obj, outfile, indent=4, sort_keys=True)
def readdatafromfile(filename):
    """Parse and return the JSON stored in *filename*."""
    with open(filename) as infile:
        return json.load(infile)
# Build per-pinyin character counts from the HMM emission table plus the
# pinyin->hanzi listing, tracking global extrema for later normalization.
result = {}
data = readdatafromfile(base_emission_file)
max_num = 0.
min_num = 100000000000000.

for hanzi in data:
    for pinyin in data[hanzi]:
        # NOTE(review): pinyin is simplified before being used to index
        # data[hanzi] again; assumes the emission keys are already in
        # simplified form — confirm against the HMM output.
        pinyin = util.simplify_pinyin(pinyin)
        num = data[hanzi][pinyin]
        key = pinyin
        result.setdefault(key, {})
        result[key].setdefault(hanzi, 0)
        result[key][hanzi] += num
        max_num = max(max_num, result[key][hanzi])
        min_num = min(min_num, result[key][hanzi])

# pinyin2hanzi.txt lines look like "<pinyin>=<hanzi...>"; each listed
# character contributes one additional count.
for line in open(pinyin2hanzi_file):
    line = util.as_text(line.strip())
    if '=' not in line:
        continue
    pinyin, chars = line.split('=')
    if len(pinyin) == 0 or len(chars) == 0:
        continue
    pinyin = util.simplify_pinyin(pinyin)
    for hanzi in chars:
        key = pinyin
        result.setdefault(key, {})
        result[key].setdefault(hanzi, 0)
        result[key][hanzi] += 1.
        max_num = max(max_num, result[key][hanzi])
        min_num = min(min_num, result[key][hanzi])

# Stash the extrema alongside the data; gen_finally.py reads them back.
result['max_num'] = max_num
result['min_num'] = min_num

writejson2file(result, output_file)

# with open(output_file, 'w') as output:
#     s = ''
#     for pinyin in result:
#         for hanzi in result[pinyin]:
#             num = result[pinyin][hanzi]
#             s = s + pinyin + '=' + hanzi + '=' + str(num) + '\n'
#     output.write(s)
Pinyin2Hanzi | Pinyin2Hanzi-master/train/hmm/process_article.py | # coding: utf-8
'''
从文章中提取句子,放到sentence.txt中
'''
from __future__ import (print_function, unicode_literals)
import os
import sys
import json
import pypinyin
import argparse
sys.path = ['../..'] + sys.path
from Pinyin2Hanzi import util
try:
    # Python 2 only: force UTF-8 as the default string encoding.  On
    # Python 3 reload() is undefined here, so the bare except makes this a
    # no-op; kept as-is for py2/py3 compatibility.
    reload(sys)
    sys.setdefaultencoding('utf-8')
except:
    pass

ARTICLE_DIR = './article'  # corpus of raw article .txt files
SENTENCE_FILE = './result/sentence.txt'  # extracted sentences, one per line
def topinyin(s):
    """Convert *s* (assumed to be all hanzi) into a list of simplified
    pinyin syllables; the numeral '〇' is special-cased to 'ling'."""
    text = util.as_text(s)
    result = []
    for py in pypinyin.lazy_pinyin(text):
        py = util.as_text(py)
        result.append('ling' if py == '〇' else util.simplify_pinyin(py))
    return result
def extract_chinese_sentences(content):
    """Split *content* into runs of consecutive Chinese characters.

    Spaces and tabs are removed first; every non-Chinese character acts as a
    separator.  Runs shorter than 2 characters are dropped.
    """
    text = util.as_text(content).replace(' ', '').replace('\t', '')
    runs = []
    current = ''
    for ch in text:
        if util.is_chinese(ch):
            current += ch
        else:
            runs.append(current)
            current = ''
    runs.append(current)
    return [run.strip() for run in runs if len(run.strip()) > 1]
def gen_sentence():
    """Collect Chinese sentences from every .txt file under ARTICLE_DIR and
    write them, one per line, to SENTENCE_FILE."""
    all_files = []
    for root, directories, filenames in os.walk(ARTICLE_DIR):
        for filename in filenames:
            # Join with the directory currently being walked (``root``), not
            # ARTICLE_DIR: files inside sub-directories would otherwise get a
            # wrong path and fail to open.
            p = os.path.join(root, filename)
            if p.endswith('.txt'):
                all_files.append(p)

    # ``with`` guarantees the output file is closed even if a read fails.
    with open(SENTENCE_FILE, 'w') as mid_out:
        for fp in all_files:
            print('process '+ fp)
            with open(fp) as out:
                content = out.read()
            sentences = extract_chinese_sentences(content)
            mid_out.write('\n'.join(sentences) + '\n')
def main():
    """Entry point: regenerate the sentence corpus from the article folder."""
    gen_sentence()


if __name__ == '__main__':
    main()
Pinyin2Hanzi | Pinyin2Hanzi-master/train/hmm/process_finally.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import os
import sys
import json
sys.path = ['../..'] + sys.path
from Pinyin2Hanzi import util
try:
    # Python 2 only: make UTF-8 the default encoding; harmless no-op on
    # Python 3 (reload is undefined, so the bare except swallows it).
    reload(sys)
    sys.setdefaultencoding('utf-8')
except:
    pass

# Raw counts produced by gen_base.py.
BASE_START_FILE = './result/base_start.json'
BASE_EMISSION_FILE = './result/base_emission.json'
BASE_TRANSITION_FILE = './result/base_transition.json'

ALL_STATES_FILE = './result/all_states.txt'  # all hanzi (hidden states)
ALL_OBSERVATIONS_FILE = './result/all_observations.txt'  # all pinyin (observations)
PY2HZ_FILE = './result/pinyin2hanzi.txt'
HZ2PY_FILE = './hanzipinyin.txt'

# Final, normalized model files shipped inside the package.
FIN_PY2HZ_FILE = '../../Pinyin2Hanzi/data/hmm_py2hz.json'
FIN_START_FILE = '../../Pinyin2Hanzi/data/hmm_start.json'
FIN_EMISSION_FILE = '../../Pinyin2Hanzi/data/hmm_emission.json'
FIN_TRANSITION_FILE = '../../Pinyin2Hanzi/data/hmm_transition.json'

PINYIN_NUM = 411.   # number of distinct pinyin syllables
HANZI_NUM = 20903.  # number of distinct hanzi; used for add-one smoothing
def writejson2file(obj, filename):
    """Persist *obj* to *filename* as indented, key-sorted JSON."""
    with open(filename, 'w') as outfile:
        json.dump(obj, outfile, indent=4, sort_keys=True)
def readdatafromfile(filename):
    """Read *filename* and return its parsed JSON content."""
    with open(filename) as infile:
        return json.load(infile)
def gen_py2hz():
    """Convert the 'pinyin=hanzi...' text table into hmm_py2hz.json.

    Raises a generic Exception on any malformed (non two-field) line.
    """
    data = {}
    for line in open(PY2HZ_FILE):
        line = util.as_text(line.strip())
        ls = line.split('=')
        if len(ls) != 2:
            raise Exception('invalid format')
        py, chars = ls
        py = py.strip()
        chars = chars.strip()
        if len(py) > 0 and len(chars) > 0:
            data[py] = chars
    writejson2file(data, FIN_PY2HZ_FILE)
def gen_start():
    """Normalize raw start counts into start probabilities.

    HANZI_NUM is added to the denominator so unseen characters get the
    'default' probability 1/count (add-one style smoothing).
    """
    data = {'default': 1, 'data': None}
    start = readdatafromfile(BASE_START_FILE)
    count = HANZI_NUM
    for hanzi in start:
        count += start[hanzi]
    for hanzi in start:
        start[hanzi] = start[hanzi] / count
    data['default'] = 1.0 / count
    data['data'] = start
    writejson2file(data, FIN_START_FILE)
def gen_emission():
    """Merge dictionary pinyin counts into the emission table and normalize
    each character's pinyin distribution to sum to 1.

    Shape: {'泥': {'ni': 1.0}, '了': {'liao': 0.5, 'le': 0.5}}
    """
    data = {'default': 1.e-200, 'data': None}
    emission = readdatafromfile(BASE_EMISSION_FILE)

    # hanzipinyin.txt lines look like "<hanzi>=<py1>,<py2>,...".
    for line in open('./hanzipinyin.txt'):
        line = util.as_text(line.strip())
        hanzi, pinyin_list = line.split('=')
        pinyin_list = [util.simplify_pinyin(item.strip()) for item in pinyin_list.split(',')]
        char_list = [hanzi] * len(pinyin_list)
        for hanzi, pinyin in zip(char_list, pinyin_list):
            emission.setdefault(hanzi, {})
            emission[hanzi].setdefault(pinyin, 0.)
            emission[hanzi][pinyin] += 1.

    # Per-character normalization into probabilities.
    for hanzi in emission:
        num_sum = 0.
        for pinyin in emission[hanzi]:
            num_sum += emission[hanzi][pinyin]
        for pinyin in emission[hanzi]:
            emission[hanzi][pinyin] = emission[hanzi][pinyin] / num_sum

    data['data'] = emission
    writejson2file(data, FIN_EMISSION_FILE)
def gen_tramsition():
    """Normalize raw transition counts with add-one smoothing.

    (The misspelled name is kept: main() calls it by this name.)
    Input shape: {'你': {'好': 10, '们': 2}, '我': {}}
    """
    data = {'default': 1./HANZI_NUM, 'data': None}
    transition = readdatafromfile(BASE_TRANSITION_FILE)
    for c1 in transition:
        num_sum = HANZI_NUM  # every character gets a chance by default (add-one smoothing)
        for c2 in transition[c1]:
            num_sum += transition[c1][c2]
        for c2 in transition[c1]:
            transition[c1][c2] = float(transition[c1][c2]+1) / num_sum
        transition[c1]['default'] = 1./num_sum
    data['data'] = transition
    writejson2file(data, FIN_TRANSITION_FILE)
def main():
    """Regenerate all four shipped HMM model files from the raw counts."""
    gen_py2hz()
    gen_start()
    gen_emission()
    gen_tramsition()


if __name__ == '__main__':
    main()
Pinyin2Hanzi | Pinyin2Hanzi-master/train/hmm/gen_base.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import os
import sys
import json
from ChineseTone import PinyinHelper
import argparse
sys.path = ['../..'] + sys.path
from Pinyin2Hanzi import util
try:
    # Python 2 only: default the runtime encoding to UTF-8; no-op on Python 3.
    reload(sys)
    sys.setdefaultencoding('utf-8')
except:
    pass

SENTENCE_FILE = './result/sentence.txt'  # sentences from process_article.py
WORD_FILE = './word.txt'                 # "<word>=<frequency>" entries
HANZI2PINYIN_FILE = './hanzipinyin.txt'  # "<hanzi>=<py1>,<py2>,..." entries

# Raw (unnormalized) count tables consumed by process_finally.py.
BASE_START = './result/base_start.json'
BASE_EMISSION = './result/base_emission.json'
BASE_TRANSITION = './result/base_transition.json'
def writejson2file(data, filename):
    """Serialize *data* to *filename* as indented, key-sorted JSON."""
    with open(filename, 'w') as outfile:
        json.dump(data, outfile, indent=4, sort_keys=True)
def topinyin(s):
    """
    Convert *s* (assumed to be all hanzi) into a list of simplified pinyin
    syllables; '〇' is special-cased to 'ling'.

    Aborts the whole program via sys.exit() if a converted syllable still
    contains a comma, which would corrupt comma-joined keys downstream.
    """
    s = util.as_text(s)
    py_list = PinyinHelper.convertToPinyinFromSentence(s)
    result = []
    for py in py_list:
        py = util.as_text(py)
        if py == '〇':
            result.append('ling')
        else:
            result.append(util.simplify_pinyin(py))

    if ',' in ''.join(result):
        print(s)
        print(''.join(result))
        sys.exit()
    return result
def extract_chinese_sentences(content):
    """Split *content* into runs of consecutive Chinese characters.

    Spaces and tabs are stripped first; any non-Chinese character ends a run.
    Runs shorter than 2 characters are discarded.
    """
    text = util.as_text(content).replace(' ', '').replace('\t', '')
    runs = []
    current = ''
    for ch in text:
        if util.is_chinese(ch):
            current += ch
        else:
            runs.append(current)
            current = ''
    runs.append(current)
    return [run.strip() for run in runs if len(run.strip()) > 1]
def process_hanzipinyin(emission):
    """Seed the *emission* count table (hanzi -> {pinyin: count}) from the
    hanzi->pinyin dictionary file; mutates *emission* in place."""
    ## ./hanzipinyin.txt
    print('read from hanzipinyin.txt')
    for line in open(HANZI2PINYIN_FILE):
        line = util.as_text(line.strip())
        if '=' not in line:
            continue
        hanzi, pinyins = line.split('=')
        pinyins = pinyins.split(',')
        pinyins = [util.simplify_pinyin(py) for py in pinyins]
        for pinyin in pinyins:
            emission.setdefault(hanzi, {})
            emission[hanzi].setdefault(pinyin, 0)
            emission[hanzi][pinyin] += 1
def read_from_sentence_txt(start, emission, transition):
    """Accumulate start/emission/transition counts from the sentence corpus;
    all three dict arguments are mutated in place."""
    ## ./result/sentence.txt
    print('read from sentence.txt')
    for line in open(SENTENCE_FILE):
        line = util.as_text(line.strip())
        if len(line) < 2:
            continue
        if not util.is_chinese(line):
            continue

        ## for start: first character of each sentence
        start.setdefault(line[0], 0)
        start[line[0]] += 1

        ## for emission: pair each character with its pinyin
        pinyin_list = topinyin(line)
        char_list = [c for c in line]
        for hanzi, pinyin in zip(char_list, pinyin_list):
            emission.setdefault(hanzi, {})
            emission[hanzi].setdefault(pinyin, 0)
            emission[hanzi][pinyin] += 1

        ## for transition: adjacent character bigrams
        for f, t in zip(line[:-1], line[1:]):
            transition.setdefault(f, {})
            transition[f].setdefault(t, 0)
            transition[f][t] += 1
def read_from_word_txt(start, emission, transition):
    """Refine the count tables with word.txt frequencies; raw frequencies are
    scaled down by 1000 and floored at 2 so single words cannot dominate.
    All three dict arguments are mutated in place."""
    ## Refinement based on word.txt
    print('read from word.txt')
    _base = 1000.
    _min_value = 2.
    for line in open(WORD_FILE):
        line = util.as_text(line.strip())
        if '=' not in line:
            continue
        if len(line) < 3:
            continue
        ls = line.split('=')
        if len(ls) != 2:
            continue
        word, num = ls
        word = word.strip()
        num = num.strip()
        if len(num) == 0:
            continue
        num = float(num)
        num = max(_min_value, num/_base)
        if not util.is_chinese(word):
            continue

        ## for start
        start.setdefault(word[0], 0)
        start[word[0]] += num

        ## for emission
        pinyin_list = topinyin(word)
        char_list = [c for c in word]
        for hanzi, pinyin in zip(char_list, pinyin_list):
            emission.setdefault(hanzi, {})
            emission[hanzi].setdefault(pinyin, 0)
            emission[hanzi][pinyin] += num

        ## for transition
        for f, t in zip(word[:-1], word[1:]):
            transition.setdefault(f, {})
            transition[f].setdefault(t, 0)
            transition[f][t] += num
def gen_base():
    """Build and persist the raw start/emission/transition count tables.

    NOTE(review): the original docstring said "run gen_middle() first", but no
    gen_middle exists in this file — presumably the sentence corpus must be
    generated first (process_article.py); confirm.
    """
    start = {}       # e.g. {'你': 2, '号': 1}
    emission = {}    # keyed hanzi -> {pinyin: count}, NOT pinyin -> {hanzi: count}
    transition = {}  # e.g. {'你': {'好': 10, '们': 2}, '我': {}}

    process_hanzipinyin(emission)
    read_from_sentence_txt(start, emission, transition)
    read_from_word_txt(start, emission, transition)

    ## write to file
    writejson2file(start, BASE_START)
    writejson2file(emission, BASE_EMISSION)
    writejson2file(transition, BASE_TRANSITION)
def main():
    """Entry point: regenerate the raw HMM count tables."""
    gen_base()


if __name__ == '__main__':
    main()
Pinyin2Hanzi | Pinyin2Hanzi-master/train/hmm/process_hzpy.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import os
import sys
sys.path = ['../..'] + sys.path
from Pinyin2Hanzi import util
try:
    # Python 2 only: default the runtime encoding to UTF-8; no-op on Python 3.
    reload(sys)
    sys.setdefaultencoding('utf-8')
except:
    pass

SOURCE_FILE = './hanzipinyin.txt'
ALL_STATES_FILE = './result/all_states.txt'  # hanzi (hidden states)
ALL_OBSERVATIONS_FILE = './result/all_observations.txt'  # pinyin (observations)
PINYIN2HANZI_FILE = './result/pinyin2hanzi.txt'
# Build the state/observation inventories and the pinyin->hanzi index from
# the "<hanzi>=<py1>,<py2>,..." dictionary file.
states = set()
observations = set()
py2hz = {}

for line in open(SOURCE_FILE):
    line = util.as_text(line.strip())
    hanzi, pinyin_list = line.split('=')
    pinyin_list = [util.simplify_pinyin(item.strip()) for item in pinyin_list.split(',')]
    states.add(hanzi)
    for pinyin in pinyin_list:
        observations.add(pinyin)
        py2hz.setdefault(pinyin, set())
        py2hz[pinyin].add(hanzi)
        # Also index by the bare initial (shengmu) so abbreviated input works.
        shengmu = util.get_shengmu(pinyin)
        if shengmu is not None:
            py2hz.setdefault(shengmu, set())
            py2hz[shengmu].add(hanzi)

with open(ALL_STATES_FILE, 'w') as out:
    s = '\n'.join(states)
    out.write(s)

with open(ALL_OBSERVATIONS_FILE, 'w') as out:
    s = '\n'.join(observations)
    out.write(s)

with open(PINYIN2HANZI_FILE, 'w') as out:
    s = ''
    for k in py2hz:
        s = s + k + '=' + ''.join(py2hz[k]) + '\n'
    out.write(s)

print('end')
Pinyin2Hanzi | Pinyin2Hanzi-master/example/pinyin_list.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
sys.path.append('..')
from Pinyin2Hanzi import all_pinyin
from Pinyin2Hanzi import DefaultDagParams
dagparams = DefaultDagParams()

# Print every pinyin syllable for which the DAG data has no single-syllable
# phrase entry (i.e. gaps in the training data).
for py in all_pinyin():
    if len(dagparams.get_phrase([py])) == 0:
        print(py)

print(dagparams.get_phrase(['ju']))
print(dagparams.get_phrase(['jve']))
Pinyin2Hanzi | Pinyin2Hanzi-master/example/viterbi_health_fever.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
sys.path.append('..')
from Pinyin2Hanzi import AbstractHmmParams
from Pinyin2Hanzi import viterbi
class HmmParams(AbstractHmmParams):
    """Hard-coded HMM parameters for the classic health/fever toy example
    (states: Healthy/Fever; observations: normal/cold/dizzy)."""

    def __init__(self,):
        self.states = ('Healthy', 'Fever')
        self.observations = ('normal', 'cold', 'dizzy')
        self.start_probability = {'Healthy': 0.6, 'Fever': 0.4}
        self.transition_probability = {
            'Healthy' : {'Healthy': 0.7, 'Fever': 0.3},
            'Fever' : {'Healthy': 0.4, 'Fever': 0.6}
        }
        self.emission_probability = {
            'Healthy' : {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1},
            'Fever' : {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6}
        }

    def start(self, state):
        ''' start probability of *state* '''
        return self.start_probability[state]

    def emission(self, state, observation):
        ''' P(observation | state) '''
        return self.emission_probability[state][observation]

    def transition(self, from_state, to_state):
        ''' state -> state transition probability '''
        return self.transition_probability[from_state][to_state]

    def get_states(self, observation):
        ''' every state can produce every observation in this toy model '''
        return self.states
# Exercise the decoder with different path counts and with/without
# log-probability mode; each run prints (score, state path) per candidate.
result = viterbi(hmm_params=HmmParams(), observations=('normal', 'cold', 'dizzy'), path_num = 10, log = False)
for item in result:
    print(item.score, item.path)

print(20*'--')
result = viterbi(hmm_params=HmmParams(), observations=('normal', 'cold', 'dizzy'), path_num = 2, log = False)
for item in result:
    print(item.score, item.path)

print(20*'--')
result = viterbi(hmm_params=HmmParams(), observations=('normal', 'cold', 'dizzy'), path_num = 1, log = False)
for item in result:
    print(item.score, item.path)

print(20*'--')
result = viterbi(hmm_params=HmmParams(), observations=('normal', 'cold', 'dizzy'), path_num = 1)
for item in result:
    print(item.score, item.path)

print(20*'--')
result = viterbi(hmm_params=HmmParams(), observations=('normal', 'cold', 'dizzy'), path_num = 4, log = True)
for item in result:
    print(item.score, item.path)

print(20*'--')
result = viterbi(hmm_params=HmmParams(), observations=('normal', 'cold', 'dizzy'), path_num = 2, log = True)
for item in result:
    print(item.score, item.path)
Pinyin2Hanzi | Pinyin2Hanzi-master/example/viterbi_pinyin2hanzi.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
sys.path.append('..')
from Pinyin2Hanzi import DefaultHmmParams
from Pinyin2Hanzi import viterbi
hmmparams = DefaultHmmParams()

# Each run decodes a pinyin sequence into the top-N hanzi paths and prints
# (score, path) for every candidate.
result = viterbi(hmm_params=hmmparams, observations=('ni', 'hao', 'a'), path_num = 5, log = True)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('ni', 'hao', 'a'), path_num = 2, log = True)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('chuang', 'qian', 'ming', 'yve', 'guang'), path_num = 5)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('chuang', 'qian', 'ming', 'yve', 'guang'), path_num = 2)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('ni', 'zhi', 'bu', 'zhi', 'dao'), path_num = 5)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('wo', 'men', 'dou', 'shi', 'hao', 'ren'), path_num = 5)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('wo', 'men', 'dou', 'shi', 'hao', 'hai', 'zi'), path_num = 5)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('wo', 'bing', 'bu', 'kuai', 'le'), path_num = 2)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('sheng', 'lve'), path_num = 2)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'--')
result = viterbi(hmm_params=hmmparams, observations=('ren', 'min', 'wu', 'zhuang'), path_num = 2)
for item in result:
    print(item.score, '/'.join(item.path))
Pinyin2Hanzi | Pinyin2Hanzi-master/example/dag_pinyin2hanzi_2.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
sys.path.append('..')
from Pinyin2Hanzi import DefaultDagParams
from Pinyin2Hanzi import dag
dagparams = DefaultDagParams()

# Decode a full sentence and its sub-phrases; each call prints the single
# best DAG path for the given pinyin list.
print( dag(dagparams, [u'ti', u'chu', u'le', u'jie', u'jve', u'fang', u'an'], path_num=1) )
print( dag(dagparams, [u'ti', u'chu', u'le'], path_num=1) )
print( dag(dagparams, ['jie', 'jve', 'fang', 'an'], path_num=1) )
print( dag(dagparams, ['jie', 'jve'], path_num=1) )
print( dag(dagparams, ['fang', 'an'], path_num=1) )
Pinyin2Hanzi | Pinyin2Hanzi-master/example/dag_pinyin2hanzi.py | # coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
sys.path.append('..')
from Pinyin2Hanzi import DefaultDagParams
from Pinyin2Hanzi import dag
dagparams = DefaultDagParams()

# Each run prints (score, path) for the requested number of DAG candidates.
result = dag(dagparams, ['wo'])
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'*')
result = dag(dagparams, ['ni', 'hao'])
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'*')
result = dag(dagparams, ['ni', 'bu', 'zhi', 'dao', 'de', 'shi'])
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'*')
result = dag(dagparams, ['ni', 'bu', 'zhi', 'dao', 'de', 'shi'], path_num=2, log=True)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'*')
result = dag(dagparams, ['ni', 'bu', 'zhi', 'dao', 'de', 'shi'], path_num=6, log=False)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'*')
result = dag(dagparams, ['ni', 'bu', 'zhi', 'dao', 'de', 'shi'], path_num=2, log=False)
for item in result:
    print(item.score, '/'.join(item.path))

print(20*'*')
result = dag(dagparams, ['ni', 'bu', 'zhi', 'dao', 'de', 'shi'], path_num=1, log=False)
for item in result:
    print(item.score, '/'.join(item.path))

# Unknown syllable and a non-pinyin token: the decoder returns an empty result.
print(20*'=')
result = dag(dagparams, ['ni', 'bu', 'zhi', 'dao', 'de', 'shiiiii'], path_num=1, log=False)
print(result)

print(20*'=')
result = dag(dagparams, ['ni', 'bu', 'zhi', 'dao', '的', 'shi'], path_num=1, log=False)
print(result)
Few-shot-WSI | Few-shot-WSI-master/setup.py | #!/usr/bin/env python
import os
import subprocess
import time
from setuptools import find_packages, setup
def readme():
    """Return the full text of README.md, decoded as UTF-8."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
MAJOR = 0
MINOR = 3
PATCH = 0
SUFFIX = ''
# NOTE(review): PATCH is an int, so ``PATCH != ''`` is always True and the
# else branch is effectively dead.  Presumably PATCH would be set to the
# empty string to drop the patch component — confirm before changing.
if PATCH != '':
    SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX)
else:
    SHORT_VERSION = '{}.{}{}'.format(MAJOR, MINOR, SUFFIX)

version_file = 'openselfsup/version.py'
def get_git_hash():
    """Return the current git commit SHA, or 'unknown' when git cannot run."""

    def _minimal_ext_cmd(cmd):
        # Construct a minimal environment so git output is not localized.
        env = {}
        for key in ['SYSTEMROOT', 'PATH', 'HOME']:
            value = os.environ.get(key)
            if value is not None:
                env[key] = value
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
    except OSError:
        return 'unknown'
    return out.strip().decode('ascii')
def get_hash():
    """Return a short hash identifying the current source revision.

    Prefers a live git checkout, then the previously generated version file,
    and finally falls back to 'unknown'.
    """
    if os.path.exists('.git'):
        return get_git_hash()[:7]
    if os.path.exists(version_file):
        try:
            from openselfsup.version import __version__
        except ImportError:
            raise ImportError('Unable to get git version')
        return __version__.split('+')[-1]
    return 'unknown'
def write_version_py():
    """Generate openselfsup/version.py embedding the version and git hash."""
    content = """# GENERATED VERSION FILE
# TIME: {}

__version__ = '{}'
short_version = '{}'
"""
    sha = get_hash()
    VERSION = SHORT_VERSION + '+' + sha

    with open(version_file, 'w') as f:
        f.write(content.format(time.asctime(), VERSION, SHORT_VERSION))
def get_version():
    """Read __version__ back out of the generated version file.

    NOTE(review): exec-into-function-locals is fragile in Python 3; this
    pattern works in CPython because locals() here reflects the dict exec
    populated, but confirm before refactoring.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
    """
    Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname

    def parse_line(line):
        """
        Parse information from a line in a requirements text file.
        Yields dicts with keys: line, package, and optionally
        version=(op, version) and platform_deps.
        """
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]

                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed entries for every non-blank, non-comment line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        # Reassemble each parsed entry into a requirement string.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
if __name__ == '__main__':
    # Regenerate the version file first so setup() picks up the fresh hash.
    write_version_py()
    setup(
        name='openselfsup',
        version=get_version(),
        description='Self-Supervision Toolbox and Benchmark',
        long_description=readme(),
        author='Xiaohang Zhan',
        author_email='xiaohangzhan@outlook.com',
        keywords='unsupervised learning, self-supervised learning',
        url='https://github.com/open-mmlab/openselfsup',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
        ],
        license='Apache License 2.0',
        setup_requires=parse_requirements('requirements/build.txt'),
        tests_require=parse_requirements('requirements/tests.txt'),
        install_requires=parse_requirements('requirements/runtime.txt'),
        zip_safe=False)
| 6,047 | 30.5 | 125 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/test.py | import argparse
import importlib
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openselfsup.datasets import build_dataloader, build_dataset
from openselfsup.models import build_model
from openselfsup.utils import (get_root_logger, dist_forward_collect,
nondist_forward_collect, traverse_replace)
def single_gpu_test(model, data_loader):
    """Run single-GPU inference over *data_loader* and collect outputs."""
    model.eval()

    def forward(**kwargs):
        return model(mode='test', **kwargs)

    return nondist_forward_collect(forward, data_loader,
                                   len(data_loader.dataset))
def multi_gpu_test(model, data_loader):
    """Run distributed inference and gather outputs across all ranks."""
    model.eval()

    def forward(**kwargs):
        return model(mode='test', **kwargs)

    rank, world_size = get_dist_info()
    return dist_forward_collect(forward, data_loader, rank,
                                len(data_loader.dataset))
def parse_args():
    """Parse the command-line arguments for testing/evaluating a model."""
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--work_dir', type=str, default=None,
                        help='the dir to save logs and models')
    parser.add_argument('--launcher', default='none',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--port', type=int, default=29500,
                        help='port only works when launcher=="slurm"')
    args = parser.parse_args()
    # Mirror the CLI flag into the environment when the launcher has not set
    # it — presumably consumed by the distributed init; confirm.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Load config + checkpoint, run (optionally distributed) inference on the
    validation set, and evaluate the collected outputs on rank 0."""
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    cfg.model.pretrained = None  # ensure to use checkpoint rather than pretraining

    # check memcached package exists
    if importlib.util.find_spec('mc') is None:
        traverse_replace(cfg, 'memcached', False)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        if args.launcher == 'slurm':
            cfg.dist_params['port'] = args.port
        init_dist(args.launcher, **cfg.dist_params)

    # logger
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, 'test_{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # build the dataloader
    dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=cfg.data.imgs_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_model(cfg.model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader)  # dict{key: np.ndarray}

    # Only rank 0 evaluates; other ranks contributed via the collect step.
    rank, _ = get_dist_info()
    if rank == 0:
        for name, val in outputs.items():
            dataset.evaluate(
                torch.from_numpy(val), name, logger, topk=(1, 5))


if __name__ == '__main__':
    main()
Few-shot-WSI | Few-shot-WSI-master/tools/publish_model.py | import argparse
import subprocess
def parse_args():
    """Parse the publish script's CLI: a single positional checkpoint path."""
    cli = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    cli.add_argument('in_file', help='input checkpoint filename')
    return cli.parse_args()
def process_checkpoint(in_file):
    """Copy *in_file* and rename the copy to ``<stem>-<sha8>.pth``.

    The first 8 hex characters of the file's SHA-256 are appended to the
    output name so published checkpoints are content-addressed.

    Fix: the original used fire-and-forget ``subprocess.Popen`` for the
    ``cp``/``mv`` steps, so ``sha256sum`` could run before the copy finished
    (hashing a missing or partially written file). ``subprocess.run`` with
    ``check=True`` waits for each step and surfaces failures.
    """
    tmp_file = in_file + ".tmp"
    # wait for the copy to complete before hashing it
    subprocess.run(['cp', in_file, tmp_file], check=True)
    sha = subprocess.check_output(['sha256sum', tmp_file]).decode()
    out_file = in_file
    if out_file.endswith('.pth'):
        out_file = out_file[:-4]
    final_file = out_file + f'-{sha[:8]}.pth'
    assert final_file != in_file, \
        "The output filename is the same as the input file."
    print("Output file: {}".format(final_file))
    subprocess.run(['mv', tmp_file, final_file], check=True)
def main():
    """CLI entry point: hash-stamp the checkpoint named on the command line."""
    process_checkpoint(parse_args().in_file)
if __name__ == '__main__':
main()
| 898 | 25.441176 | 68 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/extract.py | import argparse
import importlib
import numpy as np
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openselfsup.utils import dist_forward_collect, nondist_forward_collect
from openselfsup.datasets import build_dataloader, build_dataset
from openselfsup.models import build_model
from openselfsup.models.utils import MultiPooling
from openselfsup.utils import get_root_logger
class ExtractProcess(object):
    """Run a model in extraction mode and pool multi-level backbone features."""

    def __init__(self,
                 pool_type='specified',
                 backbone='resnet50',
                 layer_indices=(0, 1, 2, 3, 4)):
        # Pooling operator applied to the selected backbone stages.
        self.multi_pooling = MultiPooling(
            pool_type, in_indices=layer_indices, backbone=backbone)

    def _forward_func(self, model, **x):
        """Forward one batch; return {'featK': flattened CPU tensor} per stage."""
        stage_feats = self.multi_pooling(model(mode='extract', **x))
        out = {}
        for idx, feat in enumerate(stage_feats):
            out['feat{}'.format(idx + 1)] = feat.view(feat.size(0), -1).cpu()
        return out

    def extract(self, model, data_loader, distributed=False):
        """Collect per-stage features over *data_loader*, across ranks if asked."""
        model.eval()

        def batch_fn(**x):
            return self._forward_func(model, **x)

        total = len(data_loader.dataset)
        if distributed:
            rank, _world_size = get_dist_info()
            return dist_forward_collect(batch_fn, data_loader, rank, total)
        return nondist_forward_collect(batch_fn, data_loader, total)
def parse_args():
    """Parse CLI options for the feature-extraction script."""
    cli = argparse.ArgumentParser(
        description='OpenSelfSup extract features of a model')
    cli.add_argument('config', help='test config file path')
    cli.add_argument('--checkpoint', default=None, help='checkpoint file')
    cli.add_argument(
        '--pretrained', default='random',
        help='pretrained model file, exclusive to --checkpoint')
    cli.add_argument(
        '--dataset-config',
        default='benchmarks/extract_info/voc07.py',
        help='extract dataset config file path')
    cli.add_argument(
        '--layer-ind',
        type=str,
        help='layer indices, separated by comma, e.g., "0,1,2,3,4"')
    cli.add_argument(
        '--work_dir',
        type=str,
        default=None,
        help='the dir to save logs and models')
    cli.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    cli.add_argument('--port', type=int, default=29500,
                     help='port only works when launcher=="slurm"')
    args = cli.parse_args()
    # expose the launcher's rank to torch.distributed helpers
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def main():
    """Extract multi-level features for a benchmark dataset and save .npy splits.

    Pipeline: parse args, patch the model config to output the requested
    backbone stages, set up (optional) distributed execution and logging,
    build the extraction dataloader, load weights (checkpoint XOR pretrained),
    run extraction, and on rank 0 write one .npy per (split, feature level).
    """
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    layer_ind = [int(idx) for idx in args.layer_ind.split(',')]
    cfg.model.backbone.out_indices = layer_ind
    # checkpoint and pretrained are exclusive
    assert args.pretrained == "random" or args.checkpoint is None, \
        "Checkpoint and pretrained are exclusive."
    # check memcached package exists
    if importlib.util.find_spec('mc') is None:
        for field in ['train', 'val', 'test']:
            if hasattr(cfg.data, field):
                getattr(cfg.data, field).data_source.memcached = False
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        if args.launcher == 'slurm':
            cfg.dist_params['port'] = args.port
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # logger
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, 'extract_{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # build the dataloader
    dataset_cfg = mmcv.Config.fromfile(args.dataset_config)
    dataset = build_dataset(dataset_cfg.data.extract)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=dataset_cfg.data.imgs_per_gpu,
        workers_per_gpu=dataset_cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # specify pretrained model
    if args.pretrained != 'random':
        assert isinstance(args.pretrained, str)
        cfg.model.pretrained = args.pretrained
    # build the model and load checkpoint
    model = build_model(cfg.model)
    if args.checkpoint is not None:
        logger.info("Use checkpoint: {} to extract features".format(
            args.checkpoint))
        load_checkpoint(model, args.checkpoint, map_location='cpu')
    elif args.pretrained != "random":
        logger.info('Use pretrained model: {} to extract features'.format(
            args.pretrained))
    else:
        logger.info('No checkpoint or pretrained is give, use random init.')
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
    # build extraction processor
    extractor = ExtractProcess(
        pool_type='specified', backbone='resnet50', layer_indices=layer_ind)
    # run
    outputs = extractor.extract(model, data_loader, distributed=distributed)
    rank, _ = get_dist_info()
    mmcv.mkdir_or_exist("{}/features/".format(args.work_dir))
    if rank == 0:
        # slice each feature matrix into the named splits declared by the
        # dataset config (split_at are cumulative row boundaries)
        for key, val in outputs.items():
            split_num = len(dataset_cfg.split_name)
            split_at = dataset_cfg.split_at
            for ss in range(split_num):
                output_file = "{}/features/{}_{}.npy".format(
                    args.work_dir, dataset_cfg.split_name[ss], key)
                if ss == 0:
                    np.save(output_file, val[:split_at[0]])
                elif ss == split_num - 1:
                    np.save(output_file, val[split_at[-1]:])
                else:
                    np.save(output_file, val[split_at[ss - 1]:split_at[ss]])
if __name__ == '__main__':
main()
| 6,703 | 35.63388 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/upgrade_models.py | import torch
import argparse
def parse_args():
    """Parse CLI options: positional checkpoint plus a required --save-path."""
    cli = argparse.ArgumentParser()
    cli.add_argument('checkpoint', help='checkpoint file')
    cli.add_argument(
        '--save-path', type=str, required=True, help='destination file name')
    return cli.parse_args()
def main():
    """Drop every 'head*' entry from a raw checkpoint and re-save the rest."""
    args = parse_args()
    ck = torch.load(args.checkpoint, map_location=torch.device('cpu'))
    # keep only non-head weights, preserving their original order
    kept = {key: value for key, value in ck.items()
            if not key.startswith('head')}
    torch.save(dict(state_dict=kept, author='OpenSelfSup'), args.save_path)
if __name__ == '__main__':
main()
| 712 | 24.464286 | 77 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/extract_backbone_weights.py | import torch
import argparse
def parse_args():
    """Parse CLI options: input checkpoint and output filename."""
    cli = argparse.ArgumentParser(
        description='This script extracts backbone weights from a checkpoint')
    cli.add_argument('checkpoint', help='checkpoint file')
    cli.add_argument(
        'output', type=str, help='destination file name')
    return cli.parse_args()
def main():
    """Save only the 'backbone.*' weights of a checkpoint, prefix removed."""
    args = parse_args()
    assert args.output.endswith(".pth")
    ck = torch.load(args.checkpoint, map_location=torch.device('cpu'))
    # strip the 9-character 'backbone.' prefix from every matching key
    backbone_weights = {
        key[9:]: value
        for key, value in ck['state_dict'].items()
        if key.startswith('backbone')
    }
    if not backbone_weights:
        raise Exception("Cannot find a backbone module in the checkpoint.")
    torch.save(dict(state_dict=backbone_weights, author="OpenSelfSup"),
               args.output)
if __name__ == '__main__':
main()
| 952 | 28.78125 | 78 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/train.py | from __future__ import division
import argparse
import importlib
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist
from openselfsup import __version__
from openselfsup.apis import set_random_seed, train_model
from openselfsup.datasets import build_dataset
from openselfsup.models import build_model
from openselfsup.utils import collect_env, get_root_logger, traverse_replace
def parse_args():
    """Parse training CLI options (mm-style launcher interface)."""
    cli = argparse.ArgumentParser(description='Train a model')
    cli.add_argument('config', help='train config file path')
    cli.add_argument(
        '--work_dir',
        type=str,
        default=None,
        help='the dir to save logs and models')
    cli.add_argument(
        '--resume_from', help='the checkpoint file to resume from')
    cli.add_argument(
        '--pretrained', default=None, help='pretrained model file')
    cli.add_argument(
        '--gpus',
        type=int,
        default=1,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    cli.add_argument('--seed', type=int, default=None, help='random seed')
    cli.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    cli.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    cli.add_argument('-d', '--dev', default=False, action='store_true')
    cli.add_argument('-c', '--continue_training', default=False,
                     action='store_true')
    cli.add_argument('--port', type=int, default=29500,
                     help='port only works when launcher=="slurm"')
    args = cli.parse_args()
    # expose the launcher's rank to torch.distributed helpers
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def main():
    """Train a self-supervised model from a config file.

    Sequence: parse args, patch the config from the CLI, set up (optional)
    distributed execution, create the work dir and logger, record environment
    metadata, seed RNGs, build model and dataset, then hand off to
    ``train_model``.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.continue_training:
        # -c resumes from the newest checkpoint in the work dir
        cfg.resume_from = osp.join(cfg.work_dir, 'latest.pth')
    if args.dev:
        # dev mode shrinks the per-GPU batch for quick smoke runs
        cfg['data']['imgs_per_gpu'] = 16
    # check memcached package exists
    if importlib.util.find_spec('mc') is None:
        traverse_replace(cfg, 'memcached', False)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
        assert cfg.model.type not in \
            ['DeepCluster', 'MOCO', 'SimCLR', 'ODC', 'NPID'], \
            "{} does not support non-dist training.".format(cfg.model.type)
    else:
        distributed = True
        if args.launcher == 'slurm':
            cfg.dist_params['port'] = args.port
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, 'train_{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([('{}: {}'.format(k, v))
                          for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))
    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}, deterministic: {}'.format(
            args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    if args.pretrained is not None:
        assert isinstance(args.pretrained, str)
        cfg.model.pretrained = args.pretrained
    model = build_model(cfg.model)
    datasets = [build_dataset(cfg.data.train)]
    assert len(cfg.workflow) == 1, "Validation is called by hook."
    if cfg.checkpoint_config is not None:
        # save openselfsup version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            openselfsup_version=__version__, config=cfg.text)
    # add an attribute for visualization convenience
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        timestamp=timestamp,
        meta=meta)
if __name__ == '__main__':
main()
| 5,150 | 33.112583 | 86 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/count_parameters.py | import argparse
from mmcv import Config
from openselfsup.models import build_model
def parse_args():
    """Parse CLI options: the training config whose model will be counted."""
    cli = argparse.ArgumentParser(description='Train a model')
    cli.add_argument('config', help='train config file path')
    return cli.parse_args()
def main():
    """Print total and backbone parameter counts (in millions) for a config."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    model = build_model(cfg.model)

    def millions(params, grad_only=False):
        # Sum element counts, optionally restricted to trainable tensors.
        return sum(p.numel() for p in params
                   if not grad_only or p.requires_grad) / 1000000.

    print(
        "Number of backbone parameters: {:.5g} M".format(
            millions(model.backbone.parameters())))
    print("Number of backbone parameters requiring grad: {:.5g} M".format(
        millions(model.backbone.parameters(), grad_only=True)))
    print("Number of total parameters: {:.5g} M".format(
        millions(model.parameters())))
    print("Number of total parameters requiring grad: {:.5g} M".format(
        millions(model.parameters(), grad_only=True)))
if __name__ == '__main__':
main()
| 1,201 | 29.820513 | 83 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/prepare_data/convert_subset.py | """
SimCLR provides list files for semi-supervised benchmarks:
https://github.com/google-research/simclr/tree/master/imagenet_subsets/
This script convert the list files into the required format in OpenSelfSup.
"""
import argparse
parser = argparse.ArgumentParser(
    description='Convert ImageNet subset lists provided by simclr.')
parser.add_argument('input', help='Input list file.')
parser.add_argument('output', help='Output list file.')
args = parser.parse_args()
# create dict
# Build a WNID -> class-index map from the full labeled training list.
# Each line looks like "<wnid>/<filename> <label>"; every file of a given
# wnid must carry the same label (asserted below).
with open("data/imagenet/meta/train_labeled.txt", 'r') as f:
    lines = f.readlines()
    keys = [l.split('/')[0] for l in lines]
    labels = [l.strip().split()[1] for l in lines]
mapping = {}
for k, l in zip(keys, labels):
    if k not in mapping:
        mapping[k] = l
    else:
        assert mapping[k] == l
# convert
# SimCLR subset lists contain bare filenames "<wnid>_<suffix>"; rewrite each
# as "<wnid>/<filename> <label>" using the mapping built above.
with open(args.input, 'r') as f:
    lines = f.readlines()
    fns = [l.strip() for l in lines]
    sample_keys = [l.split('_')[0] for l in lines]
    sample_labels = [mapping[k] for k in sample_keys]
output_lines = ["{}/{} {}\n".format(k, fn, l) for
                k, fn, l in zip(sample_keys, fns, sample_labels)]
with open(args.output, 'w') as f:
    f.writelines(output_lines)
| 1,162 | 31.305556 | 75 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/prepare_data/create_voc_data_files.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
################################################################################
"""
This script can be used to extract the VOC2007 and VOC2012 dataset files
[data, labels] from the given annotations that can be used for training. The
files can be prepared for various data splits
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import logging
import numpy as np
import os
import sys
from glob import glob
# initiate the logger
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
def validate_files(input_files):
    """Keep only paths whose basename is ``<class_name>_<split>.txt``.

    A valid basename splits on '_' into exactly two parts; everything else
    (e.g. the split-only lists like ``train.txt``) is dropped.
    """
    return [
        path for path in input_files
        if len(path.split('/')[-1].split('_')) == 2
    ]
def get_data_files(split, args):
    """Collect the per-class VOC image-set list files for *split*.

    VOC2007 ships ``<class>_{train,val,trainval,test}.txt`` under
    ImageSets/Main; VOC2012 has no test lists, so its train/val lists are
    used instead. With ``args.separate_partitions`` the train/val/test
    splits stay distinct rather than using the merged trainval lists.
    """
    data_dir = os.path.join(args.data_source_dir, 'ImageSets/Main')
    assert os.path.exists(data_dir), "Data: {} doesn't exist".format(data_dir)
    test_data_files = glob(os.path.join(data_dir, '*_test.txt'))
    test_data_files = validate_files(test_data_files)
    if args.separate_partitions > 0:
        # keep train / val / test separate instead of merging train+val
        train_data_files = glob(os.path.join(data_dir, '*_train.txt'))
        val_data_files = glob(os.path.join(data_dir, '*_val.txt'))
        train_data_files = validate_files(train_data_files)
        val_data_files = validate_files(val_data_files)
        assert len(train_data_files) == len(val_data_files)
        if split == 'train':
            data_files = train_data_files
        elif split == 'test':
            data_files = test_data_files
        else:
            data_files = val_data_files
    else:
        train_data_files = glob(os.path.join(data_dir, '*_trainval.txt'))
        if len(test_data_files) == 0:
            # For VOC2012 dataset, we have trainval, val and train data.
            train_data_files = glob(os.path.join(data_dir, '*_train.txt'))
            test_data_files = glob(os.path.join(data_dir, '*_val.txt'))
        test_data_files = validate_files(test_data_files)
        train_data_files = validate_files(train_data_files)
        data_files = train_data_files if (split
                                          == 'train') else test_data_files
    assert len(train_data_files) == len(test_data_files), "Missing classes"
    return data_files
def get_images_labels_info(split, args):
    """Build (image paths, per-image label vectors, optional json dict) for *split*.

    Each image gets a vector over all classes with entries remapped from VOC
    conventions to training targets: 1 stays positive, VOC -1 (absent)
    becomes 0, VOC 0 (difficult/ignore) becomes -1; unseen entries default
    to -1.
    """
    assert os.path.exists(args.data_source_dir), "Data source NOT found. Abort"
    data_files = get_data_files(split, args)
    # we will construct a map for image name to the vector of -1, 0, 1
    # we sort the data_files which gives sorted class names as well
    img_labels_map = {}
    for cls_num, data_path in enumerate(sorted(data_files)):
        # for this class, we have images and each image will have label
        # 1, -1, 0 -> present, not present, ignore respectively as in VOC data.
        with open(data_path, 'r') as fopen:
            for line in fopen:
                try:
                    img_name, orig_label = line.strip().split()
                    if img_name not in img_labels_map:
                        img_labels_map[img_name] = -np.ones(
                            len(data_files), dtype=np.int32)
                    orig_label = int(orig_label)
                    # in VOC data, -1 (not present), set it to 0 as train target
                    if orig_label == -1:
                        orig_label = 0
                    # in VOC data, 0 (ignore), set it to -1 as train target
                    elif orig_label == 0:
                        orig_label = -1
                    img_labels_map[img_name][cls_num] = orig_label
                except Exception:
                    logger.info('Error processing: {} data_path: {}'.format(
                        line, data_path))
    img_paths, img_labels = [], []
    # deterministic ordering: images sorted by name
    for item in sorted(img_labels_map.keys()):
        img_paths.append(
            os.path.join(args.data_source_dir, 'JPEGImages', item + '.jpg'))
        img_labels.append(img_labels_map[item])
    output_dict = {}
    if args.generate_json:
        # mirror the same labels as {img_id: {class_name: label}} for json
        cls_names = []
        for item in sorted(data_files):
            name = item.split('/')[-1].split('.')[0].split('_')[0]
            cls_names.append(name)
        img_ids, json_img_labels = [], []
        for item in sorted(img_labels_map.keys()):
            img_ids.append(item)
            json_img_labels.append(img_labels_map[item])
        for img_idx in range(len(img_ids)):
            img_id = img_ids[img_idx]
            out_lbl = {}
            for cls_idx in range(len(cls_names)):
                name = cls_names[cls_idx]
                out_lbl[name] = int(json_img_labels[img_idx][cls_idx])
            output_dict[img_id] = out_lbl
    return img_paths, img_labels, output_dict
def main():
    """CLI entry point: write VOC image/label .npy (and optional json) files.

    For each requested partition (train/test, plus val when partitions are
    separate) this saves ``<partition>_images.npy`` and
    ``<partition>_labels.npy`` into ``--output_dir``.
    """
    parser = argparse.ArgumentParser(description="Create VOC data files")
    parser.add_argument(
        '--data_source_dir',
        type=str,
        default=None,
        help="Path to data directory containing ImageSets and JPEGImages")
    parser.add_argument(
        '--output_dir',
        type=str,
        default=None,
        help="Output directory where images/label information will be written")
    parser.add_argument(
        '--separate_partitions',
        type=int,
        default=0,
        help="Whether to create files separately for partitions train/test/val"
    )
    parser.add_argument(
        '--generate_json',
        type=int,
        default=0,
        help="Whether to json files for partitions train/test/val")
    args = parser.parse_args()
    # given the data directory for the partitions train, val, and test, we will
    # write numpy files for each partition.
    partitions = ['train', 'test']
    if args.separate_partitions > 0:
        partitions.append('val')
    for partition in partitions:
        logger.info(
            '========Preparing {} data files========'.format(partition))
        imgs_info, lbls_info, output_dict = get_images_labels_info(
            partition, args)
        img_info_out_path = os.path.join(args.output_dir,
                                         partition + '_images.npy')
        label_info_out_path = os.path.join(args.output_dir,
                                           partition + '_labels.npy')
        logger.info(
            '=================SAVING DATA files=======================')
        logger.info('partition: {} saving img_paths to: {}'.format(
            partition, img_info_out_path))
        logger.info('partition: {} saving lbls_paths: {}'.format(
            partition, label_info_out_path))
        logger.info('partition: {} imgs: {}'.format(partition,
                                                    np.array(imgs_info).shape))
        np.save(img_info_out_path, np.array(imgs_info))
        np.save(label_info_out_path, np.array(lbls_info))
        if args.generate_json:
            json_out_path = os.path.join(args.output_dir,
                                         partition + '_targets.json')
            import json
            with open(json_out_path, 'w') as fp:
                json.dump(output_dict, fp)
            logger.info('Saved Json to: {}'.format(json_out_path))
    logger.info('DONE!')
if __name__ == '__main__':
main()
| 7,741 | 38.907216 | 80 | py |
Few-shot-WSI | Few-shot-WSI-master/tools/prepare_data/create_voc_low_shot_challenge_samples.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
################################################################################
"""
This script is used to create the low-shot data for VOC svm trainings.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import json
import logging
import numpy as np
import os
import random
import sys
# create the logger
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
def load_json(file_path, ground_truth=True):
    """Load a ``{image_id: {class_name: value}}`` json into a dense matrix.

    Rows follow sorted image ids, columns sorted class names. Ground-truth
    matrices are int32; prediction matrices are float64.
    Returns (matrix, sorted image ids, sorted class names).
    """
    import json
    assert os.path.exists(file_path), "{} does not exist".format(file_path)
    with open(file_path, 'r') as fp:
        data = json.load(fp)
    img_ids = sorted(data.keys())
    cls_names = sorted(data[img_ids[0]].keys())
    dtype = np.int32 if ground_truth else np.float64
    output = np.empty((len(img_ids), len(cls_names)), dtype=dtype)
    for row, img_id in enumerate(img_ids):
        for col, cls_name in enumerate(cls_names):
            output[row][col] = data[img_id][cls_name]
    return output, img_ids, cls_names
def save_json(input_data, img_ids, cls_names, output_file):
    """Write a dense label matrix back out as {image_id: {class_name: int}}."""
    output_dict = {
        img_id: {
            cls_names[cls_idx]: int(input_data[img_idx][cls_idx])
            for cls_idx in range(len(cls_names))
        }
        for img_idx, img_id in enumerate(img_ids)
    }
    logger.info('Saving file: {}'.format(output_file))
    with open(output_file, 'w') as fp:
        json.dump(output_dict, fp)
def sample_symbol(input_targets, output_target, symbol, num):
    """Per class column, copy *symbol* into *num* randomly chosen rows of
    *output_target* picked from rows where *input_targets* already holds it.
    Returns the (mutated) *output_target*.
    """
    logger.info('Sampling symbol: {} for num: {}'.format(symbol, num))
    for col in range(input_targets.shape[1]):
        candidates = np.where(input_targets[:, col] == symbol)[0]
        for row in random.sample(list(candidates), num):
            output_target[row, col] = symbol
    return output_target
def generate_independent_sample(opts, targets, img_ids, cls_names):
    """Create and save low-shot label matrices for every (sample, k) pair.

    For each of ``opts.num_samples`` independent draws and each k in
    ``opts.k_values``, writes both a json and an npy file named
    ``<prefix>_sample<i>_k<k>`` into ``opts.output_path``.
    """
    k_values = [int(val) for val in opts.k_values.split(",")]
    # the way sample works is: for each independent sample, and a given k value
    # we create a matrix of the same shape as given targets file. We initialize
    # this matrix with -1 (ignore label). We then sample k positive and
    # (num_classes-1) * k negatives.
    # N x 20 shape
    num_classes = targets.shape[1]
    for idx in range(opts.num_samples):
        for k in k_values:
            logger.info('Sampling: {} time for k-value: {}'.format(idx + 1, k))
            # start from all-ignore, then punch in sampled positives/negatives
            output = np.ones(targets.shape, dtype=np.int32) * -1
            output = sample_symbol(targets, output, 1, k)
            output = sample_symbol(targets, output, 0, (num_classes - 1) * k)
            prefix = opts.targets_data_file.split('/')[-1].split('.')[0]
            output_file = os.path.join(
                opts.output_path,
                '{}_sample{}_k{}.json'.format(prefix, idx + 1, k))
            save_json(output, img_ids, cls_names, output_file)
            npy_output_file = os.path.join(
                opts.output_path,
                '{}_sample{}_k{}.npy'.format(prefix, idx + 1, k))
            logger.info('Saving npy file: {}'.format(npy_output_file))
            np.save(npy_output_file, output)
    logger.info('Done!!')
def main():
    """CLI entry point: sample low-shot VOC label files from a targets json."""
    parser = argparse.ArgumentParser(
        description='Sample Low shot data for VOC')
    parser.add_argument(
        '--targets_data_file',
        type=str,
        default=None,
        help="Json file containing image labels")
    parser.add_argument(
        '--output_path',
        type=str,
        default=None,
        help="path where low-shot samples should be saved")
    parser.add_argument(
        '--k_values',
        type=str,
        default="1,2,4,8,16,32,64,96",
        help="Low-shot k-values for svm testing.")
    parser.add_argument(
        '--num_samples',
        type=int,
        default=5,
        help="Number of independent samples.")
    # with no arguments at all, show usage instead of failing later
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    opts = parser.parse_args()
    targets, img_ids, cls_names = load_json(opts.targets_data_file)
    generate_independent_sample(opts, targets, img_ids, cls_names)
if __name__ == '__main__':
main()
| 4,741 | 34.924242 | 80 | py |
Few-shot-WSI | Few-shot-WSI-master/wsi_workdir/dict_construction.py | import numpy as np
from openselfsup.third_party import clustering
from scipy.spatial.distance import cdist
import os
import warnings
import time
import pickle as pkl
from sklearn.neighbors import KNeighborsClassifier
from scipy.special import softmax
import argparse
Kmeans = clustering.__dict__['Kmeans']
pth = 'wsi_workdir/workdir/extracted_feats'
dict_pth = 'wsi_workdir/workdir/dict'
def main(args):
    """Build the 'legacy dictionary' used for latent augmentation.

    Clusters the extracted training features into ``args.num_prototypes``
    groups with k-means, then saves per-cluster prototypes (means),
    covariance matrices, and a bank of shift vectors sampled from
    N(0, cov) for each prototype.

    Fix: ``args.novel_class != None`` replaced with the correct identity
    test ``is not None`` (PEP 8; ``!=`` can misbehave for objects that
    override ``__eq__``).
    """
    model = args.model
    # number of prototypes
    num_prototypes = args.num_prototypes
    # number of shift vectors for each prototype
    num_shift_vectors = args.num_shift_vectors
    # memory-mapped ('r') load of the extracted features and their labels
    features = np.load(f'{pth}/{model}/{args.features}.npy', 'r')
    labels = np.load('wsi_workdir/workdir/extracted_feats/NCT_train_labels.npy', 'r')
    print(f'{model} features loaded.')
    if args.novel_class is not None:
        if args.novel_class == 78:
            # special code 78 excludes both class 7 and class 8
            features = features[(labels != 7) * (labels != 8)]
        else:
            features = features[labels != args.novel_class]
    os.makedirs(f'{dict_pth}/{model}', exist_ok=True)
    print(f'using {len(features)} features to cluster...')
    kmeans = Kmeans(k=num_prototypes, pca_dim=-1)
    kmeans.cluster(features, seed=66)
    assignments = kmeans.labels.astype(np.int64)
    # compute the prototype (mean feature) for each cluster
    prototypes = np.array([np.mean(features[assignments == i], axis=0)
                           for i in range(num_prototypes)])
    # compute the covariance matrix for each cluster
    covariance = np.array([np.cov(features[assignments == i].T)
                           for i in range(num_prototypes)])
    # save the legacy dict : {prototype: covariance}
    np.save(f'{dict_pth}/{model}/NCT_PROTO_BANK_{num_prototypes}.npy', prototypes)
    np.save(f'{dict_pth}/{model}/NCT_COV_BANK_{num_prototypes}.npy', covariance)
    # generate shift vector bank.
    SHIFT_BANK = []
    for cov in covariance:
        SHIFT_BANK.append(
            # sample shift vectors from a zero-mean multivariate Gaussian
            # N(0, cov) matched to this prototype's cluster spread
            np.random.multivariate_normal(np.zeros(cov.shape[0]),
                                          cov,
                                          size=num_shift_vectors))
    SHIFT_BANK = np.array(SHIFT_BANK)
    # save the shift bank
    np.save(f'{dict_pth}/{model}/NCT_SHIFT_BANK_{num_prototypes}.npy', SHIFT_BANK)
    print('legacy dict constructed', f'saving to {dict_pth}/{model}/NCT_SHIFT_BANK_{num_prototypes}.npy')
if __name__ == '__main__':
    # CLI for standalone use: parse dictionary-construction options and run.
    parser = argparse.ArgumentParser(description='legacy dictionary construction')
    parser.add_argument('--model', type=str, required=True, help='model name')
    parser.add_argument('--features', type=str, required=False, default='NCT_train', help='features file name')
    parser.add_argument('--novel_class', required=False, type=int, default=None, help='excluding which class')
    parser.add_argument('--num_prototypes', type=int, default=16)
    parser.add_argument('--num_shift_vectors', type=int, default=2000)
    args = parser.parse_args()
    main(args)
| 3,061 | 39.289474 | 111 | py |
Few-shot-WSI | Few-shot-WSI-master/wsi_workdir/distributed_meta_test.py | import argparse
import datetime
import scipy
import numpy as np
from scipy.stats import t
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from sklearn.neighbors import NearestCentroid
from sklearn.metrics import f1_score
from scipy.spatial.distance import cdist
from tqdm.contrib.concurrent import process_map
def mean_confidence_interval(data, confidence=0.95):
    """Return (mean, half-width) of the *confidence* interval of *data*.

    Uses the Student-t distribution with n-1 degrees of freedom, so the
    interval is valid for small sample sizes.

    Fix: the original called the private ``t._ppf``, which is an internal
    SciPy method removed in recent releases; the public percent-point
    function ``t.ppf`` computes the same quantile.
    """
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    h = se * t.ppf((1 + confidence) / 2., n - 1)
    return m, h
def aug_base_samples(features, labels, num_aug_shots=50):
    """Latent augmentation of a support set.

    For every sample, append *num_aug_shots* shifted copies ``z + delta``,
    where the deltas are drawn from the shift bank of the sample's nearest
    prototype. Returns (augmented features, matching label list).
    """
    aug_feats, aug_labels = [], []
    for cls in np.unique(labels):
        cls_feats = features[labels == cls]
        # index of the closest prototype for every sample of this class
        nearest_proto = np.argmin(cdist(cls_feats, PROTO_BANK), axis=1)
        per_sample = []
        for proto_idx, feat in zip(nearest_proto, cls_feats):
            # random deltas from this prototype's pre-generated shift bank
            deltas = SHIFT_BANK[proto_idx][np.random.choice(NUM_SHIFTs, num_aug_shots)]
            # keep the original sample followed by its shifted copies
            per_sample.append(np.concatenate([[feat], feat[np.newaxis, :] + deltas]))
        aug_feats.append(np.concatenate(per_sample, axis=0))
        aug_labels.extend([cls] * len(aug_feats[-1]))
    return np.concatenate(aug_feats, axis=0), aug_labels
def meta_testing(task_pth):
    """Fit the configured classifier on one saved task's support set and
    return (ground-truth query labels, predicted query labels)."""
    support_xs, support_ys, query_xs, query_ys = np.load(task_pth, allow_pickle=True)
    # pre-augmented supports arrive as (n, aug, 512); flatten the first
    # args.aug_times augmentations into extra shots
    if len(np.shape(support_xs)) == 3:
        support_xs = np.reshape(support_xs[:, :args.aug_times, :], (-1, 512))
        support_ys = np.repeat(support_ys, args.aug_times)
    makers = {
        'Ridge': RidgeClassifier,
        'logistic_regression': lambda: LogisticRegression(max_iter=1000),
        'nearest_centroid': NearestCentroid,
    }
    maker = makers.get(args.clf)
    if maker is None:
        raise NotImplementedError
    clf = maker()
    clf.fit(support_xs, support_ys)
    return (query_ys, clf.predict(query_xs))
def meta_testing_LatentAug(task_pth):
    """Like ``meta_testing`` but expands the support set with latent
    augmentation before fitting the classifier."""
    support_xs, support_ys, query_xs, query_ys = np.load(task_pth, allow_pickle=True)
    if len(np.shape(support_xs)) == 3:
        support_xs = np.reshape(support_xs[:, :args.aug_times], (-1, 512))
        support_ys = np.repeat(support_ys, args.aug_times)
    # num_aug_shots - 1 generated shots: the original sample supplies one
    support_xs, support_ys = aug_base_samples(
        support_xs, support_ys, num_aug_shots=args.num_aug_shots - 1)
    makers = {
        'Ridge': RidgeClassifier,
        'logistic_regression': lambda: LogisticRegression(max_iter=1000),
        'nearest_centroid': NearestCentroid,
    }
    maker = makers.get(args.clf)
    if maker is None:
        raise NotImplementedError
    clf = maker()
    clf.fit(support_xs, support_ys)
    return (query_ys, clf.predict(query_xs))
def evaluate(y_trues, y_preds):
    """Compute per-class F1 for every task.

    Returns an array of shape (num_tasks, num_classes) — ``average=None``
    yields one F1 value per class.
    """
    return np.array(
        [f1_score(t, p, average=None) for t, p in zip(y_trues, y_preds)])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='distributed meta-testing')
parser.add_argument('--num_threads', type=int, default=48, help='Number of threads for parallel processing, too large may result in errors')
parser.add_argument('--task', type=str, default='mixture', required=False, help='one of [near,mixture,out]')
parser.add_argument('--num_task', type=int, default=300, help='Number of tasks')
parser.add_argument('--num_shots', type=int, default=5, help='Number of shots, e.g., 1, 5, 10')
parser.add_argument('--mode', type=str, default='linear', help='one of [linear, latent_aug]')
parser.add_argument('--clf', type=str, default='Ridge')
parser.add_argument('--num_aug_shots', type=int, default=100, help='Number of data augmentation shots')
parser.add_argument('--aug_times', type=int, default=0, help='Number of latent_augmentation times')
parser.add_argument('--num_prototypes', type=int, default=16, help='Number of prototypes')
parser.add_argument('--dict_pth', type=str, default='wsi_workdir/workdir/dict')
parser.add_argument('--model', type=str, required=True, help='CLP or FSP')
parser.add_argument('--task_data_pth', type=str, default='wsi_workdir/workdir/tasks')
parser.add_argument('--novel_class', type=int)
args = parser.parse_args()
args.task_data_pth = f'{args.task_data_pth}/{args.task}'
print(datetime.datetime.now())
if args.mode != 'linear':
print('loading bank')
PROTO_BANK = np.load(f'{args.dict_pth}/{args.model}/NCT_PROTO_BANK_{args.num_prototypes}.npy','r')
SHIFT_BANK = np.load(f'{args.dict_pth}/{args.model}/NCT_SHIFT_BANK_{args.num_prototypes}.npy','r')
print('bank loaded')
NUM_SHIFTs = len(SHIFT_BANK[0])
task_paths = []
# linear or latent_aug
if args.task == 'near':
for i in range(args.num_task):
task_paths.append(
f'{args.task_data_pth}/task_{i}/9-way-{args.num_shots}-shot_wo_{args.novel_class}_{args.model}.npy')
elif args.task == 'mixture':
for i in range(args.num_task):
task_paths.append(
f'{args.task_data_pth}/task_{i}/5-way-{args.num_shots}-shot_{args.model}.npy')
elif args.task == 'out' or args.task == 'out_homo':
for i in range(args.num_task):
task_paths.append(
f'{args.task_data_pth}/task_{i}/3-way-{args.num_shots}-shot_{args.model}.npy')
elif args.task == 'NCT_78_aug':
for i in range(args.num_task):
task_paths.append(
f'{args.task_data_pth}/task_{i}/9-way-{args.num_shots}-shot.npy')
else:
raise NotImplementedError
if args.mode == 'linear':
results = process_map(meta_testing, task_paths, max_workers=args.num_threads)
elif args.mode=='latent_aug':
results = process_map(meta_testing_LatentAug, task_paths, max_workers=args.num_threads)
else:
raise NotImplementedError
preds = np.array([x[1] for x in results])
trues = np.array([x[0] for x in results])
print(datetime.datetime.now())
print('configs:\n', args)
print('model:', args.model, 'num_shots:', args.num_shots, 'num_task:', args.num_task, 'mode:', args.mode, 'clf:', args.clf)
f1s = evaluate(trues, preds)
means, cis = [], []
for f1 in np.transpose(f1s,(1,0)):
m, h = mean_confidence_interval(f1)
means.append(m)
cis.append(h)
for m, h in zip(means, cis):
print(f'{m*100:.2f} {h*100:.2f}')
print(f'{np.mean(means)*100:.2f} {np.mean(cis)*100:.2f}')
| 6,809 | 43.220779 | 144 | py |
Few-shot-WSI | Few-shot-WSI-master/wsi_workdir/extract.py | import argparse
import importlib
import numpy as np
import os
import os.path as osp
import time
from tqdm import trange,tqdm
import threading
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from openselfsup.utils import dist_forward_collect, nondist_forward_collect
from openselfsup.datasets import build_dataloader, build_dataset
from openselfsup.models import build_model
from openselfsup.models.utils import MultiPooling
from openselfsup.utils import get_root_logger
from torch import nn
import argparse
def nondist_forward_collect(func, data_loader, length):
    """Run ``func`` over every batch and stack per-key outputs into numpy arrays.

    Shadows the openselfsup helper of the same name imported above; this
    variant runs single-process (no distributed collection).
    """
    batch_outputs = []
    progress = mmcv.ProgressBar(len(data_loader))
    for batch in data_loader:
        with torch.no_grad():
            batch_outputs.append(func(**batch))
        progress.update()
    collected = {}
    for key in batch_outputs[0].keys():
        collected[key] = np.concatenate(
            [out[key].numpy() for out in batch_outputs], axis=0)
        # sanity check: one output row per dataset sample
        assert collected[key].shape[0] == length
    return collected
def extract(model, data_loader):
    """Extract features for the whole dataset with the model in eval mode."""
    model.eval()
    forward = lambda **batch: model(mode='extract', **batch)
    return nondist_forward_collect(forward, data_loader,
                                   len(data_loader.dataset))
def main(args):
    """Build the dataset and model from the mmcv config, extract backbone
    features for every sample and save them to ``args.output``."""
    cfg = mmcv.Config.fromfile(args.config)
    # Honour the optional cudnn benchmarking flag from the config.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    dataset = build_dataset(cfg.data.extract)
    loader = build_dataloader(
        dataset,
        imgs_per_gpu=cfg.data.imgs_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)
    cfg.model.pretrained = args.pretrained
    model = MMDataParallel(build_model(cfg.model), device_ids=[0])
    model.eval()
    features = extract(model, loader)['backbone']
    np.save(args.output, features)
if __name__ == '__main__':
    # CLI: extract features with a pretrained backbone and save them as .npy.
    parser = argparse.ArgumentParser(description='extract dataset features using pretrained ')
    parser.add_argument('--pretrained', type=str, required=True, help='path to pretrained model')
    parser.add_argument('--config', type=str, required=True, help='path to data root')
    parser.add_argument('--output', type=str, required=True, help='output path')
    parser.add_argument('--start', type=int, required=False)
    args = parser.parse_args()
    main(args)
    # NOTE(review): everything below this exit() is dead code — it never runs.
    # It appears to be a one-off augmented-feature extraction routine kept for
    # reference; re-enable by removing the exit() above.
    exit()
    ## extract augmented features
    config_file = args.config
    cfg = mmcv.Config.fromfile(config_file)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    dataset = build_dataset(cfg.data.extract)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=cfg.data.imgs_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)
    cfg.model.pretrained = args.pretrained
    model = build_model(cfg.model)
    model = MMDataParallel(model, device_ids=[0])
    model.eval()
    func = lambda **x: model(mode='extract', **x)
    def extract_and_save(idxs):
        # Re-runs extraction over the full loader once per idx; presumably the
        # dataset applies random augmentation per pass — TODO confirm.
        for idx in tqdm(idxs):
            result_dict = nondist_forward_collect(func, data_loader, len(data_loader.dataset))
            features = result_dict['backbone']
            np.save(f'wsi_workdir/workdir/extracted_feats/moco_v3_wo_78/NCT_aug/NCT_aug_{idx}.npy', features)
            print('saving', idx)
    extract_and_save(np.arange(args.start, args.start+25))
| 3,789 | 29.564516 | 109 | py |
Few-shot-WSI | Few-shot-WSI-master/wsi_workdir/tools/generate_aug_NCT78_task.py | import numpy as np
import argparse
import os
import warnings
import threading
from tqdm import tqdm
warnings.filterwarnings("ignore")
def aug_NCT78_task(out_dir, task_ids, num_shots, options):
    """Attach pre-extracted augmented support features to existing 9-way NCT tasks.

    For each task, reads the saved support indices, gathers the matching rows
    from every array in the module-level ``aug_feats`` list, and stacks them
    next to the original support features along a new axis 1. Saves the result
    as ``<task_name>_aug_<aug_times>.npy``.
    """
    task_name = f'9-way-{num_shots}-shot'
    out_dir = f'{out_dir}/NCT_78_aug'
    for task_id in tqdm(task_ids):
        task_dir = f'{out_dir}/task_{task_id}'
        os.makedirs(task_dir, exist_ok=True)
        support_idxs, query_idxs = np.load(
            f'{task_dir}/{task_name}_idxs.npy', allow_pickle=True)
        support_xs, support_ys, query_xs, query_ys = np.load(
            f'{task_dir}/{task_name}.npy', allow_pickle=True)
        # (aug_times, n_support, dim) -> (n_support, aug_times, dim)
        augmented = np.transpose(
            np.array([feat[support_idxs] for feat in aug_feats]), (1, 0, 2))
        support_xs = np.concatenate(
            [support_xs[:, np.newaxis, ], augmented], axis=1)
        np.save(f"{task_dir}/{task_name}_aug_{options['aug_times']}.npy",
                (support_xs, support_ys, query_xs, query_ys))
def generate_NCT78_task(out_dir, task_ids, num_shots, options):
    """Sample fresh 9-way NCT support/query splits and save features + indices.

    Relies on module-level globals (``train_feats``, ``train_labels``,
    ``train_label_idxs``, ``test_*``, ``num_querys``) set up in ``__main__``.
    """
    task_name = f'9-way-{num_shots}-shot'
    out_dir = f'{out_dir}/NCT_78_aug'
    for task_id in tqdm(task_ids):
        task_dir = f'{out_dir}/task_{task_id}'
        os.makedirs(task_dir, exist_ok=True)
        support_idx_parts, query_idx_parts = [], []
        for label in np.arange(9):
            # Support drawn without replacement, queries with replacement.
            support_idx_parts.append(
                np.random.choice(train_label_idxs[label], num_shots, replace=False))
            query_idx_parts.append(
                np.random.choice(test_label_idxs[label], num_querys))
        support_idxs = np.concatenate(support_idx_parts)
        query_idxs = np.concatenate(query_idx_parts)
        support_xs = train_feats[support_idxs]
        support_ys = train_labels[support_idxs]
        query_xs = test_feats[query_idxs]
        query_ys = test_labels[query_idxs]
        np.save(f"{task_dir}/{task_name}.npy",
                (support_xs, support_ys, query_xs, query_ys))
        np.save(f'{task_dir}/{task_name}_idxs.npy', (support_idxs, query_idxs))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Crop the WSIs into patches')
parser.add_argument('--task', type=str, default='NCT78_aug')
parser.add_argument('--num_threads', type=int, default=10, help='Number of threads for parallel processing, too large may result in errors')
parser.add_argument('--num_task', type=int, default=300, help='number of tasks')
parser.add_argument('--num_shots', type=int, default=5, help='number of shots')
parser.add_argument('--aug_times', type=int, default=0, help='number of augmentation times')
parser.add_argument('--task_out_dir', type=str, default='wsi_workdir/workdir/tasks')
args = parser.parse_args()
num_querys = 15
print('generating tasks, this could take a while for big dataset, please be patient')
if not os.path.exists(args.task_out_dir):
os.makedirs(args.task_out_dir, exist_ok=True)
if args.task == 'NCT78':
task_gen_func = generate_NCT78_task
root = 'wsi_workdir/workdir/extracted_feats'
train_feats = np.load(f'{root}/clp_wo_78/NCT_train.npy', 'r')
train_labels = np.load(f'{root}/NCT_train_labels.npy', 'r')
train_label_idxs = [np.where(train_labels==label)[0] for label in range(9)]
test_feats = np.load(f'{root}/clp_wo_78/NCT_test.npy', 'r')
test_labels = np.load(f'{root}/NCT_test_labels.npy', 'r')
test_label_idxs = [np.where(test_labels==label)[0] for label in range(9)]
options = None
elif args.task == 'NCT78_aug':
task_gen_func = aug_NCT78_task
options = {
'aug_times':args.aug_times,
}
root = 'wsi_workdir/workdir/extracted_feats/clp_wo_78/NCT_aug'
aug_feats = []
print('loading features')
for aug_times in tqdm(range(args.aug_times)):
aug_feats.append(np.load(f'{root}/NCT_aug_{aug_times}.npy', 'r'))
print('features loaded')
else:
raise NotImplementedError
each_thread = int(np.floor(args.num_task/args.num_threads))
task_ids = np.arange(args.num_task)
threads = []
for i in range(args.num_threads):
if i < (args.num_threads-1):
t = threading.Thread(
target=task_gen_func,
args=(args.task_out_dir,
task_ids[each_thread*i:each_thread*(i+1)],
args.num_shots,
options))
else:
t = threading.Thread(
target=task_gen_func,
args=(args.task_out_dir,
task_ids[each_thread*i:],
args.num_shots,
options))
threads.append(t)
for thread in threads:
thread.start() | 5,792 | 40.378571 | 144 | py |
Few-shot-WSI | Few-shot-WSI-master/wsi_workdir/tools/generate_task.py | import numpy as np
import argparse
import os
import warnings
import threading
from tqdm import tqdm
warnings.filterwarnings("ignore")
def generate_near_domain_task(out_dir, task_ids, num_shots, options):
    """Generate (or re-extract features for) near-domain 9-way NCT tasks.

    When ``options['initialization']`` or ``options['overwrite']`` is set, new
    support/query indices are sampled and saved alongside the features for
    ``options['model']``; otherwise the previously saved indices are reloaded
    so that different models are evaluated on identical tasks.

    Relies on module-level globals (``NCT_train_feats``, ``train_label_idxs``,
    ``num_querys``, ...) set up in ``__main__``.
    """
    out_dir = f'{out_dir}/near'
    nv = options['novel_class']  # class held out during pre-training
    if options['initialization'] or options['overwrite']:
        for _ in tqdm(range(len(task_ids))):
            task_id = task_ids[_]
            os.makedirs(f'{out_dir}/task_{task_id}',exist_ok=True)
            support_xs, support_ys = [], []
            query_xs, query_ys = [], []
            support_idxs, query_idxs = [], []
            for label in range(9):
                #### choose global index ###
                # Fix: sample support shots WITHOUT replacement, consistent
                # with the mixture/out-domain generators, so a task never
                # contains duplicate support samples; queries may repeat.
                _support_idxs = np.random.choice(train_label_idxs[label], num_shots, replace=False)
                _query_idxs = np.random.choice(test_label_idxs[label], num_querys)
                ## collect index ##
                support_idxs.append(_support_idxs)
                query_idxs.append(_query_idxs)
                ## collect support and query features ###
                support_xs.append(NCT_train_feats[_support_idxs])
                support_ys.append(NCT_train_labels[_support_idxs])
                query_xs.append(NCT_test_feats[_query_idxs])
                query_ys.append(NCT_test_labels[_query_idxs])
            ### concatenate features and labels ###
            support_xs = np.concatenate(support_xs)
            support_ys = np.concatenate(support_ys)
            query_xs = np.concatenate(query_xs)
            query_ys = np.concatenate(query_ys)
            ### get indexes ####
            support_idxs = np.concatenate(support_idxs)
            query_idxs = np.concatenate(query_idxs)
            task_name = f'9-way-{num_shots}-shot_wo_{nv}'
            np.save(f"{out_dir}/task_{task_id}/{task_name}_{options['model']}.npy", (support_xs, support_ys, query_xs, query_ys))
            np.save(f'{out_dir}/task_{task_id}/{task_name}_idxs.npy', (support_idxs, query_idxs))
    else:
        # Re-extract features for a (possibly different) model using the
        # indices saved on a previous initialization run.
        for _ in tqdm(range(len(task_ids))):
            task_id = task_ids[_]
            os.makedirs(f'{out_dir}/task_{task_id}',exist_ok=True)
            task_name = f'9-way-{num_shots}-shot_wo_{nv}'
            support_idxs, query_idxs = \
                np.load(f'{out_dir}/task_{task_id}/{task_name}_idxs.npy', allow_pickle=True)
            support_xs = NCT_train_feats[support_idxs]
            query_xs = NCT_test_feats[query_idxs]
            support_ys = NCT_train_labels[support_idxs]
            query_ys = NCT_test_labels[query_idxs]
            np.save(f"{out_dir}/task_{task_id}/{task_name}_{options['model']}.npy", (support_xs, support_ys, query_xs, query_ys))
def generate_mixture_domain_task(out_dir, task_ids, num_shots, options):
    """Generate (or re-extract features for) 5-way mixture-domain tasks.

    On initialization/overwrite, fresh support/query indices are drawn from
    the module-level ``label_idxs`` pools and saved with the features for
    ``options['model']``; otherwise saved indices are reused so every model
    sees identical tasks.
    """
    task_name = f'5-way-{num_shots}-shot'
    out_dir = f'{out_dir}/mixture'
    if options['initialization'] or options['overwrite']:
        for task_id in tqdm(task_ids):
            task_dir = f'{out_dir}/task_{task_id}'
            os.makedirs(task_dir, exist_ok=True)
            sup_idx_parts, qry_idx_parts = [], []
            for label in np.arange(5):
                # Support without replacement; queries may repeat.
                sup_idx_parts.append(
                    np.random.choice(label_idxs[label], num_shots, replace=False))
                qry_idx_parts.append(
                    np.random.choice(label_idxs[label], num_querys))
            support_idxs = np.concatenate(sup_idx_parts)
            query_idxs = np.concatenate(qry_idx_parts)
            support_xs, support_ys = feats[support_idxs], labels[support_idxs]
            query_xs, query_ys = feats[query_idxs], labels[query_idxs]
            np.save(f"{task_dir}/{task_name}_{options['model']}.npy",
                    (support_xs, support_ys, query_xs, query_ys))
            np.save(f'{task_dir}/{task_name}_idxs.npy',
                    (support_idxs, query_idxs))
    else:
        for task_id in tqdm(task_ids):
            task_dir = f'{out_dir}/task_{task_id}'
            support_idxs, query_idxs = np.load(
                f'{task_dir}/{task_name}_idxs.npy', allow_pickle=True)
            np.save(f"{task_dir}/{task_name}_{options['model']}.npy",
                    (feats[support_idxs], labels[support_idxs],
                     feats[query_idxs], labels[query_idxs]))
def generate_out_domain_task(out_dir, task_ids, num_shots, options):
    """Generate (or re-extract features for) 3-way out-of-domain PAIP tasks.

    ``options['mode']``: 'hetero' draws support shots from the whole training
    pool; 'homo' first picks one WSI id and draws all support shots from that
    slide. Indices are saved on initialization/overwrite runs and reused
    otherwise. Relies on module-level PAIP_* globals set up in ``__main__``.
    """
    task_name = f'3-way-{num_shots}-shot'
    if options['mode'] == 'hetero':
        out_dir = f'{out_dir}/out'
    elif options['mode'] == 'homo':
        out_dir = f'{out_dir}/out_homo'
    else:
        raise NotImplementedError
    if options['initialization'] or options['overwrite']:
        for _ in tqdm(range(len(task_ids))):
            task_id = task_ids[_]
            os.makedirs(f'{out_dir}/task_{task_id}',exist_ok=True)
            support_xs, support_ys = [], []
            query_xs, query_ys = [], []
            support_idxs, query_idxs = [], []
            if options['mode'] == 'homo':
                # homo: all support shots for this task come from one slide
                support_id = np.random.choice(np.unique(PAIP_train_ids))
            else:
                support_id = None
            for label in range(3):
                #### choose global index ###
                if options['mode'] == 'hetero': # hetero mode
                    _train_label_idxs = train_label_idxs[label]
                elif options['mode'] == 'homo':
                    # restrict the pool to this label AND the chosen slide
                    _train_label_idxs = np.where((PAIP_train_labels==label)\
                        * (PAIP_wsi_ids==support_id))[0]
                _support_idxs = np.random.choice(_train_label_idxs, num_shots, replace=False)
                _query_idxs = np.random.choice(test_label_idxs[label], num_querys)
                ## collect index ##
                support_idxs.append(_support_idxs)
                query_idxs.append(_query_idxs)
                ## collect support and query features ###
                support_xs.append(PAIP_train_feats[_support_idxs])
                support_ys.append(PAIP_train_labels[_support_idxs])
                query_xs.append(PAIP_test_feats[_query_idxs])
                query_ys.append(PAIP_test_labels[_query_idxs])
            ### concatenate features and labels ###
            support_xs = np.concatenate(support_xs)
            support_ys = np.concatenate(support_ys)
            query_xs = np.concatenate(query_xs)
            query_ys = np.concatenate(query_ys)
            ### get indexes ####
            support_idxs = np.concatenate(support_idxs)
            query_idxs = np.concatenate(query_idxs)
            np.save(f"{out_dir}/task_{task_id}/{task_name}_{options['model']}.npy", (support_xs, support_ys, query_xs, query_ys))
            np.save(f'{out_dir}/task_{task_id}/{task_name}_idxs.npy', (support_idxs, query_idxs))
    else:
        # Re-extract features for a different model from saved indices.
        for _ in tqdm(range(len(task_ids))):
            task_id = task_ids[_]
            os.makedirs(f'{out_dir}/task_{task_id}', exist_ok=True)
            support_idxs, query_idxs = \
                np.load(f'{out_dir}/task_{task_id}/3-way-{num_shots}-shot_idxs.npy', allow_pickle=True)
            support_xs = PAIP_train_feats[support_idxs]
            query_xs = PAIP_test_feats[query_idxs]
            support_ys = PAIP_train_labels[support_idxs]
            query_ys = PAIP_test_labels[query_idxs]
            np.save(f"{out_dir}/task_{task_id}/{task_name}_{options['model']}.npy", (support_xs, support_ys, query_xs, query_ys))
            np.save(f'{out_dir}/task_{task_id}/{task_name}_idxs.npy', (support_idxs, query_idxs))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='generate tasks')
parser.add_argument('--num_threads', type=int, default=10, help='Number of threads for parallel processing, too large may result in errors')
parser.add_argument('--task', type=str, default=None, required=True, help='one of [near, middle, out]')
parser.add_argument('--num_task', type=int, default=1000, help='number of tasks to generate')
parser.add_argument('--num_shots', type=int, default=10, help='number of support shot per task')
parser.add_argument('--aug_times', type=int, default=0, help='number of augmentation times,\
the data augmented features are pre-extracted.')
parser.add_argument('--mode', type=str, default='hetero', help='one of hetero or homo')
parser.add_argument('--model', type=str, default='clp', help='one of fsp or clp')
parser.add_argument('--initialization', action='store_true', default=False, help='first time to generate task?')
parser.add_argument('--overwrite', action='store_true', default=False, help='overwrite existing tasks,\
be careful with this option.')
parser.add_argument('--task_out_dir', type=str, default='wsi_workdir/workdir/tasks')
parser.add_argument('--novel_class', type=int, required=False, help='novel class in near domain task')
args = parser.parse_args()
num_querys = 15
print(args.initialization)
options=dict(overwrite=args.overwrite,
initialization=args.initialization,
model=args.model)
print('generating tasks, this could take a while for big dataset, please be patient')
if not os.path.exists(args.task_out_dir):
os.makedirs(args.task_out_dir, exist_ok=True)
if args.task == 'near':
task_gen_func = generate_near_domain_task
root = 'wsi_workdir/workdir/extracted_feats'
NCT_train_feats = np.load(f'{root}/{args.model}/NCT_train.npy', 'r')
NCT_train_labels = np.load(f'{root}/NCT_train_labels.npy', 'r')
train_label_idxs = [np.where(NCT_train_labels==label)[0] for label in range(9)]
NCT_test_feats = np.load(f'{root}/{args.model}/NCT_test.npy', 'r')
NCT_test_labels = np.load(f'{root}/NCT_test_labels.npy', 'r')
test_label_idxs = [np.where(NCT_test_labels==label)[0] for label in range(9)]
options['aug_times'] = args.aug_times
options['novel_class'] = args.novel_class
print('ready to generate tasks')
elif args.task == 'mixture':
task_gen_func = generate_mixture_domain_task
root = 'wsi_workdir/workdir/extracted_feats'
feats = np.load(f'{root}/{args.model}/LC.npy')
labels = np.load(f'{root}/LC_labels.npy', 'r')
label_idxs = [np.where(labels==label)[0] for label in range(5)]
elif args.task == 'out':
task_gen_func = generate_out_domain_task
options['mode']=args.mode
root = 'wsi_workdir/workdir/extracted_feats'
PAIP_train_feats = np.load(f'{root}/{args.model}/PAIP_train.npy', 'r')
PAIP_train_labels = np.load(f'{root}/PAIP_train_labels.npy', 'r')
train_label_idxs = [np.where(PAIP_train_labels==label)[0] for label in range(3)]
PAIP_test_feats = np.load(f'{root}/{args.model}/PAIP_test.npy', 'r')
PAIP_test_labels = np.load(f'{root}/PAIP_test_labels.npy', 'r')
test_label_idxs = [np.where(PAIP_test_labels==label)[0] for label in range(3)]
data_pth = 'data/PAIP19/data'
wsi_ids = np.sort(os.listdir(data_pth))
PAIP_train_ids = np.array(wsi_ids[:15])
PAIP_test_ids = np.array(wsi_ids[15:])
data_list = 'data/PAIP19/meta/paip_train_labeled.txt'
pos = open(data_list,'r')
pos = pos.readlines()
PAIP_wsi_ids = np.array([x.split('/')[0] for x in pos])
else:
raise NotImplementedError
each_thread = int(np.floor(args.num_task/args.num_threads))
task_ids = np.arange(args.num_task)
threads = []
for i in range(args.num_threads):
if i < (args.num_threads-1):
t = threading.Thread(
target=task_gen_func,
args=(args.task_out_dir,
task_ids[each_thread*i:each_thread*(i+1)],
args.num_shots,
options))
else:
t = threading.Thread(
target=task_gen_func,
args=(args.task_out_dir,
task_ids[each_thread*i:],
args.num_shots,
options))
threads.append(t)
for thread in threads:
thread.start() | 13,175 | 46.225806 | 144 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.