repo_name | path | text |
|---|---|---|
ScottSoren/EC_MS | src/EC_MS/Chem/__init__.py | from EC_MS.Chem.MolarMasses import *
from EC_MS.Chem.PhysCon import *
from EC_MS.Chem.Thermochem import *
__version__ = "0.5.0"
__title__ = "EC_MS.Chem"
__description__ = "Chemistry sub-package to EC_MS. Can calculate molar masses, standard potentials, etc."
__url__ = "https://github.com/ScottSoren/EC_MS"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
|
ScottSoren/EC_MS | src/EC_MS/Electrolytes.py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 14 21:36:53 2017
This module will capture all the modelling from Chapter 3.1 in Scott's MSc
thesis for a range of electrolytes, as well as functions interfacing with
EC_MS data to enable predictions of pH change etc.
@author: soren
"""
import os
import numpy as np
from scipy.optimize import brentq
from scipy.integrate import odeint
from matplotlib import pyplot as plt
from .Object_Files import lines_to_dictionary
from . import Chem
data_directory = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data"
with open(data_directory + os.sep + "Electrolytes.txt") as f:
electrolyte_lines = f.readlines()
electrolyte_dict = lines_to_dictionary(electrolyte_lines)
Kw = 1.0e-14
def get_electrolyte_type(cation, anion):
if anion in ["CO3--", "HCO3-", "H2CO3", "CO2", "CO3", "HCO3"]:
return "carbonate"
if anion in ["PO4---", "HPO4--", "H2PO4-", "H3PO4", "PO4", "HPO4", "H2PO4"]:
return "phosphate"
if anion in ["SO4--", "HSO4-", "H2SO4", "SO4", "HSO4"]:
return "sulfate"
if cation in ["NH4+", "NH4"]:
return "ammonium"
if cation in ["H+", "H"]:
return "strong acid"
if anion in ["OH-", "OH"]:
return "strong base"
if anion is not None and cation is not None:
print("Electrolyte assumed to be neutral salt.")
return "salt"
print("Error: unknown electrolyte type")
def read_charge(ion):
z = 0
while ion[-1] == "+":
z += 1
ion = ion[:-1]
while ion[-1] == "-":
z -= 1
ion = ion[:-1]
return z
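# A quick illustration of read_charge (it just counts trailing '+' and '-'
# characters, so these follow directly from the loop above):
#     read_charge("K+")      # -> 1
#     read_charge("PO4---")  # -> -3
#     read_charge("H2CO3")   # -> 0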
class Electrolyte:
def __init__(
self,
name=None,
electrolyte_type=None,
s=None,
concentration=None,
pH=None,
cation=None,
anion=None,
spectator=None,
E=None,
F=None,
p=None,
verbose=True,
):
"""
Allows for a lot of ways of initializing the electrolyte.
As in Scott's MSc thesis, E is total acid+anion species, p is
equilibrium pressure of gas.
"""
if electrolyte_type is not None:
pass
elif name is not None:
try:
electrolyte_type = electrolyte_dict[name]["electrolyte_type"]
except KeyError: # then the first argument is an electrolyte type, not the
# name of a pre-specified electrolyte
electrolyte_type = name
name = None
else:
electrolyte_type = get_electrolyte_type(cation, anion)
name = electrolyte_type
constants = electrolyte_dict[electrolyte_type]
self.name = name
self.constants = constants
self.electrolyte_type = electrolyte_type
self.cation = cation
self.spectator = spectator
self.anion = anion
self.concentration = concentration
self.s = s
self.pH = pH
self.p = p
self.E = E
self.F = F
self.verbose = verbose
if name is not None: # if it's a named electrolyte
specs = electrolyte_dict[name]
for (key, value) in specs.items():
if not hasattr(self, key):
setattr(self, key, value)
elif getattr(self, key) is None:
setattr(self, key, value)
if self.s is None and hasattr(self, "concentration"):
self.s = self.concentration
elif self.concentration is None:
self.concentration = self.s
self.set_buffer()
self.equilibrate()
if self.name is None:
self.name = (
str(self.concentration)
+ " M "
+ self.electrolyte_type
+ ", pH = "
+ str(self.pH)
)
if self.verbose:
print("Initialized electolyte: " + self.name + "\n")
def set_buffer(self):
self.species = ["H+", "OH-"]
for (key, value) in self.constants.items():
if not hasattr(self, key):
setattr(self, key, value)
elif getattr(self, key) is None:
setattr(self, key, value)
if "buffer" in self.constants:
self.pKa = np.array(self.constants["pKa"])
self.Ka = np.power(10, -self.pKa)
self.ions = self.constants[
"buffer"
] # list of the species participating in the
# acid-base equilibrium from most to least protonated
# self.cation = self.constants['cation']
# self.spectator = self.constants['spectator']
else:
self.ions = []
if self.spectator is None:
if "acid" in self.electrolyte_type:
self.spectator = self.anion
else:
self.spectator = self.cation
self.N_buf = len(self.ions)
self.z = np.array([read_charge(ion) for ion in self.ions])
self.species = ["H+", "OH-"] + self.ions + [self.spectator]
self.charge = dict([(ion, read_charge(ion)) for ion in self.species])
if "Keq" in self.constants:
self.dissolved = self.constants["gas"] + "(aq)"
self.species += [self.dissolved]
if "Kh" in self.constants:
self.gas = self.constants["gas"] + "(g)"
self.species += [self.gas]
def equilibrate(self, **kwargs):
"""
Updates all other parameters from any two specified parameters.
Was a bit tricky to code, but I like what I came up with.
"""
if "buffer" not in self.constants:
return self.equilibrate_simple(**kwargs)
variables = ["s"] # to be in order of decreasing likelihood to be held constant
if "Kh" in self.constants: # then we've got equilibrium with a vapor
variables += ["p"]
if (
"Keq" in self.constants
): # then we've got equilibrium with a dissolved species
variables += ["E"]
variables += ["F", "pH"]
fundict = {
("s", "p", "pH"): self.pH_sp,
("s", "E", "pH"): self.pH_sE,
("s", "F", "pH"): self.pH_sF,
("s", "pH", "F"): self.F_spH,
("s", "pH", "E"): self.E_spH,
("s", "pH", "p"): self.p_spH,
("F", "pH", "s"): self.s_FpH,
}
done = []
if len(kwargs) > 2:
print(
"Error, can't equilibrate: not enough degrees of freedom!\n"
+ " got the following: "
+ str(done)
)
return
elif len(kwargs) > 0: # then use those two parameters to set the rest:
for (var, value) in kwargs.items():
setattr(self, var, value)
done += [var]
while len(done) < 2:
for var in variables:
if getattr(self, var) is not None and var not in done:
done += [var]
# print(str(done))
break
else:
print(
"Error, can't equilibrate: Too many degrees of freedom!\n"
+ " got the following: "
+ str(done)
)
return
while len(done) < len(variables):
try:
(names, fun) = [
(n, f)
for (n, f) in fundict.items()
if n[0] in done and n[1] in done and n[2] not in done
][0]
except IndexError:
print(
"Error, can't equilibrate: no function appropriate to use.\n"
+ " got the following: "
+ str(done)
)
return
print("calling function " + str(fun)) # debugging
fun()
# print('names = ' + str(names) + ', vals = ' +
# str([getattr(self, names[0]), getattr(self, names[1])]) + ', a = ' + str(fun()))
done += [names[2]]
if self.verbose:
print("Equilibrated successfully! pH = " + str(self.pH))
def equilibrate_simple(self, **kwargs):
variables = ["s", "pH"]
fundict = {"s": self.pH_s, "pH": self.s_pH}
if len(kwargs) > 1:
print(
"Error, can't equilibrate: not enough degrees of freedom!\n"
+ " got the following: "
+ str(kwargs.keys())
)
return
elif len(kwargs) == 1: # then use those two parameters to set the rest:
for var, value in kwargs.items():
setattr(self, var, value)
fun = fundict[var]
else:
for var in variables:
if getattr(self, var) is not None:
fun = fundict[var]
break
else:
print(
"Error, can't equilibrate: Too many degrees of freedom!\n"
+ " simply got nothing."
)
return
fun()
if self.verbose:
print("Equilibrated successfully! pH = " + str(self.pH))
def get_concentrations(self, **kwargs):
if len(kwargs) > 0:
self.equilibrate(**kwargs)
bufvec = self.buffer_vec(self.pH)
bv = sum(bufvec)
self.conc = {}
for b, ion in zip(bufvec, self.ions):
self.conc[ion] = self.F * b / bv
self.conc["H+"] = np.power(10.0, -self.pH)
self.conc["OH-"] = Kw / self.conc["H+"]
if hasattr(self, "dissolved"):
self.conc[self.dissolved] = self.E - self.F
if hasattr(self, "gas"):
self.conc[self.gas] = self.conc[self.dissolved] * self.constants["Kh"]
self.conc[self.spectator] = self.s
return self.conc
def get_conductivity(self, **kwargs):
if len(kwargs) > 0 or not hasattr(self, "conc"):
self.get_concentrations(**kwargs)
self.kappa = 0
self.kap = {}
for ion, conc in self.conc.items():
# print(ion)
charge = read_charge(ion)
if charge != 0:
self.kap[ion] = (
conc
* 1e3
* Chem.Far
* np.abs(charge)
* electrolyte_dict["mobility"][ion]
)
# units: mol/l * l/m^3 * C/mol * m^2/(V*s) = A/(V*m) = S/m
self.kappa += self.kap[ion]
return self.kappa
def get_conductivities(self, **kwargs):
self.get_conductivity(**kwargs)
return self.kap
def pHsolve(self, residual):
"""
Whatever equilibrium model I use, this is how the actual numerical
solution will go down. Residual is the total net charge concentration
as a function of pH, which of course gives 0 for the right pH.
"""
if self.pH is not None:
pH0 = self.pH
else:
pH0 = 7
try:
pH = brentq(residual, pH0 - 0.1, pH0 + 0.1)
except ValueError:
if self.verbose:
print("jump from pH = " + str(pH0) + " : brentq from scratch.")
pH = brentq(residual, -6, 20)
self.pH = pH
return pH
def pH_s(self, s=None):
"""
sets self.pH using s-F equilibrium, i.e. excluding dissolved gas
"""
if s is None:
s = self.s
sc = s * self.charge[self.spectator] # spectator charge
def residual(pH):
return sc + np.power(10, -pH) - Kw * np.power(10, pH)
return self.pHsolve(residual)
def s_pH(self, pH=None):
if pH is None:
pH = self.pH
x = np.power(10.0, -pH)
sc = Kw / x - x # spectator charge
s = sc / self.charge[self.spectator]
self.s = s
return s
def buffer_vec(self, pH):
"""
Useful concept for acid-base equilibrium with multiple Ka's:
bufvec = [1, Ka1/x, Ka2*Ka1/x^2, ...]
where x is proton concentration, and pKa1<pKa2<...
"""
return np.array(
[
np.prod(self.Ka[0:i]) / np.power(10.0, -pH * i)
for i in range(self.N_buf)
]
)
# if 10 rather than 10.0, I get nans
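# Worked example of the buffer_vec concept. The pKa values below are textbook
# carbonate values; the ones actually used are read from data/Electrolytes.txt:
#     with pKa = [6.35, 10.33] and pH = 7 (so x = 1e-7 M),
#     bufvec = [1, Ka1/x, Ka1*Ka2/x^2] ~ [1, 4.5, 2e-3]
# i.e. HCO3- dominates near neutral pH, as expected for carbonate.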
def pH_sp(self, s=None, p=None):
"""
sets self.pH using s-p equilibrium.
Only works for electrolytes in eq. with gas, e.g. carbonate
"""
if s is None:
s = self.s
if p is None:
p = self.p
Keq = self.constants["Keq"]
Kh = self.constants["Kh"]
def buffer_charge(pH):
bufvec = self.buffer_vec(pH)
return p * Keq / Kh * np.dot(self.z, bufvec)
sc = s * self.charge[self.spectator] # spectator charge
def residual(pH):
return sc + np.power(10, -pH) - Kw * np.power(10, pH) + buffer_charge(pH)
return self.pHsolve(residual)
def pH_sE(self, s=None, E=None):
"""
sets self.pH using s-E equilibrium, i.e. including dissolved gas
"""
if s is None:
s = self.s
if E is None:
E = self.E
try:
Keq = self.constants["Keq"]
except KeyError:
if self.verbose:
print("no gas equilibrium in electrolyte " + self.name)
return self.pH_sF(s, E)
def buffer_charge(pH):
bufvec = self.buffer_vec(pH)
return E * np.dot(self.z, bufvec) / (1 / Keq + np.sum(bufvec))
sc = s * self.charge[self.spectator] # spectator charge
def residual(pH):
return sc + np.power(10, -pH) - Kw * np.power(10, pH) + buffer_charge(pH)
return self.pHsolve(residual)
def pH_sF(self, s=None, F=None):
"""
sets self.pH using s-F equilibrium, i.e. excluding dissolved gas
"""
if s is None:
s = self.s
if F is None:
F = self.F
def buffer_charge(pH):
bufvec = self.buffer_vec(pH)
return F * np.dot(self.z, bufvec) / np.sum(bufvec)
sc = s * self.charge[self.spectator] # spectator charge
def residual(pH):
return sc + np.power(10, -pH) - Kw * np.power(10, pH) + buffer_charge(pH)
return self.pHsolve(residual)
def p_spH(self, s=None, pH=None):
if s is None:
s = self.s
if pH is None:
pH = self.pH
try:
Kh = self.constants["Kh"]
Keq = self.constants["Keq"]
except KeyError:
# print('no gas equilibrium in electrolyte ' + self.name)
return self.E_spH(s, pH)
# print('pH = ' + str(pH) + ', s = ' + str(s))
x = np.power(10.0, -pH) # 10 instead of 10.0 gives error. wtf numpy?
bufvec = self.buffer_vec(pH)
# print('bufvec = ' + str(bufvec) + '\ndot product = ' + str(np.dot(bufvec, self.z)))
sc = s * self.charge[self.spectator] # spectator charge
p = (-sc - x + Kw / x) * Kh / (Keq * np.dot(bufvec, self.z))
self.p = p
return p
def E_spH(self, s=None, pH=None):
if s is None:
s = self.s
if pH is None:
pH = self.pH
try:
Keq = self.constants["Keq"]
except KeyError:
# print('no gas equilibrium in electrolyte ' + self.name)
return self.F_spH(s, pH)
x = np.power(10.0, -pH)
bufvec = self.buffer_vec(pH)
sc = s * self.charge[self.spectator] # spectator charge
E = (-sc - x + Kw / x) * (sum(bufvec) + 1 / Keq) / np.dot(bufvec, self.z)
self.E = E
return E
def F_spH(self, s=None, pH=None):
if s is None:
s = self.s
if pH is None:
pH = self.pH
x = np.power(10.0, -pH)
bufvec = self.buffer_vec(pH)
sc = s * self.charge[self.spectator] # spectator charge
F = (-sc - x + Kw / x) * sum(bufvec) / np.dot(bufvec, self.z)
self.F = F
return F
def s_FpH(self, F=None, pH=None):
if F is None:
F = self.F
if pH is None:
pH = self.pH
x = np.power(10.0, -pH)
bufvec = self.buffer_vec(pH)
sc = Kw / x - x - F * np.dot(bufvec, self.z) / sum(bufvec) # spectator charge
s = sc / self.charge[self.spectator]
self.s = s
return s
def electrolysis_ode(quantities, t, pars):
"""
It'll take some work to make this completely general. Designed now for s-E equilibrium
for carbonate.
Also this is in serious need of vectorization.
"""
electrolyte = pars[0]
equilibrium_type = pars[1] # for example: sE, or sF
cq_dot = pars[2](t)
quantitiesdict = {
equilibrium_type[0]: quantities[0],
equilibrium_type[1]: quantities[1],
}
# quantitiesdict is a **kwarg dictionary for inputting the parameters specified
# by the equilibrium type.
kap = electrolyte.get_conductivities(**quantitiesdict)
# this will equilibrate the electrolyte. Then I can just read the conductivities
kappa = electrolyte.kappa
ddt = {}
ddt["F"] = 0
for ion, k in kap.items():
# print('ion: ' + ion)
if ion == electrolyte.spectator:
ddt["s"] = k / (read_charge(ion) * kappa) * cq_dot
elif ion in electrolyte.ions:
ddt["F"] += k / (read_charge(ion) * kappa) * cq_dot
ddt["E"] = ddt["F"]
dquantitiesdt = np.array([ddt[q] for q in list(equilibrium_type)])
return dquantitiesdt
def electrolysis_simple_ode(s, t, pars):
electrolyte = pars[0]
cq_dot = pars[1](t)
# quantitiesdict is a **kwarg dictionary for inputting the parameters specified
# by the equilibrium type.
kap = electrolyte.get_conductivities(s=s)
k = kap[electrolyte.spectator]
z = electrolyte.charge[electrolyte.spectator]
# this will equilibrate the electrolyte. Then I can just read the conductivities
kappa = electrolyte.kappa
dsdt = k / (z * kappa) * cq_dot
return dsdt
def electrolyze(
electrolyte="standard",
equilibrium_type="sE",
tj=None,
tpulse=60,
tspan=None,
colors={},
j_el=-5,
L=100e-6,
ax="new",
leg=True,
):
"""
calculates pH and acid/base concentrations as a function of time during
electrolysis, starting with a given electrolyte state and assuming
proton-consuming (j_el<0) or proton-releasing (j_el>0) reactions and an
isolated film (diffusion layer) of thickness L over the electrode.
"""
if tj is None:
if tspan is None:
tspan = [0, tpulse]
tvec = np.linspace(tspan[0], tspan[-1], 1000)
def j_fun(t):
if t < 0:
return 0
if t < tpulse:
return j_el
return 0
else:
t_in = tj[0]
j_in = tj[1]
tvec = t_in
def j_fun(t):
if t < t_in[0]:
return 0
if t < t_in[-1]:
return np.interp(t, t_in, j_in)
return 0
def cq_dot_fun(t):
return -j_fun(t) / (Chem.Far * L) * 1e-2
# units: mA/cm^2 / (C/mol * m) * (A/m^2/(mA/cm^2)) (M / (mol/m^3)) = M / s
if type(electrolyte) is str:
electrolyte = Electrolyte(electrolyte)
electrolyte.equilibrate()
electrolyte.verbose = False # otherwise it really screams.
vec = {}
quantitiesdict = []
if "buffer" in electrolyte.constants:
pars = (electrolyte, equilibrium_type, cq_dot_fun)
var_0 = equilibrium_type[0]
var_1 = equilibrium_type[1]
quantities0 = [getattr(electrolyte, var_0), getattr(electrolyte, var_1)]
solution = odeint(electrolysis_ode, quantities0, tvec, args=(pars,))
vec[var_0] = solution[:, 0]
vec[var_1] = solution[:, 1]
for (q0, q1) in zip(vec[var_0], vec[var_1]):
quantitiesdict += [{var_0: q0, var_1: q1}]
else:
s0 = electrolyte.s
pars = (electrolyte, cq_dot_fun)
solution = odeint(electrolysis_simple_ode, s0, tvec, args=(pars,))
vec["s"] = solution
quantitiesdict = [{"s": q} for q in vec["s"]]
pHvec = []
y = {}
for species in electrolyte.get_concentrations().keys():
if "(g)" not in species:
print("preparing space to get: " + species)
y[species] = []
for qdict in quantitiesdict:
for species, conc in electrolyte.get_concentrations(**qdict).items():
# print('geting data for: ' + species)
if species in y:
y[species] += [conc]
pHvec += [electrolyte.pH]
default_colors = ["m", "b", "c", "g", "y", "r", "0.5"]
if ax == "new":
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax2 = ax1.twinx()
for species, vec in y.items():
if species in colors:
color = colors[species]
else:
color = default_colors.pop()
print("plotting: " + species)
ax1.plot(tvec, vec, color=color, label=species)
ax2.plot(tvec, pHvec, color="k", label="pH")
if leg:
ax1.legend()
ax1.set_ylabel("concentration / M")
ax1.set_xlabel("time / s")
ax2.set_ylabel("pH")
return [ax1, ax2]
if __name__ == "__main__":
plt.close("all")
el1 = Electrolyte("standard", verbose=True)
print("saturating electrolyte 1 with 1 bar gas...")
el1.equilibrate(p=1)
print("saturated, pH = " + str(el1.pH))
el2 = Electrolyte(
cation="K", anion="PO4---", concentration=1.0, pH=12, verbose=False
)
titrateit = False
if titrateit:
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax2 = ax1.twinx()
F = 0.5
el2.F = F
y = {}
pHvec = []
for species in el2.get_concentrations().keys():
if species in el2.ions:
y[species] = []
svec = np.linspace(0, 2, 1000)
for s in svec:
for species, conc in el2.get_concentrations(s=s, F=F).items():
if species in y:
y[species] += [conc]
pHvec += [el2.pH]
colors = ["m", "b", "c", "g", "y", "r"]
for (species, vec), color in zip(y.items(), colors):
ax1.plot(svec, vec, color=color, label=species)
ax2.plot(svec, pHvec, color="k", label="pH")
ax1.legend()
ax1.set_ylabel("concentration / M")
ax1.set_xlabel("K+ concentration / M")
ax2.set_ylabel("pH")
ax1.set_title("total acid/base concentration = " + str(F) + " M")
electrolyzeit = True
if electrolyzeit:
el3 = Electrolyte("phosphate", F=0.5, s=0.05)
electrolyze(el3, equilibrium_type="sF", tpulse=100, j_el=-10, ax="new")
el4 = Electrolyte("perchloric acid", s=0.1)
el6 = Electrolyte("hydroxide", s=1)
electrolyze(el6, tpulse=100, j_el=10, ax="new")
# print(el1.pH_sp(s=0.1, p=400e-6))
|
ScottSoren/EC_MS | src/EC_MS/spectra.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 15:55:31 2020
@author: scott
"""
import os, re, pickle
import numpy as np
from matplotlib import pyplot as plt
from .PVMassSpec import read_PVMS
from .MS import Peak
from .dataset import Dataset
float_match = r"[-]?\d+[\.]?\d*(e[-+]?\d+)?"  # matches floats like '-3.5e4' or '7' or '245.13' or '1e-15'
def data_to_spectrums(data):
spectrums = []
x_list = [col for col in data.keys() if re.search("^" + float_match + "$", col)]
x = np.array([float(v) for v in x_list])
I_sort = np.argsort(x)
x = x[I_sort]
tstamp = data["tstamp"]
ts = data[data["t_str"]]
for (n, t) in enumerate(ts):
try:
y = np.array([float(data[x_list[I]][n]) for I in I_sort])
except TypeError:
print(
"Warning!!! couldn't convert value "
+ " in spectrum number "
+ str(n)
+ ". Skipping that spectrum."
)
continue
spectrum = Spectrum(x=x, y=y, t=t, tstamp=tstamp)
spectrums += [spectrum]
return spectrums
class Spectrum:
def __init__(
self,
file_path=None,
x=None,
y=None,
t=None,
tstamp=None,
index=0,
data_type="PVMS",
):
# print("Initiating Spectrum!") # debugging
if x is None and y is None:
spectra = Spectra(file_path, data_type=data_type)
spectrum = spectra[index]
x, y, t, tstamp = spectrum.x, spectrum.y, spectrum.t, spectrum.tstamp
# print(file_path) # debugging
self.x = x
self.y = y
self.t = t
self.tstamp = tstamp
self.file_path = file_path
self.bg = 0
if file_path is not None:
self.folder, self.file = os.path.split(file_path)
def get_signal(self, Mspan=None):
x, y = self.x, self.y
if Mspan is not None:
mask = np.logical_and(Mspan[0] < x, x < Mspan[-1])
x, y = x[mask], y[mask]
return x, y
def plot(self, Mspan=None, ax="new", **kwargs):
"""
plots the spectrum. kwargs are fed to matplotlib.pyplot.plot
"""
if ax == "new":
fig, ax = plt.subplots()
ax.set_xlabel("m/z")
ax.set_ylabel("signal")
x, y = self.get_signal(Mspan=Mspan)
ax.plot(x, y, **kwargs)
return ax
def reset(self):
"""
undoes self.set_background
"""
self.y = self.y + self.bg
self.bg = 0
def set_background(self, M_bg=None, Mspan=None):
"""
sets self.bg to the average value within Mspan.
If Mspan is none, sets self.bg to min(self.y)
Either way, saves self.bg and subtracts it from self.y
self.reset() undoes this.
This function calls self.reset() at the beginning to not compound
background sets
"""
self.reset()
if M_bg is None and Mspan is not None: # I can see this being a mistake
M_bg = Mspan
if M_bg is not None:
x_bg, y_bg = self.get_signal(Mspan=M_bg)
bg = np.mean(y_bg)
else:
bg = min(self.y)
self.y = self.y - bg
self.M_bg = M_bg
self.bg = bg
return bg
def get_max(self, Mspan=None, ax=None):
x, y = self.get_signal(Mspan=Mspan)
return max(y)
def get_integral(self, Mspan=None, M_bg=None, ax=None, **kwargs):
x, y = self.get_signal(Mspan=Mspan)
if M_bg is not None:
x_bg, y_bg = self.get_signal(Mspan=M_bg)
bg = np.mean(y_bg)
else:
bg = 0
integral = np.trapz(y - bg, x)
if ax is not None:
if ax == "new":
ax = self.plot(Mspan=Mspan)
ax.fill_between(x, y, bg * np.ones(x.shape), **kwargs)
return integral
def get_peak(self, Mspan=None, mass=None, width=None):
if Mspan is None:
M = float(mass[1:])
Mspan = [M - width / 2, M + width / 2]
x, y = self.get_signal(Mspan=Mspan)
peak = Peak(x, y)
return peak
def __sub__(self, spectrum_2):
y_diff = self.y - spectrum_2.y
x = self.x
t = self.t
tstamp = self.tstamp
return Spectrum(x=x, y=y_diff, t=t, tstamp=tstamp)
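# Minimal usage sketch for Spectrum. The arrays here are made-up placeholders,
# not data shipped with EC_MS:
#     spec = Spectrum(x=np.arange(1.0, 50.0, 0.1), y=my_signal_array)
#     spec.set_background(Mspan=[40, 45])  # subtract the mean signal on m/z 40-45
#     A_32 = spec.get_integral(Mspan=[31.5, 32.5])  # integrated signal around m/z 32
#     spec.plot(Mspan=[25, 35])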
def spectrums_from_data(data):
x = data["x"]
spectra = data["spectra"]
spectrums = []
for i, y in enumerate(spectra):
spectrum = Spectrum(x=x, y=y)
spectrums += [spectrum]
return spectrums
def spectra_from_data(data): # should be class function of Spectra
spectrums = spectrums_from_data(data)
print("spectra_from_daa: spectrums = " + str(spectrums)) # debugging
return Spectra(spectrums=spectrums)
class Spectra:
def __init__(
self,
file_path=None,
folder=None,
spectrums=None,
data=None,
tstamp=None,
data_type="PVMS",
name="spectra",
):
# print(spectrums) # debugging
if file_path is not None and file_path[-4:] == ".pkl":
with open(file_path, "rb") as f:
spectra_data = pickle.load(f)
tstamp = spectra_data["tstamp"]
data = spectra_data["data"]
self.x = spectra_data["x"]
self.spectra = spectra_data["spectra"]
if "name" in spectra_data:
self.name = spectra_data["name"]
self.data = data
if tstamp is None and "tstamp" in data:
tstamp = data["tstamp"]
try: # okay, doing this twice, but whatever.
self.t = data[data["t_str"]]
except KeyError:
print("Warning!!! can't find t in self.data")
spectrums = self.spectrums_from_spectra()
elif data is None and spectrums is None:
if data_type == "PVMS":
data = read_PVMS(file_path)
spectrums = data_to_spectrums(data)
if tstamp is None and "tstamp" in data:
tstamp = data["tstamp"]
else:
print(
"Spectra.__init__ does not yet support reading spectrums "
+ "from files with data_type = "
+ data_type
)
self.tstamp = tstamp
self.file_path = file_path
if file_path is not None:
self.folder, self.file = os.path.split(file_path)
name = "spectra from " + self.file
self.spectrums = spectrums
self.data = data
try:
self.t = data[data["t_str"]]
except KeyError:
print("Warning!!! can't find t in self.data")
if not hasattr(self, "x"): # it'll already have this if loaded form pickle
self.x = spectrums[0].x
if not hasattr(
self, "spectra"
): # it'll already have this if loaded from pickle
self.spectra = np.stack([spectrum.y for spectrum in spectrums])
if not hasattr(self, "name"):
self.name = name
def __getitem__(self, key):
if type(key) is int:
return self.spectrums[key]
elif key in self.data:
return self.data[key]
elif hasattr(self, key):
return getattr(self, key)
raise KeyError(
"Spectra has no attribute " + key + ", and spectra.data has no key " + key
)
def __len__(self):
return len(self.spectrums)
def spectrums_from_spectra(self, spectra=None, x=None):
if spectra is None:
spectra = self.spectra
if x is None:
x = self.x
spectrums = []
for i, y in enumerate(spectra):
if "t" in self.data:
t_i = self.data["t"][i]
elif hasattr(self, "t"):
t_i = self.t[i]
else:
t_i = None
# print(f"{t_i} should be {self.t[i]}") # debugging
spectrum = Spectrum(x=x, y=y, t=t_i)
spectrums += [spectrum]
self.spectrums = spectrums
return spectrums
def save(self, file_name):
"""
Has the problem that it won't save properly if spectrums are different
length. However, PVMassSpec can't save that kind of file, and neither
can (as of now) Zilien.
"""
spectra_data = {
"x": self.x,
"spectra": self.spectra,
"data": self.data,
"tstamp": self.tstamp,
"file_path": self.file_path,
"name": self.name,
}
with open(file_name, "wb") as f:
pickle.dump(spectra_data, f)
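# Hedged round-trip sketch for Spectra.save; "my_scans.dat" and "my_scans.pkl"
# are arbitrary example file names:
#     spectra = Spectra("my_scans.dat")   # parsed with read_PVMS by default
#     spectra.save("my_scans.pkl")
#     reloaded = Spectra("my_scans.pkl")  # the ".pkl" suffix takes the pickle branch
#     reloaded.heat_plot(vs="t")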
def heat_plot(
self,
ax="new",
vs="number",
logscale=True,
orientation="yx",
zrange=None,
**kwargs,
):
"""
kwargs are passed on to imshow.
"""
spectra = self.spectra
if ax == "new":
fig, ax = plt.subplots()
if vs == "number":
t = np.arange(len(self))
t_label = "scan number"
elif vs == "t":
t = self.t
t_label = "time / [s]"
M = self.x
# this makes the extent one increment off.
t = np.append(t, 2 * t[-1] - t[-2])
M = np.append(M, 2 * M[-1] - M[-2])
if orientation == "xy":
spectra = np.swapaxes(spectra, 0, 1)
spectra = np.flip(spectra, axis=0)
ax.set_ylabel("m/z")
ax.set_xlabel(t_label)
else:
ax.set_ylabel(t_label)
ax.set_xlabel("m/z")
if "extent" not in kwargs:
if orientation == "xy":
extent = [t[0], t[-1], M[0], M[-1]]
elif orientation == "yx":
extent = [M[0], M[-1], t[-1], t[0]]
kwargs.update(extent=extent)
if logscale:
spectra = np.log(spectra)
if zrange is None:
good = np.logical_and(~np.isnan(spectra), ~np.isinf(spectra))
# print('spectra = \n' + str(spectra)) # debugging
low = np.min(spectra[good])
high = np.max(spectra[good])
else:
low = zrange[0]
high = zrange[1]
spectra[spectra < low] = low
spectra[spectra > high] = high
spectra[np.isnan(spectra)] = low
spectra[np.isinf(spectra)] = low
if "aspect" not in kwargs:
kwargs.update(aspect="auto")
elif kwargs["aspect"] == "square":
if orientation == "xy":
aspect = (t[-1] - t[0]) / (M[-1] - M[0])
elif orientation == "yx":
aspect = (M[-1] - M[0]) / (t[-1] - t[0])
kwargs.update(aspect=aspect)
if "cmap" not in kwargs:
kwargs.update(cmap="inferno")
ax.imshow(spectra, **kwargs)
def get_dataset(
self,
masses,
mode="gauss_height",
fit_width=1,
y_bg=None,
bg_mode=None,
endpoints=2,
):
dataset = Dataset(data_type=mode)
dataset.data = {"timecols": {}}
for mass in masses:
xcol, ycol = mass + "-x", mass + "-y"
x = np.array([])
y = np.array([])
M = float(mass[1:])
Mspan = [M - fit_width / 2, M + fit_width / 2]
for spectrum in self.spectrums:
peak = spectrum.get_peak(Mspan=Mspan)
try:
peak.fit_gauss(y_bg=y_bg, bg_mode=bg_mode, endpoints=endpoints)
height = peak.height
except (IndexError, AttributeError):
print(
f"Warning!!! can't fit data in the range for {mass} at t~{spectrum.t}. putting nan."
)
height = np.nan
x = np.append(x, spectrum.t)
y = np.append(y, height)
dataset.add_data_col(xcol, x, col_type=mode)
dataset.add_data_col(ycol, y, col_type=mode)
dataset.timecols[ycol] = xcol
if not "spectrum number" in dataset.data:
# ^ this should get called for the first mass column
n_vec = np.arange(len(x))
dataset.add_data_col(col="spectrum number", value=n_vec, timecol="M4-x")
dataset.data["tstamp"] = self.data["tstamp"]
dataset.data["data_type"] = "spectra"
dataset["title"] = self.name
dataset.empty = False
return dataset
|
ScottSoren/EC_MS | src/EC_MS/Chem/Thermochem.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 00:21:47 2017
@author: scott
17G30:
There may be something time-saving here:
/home/scott/Dropbox/other_DTU/MSc/Scripts/PYTHON3/Masters Project/15L29 Report Intro
... but I can probably do better from scratch
or, better, build off someone elses work.
how about this?
https://pypi.python.org/pypi/CoolProp/2.2.3
#or a function that looks up from hbcp or something...
googling around does not make it seem easy
"""
import re
import numpy as np
from math import gcd
from .PhysCon import R, Far
from .MolarMasses import get_elements
dfH0 = { # standard enthalpies of formation / [kJ/mol]
"H2O(g)": -241.82,
"H2O(l)": -285.8,
"CH3CH2OH(g)": -234.8, # Langes Handbook
"CH3CH2OH(l)": -277,
"CH3CH2CH2OH(g)": -256, # NIST
"CH3CH2CHO(g)": -188.7, # NIST
"C2H6(g)": -84.0, # NIST
"HCOOH(g)": -378.6, # NIST
"CH4(g)": -74.9, # NIST
"CH3CHO(g)": -166.1,
"CH3OH(g)": -201.2, # Langes Handbook, John A Dean, 15th edition
"CO(g)": -110.5,
"C2H4(g)": 52.4, # NIST
"C(s)": 0,
"O2(g)": 0,
"H2(g)": 0,
"Cu2O(s)": -170.71, # NIST
"CuO(s)": -156.06, # NIST
"Cu(OH)2(s)": -450.37, # NIST
"CO2(g)": -393.5,
}
S0 = { # standard entropy / [J/(mol*K)]
"H2O(g)": 188.72,
"H2O(l)": 69.940,
"CO2(g)": 213.8,
"CH3CH2OH(g)": 281.6, # Langes Handbook
"CH3CH2OH(l)": 161,
"CH3CH2CH2OH(g)": 322.49, # NIST
"CH3CH2CHO(g)": 304.4, # NIST
"HCOOH(g)": 248.7, # NIST
"CO(g)": 197.7,
"C2H4(g)": 219.3, # NIST
"H2(g)": 130.68, # NIST
"C(s)": 5.6, # NIST
"N2(g)": 191.61, # NIST
"O2(g)": 205.15, # NIST
#'C2H6(g)':229.5, #http://bilbo.chm.uri.edu/CHM112/tables/thermtable.htm
"CH4(g)": 186.7, # NIST
"CH3CHO(g)": 263.8,
"CH3OH(g)": 126.8, # Langes Handbook, <NAME>, 15th edition
"Cu(s)": 33.17, # NIST
"Cu2O(s)": 92.37, # NIST
"CuO(s)": 42.59, # NIST
"Cu(OH)2(s)": 108.43, # NIST
}
dfG0 = { # standard free energies of formation / [kJ/mol]
# I need to find a way to query a reliable database, i.e., NIST.
# Most of these standard energies were from
# bilbo.chm.uri.edu/CHM112/tables/thermtable.htm,
# commented out because the source is no longer accessible.
# (Kept as comments rather than a bare string, which would silently
# concatenate with the "C3H6" key below.)
# 'H2O':-237.1,
# 'H2(g)':0,'O2(g)':0, 'Cu':0, 'C(s)':0,
# 'H2O(l)':-237.2,'CO2(g)':-394.4,'HCOOH(l)':-346,
# 'CO(g)':-137.2,'CH3OH(g)': -162.3,#'CH4(g)':-50.75,
# 'CH3COOH(l)':-389.9,'CH3CHO(g)':-133.4,'CH3CH2OH(g)':-167.9,
# 'C2H4(g)':68.12,'C3H8O(g)':-163.0,'C2H2O4(g)':-662.7,
# 'CO2(aq)':-386.2,
# 'HCOOH(aq)':-356,
# 'CH3COOH(aq)':-396.6,
# 'C2H2O4(aq)':-697.0, 'CH3OH(l)':-166.4, 'CH3CH2OH(l)':-174.9,
# http://www2.ucdsb.on.ca/tiss/stretton/database/organic_thermo.htm:
"C3H6": 62.0, # 74.7 , ... can't find a reliable one.
"C3H8": -23.4,
"C10H22": 33.32, # https://www.chemeo.com/cid/44-644-8/Decane
"Cu(s)": 0,
"Cu2(OH)2CO3(s)": -894.00, # Kiseleva1992, https://link.springer.com/content/pdf/10.1007%2FBF00204009.pdf
"e-": 0,
"H+": 0, # defining standard state in electrochemistry
"": 0, # anticipating the unanticipated
}
# standard pure substance states for formation energies
pure_states = {
"H": "H2(g)",
"C": "C(s)",
"N": "N2(g)",
"O": "O2(g)",
"Cu": "Cu(s)",
}
standard_states = dict(
[(mol, "g") for mol in ["H2", "CO", "CO2", "CH4", "C2H4", "O2"]]
+ [
(mol, "l")
for mol in [
"H2O",
"HCOOH",
"CH3OH",
"CH3COOH",
"CH3CH2OH",
"CH3CH2CH2OH",
"CH3CHO",
"C2H2O4",
]
]
+ [("e-", "")]
)
standard_redox = {
"H": +1,
"O": -2,
"N": -3,
"F": -1,
"Cl": -1,
"Br": -1,
"I": -1,
"S": -2,
"Li": +1,
"Na": +1,
"K": +1,
"Rb": +1,
"Cs": +1,
"e": -1, # electrons come with a negative charge
"-": +1,
"+": -1 # If charges are counted this way, then
# the sum of redox states in any compound is zero, which is convenient
}
dsH0 = { # enthalpy of solvation / [kJ/mol], for T dependence of kH
"ethanol": -19.5, # temperarily taken value for CO2 below to check that the function runs properly! Couldn't find it for ethanol
"CO2": -19.5, # Carroll1991
} # solvation enthalpy at 25C
kH0 = { # Henry's Law constant of volatility in bar*l/mol
"N2": 1660.87,
"CO2": 29.87,
"H2O": 0.0005791,
"CH3CH2OH": 0.0047595,
"Cl2": 10.411,
"CO": 1078.33,
"C2H4": 213.187,
"C2H6": 52.553,
"CH3CHO": 0.0714,
"Ar": 728.80,
"O2": 768.464,
"CH4": 713.928,
"He": 2726.81,
"H2": 1289.037,
"CH3OH": 0.00455,
"CH3CH2CHO": 0.0769,
"CH3CH2CH2OH": 0.00667,
"HCOOH": 1.78e-4,
} # all from Sander1999, I think. NIST WebBook quotes these values
aka = { # dictionary of pseudonyms
"C3H8O": "CH3CH2CH2OH",
"CH3CH2CH2OH(l)": "CH3CH2CH2OH(aq)",
"CH3CHO(l)": "CH3CHO(aq)",
# ^ the standard states are liquid, but I have data for the aqueous
"CO1": "CO",
}
def nu_to_rxn(nu, arrow="-->"):
"""
--- arguments
nu is a dictionary containing the stoichiometric coefficients, such as
{'CO2':-6, 'H2O':-6, 'C6H12O6':1, 'O2':6}
--- return
rxn is a string describing a reaction, such as
'6 CO2 + 6 H2O -> C6H12O6 + 6 O2'
"""
rxn = arrow
for mol, n in nu.items():
if type(n) is not int:
n = np.round(n, decimals=2)
if n > 0:
if not rxn[-len(arrow) :] == arrow:
rxn = rxn + " +"
if n == 1:
rxn = rxn + " " + mol
else:
rxn = rxn + " " + str(n) + " " + mol
elif n < 0:
if not rxn[0 : len(arrow)] == arrow:
rxn = "+ " + rxn
if n == -1:
rxn = mol + " " + rxn
else:
rxn = str(-n) + " " + mol + " " + rxn
return rxn
def rxn_to_nu(rxn, arrow=None):
"""
--- arguments
rxn is a string describing a reaction, such as
'6 CO2 + 6 H2O -> C6H12O6 + 6 O2'
--- return
nu is a dictionary containing the stoichiometric coefficients, such as
{'CO2':-6, 'H2O':-6, 'C6H12O6':1, 'O2':6}
"""
nu = {}
parts = rxn.split()
if arrow is None:
arrows = ["->", "-->"]
elif type(arrow) is str:
arrows = [arrow]
lr = -1 # -1 for left of arrow, +1 for right of arrow
n = 1 # nu[part]
for part in parts:
if part in arrows:
lr = 1
continue
if part == "+":
continue
try:
n = int(part)
continue
except ValueError:
try:
n = float(part)
continue
except ValueError:
pass
nu[part] = n * lr
n = 1
return nu
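# Round-trip sketch with the two functions above:
#     nu = rxn_to_nu("2 H2 + O2 --> 2 H2O")  # {'H2': -2, 'O2': -1, 'H2O': 2}
#     nu_to_rxn(nu)  # 'O2 + 2 H2 --> 2 H2O' (reactants are rebuilt right-to-left)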
def get_formation_reaction(comp, out="nu", verbose=False):
"""
Returns the formation reaction of comp, either as a string (out='string')
or as a dictionary of stoichiometric coefficients (out='nu')
"""
nu = {comp: 1}
elements = get_elements(comp)
for atom, n in elements.items():
ss = pure_states[atom]
n_atom = get_elements(ss)[atom]
if ss in nu:
nu[ss] += -n / n_atom
else:
nu[ss] = -n / n_atom
rxn = nu_to_rxn(nu)
if verbose:
print(rxn)
if out == "string":
return rxn
return nu
def read_state(comp, verbose=False):
match_state = re.search(r"\([a-z]+\)\Z", comp)
if match_state is None:
if verbose:
print("can't read state for " + comp)
return comp, None
c = comp[: match_state.start()]
s = match_state.group()[1:-1]
return (c, s)
def get_cs(c, s):
"""
cs is a compound with its state in parentheses, e.g. 'H2O(l)'. This function
generates that combined string from c and s.
"""
if s is None or s == "":
return c
s = s.strip()
c = c.strip()
if not (s[0] == "(" and s[-1] == ")"):
s = "(" + s + ")"
cs = c + s
return cs
def get_standard_state(comp, states={}, verbose=True, out="cs"):
c, s = read_state(comp)
if s is not None:
if verbose:
print(comp + " already has a state! Using state = " + s)
ss = s
comp = c
else:
try:
ss = states[comp] # compound with state
except KeyError:
if verbose:
print(
"no state given for "
+ comp
+ ".\n"
+ "Input it as, e.g. states={'"
+ comp
+ "':'aq'}"
)
if "+" in comp or "-" in comp:
ss = "aq"
if verbose:
print("I'll assume you meant " + ss)
else:
try:
ss = standard_states[comp]
if verbose:
print("Using its standard state, " + ss)
except KeyError:
ss = None
cs = get_cs(comp, ss)
# print(cs)
if out == "cs":
return cs
return comp, ss
def get_dfS(comp, T=None, verbose=True):
"""
Get the change in entropy in the standard formation reaction of a compound
"""
if T is not None:
print("T-dependence of entropy not implemented. Using S0.")
nu = get_formation_reaction(comp)
dfS0 = 0
for c, n in nu.items():
# print('c = ' + c + ', n = ' + str(n)) #for debugging
try:
dfS0 += n * S0[c]
except KeyError:
try:
cs = get_standard_state(c, verbose=verbose)
dfS0 += n * S0[cs]
except KeyError:
if verbose:
print(
"no standard entropy available for "
+ c
+ ". Couldn't get dfS for "
+ comp
+ ". Returning None."
)
return None
return dfS0
def get_dfH(comp, T=None, verbose=True):
"""
Get the change in enthalpy in the standard formation reaction of a compound
"""
if T is not None:
print("T-dependence of enthalpy not implemented. Using H0.")
try:
dfH = dfH0[comp]
except KeyError:
try:
cs = get_standard_state(comp, verbose=verbose)
dfH = dfH0[cs]
except KeyError:
if verbose:
print(
"no standard enthalpy available for " + comp + ". Returning None."
)
return None
return dfH
def get_dfG(
comp,
T=None,
dfG={},
states={},
T0=298.15,
trycs=True,
tryaka=True,
verbose=True,
vverbose=False,
):
"""
Returns formation free energy of a compound in the specified state in kJ/mol
e.g. get_dfG('H2O(l)') returns the standard free energy change of the
reaction 'H2(l) + 1/2 O2(l) --> H2O(l)', which is dfG = -237.2 kJ/mol.
This is a stubborn and robust function that really really wants to give
you a free energy of formation for your compound.
This function first checks the input dfG (enabling its automated use
even when there's untabulated stuff in play).
Then it checks the tabulated value in the dictionary dfG0 at the top
of this module.
Then it checks if it can calculate dfG from dfS and dfH.
Then it checks if it can calculate dfG from another state, using e.g.
the Henry's-law constant to calculate the free energy of aqueous species
from the gas-phase free energy.
Finally, it checks if the compound was input without its state, in which
case it guesses a state and starts over.
"""
dfG1 = dfG0.copy()
dfG1.update(dfG)
if T is not None:
print("Temperature-dependent free energy not fully implemented.")
try:
dfH = get_dfH(comp, T=T)
dfS = get_dfS(comp, T=T)
dfG = dfH - T * dfS * 1e-3
return dfG
except (KeyError, TypeError):  # TypeError if dfH or dfS came back as None
print("Something missing. returning None for dfG(" + comp + ").")
return None
if comp in dfG1:
if verbose:
print("found dfG(" + comp + ")")
return dfG1[comp]
if verbose:
print("couldn't find dfG(" + comp + ")")
# Try and get it from enthalpy and entropy:
dfS = get_dfS(comp, verbose=vverbose)
dfH = get_dfH(comp, verbose=vverbose)
if dfS is not None and dfH is not None:
dfGc = dfH - T0 * dfS * 1e-3
print("returning dfH(" + comp + ") - T0 * dfS(" + comp + ")")
return dfGc
# Try and get it from other states of the same substance:
c, s = read_state(comp)
if s == "aq":
cs_g = get_cs(c=c, s="g")
dfGc_g = get_dfG(
cs_g,
T=T,
dfG=dfG,
states=states,
T0=T0,
trycs=trycs,
tryaka=tryaka,
verbose=verbose,
vverbose=vverbose,
)
if dfGc_g is not None: # then use the Henry's-law constant!
kH = get_kH(c, tryG=False) # tryG = False prevents the
# infinite recursion that would result from kH and dfG each trying the other
if kH is None:
print("just using dfG(" + cs_g + ")")
return dfGc_g
dfGc = dfGc_g + R * T0 * 1e-3 * np.log(kH)
if verbose:
print("returning dfG(" + cs_g + ") + RTln(kH)")
return dfGc
print("couldn't get dfG for " + comp + ".")
if tryaka:
if comp in aka:
print(comp + " is also known as " + aka[comp])
return get_dfG(
aka[comp],
T=T,
dfG=dfG,
states=states,
T0=T0,
trycs=trycs,
tryaka=False,
verbose=verbose,
vverbose=vverbose,
)
if trycs and s is None: # likely, the input just forgot the state
c, s = get_standard_state(comp, states=states, out="both", verbose=vverbose)
cs = get_cs(c, s)
return get_dfG(
cs,
T=T,
dfG=dfG,
states=states,
T0=T0,
trycs=False,
tryaka=tryaka,
verbose=verbose,
vverbose=vverbose,
)
print("Returning None.")
return None
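# Worked example of the dfH/dfS fallback for liquid water (numbers from the
# dfH0 and S0 tables above):
#     dfS = 69.94 - 130.68 - 205.15/2 = -163.3 J/(mol*K)
#     dfG0(H2O(l)) ~ -285.8 - 298.15 * (-0.1633) ~ -237.1 kJ/mol,
# matching the textbook value quoted in the docstring above.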
def get_drG(nu, states={}, dfG={}, verbose=True):
"""
returns standard free energy of formation in [kJ/mol] for a reaction given
as a dictionary of stoichiometric coefficients or as a reaction string.
"""
drG = 0
if type(nu) is str:
nu = rxn_to_nu(nu)
for comp, n in nu.items():
dfGc = get_dfG(comp, states=states, dfG=dfG, verbose=verbose)
if dfGc is None:
print(
"no free energy of formation available for "
+ comp
+ ".\n"
+ "Input it as, e.g. dfG={'"
+ comp
+ "':0}.\n"
+ "for now, I'll assume it's zero"
)
else:
drG += n * dfGc
return drG
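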
def p_vap(mol="H2O", T=298.15, unit="Pa"):
"""
Returns the vapor pressure of a molecule at a given temperature, based on
data in dfH0 and S0 dictionaries.
"""
dH = (dfH0[mol + "(g)"] - dfH0[mol + "(l)"]) * 1e3
dS = S0[mol + "(g)"] - S0[mol + "(l)"]
if unit == "Pa":
p0 = 1e5
elif unit == "bar":
p0 = 1
elif unit == "mbar":
p0 = 1000
p = p0 * np.exp(-dH / (R * T) + dS / R)
return p
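# Sanity check with the tabulated H2O values:
#     dH = (-241.82 + 285.8) * 1e3 = 43.98e3 J/mol
#     dS = 188.72 - 69.94 = 118.78 J/(mol*K)
#     p_vap("H2O", T=298.15) ~ 1e5 * exp(-17.74 + 14.29) ~ 3.2e3 Pa,
# close to the accepted ~3.17 kPa vapor pressure of water at 25 C.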
def get_kH(
comp=None, T=None, dfG={}, kH_0=None, dsH=None, T0=298.15, verbose=True, tryG=True
):
"""
Returns the Henry's-law constant (volatility, in bar*l/mol) of a molecule at a given temperature.
Yet to be fully implemented.
"""
if T is not None:
if kH_0 is None:
kH_0 = kH0[comp]
if dsH is None:
dsH = dsH0[comp]
kH_T = kH_0 * np.exp(-dsH / R * (1 / T0 - 1 / T))
return kH_T
if comp in kH0:
return kH0[comp]
c, s = read_state(comp)
if c in kH0:
return kH0[c]
if tryG:
cs_aq = get_cs(c, "aq")
dfG_aq = get_dfG(cs_aq, T=T, dfG=dfG, verbose=verbose)
cs_g = get_cs(c, "g")
dfG_g = get_dfG(cs_g, T=T, dfG=dfG, verbose=verbose)
deG = dfG_g - dfG_aq # free energy change of evaporation, i.e., aq --> g
kH = np.exp(-deG * 1e3 / (R * T0))
# ^ inverts the relation dfG(aq) = dfG(g) + R*T0*ln(kH) used in get_dfG
print("got kH from dfG(" + cs_aq + ")")
return kH
print("couldn't get kH")
def get_oxidation_state(comp, atom="C", redox_states={}):
"""
Gets the average oxidation state of a specified atom (default is carbon)
in a chemical formula, with all the other elements having standard oxidation
states or oxidation states specified here in redox_states.
"""
redox = standard_redox.copy()
redox.update(redox_states)
if type(comp) is str:
elements = get_elements(comp)
else:
elements = comp
ro = 0 # the combined oxidation state of everything other than atom
for element, n in elements.items():
if element == atom:
n_comp = n
continue
try:
ro += n * redox[element]
except KeyError:
print(
"I don't know the oxidation state of "
+ element
+ "\n"
+ "Input it in as redox_states = {atom: oxidation_state}.\n"
+ "For now, I'll assume it's zero."
)
return -ro / n_comp
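# Examples, following from the standard_redox values above:
#     get_oxidation_state("CO2")       # -> +4
#     get_oxidation_state("CH4")       # -> -4
#     get_oxidation_state("CH3CH2OH")  # -> -2 (average over the two carbons)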
def get_rxn_EC(
product="CH4",
reactant="CO2",
atom="C",
redox_states={},
extra=["H+", "H2O"],
out="string",
):
"""
Generates a balanced electrochemical reaction
inputs
- product : formula for product compound
- reactant : formula for reactant compound
- atom : we assume only this element changes oxidation state
- redox_states : dict for which each key is a spectator atom whose
redox state is assumed to be the corresponding value.
Default is at the top of this script
- extra : will be the compounds used to balance the spectator atoms.
NOT YET IMPLEMENTED. H2O and H+ are always used now.
- out : specifies output. default is reaction string.
out = 'nu' for stoichiometric coefficients of reaction
"""
redox = standard_redox.copy()
redox.update(redox_states)
elements_r = get_elements(reactant)
natoms_r = elements_r[atom]
ro_r = get_oxidation_state(elements_r, atom=atom, redox_states=redox_states)
elements_p = get_elements(product)
natoms_p = elements_p[atom]
ro_p = get_oxidation_state(elements_p, atom=atom, redox_states=redox_states)
nu = {} # balanced reaction will go here
if product == reactant:
print("product equals reactant. No reaction.")
return nu
n_atoms = natoms_r * natoms_p / gcd(natoms_r, natoms_p)
# total number of specified atom in balanced reaction
nu[reactant] = int(-n_atoms / natoms_r)
nu[product] = int(n_atoms / natoms_p)
nu["e-"] = n_atoms * (ro_p - ro_r)
# this can be done in a cool general way, but, assuming I've just got H, O, and atom:
if "O" in get_elements(nu):
if "H2O" in nu:
nu["H2O"] += -get_elements(nu)["O"]
else:
nu["H2O"] = -get_elements(nu)["O"]
if "H" in get_elements(nu):
if "H+" in nu:
nu["H+"] += -get_elements(nu)["H"]
else:
nu["H+"] = -get_elements(nu)["H"]
if not set(get_elements(nu).keys()).issubset({atom, "O", "H", "+", "-", "e"}):
print(
"function Chem.get_rxn_EC: WARNING!\n"
+ "Some of the atoms in your reaction aren't implemented.\n"
+ "The reaction won't be balanced."
)
rxn = nu_to_rxn(nu)
print(rxn)
if out == "string":
return rxn
return nu
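# Example with the defaults (CO2 reduction to methane): carbon goes from +4 to
# -4, so 8 electrons per CO2, with H2O and H+ balancing the O and H. Up to term
# order and int/float formatting, the printed reaction is:
#     get_rxn_EC(product="CH4", reactant="CO2")
#     # '8 H+ + 8 e- + CO2 --> CH4 + 2 H2O'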
def get_rxn_cell(rxn_an, rxn_cat, out="string"):
if type(rxn_an) is str:
rxn_an = rxn_to_nu(rxn_an)
if type(rxn_cat) is str:
rxn_cat = rxn_to_nu(rxn_cat)
n_an = int(rxn_an["e-"]) # positive for forward reaction
n_cat = int(rxn_cat["e-"]) # negative for forward reaction
n = (
-n_an * n_cat / gcd(n_an, n_cat)
) # positive if both are forward or both backwards
nu = {}
for comp, i in rxn_an.items():
if comp in nu:
nu[comp] += n / n_an * i # positive if rxn_cat is forwards
else:
nu[comp] = n / n_an * i
for comp, i in rxn_cat.items():
if comp in nu:
nu[comp] += -n / n_cat * i # positive if rxn_an is forwards
else:
nu[comp] = -n / n_cat * i
rxn = nu_to_rxn(nu)
print(rxn)
if out == "string":
return rxn
return nu
def get_rxn_c(
reactant="CH4",
product="CO2",
atom="C",
redox_states={},
extra=["H+", "H2O"],
out="string",
):
"""
Returns the combustion reaction for a product.
"""
nu_cat = get_rxn_EC(product="H2O", reactant="O2", atom="O", out="nu")
nu_an = get_rxn_EC(
product=product,
reactant=reactant,
redox_states=redox_states,
extra=extra,
out="nu",
)
nu_c = get_rxn_cell(nu_an, nu_cat, out="nu")
if out == "string":
return nu_to_rxn(nu_c)
else:
return nu_c
def get_standard_potential(nu, pH=0, T=298.15, states={}, dfG={}, verbose=True):
"""
returns equilibrium potential in [V] on SHE scale for a reaction given
as a dictionary of stoichiometric coefficients or as a rxn string.
If nu['H+'] == nu['e-'], then leave pH=0 to get the pH-independent
standard potential on the RHE scale
"""
if type(nu) is str:
nu = rxn_to_nu(nu)
drG = get_drG(nu, states=states, dfG=dfG, verbose=verbose)
n_el = nu["e-"]
E0 = drG * 1e3 / (n_el * Far)
if "H+" in nu.keys():
n_H = nu["H+"]
E0 = E0 - n_H / n_el * np.log(10) * R * T / Far * pH
return E0
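# Hedged example combining get_rxn_EC and get_standard_potential; with the
# thermochemical data above this should come out near the textbook value:
#     nu = get_rxn_EC(product="CH4", reactant="CO2", out="nu")
#     get_standard_potential(nu)  # ~ +0.17 V, pH-independent on the RHE scale
#                                 # since nu['H+'] == nu['e-']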
def get_dcG(
reactant="CH4",
product="CO2",
atom="C",
redox_states={},
extra=["H+", "H2O"],
states={},
dfG={},
verbose=True,
):
nu = get_rxn_c(
product=product,
reactant=reactant,
redox_states=redox_states,
extra=extra,
out="nu",
)
dcG = get_drG(nu, states=states, dfG=dfG, verbose=verbose)
if not nu[reactant] == -1:
dcG = -dcG / nu[reactant]
return dcG
|
ScottSoren/EC_MS | src/EC_MS/Integrate_Signals.py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 2 22:50:34 2016
Most recently edited: 16I23
@author: Scott
This module has functions for integrating and averaging signals over specified
time frames or cycles, mostly for pulsed electrolysis experiments.
"""
# make python2-compatible:
from __future__ import print_function
from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
import os
from .Plotting import plot_experiment
from .EC import select_cycles
from .Combining import cut_dataset
from .Quantification import get_flux, get_potential, get_current
def get_datapoints(
dataset,
cycles,
mols=["H2", "C2H4", "CH4"],
tspan=[0, 100],
t_steady=[50, 60],
Vcycle=0,
transient="CH4",
colors=None,
cycle_str=None,
plotcycles=False,
plottransient=False,
data_type="CA",
verbose=True,
):
"""
Ways to control this function:
(1) put in a dictionary for mols with plotting colors and sub-dictionaries
for products that should be split into transient ('dyn') and steady-state ('ss')
for example, mols={'C2H4':'g', 'CH4':{'ss':'r','dyn':[0.8, 0, 0]}
All transient integrations will use same t_steady
(2)
"""
if verbose:
print("\n\nfunction 'get_datapoints' at your service!\n")
# interpret inputs.
if type(mols) is dict:
colors = mols.copy()
if colors is None:
colors = mols
if type(mols) is dict:
mols = list(mols.keys())
if type(transient) is str:
transient = [transient]
else:
mols = list(colors.keys())
transient = [mol for (mol, value) in colors.items() if type(value) is dict]
if type(t_steady) is dict:
transient = list(t_steady.keys())
else:
ts = t_steady
t_steady = {}
if type(colors) is dict:
for mol in transient:
if type(colors[mol]) is dict:
colors[mol] = colors[mol]["ss"]
# just for plotting cycles with appropriate colors
if Vcycle in ["previous", "last", "rest"]:
Vcycle = -1
elif Vcycle in ["present", "current", "same", "work"]:
Vcycle = 0
elif Vcycle in ["next"]:
Vcycle = 1
V_str = dataset["V_str"]
# prepare space for results:
V = []
integrals = {}
for mol in mols:
if mol in transient:
integrals[mol] = {"ss": [], "dyn": []}
if mol not in t_steady.keys():
t_steady[mol] = ts
else:
integrals[mol] = []
# get results:
for cycle in cycles:
off_data = select_cycles(
dataset,
cycles=cycle + Vcycle,
t_zero="start",
data_type=data_type,
cycle_str=cycle_str,
verbose=verbose,
)
# off_data is data from the cycle that the independent variable is obtained from
on_data = select_cycles(
dataset,
cycles=[cycle, cycle + 1],
t_zero="start",
data_type=data_type,
cycle_str=cycle_str,
verbose=verbose,
)
# on_data is data from the cycle, and following cycle for the tail, that the dependent variable is obtained from
t_off = off_data["time/s"]
V_off = off_data[V_str]
V += [np.trapz(V_off, t_off) / (t_off[-1] - t_off[0])]
if plotcycles:
title = str(cycle) + ", U = " + str(V[-1])
plot_experiment(on_data, mols=colors, title=title, verbose=verbose)
for mol in integrals.keys():
title = mol + ", cycle=" + str(cycle) + ", U=" + str(V[-1])
if verbose:
print("working on: " + str(mol))
x, y = get_flux(
on_data,
tspan=tspan,
mol=mol,
removebackground=True,
unit="nmol/s",
verbose=verbose,
)
if type(integrals[mol]) is dict:
ts = t_steady[mol]
if plottransient:
ax = "new"
else:
ax = None
ss, dyn = integrate_transient(
x, y, tspan=tspan, t_steady=ts, ax=ax, title=title, verbose=verbose
)
integrals[mol]["ss"] += [ss]
integrals[mol]["dyn"] += [dyn]
else:
integrals[mol] += [np.trapz(y, x)]
integrals["V"] = V
if verbose:
print("\nfunction 'get_datapoints' finished!\n\n")
return integrals
def integrate_transient(
x,
y,
tspan=None,
t_transient=None,
t_steady="half",
ax=None,
title=None,
colors=["r", "b", "g"],
verbose=True,
):
"""
This will return separate values for the transients and steady-states of
a certain compound, based on extrapolating the average signal after t_transient
to the interval before t_transient and subtracting.
"""
if ax == "new":
fig = plt.figure()
ax = fig.add_subplot(111)
if tspan is None:
tspan = [x[0], x[-1]]
"""
if t_transient is None:
t_transient = tspan
elif t_transient == 'half':
t_transient = [tspan[0], (tspan[0] + tspan[-1])/2]
"""
if t_steady == "half":
t_steady = [(tspan[0] + tspan[-1]) / 2, tspan[1]]
I_int = [I for (I, x_I) in enumerate(x) if tspan[0] <= x_I <= tspan[-1]]
# I_transient = [I for (I, x_I) in enumerate(x) if t_transient[0]<=x_I<=t_transient[-1]]
I_steady = [I for (I, x_I) in enumerate(x) if t_steady[0] < x_I <= t_steady[-1]]
x_int = x[I_int]
y_int = y[I_int]
# x_transient = x[I_transient]
# y_transient = y[I_steady]
x_steady = x[I_steady]
y_steady = y[I_steady]
base = np.trapz(y_steady, x_steady) / (x_steady[-1] - x_steady[0])
y_zero = np.zeros(np.shape(x_int))
y_base = y_zero + base
y_s = np.minimum(y_int, base)
y_t = np.maximum(y_int - base, 0)
steady = np.trapz(y_s, x_int)
transient = np.trapz(y_t, x_int)
if ax is not None:
ax.fill_between(
x_int,
y_int,
y_zero,
where=y_int > y_zero,
facecolor=colors[1],
interpolate=True,
)
ax.fill_between(
x_int,
y_int,
y_base,
where=y_int > y_base,
facecolor=colors[2],
interpolate=True,
)
ax.plot(x, y, color=colors[0])
if title is not None:
ax.set_title(title)
if verbose:
if title is not None:
print(title)
print("\tsteady = " + str(steady) + "\n\ttransient = " + str(transient))
return steady, transient
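# Synthetic-data sketch for integrate_transient: a flat baseline plus a decaying
# spike should split into steady ~ baseline*duration and transient ~ spike area:
#     t = np.linspace(0, 60, 601)
#     y = 1.0 + 5 * np.exp(-t / 3)  # steady level 1, transient of area 5*3 = 15
#     steady, transient = integrate_transient(t, y, tspan=[0, 60], t_steady=[40, 60])
#     # steady ~ 60, transient ~ 15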
def activity_steps(
data,
mols,
cycles,
cycle_str="selector",
mode="average",
t_int=15,
t_tail=30,
t_pre=15,
t_i=None,
t_f=None,
find_max=False,
t_max_buffer=5,
V_max_buffer=5,
find_min=False,
t_min_buffer=5,
V_min_buffer=5,
background=None,
t_bg=None,
t_bg_r=None,
unit="pmol/s",
ax="new",
tspan_plot=None,
verbose=True,
):
"""
Powerful function for determining activity and faradaic efficiency for
a set of potential steps.
Requires calibrated molecule objects (mols) and cycle numbers, which by
default refer to data['selector']
if mode='average', it integrates over the last t_int of each cycle. If
mode='integral', it integrates from t_pre before the start until t_tail
after the end of each cycle.
If find_max=True, rather than using the full timespan of the cycle, it
finds the timespan at which the potential is within V_max_buffer mV of its
maximum value, cuts off t_max_buffer from each end, and then uses this timespan as above.
Correspondingly for find_min, V_min_buffer, and t_min_buffer.
if t_i or t_f is not None, then it cuts the dataset according to [t_i, t_f] first.
[But, actually looking at it, that might not be necessary. Just giving a negative
number to t_tail or t_pre would have the same effect in a less convoluted way.]
A timespan for which to get the background signals at each of the masses
can be given as t_bg. Alternately, background can be set to 'linear' in
which case it draws a line connecting the signals just past the endpoints
of the timespan for each cycle.
If ax is not None, it highlights the area under the signals and EC currents
that are integrated/averaged.
The function returns a dictionary including:
'Qs': the integrated charges (in C) or averaged currents (in A) for each cycle
'ns': dictionary containing, for each molecule, the integrated amounts
or average flux for each cycle, in specified unit (default: pmol/s)
'Vs': the average potential for each cycle, in V
'ax': the axes on which the function plotted.
"""
if verbose:
print("\n\nfunction 'activity_steps' at your service!\n")
# ----- parse inputs -------- #
try:
iter(mols)
except TypeError:
mols = [mols]
mdict = dict([(m.name, m) for m in mols])
if mode in ["average", "averaging", "mean"]:
mode = "average"
elif mode in ["integral", "integrate", "integrating"]:
mode = "integral"
if t_bg is not None:
bgs = {}
for mol, m in mdict.items():
x_bg, y_bg = m.get_flux(data, tspan=t_bg, removebackground=False, unit=unit)
bgs[mol] = np.mean(y_bg)
# should perhaps give additional options for bg, but honestly t_bg is pretty good
else:
bgs = dict([(mol, 0) for mol in mdict.keys()])
if ax == "new":
ax = plot_experiment(
data,
mols,
removebackground=False,
tspan=tspan_plot,
emphasis=None,
unit=unit,
)
else:
try:
iter(ax)
except TypeError:
ax = [ax]
Qs, Vs = np.array([]), np.array([])
ns = dict([(mol, np.array([])) for mol in mdict.keys()])
for cycle in cycles:
c = select_cycles(data, [cycle], cycle_str=cycle_str, verbose=verbose)
if t_i is not None or t_f is not None:
tspan_cut = [c["time/s"][0], c["time/s"][-1]]
if t_i is not None:
tspan_cut[0] += t_i
if t_f is not None:
tspan_cut[-1] -= t_f
c = cut_dataset(c, tspan=tspan_cut)
if find_max:
t_v, v = get_potential(c)
v_max = max(v)
mask = v_max - V_max_buffer * 1e-3 < v
t_max = t_v[mask]
t_start = t_max[0] + t_max_buffer
t_end = t_max[-1] - t_max_buffer
elif find_min:
t_v, v = get_potential(c)
v_min = min(v)
mask = v < v_min + V_min_buffer * 1e-3
t_min = t_v[mask]
t_start = t_min[0] + t_min_buffer
t_end = t_min[-1] - t_min_buffer
else:
t_start = c["time/s"][0]
t_end = c["time/s"][-1]
if mode == "average":
try:
iter(t_int)
except TypeError:
tspan = [t_end - t_int, t_end]
else:
tspan = [t_start + t_int[0], t_start + t_int[-1]]
elif mode == "integral":
c = select_cycles(
data,
[cycle - 1, cycle, cycle + 1],
cycle_str=cycle_str,
verbose=verbose,
)
tspan = [t_start - t_pre, t_end + t_tail]
t_v, v = get_potential(c, tspan=tspan, verbose=verbose)
V = np.mean(v)
Vs = np.append(Vs, V)
t, I = get_current(c, tspan=tspan, verbose=verbose, unit="A")
if mode == "average":
Q = np.mean(I)
elif mode == "integral":
Q = np.trapz(I, t)
Qs = np.append(Qs, Q)
for mol, m in mdict.items():
x, y0 = m.get_flux(c, tspan=tspan, unit=unit, verbose=verbose)
if t_bg_r is not None:
t_bg = [t_start + t_bg_r[0], t_start + t_bg_r[-1]]
x_bg, y_bg = m.get_flux(data, tspan=t_bg, unit=unit)
if ax is not None:
ax[0].plot(x_bg, y_bg, color=m.get_color(), linewidth=2)
bg = np.mean(y_bg)
else:
bg = bgs[mol]
y = y0 - bg
if mode == "average":
yy = np.mean(y)
elif mode == "integral":
yy = np.trapz(y, x)
ns[mol] = np.append(ns[mol], yy)
if ax is not None:
try:
iter(bg)
except TypeError:
bg = bg * np.ones(y0.shape)
color = m.get_color()
ax[0].fill_between(x, y0, bg, color=color, alpha=0.5)
if ax is not None:
ax[1].plot(t_v, v, "k-", linewidth=3)
J = I * 1e3 / data["A_el"]
bg_J = np.zeros(J.shape)
ax[2].fill_between(t, J, bg_J, color="0.5", alpha=0.5)
if verbose:
print("\nfunction 'activity_steps' finished!\n\n")
return {"Qs": Qs, "ns": ns, "Vs": Vs, "ax": ax}
|
ScottSoren/EC_MS | src/EC_MS/add_data.py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 18:56:37 2020
@author: scott
"""
import os, sys
data_directory = (
os.path.dirname(os.path.realpath(__file__)) + os.sep + "data" + os.sep + "molecules"
)
def __main__():
pass
|
ScottSoren/EC_MS | src/EC_MS/Datapoints.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 28 16:13:26 2017
To have all the tools used for the Errorbars script in versatile,
easily accessible form.
@author: scott
"""
from matplotlib import pyplot as plt
import numpy as np
# one could argue that the following two scripts should be in this module, but instead:
# get_datapoints is in Integrate_Signals.py
# plot_datapoints is in Plotting.py
# import sys
# sys.exit()
def fill_with(quantitydict, value):
"""
generates a (multilayer) dictionary with the same keys as an input
dictionary, but values replaced by value
"""
emptydict = {}
for (key, val) in quantitydict.items():
# print(str(key) + ' ' + str(value))
if type(val) is dict:
emptydict[key] = fill_with(val, value)  # recurse with the same fill value
else:
if type(value) in [list, dict]:
value = value.copy() # otherwise they get linked...
emptydict[key] = value
return emptydict
def get_empty(quantitydict):
"""
generates a (multilayer) dictionary with the same keys as an input
dictionary, but values replaced by empty lists
"""
return fill_with(quantitydict, value=[])
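# A minimal sketch of what these return (hypothetical keys and values):
# >>> get_empty({"sample1": {"V": [1.2], "n": [0.4]}, "T": 298.15})
# {'sample1': {'V': [], 'n': []}, 'T': []}
# >>> fill_with({"V": [1.2], "n": [0.4]}, 0)
# {'V': 0, 'n': 0}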
def add_datapoint(source, target, index=None, add_key=True):
"""
adds the values in a source dictionary to the corresponding entries of a
target dictionary, recursing into nested dictionaries
"""
# print(str(source))
for key, value in source.items():
if type(value) is dict:
if key not in target.keys() and add_key:
target[key] = {}
add_datapoint(value, target[key], index, add_key=add_key)
continue
if index is None:
v = value
else:
# print(f'key={key}, value={value}, index={index}') # debugging
try:
v = value[index]
except IndexError:
v = value
if key in target.keys():
# print('key in target.keys()') # debugging
# print(f'target={target}') # debugging
if type(target[key]) is np.ndarray:
target[key] = np.append(target[key], v)
elif hasattr(v, "__iter__"):
target[key] += v
else:
target[key] += [v]
# print('adding ' + str(value[index]) + ' to ' + str(key))
elif add_key:
# print('adding key') # debugging
if hasattr(v, "__iter__"):
target[key] = v.copy() # this .copy() is important
else:
target[key] = [v]
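# A minimal sketch (hypothetical keys and values):
# >>> source = {"V": [0.9, 1.0], "n": {"H2": [1.5, 2.5]}}
# >>> target = {}
# >>> add_datapoint(source, target, index=0)
# >>> target
# {'V': [0.9], 'n': {'H2': [1.5]}}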
def datapoints_to_values(
datapoints, X="all", X_str="V", rnd=2, avoid="blank", verbose=True
):
"""
Reorganizes the datapoints dictionary, such that
the value indicated by X_str is the outer organizational level. A list of
desired X_str to include in values can be input as X. Numerical values
are considered equal if equal to rnd decimals.
The original outermost organizational level (i.e., sample) is lost.
"""
if verbose:
print("\n\nfunction 'datapoints_to_values\ at your service!\n")
if type(avoid) is str:
avoid = [avoid]
empty = get_empty(list(datapoints.values())[0])
values = {}
for (name, data) in datapoints.items():
if type(name) is str and len([a for a in avoid if a in name]) > 0:
continue
if verbose:
print("adding {} to values based on ".format(name) + X_str)
try:
x_vec = data[X_str]
x_vec[0]
except (IndexError, TypeError): # a scalar or an empty entry
x_vec = [data[X_str]]
for i, x in enumerate(x_vec):
if rnd is None:
x_round = x
else:
try:
x_round = float(np.round(x, rnd))
except TypeError: # if it's not a numerical value, just move on.
x_round = x
if X == "all":
if x_round not in values:
values[x_round] = get_empty(empty)
elif x_round not in X:
print(str(x_round) + " not in potentials")
continue
add_datapoint(source=data, target=values[x_round], index=i)
if verbose:
print("\nfunction 'points_to_values' finished!\n\n")
return values
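# A minimal sketch (hypothetical sample name and keys):
# >>> datapoints = {"sample_A": {"V": [0.8, 1.0], "n": {"O2": [1.1, 2.2]}}}
# >>> datapoints_to_values(datapoints, X_str="V", verbose=False)
# {0.8: {'V': [0.8], 'n': {'O2': [1.1]}}, 1.0: {'V': [1.0], 'n': {'O2': [2.2]}}}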
def datapoints_to_datalist(datapoints, avoid=[], verbose=True):
"""Removes the outer layer of the datapoints dictionary, i.e.
sample. The second organizational level (i.e., molecule) becomes the
outermost level. Lists and arrays are appended.
In other words, it just lumps all samples together.
"""
if verbose:
print("\n\nfunction 'datapoints_to_datalists' at your service!\n")
if type(avoid) is str:
avoid = [avoid]
datalists = {}
for name, point in datapoints.items():
if len([a for a in avoid if a in name]) > 0:
print("skipping " + name)
continue
if verbose:
print("working on " + name)
add_datapoint(point, datalists, add_key=True) # should be just that simple
if verbose:
print("\nfunction 'datapoints_to_datalists' finished!\n\n")
return datalists
def values_to_stats(values, logmean=False):
"""
replaces all numerical arrays or lists in the values of a (multilayer)
dictionary with the two-element list: [mean, standard_devation]
"""
# print('\nfunction values_to_stats in Datapoints.py has been called.')
stats = {}
for key, value in values.items():
# print(key)
if type(value) is dict:
stats[key] = values_to_stats(value, logmean=logmean)
# remember to feed arguments inwards in recursive functions!
elif type(value) is list or type(value) is np.ndarray:
if logmean:
# print('logmean is True')
mean = np.exp(np.mean(np.log(value)))
std = np.exp(np.log(mean) + np.std(np.log(value))) - mean
else:
mean = np.mean(value)
# std = 0
std = np.std(value)
stats[key] = [mean, std]
return stats
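# A minimal sketch: each numerical list becomes [mean, standard deviation]:
# >>> values_to_stats({"M2": [1.0, 2.0, 3.0]})
# {'M2': [2.0, 0.816...]}  # np.std gives the population standard deviation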
def get_mlu(stat, logmean=False): # mlu stands for: mean, [lower, upper]
try:
if len(stat) < 2:
return stat, None
except TypeError:
# print("function 'get_mlu' says: stat must be iterable.") # too verbose
return stat, None
if stat[1] == 0:
return stat[0], None
elif len(stat) == 2:
mean = stat[0]
std = stat[1]
if logmean:
log_mean = np.log(mean)
log_std = np.log((std + mean) / mean)
upper = np.exp(log_mean + log_std)
lower = np.exp(log_mean - log_std)
# print('logmean is True')
else:
upper = mean + std
lower = mean - std
elif len(stat) == 3:
lower = stat[0]
mean = stat[1]
upper = stat[2]
else:
print("need stats of length 2 or 3 for errorbars")
raise ValueError
return mean, [lower, upper]
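# A minimal sketch:
# >>> get_mlu([2.0, 0.5])          # [mean, std]
# (2.0, [1.5, 2.5])
# >>> get_mlu([1.5, 2.0, 2.5])     # already [lower, mean, upper]
# (2.0, [1.5, 2.5])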
def plot_errorbar(
xstat,
ystat,
# ax=plt.gca(),
# This was generating the blank figure!!!
# Don't put plt.gca() in a function default!
ax="current", # do it the normal way instead :)
logmean=False,
marker=".",
color="k",
markersize=None,
xfactor=1,
yfactor=1,
specs={},
linespecs={},
**kwargs,
):
specs = dict(specs, **kwargs) # feed kwargs to plot without mutating the shared default dict
if ax == "current":
ax = plt.gca()
elif ax == "new":
ax = plt.figure().add_subplot(111)
x, x_lu = get_mlu(xstat, logmean)
y, y_lu = get_mlu(ystat, logmean)
# print("x_lu={}, y_lu={}".format(x_lu, y_lu)) # debugging
if marker is None and "marker" in specs:
marker = specs.pop("marker")
elif x_lu is None and y_lu is None:
# marker = '.'
specs = {}
if markersize is None:
if marker == ".":
markersize = 10
else:
markersize = 5
# print(f'x={x}, y={y}') # debugging
# print(f'marker = {marker}, specs={specs}') # debugging
ax.plot(
x * xfactor,
y * yfactor,
marker=marker,
markersize=markersize,
color=color,
**specs,
)
if x_lu is not None:
ax.plot(
[x_lu[0] * xfactor, x_lu[1] * xfactor],
[y * yfactor, y * yfactor],
"|-",
color=color,
**linespecs,
)
if y_lu is not None:
ax.plot(
[x * xfactor, x * xfactor],
[y_lu[0] * yfactor, y_lu[1] * yfactor],
"_-",
color=color,
**linespecs,
)
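# A minimal sketch: plot the point (1.0, 2.0) with a +/- 0.5 error bar in y
# on a new axis:
# plot_errorbar(1.0, [2.0, 0.5], ax="new", color="b")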
def plot_errorbars_y(stats, colors=None, ax="new", marker=None, factor=1, **kwargs):
if ax == "new":
fig, ax = plt.subplots()
for x, stat in stats.items():
if colors is None:
plot_errorbar(
ax=ax, xstat=x, ystat=stat, marker=marker, yfactor=factor, **kwargs,
)
else:
for key, color in colors.items():
ystat = stat[key]
plot_errorbar(
ax=ax,
xstat=x,
ystat=ystat,
color=color,
marker=marker,
yfactor=factor,
**kwargs,
)
return ax
def plot_errorbars_y_old(
stats,
x="outer",
ax="new",
label="",
logmean=False,
Xrange=None,
verbose=True,
outercall=True,
color="k",
colors=None,
specs=None,
factor=1,
):
if verbose and outercall:
print("\n\nfunction 'plot_errorbars_y' at your service!\n")
if ax == "new":
fig1 = plt.figure()
ax = fig1.add_subplot(111)
# print(type(stats))
if type(stats) is not dict:
if Xrange is None or Xrange[0] <= x <= Xrange[1]:
plot_errorbar(
x, stats, ax=ax, color=colors, logmean=logmean, yfactor=factor
)
# print('I should have just plotted something.')
return ax
# oh, shit, how do I reconcile the following with my desire to use specs{}
# instead of just a color for plotting functions? I just won't for now.
if colors is None:
colors = color
if x not in ["outer", "inner"] and type(colors) is not dict:
colors = fill_with(stats, color)
if x not in ["outer", "inner"] and type(Xrange) is not dict:
Xrange = fill_with(stats, Xrange)
for key, val in stats.items():
if verbose:
print("working on " + label + str(key))
if x == "outer":
x_val = key
color_val = colors
Xrange_val = Xrange
elif x == "inner":
print("errorbars: x='inner' not yet implemented.")
pass
else:
x_val = x
try:
color_val = colors[key]
except KeyError:
if verbose:
print("skipping " + key)
continue
if Xrange is None:
Xrange_val = None # 17H14
else:
Xrange_val = Xrange[key]
plot_errorbars_y(
val,
x=x_val,
ax=ax,
colors=color_val,
Xrange=Xrange_val,
label=label + str(key) + "_",
outercall=False,
logmean=logmean,
factor=factor,
specs=specs,
)
if verbose and outercall:
print("\nfunction 'plot_errorbars_y' finished!\n\n")
return ax
def get_from_key(item, key, reduced_key=None, delimiter="."):
"""
nice little tool to aid in flexibility when dealing with multilayer dicts.
"""
if type(item) is not dict:
return item
try:
return item[key]
except KeyError:
if reduced_key is None:
reduced_key = key.split(delimiter)[0]
return item[reduced_key]
def plot_datalist_fit(
datalist,
colors,
X_str="V",
Xrange="all",
keys=None,
txt=None,
ax="new",
specs={},
results=None,
X=None,
logy=False,
logx=False,
label="",
verbose=True,
outercall=True,
):
"""
Some parts of this function, particularly the writing and plotting bit,
are just for tafel. Otherwise its as general as possible, to an extent
that may be a bit ridiculous...
"""
if verbose and outercall:
print("\n\nfunction 'plot_datalist_fit' at your service!\n")
if results is None:
results = {} # avoid mutating a shared default dict across calls
if type(datalist) is not dict:
print("couldn't find data for " + label)
return
if ax == "new":
ax = plt.figure().add_subplot(111)
if type(txt) is str:
txt = open(txt, "w")
if keys is None:
if type(Xrange) is dict:
keys = (
Xrange.keys()
) # for multiple vspans for a given quantity, just put a '.' in the key
elif type(colors) is dict:
keys = colors.keys()
elif type(datalist) is dict:
keys = datalist.keys()
if X_str in datalist.keys():
X = datalist[X_str]
for key in keys:
if key == X_str:
continue
if verbose:
print("working on: " + label + key)
xspan = get_from_key(
Xrange, key
) # so I'm flexible in how deep I define vspan, color, and data.
color = get_from_key(colors, key)
data = get_from_key(datalist, key)
# print(xspan)
if type(color) is dict or type(xspan) is dict or X is None:
results[key] = {}
plot_datalist_fit(
data,
colors=color,
X_str=X_str,
Xrange=xspan,
txt=txt,
ax=ax,
specs=specs,
X=X,
logx=logx,
logy=logy,
label=key + "_",
results=results[key],
verbose=verbose,
outercall=False,
)
continue
y = np.array(data)
x = np.array(X)
if not xspan == "all":
try:
I_keep = [
I for (I, x_I) in enumerate(x) if x_I > xspan[0] and x_I < xspan[1]
]
x = x[I_keep]
y = y[I_keep]
except Exception:
print(xspan)
print(x)
raise
# print('couldn\'t cut x and y')
# print('len(x) = ' + str(len(x)))
# print('xspan = ' + str(xspan)))
if logy:
y = np.log(y)
if logx:
x = np.log(x)
p1 = np.polyfit(x, y, deg=1)
a = p1[0] # slope
b = p1[1] # intercept
if logy:
ts = np.log(10) / a # tafel slope
if txt is not None:
txt.write("---\n" + label + key + " on interval " + str(xspan) + "\n")
txt.write(
"ln("
+ label
+ key
+ "/[nmol]) = "
+ str(b)
+ " + "
+ str(a)
+ " * (V vs RHE / [V])\n"
)
if logy:
txt.write("\ttafel slope = " + str(ts * 1e3) + " mV/decade\n")
if ax is not None:
x_fit = np.array(xspan)
y_fit = b + a * x_fit
if logy:
y_fit = np.exp(y_fit)
ax.plot(x_fit, y_fit, color=color, label=label + key, **specs)
results[key] = p1
if outercall and txt is not None:
txt.close()
if verbose and outercall:
print("\nfunction 'plot_datalist_fit' finished!\n\n")
return results, ax
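# Note on the fit above: with logy=True the fit is ln(y) = b + a*x, so one
# decade in y corresponds to ln(10)/a in x -- the "tafel slope" written to the
# txt file (reported in mV/decade when x is potential in V).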
|
ScottSoren/EC_MS | _tests_/test_0.py | <gh_stars>1-10
# %%
from matplotlib import pyplot as plt
from EC_MS import Molecule
from EC_MS.utils.extraction_class import Extraction
# %%
Molecule("ethanol").plot_spectrum()
# %%
plt.show()
print(Extraction.__str__)
|
ScottSoren/EC_MS | src/EC_MS/__init__.py | <filename>src/EC_MS/__init__.py
"""
Most recently edited 16I23. Imports the EC_MS package
For a description of this package, see the included NOTES__EC_MS.text
@author: Scott
"""
# fmt: off
# ^ I can't have black formatting this because it breaks setup.py
__version__ = "0.7.5dev"
__title__ = "EC_MS"
__description__ = "Analysis tools for electrochemistry and mass spectrometry and a lot in between"
__url__ = "https://github.com/ScottSoren/EC_MS"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (c) 2018 <NAME>"
__license__ = "MIT"
# fmt: on
print("\n" + "-" * 10 + " Importing EC_MS v" + __version__ + " " + "-" * 10)
print("from " + __file__ + "\n\n")
import EC_MS.Chem as Chem
from EC_MS.dataset import *
from EC_MS.parsing_tools import *
from EC_MS.Data_Importing import *
from EC_MS.Combining import *
from EC_MS.Plotting import *
from EC_MS.Object_Files import *
from EC_MS.Molecules import *
from EC_MS.Chips import *
from EC_MS.Calibration import *
from EC_MS.Quantification import *
from EC_MS.Datapoints import *
from EC_MS.Integrate_Signals import *
from EC_MS.EC import *
from EC_MS.Potentiostat import *
from EC_MS.spectra import *
from EC_MS.PVMassSpec import *
from EC_MS.Zilien import *
from EC_MS.Electrolytes import *
from EC_MS.Time_Response import *
from EC_MS.patches import *
from EC_MS.utils import *
|
ScottSoren/EC_MS | src/EC_MS/Zilien.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 19:08:46 2020
@author: scott
"""
import os, pickle
import numpy as np
from functools import wraps
from matplotlib import pyplot as plt
from .dataset import Dataset
from .spectra import Spectrum, Spectra, spectra_from_data
"""
The main Zilien importing is at present taken care of by .Data_Importing/load_from_file
and the chaotic multi-format parser that it calls.
I think a better way would be to have a module for each data type, inheriting from Dataset
and with its own parsers, which may use some shared tools in a shared module.
In general, the structure of EC_MS needs serious reworking!
"""
class Zilien_Dataset(Dataset):
# @wraps(Dataset.__init__)
def __init__(self, *args, **kwargs):
if "data_type" not in kwargs:
kwargs["data_type"] = "SI"
super().__init__(*args, **kwargs)
self.get_spectra()
def get_spectra(self):
if "spectra_data" in self.data:
self.spectra = spectra_from_data(self.data["spectra_data"])
else:
try:
spectra_folder = (
"".join([s + " " for s in self.file.split(" ")[2:]]).split(".")[0]
+ " mass scans"
)
spectra_path = os.path.join(self.folder, spectra_folder)
self.spectra_folder, self.spectra_path = spectra_folder, spectra_path
self.spectra = read_zilien_spectra(spectra_path, data=self.data)
except FileNotFoundError:
print("Warning!!! No spectra found! consider using normal Dataset")
# self.spectrums = self.spectra.spectrums
def __getitem__(self, key):
if type(key) is int:
return self.spectra[key]
def save(self, file_name):
spectra_data = {"x": self[0].x, "spectra": self.spectra.spectra}
self.data["spectra_data"] = spectra_data
with open(file_name, "wb") as f:
pickle.dump(self.data, f)
def read_zilien_spectrum(file_path, delim="\t"):
with open(file_path, "r") as f:
lines = f.readlines()
data = {
"file": file_path,
"header": "",
}
N_col_head = len(
lines
) # this will decrease when the loop knows when the column header line is coming
nondata_cols = [] # this will store abstime, so I don't have to parse.
for n, line in enumerate(lines):
l = line.strip()
if n < N_col_head:
if len(l) == 0:
N_col_head = n + 1
# print(dataset['header']) # debugging
# if n< 10: print(line)
# data['header'] = data['header'] + line # If I use .join instead, it gives a memory error, I don't understand why.
elif n == N_col_head:
data_cols = l.split(delim)
for col in data_cols:
data[col] = np.array([])
# data['header'] = data['header'] + line # If I use .join instead, it gives a memory error, I don't understand why.
elif n > N_col_head:
for col, val in zip(data_cols, l.split(delim)):
if col in nondata_cols:
data[col] += [val]
continue
try:
x = eval(val)
except (SyntaxError, NameError): # non-numeric entries, e.g. timestamps
print(
"removing "
+ col
+ " from data_cols due to value "
+ val
+ " on line "
+ str(n)
)
data[col] = list(data[col])
data[col] += [val]
nondata_cols += [col]
else:
data[col] = np.append(data[col], x)
data["data_cols"] = set(data_cols)
return data
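# A minimal usage sketch (hypothetical file path; the column names below are
# the ones used by read_zilien_spectrums):
# data = read_zilien_spectrum("mass scans/some scan.tsv")
# x, y = data["Mass [AMU]"], data["Current [A]"]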
def read_zilien_spectrums(folder, delim="\t"):
"""
Read every Zilien mass-scan file in 'folder' and return the spectra as a
list of Spectrum objects, sorted by measurement time.
"""
lslist = os.listdir(folder)
spectra = []
ts = []
for f in lslist:
try:
time_str = f.split("started at measurement time")[1]
except IndexError:
print(f + " does not seem to be a Zilien spectrum with timestamp")
else:
time_str = time_str.split(".tsv")[0].strip()
t = float(time_str)
data = read_zilien_spectrum(folder + os.sep + f, delim=delim)
# return data # debugging
x = data["Mass [AMU]"]
y = data["Current [A]"]
spectrum = Spectrum(x=x, y=y, t=t)
ts += [t]
spectra += [spectrum]
I_sort = np.argsort(ts)
ts = [ts[I] for I in I_sort]
spectrums = [
spectra[I] for I in I_sort
] # can't directly write spectra[I_sort] since it's not an np array
return spectrums
def read_zilien_spectra(folder, delim="\t", data=None):
"""
Read every Zilien mass-scan file in 'folder' and return a Spectra object.
"""
spectrums = read_zilien_spectrums(folder, delim=delim)
# return spectrums # debugging
return Spectra(folder=folder, spectrums=spectrums, data=data)
|
ScottSoren/EC_MS | src/EC_MS/Chem/MolarMasses.py | """
This module does some useful stuff with parsing chemical formulas.
"""
import re
# fmt: off
# ^ so that Black doesn't format these lists, which are kind of organized according to the periodic table
Zdict = {'H':1,'He':2,
'Li':3,'Be':4,'B':5,'C':6,'N':7,'O':8,'F':9,'Ne':10,
'Na':11,'Mg':12,'Al':13,'Si':14,'P':15,'S':16,'Cl':17,'Ar':18,
'K':19,'Ca':20,'Sc':21,'Ti':22,'V':23,'Cr':24,'Mn':25,'Fe':26,'Co':27,'Ni':28,'Cu':29,'Zn':30,'Ga':31,'Ge':32,'As':33,'Se':34,'Br':35,'Kr':36,
'Rb':37,'Sr':38,'Y':39,'Zr':40,'Nb':41,'Mo':42,'Tc':43,'Ru':44,'Rh':45,'Pd':46,'Ag':47,'Cd':48,'In':49,'Sn':50,'Sb':51,'Te':52,'I':53,'Xe':54,
'Cs':55,'Ba':56,'La':57,'Hf':72,'Ta':73,'W':74,'Re':75,'Os':76,'Ir':77,'Pt':78,'Au':79,'Hg':80,'Tl':81,'Pb':82,'Bi':83,'Po':84,'At':85,'Rn':86,
'Fr':87,'Ra':88,'Ac':89,
'Ce':58,'Gd':64,
'U':92,
'Heavy':0,'Light':0, 'e':0, '+':0, '-':0}
# Heavy and Light are so that I can have isotopically labled stuff.
# + and - are so that charges are collected by break_down
Mdict = {'H':1.008,'He':4.002,
'Li':6.941,'Be':9.012,'B':10.811,'C':12.011,'N':14.007,'O':15.999,'F':18.998,'Ne':20.180,
'Na':22.990,'Mg':24.305,'Al':26.982,'Si':28.086,'P':30.974,'S':32.065,'Cl':35.453,'Ar':39.948,
'K':39.098,'Ca':40.078,'Sc':44.956,'Ti':47.867,'V':50.942,'Cr':51.996,'Mn':54.938,'Fe':55.845,'Co':58.933,'Ni':58.693,'Cu':63.546,'Zn':65.380,'Ga':69.723,'Ge':72.640,'As':74.922,'Se':78.96,'Br':79.904,'Kr':83.904,
'Rb':85.468,'Sr':87.620,'Y':88.906,'Zr':91.224,'Nb':92.906,'Mo':95.960,'Tc':98,'Ru':101.070,'Rh':102.906,'Pd':106.420,'Ag':107.868,'Cd':112.411,'In':114.818,'Sn':118.710,'Sb':121.760,'Te':127.600,'I':126.904,'Xe':131.293,
'Cs':132.905,'Ba':137.327,'La':138.905,'Hf':178.490,'Ta':180.948,'W':183.840,'Re':186.207,'Os':190.230,'Ir':192.217,'Pt':195.084,'Au':196.967,'Hg':200.590,'Tl':204.383,'Pb':207.200,'Bi':208.980,'Po':209,'At':210,'Rn':222,
'Fr':223,'Ra':226,'Ac':227,
'Ce':140.116,'Gd':157.250,
'U':238.029,
'Heavy':1,'Light':-1, # So that I can have isotopically labled stuff.
'e':0, '-':0, '+':0} # So that charges are collected by break_down
# fmt: on
# ^ so that Black formats what comes below
def BreakDown(
*args, **kwargs
): # Reads parentheses and breaks down an arbitrary compound into a dictionary, like AxByCz to {'A':x,'B':y,'C':z}
print(
"function Chem.BreakDown is now called Chem.break_down. Remember that next time!"
)
return break_down(*args, **kwargs)
def break_down(compound, forgiving=True):
"""
Breaks a string representing a chemical formula down into constituent parts.
Things in parentheses are considered one part.
Any string of a capital letter followed by lower case letters is considered to be
an irriducible element.
Any number is considered to quantify the element imediately proceeding it.
Space is ignored
Other characters raise a ValueError unless forgiving=True
Example:
>>> break_down('CH3(CH2)5CHO')
>>> {'C':2, 'H':4', 'CH2':5, 'O':1}
This function is called recursively by get_elements.
"""
parts = {}
number = ""
element = ""
subcompound = ""
nest = 0
N = len(compound)
addit = False
for i, char in enumerate(compound):
if char == "(":
if nest == 0:
addit = True
else:
subcompound += char
nest += 1
elif nest > 0:
if char == ")":
nest -= 1
if nest == 0:
element = subcompound
subcompound = ""
else:
subcompound += char
else:
if re.search("[/.0-9]", char):
number += char
elif re.search("[a-z]", char):
element += char
elif re.search("[A-Z\-\+]", char):
addit = True
elif re.search("\S", char):
print(
"Not quite sure what you're talking about, mate, when you say ",
char,
)
if not forgiving:
raise ValueError
if i == N - 1:
addit = True
# print('char = ' + char + '\nelement = ' + element + '\naddit = ' + str(addit))
if addit:
if len(number) > 0:
try:
n = int(number)
except ValueError:
n = float(number)
number = ""
else:
n = 1
if len(element) > 0:
if element in parts:
parts[element] += n
else:
parts[element] = n
if nest == 0:
element = char
if i == N - 1 and re.search(r"[A-Z\-\+]", char):
if element in parts:
parts[element] += 1
else:
parts[element] = 1
addit = False
return parts
def Mass(*args, **kwargs):
print(
"Function Chem.Mass() has been replaced by Chem.get_mass(). Remember that next time!"
)
return get_mass(*args, **kwargs)
def get_mass(
compound, forgiving=True
): # Returns the molar mass of any chemical formula.
if compound in Mdict:
return Mdict[compound]
parts = break_down(compound, forgiving)
M = 0
for element in parts:
# A = input('checking for ' + element + ' in ' + Compound)
if element == compound:
print("Dude, man,", compound, "just isn't a thing!")
if not forgiving:
raise ValueError
return 0
M += parts[element] * get_mass(element, forgiving=forgiving)
return M
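# A minimal sketch:
# >>> get_mass("CO2")          # 12.011 + 2 * 15.999 = 44.009 (up to float rounding)
# >>> get_mass("CuSO4(H2O)5")  # parentheses are handled by break_down -> 249.682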
def SimpleForm(*args, **kwargs):
print(
"function Chem.SimpleForm() is now called Chem.get_elements(). Remember that next time!!!"
)
return get_elements(*args, **kwargs)
def get_elements(formula, forgiving=True):
# Determines number of each element in a complex formula.
"""
Given a chemical formula like 'CH3(CH2)5CHO', returns the number of each
element, here {'C':7, 'H':14, 'O':1}
Works recursively if there are parentheses in the formula
If forgiving=True, considers unidentifiable things to be elements
"""
elements = {} # the broken down formula will go here with elements as keys
if type(formula) is str: # this function breaks down each key of a dict.
formula = {
formula: 1
} # so for a string, just treat it as the one key of a dict
cleared = (
[]
) # used to avoid an infinite loop in the case that something can't be broken down
for compound in formula:
nComp = formula[compound]
if compound in elements: # if it's already in elements, just add to the count!
elements[compound] += nComp
continue
elif compound in Zdict: # if it's an irreducible atom, just put it in elements!
elements[compound] = nComp
continue
# otherwise, we've got to break it down:
parts = break_down(compound, forgiving=forgiving)
if compound in parts and compound not in Zdict:
# If we can't break it down and it's not a recognizable atom:
print("dude,", compound, "just isn't a thing")
if not forgiving:
raise ValueError
cleared += [
compound
] # So that it doesn't put the function in an infinite loop
for (
part,
nPart,
) in parts.items(): # Just everything from the breakdown into elements
if part in ["g", "l", "s", "aq"]:
continue # so that the state doesn't count as an element for e.g., CH3OH(aq)
if part in elements:
elements[part] += nPart * nComp
else:
elements[part] = nPart * nComp
# Check if we put anything reducible into elements, in which case we've got to call this again.
more = len(
[comp for comp in elements.keys() if comp not in Zdict and comp not in cleared]
)
if more:
# print('Simplifying', Output)
return get_elements(elements)
return elements
|
ScottSoren/EC_MS | src/EC_MS/utils/__init__.py | """
EC_MS.utils is a stash for project-specific tools
"""
from .extraction_class import Extraction
from .kinetic_models import solve_carbonic_burst
from .icpms import *
|
ScottSoren/EC_MS | src/EC_MS/Molecules.py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 17 22:36:15 2016
@author: scott
This module will define the Molecule class used to organize, access, and
manipulate information about molecules. Objects of this class will generate and
utilize text files in EC_MS/data/
"""
from __future__ import print_function, division
import os, re
import numpy as np
from matplotlib import pyplot as plt
from numbers import Number
from functools import wraps
from . import Chem
from .Object_Files import structure_to_lines, lines_to_dictionary, write_to_file
from .Object_Files import lines_to_structure, date_scott, update_lines
from .parsing_tools import get_cols_for_mass
preferencedir = os.path.dirname(os.path.realpath(__file__)) + os.sep + "preferences"
with open(preferencedir + os.sep + "standard_colors.txt", "r") as f:
lines = f.readlines()
standard_colors = lines_to_dictionary(lines, removecomments=False)[
"standard colors"
]
data_directory = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data"
cwd = os.getcwd()
# for python2:
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
MoleculeError = FileNotFoundError
class Molecule:
"""
This class will store physical and thermodynamic data about molecules that
we are interested in for EC_MS, as well as mass spectra and calibration
results for quantification. Objects of this class will link to data stored
in a text file in ./data/
"""
def __init__(
self,
name,
formula=None,
writenew=True,
verbose=False,
primary=None,
F_cal=None,
data_dir=data_directory,
molecule_dir=None,
):
self.name = name
self.real_name = name # for a trick with Calibration.load_calibration_results()
self.cal = {}
self.__str__ = "<" + name + ", instance of EC_MS class 'Molecule'>"
self.primary = primary # the primary mass to measure at
self.F_cal = F_cal
self.calibrations = [] # will store calibration data
self.attr_status = {"D": 0, "kH": 0, "n_el": 0}
self.formula = formula
self.data_dir = data_dir
if molecule_dir is None:
molecule_dir = os.path.join(data_dir, "molecules")
self.molecule_dir = molecule_dir
# 0 for undefined, 1 for loaded from file, 2 for set by function
file_name = self.name + ".txt"
cwd = os.getcwd()
os.chdir(self.molecule_dir)
try:
with open(file_name, "r") as f:
self.file_lines = f.readlines()
except FileNotFoundError: # I don't know the name of the error, so I'll have to actually try it first.
os.chdir(cwd)
print("Warning!!! No file found for Molecule " + self.name)
self.has_file = False
else:
self.has_file = True
if len(self.file_lines) == 0:
print("The file for " + name + " is empty!")
# raise MoleculeError
self.reset(verbose=verbose)
self.file_lines = ["name: " + self.name] + self.file_lines
os.chdir(cwd)
if self.formula is None:
self.attr_status[
"formula"
] = 0 # so that it asks me for a formula when initializing the file.
self.attr_status["M"] = 0
self.formula = name # but just put the name for now.
try:
self.spectrum = self.get_spectrum()
except MoleculeError:
print(
"Warning: __init__ function of Molecule "
+ name
+ " could not find spectrum()!!!"
)
if not self.has_file:
raise
if not self.has_file:
print(
"\n--- "
+ self.name
+ ": Returning a Molecule object with only the spectrum --- \n"
)
if verbose:
print(
"name = " + str(self.name) + " , formula = " + str(self.formula)
) # debugging
self.M = Chem.get_mass(self.formula)
if self.M == 0:
print("WARNING: could not get molecular mass for " + self.name + " !!!")
if not hasattr(self, "molecule_mass"):
# print('setting self.molecule_mass from self.M!') # debugging
self.molecule_mass = self.M * Chem.amu
# print('molecule_mass = ' + str(self.molecule_mass)) # debugging
self.transmission_function = None
# self.color = self.get_color()
@wraps(write_to_file)
def write(self, a=None, attr=None, data_dir=None, *args, **kwargs):
if data_dir is None:
data_dir = self.molecule_dir
return write_to_file(
self, a=a, attr=attr, data_directory=data_dir, *args, **kwargs
)
def rewrite(self, file="default"):
if file == "default":
file = self.name + ".txt"
newlines = update_lines(
self.file_lines,
self.__dict__,
oldkeys=["file_lines", "calibrations", "attr_status", "__str__"],
)
if type(file) is str:
os.chdir(self.molecule_dir)
with open(file, "w") as f:
f.writelines(newlines)
os.chdir(cwd)
else:
file.writelines(newlines)
def p_vap(self, T=None):
if T is None:
if "T" in dir(self) and self.T is not None:
T = self.T
else:
T = 298.15
elif "T" not in dir(self) or self.T is None:
self.T = T
return Chem.p_vap(self.name, T)
def as_dict(self):
self_as_dict = {}
# fmt: off
attr_list = [
"name", "real_name", "formula", "M",
"molecule_diameter", "dynamic_viscosity",
"density_RTP", "D_gas_RTP", "D",
"H_0", "T_C", "kH", "thermo",
"sigma", "sigma_100eV", "sigma_70eV" "spectrum",
"primary", "F_cal", "F_mat"
]
# fmt: on
for attr in attr_list:
if hasattr(self, attr):
self_as_dict[attr] = getattr(self, attr)
return self_as_dict
def reset(self, verbose=False):
"""
Retrives data for new object from lines read from file or resets
attribute values to those originally in the file
"""
if verbose:
print(
"loading attributes for this "
+ self.name
+ " molecule fresh from original file."
)
dictionary = lines_to_dictionary(self.file_lines)
for (key, value) in dictionary.items():
if "calibration" in key:
self.add_calibration(value)
if "Spectrum" in key:
self.spectrum = value
elif (not hasattr(self, key)) or (getattr(self, key) is None):
setattr(self, key, value)
if "F_cal" not in dir(self) and "primary" in dir(self):
if self.primary is not None and len(self.calibrations) > 0:
self.F_cal = self.calibration_fit(
mass="primary", ax=None, useit=True, primary=True, verbose=True
)
elif type(self.F_cal) is list and len(self.F_cal) == 1:
self.F_cal = self.F_cal[0]
# print('F_cal was list of length 1. Now, F_cal = ' + str(self.F_cal))
else:
# print('F_cal was as it should be')
pass
def write_new(self, f):
for (attr, status) in self.attr_status.items():
if status == 0:
string = input(
"Enter "
+ attr
+ " for "
+ self.name
+ " or whitespace to get default.\n"
)
if len(string.strip()) == 0:
print("skipped that for now.")
continue
try:
value = float(string)
self.attr_status[attr] = 2
setattr(self, attr, value)
f.write(attr + "\t=\t" + str(value) + "\n")
except ValueError:
print("not a float but okay.")
value = string
self.attr_status[attr] = (
"2" # just for irony, I'll save this status as not a float.
)
setattr(self, attr, value)
f.write(attr + ": " + str(value) + "\n")
# self.file_lines = f.readlines() #doesn't work. But what if I need to reset later?
# else:
# f.write(attr + '\t=\t' + str(self.attr) + '\n') #not necessary...
def get_spectrum(self, data_dir=None):
if data_dir is None:
data_dir = self.data_dir
if hasattr(self, "spectrum"):
return self.spectrum
else: # that must mean there's no spectrum in the molecule's data file :(
# try and get the spectrum from the data.
try:
print("loading NIST spectrum from .jdx")
spectrum = get_NIST_spectrum(self, data_dir=data_dir)
except:
print("WARNING!!! Could not get spectrum for " + self.real_name)
raise # MoleculeError('no spectrum for ' + self.real_name)
else:
return spectrum
def get_RSF(
self,
RSF_source="NIST",
mass="primary",
transmission_function=None,
verbose=True,
):
"""
Requires that a spectrum and total ionization cross section are already
loaded, and preferably also a relative sensitivity factor.
Generates dictionaries of ionization-fragmentation cross sections 'IFCS'
and 'RSF' for each mass in the spectrum. Saves the respective value for
the stated primary mass also as 'ifcs' and 'rsf'.
"""
self.IFCS = {} # this will be a dictionary containing the electron ionization
# portion of the 'relative sensitivity' for each mass,
# i.e. the partial ionization cross-section in Ang^2 at 100 eV
if transmission_function is None:
if self.transmission_function is None:
print(
"WARNING: no transmission function for "
+ self.name
+ ". using constant"
)
def transmission_function(M):
return 1
else:
transmission_function = (
self.transmission_function
) # T(M)=1 unless otherwise stated
elif self.transmission_function is None:
self.transmission_function = transmission_function
spec_total = 0
for value in self.spectrum.values():
if type(value) is not str:
spec_total += value
for (M, value) in self.spectrum.items():
if M == "Title":
continue
self.IFCS[M] = value / spec_total * self.sigma_100eV
if "primary" in dir(self):
self.ifcs = self.IFCS[self.primary]
if RSF_source == "Hiden":
try:
self.RSF = {} # this will be the relative sensivity factor at each
# mass, where N2 at M28 is 1, from Hiden Analytical
mH = self.Hiden[0]
vH = self.Hiden[1]
except AttributeError:
print("no Hiden RSF found for " + self.name)
return None
for (M, value) in self.spectrum.items():
if verbose:
print(str(M) + " " + str(value))
if M == "Title":
continue
self.RSF[M] = value / self.spectrum[mH] * vH
if "primary" in dir(self):
self.rsf = self.RSF[self.primary]
if verbose:
print(
"RSFs from Hiden rsf, adjusted to mass of measurement according"
+ "to NIST spectrum for "
+ self.name
)
elif RSF_source == "NIST":
self.RSF = dict(
(key, value * transmission_function(int(key[1:])))
for key, value in self.IFCS.items()
)
N2_M28_RSF = 2.283 * transmission_function(28)
# print('N2_M28_RSF = ' + str(N2_M28_RSF)) # debugging
self.RSF = dict(
[(key, value / N2_M28_RSF) for (key, value) in self.RSF.items()]
)
if "primary" in dir(self):
self.rsf = self.ifcs * transmission_function(
int(self.primary[1:])
) # / N2_M28_RSF
if verbose:
print(
"RSFs from ionization-fragmentation cross section in Ang^2 'ifcs'"
+ "based on NIST cross section and spectrum for "
+ self.name
+ " and the given transmission function T(M)"
)
if mass == "primary":
mass = self.primary
return self.RSF[mass]
def plot_spectrum(self, top=100, offset=0, width=0.5, ax="new", **kwargs):
if ax == "new":
fig1 = plt.figure()
ax = fig1.add_subplot(111)
x = []
y = []
try:
color = kwargs.pop("color")
except KeyError:
color = self.get_color()
for (mass, value) in self.spectrum.items():
if mass == "Title":
continue
x += [int(mass[1:])]
y += [value]
y = np.array(y) / max(y) * top
x = np.array(x)
ax.bar(x + offset, y, width=width, color=color, label=self.name, **kwargs)
ax.set_xticks(x)
ax.set_xticklabels([str(m) for m in x])
ax.set_title("literature QMS spectrum for " + self.name)
return ax
def add_calibration(
self, calibration, useit=True, primary=True, writeit=False, verbose=False
):
"""
'calibration' is a dictionary containing the calibration factor 'F_cal',
in C/mol; the mass measurement 'mass' for which it applies, as well as
data about how it was obtained. This function adds the calibration to
a list of calibration dictionaries for this instance of Molecule.
If 'useit', 'F_cal' is used to set attribute 'F_<mass>'
If 'primary', this is the primary mass for measurement, and
calibration['F_cal'] is (also) used to set 'F_cal' for this
instance of Molecule for easier access, e.g. CO2.F_cal
If 'writeit', the calibration is written to the molecule's data file.
"""
self.calibrations += [calibration]
mass = calibration["mass"]
F_cal = calibration["F_cal"]
title = calibration["title"]
if verbose:
print("added calibration " + title + " for " + self.name)
if useit: # use this calibration for this mass
self.cal[mass] = calibration["F_cal"]
attribute_name = "F_" + mass
setattr(self, attribute_name, F_cal)
if primary: # use this mass by default
self.primary = mass
setattr(self, "F_cal", F_cal)
if writeit: # write this calibration to file
self.write(self.write_calibration, calibration)
def new_calibration(
self,
calibration=None,
mass=None,
F_cal=None,
cal_type=None,
chip=None,
settings=None,
notes=None,
expdate=None,
andate=None,
title=None,
add_dates=True,
useit=True,
primary=True,
writeit=True,
):
"""
Puts values in a calibration dictionary and calls add_calibration
"""
andate = date_scott(andate)
expdate = date_scott(expdate)
if title is None:
title = expdate + "_" + andate
elif add_dates:
title = expdate + "_" + andate + "_" + title
if chip == "date":
# but ideally chip points to an object of the yet-to-be-written class chip
chip = expdate
if type(mass) is int:
mass = "M" + str(mass)
if type(settings) is list:
settings = {
"SEM voltage": settings[0],
"speed": settings[1],
"range": settings[2],
}
if type(notes) is str:
notes = [notes]
calibration_i = {
"title": title,
"F_cal": F_cal,
"mass": mass,
"type": cal_type,
"experiment date": expdate,
"analysis date": andate,
"chip": chip,
"QMS settings": settings,
"Notes": notes,
}
if calibration is None:
calibration = calibration_i
else:
for (key, value) in calibration_i.items():
if value is not None:
calibration[key] = value
self.add_calibration(calibration, useit=useit, primary=primary, writeit=writeit)
def read_calibration(self, lines, **kwargs):
"""
Generates a calibration dictionary from lines of text, which would
come from the molecule's data file. Then calls add_calibration.
Never used anymore because reset does the same thing.
"""
calibration = lines_to_structure(lines)
self.add_calibration(calibration, **kwargs)
return calibration
def write_calibration(self, f, calibration):
"""
Writes a calibration dictionary to the molecule's data file (pre-opened
as 'f') in a way readable by read_calibration.
Typically called by add_calibration.
There's a much cleverer way to write this type of function.
"""
print("\nWriting calibration for " + self.name + "\n")
title_line = "calibration_" + calibration["title"]
lines = structure_to_lines(calibration, preamble=title_line)
f.writelines(lines)
print("wrote calibration " + calibration["title"] + " for " + self.name)
def calibration_fit(
self,
mass="primary",
ax="new",
color="k",
plotfactor=1,
useit=True,
primary=True,
verbose=True,
):
if mass == "primary":
mass = self.primary
title = mass + " calibrations for " + self.name
n_mol = [
0,
] # include 0,0 as a calibration point!
Q_QMS = [
0,
] # include 0,0 as a calibration point!
for calibration in self.calibrations:
if calibration["mass"] == mass:
n_mol += [calibration["n_mol"]]
Q_QMS += [calibration["Q_QMS"]]
n_mol = np.array(n_mol)
Q_QMS = np.array(Q_QMS)
N = len(n_mol)
pf1 = np.polyfit(n_mol, Q_QMS, 1)
if verbose:
print("y = " + str(pf1[0]) + " x + " + str(pf1[1]))
F_cal = pf1[0]
print()
if useit: # use this calibration for this mass
attribute_name = "F_" + mass
if verbose:
print(
"using a fit value for "
+ attribute_name
+ " based on "
+ str(N)
+ " experiments."
)
self.cal[mass] = F_cal
setattr(self, attribute_name, F_cal)
if primary:
self.F_cal = F_cal
self.primary = mass
pf_fun = np.poly1d(pf1)
pf_x = np.array([min(n_mol), max(n_mol)])
if ax == "new":
fig1 = plt.figure()
ax = fig1.add_subplot(111)
if ax is not None:
ax.set_title(title)
ax.plot(
n_mol * 1e9 * plotfactor,
Q_QMS * 1e9 * plotfactor,
".",
color=color,
markersize=15,
)
ax.plot(
pf_x * 1e9 * plotfactor,
pf_fun(pf_x) * 1e9 * plotfactor,
"--",
color=color,
)
ax.set_xlabel("amount produced / nmol")
ax.set_ylabel("int. signal / nC")
return F_cal
def mass_transfer_coefficient(
self,
system="chip",
T=298.15,
phi=0.5,
l_p=100e-6,
d_p=20e-9,
p_chip=1e5,
n_dot_0=2.6e-9,
A=0.196e-4,
verbose=True,
):
K_H = self.kH * Chem.R * T
self.K_H = K_H
if verbose:
print("K_H = " + str(K_H * 1e-2) + " bar/mol")
if system == "chip":
self.h = K_H * n_dot_0 / (p_chip * A)
else:
self.h = (
K_H
* phi
* d_p
/ (3 * l_p)
* np.sqrt(8 / (np.pi * Chem.R * T * self.M * 1e-3))
)
if verbose:
print("h = " + str(self.h) + " m/s")
return self.h
def set_temperature(self, T):
self.T = T
print("The set_temperature function is not implemented yet.")
pass
def get_bg(self, *args, **kwargs):
"""
args and kwargs are given to self.get_flux()
sets self.background to the average signal from this call to get_flux()
returns background
"""
kwargs.update(unit="mol/s")
if "t_bg" in kwargs and "tspan" not in kwargs:
kwargs["tspan"] = kwargs["t_bg"]
x, y = self.get_flux(*args, **kwargs, removebackground=False)
if False: # debugging
fig, ax = plt.subplots()
ax.plot(x, y, self.get_color())
background = np.mean(y)
self.background = background
return background
def get_background(self, *args, **kwargs):
"""
see self.get_bg
"""
return self.get_bg(*args, **kwargs)
def get_color(self):
try:
return self.color
except AttributeError:
try:
return standard_colors[self.primary]
except AttributeError:
print("WARNING: " + str(self) + " has no attribute 'primary'")
except KeyError:
print("WARNING: standard_colors has no entry for " + str(self.primary))
def get_flux(
self,
MS_data,
tspan="tspan",
density=None,
unit=None,
verbose=False,
override=False,
x=None,
removebackground=None,
background=None,
t_bg=None,
endpoints=5,
):
"""
returns [x, y] where x is the t corresponding to the primary mass of the
molecule in 'mol' and y is the molecular flux in nmol/s, calculated from
the MS_data for its primary mass and the value of F_cal read from the
molecule's text file.
"""
if verbose:
print("calculating flux of " + self.name)
if hasattr(self, "cal_mat"):
cal_mat = self.cal_mat
else:
if verbose:
print("no cal_mat! using self.primary and self.F_cal instead.")
F_cal = self.F_cal
mass = self.primary
cal_mat = {mass: 1 / F_cal}
if tspan is None:
if x is not None:
tspan = [x[0], x[-1]]
else:
tspan = "tspan"
if type(tspan) is str and not tspan == "all":
try:
tspan = MS_data[tspan]
except KeyError:
tspan = "all"
unit = unit if unit else "nmol/s"
if x is None:
if density is None:
xcol, ycol = get_cols_for_mass(self.primary, MS_data)
x = MS_data[xcol]
if not (isinstance(tspan, str) and tspan == "all"):
mask = np.logical_and(tspan[0] < x, x < tspan[-1])
# Don't cut off outer endpoints before evt interpolation (if used by plot_vs_potential)
extra_left = np.append(mask[1:], False)
extra_right = np.append(False, mask[:-1])
mask = np.logical_or(mask, np.logical_or(extra_left, extra_right))
x = x[mask]
else:
x = np.linspace(
tspan[0], tspan[-1], int(density * np.floor(tspan[-1] - tspan[0]))
)
# ^ approx `density` datapoints per second (np.linspace needs an integer num)
y = 0
for mass, C in cal_mat.items():
xcol, ycol = get_cols_for_mass(mass, MS_data)
try:
x0 = MS_data[xcol]
s0 = MS_data[ycol]
except KeyError:
if self.primary and mass == self.primary:
raise
else:
print(
f"Skipping {mass} in matrix calibration of {self.name}"
" as it's missing from the data cols."
)
continue
s = np.interp(x, x0, s0)
y += s * C # units: [A] * [mol/C] = [mol/s]
if (
t_bg is not None and background is None
): # 19G01, I wonder why/how this wasn't here before
background = "constant"
if removebackground is None:
removebackground = background is not None
if (background is None or background == "preset") and hasattr(
self, "background"
):
background = self.background
elif background == "preset":
background = None
if removebackground:
if background is None:
background = "constant"
if background in ["start", "begining", "first"]:
background = np.mean(y[:endpoints])
elif background in ["finish", "end", "last"]:
background = np.mean(y[-endpoints:])
elif background == "constant":
if type(removebackground) is float:
background = removebackground * min(y)
elif t_bg is not None:
print(
"defining signal at t in "
+ str(t_bg)
+ " as background for "
+ self.name
)
if t_bg[0] > x[0] and t_bg[-1] < x[-1]:
try:
mask = np.logical_and(t_bg[0] < x, x < t_bg[-1])
background = np.mean(y[mask])
except TypeError:
background = np.interp(t_bg, x, y)
else:
background = self.get_bg(
MS_data,
tspan=t_bg,
density=density,
unit=unit,
verbose=verbose,
override=override,
endpoints=endpoints,
)
else:
print("using minimum value as constant background for " + self.name)
background = min(y)
if not hasattr(self, "background") or self.background is None:
self.background = background
elif background == "linear":
x_end = [np.average(x[:endpoints]), np.average(x[-endpoints:])]
y_end = [np.average(y[:endpoints]), np.average(y[-endpoints:])]
background = np.interp(x, x_end, y_end)
print("using linear background for " + self.name)
elif isinstance(background, Number):
# background = background
if verbose:
print("using preset constant background for " + self.name)
pass
y = y - 0.99 * background # so that we don't break the log scale.
# I should get rid of this and assume the caller knows what they're doing.
# important that this comes after the background bit, so that we don't
# subtract a background in different units than the signal
if "nmol" in unit:
y = y * 1e9
elif "pmol" in unit:
y = y * 1e12
if "cm$^{-2}$" in unit or "/cm^2" in unit:
y = y / MS_data["A_el"]
return x, y
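# A minimal usage sketch (hypothetical dataset; assumes the molecule's data
# file provides F_cal and a primary mass):
# O2 = Molecule("O2")
# x, y = O2.get_flux(dataset.data, tspan=[0, 600], unit="nmol/s")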
def reset_datafiles(mols, attrs, mdict={}):
"""
loads all of the molecules in mols and rewrites their data files with
only the attributes listed in attrs.
Look through the .git history if you need any old information removed by
this function.
Tuple entries attrs[i] rename the attribute from attrs[i][0] to attrs[i][1]
"""
for mol in mols:
if mol in mdict:
continue
elif type(mol) is str:
mdict[mol] = Molecule(mol) # might return None.
else: # then mol is a Molecule object and mol.name is the key
mdict[mol.name] = mol
cwd = os.getcwd()
os.chdir(data_directory)
for mol, m in mdict.items():
if m is None:
continue
f = open(mol + ".txt", "w")
f.write("#cleaned up " + date_scott() + "\n")
f.close()
print("wiped " + mol + ".txt clean.")
# I think this makes blank datafiles that the molecules can then
# write to.
for attr in attrs:
if type(attr) is list or type(attr) is tuple: # rename attr
var = attr[0]
newvar = attr[1]
else:
var = attr
newvar = attr
try:
value = getattr(m, var) # keep the attribute's name
m.write((newvar, value))
except AttributeError:
print("no attribute " + var + " for Molecule " + mol)
os.chdir(cwd)
return mdict
def add_to_datafiles(attr, d, mdict={}, mols="all"):
"""
d is a dictionary containing attribute attr for specified molecules.
This function writes that attribute to each datafile
"""
for (key, value) in d.items():
if type(key) is str:
if not mols == "all" and key not in mols:
continue
if key in mdict:
m = mdict[key]
else:
m = Molecule(key)
mdict[key] = m
if m is not None:
setattr(m, attr, value)
m.write((attr, value))
return mdict
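# A minimal sketch (hypothetical diffusion constants, in m^2/s):
# mdict = add_to_datafiles("D", {"H2": 4.5e-9, "O2": 2.1e-9})
# writes a 'D' line to H2.txt and O2.txt and returns the Molecule objects.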
def add_script_to_datafiles(path, file_name, attrs="all", mdict={}, mols="all"):
"""
sorts data stored in another script in dictionary form into molecule data
form.
Tuple entries attrs[i] rename the attribute from attrs[i][0] to attrs[i][1]
"""
module_name = file_name.split(".")[0] # drop extension
cwd = os.getcwd()
os.chdir(path)
module = __import__(module_name)
os.chdir(cwd)
check = False
if attrs == "all":
check = True
attrs = (d for d in dir(module) if type(d) is dict)
for attr in attrs:
if check: # then check manually
var = attr
newvar = input("Write data from '" + var + "'? (y/<newname>/n)\n")
if newvar == "n":
continue
elif newvar == "y":
newvar = var
elif type(attr) is list or type(attr) is tuple:
var = attr[0]
newvar = attr[1]
else:
var = attr
newvar = attr
d = getattr(module, var)
mdict = add_to_datafiles(newvar, d, mdict, mols=mols)
def get_NIST_spectrum(mol, data_dir=data_directory):
"""
a parser for NIST-exported .jdx files
"""
data_folder = data_dir + os.sep + "NIST_spectra_data"
if type(mol) is not str:
try:
mol = mol.real_name
except AttributeError:
mol = mol.name
file_list = os.listdir(data_folder)
try:
file = next(f for f in file_list if re.search("^" + mol, f))
except StopIteration:
print("WARNING!!! No Spectrum available for " + mol)
raise FileNotFoundError
# ^ file-extension-ambiguous because I might forget and save them as .txt at some point
with open(data_folder + os.sep + file) as f:
lines = f.readlines()
in_data = False
spectrum = {}
for line in lines:
if "END" in line:
break
if in_data:
# print(line) # debugging
mass_values = line.strip().split(" ")
for mass_value in mass_values:
mass, value = mass_value.split(",")
mass = "M" + mass
value = eval(value)
spectrum[mass] = value
elif "PEAK TABLE" in line:
in_data = True
return spectrum
|
ScottSoren/EC_MS | src/EC_MS/utils/extraction_class.py | # -*- coding: utf-8 -*-
"""
Created on Sun May 3 15:22:35 2020
@author: scott
"""
import os, json
import numpy as np
# from matplotlib import pyplot as plt
STANDARD_DATA_DIR = "../pickles/"
STANDARD_EXTRACTION_DIR = "../extractions/"
from EC_MS import Dataset
class Extraction(Dataset):
"""
Written 20E03 as a first set of ideas for how to implement the Experiment
class that I proposed some days ago to Reshma.
"""
def __init__(
self,
name=None,
dataset=None,
data_file=None,
data_files=None,
data_dir=STANDARD_DATA_DIR,
tspan_experiment=None,
tspan_exchange=None,
tspan_extraction=None,
RE_vs_RHE=None,
A_el=None,
t_bg=None,
calibration=None,
mdict=None,
calibration_file=None, # "20A25_sniffer.json",
electrolyte="16O",
film="18O",
element="Pt",
tspan_ratio=None,
alpha=None,
n_ex=None,
):
if name is None:
name = f"extraction {element}{film} in {electrolyte}"
self.name = name
self.calibration_file = calibration_file
if (not calibration) and calibration_file and not mdict:
print(f"requested calibration_file = {calibration_file}")
raise NotImplementedError(
"proper calibration is not implemented in EC_MS. "
"You have to import the calibration externally. "
"An EC_MS mdict should also work.\n"
"...The functionality should come soon to ixdat."
)
self.calibration = calibration
if calibration and not mdict:
mdict = get_EC_MS_mdict(calibration)
self.mdict = mdict
# Later we will just directly use the siQuant mdict!
self.data_file = data_file
self.data_files = data_files
self.data_dir = data_dir
if dataset is None:
if data_file is not None: # <-- load one data file
if os.sep not in str(data_file):
path_to_data = os.path.join(data_dir, data_file)
else:
path_to_data = data_file
dataset = Dataset(path_to_data)
elif data_files is not None: # <-- synchronize multiple datasets!
dataset = Dataset()
for data_file in data_files:
if os.sep not in data_file:
path_to_data = os.path.join(data_dir, data_file)
else:
path_to_data = data_file
dataset = dataset + Dataset(path_to_data)
self.dataset = dataset
self.data = dataset.data
self.verbose = self.dataset.verbose
print(f"Extraction.__init__: type(dataset) = {type(dataset)}")
self.RE_vs_RHE = RE_vs_RHE
self.A_el = A_el
if RE_vs_RHE is not None or A_el is not None:
self.normalize(RE_vs_RHE=RE_vs_RHE, A_el=A_el)
self.tspan_experiment = tspan_experiment
self.tspan_exchange = tspan_exchange
self.tspan_extraction = tspan_extraction
self.tspan_ratio = tspan_ratio
self.electrolyte = parse_isotope(electrolyte)
self.film = parse_isotope(film)
# ^ parse_isotope makes sure it's e.g. 18O instead of O18
self.element = element
self.t_bg = t_bg
if t_bg is not None:
self.set_background(t_bg=t_bg)
self.alpha = alpha
if alpha is None and tspan_ratio is not None:
self.get_alpha(tspan=tspan_ratio, ax=None)
if n_ex is None:
n_ex = {}
self.n_ex = n_ex # will store extraction results
def as_dict(self):
self_as_dict = {}
# fmt: off
self_as_dict.update(
name=self.name,
data_dir=self.data_dir, data_file=self.data_file,
data_files=self.data_files, calibration_file=self.calibration_file,
RE_vs_RHE=self.RE_vs_RHE, A_el=self.A_el,
tspan_experiment=self.tspan_experiment, tspan_exchange=self.tspan_exchange,
tspan_extraction=self.tspan_extraction, tspan_ratio=self.tspan_ratio,
alpha=self.alpha, n_ex=self.n_ex, t_bg=self.t_bg,
electrolyte=self.electrolyte, film=self.film, element=self.element
)
# fmt: on
return self_as_dict
def save(self, extraction_file=None):
if extraction_file is None:
extraction_file = self.name + ".json"
if os.sep in extraction_file:
path_to_file = extraction_file
else:
path_to_file = os.path.join(STANDARD_EXTRACTION_DIR, extraction_file)
self_as_dict = self.as_dict()
with open(path_to_file, "w") as f:
json.dump(self_as_dict, f, indent=4)
@classmethod
def load(cls, extraction_file, **kwargs):
path_to_file = os.path.join(STANDARD_EXTRACTION_DIR, extraction_file)
with open(path_to_file, "r") as f:
self_as_dict = json.load(f)
self_as_dict.update(kwargs)
return cls(**self_as_dict)
def plot_experiment(self, *args, **kwargs):
"""
Just adds to the plot_experiment of the Dataset class that it uses
the calibration and tspan_experiment by default.
Molecules can be given as str and the Extraction tries to look them
up in its calibration.
"""
to_plot = [] # this will be either mols or masses to plot
to_plot_0 = None # first we see if the function caller told us what to plot
if len(args) > 0:
to_plot_0 = args[0] # first positional argument should be mols or masses!
elif "mols" in kwargs: # ... but mols can also be given as a kwarg
to_plot_0 = kwargs["mols"]
elif "masses" in kwargs: # ... and so can masses
pass # but then we don't actually need to do anything to it
if to_plot_0:
# now we go through and put the requested calibrated objects in to_plot
for i, thing in enumerate(to_plot_0):
print(f"thing to plot = {thing}") # debugging
if isinstance(thing, str) and thing in self.mdict:
# excellent! then we've got the name of a calibrated molecule.
to_plot += [self.mdict[thing]]
elif isinstance(thing, list) or isinstance(thing, tuple):
# ^ this will be the case if they ask for mols left and mols right
to_plot += [[]] # we need to mirror the list in to_plot
for subthing in thing:
print(f"subthing to plot = {subthing}") # debugging
if isinstance(subthing, str) and subthing in self.mdict:
# excellent! then we've got the name of a calibrated molecule.
to_plot[-1] += [self.mdict[subthing]]
else:
# then we assume they know what they're doing.
to_plot[-1] += [subthing]
else:
# then we assume they know what they're doing.
to_plot += [thing]
elif "masses" not in kwargs:
# okay, so this is actually the case if they don't ask for anything to plot
# and then by default we try to plot everything in the calibration
to_plot = self.mdict
# print(f"to_plot_0 = {to_plot_0}") # debugging
# print(f"to_plot = {to_plot}") # debugging
if len(args) > 0:
# then we overwrite to_plot_0, which is args[0], with to_plot
# (args is a tuple, so rebuild it rather than assigning by index):
args = (to_plot,) + args[1:]
elif "mols" in kwargs or "masses" not in kwargs:
# this is the case if they asked for mols or didn't ask for anything.
# and then we've generated the calibrated plotted list:
kwargs["mols"] = to_plot
if len(args) < 2 and "tspan" not in kwargs:
# ah, yes, we use the Experiment tspan by default instead of the Dataset tspan:
kwargs.update(tspan=self.tspan_experiment)
# and now we're ready to call plot_experiment via Dataset.plot_experiment!
return super(Extraction, self).plot_experiment(*args, **kwargs)
def get_alpha(self, tspan=None, t_bg=None, simple=True, ax="new"):
if tspan is None:
tspan = self.tspan_ratio
if ax == "new":
tspan_plot = [2 * tspan[0] - tspan[-1], 2 * tspan[-1] - tspan[0]]
ax = self.plot_signal(
["M32", "M34", "M36"], tspan=tspan_plot, unit="pA", t_bg=t_bg
)
linewidth = 4
x32, y32 = self.get_signal("M32", tspan=tspan, t_bg=t_bg, unit="pA")
x34, y34 = self.get_signal("M34", tspan=tspan, t_bg=t_bg, unit="pA")
x36, y36 = self.get_signal("M36", tspan=tspan, t_bg=t_bg, unit="pA")
Y32 = np.mean(y32)
Y34 = np.mean(y34)
Y36 = np.mean(y36)
print(f"self.electrolyte = {self.electrolyte}") # debugging
if simple:
if self.electrolyte == "18O":
gamma = Y34 / Y36
self.gamma = gamma
alpha = gamma / (2 + gamma)
if ax is not None:
ax.plot(x34, y34, "r", linewidth=linewidth)
ax.plot(x36, y36, "g", linewidth=linewidth)
elif self.electrolyte == "16O":
beta = Y34 / Y32
self.beta = beta
alpha = 2 / (2 + beta)
if ax is not None:
ax.plot(x32, y32, "k", linewidth=linewidth)
ax.plot(x34, y34, "r", linewidth=linewidth)
else:
print(
f"Warining!!! Can't get ratio with self.electrolyte={self.electrolyte}"
)
alpha = None
else:
from scipy.optimize import minimize
y_hat = np.array([Y32, Y34, Y36])
y_hat = y_hat / np.sum(y_hat)
# y_hat holds the normalized (M32, M34, M36) signal intensities
def sqerror(alpha):
return (
(alpha ** 2 - y_hat[0]) ** 2
+ (2 * alpha * (1 - alpha) - y_hat[1]) ** 2
+ ((1 - alpha) ** 2 - y_hat[2]) ** 2
)
res = minimize(sqerror, 0.5)
alpha = res.x[0]
if ax is not None:
ax.plot(x32, y32, "k", linewidth=linewidth)
ax.plot(x34, y34, "r", linewidth=linewidth)
ax.plot(x36, y36, "g", linewidth=linewidth)
self.alpha = alpha
return alpha
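# Where the 'simple' formulas above come from, assuming binomial isotope
# statistics with alpha = the fraction of (16)O in the electrolyte water:
# the O2 signals scale as M32 : M34 : M36 = alpha^2 : 2*alpha*(1-alpha) : (1-alpha)^2.
# In (18)O electrolyte, gamma = M34/M36 = 2*alpha/(1-alpha), so alpha = gamma/(2+gamma);
# in (16)O electrolyte, beta = M34/M32 = 2*(1-alpha)/alpha, so alpha = 2/(2+beta).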
def get_majors_and_minors(
self, mol="O2",
):
"""
Get the majority and minority isotopes of mol produced in the electrolyte
majors are ^{16}O2 and C^{16}O2 in 16O electrolyte,
^{18}O2 and (C^{16}O^{18}O and C^{18}O2) in 18O electrolyte,
minors are the other isotopes.
"""
if self.electrolyte == "18O":
if mol == "O2":
majors = [self.mdict["O2_M36"]]
minors = [self.mdict["O2_M34"], self.mdict["O2_M32"]]
elif mol == "CO2":
majors = [self.mdict["CO2_M46"], self.mdict["CO2_M48"]]
minors = [self.mdict["CO2_M44"]]
elif self.electrolyte == "16O":
if mol == "O2":
majors = [self.mdict["O2_M32"]]
minors = [self.mdict["O2_M34"], self.mdict["O2_M36"]]
elif mol == "CO2":
majors = [self.mdict["CO2_M44"]]
minors = [self.mdict["CO2_M46"], self.mdict["CO2_M48"]]
return majors, minors
def get_ratio(self):
alpha = self.alpha
if self.electrolyte == "18O":
ratio = 2 * alpha / (1 - alpha)
elif self.electrolyte == "16O":
ratio = 2 * (1 - alpha) / alpha
return ratio
def plot_exchange(
self, mol="O2", tspan=None, t_bg=None, axes="new", unit=None, **kwargs
):
if tspan is None or tspan == "experiment":
tspan = self.tspan_experiment
elif tspan == "exchange":
tspan = self.tspan_exchange
elif tspan == "extraction":
tspan = self.tspan_extraction
ratio = self.get_ratio()
majors, minors = self.get_majors_and_minors(mol=mol)
unit_right = kwargs.get("unit_right", unit)
unit_left = kwargs.get("unit_left", unit)
if axes == "new":
axes = self.plot_experiment(
mols=[minors, majors],
tspan=tspan,
t_bg=t_bg,
logplot=False,
ax=axes,
unit=unit,
**kwargs,
)
else:
for molecule in minors:
self.plot_flux(
molecule, ax=axes[0], tspan=tspan, unit=unit_left, logplot=False
)
for molecule in majors:
self.plot_flux(
molecule, ax=axes[-1], tspan=tspan, unit=unit_right, logplot=False
)
unit_ratio = 1
if (
unit_left
and unit_right
and unit_left.startswith("p")
and unit_right.startswith("n")
):
unit_ratio *= 1e3
axes_ratio = ratio * unit_ratio
if True: # highlight the labeled lattice oxygen evolution
x1, y1 = self.get_flux(majors[0], t_bg=t_bg, unit=unit_right, tspan=tspan)
x2, y2 = self.get_flux(minors[0], t_bg=t_bg, unit=unit_left, tspan=tspan)
y2_interp = np.interp(x1, x2, y2)
color_1 = minors[0].get_color()
color_2 = majors[0].get_color()
axes[0].fill_between(
x1,
y1 * axes_ratio,
y2_interp,
color=color_1,
where=y2_interp > y1 * ratio,
alpha=0.2,
)
axes[0].fill_between(
x1,
y1 * axes_ratio,
y2_interp,
color=color_2,
where=y2_interp < y1 * ratio,
alpha=0.1,
hatch="//",
)
axes[-1].set_ylim([l / axes_ratio for l in axes[0].get_ylim()])
return axes
def plot_extraction_vs_potential(
self, mol="CO2", tspan=None, unit=None, ax="new", reverse=True
):
if tspan is None:
tspan = self.tspan_extraction
ratio = self.get_ratio()
majors, minors = self.get_majors_and_minors(mol=mol)
print(f"majors={majors}, minors={minors}") # debugging
ax = self.plot_vs_potential(
mols=[minors, majors], tspan=tspan, unit=unit, ax=ax, logplot=False
)
ax[-1].set_ylim([l / ratio for l in ax[0].get_ylim()])
if reverse:
ax[0].invert_xaxis()
ax[1].invert_xaxis()
return ax
def create_excess_mol(self, mol="CO2", ratio=None, ratio_type=None, name=None):
"""Return EC_MS.Molecule object who's get_flux calculates lattice oxygen ex."""
if name is None:
name = "excess_" + mol
majors, minors = self.get_majors_and_minors(mol=mol)
if ratio is None:
if ratio_type is None:
if mol == "CO2" and self.electrolyte == "18O":
ratio_type = "single"
else:
ratio_type = "ratio"
if ratio_type == "single": # expect only one O atom from electrolyte
ratio = self.alpha / (1 - self.alpha)
elif ratio_type == "alpha": # expect only one O atom from electrolyte
ratio = self.alpha
else:
ratio = self.get_ratio()
print(f"ratio = {ratio}") # debugging
excess_molecule = minors[0]
excess_molecule.name = name
excess_molecule.cal_mat = {
minors[0].primary: 1 / minors[0].F_cal,
majors[0].primary: -1 / majors[0].F_cal * ratio,
}
self.mdict[name] = excess_molecule
return excess_molecule
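# Hedged sketch (made-up numbers) of what the cal_mat above encodes: the
# excess flux is the minority-isotope flux minus what the majority-isotope
# flux predicts from the electrolyte isotope ratio,
#     n_excess = S_minor / F_minor - ratio * S_major / F_major
#   >>> S_minor, F_minor = 2.0e-12, 0.5  # signal / [A], sensitivity / [C/mol]
#   >>> S_major, F_major = 10.0e-12, 0.5
#   >>> ratio = 0.25
#   >>> S_minor / F_minor - ratio * S_major / F_major  # ~ -1e-12 mol/s, no excess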
def get_excess(self, mol="O2", x=None, tspan=None, unit="pmol/s"):
majors, minors = self.get_majors_and_minors(mol=mol)
x1, y1 = self.get_flux(minors[0], tspan=tspan, unit=unit)
x0, y0 = self.get_flux(majors[-1], tspan=tspan, unit=unit)
ratio = self.get_ratio()
if x is None:
if tspan is None:
tspan = self.tspan_exchange
x = np.linspace(tspan[0], tspan[-1], 100)
y = np.interp(x, x1, y1) - np.interp(x, x0, y0) * ratio
return x, y
def quantify_extraction(self, mol="O2", tspan=None, unit="pmol/s"):
x, y = self.get_excess(mol=mol, tspan=tspan, unit=unit)
Y = np.trapz(y, x)
self.n_ex[mol] = Y
return Y
def get_EC_MS_mdict(calibration, mols=None, get_cal_mat=True):
from EC_MS import Molecule
mdict = {}
if mols is None:
mols = set([])
if hasattr(calibration, "mol_list"):
mols = mols.union(calibration.mol_list)
if hasattr(calibration, "mdict"):
mols = mols.union(calibration.mdict) # adds mdict's keys to the set mols
for mol in mols:
print(f"geting EC_MS molecule {mol} from calibration")
molecule = calibration.molecule(mol, with_Q=get_cal_mat)
if mol in calibration.real_names:
mol_name = calibration.real_names[mol]
else:
mol_name = mol
mdict[mol] = Molecule(mol_name)
mdict[mol].name = mol
F = molecule.F
mdict[mol].F = F
if get_cal_mat:
try:
mdict[mol].cal_mat = molecule.Q
except AttributeError:
print(f"Warning!!! couldn't get Q for molecule {mol}.")
key_max = None
F_max = 0
for key, val in F.items():
if val > F_max:
F_max = val
key_max = key
mdict[mol].F_cal = F_max
mdict[mol].primary = key_max
return mdict
def parse_isotope(isotope):
if isotope in ["18O", "O18", "18", 18]:
return "18O"
elif isotope in ["16O", "O16", "16", 16]:
return "16O"
else:
return isotope
|
ScottSoren/EC_MS | src/EC_MS/Chem/PhysCon.py | <filename>src/EC_MS/Chem/PhysCon.py
# PhysCon, physical constants
import numpy as np
c = 2.997925e8 # speed of light / (m/s)
qe = 1.60219e-19 # fundamental charge / (C)
h = 6.62620e-34 # Planck's constant / (J*s)
hbar = h / (2 * np.pi) # reduced Planck's constant / (J*s)
NA = 6.02217e23 # Avogadro's number /(mol) or dimensionless
me = 9.10956e-31 # mass of electron / (kg)
kB = 1.38062e-23 # Boltzmann constant / (J/K)
u0 = 4 * np.pi * 1e-7 # permeability of free space / (J*s^2/(m*C^2))
e0 = 1 / (u0 * c ** 2) # permittivity of free space / (C^2/(J*m))
R = NA * kB # gas constant / (J/(mol*K)) #NA in /mol
Far = NA * qe # Faraday's constant, C/mol
amu = 1e-3 / NA # atomic mass unit / (kg) # amu=1g/NA #NA dimensionless
# input('physical constants loaded')
|
ScottSoren/EC_MS | setup.py | import codecs
import os
import re
from setuptools import setup, find_packages
NAME = "EC_MS"
KEYWORDS = [
"electrochemistry",
"mass spectrometry",
]
CLASSIFIERS = [
"License :: Free for non-commercial use",
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Natural Language :: English",
]
PACKAGES = find_packages(where="src")
# print(PACKAGES) # test
HERE = os.path.abspath(os.path.dirname(__file__))
META_PATH = os.path.join("src", "EC_MS", "__init__.py")
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
INSTALL_REQUIRES = []
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M
)
if meta_match:
print("Able to find __{meta}__ string".format(meta=meta))
return meta_match.group(1)
print("Unable to find __{meta}__ string".format(meta=meta))
raise RuntimeError("Unable to find __{meta}__ string".format(meta=meta))
# version = find_meta("version"),
# url = find_meta("url"),
# author = find_meta("author"),
# print("{}\n{}\n{}".format(version, url, author))
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
long_description=read("README.rst"),
# long_description_content_type="text/x-rst", #gives an error
license=find_meta("license"),
version=find_meta("version"),
url=find_meta("url"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
packages=PACKAGES,
package_dir={"": "src"},
zip_safe=False,
include_package_data=True,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
options={"bdist_wheel": {"universal": "1"}},
)
|
ScottSoren/EC_MS | src/EC_MS/Time_Response.py | <reponame>ScottSoren/EC_MS
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 14 16:50:10 2016
Most recently edited: 16J27
@author: scott
This module will include functions for modelling mass transport
and fitting time response in the EC-MS setup.
See Scott's MSc thesis, chapter 2 and section 3.3, for discussion inc. prior
implementation in Matlab
"""
from __future__ import division, print_function
import os
import numpy as np
from scipy.optimize import curve_fit
from scipy.integrate import odeint
from . import Chem
from .Molecules import Molecule
from .Plotting import plot_operation
def fit_exponential(t, y, zero_time_axis=False):
"""Return (tao, y0, y1) for best fit of y = y0 + (y1-y0) * exp(-t/tao)
Args:
t (vector): time
y (vector): values
zero_time_axis (boolean): whether to subtract t[0] from t. False by default
"""
if zero_time_axis:
t = t - t[0] # zero time axis
tau_i = t[-1] / 10 # guess at time constant
# tau_i = t[-1] #often can't solve with this guess. A smaller tau helps.
y0_i = y[-1] # guess at approach value
y1_i = y[0] # guess at true initial value
pars_i = [tau_i, y0_i, y1_i]
def exp_fun(x, tau, y0, y1):
z = y0 + (y1 - y0) * np.exp(-x / tau)
# print([tau,y0,y1]) #for diagnosing curve_fit problems
return z
pars, pcov = curve_fit(exp_fun, t, y, p0=pars_i)
# pars = [tau, y0, y1]
return pars
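# Minimal usage sketch with synthetic data (values chosen here):
#   >>> import numpy as np
#   >>> t = np.linspace(0, 10, 200)
#   >>> y = 2.0 + (5.0 - 2.0) * np.exp(-t / 1.5)  # tau=1.5, y0=2, y1=5
#   >>> tau, y0, y1 = fit_exponential(t, y)  # recovers ~(1.5, 2.0, 5.0)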
def fit_step(t, y, tpulse=0, step="fall", ax=None, spec="r--", label=None, verbose=1):
"""
use step='rise' to fit onset and step='fall' to fit tail
assumes that data starts from the start of the pulse
"""
if verbose:
print("\n\nfunction 'fit_step' at your service!\n")
# zero time axis
t0 = t[0]
t = t - t0
print("t0 = " + str(t0))
if type(t) is list:
t = np.array(t) # 17B02
if step == "fall":
I_tail = np.array([I for (I, t_I) in enumerate(t) if tpulse < t_I])
# print(I_tail)
t_fit = t[I_tail] - tpulse
elif step == "rise":
if tpulse == 0:
tpulse = t[-1]
I_tail = np.array([I for (I, t_I) in enumerate(t) if t_I < tpulse])
t_fit = t[I_tail]
else:
print("use step='rise' to fit onset and step='fall' to fit tail")
pars = fit_exponential(t_fit, y[I_tail])
if ax:
tau = pars[0]
y0 = pars[1]
y1 = pars[2]
y_fit = y0 + (y1 - y0) * np.exp(-t_fit / tau)
t_fit = t_fit + t0 # put time axis back
if step == "fall":
t_fit = t_fit + tpulse
ax.plot(t_fit, y_fit, spec, label=label)
if label:
if label == "tau":
label = "tau = {0:5.2f} s".format(tau)
I_text = int(len(t_fit) / 2)
t_text = t_fit[I_text]
y_text = y_fit[I_text]
ax.text(t_text, y_text, label, color=spec[0])
if verbose:
print("tau = " + str(pars[0]) + " s")
print("\nfunction 'fit_step' finished!\n\n")
return pars
def stagnant_diffusion_ode(C, T, pars): # Note that C comes before T here!
"""
Scott's master p. 52 and appendix C. Z is the new X.
returns rate of change dC/dT of concentration profile for
non-dimensionalized stagnant sniffer diffusion problem.
C = C(X) where X goes from 0 (membrane) to 1 (electrode)
T is time non-dimensionalized on the diffusion scale t0 = L²/D
pars:
[0] alpha = h*L/D is the system parameter.
[1] J_fun returns the flux from the electrode as a function of T. The flux
scale J0 is used to define the concentration scale, C0 = J0*L/D
#modified 17B02 to enable carrier gas introduction of element using Cg
# pars as a list rather than a dict is slightly faster (see fig06.out),
# but I like the dict so that I can remember what's what.
"""
dZ = pars["dZ"] # assigning this here replaces two lookups with one.
C_N = C[-1] + pars["J_fun"](T) * dZ # boundary condition dC/dZ = J(T) at electrode
C_ = (
C[0] - pars["alpha"] * (C[0] - pars["Cg"]) * dZ
) # boundary condition dC/dZ = -alpha*(C-Cg) at membrane
C_up = np.append(C[1:], C_N)
C_down = np.append(C_, C[:-1])
d2CdZ2 = (C_up - 2 * C + C_down) * pars["1/dZ**2"] # second derivative of C wrt Z
dCdT = d2CdZ2 # Fick's second law
return dCdT
def solve_stagnant(
alpha=1, J_fun=None, Cg=1, Tspan=[0, 10], startstate="zero", N=30, flux=1, verbose=1
):
"""solves the stagnant sniffer partial differential equations.
pars[0][0] is alpha = h*L/D is the system parameter.
pars[0][1] is J_fun. Returns the flux from the electrode as a unction of T.
Tspan is [Tstart, Tfinish] on the diffusion timescale t0 = L²/D
C0 = C0(X) is start concentration profile. If size(C0) = 1, assumes a
uniform concentration.
N is descretization (read from C0)
flux = 0 to return entire concentration profile (on C0 = J0*L/D scale)
flux = 1 to return flux through membrane (on J0 scale)
"""
if verbose:
print("\n\nfunction 'solve_stagnant' at your service!\n")
if startstate == "zero":
C0 = np.zeros([N])
elif startstate == "steady":
C0 = 1 / alpha + np.linspace(0, 1, N)  # steady-state profile C = 1/alpha + Z; Scott's MSc thesis, p. 53
elif startstate == "saturated":
C0 = np.ones([N]) * Cg
elif np.size(startstate) == 1:
C0 = np.ones([N]) * startstate
else:
C0 = startstate
N = np.size(C0)
if np.size(Tspan) == 2:
Tspan = np.linspace(Tspan[0], Tspan[1], 100)
dZ = 1 / N # N+1? N-1? I can never remember what's most 'correct'
pars = {"alpha": alpha, "J_fun": J_fun, "Cg": Cg, "dZ": dZ, "1/dZ**2": 1 / dZ ** 2}
CC = odeint(stagnant_diffusion_ode, C0, Tspan, args=(pars,))
# 16J18_02h10: this crashes the kernel, I don't know why... 18h56 found it! c before t!
J = (
CC[:, 1] - CC[:, 0]
) * N # (positive) J = dC/dX with dC = C0 - C_ and dZ = 1 / N
# J is a function of T
if verbose:
print("solution shape: " + str(np.shape(CC)))
print("\nfunction 'solve_stagnant' finished!\n\n")
if flux:
return Tspan, J
else:
return Tspan, CC
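# Hedged usage sketch: a square flux pulse of non-dimensional duration
# Tpulse = 1, returning the membrane flux J(T) on the J0 scale:
#   >>> def J_fun(T):
#   ...     return 1.0 if 0 <= T < 1 else 0.0
#   >>> Tspan, J = solve_stagnant(alpha=1.0, J_fun=J_fun, Cg=0,
#   ...                           Tspan=[0, 5], N=30, flux=1, verbose=0)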
def stagnant_pulse(*args, **kwargs):
print(
"\n\n'stagnant_pulse' has been renamed 'stagnant_operator'. Remember that next time!"
)
return stagnant_operator(*args, **kwargs)
def stagnant_operator(
tj=None,
tpulse=10,
tspan=None,
j_el=-1,
L=100e-6,
A=0.196e-4,
q0=1.5e15 / Chem.NA,
p_m=1e5,
mol="H2",
p_gas=0,
normalize=False,
D=None,
kH=None,
n_el=None,
Temp=None,
unit="pmol/s",
flux_direction="out",
verbose=True,
ax=None,
plot_type=None,
startstate="zero",
N=30,
colormap="plasma",
aspect="auto",
):
"""
Models a pulse of current towards a specified product in our EC-MS setup.
Theory in chapter 2 of Scott's masters thesis.
all arguments are in pure SI units. The electrode output can either be given
as a steady-state square pulse of electrical current (tpulse, j_el, n_el),
or as a measured current (tj[1]) as a function of time (tj[0])
#tj[1] should have units A/m^2. 1 mA/cm^2 is 10 A/m^2
#17B02: p_gas is the partial pressure of the analyte in the carrier gas.
# this enables, e.g., CO depletion modelling.
"""
if verbose:
print("\n\nfunction 'stagnant_operator' at your service!\n")
if type(mol) is str:
mol = Molecule(mol)
if Temp is not None:
mol.set_temperature(Temp)
else:
Temp = 298.15 # standard temperature in K
if D is None:
D = mol.D
if kH is None:
kH = mol.kH
if n_el is None and not normalize:
n_el = mol.n_el
if tspan is None:
if tj is None:
tspan = [-0.1 * tpulse, 1.2 * tpulse]
else:
tspan = [tj[0][0], tj[0][-1]]
h = kH * Chem.R * Temp * q0 / (p_m * A)  # mass transfer coefficient
alpha = L * h / D # system parameter
# non-dimensional scales:
t0 = L ** 2 / D
if tj is None:
if normalize:
j0 = 1
else:
j0 = j_el / (n_el * Chem.Far)
else:
t = tj[0]
if normalize:
j0 = 1
j = tj[1] / np.max(np.abs(tj[1]))
else:
j = tj[1] / (n_el * Chem.Far) # A/m^2 --> mol/(m^2*s)
j0 = max(np.abs(j))
c0 = j0 * L / D
tau = L ** 2 / (2 * D) + L / h
# from the approximate analytical solution, Scott's thesis appendix D
Tpulse = tpulse / t0
Tspan = (
np.linspace(tspan[0], tspan[1], 1000) / t0
) # why do I give so many time points?
if tj is None:
def J_fun(T):
if T < 0:
return 0
if T < Tpulse:
return 1
return 0
else:
T_in = t / t0
J_in = j / max(np.abs(j))
# print('max(J_in) = ' + str(max(J_in)))
def J_fun(T):
if T < T_in[0]: # assume no current outside of the input tj data
return 0
if T < T_in[-1]:
return np.interp(T, T_in, J_in)
return 0
c_gas = p_gas / (Chem.R * Temp)
cg = c_gas / kH # concentration analyte in equilibrium with carrier gas, 17B02
Cg = (
cg / c0
) # non-dimensionalized concentration analyte at equilibrium with carrier gas, 17B02
# pars = ([alpha, J_fun, Cg],) #odeint needs the tuple. 17A12: Why ?!
[T, CC] = solve_stagnant(
alpha=alpha,
J_fun=J_fun,
Cg=Cg,
Tspan=Tspan,
startstate=startstate,
flux=False,
N=N,
)
cc = CC * c0
t = T * t0
j = h * (cc[:, 0] - cg) # mass transport at the membrane
# j1 = D * (cc[:,1] - cc[:,0])
# fick's first law at the membrane gives the same j :)
if verbose:
print(
"q0 = "
+ str(q0)
+ " mol/s, h = "
+ str(h)
+ " m/s, alpha = "
+ str(alpha)
+ ", j0 = "
+ str(j0)
+ " mol/(m^2*s), max(j)/j0 = "
+ str(max(j) / j0)
+ ", t0 = "
+ str(t0)
+ " s, c0 = "
+ str(c0)
+ " mol/m^3"
+ ", tau (analytical) = "
+ str(tau)
+ " s"
+ ", cg = "
+ str(cg)
+ " mM"
)
# get ready to plot:
N = np.shape(cc)[1]
z = np.arange(N) / (N - 1) * L
# this will only be used for heatmap, so it's okay if dx isn't quite right.
if "cm^2" not in unit:
j = j * A
if unit[0] == "u":
j = j * 1e6
elif unit[0] == "n":
j = j * 1e9
elif unit[0] == "p":
j = j * 1e12
if flux_direction == "in":
j = -j
if normalize:
s_int = np.trapz(j, t)
if verbose:
print("normalizing from area = " + str(s_int))
j = j / s_int
# plotting was moved on 17G30 some legacy code here:
if plot_type is not None and ax is not None:
print("We recommend you plot seperately, using the function 'plot_operation'.")
axes = plot_operation(
cc=cc,
t=t,
z=z,
j=j,
ax=ax,
plot_type=plot_type,
colormap=colormap,
aspect=aspect,
verbose=verbose,
)
if verbose:
print("\nfunction 'stagnant_operator' finished!\n\n")
return t, j, axes
results = {"t": t, "z": z, "j": j, "cc": cc, "dimensions": "tz"}
return results
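# Hedged usage sketch: a 10 s square pulse of H2 evolution at -1 mA/cm^2
# (j_el = -10 A/m^2) with the default chip geometry; returns the results
# dictionary since no axis is given:
#   >>> results = stagnant_operator(tpulse=10, j_el=-10, mol="H2",
#   ...                             tspan=[-2, 30], unit="pmol/s")
#   >>> t, j = results["t"], results["j"]  # MS-side flux vs time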
def flow_diffusion_ode(C, X, pars):
"""
Scott's master, p. 60. X is the new Y and Z is the new X.
"""
C_N = C[-1]
C_ = C[0] - pars["alpha"] * (C[0] - pars["Cg"]) * pars["dZ"]
C_up = np.append(C[1:], C_N)
C_down = np.append(C_, C[:-1])
d2CdZ2 = (C_up - 2 * C + C_down) * pars["1/dZ**2"]
# I assume multiplication to be faster than division
dCdX = d2CdZ2 * pars["1/beta"]
return dCdX
def solve_flow(
alpha=1, beta=1, Cg=0, N=30, flux=False, verbose=True, Xspan=[0, 1], C0="uniform"
):
"""
This solves the flow ODE and returns either flux through membrane as a
function of position (Xspan and J),
or the concentration profile (Xspan and CC)
It assumes steady state. I think I can use this and then convolute if I
need time dependence.
"""
if verbose:
print("\n\nfunction 'solve_flow' at your service!\n")
if C0 == "uniform":
C0 = np.array([1] * N)
# nothing else really makes sense, since c0 defines a scale.
if np.size(Xspan) == 2:
Xspan = np.linspace(Xspan[0], Xspan[1], 100)
dZ = 1 / N # N+1? N-1? I can never remember what's most 'correct'
pars = {
"alpha": alpha,
"1/beta": 1 / beta,
"Cg": Cg,
"dZ": dZ,
"1/dZ**2": 1 / dZ ** 2,
}
CC = odeint(flow_diffusion_ode, C0, Xspan, args=(pars,))
# 16J18_02h10: this crashes the kernel, I don't know why... 18h56 found it! c before t!
J = (
CC[:, 1] - CC[:, 0]
) * N # (positive) J = dC/dZ with dC = C0 - C_ and dX = 1 / N
# J is a function of X.
if verbose:
print("solution shape: " + str(np.shape(CC)))
print("\nfunction 'solve_flow' finished!\n\n")
if flux:
return Xspan, J
else:
return Xspan, CC
def flow_operator(
mode="steady", # in steady mode it's not really an operator.
system="chip", #
A_el=0.196e-4,
A=0.196e-4,
q0=1.5e15 / Chem.NA,
Temp=None, # universal pars
L=100e-6,
w=5e-3,
w2=5e-3,
F=1e-9, # geometry pars #w and w2 changed from 0.5e-3 to 5e-3 on 17K28.
c0=None,
j0=None,
j_el=-1, # inlet flow pars
p_m=1e5, # chip pars
phi=0.5,
dp=20e-9,
Lp=100e-6, # DEMS pars
mol="H2",
D=None,
kH=None,
n_el=None,
M=None, # mol pars
p_gas=0,
normalize=False,
unit="pmol/s",
flux_direction="out",
N=100, # solver pars
verbose=True,
):
"""
Follows the recipe in Scott's MSc, page 61, for calculating collection
efficiency in a flow system by solving a differential equation. This
can be used to compare different types of EC-MS
"""
if verbose:
print("\n\nfunction 'flow_operator' at your service!\n")
if type(mol) is str:
mol = Molecule(mol)
if Temp is not None:
mol.set_temperature(Temp)
else:
Temp = 298.15 # standard temperature in K
if D is None:
D = mol.D # diffusion constant in electrolyte / [m^2/s]
if kH is None:
kH = mol.kH # dimensionless henry's law constant
if M is None:
M = Chem.Mass(mol.name) * 1e-3 # molar mass / [kg/mol]
# print(c0)
if n_el is None and c0 is None and not normalize:
n_el = mol.n_el
if c0 is None:
if j0 is None:
j0 = j_el * A_el / (n_el * Chem.Far)
c0 = j0 / F
# concentration is production rate over flow rate: [mol/s] / [m^3/s] = [mol/m^3]
if system == "chip":
h = kH * Chem.R * Temp * q0 / (p_m * A) # Scott's MSc, page 50
elif system == "DEMS":
h = (
kH * phi * dp / (3 * Lp) * np.sqrt(8 / np.pi * Chem.R * Temp / M)
) # Scott's MSc, page 49
v0 = F / (L * w2)
alpha = h * L / D
beta = v0 * L ** 2 / (D * w) # There is a mistake in Scott's MSc page60!
# There I got the non-dimensionalization wrong, and wrote beta = v0*w**2/(D*L)
# in fact, beta = v0*L**2/(D*w)
Xspan = np.linspace(0, 1, 1000)
X, CC = solve_flow(alpha=alpha, beta=beta, Xspan=Xspan, N=N, verbose=verbose)
x = X * w
cc = CC * c0
j = cc[:, 0] * h # mol/m^3 * m/s = mol/(m^2*s)
Z = np.linspace(0, 1, N)
z = Z * L
eta_m = 1 - np.trapz(CC[-1, :], Z) # Scott's MSc, page 61
eta_m_check = (
w2 * np.trapz(j, x) / (c0 * F)
) # m*mol/(m^2*s)*m / ((mol/m^3)*m^3/s) = 1
qm = c0 * F * eta_m
if verbose:
print("portion not escaped = " + str(eta_m))
print("portion collected = " + str(eta_m_check) + "\n\n")
if system == "chip":
eta_v = 1
elif system == "DEMS":
p_w = Chem.p_vap(mol="H2O", T=Temp, unit="Pa")
M_w = Chem.Mass("H2O") * 1e-3
j_w = (
A
* p_w
/ (Chem.R * Temp)
* phi
* dp
/ (3 * Lp)
* np.sqrt(8 / np.pi * Chem.R * Temp / M_w)
)
eta_v = q0 / (qm + j_w) # fixed 17H10
eta = eta_m * eta_v
if verbose:
print(
"q0 = "
+ str(q0)
+ " mol/s, h = "
+ str(h)
+ " m/s, alpha = "
+ str(alpha)
+ ", j0 = "
+ str(j0)
+ " mol/s, max(c)/c0 = "
+ str(np.max(np.max(cc)) / c0)
+ ", kH = "
+ str(kH)
+ ", eta = "
+ str(eta)
+ ", mol = "
+ str(mol.name)
+ ", system = "
+ str(system)
+ ""
+ ", beta = "
+ str(beta)
+ ""
+ ", v0 = "
+ str(v0)
)
if verbose:
print("\nfunction 'flow_operator' at finished!\n\n")
results = {
"x": x,
"z": z,
"j": j,
"cc": cc,
"eta_m": eta_m,
"eta_v": eta_v,
"eta": eta,
"dimensions": "xz",
}
return results
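# Hedged usage sketch: compare the overall collection efficiency eta of
# the two supported geometries for H2 with the default parameters:
#   >>> for system in ("chip", "DEMS"):
#   ...     res = flow_operator(system=system, mol="H2", verbose=False)
#   ...     print(system, res["eta"])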
def delta_response(
L=100e-6,
q0=1e15 / Chem.NA,
mol="H2",
D=None,
kH=None,
n_el=None,
A=0.196e-4,
p_m=1e5,
Temp=298.15,
verbose=True,
tspan="auto",
N_t=1000,
):
"""
Returns the output when a stagnant_operator operates on a delta function.
There's probably a much smarter way to do it, but for now I'll just do
triangle pulse of width tau/250
"""
if D is None or kH is None:
if type(mol) is str:
mol = Molecule(mol)
if D is None:
D = mol.D
if kH is None:
kH = mol.kH
if verbose:
print("calculating a delta function response.")
h = kH * Chem.R * Temp * q0 / (p_m * A)  # mass transfer coefficient
tau = L / h + L ** 2 / (2 * D)
if tspan == "auto":
tspan = [0, 4 * tau]
t = np.linspace(tspan[0], tspan[1], N_t)
j = np.append(np.array([1]), np.zeros(N_t - 1))
tj = [t, j]
print(type(tj))  # debugging
return stagnant_operator(
tj=tj,
normalize=True,
tspan=tspan,
L=L,
A=A,
q0=q0,
p_m=p_m,
D=D,
kH=kH,
n_el=n_el,
Temp=Temp,
verbose=True,
plot_type=None,
)
if __name__ == "__main__":
pass
##
|
ScottSoren/EC_MS | src/EC_MS/patches.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 23:10:02 2020
@author: scott
"""
def fix_timecols(data):
"""Update and fix old errors in the timecols item of old data dictionaries"""
if "timecols" not in data:
return data
new_timecols = {}
# some old pickles have timecols as tuples:
if not isinstance(data["timecols"], dict):
data["timecols"] = dict(data["timecols"])
for col, tcol in data["timecols"].items():
if col.endswith("-x") and tcol.endswith("-y"):
new_timecols[tcol] = col
else:
new_timecols[col] = tcol
data["timecols"] = new_timecols
return data
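# Minimal sketch of the fix on an old-style dictionary where a
# (timecol, col) pair was stored the wrong way around:
#   >>> data = {"timecols": {"M32-x": "M32-y"}}
#   >>> fix_timecols(data)["timecols"]
#   {'M32-y': 'M32-x'}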
|
ScottSoren/EC_MS | src/EC_MS/parsing_tools.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 15:45:37 2020
@author: scott
"""
import re
import time, datetime
import numpy as np
import os
"""
"""
float_match = "[-]?\d+[\.]?\d*(e[-]?\d+)?" # matches floats like '-3.5e4' or '7' or '245.13' or '1e-15'
# note, no white space included on the ends! Seems to work fine.
timestamp_match = "([0-9]{2}[:_.]){2}[0-9]{2}" # matches timestamps like '14:23:01' or '15_42_15' or '09.56.25'
date_match = "([0-9]{2}[/\-\.]){2}[0-9]{4}" # matches dates like '01/15/2018' or '09-07-2016' or '04.20.2019'
date_match_2 = (
"[0-9]{4}([/-][0-9]{2}){2}" # matches dates like '2018/01/15' or '2018-09-07'
)
mass_match = "^M[0-9]+(-x)?(-y)?$"
def is_time(col, data=None, verbose=False):
"""
determines whether a column header is a time variable (returns a boolean)
"""
if verbose:
print("\nfunction 'is_time' checking '" + col + "'!")
timecol = get_timecol(col, data, verbose=verbose)
try:
return col.startswith(timecol) # so that a * at the end doesn't mess it up
except TypeError as e:
print(e)
print(f"\twith col = {col}, timecol = {timecol}")
if hasattr(data, "timecols"):
print(f"data.timecols = {data.timecols}")
elif "timecols" in data:
print(f"data['timecols'] = {data['timecols']}")
raise
def is_MS_data(col):
if re.search(r"^M[0-9]+-[xy]", col):
return True
return False
def is_EC_data(col):
from .EC import EC_cols_0
if col in EC_cols_0:
# this list should be extended as needed
return True
if col is None:
return False
if col[-1] == "*" and col[:-1] in EC_cols_0:
return True
return False
def is_mass(col):
if re.search(mass_match, col):
return True
return False
def is_Xray_data(col):
if is_EC_data(col):
return False
if is_MS_data(col):
return False
return True
def get_type(col, dataset=None):
if isinstance(dataset, dict):
# print("WARNING!!! Dataset dictionaries are no longer supported!!")
# ^ way too verbose, not a useful message for user.
if "col_types" in dataset and col in dataset["col_types"]:
return dataset["col_types"][col]
elif "data_type" in dataset and dataset["data_type"] in [
"EC",
"MS",
"SI",
]:
return dataset["data_type"]
elif dataset is not None:
if hasattr(dataset, "col_types") and col in dataset["col_types"]:
return dataset["col_types"][col]
elif hasattr(dataset, "data_type") and dataset["data_type"] in [
"EC",
"MS",
"SI",
"SPEC",
]:
return dataset["data_type"]
if col is None:
    return None
if is_EC_data(col):
    return "EC"
if is_MS_data(col):
    return "MS"
if col[-2:] in ["-x", "-y"]:
    return "cinfdata"  # it's cinfdata but not from a mass channel
print("WARNING: " + col + " is not recognized. Returning 'unknown'.\n ")
return "unknown"
def get_cols_for_mass(mass, dataset=None):
"""
Eventually this might make the 'rename_<format>_cols' functions obsolete.
"""
if dataset is None:
xcol, ycol = mass + "-x", mass + "-y"
elif "mass_cols" in dataset:
xcol, ycol = dataset["mass_cols"][mass]
else:
ycol = mass + "-y"
xcol = get_timecol(ycol, dataset)
return xcol, ycol
def get_timecol(col=None, dataset=None, data_type=None, verbose=False):
# print('getting timecol for ' + col + '. datset = ' + (str(dataset)+ 20*' ')[:20]) # debugging
if (
dataset is not None
and (
hasattr(dataset, "timecols")
or (isinstance(dataset, dict) and "timecols" in dataset)
)
and col in dataset["timecols"]
):
# funky: the expression ("timecols" in dataset) looks for key 0
return dataset["timecols"][col]
if data_type is None:
data_type = get_type(col, dataset)
if data_type == "EC":
return "time/s"
elif data_type == "MS":
if col is None:
return (
"M32-x" # probably the least likely timecol to be missing from MS data
)
else:
return col[:-2] + "-x"
elif data_type == "SI":
return col.split(" - ")[0] + " - Time [s]"
elif data_type == "RGA":
return "Time(s)"
elif data_type == "Xray":
return "t" # to be refined later...
elif data_type == "CHI":
if dataset is not None and "Time/sec" in dataset:
return "Time/sec"
else:
return "Time/s"
elif col[-2:] in ["-y", "-x"]: # a timecol is its own timecol
return col[:-2] + "-x" # for any data downloaded from cinfdata
else:
print(
"couldn't get a timecol for " + str(col) + ". data_type=" + str(data_type)
)
return None
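# Quick sketch of the default resolution rules when no dataset is given
# (assuming 'Ewe/V' is among the known EC columns in EC.EC_cols_0):
#   >>> get_timecol("M32-y")  # MS data: each mass has its own timecol
#   'M32-x'
#   >>> get_timecol("Ewe/V")  # EC data shares one timecol
#   'time/s'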
def timestamp_to_seconds(timestamp):
"""
seconds since midnight derived from timestamp hh:mm:ss
"""
h = int(timestamp[0:2])
m = int(timestamp[3:5])
s = int(timestamp[6:8])
seconds = 60 ** 2 * h + 60 * m + s
return seconds
def seconds_to_timestamp(seconds):
"""
timestamp hh:mm:ss derived from seconds since midnight
"""
h = int(seconds / 60 ** 2)
seconds = seconds - 60 ** 2 * h
m = int(seconds / 60)
seconds = seconds - 60 * m
s = int(seconds)
timestamp = "{0:2d}:{1:2d}:{2:2d}".format(h, m, s)
timestamp = timestamp.replace(" ", "0")
return timestamp
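# Round-trip sketch of the two helpers above:
#   >>> timestamp_to_seconds("14:23:01")
#   51781
#   >>> seconds_to_timestamp(51781)
#   '14:23:01'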
def numerize(data):
for col in data["data_cols"]: # numerize!
data[col] = np.array(data[col])
def get_empty_set(cols, **kwargs):
# get the colheaders and make space for data
# cols should be a set
data = {}
data.update(kwargs)
for col in cols:
data[col] = []
data["data_cols"] = cols
return data
def parse_timezone(tz=None):
"""
Gets a timezone object from a timezone string. Includes some abbreviations
useful for Scott. If the input is not a string, it is returned as is.
"""
abbreviations = {
"CA": "US/Pacific",
"DK": "Europe/Copenhagen",
}
if tz is not None:
import pytz # all three seem necessary for dealing with timezones.
if tz in abbreviations:
return pytz.timezone(abbreviations[tz])
elif type(tz) is str:
return pytz.timezone(tz)
return tz
def parse_date(line):
# ^ mm/dd/yyyy, as EC lab does
# older EC-Lab seems to have dashes in date, and newer has slashes.
# Both seem to save month before day, regardless of where the data was taken or .mpt exported.
d1 = re.search(date_match, line)
if d1:
date1 = d1.group()
yyyy, mm, dd = date1[-4:], date1[:2], date1[3:5]
date = yyyy + "/" + mm + "/" + dd
return date
# ^ yyyy/mm/dd, as cinfdata does
d2 = re.search(date_match_2, line)
if d2:
date2 = d2.group()
yyyy, mm, dd = date2[:4], date2[5:7], date2[-2:]
date = yyyy + "/" + mm + "/" + dd
return date
# shit, some dates are written in long form. This'll be a tough RE exercise. Here we go!
# I want to match dates like 'Apr. 12, 2019' or 'August 1, 2019'
month_names = {
"01": "January",
"02": "February",
"03": "March",
"04": "April",
"05": "May",
"06": "June",
"07": "July",
"08": "August",
"09": "September",
"10": "October",
"11": "November",
"12": "December",
}
month_match = (
"("
+ "".join(list([v[:3] + "|" for v in month_names.values()]))[:-1]
+ ")"
+ "[a-z]*(\.)?"
)
date_match_3 = month_match + " [0-9]+, [0-9]{4}"
d3 = re.search(date_match_3, line)
if d3:
date3 = d3.group()
month = re.search(month_match, date3).group()
mm = next(key for key, value in month_names.items() if value[:3] == month[:3])
yyyy = date3[-4:]
dd = re.search("[0-9]+,", date3).group()[:-1]
date = yyyy + "/" + mm + "/" + dd
return date
print("can't find date in line '" + line + "'. parse_date() is returning None.")
return None
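# Hedged sketch of the three date formats handled above (the example
# lines are made up):
#   >>> parse_date("Acquisition started : 01/15/2018 14:23:01")
#   '2018/01/15'
#   >>> parse_date("exported 2018-09-07 from cinfdata")
#   '2018/09/07'
#   >>> parse_date("Exported Apr. 12, 2019")
#   '2019/04/12'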
def timestring_to_epoch_time(
timestring, date=None, tz=None, verbose=True, form="%Y/%m/%d %H:%M:%S", out="tstamp"
):
"""
A way to convert a number of timestrings read from my data into a standard-formatted
date and time, and then to an epoch (unix) time.
tz is the Timezone, which is only strictly necessary when synchronizing
data taken at different places or across dst at a place with different dst
implementation than the local (dst is stupid!).
If timezone is a number, it is interpreted as the offset from GMT of the
data.
The epoch time is referred to here and elsewhere as tstamp.
"""
if timestring and not isinstance(timestring, str):
timestring = str(timestring)
if tz is not None:
import pytz # all three seem necessary for dealing with timezones.
tz = parse_timezone(tz)
if verbose:
print("getting epoch time given a timestamp local to " + str(tz))
epoch = pytz.utc.localize(datetime.datetime.utcfromtimestamp(0))
if timestring == "now":
return time.time()
elif type(timestring) is time.struct_time:
if verbose:
print(
"'timestring_to_epoch_time' revieved a time.struct_time object. "
+ "Returning the corresponding epoch time."
)
return time.mktime(timestring)
elif type(timestring) is not str:
if verbose:
print(
"WARNING: 'timestamp_to_unix_time' didn't receive a string. "
+ "Received: "
+ str(timestring)
+ " . Returning the argument."
)
return timestring
if len(timestring) > 8:
try:
# print(timestring) # debugging
timestamp = re.search(timestamp_match, timestring).group()
except AttributeError:
if verbose:
print(
f"WARNING: I got no clue what you're talking 'bout when you say {timestamp}."
+ f"It didn't match {timestamp_match}. Assuming you want 00:00:00"
)
timestamp = "00:00:00"
else:
hh = int(timestamp[0:2])
if "PM" in timestring and not hh == 12:
# Holy fuck the whole AM/PM thing is stupid
timestamp = str(hh + 12) + timestamp[2:]
elif "AM" in timestring and hh == 12:
timestamp = "00" + timestamp[2:]
if verbose:
print("found timestamp = " + timestamp)
else:
timestamp = timestring
if "_" in timestamp:
timestamp = timestamp.replace(
"_", ":"
) # otherwise time.strptime below crashes.
if "." in timestamp:
timestamp = timestamp.replace(
".", ":"
) # otherwise time.strptime below crashes.
if date is None:
if verbose:
print(
"'timestring_to_epoch_time' is assuming"
+ " the date is in the timestring."
)
date = parse_date(timestring)
if date is None:
if verbose:
print(
"couldn't figure out the date for " + timestring + ". Assuming today."
)
date = "today"
if date == "today":
date = time.strftime("%Y/%m/%d")
# print('timestring = ' + timestring) # debugging
if tz is None:
if "-" in date:
date = date.replace("-", "/") # 18D08
struct = time.strptime(date + " " + timestamp, form)
tstamp = time.mktime(struct)
else:
dt_naive = datetime.datetime.strptime(date + " " + timestamp, form)
dt = tz.localize(dt_naive)
tstamp = (dt - epoch).total_seconds()
if out == "all":
return tstamp, date, timestamp
return tstamp
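# Hedged usage sketch: the timestring contains both date and time, and
# with no timezone given, local time is used in both directions:
#   >>> ts = timestring_to_epoch_time("2019/04/12 14:23:01", verbose=False)
#   >>> epoch_time_to_timestamp(ts, verbose=False)
#   '14:23:01'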
def epoch_time_to_timestamp(tstamp, tz=None, verbose=True):
"""
tz is the Timezone, which is only strictly necessary when synchronizing
data taken at different places or across dst at a place with different dst
implementation than the local (dst is stupid!).
If timezone is a number, it is interpreted as the offset from GMT of the
data in hours (+1 for Denmark, -8 for California)
"""
if tz is None:
struct = time.localtime(tstamp)
else:
tz = parse_timezone(tz)
if verbose:
print("getting the timestamp local to " + str(tz) + " from epoch time.")
dt_utc = datetime.datetime.utcfromtimestamp(tstamp)
dt_tz = tz.fromutc(dt_utc)
struct = dt_tz.timetuple()
hh = str(struct.tm_hour)
if len(hh) == 1:
hh = "0" + hh
mm = str(struct.tm_min)
if len(mm) == 1:
    mm = "0" + mm
ss = str(struct.tm_sec)
if len(ss) == 1:
    ss = "0" + ss
timestamp = hh + ":" + mm + ":" + ss
return timestamp
def timetag_to_timestamp(filename):
"""
Converts a time tag of format _<hh>h<mm>m<ss>_ to timestamp of format
<hh>:<mm>:<ss>, which is what synchronize reads (I should maybe change
synchronize to work with the unix epoch timestamp instead...)
The time tag is something we write in the file names to give an approximate
sense of when a measurement is started. It can be used as the measurement
start time if there really is no better way.
I can't believe SPEC doesn't save time. I will pressure them to fix this.
"""
hm_match = re.search(r"_[0-9]{2}h[0-9]{2}", filename)
hm_str = hm_match.group()
hh = hm_str[1:3]
mm = hm_str[-2:]
ss_match = re.search(r"_[0-9]{2}h[0-9]{2}m[0-9]{2}", filename)
if ss_match is None:
ss = "00"
else:
ss = ss_match.group()[-2:]
return hh + ":" + mm + ":" + ss
def get_creation_timestamp(filepath):
"""
Returns creation timestamp of a file in the format that
combining.syncrhonize reads.
The timestamp is local time, not absolute time.
We need to move to epoch time everywhere!!!
"""
t = get_creation_time(filepath)
struct = time.localtime(t)
hh = str(struct.tm_hour)
mm = str(struct.tm_min)
ss = str(struct.tm_sec)
return hh + ":" + mm + ":" + ss
def get_creation_time(filepath, verbose=True):
"""
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
"""
import platform
if platform.system() == "Windows":
tstamp = os.path.getctime(filepath)
if verbose:
print("In Windows. Using os.path.getctime('" + filepath + "') as tstamp.")
else:
stat = os.stat(filepath)
try:
tstamp = stat.st_birthtime
if verbose:
print(
"In linux. Using os.stat('"
+ filepath
+ "').st_birthtime as tstamp."
)
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
tstamp = stat.st_mtime
if verbose:
print(
"Couldn't get creation time! Returing modified time.\n"
+ "In linux. Using os.stat('"
+ filepath
+ "').st_mtime as tstamp."
)
return tstamp
def timestamp_from_file(filepath, verbose=True):
a = re.search("[0-9]{2}h[0-9]{2}", filepath)
if a is None:
if verbose:
print("trying to read creation time")
timestamp = get_creation_timestamp(filepath)
else:
if verbose:
print("getting timestamp from filename " + filepath)
timestamp = timetag_to_timestamp(filepath)
return timestamp
def remove_comments(lines):
new_lines = []
for line in lines:
if "#" in line:
line = re.search(r"^.*\#", line).group()[:-1]
if re.search(r"\w", line): # to drop lines that only have comments
new_lines += [line]
else:
new_lines += [line] # I don't want to get rid of empty lines here
return new_lines
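# Quick sketch: trailing comments are stripped, comment-only lines are
# dropped, and empty lines are kept:
#   >>> remove_comments(["alpha = 1  # system parameter", "# only a comment", ""])
#   ['alpha = 1  ', '']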
|
ScottSoren/EC_MS | src/EC_MS/dataset.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 18:12:03 2020
@author: scott
"""
import os, re, pickle
import numpy as np
from types import FunctionType
from functools import wraps
from matplotlib import pyplot as plt
from matplotlib import cm as colormap
from .EC import sync_metadata, make_selector, select_cycles
from .Data_Importing import load_from_file
from .Combining import synchronize, cut_dataset, sort_time, get_timecol, timeshift
from .Plotting import plot_experiment, plot_vs_potential, plot_flux, plot_signal
from .EC import correct_ohmic_drop, CV_difference, get_capacitance
from .Quantification import get_current, get_signal, get_potential
from .Calibration import calibration_curve, point_calibration, chip_calibration
def get_data_from_file(
file_name, data_type=None, verbose=True
): # assumes you're already in the folder
if type(file_name) is dict:
return file_name
# ^ so that the dataset can be initiated with data already in a dictionary
if re.search(".pkl$", str(file_name)):
with open(file_name, "rb") as f:
return pickle.load(f)
elif data_type is not None:
return load_from_file(file_name, data_type=data_type, verbose=verbose)
elif re.search(".mpt$", str(file_name)):
return load_from_file(file_name, data_type="EC", verbose=verbose)
elif re.search(".tsv$", str(file_name)):
return load_from_file(file_name, data_type="SI", verbose=verbose)
else:
print(
"WARNING: loading files of the type "
+ str(file_name)
+ " is not yet implemented in Dataset.__init__() !!!"
+ " Try specifying a data_type."
)
def with_update(method):
@wraps(method)
def method_with_update(self, *args, **kwargs):
ret = method(self, *args, **kwargs)
self.update_with_data()
return ret
return method_with_update
metadata_items = [
"data_type",
"mass_bgs",
]
class Dataset:
"""
This class implements the dataset. Its design is to be back-compatible
with the dataset dictionaries that were the main object in function-
centric EC_MS programming.
Dataset just serves as a wrapper around dataset dictionaries to
make the package seem object-oriented. It has __getitem__ and
__getattr__ methods that make (key,value) pairs and attributes somewhat
interchangable. It should be back-compatable, but I haven't really tested that yet.
It also binds some key EC_MS functions including various options for data importing in
data importing in __init__(); scutting (via cut_dataset or select_cycles) in cut();
plus several plotting (plot_experiment, plot_vs_potential) and getting (get_signal, get_current) functions
"""
def __init__(
self,
file_path=None,
folder=None,
tag=None,
data=None, # will get replaced if it's not initialized empty.
tstamp=None,
data_type=None,
file_type=None,
title=None,
verbose=True,
):
"""
Establishes the dataset by loading self.data
"""
self.type = "Dataset"
self.verbose = verbose
self.empty = False # will be set to True below if necessary
if folder is not None: # then go to the folder and remember how to get back
back = os.getcwd()
os.chdir(folder)
if type(file_path) is dict and "data_cols" in file_path:
# ^ user can initiate the dataset with a data dictionary
self.data = file_path
elif type(file_path) in (list, tuple):
# ^ user can initiate the dataset with a list of data files
datas = []
for file in file_path:
data = get_data_from_file(file, verbose=verbose, data_type=data_type)
datas += [data]
self.data = synchronize(datas, verbose=verbose)
elif file_path is not None:
self.folder, self.file = os.path.split(file_path)
if self.verbose:
print(f"loading from {file_path}") # debugging
# ^ ...or just one data file
self.data = get_data_from_file(file_path, data_type=data_type)
elif folder is not None:
# ^ ...or a bunch of files in a folder
print("Importing from a folder!!!")
files = os.listdir() # note, we are already in the folder
if tag is not None:
files = [f for f in files if re.search("^" + tag, f)]
if file_type is not None:
files = [f for f in files if re.search(file_type + "$", f)]
# print(files) # debugging
datas = []
for file in files:
data = get_data_from_file(file, verbose=verbose)
datas += [data]
self.data = synchronize(datas, verbose=verbose)
sort_time(self.data)
else:
if data is None:
data = {"tstamp": tstamp, "data_type": data_type}
self.data = data
if folder is not None: # time to go home.
os.chdir(back)
if not hasattr(self, "data"):
self.data = data
if not tstamp and data and "tstamp" in data:
tstamp = data["tstamp"]
self.tstamp = tstamp
if not "data_cols" in self.data:
print(
"Warning!!! Please specify file_name and/or folder."
+ " Returning an empty dataset"
)
self.empty = True
self.update_with_data()
if data_type:
self.data["data_type"] = data_type
if title:
self.data["title"] = data_type
def update_with_data(self):
if not hasattr(self, "data"):
self.data = {"data_cols": set([]), "timecols": {}}
return
if "data_cols" not in self.data:
self.data["data_cols"] = set([])
if "timecols" not in self.data:
self.data["timecols"] = {}
for key, value in self.data.items():
if key not in self.data["data_cols"]:
try:
setattr(self, key, value)
except Exception:
# not sure what the error is yet.
raise
def __getattr__(self, attr):
"""
Makes it so that you can get items in self.data as if they were
attributes to the Dataset.
"""
if attr == "t":
return self.data[self.t_str]
elif attr == "v":
return self.data[self.V_str]
elif attr == "j":
return self.data[self.J_str]
elif attr in ["data", "verbose"]:
# print('hm...' + attr) # debugging
raise AttributeError("Dataset has no attribute " + attr)
# print('getting attribute ' + attr + ' from self.data') # debugging
try:
# a = b # debugging
return self.data[attr]
except KeyError:
raise AttributeError("Dataset has no attribute " + attr)
def __getitem__(self, key):
"""
Makes it so that you can look up attributes to self as if they were
items in a dictionary.
Attributes pre-empt items in self.data.
"""
try:
return getattr(self, key)
except (AttributeError, TypeError):
raise KeyError(
f"Dataset has no attribute {key}"
+ f" and Dataset.data has no key {key}"
)
def __setitem__(self, key, value):
setattr(self, key, value)
self.data[key] = value
def __add__(self, dataset_2):
if dataset_2.empty:
print(
"WARNING!!!! Can't add an empty Dataset: Just returning the original Dataset"
)
return self
elif self.empty:
print("adding to an empty Dataset: Just returning the second Dataset")
return dataset_2
new_data = synchronize([self.data, dataset_2.data], override=True)
new_dataset = Dataset(new_data)
return new_dataset
def add_data_col(self, col, value, timecol=None, col_type=None):
self.data[col] = value
if not "data_cols" in self.data:
self.data["data_cols"] = set([])
self.data["data_cols"].add(col)
if timecol is not None:
if "timecols" not in self.data:
self.data["timecols"] = {}
self.data["timecols"][col] = timecol
if col_type is not None:
if "col_types" not in self.data:
self.data["col_types"] = {}
self.data["col_types"][col] = col_type
self.empty = False
def append_to_data_col(self, col, value, col_type=None):
"""
Handy thing: adds data to a col if it exists, otherwise creates the
col and puts it in self.data_cols
"""
if col in self.data_cols:
self.data[col] = np.append(self.data[col], value)
else:
self.add_data_col(col, value, col_type=col_type)
def save(self, file_name, data_type=None):
if data_type is None:
data = self.data
else:
data = {}
data_cols = set()
for key, value in self.data.items():
if key in self.data["data_cols"]:
if self.data["col_types"][key] == data_type:
data_cols.add(key)
data[key] = value
else:
continue
else:
data[key] = value
data["data_cols"] = data_cols
data["data_type"] = data_type
with open(file_name, "wb") as f:
pickle.dump(data, f)
# Binding existing functions. There is probably a much smarter way to do this...
@wraps(sync_metadata)
@with_update
def sync_metadata(self, *args, **kwargs):
# print('args = ' + str(args)) # debugging. proves that args[0] is self.
# print('kwargs = ' + str(kwargs)) # debugging
for key, value in kwargs.items():
setattr(self, key, value) # such that i writes to e.g. self.RE_vs_RHE
return sync_metadata(self.data, *args, **kwargs)
@wraps(make_selector)
@with_update
def make_selector(self, *args, **kwargs):
return make_selector(self.data, *args, **kwargs)
@wraps(correct_ohmic_drop)
@with_update
def correct_ohmic_drop(self, *args, **kwargs):
return correct_ohmic_drop(self.data, *args, **kwargs)
@wraps(sort_time)
def sort_time(self, *args, **kwargs):
return sort_time(self.data, *args, **kwargs)
@wraps(timeshift)
def timeshift(self, *args, **kwargs):
return timeshift(self.data, *args, **kwargs)
@wraps(plot_experiment)
def plot_experiment(self, *args, **kwargs):
return plot_experiment(self.data, *args, **kwargs)
@wraps(plot_flux)
def plot_flux(self, *args, **kwargs):
return plot_flux(self.data, *args, **kwargs)
@wraps(plot_signal)
def plot_signal(self, *args, **kwargs):
return plot_signal(self.data, *args, **kwargs)
@wraps(plot_vs_potential)
def plot_vs_potential(self, *args, **kwargs):
return plot_vs_potential(self.data, *args, **kwargs)
@wraps(get_current)
def get_current(self, *args, **kwargs):
return get_current(self.data, *args, **kwargs)
@wraps(get_signal)
def get_signal(self, *args, **kwargs):
return get_signal(self.data, *args, **kwargs)
@wraps(get_potential)
def get_potential(self, *args, **kwargs):
return get_potential(self.data, *args, **kwargs)
@wraps(get_timecol)
def get_timecol(self, col, **kwargs):
return get_timecol(col, dataset=self.data, **kwargs)
@wraps(calibration_curve)
def calibration_curve(self, **kwargs):
return calibration_curve(self.data, **kwargs)
@wraps(point_calibration)
def point_calibration(self, **kwargs):
return point_calibration(self.data, **kwargs)
@wraps(chip_calibration)
def chip_calibration(self, **kwargs):
return chip_calibration(self.data, **kwargs)
def get_flux(self, m, *args, **kwargs):
try:
return m.get_flux(self.data, *args, **kwargs)
except AttributeError:
print(
"WARNING!!! first argument to dataset.get_flux must be an object of class EC_MS.Molecule."
)
raise TypeError
# ... yes, there is! Just equate the function. If the getitem and getattr of
# Dataset work as well as I hope, the function won't notice it's getting
# the Dataset object as the first argument rather than the data dictionary.
# sync_metadata = sync_metadata
# make_selector = make_selector
# correct_ohmic_drop = correct_ohmic_drop
# plot_experiment = plot_experiment
# plot_vs_potential = plot_vs_potential
def normalize(self, *args, **kwargs):
return self.sync_metadata(*args, **kwargs)
def calibrate_EC(self, *args, **kwargs):
return self.sync_metadata(*args, **kwargs)
def set_background(self, t_bg=None, masses=None, mols=None, cols=None):
"""
TODO: if given mols, it sets a background in each of the given molecule
objects using mol.get_bg #ToDo, that should be mol.set_bg instead
if given masses, it calculates the background of each
and stores it in a dictionary, and SUBTRACTS IT FROM THE DATA!
TODO: don't subtract it from the data, but have get_signal read it.
"""
self.reset() # to avoid losing the ability to restore the original
# by subtracting a new background from background-subtracted data
if masses is None and mols is None and cols is None and t_bg is not None:
masses = "all"
if masses == "all":
masses = [
col[:-2]
for col in self.data_cols
if (col[0] == "M" and col[-2:] == "-y")
]
if self.verbose:
print("masses = " + str(masses)) # debugging
if hasattr(self, "mass_bgs"):
mass_bgs = self.mass_bgs
else:
mass_bgs = {}
if masses is not None:
for mass in masses:
if t_bg is not None:
x, y = self.get_signal(mass=mass, tspan=t_bg, unit="A")
y_bg = np.mean(y)
else:
x, y = self.get_signal(mass=mass, unit="A")
y_bg = min(y)
# print('subtracting background for mass ' + mass + '!!!')
mass_bgs[mass] = y_bg
self.data[mass + "-y"] -= y_bg
self.mass_bgs = mass_bgs
return mass_bgs
def reset(self):
"""
so far only implemented for masses.
"""
if hasattr(self, "mass_bgs"):
for mass, y_bg in self.mass_bgs.items():
print("adding background back onto " + mass) # debugging
self.data[mass + "-y"] += y_bg
def cut(self, tspan=None, t_edge=None, verbose=True, **kwargs):
"""Return a dataset containing part of the this dataset.
This can be done either with a tspan (uses EC_MS.cut_dataset) or
cycles (uses EC_MS.select_cycles).
The default behavior (no arguments) is that of cut_dataset, i.e.,
cut according to self.data["tspan"] buffered with t_edge=120
Args:
tspan: [t_start, t_finish] defining the part of the dataset to keep
t_edge: a buffer time to include before and after tspan
verbose: whether to print a lot of output to the terminal
**kwargs: If a key in ["cycle_number", "selector", "loop_number",
"file_number", "cycle", "sweep"] is given, call select_cycles
with that key as cycle_str. tspan and t_edge are ignored.
Additional key-word arguments passed on either to
cut_dataset or select_cycles
Returns a new dataset, cut in time as requested.
"""
for key in [
"cycle_number",
"selector",
"loop_number",
"file_number",
"cycle",
"sweep",
]:
# should add self.sel_str, but that would require major changes
if key in kwargs:
cycles = kwargs.pop(key)
new_data = select_cycles(
self.data, cycles=cycles, cycle_str=key, verbose=verbose, **kwargs,
)
break
else:
new_data = cut_dataset(self.data, tspan=tspan, t_edge=t_edge, **kwargs)
new_dataset = Dataset(new_data)
for attr in metadata_items:
if hasattr(self, attr):
setattr(new_dataset, attr, getattr(self, attr))
return new_dataset
def as_cv(self):
return CyclicVoltammagram(self)
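# Hedged usage sketch: a Dataset can be initiated directly from a data
# dictionary (anything containing a "data_cols" set), bypassing file loading:
#   >>> import numpy as np
#   >>> data = {"time/s": np.linspace(0, 10, 11),
#   ...         "Ewe/V": np.linspace(0.0, 1.0, 11),
#   ...         "data_cols": {"time/s", "Ewe/V"}, "tstamp": 0}
#   >>> ds = Dataset(data)
#   >>> print(ds["Ewe/V"][-1], ds.tstamp)
#   1.0 0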
class CyclicVoltammagram(Dataset):
"""
CyclicVoltammagram inherits from Dataset. It is easiest to initiate a CyclicVoltammagram
by cutting a Dataset. The main addition is that it has a mandatory default
selector called 'cycle', and indexing by this selects cycles.
The default plotting function plot() is plot_vs_potential.
It binds CV_difference(). It also has a few brand new functions including
redefine_cycle(), which lets you say where CVs start,
plot_all(), which plots CVs with a cmap, and average(), which averages cycles.
"""
def __init__(self, *args, verbose=True, **kwargs):
self.type = "Cyclic Voltammagram"
self.verbose = verbose
if "dataset" in kwargs:
dataset = kwargs.pop("dataset")
else:
dataset = args[0] # 'tuple' object has no attribute 'pop' :(
if len(args) > 0:
args = args[1:]
else:
args = []
if type(dataset) is dict:
dataset = Dataset(dataset)
if hasattr(dataset, "type"):
if dataset.type in ["Dataset", "Cyclic Voltammagram"]:
if len(args) > 0 or len(kwargs) > 0:
kwargs.update(verbose=False)
dataset = dataset.cut(*args, **kwargs)
data = dataset.data
for attr in metadata_items:
if hasattr(dataset, attr):
setattr(self, attr, getattr(dataset, attr))
else:
print(
"WARNING!!! CyclicVoltammagram.__init__ doesn't know "
+ "what "
+ str(dataset)
+ " is!"
)
data = {}
self.data = data
self.update_with_data()
self.redefine_cycle()
else:
dataset = Dataset(dataset, *args, **kwargs)
self.__init__(dataset)
def __getitem__(self, key):
"""
Makes it so that you can look up attributes to self as if they were
items in a dictionary.
Attributes pre-empt items in self.data.
"""
if type(key) is slice:
start, stop, step = key.start, key.stop, key.step
if step is None:
step = 1
key = list(range(start, stop, step))
if type(key) in [int, list]:
if type(key) is list and not all([type(i) is int for i in key]):
print("can't get an item of type list unless all elements are int")
print(f"you tried to get key = {key}.")
raise AttributeError
return CyclicVoltammagram(
self.cut(cycle=key, t_zero="start", verbose=False), verbose=False
)
try:
return getattr(self, key)
except AttributeError:
raise KeyError(
"Dataset has no attribute "
+ key
+ " and Dataset.data has no key "
+ key
)
def __len__(self):
return len(set(self.cycle))
def redefine_cycle(self, V=None, redox=None):
"""
Changes self.data['cycle'] to count each time the calibrated potential
passes V in the direction specified by redox (1 for anodic, 0 for cathodic)
"""
if V is None:
try:
selector = self[self["sel_str"]]
except KeyError:
sel_str = self.make_selector()
# print(self.data.keys()) # debugging
selector = self[sel_str]
cycle = selector - min(selector)
else:
cycle = np.zeros(self.t.shape)
c = 0
n = 0
N = len(self.t)
v = self.v
if redox in [0, -1, "red", "reduction"]:
# easiest way to reverse directions is to use the same > < operators
# but negate the arguments
V = -V
v = -v
while n < N:
mask_behind = v[n:] < V
if not True in mask_behind:
break
else:
n += (
np.argmax(mask_behind) + 5
) # have to be below V for 5 datapoints
# print('point number on way up: ' + str(n)) # debugging
mask_in_front = v[n:] > V
if not True in mask_in_front:
break
else:
n += np.argmax(mask_in_front)
c += 1 # and then when it crosses to above V again, we register a cycle!
cycle[n:] = c # and subsequent points increase in cycle number
n += +5 # have to be above V for 5 datapoints
# print('point number on way down: ' + str(n)) # debugging
self.add_data_col("cycle", cycle, col_type="EC")
self.data["sel_str"] = "cycle"
def get_sweeps(self, min_sweep_points=10, scan_rate_cutoff=1):
try:
return self.data["sweep_types"]
except KeyError:
return self.make_sweeps(
min_sweep_points=min_sweep_points, scan_rate_cutoff=scan_rate_cutoff
)
def make_sweeps(self, min_sweep_points=10, scan_rate_cutoff=1):
"""
figures out when anodic (sweep_type=1) and cathodic (sweep_type=0) sweeps
and potential holds (sweep_type=None) occur in the data, based on the
scan rate.
min_sweep_points is the resolution in EC points.
scan_rate_cutoff is the minimum absolute scan rate in mV/s needed to be
considered an anodic or cathodic sweep.
"""
print("\n\nfunction CyclicVoltammagram.make_sweeps at your service!\n")
sweep_types = {}  # 1 for anodic, 0 for cathodic, None for holding still
sweep_index_to_sweep_type = {
    0: 1,  # the_masks[0] is the anodic mask
    1: 0,  # the_masks[1] is the cathodic mask
    2: None,  # the_masks[2] is the hold mask
}  # but we'll use a list in the grind
sweep = np.zeros(self.t.shape)
scan_rate = self.get_scan_rate(min_sweep_points=min_sweep_points)
cat_mask = scan_rate < -scan_rate_cutoff
an_mask = scan_rate > scan_rate_cutoff
hold_mask = abs(scan_rate) < scan_rate_cutoff
the_masks = [an_mask, cat_mask, hold_mask]
for mask in the_masks:
mask[
-2
] = False # because np.argmin(mask)=0 if mask is True all the time, giving problems
mask[
-1
] = True # because np.argmax(mask)=0 if mask is False all the time, giving problems
# print('the_masks:\n' + str(the_masks)) # debugging
N = len(self.t)
i_start = 0
i_finish = 0
n_sweep = 0
the_next_starts = [np.argmax(mask) for mask in the_masks]
sweep_index = np.argmin(the_next_starts)
while i_start < N - 1:
# print('\n\n') # debugging
# print('the next starts = ' + str(the_next_starts)) # debugging
I_out = np.argmin(the_masks[sweep_index][i_finish:])
# print(the_masks[sweep_index][i_finish:]) # debugging
i_start = i_finish + I_out + min_sweep_points
# can't start a new sweep until you've been out of the current sweep for at least min_sweep_points
try:
I_in_again = np.argmax(the_masks[sweep_index][i_start:])
except ValueError:
the_next_starts[sweep_index] = N
else:
# print(the_masks[sweep_index][i_start:]) # debugging
# ^ check how long until the next sweep of that type starts
the_next_starts[sweep_index] = i_start + I_in_again
# ^ and add it.
next_sweep_index = np.argmin(the_next_starts)
i_finish = the_next_starts[next_sweep_index]
# print('I_out = ' + str(I_out) + ', I_in_again = ' + str(I_in_again)) # debugging
# print('i_start = ' + str(i_start) + ', i_finish = ' + str(i_finish)) # debugging
# print('sweep index = ' + str(sweep_index) + ', the_masks[sweep_index] = ' + str(the_masks[sweep_index]))
# if n_sweep > 10: break # debugging
if not next_sweep_index == sweep_index:
sweep_index = next_sweep_index
sweep[i_finish:] += 1
sweep_types[n_sweep] = sweep_index_to_sweep_type[sweep_index]
n_sweep += 1
self.add_data_col("sweep", sweep, "EC")
self.data["sweep_types"] = sweep_types
print("\nfunction CyclicVoltammagram.make_sweeps finished!\n\n")
return self.data["sweep_types"]
def get_scan_rate(self, min_sweep_points=10, tspan=None, cycle=None):
"""
returns scan rate in mV/s. If a tspan or cycle is given, it returns
the average absolute scan rate for that time interval or cycle.
Otherwise it returns a vector.
"""
try:
scan_rate = self.data["scan_rate"]
except KeyError:
scan_rate = self.make_scan_rate(min_sweep_points=min_sweep_points)
if cycle is not None:
return np.mean(np.abs(self[cycle].scan_rate))
if tspan is not None:
t, scan_rate = self.t, self.scan_rate
mask = np.logical_and(tspan[0] < t, t < tspan[-1])
return np.mean(scan_rate[mask])
return self.scan_rate
def get_capacitance(self, V_DL=None, V_str=None, J_str=None, t_i=None, out=None):
"""Return capacitance in F/cm^2 (geometric)"""
return get_capacitance(
self.data, V_DL=V_DL, V_str=V_str, J_str=J_str, t_i=t_i, out=out
)
def make_scan_rate(self, min_sweep_points=10):
"""
calculates scan rate in mV/s - negative for cathodic, positive for anodic.
min_sweep_points is a type of resolution in EC points.
"""
print("\n\nfunction CyclicVoltammagram.make_scan_rate at your service!\n")
v = self.v
t = self.t
# the scan rate is dV/dt. This is a numerical calculation of dV/dt:
v_behind = np.append(np.tile(v[0], min_sweep_points), v[:-min_sweep_points])
v_ahead = np.append(v[min_sweep_points:], np.tile(v[-1], min_sweep_points))
t_behind = np.append(np.tile(t[0], min_sweep_points), t[:-min_sweep_points])
t_ahead = np.append(t[min_sweep_points:], np.tile(t[-1], min_sweep_points))
scan_rate_middle = (v_ahead - v_behind) / (t_ahead - t_behind) * 1e3
# ^ this is "softened" at the anodic and cathodic turns.
# We can "sharpen" it by selectively looking ahead and behind:
scan_rate_behind = (v - v_behind) / (t - t_behind) * 1e3
scan_rate_ahead = (v_ahead - v) / (t_ahead - t) * 1e3
        # but these give problems right at the beginning and end, so zero them there:
scan_rate_behind[:min_sweep_points] = np.zeros(min_sweep_points)
scan_rate_ahead[-min_sweep_points:] = np.zeros(min_sweep_points)
# now sharpen the scan rate!
scan_rate = scan_rate_middle
mask_use_ahead = np.logical_and(
np.abs(scan_rate_ahead) > np.abs(scan_rate),
np.abs(scan_rate_ahead) > np.abs(scan_rate_behind),
)
scan_rate[mask_use_ahead] = scan_rate_ahead[mask_use_ahead]
mask_use_behind = np.logical_and(
np.abs(scan_rate_behind) > np.abs(scan_rate),
np.abs(scan_rate_behind) > np.abs(scan_rate_ahead),
)
scan_rate[mask_use_behind] = scan_rate_behind[mask_use_behind]
if False: # plot it for debugging
fig, ax = plt.subplots()
ax.plot(t, scan_rate, "k") # t_ahead - t_behind,
self.add_data_col("scan_rate", scan_rate, "EC")
print("\nfunction CyclicVoltammagram.make_scan_rate finished!\n\n")
return self.scan_rate
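    # For reference, the "softened" derivative above is a central difference over
    # 2 * min_sweep_points data points, converted from V/s to mV/s:
    #     scan_rate[i] ~= (v[i+n] - v[i-n]) / (t[i+n] - t[i-n]) * 1e3,  n = min_sweep_points
    # e.g. a 50 mV rise over 1 s gives 0.05 / 1 * 1e3 = 50 mV/s.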
@wraps(plot_vs_potential)
def plot(self, *args, **kwargs):
return self.plot_vs_potential(*args, **kwargs)
@wraps(CV_difference)
def get_difference(self, *args, **kwargs):
return CV_difference(self.data, *args, **kwargs)
    def subtract(self, cv_2, min_sweep_points=10, scan_rate_cutoff=1):
        """
        Takes the two datasets sweep by sweep, interpolates cv_2 onto the
        potential axis of self, and returns the difference as a new
        CyclicVoltammagram.
        """
print("\n\nfunction CyclicVoltammagram.subtract at your service!\n")
# best to force a remake of the sweep numbers.
sweeps_1 = self.make_sweeps(
min_sweep_points=min_sweep_points, scan_rate_cutoff=scan_rate_cutoff
)
sweeps_2 = cv_2.make_sweeps(
min_sweep_points=min_sweep_points, scan_rate_cutoff=scan_rate_cutoff
)
        # keep only real sweeps (drop potential holds, which have sweep type None)
        s1s = sorted(s for s in sweeps_1 if sweeps_1[s] is not None)
        s2s = sorted(s for s in sweeps_2 if sweeps_2[s] is not None)
try:
t_str = self.t_str
except AttributeError:
t_str = "time/s"
try:
E_str = self.E_str
except AttributeError:
E_str = "Ewe/V"
try:
I_str = self.I_str
except AttributeError:
I_str = "I/mA"
try:
V_str = self.V_str
except AttributeError:
V_str = E_str
try:
J_str = self.J_str
except AttributeError:
J_str = I_str
diff = Dataset(
{
"data_cols": set(),
"timecols": {},
"t_str": t_str,
"V_str": V_str,
"J_str": J_str,
"E_str": E_str,
"I_str": I_str,
}
)
debugging = False
if debugging:
fig, ax = plt.subplots()
        for s1, s2 in zip(s1s, s2s):
            print(
                f"interpolating sweep {s2} of cv_2 (redox = {sweeps_2[s2]}) "
                f"onto sweep {s1} of self (redox = {sweeps_1[s1]})."
            )
            if sweeps_2[s2] != sweeps_1[s1]:
                print(
                    "WARNING!!! subtracting sweeps of different directions. "
                    "Results may be meaningless."
                )
data1 = self.cut(sweep=s1).data
data2 = cv_2.cut(sweep=s2).data
t1, v = data1[t_str], data1[V_str]
t2_i, v2 = data2[t_str], data2[V_str]
if sweeps_1[s1] == 1: # anodic scan, interpolate normally
t2 = np.interp(v, v2, t2_i)
            elif sweeps_1[s1] == 0:  # cathodic scan: reverse the sign of v so np.interp sees increasing x
                t2 = np.interp(-v, -v2, t2_i)
for col in data1["data_cols"]:
if col in [t_str, V_str]:
diff.append_to_data_col(col, data1[col], col_type="EC")
# this will add it if it's not already there.
elif col in data2["data_cols"]:
tcol = self.get_timecol(col)
if col == tcol:
continue
x1, y1_i = data1[tcol], data1[col]
x2, y2_i = data2[tcol], data2[col]
try:
y1 = np.interp(t1, x1, y1_i)
y2 = np.interp(t2, x2, y2_i)
except ValueError as e:
print(f"skipping column {col} due to ValueError {e}")
continue
y_diff = y1 - y2
if debugging:
if "J" in col:
ax.plot(t1, y1, "k")
ax.plot(t1, y2, "r")
ax.plot(t1, y_diff, "g")
diff.append_to_data_col(
col, y_diff, col_type="EC"
) # if we say the col_type is EC,
# then it should recognize "time/s" as the timecol.
diff["tspan"] = [diff[t_str][0], diff[t_str][-1]]
diff = CyclicVoltammagram(diff)
try:
diff.data["data_type"] = self.data["data_type"]
except KeyError:
print("Warning!!! no self.data['data_type']")
print("\nfunction CyclicVoltammagram.subtract finished!\n\n")
return diff
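    # A usage sketch (objects are hypothetical; assumes both CVs span the same potential window):
    #     diff = cv_with_catalyst.subtract(cv_blank)
    #     diff.plot()  # plot the sweep-by-sweep difference vs potential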
def plot_all(self, ax="new", colorscale="spectral", **kwargs):
cmap = colormap.get_cmap(colorscale)
if ax == "new":
fig, ax = plt.subplots()
C = len(self)
for c in range(C):
color = cmap(c / C)
self[c].plot(ax=ax, color=color, **kwargs)
ax.set_xlabel(self.V_str)
ax.set_ylabel(self.J_str)
return ax
def average(self):
        # averages all data columns over the cycles; could in principle be generalized further
lists = {}
for col in self.data_cols:
lists[col] = []
# print('test') # debugging
for c in range(len(self)):
cv = self[c]
for col in self.data_cols:
lists[col] += [cv.data[col]]
ts = lists["time/s"]
N = min([len(t) for t in ts])
average_cv = self[0] # inherit all the metadata from self[0]
for col in self.data_cols:
x_stack = np.stack([x[:N] for x in lists[col]])
x_avg = np.mean(x_stack, axis=0)
average_cv.data[col] = x_avg
average_cv.update_with_data()
return average_cv
def cut(self, *args, **kwargs):
dataset = super(CyclicVoltammagram, self).cut(*args, **kwargs)
return CyclicVoltammagram(dataset)
|
ScottSoren/EC_MS | src/EC_MS/PVMassSpec.py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 11:06:17 2020
@author: <NAME>lets Lab_1
"""
import re
import numpy as np
from .parsing_tools import timestring_to_epoch_time
PVMassSpec_time_match = r"[0-9]{2}-[0-9]{2}-[0-9]{4} [0-9]{2}'[0-9]{2}'[0-9]{2}" # it has a ridiculous format
def PVMS_title_to_timestring(title):
match = re.search(PVMassSpec_time_match, title)
if not match:
print("Warning\! no timestamp found in " + title)
return
match_str = match.group()
m, d, Y = match_str[0:2], match_str[3:5], match_str[6:10]
H, M, S = match_str[11:13], match_str[14:16], match_str[17:19]
timestring = f"{Y}/{m}/{d} {H}:{M}:{S}"
return timestring
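# A worked example with a hypothetical title, to illustrate the reformatting:
#     PVMS_title_to_timestring("Scan 02-26-2020 11'06'17.tsv")  ->  "2020/02/26 11:06:17"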
def read_PVMS(file_path, delim="\t", t_str="Time Relative (sec)"):
with open(file_path, "r") as f:
lines = f.readlines()
data = {
"file": file_path,
"header": "",
"timecols": {},
}
    N_col_head = len(lines)
    # this will decrease once the loop finds the column-header line
nondata_cols = [] # this will store abstime, so I don't have to parse.
for n, line in enumerate(lines):
l = line.strip()
if n < N_col_head:
data["header"] = data["header"] + line
if len(l) == 0:
N_col_head = n + 1
elif n == N_col_head:
data_cols = l.split(delim)
for col in data_cols:
data[col] = np.array([])
data["timecols"][col] = "Time Relative (sec)"
elif n > N_col_head:
for col, val in zip(data_cols, l.split(delim)):
if col in nondata_cols:
data[col] += [val]
continue
try:
try:
x = eval(val)
except NameError:
if "nan" in val:
x = np.nan
else:
raise SyntaxError
except SyntaxError:
                    print(
                        f"removing {col} from data_cols due to value {val} on line {n}"
                    )
data[col] = list(data[col])
data[col] += [val]
nondata_cols += [col]
else:
data[col] = np.append(data[col], x)
data["data_cols"] = set([col for col in data_cols if col not in nondata_cols])
data["t_str"] = t_str
if t_str not in data["data_cols"]:
print("Warning!!! " + t_str + " not in data from " + file_path)
else:
data["timecols"] = dict([(col, t_str) for col in data["data_cols"]])
timestring = PVMS_title_to_timestring(file_path)
if not timestring:
print(
"couldn't find the timestring in "
+ file_path
+ ". Using the first point in Time Absolute."
)
if timestring:
# print(timestring + '!!!') # debugging
tstamp = timestring_to_epoch_time(timestring)
else:
try:
timestring = data["Time Absolute (Date_Time)"][0]
except (KeyError, IndexError):
print("Warning!!! could't get timestring from Time Absolute either.")
try:
tstamp = data["Time Absolute (UTC)"][0]
except (KeyError, IndexError):
tstamp = timestring_to_epoch_time(timestring)
data["timestring"] = timestring
data["tstamp"] = tstamp
rename_PVMS_cols(data)
return data
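# A minimal usage sketch (file name and mass channel are hypothetical; assumes the
# export contains a "44_amu" column, which rename_PVMS_cols maps to M44-x / M44-y):
#     data = read_PVMS("PVMS_export.tsv")
#     t, y = data["M44-x"], data["M44-y"]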
def read_PVMS_spectrum(*args, index=0, **kwargs):
"""
calls read_PVMS_spectra with args and kwargs, then returns spectra[index].
"""
spectra = read_PVMS_spectra(*args, **kwargs)
spectrum = spectra[index]
return spectrum
def rename_PVMS_cols(data):
data_cols = data["data_cols"].copy()
timecol = "Time Relative (sec)"
for col in data_cols:
if "_amu" in col:
mass = "M" + col.split("_amu")[0]
xcol = mass + "-x"
ycol = mass + "-y"
data[ycol] = data[col]
data[xcol] = data[timecol]
data["data_cols"].add(xcol)
data["data_cols"].add(ycol)
data["timecols"][ycol] = xcol
data["timecols"][xcol] = xcol
# remove_negatives(dataset) # not sure if this is useful.
return data # not really necessary
|
gallegonovato/no-google | scripts/parsed.py | <reponame>gallegonovato/no-google<filename>scripts/parsed.py
from datetime import date
today = date.today()
newfile = open('../google-domains', 'w')
newfile.write('# This blocklist helps Pi-hole\'s admin restrict access to Google and its domains.'+'\n')
newfile.write('# Last updated: ' + today.strftime('%d-%m-%Y') +'\n')
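# Example of the transformation below (input lines are hypothetical):
#     "# Google Ads"          -> "# Google Ads"  (comment lines keep their text)
#     "adservice.google.com"  -> "0.0.0.0 adservice.google.com"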
with open('../pihole-google.txt', 'r') as main:
    for line in main:
        if '#' in line:
            newfile.write('# ' + line[2:])
        else:
            newfile.write('0.0.0.0 ' + line.rstrip("\n") + '\n')
newfile.close() |
kkiyama117/enterlist | enterlist/__version__.py | __title__ = 'enterlist'
__description__ = 'nothing.'
__url__ = 'https://github.com/kkiyama117/KUEventParser'
__version__ = '1.0.1'
__build__ = 0x010001
__author__ = 'kkiyama117'
__author_email__ = '<EMAIL>'
__maintainer__ = 'kkiyama117'
__maintainer_email__ = '<EMAIL>'
__license__ = 'Apache2'
__copyright__ = 'Copyright 2019 kkiyama117'
|
kkiyama117/enterlist | enterlist/core.py | from models import Enter
from spread import GetDataManager
from utils import post_message_to_channel
_spread_api = GetDataManager()
_first_message = '以下のエンター情報を読んで内容を確認して下さい'
def confirm_enters_data(user_id: str):
post_message_to_channel(user_id, 'CONFIRM START')
    # for each mentor
for _name, _id in _spread_api.get_mentors_data().items():
post_message_to_channel(_id, _first_message)
        # get the rows of enters assigned to this mentor that are not yet checked
for row in _spread_api.enter_rows_not_checked(_name):
            # build the Enter model
enter = Enter(**(_spread_api.get_enter_data(row)))
            # post it to Slack
post_message_to_channel(_id, enter.detail())
            # mark the row as confirmed
_spread_api.check_enter(row, _spread_api.check_col)
_last_message = '上記のエンターについては, spreadsheetに自動でチェックが入りました'
post_message_to_channel(_id, _last_message)
post_message_to_channel(user_id, 'CONFIRM END')
return "CONFIRM ENTERS DATA TO MENTORS"
if __name__ == '__main__':
confirm_enters_data("")
|
kkiyama117/enterlist | enterlist/api.py | import core
_commands_dict = {'check': 'confirm_enters_data'}
def request(event: dict):
# get data from event
message = event.get("event").get("text")
commander_id = event.get("event").get("user")
_command = parse_command(message)
if _command == "all":
_commands = _commands_dict.values()
else:
_commands = [_command]
answer = []
for x in _commands:
answer.append(getattr(core, x)(user_id=commander_id))
return answer
def parse_command(message: str):
"""Slack message 解析"""
commands = message.split()
main_command = commands[0]
if main_command == 'all':
return 'all'
return _commands_dict.get(main_command)
if __name__ == '__main__':
e = {
"event": {
"text": "all"
}
}
print(request(e))
|
kkiyama117/enterlist | enterlist/spread.py | import os
from pathlib import Path
import gspread
# from dotenv.main import find_dotenv, load_dotenv
from oauth2client.service_account import ServiceAccountCredentials
# load_dotenv(find_dotenv())
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
class GetDataManager:
def __init__(self):
_json_file = Path(__file__).parent / 'kiyama.json'
_credentials = ServiceAccountCredentials.from_json_keyfile_name(_json_file, scope)
client = gspread.authorize(_credentials)
_file_id = os.getenv('GSPREAD_SHEET')
self._gfile = client.open_by_key(_file_id)
self._enterlist = self._gfile.worksheet('①エンターリスト')
self._interview = self._gfile.worksheet('⑦面談CSV貼付')
self._mentor = self._gfile.worksheet('メンター')
# list used by sort
self.check_col = self._enterlist.find('チェック').col
self.first_contact_check_col = self._enterlist.find('エンターへ初回連絡').col
self.add_info_check_col = self._enterlist.find('定性情報\n入力').col
_checked_cells = self._enterlist.findall("済")
        self._checked_rows = [x.row for x in _checked_cells if x.col == self.check_col]
        self._first_contact_checked_rows = [x.row for x in _checked_cells if x.col == self.first_contact_check_col]
        self._add_info_checked_rows = [x.row for x in _checked_cells if x.col == self.add_info_check_col]
# All sheets
def get_enter_data(self, row: int):
# initialize
_row = row
_enter_id = self._enterlist.cell(row, 1).value
# enterlist sheets
keys_data = {'name': '氏名', 'univ': '大学', 'department': '学部学科', 'gender': '性別'}
needed_dict = {k: self._enterlist.cell(_row, self._enterlist.find(v).col).value for k, v in keys_data.items()}
_row = self._interview.find(_enter_id).row
        # interview-CSV sheet
keys_data = {'interview': 'AD' + str(_row), 'demand': 'AG' + str(_row), 'line': 'AH' + str(_row)}
needed_dict2 = {k: self._interview.acell(v).value for k, v in keys_data.items()}
        # there can be multiple industries; join the two columns
industry = self._interview.acell('AE' + str(_row)).value + '/' + self._interview.acell('AF' + str(_row)).value
        # merge the dicts
needed_dict = {**needed_dict, **needed_dict2}
needed_dict.update(enter_id=_enter_id, industry=industry)
return needed_dict
# mentor sheet===============================================
def get_mentors_data(self):
        return {_name: _id for _name, _id in zip(self._mentor.col_values(1), self._mentor.col_values(2)) if
                _id != ''}
# Enterlist sheet===================================
def _get_enter_id(self, row: int):
_col = self._enterlist.find('エンターID').col
return self._enterlist.cell(row, _col).value
def _enter_rows_with_mentor(self, mentor_name: str, sort_list: list = None):
"""条件に合ったエンターの行をListで返す(template)"""
mentor_rows = (x.row for x in self._enterlist.findall(mentor_name))
if sort_list is None:
sort_list = self._checked_rows
mentor_rows = set(mentor_rows) - set(sort_list)
return list(mentor_rows)
# checked
def enter_rows_not_checked(self, mentor_name):
return self._enter_rows_with_mentor(mentor_name, self._checked_rows)
def enter_rows_not_first_contact(self, mentor_name):
return self._enter_rows_with_mentor(mentor_name, self._first_contact_checked_rows)
def enter_rows_not_add_info(self, mentor_name):
return self._enter_rows_with_mentor(mentor_name, self._add_info_checked_rows)
def check_enter(self, row, col):
"""Add check to enter sheet"""
self._enterlist.update_cell(row, col, "済")
if __name__ == '__main__':
m = GetDataManager()
for i, j in m.get_mentors_data().items():
print(m._enter_rows_with_mentor(i))
|
kkiyama117/enterlist | enterlist/utils.py | import os
import requests
def post_message_to_channel(channel: str, message: str):
"""
Send message to slack
Args:
        channel: ID of the channel or member to send the message to
message: text of message
Returns:
"""
# load_dotenv(find_dotenv())
url = "https://slack.com/api/chat.postMessage"
token = os.getenv('SLACK_BOT_TOKEN')
headers = {
"Authorization": "Bearer " + token
}
payload = {
'channel': channel,
'text': message,
'as_user': True
}
requests.post(url, headers=headers, data=payload)
return "OK"
|
kkiyama117/enterlist | enterlist/aws.py | <gh_stars>0
# -*- coding: utf-8 -*-
import json
import logging
import boto3
# logging configuration
from api import request
from utils import post_message_to_channel
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# client for call lambda
lambda_client = boto3.client('lambda')
def sender_handler(event, context):
""" AWS Sender command
Args:
event:
context:
Returns:
"""
    # write the received event info to the CloudWatch log
logging.info(json.dumps(event))
    # handle the request
post_message_to_channel(event.get('event').get('channel'), ' Calling Success!')
    # post the result messages to Slack
return request(event)
def caller_handler(event: dict, context) -> str:
""" AWS Sender command
Args:
event:
context:
Returns:
"""
    # write the received event info to the CloudWatch log
logging.info(json.dumps(event))
    # Slack Event API URL verification (challenge)
if "challenge" in event:
return event.get("challenge")
    # if the event came from a bot, or is not a message-post event,
    # return as-is so the bot does not react to it
if is_bot(event) or not is_message(event):
return "OK"
post_message_to_channel(event.get('event').get('channel'), 'Running...')
    # invoke the separate Sender function asynchronously
lambda_client.invoke(
FunctionName="enterlist_sender",
InvocationType="Event",
Payload=json.dumps(event)
)
return "OK"
def is_bot(event: dict) -> bool:
""" Check Bot or not
"""
return event.get("event").get("bot_id") is not None
def is_message(event: dict) -> bool:
"""Check Event is created by message send or not"""
return event.get("event").get("type") == "message"
|
kkiyama117/enterlist | enterlist/deploy.py | <reponame>kkiyama117/enterlist<filename>enterlist/deploy.py
# -*- coding: utf-8 -*-
import os
import shutil
import subprocess
from pathlib import Path
def deploy():
""" Deploy package to AWS
"""
cd = Path(__file__).parent
os.chdir(cd)
print(cd)
args = ['pip', 'install', '-U', '-r', cd.as_posix() + '/requirements.txt', '--target', '.']
subprocess.call(args)
username = os.getenv('USERNAME', '')
src = cd / (username + '.json')
copy = cd / 'gspread.json'
shutil.copyfile(src, copy)
pack = Path(shutil.make_archive((cd.parent / 'enterlist').as_posix(), 'zip'))
os.chdir(cd.parent)
args = ['aws', 's3', 'cp', pack.name, 's3://' + os.getenv('S3_BUCKET') + '/' + pack.name, '--profile', username]
subprocess.call(args)
return cd.parent
def create():
username = os.getenv('USERNAME', '')
os.chdir(deploy())
args = ['aws', 'lambda', 'create-function', '--cli-input-json', 'file://settings/aws_caller_' + username + '.json',
'--profile', username]
subprocess.call(args)
args = ['aws', 'lambda', 'create-function', '--cli-input-json', 'file://settings/aws_sender_' + username + '.json',
'--profile', username]
subprocess.call(args)
print("finish")
def update():
username = os.getenv('USERNAME', '')
os.chdir(deploy())
args = ['aws', 'lambda', 'update-function-code', '--cli-input-json',
'file://settings/aws_caller_update_' + username + '.json', '--profile',
username]
subprocess.call(args)
args = ['aws', 'lambda', 'update-function-code', '--cli-input-json',
'file://settings/aws_sender_update_' + username + '.json', '--profile',
username]
subprocess.call(args)
print("finish")
def main():
# create()
update()
if __name__ == '__main__':
main()
|
kkiyama117/enterlist | enterlist/models.py | <filename>enterlist/models.py
class Enter:
def __init__(self, enter_id: str, name: str, univ: str, department: str,
gender: str, interview: str, industry: str, demand: str, line: str, checked: bool = False):
self.enter_id = enter_id
self.name = name
self.univ = univ
self.department = department
self.gender = gender
self.interview = interview
self.industry = industry
self.demand = demand
self.line = line
self._checked = checked
def __str__(self):
return self.enter_id
def check(self):
self._checked = True
@property
def checked(self):
        return self._checked
def detail(self) -> str:
text: str = f'名前: {self.name} \n 学部: {self.department} \n' \
f'性別: {self.gender} \n' \
f'希望面談内容: {self.interview} \n' \
f'志望業界: {self.industry} \n' \
f'メンターへの希望: {self.demand} \n' \
f'LINE ID: {self.line}'
return text
class Mentor:
def __init__(self, name: str, slack_id):
self._name = name
self._slack_id = slack_id
|
felipebastosweb/python-kivy-kivymd-hello-world | main.py | <filename>main.py
from kivy.uix.screenmanager import Screen
from kivymd.app import MDApp
from kivymd.uix.button import MDRectangleFlatButton
class MainApp(MDApp):
def build(self):
screen = Screen()
screen.add_widget(
MDRectangleFlatButton(
text="Hello, World",
pos_hint={"center_x": 0.5, "center_y": 0.5},
)
)
return screen
MainApp().run() |
felipebastosweb/python-kivy-kivymd-hello-world | screens/mylist/mylist.py | from kivy.lang import Builder
from kivymd.uix.screen import MDScreen
from kivymd.uix.list import TwoLineListItem
class MyListScreen(MDScreen):
# constructor
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.screen = Builder.load_file('screens/mylist/mylist.kv')
# screen render
def build(self):
return self.screen
def on_start(self):
for i in range(20):
self.screen.ids.md_list.add_widget(
                TwoLineListItem(text=f"Title {i}", secondary_text="Secondary title"
)
)
|
Usman-Ghani123/Car-Recognition | model_layers_params.py | <gh_stars>1-10
import keras
from resnet_50 import resnet50_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
from keras.callbacks import ReduceLROnPlateau
from keras.models import Model
img_width, img_height = 224, 224
num_channels = 3
num_classes = 196
resnet = resnet50_model(img_height, img_width, num_channels, num_classes)
for layer in resnet.layers:
layer.trainable = False
model = Model(inputs=resnet.input, outputs=resnet.output)
model.summary()
|
Usman-Ghani123/Car-Recognition | analyze.py | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing import image
from sklearn.metrics import confusion_matrix
from tqdm import tqdm
from utils import load_model
def decode_predictions(preds, top=5):
results = []
# preds is basically the list of probabilities
for pred in preds:
        # take the indices of the top-`top` probabilities (argsort is ascending, so slice the tail and reverse)
top_indices = pred.argsort()[-top:][::-1]
        # map each index to a (class name, probability) pair
result = [(class_names[i], pred[i]) for i in top_indices]
        # sort the pairs by probability, descending
result.sort(key=lambda x: x[1], reverse=True)
results.append(result)
return results
def predict(img_dir, model):
img_files = []
    # walk the directory tree under img_dir (e.g. data/valid)
for root, dirs, files in os.walk(img_dir, topdown=False):
for name in files:
# this is the name of files in each directory
# append is a function of list to make a list of arrays
img_files.append(os.path.join(root, name))
    # sort the file list alphabetically
img_files = sorted(img_files)
y_pred = []
y_test = []
for img_path in tqdm(img_files):
# it will load img
img = image.load_img(img_path, target_size=(224, 224))
# convert the img to array
x = image.img_to_array(img)
# predict the image
preds = model.predict(x[None, :, :, :])
# function created above
decoded = decode_predictions(preds, top=1)
# predicted label
pred_label = decoded[0][0][0]
# print(pred_label)
y_pred.append(pred_label)
        # split the path into components to recover the class-id folder name
        tokens = os.path.normpath(img_path).split(os.sep)
# extract the label given by us
class_id = int(tokens[-2])
# print(str(class_id))
y_test.append(class_id)
return y_pred, y_test
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
        # each value in a row is divided by the sum of all elements in that row
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
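        # e.g. a row [2, 2] (row sum 4) becomes [0.5, 0.5]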
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def calc_acc(y_pred, y_test):
num_corrects = 0
for i in range(num_samples):
        pred = int(y_pred[i])
        test = int(y_test[i])
if pred == test:
num_corrects += 1
return num_corrects / num_samples
if __name__ == '__main__':
img_width, img_height = 224, 224
num_channels = 3
num_classes = 196
class_names = range(1, (num_classes + 1))
num_samples = 1629
print("\nLoad the trained ResNet model....")
model = load_model()
    # y_pred holds the labels predicted by the classifier; y_test holds the original labels we assigned
y_pred, y_test = predict('data/valid', model)
print("y_pred: " + str(y_pred))
print("y_test: " + str(y_test))
acc = calc_acc(y_pred, y_test)
print("%s: %.2f%%" % ('acc', acc * 100))
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
|
Usman-Ghani123/Car-Recognition | train_model.py | import keras
from resnet_50 import resnet50_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
from keras.callbacks import ReduceLROnPlateau
img_width, img_height = 224, 224
num_channels = 3
train_data = 'data/train'
valid_data = 'data/valid'
num_classes = 196
num_train_samples = 6515
num_valid_samples = 1629
verbose = 1
batch_size = 16
num_epochs = 100
patience = 40
if __name__ == '__main__':
# build a classifier model
model = resnet50_model(img_height, img_width, num_channels, num_classes)
# prepare data augmentation configuration
train_data_gen = ImageDataGenerator(rotation_range=20.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2,
horizontal_flip=True)
valid_data_gen = ImageDataGenerator()
# callbacks
# this creates a log file which is used to plot the graph
tensor_board = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
log_file_path = 'logs/training.log'
csv_logger = CSVLogger(log_file_path, append=False)
    # stop training if val_acc has not improved for `patience` (40) consecutive epochs
early_stop = EarlyStopping('val_acc', patience=patience)
    # reduce the learning rate by a factor of 0.1 if val_acc has not improved for patience/4 (10) epochs
reduce_lr = ReduceLROnPlateau('val_acc', factor=0.1, patience=int(patience / 4), verbose=1)
# Will save the model file in this path
trained_models_path = 'models/model'
# File name and format
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
# save the file
model_checkpoint = ModelCheckpoint(model_names, monitor='val_acc', verbose=1, save_best_only=True)
    # the callbacks to apply during training
callbacks = [tensor_board, model_checkpoint, csv_logger, early_stop, reduce_lr]
# generators
# access the sub directories in the data/train folder
train_generator = train_data_gen.flow_from_directory(train_data, (img_width, img_height), batch_size=batch_size,
class_mode='categorical')
    # access the sub directories in the data/valid folder
valid_generator = valid_data_gen.flow_from_directory(valid_data, (img_width, img_height), batch_size=batch_size,
class_mode='categorical')
# fine tune the model
model.fit_generator(
train_generator,
steps_per_epoch=num_train_samples / batch_size,
validation_data=valid_generator,
validation_steps=num_valid_samples / batch_size,
epochs=num_epochs,
callbacks=callbacks,
verbose=verbose)
|
Usman-Ghani123/Car-Recognition | take_image_output.py | <gh_stars>1-10
import keras
from resnet_50 import resnet50_model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.models import Model
from matplotlib import pyplot
from numpy import expand_dims
img_width, img_height = 224, 224
num_channels = 3
num_classes = 196
# load the model
model = resnet50_model(img_height, img_width, num_channels, num_classes)
# redefine model to output right after the first hidden layer
model = Model(inputs=model.inputs, outputs=model.layers[5].output)
model.summary()
# load the image with the required shape
img = load_img('data/test/00001.jpg', target_size=(224, 224))
# convert the image to an array
img = img_to_array(img)
# expand dimensions so that it represents a single 'sample'
img = expand_dims(img, axis=0)
# get feature map for first hidden layer
feature_maps = model.predict(img)
# plot all 64 feature maps in an 8x8 grid
square = 8
ix = 1
for _ in range(square):
for _ in range(square):
# specify subplot and turn of axis
ax = pyplot.subplot(square, square, ix)
ax.set_xticks([])
ax.set_yticks([])
# plot filter channel in grayscale
pyplot.imshow(feature_maps[0, :, :, ix-1], cmap='gray')
ix += 1
# show the figure
pyplot.show() |
Usman-Ghani123/Car-Recognition | gui.py | import tkinter as tk
import tkinter.font
import cv2
from PIL import ImageTk, Image
import os
import random
import cv2 as cv
import keras.backend as K
import numpy as np
import scipy.io
from utils import load_model
from tkinter import filedialog
from utils import draw_str
def open_folder():
global folder_path
filename = filedialog.askdirectory()
folder_path.set(filename)
def get_path(file_path):
update = ''
label['text'] = update
label_3['text'] = update
predict_image(file_path + '/')
def predict_image(file_path):
try:
        test_images = [f for f in os.listdir(file_path) if
                       os.path.isfile(os.path.join(file_path, f)) and (f.endswith('.jpg') or f.endswith('.png'))]
num_samples = 1
samples = random.sample(test_images, num_samples)
for i, image_name in enumerate(samples):
filename = os.path.join(file_path, image_name)
path = f'Start processing image:\n{filename}'
label.config(text=label.cget('text') + path + '\n')
tokens = image_name.split('.')
img_name = tokens[0]
bgr_img = cv.imread(filename)
render_img = cv.resize(bgr_img, (img_width, img_height), cv.INTER_CUBIC)
rgb_img = cv.cvtColor(render_img, cv.COLOR_BGR2RGB)
rgb_img = np.expand_dims(rgb_img, 0)
preds = model.predict(rgb_img)
prob = np.max(preds)
class_id = np.argmax(preds)
format_result(class_names[class_id][0][0], prob)
render_img = cv.resize(render_img, (img_width, img_height), cv.INTER_CUBIC)
height, width = render_img.shape[:2]
model_1 = draw_str(render_img, (height, width), f'Class: {class_names[class_id][0][0]}', 'Prob: {:.4}'.format(prob))
cv.imwrite(f'img_copy.jpg', render_img)
load_img = Image.open(f'img_copy.jpg')
render_img_1 = ImageTk.PhotoImage(load_img)
label_3.image = render_img_1
label_3.configure(image=render_img_1)
    except Exception:
label['text'] = 'There is some problem in directory or image\nTry again'
label_3['text'] = 'No Image Found'
def format_result(cl_id, prob):
lab = {}
lab['cl'] = cl_id
lab['labl'] = '{:.4}'.format(prob)
final_str = 'Class: {}\nProbibility: {}\n'.format(cl_id, lab['labl'])
label.config(text=label.cget('text') + '\n' + final_str + '\n')
def my_quit():
quit()
# MAIN CODE ######################################################################################################
Height = 480
Width = 700
img_width, img_height = 224, 224
model = load_model()
model.load_weights('G:/fyp/resnet_50/model/epoch_100_aug/model.84-0.88.hdf5')
cars_meta = scipy.io.loadmat('devkit/cars_meta')
class_names = cars_meta['class_names'] # shape=(1, 196)
class_names = np.transpose(class_names)
root = tk.Tk()
root.title('Car Recognition Tool')
root.iconbitmap('icon.ico')
canvas = tk.Canvas(root, height=Height, width=Width)
canvas.pack()
img = cv2.imread('landscape.png')
img = cv2.resize(img, dsize=(Width, Height))
cv2.imwrite('new_landscape.png', img)
#
background_image = tk.PhotoImage(file='new_landscape.png')
image_label = tk.Label(root, image=background_image)
image_label.place(relheight=1, relwidth=1)
frame = tk.Frame(root, bg='Gray', bd=5)
frame.place(relx=0.5, rely=0.1, relheight=0.1, relwidth=0.85, anchor='n')
label_1 = tk.Label(root, bg='light grey', bd=5)
label_1.place(relx=0.120, rely=0.025, relheight=0.07, relwidth=0.6)
label_1['text'] = '*provide the directory of the image e.g data/test\n**1 random image will be recognized at a time'
button_2 = tk.Button(frame, text="Input Folder", activebackground='black', activeforeground='blue', font=('courier', 9), command=lambda: open_folder())
button_2.place(relx=0.01, rely=0, relheight=1, relwidth=0.185)
folder_path = tk.StringVar()
entry = tk.Entry(frame, font=('courier', 8), textvariable=folder_path)
entry.place(relx=0.2, rely=0, relheight=1, relwidth=0.48)
button = tk.Button(frame, text="Enter", activebackground='black', activeforeground='blue', font=('courier', 10), command=lambda: get_path(entry.get()))
button.place(relx=0.7, rely=0, relheight=1, relwidth=0.3)
label_2 = tk.Label(root, bg='light grey', bd=5)
label_2.place(relx=0.25, rely=0.20, relheight=0.05, relwidth=0.40)
label_2['text'] = 'Results will be shown here'
lower_frame = tk.Frame(root, bg='Gray', bd=10)
lower_frame.place(relx=0.5, rely=0.25, relheight=0.6, relwidth=1, anchor='n')
label = tk.Label(lower_frame, font=('courier', 8), anchor='nw', justify='left', bd=5)
label.place(relheight=1, relwidth=0.56)
label_3 = tk.Label(lower_frame, font=('courier', 8), anchor='n', justify='left', bd=5)
label_3.place(relx=0.56, rely=0, relheight=1, relwidth=0.45)
button_1 = tk.Button(root, text="Quit", activebackground='Red', activeforeground='black', font=('courier', 10), command=my_quit)
button_1.place(relx=0.47, rely=0.87, relheight=0.1, relwidth=0.09)
root.mainloop()
|
Usman-Ghani123/Car-Recognition | utils.py | <filename>utils.py
import cv2 as cv
from resnet_50 import resnet50_model
def load_model():
model_weights_path = 'G:/fyp/resnet_50/model/epoch_100_aug/model.48-0.89.hdf5'
img_width, img_height = 224, 224
num_channels = 3
num_classes = 196
model = resnet50_model(img_height, img_width, num_channels, num_classes)
model.load_weights(model_weights_path, by_name=True)
return model
def draw_str(dst, target, s1, s2):
x, y = target
cv.rectangle(dst, (x-220, y-220), (x-10, y-10), (255, 0, 0), 2)
# cv.putText(dst, s1, (x - 270, y - 200), cv.FONT_HERSHEY_SIMPLEX, 0.40, (255, 0, 255), thickness=2)
# cv.putText(dst, s2, (x - 270, y - 180), cv.FONT_HERSHEY_SIMPLEX, 0.40, (255, 0, 255), thickness=2)
|
jshom/poker-face-read | Rating_Hands/CardPair.py | <filename>Rating_Hands/CardPair.py
import pickle
class CardPair():
with open('Hands.pickle', 'rb') as pickle_in:
hands = pickle.load(pickle_in)
all_cards = [i for i in range(1,11)]
all_cards.extend(['J', 'Q', 'K', 'A'])
def __init__(self, cards = []):
#Cards in terms of['card pair', bool: same suit]
self.cards = cards
def get_cards(self):
return self.cards
def get_rank(self):
for i in range(len(self.hands)):
if self.hands[i][0]==self.cards[0] and self.hands[i][2]==self.cards[1]:
return self.hands[i][1]
return "Nothing Found"
|
jshom/poker-face-read | Rating_Hands/ScrapingPokerHands.py | <reponame>jshom/poker-face-read
import requests
from bs4 import BeautifulSoup
import pickle
from os import listdir, remove
def scape_and_pickle_hands():
resp = requests.get('https://www.tightpoker.com/poker_hands.html')
soup = BeautifulSoup(resp.content, 'html.parser')
hands = []
for tr in soup.findAll('tr'):
hands.append(str(tr).split('\n'))
hands.pop(0)
if 'Hands.pickle' in listdir():
remove('Hands.pickle')
with open('Hands.pickle', 'wb') as pickle_out:
pickle.dump(hands,pickle_out)
print('File successfully Pickled')
def modify_data(data = 'Hands.pickle'):
with open(data, 'rb') as pickle_in:
hands = pickle.load(pickle_in)
for i in range(len(hands)):
del hands[i][0]
del hands[i][2:]
#Just getting rid of all the html. I know it's not the best way!
#TODO: make this in regular expressions, kinda a hack right now.
hands[i][0] = hands[i][0].split(">")[1].split("<")[0]
hands[i][1] = hands[i][1].split(">")[1].split("<")[0]
        # If the two cards share a suit ('s' at the end of the hand), the 's' is removed.
        # Each list item then carries a boolean (index 2) indicating whether the cards share a suit.
hands[i].append(not hands[i][0] == hands[i][0].split(" ")[0])
hands[i][0] = hands[i][0].split(" ")[0]
del hands[hands.index(['Cards', 'EV', False])]
del hands[hands.index(['Cards', 'EV', False])]
if 'Hands.pickle' in listdir():
remove('Hands.pickle')
#Making sure all the EVs are > 0, and rounded to 2 decimal places
for i in range(len(hands)):
hands[i][1] = round(float(hands[i][1])+.16, 2)
with open('Hands.pickle', 'wb') as pickle_out:
pickle.dump(hands,pickle_out)
print('File successfully Pickled')
return hands
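# After modify_data, each row has the shape [hand, EV, same_suit], e.g. (EV illustrative):
#     ["AA", 2.48, False]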
def main():
#Uncomment this line if you want to scrape the web for the data
# scape_and_pickle_hands()
#make sure that the data going into this function is directly from the web otherwise it will throw an error
print(modify_data())
if __name__ == '__main__':
main()
|
mo-pyy/micropython-st7735-esp8266 | main.py | # MicroPython ST7735 TFT display driver example usage
from machine import Pin, SPI
from tft import TFT_GREEN
import gc
gc.collect()
import font
import network, socket
# DC - RS/DC data/command flag
# CS - Chip Select, enable communication
# RST/RES - Reset
dc = Pin(4, Pin.OUT)
cs = Pin(2, Pin.OUT)
rst = Pin(5, Pin.OUT)
# SPI Bus (CLK/MOSI/MISO)
# check your port docs to see which Pins you can use
spi = SPI(1, baudrate=8000000, polarity=1, phase=0)
# TFT object, this is ST7735R green tab version
tft = TFT_GREEN(128, 160, spi, dc, cs, rst, rotate=90)
# init TFT
tft.init()
#Network
sta = network.WLAN(network.STA_IF)
ap = network.WLAN(network.AP_IF)
sta.active(True)
sta.connect("YOUR_SSID_HERE", "YOUR_PASSWORD_HERE")
while not sta.isconnected():
pass
print(sta.ifconfig())
ap.active(False)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
addr = socket.getaddrinfo("0.0.0.0", 23)[0][-1]
s.bind(addr)
s.listen(1)
tft.clear(tft.rgbcolor(0, 0, 0)) #b, g, r
tft.text(0,0,"Test", font.terminalfont, tft.rgbcolor(255, 255, 255), 2)
r, a = s.accept()
tft.text(0,0,"Test", font.terminalfont, tft.rgbcolor(0, 0, 0), 2)
old = " "
while True:
d = r.recv(1024).decode("utf-8")
if d[0:8] == "rotation":
ro = int(d[9:12])
tft.changeRotate(ro)
continue
print(d)
tft.text(0,0,old, font.terminalfont, tft.rgbcolor(0, 0, 0), 2)
tft.text(0,0,d, font.terminalfont, tft.rgbcolor(255, 255, 255), 2)
old = d
#tft.pixel(127, 159, tft.rgbcolor(250,0,0))
|
mo-pyy/micropython-st7735-esp8266 | tft.py | <reponame>mo-pyy/micropython-st7735-esp8266
# MicroPython ST7735 TFT display HAL
import time
from st7735 import ST7735
class TFT(ST7735):
def __init__(self, width, height, spi, dc, cs, rst, bl=None):
"""
SPI - SPI Bus (CLK/MOSI/MISO)
DC - RS/DC data/command flag
CS - Chip Select, enable communication
RST/RES - Reset
BL/Lite - Backlight control
"""
# self.tab = tab
self.spi = spi
self.dc = dc
self.cs = cs
self.rst = rst
self.bl = bl
# ST7735 init
super().__init__(width, height)
# ST7735 HAL
def init(self):
self.margin_row = 0
self.margin_col = 0
self.reset()
# self.clear()
# self.power(True)
def reset(self):
"""
Hard reset the display.
"""
self.dc.value(0)
self.rst.value(1)
time.sleep_ms(500)
self.rst.value(0)
time.sleep_ms(500)
self.rst.value(1)
time.sleep_ms(500)
def backlight(self, state=None):
"""
Get or set the backlight status if the pin is available.
"""
if self.bl is None:
return None
else:
if state is None:
return self.backlight_on
self.bl.value(1 if state else 0)
self.backlight_on = state
def write_pixels(self, count, color):
"""
Write pixels to the display.
count - total number of pixels
color - 16-bit RGB value
"""
self.dc.value(1)
self.cs.value(0)
for _ in range(count):
self.spi.write(color)
self.cs.value(1)
def write_cmd(self, cmd):
"""
Display command write implementation using SPI.
"""
self.dc.value(0)
self.cs.value(0)
self.spi.write(bytearray([cmd]))
self.cs.value(1)
def write_data(self, data):
"""
Display data write implementation using SPI.
"""
self.dc.value(1)
self.cs.value(0)
self.spi.write(data)
self.cs.value(1)
class TFT_GREEN(TFT):
def __init__(self, width, height, spi, dc, cs, rst, bl=None, rotate=0):
if rotate==90 or rotate==270:
height, width = width, height
self.rotate = rotate
super().__init__(width, height, spi, dc, cs, rst, bl)
def init(self):
# set column and row margins
self.margin_row = 1
self.margin_col = 2
# hard reset first
self.reset()
self.write_cmd(TFT.CMD_SWRESET)
time.sleep_ms(150)
self.write_cmd(TFT.CMD_SLPOUT)
time.sleep_ms(255)
# TODO: optimize data streams and delays
self.write_cmd(TFT.CMD_FRMCTR1)
self.write_data(bytearray([0x01, 0x2C, 0x2D]))
self.write_cmd(TFT.CMD_FRMCTR2)
self.write_data(bytearray([0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D]))
time.sleep_ms(10)
self.write_cmd(TFT.CMD_INVCTR)
self.write_data(bytearray([0x07]))
self.write_cmd(TFT.CMD_PWCTR1)
self.write_data(bytearray([0xA2, 0x02, 0x84]))
self.write_cmd(TFT.CMD_PWCTR2)
self.write_data(bytearray([0xC5]))
self.write_cmd(TFT.CMD_PWCTR3)
self.write_data(bytearray([0x8A, 0x00]))
self.write_cmd(TFT.CMD_PWCTR4)
self.write_data(bytearray([0x8A, 0x2A]))
self.write_cmd(TFT.CMD_PWCTR5)
self.write_data(bytearray([0x8A, 0xEE]))
self.write_cmd(TFT.CMD_VMCTR1)
self.write_data(bytearray([0x0E]))
self.write_cmd(TFT.CMD_INVOFF)
self.write_cmd(TFT.CMD_MADCTL)
self.write_data(bytearray([0x00])) # RGB
self.write_cmd(TFT.CMD_COLMOD)
self.write_data(bytearray([0x05]))
self.write_cmd(TFT.CMD_CASET)
self.write_data(bytearray([0x00, 0x01, 0x00, 127]))
self.write_cmd(TFT.CMD_RASET)
self.write_data(bytearray([0x00, 0x01, 0x00, 119]))
self.write_cmd(TFT.CMD_GMCTRP1)
self.write_data(bytearray([0x02, 0x1c, 0x07, 0x12, 0x37, 0x32,
0x29, 0x2d, 0x29, 0x25, 0x2b, 0x39, 0x00, 0x01, 0x03, 0x10]))
self.write_cmd(TFT.CMD_GMCTRN1)
self.write_data(bytearray([0x03, 0x1d, 0x07, 0x06, 0x2e, 0x2c,
0x29, 0x2d, 0x2e, 0x2e, 0x37, 0x3f, 0x00, 0x00, 0x02, 0x10]))
self.write_cmd(TFT.CMD_NORON)
time.sleep_ms(10)
self.write_cmd(TFT.CMD_DISPON)
time.sleep_ms(100)
|
amandewatnitrr/Aztecs-LogiTraffic | Team Aztecs_LogiTraffic_E-Ujjwala Hackathon 2020/Dev Files/permissions.py | <reponame>amandewatnitrr/Aztecs-LogiTraffic
from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwner(BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
message = "You must be the owner"
my_safe_method = ['GET', 'POST']
# def has_permission(self, request, view):
# if request.method in self.my_safe_method:
# return True
# return False
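    # A usage sketch (hypothetical DRF view; assumes the model has an `owner` field):
    #     class ThingDetail(generics.RetrieveUpdateDestroyAPIView):
    #         permission_classes = [IsOwner]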
def has_object_permission(self, request, view, obj):
# if request.method in SAFE_METHODS:
# return True
return obj.owner == request.user |
amandewatnitrr/Aztecs-LogiTraffic | Team Aztecs_LogiTraffic_E-Ujjwala Hackathon 2020/Dev Files/apps.py | <filename>Team Aztecs_LogiTraffic_E-Ujjwala Hackathon 2020/Dev Files/apps.py
from django.apps import AppConfig
class DeviceConfig(AppConfig):
name = 'Device'
|
amandewatnitrr/Aztecs-LogiTraffic | Team Aztecs_LogiTraffic_E-Ujjwala Hackathon 2020/Dev Files/admin.py | from django.contrib import admin
from Device.models import Device
# Register your models here.
admin.site.register(Device) |
jcassee/Garbage-Collection | custom_components/garbage_collection/__init__.py | <filename>custom_components/garbage_collection/__init__.py
"""
Component to integrate with garbage_colection.
"""
import os
from datetime import timedelta
import logging
from homeassistant import config_entries
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.util import Throttle
from .sensor import GarbageCollection
from integrationhelper.const import CC_STARTUP_VERSION
from homeassistant.const import CONF_NAME
from .const import (
CONF_SENSORS,
CONF_ENABLED,
CONF_FREQUENCY,
DEFAULT_NAME,
DOMAIN_DATA,
DOMAIN,
ISSUE_URL,
PLATFORM,
VERSION,
CONFIG_SCHEMA,
)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up this component using YAML."""
if config.get(DOMAIN) is None:
# We get here if the integration is set up using config flow
return True
# Print startup message
_LOGGER.info(
CC_STARTUP_VERSION.format(name=DOMAIN, version=VERSION, issue_link=ISSUE_URL)
)
platform_config = config[DOMAIN].get(CONF_SENSORS, {})
# If platform is not enabled, skip.
if not platform_config:
return False
for entry in platform_config:
# If entry is not enabled, skip.
# if not entry[CONF_ENABLED]:
# continue
hass.async_create_task(
discovery.async_load_platform(hass, PLATFORM, DOMAIN, entry, config)
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up this integration using UI."""
if config_entry.source == config_entries.SOURCE_IMPORT:
# We get here if the integration is set up using YAML
hass.async_create_task(hass.config_entries.async_remove(config_entry.entry_id))
return False
# Print startup message
_LOGGER.info(
CC_STARTUP_VERSION.format(name=DOMAIN, version=VERSION, issue_link=ISSUE_URL)
)
config_entry.options = config_entry.data
config_entry.add_update_listener(update_listener)
# Add sensor
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(config_entry, PLATFORM)
)
return True
async def async_remove_entry(hass, config_entry):
"""Handle removal of an entry."""
try:
await hass.config_entries.async_forward_entry_unload(config_entry, PLATFORM)
_LOGGER.info(
"Successfully removed sensor from the garbage_collection integration"
)
except ValueError:
pass
async def update_listener(hass, entry):
"""Update listener."""
entry.data = entry.options
await hass.config_entries.async_forward_entry_unload(entry, PLATFORM)
hass.async_add_job(hass.config_entries.async_forward_entry_setup(entry, PLATFORM))
|
jcassee/Garbage-Collection | custom_components/garbage_collection/const.py | <gh_stars>0
import voluptuous as vol
from datetime import datetime, date
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_NAME, WEEKDAYS, CONF_ENTITIES
"""Constants for garbage_collection."""
# Base component constants
DOMAIN = "garbage_collection"
DOMAIN_DATA = f"{DOMAIN}_data"
VERSION = "0.0.1"
PLATFORM = "sensor"
ISSUE_URL = "https://github.com/bruxy70/Garbage-Collection/issues"
ATTRIBUTION = "Data from this is provided by garbage_collection."
ATTR_NEXT_DATE = "next_date"
ATTR_DAYS = "days"
# Device classes
BINARY_SENSOR_DEVICE_CLASS = "connectivity"
# Configuration
CONF_SENSOR = "sensor"
CONF_ENABLED = "enabled"
CONF_FREQUENCY = "frequency"
CONF_ICON_NORMAL = "icon_normal"
CONF_ICON_TODAY = "icon_today"
CONF_ICON_TOMORROW = "icon_tomorrow"
CONF_VERBOSE_STATE = "verbose_state"
CONF_FIRST_MONTH = "first_month"
CONF_LAST_MONTH = "last_month"
CONF_COLLECTION_DAYS = "collection_days"
CONF_FORCE_WEEK_NUMBERS = "force_week_order_numbers"
CONF_WEEKDAY_ORDER_NUMBER = "weekday_order_number"
CONF_WEEK_ORDER_NUMBER = "week_order_number"
CONF_DATE = "date"
CONF_EXCLUDE_DATES = "exclude_dates"
CONF_INCLUDE_DATES = "include_dates"
CONF_MOVE_COUNTRY_HOLIDAYS = "move_country_holidays"
CONF_PROV = "prov"
CONF_STATE = "state"
CONF_OBSERVED = "observed"
CONF_PERIOD = "period"
CONF_FIRST_WEEK = "first_week"
CONF_FIRST_DATE = "first_date"
CONF_SENSORS = "sensors"
CONF_VERBOSE_FORMAT = "verbose_format"
CONF_DATE_FORMAT = "date_format"
# Defaults
DEFAULT_NAME = DOMAIN
DEFAULT_FIRST_MONTH = "jan"
DEFAULT_LAST_MONTH = "dec"
DEFAULT_FREQUENCY = "weekly"
DEFAULT_PERIOD = 1
DEFAULT_FIRST_WEEK = 1
DEFAULT_VERBOSE_STATE = False
DEFAULT_DATE_FORMAT = "%d-%b-%Y"
DEFAULT_VERBOSE_FORMAT = "on {date}, in {days} days"
# Icons
DEFAULT_ICON_NORMAL = "mdi:trash-can"
DEFAULT_ICON_TODAY = "mdi:delete-restore"
DEFAULT_ICON_TOMORROW = "mdi:delete-circle"
ICON = DEFAULT_ICON_NORMAL
# States
STATE_TODAY = "today"
STATE_TOMORROW = "tomorrow"
FREQUENCY_OPTIONS = [
"weekly",
"even-weeks",
"odd-weeks",
"every-n-weeks",
"every-n-days",
"monthly",
"annual",
"group",
]
MONTH_OPTIONS = [
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
]
COUNTRY_CODES = [
"",
"AR",
"AT",
"AU",
"AW",
"BE",
"BG",
"BR",
"BY",
"CA",
"CH",
"CO",
"CZ",
"DE",
"DK",
"DO",
"ECB",
"EE",
"ES",
"FI",
"FRA",
"HR",
"HU",
"IE",
"IND",
"IS",
"IT",
"JP",
"KE",
"LT",
"LU",
"MX",
"NG",
"NI",
"NL",
"NO",
"NZ",
"PE",
"PL",
"PT",
"PTE",
"RU",
"SE",
"SI",
"SK",
"UA",
"UK",
"US",
"ZA",
]
def date_text(value):
if value is None or value == "":
return ""
try:
return datetime.strptime(value, "%Y-%m-%d").date().strftime("%Y-%m-%d")
except ValueError:
raise vol.Invalid(f"Invalid date: {value}")
def month_day_text(value):
if value is None or value == "":
return ""
try:
return datetime.strptime(value, "%m/%d").date().strftime("%m/%d")
except ValueError:
raise vol.Invalid(f"Invalid date: {value}")
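# Worked examples for the validators above (inputs are illustrative):
#     date_text("2020-12-24")  -> "2020-12-24"
#     month_day_text("12/24")  -> "12/24"
#     date_text("24-12-2020")  -> raises vol.Invalid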
SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_FREQUENCY): vol.In(FREQUENCY_OPTIONS),
vol.Optional(CONF_COLLECTION_DAYS): vol.All(cv.ensure_list, [vol.In(WEEKDAYS)]),
vol.Optional(CONF_FIRST_MONTH, default=DEFAULT_FIRST_MONTH): vol.In(
MONTH_OPTIONS
),
vol.Optional(CONF_LAST_MONTH, default=DEFAULT_LAST_MONTH): vol.In(
MONTH_OPTIONS
),
vol.Optional(CONF_WEEKDAY_ORDER_NUMBER, default=[1]): vol.All(
cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(min=1, max=5))]
),
vol.Optional(CONF_WEEK_ORDER_NUMBER, default=[]): vol.All(
cv.ensure_list, [vol.All(vol.Coerce(int), vol.Range(min=1, max=5))]
),
vol.Optional(CONF_PERIOD, default=DEFAULT_PERIOD): vol.All(
vol.Coerce(int), vol.Range(min=1, max=52)
),
vol.Optional(CONF_FIRST_WEEK, default=DEFAULT_FIRST_WEEK): vol.All(
vol.Coerce(int), vol.Range(min=1, max=52)
),
vol.Optional(CONF_FIRST_DATE): date_text,
vol.Optional(CONF_DATE): month_day_text,
vol.Optional(CONF_ENTITIES): cv.entity_ids,
vol.Optional(CONF_INCLUDE_DATES, default=[]): vol.All(
cv.ensure_list, [date_text]
),
vol.Optional(CONF_EXCLUDE_DATES, default=[]): vol.All(
cv.ensure_list, [date_text]
),
vol.Optional(CONF_MOVE_COUNTRY_HOLIDAYS): vol.In(COUNTRY_CODES),
vol.Optional(CONF_PROV): cv.string,
vol.Optional(CONF_STATE): cv.string,
vol.Optional(CONF_OBSERVED, default=True): bool,
vol.Optional(CONF_ICON_NORMAL, default=DEFAULT_ICON_NORMAL): cv.icon,
vol.Optional(CONF_ICON_TODAY, default=DEFAULT_ICON_TODAY): cv.icon,
vol.Optional(CONF_ICON_TOMORROW, default=DEFAULT_ICON_TOMORROW): cv.icon,
vol.Optional(CONF_VERBOSE_STATE, default=DEFAULT_VERBOSE_STATE): cv.boolean,
vol.Optional(CONF_DATE_FORMAT, default=DEFAULT_DATE_FORMAT): cv.string,
vol.Optional(CONF_VERBOSE_FORMAT, default=DEFAULT_VERBOSE_FORMAT): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA])}
)
},
extra=vol.ALLOW_EXTRA,
)
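# Illustrative YAML accepted by CONFIG_SCHEMA (sensor values are hypothetical):
#     garbage_collection:
#       sensors:
#         - name: general waste
#           frequency: weekly
#           collection_days: ["mon"]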
WEEKLY_FREQUENCY = ["weekly", "even-weeks", "odd-weeks"]
WEEKLY_FREQUENCY_X = ["every-n-weeks"]
DAILY_FREQUENCY = ["every-n-days"]
MONTHLY_FREQUENCY = ["monthly"]
ANNUAL_FREQUENCY = ["annual"]
GROUP_FREQUENCY = ["group"]
|
carlosnasc88/pythonbirds-1 | oo/Pessoa.py | class Pessoa:
olhos = 2
    def __init__(self, *filhos, nome=None, idade=32):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'Ola{id(self)}'
if __name__ == '__main__':
ailton = Pessoa(nome='Ailton')
luciano = Pessoa(ailton, nome='Luciano')
print(Pessoa.cumprimentar(luciano))
print(id(luciano))
print(luciano.cumprimentar())
print(id(luciano.idade))
for filho in luciano.filhos:
print(filho.nome)
print(luciano.filhos)
luciano.sobrenome = 'Nascimento'
del luciano.filhos
    luciano.olhos = 1
del luciano.olhos
print(luciano.__dict__)
print(ailton.__dict__)
print(Pessoa.olhos)
print(luciano.olhos)
print(ailton.olhos)
print(id(Pessoa.olhos), id(luciano.olhos), id(ailton.olhos))
|
Weida-W/CMPUT404-assignment-ajax | server.py | #!/usr/bin/env python
# coding: utf-8
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You can start this by executing it in python:
# python server.py
#
# remember to:
# pip install flask
import flask
from flask import Flask, request, redirect
import json
app = Flask(__name__)
app.debug = True
# An example world
# {
# 'a':{'x':1, 'y':2},
# 'b':{'x':2, 'y':3}
# }
class World:
def __init__(self):
self.clear()
def update(self, entity, key, value):
entry = self.space.get(entity,dict())
entry[key] = value
self.space[entity] = entry
def set(self, entity, data):
self.space[entity] = data
def clear(self):
self.space = dict()
def get(self, entity):
return self.space.get(entity,dict())
def world(self):
return self.space
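# A quick usage sketch of the in-memory world:
#     w = World()
#     w.update('a', 'x', 1)
#     w.get('a')  # -> {'x': 1}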
# you can test your webservice from the commandline
# curl -v -H "Content-Type: application/json" -X PUT http://127.0.0.1:5000/entity/X -d '{"x":1,"y":1}'
myWorld = World()
# I give this to you, this is how you get the raw body/data portion of a post in flask
# this should come with flask but whatever, it's not my project.
def flask_post_json():
'''Ah the joys of frameworks! They do so much work for you
that they get in the way of sane operation!'''
    if request.json is not None:
        return request.json
    elif request.data is not None and request.data.decode("utf8") != u'':
        return json.loads(request.data.decode("utf8"))
    else:
        return json.loads(list(request.form.keys())[0])
@app.route("/")
def hello():
'''Return something coherent here.. perhaps redirect to /static/index.html '''
return redirect("/static/index.html")
@app.route("/entity/<entity>", methods=['POST','PUT'])
def update(entity):
'''update the entities via this interface'''
myWorld.set(entity,flask_post_json())
return myWorld.get(entity)
@app.route("/world", methods=['POST','GET'])
def world():
'''you should probably return the world here'''
return myWorld.world()
@app.route("/entity/<entity>")
def get_entity(entity):
'''This is the GET version of the entity interface, return a representation of the entity'''
return myWorld.get(entity)
@app.route("/clear", methods=['POST','GET'])
def clear():
'''Clear the world out!'''
myWorld.clear()
return myWorld.world()
if __name__ == "__main__":
app.run()
|
sesam-community/template-generator | service.py | <reponame>sesam-community/template-generator
from flask import Flask, request, jsonify
import os
import json
from statics.pipe_templates import collect_pipe, enrich_pipe, global_pipe, transform_pipe, share_pipe
from statics.system_templates import system_configs
from sesamutils import sesam_logger
app = Flask(__name__)
###
# Helpers
###
logger = sesam_logger("Steve the logger", app=app)
@app.route('/')
def index():
output = {
'service': 'Template generator up and running',
'remote_addr': request.remote_addr
}
return jsonify(output)
###
# Do the magic.
###
@app.route('/create_node_template', methods=['GET','POST'])
def create_dataflow():
    config_group = request.args.get('config_group') or "Default"
    system_type = request.args.get('system_type')
    system_name = request.args.get('system_name')
    datatype = request.args.get('datatype')
###
# node .zip related stuff
###
path = "/local/path/to/repository/template-generator/"
    os.makedirs(f"{path}/node/systems", exist_ok=True)
    os.makedirs(f"{path}/node/pipes", exist_ok=True)
    json_string = system_configs(system_type, config_group)
    if config_group != "Default":
        json_string["_id"] = f"{config_group}-{system_name}"
    else:
        json_string["_id"] = system_name
    with open(f"{path}/node/systems/{json_string['_id']}.conf.json", "w") as outfile:
        json.dump(json_string, outfile)
pipes_to_create = []
pipeNameAndDatatype = f"{system_name}-{datatype}"
pipes_to_create.append(collect_pipe(json_string, pipeNameAndDatatype, config_group))
pipes_to_create.append(enrich_pipe(pipeNameAndDatatype, config_group))
pipes_to_create.append(global_pipe(pipeNameAndDatatype, config_group))
pipes_to_create.append(transform_pipe(pipeNameAndDatatype, config_group))
pipes_to_create.append(share_pipe(pipeNameAndDatatype, config_group))
for pipe in pipes_to_create:
with open(f"{path}/node/pipes/{pipe['_id']}.conf.json", "w") as outfile:
json.dump(pipe, outfile)
return {"status": "your config has been created!"}
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True, threaded=True) |
sesam-community/template-generator | statics/pipe_templates.py | <filename>statics/pipe_templates.py
def collect_pipe(system, pipeNameAndDatatype, config_group):
if config_group != "Default":
config = {
"_id": f"{config_group}-{pipeNameAndDatatype}-collect",
"type": "pipe",
"source": {
"type": f"{system['type']}",
"system": f"{system['_id']}"
},
"metadata": {
"$config-group": f"{config_group}"
},
"add_namespaces": False
}
else:
config = {
"_id": f"{pipeNameAndDatatype}-collect",
"type": "pipe",
"source": {
"type": f"{system['type']}",
"system": f"{system['_id']}"
},
"add_namespaces": False
}
return config
def enrich_pipe(pipeNameAndDatatype, config_group):
    prefix = f"{config_group}-" if config_group != "Default" else ""
    config = {
        "_id": f"{prefix}{pipeNameAndDatatype}-enrich",
        "type": "pipe",
        "source": {
            "type": "dataset",
            # read from the matching collect pipe, including any config-group prefix
            "dataset": f"{prefix}{pipeNameAndDatatype}-collect"
        },
        "transform": {
            "type": "dtl",
            "rules": {
                "default": [
                    ["copy", "*"],
                    ["comment", "*** convention here is to add namespaced identifiers ***"],
                    ["make-ni", "system-datatype", "datatype"],
                    ["comment", "*** convention here is to add the property rdf:type ***"],
                    ["add", "rdf:type",
                        ["ni", "template:Example"]
                    ]
                ]
            }
        },
        "add_namespaces": True,
        "namespaces": {
            "identity": f"{pipeNameAndDatatype}",
            "property": f"{pipeNameAndDatatype}"
        }
    }
    if config_group != "Default":
        config["metadata"] = {
            "$config-group": f"{config_group}"
        }
    return config
def global_pipe(pipeNameAndDatatype, config_group):
    prefix = f"{config_group}-" if config_group != "Default" else ""
    config = {
        "_id": f"{config_group}-temporary" if prefix else "global-temporary",
        "type": "pipe",
        "source": {
            "type": "merge",
            "datasets": [f"{prefix}{pipeNameAndDatatype}-enrich"],
            "equality": [],
            "identity": "first",
            "strategy": "compact",
            "version": 2
        },
        "metadata": {
            "global": True,
            "tags": ["add your logical grouping here"]
        }
    }
    if config_group != "Default":
        config["metadata"]["$config-group"] = f"{config_group}"
    return config
def transform_pipe(pipeNameAndDatatype, config_group):
    prefix = f"{config_group}-" if config_group != "Default" else ""
    config = {
        "_id": f"{prefix}{pipeNameAndDatatype}-transform",
        "type": "pipe",
        "source": {
            "type": "dataset",
            "dataset": "global-template"
        },
        "transform": {
            "type": "dtl",
            "rules": {
                "default": [
                    ["comment", "*** convention to filter data on rdf:type ***"],
                    ["filter",
                        ["in",
                            ["ni", "template:Example"], "_S.rdf:type"]
                    ],
                    ["comment", "*** Add target system properties ***"],
                    ["add", "someNameForTargetSystem",
                        "_S.pick_a_global_property"
                    ]
                ]
            }
        },
        "remove_namespaces": True
    }
    if config_group != "Default":
        config["metadata"] = {
            "$config-group": f"{config_group}"
        }
    return config
def share_pipe(pipeNameAndDatatype, config_group):
    prefix = f"{config_group}-" if config_group != "Default" else ""
    config = {
        "_id": f"{prefix}{pipeNameAndDatatype}-share-operation",
        "type": "pipe",
        "source": {
            "type": "dataset",
            "dataset": f"{prefix}{pipeNameAndDatatype}-transform"
        },
        "sink": {
            "type": "temporary",
            "system": "temporary",
            "operation": "temporary"
        },
        "transform": [{
            "type": "dtl",
            "rules": {
                "default": [
                    ["comment", "*** add discard or filter here to only expose curated data ***"],
                    ["discard",
                        ["is-not-empty", "_S.critical_property"]
                    ],
                    ["comment", "filter",
                        ["eq", "_S._deleted", False]
                    ],
                    ["copy", "*"]
                ]
            }
        }, {
            "type": "template",
            "system": "template",
            "operation": "get",
            "replace_entity": False
        }, {
            "type": "dtl",
            "rules": {
                "default": [
                    ["comment", "*** the above external transform is only required when checking for optimistic locking in updates ***"],
                    ["comment", "*** optimistic locking ***"],
                    ["add", "_old",
                        ["first",
                            ["hops", {
                                "datasets": ["your-collect-dataflow-pipe a"],
                                "where": [
                                    ["eq", "_S._id", "a._id"]
                                ]
                            }]
                        ]
                    ],
                    ["add", "_json_old",
                        ["json-transit",
                            ["apply", "remove-under", "_T._old"]
                        ]
                    ],
                    ["add", "_json_new",
                        ["first",
                            ["json-transit",
                                ["apply", "remove-under",
                                    ["first", "_S."]
                                ]
                            ]
                        ]
                    ],
                    ["add", "_hash_old",
                        ["hash128", "murmur3", "_T._json_old"]
                    ],
                    ["add", "_hash_new",
                        ["hash128", "murmur3", "_T._json_new"]
                    ],
                    ["if",
                        ["eq", "_T._hash_old", "_T._hash_new"],
                        [
                            ["comment", "*** same data in system as in sesam collect ***"],
                            ["comment", "*** expose your data ***"],
                            ["comment", "*** example for a rest system is provided below ***"],
                            ["add", "::payload",
                                ["apply", "remove-under", "_S."]
                            ],
                            ["add", "::properties",
                                ["dict", "url",
                                    ["concat", "your-endpoint-resource/", "_S.entity.id"]
                                ]
                            ]
                        ],
                        [
                            ["comment", "**** different data in system than in sesam collect ****"],
                            ["discard"]
                        ]
                    ]
                ],
                "remove-under": [
                    ["copy", "*", "_*"]
                ]
            }
        }],
        "batch_size": 1
    }
    if config_group != "Default":
        config["metadata"] = {
            "$config-group": f"{config_group}"
        }
return config |
sesam-community/template-generator | statics/system_templates.py | <filename>statics/system_templates.py
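# Example (hypothetical arguments): system_configs("rest", "Default") returns the bare
# REST system template below; any non-"Default" config_group additionally stamps a
# "$config-group" metadata entry onto every template.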
def system_configs(system, config_group):
    """Return the config template for the given system type (or None if unknown)."""
    systems = {
        "microservice": {
            "_id": None,
            "type": "system:microservice",
            "docker": None,
            "verify_ssl": True
        },
        "kafka": {
            "_id": None,
            "type": "system:kafka",
            "bootstrap_servers": None
        },
        "rest": {
            "_id": None,
            "type": "system:rest",
            "operations": None,
            "url_pattern": "",
            "verify_ssl": True
        },
        "url": {
            "_id": None,
            "type": "system:url",
            "url_pattern": "",
            "verify_ssl": True
        },
        "twilio": {
            "_id": None,
            "type": "system:twilio",
            "account": None,
            "token": None
        },
        "elasticsearch": {
            "_id": None,
            "type": "system:elasticsearch",
            "hosts": None
        },
        "solr": {
            "_id": None,
            "type": "system:solr",
            "url": None
        },
        "smtp": {
            "_id": None,
            "type": "system:smtp"
        },
        "sqlite": {
            "_id": None,
            "type": "system:sqlite",
            "database": None
        },
        "postgresql": {
            "_id": None,
            "type": "system:postgresql",
            "database": None,
            "host": None,
            "password": None,
            "username": None
        },
        "oracle_tns": {
            "_id": None,
            "type": "system:oracle_tns",
            "password": None,
            "tns_name": None,
            "username": None
        },
        "sqlserver": {
            "_id": None,
            "type": "system:sqlserver",
            "database": None,
            "host": None,
            "password": None,
            "username": None
        },
        "mssql-azure-dw": {
            "_id": None,
            "type": "system:mssql-azure-dw",
            "database": None,
            "host": None,
            "password": None,
            "username": None
        },
        "mssql": {
            "_id": None,
            "type": "system:mssql",
            "database": None,
            "host": None,
            "password": None,
            "username": None
        },
        "mysql": {
            "_id": None,
            "type": "system:mysql",
            "database": None,
            "host": None,
            "password": None,
            "username": None
        },
        "oracle": {
            "_id": None,
            "type": "system:oracle",
            "database": None,
            "host": None,
            "password": None,
            "username": None
        },
        "ldap": {
            "_id": None,
            "type": "system:ldap",
            "host": None,
            "password": None,
            "username": None
        }
    }
    if config_group != "Default":
        for config in systems.values():
            config["metadata"] = {"$config-group": f"{config_group}"}
    return systems.get(system) |
DKrepsky/Kicad-LCSC-BOM-Plugin | lcsc-bom-plugin.py | <filename>lcsc-bom-plugin.py
#!/usr/bin/env python3
import sys
import csv
import xml.etree.ElementTree as ET
class Part:
def __init__(self):
self.name = None
self.mpn = None
self.lcsc = None
self.qty = 1
def parse_xml(file):
tree = ET.parse(file)
root = tree.getroot()
part_list = []
missing = []
for f in root.findall('./components/'):
name = f.attrib['ref']
fields = f.find('fields')
part = Part()
part.name = name
if fields is not None:
for x in fields:
if x.attrib['name'].upper() == 'LCSC':
part.lcsc = x.text
if x.attrib['name'].upper() == 'MPN':
part.mpn = x.text
if part.mpn is None and part.lcsc is None:
missing.append(part.name)
continue
exist = next((p for p in part_list if p.mpn == part.mpn and p.lcsc == part.lcsc), None)
if exist is not None:
exist.qty += 1
continue
else:
part_list.append(part)
return part_list, missing
def write_bom(file, part_list):
    column_headers = ['Quantity', 'Manufacture Part Number', 'LCSC Part Number']
    # newline='' avoids blank rows on Windows when writing with the csv module.
    with open("{}.csv".format(file), 'w', newline='') as csvfile:
        bom = csv.DictWriter(csvfile, fieldnames=column_headers, delimiter=',',
                             quotechar='"', quoting=csv.QUOTE_MINIMAL)
        bom.writeheader()
        # Part objects define no ordering, so sort by an explicit key.
        for part in sorted(part_list, key=lambda p: (p.mpn or '', p.lcsc or '')):
            bom.writerow({'Quantity': part.qty,
                          'Manufacture Part Number': part.mpn,
                          'LCSC Part Number': part.lcsc})
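# Example invocation (hypothetical file names):
#   python3 lcsc-bom-plugin.py netlist.xml my-board-bom
# reads the KiCad XML netlist and writes my-board-bom.csv.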
if __name__ == "__main__":
    if len(sys.argv) < 3:
        sys.exit("usage: lcsc-bom-plugin.py <netlist.xml> <output name>")
    input_file = sys.argv[1]
    output_file = sys.argv[2]
part_list, missing = parse_xml(input_file)
write_bom(output_file, part_list)
if len(missing) > 0:
print("Ignoring parts:")
print(",".join(missing))
|
CheungZeeCn/fairseq | fairseq/tasks/sentence_prediction_chinese_bert.py | <gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from fairseq.data import (
data_utils,
TokenizerDictionary,
RawLabelDataset,
BertTokenizerDataset,
)
from fairseq.tasks import FairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task('sentence_prediction_chinese_bert')
class SentencePredictionChineseBertTask(FairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='FILE',
help='file prefix for data')
parser.add_argument('--num-classes', type=int, default=-1,
help='number of classes or regression targets')
parser.add_argument('--regression-target', action='store_true', default=False)
parser.add_argument('--no-shuffle', action='store_true', default=False)
parser.add_argument('--shorten-method', default='none',
choices=['none', 'truncate', 'random_crop'],
help='if not none, shorten sequences that exceed --tokens-per-sample')
parser.add_argument('--shorten-data-split-list', default='',
help='comma-separated list of dataset splits to apply shortening to, '
'e.g., "train,valid" (default: all dataset splits)')
def __init__(self, args, data_dictionary):
super().__init__(args)
self.dictionary = data_dictionary
if not hasattr(args, 'max_positions'):
self._max_positions = args.max_source_positions
else:
self._max_positions = args.max_positions
args.tokens_per_sample = self._max_positions
self.tokenizer = data_dictionary.tokenizer
self.args = args
@classmethod
def load_dictionary(cls, args, model_path):
dictionary = TokenizerDictionary.load(model_path)
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.num_classes > 0, 'Must set --num-classes'
# load data dictionary
data_dict = cls.load_dictionary(
args,
args.load_hf_bert_from,
)
return SentencePredictionChineseBertTask(args, data_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(type, split):
return os.path.join(self.args.data, type, split)
def make_raw_dataset(type):
split_path = get_path(type, split)
dataset = data_utils.load_indexed_raw_str_dataset(
split_path,
)
return dataset
input0 = make_raw_dataset('input0')
assert input0 is not None, 'could not find dataset: {}'.format(get_path('input0', split))
src_raw = input0
tgt_raw = None
label_path = "{0}.label".format(get_path('label', split))
if os.path.exists(label_path):
            with open(label_path) as h:
                tgt_raw = RawLabelDataset([
                    int(line.strip()) for line in h
                ])
self.datasets[split] = BertTokenizerDataset(src_raw, tgt_raw, self.tokenizer)
return self.datasets[split]
def build_model(self, args):
from fairseq import models
# logger.info("=" * 100 + " task.build_model " + "=" * 100)
model = models.build_model(args, self)
model.register_classification_head(
getattr(args, 'classification_head_name', 'sentence_classification_head'),
num_classes=self.args.num_classes,
)
return model
def max_positions(self):
return self._max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
@property
def label_dictionary(self):
return self._label_dictionary
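# Hypothetical invocation sketch (paths and values are placeholders; --load-hf-bert-from
# is consumed in setup_task above, other flags come from the chosen model/criterion):
#   fairseq-train /path/to/data --task sentence_prediction_chinese_bert \
#       --num-classes 2 --load-hf-bert-from /path/to/chinese-bert-base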
|
CheungZeeCn/fairseq | fairseq/tasks/rebert.py | <reponame>CheungZeeCn/fairseq<filename>fairseq/tasks/rebert.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import logging
import torch
from fairseq import metrics, options, utils
from fairseq.criterions.refinement_nat_loss import RefinementLabelSmoothedDualImitationCriterion
from fairseq.data import LanguagePairDataset
from fairseq.utils import new_arange
from fairseq.tasks import register_task
from fairseq.tasks.refinement import RefinementTask, load_tokenizer_plus_refinement_pair_dataset
from fairseq import utils
from fairseq.data import data_utils
# data part
from fairseq.data import TokenizerDictionary, Dictionary
logger = logging.getLogger(__name__)
@register_task('rebert')
class RefinementLevenshteinReBtpTask(RefinementTask):
"""
在之前翻译任务的基础上,做纠错.
相比lev-t 在encoder侧替换成了bert, 要指定bert预训练模型的路径
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
RefinementTask.add_args(parser)
parser.add_argument(
'--noise',
default='random_delete',
choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
parser.add_argument(
"--load-source-middle",
action="store_true",
help="load source middle files for better training",
)
parser.add_argument('--load-hf-bert-from', type=str, default='',
help='load huggingface pretrained bert from path')
parser.add_argument('--load-hf-bert-config-only', action='store_true',
help='only load config in the path so we can get a hf model')
parser.add_argument(
"--fix-bert-params",
action="store_true",
help='fix-bert-params'
)
parser.add_argument(
"--share-bert-params",
action="store_true",
help='fix-bert-params'
)
parser.add_argument(
"--pinyin-on",
action="store_true",
help='enable pinyin feature'
)
parser.add_argument(
"--dual-policy-ratio", default=0.5, type=float, metavar='N',
help='the probability of using dual policy in one pass of forward()'
)
parser.add_argument(
"--middle-mode-ratio", default=0.5, type=float, metavar='N',
help='the probability of using middle source data in one pass of forward()'
)
# parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
# help='decoder embedding dimension')
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
""" 新的数据集格式,需要新的数据加载方法
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_tokenizer_plus_refinement_pair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_source_middle=self.args.load_source_middle,
src_pinyin_dict=self.src_pinyin_dict,
tgt_pinyin_dict=self.tgt_pinyin_dict
)
def inject_noise(self, target_tokens):
def _random_delete(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
max_len = target_tokens.size(1)
target_mask = target_tokens.eq(pad)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(
target_tokens.eq(bos) | target_tokens.eq(eos), 0.0)
target_score.masked_fill_(target_mask, 1)
target_score, target_rank = target_score.sort(1)
target_length = target_mask.size(1) - target_mask.float().sum(
1, keepdim=True)
# do not delete <bos> and <eos> (we assign 0 score for them)
target_cutoff = 2 + ((target_length - 2) * target_score.new_zeros(
target_score.size(0), 1).uniform_()).long()
target_cutoff = target_score.sort(1)[1] >= target_cutoff
prev_target_tokens = target_tokens.gather(
1, target_rank).masked_fill_(target_cutoff, pad).gather(
1,
target_rank.masked_fill_(target_cutoff,
max_len).sort(1)[1])
prev_target_tokens = prev_target_tokens[:, :prev_target_tokens.
ne(pad).sum(1).max()]
return prev_target_tokens
def _random_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_masks = target_tokens.ne(pad) & \
target_tokens.ne(bos) & \
target_tokens.ne(eos)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
target_length = target_masks.sum(1).float()
target_length = target_length * target_length.clone().uniform_()
target_length = target_length + 1 # make sure to mask at least one token.
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk)
return prev_target_tokens
def _full_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_mask = target_tokens.eq(bos) | target_tokens.eq(
eos) | target_tokens.eq(pad)
return target_tokens.masked_fill(~target_mask, unk)
if self.args.noise == 'random_delete':
return _random_delete(target_tokens)
elif self.args.noise == 'random_mask':
return _random_mask(target_tokens)
elif self.args.noise == 'full_mask':
return _full_mask(target_tokens)
elif self.args.noise == 'no_noise':
return target_tokens
else:
raise NotImplementedError
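    # Illustrative effect of the noise policies above (tokens shown symbolically):
    #   random_mask may turn [bos, A, B, C, eos] into [bos, A, <unk>, C, eos];
    #   full_mask yields [bos, <unk>, <unk>, <unk>, eos];
    #   random_delete drops a random subset of the non-special tokens.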
def build_generator(self, models, args):
"""
会在infer环节被使用 见 generate.py
:param models:
:param args:
:return:
"""
# add models input to match the API for SequenceGenerator
from fairseq.iterative_refinement_generator_rbtp import IterativeRefinementGeneratorRbtp
        # At inference time, generate() on this object is invoked.
return IterativeRefinementGeneratorRbtp(
self.target_dictionary,
eos_penalty=getattr(args, 'iter_decode_eos_penalty', 0.0),
max_iter=getattr(args, 'iter_decode_max_iter', 10),
beam_size=getattr(args, 'iter_decode_with_beam', 1),
reranking=getattr(args, 'iter_decode_with_external_reranker', False),
decoding_format=getattr(args, 'decoding_format', None),
adaptive=not getattr(args, 'iter_decode_force_max_iter', False),
retain_history=getattr(args, 'retain_iter_history', False))
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
# Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
            raise NotImplementedError("Constrained decoding is not supported by this task")
raise NotImplementedError("")
return LanguagePairDataset(
src_tokens, src_lengths, self.source_dictionary, append_bos=True
)
def train_step(self,
sample,
model,
criterion,
optimizer,
update_num,
ignore_grad=False):
model.train()
sample['prev_target'] = self.inject_noise(sample['target'])
if self.args.load_source_middle is True:
assert isinstance(criterion, RefinementLabelSmoothedDualImitationCriterion)
loss, sample_size, logging_output = criterion(model, sample, load_source_middle=True)
else:
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
sample['prev_target'] = self.inject_noise(sample['target'])
loss, sample_size, logging_output = criterion(model, sample, load_source_middle=True)
return loss, sample_size, logging_output
# src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
    # Use the wrapped (tokenizer-backed) dictionary instead.
@classmethod
def load_dictionary(cls, bert_pretrained_path):
dictionary = TokenizerDictionary.load(bert_pretrained_path)
return dictionary
@classmethod
def load_pinyin_dictionary(cls, file):
dictionary = Dictionary.load(file)
return dictionary
    # The task initialization needs to be rewritten.
@classmethod
def setup_task(cls, args, **kwargs):
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
        # This task always requires the path of the original pretrained model in order to initialize the tokenizer.
assert args.load_hf_bert_from != ''
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
if args.source_lang is None or args.target_lang is None:
raise Exception('Could not infer language pair, please provide it explicitly')
# load dictionaries
src_dict = cls.load_dictionary(args.load_hf_bert_from)
tgt_dict = cls.load_dictionary(args.load_hf_bert_from)
if args.pinyin_on is True:
src_pinyin_dict = cls.load_pinyin_dictionary(os.path.join(paths[0], 'pinyin.dict'))
tgt_pinyin_dict = cls.load_pinyin_dictionary(os.path.join(paths[0], 'pinyin.dict'))
else:
src_pinyin_dict = None
tgt_pinyin_dict = None
logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
if src_pinyin_dict is not None:
logger.info('pinyin ON, [{}] pinyin dictionary: {} types'.format(os.path.join(paths[0], 'pinyin.dict'),
len(src_pinyin_dict)))
else:
logger.info('pinyin OFF')
        # The base-class constructor is trivial: it just assigns the dictionaries, which here wrap the tokenizer.
return cls(args, src_dict, tgt_dict, src_pinyin_dict, tgt_pinyin_dict)
|
CheungZeeCn/fairseq | fairseq/data/middle_enhanced_tokenizer_plus_language_pair_dataset.py | <filename>fairseq/data/middle_enhanced_tokenizer_plus_language_pair_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
import pypinyin
from pypinyin import pinyin, Style
from fairseq.data import data_utils, FairseqDataset
from fairseq.data.dictionary import Dictionary
logger = logging.getLogger(__name__)
def collate(
samples,
src_dict,
tgt_dict,
pad_idx,
eos_idx,
# left_pad_source=True,
# left_pad_target=False,
input_feeding=True,
pad_to_length=None,
src_pinyin_dict=None,
tgt_pinyin_dict=None
):
if len(samples) == 0:
return {}
# logger.info("before collate samples {}".format(samples))
# raise NotImplementedError()
"""
输入的样子
example = {
'id': index,
'source': src_item, # 原始字符串
'source_middle': src_mid_item, # 原始字符串
'target': tgt_item, # 原始字符串
}
"""
assert pad_to_length == None, "pad_to_length not supported"
if src_pinyin_dict is None:
pinyin_on = False
else:
pinyin_on = True
# logger.info("in collate sampels: {}".format(samples))
def merge_str(key, dictionary, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[dictionary.encode_line(s[key]).long() for s in samples],
pad_idx, eos_idx, False, move_eos_to_beginning,
pad_to_length=pad_to_length,
)
def merge_pinyin_str(key, tokenizer, dictionary, move_eos_to_beginning=False, pad_to_length=None, append_bos=True):
batch_pinyin_str = [get_pinyin(s[key], tokenizer, dictionary) for s in samples]
bos = dictionary.bos()
ret_tensor = data_utils.collate_tokens(
[dictionary.encode_line(s).long() if append_bos is False else torch.cat(
[torch.LongTensor([bos]), dictionary.encode_line(s).long()]) for s in batch_pinyin_str],
dictionary.pad(), dictionary.eos(), False, move_eos_to_beginning,
pad_to_length=pad_to_length,
)
return ret_tensor
def merge(key, left_pad=False, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx, eos_idx, left_pad, move_eos_to_beginning,
pad_to_length=pad_to_length,
)
def merge_from_str(key, tokenizer, pinyin_dict=None):
"""
return examples like:
{'input_ids': tensor([[101, 2644, 1962, 102, 0],
[101, 2769, 738, 1962, 102]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]),
'attention_mask': tensor([[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
'pinyin': ...
}
"""
ret = dict(tokenizer([s[key] for s in samples], padding=True, return_tensors='pt'))
# logger.info("f{pinyin_dict}")
# logger.info(dir(pinyin_dict))
        # list of pinyin strings
if pinyin_dict is not None:
batch_pinyin_encoded = merge_pinyin_str(key, tokenizer, pinyin_dict, move_eos_to_beginning=False,
pad_to_length=pad_to_length)
ret['pinyin'] = batch_pinyin_encoded
return ret
def get_pinyin(text, tokenizer, pinyin_dict):
t_list = tokenizer.tokenize(text)
to_strs = []
t_list = [t[-1] for t in t_list]
try:
for i, py_sub_list in enumerate(pinyin(t_list, style=Style.NORMAL)):
# logger.info(py_sub_list)
i_pinyin = py_sub_list[0]
if i_pinyin == t_list[i]:
i_pinyin = pinyin_dict.unk_word
to_strs.append(i_pinyin)
ret_pinyin_str = " ".join(to_strs)
except Exception as e:
logger.info(e)
logger.info(text)
logger.info(t_list)
logger.info(pinyin(t_list, style=Style.NORMAL))
logger.info(i)
raise e
# logger.info(ret_pinyin_str)
return ret_pinyin_str
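    # Illustrative sketch (assuming a BERT-style tokenizer): for the text "我也好",
    # t_list becomes ['我', '也', '好'] and the return value is "wo ye hao"; tokens
    # whose pypinyin output equals the token itself (i.e. non-Chinese) are mapped to
    # the pinyin dictionary's unk word.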
id = torch.LongTensor([s['id'] for s in samples])
src_tokened = merge_from_str('source', src_dict.tokenizer, src_pinyin_dict)
# sort by descending source length
pad_idx = src_dict.pad_index
src_lengths = src_tokened['input_ids'].ne(pad_idx).sum(dim=1)
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
for k, v in src_tokened.items():
src_tokened[k] = v.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get('target', None) is not None:
target = merge_str('target', tgt_dict)
target = target.index_select(0, sort_order)
tgt_lengths = target.ne(pad_idx).sum(dim=1)
ntokens = tgt_lengths.sum().item()
if pinyin_on is True:
tgt_pinyins = merge_pinyin_str('target', tgt_dict.tokenizer, tgt_pinyin_dict, move_eos_to_beginning=False,
pad_to_length=pad_to_length)
tgt_pinyins = tgt_pinyins.index_select(0, sort_order)
else:
tgt_pinyins = None
if samples[0].get('prev_output_tokens', None) is not None:
        # When would we get here -- at inference time? If so, this must not be a raw string.
raise NotImplementedError("check here")
prev_output_tokens = merge('prev_output_tokens', left_pad=left_pad_target)
elif input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge_str('target', tgt_dict, move_eos_to_beginning=True)
if pinyin_on is True:
prev_output_tokens_pinyins = merge_pinyin_str('target', tgt_dict.tokenizer, tgt_pinyin_dict,
move_eos_to_beginning=True,
pad_to_length=pad_to_length)
prev_output_tokens_pinyins = prev_output_tokens_pinyins.index_select(0, sort_order)
else:
prev_output_tokens_pinyins = None
else:
ntokens = src_lengths.sum().item()
# added for source middle
if samples[0].get('source_middle', None) is not None:
src_mid_tokens = merge_str('source_middle', src_dict)
src_mid_tokens = src_mid_tokens.index_select(0, sort_order)
src_mid_lengths = src_mid_tokens.ne(pad_idx).sum(dim=1)
if pinyin_on is True:
src_mid_pinyins = merge_pinyin_str('source_middle', src_dict.tokenizer, src_pinyin_dict,
move_eos_to_beginning=False,
pad_to_length=pad_to_length).index_select(0, sort_order)
else:
src_mid_pinyins = None
else:
src_mid_tokens = None
src_mid_lengths = None
src_mid_pinyins = None
batch = {
'id': id,
'nsentences': len(samples),
'ntokens': ntokens,
'net_input': {
'src_tokens': src_tokened,
'src_lengths': src_lengths,
'src_mid_tokens': src_mid_tokens,
'src_mid_lengths': src_mid_lengths,
'src_mid_pinyins': src_mid_pinyins
},
'target': target,
        # Not actually used downstream; kept here for debugging.
'target_pinyins': tgt_pinyins,
}
if prev_output_tokens is not None:
batch['net_input']['prev_output_tokens'] = prev_output_tokens.index_select(0, sort_order)
if pinyin_on is True:
            batch['net_input']['prev_output_tokens_pinyins'] = prev_output_tokens_pinyins  # already index-selected above
else:
batch['net_input']['prev_output_tokens_pinyins'] = None
# logger.info("after collate collate samples {}".format(batch))
return batch
class MiddleEnhancedTokenizerPlusLanguagePairDataset(FairseqDataset):
"""
A pair of torch.utils.data.Datasets.
Args:
"""
def __init__(
self, src, src_sizes, src_dict,
src_mid=None, src_mid_sizes=None, src_mid_dict=None,
tgt=None, tgt_sizes=None, tgt_dict=None,
left_pad_source=True, left_pad_target=False,
shuffle=True, input_feeding=True,
# remove_eos_from_source=False, append_eos_to_target=False,
# align_dataset=None,
# constraints=None,
# append_bos=False, eos=None,
num_buckets=0,
src_lang_id=None,
tgt_lang_id=None,
src_pinyin_dict=None,
tgt_pinyin_dict=None,
):
if tgt_dict is not None:
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
if tgt is not None:
assert len(src) == len(tgt), "Source and target must contain the same number of examples"
self.src = src
self.src_mid = src_mid
self.tgt = tgt
self.src_sizes = np.array(src_sizes)
self.src_mid_sizes = np.array(src_mid_sizes) if src_mid_sizes is not None else None
self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
self.src_dict = src_dict
self.src_mid_dict = src_mid_dict
self.tgt_dict = tgt_dict
        # todo: these two should not affect downstream logic; flagged for later confirmation.
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
# ---
self.shuffle = shuffle
        # todo: this appears to be the teacher-forcing switch.
self.input_feeding = input_feeding
        # Simply commented out for now.
# self.remove_eos_from_source = remove_eos_from_source
# self.append_eos_to_target = append_eos_to_target
# self.align_dataset = align_dataset
# if self.align_dataset is not None:
# assert self.tgt_sizes is not None, "Both source and target needed when alignments are provided"
# self.constraints = constraints
# self.append_bos = append_bos
# self.eos = (eos if eos is not None else src_dict.eos())
self.src_lang_id = src_lang_id
self.tgt_lang_id = tgt_lang_id
if num_buckets > 0:
            # Not supported yet: bucketing requires padding up front; supporting it would mean first splitting the tokenizer into its sub-steps.
raise NotImplementedError("num_buckets > 0 Not Supported Yet")
self.buckets = None
self.src_pinyin_dict = src_pinyin_dict
self.tgt_pinyin_dict = tgt_pinyin_dict
def get_batch_shapes(self):
return self.buckets
def __getitem__(self, index):
tgt_item = self.tgt[index] if self.tgt is not None else None
src_mid_item = self.src_mid[index] if self.src_mid is not None else None
src_item = self.src[index]
        # If pinyin is needed later, add it here, e.g. as a source_pinyin entry.
example = {
'id': index,
'source': src_item,
'source_middle': src_mid_item,
'target': tgt_item,
}
return example
def __len__(self):
return len(self.src)
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
pad_to_length (dict, optional): a dictionary of
{'source': source_pad_to_length, 'target': target_pad_to_length}
to indicate the max length to pad to in source and target respectively.
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the left if *left_pad_source* is ``True``.
- `src_lengths` (LongTensor): 1D Tensor of the unpadded
lengths of each source sentence of shape `(bsz)`
- `prev_output_tokens` (LongTensor): a padded 2D Tensor of
tokens in the target sentence, shifted right by one
position for teacher forcing, of shape `(bsz, tgt_len)`.
This key will not be present if *input_feeding* is
``False``. Padding will appear on the left if
*left_pad_target* is ``True``.
- `src_lang_id` (LongTensor): a long Tensor which contains source
language IDs of each sample in the batch
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the left if *left_pad_target* is ``True``.
- `tgt_lang_id` (LongTensor): a long Tensor which contains target language
IDs of each sample in the batch
"""
res = collate(
samples,
self.src_dict,
self.tgt_dict,
pad_idx=self.src_dict.pad(),
eos_idx=self.src_dict.eos(),
# left_pad_source=self.left_pad_source,
# left_pad_target=self.left_pad_target,
input_feeding=self.input_feeding,
pad_to_length=pad_to_length,
src_pinyin_dict=self.src_pinyin_dict,
tgt_pinyin_dict=self.tgt_pinyin_dict
)
if self.src_lang_id is not None or self.tgt_lang_id is not None:
src_tokens = res['net_input']['src_tokens']
bsz = src_tokens.size(0)
if self.src_lang_id is not None:
res['net_input']['src_lang_id'] = torch.LongTensor(
[[self.src_lang_id]]
).expand(bsz, 1).to(src_tokens)
if self.tgt_lang_id is not None:
res['tgt_lang_id'] = torch.LongTensor(
[[self.tgt_lang_id]]
).expand(bsz, 1).to(src_tokens)
return res
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self)).astype(np.int64)
else:
indices = np.arange(len(self), dtype=np.int64)
if self.buckets is None:
# sort by target length, then source length
if self.tgt_sizes is not None:
indices = indices[
np.argsort(self.tgt_sizes[indices], kind='mergesort')
]
return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]
else:
# sort by bucketed_num_tokens, which is:
# max(padded_src_len, padded_tgt_len)
raise NotImplementedError("buckets not supported")
return indices[
np.argsort(self.bucketed_num_tokens[indices], kind='mergesort')
]
@property
def supports_prefetch(self):
return (
getattr(self.src, 'supports_prefetch', False)
and (getattr(self.tgt, 'supports_prefetch', False) or self.tgt is None)
)
def prefetch(self, indices):
self.src.prefetch(indices)
if self.tgt is not None:
self.tgt.prefetch(indices)
if self.src_mid is not None:
self.src_mid.prefetch(indices)
        # align_dataset is currently disabled in __init__, so guard the access.
        if getattr(self, 'align_dataset', None) is not None:
            self.align_dataset.prefetch(indices)
def filter_indices_by_size(self, indices, max_sizes):
""" Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if max_sizes is None:
return indices, []
if type(max_sizes) in (int, float):
max_src_size, max_tgt_size = max_sizes, max_sizes
else:
max_src_size, max_tgt_size = max_sizes
if self.tgt_sizes is None:
ignored = indices[self.src_sizes[indices] > max_src_size]
else:
ignored = indices[(self.src_sizes[indices] > max_src_size) |
(self.tgt_sizes[indices] > max_tgt_size)]
if len(ignored) > 0:
if self.tgt_sizes is None:
indices = indices[self.src_sizes[indices] <= max_src_size]
else:
indices = indices[(self.src_sizes[indices] <= max_src_size) &
(self.tgt_sizes[indices] <= max_tgt_size)]
return indices, ignored.tolist()
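# Minimal usage sketch (hypothetical datasets and dictionaries):
#   ds = MiddleEnhancedTokenizerPlusLanguagePairDataset(
#       src, src_sizes, src_dict, tgt=tgt, tgt_sizes=tgt_sizes, tgt_dict=tgt_dict)
#   batch = ds.collater([ds[0], ds[1]])  # dict with 'id', 'net_input', 'target', ...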
|
CheungZeeCn/fairseq | fairseq/models/huggingface/hf_bert.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from torch import nn
from fairseq import utils
from fairseq.models import (
FairseqEncoderModel,
FairseqEncoder,
register_model,
register_model_architecture,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
try:
from transformers import BertModel, BertTokenizer, BertConfig
has_hf = True
except ImportError:
has_hf = False
logger = logging.getLogger(__name__)
DEFAULT_MAX_TARGET_POSITIONS = 510
# class HuggingFaceBertLanguageModel(FairseqLanguageModel):
@register_model('hf_bert')
class HuggingFaceBertLanguageModel(FairseqEncoderModel):
def __init__(self, args, encoder, task):
super().__init__(encoder)
if not has_hf:
raise ImportError(
'\n\nPlease install huggingface/transformers with:'
'\n\n pip install transformers'
'\n\nOr to make local edits, install the submodule:'
'\n\n git submodule update --init '
'fairseq/models/huggingface/transformers'
)
self.task = task
        self.args = args
        self.encoder = encoder
        # self.apply(init_bert_params)
        self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
        # todo: leave these for now; they will be adapted one by one later.
parser.add_argument('--embed-dim', type=int, metavar='N',
help='embedding dimension')
parser.add_argument('--num-attention-heads', type=int, metavar='N',
help='num attention heads')
parser.add_argument('--num-layers', type=int, metavar='N',
help='num layers')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability for all fully connected layers '
'in the embeddings, encoder, and pooler')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--max-positions', type=int,
help='number of positional embeddings to learn')
parser.add_argument('--load-hf-bert-from', type=str, default='',
help='load huggingface pretrained bert from path')
parser.add_argument('--load-hf-bert-config-only', action='store_true',
help='only load config in the path so we can get a hf model')
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# print("In build_model !!!")
default_architecture(args)
assert args.load_hf_bert_from != ''
encoder = HuggingFaceBertEncoder(args, task.dictionary)
return cls(args, encoder, task)
# copy it from roberta's code
def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, return_all_hiddens=False,
classification_head_name=None, **kwargs):
src_tokens = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask
}
x, extra = self.encoder(src_tokens, return_all_hiddens)
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
'and inner_dim {} (prev: {})'.format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = HuggingFaceBertClassificationHead(
self.args.embed_dim, # self.args.encoder_embed_dim,
inner_dim or self.args.embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
self.args.quant_noise_pq,
self.args.quant_noise_pq_block_size,
)
class HuggingFaceBertClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, q_noise=0, qn_block_size=8):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = apply_quant_noise_(
nn.Linear(inner_dim, num_classes), q_noise, qn_block_size
)
def forward(self, features, **kwargs):
# logging.info("features {}: {}".format(features.shape, features))
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
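# Shape sketch (hypothetical dimensions): with input_dim=inner_dim=768 and
# num_classes=2, `features` of shape (batch, seq_len, 768) is pooled at position 0
# (the [CLS] token) and projected to (batch, 2) logits.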
class HuggingFaceBertEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
try:
# Prepend the transformers submodule to the path, so that
# it's prioritized over other installations. This allows
# making local changes in the submodule.
sys.path.insert(
0, os.path.join(os.path.dirname(__file__), 'transformers', 'src')
)
from transformers import BertModel, BertTokenizer, BertConfig
except ImportError:
raise ImportError(
'\n\nPlease install huggingface/transformers with:'
'\n\n pip install transformers'
'\n\nOr to make local edits, install the submodule:'
'\n\n git submodule update --init '
'fairseq/models/huggingface/transformers'
)
# logging.info(args)
# raise NotImplementedError(args.load_hf_bert_from)
load_hf_bert_from = getattr(args, 'load_hf_bert_from', '')
assert load_hf_bert_from != ''
model_path = load_hf_bert_from
config = BertConfig.from_pretrained(model_path)
#logging.info("args: {}".format(args))
if getattr(args, 'load_hf_bert_config_only', False) is True:
logger.info(
"now we will init the hf_bert model from config without the weights,"
" since we will restore the weights later")
self.model = BertModel(config)
else:
logger.info("now we will init the hf_bert model from {} with all the weights".format(model_path))
self.model = BertModel.from_pretrained(model_path)
self.tokenizer = dictionary.tokenizer
self.dictionary = dictionary
self.args = args
self.config = config
def forward(self, src_tokens, return_all_hiddens=False, ):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
tuple:
- the LM output of shape `(batch, src_len, vocab)`
- a dictionary of additional data, where 'inner_states'
is a list of hidden states. Note that the hidden
states have shape `(src_len, batch, vocab)`.
"""
x, extra = self.extract_features(src_tokens, return_all_hiddens=return_all_hiddens)
return x, extra
def extract_features(self, src_tokens, return_all_hiddens=False, **unused):
inner_states = self.model(**src_tokens)
features = inner_states[0].float()
return features, {'inner_states': inner_states[2] if return_all_hiddens else None}
def max_positions(self):
"""Maximum output length supported by the encoder."""
return min(self.args.max_positions, self.model.config.max_position_embeddings - 2)
@register_model_architecture('hf_bert', 'hf_bert_base')
def default_architecture(args):
if getattr(args, 'max_target_positions', None) is None:
args.max_target_positions = getattr(
args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS
)
args.embed_dim = getattr(args, 'embed_dim', 768)
args.num_attention_heads = getattr(args, 'num_attention_heads', 8)
args.num_layers = getattr(args, 'num_layers', 12)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.max_positions = getattr(args, 'max_positions', 510)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
|
CheungZeeCn/fairseq | fairseq/models/nat/levenshtein_refinement_rebert.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from abc import ABC
from typing import Any, Dict, List, Optional, Tuple
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture, FairseqEncoder
from pypinyin import pinyin, Style
from fairseq.models.transformer import (
Embedding,
TransformerDecoderLayer
)
import random
from fairseq.models.nat import (
FairseqNATModel,
# FairseqNATReBertDecoder,
FairseqNATReBertPlusDecoder,
FairseqNATDecoder,
FairseqNATEncoder,
ensemble_decoder
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .levenshtein_utils import (
_skip, _skip_encoder_out, _fill,
_get_ins_targets, _get_del_targets, _get_del_target_by_middle,
_apply_ins_masks, _apply_ins_words, _apply_del_words
)
DEFAULT_MAX_SOURCE_POSITIONS = 512
DEFAULT_MAX_TARGET_POSITIONS = 512
logger = logging.getLogger(__name__)
@register_model("levenshtein_refinement_rebert")
class LevenshteinRefinementBertTransformerPlusModel(FairseqNATModel):
    # Length beam is not supported for this NAT variant for now.
@property
def allow_length_beam(self):
return False
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
parser.add_argument(
"--early-exit",
default="2,2,2",
type=str,
help="number of decoder layers before word_del, mask_ins, word_ins",
)
parser.add_argument(
"--no-share-discriminator",
action="store_true",
help="separate parameters for discriminator",
)
parser.add_argument(
"--no-share-maskpredictor",
action="store_true",
help="separate parameters for mask-predictor",
)
parser.add_argument(
"--share-discriminator-maskpredictor",
action="store_true",
help="share the parameters for both mask-predictor and discriminator",
)
parser.add_argument(
"--sampling-for-deletion",
action='store_true',
help='instead of argmax, use sampling to predict the tokens'
)
parser.add_argument(
"--share-bert",
action="store_true",
help="use the same bert for encoder and decoder",
)
# parser.add_argument(
# "--dual-policy-ratio", default=0.5, type=float, metavar='N',
# help='the probability of using dual policy in one pass of forward()'
# )
# parser.add_argument(
# "--middle-mode-ratio", default=0.5, type=float, metavar='N',
# help='the probability of using middle source data in one pass of forward()'
# )
parser.add_argument('--decoder-pinyin-embed-path', type=str, metavar='STR',
help='path to pre-trained pinyin decoder embedding')
parser.add_argument('--encoder-pinyin-embed-path', type=str, metavar='STR',
help='path to pre-trained pinyin encoder embedding')
# parser.add_argument('--load-hf-bert-from', type=str, default='',
# help='load huggingface pretrained bert from path')
# parser.add_argument('--load-hf-bert-config-only', action='store_true',
# help='only load config in the path so we can get a hf model')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
levenshtein_rebert_base_architecture(args)
# if args.encoder_layers_to_keep:
# args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
# if args.decoder_layers_to_keep:
# args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
        # if args.share_all_embeddings:
        #     # only the pinyin embeddings can be shared
        #     encoder_embed_tokens = cls.build_embedding(
        #         args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
        #     )
        #     raise NotImplementedError("Sharing the BERT encoder is not supported yet; to be developed")
        # else:
        #     # tgt_dict mainly supplies the embedding input dimension here; unused in rebert
        #     decoder_embed_tokens = cls.build_embedding(
        #         args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
        #     )
src_pinyin_dict = task.src_pinyin_dict
#tgt_pinyin_dict = task.tgt_pinyin_dict
if args.pinyin_on is True:
encoder_pinyin_embed_tokens = cls.build_embedding(
args, src_pinyin_dict, args.encoder_pinyin_embed_dim, args.encoder_pinyin_embed_path
)
if args.encoder_pinyin_embed_path is None:
encoder_pinyin_embed_tokens.apply(init_bert_params)
else:
encoder_pinyin_embed_tokens = None
encoder = cls.build_encoder(args, src_dict, src_pinyin_dict, encoder_pinyin_embed_tokens)
# decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
        # Pass the encoder to the decoder directly; it plays a role similar to decoder_embed_tokens and also carries the pinyin embedding.
decoder = cls.build_decoder(args, tgt_dict, encoder)
return cls(args, encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, src_pinyin_dict, pinyin_embed_tokens):
assert args.load_hf_bert_from != ''
encoder = HuggingFaceBertPlusEncoder(args, src_dict, src_pinyin_dict, pinyin_embed_tokens)
# if getattr(args, "apply_bert_init", False):
# encoder.apply(init_bert_params)
return encoder
"""
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = LevenshteinRefinementTransformerDecoder(args, tgt_dict, embed_tokens)
#if getattr(args, "apply_bert_init", False):
# decoder.apply(init_bert_params)
return decoder
"""
@classmethod
def build_decoder(cls, args, tgt_dict, encoder):
decoder = LevenshteinRefinementPlusTransformerDecoder(args, tgt_dict, encoder)
# if getattr(args, "apply_bert_init", False):
# decoder.apply(init_bert_params)
return decoder
@classmethod
def t2p(cls, t2p_buff, tokens_ids):
l = tokens_ids.shape[1]
pinyin_ids = t2p_buff.index_select(-1, tokens_ids.reshape(-1))
pinyin_ids = pinyin_ids.reshape((-1, l)).contiguous()
return pinyin_ids
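    # Sketch: with t2p_buff = torch.LongTensor([0, 7, 9]) mapping token id -> pinyin id
    # and tokens_ids = torch.LongTensor([[1, 2], [2, 0]]), t2p returns [[7, 9], [9, 0]].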
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert tgt_tokens is not None, "forward function only supports training."
# logging.info("self.args.load_source_middle: {}".format(self.args.load_source_middle))
# logging.info("src_mid_tokens: {}".format(kwargs['src_mid_tokens']))
# logging.info("src_mid_lengths: {}".format(kwargs['src_mid_lengths']))
"""
想在训练这块做如下策略,
随机决定是否启用dual policy
一旦启用
从 prev 预测要增加几个,增加什么,然后用delete 来去在这个结果上进行学习
否则:
从 delete这块就只能使用middle_src来进行学习
随机决定是否使用middle_src
一旦启用
从middle_src 中学习 insert 内容,可以学习到正确答案的生成能力
如果不启用
在insert的学习中,可以来自random_delete策略, 强化生成能力
在不使用dual plicy的时候,也是要使用middle_src 来进行学习
"""
cut_off = random.random()
dual_policy_ratio = getattr(self.args, 'dual_policy_ratio', 0.5)
dual_policy_mode = False
if cut_off <= dual_policy_ratio:
dual_policy_mode = True
        # When the dual policy is disabled, middle_source must be used to learn deletion.
middle_mode = False
middle_mode_ratio = getattr(self.args, 'middle_mode_ratio', 0.5)
assert self.args.load_source_middle is True
src_mid_tokens = kwargs['src_mid_tokens']
src_mid_lengths = kwargs['src_mid_lengths']
assert src_mid_tokens is not None, "when load_source_middle is true, we should get source middle dataset"
        # Probability of training on the middle data.
        # If the mode is not middle, the target can be augmented via random delete etc.
        # (same logic as the original Lev-T); support for that can be added here later.
if cut_off <= middle_mode_ratio:
middle_mode = True
        # These would affect the encoder if not deleted.
del kwargs['src_mid_tokens']
del kwargs['src_mid_lengths']
"""
* 注意这里的改动 *
本来, forward 函数关键输入是这样的:
src_tokens, src_lengths,prev_output_tokens, tgt_tokens
关于上面提及的prev_output_tokens, 在训练阶段是来自sample['prev_target'], 默认是random delete后的字符串, 参考
nat_loss/refinement_nat_loss forward() 中的实现.
我们在kwargs 引入了事先计算好的 src_mid_tokens, src_mid_lengths, 代表 中间输入,
来自从source中删除对齐后和target字符串不一样的地方。
1.原来的训练有三个主要步骤:
a. forward_mask_ins, 针对被random delete(默认操作,可以命令行改) 后生成的prev_output_tokens, 我们去猜insert的个数和位置
b. forward_word_ins, 针对计算好的masked_tgt_tokens, 我们去猜对应的地方应该插入哪些token
c. forward_word_del, 针对上一步预测出来的词,和target进行对比,然后训练应该delete什么词
2.原生的过程是针对翻译和生成的,但是针对原文增强类任务,我们引入了对齐后的中间diff结果,可以做哪些训练过程的增强?
我们希望训练过程中可以更加有针对性(但是在原生的模型上,可能会影响语言模型的学习,估计效果会下降,不过训练次数提升,
或者使用预训练encoder估计会有提升)
a. 我们想训练的过程中有一定的概率更加关注,错误点,相当于我们的中间结果明确标注出来了,应该删除的地方;
b. 利用中间结果,是可以更好的去预测应该插入的个数和地方,相当于加强了forward_mask_ins和forward_word_ins
b1. forward_mask_ins(prev_output_tokens=source_middle)
b2. forward_word_ins(prev_output_tokens=source_middle和tgt_token计算得出的masked_tgt_tokens)
c. 利用中间结果,我们可以更好的去预测delete
c1. 按照原来的步骤预测出来和target不一致的,就应该delete; (这个跟原来的一样)
c2. 如果可以利用中间结果和原来的输入做一个对比,也可以标记出原始的输入多出来的是哪些,然后那些是应该删除的(利用libnat)
3. 同样的,有针对的训练过后,对应的decoder在生成的时候,prev可以是原来的字符串, 这样也要求decoder要有足够锐利的眼光去识别prev中的错误信息;
"""
# encoding
# encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# logging.info("in forward src_tokens: {}".format(src_tokens))
encoder_out = self.encoder(src_tokens)
# logging.info("encoder_out {}".format(encoder_out))
# generate training labels for insertion
# validating :
if self.training is False:
            # We should not reach this branch when not training, though validation during training does come through here.
# assert self.training is True, "should not be here in this forward() when you are not training the model"
            # Outside training, computation should follow the gold labels, hence:
# logger.info("self.training is False")
middle_mode = True
dual_policy_mode = False
# logger.info(
# "forward policies: dual_policy_mode:{} {} middle_mode:{} {}".format(dual_policy_mode, dual_policy_ratio,
# middle_mode, middle_mode_ratio))
# prev_output_tokens = src_mid_tokens
ori_prev_output_tokens = prev_output_tokens
# Use the gold label when training the insert step.
if middle_mode is True:
prev_output_tokens = src_mid_tokens
masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets(
# prev_output_tokens, tgt_tokens, self.pad, self.unk
prev_output_tokens, tgt_tokens, self.pad, self.tgt_dict.tokenizer.mask_token_id
)
else:
# Use random_delete-style noise.
masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets(
# prev_output_tokens, tgt_tokens, self.pad, self.unk
prev_output_tokens, tgt_tokens, self.pad, self.tgt_dict.tokenizer.mask_token_id
)
mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction
mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
# logging.info(f"prev_output_tokens: {prev_output_tokens.shape} {prev_output_tokens}")
prev_output_tokens_plus = self.tgt_dict.add_batch_plus(prev_output_tokens)
masked_tgt_tokens_plus = self.tgt_dict.add_batch_plus(masked_tgt_tokens)
if self.args.pinyin_on is True:
prev_output_tokens_pinyin = self.t2p(self.encoder.t2p_buff, prev_output_tokens)
prev_output_tokens_plus['pinyin'] = prev_output_tokens_pinyin
masked_tgt_tokens_plus['pinyin'] = self.t2p(self.encoder.t2p_buff, masked_tgt_tokens)
# logging.info(f"prev_output_tokens_plus: {prev_output_tokens_plus}")
# raise NotImplementedError()
# insert how many
mask_ins_out, _ = self.decoder.forward_mask_ins(
normalize=False,
# prev_output_tokens=prev_output_tokens,
prev_output_tokens_plus=prev_output_tokens_plus,
# prev_output_tokens_pinyin=prev_output_tokens_pinyin,
encoder_out=encoder_out
)
# insert what
# logger.info(f"src_tokens['input_ids']: {src_tokens['input_ids']}")
# logger.info(f"prev_output_tokens: {prev_output_tokens}")
# logger.info(f"masked_tgt_tokens: {masked_tgt_tokens}")
# logger.info(f"masked_tgt_masks: {masked_tgt_masks}")
# logger.info(f"mask_ins_out: {mask_ins_out}")
# logger.info(f"mask_ins_targets: {mask_ins_targets}")
# raise NotImplementedError()
word_ins_out, _ = self.decoder.forward_word_ins(
normalize=False,
prev_output_tokens_plus=masked_tgt_tokens_plus,
# prev_output_tokens_pinyin=masked_tgt_tokens_pinyin,
encoder_out=encoder_out
)
# make an online prediction (sampling_for_deletion defaults to False)
if self.decoder.sampling_for_deletion:
raise NotImplementedError()
word_predictions = torch.multinomial(
F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1).view(
word_ins_out.size(0), -1)
else:
word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1]
word_predictions.masked_scatter_(
~masked_tgt_masks, tgt_tokens[~masked_tgt_masks]
)
# generate training labels for deletion
if dual_policy_mode is True:  # default method: train the delete step
# raise NotImplementedError("to be developed later")
# mark the positions where the prediction and target differ as to-delete
word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad)
# build the plus fields
# pinyin has to be added as well
word_predictions_plus = self.tgt_dict.add_batch_plus(word_predictions)
if self.args.pinyin_on is True:
word_predictions_plus['pinyin'] = self.t2p(self.encoder.t2p_buff, word_predictions)
word_del_out, _ = self.decoder.forward_word_del(
normalize=False,
# prev_output_tokens=word_predictions,
prev_output_tokens_plus=word_predictions_plus,
encoder_out=encoder_out)
word_del_masks = word_predictions.ne(self.pad)
else:  # when using the middle data, this acts like an error-detection module
word_del_targets = _get_del_target_by_middle(src_mid_tokens, src_tokens['input_ids'], self.pad)
word_del_out, _ = self.decoder.forward_word_del(
normalize=False,
prev_output_tokens_plus=src_tokens,
# prev_output_tokens_pinyin=src_tokens['pinyin'],
encoder_out=encoder_out)
word_del_masks = src_tokens['input_ids'].ne(self.pad)
# logging.info("word_del_targets: {}".format(word_del_targets))
# raise NotImplementedError("zz")
return {
"mask_ins": {
"out": mask_ins_out, "tgt": mask_ins_targets,
"mask": mask_ins_masks, "ls": 0.01,
},
"word_ins": {
"out": word_ins_out, "tgt": tgt_tokens,
"mask": masked_tgt_masks, "ls": self.args.label_smoothing,
"nll_loss": True
},
"word_del": {
"out": word_del_out, "tgt": word_del_targets,
"mask": word_del_masks
}
}
def forward_decoder(
self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
attn = decoder_out.attn
history = decoder_out.history
# logger.info(f"output_tokens: {output_tokens}")
# logger.info(f"output_scores: {output_scores}")
bsz = output_tokens.size(0)
if max_ratio is None:
max_lens = torch.zeros_like(output_tokens).fill_(255)
else:
if encoder_out.encoder_padding_mask is None:
max_src_len = encoder_out.encoder_out.size(0)
src_lens = encoder_out.encoder_out.new(bsz).fill_(max_src_len)
else:
src_lens = (~encoder_out.encoder_padding_mask).sum(1)
max_lens = (src_lens * max_ratio).clamp(min=10).long()
output_tokens_plus = self.tgt_dict.add_batch_plus(output_tokens)
if self.args.pinyin_on is True:
output_tokens_plus['pinyin'] = self.t2p(self.encoder.t2p_buff, output_tokens)
# delete words
# do not delete tokens if it is <s> </s>
can_del_word = output_tokens.ne(self.pad).sum(1) > 2
# logger.info("can_del_word {} {}".format(can_del_word.shape, can_del_word))
if can_del_word.sum() != 0:  # skip when there is nothing to delete
# word_del_score, word_del_attn = self.decoder.forward_word_del(
# normalize=True,
# prev_output_tokens_plus=_skip(output_tokens, can_del_word),
# encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word)
# )
word_del_score, word_del_attn = self.decoder.forward_word_del(
normalize=True,
prev_output_tokens_plus=_skip(output_tokens_plus, can_del_word),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word)
)
word_del_pred = word_del_score.max(-1)[1].bool()
# logger.info(word_del_score)
# logger.info(word_del_pred)
# raise NotImplementedError()
_tokens, _scores, _attn = _apply_del_words(
output_tokens[can_del_word],
output_scores[can_del_word],
word_del_attn,
word_del_pred,
self.pad,
self.bos,
self.eos,
)
output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_del_word, _scores, 0)
attn = _fill(attn, can_del_word, _attn, 0.)
if history is not None:
history.append(output_tokens.clone())
output_tokens_plus = self.tgt_dict.add_batch_plus(output_tokens)
if self.args.pinyin_on is True:
output_tokens_plus['pinyin'] = self.t2p(self.encoder.t2p_buff, output_tokens)
# insert placeholders
can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
if can_ins_mask.sum() != 0:
# mask_ins_score, _ = self.decoder.forward_mask_ins(
# normalize=True,
# prev_output_tokens=_skip(output_tokens, can_ins_mask),
# encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask)
# )
mask_ins_score, _ = self.decoder.forward_mask_ins(
normalize=True,
prev_output_tokens_plus=_skip(output_tokens_plus, can_ins_mask),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask)
)
if eos_penalty > 0.0:
mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty
mask_ins_pred = mask_ins_score.max(-1)[1]
mask_ins_pred = torch.min(
mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
)
_tokens, _scores = _apply_ins_masks(
output_tokens[can_ins_mask],
output_scores[can_ins_mask],
mask_ins_pred,
self.pad,
self.unk,
self.eos,
)
output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
if history is not None:
history.append(output_tokens.clone())
output_tokens_plus = self.tgt_dict.add_batch_plus(output_tokens)
if self.args.pinyin_on is True:
output_tokens_plus['pinyin'] = self.t2p(self.encoder.t2p_buff, output_tokens)
# insert words
can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
if can_ins_word.sum() != 0:
# word_ins_score, word_ins_attn = self.decoder.forward_word_ins(
# normalize=True,
# prev_output_tokens=_skip(output_tokens, can_ins_word),
# encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word)
# )
word_ins_score, word_ins_attn = self.decoder.forward_word_ins(
normalize=True,
prev_output_tokens_plus=_skip(output_tokens_plus, can_ins_word),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word)
)
word_ins_score, word_ins_pred = word_ins_score.max(-1)
_tokens, _scores = _apply_ins_words(
output_tokens[can_ins_word],
output_scores[can_ins_word],
word_ins_pred,
word_ins_score,
self.unk,
)
output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_word, _scores, 0)
attn = _fill(attn, can_ins_word, word_ins_attn, 0.)
if history is not None:
history.append(output_tokens.clone())
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
attn = None if attn is None else attn[:, :cut_off, :]
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=attn,
history=history
)
def initialize_output_tokens(self, encoder_out, src_tokens):
"""
在生成的时候会被调用来初始化原始的输出token;
todo: 对参数敏感的,使用 src_tokens 来进行初始化, 但是这个scores就比较尴尬
怎么取值合适, 这个模型的后续步骤有用到这个scores吗?
:param encoder_out:
:param src_tokens:
:return:
"""
"""
initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens[:, 1] = self.eos
"""
# Use src_tokens as the initial output, so the model's next job is to spot what is wrong in src_tokens.
initial_output_tokens = src_tokens['input_ids'].detach().clone()
# logging.info("copy src for init here")
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(encoder_out.encoder_out)
return DecoderOut(
output_tokens=initial_output_tokens,
output_scores=initial_output_scores,
attn=None,
step=0,
max_step=0,
history=None
)
# class HuggingFaceBertEncoder(FairseqEncoder):
class HuggingFaceBertPlusEncoder(FairseqEncoder):
def __init__(self, args, dictionary, pinyin_dict=None, pinyin_embed_tokens=None):
super().__init__(dictionary)
try:
from transformers import BertModel, BertTokenizer, BertConfig
except ImportError:
raise ImportError(
'\n\nPlease install huggingface/transformers with:'
'\n\n pip install transformers'
'\n\nOr to make local edits, install the submodule:'
'\n\n git submodule update --init '
'fairseq/models/huggingface/transformers'
)
# logging.info(args)
# raise NotImplementedError(args.load_hf_bert_from)
load_hf_bert_from = getattr(args, 'load_hf_bert_from', '')
assert load_hf_bert_from != ''
model_path = load_hf_bert_from
config = BertConfig.from_pretrained(model_path)
# logging.info("args: {}".format(args))
if getattr(args, 'load_hf_bert_config_only', False) is True:
logger.info(
"now we will init the hf_bert model from config without the weights,"
" since we will restore the weights later")
self.model = BertModel(config)
else:
logger.info("now we will init the hf_bert model from {} with all the weights".format(model_path))
self.model = BertModel.from_pretrained(model_path)
# logging.info("DEBUG: after loading hf_bert: encoder.layer.11.output.dense.weight[0][10]{}".format(
# self.model.state_dict()['encoder.layer.11.output.dense.weight'][0][:10]))
# self.model = self.model
if args.fix_bert_params is True:
for p in self.model.parameters():
p.requires_grad = False
self.tokenizer = dictionary.tokenizer
self.dictionary = dictionary
self.args = args
self.config = config
# self.model.embeddings
# could be None
self.pinyin_dict = pinyin_dict
self.pinyin_embed_tokens = pinyin_embed_tokens
if args.pinyin_on is True:
t2p_buff = self.build_token_ids_to_pinyin_ids_buff(self.tokenizer, self.pinyin_dict)
self.register_buffer('t2p_buff', t2p_buff)
logging.info("t2p_buff.shape:{} self.t2p_buff:{}".format(self.t2p_buff.shape, self.t2p_buff))
else:
t2p_buff = None
self.register_buffer('t2p_buff', t2p_buff)
logging.info("t2p_buff is None")
# raise NotImplementedError()
# build tokenid2pinyinid buffer
@classmethod
def build_token_ids_to_pinyin_ids_buff(cls, tokenizer, pinyin_dict):
py_vocab = [x[0] for x in pinyin(tokenizer.vocab, style=Style.NORMAL)]
py_ids = []
for i_pinyin in py_vocab:
if i_pinyin == '[PAD]':
py_idx = pinyin_dict.pad()
elif i_pinyin == '[CLS]':
py_idx = pinyin_dict.bos()
elif i_pinyin == '[UNK]':
py_idx = pinyin_dict.unk()
elif i_pinyin == '[SEP]':
py_idx = pinyin_dict.eos()
else:
py_idx = pinyin_dict.index(i_pinyin)
py_ids.append(py_idx)
t2p_buff = torch.tensor(py_ids, requires_grad=False).long()
return t2p_buff
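# Illustrative shape of the mapping built above (toy values, assuming
# pypinyin's usual behaviour): pinyin(['中', '国'], style=Style.NORMAL)
# returns [['zhong'], ['guo']], so py_vocab holds one toneless pinyin string
# per vocabulary entry and t2p_buff[i] is that string's id in pinyin_dict.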
# Added for compatibility with some decoder base-class code.
@property
def embedding(self):
return self.model.embeddings.word_embeddings
# def reorder_encoder_out(self, ):
# super().reorder_encoder_out()
@torch.jit.export
def reorder_encoder_out(self, encoder_out: EncoderOut, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
"""
Since encoder_padding_mask and encoder_embedding are both of type
Optional[Tensor] in EncoderOut, they need to be copied as local
variables for Torchscript Optional refinement
"""
encoder_padding_mask: Optional[Tensor] = encoder_out.encoder_padding_mask
encoder_embedding: Optional[Tensor] = encoder_out.encoder_embedding
new_encoder_out = (
encoder_out.encoder_out
if encoder_out.encoder_out is None
else encoder_out.encoder_out.index_select(1, new_order)
)
new_encoder_padding_mask = (
encoder_padding_mask
if encoder_padding_mask is None
else encoder_padding_mask.index_select(0, new_order)
)
new_encoder_embedding = (
encoder_embedding
if encoder_embedding is None
else encoder_embedding.index_select(0, new_order)
)
src_tokens = encoder_out.src_tokens
if src_tokens is not None:
src_tokens = src_tokens.index_select(0, new_order)
src_lengths = encoder_out.src_lengths
if src_lengths is not None:
src_lengths = src_lengths.index_select(0, new_order)
encoder_states = encoder_out.encoder_states
if encoder_states is not None:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return EncoderOut(
encoder_out=new_encoder_out, # T x B x C
encoder_padding_mask=new_encoder_padding_mask, # B x T
encoder_embedding=new_encoder_embedding, # B x T x C
encoder_states=encoder_states, # List[T x B x C]
src_tokens=src_tokens, # B x T
src_lengths=src_lengths, # B x 1
)
def forward(self, src_tokens, return_all_hiddens=False, return_pool=False):
"""
"""
# logger.info(src_tokens)
# raise NotImplementedError("xx")
x, extra = self.extract_features(src_tokens, return_all_hiddens=return_all_hiddens, return_pool=return_pool)
#
# logger.info(src_tokens)
# logger.info(x)
encoder_padding_mask = src_tokens['input_ids'].eq(self.dictionary.pad())
x = x.transpose(0, 1).contiguous()
return EncoderOut(
encoder_out=x, # T x B x C
encoder_padding_mask=encoder_padding_mask, # B x T
encoder_embedding=None, # B x T x C
encoder_states=None, # List[T x B x C]
src_tokens=None,
src_lengths=None,
)
def extract_features(self, src_tokens, return_all_hiddens=False, return_pool=False, **unused):
# inner_states, _ = self.model(**src_tokens, output_hidden_states=not return_all_hiddens)
# logger.info(src_tokens)
bert_input = {}
for k in ('input_ids', 'token_type_ids', 'attention_mask'):
bert_input[k] = src_tokens[k]
inner_states = self.model(**bert_input, output_hidden_states=return_all_hiddens)
# logger.info(self.pinyin_embed_tokens, len(self.pinyin_dict.symbols))
# pinyin_input = src_tokens['pinyin']
# logger.info(pinyin_input)
# raise NotImplementedError()
# raise NotImplementedError("")
# works fine, still on CUDA
# logger.info(inner_states)
# raise NotImplementedError("")
# cast to float32, otherwise it seems to automatically become half
bert_features = inner_states[0]
# the pinyin feature is added here
if self.args.pinyin_on is True:
pinyin_input = src_tokens['pinyin']
embed_out = self.pinyin_embed_tokens(pinyin_input)
features = torch.cat([bert_features, embed_out], axis=-1)
else:
features = bert_features
# logging.info("bert_features.shape {}".format(bert_features.shape))
# logging.info("embed_out.shape {} embed_out {}".format(embed_out.shape, embed_out))
# logger.info(
# "bert_features.shape: {} ; embout.shape: {}, bert_features.shape:{}".format(bert_features.shape,
# embed_out.shape,
# features.shape))
# raise NotImplementedError()
return features, {'inner_states': inner_states[2] if return_all_hiddens else None,
'pool': inner_states[1] if return_pool else None,
'bert_features': bert_features}
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.model.config.max_position_embeddings
class LevenshteinRefinementPlusTransformerDecoder(FairseqNATReBertPlusDecoder):
"""
Consider using the BERT from the encoder to replace the embedding layer? That
would touch too many levels of parent classes, so two parent classes were
replaced directly instead.
"""
def __init__(self, args, dictionary, encoder: HuggingFaceBertPlusEncoder, no_encoder_attn=False):
# This should build self.layers by default; after the "plus" change, pinyin has to be handled as well.
super().__init__(
args, dictionary, encoder, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
# 256 because at most 256 tokens can be inserted? This could be tuned further when the full source text does not need to be regenerated.
self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None)
self.embed_word_del = Embedding(2, self.output_embed_dim, None)
# del_word, ins_mask, ins_word
self.early_exit = [int(i) for i in args.early_exit.split(',')]
assert len(self.early_exit) == 3
# copy layers for mask-predict/deletion
self.layers_msk = None
if getattr(args, "no_share_maskpredictor", False):
self.layers_msk = nn.ModuleList([
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(self.early_exit[1])
])
self.layers_del = None
if getattr(args, "no_share_discriminator", False):
self.layers_del = nn.ModuleList([
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(self.early_exit[0])
])
if getattr(args, "share_discriminator_maskpredictor", False):
assert getattr(args, "no_share_discriminator", False), "must set saperate discriminator"
self.layers_msk = self.layers_del
# By default, layers_msk and layers_del are both None?
self.encoder = encoder
self.pinyin_embed_tokens = encoder.pinyin_embed_tokens
# self.register_buffer('t2p_buff', self.encoder.t2p_buff)
self.t2p_buff = self.encoder.t2p_buff
if args.share_bert is False:
# not written yet
raise NotImplementedError()
else:
self.model = self.encoder.model
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.encoder.max_positions()
def _extract_features(self, prev_tokens, return_all_hiddens=False, return_pool=False, **unused):
bert_input = {}
for k in ('input_ids', 'token_type_ids', 'attention_mask'):
bert_input[k] = prev_tokens[k]
inner_states = self.model(**bert_input, output_hidden_states=return_all_hiddens)
bert_features = inner_states[0]
# the pinyin feature is added here
if self.args.pinyin_on is True:
pinyin_input = prev_tokens['pinyin']
embed_out = self.pinyin_embed_tokens(pinyin_input)
features = torch.cat([bert_features, embed_out], axis=-1)
else:
features = bert_features
return features, {'inner_states': inner_states[2] if return_all_hiddens else None,
'pool': inner_states[1] if return_pool else None,
'bert_features': bert_features}
def extract_features(
#self, prev_output_tokens_plus, encoder_out=None, early_exit=None, layers=None,
self, prev_output_tokens_plus, encoder_out, early_exit=None, layers=None,
**unused
):
prev_output_tokens = prev_output_tokens_plus['input_ids']
x, _ = self._extract_features(prev_output_tokens_plus)
# B x T x C -> T x B x C
# logger.info("shape x.shape {}, and pad {}".format(x.shape, self.padding_idx))
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
layers = self.layers if layers is None else layers
early_exit = len(layers) if early_exit is None else early_exit
# done: when does this layer learn the dimensionality of encoder_out? The MHA implementation uses encoder_embed_dim, cool.
# logger.info(layers[0])
for _, layer in enumerate(layers[: early_exit]):
x, attn, _ = layer(
x,
encoder_out.encoder_out if encoder_out is not None else None,
encoder_out.encoder_padding_mask if encoder_out is not None else None,
self_attn_mask=None,
self_attn_padding_mask=decoder_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": attn, "inner_states": inner_states}
@ensemble_decoder
def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens_plus, **unused):
#prev_output_tokens = prev_output_tokens_plus['input_ids']
features, extra = self.extract_features(
prev_output_tokens_plus, encoder_out=encoder_out, early_exit=self.early_exit[1],
layers=self.layers_msk,
**unused
)
# logger.info(f"features.shape {features.shape}")
features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
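# The concat pairs each position with its right neighbour so the classifier
# scores the slot *between* adjacent tokens: (B, T, C) -> (B, T-1, 2C),
# matching embed_mask_ins, whose embedding dim is output_embed_dim * 2.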
# logger.info(f"features_cat.shape {features_cat.shape}")
# logger.info(f"self.embed_mask_ins.weight.shape {self.embed_mask_ins.weight.shape}")
decoder_out = F.linear(features_cat, self.embed_mask_ins.weight)
if normalize:
return F.log_softmax(decoder_out, -1), extra['attn']
return decoder_out, extra['attn']
@ensemble_decoder
def forward_word_ins(self, normalize, encoder_out, prev_output_tokens_plus, **unused):
features, extra = self.extract_features(
prev_output_tokens_plus, encoder_out=encoder_out, early_exit=self.early_exit[2],
layers=self.layers,
**unused
)
decoder_out = self.output_layer(features)
if normalize:
return F.log_softmax(decoder_out, -1), extra['attn']
return decoder_out, extra['attn']
@ensemble_decoder
def forward_word_del(self, normalize, encoder_out, prev_output_tokens_plus, **unused):
features, extra = self.extract_features(
prev_output_tokens_plus, encoder_out=encoder_out, early_exit=self.early_exit[0],
layers=self.layers_del,
**unused
)
decoder_out = F.linear(features, self.embed_word_del.weight)
if normalize:
return F.log_softmax(decoder_out, -1), extra['attn']
return decoder_out, extra['attn']
@register_model_architecture("levenshtein_refinement_rebert", "levenshtein_refinement_rebert")
def levenshtein_rebert_base_architecture(args):
# about pinyin
args.pinyin_on = getattr(args, "pinyin_on", False)
args.pinyin_embed_path = getattr(args, "pinyin_embed_path", None)
args.pinyin_embed_dim = getattr(args, "pinyin_embed_dim", 8)
args.encoder_pinyin_embed_path = getattr(args, "encoder_pinyin_embed_path", None)
args.encoder_pinyin_embed_dim = getattr(args, "encoder_pinyin_embed_dim", 8)
if args.encoder_pinyin_embed_path is None:
args.encoder_pinyin_embed_path = args.pinyin_embed_path
args.encoder_pinyin_embed_dim = args.pinyin_embed_dim
args.decoder_pinyin_embed_path = getattr(args, "decoder_pinyin_embed_path", None)
args.decoder_pinyin_embed_dim = getattr(args, "decoder_pinyin_embed_dim", 8)
if args.decoder_pinyin_embed_path is None:
args.decoder_pinyin_embed_path = args.encoder_pinyin_embed_path
args.decoder_pinyin_embed_dim = args.encoder_pinyin_embed_dim
# about pinyin done
if args.pinyin_on is True:
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768 + args.encoder_pinyin_embed_dim)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768 + args.decoder_pinyin_embed_dim)
else:
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
# args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
# for compatibility with the underlying transformer
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_bert = getattr(args, "share_bert", True)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
# BERT: pretraining + fine-tuning, or feature extraction only
args.fix_bert_params = getattr(args, "fix_bert_params", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
# todo ?? decoder_input_dim: the input dim may differ from embed_dim, in which case a conversion layer is added automatically
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.early_exit = getattr(args, "early_exit", "6,6,6")
args.no_share_discriminator = getattr(args, "no_share_discriminator", False)
args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False)
args.share_discriminator_maskpredictor = getattr(args, "share_discriminator_maskpredictor", False)
args.no_share_last_layer = getattr(args, "no_share_last_layer", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
@register_model_architecture(
"levenshtein_refinement_rebert", "levenshtein_refinement_rebert_decoder_2layers"
)
def levenshtein_refinement_rebert_decoder_2layers(args):
args.decoder_layers = getattr(args, "decoder_layers", 2)
levenshtein_rebert_base_architecture(args)
@register_model_architecture(
"levenshtein_refinement_rebert", "levenshtein_refinement_rebert_decoder_6layers"
)
def levenshtein_refinement_rebert_decoder_6layers(args):
args.decoder_layers = getattr(args, "decoder_layers", 6)
levenshtein_rebert_base_architecture(args)
@register_model_architecture(
"levenshtein_refinement_rebert", "levenshtein_refinement_rebert_decoder_12layers"
)
def levenshtein_refinement_rebert_decoder_12layers(args):
args.decoder_layers = getattr(args, "decoder_layers", 12)
levenshtein_rebert_base_architecture(args)
|
CheungZeeCn/fairseq | fairseq/models/nat/__init__.py | <gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .fairseq_nat_model import *
from .nonautoregressive_transformer import *
from .nat_crf_transformer import *
from .iterative_nonautoregressive_transformer import *
from .cmlm_transformer import *
from .levenshtein_transformer import *
from .insertion_transformer import *
from .levenshtein_refinement_rebert import *
|
aksr-aashish/xlue-spr | spr/modules/vote.py | from pyrogram import filters
from pyrogram.types import CallbackQuery
from spr import NSFW_LOG_CHANNEL, SPAM_LOG_CHANNEL, SUDOERS, spr
from spr.core import ikb
from spr.utils.db import downvote, ignore_nsfw, upvote, user_voted
from spr.utils.misc import clean, get_file_unique_id
@spr.on_callback_query(filters.regex(r"^upvote_"))
async def upvote_cb_func(_, cq: CallbackQuery):
if cq.message.chat.id not in [SPAM_LOG_CHANNEL, NSFW_LOG_CHANNEL]:
return await cq.answer()
data = cq.data.split("_")[1]
user_id = cq.from_user.id
mid = cq.message.message_id
if data == "spam":
if user_voted(mid, user_id):
return await cq.answer("What tf you doing this shit again and again.")
upvote(mid, user_id)
kb = cq.message.reply_markup.inline_keyboard
upvotes = clean(kb[0][0])
downvotes = clean(kb[0][1])
link = kb[1][0].url
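# clean() (from spr.utils.misc) is assumed here to parse the running count
# out of a button caption, e.g. a "Right (3)" button yields 3.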
keyb = ikb(
{
f"Right ({upvotes + 1})": "upvote_spam",
f"Wrong ({downvotes})": "downvote_spam",
"Chat": link,
},
2
)
await cq.edit_message_reply_markup(keyb)
elif data == "nsfw":
if user_id in SUDOERS:
await cq.message.delete()
await cq.answer()
else:
await cq.answer()
@spr.on_callback_query(filters.regex(r"^downvote_"))
async def downvote_cb_func(_, cq: CallbackQuery):
if cq.message.chat.id not in [SPAM_LOG_CHANNEL, NSFW_LOG_CHANNEL]:
return await cq.answer()
data = cq.data.split("_")[1]
user_id = cq.from_user.id
mid = cq.message.message_id
if data == "spam":
if user_voted(mid, user_id):
return await cq.answer("What tf you doing this shit again and again.")
downvote(mid, user_id)
kb = cq.message.reply_markup.inline_keyboard
upvotes = clean(kb[0][0])
downvotes = clean(kb[0][1])
link = kb[1][0].url
keyb = ikb(
{
f"Right ({upvotes})": "upvote_spam",
f"Wrong ({downvotes + 1})": "downvote_spam",
"Chat": link,
},
2
)
await cq.edit_message_reply_markup(keyb)
elif data == "nsfw":
if user_id in SUDOERS:
file_id = get_file_unique_id(cq.message)
ignore_nsfw(file_id)
await cq.message.delete()
await cq.answer()
else:
await cq.answer()
|
w3Abhishek/TwitterFeedBot | twitterfeed.py | <gh_stars>0
import requests
import telebot
import feedparser
import schedule
import time
import json
import threading
bot_token = 'TOKEN'
bot = telebot.TeleBot(bot_token)
def getJSON():
with open('feeds.json') as json_file:
feedd = json.load(json_file)
return feedd
feeds = getJSON()
@bot.message_handler(commands=['start'])
def start(message):
bot.send_message(message.chat.id, 'Hi %s,\nWelcome to Twitter Feed Bot. I can subscribe to Twitter feeds and send you latest tweets in Telegram.\nThanks.'%(message.from_user.first_name))
@bot.message_handler(commands=['add'])
def addFeed(message):
try:
twitter = message.text.split(' ')[1]
except:
twitter = None
chat_id = message.chat.id
try:
if twitter != None:
feed_url = f'https://nitter.net/{twitter}/rss'
feed_url = feed_url.replace('@','')
addFeeds(feed_url,chat_id)
else:
bot.send_message(message.chat.id, 'Please send Twitter account username like: \n/add @PlutoGateNetwork')
except:
bot.send_message(message.chat.id, 'Something went wrong. Please send a Twitter account username like:\n/add @PlutoGateNetwork\nIf the problem persists, please report it to @PlutoGateGroup')
def parseFeed(url):
try:
feed = feedparser.parse(url)
latest_url = feed.entries[0].link
latest_title = feed.entries[0].title
final = [latest_title, latest_url]
return final
except Exception:
final = ['Error', 'Error']
return final
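# Illustrative return shape (hypothetical feed): parseFeed(url) yields
#   ['Latest tweet text', 'https://nitter.net/user/status/123']
# or ['Error', 'Error'] when the feed cannot be fetched or parsed.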
def feedRunner():
global feeds
for tweets in feeds:
try:
# parseFeed returns [title, url]; compare the URL element, not the whole list,
# and skip the ['Error', 'Error'] sentinel returned on parse failure
newparse = parseFeed(tweets['feedurl'])
if newparse[0] != 'Error' and newparse[1] != tweets['latest_url']:
tweets['latest_url'] = newparse[1]
bot.send_message(tweets['chat_id'], '%s\n%s'%(newparse[0],newparse[1].replace('nitter.net','twitter.com')))
else:
pass
except:
print("error")
pass
saveJSON(feeds)
def addFeeds(feedurl,chat_id):
global feeds
global count
try:
latest = feedparser.parse(feedurl)
latest_feed = latest.entries[0].link
latest_title = latest.entries[0].title
new_entry = {'feedurl':feedurl,'chat_id':chat_id, 'latest_url':latest_feed}
feeds.append(new_entry)
saveJSON(feeds)
bot.send_message(chat_id, f'Latest Tweet:\n\n{latest_title}\n\n{latest_feed.replace("nitter.net","twitter.com")}')
bot.send_message(chat_id, 'Twitter Feed subscribed successfully.')
except:
bot.send_message(chat_id, 'Something went wrong with your Twitter feed. Please report it @PlutoGateGroup')
def saveJSON(data):
with open('feeds.json', 'w') as outfile:
json.dump(data, outfile)
# bot.polling() blocks forever, which would keep the scheduler below from ever
# running; poll on a background thread and run the schedule loop in the main one.
threading.Thread(target=bot.polling, daemon=True).start()
schedule.every(10).minutes.do(feedRunner)
while True:
schedule.run_pending()
time.sleep(1)
|
markusweimer/azure-data-lake-store-python | tests/test_transfer.py | <filename>tests/test_transfer.py
# -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import time
from azure.datalake.store.core import AzureDLPath
from azure.datalake.store.transfer import ADLTransferClient
from tests.testing import azure, posix
def test_shutdown(azure):
def transfer(adlfs, src, dst, offset, size, blocksize, buffersize, retries=5, shutdown_event=None):
while shutdown_event and not shutdown_event.is_set():
time.sleep(0.1)
return size, None
client = ADLTransferClient(azure, transfer=transfer, chunksize=1,
chunked=False)
client.submit('foo', 'bar', 16)
client.run(monitor=False)
client.shutdown()
assert client.progress[0].state == 'finished'
def test_submit_and_run(azure):
def transfer(adlfs, src, dst, offset, size, blocksize, buffersize, shutdown_event=None):
return size, None
client = ADLTransferClient(azure, transfer=transfer, chunksize=8,
chunked=False)
client.submit('foo', 'bar', 16)
client.submit('abc', '123', 8)
nfiles = len(client.progress)
assert nfiles == 2
assert len([client.progress[i].chunks for i in range(nfiles)])
assert all([client.progress[i].state == 'pending' for i in range(nfiles)])
assert all([chunk.state == 'pending' for f in client.progress
for chunk in f.chunks])
expected = {('bar', 0), ('bar', 8), ('123', 0)}
assert {(chunk.name, chunk.offset) for f in client.progress
for chunk in f.chunks} == expected
client.run()
assert all([client.progress[i].state == 'finished' for i in range(nfiles)])
assert all([chunk.state == 'finished' for f in client.progress
for chunk in f.chunks])
assert all([chunk.expected == chunk.actual for f in client.progress
for chunk in f.chunks])
def test_update_progress(azure):
"""
Upload a 32-byte file in chunks of 8 and test that the progress is updated
incrementally.
"""
calls = []
def recording_callback(progress, total):
calls.append((progress, total))
def transfer(adlfs, src, dst, offset, size, blocksize, buffersize, shutdown_event=None):
return size, None
client = ADLTransferClient(azure, transfer=transfer, chunksize=8,
chunked=True, progress_callback=recording_callback)
client.submit('foo', AzureDLPath('bar'), 32)
client.run()
assert calls == [(8, 32), (16, 32), (24, 32), (32, 32)]
def test_merge(azure):
calls = []
def merge(adlfs, outfile, files, shutdown_event=None, overwrite=False):
calls.append(files)
def transfer(adlfs, src, dst, offset, size, blocksize, buffersize, shutdown_event=None):
return size, None
class XLoaderMock(object):
_overwrite = False
file_size = 32
chunk_size = 8
client = ADLTransferClient(azure, parent=XLoaderMock(), transfer=transfer, merge=merge,
chunksize=chunk_size, chunked=True)
client.submit('foo', AzureDLPath('bar'), file_size)
client.run()
assert len(calls[0]) == file_size / chunk_size
def test_temporary_path(azure):
def transfer(adlfs, src, dst, offset, size, blocksize, buffersize):
return size, None
client = ADLTransferClient(azure, transfer=transfer, chunksize=8,
unique_temporary=False)
client.submit('foo', AzureDLPath('bar'), 16)
assert os.path.dirname(posix(client.progress[0].chunks[0].name)) == 'bar.segments'
|
markusweimer/azure-data-lake-store-python | samples/cli.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
An interface to be run from the command line/powershell.
This file is the only executable in the project.
"""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import cmd
from datetime import datetime
import os
import stat
import sys
from azure.datalake.store.core import AzureDLFileSystem
from azure.datalake.store.multithread import ADLDownloader, ADLUploader
from azure.datalake.store.utils import write_stdout
class AzureDataLakeFSCommand(cmd.Cmd, object):
"""Accept commands via an interactive prompt or the command line."""
prompt = 'azure> '
undoc_header = None
_hidden_methods = ('do_EOF',)
def __init__(self, fs):
super(AzureDataLakeFSCommand, self).__init__()
self._fs = fs
def get_names(self):
return [n for n in dir(self.__class__) if n not in self._hidden_methods]
def do_close(self, line):
return True
def help_close(self):
print("close\n")
print("Exit the application")
def do_cat(self, line):
parser = argparse.ArgumentParser(prog="cat", add_help=False)
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args(line.split())
for f in args.files:
write_stdout(self._fs.cat(f))
def help_cat(self):
print("cat file ...\n")
print("Display contents of files")
def do_chgrp(self, line):
parser = argparse.ArgumentParser(prog="chgrp", add_help=False)
parser.add_argument('group', type=str)
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args(line.split())
for f in args.files:
self._fs.chown(f, group=args.group)
def help_chgrp(self):
print("chgrp group file ...\n")
print("Change file group")
def do_chmod(self, line):
parser = argparse.ArgumentParser(prog="chmod", add_help=False)
parser.add_argument('mode', type=str)
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args(line.split())
for f in args.files:
self._fs.chmod(f, args.mode)
def help_chmod(self):
print("chmod mode file ...\n")
print("Change file permissions")
def _parse_ownership(self, ownership):
if ':' in ownership:
owner, group = ownership.split(':')
if not owner:
owner = None
else:
owner = ownership
group = None
return owner, group
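# Illustrative mappings of the parser above:
#   'alice:staff' -> ('alice', 'staff')
#   ':staff'      -> (None, 'staff')
#   'alice'       -> ('alice', None)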
def do_chown(self, line):
parser = argparse.ArgumentParser(prog="chown", add_help=False)
parser.add_argument('ownership', type=str)
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args(line.split())
owner, group = self._parse_ownership(args.ownership)
for f in args.files:
self._fs.chown(f, owner=owner, group=group)
def help_chown(self):
print("chown owner[:group] file ...")
print("chown :group file ...\n")
print("Change file owner and group")
def _display_dict(self, d):
width = max([len(k) for k in d.keys()])
for k, v in sorted(list(d.items())):
print("{0:{width}} = {1}".format(k, v, width=width))
def do_df(self, line):
parser = argparse.ArgumentParser(prog="df", add_help=False)
parser.add_argument('path', type=str, nargs='?', default='.')
args = parser.parse_args(line.split())
self._display_dict(self._fs.df(args.path))
def help_df(self):
print("df [path]\n")
print("Display Azure account statistics of a path")
def _truncate(self, num, fmt):
return '{:{fmt}}'.format(num, fmt=fmt).rstrip('0').rstrip('.')
def _format_size(self, num):
for unit in ['B', 'K', 'M', 'G', 'T']:
if abs(num) < 1024.0:
return '{:>4s}{}'.format(self._truncate(num, '3.1f'), unit)
num /= 1024.0
return self._truncate(num, '.1f') + 'P'
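# Illustrative outputs of the two helpers above (computed by hand):
#   _format_size(512)    -> ' 512B'
#   _format_size(1536)   -> ' 1.5K'
#   _format_size(2**20)  -> '   1M'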
def _display_path_with_size(self, name, size, human_readable):
if human_readable:
print("{:7s} {}".format(self._format_size(size), name))
else:
print("{:<9d} {}".format(size, name))
def do_du(self, line):
parser = argparse.ArgumentParser(prog="du", add_help=False)
parser.add_argument('files', type=str, nargs='*', default=[''])
parser.add_argument('-c', '--total', action='store_true')
parser.add_argument('-h', '--human-readable', action='store_true')
parser.add_argument('-r', '--recursive', action='store_true')
args = parser.parse_args(line.split())
total = 0
for f in args.files:
items = sorted(list(self._fs.du(f, deep=args.recursive).items()))
for name, size in items:
total += size
self._display_path_with_size(name, size, args.human_readable)
if args.total:
self._display_path_with_size("total", total, args.human_readable)
def help_du(self):
print("du [-c | --total] [-r | --recursive] [-h | --human-readable] [file ...]\n")
print("Display disk usage statistics")
def do_exists(self, line):
parser = argparse.ArgumentParser(prog="exists", add_help=False)
parser.add_argument('file', type=str)
args = parser.parse_args(line.split())
print(self._fs.exists(args.file, invalidate_cache=False))
def help_exists(self):
print("exists file\n")
print("Check if file/directory exists")
def do_get(self, line):
parser = argparse.ArgumentParser(prog="get", add_help=False)
parser.add_argument('remote_path', type=str)
parser.add_argument('local_path', type=str, nargs='?', default='.')
parser.add_argument('-b', '--chunksize', type=int, default=2**28)
parser.add_argument('-c', '--threads', type=int, default=None)
parser.add_argument('-f', '--force', action='store_true')
args = parser.parse_args(line.split())
ADLDownloader(self._fs, args.remote_path, args.local_path,
nthreads=args.threads, chunksize=args.chunksize,
overwrite=args.force)
def help_get(self):
print("get [option]... remote-path [local-path]\n")
print("Retrieve the remote path and store it locally\n")
print("Options:")
print(" -b <int>")
print(" --chunksize <int>")
print(" Set size of chunk to retrieve atomically, in bytes.\n")
print(" -c <int>")
print(" --threads <int>")
print(" Set number of multiple requests to perform at a time.")
print(" -f")
print(" --force")
print(" Overwrite an existing file or directory.")
def do_head(self, line):
parser = argparse.ArgumentParser(prog="head", add_help=False)
parser.add_argument('files', type=str, nargs='+')
parser.add_argument('-c', '--bytes', type=int, default=1024)
args = parser.parse_args(line.split())
for f in args.files:
write_stdout(self._fs.head(f, size=args.bytes))
def help_head(self):
print("head [-c bytes | --bytes bytes] file ...\n")
print("Display first bytes of a file")
def do_info(self, line):
parser = argparse.ArgumentParser(prog="info", add_help=False)
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args(line.split())
for f in args.files:
self._display_dict(self._fs.info(f, invalidate_cache=False))
def help_info(self):
print("info file ...\n")
print("Display file information")
def _display_item(self, item, human_readable):
mode = int(item['permission'], 8)
if item['type'] == 'DIRECTORY':
permissions = "d"
elif item['type'] == 'SYMLINK':
permissions = "l"
else:
permissions = "-"
permissions += "r" if bool(mode & stat.S_IRUSR) else "-"
permissions += "w" if bool(mode & stat.S_IWUSR) else "-"
permissions += "x" if bool(mode & stat.S_IXUSR) else "-"
permissions += "r" if bool(mode & stat.S_IRGRP) else "-"
permissions += "w" if bool(mode & stat.S_IWGRP) else "-"
permissions += "x" if bool(mode & stat.S_IXGRP) else "-"
permissions += "r" if bool(mode & stat.S_IROTH) else "-"
permissions += "w" if bool(mode & stat.S_IWOTH) else "-"
permissions += "x" if bool(mode & stat.S_IXOTH) else "-"
timestamp = item['modificationTime'] // 1000
modified_at = datetime.fromtimestamp(timestamp).strftime('%b %d %H:%M')
if human_readable:
size = "{:5s}".format(self._format_size(item['length']))
else:
size = "{:9d}".format(item['length'])
print("{} {} {} {} {} {}".format(
permissions,
item['owner'][:8],
item['group'][:8],
size,
modified_at,
os.path.basename(item['name'])))
def do_ls(self, line):
parser = argparse.ArgumentParser(prog="ls", add_help=False)
parser.add_argument('dirs', type=str, nargs='*', default=[''])
parser.add_argument('-h', '--human-readable', action='store_true')
parser.add_argument('-l', '--detail', action='store_true')
args = parser.parse_args(line.split())
for d in args.dirs:
for item in self._fs.ls(d, detail=args.detail, invalidate_cache=False):
if args.detail:
self._display_item(item, args.human_readable)
else:
print(os.path.basename(item))
def help_ls(self):
print("ls [-h | --human-readable] [-l | --detail] [file ...]\n")
print("List directory contents")
def do_mkdir(self, line):
parser = argparse.ArgumentParser(prog="mkdir", add_help=False)
parser.add_argument('dirs', type=str, nargs='+')
args = parser.parse_args(line.split())
for d in args.dirs:
self._fs.mkdir(d)
def help_mkdir(self):
print("mkdir directory ...\n")
print("Create directories")
def do_mv(self, line):
parser = argparse.ArgumentParser(prog="mv", add_help=False)
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args(line.split())
self._fs.mv(args.files[0], args.files[1])
def help_mv(self):
print("mv from-path to-path\n")
print("Rename from-path to to-path")
def do_put(self, line):
parser = argparse.ArgumentParser(prog="put", add_help=False)
parser.add_argument('local_path', type=str)
parser.add_argument('remote_path', type=str, nargs='?', default='.')
parser.add_argument('-b', '--chunksize', type=int, default=2**28)
parser.add_argument('-c', '--threads', type=int, default=None)
parser.add_argument('-f', '--force', action='store_true')
args = parser.parse_args(line.split())
ADLUploader(self._fs, args.remote_path, args.local_path,
nthreads=args.threads, chunksize=args.chunksize,
overwrite=args.force)
def help_put(self):
print("put [option]... local-path [remote-path]\n")
print("Store a local file on the remote machine\n")
print("Options:")
print(" -b <int>")
print(" --chunksize <int>")
print(" Set size of chunk to store atomically, in bytes.\n")
print(" -c <int>")
print(" --threads <int>")
print(" Set number of multiple requests to perform at a time.")
print(" -f")
print(" --force")
print(" Overwrite an existing file or directory.")
def do_quit(self, line):
return True
def help_quit(self):
print("quit\n")
print("Exit the application")
def do_rm(self, line):
parser = argparse.ArgumentParser(prog="rm", add_help=False)
parser.add_argument('files', type=str, nargs='+')
parser.add_argument('-r', '--recursive', action='store_true')
args = parser.parse_args(line.split())
for f in args.files:
self._fs.rm(f, recursive=args.recursive)
def help_rm(self):
print("rm [-r | --recursive] file ...\n")
print("Remove directory entries")
def do_rmdir(self, line):
parser = argparse.ArgumentParser(prog="rmdir", add_help=False)
parser.add_argument('dirs', type=str, nargs='+')
args = parser.parse_args(line.split())
for d in args.dirs:
self._fs.rmdir(d)
def help_rmdir(self):
print("rmdir directory ...\n")
print("Remove directories")
def do_tail(self, line):
parser = argparse.ArgumentParser(prog="tail", add_help=False)
parser.add_argument('files', type=str, nargs='+')
parser.add_argument('-c', '--bytes', type=int, default=1024)
args = parser.parse_args(line.split())
for f in args.files:
write_stdout(self._fs.tail(f, size=args.bytes))
def help_tail(self):
print("tail [-c bytes | --bytes bytes] file ...\n")
print("Display last bytes of a file")
def do_touch(self, line):
parser = argparse.ArgumentParser(prog="touch", add_help=False)
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args(line.split())
for f in args.files:
self._fs.touch(f)
def help_touch(self):
print("touch file ...\n")
print("Change file access and modification times")
def do_EOF(self, line):
return True
def do_list_uploads(self, line):
print(ADLUploader.load())
def help_list_uploads(self):
print("Shows interrupted but persisted downloads")
def do_clear_uploads(self, line):
ADLUploader.clear_saved()
def help_clear_uploads(self):
print("Forget all persisted uploads")
def do_resume_upload(self, line):
try:
up = ADLUploader.load()[line]
up.run()
except KeyError:
print("No such upload")
def help_resume_upload(self):
print("resume_upload name")
print()
print("Restart the upload designated by <name> and run until done.")
def do_list_downloads(self, line):
print(ADLDownloader.load())
def help_list_downloads(self):
print("Shows interrupted but persisted uploads")
def do_clear_downloads(self, line):
ADLDownloader.clear_saved()
def help_clear_downloads(self):
print("Forget all persisted downloads")
def do_resume_download(self, line):
try:
up = ADLDownloader.load()[line]
up.run()
except KeyError:
print("No such download")
def help_resume_download(self):
print("resume_download name")
print()
print("Restart the download designated by <name> and run until done.")
def setup_logging(default_level='WARNING'):
""" Setup logging configuration
The logging configuration can be overridden with one environment variable:
ADLFS_LOG_LEVEL (defines logging level)
"""
import logging
import os
import sys
log_level = os.environ.get('ADLFS_LOG_LEVEL', default_level)
levels = dict(
CRITICAL=logging.CRITICAL,
ERROR=logging.ERROR,
WARNING=logging.WARNING,
INFO=logging.INFO,
DEBUG=logging.DEBUG)
if log_level in levels:
log_level = levels[log_level]
else:
sys.exit("invalid ADLFS_LOG_LEVEL '{0}'".format(log_level))
logging.basicConfig(level=log_level)
if __name__ == '__main__':
setup_logging()
fs = AzureDLFileSystem()
if len(sys.argv) > 1:
AzureDataLakeFSCommand(fs).onecmd(' '.join(sys.argv[1:]))
else:
AzureDataLakeFSCommand(fs).cmdloop()
|
markusweimer/azure-data-lake-store-python | tests/settings.py | # -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import base64
import os
import time
from azure.datalake.store import core, lib, multithread
from azure.datalake.store.lib import auth, DataLakeCredential
from tests import fake_settings
PRINCIPAL_TOKEN = lib.auth(tenant_id=os.environ['azure_tenant_id'], client_secret=os.environ['azure_service_principal_secret'], client_id=os.environ['azure_service_principal'])
TOKEN = PRINCIPAL_TOKEN
STORE_NAME = os.environ['azure_data_lake_store_name']
TENANT_ID = fake_settings.TENANT_ID
SUBSCRIPTION_ID = fake_settings.SUBSCRIPTION_ID
RESOURCE_GROUP_NAME = fake_settings.RESOURCE_GROUP_NAME
RECORD_MODE = os.environ.get('RECORD_MODE', 'all').lower()
AZURE_ACL_TEST_APPID = os.environ.get('AZURE_ACL_TEST_APPID')
CLIENT_ID = os.environ['azure_service_principal']
'''
RECORD_MODE = os.environ.get('RECORD_MODE', 'none').lower()
if RECORD_MODE == 'none':
STORE_NAME = fake_settings.STORE_NAME
TENANT_ID = fake_settings.TENANT_ID
TOKEN = DataLakeCredential(
dict(
access=str(base64.b64encode(os.urandom(1420))),
refresh=str(base64.b64encode(os.urandom(718))),
time=time.time(), client='common',
resource="https://datalake.azure.net/",
tenant=TENANT_ID, expiresIn=3600,
tokenType='Bearer'))
SUBSCRIPTION_ID = fake_settings.SUBSCRIPTION_ID
RESOURCE_GROUP_NAME = fake_settings.RESOURCE_GROUP_NAME
PRINCIPAL_TOKEN = DataLakeCredential(
dict(
access=str(base64.b64encode(os.urandom(1420))),
client='e6f90497-409b-4a4e-81f2-8cede2fe6a65', # arbitrary guid.
secret=str(base64.b64encode(os.urandom(1420))),
refresh=None,
time=time.time(),
resource="https://datalake.azure.net/",
tenant=TENANT_ID, expiresIn=3600,
tokenType='Bearer'))
else:
STORE_NAME = os.environ['azure_data_lake_store_name']
TENANT_ID = os.environ.get('azure_tenant_id', 'common')
TOKEN = auth(TENANT_ID,
os.environ['azure_username'],
os.environ['azure_password'])
# set the environment variable to empty to avoid confusion in auth
to_reset_user = os.environ.pop('azure_username', None)
to_reset_pass = os.environ.pop('azure_password', None)
PRINCIPAL_TOKEN = auth(TENANT_ID,
client_id=os.environ['azure_service_principal'],
client_secret=os.environ['azure_service_principal_secret'])
# set it back after auth.
if to_reset_pass:
os.environ['azure_password'] = to_reset_pass
if to_reset_user:
os.environ['azure_username'] = to_reset_user
SUBSCRIPTION_ID = os.environ['azure_subscription_id']
RESOURCE_GROUP_NAME = os.environ['azure_resource_group_name']
'''
|
markusweimer/azure-data-lake-store-python | azure/datalake/store/exceptions.py | # -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
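# Python 2 lacks these builtin exception types, so the NameError fallback below
# defines same-named IOError/OSError subclasses; callers can then catch them
# uniformly on both interpreters.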
try:
FileNotFoundError = FileNotFoundError
except NameError:
class FileNotFoundError(IOError):
pass
try:
FileExistsError = FileExistsError
except NameError:
class FileExistsError(OSError):
pass
try:
PermissionError = PermissionError
except NameError:
class PermissionError(OSError):
pass
class DatalakeBadOffsetException(IOError):
pass
class DatalakeIncompleteTransferException(IOError):
pass
class DatalakeRESTException(IOError):
pass
|
markusweimer/azure-data-lake-store-python | azure/datalake/store/utils.py | # -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import array
from hashlib import md5
import os
import platform
import sys
import threading
PY2 = sys.version_info.major == 2
WIN = platform.system() == 'Windows'
if WIN:
datadir = os.path.join(os.environ['APPDATA'], 'azure-datalake-store')
else:
datadir = os.sep.join([os.path.expanduser("~"), '.config', 'azure-datalake-store'])
try:
os.makedirs(datadir)
except:
pass
def ensure_writable(b):
if PY2 and isinstance(b, array.array):
return b.tostring()
return b
def write_stdout(data):
""" Write bytes or strings to standard output
"""
try:
sys.stdout.buffer.write(data)
except AttributeError:
sys.stdout.write(data.decode('ascii', 'replace'))
def read_block(f, offset, length, delimiter=None):
""" Read a block of bytes from a file
Parameters
----------
fn: file object
a file object that supports seek, tell and read.
offset: int
Byte offset to start read
length: int
Maximum number of bytes to read
delimiter: bytes (optional)
Ensure reading stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
stops at or before the delimiter boundaries that follow the location
``offset + length``. For ADL, if no delimiter is found and the data
requested is > 4MB an exception is raised, since a single record cannot
exceed 4MB and be guaranteed to land contiguously in ADL.
The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'\\nCharlie, 300'
>>> f = BytesIO(bytearray(2**22)) # doctest: +SKIP
>>> read_block(f,0,2**22, delimiter=b'\\n') # doctest: +SKIP
IndexError: No delimiter found within max record size of 4MB.
Transfer without specifying a delimiter (as binary) instead.
"""
f.seek(offset)
bytes = f.read(length)
if delimiter:
# max record size is 4MB
max_record = 2**22
if length > max_record:
raise IndexError('Records larger than ' + str(max_record) + ' bytes are not supported. The length requested was: ' + str(length) + ' bytes')
# get the last index of the delimiter if it exists
try:
last_delim_index = len(bytes) -1 - bytes[::-1].index(delimiter)
# this ensures the length includes all of the last delimiter (in the event that it is more than one character)
length = last_delim_index + len(delimiter)
return bytes[0:length]
except ValueError:
# TODO: Before delimiters can be supported through the ADLUploader logic, the number of chunks being uploaded
# needs to be visible to this method, since it needs to throw if:
# 1. We cannot find a delimiter in <= 4MB of data
# 2. If the remaining size is less than 4MB but there are multiple chunks that need to be stitched together,
# since the delimiter could be split across chunks.
# 3. If delimiters are specified, there must be logic during segment determination that ensures all chunks
# terminate at the end of a record (on a new line), even if that makes the chunk < 256MB.
if length >= max_record:
raise IndexError('No delimiter found within max record size of ' + str(max_record) + ' bytes. Transfer without specifying a delimiter (as binary) instead.')
return bytes
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(args)).encode()).hexdigest()
def commonprefix(paths):
""" Find common directory for all paths
Python's ``os.path.commonprefix`` will not return a valid directory path in
some cases, so we wrote this convenience method.
Examples
--------
>>> # os.path.commonprefix returns '/disk1/foo'
>>> commonprefix(['/disk1/foobar', '/disk1/foobaz'])
'/disk1'
>>> commonprefix(['a/b/c', 'a/b/d', 'a/c/d'])
'a'
>>> commonprefix(['a/b/c', 'd/e/f', 'g/h/i'])
''
"""
return os.path.dirname(os.path.commonprefix(paths))
def clamp(n, smallest, largest):
""" Limit a value to a given range
This is equivalent to smallest <= n <= largest.
Examples
--------
>>> clamp(0, 1, 100)
1
>>> clamp(42, 2, 128)
42
>>> clamp(1024, 1, 32)
32
"""
return max(smallest, min(n, largest))
class CountUpDownLatch(object):
"""CountUpDownLatch provides a thread safe implementation of Up Down latch
"""
def __init__(self):
self.lock = threading.Condition()
self.val = 0
self.total = 0
def increment(self):
self.lock.acquire()
self.val += 1
self.total += 1
self.lock.release()
def decrement(self):
self.lock.acquire()
self.val -= 1
if self.val <= 0:
self.lock.notify_all()
self.lock.release()
def total_processed(self):
self.lock.acquire()
temp = self.total
self.lock.release()
return temp
def is_zero(self):
self.lock.acquire()
while self.val > 0:
self.lock.wait()
self.lock.release()
return True
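# A minimal usage sketch for CountUpDownLatch (illustrative only; the worker
# function below is hypothetical). A coordinator increments the latch once per
# started task, each worker decrements on completion, and is_zero() blocks
# until the count has drained back to zero.
def _latch_example():
    import threading
    latch = CountUpDownLatch()
    def worker():
        # ... do some work here ...
        latch.decrement()
    for _ in range(4):
        latch.increment()
        threading.Thread(target=worker).start()
    latch.is_zero()  # blocks until all four workers have decremented
    return latch.total_processed()  # == 4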
|
markusweimer/azure-data-lake-store-python | tests/fake_settings.py | <gh_stars>10-100
# -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
STORE_NAME = os.environ['azure_data_lake_store_name']
TENANT_ID = os.environ['azure_tenant_id']
SUBSCRIPTION_ID = os.environ['azure_subscription_id']
RESOURCE_GROUP_NAME = os.environ['azure_resource_group_name']
|
markusweimer/azure-data-lake-store-python | tests/test_utils.py | <gh_stars>10-100
# -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
import pytest
from azure.datalake.store.utils import WIN
@pytest.mark.skipif(sys.platform == 'win32', reason="requires non-windows")
def test_non_windows_platform():
assert not WIN
@pytest.mark.skipif(sys.platform != 'win32', reason="requires windows")
def test_windows_platform():
assert WIN
|
markusweimer/azure-data-lake-store-python | tests/test_mock.py | <reponame>markusweimer/azure-data-lake-store-python<filename>tests/test_mock.py
# -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Tests to be executed with mock framework (HTTPretty) rather than actual server.
Do not use the mock framework for functionality tests like end-to-end operation calls.
Only use it for specific internal logic that can't be tested against a real server,
for example how the logic behaves when the server returns a specific error.
This was introduced to test the retry policy but can be carefully used for other tests as well.
"""
import responses
from requests import ConnectionError, ConnectTimeout, ReadTimeout, Timeout, HTTPError
from tests import settings
from azure.datalake.store.exceptions import DatalakeRESTException
from azure.datalake.store.lib import auth
from azure.datalake.store.lib import DataLakeCredential
from tests.testing import azure, azure_teardown, posix, working_dir
from tests.settings import TENANT_ID, CLIENT_ID
test_dir = working_dir()
a = posix(test_dir / 'a')
mock_url = 'https://%s.azuredatalakestore.net/webhdfs/v1/' % settings.STORE_NAME
def test_retry_read_timeout(azure):
__test_retry_error(azure, 200, 2, body=ReadTimeout())
def test_retry_timeout(azure):
__test_retry_error(azure, 200, 2, body=Timeout())
def test_retry_connection_error(azure):
__test_retry_error(azure, 200, 2, body=ConnectionError())
def test_retry_connection_timeout(azure):
__test_retry_error(azure, 200, 2, body=ConnectTimeout())
def test_retry_500(azure):
__test_retry_error(azure, 500, 2)
def test_retry_401(azure):
__test_retry_error(azure, 401, 3)
def test_retry_408(azure):
__test_retry_error(azure, 408, 4)
def test_retry_429(azure):
__test_retry_error(azure, 429, 2)
def test_retry_500_5retry(azure):
__test_retry_error(azure, 500, 5)
def test_retry_500_6retry(azure):
# exceeded max tries
__test_retry_error(azure, 500, 6, is_exception_expected=True)
def test_retry_400(azure):
__test_retry_error(azure, 400, 2, is_exception_expected=True)
def test_retry_501(azure):
__test_retry_error(azure, 501, 2, is_exception_expected=True)
def test_retry_505(azure):
__test_retry_error(azure, 505, 2, is_exception_expected=True)
def test_retry_200(azure):
__test_retry_error(azure, 200, 1)
@responses.activate
def __test_retry_error(azure,
error_status,
total_tries,
is_exception_expected=False,
last_try_status=200,
body=""):
mock_url_a = mock_url + a
while total_tries > 1:
responses.add(responses.PUT, mock_url_a,
body=body, status=error_status)
total_tries -= 1
responses.add(responses.PUT, mock_url_a,
body="", status=last_try_status)
# teardown not required in mock tests
try:
azure.mkdir(a)
assert not is_exception_expected
except DatalakeRESTException:
assert is_exception_expected
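# A standalone sketch of the mocking pattern used above (assumes the
# `responses` and `requests` packages; the URL is a placeholder). Responses
# registered for the same URL are returned in order, which is how the retry
# tests above force N failures followed by a success.
@responses.activate
def _mock_pattern_example():
    import requests
    url = 'https://example.invalid/resource'
    responses.add(responses.GET, url, body='', status=500)    # first call fails
    responses.add(responses.GET, url, body='ok', status=200)  # second succeeds
    assert requests.get(url).status_code == 500
    assert requests.get(url).text == 'ok'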
@responses.activate
def __test_retry_auth(error_code, error_string, is_exception_expected, total_tries=4, last_try_status=200,
last_try_body=None):
import re, adal
end_point_discovery = re.compile("https:\/\/login\.microsoftonline\.com\/common\/discovery\/"
"instance\?authorization_endpoint=.+")
mock_url_auth = "https://login.microsoftonline.com/" + settings.TENANT_ID + "/oauth2/token"
body_discovery = r'{"tenant_discovery_endpoint":"https://login.microsoftonline.com/' + TENANT_ID + \
'/.well-known/openid-configuration"}'
body_error = r'{"error":"' + error_string + r'","error_description":"0","error_codes":[0],"timestamp":"0",' \
r'"trace_id":"0","correlation_id":"0"}'
if last_try_body is None:
last_try_body = r'{"token_type":"Bearer","expires_in":"1","ext_expires_in":"1","expires_on":"1",' \
r'"not_before":"1","resource":"https://datalake.azure.net/","access_token":"a"}'
while total_tries > 0:
responses.add(responses.GET, end_point_discovery, body=body_discovery, status=200)
responses.add(responses.POST, mock_url_auth, body=body_error, status=error_code)
total_tries -= 1
responses.add(responses.GET, end_point_discovery, body=body_discovery, status=200)
responses.add(responses.POST, mock_url_auth, body=last_try_body, status=last_try_status)
try:
token = auth(tenant_id=TENANT_ID, client_secret='GARBAGE', client_id=CLIENT_ID)
assert isinstance(token, DataLakeCredential)
assert not is_exception_expected
except (HTTPError, adal.adal_error.AdalError):
assert is_exception_expected
def test_retry_auth_401():
__test_retry_auth(error_code=401, error_string=r'invalid_client', total_tries=1, is_exception_expected=True)
def test_retry_auth_400():
__test_retry_auth(error_code=400, error_string=r'invalid_client', total_tries=1, is_exception_expected=False)
def test_retry_auth_104():
__test_retry_auth(error_code=104, error_string=r'Connection Error', total_tries=1, is_exception_expected=False )
__test_retry_auth(error_code=104, error_string=r'Connection Error', is_exception_expected=True, total_tries=6)
def test_retry_auth_429():
__test_retry_auth(error_code=429, error_string=r'Too many requests', total_tries=2, is_exception_expected=False)
__test_retry_auth(error_code=429, error_string=r'Too many requests', is_exception_expected=True, total_tries=6)
def test_retry_auth_501():
__test_retry_auth(error_code=501, error_string=r'invalid_client', total_tries=1, is_exception_expected=False)
|
markusweimer/azure-data-lake-store-python | azure/datalake/store/core.py | # -*- coding: utf-8 -*-
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
The main file-system class and functionality.
Provides a pythonic interface to the Azure Data-lake Store, including
file-system commands with typical names and options, and a File object
which is compatible with the built-in File.
"""
# standard imports
import io
import logging
import sys
import uuid
import json
# local imports
from .exceptions import DatalakeBadOffsetException, DatalakeIncompleteTransferException
from .exceptions import FileNotFoundError, PermissionError
from .lib import DatalakeRESTInterface
from .utils import ensure_writable, read_block
from .enums import ExpiryOptionType
from .retry import ExponentialRetryPolicy, NoRetryPolicy
from .multiprocessor import multi_processor_change_acl
if sys.version_info >= (3, 4):
import pathlib
else:
import pathlib2 as pathlib
logger = logging.getLogger(__name__)
valid_expire_types = [x.value for x in ExpiryOptionType]
class AzureDLFileSystem(object):
"""
Access Azure DataLake Store as if it were a file-system
Parameters
----------
store_name: str ("")
Store name to connect to.
token: credentials object
When setting up a new connection, this contains the authorization
credentials (see `lib.auth()`).
url_suffix: str (None)
Domain to send REST requests to. The end-point URL is constructed
using this and the store_name. If None, use default.
api_version: str (2018-09-01)
The API version to target with requests. Changing this value will
change the behavior of the requests and can cause unexpected or
breaking changes. Change this value with caution.
per_call_timeout_seconds: float(60)
This is the timeout for each requests library call.
kwargs: optional key/values
See ``lib.auth()``; full list: tenant_id, username, password, client_id,
client_secret, resource
"""
_singleton = [None]
def __init__(self, token=None, per_call_timeout_seconds=60, **kwargs):
self.token = token
self.kwargs = kwargs
self.per_call_timeout_seconds = per_call_timeout_seconds
self.connect()
self.dirs = {}
self._emptyDirs = []
AzureDLFileSystem._singleton[0] = self
@classmethod
def current(cls):
""" Return the most recently created AzureDLFileSystem
"""
if not cls._singleton[0]:
return cls()
else:
return cls._singleton[0]
def connect(self):
"""
Establish connection object.
"""
self.azure = DatalakeRESTInterface(token=self.token, req_timeout_s=self.per_call_timeout_seconds, **self.kwargs)
self.token = self.azure.token
def __setstate__(self, state):
self.__dict__.update(state)
self.connect()
def open(self, path, mode='rb', blocksize=2 ** 25, delimiter=None):
""" Open a file for reading or writing
Parameters
----------
path: string
Path of file on ADL
mode: string
One of 'rb', 'ab' or 'wb'
blocksize: int
Size of data-node blocks if reading
delimiter: byte(s) or None
For writing delimiter-ended blocks
"""
if 'b' not in mode:
raise NotImplementedError("Text mode not supported, use mode='%s'"
" and manage bytes" % (mode[0] + 'b'))
return AzureDLFile(self, AzureDLPath(path), mode, blocksize=blocksize,
delimiter=delimiter)
def _ls_batched(self, path, batch_size=4000):
"""Batched ListStatus calls. Internal Method"""
if batch_size <= 1:
raise ValueError("Batch size must be strictly greater than 1")
parms = {'listSize': batch_size}
ret = []
continuation_token = "<PASSWORD>"
while continuation_token != "":
ls_call_result = self.azure.call('LISTSTATUS', path, **parms)
data = ls_call_result['FileStatuses']['FileStatus']
ret.extend(data)
continuation_token = ls_call_result['FileStatuses']['continuationToken']
parms['listAfter'] = continuation_token # continuationToken to be used as ListAfter
return ret
def _ls(self, path, invalidate_cache=True, batch_size=4000):
""" List files at given path """
path = AzureDLPath(path).trim()
key = path.as_posix()
if invalidate_cache:
self.invalidate_cache(key)
if key not in self.dirs:
self.dirs[key] = self._ls_batched(key, batch_size=batch_size)
for f in self.dirs[key]:
f['name'] = (path / f['pathSuffix']).as_posix()
return self.dirs[key]
def ls(self, path="", detail=False, invalidate_cache=True):
"""
List all elements under directory specified with path
Parameters
----------
path: str or AzureDLPath
Path to query
detail: bool
Detailed info or not.
invalidate_cache: bool
Whether to invalidate cache or not
Returns
-------
List of elements under directory specified with path
"""
path = AzureDLPath(path)
files = self._ls(path, invalidate_cache)
if not files:
# in this case we just invalidated the cache (if it was true), so no need to do it again
inf = self.info(path, invalidate_cache=False)
if inf['type'] == 'DIRECTORY':
# always return an empty array in this case, because there are no entries underneath the folder
return []
raise FileNotFoundError(path)
if detail:
return files
else:
return [f['name'] for f in files]
def info(self, path, invalidate_cache=True, expected_error_code=None):
"""
File information for path
Parameters
----------
path: str or AzureDLPath
Path to query
invalidate_cache: bool
Whether to invalidate cache or not
expected_error_code: int
Optionally indicates a specific, expected error code, if any.
Returns
-------
File information
"""
path = AzureDLPath(path).trim()
path_as_posix = path.as_posix()
root = path.parent
root_as_posix = root.as_posix()
# in the case of getting info about the root itself or if the cache won't be hit
# simply return the result of a GETFILESTATUS from the service
if invalidate_cache or path_as_posix in {'/', '.'}:
to_return = self.azure.call('GETFILESTATUS', path_as_posix, expected_error_code=expected_error_code)[
'FileStatus']
to_return['name'] = path_as_posix
# add the key/value pair back to the cache so long as it isn't the root
if path_as_posix not in {'/', '.'}:
if root_as_posix not in self.dirs:
self.dirs[root_as_posix] = [to_return]
else:
found = False
for f in self.dirs[root_as_posix]:
if f['name'] == path_as_posix:
found = True
break
if not found:
self.dirs[root_as_posix].append(to_return)
return to_return
for f in self._ls(root, invalidate_cache):
if f['name'] == path_as_posix:
return f
raise FileNotFoundError(path)
def _walk(self, path, invalidate_cache=True, include_dirs=False):
"""
Walk a path recursively and return a list of files and, if requested, directories.
Parameters
----------
path: str or AzureDLPath
Path to query
invalidate_cache: bool
Whether to invalidate cache
include_dirs: bool
Whether to include dirs in return value
Returns
-------
List of files and (optionally) dirs
"""
ret = list(self._ls(path, invalidate_cache))
self._emptyDirs = []
current_subdirs = [f for f in ret if f['type'] != 'FILE']
while current_subdirs:
dirs_below_current_level = []
for apath in current_subdirs:
try:
sub_elements = self._ls(apath['name'], invalidate_cache)
except FileNotFoundError:
# Folder may have been deleted while walk is going on. Infrequent so we can take the linear hit
ret.remove(apath)
continue
if not sub_elements:
self._emptyDirs.append(apath)
else:
ret.extend(sub_elements)
dirs_below_current_level.extend([f for f in sub_elements if f['type'] != 'FILE'])
current_subdirs = dirs_below_current_level
if include_dirs:
return ret
else:
return [f for f in ret if f['type'] == 'FILE']
def _empty_dirs_to_add(self):
""" Returns directories found empty during walk. Only for internal use"""
return self._emptyDirs
def walk(self, path='', details=False, invalidate_cache=True):
"""
Get all files below given path
Parameters
----------
path: str or AzureDLPath
Path to query
details: bool
Whether to include file details
invalidate_cache: bool
Whether to invalidate cache
Returns
-------
List of files
"""
return [f if details else f['name'] for f in self._walk(path, invalidate_cache)]
def glob(self, path, details=False, invalidate_cache=True):
"""
Find files (not directories) by glob-matching.
Parameters
----------
path: str or AzureDLPath
Path to query
details: bool
Whether to include file details
invalidate_cache: bool
Whether to invalidate cache
Returns
-------
List of files
"""
path = AzureDLPath(path).trim()
path_as_posix = path.as_posix()
prefix = path.globless_prefix
allfiles = self.walk(prefix, details, invalidate_cache)
if prefix == path:
return allfiles
return [f for f in allfiles if AzureDLPath(f['name'] if details else f).match(path_as_posix)]
def du(self, path, total=False, deep=False, invalidate_cache=True):
"""
Bytes in keys at path
Parameters
----------
path: str or AzureDLPath
Path to query
total: bool
Return the sum on list
deep: bool
Recursively enumerate or just use files under current dir
invalidate_cache: bool
Whether to invalidate cache
Returns
-------
List of dict of name:size pairs or total size.
"""
if deep:
files = self._walk(path, invalidate_cache)
else:
files = self.ls(path, detail=True, invalidate_cache=invalidate_cache)
if total:
return sum(f.get('length', 0) for f in files)
else:
return {p['name']: p['length'] for p in files}
def df(self, path):
""" Resource summary of path
Parameters
----------
path: str
Path to query
"""
path = AzureDLPath(path).trim()
current_path_info = self.info(path, invalidate_cache=False)
if current_path_info['type'] == 'FILE':
return {'directoryCount': 0, 'fileCount': 1, 'length': current_path_info['length'], 'quota': -1,
'spaceConsumed': current_path_info['length'], 'spaceQuota': -1}
else:
all_files_and_dirs = self._walk(path, include_dirs=True)
dir_count = 1 # 1 as walk doesn't return current directory
length = file_count = 0
for item in all_files_and_dirs:
length += item['length']
if item['type'] == 'FILE':
file_count += 1
else:
dir_count += 1
return {'directoryCount': dir_count, 'fileCount': file_count, 'length': length, 'quota': -1,
'spaceConsumed': length, 'spaceQuota': -1}
def chmod(self, path, mod):
""" Change access mode of path
Note this is not recursive.
Parameters
----------
path: str
Location to change
mod: str
Octal representation of access, e.g., "0777" for public read/write.
See [docs](http://hadoop.apache.org/docs/r2.4.1/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Permission)
"""
path = AzureDLPath(path).trim()
self.azure.call('SETPERMISSION', path.as_posix(), permission=mod)
self.invalidate_cache(path.as_posix())
def set_expiry(self, path, expiry_option, expire_time=None):
"""
Set or remove the expiration time on the specified file.
This operation can only be executed against files.
Note: Folders are not supported.
Parameters
----------
path: str
File path to set or remove expiration time
expire_time: int
The time that the file will expire, corresponding to the expiry_option that was set
expiry_option: str
Indicates the type of expiration to use for the file:
1. NeverExpire: ExpireTime is ignored.
2. RelativeToNow: ExpireTime is an integer in milliseconds representing the expiration date relative to when file expiration is updated.
3. RelativeToCreationDate: ExpireTime is an integer in milliseconds representing the expiration date relative to file creation.
4. Absolute: ExpireTime is an integer in milliseconds, as a Unix timestamp relative to 1/1/1970 00:00:00.
"""
parms = {}
value_to_use = [x for x in valid_expire_types if x.lower() == expiry_option.lower()]
if len(value_to_use) != 1:
raise ValueError(
'expiry_option must be one of: {}. Value given: {}'.format(valid_expire_types, expiry_option))
if value_to_use[0] != ExpiryOptionType.never_expire.value and not expire_time:
raise ValueError(
'expire_time must be specified if the expiry_option is not NeverExpire. Value of expiry_option: {}'.format(
expiry_option))
path = AzureDLPath(path).trim()
parms['expiryOption'] = value_to_use[0]
if expire_time:
parms['expireTime'] = int(expire_time)
self.azure.call('SETEXPIRY', path.as_posix(), is_extended=True, **parms)
self.invalidate_cache(path.as_posix())
def _acl_call(self, action, path, acl_spec=None, invalidate_cache=False):
"""
Helper method for ACL calls to reduce code repetition
Parameters
----------
action: str
The ACL action being executed. For example SETACL
path: str
The path the action is being executed on (file or folder)
acl_spec: str
The optional ACL specification to set on the path in the format
'[default:]user|group|other:[entity id or UPN]:r|-w|-x|-,[default:]user|group|other:[entity id or UPN]:r|-w|-x|-,...'
Note that for remove acl entries the permission (rwx) portion is not required.
invalidate_cache: bool
optionally indicates that the cache of files should be invalidated after this operation
This should always be done for set and remove operations, since the state of the file or folder has changed.
"""
parms = {}
path = AzureDLPath(path).trim()
posix_path = path.as_posix()
if acl_spec:
parms['aclSpec'] = acl_spec
to_return = self.azure.call(action, posix_path, **parms)
if invalidate_cache:
self.invalidate_cache(posix_path)
return to_return
def set_acl(self, path, acl_spec, recursive=False, number_of_sub_process=None):
"""
Set the Access Control List (ACL) for a file or folder.
Note: this is by default not recursive, and applies only to the file or folder specified.
Parameters
----------
path: str
Location to set the ACL on.
acl_spec: str
The ACL specification to set on the path in the format
'[default:]user|group|other:[entity id or UPN]:r|-w|-x|-,[default:]user|group|other:[entity id or UPN]:r|-w|-x|-,...'
recursive: bool
Specifies whether to set ACLs recursively or not
"""
if recursive:
multi_processor_change_acl(adl=self, path=path, method_name="set_acl", acl_spec=acl_spec,
number_of_sub_process=number_of_sub_process)
else:
self._acl_call('SETACL', path, acl_spec, invalidate_cache=True)
def modify_acl_entries(self, path, acl_spec, recursive=False, number_of_sub_process=None):
"""
Modify existing Access Control List (ACL) entries on a file or folder.
If the entry does not exist it is added, otherwise it is updated based on the spec passed in.
No entries are removed by this process (unlike set_acl).
Note: this is by default not recursive, and applies only to the file or folder specified.
Parameters
----------
path: str
Location to set the ACL entries on.
acl_spec: str
The ACL specification to use in modifying the ACL at the path in the format
'[default:]user|group|other:[entity id or UPN]:r|-w|-x|-,[default:]user|group|other:[entity id or UPN]:r|-w|-x|-,...'
recursive: bool
Specifies whether to modify ACLs recursively or not
"""
if recursive:
multi_processor_change_acl(adl=self, path=path, method_name="mod_acl", acl_spec=acl_spec,
number_of_sub_process=number_of_sub_process)
else:
self._acl_call('MODIFYACLENTRIES', path, acl_spec, invalidate_cache=True)
def remove_acl_entries(self, path, acl_spec, recursive=False, number_of_sub_process=None):
"""
Remove existing, named, Access Control List (ACL) entries on a file or folder.
If the entry does not exist already it is ignored.
Default entries cannot be removed this way, please use remove_default_acl for that.
Unnamed entries cannot be removed in this way, please use remove_acl for that.
Note: this is by default not recursive, and applies only to the file or folder specified.
Parameters
----------
path: str
Location to remove the ACL entries.
acl_spec: str
The ACL specification to remove from the ACL at the path in the format (note that the permission portion is missing)
'[default:]user|group|other:[entity id or UPN],[default:]user|group|other:[entity id or UPN],...'
recursive: bool
Specifies whether to remove ACLs recursively or not
"""
if recursive:
multi_processor_change_acl(adl=self, path=path, method_name="rem_acl", acl_spec=acl_spec,
number_of_sub_process=number_of_sub_process)
else:
self._acl_call('REMOVEACLENTRIES', path, acl_spec, invalidate_cache=True)
def get_acl_status(self, path):
"""
Gets Access Control List (ACL) entries for the specified file or directory.
Parameters
----------
path: str
Location to get the ACL.
"""
return self._acl_call('MSGETACLSTATUS', path)['AclStatus']
def remove_acl(self, path):
"""
Remove the entire, non default, ACL from the file or folder, including unnamed entries.
Default entries cannot be removed this way, please use remove_default_acl for that.
Note: this is not recursive, and applies only to the file or folder specified.
Parameters
----------
path: str
Location to remove the ACL.
"""
self._acl_call('REMOVEACL', path, invalidate_cache=True)
def remove_default_acl(self, path):
"""
Remove the entire default ACL from the folder.
Default entries do not exist on files, if a file
is specified, this operation does nothing.
Note: this is not recursive, and applies only to the folder specified.
Parameters
----------
path: str
Location to set the ACL on.
"""
self._acl_call('REMOVEDEFAULTACL', path, invalidate_cache=True)
def chown(self, path, owner=None, group=None):
"""
Change owner and/or owning group
Note this is not recursive.
Parameters
----------
path: str
Location to change
owner: str
UUID of owning entity
group: str
UUID of group
"""
parms = {}
if owner is None and group is None:
raise ValueError('Must supply owner and/or group')
if owner:
parms['owner'] = owner
if group:
parms['group'] = group
path = AzureDLPath(path).trim()
self.azure.call('SETOWNER', path.as_posix(), **parms)
self.invalidate_cache(path.as_posix())
def exists(self, path, invalidate_cache=True):
"""
Does such a file/directory exist?
Parameters
----------
path: str or AzureDLPath
Path to query
invalidate_cache: bool
Whether to invalidate cache
Returns
-------
True or false depending on whether the path exists.
"""
try:
self.info(path, invalidate_cache, expected_error_code=404)
return True
except FileNotFoundError:
return False
def cat(self, path):
"""
Return contents of file
Parameters
----------
path: str or AzureDLPath
Path to query
Returns
-------
Contents of file
"""
with self.open(path, 'rb') as f:
return f.read()
def tail(self, path, size=1024):
"""
Return last bytes of file
Parameters
----------
path: str or AzureDLPath
Path to query
size: int
How many bytes to return
Returns
-------
Last(size) bytes of file
"""
length = self.info(path)['length']
if size > length:
return self.cat(path)
with self.open(path, 'rb') as f:
f.seek(length - size)
return f.read(size)
def head(self, path, size=1024):
"""
Return first bytes of file
Parameters
----------
path: str or AzureDLPath
Path to query
size: int
How many bytes to return
Returns
-------
First(size) bytes of file
"""
with self.open(path, 'rb', blocksize=size) as f:
return f.read(size)
def get(self, path, filename):
"""
Stream data from file at path to local filename
Parameters
----------
path: str or AzureDLPath
ADL Path to read
filename: str or Path
Local file path to write to
Returns
-------
None
"""
with self.open(path, 'rb') as f:
with open(filename, 'wb') as f2:
while True:
data = f.read(f.blocksize)
if len(data) == 0:
break
f2.write(data)
def put(self, filename, path, delimiter=None):
"""
Stream data from local filename to file at path
Parameters
----------
filename: str or Path
Local file path to read from
path: str or AzureDLPath
ADL Path to write to
delimiter: bytes or None
Optional delimiter for delimiter-ended blocks
Returns
-------
None
"""
with open(filename, 'rb') as f:
with self.open(path, 'wb', delimiter=delimiter) as f2:
while True:
data = f.read(f2.blocksize)
if len(data) == 0:
break
f2.write(data)
def mkdir(self, path):
"""
Make new directory
Parameters
----------
path: str or AzureDLPath
Path to create directory
Returns
-------
None
"""
""" """
path = AzureDLPath(path).trim()
self.azure.call('MKDIRS', path.as_posix())
self.invalidate_cache(path)
def rmdir(self, path):
"""
Remove empty directory
Parameters
----------
path: str or AzureDLPath
Directory path to remove
Returns
-------
None
"""
if self.info(path)['type'] != "DIRECTORY":
raise ValueError('Can only rmdir on directories')
# should always invalidate the cache when checking to see if the directory is empty
if self.ls(path, invalidate_cache=True):
raise ValueError('Directory not empty: %s' % path)
self.rm(path, False)
def mv(self, path1, path2):
"""
Move file between locations on ADL
Parameters
----------
path1:
Source Path
path2:
Destination path
Returns
-------
None
"""
path1 = AzureDLPath(path1).trim()
path2 = AzureDLPath(path2).trim()
self.azure.call('RENAME', path1.as_posix(),
destination=path2.as_posix())
self.invalidate_cache(path1)
self.invalidate_cache(path2)
def concat(self, outfile, filelist, delete_source=False):
""" Concatenate a list of files into one new file
Parameters
----------
outfile: path
The file which will be concatenated to. If it already exists,
the extra pieces will be appended.
filelist: list of paths
Existing adl files to concatenate, in order
delete_source: bool (False)
If True, assume that the paths to concatenate exist alone in a
directory, and delete that whole directory when done.
Returns
-------
None
"""
outfile = AzureDLPath(outfile).trim()
delete = 'true' if delete_source else 'false'
sourceList = [AzureDLPath(f).as_posix() for f in filelist]
sources = {}
sources["sources"] = sourceList
self.azure.call('MSCONCAT', outfile.as_posix(),
data=bytearray(json.dumps(sources, separators=(',', ':')), encoding="utf-8"),
deleteSourceDirectory=delete,
headers={'Content-Type': "application/json"},
retry_policy=NoRetryPolicy())
self.invalidate_cache(outfile)
merge = concat
def cp(self, path1, path2):
""" Not implemented. Copy file between locations on ADL """
# TODO: any implementation for this without download?
raise NotImplementedError
def rm(self, path, recursive=False):
"""
Remove a file or directory
Parameters
----------
path: str or AzureDLPath
The location to remove.
recursive: bool (False)
Whether to remove also all entries below, i.e., which are returned
by `walk()`.
Returns
-------
None
"""
path = AzureDLPath(path).trim()
# Always invalidate the cache when attempting to check existence of something to delete
if not self.exists(path, invalidate_cache=True):
raise FileNotFoundError(path)
self.azure.call('DELETE', path.as_posix(), recursive=recursive)
self.invalidate_cache(path)
if recursive:
matches = [p for p in self.dirs if p.startswith(path.as_posix())]
[self.invalidate_cache(m) for m in matches]
def invalidate_cache(self, path=None):
"""
Remove entry from object file-cache
Parameters
----------
path: str or AzureDLPath
Remove the path from object file-cache
Returns
-------
None
"""
if path is None:
self.dirs.clear()
else:
path = AzureDLPath(path).trim()
self.dirs.pop(path.as_posix(), None)
parent = AzureDLPath(path.parent).trim()
self.dirs.pop(parent.as_posix(), None)
def touch(self, path):
"""
Create empty file
Parameters
----------
path: str or AzureDLPath
Path of file to create
Returns
-------
None
"""
with self.open(path, 'wb'):
pass
def read_block(self, fn, offset, length, delimiter=None):
""" Read a block of bytes from an ADL file
Starting at ``offset`` of the file, read ``length`` bytes. If
``delimiter`` is set then we ensure that the read starts and stops at
delimiter boundaries that follow the locations ``offset`` and ``offset
+ length``. If ``offset`` is zero then we start at zero. The
bytestring returned WILL include the end delimiter string.
If offset+length is beyond the eof, reads to eof.
Parameters
----------
fn: string
Path to filename on ADL
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
Examples
--------
>>> adl.read_block('data/file.csv', 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> adl.read_block('data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
Use ``length=None`` to read to the end of the file.
>>> adl.read_block('data/file.csv', 0, None, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\nCharlie, 300'
See Also
--------
distributed.utils.read_block
"""
with self.open(fn, 'rb') as f:
size = f.info()['length']
if offset >= size:
return b''
if length is None:
length = size
if offset + length > size:
length = size - offset
bytes = read_block(f, offset, length, delimiter)
return bytes
# ALIASES
listdir = ls
access = exists
rename = mv
stat = info
unlink = remove = rm
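# A minimal end-to-end usage sketch (not executed on import; it requires a
# real ADL account, so the store name and credentials below are placeholders).
def _filesystem_example():
    from azure.datalake.store import core, lib
    token = lib.auth(tenant_id='<tenant>', client_id='<client>',
                     client_secret='<secret>')
    adl = core.AzureDLFileSystem(token, store_name='<store>')
    print(adl.ls('/'))                        # list entries at the store root
    with adl.open('/data/file.csv', 'rb') as f:
        first_kb = f.read(1024)               # stream the first kilobyte
    return first_kb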
class AzureDLFile(object):
"""
Open ADL key as a file. Data is only loaded and cached on demand.
Parameters
----------
azure: azure connection
path: AzureDLPath
location of file
mode: str {'wb', 'rb', 'ab'}
blocksize: int
Size of the write or read-ahead buffer. For writing (and appending), it will be
truncated to 4MB (2**22).
delimiter: bytes or None
If specified and in write mode, each flush will send data terminating
on this bytestring, potentially leaving some data in the buffer.
Examples
--------
>>> adl = AzureDLFileSystem() # doctest: +SKIP
>>> with adl.open('my-dir/my-file.txt', mode='rb') as f: # doctest: +SKIP
... f.read(10) # doctest: +SKIP
See Also
--------
AzureDLFileSystem.open: used to create AzureDLFile objects
"""
def __init__(self, azure, path, mode='rb', blocksize=2 ** 25,
delimiter=None):
self.mode = mode
if mode not in {'rb', 'wb', 'ab'}:
raise NotImplementedError("File mode must be {'rb', 'wb', 'ab'}, not %s" % mode)
self.path = path
self.azure = azure
self.cache = b""
self.loc = 0
self.delimiter = delimiter
self.start = 0
self.end = 0
self.closed = False
self.trim = True
self.buffer = io.BytesIO()
self.blocksize = blocksize
uniqueid = str(uuid.uuid4())
self.filesessionid = uniqueid
self.leaseid = uniqueid
# always invalidate the cache when checking for existence of a file
# that may be created or written to (for the first time).
try:
file_data = self.azure.info(path, invalidate_cache=True, expected_error_code=404)
exists = True
except FileNotFoundError:
exists = False
# cannot create a new file object out of a directory
if exists and file_data['type'] == 'DIRECTORY':
raise IOError(
'path: {} is a directory, not a file, and cannot be opened for reading or writing'.format(path))
if mode == 'ab' or mode == 'wb':
self.blocksize = min(2 ** 22, blocksize)
if mode == 'ab' and exists:
self.loc = file_data['length']
elif (mode == 'ab' and not exists) or (mode == 'wb'):
# Create the file
_put_data_with_retry(
rest=self.azure.azure,
op='CREATE',
path=self.path.as_posix(),
data=None,
overwrite='true',
write='true',
syncFlag='DATA',
leaseid=self.leaseid,
filesessionid=self.filesessionid)
logger.debug('Created file %s ' % self.path)
else: # mode == 'rb':
if not exists:
raise FileNotFoundError(path.as_posix())
self.size = file_data['length']
def info(self):
""" File information about this path """
return self.azure.info(self.path)
def tell(self):
""" Current file location """
return self.loc
def seek(self, loc, whence=0):
""" Set current file location
Parameters
----------
loc: int
byte location
whence: {0, 1, 2}
from start of file, current location or end of file, resp.
"""
if not self.mode == 'rb':
raise ValueError('Seek only available in read mode')
if whence == 0:
nloc = loc
elif whence == 1:
nloc = self.loc + loc
elif whence == 2:
nloc = self.size + loc
else:
raise ValueError(
"invalid whence (%s, should be 0, 1 or 2)" % whence)
if nloc < 0:
raise ValueError('Seek before start of file')
if nloc > self.size:
raise ValueError('ADLFS does not support seeking beyond file')
self.loc = nloc
return self.loc
def readline(self, length=-1):
"""
Read and return a line from the stream.
If length is specified, at most size bytes will be read.
"""
if length < 0:
length = self.size
line = b""
while True:
# if cache has last bytes of file and its read, return line and exit loop
if self.end >= self.size and self.loc >= self.end:
return line
self._read_blocksize()
found = self.cache[self.loc - self.start:].find(b'\n') + 1
if found:
partialLine = self.cache[
self.loc - self.start: min(self.loc - self.start + found, self.loc - self.start + length)]
else:
partialLine = self.cache[self.loc - self.start:]
self.loc += len(partialLine)
line += partialLine
if found:
return line
def __next__(self):
out = self.readline()
if not out:
raise StopIteration
return out
next = __next__
def __iter__(self):
return self
def readlines(self):
""" Return all lines in a file as a list """
return list(self)
def _fetch(self, start, end):
self.start = start
self.end = min(end, self.size)
response = _fetch_range_with_retry(
self.azure.azure, self.path.as_posix(), self.start, self.end, filesessionid=self.filesessionid)
self.cache = getattr(response, 'content', response)
def _read_blocksize(self, offset=-1):
"""
Reads the next blocksize of data into the cache if the read offset is not already within the cache; otherwise a no-op.
Parameters
----------
offset: int (-1)
offset from where to read; if <0, last read location or beginning of file.
Returns
-------
None
"""
if offset < 0:
offset = self.loc
if offset >= self.size:
self.start = self.size
self.end = self.size
self.cache = b""
return
if offset >= self.start and offset < self.end:
return
if offset > self.size:
raise ValueError('Read offset is outside the File')
self._fetch(offset, offset + self.blocksize)
def read(self, length=-1):
"""
Return data from cache, or fetch pieces as necessary
Parameters
----------
length: int (-1)
Number of bytes to read; if <0, all remaining bytes.
"""
if self.mode != 'rb':
raise ValueError('File not in read mode')
if length < 0:
length = self.size
if self.closed:
raise ValueError('I/O operation on closed file.')
flag = 0
out = b""
while length > 0:
self._read_blocksize()
data_read = self.cache[self.loc - self.start:
min(self.loc - self.start + length, self.end - self.start)]
if not data_read: # Check to catch possible server errors. Ideally shouldn't happen.
flag += 1
if flag >= 5:
raise DatalakeIncompleteTransferException('Could not read data: {}. '
'Repeated zero byte reads. '
'Possible file corruption'.format(self.path))
out += data_read
self.loc += len(data_read)
length -= len(data_read)
if self.loc >= self.size:
length = 0
return out
read1 = read
def readinto(self, b):
"""
Reads data into buffer b
Parameters
----------
b: bytearray
Buffer to which bytes are read into
Returns
-------
Returns number of bytes read.
"""
temp = self.read(len(b))
b[:len(temp)] = temp
return len(temp)
def write(self, data):
"""
Write data to buffer.
Buffer only sent to ADL on flush() or if buffer is bigger than
blocksize.
Parameters
----------
data: bytes
Set of bytes to be written.
"""
if self.mode not in {'wb', 'ab'}:
raise ValueError('File not in write mode')
if self.closed:
raise ValueError('I/O operation on closed file.')
# TODO Flush may be simplified
# Buffered writes so a very large buffer is not copied leading to very large memory consumption
bytes_written = 0
for i in range(0, len(data), self.blocksize):
out = self.buffer.write(ensure_writable(data[i:i + self.blocksize]))
self.loc += out
bytes_written += out
self.flush(syncFlag='DATA')
return bytes_written
def flush(self, syncFlag='METADATA', force=False):
"""
Write buffered data to ADL.
Without a delimiter: uploads the current buffer.
With a delimiter: repeatedly writes chunks of at most blocksize bytes,
each ending on the delimiter, until the buffer is smaller than the
blocksize; if a chunk contains no delimiter, the whole chunk is uploaded.
If force=True, flushes all data in the buffer, even if it doesn't end
with a delimiter; appropriate when closing the file.
"""
if not self.writable() or self.closed:
return
if not (syncFlag == 'METADATA' or syncFlag == 'DATA' or syncFlag == 'CLOSE'):
raise ValueError('syncFlag must be one of these: METADATA, DATA or CLOSE')
common_args_append = {
'rest': self.azure.azure,
'op': 'APPEND',
'path': self.path.as_posix(),
'append': 'true',
'leaseid': self.leaseid,
'filesessionid': self.filesessionid
}
self.buffer.seek(0) # Go to start of buffer
data = self.buffer.read()
while len(data) > self.blocksize:
data_to_write_limit = self.blocksize
if self.delimiter:
delimiter_index = data.rfind(self.delimiter, 0, self.blocksize)
if delimiter_index != -1: # delimiter found
data_to_write_limit = delimiter_index + len(self.delimiter)
offset = self.tell() - len(data)
_put_data_with_retry(syncFlag='DATA', data=data[:data_to_write_limit], offset=offset, **common_args_append)
logger.debug('Wrote %d bytes to %s' % (data_to_write_limit, self))
data = data[data_to_write_limit:]
if force:
offset = self.tell() - len(data)
_put_data_with_retry(syncFlag=syncFlag, data=data, offset=offset, **common_args_append)
logger.debug('Wrote %d bytes to %s' % (len(data), self))
data = b''
self.buffer = io.BytesIO(data)
self.buffer.seek(0, 2) # seek to end for other writes to buffer
def close(self):
""" Close file
If in write mode, causes flush of any unwritten data.
"""
logger.info("closing stream")
if self.closed:
return
if self.writable():
self.flush(syncFlag='CLOSE', force=True)
self.azure.invalidate_cache(self.path.as_posix())
self.closed = True
def readable(self):
"""Return whether the AzureDLFile was opened for reading"""
return self.mode == 'rb'
def seekable(self):
"""Return whether the AzureDLFile is seekable (only in read mode)"""
return self.readable()
def writable(self):
"""Return whether the AzureDLFile was opened for writing"""
return self.mode in {'wb', 'ab'}
def __str__(self):
return "<ADL file: %s>" % (self.path.as_posix())
__repr__ = __str__
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def _fetch_range(rest, path, start, end, stream=False, retry_policy=ExponentialRetryPolicy(), **kwargs):
logger.debug('Fetch: %s, %s-%s', path, start, end)
# if the caller gives a bad start/end combination, OPEN will throw and
# this call will bubble it up
return rest.call(
'OPEN', path, offset=start, length=end - start, read='true', stream=stream, retry_policy=retry_policy, **kwargs)
def _fetch_range_with_retry(rest, path, start, end, stream=False, retries=10,
delay=0.01, backoff=3, **kwargs):
err = None
retry_policy = ExponentialRetryPolicy(max_retries=retries, exponential_retry_interval=delay,
exponential_factor=backoff)
try:
return _fetch_range(rest, path, start, end, stream=False, retry_policy=retry_policy, **kwargs)
except Exception as e:
err = e
exception = RuntimeError('Max number of ADL retries exceeded: exception ' + repr(err))
rest.log_response_and_raise(None, exception)
def _put_data(rest, op, path, data, retry_policy=ExponentialRetryPolicy(), **kwargs):
logger.debug('Put: %s %s, %s', op, path, kwargs)
return rest.call(op, path=path, data=data, retry_policy=retry_policy, **kwargs)
def _put_data_with_retry(rest, op, path, data, retries=10, delay=0.01, backoff=3,
**kwargs):
err = None
retry_policy = ExponentialRetryPolicy(max_retries=retries, exponential_retry_interval=delay,
exponential_factor=backoff)
try:
return _put_data(rest, op, path, data, retry_policy=retry_policy, **kwargs)
except (PermissionError, FileNotFoundError) as e:
rest.log_response_and_raise(None, e)
except DatalakeBadOffsetException as e:
try:
# There is a possibility that a call in previous retry succeeded in the backend
# but didn't generate a response. In that case, any other retry will fail as the
# data is already written. We can try a zero byte append at len(data) + offset
# and see if it succeeds. If it does, we assume that data is written and carry on.
current_offset = kwargs.pop('offset', None)
if current_offset is None:
raise e
return _put_data(rest, op, path, [], retry_policy=retry_policy, offset=current_offset + len(data), **kwargs)
except:
rest.log_response_and_raise(None, e)
except Exception as e:
err = e
logger.debug('Exception %s on ADL upload',
repr(err))
exception = RuntimeError('Max number of ADL retries exceeded: exception ' + repr(err))
rest.log_response_and_raise(None, exception)
class AzureDLPath(type(pathlib.PurePath())):
"""
Subclass of native object-oriented filesystem path.
This is used as a convenience class for reducing boilerplate and
eliminating differences between system-dependent paths.
We subclass the system's concrete pathlib class due to this issue:
http://stackoverflow.com/questions/29850801/subclass-pathlib-path-fails
Parameters
----------
path: AzureDLPath or string
location of file or directory
Examples
--------
>>> p1 = AzureDLPath('/Users/foo') # doctest: +SKIP
>>> p2 = AzureDLPath(p1.name) # doctest: +SKIP
"""
def __contains__(self, s):
""" Return whether string is contained in path. """
return s in self.as_posix()
def __getstate__(self):
return self.as_posix()
def __setstate__(self, state):
self.__init__(state)
@property
def globless_prefix(self):
""" Return shortest path prefix without glob quantifiers. """
parts = []
for part in self.parts:
if any(q in part for q in ['*', '?']):
break
parts.append(part)
return pathlib.PurePath(*parts)
def startswith(self, prefix, *args, **kwargs):
""" Return whether string starts with the prefix.
This is equivalent to `str.startswith`.
"""
return self.as_posix().startswith(prefix.as_posix(), *args, **kwargs)
def trim(self):
""" Return path without anchor (concatenation of drive and root). """
return self.relative_to(self.anchor)
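# A small sketch of the AzureDLPath conveniences above (pure Python, no
# service access needed): trim() strips the anchor, globless_prefix stops at
# the first glob quantifier, and `in` tests substring membership on the
# POSIX form of the path.
def _path_example():
    p = AzureDLPath('/folder/sub/*.csv')
    assert p.trim().as_posix() == 'folder/sub/*.csv'
    assert p.globless_prefix.as_posix() == '/folder/sub'
    assert 'sub' in p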
|
morrisgreenberg/hdp-py | hdp_py/HDP.py | import numpy as np
import pandas as pd
from scipy import sparse
from scipy.special import gammaln as logg
from functools import partial
from numba import jit, float64, int64, int32
def pois_fk_cust(i, x, k, Kmax, ha, hb, new=False):
"""
Computes the mixture components for a given customer across all k values.
MODEL: base measure H ~ Gamma(ha, hb), F(x|phi) ~ Poisson(phi)
All components are calculated exactly in log-space and then exponentiated.
returns: (Kmax,) vector; if new=True, returns a scalar
"""
x = x.flatten() # reshape to 1D, since gibbs routine passes in a 2D array
# Calculate the case where k has no members
fknew_cust = np.exp( -logg(x[i] + 1) + logg(x[i] + ha) - logg(ha) -
(x[i] + ha)*np.log(1 + hb) + ha*np.log(hb) )
if new == True: return fknew_cust
x_kks = [x[k == kk] for kk in range(Kmax)] # subset of customers eating kk
xi_in = np.zeros(Kmax) # offset if x[i] is in this subset
xi_in[k[i]] = 1
# Compute (a,b) params from gamma kernel tricks done in fk function
av = np.array(list(map(np.sum, x_kks))) - xi_in*x[i] + ha
bv = np.array(list(map(len, x_kks))) - xi_in + hb
fk_cust = np.exp( -logg(x[i] + 1) + logg(x[i] + av) - logg(av) -
(x[i] + av)*np.log(1 + bv) + av*np.log(bv) )
return fk_cust
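# A quick numerical check of the "new cluster" component above (assumes scipy
# is available): with H ~ Gamma(ha, rate=hb), the prior predictive of a
# Poisson count is negative binomial with r = ha and p = hb / (1 + hb).
def _pois_fknew_check():
    from scipy.stats import nbinom
    ha, hb = 2.0, 3.0
    x = np.array([[4]])
    k = np.zeros(1, dtype=int)
    direct = pois_fk_cust(0, x, k, Kmax=1, ha=ha, hb=hb, new=True)
    assert np.isclose(direct, nbinom.pmf(4, ha, hb / (1 + hb)))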
def pois_fk_tabl(jj, tt, x, j, t, k, Kmax, ha, hb, new=False):
"""
Computes the mixture components for a given table across all k values.
MODEL: base measure H ~ Gamma(ha, hb), F(x|phi) ~ Poisson(phi)
All components are calculated exactly in log-space and then exponentiated.
returns: (Kmax,) vector; if new=True, returns a scalar
"""
x = x.flatten() # reshape to 1D, since gibbs routine passes in a 2D array
x_jt = x[np.logical_and(j == jj, t == tt)]
kk = k[np.logical_and(j == jj, t == tt)]
fknew_tabl = np.exp( -np.sum(logg(x_jt + 1)) + logg(np.sum(x_jt) + ha) - logg(ha) -
(np.sum(x_jt) + ha)*np.log(len(x_jt) + hb) + ha*np.log(hb) )
# If table jt doesn't exist, just return the "new" mixture component
if len(x_jt) == 0:
#print(f"WARNING: table {(jj, tt)} does not exist currently")
new = True
if new == True: return np.full(Kmax, fknew_tabl)
x_kks = [x[k == kk] for kk in range(Kmax)] # subset of customers at tables serving kk
xjt_in = np.zeros(Kmax) # offset if table x_jt is in this subset
xjt_in[kk[0]] = 1
# Compute (a,b) params from gamma kernel tricks done in fk function
av = np.array(list(map(np.sum, x_kks))) - xjt_in*np.sum(x_jt) + ha
bv = np.array(list(map(len, x_kks))) - xjt_in*len(x_jt) + hb
fk_tabl = np.exp( -np.sum(logg(x_jt + 1)) + logg(np.sum(x_jt) + av) - logg(av) -
(np.sum(x_jt) + av)*np.log(len(x_jt) + bv) + av*np.log(bv) )
return fk_tabl
def mnom_fk_cust(i, x, k, Kmax, L, ha, new=False):
"""
Computes the mixture components for a given customer across all k values.
MODEL: base measure H ~ Dirichlet(L, ha_1,...,ha_L),
F(x|phi) ~ Multinomial(n_ji, phi_1,...,phi_L)
All components are calculated exactly in log-space and then exponentiated.
X can be a dense or a sparse csr-style matrix.
returns: (Kmax,) vector; if new=True, returns a scalar
"""
xi, ni = x[i, :], np.sum(x[i, :])
log_con = logg(ni + 1) - np.sum(logg(xi + np.ones(L))) # term constant for all k
# Calculate the case where k has no members
if new == True:
fknew_cust = np.exp( log_con + np.sum(logg(xi + ha)) - logg(np.sum(xi + ha)) +
logg(np.sum(ha)) - np.sum(logg(ha)) )
return fknew_cust
# Get subset of customers eating kk; each entry is a (#, L) matrix
x_kks = [x[k == kk, :] for kk in range(Kmax)]
# Compute params from Dirichlet kernel tricks done in fk function
a_bot = np.vstack([np.sum(x_kk, axis=0) for x_kk in x_kks]) + ha[None, :] # (Kmax, L)
a_bot[k[i], :] -= xi # offset if xi is in this subset
a_top = np.apply_along_axis(lambda row: row + xi, 1, a_bot)
fk_cust = np.exp( log_con + np.sum(logg(a_top), axis=1) - logg(np.sum(a_top, axis=1)) +
logg(np.sum(a_bot, axis=1)) - np.sum(logg(a_bot), axis=1) )
# Convert back to a dense array in case X was sparse
return np.asarray(fk_cust).ravel()
def mnom_fk_tabl(jj, tt, x, j, t, k, Kmax, L, ha, new=False):
"""
Computes the mixture components for a given table across all k values.
MODEL: base measure H ~ Dirichlet(L, ha_1,...,ha_L),
F(x|phi) ~ Multinomial(n_ji, phi_1,...,phi_L)
All components are calculated exactly in log-space and then exponentiated.
returns: (Kmax,) vector; if new=True, returns a scalar
"""
x_jt = x[np.logical_and(j == jj, t == tt), :] # (|T|, L)
kk = k[np.logical_and(j == jj, t == tt)]
n_jt = np.sum(x_jt, axis=1) # (|T|,)
sum_jt = np.sum(x_jt, axis=0) # (L,)
log_con = np.sum(logg(n_jt + 1)) - np.sum(logg(x_jt + 1)) # term constant for all k
fknew_tabl = np.exp( log_con + np.sum(logg(sum_jt + ha)) - logg(np.sum(sum_jt + ha)) +
logg(np.sum(ha)) - np.sum(logg(ha)) )
# If table jt doesn't exist, just return the "new" mixture component
if x_jt.shape[0] == 0:
#print(f"WARNING: table {(jj, tt)} does not exist currently")
new = True
if new == True: return fknew_tabl
# Get subset of customers eating kk; each entry is a (#, L) matrix
x_kks = [x[k == kk, :] for kk in range(Kmax)]
# Compute params from Dirichlet kernel tricks done in fk function
a_bot = np.vstack([np.sum(x_kk, axis=0) for x_kk in x_kks]) + ha[None, :] # (Kmax, L)
a_bot[kk[0], :] -= sum_jt # offset if table x_jt is in this subset
a_top = a_bot + sum_jt[None, :]
fk_tabl = np.exp( log_con + np.sum(logg(a_top), axis=1) - logg(np.sum(a_top, axis=1)) +
logg(np.sum(a_bot, axis=1)) - np.sum(logg(a_bot), axis=1) )
return fk_tabl
def cat_fk_cust(i, x, k, Kmax, L, ha, new=False):
"""
Computes the mixture components for a given customer across all k values.
MODEL: base measure H ~ Dirichlet(L, ha_1,...,ha_L),
F(x|phi) ~ Categorical(L, phi_1,...,phi_L)
All components are calculated exactly in log-space and then exponentiated.
X can be a dense or a sparse csr-style matrix.
returns: (Kmax,) vector; if new=True, returns a scalar
"""
xi = x[i, :]
ll = sparse.find(xi)[1][0] # get column index of the 1 value
# Calculate the case where k has no members
if new == True:
return ha[ll] / np.sum(ha)
# Store the size of sets V and V_l for each k
V_kks = np.array([np.sum(k == kk) for kk in range(Kmax)])
Vl_kks = np.array([np.sum(x[k == kk, ll]) for kk in range(Kmax)])
fk_cust = (Vl_kks + ha[ll]) / (V_kks + np.sum(ha))
return fk_cust
def cat_fk_cust2(i, x, k, Kmax, L, ha, new=False):
"""Faster version of the above."""
xi = x[i, :]
ll = sparse.find(xi)[1][0] # get column index of the 1 value
# Calculate the case where k has no members
if new == True:
return ha[ll] / np.sum(ha)
# Store the size of sets V and V_l for each k
V_kks = np.zeros(Kmax)
kk_counts = pd.Series(k).value_counts()
V_kks[kk_counts.index] = kk_counts
Vl_kks = np.array([np.sum(x[k == kk, ll]) for kk in range(Kmax)])
fk_cust = (Vl_kks + ha[ll]) / (V_kks + np.sum(ha))
return fk_cust
@jit(float64[:](int64, int32[:,:], int32[:], int64, int64, float64[:]), nopython=True)
def cat_fk_cust3(i, x, k, Kmax, L, ha):
"""Numba-compiled version of the above where New=False. Does not support sparse matrices."""
ll = 0 # get column index of the 1 value
for idx in range(L):
if x[i, idx] == 1:
ll = idx
break
ha_sum = 0
for idx in range(L):
ha_sum += ha[idx]
# Store the size of sets V and V_l for each k
V_kks = np.zeros(Kmax)
Vl_kks = np.zeros(Kmax)
fk_cust = np.zeros(Kmax)
N = x.shape[0]
for kk in range(Kmax):
# Compute a mask which gives the i indices of observations with value k
for idx in range(N):
if k[idx] == kk:
V_kks[kk] += 1
Vl_kks[kk] += x[idx, ll]
fk_cust[kk] = (Vl_kks[kk] + ha[ll]) / (V_kks[kk] + ha_sum)
return fk_cust
@jit(float64(int64, int32[:,:], int32[:], int64, int64, float64[:]), nopython=True)
def cat_fk_cust3_new(i, x, k, Kmax, L, ha):
"""Numba-compiled version of the above where new=True."""
ll = 0 # get column index of the 1 value
for idx in range(L):
if x[i, idx] == 1:
ll = idx
break
# Calculate the case where k has no members
ha_sum = 0
for idx in range(L):
ha_sum += ha[idx]
return ha[ll] / ha_sum
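# A small consistency sketch (assumes numba compiled the functions above and
# that scipy is installed): the dense numba version should agree with the
# sparse reference implementation on one-hot data.
def _cat_fk_consistency_check():
    rng = np.random.RandomState(0)
    L, N, Kmax = 3, 6, 4
    x_dense = np.eye(L, dtype=np.int32)[rng.randint(0, L, N)]  # one-hot rows
    k = rng.randint(0, Kmax, N).astype(np.int32)
    ha = np.ones(L)
    ref = cat_fk_cust(0, sparse.csr_matrix(x_dense), k, Kmax, L, ha)
    fast = cat_fk_cust3(0, x_dense, k, Kmax, L, ha)
    assert np.allclose(ref, fast)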
########################################################################################
class StirlingEngine:
"""
Numerically efficient engine for computing and caching Stirling numbers.
CONSTRUCTOR PARAMETERS
- Nmax: largest integer n for which s(n,m) will need to be computed
PRIVATE ATTRIBUTES
- s_memo_, slog_memo_: running tables of previously computed values
"""
def __init__(self, Nmax):
self.s_memo_ = np.full((Nmax, Nmax), np.nan)
self.slog_memo_ = np.full((Nmax, Nmax), np.nan)
def stirling(self, n, m):
"""
Computes an unsigned Stirling number of the first kind.
Uses dynamic programming to store previously computed s(n,m) values,
as this is a repeatedly-called recursive algorithm.
"""
assert n < self.s_memo_.shape[0] and m < self.s_memo_.shape[0]
# If this has already been computed, return stored value
if not np.isnan(self.s_memo_[n, m]):
return self.s_memo_[n, m]
else:
return_val = np.nan
# Base cases
if (n == 0 and m == 0) or (n == 1 and m == 1):
return_val = 1
elif (n > 0 and m == 0) or m > n:
return_val = 0
# Recursion relation
else:
return_val = self.stirling(n-1, m-1) + (n-1)*self.stirling(n-1, m)
self.s_memo_[n, m] = return_val
return return_val
def stirlog(self, n, m):
"""
Computes the natural logarithm of an unsigned Stirling number,
using the same dynamic programming approach as above.
If s(n,m) = 0, this gets returned as -inf (np.exp(-inf) == 0.0)
This is the preferred function, as stirling() can encounter overflow errors.
"""
assert n < self.slog_memo_.shape[0] and m < self.slog_memo_.shape[0]
# If this has already been computed, return stored value
if not np.isnan(self.slog_memo_[n, m]):
return self.slog_memo_[n, m]
else:
return_val = np.nan
# Base cases
if (n == 0 and m == 0) or (n == 1 and m == 1):
return_val = 0
elif (n > 0 and m == 0) or m > n:
return_val = -np.inf
# Recursion relation
else:
log_s1, log_s2 = self.stirlog(n-1, m-1), self.stirlog(n-1, m)
# If s1 == 0 (log_s1 == -inf), just return (n-1)*log_s2
# By definition, must have s2 > s1, so only need to check s1
if np.isfinite(log_s1):
val = (n-1) * np.exp(log_s2 - log_s1)
# If there is overflow/underflow in `val`, approximate log(1+x) = log(x)
if np.isfinite(val):
return_val = log_s1 + np.log1p(val)
else:
return_val = log_s2 + np.log(n-1)
else:
return_val = log_s2 + np.log(n-1)
self.slog_memo_[n, m] = return_val
return return_val
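# A short sketch of StirlingEngine usage (values follow the recurrence
# s(n, m) = s(n-1, m-1) + (n-1) * s(n-1, m); the unsigned Stirling numbers
# of the first kind for n = 4 are 0, 6, 11, 6, 1).
def _stirling_example():
    eng = StirlingEngine(Nmax=10)
    assert eng.stirling(4, 2) == 11
    assert np.isclose(np.exp(eng.stirlog(4, 2)), 11.0)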
########################################################################################
class HDP:
"""
Model implementing the Chinese Restaurant Franchise Process formulation of the HDP.
CONSTRUCTOR PARAMETERS
- gamma, alpha0: scaling parameters > 0 for base measures H and G0
- f: string representing distribution of data; h is chosen to be conjugate
- hypers: tuple of hyperparameter values specific to f/h scheme chosen
PRIVATE ATTRIBUTES (volatile)
- tk_map_: (J x Tmax) matrix of k values for each (j,t) pair
- n_: (J x Tmax) matrix specifying counts of customers (gibbs_cfr)
- q_: (J x Kmax) matrix specifying counts of customers (gibbs_direct)
- m_: (J x Kmax) matrix specifying counts of tables
- fk_cust_, fk_cust_new_, fk_tabl_: functions to compute mixing components for Gibbs sampling
- stir_: an object of class StirlingEngine which computes Stirling numbers
PUBLIC ATTRIBUTES
cfr_samples: (S x N x 2) matrix of (t, k) values for each data point i;
exists only after gibbs_cfr() has been called
direct_samples: (S x N) matrix of k values for each data point i;
exists only after gibbs_direct() has been called
beta_samples: (S x Kmax+1) matrix of beta values after each iteration;
exists only after gibbs_direct() has been called
"""
def __init__(self, gamma=1, alpha0=1, f='multinomial', hypers=None):
self.g_ = gamma
self.a0_ = alpha0
self.set_priors(f, hypers)
def set_priors(self, f, hypers):
"""
Initializes the type of base measure h_ and data-generation function f_.
Also sets hypers_, the relevant hyperparameters, and
fk_cust_/fk_cust_new_/fk_tabl_, the functions used to compute mixing components.
"""
if f == 'poisson':
# Specify parameters of H ~ Gamma(a,b)
if hypers is None:
self.hypers_ = (1,1)
else: self.hypers_ = hypers
self.fk_cust_ = pois_fk_cust
self.fk_cust_new_ = partial(pois_fk_cust, new=True)
self.fk_tabl_ = pois_fk_tabl
elif f == 'multinomial':
if hypers is None:
L = 2
self.hypers_ = (L, np.ones(L))
else: self.hypers_ = hypers
self.fk_cust_ = mnom_fk_cust
self.fk_cust_new_ = partial(mnom_fk_cust, new=True)
self.fk_tabl_ = mnom_fk_tabl
elif f == 'categorical':
# Identical to multinomial, but with some efficiency upgrades
if hypers is None:
L = 2
self.hypers_ = (L, np.ones(L))
else: self.hypers_ = hypers
self.fk_cust_ = cat_fk_cust
self.fk_cust_new_ = partial(cat_fk_cust, new=True)
self.fk_tabl_ = mnom_fk_tabl
elif f == 'categorical_fast':
# Even more efficient version of categorical; does not support sparse matrices
if hypers is None:
L = 2
self.hypers_ = (L, np.ones(L))
else:
# Ensure hyperparameters have proper data types, for numba functions
self.hypers_ = (int(hypers[0]), hypers[1].astype('float'))
self.fk_cust_ = cat_fk_cust3
self.fk_cust_new_ = cat_fk_cust3_new
self.fk_tabl_ = mnom_fk_tabl
else: raise ValueError
def tally_up(self, it, which=None):
"""
Helper function for computing maps and counts in gibbs().
Given a current iteration in the cfr_samples attribute, does a full
recount of customer/table allocations, updating n_ and m_.
Set which = 'n' or 'm' to only tally up that portion
"""
if which == 'n':
jt_pairs = self.cfr_samples[it,:,0:2]
# Count customers at each table (jt)
cust_counts = pd.Series(map(tuple, jt_pairs)).value_counts()
j_idx, t_idx = tuple(map(np.array, zip(*cust_counts.index)))
self.n_ *= 0
self.n_[j_idx, t_idx] = cust_counts
elif which == 'm':
jt_pairs = self.cfr_samples[it,:,0:2]
# First filter by unique tables (jt), then count tables with each k value
jt_unique, k_idx = np.unique(jt_pairs, axis=0, return_index=True)
jk_pairs = np.c_[self.cfr_samples[it, k_idx, 0],
self.cfr_samples[it, k_idx, 2]]
tabl_counts = pd.Series(map(tuple, jk_pairs)).value_counts()
j_idx, k_idx = tuple(map(np.array, zip(*tabl_counts.index)))
self.m_ *= 0
self.m_[j_idx, k_idx] = tabl_counts
elif which == 'q':
jk_pairs = self.direct_samples[it,:,:]
# Counts customers at each j eating k
cust_counts = pd.Series(map(tuple, jk_pairs)).value_counts()
j_idx, k_idx = tuple(map(np.array, zip(*cust_counts.index)))
self.q_ *= 0
self.q_[j_idx, k_idx] = cust_counts
def get_dist(self, old, new, used, size):
"""
Helper function which standardizes the operation of computing a
full conditional distribution, for both t and k values.
Also normalizes and ensures there are no NANs.
- old: a (size,) vector of probability values for used values
- new: a scalar representing the combined probability of all unused values
- used: a (size,) mask encoding which values in the sample space are being used
- size: the size of the sample space
"""
num_unused = size - np.sum(used)
dist = None
if num_unused == 0:
# In our truncated sample space, there is no room for "new" values
dist = old
else:
dist = old * used + (new / num_unused) * np.logical_not(used)
# Remove nans and add epsilon so that distribution is all positive
#print(f"{dist.round(3)} (sum = {np.sum(dist)})")
dist[np.logical_not(np.isfinite(dist))] = 0
dist += 1e-10
return dist / np.sum(dist)
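    # Worked sketch of get_dist (it reads no sampler state): with old = [.6, .2, 0, 0],
    # new = .2, used = [T, T, F, F] and size = 4, the "new" mass .2 is split evenly over
    # the two unused slots, giving ~[.6, .2, .1, .1] after renormalization.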
def draw_t(self, it, x, j, Tmax, Kmax, verbose):
"""
Helper function which does the draws from the t_ij full conditional.
Updates the counts and the samples matrices at iteration `it`.
Called by gibbs_cfr()
"""
t_next, k_next = self.cfr_samples[it,:,1], self.cfr_samples[it,:,2]
# Cycle through the t value of each customer, conditioning on everything
# Randomize the order in which updates occur
for i in np.random.permutation(len(j)):
jj, tt0, kk0 = j[i], t_next[i], k_next[i]
# Get vector of customer f_k values (dependent on model specification)
old_mixes = self.fk_cust_(i, x, k_next, Kmax, *self.hypers_)
new_mixes = self.fk_cust_new_(i, x, k_next, Kmax, *self.hypers_)
# Calculate pointwise likelihoods p(x_ji | ...)
M = np.sum(self.m_)
Mk = np.sum(self.m_, axis=0) # number of tables serving k
lik = old_mixes @ (Mk / (M + self.g_)) + new_mixes * (self.g_ / (M + self.g_))
cust_offset = np.zeros(Tmax)
cust_offset[tt0] = 1
old_t = (self.n_[jj, :] - cust_offset) * old_mixes[self.tk_map_[jj, :]]
new_t = self.a0_ * lik
# If a table is in use, prob comes from old_t; otherwise, from new_t
# Distribute the weight of new_t across all possible new allocations
t_used = self.n_[jj, :] > 0
t_dist = self.get_dist(old_t, new_t, t_used, Tmax)
tt1 = np.random.choice(Tmax, p=t_dist)
t_next[i] = tt1
self.tally_up(it, which='n')
# If this table was previously unoccupied, we need to select a k
if self.n_[jj, tt1] == 1 and tt0 != tt1:
old_k = np.sum(self.m_, axis=0) * old_mixes
new_k = self.g_ * new_mixes
k_used = np.sum(self.m_, axis=0) > 0
k_dist = self.get_dist(old_k, new_k, k_used, Kmax)
kk1 = np.random.choice(Kmax, p=k_dist)
self.tk_map_[jj, tt1] = kk1
k_next[i] = self.tk_map_[jj, tt1]
self.tally_up(it, which='m')
def draw_k(self, it, x, j, Kmax, verbose):
"""
        Helper function which draws from the k_jt full conditional (the dish served at each occupied table).
Updates the counts and the samples matrices at iteration `it`.
Called by gibbs_cfr()
"""
t_next, k_next = self.cfr_samples[it,:,1], self.cfr_samples[it,:,2]
# Cycle through the k values of each table
j_idx, t_idx = np.where(self.n_ > 0) # find the occupied tables
for i in np.random.permutation(len(j_idx)):
jj, tt = j_idx[i], t_idx[i]
kk0 = self.tk_map_[jj, tt]
# Get vector of table f_k values (dependent on model specification)
old_mixes = self.fk_tabl_(jj, tt, x, j, t_next, k_next, Kmax, *self.hypers_)
new_mixes = self.fk_tabl_(jj, tt, x, j, t_next, k_next, Kmax, *self.hypers_, new=True)
tabl_offset = np.zeros(Kmax)
tabl_offset[kk0] = 1
old_k = (np.sum(self.m_, axis=0) - tabl_offset) * old_mixes
new_k = self.g_ * new_mixes
k_used = np.sum(self.m_, axis=0) > 0
k_dist = self.get_dist(old_k, new_k, k_used, Kmax)
kk1 = np.random.choice(Kmax, p=k_dist)
self.tk_map_[jj, tt] = kk1
k_next[np.logical_and(j == jj, t_next == tt)] = kk1
self.tally_up(it, which='m')
def draw_z(self, it, x, j, Kmax, verbose):
"""
Helper function which does the draws from the z_ij full conditional.
Updates the counts and the samples matrices at iteration `it`.
Called by gibbs_direct()
"""
k_next = self.direct_samples[it,:,1]
# Cycle through the k values of each customer
for i in np.random.permutation(len(j)):
jj, kk0 = j[i], k_next[i]
# Get vector of customer f_k values (dependent on model specification)
old_mixes = self.fk_cust_(i, x, k_next, Kmax, *self.hypers_)
new_mixes = self.fk_cust_new_(i, x, k_next, Kmax, *self.hypers_)
cust_offset = np.zeros(Kmax)
cust_offset[kk0] = 1
old_k = (self.q_[jj, :] - cust_offset +
self.a0_ * self.beta_samples[it, :-1]) * old_mixes
new_k = self.a0_ * self.beta_samples[it, -1] * new_mixes
k_used = np.sum(self.m_, axis=0) > 0
k_dist = self.get_dist(old_k, new_k, k_used, Kmax)
kk1 = np.random.choice(Kmax, p=k_dist)
k_next[i] = kk1
self.q_[jj, kk0] -= 1
self.q_[jj, kk1] += 1
# If this k value was previously unused, must also set the beta_k component
if np.sum(self.q_[:, kk1]) == 1:
b = np.random.beta(1, self.g_)
beta_u = self.beta_samples[it, -1]
self.beta_samples[it, kk1] = b * beta_u
self.beta_samples[it, -1] = (1-b) * beta_u
def draw_m(self, it, x, j, Kmax, verbose):
"""
        Helper function which draws from the m_jk full conditional (the table counts).
Updates the counts and the samples matrices at iteration `it`.
Called by gibbs_direct()
"""
k_next = self.direct_samples[it,:,1]
self.m_ *= 0 # reset the m counts
# Cycle through the k values of each restaurant
j_idx, k_idx = np.where(self.q_ > 0) # find the consumed dishes
for i in np.random.permutation(len(j_idx)):
jj, kk = j_idx[i], k_idx[i]
max_m = self.q_[jj, kk]
abk = self.a0_ * self.beta_samples[it, kk]
m_range = np.arange(max_m) + 1
log_s = np.array([self.stir_.stirlog(max_m, m) for m in m_range])
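            # Full conditional of the table counts (Teh et al. 2006, "Hierarchical Dirichlet
            # Processes"): p(m | q, abk) is proportional to s(q, m) * abk^m * Gamma(abk) / Gamma(abk + q),
            # evaluated here in log space via stirlog and the log-gamma function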
m_dist = np.exp( logg(abk) - logg(abk + max_m) +
log_s + m_range * np.log(abk) )
"""MOSTLY FIXED. m_dist should be a proper distribution"""
m_dist[np.logical_not(np.isfinite(m_dist))] = 0
m_dist += 1e-10
mm1 = np.random.choice(m_range, p=m_dist/np.sum(m_dist))
self.m_[jj, kk] = mm1
def gibbs_cfr(self, x, j, iters, Tmax=None, Kmax=None, verbose=False):
"""
Runs the Gibbs sampler to generate posterior estimates of t and k.
x: data matrix, stored row-wise if multidimensional
j: vector of group labels; must have same #rows as x
iters: number of iterations to run
Tmax: maximum number of clusters for each group
Kmax: maximum number of atoms to draw from base measure H
returns: this HDP object with cfr_samples attribute
"""
group_counts = pd.Series(j).value_counts()
J, N = np.max(j) + 1, len(j)
# Set default Tmax and Kmax, if not provided
if Tmax is None: Tmax = min(100, np.max(group_counts))
if Kmax is None: Kmax = min(100, N)
self.n_ = np.zeros((J, Tmax), dtype='int')
self.m_ = np.zeros((J, Kmax), dtype='int')
self.cfr_samples = np.zeros((iters+1, N, 3), dtype='int')
self.cfr_samples[:,:,0] = j
        np.seterr(all='ignore')
# Set random initial values for t and k assignments
t0, k0 = self.cfr_samples[0,:,1], self.cfr_samples[0,:,2]
t0[:] = np.random.randint(0, Tmax, size=N)
self.tk_map_ = np.random.randint(0, Kmax//2, (J, Tmax))
self.tally_up(it=0, which='n')
for jj in range(J):
for tt in np.where(self.n_[jj, :] > 0)[0]:
k0[np.logical_and(j == jj, t0 == tt)] = self.tk_map_[jj, tt]
self.tally_up(it=0, which='m')
for s in range(iters):
t_prev, k_prev = self.cfr_samples[s,:,1], self.cfr_samples[s,:,2]
t_next, k_next = self.cfr_samples[s+1,:,1], self.cfr_samples[s+1,:,2]
# Copy over the previous iteration as a starting point
t_next[:], k_next[:] = t_prev, k_prev
self.draw_t(s+1, x, j, Tmax, Kmax, verbose)
self.draw_k(s+1, x, j, Kmax, verbose)
self.cfr_samples = self.cfr_samples[1:,:,1:]
return self
def gibbs_direct(self, x, j, iters, Kmax=None, resume=False, verbose=False):
"""
Runs the Gibbs sampler to generate posterior estimates of k.
x: data matrix, stored row-wise if multidimensional
j: vector of group labels; must have same #rows as x
iters: number of iterations to run
Kmax: maximum number of atoms to draw from base measure H
resume: if True, will continue from end of previous direct_samples, if dimensions match up
returns: this HDP object with direct_samples attribute
"""
group_counts = pd.Series(j).value_counts()
J, N = np.max(j) + 1, len(j)
if Kmax is None: Kmax = min(100, N)
prev_direct, prev_beta = None, None
start = 0
        if resume:
# Make sure the x passed in is the same size as it previously was
assert (N == self.direct_samples.shape[1] and
Kmax == self.beta_samples.shape[1] - 1), "Cannot resume with different data."
iters += self.direct_samples.shape[0]
prev_direct, prev_beta = self.direct_samples, self.beta_samples
start = self.direct_samples.shape[0]
self.direct_samples = np.zeros((iters+1, N, 2), dtype='int')
self.direct_samples[:,:,0] = j
self.beta_samples = np.zeros((iters+1, Kmax+1))
self.stir_ = StirlingEngine(np.max(group_counts) + 1)
        np.seterr(all='ignore')
        if resume:
# Fill in the start of the samples with the previously computed samples
self.direct_samples[1:start+1,:,1] = prev_direct
self.beta_samples[1:start+1,:] = prev_beta
# q_ and m_ attributes should already still exist within the object
else:
self.q_ = np.zeros((J, Kmax), dtype='int') # performs the same function as n_
self.m_ = np.zeros((J, Kmax), dtype='int')
# Set random initial values for k assignments
k0 = self.direct_samples[0,:,1]
k0[:] = np.random.randint(0, Kmax, size=N)
self.tally_up(it=0, which='q')
# Implicitly set random t assignments by drawing possible m counts (m_jk <= q_jk)
for jj in range(J):
for kk in range(Kmax):
max_m = self.q_[jj, kk]
if max_m == 1:
self.m_[jj, kk] = 1
elif max_m > 1:
self.m_[jj, kk] = np.random.randint(1, max_m)
# Compute the corresponding beta values from m assignments
Mk = np.sum(self.m_, axis=0)
self.beta_samples[0,:] = np.random.dirichlet(np.append(Mk, self.g_) + 1e-10)
for s in range(start, iters):
# Copy over the previous iteration as a starting point
self.direct_samples[s+1,:,1] = self.direct_samples[s,:,1]
self.beta_samples[s+1,:] = self.beta_samples[s,:]
self.draw_z(s+1, x, j, Kmax, verbose)
self.draw_m(s+1, x, j, Kmax, verbose)
Mk = np.sum(self.m_, axis=0)
# Dirichlet weights must be > 0, so in case some k is unused, add epsilon
self.beta_samples[s+1,:] = np.random.dirichlet(np.append(Mk, self.g_) + 1e-10)
if verbose: print(self.beta_samples[s+1,:].round(3))
self.direct_samples = self.direct_samples[1:,:,1]
self.beta_samples = self.beta_samples[1:,:]
return self
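# A minimal end-to-end sketch (hedged): get_test_data lives in hdp_py/get_data.py below,
# and the multinomial mixing-component functions (mnom_fk_*) are defined elsewhere in this
# package; shapes follow the gibbs_direct docstring. A sketch, not a definitive usage pattern.
def _demo_hdp_direct():
    from hdp_py.get_data import get_test_data
    X, j = get_test_data(N=50, L=4, Jmax=3)
    model = HDP(gamma=1, alpha0=1, f='multinomial', hypers=(4, np.ones(4)))
    model.gibbs_direct(X, j, iters=20, Kmax=10)
    return model.direct_samples.shape  # (20, 50): one sampled k per data point per iteration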
|
morrisgreenberg/hdp-py | hdp_py/get_data.py | <filename>hdp_py/get_data.py<gh_stars>1-10
import urllib.request
from matplotlib import pyplot as plt
import string
from itertools import compress
from nltk.corpus import stopwords
import pandas as pd
from functools import reduce
import numpy as np
import re
import os
import pkgutil
from bs4 import BeautifulSoup
import gensim
import gensim.corpora as corpora
def docsToList(data):
'''
This function takes a string of abstracts and converts it to a list of lists of the words in each abstract.
This function was made specifically for the data obtained here:
https://raw.githubusercontent.com/tdhopper/topic-modeling-datasets/master/data/raw/Nematode%20biology%20abstracts/cgcbib.txt
'''
# Remove '\n' and '\r'
data = data.lower().translate(str.maketrans('\n', ' '))
data = data.translate(str.maketrans('\r', ' '))
# Remove punctuation except for '-' so we can split after each abstract
data = data.translate(str.maketrans('', '', '!"#$%&\'()*+,./;<=>?@[\\]^_`{|}~'))
# Remove numbers
data = data.translate(str.maketrans('','', string.digits))
    # Split the records on the dashed separator line between abstracts
data = data.split('-------------------')
# Remove '-' punctuation now
data = [abstract.translate(str.maketrans('-', ' ')) for abstract in data]
# Remove entries without the word "abstract" in it
abs_check = ['abstract' in i for i in data]
data = list(compress(data, abs_check))
# Only keep the words after 'abstract'
data = [abstract.split('abstract:')[1] for abstract in data]
# Remove any remaining :'s
data = [abstract.translate(str.maketrans(':', ' ')) for abstract in data]
# Remove abstracts that only state 'in french'
not_french = ['in french' not in i for i in data]
data = list(compress(data, not_french))
# Create list of lists output
output = [i.split() for i in data]
return output
def reducedVocab(lists, stop_words = None, min_word_count = 10):
'''
    This function takes a list of documents (each a list of words) and returns the list of lists
    with a reduced vocabulary, the flattened word list, and the vocabulary itself.
'''
    if stop_words is None:
stop_words = set(stopwords.words('english'))
# Remove stop words
words = [i for sublist in lists for i in sublist if not i in stop_words]
# Remove words that appear less than min_word_count times
wordSeries = pd.Series(words)
vocab = list(compress(wordSeries.value_counts().index, wordSeries.value_counts() >= min_word_count))
# Recreate lists with filtered vocab
docs = []
for j in range(len(lists)):
docs.append([i for i in lists[j] if i in vocab])
#flatten docs
one_list = [i for sublist in docs for i in sublist]
return docs, one_list, vocab
def listsToVec(lists, stop_words = None, min_word_count = 10, verbose = 1):
'''
    This function takes a list of lists of the words in each document. It removes any stop words,
    removes words that appear fewer than 'min_word_count' times, and one-hot encodes each
    remaining word over the documents' vocabulary.
Returns: data matrix X, where each row is a draw from a categorical distribution representing one word
vector j encoding the corresponding documents each word belongs to'''
# Remove stop words and words that appear less than 'min_word_count' times
docs, one_list, vocab = reducedVocab(lists, stop_words, min_word_count)
# Map each word to a number
#numbers = list(range(len(vocab)))
#vocab_dict = dict(zip(vocab, numbers))
#x = list(map(vocab_dict.get, one_list))
# Check for empty lists and print warning if one is found
counter = 0
for i in range(len(docs)-1 ,-1, -1):
if len(docs[i]) == 0:
if verbose > 1:
print(f'WARNING: Document {i} is empty and being removed...')
del docs[i]
counter += 1
if verbose == 1 and counter > 1:
print(f'WARNING: {counter} documents are empty and being removed...')
elif verbose == 1 and counter == 1:
print(f'WARNING: {counter} document is empty and being removed...')
X_matrix = pd.DataFrame(np.zeros((len(one_list), len(vocab))),
columns=vocab)
for i, word in enumerate(one_list):
X_matrix.loc[i, word] = 1
# Determine which document each word belongs to
count, j = 0, []
for i in docs:
j.append([count]*len(i))
count += 1
# Reduce to a flattened list
j = [i for sublist in j for i in sublist]
return X_matrix.astype('int'), np.array(j)
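# Hedged toy example of listsToVec: two tiny documents, keeping every word.
def _demo_lists_to_vec():
    docs = [['ant', 'bee', 'ant'], ['bee', 'cat']]
    X, j = listsToVec(docs, stop_words=set(), min_word_count=1)
    # X is a (5 x 3) one-hot matrix, one row per word occurrence; j == [0, 0, 0, 1, 1]
    return X.shape, list(j)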
### DATA GETTING FUNCTIONS
def get_nematode(max_docs = None, min_word_count = 1, LDA = False):
"""
Returns the data matrix X and document encodings j from the nematode abstracts
used in the HDP paper.
"""
url = 'https://raw.githubusercontent.com/tdhopper/topic-modeling-datasets/master/data/raw/Nematode%20biology%20abstracts/cgcbib.txt'
file = urllib.request.urlopen(url)
data = file.read().decode("ISO-8859-1")
lists = docsToList(data)
if max_docs is None:
max_docs = len(lists)
    if not LDA:
return listsToVec(lists[:max_docs], min_word_count=min_word_count)
else:
return reducedVocab(lists[:max_docs], min_word_count = min_word_count)
def get_reuters(max_docs = None, min_word_count = 1, LDA = False):
"""
    Returns the data matrix X and document encodings j from the Reuters corpus
    bundled with the package (data/reut2-*.sgm files), read via pkgutil.
"""
#directory = pkgutil.get_data('hdp_py', 'data') #os.fsencode(data_dir)
docs = []
for i in range(22):
suffix = '%03i' % i
#root = directory.decode('ascii')
#filename = os.fsdecode(file)
#print(filename)
#f = open(filename, 'r')
data = pkgutil.get_data('hdp_py', f'data/reut2-{suffix}.sgm')
soup = BeautifulSoup(data, features='lxml')
contents = soup.findAll('text')
#f.close()
docs.append(str(contents).split('</text>'))
docs = [i for doc in docs for i in doc]
# split on </dateline> and keep everything after it
docs = list(compress(docs, ['</dateline>' in i for i in docs]))
docs = [i.split('</dateline>')[1] for i in docs]
docs = [i.lower().translate(str.maketrans('\n', ' ')) for i in docs]
docs = [i.translate(str.maketrans('\r', ' ')) for i in docs]
docs = [i.translate(str.maketrans('\x03', ' ')) for i in docs]
docs = [i.translate(str.maketrans('', '', string.punctuation)) for i in docs]
docs = [i.translate(str.maketrans('', '', string.digits)) for i in docs]
docs = [i.replace('said',' ') for i in docs] # another stop word
docs = [i.replace('reuter', ' ') for i in docs] # the name of the company at the end of most articles
docs = [i.split() for i in docs]
if max_docs is None:
max_docs = len(docs)
    if not LDA:
return listsToVec(docs[:max_docs], min_word_count=min_word_count)
else:
return reducedVocab(docs[:max_docs], min_word_count = min_word_count)
def get_test_data(N, L, Jmax):
"""
Returns the data matrix X and group encodings j for a random set of multinomial data.
X is an (N,L) matrix and j is an (N,) vector with values drawn from [0,Jmax-1]
"""
j = np.random.choice(Jmax, size=N)
Xtest = np.zeros((N, L), dtype='int')
col_choices = np.random.choice(L, size=N)
Xtest[range(N), col_choices] = 1
return Xtest, j
def get_simulated_pop_data():
"""
Returns the data matrix X, study encodings j, and latent study-group information
z for 3 simulated studies of ant populations. Each row corresponds to a unique trial
"""
np.random.seed(111)
Study1_rates = np.random.uniform(low=0, high=50, size=4)
np.random.seed(112)
Study1_rates[3] = Study1_rates[2] + Study1_rates[1] + np.random.uniform(low=-.1,high=.1)*Study1_rates[2]*Study1_rates[1]
np.random.seed(222)
Study2_rates = np.array((Study1_rates[0]+np.random.uniform(low=-0.5, high=0.5),
np.random.uniform(low=0, high=50),
Study1_rates[2]+np.random.uniform(low=-0.5, high=0.5),
np.random.uniform(low=0, high=50)))
np.random.seed(223)
Study2_rates[3] = Study2_rates[2] + Study2_rates[1] + np.random.uniform(low=-.1,high=.1)*Study2_rates[2]*Study2_rates[1]
np.random.seed(333)
Study3_rates = np.random.uniform(low=0, high=50, size=4)
np.random.seed(334)
Study3_rates[0] = Study2_rates[0]+np.random.uniform(low=-0.5, high=0.5)
np.random.seed(335)
Study3_rates[3] = Study3_rates[2] + Study3_rates[1] + np.random.uniform(low=-.1,high=.1)*Study3_rates[2]*Study3_rates[1]
#Each set of conditions in study 1 done 20 times, study 2 16 times,
#study 3 10 times:
np.random.seed(113)
study1_obs = np.random.poisson(lam=Study1_rates, size=(20,4))
np.random.seed(224)
study2_obs = np.random.poisson(lam=Study2_rates, size=(16,4))
np.random.seed(336)
study3_obs = np.random.poisson(lam=Study3_rates, size=(10,4))
pop_obs = np.concatenate((study1_obs.flatten(), study2_obs.flatten(), study3_obs.flatten()))
study_tracker = np.repeat(np.array(["S1", "S2", "S3"]), [20*4, 16*4, 10*4])
cond_tracker = np.concatenate(np.array((["Control", "Alt", "Temp", "Alt + Temp"]*20,
["Control", "Light", "Temp", "Light + Temp"]*16,
["Control", "Food", "Dirt", "Food + Dirt"]*10)).flatten())
study_factor = np.unique(study_tracker, return_inverse=True)[1]
return pop_obs[:, None], study_factor, cond_tracker
### LDA FUNCTIONS
def LDA_preprocessing(data, n_documents, test_size, min_word_count):
'''
    This function takes in data formatted by any of the get_* functions called with LDA=True.
n_documents: the number of documents to select from data.
test_size: the proportion of n_documents that should be held out for testing
min_word_count: the minimum number of times a word should appear to be kept in vocabulary
This function returns id2word and corpus for LDA training and testing
'''
selected = np.random.choice(len(data), n_documents, replace = False)
subset_data = [data[i] for i in selected]
docs, one_list, vocab = reducedVocab(subset_data, min_word_count = min_word_count)
    # Hold out the last `test_size` fraction for testing, per the docstring
    cut_off = int(np.floor(n_documents * (1 - test_size)))
train, test = docs[:cut_off], docs[cut_off:]
id2word = corpora.Dictionary(docs)
train_corpus = [id2word.doc2bow(doc) for doc in train]
test_corpus = [id2word.doc2bow(doc) for doc in test]
return id2word, train_corpus, test_corpus, test
def LDA(id2word, corpus, n_topics):
'''
This function runs gensim's LdaModel.
'''
lda_model = gensim.models.ldamodel.LdaModel(corpus = corpus,
id2word = id2word,
num_topics = n_topics,
random_state = 23,
alpha = 'asymmetric',
iterations = 500)
return lda_model
def perplexity(model, test_corpus, test):
'''
This function takes a trained LDA model and calculates the perplexity of the test corpus
'''
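    # Perplexity = exp( -(1/N) * sum over test words of log p(w) ), where p(w) is the
    # document's topic weights (theta) dotted with the word's per-topic probabilities (phi)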
model.get_document_topics(test_corpus, minimum_probability = 1e-8, per_word_topics = True)
new_topics = model[test_corpus]
log_perplex = 0
for i in range(len(test_corpus)):
theta = [e for _, e in new_topics[i][0]]
phi = []
for j in range(len(new_topics[i][2])):
first, second = new_topics[i][2][j]
for k in range(len(theta)):
phi.append([e for _, e in second if _ == k])
if len(phi[j*len(theta) + k]) == 0:
phi[j*len(theta) + k] = [0]
phi = np.array(phi).reshape(-1, len(theta))
log_perplex -= np.sum(np.log(np.inner(theta, phi)))
N = len([i for sublist in test for i in sublist])
return np.exp(log_perplex / N)
def plt_perplexity(perplexity, min_topics, max_topics):
    '''
    Plots perplexity against the number of LDA topics.
    First row of the perplexity array holds the perplexity values;
    second row holds the corresponding number of topics used for LDA training.
    (min_topics and max_topics are currently unused.)
    '''
plt.plot(perplexity[1,:], perplexity[0,:])
plt.xlabel('Number of LDA Topics')
plt.ylabel('Perplexity')
plt.title('Perplexity of LDA Model on Test Documents')
plt.show()
|
adriapr/socialight | wifi_tweets_feeder_2.py | from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from pprint import pprint
import json
from random import choice
import requests
import time
# SERVER_IP = 'localhost'
url_root = 'http://192.168.43.59/backdoor'
def blink(text = ''):
base_mode = str(5)
if 'hack' in text.lower():
mode = '4'
else:
mode = '2'
print('blinking with mode ', mode)
r = requests.post(url_root, data={'playfile': mode})
print('Changed mode {}: {}'.format(mode, r.reason))
time.sleep(3)
mode = base_mode
r = requests.post(url_root, data={'playfile': mode})
print('Changed mode {}: {}'.format(mode, r.reason))
class StdOutListener(StreamListener):
def on_data(self, data):
data_dict = json.loads(data)
# pprint(data_dict)
user = data_dict['user']['name']
text = data_dict['text']
print()
print(user)
print(text)
blink(data_dict['text'])
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
print('Start')
r = requests.post(url_root, data={'playfile': '5'})
print('Changed to initial mode (0): {}'.format(r.reason))
print('test initial blink')
blink()
if True:
#Variables that contains the user credentials to access Twitter API
access_token = ""
access_token_secret = ""
consumer_key = ""
consumer_secret = ""
    #This handles Twitter authentication and the connection to the Twitter Streaming API
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=['protopixel', 'hackaton', 'hackaton19', 'protopx', 'llumbcn19', 'llumbcn2019', 'llumbcn'])
    # Note: stream.filter() blocks indefinitely, so nothing placed after it would run
|
captbrando/OptionsQuote | optquote.py | <reponame>captbrando/OptionsQuote
#!/usr/bin/env python3
from bs4 import BeautifulSoup
import requests
import cgi, cgitb
import string
# Get the specific contract we want passed in the GET request.
args = cgi.FieldStorage()
symbol = args.getvalue('c').strip()
# Build our URL and get the HTML back and parse with BeautifulSoup
url = 'https://finance.yahoo.com/quote/' + symbol + '?p=' + symbol
r = requests.get(url)
soup = BeautifulSoup(r.text,'lxml')
# This is easily found with any number of dev tools, but it's the class
# of the HTML tag that has the actual value inside. There is only one
# tag with this class (HUZZAH!) so we can just use the find() method.
current_price = soup.find('span', {'class':'Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)'})
# OK, here's lazybones Brando coming out. I could make this a proper API
# to return a JSON object or something, but given the integration with
# Google Sheets, this was the easiest way to reliably get the value out.
print("Content-type:text/html\r\n\r\n")
print(current_price.text)
|
moorugi98/pyhanabi | hanabi.py | import random
import sys
import copy
import time
GREEN = 0
YELLOW = 1
WHITE = 2
BLUE = 3
RED = 4
ALL_COLORS = [GREEN, YELLOW, WHITE, BLUE, RED]
COLORNAMES = ["green", "yellow", "white", "blue", "red"]
COUNTS = [3,2,2,2,1] # num. of cards in game of each rank
def f(something):
'''
    Semi-intelligently format cards (and nested containers of cards) for printing.
    Not important for the game itself.
:param something:
:return:
'''
if type(something) == list:
return map(f, something)
elif type(something) == dict:
        return {k: f(v) for (k, v) in something.iteritems()}
elif type(something) == tuple and len(something) == 2:
return (COLORNAMES[something[0]],something[1])
return something
def make_deck():
'''
Function to initalise the deck
:return: list, deck
'''
deck = []
for col in ALL_COLORS:
for num, cnt in enumerate(COUNTS):
for i in xrange(cnt):
deck.append((col, num+1))
random.seed() # ADD: otherwise deck is always same?
random.shuffle(deck)
return deck
def initial_knowledge():
'''
initial common knowledge is just counts of all ranks (3,2,2,2,1)
:return: list, knowledge
'''
knowledge = []
for col in ALL_COLORS:
knowledge.append(COUNTS[:])
return knowledge
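# Hedged sanity-check sketch: a fresh deck holds 5 colors x (3+2+2+2+1) = 50 cards, and
# the initial per-card knowledge is the full count table for every color.
def _demo_deck_and_knowledge():
    deck = make_deck()
    know = initial_knowledge()
    return len(deck), know[GREEN]  # (50, [3, 2, 2, 2, 1])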
def hint_color(knowledge, color, truth):
result = []
for col in ALL_COLORS:
if truth == (col == color):
result.append(knowledge[col][:])
else:
result.append([0 for i in knowledge[col]])
return result
def hint_rank(knowledge, rank, truth):
result = []
for col in ALL_COLORS:
colknow = []
for i,k in enumerate(knowledge[col]):
if truth == (i + 1 == rank):
colknow.append(k)
else:
colknow.append(0)
result.append(colknow)
return result
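# Hedged sketch of how hints prune knowledge: a positive color hint keeps only that color's
# counts, while a negative one zeroes exactly those counts.
def _demo_hint_color():
    know = initial_knowledge()
    pos = hint_color(know, GREEN, True)   # this card IS green
    neg = hint_color(know, GREEN, False)  # this card is NOT green
    return pos[GREEN], neg[GREEN]  # ([3, 2, 2, 2, 1], [0, 0, 0, 0, 0])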
def iscard((c,n)):
knowledge = []
for col in ALL_COLORS:
knowledge.append(COUNTS[:])
for i in xrange(len(knowledge[-1])):
if col != c or i+1 != n:
knowledge[-1][i] = 0
else:
knowledge[-1][i] = 1
return knowledge
# different actions are codified with integers
HINT_COLOR = 0
HINT_NUMBER = 1
PLAY = 2
DISCARD = 3
class Action(object):
def __init__(self, type, pnr=None, col=None, num=None, cnr=None):
self.type = type # what kind of action?
self.pnr = pnr # player number
self.col = col # color
self.num = num # rank, 0-based
self.cnr = cnr # card number (n-th card), 0-based
def __str__(self):
if self.type == HINT_COLOR:
return "hints " + str(self.pnr) + " about all their " + COLORNAMES[self.col] + " cards"
if self.type == HINT_NUMBER:
return "hints " + str(self.pnr) + " about all their " + str(self.num)
if self.type == PLAY:
return "plays their " + str(self.cnr)
if self.type == DISCARD:
return "discards their " + str(self.cnr)
def __eq__(self, other):
return (self.type, self.pnr, self.col, self.num, self.cnr) == (other.type, other.pnr, other.col, other.num, other.cnr)
class Player(object):
def __init__(self, name, pnr):
self.name = name
        self.pnr = pnr
        self.explanation = []
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
return random.choice(valid_actions)
def inform(self, action, player, game):
pass
def get_explanation(self):
return self.explanation
def get_possible(knowledge):
'''
Get all possibility of each card
:param knowledge: nested list, knowledge of a single card of nr-th player
:return: list of tuple, all possible combinations of color and rank
'''
result = []
for col in ALL_COLORS:
for rank, count in enumerate(knowledge[col]): # for each [col][rank] combination
if count > 0: # if there are still cards left
result.append((col,rank+1))
return result
def playable(possible, board):
'''
return True if the card is surely playable
:param possible:
:param board:
:return:
'''
for (col,nr) in possible:
if board[col][1] + 1 != nr:
return False
return True
def potentially_playable(possible, board):
'''
return True if the card might be playable
:param possible:
:param board:
:return:
'''
for (col,nr) in possible:
if board[col][1] + 1 == nr:
return True
return False
def discardable(possible, board):
'''
return True if the card is surely discardable
:param possible:
:param board:
:return:
'''
for (col,nr) in possible:
if board[col][1] < nr:
return False
return True
def potentially_discardable(possible, board):
'''
return True if the card might be discardable
:param possible:
:param board:
:return:
'''
for (col,nr) in possible:
if board[col][1] >= nr:
return True
return False
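# Hedged sketch of the certainty predicates; the board format (a list of (color, top_rank)
# tuples) is assumed from the board[col][1] accesses above. On an empty board, a card known
# to be some 1 is surely playable, but a possible 2 breaks that certainty.
def _demo_playable():
    board = [(col, 0) for col in ALL_COLORS]
    return (playable([(RED, 1)], board),            # True
            playable([(RED, 1), (RED, 2)], board))  # False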
# # ADD: reason about possiblity of keeping a card. This is not needed anymore
# def potentially_keep(possible, board, trash):
# for (color,rank) in possible:
# if board[color][1] < rank: # the card is still useful
# for (trashedcolor,trashedrank) in trash: # keep the card if it has been already trashed
# if (color,rank) == (trashedcolor,trashedrank):
# print('match!:', color, rank)
# return True
# return False
def update_knowledge(knowledge, used):
'''
update the knowledge structure of all agents
:param knowledge:
:param used:
:return:
'''
result = copy.deepcopy(knowledge)
for r in result:
for (c,nr) in used:
r[c][nr-1] = max(r[c][nr-1] - used[c,nr], 0)
return result
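# Hedged sketch: `used` maps (color, rank) -> copies seen; every per-card knowledge table
# in the list is decremented accordingly, floored at zero.
def _demo_update_knowledge():
    know = [initial_knowledge()]
    new = update_knowledge(know, {(GREEN, 1): 2})
    return new[0][GREEN]  # [1, 2, 2, 2, 1]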
############# AGENTS ############################
################################################
class InnerStatePlayer(Player):
def __init__(self, name, pnr):
self.name = name
self.explanation = []
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
handsize = len(knowledge[0])
possible = []
for k in knowledge[nr]:
possible.append(get_possible(k))
discards = []
duplicates = []
for i, p in enumerate(possible):
if playable(p, board):
return Action(PLAY, cnr=i)
if discardable(p, board):
discards.append(i)
if discards:
return Action(DISCARD, cnr=random.choice(discards))
playables = []
for i, h in enumerate(hands):
if i != nr:
for j, (col, n) in enumerate(h):
if board[col][1] + 1 == n:
playables.append((i, j))
if playables and hints > 0:
i, j = playables[0]
if random.random() < 0.5:
return Action(HINT_COLOR, pnr=i, col=hands[i][j][0])
return Action(HINT_NUMBER, pnr=i, num=hands[i][j][1])
for i, k in enumerate(knowledge):
if i == nr:
continue
cards = range(len(k))
random.shuffle(cards)
c = cards[0]
(col, num) = hands[i][c]
hinttype = [HINT_COLOR, HINT_NUMBER]
if hinttype and hints > 0:
if random.choice(hinttype) == HINT_COLOR:
return Action(HINT_COLOR, pnr=i, col=col)
else:
return Action(HINT_NUMBER, pnr=i, num=num)
prefer = []
for v in valid_actions:
if v.type in [HINT_COLOR, HINT_NUMBER]:
prefer.append(v)
        prefer = []  # NOTE: this reset clears the collected hints, disabling the fallback below
if prefer and hints > 0:
return random.choice(prefer)
return random.choice([Action(DISCARD, cnr=i) for i in xrange(len(knowledge[0]))])
def inform(self, action, player, game):
pass
class OuterStatePlayer(Player):
def __init__(self, name, pnr):
self.name = name
self.hints = {}
self.pnr = pnr
self.explanation = []
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
handsize = len(knowledge[0])
possible = []
for k in knowledge[nr]:
possible.append(get_possible(k))
discards = []
duplicates = []
for i,p in enumerate(possible):
if playable(p,board):
return Action(PLAY, cnr=i)
if discardable(p,board):
discards.append(i)
if discards:
return Action(DISCARD, cnr=random.choice(discards))
playables = []
for i,h in enumerate(hands):
if i != nr:
for j,(col,n) in enumerate(h):
if board[col][1] + 1 == n:
playables.append((i,j))
playables.sort(key=lambda (i,j): -hands[i][j][1])
while playables and hints > 0:
i,j = playables[0]
knows_rank = True
real_color = hands[i][j][0]
            real_rank = hands[i][j][1]
k = knowledge[i][j]
hinttype = [HINT_COLOR, HINT_NUMBER]
if (j,i) not in self.hints:
self.hints[(j,i)] = []
for h in self.hints[(j,i)]:
hinttype.remove(h)
t = None
if hinttype:
t = random.choice(hinttype)
if t == HINT_NUMBER:
self.hints[(j,i)].append(HINT_NUMBER)
return Action(HINT_NUMBER, pnr=i, num=hands[i][j][1])
if t == HINT_COLOR:
self.hints[(j,i)].append(HINT_COLOR)
return Action(HINT_COLOR, pnr=i, col=hands[i][j][0])
playables = playables[1:]
for i, k in enumerate(knowledge):
if i == nr:
continue
cards = range(len(k))
random.shuffle(cards)
c = cards[0]
(col,num) = hands[i][c]
hinttype = [HINT_COLOR, HINT_NUMBER]
if (c,i) not in self.hints:
self.hints[(c,i)] = []
for h in self.hints[(c,i)]:
hinttype.remove(h)
if hinttype and hints > 0:
if random.choice(hinttype) == HINT_COLOR:
self.hints[(c,i)].append(HINT_COLOR)
return Action(HINT_COLOR, pnr=i, col=col)
else:
self.hints[(c,i)].append(HINT_NUMBER)
return Action(HINT_NUMBER, pnr=i, num=num)
return random.choice([Action(DISCARD, cnr=i) for i in xrange(handsize)])
def inform(self, action, player, game):
if action.type in [PLAY, DISCARD]:
x = str(action)
if (action.cnr,player) in self.hints:
self.hints[(action.cnr,player)] = []
for i in xrange(10):
if (action.cnr+i+1,player) in self.hints:
self.hints[(action.cnr+i,player)] = self.hints[(action.cnr+i+1,player)]
self.hints[(action.cnr+i+1,player)] = []
def generate_hands(knowledge, used={}):
if len(knowledge) == 0:
yield []
return
for other in generate_hands(knowledge[1:], used):
for col in ALL_COLORS:
for i, cnt in enumerate(knowledge[0][col]):
if cnt > 0:
result = [(col, i + 1)] + other
ok = True
thishand = {}
for (c, n) in result:
if (c, n) not in thishand:
thishand[(c, n)] = 0
thishand[(c, n)] += 1
for (c, n) in thishand:
if used[(c, n)] + thishand[(c, n)] > COUNTS[n - 1]:
ok = False
if ok:
yield result
def generate_hands_simple(knowledge, used={}):
if len(knowledge) == 0:
yield []
return
for other in generate_hands_simple(knowledge[1:]):
for col in ALL_COLORS:
for i, cnt in enumerate(knowledge[0][col]):
if cnt > 0:
yield [(col, i + 1)] + other
# Confidence ratio: the most likely hand must be at least `a` times as frequent
# as the runner-up before SelfRecognitionPlayer accepts the deduction
a = 1
class SelfRecognitionPlayer(Player):
def __init__(self, name, pnr, other=OuterStatePlayer):
self.name = name
self.hints = {}
self.pnr = pnr
self.gothint = None
self.last_knowledge = []
self.last_played = []
self.last_board = []
self.other = other
self.explanation = []
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
handsize = len(knowledge[0])
possible = []
if self.gothint:
possiblehands = []
wrong = 0
used = {}
for c in ALL_COLORS:
for i, cnt in enumerate(COUNTS):
used[(c, i + 1)] = 0
for c in trash + played:
used[c] += 1
for h in generate_hands_simple(knowledge[nr], used):
newhands = hands[:]
newhands[nr] = h
other = self.other("Pinocchio", self.gothint[1])
act = other.get_action(self.gothint[1], newhands, self.last_knowledge, self.last_trash,
self.last_played, self.last_board, valid_actions, hints + 1)
lastact = self.gothint[0]
if act == lastact:
possiblehands.append(h)
def do(c, i):
newhands = hands[:]
h1 = h[:]
h1[i] = c
newhands[nr] = h1
print other.get_action(self.gothint[1], newhands, self.last_knowledge, self.last_trash,
self.last_played, self.last_board, valid_actions, hints + 1)
# import pdb
# pdb.set_trace()
else:
wrong += 1
# print len(possiblehands), "would have led to", self.gothint[0], "and not:", wrong
# print f(possiblehands)
if possiblehands:
mostlikely = [(0, 0) for i in xrange(len(possiblehands[0]))]
for i in xrange(len(possiblehands[0])):
counts = {}
for h in possiblehands:
if h[i] not in counts:
counts[h[i]] = 0
counts[h[i]] += 1
for c in counts:
if counts[c] > mostlikely[i][1]:
mostlikely[i] = (c, counts[c])
# print "most likely:", mostlikely
m = max(mostlikely, key=lambda (card, cnt): cnt)
second = mostlikely[:]
second.remove(m)
m2 = max(second, key=lambda (card, cnt): cnt)
if m[1] >= m2[1] * a:
# print ">>>>>>> deduced!", f(m[0]), m[1],"vs", f(m2[0]), m2[1]
knowledge = copy.deepcopy(knowledge)
knowledge[nr][mostlikely.index(m)] = iscard(m[0])
self.gothint = None
for k in knowledge[nr]:
possible.append(get_possible(k))
discards = []
duplicates = []
for i, p in enumerate(possible):
if playable(p, board):
return Action(PLAY, cnr=i)
if discardable(p, board):
discards.append(i)
if discards:
return Action(DISCARD, cnr=random.choice(discards))
playables = []
for i, h in enumerate(hands):
if i != nr:
for j, (col, n) in enumerate(h):
if board[col][1] + 1 == n:
playables.append((i, j))
playables.sort(key=lambda (i, j): -hands[i][j][1])
while playables and hints > 0:
i, j = playables[0]
knows_rank = True
real_color = hands[i][j][0]
            real_rank = hands[i][j][1]
k = knowledge[i][j]
hinttype = [HINT_COLOR, HINT_NUMBER]
if (j, i) not in self.hints:
self.hints[(j, i)] = []
for h in self.hints[(j, i)]:
hinttype.remove(h)
if HINT_NUMBER in hinttype:
self.hints[(j, i)].append(HINT_NUMBER)
return Action(HINT_NUMBER, pnr=i, num=hands[i][j][1])
if HINT_COLOR in hinttype:
self.hints[(j, i)].append(HINT_COLOR)
return Action(HINT_COLOR, pnr=i, col=hands[i][j][0])
playables = playables[1:]
for i, k in enumerate(knowledge):
if i == nr:
continue
cards = range(len(k))
random.shuffle(cards)
c = cards[0]
(col, num) = hands[i][c]
hinttype = [HINT_COLOR, HINT_NUMBER]
if (c, i) not in self.hints:
self.hints[(c, i)] = []
for h in self.hints[(c, i)]:
hinttype.remove(h)
if hinttype and hints > 0:
if random.choice(hinttype) == HINT_COLOR:
self.hints[(c, i)].append(HINT_COLOR)
return Action(HINT_COLOR, pnr=i, col=col)
else:
self.hints[(c, i)].append(HINT_NUMBER)
return Action(HINT_NUMBER, pnr=i, num=num)
return random.choice([Action(DISCARD, cnr=i) for i in xrange(handsize)])
def inform(self, action, player, game):
if action.type in [PLAY, DISCARD]:
x = str(action)
if (action.cnr, player) in self.hints:
self.hints[(action.cnr, player)] = []
for i in xrange(10):
if (action.cnr + i + 1, player) in self.hints:
self.hints[(action.cnr + i, player)] = self.hints[(action.cnr + i + 1, player)]
self.hints[(action.cnr + i + 1, player)] = []
elif action.pnr == self.pnr:
self.gothint = (action, player)
self.last_knowledge = game.knowledge[:]
self.last_board = game.board[:]
self.last_trash = game.trash[:]
self.played = game.played[:]
TIMESCALE = 40.0 / 1000.0 # ms
SLICETIME = TIMESCALE / 10.0
APPROXTIME = SLICETIME / 8.0
def priorities(c, board):
(col, val) = c
if board[col][1] == val - 1:
return val - 1
if board[col][1] >= val:
return 5
if val == 5:
return 15
return 6 + (4 - val)
SENT = 0
ERRORS = 0
COUNT = 0
CAREFUL = True
class TimedPlayer(object):
def __init__(self, name, pnr):
self.name = name
self.explanation = []
self.last_tick = time.time()
self.pnr = pnr
self.last_played = False
self.tt = time.time()
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
global SENT, ERRORS, COUNT
tick = time.time()
duration = round((tick - self.last_tick) / SLICETIME)
other = (self.pnr + 1) % len(hands)
# print(self.pnr, "got", duration)
if duration >= 10:
duration = 9
if duration != SENT:
ERRORS += 1
# print("mismatch", nr, f(hands), f(board), duration, SENT)
COUNT += 1
other_hand = hands[other][:]
def prio(c):
return priorities(c, board)
other_hand.sort(key=prio)
# print(f(other_hand), f(board), list(map(prio, other_hand)), f(hands))
p = prio(other_hand[0])
delta = 0.0
if p >= 5:
delta += 5
# print("idx", hands[other].index(other_hand[0]))
def fix(n):
if n >= len(other_hand):
return len(other_hand) - 1
return int(round(n))
delta += hands[other].index(other_hand[0])
if duration >= 5:
action = Action(DISCARD, cnr=fix(duration - 5))
else:
action = Action(PLAY, cnr=fix(duration))
if self.last_played and hints > 0 and CAREFUL:
action = Action(HINT_COLOR, pnr=other, col=other_hand[0][0])
t1 = time.time()
SENT = delta
# print(self.pnr, "convey", round(delta))
delta -= 0.5
while (t1 - tick) < delta * SLICETIME:
time.sleep(APPROXTIME)
t1 = time.time()
self.last_tick = time.time()
return action
def inform(self, action, player, game):
self.last_played = (action.type == PLAY)
self.last_tick = self.tt
self.tt = time.time()
# print(action, player)
def get_explanation(self):
return self.explanation
CANDISCARD = 128
def format_intention(i):
'''
for pretty printin
:param i:
:return:
'''
if isinstance(i, str):
return i
if i == PLAY:
return "Play"
elif i == DISCARD:
return "Discard"
elif i == CANDISCARD:
return "Can Discard"
return "Keep"
def whattodo(knowledge, pointed, board):
'''
    Reason about what to do with a card, given whether a hint positively identified it.
    (SelfIntentionalPlayer re-implements this logic inline, but pretend() still uses it.)
:param knowledge:
:param pointed:
:param board:
:return:
'''
    # @pointed: whether my card is positively identified and there is a possibility of my card being that
# e.g.) given a hint about all cards that are red, whether my second card can be red
possible = get_possible(knowledge)
play = potentially_playable(possible, board)
discard = potentially_discardable(possible, board)
if play and pointed: # if I can play the card and I possibly have that card
return PLAY
if discard and pointed:
return DISCARD
return None
def pretend(action, knowledge, intentions, hand, board):
'''
    predict the other player's reaction to a candidate hint, and score how good the hint is
:param action: tuple, (type,value)
:param knowledge: nested list, knowledge of [1-nr] player (so the other player)
:param intentions: list, my inferred intention for all players
:param hand: list, hand of 1-nr-th player
:param board: top cards
:return: (bool=isvalid, int=score, expl=list prediction of other players action)
'''
(type,value) = action # type; color or rank, value; the actual value (e.g. red ...)
positive = []
    haspositive = False  # True if some card is positively identified by the hint (e.g. told that it is red)
change = False
if type == HINT_COLOR:
newknowledge = [] # M'_B in paper
for i,(col,num) in enumerate(hand): # color and rank of each i-th card
positive.append(value==col) # which cards are actually red?
newknowledge.append(hint_color(knowledge[i], value, value == col))
if value == col:
haspositive = True
if newknowledge[-1] != knowledge[i]:
change = True
else: # rank hint, analog
newknowledge = []
for i,(col,num) in enumerate(hand):
positive.append(value==num)
newknowledge.append(hint_rank(knowledge[i], value, value == num))
if value == num:
haspositive = True
if newknowledge[-1] != knowledge[i]:
change = True
if not haspositive:
return False, 0, ["Invalid hint"]
if not change:
return False, 0, ["No new information"]
score = 0
predictions = []
pos = False
for i,c,k,p in zip(intentions, hand, newknowledge, positive):
action = whattodo(k, p, board) # assume that the co-player will follow my logic to choose her action!
if action == PLAY and i != PLAY:
#print "would cause them to play", f(c)
return False, 0, predictions + [PLAY]
if action == DISCARD and i not in [DISCARD, CANDISCARD]:
#print "would cause them to discard", f(c)
return False, 0, predictions + [DISCARD]
if action == PLAY and i == PLAY:
pos = True
predictions.append(PLAY)
score += 3
elif action == DISCARD and i in [DISCARD, CANDISCARD]:
pos = True
predictions.append(DISCARD)
if i == DISCARD:
score += 2
else:
score += 1
else:
predictions.append(None)
if not pos:
return False, score, predictions
    return True, score, predictions
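# Hedged sketch of pretend(): the co-player holds a single red 1 on an empty board and we
# intend for them to PLAY it, so hinting "red" should be valid and predicted as PLAY.
def _demo_pretend():
    board = [(c, 0) for c in ALL_COLORS]
    valid, score, predictions = pretend((HINT_COLOR, RED), [initial_knowledge()],
                                        [PLAY], [(RED, 1)], board)
    return valid, predictions  # (True, [PLAY]), with score 3 for the matched intention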
HINT_VALUE = 0.5 # hint is worth half a point in maydiscard decision, but this is not needed anymore
def pretend_discard(act, knowledge, board, trash):
'''
    Compute the expected loss of discarding the card named by `act`.
    (No longer strictly necessary; a simpler alternative is to always discard the oldest card.)
:param act:
:param knowledge:
:param board:
:param trash:
:return:
'''
which = copy.deepcopy(knowledge[act.cnr])
for (col,num) in trash:
        if which[col][num-1]: # if the same type of card is already trashed, better not throw another one away
which[col][num-1] -= 1
for col in ALL_COLORS:
for i in xrange(board[col][1]): # for the rank of each color
if which[col][i]: # if the card can be played, rather not throw it
which[col][i] -= 1
possibilities = sum(map(sum, which)) # normalization constant
expected = 0 # expected value of each possible discarding
terms = []
for col in ALL_COLORS:
for i,cnt in enumerate(which[col]):
rank = i+1 # index shift
if cnt > 0: # if I still have card left of 'col' and 'rank'
prob = cnt*1.0/possibilities # more likely to discard if more cards left
if board[col][1] >= rank: # if this specific potential realization is not needed anymore
expected += prob*HINT_VALUE
terms.append((col,rank,cnt,prob,prob*HINT_VALUE))
else: # if card is still needed
dist = rank - board[col][1] # how relevant is it in the near future?
if cnt > 1:
value = prob*(6-rank)/(dist*dist)
else:
value = (6-rank)
if rank == 5: # take into account that you win a value of getting back a hint token by playing 5
value += HINT_VALUE
value *= prob
expected -= value
terms.append((col,rank,cnt,prob,-value))
return (act, expected, terms)
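# Hedged sketch of pretend_discard(): with no knowledge and an empty board every card is
# still potentially useful, so the expected value of discarding is negative.
def _demo_pretend_discard():
    board = [(c, 0) for c in ALL_COLORS]
    act, expected, terms = pretend_discard(Action(DISCARD, cnr=0),
                                           [initial_knowledge()], board, trash=[])
    return expected < 0  # True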
def format_knowledge(k):
'''
for pretty printing
:param k:
:return:
'''
result = ""
for col in ALL_COLORS:
for i,cnt in enumerate(k[col]):
if cnt > 0:
result += COLORNAMES[col] + " " + str(i+1) + ": " + str(cnt) + "\n"
return result
class IntentionalPlayer(Player):
def __init__(self, name, pnr):
self.name = name
self.hints = {}
self.pnr = pnr
self.gothint = None
self.last_knowledge = []
self.last_played = []
self.last_board = []
self.explanation = []
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
handsize = len(knowledge[0])
possible = []
result = None
self.explanation = []
self.explanation.append(["Your Hand:"] + map(f, hands[1 - nr]))
self.gothint = None
for k in knowledge[nr]:
possible.append(get_possible(k))
discards = []
duplicates = []
for i, p in enumerate(possible):
if playable(p, board):
result = Action(PLAY, cnr=i)
if discardable(p, board):
discards.append(i)
if discards and hints < 8 and not result:
result = Action(DISCARD, cnr=random.choice(discards))
playables = []
useless = []
discardables = []
othercards = trash + board
intentions = [None for i in xrange(handsize)]
for i, h in enumerate(hands):
if i != nr:
for j, (col, n) in enumerate(h):
if board[col][1] + 1 == n:
playables.append((i, j))
intentions[j] = PLAY
if board[col][1] >= n:
useless.append((i, j))
if not intentions[j]:
intentions[j] = DISCARD
if n < 5 and (col, n) not in othercards:
discardables.append((i, j))
if not intentions[j]:
intentions[j] = CANDISCARD
self.explanation.append(["Intentions"] + map(format_intention, intentions))
if hints > 0:
valid = []
for c in ALL_COLORS:
action = (HINT_COLOR, c)
# print "HINT", COLORNAMES[c],
(isvalid, score, expl) = pretend(action, knowledge[1 - nr], intentions, hands[1 - nr], board)
self.explanation.append(["Prediction for: Hint Color " + COLORNAMES[c]] + map(format_intention, expl))
# print isvalid, score
if isvalid:
valid.append((action, score))
for r in xrange(5):
r += 1
action = (HINT_NUMBER, r)
# print "HINT", r,
(isvalid, score, expl) = pretend(action, knowledge[1 - nr], intentions, hands[1 - nr], board)
self.explanation.append(["Prediction for: Hint Rank " + str(r)] + map(format_intention, expl))
# print isvalid, score
if isvalid:
valid.append((action, score))
if valid and not result:
valid.sort(key=lambda (a, s): -s)
# print valid
(a, s) = valid[0]
if a[0] == HINT_COLOR:
result = Action(HINT_COLOR, pnr=1 - nr, col=a[1])
else:
result = Action(HINT_NUMBER, pnr=1 - nr, num=a[1])
self.explanation.append(["My Knowledge"] + map(format_knowledge, knowledge[nr]))
possible = [Action(DISCARD, cnr=i) for i in xrange(handsize)]
scores = map(lambda p: pretend_discard(p, knowledge[nr], board, trash), possible)
def format_term((col, rank, n, prob, val)):
return COLORNAMES[col] + " " + str(rank) + " (%.2f%%): %.2f" % (prob * 100, val)
self.explanation.append(
["Discard Scores"] + map(lambda (a, s, t): "\n".join(map(format_term, t)) + "\n%.2f" % (s), scores))
scores.sort(key=lambda (a, s, t): -s)
if result:
return result
return scores[0][0]
return random.choice([Action(DISCARD, cnr=i) for i in xrange(handsize)])
def inform(self, action, player, game):
if action.type in [PLAY, DISCARD]:
x = str(action)
if (action.cnr, player) in self.hints:
self.hints[(action.cnr, player)] = []
for i in xrange(10):
if (action.cnr + i + 1, player) in self.hints:
self.hints[(action.cnr + i, player)] = self.hints[(action.cnr + i + 1, player)]
self.hints[(action.cnr + i + 1, player)] = []
elif action.pnr == self.pnr:
self.gothint = (action, player)
self.last_knowledge = game.knowledge[:]
self.last_board = game.board[:]
self.last_trash = game.trash[:]
self.played = game.played[:]
# ADD
def shift_index(acted_index, keeplist):
'''
aux function to help with shifting keeplist if a card is played, discarded ...
:param acted_index: int, 0-based index of the card that is acted upon
    :param keeplist: set, set of indices of cards to be kept
    :return: set, the updated keeplist
'''
new_set = copy.deepcopy(keeplist)
for cnr in keeplist:
if cnr >= acted_index:
new_set.remove(cnr)
if cnr > acted_index:
new_set.add(cnr - 1) # shift all cards that had higher or same index to the played index
    return new_set
###### !!!!!!!!! THE MAIN AGENT: SelfIntentionalPlayer !!!!!!!!! #
##############################################
class SelfIntentionalPlayer(Player):
def __init__(self, name, pnr):
self.name = name
self.hints = {}
self.pnr = pnr # player number
        self.gothint = None # None, or (action, player) if this agent was hinted last round
self.last_knowledge = []
self.last_played = []
self.last_board = []
self.explanation = []
self.keeplist = set() # ADD: add a set of cards that shouldn't be discarded
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
'''
choose the action of this agent.
:param nr: int, action of nr-th player
:param hands: list of list, hands[nr]
:param knowledge: nested list, knowledge[nr][i-th card][Color][rank] contains probability
:param trash: list, discarded cards
:param played: list, all cards played successfully
:param board: list, convenience param showing top cards of played
:param valid_actions: list, all possible actions, no hints action given no hint tokens
:param hints: int, number of hint tokens left
:return: tuple, result(of class Action) and score (int)
'''
##### reason about possible hands of yours, return possible #####
        handsize = len(knowledge[0]) # all players have the same hand size, so just take the first player's
possible = [] # list of list of tuples, possible[card][possibilities] = (color,rank)
result = None # What will I do? PLAY, DISCARD, HINT...
self.explanation = [] # Text to be shown in UI
self.explanation.append(["Your Hand:"] + map(f, hands[1-nr]))
###############################################################
##### (1) do what the other player (human) wants me to do #####
# ### 1A) original ###
# action = [] # list, list of all plausible action
# if self.gothint:
# (act, plr) = self.gothint
# if act.type == HINT_COLOR:
# for k in knowledge[nr]:
# action.append(whattodo(k,
# sum(k[act.col]) > 0, # if positively identified
# # e.g.) I am given a hint about red cards, and my ci-th card is possibly red
# board))
# elif act.type == HINT_NUMBER:
# for k in knowledge[nr]:
# cnt = 0
# for c in ALL_COLORS:
# cnt += k[c][act.num - 1]
# action.append(whattodo(k, cnt > 0, board))
#
# if action:
# self.explanation.append(["What you want me to do"] + map(format_intention, action))
# for i, a in enumerate(action): # play if it's possible to play, otherwise discard
# if a == PLAY and (not result or result.type == DISCARD):
# result = Action(PLAY, cnr=i)
# elif a == DISCARD and not result:
# result = Action(DISCARD, cnr=i)
#######################################################################################
# ### 1B) ADD: play conservatively ###
# # Try to keep all cards that are hinted, and play only when it's sure
# action = None # ADD: just keep one possible action and not a list of whole possible actions
# card_index = None
# if self.gothint: # if I am given a hint about my hands
# (act,plr) = self.gothint
# if act.type == HINT_COLOR:
# for ci, card in enumerate(knowledge[nr]):
# pointed = sum(card[act.col]) > 0
# possible_hint = get_possible(card)
# play = playable(possible_hint, board) # should play the surely playable card if possible
# discard = discardable(possible_hint, board) # and only surely discardable
# if play and pointed:
# action = PLAY # the last (the newest) surely playable card should be played
# card_index = ci
# elif discard and pointed and action != PLAY: # discard only if no card is playable
# action = DISCARD
# card_index = ci
# elif act.type == HINT_NUMBER: # analog to color hint
# for ci, card in enumerate(knowledge[nr]):
# cnt = 0
# for c in ALL_COLORS:
# cnt += card[c][act.num-1]
# pointed = cnt > 0
# possible_hint = get_possible(card)
# play = playable(possible_hint, board)
# discard = discardable(possible_hint, board)
# if play and pointed:
# action = PLAY
# card_index = ci
# elif discard and pointed and action != PLAY:
# action = DISCARD
# card_index = ci
#
# # when the hint doesn't lead to any surely possible actions
# if action is None:
# print('KEEP function activated')
# if act.type == HINT_COLOR:
# for ci, card in enumerate(knowledge[nr]):
# pointed = sum(card[act.col]) > 0
# if pointed:
# self.keeplist.add(ci)
# elif act.type == HINT_NUMBER:
# for ci,card in enumerate(knowledge[nr]):
# cnt = 0
# for c in ALL_COLORS:
# cnt += card[c][act.num - 1]
# pointed = cnt > 0
# if pointed:
# self.keeplist.add(ci)
# else:
# result = Action(action, cnr=card_index)
# shift_index(acted_index=card_index)
#
# # this is now deprecated. The idea was to keep the card only if
# # there is exactly one card of that specific type left
# # # ADD: KEEP if no hints about play or discard is given
# # # if (len(set(action)) == 1) and (action[0] is None):
# # if action is None:
# # print('KEEP option activated')
# # if act.type == HINT_COLOR:
# # for ci, card in enumerate(knowledge[nr]):
# # pointed = sum(card[act.col]) > 0
# # if pointed:
# # print('pointed')
# # for col in ALL_COLORS:
# # for rank in range(5):
# # if card[col][rank] == 1: # exactly one card left of that specific sort
# # self.keeplist.add(ci)
# # print('keep {}th card given color hint, card might be col:{} rank:{}'.format(
# # ci+1, COLORNAMES[col], rank+1), self.keeplist)
# # else:
# # print(ci+1, COLORNAMES[col], rank+1, card[col][rank])
# # elif act.type == HINT_NUMBER:
# # for ci, card in enumerate(knowledge[nr]):
# # cnt = 0
# # for c in ALL_COLORS:
# # cnt += card[c][act.num - 1]
# # pointed = cnt > 0
# # if pointed:
# # print('pointed')
# # for col in ALL_COLORS:
# # for rank in range(5):
# # if card[col][rank] == 1:
# # self.keeplist.add(ci)
# # print('keep {}th card given num hint, card might be col:{} rank:{}'.format(
# # ci+1, COLORNAMES[col], rank+1), self.keeplist)
# # else:
# # print(ci+1, COLORNAMES[col], rank+1, card[col][rank])
# self.explanation.append(["What you want me to do"] + map(format_intention, action))
# # this is now deprecated, needed when I have a list of actions
# # for i,a in enumerate(action):
# # if a == PLAY and (not result or result.type == DISCARD): # playing is preferred over discarding
# # result = Action(PLAY, cnr=i)
# # shift_index(i)
# # print('shift keeplist after hintedplay: ', self.keeplist)
# # elif a == DISCARD and not result: # rather discard the first card
# # result = Action(DISCARD, cnr=i)
# # shift_index(i)
# # print('shift keeplist after hinteddiscard: ', self.keeplist)
# ###############################################################################
# ### 1C) ADD: play a bit more aggressively ###
# # prioritise playing if there is any possibility to play a card
# action = None
# card_index = None
#
# if self.gothint: # if I am given a hint about my hands
# (act, plr) = self.gothint
# if act.type == HINT_COLOR:
# for ci, card in enumerate(knowledge[nr]):
# pointed = sum(card[act.col]) > 0
# possible_hint = get_possible(card)
# play = playable(possible_hint, board)
# mayplay = potentially_playable(possible_hint, board) # possibility of playing is prioritised
# discard = discardable(possible_hint, board)
# if play and pointed:
# action = PLAY # the last (the newest) surely playable card should be played
# card_index = ci
# elif mayplay and pointed and action != PLAY:
# action = PLAY # keep action if it's play for sure
# card_index = ci
# elif discard and pointed and action != PLAY:
# action = DISCARD
# card_index = ci
# elif act.type == HINT_NUMBER: # analog to color hint
# for ci, card in enumerate(knowledge[nr]):
# cnt = 0
# for c in ALL_COLORS:
# cnt += card[c][act.num - 1]
# pointed = cnt > 0
# possible_hint = get_possible(card)
# play = playable(possible_hint, board)
# mayplay = potentially_playable(possible_hint, board)
# discard = discardable(possible_hint, board)
# if play and pointed:
# action = PLAY
# card_index = ci
# elif mayplay and pointed and action != PLAY:
# action = PLAY # keep action if it's play for sure
# card_index = ci
# elif discard and pointed and action != PLAY:
# action = DISCARD
# card_index = ci
#
# # when the hint doesn't lead to any possible actions
# if action is None:
# print('KEEP function activated')
# if act.type == HINT_COLOR:
# for ci, card in enumerate(knowledge[nr]):
# pointed = sum(card[act.col]) > 0
# if pointed:
# self.keeplist.add(ci)
# elif act.type == HINT_NUMBER:
# for ci, card in enumerate(knowledge[nr]):
# cnt = 0
# for c in ALL_COLORS:
# cnt += card[c][act.num - 1]
# pointed = cnt > 0
# if pointed:
# self.keeplist.add(ci)
# else:
# result = Action(action, cnr=card_index)
# self.keeplist = shift_index(acted_index=card_index, keeplist=self.keeplist)
# ##############################################################################
### 1D) heuristically favour interpreting hints as a keep sign for older cards ###
action = None # PLAY, DISCARD
card_index = None # index of the card to be played or discarded
if self.gothint: # if I am given a hint about my hands
(act, plr) = self.gothint
pointed_set = set() # all cards that are positively identified
play_set = set() # surely playable cards
mayplay_set = set() # might be playable
keep_set = set() # only one realization might be left
discard_set = set() # surely discardable
# Color hint
if act.type == HINT_COLOR:
for ci, card in enumerate(knowledge[nr]):
pointed = sum(card[act.col]) > 0 # positively identified
possible_hint = get_possible(card)
play = playable(possible_hint, board)
# ADD: Keep
keep = False
if pointed:
pointed_set.add(ci)
for col in ALL_COLORS:
for rank in range(5):
if card[col][rank] == 1: # exactly one realization left
# TODO: also check if it is actually useful
# print('keep {}th card given num hint, card might be col:{} rank:{}'.format(
# ci+1, COLORNAMES[col], rank+1), self.keeplist)
keep = True
mayplay = potentially_playable(possible_hint, board)
discard = discardable(possible_hint, board)
# ADD: append all possible interpretations
if play and pointed:
play_set.add(ci)
elif mayplay and pointed:
mayplay_set.add(ci)
if keep and pointed:
keep_set.add(ci)
if discard and pointed:
discard_set.add(ci)
# Rank hint, analog
elif act.type == HINT_NUMBER:
for ci, card in enumerate(knowledge[nr]):
cnt = 0
for c in ALL_COLORS:
cnt += card[c][act.num - 1]
pointed = cnt > 0
if pointed:
pointed_set.add(ci)
possible_hint = get_possible(card)
play = playable(possible_hint, board)
# ADD: Keep
keep = False
if pointed:
for col in ALL_COLORS:
for rank in range(5):
if card[col][rank] == 1:
# print('keep {}th card given num hint, card might be col:{} rank:{}'.format(
# ci+1, COLORNAMES[col], rank+1), self.keeplist)
keep = True
mayplay = potentially_playable(possible_hint, board)
discard = discardable(possible_hint, board)
# ADD: append all possible interpretations
if play and pointed:
play_set.add(ci)
elif mayplay and pointed:
mayplay_set.add(ci)
if keep and pointed:
keep_set.add(ci)
if discard and pointed:
discard_set.add(ci)
# ADD: Deciding what to do based on all possible interpretations
print('play, mayplay, keep, discard sets: ', play_set, mayplay_set, keep_set, discard_set)
if len(play_set): # if there exists a (surely) playable card
action = PLAY
card_index = min(play_set) # play the oldest playable and keep all others
self.keeplist = self.keeplist.union( # keep all cards except for the played one and discardables
play_set.union(mayplay_set).union(keep_set).difference(set([card_index])))
print('play a playable, keep others')
# TODO: if multiple cards are mayplayable, you should wait until it is certain
elif len(mayplay_set) > 1: # keep all hinted cards if unsure which one is to be played
self.keeplist = self.keeplist.union(mayplay_set.union(keep_set))
print('more than one potentially playable cards')
elif len(mayplay_set) == 1: # if only one card might be playable
if min(mayplay_set) >= 2: # new cards are more likely to be hinted to be played than to be kept
action = PLAY
card_index = min(mayplay_set)
self.keeplist = self.keeplist.union(mayplay_set.union(keep_set).difference(set([card_index])))
print('play a mayplayable that is relatively new')
elif len(mayplay_set.difference(keep_set)): # cards that are only mayplayable and not keep
action = PLAY
card_index = min(mayplay_set.difference(keep_set))
self.keeplist = self.keeplist.union(mayplay_set.union(keep_set).difference(set([card_index])))
print('play an unambiguous mayplayable')
else: # if the card is old, it is probably a sign to keep it
self.keeplist = self.keeplist.union(mayplay_set).union(keep_set)
print('old cards are rather hinted to be kept')
elif len(discard_set): # only if all other options are gone, discard safely
action = DISCARD
card_index = random.choice(list(discard_set)) # random.choice needs a sequence, not a set
self.keeplist = self.keeplist.union(keep_set.difference(discard_set))
elif len(keep_set): # only keep_set is not empty
self.keeplist = self.keeplist.union(keep_set)
else:
print('I am not sure what you are trying to tell me')
self.keeplist = self.keeplist.union(pointed_set)
# if play and pointed:
# if not sure_played: # this is the oldest surely playable card
# action = PLAY
# card_index = ci
# print('PLAY: the first playable')
# else: # although this card is also playable, the older one is first played
# self.keeplist.add(ci)
# print('KEEP: playable but another playable')
# elif mayplay and keep and pointed: # when a card is both pot.play. and keep., heuristics
# if ci < 2: # if cards are old, it is more likely to be hinted to keep to save it
# self.keeplist.add(ci)
# print('KEEP: old cards are more likely to be hinted to keep')
# else:
# if action != PLAY:
# action = PLAY
# card_index = ci
# print('PLAY: no playable, card is new enough')
# else: # Not sure what this might be, rather keep it
# print('KEEP: unsure')
# elif mayplay and pointed:
# if action != PLAY:
# action = PLAY
# card_index = ci
# print('PLAY: mayplayable when no surely playable')
# else:
# self.keeplist.add(ci)
# print('KEEP: mayplayable but other card already to be played')
# elif keep:
# self.keeplist.add(ci)
# print('KEEP: hinted to be kept')
# elif discard and pointed and action != PLAY:
# action = DISCARD
# card_index = ci
# print('DISCARD')
if action is not None:
result = Action(action, cnr=card_index)
self.keeplist = shift_index(acted_index=card_index, keeplist=self.keeplist)
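# Added illustration (hypothetical values, not part of the original logic):
# suppose a color hint touches cards {0, 2, 4} and the analysis above yields
# play_set={2}, mayplay_set={4}, keep_set={0}, discard_set=set(). The policy
# then plays card 2 (the oldest surely playable card) and unions {0, 4} into
# keeplist, protecting the remaining hinted cards from later discards.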
###############################################################################
##### infer my hands #####
self.gothint = None
for card in knowledge[nr]:
possible.append(get_possible(card)) # all possibilities of my hands
###########################
##### 2,3) decide to play or discard #####
# ### A: original ###
# discards = [] # list of all useless cards
# for i,card in enumerate(possible): # for each card in hands
# if playable(card, board) and not result:
# print('i-th card is surely playable: ', i)
# result = Action(PLAY, cnr=i) # surely playable card
# if discardable(card, board):
# discards.append(i) # surely discardable cards
#
# if discards and hints < 8 and not result: # discard if no card is playable and hint token is not max
# ci = random.choice(discards)
# result = Action(DISCARD, cnr=ci)
# ##########################################################
### B: minor changes due to shifting and so on ###
discards = [] # list of all useless cards
if not result: # no action from direct hints in stage 1
for i, card in enumerate(possible): # for each card in hands
if playable(card, board):
print('i-th card is surely playable: ', i+1)
result = Action(PLAY, cnr=i) # should play the newest card if possible
self.keeplist = shift_index(i, self.keeplist) # shift so that keeplist makes sense
print('shift keeplist after play: ', self.keeplist)
elif discardable(card, board):
discards.append(i) # surely discardable cards
if discards and hints < 8 and not result: # discard if no card is playable and hint token is not max
ci = random.choice(discards)
result = Action(DISCARD, cnr=ci)
self.keeplist = shift_index(ci, self.keeplist)
print('shift keeplist after discard: ', self.keeplist)
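# Added note: shift_index (defined earlier in this file) re-indexes keeplist
# after a card leaves the hand; e.g. acting on index 1 with keeplist {0, 3}
# would be expected to yield {0, 2}, since cards behind the played/discarded
# slot slide one position forward when a new card is drawn at the end.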
##########################################################
###############################################################################
##### 4-1) Hint, CalculateGoals: what should other players (human) do ? #####
playables = [] # e.g. playables = [(0, 2)]: card index 2 of player 0 can be played
useless = []
discardables = []
othercards = trash + board
intentions = [None for i in xrange(handsize)] # for printing
for i,h in enumerate(hands): # hand of i-th player
if i != nr: # no need to infer intention of my own (I just know it)
for j, (color, rank) in enumerate(h): # j+1-th card of i+1-th players hand
if board[color][1] + 1 == rank: # if rank of the card is exactly one more than board, it's playable
playables.append((i,j)) # j-th card of i-th player can be played
intentions[j] = PLAY # then j+1-th card should be played
if board[color][1] >= rank: # the card's rank is at most the rank already played,
useless.append((i, j)) # so it can be safely discarded
if not intentions[j]:
intentions[j] = DISCARD
if rank < 5 and (color, rank) not in othercards:
discardables.append((i, j)) # if the card is not in the pile, it might be discarded
if not intentions[j]:
intentions[j] = CANDISCARD # can be discarded (otherwise Intentions is printed as KEEP,
# but it actually did not have any effect in the original version)
self.explanation.append(["Intentions"] + map(format_intention, intentions))
#######################################################################
##### 4-2 ) Predict the action of other players given my hint #####
# TODO: it's nerve-wracking that it keeps giving out useless hints (about discarding) when tokens are scarce
if hints > 0:
valid = []
for c in ALL_COLORS: # giving color hint
action = (HINT_COLOR, c)
# reason about expected action of my co-player
(isvalid,score,expl) = pretend(action, knowledge[1-nr], intentions, hands[1-nr], board)
self.explanation.append(["Prediction for: Hint Color " + COLORNAMES[c]] + map(format_intention, expl))
if isvalid:
valid.append((action,score)) # all valid (action,score) pair are saved in valid
for r in xrange(5): # rank hint, analog to color hint
r += 1
action = (HINT_NUMBER, r)
(isvalid,score, expl) = pretend(action, knowledge[1-nr], intentions, hands[1-nr], board)
self.explanation.append(["Prediction for: Hint Rank " + str(r)] + map(format_intention, expl))
if isvalid:
valid.append((action,score))
if valid and not result:
valid.sort(key=lambda (a,s): -s)
(a,s) = valid[0] # chose hint with highest score based on heuristics
if a[0] == HINT_COLOR:
result = Action(HINT_COLOR,
pnr=1-nr, # hint the 1-nr-th (the other player because this is 2-player based)
col=a[1])
else:
result = Action(HINT_NUMBER, pnr=1-nr, num=a[1])
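# Added note: pretend() (defined elsewhere in this file) simulates how the
# partner would update their knowledge and react to each candidate hint, and
# scores the match against the intentions computed above; the stable sort
# keeps the earliest candidate among equal scores, so color hints are
# preferred over rank hints on ties.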
#############################################################################################
##### 5) may discard if nothing else makes sense #####
# ### A) original, discard a card with lowest expected loss ###
# if result:
# return result
#
# self.explanation.append(["My Knowledge"] + map(format_knowledge, knowledge[nr])) # printing
# possible = [Action(DISCARD, cnr=i) for i in xrange(handsize)]
#
# scores = map(lambda p: pretend_discard(p, knowledge[nr], board, trash), # computed expected loss
# possible) # for each card
#
# def format_term((col, rank, n, prob, val)): # for printing
# return COLORNAMES[col] + " " + str(rank) + " (%.2f%%): %.2f" % (prob * 100, val)
#
# self.explanation.append( # for printing
# ["Discard Scores"] + map(lambda (a, s, t): "\n".join(map(format_term, t)) + "\n%.2f" % (s), scores))
# scores.sort(key=lambda (a, s, t): -s)
# return scores[0][0] # this will somehow get interpreted as discarding I guess
# ########################################
### B)ADD: If AI can be hinted about KEEP option, here you can just discard the oldest ###
if result: # if any other action already has priority, return it
return result
self.explanation.append(["My Knowledge"] + map(format_knowledge, knowledge[nr]))
diff = lambda l1, l2: [x for x in l1 if x not in l2]
maydiscard = diff(xrange(handsize), self.keeplist) # maydiscard only if not to be kept
print('may be discarded: ', maydiscard)
if len(maydiscard):
self.keeplist = shift_index(maydiscard[0], self.keeplist) # the oldest from discardable cards will be discarded
print('shift keeplist after discarding the oldest: ', self.keeplist)
return Action(DISCARD, cnr=maydiscard[0])
else:
self.keeplist = shift_index(0, self.keeplist)
print('you now told to me to keep every card')
return Action(DISCARD, cnr=0)
#############################################################################
#############################################################################
#############################################################################
def inform(self, action, player, game):
# TODO: does the AI use the information from negatively identified cards?
# e.g.: if I am given a hint that my 1st and 3rd cards are red,
# it should know that cards 2, 4 and 5 are not red
if action.type in [PLAY, DISCARD]:
x = str(action)
if (action.cnr,player) in self.hints: # this just never happens?
print('aaaaaaaaaaaaaa')
self.hints[(action.cnr,player)] = []
for i in xrange(10):
if (action.cnr+i+1,player) in self.hints:
print('BBBBBBBBBBBB') # this neither
self.hints[(action.cnr+i,player)] = self.hints[(action.cnr+i+1,player)]
self.hints[(action.cnr+i+1,player)] = []
elif action.pnr == self.pnr: # when I am hinted
self.gothint = (action,player)
self.last_knowledge = game.knowledge[:]
self.last_board = game.board[:]
self.last_trash = game.trash[:]
self.played = game.played[:]
########################################
def do_sample(knowledge):
if not knowledge:
return []
possible = []
for col in ALL_COLORS:
for i,c in enumerate(knowledge[0][col]):
for j in xrange(c):
possible.append((col,i+1))
if not possible:
return None
other = do_sample(knowledge[1:])
if other is None:
return None
sample = random.choice(possible)
return [sample] + other
def sample_hand(knowledge):
result = None
while result is None:
result = do_sample(knowledge)
return result
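# Added sketch (not in the original): exercising the rejection sampler above
# on a fresh knowledge state, assuming initial_knowledge() from earlier in
# this file returns the per-color rank counts of an unseen card.
def _demo_sample_hand():
    toy_knowledge = [initial_knowledge(), initial_knowledge()]
    print sample_hand(toy_knowledge)  # e.g. [(0, 3), (2, 1)]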
used = {}
for c in ALL_COLORS:
for i,cnt in enumerate(COUNTS):
used[(c,i+1)] = 0
class SamplingRecognitionPlayer(Player):
def __init__(self, name, pnr, other=IntentionalPlayer, maxtime=5000):
self.name = name
self.hints = {}
self.pnr = pnr
self.gothint = None
self.last_knowledge = []
self.last_played = []
self.last_board = []
self.other = other
self.maxtime = maxtime
self.explanation = []
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
handsize = len(knowledge[0])
possible = []
if self.gothint:
possiblehands = []
wrong = 0
used = {}
for c in trash + played:
if c not in used:
used[c] = 0
used[c] += 1
i = 0
t0 = time.time()
while i < self.maxtime:
i += 1
h = sample_hand(update_knowledge(knowledge[nr], used))
newhands = hands[:]
newhands[nr] = h
other = self.other("Pinocchio", self.gothint[1])
act = other.get_action(self.gothint[1], newhands, self.last_knowledge, self.last_trash,
self.last_played, self.last_board, valid_actions, hints + 1)
lastact = self.gothint[0]
if act == lastact:
possiblehands.append(h)
def do(c, i):
newhands = hands[:]
h1 = h[:]
h1[i] = c
newhands[nr] = h1
print other.get_action(self.gothint[1], newhands, self.last_knowledge, self.last_trash,
self.last_played, self.last_board, valid_actions, hints + 1)
# import pdb
# pdb.set_trace()
else:
wrong += 1
# print "sampled", i
# print len(possiblehands), "would have led to", self.gothint[0], "and not:", wrong
# print f(possiblehands)
if possiblehands:
mostlikely = [(0, 0) for i in xrange(len(possiblehands[0]))]
for i in xrange(len(possiblehands[0])):
counts = {}
for h in possiblehands:
if h[i] not in counts:
counts[h[i]] = 0
counts[h[i]] += 1
for c in counts:
if counts[c] > mostlikely[i][1]:
mostlikely[i] = (c, counts[c])
# print "most likely:", mostlikely
m = max(mostlikely, key=lambda (card, cnt): cnt)
second = mostlikely[:]
second.remove(m)
m2 = max(second, key=lambda (card, cnt): cnt)
if m[1] >= m2[1] * a: # NOTE: 'a' is a confidence-margin factor that does not appear to be defined nearby
# print ">>>>>>> deduced!", f(m[0]), m[1],"vs", f(m2[0]), m2[1]
knowledge = copy.deepcopy(knowledge)
knowledge[nr][mostlikely.index(m)] = iscard(m[0])
self.gothint = None
for k in knowledge[nr]:
possible.append(get_possible(k))
discards = []
duplicates = []
for i, p in enumerate(possible):
if playable(p, board):
return Action(PLAY, cnr=i)
if discardable(p, board):
discards.append(i)
if discards:
return Action(DISCARD, cnr=random.choice(discards))
playables = []
for i, h in enumerate(hands):
if i != nr:
for j, (col, n) in enumerate(h):
if board[col][1] + 1 == n:
playables.append((i, j))
playables.sort(key=lambda (i, j): -hands[i][j][1])
while playables and hints > 0:
i, j = playables[0]
knows_rank = True
real_color = hands[i][j][0]
real_rank = hands[i][j][1] # rank is the second tuple element (was a copy-paste of the color index)
k = knowledge[i][j]
hinttype = [HINT_COLOR, HINT_NUMBER]
if (j, i) not in self.hints:
self.hints[(j, i)] = []
for h in self.hints[(j, i)]:
hinttype.remove(h)
if HINT_NUMBER in hinttype:
self.hints[(j, i)].append(HINT_NUMBER)
return Action(HINT_NUMBER, pnr=i, num=hands[i][j][1])
if HINT_COLOR in hinttype:
self.hints[(j, i)].append(HINT_COLOR)
return Action(HINT_COLOR, pnr=i, col=hands[i][j][0])
playables = playables[1:]
for i, k in enumerate(knowledge):
if i == nr:
continue
cards = range(len(k))
random.shuffle(cards)
c = cards[0]
(col, num) = hands[i][c]
hinttype = [HINT_COLOR, HINT_NUMBER]
if (c, i) not in self.hints:
self.hints[(c, i)] = []
for h in self.hints[(c, i)]:
hinttype.remove(h)
if hinttype and hints > 0:
if random.choice(hinttype) == HINT_COLOR:
self.hints[(c, i)].append(HINT_COLOR)
return Action(HINT_COLOR, pnr=i, col=col)
else:
self.hints[(c, i)].append(HINT_NUMBER)
return Action(HINT_NUMBER, pnr=i, num=num)
return random.choice([Action(DISCARD, cnr=i) for i in xrange(handsize)])
def inform(self, action, player, game):
if action.type in [PLAY, DISCARD]:
x = str(action)
if (action.cnr, player) in self.hints:
self.hints[(action.cnr, player)] = []
for i in xrange(10):
if (action.cnr + i + 1, player) in self.hints:
self.hints[(action.cnr + i, player)] = self.hints[(action.cnr + i + 1, player)]
self.hints[(action.cnr + i + 1, player)] = []
elif action.pnr == self.pnr:
self.gothint = (action, player)
self.last_knowledge = game.knowledge[:]
self.last_board = game.board[:]
self.last_trash = game.trash[:]
self.played = game.played[:]
# I think Eger stopped in the middle of development
class FullyIntentionalPlayer(Player):
def __init__(self, name, pnr):
self.name = name
self.hints = {}
self.pnr = pnr
self.gothint = None
self.last_knowledge = []
self.last_played = []
self.last_board = []
def get_action(self, nr, hands, knowledge, trash, played, board, valid_actions, hints):
handsize = len(knowledge[0])
possible = []
''' ignore hints from other player?'''
self.gothint = None
for k in knowledge[nr]:
possible.append(get_possible(k))
discards = [] # can be discarded for sure
plays = [] # can be played for sure
duplicates = []
for i,card in enumerate(possible):
if playable(card,board):
plays.append(i)
if discardable(card,board):
discards.append(i)
#!!!! I added the line so it can play something
if plays != []:
toplay = random.choice(plays)
print('toplay: ', toplay)
return Action(PLAY, cnr=toplay)
''' compute what should be done with cards of teammates'''
playables = []
useless = []
discardables = []
othercards = trash + board
intentions = [None for i in xrange(handsize)]
for i,h in enumerate(hands):
if i != nr:
for j,(col,n) in enumerate(h):
if board[col][1] + 1 == n:
playables.append((i,j))
intentions[j] = PLAY
if board[col][1] >= n: # a rank at or below the pile top is useless (cf. the same check in SelfIntentionalPlayer)
useless.append((i,j))
if not intentions[j]:
intentions[j] = DISCARD
if n < 5 and (col,n) not in othercards:
discardables.append((i,j))
if not intentions[j]:
intentions[j] = CANDISCARD
''' compute the best hint'''
if hints > 0:
valid = []
for c in ALL_COLORS:
action = (HINT_COLOR, c)
#print "HINT", COLORNAMES[c],
(isvalid,score,expl) = pretend(action, knowledge[1-nr], intentions, hands[1-nr], board)
#print isvalid, score
if isvalid:
valid.append((action,score))
for r in xrange(5):
r += 1
action = (HINT_NUMBER, r)
#print "HINT", r,
(isvalid,score,expl) = pretend(action, knowledge[1-nr], intentions, hands[1-nr], board)
#print isvalid, score
if isvalid:
valid.append((action,score))
if valid:
valid.sort(key=lambda (a,s): -s)
#print valid
(a,s) = valid[0]
if a[0] == HINT_COLOR:
return Action(HINT_COLOR, pnr=1-nr, col=a[1])
else:
return Action(HINT_NUMBER, pnr=1-nr, num=a[1])
'''NEW: '''
for i, player in enumerate(knowledge):
if i == nr or True: # NOTE: 'or True' makes this branch always skip, disabling the random-hint fallback
continue
else: # unreachable
print('wtf???')
cards = range(len(player))
random.shuffle(cards)
c = cards[0] # a random card of i-th player
(col,num) = hands[i][c] # color and number of that card
hinttype = [HINT_COLOR, HINT_NUMBER]
if (c,i) not in self.hints:
self.hints[(c,i)] = []
for h in self.hints[(c,i)]:
hinttype.remove(h)
if hinttype and hints > 0:
if random.choice(hinttype) == HINT_COLOR:
self.hints[(c,i)].append(HINT_COLOR)
return Action(HINT_COLOR, pnr=i, col=col)
else:
self.hints[(c,i)].append(HINT_NUMBER)
return Action(HINT_NUMBER, pnr=i, num=num)
return random.choice([Action(DISCARD, cnr=i) for i in xrange(handsize)])
def inform(self, action, player, game):
if action.type in [PLAY, DISCARD]:
x = str(action)
if (action.cnr,player) in self.hints:
self.hints[(action.cnr,player)] = []
for i in xrange(10):
if (action.cnr+i+1,player) in self.hints:
self.hints[(action.cnr+i,player)] = self.hints[(action.cnr+i+1,player)]
self.hints[(action.cnr+i+1,player)] = []
elif action.pnr == self.pnr:
self.gothint = (action,player)
self.last_knowledge = game.knowledge[:]
self.last_board = game.board[:]
self.last_trash = game.trash[:]
self.played = game.played[:]
###########################################################################
def format_card((col,num)):
return COLORNAMES[col] + " " + str(num)
def format_hand(hand):
return ", ".join(map(format_card, hand))
class Game(object):
def __init__(self, players, log=sys.stdout, format=0):
self.players = players
self.hits = 3
self.hints = 8
self.current_player = 0
self.board = map(lambda c: (c,0), ALL_COLORS)
self.played = []
self.deck = make_deck()
self.extra_turns = 0
self.hands = []
self.knowledge = []
self.make_hands()
self.trash = []
self.log = log
self.turn = 1
self.format = format
self.dopostsurvey = False
self.study = False
if self.format:
print >> self.log, self.deck
def make_hands(self):
handsize = 4
if len(self.players) < 4:
handsize = 5
for i, p in enumerate(self.players):
self.hands.append([])
self.knowledge.append([])
for j in xrange(handsize):
self.draw_card(i)
def draw_card(self, pnr=None):
if pnr is None:
pnr = self.current_player
if not self.deck:
return
self.hands[pnr].append(self.deck[0])
self.knowledge[pnr].append(initial_knowledge())
del self.deck[0]
def perform(self, action):
for p in self.players:
p.inform(action, self.current_player, self)
if self.format: # was 'if format:', which tested the builtin and was always true
print >> self.log, "MOVE:", self.current_player, action.type, action.cnr, action.pnr, action.col, action.num
if action.type == HINT_COLOR:
self.hints -= 1
print >>self.log, self.players[self.current_player].name, "hints", self.players[action.pnr].name, "about all their", COLORNAMES[action.col], "cards", "hints remaining:", self.hints
print >>self.log, self.players[action.pnr].name, "has", format_hand(self.hands[action.pnr])
for (col,num),knowledge in zip(self.hands[action.pnr],self.knowledge[action.pnr]):
if col == action.col:
for i, k in enumerate(knowledge):
if i != col:
for j in xrange(len(k)):
k[j] = 0
else:
for i in xrange(len(knowledge[action.col])):
knowledge[action.col][i] = 0
elif action.type == HINT_NUMBER:
self.hints -= 1
print >>self.log, self.players[self.current_player].name, "hints", self.players[action.pnr].name, "about all their", action.num, "hints remaining:", self.hints
print >>self.log, self.players[action.pnr].name, "has", format_hand(self.hands[action.pnr])
for (col,num),knowledge in zip(self.hands[action.pnr],self.knowledge[action.pnr]):
if num == action.num:
for k in knowledge:
for i in xrange(len(COUNTS)):
if i+1 != num:
k[i] = 0
else:
for k in knowledge:
k[action.num-1] = 0
elif action.type == PLAY:
(col,num) = self.hands[self.current_player][action.cnr]
print >>self.log, self.players[self.current_player].name, "plays", format_card((col,num)),
if self.board[col][1] == num-1:
self.board[col] = (col,num)
self.played.append((col,num))
if num == 5:
self.hints += 1
self.hints = min(self.hints, 8)
print >>self.log, "successfully! Board is now", format_hand(self.board)
else:
self.trash.append((col,num))
self.hits -= 1
print >>self.log, "and fails. Board was", format_hand(self.board)
del self.hands[self.current_player][action.cnr]
del self.knowledge[self.current_player][action.cnr]
self.draw_card()
print >>self.log, self.players[self.current_player].name, "now has", format_hand(self.hands[self.current_player])
else:
self.hints += 1
self.hints = min(self.hints, 8)
self.trash.append(self.hands[self.current_player][action.cnr])
print >>self.log, self.players[self.current_player].name, "discards", format_card(self.hands[self.current_player][action.cnr])
print >>self.log, "trash is now", format_hand(self.trash)
del self.hands[self.current_player][action.cnr]
del self.knowledge[self.current_player][action.cnr]
self.draw_card()
print >>self.log, self.players[self.current_player].name, "now has", format_hand(self.hands[self.current_player])
def valid_actions(self):
valid = []
for i in xrange(len(self.hands[self.current_player])):
valid.append(Action(PLAY, cnr=i))
valid.append(Action(DISCARD, cnr=i))
if self.hints > 0:
for i, p in enumerate(self.players):
if i != self.current_player:
for col in set(map(lambda (col,num): col, self.hands[i])):
valid.append(Action(HINT_COLOR, pnr=i, col=col))
for num in set(map(lambda (col,num): num, self.hands[i])):
valid.append(Action(HINT_NUMBER, pnr=i, num=num))
return valid
def run(self, turns=-1):
self.turn = 1
while not self.done() and (turns < 0 or self.turn < turns):
self.turn += 1
if not self.deck:
self.extra_turns += 1
hands = []
for i, h in enumerate(self.hands):
if i == self.current_player:
hands.append([])
else:
hands.append(h)
action = self.players[self.current_player].get_action(self.current_player, hands, self.knowledge, self.trash, self.played, self.board, self.valid_actions(), self.hints)
self.perform(action)
self.current_player += 1
self.current_player %= len(self.players)
print >>self.log, "Game done, hits left:", self.hits
points = self.score()
print >>self.log, "Points:", points
return points
def score(self):
return sum(map(lambda (col,num): num, self.board))
def single_turn(self):
if not self.done():
if not self.deck:
self.extra_turns += 1
hands = []
for i, h in enumerate(self.hands):
if i == self.current_player:
hands.append([])
else:
hands.append(h)
action = self.players[self.current_player].get_action(self.current_player, hands, self.knowledge, self.trash, self.played, self.board, self.valid_actions(), self.hints)
self.perform(action)
self.current_player += 1
self.current_player %= len(self.players)
def external_turn(self, action):
if not self.done():
if not self.deck:
self.extra_turns += 1
self.perform(action)
self.current_player += 1
self.current_player %= len(self.players)
def done(self):
if self.extra_turns == len(self.players) or self.hits == 0:
return True
for (col,num) in self.board:
if num != 5:
return False
return True
def finish(self):
if self.format:
print >> self.log, "Score", self.score()
self.log.close()
class NullStream(object):
def write(self, *args):
pass
random.seed(123)
playertypes = {"random": Player, "inner": InnerStatePlayer, "outer": OuterStatePlayer, "self": SelfRecognitionPlayer,
"intentional": IntentionalPlayer, "sample": SamplingRecognitionPlayer, "full": SelfIntentionalPlayer,
"timed": TimedPlayer}
names = ["Shangdi", "<NAME>", "Tian", "<NAME>", "Pangu"]
def make_player(player, i):
if player in playertypes:
return playertypes[player](names[i], i)
elif player.startswith("self("):
other = player[5:-1]
return SelfRecognitionPlayer(names[i], i, playertypes[other])
elif player.startswith("sample("):
other = player[7:-1]
if "," in other:
othername, maxtime = other.split(",")
othername = othername.strip()
maxtime = int(maxtime.strip())
return SamplingRecognitionPlayer(names[i], i, playertypes[othername], maxtime=maxtime)
return SamplingRecognitionPlayer(names[i], i, playertypes[other])
return None
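# Added sketch (not in the original): the spec strings accepted by
# make_player, exercised on a short silent two-player game with a fixed seed.
def _demo_make_player():
    random.seed(0)
    players = [make_player("intentional", 0), make_player("sample(intentional,100)", 1)]
    print Game(players, NullStream()).run()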
def main(args):
if not args:
args = ["random"]*3
if args[0] == "trial":
treatments = [["intentional", "intentional"], ["intentional", "outer"], ["outer", "outer"]]
#[["sample(intentional, 50)", "sample(intentional, 50)"], ["sample(intentional, 100)", "sample(intentional, 100)"]] #, ["self(intentional)", "self(intentional)"], ["self", "self"]]
results = []
print treatments
for i in xrange(int(args[1])):
result = []
times = []
avgtimes = []
print "trial", i+1
for t in treatments:
random.seed(i)
players = []
for j, player in enumerate(t): # j, not i: the trial index i is reused by random.seed above
players.append(make_player(player, j))
g = Game(players, NullStream())
t0 = time.time()
result.append(g.run())
times.append(time.time() - t0)
avgtimes.append(times[-1]*1.0/g.turn)
print ".",
print
print "scores:",result
print "times:", times
print "avg times:", avgtimes
return
players = []
for i,a in enumerate(args):
players.append(make_player(a, i))
n = 10000
out = NullStream()
if n < 3:
out = sys.stdout
pts = []
for i in xrange(n):
if (i+1)%100 == 0:
print "Starting game", i+1
random.seed(i+1)
g = Game(players, out)
try:
pts.append(g.run())
if (i+1)%100 == 0:
print "score", pts[-1]
except Exception:
import traceback
traceback.print_exc()
if n < 10:
print pts
import numpy
print "average:", numpy.mean(pts)
print "stddev:", numpy.std(pts, ddof=1)
print "range", min(pts), max(pts)
if __name__ == "__main__":
main(sys.argv[1:]) |
dongmengshi/easylearn | eslearn/machine_learning/classfication/el_classify_sensitive_person_test.py | # -*- coding: utf-8 -*-
"""
Created on 2020/03/16
Feature selection: Relief-based feature selection algorithm.
------
@author: <NAME>
"""
import numpy as np
from sklearn import preprocessing
import os
from sklearn.externals import joblib
from el_classify_sensitive_person_train_validation import ClassifyFourKindOfPersonTrain
from eslearn.utils.lc_evaluation_model_performances import eval_performance
class ClassifyFourKindOfPersonTest():
"""
This class is used to test the classification model for identifying 2 kinds of sensitive persons.
Parameters
----------
data_test_file: path str
Path of the dataset
label_test_file: path str
Path of the label
path_out :
Path to save results
is_feature_selection : bool
if perfrome feature selection.
is_showfig_finally: bool
If show figure after all iteration finished.
Returns
-------
Save all classification results and figures to local disk.
"""
def __init__(selftest,
data_test_file=None,
label_test_file=None,
data_train_file=None,
models_path=None,
path_out=None,
is_feature_selection=False,
is_showfig_finally=True):
selftest.data_test_file = data_test_file
selftest.label_test_file = label_test_file
selftest.data_train_file = data_train_file
selftest.path_out = path_out
selftest.models_path = models_path
selftest.is_feature_selection = is_feature_selection
selftest.is_showfig_finally = is_showfig_finally
def main_function(selftest):
"""
"""
print('Training model and testing...\n')
# load data and mask
mask_lassocv = joblib.load(os.path.join(selftest.path_out, 'mask_selected_features_lassocv.pkl'))
model_feature_selection = joblib.load(os.path.join(selftest.models_path, 'model_feature_selection.pkl'))
model_classification = joblib.load(os.path.join(selftest.models_path, 'model_classification.pkl'))
feature_test, selftest.label_test, feature_train = selftest._load_data()
# Age encoding
feature_test[:,2] = ClassifyFourKindOfPersonTrain().age_encodeing(feature_train[:,2], feature_test[:,2])
# Feature selection
if selftest.is_feature_selection:
feature_test = feature_test[:, mask_lassocv != 0]
# Testing
selftest.prediction, selftest.decision = selftest.testing(model_classification, feature_test)
# Evaluating classification performances
selftest.accuracy, selftest.sensitivity, selftest.specificity, selftest.AUC = eval_performance(selftest.label_test, selftest.prediction, selftest.decision,
accuracy_kfold=None, sensitivity_kfold=None, specificity_kfold=None, AUC_kfold=None,
verbose=1, is_showfig=0)
# Save results and fig to local path
selftest.save_results()
selftest.save_fig()
print("--" * 10 + "Done!" + "--" * 10 )
return selftest
def _load_data(selftest):
"""
Load data
"""
data_test = np.load(selftest.data_test_file)
label_test = np.load(selftest.label_test_file)
data_train = np.load(selftest.data_train_file)
return data_test, label_test, data_train
def testing(selftest, model, test_X):
predict = model.predict(test_X)
decision = model.decision_function(test_X)
return predict, decision
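# Added note: testing() assumes a model exposing both predict() and
# decision_function() (e.g. sklearn's linear SVMs); a probability-only
# classifier would need predict_proba() instead.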
def save_results(selftest):
# Save performances and others
import pandas as pd
performances_to_save = np.array([selftest.accuracy, selftest.sensitivity, selftest.specificity, selftest.AUC]).reshape(1,4)
de_pred_label_to_save = np.vstack([selftest.decision.T, selftest.prediction.T, selftest.label_test.T]).T
performances_to_save = pd.DataFrame(performances_to_save, columns=[['Accuracy','Sensitivity', 'Specificity', 'AUC']])
de_pred_label_to_save = pd.DataFrame(de_pred_label_to_save, columns=[['Decision','Prediction', 'Sorted_Real_Label']])
performances_to_save.to_csv(os.path.join(selftest.path_out, 'test_Performances.txt'), index=False, header=True)
de_pred_label_to_save.to_csv(os.path.join(selftest.path_out, 'test_Decision_prediction_label.txt'), index=False, header=True)
def save_fig(selftest):
# Save ROC and Classification 2D figure
acc, sens, spec, auc = eval_performance(selftest.label_test, selftest.prediction, selftest.decision,
selftest.accuracy, selftest.sensitivity, selftest.specificity, selftest.AUC,
verbose=0, is_showfig=selftest.is_showfig_finally, is_savefig=1,
out_name=os.path.join(selftest.path_out, 'Classification_performances_test.pdf'),
legend1='Healthy', legend2='Unhealthy')
#
if __name__ == '__main__':
# =============================================================================
# All inputs
data_file = r'D:\workstation_b\Fundation\给黎超.xlsx'
path_out = r'D:\workstation_b\Fundation'
models_path = r'D:\workstation_b\Fundation'
# =============================================================================
selftest = ClassifyFourKindOfPersonTest(data_test_file=r'D:\workstation_b\Fundation\feature_test.npy',
label_test_file=r'D:\workstation_b\Fundation\label_test.npy',
data_train_file=r'D:\workstation_b\Fundation\feature_train.npy',
path_out=path_out,
models_path=models_path,
is_feature_selection=1)
selftest.main_function()
|
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_elasticNetCV.py | <reponame>dongmengshi/easylearn<filename>eslearn/machine_learning/classfication/lc_elasticNetCV.py
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 19:23:42 2018
ElasticNetCV
Minimizes the objective function:
1 / (2 * n_samples) * ||y_train - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
l1_ratio = 1 is the lasso penalty
a * L1 + b * L2
where:
alpha = a + b and l1_ratio = a / (a + b)
@author: lenovo
"""
from scipy.stats import pearsonr
from sklearn.linear_model import ElasticNetCV as SkElasticNetCV # aliased: the class below shadows the sklearn name
import numpy as np
class ElasticNetCV():
def __init__(sel,
k=10,
l1_ratio=np.linspace(0.1,1,10),
alphas=np.linspace(0.001,100,100)):
sel.k=k
sel.l1_ratio=l1_ratio
sel.alpha=alphas
def train(sel,x_train,y_train):
sel.regr = SkElasticNetCV(random_state=0, cv=sel.k,
l1_ratio=sel.l1_ratio,
alphas=sel.alpha)
sel.regr.fit(x_train,y_train)
sel.best_alpha=sel.regr.alpha_
sel.best_l1_ratio=sel.regr.l1_ratio_
sel.best_coef=sel.regr.coef_
sel.best_intercept=sel.regr.intercept_
return sel
def test(sel,x_test):
sel.pred=sel.regr.predict(x_test)
return sel
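# Added note (illustrative, not in the original): per the docstring, the
# (a, b) penalty weights map to sklearn's parameters as alpha = a + b and
# l1_ratio = a / (a + b). A minimal conversion helper:
def _penalty_to_sklearn(a, b):
    """Convert an L1 weight a and an L2 weight b to (alpha, l1_ratio)."""
    return a + b, a / (a + b)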
if __name__=='__main__':
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
x, y = make_regression(n_samples=500,n_features=5, random_state=0)
x_train, x_test, y_train, y_test = \
train_test_split(x, y, random_state=0)
import lc_elasticNetCV as ENCV
sel=ENCV.ElasticNetCV()
sel.train(x_train,y_train)
results=sel.test(x_test).__dict__
r,p=pearsonr(results['pred'],y_test)
print('r={}\np={}'.format(r,p))
|
dongmengshi/easylearn | eslearn/utils/lc_copy_selected_file_V6.py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 13:05:28 2018:
在版本3的基础上,根据pandas的join方法来求交集
根据从量表中筛选的样本,来获得符合要求的原始数据的路径
数据结构neuroimageDataPath//subject00001//files
也可以是任何的数据结构,只要给定subjName在哪里就行
总之,最后把file复制到其他地方(可以给每个subject限定某个符合条件file,比如以'.nii'结尾的file)
input:
# reference_file:需要复制的被试名字所在text文件(大表中的uid)
# keywork_of_reference_uid:如提取量表中唯一识别号的正则表达式
# ith_number_of_reference_uid: 量表中的唯一识别号有多个匹配项时,选择第几个 (比如有一个名字为subj0001_bold7000, 此时可能匹配到0001和7000,遇到这种情况选择第几个匹配项)
# keyword_of_parent_folder_containing_target_file:想把被试的哪个模态/或那个文件夹下的文件复制出来(如同时有'resting'和'dti'时,选择那个模态)
# matching_point_number_of_target_uid_in_backwards:与referenceid匹配的唯一识别号在倒数第几个block内(以target file为起点计算,第一个计数为1)
# 如'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'的唯一识别号在倒数第3个中
# keyword_of_target_file_uid:用来筛选mri数据中唯一识别号的正则表达式
# ith_number_of_targetfile_uid: target file中的唯一识别号有多个匹配项时,选择第几个.
# keyword_of_target_file_uid:用来筛选file的正则表达式或keyword
# targe_file_folder:原始数据的根目录
# save_path: 将原始数据copy到哪个大路径
# n_processess=5几个线程
# is_save_log:是否保存复制log
# is_copy:是否执行复制功能
# is_move:是否移动(0)
# save_into_one_or_more_folder:保存到每个被试文件夹下,还是保存到一个文件夹下
# save_suffix:文件保存的尾缀('.nii')
# is_run:是否真正对文件执行移动或复制(0)
# 总体来说被复制的文件放在如下的路径:save_path/saveFolderName/subjName/files
@author: <NAME>
new featrue:真多核多线程处理,类的函数统一返回self
匹配file name:正则表达式匹配
"""
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pandas as pd
import time
import os
import shutil
import sys
sys.path.append(
r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Utils')
class CopyFmri():
def __init__(
self,
reference_file=r'E:\wangfeidata\uid.txt',
targe_file_folder=r'E:\wangfeidata\FunImgARWD',
keywork_of_reference_uid='([1-9]\d*)',
ith_number_of_reference_uid=0,
keyword_of_target_file_uid='([1-9]\d*)',
ith_number_of_targetfile_uid=0,
matching_point_number_of_target_uid_in_backwards=2,
keywork_of_target_file_not_for_uid='nii',
keyword_of_parent_folder_containing_target_file='',
save_path=r'E:\wangfeidata',
n_processess=2,
is_save_log=1,
is_copy=0,
is_move=0,
save_into_one_or_more_folder='one_file_one_folder',
save_suffix='.nii',
is_run=0):
self.reference_file = reference_file
self.targe_file_folder = targe_file_folder
self.keywork_of_reference_uid = keywork_of_reference_uid
self.ith_number_of_reference_uid = ith_number_of_reference_uid
self.keyword_of_target_file_uid = keyword_of_target_file_uid
self.matching_point_number_of_target_uid_in_backwards = matching_point_number_of_target_uid_in_backwards
self.ith_number_of_targetfile_uid = ith_number_of_targetfile_uid
self.keywork_of_target_file_not_for_uid = keywork_of_target_file_not_for_uid
self.keyword_of_parent_folder_containing_target_file = keyword_of_parent_folder_containing_target_file
self.save_path = save_path
self.n_processess = n_processess
self.is_save_log = is_save_log
self.is_copy = is_copy
self.is_move = is_move
self.save_into_one_or_more_folder = save_into_one_or_more_folder
self.save_suffix = save_suffix
self.is_run = is_run
# %% process the input
def _after_init(self):
""" handle the init parameter
"""
# check params
if self.is_copy == 1 & self.is_move == 1:
print('### Cannot copy and move at the same time! ###\n')
print('### please press Ctrl+C to close the progress ###\n')
# create save folder
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# read reference_file(excel or text)
try:
self.subjName_forSelect = pd.read_excel(
self.reference_file, dtype='str', header=None, index=None)
except BaseException:
self.subjName_forSelect = pd.read_csv(
self.reference_file, dtype='str', header=None)
print('### Extracting the matching component of subjName_forSelect (digits by default) ###\n### When there are several matches, the 1st one is used by default ###\n')
if self.keywork_of_reference_uid:
self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0].str.findall(self.keywork_of_reference_uid)
self.subjName_forSelect = [self.subjName_forSelect_[self.ith_number_of_reference_uid]
for self.subjName_forSelect_ in
self.subjName_forSelect
if len(self.subjName_forSelect_)]
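# Added illustration: with keywork_of_reference_uid = r'([1-9]\d*)', an entry
# like 'subj0012_bold7000' yields findall hits ['12', '7000'] (leading zeros
# break the match), and ith_number_of_reference_uid picks which hit is the uid.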
def walkAllPath(self):
self.allWalkPath = os.walk(self.targe_file_folder)
# allWalkPath=[allWalkPath_ for allWalkPath_ in allWalkPath]
return self
def fetch_allFilePath(self):
self.allFilePath = []
for onePath in self.allWalkPath:
for oneFile in onePath[2]:
target_folder = os.path.join(onePath[0], oneFile)
self.allFilePath.append(target_folder)
return self
def fetch_allSubjName(self):
'''
matching_point_number_of_target_uid_in_backwards: which path block, counting
backwards, holds subjName (the first block counts as 1).
# e.g. in 'D:\myCodes\workstation_20180829_dynamicFC\FunImgARW\1-500\00002_resting\dti\dic.txt'
# subjName sits in the 3rd block from the end
'''
self.allSubjName = self.allFilePath
for i in range(self.matching_point_number_of_target_uid_in_backwards - 1):
self.allSubjName = [os.path.dirname(
allFilePath_) for allFilePath_ in self.allSubjName]
self.allSubjName = [os.path.basename(
allFilePath_) for allFilePath_ in self.allSubjName]
self.allSubjName = pd.DataFrame(self.allSubjName)
self.allSubjName_raw = self.allSubjName
return self
def fetch_folerNameContainingFile(self):
'''
If the uid one level above a file is not the subject name, we must choose which
folder's files to take. First determine the uid name above every file (it may
be a modality name), then filter by your keyword.
'''
self.folerNameContainingFile = [os.path.dirname(
allFilePath_) for allFilePath_ in self.allFilePath]
self.folerNameContainingFile = [os.path.basename(
folderName) for folderName in self.folerNameContainingFile]
return self
def fetch_allFileName(self):
'''
Collect all file names for the later screening step.
Use case: files we do not want may sit alongside the targets,
e.g. text files mixed in with DICOM files that should be excluded.
'''
self.allFileName = [os.path.basename(
allFilePath_) for allFilePath_ in self.allFilePath]
return self
# %% screen according several rules
def screen_pathLogicalLocation_accordingTo_yourSubjName(self):
""" 匹配subject name:注意此处用精确匹配,只有完成匹配时,才匹配成功"""
"""maker sure subjName_forSelect is pd.Series and its content is string"""
if isinstance(self.subjName_forSelect, type(pd.DataFrame([1]))):
self.subjName_forSelect = self.subjName_forSelect.iloc[:, 0]
if not isinstance(self.subjName_forSelect[0], str):
self.subjName_forSelect = pd.Series(
self.subjName_forSelect, dtype='str')
# NB: the data types on both sides of the match must be consistent!!!
try:
# extract every subject's uid
# self.logic_index_subjname=\
# np.sum(
# pd.DataFrame(
# [self.allSubjName.iloc[:,0].str.contains\
# (name_for_self) for name_for_self in self.subjName_forSelect]
# ).T,
# axis=1)
#
# self.logic_index_subjname=self.logic_index_subjname>=1
self.allSubjName = self.allSubjName.iloc[:, 0].str.findall(
self.keyword_of_target_file_uid)
# after regex extraction, entries with no match are empty lists; treat those as mismatches and drop them
allSubjName_temp = []
for name in self.allSubjName.values:
if name:
allSubjName_temp.append(name[self.ith_number_of_targetfile_uid])
else:
allSubjName_temp.append(None)
self.allSubjName = allSubjName_temp
self.allSubjName = pd.DataFrame(self.allSubjName)
self.subjName_forSelect = pd.DataFrame(self.subjName_forSelect)
self.logic_index_subjname = pd.DataFrame(
np.zeros(len(self.allSubjName)) == 1)
for i in range(len(self.subjName_forSelect)):
self.logic_index_subjname = self.logic_index_subjname.mask(
self.allSubjName == self.subjName_forSelect.iloc[i, 0], True)
except BaseException:
print('subjName mismatch subjName_forSelected!\nplease check their type')
sys.exit(0)
return self
def screen_pathLogicalLocation_accordingTo_folerNameContainingFile(self):
""" 匹配folerNameContainingFile:注意此处用的连续模糊匹配,只要含有这个关键词,则匹配
"""
if self.keyword_of_parent_folder_containing_target_file:
self.logic_index_foler_name_containing_file = [
self.keyword_of_parent_folder_containing_target_file in oneName_ for oneName_ in self.folerNameContainingFile]
self.logic_index_foler_name_containing_file = pd.DataFrame(
self.logic_index_foler_name_containing_file)
else:
self.logic_index_foler_name_containing_file = np.ones(
[len(self.folerNameContainingFile), 1]) == 1
self.logic_index_foler_name_containing_file = pd.DataFrame(
self.logic_index_foler_name_containing_file)
return self
def screen_pathLogicalLocation_accordingTo_fileName(self):
""" 匹配file name (不是用于提取uid):正则表达式匹配
"""
if self.keywork_of_target_file_not_for_uid:
self.allFileName = pd.Series(self.allFileName)
self.logic_index_file_name = self.allFileName.str.contains(
self.keywork_of_target_file_not_for_uid)
else:
self.logic_index_file_name = np.ones([len(self.allFileName), 1]) == 1
self.logic_index_file_name = pd.DataFrame(self.logic_index_file_name)
return self
# %% final logical location of selfected file path
def fetch_totalLogicalLocation(self):
self.logic_index_all = pd.concat(
[
self.logic_index_file_name,
self.logic_index_foler_name_containing_file,
self.logic_index_subjname],
axis=1)
self.logic_index_all = np.sum(
self.logic_index_all,
axis=1) == np.shape(
self.logic_index_all)[1]
return self
def fetch_selfectedFilePath_accordingPathLogicalLocation(self):
# target_folder
self.allFilePath = pd.DataFrame(self.allFilePath)
self.allSelectedFilePath = self.allFilePath[self.logic_index_all]
self.allSelectedFilePath = self.allSelectedFilePath.dropna()
# uid name
self.allSubjName = pd.DataFrame(self.allSubjName)
self.allSelectedSubjName = self.allSubjName[self.logic_index_all]
self.allSelectedSubjName = self.allSelectedSubjName.dropna()
# raw name
self.allSubjName_raw = pd.DataFrame(self.allSubjName_raw)
self.allSelectedSubjName_raw = self.allSubjName_raw[self.logic_index_all]
self.allSelectedSubjName_raw = self.allSelectedSubjName_raw.dropna()
return self
# %% run copy
def copy_base(self, i, subjName):
n_allSelectedSubj = len(np.unique(self.allSelectedSubjName_raw))
# each file is saved under its own subjxxx folder
if self.save_into_one_or_more_folder == 'one_file_one_folder':
folder_name = subjName.split('.')[0]
output_folder = os.path.join(self.save_path, folder_name)
# create the subjxxx folder
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# all files are saved into one folder (each file named after subjxxx)
elif self.save_into_one_or_more_folder == 'all_file_one_folder':
output_folder = os.path.join(
self.save_path, subjName + self.save_suffix)
# copying OR moving OR do nothing
fileIndex = self.allSelectedSubjName_raw[(
self.allSelectedSubjName_raw.values == subjName)].index.tolist()
if self.is_copy == 1 and self.is_move == 0:
[shutil.copy(self.allSelectedFilePath.loc[fileIndex_, :][0],
output_folder) for fileIndex_ in fileIndex]
elif self.is_copy == 0 and self.is_move == 1:
[shutil.move(self.allSelectedFilePath.loc[fileIndex_, :][0],
output_folder) for fileIndex_ in fileIndex]
elif self.is_copy == 0 and self.is_move == 0:
print('### No copy and No move ###\n')
else:
print('### Cannot copy and move at the same time! ###\n')
print('Copy the {}/{}th subject: {} OK!\n'.format(i + 1, n_allSelectedSubj, subjName))
def copy_multiprocess(self):
s = time.time()
# each file is saved under its own subjxxx folder
if self.save_into_one_or_more_folder == 'one_file_one_folder':
pass
elif self.save_into_one_or_more_folder == 'all_file_one_folder':
pass
else:
print(
"### Not specified whether to copy into one folder or into per-subject folders ###\n### [{}] matches neither 'all_file_one_folder' nor 'one_file_one_folder' ###".format(
self.save_into_one_or_more_folder))
# multithreading
# unique subject names
uniSubjName = self.allSelectedSubjName_raw.iloc[:, 0].unique()
print('Copying...\n')
"""
# single-threaded version
for i,subjName in enumerate(uniSubjName):
self.copy_base(i,subjName)
"""
# multithreaded version
cores = multiprocessing.cpu_count()
if self.n_processess > cores:
self.n_processess = cores - 1
with ThreadPoolExecutor(self.n_processess) as executor:
for i, subjName in enumerate(uniSubjName):
executor.submit(self.copy_base, i, subjName)
print('=' * 30)
#
e = time.time()
print('Done!\nRunning time is {:.1f} second'.format(e - s))
# %%
def main_run(self):
# all target_folder and name
self._after_init()
self = self.walkAllPath()
self = self.fetch_allFilePath()
self = self.fetch_allSubjName()
self = self.fetch_allFileName()
# selfect
self = self.fetch_folerNameContainingFile()
# logicLoc_subjName: the logical locations obtained by matching subject names, and so on.
# fileName != subjName: e.g. fileName may be xxx.nii while subjName may be subjxxx
self = self.screen_pathLogicalLocation_accordingTo_yourSubjName()
self = self.screen_pathLogicalLocation_accordingTo_folerNameContainingFile()
self = self.screen_pathLogicalLocation_accordingTo_fileName()
self = self.fetch_totalLogicalLocation()
self = self.fetch_selfectedFilePath_accordingPathLogicalLocation()
self.unmatched_ref = \
pd.DataFrame(list(
set.difference(set(list(self.subjName_forSelect.astype(np.int32).iloc[:, 0])),
set(list(self.allSelectedSubjName.astype(np.int32).iloc[:, 0])))
)
)
print('=' * 50 + '\n')
print(
'Files that not found are : {}\n\nThey may be saved in:\n[{}]\n'.format(
self.unmatched_ref.values,
self.save_path))
print('=' * 50 + '\n')
# save for checking
if self.is_save_log:
# time information
now = time.localtime()
now = time.strftime("%Y-%m-%d %H:%M:%S", now)
# all matched name
uniSubjName = self.allSelectedSubjName_raw.iloc[:, 0].unique()
uniSubjName = [uniSubjName_ for uniSubjName_ in uniSubjName]
uniSubjName = pd.DataFrame(uniSubjName)
uniSubjName.to_csv(
os.path.join(
self.save_path,
'log_allSelectedSubjName.txt'),
index=False,
header=False)
# all unmatched subject names
self.unmatched_ref.to_csv(
os.path.join(
self.save_path,
'log_unmatched_reference.txt'),
index=False,
header=False)
# all folder names under the selected path
pd.DataFrame(pd.unique(self.allSubjName.iloc[:, 0])).dropna().to_csv(
os.path.join(self.save_path, 'log_alltargetfilename.txt'), index=False, header=False)
# all matched file paths
self.allSelectedFilePath.to_csv(
os.path.join(
self.save_path,
'log_allSelectedFilePath.txt'),
index=False,
header=False)
# save the log
f = open(
os.path.join(
self.save_path,
"log_copy_inputs.txt"),
'a')
f.write("\n\n")
f.write('====================' + now + '====================')
f.write("\n\n")
f.write("reference_file is: " + self.reference_file)
f.write("\n\n")
f.write(
"keyword_of_parent_folder_containing_target_file are: " +
self.keyword_of_parent_folder_containing_target_file)
f.write("\n\n")
f.write("matching_point_number_of_target_uid_in_backwards is: " +
str(self.matching_point_number_of_target_uid_in_backwards))
f.write("\n\n")
f.write("keyword_of_target_file_uid is: " +
str(self.keyword_of_target_file_uid))
f.write("\n\n")
f.write("keyword_of_target_file_uid is: " +
str(self.keyword_of_target_file_uid))
f.write("\n\n")
f.write("targe_file_folder is: " + self.targe_file_folder)
f.write("\n\n")
f.write("save_path is: " + self.save_path)
f.write("\n\n")
f.write("n_processess is: " + str(self.n_processess))
f.write("\n\n")
f.close()
# copy
if self.is_run:
self.copy_multiprocess()
return self
# %%
if __name__ == '__main__':
uid = r'D:\WorkStation_2018\WorkStation_dynamicFC_V3\Data\ID_Scale_Headmotion\held_out_samples.txt'
target_folder = r'D:\WorkStation_2018\WorkStation_dynamicFC_V1\Data\ROISignals_FumImgARWSFC_screened'
save_path = r'D:\WorkStation_2018\WorkStation_dynamicFC_V3\Data\held_out_samples'
matching_point_number_of_target_uid_in_backwards = 1
keywork_of_target_file_not_for_uid = ''
save_suffix= ''
copy = CopyFmri(
reference_file=uid,
targe_file_folder=target_folder,
keywork_of_reference_uid='([1-9]\d*)',
ith_number_of_reference_uid=0,
keyword_of_target_file_uid='([1-9]\d*)',
ith_number_of_targetfile_uid=0,
matching_point_number_of_target_uid_in_backwards=matching_point_number_of_target_uid_in_backwards,
keywork_of_target_file_not_for_uid=keywork_of_target_file_not_for_uid,
keyword_of_parent_folder_containing_target_file='',
save_path=save_path,
n_processess=8,
is_save_log=1,
is_copy=1,
is_move=0,
save_into_one_or_more_folder='all_file_one_folder',
save_suffix=save_suffix,
is_run=1)
results = copy.main_run()
# --------------------------------
results=results.__dict__
print(results.keys())
print('Done!') |
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_permutation_svc_multiprocessing_block.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 21:40:21 2018
@author: lenovo
"""
import multiprocessing
import time
#from scipy import io
import sys
sys.path.append(r'D:\myCodes\LC_MVPA\Python\MVPA_Python\utils')
# import module
from lc_read_write_Mat import write_mat
import time,os
import numpy as np
import lc_svc_rfe_cv as lsvc
##
class Perm_mvpa():
# # initial parameters
def __init__(self,\
model=lsvc.svc_rfe_cv(permutation=1,num_jobs=1),\
N_perm=20,\
batchsize=100,\
n_processess=5,\
fileName=r'D:\myCodes\LC_MVPA\Python\MVPA_Python\perm',\
k=5):
self.model=model
self.N_perm=N_perm
self.batchsize=batchsize
self.n_processess=n_processess
self.fileName=fileName
self.k=k # k fold CV of model
##
def perm_mvpa(self,X,y):
s = time.time()
blocks = int(np.ceil(self.N_perm / self.batchsize))
start=0
end = min(self.batchsize, self.N_perm) # make sure it does not exceed N_perm
for i in range(blocks):
print('running {}/{}......'.format(i+1,blocks))
pool = multiprocessing.Pool(processes=self.n_processess)
for n_perm in np.arange(start,end):
pool.apply_async(self.run_svc,\
(X,y,n_perm))
start+=self.batchsize
end=min(end+self.batchsize,self.N_perm)
pool.close()
pool.join()
e=time.time()
print('Done!\n running time is {:.1f}'.format(e-s))
#
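# Added note: perm_mvpa above splits N_perm permutations into
# ceil(N_perm / batchsize) blocks; with the defaults N_perm=20, batchsize=100
# that is a single block covering permutations 0..19, each one dispatched to
# the pool as a run_svc task below.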
def run_svc(self,X,y,n_perm):
# print('we have processing {} permutation'.format(n_perm))
y_rand=np.random.permutation(y)
predict,dec,y_sorted,weight=\
self.model.main_svc_rfe_cv(X,y_rand,self.k)
# write mat
write_mat(os.path.join(self.fileName,str(n_perm)),\
dataset_name=['predict','dec','y_sorted','weight'],\
dataset=[predict,dec,y_sorted,weight])
###
if __name__=='__main__':
import lc_permutation_svc_multiprocessing_block as Perm
# X and y were undefined here; a hypothetical synthetic dataset for illustration:
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=100, n_features=20, random_state=0)
perm = Perm.Perm_mvpa()
perm.perm_mvpa(X, y)
# perm.run_svc(X, y, 1)
|
dongmengshi/easylearn | eslearn/utils/SelectRawData_Run.py | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 15:06:51 2018
@author: lenovo
"""
# import modules
from PyQt5.QtWidgets import QApplication, QWidget, QFileDialog
from PyQt5 import QtWidgets
#from PyQt5 import QtWidgets
from SelectRawData_Window import Ui_CopySelectedData
import pandas as pd
import time
#from sel import Ui_Dialog
# ==============define class and initialization===================
class select(QWidget, Ui_CopySelectedData):
def __init__(self):
super().__init__()
self.setupUi(self)
self.ScaleFolder.clicked.connect(self.scaleFolder)
self.RawFolder.clicked.connect(self.rawFolder)
self.SaveFolder.clicked.connect(self.saveFolder)
self.RunCopy.clicked.connect(self.runCopy)
# DIY
def scaleFolder(self):
self.fileName, filetype = QFileDialog.getOpenFileName(self,
"Select the reference ID (folder) file (to be chosen)",
"D:\myCodes\LC_MVPA\workstation_20180829_dynamicFC",
"All Files (*);;Text Files (*.txt)")
self.folder_scale = pd.read_excel(self.fileName)
# print (self.folder_scale)
self.folder_scale = self.folder_scale.iloc[:, 0]
self.ScaleFolder.setText('Reference ID (folder) file (chosen)')
print('The reference ID (folder) you chose:\n[{}]'.format(self.fileName))
def rawFolder(self):
self.directory_RawFolder = QFileDialog.getExistingDirectory(
self, "Select the raw-data folder (to be chosen)", "D:\myCodes\LC_MVPA\workstation_20180829_dynamicFC")
self.RawFolder.setText('Raw-data folder (chosen)')
print('The raw-data folder you chose:\n[{}]'.format(self.directory_RawFolder))
def saveFolder(self):
self.directory_SaveFolder = QFileDialog.getExistingDirectory(
self, "Select the results folder (to be chosen)", "D:\myCodes\LC_MVPA\workstation_20180829_dynamicFC")
self.SaveFolder.setText('Results folder (chosen)')
print('The results folder you chose:\n[{}]'.format(self.directory_SaveFolder))
def runCopy(self):
import copySelectedDicomFile as copy
# get the current time
# Time=time.asctime(time.localtime(time.time()) )
# Time=Time.split(' ')
# Time=Time[5]+'_'+Time[1]+'_'+Time[3]+'_'+Time[4]
sel = copy.copy_fmri(
subjID_forSelect=self.folder_scale,
modalityName_forSelect='resting',
templates={'path': '*\\*'},
rootPath=self.directory_RawFolder,
savePath=self.directory_SaveFolder,
saveFolderName='resting_' + str(time.time()),
n_processess=10)
path_subject_all, folder_mri, path_subject_sel, path_modality_all\
= sel.main_run()
# =====================close window=======================
def closeEvent(self, event):
reply = QtWidgets.QMessageBox.question(
self,
            'Copy program',
            "Do you want to quit?",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
event.accept()
else:
event.ignore()
# ===========================================================
def main():
import sys
app = QApplication(sys.argv)
w = select()
w.show()
sys.exit(app.exec_())
# ===================executing==========================
if __name__ == '__main__':
main()
|
dongmengshi/easylearn | eslearn/utils/lc_extract_head_motion_paremeters.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 20:53:20 2019
Extract head-motion parameters
@author: lenovo
"""
|
dongmengshi/easylearn | eslearn/utils/lc_read_write_Mat.py | <filename>eslearn/utils/lc_read_write_Mat.py
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 10:42:13 2018
read and write .mat(MATLAB) data
@author: <NAME>
"""
# import module
from scipy import io
import numpy as np
# def
def read_mat(fileName, dataset_name=None):
    dataset_struct = io.loadmat(fileName)
    if dataset_name:
        dataset = dataset_struct[dataset_name]
    else:
        # loadmat puts '__header__', '__version__' and '__globals__' first,
        # so index 3 is the first data variable
        dataset = dataset_struct[list(dataset_struct.keys())[3]]
    return dataset
#
def write_mat(fileName='lc_test.mat', dataset_name=['data1', 'data2'],
              dataset=[np.ones([10, 7]), np.ones([10, 8])]):
    # build the {name: data} dict directly rather than eval-ing a command string
    if isinstance(dataset_name, str):
        mdict = {dataset_name: dataset}
    else:
        mdict = {name: data for name, data in zip(dataset_name, dataset)}
    io.savemat(fileName, mdict)
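# Hedged round-trip sketch (illustration only; assumes write access to the
# working directory):
# write_mat('lc_test.mat', ['data1'], [np.ones((10, 7))])
# data1 = read_mat('lc_test.mat', 'data1')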
if __name__ == '__main__':
fileName_R = r'J:\Research_2017go\GCA+Degree\GCA\Frontiers2018\NewIdea_201708\投稿\Frontier in Neurology\时域BOLD信号\Signals_R62\_signal.mat'
fileName_L = r'J:\Research_2017go\GCA+Degree\GCA\Frontiers2018\NewIdea_201708\投稿\Frontier in Neurology\时域BOLD信号\Signals_R63\signalAllSubj.mat'
    dataset1 = read_mat(fileName_R, 'Signal')
    dataset2 = read_mat(fileName_L, 'Signal')
|
dongmengshi/easylearn | eslearn/utils/lc_write_read_h5py.py | <reponame>dongmengshi/easylearn<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 7 11:29:22 2018
write and read h5py file
@author: lenovo
"""
# import
import h5py
import numpy as np
# def
# h5py file modes:
# r   read-only, the file must exist
# r+  read/write, the file must exist
# w   create a new file, overwrite if it exists
# w-  or x, create a new file, error if it exists
# a   read/write if it exists, otherwise create (default)
def write_h5py(fileName, group_name, dataset_name, dataset):
    f = h5py.File(fileName + ".hdf5", "a")
    g = f.create_group(group_name)  # raises ValueError if the group already exists
    for i in range(len(dataset)):
        g.create_dataset(dataset_name[i], data=dataset[i])
    f.close()
#
def read_h5py(fileName='aa'):
    f = h5py.File(fileName + ".hdf5", "r")
    print('groups are:\n')
    for g in f.keys():
        print(g)
    d = f[g]  # NOTE: only the last group is inspected
    print('group structure is:\n{}'.format([key for key in d.keys()]))
    one_value = np.array([])
    one_value = np.append(one_value, [value[()] for value in d.values()])
    f.close()
    return one_value
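# Hedged usage sketch (illustration only; assumes write access to the working
# directory and that 'group1' does not yet exist in lc_demo.hdf5):
if __name__ == '__main__':
    write_h5py('lc_demo', 'group1', ['a', 'b'], [np.arange(6), np.ones(6)])
    print(read_h5py('lc_demo'))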
|
dongmengshi/easylearn | eslearn/SSD_classification/Visulization/lc_plot_average_cohen_circle.py | # -*- coding: utf-8 -*-
"""
This script is used to plot cohen'd using circle format.
"""
import numpy as np
import pytest
import scipy.io as sio
import matplotlib.pyplot as plt
from mne.viz import plot_connectivity_circle, circular_layout
def test_plot_connectivity_circle():
"""
Test plotting connectivity circle.
"""
    # node_order = ['Amyg', 'BG', 'Tha', 'Hipp', 'Limbic', 'Visual', 'SomMot', 'Control', 'Default', 'DorsAttn', 'Sal/VentAttn']
    # label_names = ['Amyg', 'BG', 'Tha', 'Hipp', 'Limbic', 'Visual', 'SomMot', 'Control', 'Default', 'DorsAttn', 'Sal/VentAttn']
    node_order = [str(i) for i in range(246)]
    label_names = [str(i) for i in range(246)]
    group_boundaries = None  # e.g. [0, 2, 4, 6, 8, 10]; None draws no group separators
    node_angles = circular_layout(label_names, node_order, start_pos=90,
                                  group_boundaries=group_boundaries)
con_medicated = sio.loadmat(r'D:\WorkStation_2018\SZ_classification\Data\Stat_results\cohen_medicated1.mat')
con_unmedicated = sio.loadmat(r'D:\WorkStation_2018\SZ_classification\Data\Stat_results\cohen_feu1.mat')
con_medicated = con_medicated['cohen_medicated']
con_unmedicated = con_unmedicated['cohen_feu']
con_medicated[np.abs(con_medicated) <= 0.5] = 0
con_unmedicated[np.abs(con_unmedicated) <= 0.8] = 0
figs, ax = plt.subplots(1,2, facecolor ='k')
n_lines = np.sum(con_medicated[:] != 0)
plot_connectivity_circle(con_medicated, label_names, n_lines=n_lines,
node_angles=node_angles, title='test',
colormap='RdBu_r', vmin=-1, vmax=1, linewidth=2,
fontsize_names=12, textcolor='k', facecolor='w',
subplot=121, fig=figs, colorbar=True,)
n_lines = np.sum(con_unmedicated[:] != 0)
plot_connectivity_circle(con_unmedicated, label_names, n_lines=n_lines,
node_angles=node_angles, title='test',
colormap='RdBu_r', vmin=-1, vmax=1, linewidth=1.5,
fontsize_names=12, textcolor='k', facecolor='w',
subplot=122, fig=figs, colorbar=True)
# plt.tight_layout()
plt.subplots_adjust(wspace = 0.2, hspace = 0)
pytest.raises(ValueError, circular_layout, label_names, node_order,
group_boundaries=[-1])
pytest.raises(ValueError, circular_layout, label_names, node_order,
group_boundaries=[20, 0])
# plt.close('all')
if __name__ == "__main__":
test_plot_connectivity_circle() |
dongmengshi/easylearn | eslearn/visualization/lc_pyecharts_pir.py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 10:55:08 2018
@author: lenovo
"""
from pyecharts import Pie
import numpy as np
import pandas as pd
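# NOTE: sex1Num-sex4Num are assumed to come from the surrounding analysis
# session (e.g. lc_basicInfoStat); they are not defined in this script.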
attr = ['male','female']
v1 = sex4Num
pie = Pie('Pie chart example')
pie.add('',attr,v1,is_label_show = True)
pie.render(r'D:\WorkStation_2018\WorkStation_2018_08_Doctor_DynamicFC_Psychosis\pie01.html')
#
df = pd.DataFrame({'HC': sex1Num,\
'MDD': sex2Num,\
'SZ':sex3Num,\
'BD': sex4Num},\
index=['male', 'female'])
plot = df.plot.pie(y='BD', figsize=(5, 5)) |
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_cnn_compare_with_svm.py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 26 16:56:08 2018
@author: lenovo
"""
from sklearn.svm import SVC
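# NOTE: sel (with .x_train/.y_train), test_data and test_label are assumed to
# exist in the surrounding interactive session; they are not defined here.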
clf = SVC()
clf.fit(sel.x_train, sel.y_train)
pred=clf.predict(test_data)
# map the original labels 1/3 to 0/1 so they match the training labels
pred[pred == 1] = 0
pred[pred == 3] = 1
a = pred - test_label.T
a = a.T
# accuracy over the 206 test samples
sum(a == 0) / 206 |
dongmengshi/easylearn | eslearn/visualization/lc_clusterhotmap_v2.py | <filename>eslearn/visualization/lc_clusterhotmap_v2.py
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
import matplotlib.gridspec
import pandas as pd
import numpy as np
data = pd.read_excel(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Plot\data.xlsx')
kind = data.pop('KIND')
lut = dict(zip(kind.unique(), np.random.rand(20, 3)))  # RGB values must lie in [0, 1]
row_colors = kind.map(lut)
data.index = data.iloc[:,0]
data = data.iloc[:,2:]
clname = list(data.columns)
data = data[['q_A',
'q_A_unmedicated',
'q_A_medicated',
'q_B',
'q_B_unmedicated',
'q_B_medicated',
'q_C',
'q_C_unmedicated',
'q_C_medicated',]]
#First create the clustermap figure
g = sns.clustermap(data, row_colors=np.random.rand(94, 3), figsize=(13, 8))  # random RGB row colors in [0, 1]
# set the gridspec to only cover half of the figure
#g.gs.update(left=0.05, right=0.45)
#
##create new gridspec for the right part
#gs2 = matplotlib.gridspec.GridSpec(1,1, left=0.6)
## create axes within this new gridspec
#ax2 = g.fig.add_subplot(gs2[0])
## plot boxplot in the new axes
#sns.boxplot(data=iris, orient="h", palette="Set2", ax = ax2)
plt.show()
np.random.randint(0,256,3)
|
dongmengshi/easylearn | eslearn/utils/lc_copyFiles.py | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 21 22:37:32 2018
move selected files to a selected folder
Note: for each file, the code creates a folder with the same name as the
source file's parent folder
# copy a single file
shutil.copy("C:\\a\\1.txt","C:\\b")
# copy to a new file name
shutil.copy("C:\\a\\2.txt","C:\\b\\121.txt")
# copy an entire directory (backup)
shutil.copytree("C:\\a","C:\\b\\new_a")
# delete files
os.unlink("C:\\b\\1.txt")
os.unlink("C:\\b\\121.txt")
# delete an empty folder
try:
    os.rmdir("C:\\b\\new_a")
except Exception as ex:
    print("Error: " + str(ex))  # hint: the directory is not empty
# delete a folder and its contents
shutil.rmtree("C:\\b\\new_a")
# move a file
shutil.move("C:\\a\\1.txt","C:\\b")
# move a folder
shutil.move("C:\\a\\c","C:\\b")
# rename a file
shutil.move("C:\\a\\2.txt","C:\\a\\new2.txt")
# rename a folder
shutil.move("C:\\a\\d","C:\\a\\new_d")
@author: lenovo
"""
# import
from lc_selectFile_ import selectFile
import shutil
import os
# def
def copyFiles_multi(in_files, out_folder):
[moveFiles_single(in_file, out_folder) for in_file in in_files]
#
def moveFiles_single(file, folder):
    # find the folder containing the file
    dirname = os.path.dirname(file)
    file_folder = os.path.basename(dirname)
    # create a folder with the same name inside the output folder
    # (os.mkdir returns None, so keep the path in its own variable)
    output_folder = os.path.join(folder, file_folder)
    try:
        os.mkdir(output_folder)
    except FileExistsError:
        print(
            'folder [{}]\nalready exists'.format(
                output_folder))
    # copy the file into its folder (shutil.copy works on files; copytree does not)
    try:
        shutil.copy(file, output_folder)
        print('{} copied successfully!'.format(file_folder))
    except BaseException:
        print('{} no need to copy'.format(file_folder))
#
def obtainAllFile(folder):
files = selectFile(folder)
return files
def main():
# input
out_folder = r'I:\Data_Code\insomnia\workstation_MVPA_2018_05\FunImgARWS'
in_folder = r'I:\Data_Code\insomnia\workstation_MVPA_2018_05\FunImgARW1'
# all files
files = obtainAllFile(in_folder)
copyFiles_multi(files, out_folder)
|
dongmengshi/easylearn | eslearn/GUI/easylearn_main_run.py | <filename>eslearn/GUI/easylearn_main_run.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Main GUI of the easylearn
# Author: <NAME> <<EMAIL>>
# License: MIT
"""
import sys
import os
import json
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog, QInputDialog, QLineEdit
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QIcon, QPixmap
from eslearn.stylesheets.PyQt5_stylesheets import PyQt5_stylesheets
from easylearn_main_gui import Ui_MainWindow
from easylearn_data_loading_run import EasylearnDataLoadingRun
class EasylearnMainGUI(QMainWindow, Ui_MainWindow):
"""This class is used to display the main GUI of the easylearn.
"""
def __init__(self):
QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
self.working_directory = ""
self.textBrowser.setText("Hi~, I'm easylearn. I hope I can help you finish this project successfully\n")
# Set appearance
self.set_logo()
self.set_skin()
# Connecting to functions
self.select_working_directory.triggered.connect(self.select_workingdir_fun)
self.create_configuration_file.triggered.connect(self.initialize_configuration_fun)
self.choose_configuration_file.triggered.connect(self.load_configuration_fun)
self.data_loading.clicked.connect(self.data_loading_fun)
self.feature_engineering.clicked.connect(self.feature_engineering_fun)
self.machine_learning.clicked.connect(self.machine_learning_fun)
self.model_evaluation.clicked.connect(self.model_evaluation_fun)
self.statistical_analysis.clicked.connect(self.statistical_analysis_fun)
self.run.clicked.connect(self.run_fun)
self.quit.clicked.connect(self.closeEvent_button)
# Skins
self.skins = {"Dark": "style_Dark", "Black": "style_black", "DarkOrange": "style_DarkOrange",
"Gray": "style_gray", "Blue": "style_blue", "Navy": "style_navy", "Classic": "style_Classic"}
self.actionDark.triggered.connect(self.set_skin)
self.actionBlack.triggered.connect(self.set_skin)
self.actionDarkOrange.triggered.connect(self.set_skin)
self.actionGray.triggered.connect(self.set_skin)
self.actionBlue.triggered.connect(self.set_skin)
self.actionNavy.triggered.connect(self.set_skin)
self.actionClassic.triggered.connect(self.set_skin)
def set_logo(self):
qss_logo = """#logo{background-color: black;
border: 2px solid white;
border-radius: 20px;
border-image: url('../logo/logo-lower.jpg');
}
#logo:hover {border-radius: 0px;}
"""
self.logo.setStyleSheet(qss_logo)
self.setWindowTitle('easylearn')
self.setWindowIcon(QIcon('../logo/logo-upper.jpg'))
# Run Icon
self.run.setIcon(QIcon("../logo/run.png"));
self.run.setIconSize(QPixmap("../logo/run.png").size());
self.run.resize(QPixmap("../logo/run.png").size());
# Close Icon
self.quit.setIcon(QIcon("../logo/close.png"));
self.quit.setIconSize(QPixmap("../logo/close.png").size());
self.quit.resize(QPixmap("../logo/close.png").size());
def set_skin(self):
"""Set a appearance for easylearn (skin, etc).
"""
sender = self.sender()
if sender:
if (sender.text() in list(self.skins.keys())):
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style=self.skins[sender.text()]))
if sender.text() == "Classic":
self.setStyleSheet("")
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
def select_workingdir_fun(self):
"""
        This function is used to select the working directory, then change to that directory.
"""
        # If a working directory was selected previously, use it as the initial directory.
if self.working_directory == "":
self.working_directory = QFileDialog.getExistingDirectory(self, "Select a working_directory", os.getcwd())
self.textBrowser.setText("Current working directory is " + self.working_directory + "\n")
else:
self.working_directory = QFileDialog.getExistingDirectory(self, "Select a working_directory", self.working_directory)
self.textBrowser.setText("Current working directory is " + self.working_directory + "\n")
# If already choose a working directory, change directory to the working directory
if self.working_directory != "":
os.chdir(self.working_directory)
def initialize_configuration_fun(self):
"""Create file to save settings
This function will add the configuration_file to self
"""
if self.working_directory != "":
configuration_file_name, ok = QInputDialog.getText(self, "Initialize configuration", "Please name the configuration file:", QLineEdit.Normal, "configuration_file.json")
self.configuration_file = os.path.join(self.working_directory, configuration_file_name)
with open(self.configuration_file, 'w') as configuration_file:
config = {"data_loading": {}, "feature_engineering": {}, "machine_learning": {}, "model_evaluation": {}, "statistical_analysis": {}}
config = json.dumps(config)
configuration_file.write(config)
config_message = "Configuration file is " + self.configuration_file
self.textBrowser.setText(config_message)
else:
QMessageBox.warning( self, 'Warning', f'Please choose a working directory first! (press button at the top left corner)')
def load_configuration_fun(self):
"""Load configuration
"""
self.configuration_file, filetype = QFileDialog.getOpenFileName(self,
"Select configuration file",
os.getcwd(), "Text Files (*.json);;All Files (*);;")
# Read configuration_file if already selected
if self.configuration_file != "":
            # TODO: solve the Chinese-encoding issue
with open(self.configuration_file, 'r') as config:
self.configuration = config.read()
# Check the configuration is valid JSON, then transform the configuration to dict
            # If the configuration is not valid JSON, reset configuration and configuration_file to ""
try:
self.configuration = json.loads(self.configuration)
self.textBrowser.setText("Configuration file is " + self.configuration_file)
except json.decoder.JSONDecodeError:
QMessageBox.warning( self, 'Warning', f'{self.configuration_file} is not valid JSON')
self.configuration_file = ""
else:
QMessageBox.warning( self, 'Warning', 'Configuration file was not selected')
def data_loading_fun(self):
"""This function is called when data_loading button is clicked.
Then, this function will process the data loading.
"""
print('data_loading_fun')
        # keep the window in its own attribute so the data_loading button is not overwritten
        self.data_loading_ui = EasylearnDataLoadingRun(self.working_directory)
        self.data_loading_ui.show()
def feature_engineering_fun(self):
"""This function is called when feature_engineering button is clicked.
Then, this function will process the feature_engineering.
"""
print('feature_engineering_fun')
def machine_learning_fun(self):
"""This function is called when machine_learning button is clicked.
        Then, this function will process the machine learning.
"""
print('machine_learning_fun')
def model_evaluation_fun(self):
"""This function is called when model_evaluation button is clicked.
Then, this function will process the model evaluation.
"""
print('model_evaluation_fun')
def statistical_analysis_fun(self):
"""This function is called when data_loading button is clicked.
Then, this function will process the data loading.
"""
print('statistical_analysis_fun')
def save_workflow_fun(self):
"""This function is called when data_loading button is clicked.
Then, this function will process the data loading.
"""
print('save_workflow_fun')
def run_fun(self):
"""This function is called when data_loading button is clicked.
Then, this function will process the data loading.
"""
print('run_fun')
def closeEvent(self, event):
"""This function is called when exit icon of the window is clicked.
This function make sure the program quit safely.
"""
# Set qss to make sure the QMessageBox can be seen
reply = QMessageBox.question(self, 'Quit',"Are you sure to quit?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def closeEvent_button(self, event):
"""This function is called when quit button is clicked.
This function make sure the program quit safely.
"""
# Set qss to make sure the QMessageBox can be seen
reply = QMessageBox.question(self, 'Quit',"Are you sure to quit?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
QCoreApplication.quit()
if __name__=='__main__':
app=QApplication(sys.argv)
md=EasylearnMainGUI()
md.show()
sys.exit(app.exec_())
|
dongmengshi/easylearn | eslearn/utils/lc_basicInfoStat.py | <reponame>dongmengshi/easylearn<filename>eslearn/utils/lc_basicInfoStat.py
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 28 11:08:30 2018
1. Screen the age- and sex-matched groups, then re-screen `basic` by age and sex
2. Save each group's screened subject folders (sorted ascending; otherwise later
   correlations may not line up)
3. According to each group's folders, copy or move the fMRI data into folders named after the groups
Meaning of the subject labels:
1: HC; 2: MDD; 3: SZ; 4: BD; 5: HR
@author:<NAME>
"""
# =========================================================================
import pandas as pd
import numpy as np
from scipy.stats import f_oneway
from moveScreenedFile import moveMain
import copySelectedFile_OsWalk4 as copy
from selectSubjID_inScale import selMain
import sys
import os
sys.path.append(r'D:\myCodes\MVPA_LIChao\MVPA_Python\workstation')
# external imports
#from scipy.stats import chisquare
# ==========================================================================
### input ###
# whether to screen the data
ifScreen = 1
# master scale file
scaleData = r"D:\WorkStation_2018\WorkStation_2018_08_Doctor_DynamicFC_Psychosis\Scales\8.30大表.xlsx"
# root neuroimaging data folder
neuroimageDataPath = r'H:\dynamicFC\state\allState17_4\state1'
# whether to save the scales
ifSave = 0
savePath_scale = r'H:\dynamicFC\state'
if not os.path.exists(savePath_scale):
os.makedirs(savePath_scale)
# copy or move the whole dataset into subfolders
ifCopyOrMove = 0
# copy/move parameters
neuroimageDataPath = r'H:\dynamicFC\state\allState17_4\state4'
savePath_neuroimageData = r'H:\dynamicFC\state\allState17_4'
#
referencePath = [os.path.join(savePath_scale, 'folder_HC.xlsx'),
os.path.join(savePath_scale, 'folder_SZ.xlsx'),
os.path.join(savePath_scale, 'folder_BD.xlsx'),
os.path.join(savePath_scale, 'folder_MDD.xlsx')]
groupName = ['state4_HC', 'state4_SZ', 'state4_BD', 'state4_MDD']
# ==========================================================================
# if ifScreen:
def screen():
    # screen the scales
folder, basic, hamd17, hama, yars, bprs, logicIndex_scale, logicIndex_repeat\
= selMain(scaleData)
if ifSave:
folder.to_excel(os.path.join(savePath_scale, 'folder.xlsx'),
header=False, index=False)
# ==========================================================================
    # intersect the scale subjects with the data subjects to get the screened basic
reguForExtractFileName = r'[1-9]\d*'
# initiating parameters
sel = copy.copy_fmri(
referencePath=os.path.join(
savePath_scale,
'folder.xlsx'),
regularExpressionOfsubjName_forReference='([1-9]\d*)',
folderNameContainingFile_forSelect='',
num_countBackwards=1,
regularExpressionOfSubjName_forNeuroimageDataFiles='([1-9]\d*)',
keywordThatFileContain='mat',
neuroimageDataPath=neuroimageDataPath,
savePath=savePath_scale,
n_processess=10,
ifSaveLog=0,
ifCopy=0,
ifMove=0,
saveInToOneOrMoreFolder='saveToOneFolder',
saveNameSuffix='',
ifRun=0)
# run copy or move
allFilePath, allSubjName, logic_loc,\
allSelectedFilePath, allSelectedSubjName = sel.main_run()
    # extract names. NOTE: if there are multiple matches, only the first is taken by default
allSelectedSubjName = allSelectedSubjName.iloc[:, 0]
extractedSubjName = allSelectedSubjName.str.findall(reguForExtractFileName)
extractedSubjName = [extractedSubjName_[0]
for extractedSubjName_ in extractedSubjName]
extractedSubjName = pd.Series(extractedSubjName, dtype='int64')
    # screen basic
basic = basic.set_index('folder').join(
pd.DataFrame(extractedSubjName).set_index(0),
sort=True,
how='inner') # inner=intersection
    print('### NOTE: the index of basic is now the folder name! ###\n')
# ==========================================================================
    # re-screen basic by age
ageAll = basic['年龄'].dropna()
ageLogicInd = (ageAll <= 45) & (ageAll >= 13)
basic = basic[ageLogicInd]
# ==========================================================================
# diagnosis
dia = basic['诊断']
dia1Ind = dia == 1
dia2Ind = dia == 2
dia3Ind = dia == 3
dia4Ind = dia == 4
# ==========================================================================
    # re-screen basic by sex
    # first get the sex composition of each group
sex1 = basic['性别'][dia1Ind].dropna()
sex1Num = np.array([np.sum(sex1 == 1), np.sum(sex1 == 2)])
sex2 = basic['性别'][dia2Ind].dropna()
sex2Num = np.array([np.sum(sex2 == 1), np.sum(sex2 == 2)])
sex3 = basic['性别'][dia3Ind].dropna()
sex3Num = np.array([np.sum(sex3 == 1), np.sum(sex3 == 2)])
sex4 = basic['性别'][dia4Ind].dropna()
sex4Num = np.array([np.sum(sex4 == 1), np.sum(sex4 == 2)])
    # check whether the sex composition differs between groups (group 1 has too many of sex 1)
    # group 1 has too many subjects of sex 1
sex1Ind1 = sex1[sex1 == 1].index
sex1Ind2 = sex1[sex1 == 2].index
    screenedsex1Ind1 = sex1Ind1[0:60]  # keep only the first 60, i.e. screen out the rest
sex1 = pd.concat([sex1.loc[screenedsex1Ind1], sex1.loc[sex1Ind2]])
sex1Num = np.array([np.sum(sex1 == 1), np.sum(sex1 == 2)])
# ==========================================================================
    # update age, and screen by age
age1 = basic['年龄'].loc[sex1.index]
age2 = basic['年龄'].loc[sex2.index]
age3 = basic['年龄'].loc[sex3.index]
age4 = basic['年龄'].loc[sex4.index]
    # since age3 skews young, screen out some of the youngest subjects
def screenAge(age, num):
# 筛选掉一部分年龄偏小的样本,使各组匹配
age = age.sort_values()
screenedAgeInd = age.index[num:]
screenedAge = age.loc[screenedAgeInd]
return screenedAge
age3 = screenAge(age3, 25)
# ==========================================================================
    # update sex
sex1 = basic['性别'].loc[age1.index]
sex2 = basic['性别'].loc[age2.index]
sex3 = basic['性别'].loc[age3.index]
sex4 = basic['性别'].loc[age4.index]
sex1Num = np.array([np.sum(sex1 == 1), np.sum(sex1 == 2)])
sex2Num = np.array([np.sum(sex2 == 1), np.sum(sex2 == 2)])
sex3Num = np.array([np.sum(sex3 == 1), np.sum(sex3 == 2)])
sex4Num = np.array([np.sum(sex4 == 1), np.sum(sex4 == 2)])
# ==========================================================================
    # use the age/sex indices to get each diagnosis's folders, then save
folder1 = sex1.index
folder2 = sex2.index
folder3 = sex3.index
folder4 = sex4.index
# ascending sorted and save
if ifSave:
pd.DataFrame(folder1).sort_values(
by=[0]).to_excel(
os.path.join(
savePath_scale,
'folder_HC.xlsx'),
header=False,
index=False)
pd.DataFrame(folder2).sort_values(
by=[0]).to_excel(
os.path.join(
savePath_scale,
'folder_MDD.xlsx'),
header=False,
index=False)
pd.DataFrame(folder3).sort_values(
by=[0]).to_excel(
os.path.join(
savePath_scale,
'folder_SZ.xlsx'),
header=False,
index=False)
pd.DataFrame(folder4).sort_values(
by=[0]).to_excel(
os.path.join(
savePath_scale,
'folder_BD.xlsx'),
header=False,
index=False)
# ==========================================================================
    # screen the scales and save
# hamd17
hamd17_HC = hamd17.loc[folder1].sort_index(axis=0)
hamd17_MDD = hamd17.loc[folder2].sort_index(axis=0)
hamd17_SZ = hamd17.loc[folder3].sort_index(axis=0)
hamd17_BD = hamd17.loc[folder4].sort_index(axis=0)
if ifSave:
hamd17_HC = hamd17_HC.to_excel(
os.path.join(
savePath_scale,
'hamd17_HC.xlsx'))
hamd17_MDD = hamd17_MDD.to_excel(
os.path.join(savePath_scale, 'hamd17_MDD.xlsx'))
hamd17_SZ = hamd17_SZ.to_excel(
os.path.join(
savePath_scale,
'hamd17_SZ.xlsx'))
hamd17_BD = hamd17_BD.to_excel(
os.path.join(
savePath_scale,
'hamd17_BD.xlsx'))
# hama
hama_HC = hama.loc[folder1].sort_index(axis=0)
hama_MDD = hama.loc[folder2].sort_index(axis=0)
hama_SZ = hama.loc[folder3].sort_index(axis=0)
hama_BD = hama.loc[folder4].sort_index(axis=0)
if ifSave:
hama_HC = hama_HC.to_excel(
os.path.join(
savePath_scale,
'hama_HC.xlsx'))
hama_MDD = hama_MDD.to_excel(
os.path.join(
savePath_scale,
'hama_MDD.xlsx'))
hama_SZ = hama_SZ.to_excel(
os.path.join(
savePath_scale,
'hama_SZ.xlsx'))
hama_BD = hama_BD.to_excel(
os.path.join(
savePath_scale,
'hama_BD.xlsx'))
# yars
yars_HC = yars.loc[folder1].sort_index(axis=0)
yars_MDD = yars.loc[folder2].sort_index(axis=0)
yars_SZ = yars.loc[folder3].sort_index(axis=0)
yars_BD = yars.loc[folder4].sort_index(axis=0)
if ifSave:
yars_HC = yars_HC.to_excel(
os.path.join(
savePath_scale,
'yars_HC.xlsx'))
yars_MDD = yars_MDD.to_excel(
os.path.join(
savePath_scale,
'yars_MDD.xlsx'))
yars_SZ = yars_SZ.to_excel(
os.path.join(
savePath_scale,
'yars_SZ.xlsx'))
yars_BD = yars_BD.to_excel(
os.path.join(
savePath_scale,
'yars_BD.xlsx'))
# bprs
bprs_HC = bprs.loc[folder1].sort_index(axis=0)
bprs_MDD = bprs.loc[folder2].sort_index(axis=0)
bprs_SZ = bprs.loc[folder3].sort_index(axis=0)
bprs_BD = bprs.loc[folder4].sort_index(axis=0)
if ifSave:
bprs_HC = bprs_HC.to_excel(
os.path.join(
savePath_scale,
'bprs_HC.xlsx'))
bprs_MDD = bprs_MDD.to_excel(
os.path.join(
savePath_scale,
'bprs_MDD.xlsx'))
bprs_SZ = bprs_SZ.to_excel(
os.path.join(
savePath_scale,
'bprs_SZ.xlsx'))
bprs_BD = bprs_BD.to_excel(
os.path.join(
savePath_scale,
'bprs_BD.xlsx'))
# ==========================================================================
# save to excel
# join
scale_HC = pd.DataFrame(age1).join(pd.DataFrame(sex1), sort=True)
scale_MDD = pd.DataFrame(age2).join(pd.DataFrame(sex2), sort=True)
scale_SZ = pd.DataFrame(age3).join(pd.DataFrame(sex3), sort=True)
scale_BD = pd.DataFrame(age4).join(pd.DataFrame(sex4), sort=True)
# save
if ifSave:
scale_HC.to_excel(os.path.join(savePath_scale, 'ageANDsex_HC.xlsx'))
scale_MDD.to_excel(os.path.join(savePath_scale, 'ageANDsex_MDD.xlsx'))
scale_SZ.to_excel(os.path.join(savePath_scale, 'ageANDsex_SZ.xlsx'))
scale_BD.to_excel(os.path.join(savePath_scale, 'ageANDsex_BD.xlsx'))
scale_HC.to_csv(
os.path.join(
savePath_scale,
'ageANDsex_HC.txt'),
header=0,
index=False,
sep=' ')
scale_MDD.to_csv(
os.path.join(
savePath_scale,
'ageANDsex_MDD.txt'),
header=0,
index=False,
sep=' ')
scale_SZ.to_csv(
os.path.join(
savePath_scale,
'ageANDsex_SZ.txt'),
header=0,
index=False,
sep=' ')
scale_BD.to_csv(
os.path.join(
savePath_scale,
'ageANDsex_BD.txt'),
header=0,
index=False,
sep=' ')
# ==========================================================================
    # statistics
# age
#f,p = f_oneway(age1,age2,age3,age4)
# ==========================================================================
# ==========================================================================
    # find subjects with complete scale data
# bprs_SZ1=bprs_SZ.dropna()
# folder_SZ1=pd.Series(bprs_SZ1.index)
# ===========================================================================
# if ifCopyOrMove:
def copyOrMove():
    # copy some subjects elsewhere
# neuroimageDataPath=r'H:\dynamicFC\state\allState17_4\state1'
# savePath_neuroimageData=r'H:\dynamicFC\state\allState17_4\state1_HC'
# #
# referencePath=[os.path.join(savePath_scale,'folder_HC.xlsx'),
# os.path.join(savePath_scale,'folder_SZ.xlsx'),
# os.path.join(savePath_scale,'folder_BD.xlsx'),
# os.path.join(savePath_scale,'folder_MDD.xlsx')]
# #
# groupName=['HC','SZ','BD','MDD']
#
import copySelectedFile_OsWalk4 as copy
for (referencepath, groupname) in zip(referencePath, groupName):
# initiating parameters
sel = copy.copy_fmri(
referencePath=referencepath,
regularExpressionOfsubjName_forReference='([1-9]\d*)',
folderNameContainingFile_forSelect='',
num_countBackwards=1,
regularExpressionOfSubjName_forNeuroimageDataFiles='([1-9]\d*)',
keywordThatFileContain='',
neuroimageDataPath=neuroimageDataPath,
savePath=os.path.join(
savePath_neuroimageData,
groupname),
n_processess=10,
ifSaveLog=0,
ifCopy=0,
ifMove=1,
saveInToOneOrMoreFolder='saveToOneFolder',
saveNameSuffix='',
ifRun=1)
# run copy or move
allFilePath, allSubjName, logic_loc,\
allSelectedFilePath, allSelectedSubjName = sel.main_run()
#
print('Done!')
# ==========================================================================
if __name__ == '__main__':
# screen
if ifScreen:
screen()
# copy or move
if ifCopyOrMove:
copyOrMove()
|
dongmengshi/easylearn | eslearn/SSD_classification/Data_Inspection/lc_preprocess_for_our_dataset.py | """
This script is used to pre-process the dataset from our center.
1. Transform the .mat files into one .npy file
2. Give a label to each subject, concatenated as the first column
3. Randomly split the whole dataset into training and validation sets
"""
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import numpy as np
import pandas as pd
import os
from eslearn.utils.lc_read_write_Mat import read_mat
# Inputs
matroot = r'D:\WorkStation_2018\SZ_classification\Data\SelectedFC550' # all mat files directory
scale = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx' # whole scale path
uid_unmedicated_and_firstepisode = r'D:\WorkStation_2018\SZ_classification\Scale\uid_unmedicated_and_firstepisode.txt'
uid_sz_chronic_drugtaking_morethan6mon = r'D:\WorkStation_2018\SZ_classification\Scale\精分-非首发用药-病程大于6月.txt'
n_node = 246 # number of nodes in the mat network
#%% Transform the .mat files to one .npy file
allmatpath = os.listdir(matroot)
allmatpath = [os.path.join(matroot, matpath) for matpath in allmatpath]
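# upper-triangle mask (diagonal excluded): each 246x246 network is flattened
# into its 246*245/2 = 30135 unique edges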
mask = np.triu(np.ones(n_node),1)==1
allmat = [read_mat(matpath)[mask].T for matpath in allmatpath]
allmat = pd.DataFrame(np.float32(allmat))
# Give labels to each subject, concatenate at the first column
uid = [os.path.basename(matpath) for matpath in allmatpath]
uid = pd.Series(uid)
uid = uid.str.findall(r'([1-9]\d*)')
uid = pd.DataFrame([int(i[0]) for i in uid])  # builtin int (np.int is deprecated)
scale = pd.read_excel(scale)
selected_diagnosis = pd.merge(uid, scale, left_on=0, right_on='folder', how='inner')[['folder','诊断']]
age_sex = pd.merge(uid, scale, left_on=0, right_on='folder', how='inner')[['folder', '诊断', '年龄','性别']]
# Give the larger label (1) to SZ
selected_diagnosis[selected_diagnosis==1] = 0
selected_diagnosis[selected_diagnosis==3] = 1
allmat_plus_label = pd.concat([selected_diagnosis, allmat],axis=1)
# print(allmat_plus_label)
#np.save(r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Data\ML_data_npy\dataset_550.npy',allmat_plus_label)
#%% Extract validation dataset that contains first episode unmedicated patients
# unmedicated
uid_unmedicated_and_firstepisode = pd.read_csv(uid_unmedicated_and_firstepisode, header=None)
data_unmedicated_and_firstepisode_550 = allmat_plus_label[allmat_plus_label['folder'].isin(uid_unmedicated_and_firstepisode[0])]
cov_unmedicated_and_firstepisode = age_sex[age_sex['folder'].isin(uid_unmedicated_and_firstepisode[0])]
# HC: matching hc and sz
from scipy.stats import ttest_ind
from eslearn.statistics.lc_chisqure import lc_chisqure
cov_hc_for_matching_unmedicated_and_firstepisode = age_sex[age_sex['诊断'] == 1]
np.random.seed(11)
idx_rand = np.random.permutation(len(cov_hc_for_matching_unmedicated_and_firstepisode))
cov_hc = cov_hc_for_matching_unmedicated_and_firstepisode.iloc[idx_rand[:len(cov_unmedicated_and_firstepisode)],:]
# Check if matching
ttest_ind(cov_unmedicated_and_firstepisode['年龄'], cov_hc['年龄'])
lc_chisqure([44, 44], [np.sum(cov_unmedicated_and_firstepisode['性别'] == 1), np.sum(cov_hc['性别'] == 1)])
# Get data and save
data_hc_for_matching_unmedicated_and_firstepisode_550 = allmat_plus_label[allmat_plus_label['folder'].isin(cov_hc['folder'])]
data_all = np.concatenate([data_unmedicated_and_firstepisode_550, data_hc_for_matching_unmedicated_and_firstepisode_550])
# np.save(r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\dataset_unmedicated_and_firstepisode_550.npy', data_all)
#%% Generate demographic table for Unmedicated and the matching HC
uid_unmedicated_file = r'D:\WorkStation_2018\SZ_classification\Scale\uid_unmedicated_and_firstepisode.txt'
uid_unmedicated = pd.read_csv(uid_unmedicated_file, header=None, dtype=np.int32)
uid_unmedicated_sz_hc = pd.concat([cov_hc['folder'], uid_unmedicated])
scale_unmedicated_hc = pd.merge(scale, uid_unmedicated_sz_hc, left_on='folder', right_on=0, how='inner')[['folder', '诊断', '年龄','性别', '病程月']]
des_unmedicated_hc = scale_unmedicated_hc.describe()
#%% Extract covariances for all: age and sex
cov = pd.merge(uid, scale, left_on=0, right_on='folder', how='inner')[['folder','诊断', '年龄', '性别']]
cov['诊断'] = selected_diagnosis['诊断']
cov['性别'] = np.int32(cov['性别'] == 2)
cov.columns = ['folder', 'diagnosis', 'age', 'sex']
cov.to_csv(r'D:\WorkStation_2018\SZ_classification\Scale\cov_550.txt', index=False)
#%% Extract covariances for unmedicated patients ans matched HC: age and sex
cov_unmedicated_sz_and_matched_hc = pd.merge(uid_unmedicated_sz_hc, cov, left_on=0, right_on='folder', how='inner')
cov_unmedicated_sz_and_matched_hc.drop(0, axis=1, inplace=True)
cov_unmedicated_sz_and_matched_hc.to_csv(r'D:\WorkStation_2018\SZ_classification\Scale\cov_unmedicated_sp_and_hc_550.txt', index=False)
|
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_super_test.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 12 16:43:09 2019
@author: lenovo
"""
class Base(object):
def __init__(self):
print ("Base init")
class Medium1(Base):
def __init__(self):
Base.__init__(self)
print ("Medium1 init")
class Medium2(Base):
def __init__(self):
Base.__init__(self)
print ("Medium2 init")
class Leaf(Medium1, Medium2):
def __init__(self):
Medium1.__init__(self)
Medium2.__init__(self)
print ("Leaf init")
leaf = Leaf() |
dongmengshi/easylearn | eslearn/GUI/easylearn_feature_engineering_run.py | # -*- coding: utf-8 -*-
"""The GUI of the feature_engineering module of easylearn
Created on 2020/04/12
@author: <NAME>
Email:<EMAIL>
GitHub account name: lichao312214129
Institution (company): Brain Function Research Section, The First Affiliated Hospital of China Medical University, Shenyang, Liaoning, PR China.
@author: <NAME>
Email:<EMAIL>
GitHub account name: dongmengshi
Institution (company): Department of radiology, The First Affiliated Hospital of China Medical University, Shenyang, Liaoning, PR China.
License: MIT
"""
import sys
import os
import json
import cgitb
# from PyQt5 import *
# from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog
# from PyQt5.QtCore import *
import PyQt5_stylesheets
from eslearn.PyQt5_stylesheets.PyQt5_stylesheets import pyqt5_style_Dark_rc
from easylearn_feature_engineering_gui import Ui_MainWindow
class EasylearnFeatureEngineeringRun(QMainWindow, Ui_MainWindow):
def __init__(self, working_directory=None):
QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
# Initialization
self.feature_engineering = {}
self.configuration_file = ""
# Set appearance
# try:
self.set_run_appearance()
# except ModuleNotFoundError:
# pass
# Debug
cgitb.enable(display=1, logdir=None)
# Connect configuration functions
self.actionLoad_configuration.triggered.connect(self.load_configuration)
self.actionSave_configuration.triggered.connect(self.save_configuration)
        # connect preprocessing setting signals to the slot that switches to the corresponding stackedWidget
self.preprocessing_stackedwedge_dict = {"Z-score normalization": 0, "Scaling": 1, "De-mean": 2, "None": 3}
self.radioButton_zscore.clicked.connect(self.switche_stacked_wedge_for_preprocessing)
self.radioButton_scaling.clicked.connect(self.switche_stacked_wedge_for_preprocessing)
self.radioButton_demean.clicked.connect(self.switche_stacked_wedge_for_preprocessing)
self.radioButton_none_methods.clicked.connect(self.switche_stacked_wedge_for_preprocessing)
        # connect dimreduction setting signals to the slot that switches to the corresponding stackedWidget
self.dimreduction_stackedwedge_dict = {
"Principal component analysis": 0, "Independent component analysis": 1,
"Latent Dirichlet Allocation": 2, "Non-negative matrix factorization": 3, "None": 4
}
self.radioButton_pca.clicked.connect(self.switche_stacked_wedge_for_dimreduction)
self.radioButton_ica.clicked.connect(self.switche_stacked_wedge_for_dimreduction)
self.radioButton_lda.clicked.connect(self.switche_stacked_wedge_for_dimreduction)
self.radioButton_nmf.clicked.connect(self.switche_stacked_wedge_for_dimreduction)
self.radioButton_none.clicked.connect(self.switche_stacked_wedge_for_dimreduction)
        # connect feature selection setting signals to the slot that switches to the corresponding stackedWidget
self.feature_selection_stackedwedge_dict = {
"Variance threshold": 0, "Correlation": 1, "Distance correlation": 2, "F-Score (classification)": 3,
"Mutual information (classification)": 4, "Mutual information (regression)": 5, "ReliefF": 6, "ANOVA/Ttest2 (classification)": 7,
"RFE": 8,
"L1 regularization (Lasso)": 9, "L1 + L2 regularization (Elastic net regression)": 10,
"None": 11
}
self.radioButton_variance_threshold.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_correlation.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_distancecorrelation.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_fscore.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_mutualinfo_cls.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_mutualinfo_regression.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_relieff.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_anova.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_rfe.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_l1.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_elasticnet.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
self.radioButton_featureselection_none.clicked.connect(self.switche_stacked_wedge_for_feature_selection)
# Skins
self.skins = {"Dark": "style_Dark", "Black": "style_black", "DarkOrange": "style_DarkOrange",
"Gray": "style_gray", "Blue": "style_blue", "Navy": "style_navy", "Classic": "style_Classic"}
self.actionDark.triggered.connect(self.set_run_appearance)
self.actionBlack.triggered.connect(self.set_run_appearance)
self.actionDarkOrange.triggered.connect(self.set_run_appearance)
self.actionGray.triggered.connect(self.set_run_appearance)
self.actionBlue.triggered.connect(self.set_run_appearance)
self.actionNavy.triggered.connect(self.set_run_appearance)
self.actionClassic.triggered.connect(self.set_run_appearance)
def set_run_appearance(self):
"""Set style_sheets
"""
qss_special = """QPushButton:hover
{
font-weight: bold; font-size: 15px;
}
"""
self.setWindowTitle('Feature Engineering')
self.setWindowIcon(QIcon('../logo/logo-upper.jpg'))
sender = self.sender()
if sender:
if (sender.text() in list(self.skins.keys())):
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style=self.skins[sender.text()]))
if sender.text() == "Classic":
self.setStyleSheet("")
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
        # Make the stackedWidgets default at the beginning
self.tabWidget_items.setCurrentIndex(0)
self.stackedWidget_preprocessing_methods.setCurrentIndex(-1)
self.stackedWidget_dimreduction.setCurrentIndex(-1)
self.stackedWidget_feature_selection.setCurrentIndex(-1)
def get_current_inputs(self):
"""Get all current inputs
Attrs:
-----
self.feature_engineering: dictionary
all feature_engineering parameters that the user input.
"""
self.all_backup_inputs = {
"feature_preprocessing": {
self.radioButton_zscore : {"Z-score normalization": {}},
self.radioButton_scaling: {
"Scaling": {
"min": {"value": self.lineEdit_scaling_min.text(), "wedget": self.lineEdit_scaling_min},
"max": {"value": self.lineEdit_scaling_max.text(), "wedget": self.lineEdit_scaling_max},
}
},
self.radioButton_demean: {"demean": {}},
self.radioButton_none_methods: {"none": {}},
self.radioButton_grouplevel: {"grouplevel": {}},
self.radioButton_subjectlevel: {"subjectlevel": {}}
},
"dimreduction": {
self.radioButton_pca: {
"Principal component analysis": {
"min": {"value": self.doubleSpinBox_pca_maxcomponents.text(), "wedget": self.doubleSpinBox_pca_maxcomponents},
"max": {"value": self.doubleSpinBox_pca_mincomponents.text(), "wedget": self.doubleSpinBox_pca_mincomponents},
"number": {"value": self.spinBox_pcanum.text(), "wedget": self.spinBox_pcanum}
},
},
self.radioButton_ica: {
"Independent component analysis": {
"min": {"value": self.doubleSpinBox_ica_minics.text(), "wedget": self.doubleSpinBox_ica_minics},
"max": {"value": self.doubleSpinBox_ica_maxics.text(), "wedget": self.doubleSpinBox_ica_maxics},
"number": {"value": self.spinBox_icnum.text(), "wedget": self.spinBox_icnum},
}
},
self.radioButton_lda: {"lda": {}},
self.radioButton_nmf: {
"Non-negative matrix factorization": {
"min": {"value": self.doubleSpinBox_nmf_mincompnents.text(), "wedget": self.doubleSpinBox_nmf_mincompnents},
"max": {"value": self.doubleSpinBox_nmf_maxcomponents.text(), "wedget": self.doubleSpinBox_nmf_maxcomponents},
"number": {"value": self.spinBox_icnum.text(), "wedget": self.spinBox_icnum},
}
},
self.radioButton_none: {"none": {}}
},
"feature_selection": {
self.radioButton_variance_threshold: {
"Variance threshold": {
"min": {"value": self.doubleSpinBox_variancethreshold_min.text(), "wedget": self.doubleSpinBox_variancethreshold_min},
"max": {"value": self.doubleSpinBox_variancethreshold_max.text(), "wedget": self.doubleSpinBox_variancethreshold_max},
"number": {"value": self.spinBox_variancethreshold_num.text(), "wedget": self.spinBox_variancethreshold_num}
}
},
self.radioButton_correlation: {
"Correlation": {
"min": {"value": self.doubleSpinBox_correlation_minabscoef.text(), "wedget": self.doubleSpinBox_correlation_minabscoef},
"max": {"value": self.doubleSpinBox_correlation_maxabscoef.text(), "wedget": self.doubleSpinBox_correlation_maxabscoef},
"number": {"value": self.spinBox_correlation_num.text(), "wedget": self.spinBox_correlation_num},
}
},
self.radioButton_distancecorrelation: {
"Distance correlation": {
"min": {"value": self.doubleSpinBox_distancecorrelation_minabscoef.text(), "wedget": self.doubleSpinBox_distancecorrelation_minabscoef},
"max": {"value": self.doubleSpinBox_distancecorrelation_maxabscoef.text(), "wedget": self.doubleSpinBox_distancecorrelation_maxabscoef},
"number": {"value": self.spinBox_distancecorrelation_num.text(), "wedget": self.spinBox_distancecorrelation_num},
}
},
self.radioButton_fscore: {
"F-Score (classification)": {
"max":{"value": self.doubleSpinBox_fscore_maxnum.text(), "wedget": self.doubleSpinBox_fscore_maxnum},
"min": {"value":self.doubleSpinBox_fscore_minnum.text(), "wedget": self.doubleSpinBox_fscore_minnum},
"number": {"value":self.spinBox_fscore_num.text(), "wedget": self.spinBox_fscore_num},
}
},
self.radioButton_mutualinfo_cls: {
"Mutual information (classification)": {
"max": {"value": self.doubleSpinBox_mutualinfocls_maxnum.text(), "wedget": self.doubleSpinBox_mutualinfocls_maxnum},
"min": {"value": self.doubleSpinBox_mutualinfocls_minnum.text(), "wedget": self.doubleSpinBox_mutualinfocls_minnum},
"number": {"value": self.spinBox_mutualinfocls_num.text(), "wedget": self.spinBox_mutualinfocls_num},
"n_neighbors": {"value": self.spinBox_mutualinfocls_neighbors.text(), "wedget": self.spinBox_mutualinfocls_neighbors},
}
},
self.radioButton_mutualinfo_regression: {
"Mutual information (regression)": {
"max": {"value": self.doubleSpinBox_mutualinforeg_maxnum.text(), "wedget": self.doubleSpinBox_mutualinforeg_maxnum},
"min": {"value": self.doubleSpinBox_mutualinforeg_minnum.text(), "wedget": self.doubleSpinBox_mutualinforeg_minnum},
"number": {"value": self.spinBox_mutualinforeg_num.text(), "wedget": self.spinBox_mutualinforeg_num},
"n_neighbors": {"value": self.spinBox_mutualinforeg_neighbors.text(), "wedget": self.spinBox_mutualinforeg_neighbors},
}
},
self.radioButton_relieff: {
"ReliefF": {
"max": {"value": self.doubleSpinBox_relieff_max.text(), "wedget": self.doubleSpinBox_relieff_max},
"min": {"value": self.doubleSpinBox_relieff_min.text(), "wedget": self.doubleSpinBox_relieff_min},
"number": {"value": self.spinBox_relief_num.text(), "wedget": self.spinBox_relief_num},
}
},
self.radioButton_anova: {
"ANOVA": {
"max": {"value": self.doubleSpinBox_anova_alpha_max.text(), "wedget": self.doubleSpinBox_anova_alpha_max},
"min": {"value": self.doubleSpinBox_anova_alpha_min.text(), "wedget": self.doubleSpinBox_anova_alpha_min},
"number": {"value": self.spinBox_anova_num.text(), "wedget": self.spinBox_anova_num},
"multiple_correction": {"value": self.comboBox_anova_multicorrect.currentText(), "wedget": self.comboBox_anova_multicorrect},
}
},
self.radioButton_rfe: {
"RFE": {
"step": {"value": self.doubleSpinBox_rfe_step.text(), "wedget": self.doubleSpinBox_rfe_step},
"n_folds": {"value": self.spinBox_rfe_nfold.text(), "wedget": self.spinBox_rfe_nfold},
"estimator": {"value": self.comboBox_rfe_estimator.currentText(), "wedget": self.comboBox_rfe_estimator},
"n_jobs": {"value": self.spinBox_rfe_njobs.text(), "wedget": self.spinBox_rfe_njobs}
}
},
self.radioButton_l1: {
"L1 regularization (Lasso)": {
"max": {"va1ue": self.doubleSpinBox_l1_alpha_max.text(), "wedget": self.doubleSpinBox_l1_alpha_max},
"min": {"va1ue": self.doubleSpinBox_l1_alpha_min.text(), "wedget": self.doubleSpinBox_l1_alpha_min},
"number": {"va1ue": self.spinBox_l1_num.text(), "wedget": self.spinBox_l1_num}
}
},
self.radioButton_elasticnet: {
"L1 + L2 regularization (Elastic net regression)": {
"max_alpha": {"value": self.doubleSpinBox_elasticnet_alpha_max.text(), "wedget": self.doubleSpinBox_elasticnet_alpha_max},
"min_alpha": {"value": self.doubleSpinBox_elasticnet_alpha_min.text(), "wedget": self.doubleSpinBox_elasticnet_alpha_min},
"number_alpha": {"value": self.spinBox_elasticnet_num.text(), "wedget": self.spinBox_elasticnet_num},
"max_l1ratio": {"value": self.doubleSpinBox_elasticnet_l1ratio_max.text(), "wedget": self.doubleSpinBox_elasticnet_l1ratio_max},
"min_l1ratio": {"value": self.doubleSpinBox_elasticnet_l1ratio_min.text(), "wedget": self.doubleSpinBox_elasticnet_l1ratio_min},
"Number_l1ratio": {"value": self.spinBox_l1ratio_num.text(), "wedget": self.spinBox_l1ratio_num},
}
}
},
"unbalance_treatment": {
self.radioButton_randover: {"randover": {}},
self.radioButton_smoteover: {"somteover": {}},
self.radioButton_smotencover: {"somtencover": {}},
self.radioButton_bsmoteover: {"bsmoteover": {}},
self.radioButton_randunder: {"randunder": {}},
self.radioButton_extractionunder: {"extractionunder": {}},
self.radioButton_cludterunder: {"clusterunder": {}},
self.radioButton_nearmissunder: {"nearmissunder": {}},
}
}
#%% ----------------------------------get current inputs---------------------------------------
for key_feature_engineering in self.all_backup_inputs:
for keys_one_feature_engineering in self.all_backup_inputs[key_feature_engineering]:
if keys_one_feature_engineering.isChecked():
self.feature_engineering[key_feature_engineering] = self.all_backup_inputs[key_feature_engineering][keys_one_feature_engineering]
def load_configuration(self):
"""Load configuration, and refresh_gui configuration in GUI
"""
# Get current inputs before load configuration, so we can
# compare loaded configuration["feature_engineering"] with the current self.feature_engineering
self.get_current_inputs()
self.configuration_file, filetype = QFileDialog.getOpenFileName(self,
"Select configuration file",
os.getcwd(), "Text Files (*.json);;All Files (*);;")
# Read configuration_file if already selected
if self.configuration_file != "":
with open(self.configuration_file, 'r', encoding='utf-8') as config:
self.configuration = config.read()
# Check the configuration is valid JSON, then transform the configuration to dict
            # If the configuration is not valid JSON, reset configuration and configuration_file to ""
try:
self.configuration = json.loads(self.configuration)
# If already exists self.feature_engineering
if (self.feature_engineering != {}):
# If the loaded self.configuration["feature_engineering"] is not empty
# Then ask if rewrite self.feature_engineering with self.configuration["feature_engineering"]
if (list(self.configuration["feature_engineering"].keys()) != []):
reply = QMessageBox.question(self, "Data loading configuration already exists",
"The feature_engineering configuration is already exists, do you want to rewrite it with the loaded configuration?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.feature_engineering = self.configuration["feature_engineering"]
self.refresh_gui()
# If the loaded self.configuration["feature_engineering"] is empty
# Then assign self.configuration["feature_engineering"] with self.feature_engineering
else:
self.configuration["feature_engineering"] = self.feature_engineering
else:
self.feature_engineering = self.configuration["feature_engineering"]
self.refresh_gui()
except json.decoder.JSONDecodeError:
QMessageBox.warning( self, 'Warning', f'{self.configuration_file} is not valid JSON')
self.configuration_file = ""
else:
QMessageBox.warning( self, 'Warning', 'Configuration file was not selected')
def refresh_gui(self):
""" Refresh gui the display the loaded configuration in the GUI
"""
print("refresh_gui")
# Generate a dict for switch stacked wedgets
switch_dict = {
"feature_preprocessing": self.switche_stacked_wedge_for_preprocessing,
"dimreduction": self.switche_stacked_wedge_for_dimreduction,
"feature_selection": self.switche_stacked_wedge_for_feature_selection,
}
for keys_one_feature_engineering in self.all_backup_inputs: # 4 feature eng module loop
for wedget in self.all_backup_inputs[keys_one_feature_engineering].keys(): # all wedgets in one feature eng loop
for method in self.all_backup_inputs[keys_one_feature_engineering][wedget].keys():
if keys_one_feature_engineering in self.feature_engineering.keys():
if method in list(self.feature_engineering[keys_one_feature_engineering].keys()):
                        # check the widget according to the loaded parameters
wedget.setChecked(True)
                        # set the setting widgets to the loaded text
for key_setting in self.feature_engineering[keys_one_feature_engineering][method]:
print(keys_one_feature_engineering)
print(wedget)
print(key_setting)
print(self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting].keys())
if "wedget" in list(self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting].keys()):
loaded_text = self.feature_engineering[keys_one_feature_engineering][method][key_setting]["value"]
print(f"method = {method}, setting = {key_setting}, loaded_text={loaded_text}")
                                    # Identify the widget type, then use the matching setter ("setText", "setValue", ...)
                                    # NOTE: when designing the UI, keep the widget-type name as the prefix of each
                                    # object name, so the widget type can be identified below and set accordingly!
if "lineEdit" in self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].objectName():
self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].setText(loaded_text)
elif "doubleSpinBox" in self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].objectName():
self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].setValue(float(loaded_text))
elif "spinBox" in self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].objectName():
self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].setValue(int(loaded_text))
elif "comboBox" in self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].objectName():
self.all_backup_inputs[keys_one_feature_engineering][wedget][method][key_setting]["wedget"].setCurrentText(loaded_text)
# Switch stacked wedget
switch_dict[keys_one_feature_engineering](True, method)
def save_configuration(self):
"""Save configuration
"""
# Get current inputs before saving feature_engineering parameters
self.get_current_inputs()
# Delete wedgets object from self.feature_engineering dict
for feature_engineering_name in list(self.feature_engineering.keys()):
for method_name in list(self.feature_engineering[feature_engineering_name].keys()):
for setting in self.feature_engineering[feature_engineering_name][method_name]:
for content in list(self.feature_engineering[feature_engineering_name][method_name][setting].keys()):
if "wedget" in list(self.feature_engineering[feature_engineering_name][method_name][setting].keys()):
self.feature_engineering[feature_engineering_name][method_name][setting].pop("wedget")
if self.configuration_file != "":
try:
# self.configuration = json.dumps(self.configuration, ensure_ascii=False)
self.configuration["feature_engineering"] = self.feature_engineering
self.configuration = json.dumps(self.configuration)
with open(self.configuration_file, 'w', encoding="utf-8") as config:
config.write(self.configuration)
except json.decoder.JSONDecodeError:
QMessageBox.warning( self, 'Warning', f'{self.configuration}'+ ' is not a valid JSON!')
else:
QMessageBox.warning( self, 'Warning', 'Please choose a configuration file first (press button at top left corner)!')
def switche_stacked_wedge_for_preprocessing(self, signal_bool, method=None):
if self.sender().text():
if not method:
self.stackedWidget_preprocessing_methods.setCurrentIndex(self.preprocessing_stackedwedge_dict[self.sender().text()])
else:
self.stackedWidget_preprocessing_methods.setCurrentIndex(self.preprocessing_stackedwedge_dict[method])
else:
self.stackedWidget_preprocessing_methods.setCurrentIndex(-1)
def switche_stacked_wedge_for_dimreduction(self, signal_bool, method=None):
if self.sender():
if not method:
self.stackedWidget_dimreduction.setCurrentIndex(self.dimreduction_stackedwedge_dict[self.sender().text()])
else:
self.stackedWidget_dimreduction.setCurrentIndex(self.dimreduction_stackedwedge_dict[method])
else:
self.stackedWidget_dimreduction.setCurrentIndex(-1)
def switche_stacked_wedge_for_feature_selection(self, signal_bool, method=None):
self.groupBox_feature_selection_input.setTitle(self.sender().text())
if self.sender().text():
if not method:
self.stackedWidget_feature_selection.setCurrentIndex(self.feature_selection_stackedwedge_dict[self.sender().text()])
else:
self.stackedWidget_feature_selection.setCurrentIndex(self.feature_selection_stackedwedge_dict[method])
else:
self.stackedWidget_feature_selection.setCurrentIndex(-1)
# def closeEvent(self, event):
# """This function is called when exit icon of the window is clicked.
# This function make sure the program quit safely.
# """
# # Set qss to make sure the QMessageBox can be seen
# reply = QMessageBox.question(self, 'Quit',"Are you sure to quit?",
# QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
# if reply == QMessageBox.Yes:
# event.accept()
# else:
# event.ignore()
if __name__ == "__main__":
app=QApplication(sys.argv)
md=EasylearnFeatureEngineeringRun()
md.show()
sys.exit(app.exec_())
|
dongmengshi/easylearn | test/python_method.py | <reponame>dongmengshi/easylearn
class cal:
cal_name = 'computer'
def __init__(self,x,y):
self.x = x
self.y = y
    # @property makes cal_add callable like a plain attribute (encapsulation)
    @property
    def cal_add(self):
        return self.x + self.y
    # @classmethod turns cal_info into a class method; it can only access class
    # attributes, not instance attributes
    @classmethod
    def cal_info(cls):  # Python passes cls, the class itself, automatically
        print(cls.cal_name)  # cls.cal_name accesses the class's own attribute
    @staticmethod  # a static method; callable from the class or an instance
    def cal_test(a, b, c):  # a static method takes neither self nor cls
print(a,b,c)
c1 = cal(10,11)
c1.cal_test(1,2,3)
c1.cal_info()
print(c1.cal_add) |
dongmengshi/easylearn | eslearn/utils/lc_selectFile_.py | <filename>eslearn/utils/lc_selectFile_.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 15:01:44 2018
used to select target files
refer to and thank [https://www.jianshu.com/p/91453c76dbc3]
@author: <NAME>
"""
from nipype import SelectFiles, Node
def selectFile(
rootPath=r'I:\Data_Code\insomnia\workstation_MVPA_2018_05\FunImgARW1'):
templates = {'path': '*\\sw*.nii'}
# Create SelectFiles node
sf = Node(SelectFiles(templates),
name='selectfiles')
# Location of the dataset folder
sf.inputs.base_directory = rootPath
# Feed {}-based placeholder strings with values
# sf.inputs.subject_id1 = '00[1,2]'
# sf.inputs.subject_id2 = '01'
# sf.inputs.ses_name = "retest"
# sf.inputs.task_name = 'covertverb'
path = sf.run().outputs.__dict__['path']
return path
|
dongmengshi/easylearn | eslearn/GUI/el_grid_pipe_test.py | <filename>eslearn/GUI/el_grid_pipe_test.py<gh_stars>0
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.datasets import make_classification
X, y = make_classification(n_informative=5, n_redundant=0, random_state=42)
anova_filter = SelectKBest(f_regression, k=5)
parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
svc = svm.SVC()
clf = GridSearchCV(svc, parameters)
anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
anova_svm.fit(X, y)
prediction = anova_svm.predict(X)
print(anova_svm.score(X, y))
selected_feature_bool = anova_svm['anova'].get_support()
sub_pipeline = anova_svm[:1]  # sub-pipeline holding only the SelectKBest step
print(anova_svm['svc'] is anova_svm[-1])  # True: string and integer indexing agree
# The final step is a GridSearchCV, not a bare SVC, so coef_ must be read from the
# refitted best_estimator_, and it only exists when the chosen kernel is linear.
best_svc = anova_svm[-1].best_estimator_
if best_svc.kernel == 'linear':
    coef = best_svc.coef_
    print(coef.shape)
    # map the k=5 selected-feature coefficients back onto the original 20 features
    print(sub_pipeline.inverse_transform(coef).shape)
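# Added note (a sketch, not in the original script): the fitted grid search can be
# inspected through the pipeline; best_params_ is a standard GridSearchCV attribute.
print(anova_svm['svc'].best_params_)
|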
dongmengshi/easylearn | eslearn/machine_learning/classfication/lc_svc_rfe_fmri_V2.py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 10:07:21 2019
@author: lenovo
"""
import sys
import os
cpwd = __file__
root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# root = r'D:/My_Codes/LC_Machine_Learning/lc_rsfmri_tools/lc_rsfmri_tools_python'
sys.path.append(root)
# sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python\Machine_learning\classfication')
import nibabel as nib
import numpy as np
import pickle
import matplotlib.pyplot as plt
import sklearn.utils as ut
from Utils.lc_niiProcessor import NiiProcessor
from Machine_learning.classfication import lc_svc_rfe_cv_V3 as lsvc
class SvcRfeFmri():
"""
Input is fmri image, such as .nii or .img
"""
def __init__(sel):
# =========================================================
sel.patients_path = r'D:\workstation_b\豪哥\results\Patients'
sel.hc_path = r'D:\workstation_b\豪哥\results\HC'
sel.suffix = '.nii'
sel.mask = r'G:\Softer_DataProcessing\spm12\spm12\tpm\Reslice3_TPM_greaterThan0.2.nii'
        sel.out_path = r'D:\workstation_b\豪哥\results'  # path to save results
# =========================================================
        sel.is_save_results = 1  # save all results
sel.is_save_weight_to_nii = 1
sel.k = 5 # outer k-fold
sel.pca_n_component= 0.8
sel.show_results = 1
sel.show_roc = 1
sel.is_train = 1
# Mask
sel.mask, sel.mask_obj = NiiProcessor().read_sigle_nii(sel.mask)
sel.orig_shape = sel.mask.shape
sel.mask = sel.mask >= 0.2
sel.mask = np.array(sel.mask).reshape(-1,)
def load_nii_and_gen_label(sel):
"""
Load nii and generate label
"""
        data1, _ = NiiProcessor().main(sel.patients_path, sel.suffix)
        data1 = np.squeeze(
            np.array([np.array(d).reshape(1, -1) for d in data1]))
        data2, _ = NiiProcessor().main(sel.hc_path, sel.suffix)
        data2 = np.squeeze(
            np.array([np.array(d).reshape(1, -1) for d in data2]))
sel.data = np.vstack([data1, data2])
# data in sel.mask
sel.data_in_mask = sel.data[:, sel.mask]
# label
        sel.label = np.hstack(
            [np.ones(len(data1)), np.zeros(len(data2))])
return sel
def tr_te(sel):
"""
Training and test
"""
svc = lsvc.SVCRfeCv(
outer_k=sel.k,
pca_n_component=sel.pca_n_component,
show_results=sel.show_results,
show_roc=sel.show_roc)
if sel.is_train:
# sel.label = ut.shuffle(sel.label)
sel.results = svc.svc_rfe_cv(sel.data_in_mask, sel.label)
return sel
def weight2nii(sel, results):
"""
        Write the weight matrix back to a nii file.
        The mask file is used as the reference to generate the nii file.
"""
weight = np.squeeze(results['weight_all'])
weight_mean = np.mean(weight, axis=0)
# to orignal space
weight_mean_orig = np.zeros(sel.orig_shape)
mask_orig = np.reshape(sel.mask, sel.orig_shape)
weight_mean_orig[mask_orig] = weight_mean
# save to nii
weight_nii = nib.Nifti1Image(weight_mean_orig, affine=sel.mask_obj.affine)
weight_nii.to_filename(os.path.join(sel.out_path, 'weight.nii'))
def save_results(sel):
import time
now = time.strftime("%Y%m%d%H%M%S", time.localtime())
with open(os.path.join(sel.out_path, "".join(["results_", now, "_.pkl"])), "wb") as file:
pickle.dump(sel.results.__dict__, file, True)
# # load pkl file
# with open("".join(["results_",now,"_.pkl"]),"rb") as file:
# results = pickle.load(file)
def run(sel):
"""run"""
sel.load_nii_and_gen_label()
sel.tr_te()
results = sel.results.__dict__
# save all results
        if sel.is_save_results:
sel.save_results()
# save weight
if sel.is_save_weight_to_nii:
sel.weight2nii(results)
return results
if __name__ == "__main__":
sel = SvcRfeFmri()
results = sel.run()
print(results.keys())
print(results.items())
|
dongmengshi/easylearn | eslearn/visualization/lc_radarplot.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 09:37:41 2018
@author: lenovo
"""
import pygal
radar_chart = pygal.Radar(fill=True)  # fill the area under each series
radar_chart.title = 'radar_plot'
radar_chart.x_labels = ['Richards', 'DeltaBlue', 'Crypto', 'RayTrace', 'EarleyBoyer', 'RegExp', 'Splay', 'NavierStokes']
radar_chart.add('Chrome', [6395, 8212, 7520, 7218, 12464, 1660, 2123, 8607])
radar_chart.add('Firefox', [7473, 8099, 11700, 2651, 6361, 1044, 3797, 9450])
radar_chart.add('Opera', [3472, 2933, 4203, 5229, 5810, 1828, 9013, 4669])
radar_chart.add('IE', [43, 41, 59, 79, 144, 136, 34, 102])
radar_chart.render_to_file('radar_chart.svg')
#bar = pygal.Bar()
#bar.title = "bar测试"
#bar.x_labels = ["1", "2"]
#bar.add("webp", [20, 30])
#bar.add("jpg", [20, 30])
#bar.render_to_file()
#bar.render_to_png(r'D:\myCodes\MVPA_LIChao\MVPA_Python\plot\a.png') |
dongmengshi/easylearn | eslearn/statistical analysis/lc_calc_cohen_d_effective_size.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 16:48:21 2020
@author: <NAME>
Email: <EMAIL>
"""
import numpy as np
def CohenEffectSize(group1, group2):
""" Calculate Cohen' d
Parameters:
-----------
group1: NumPy array
dimension is n_samples * n_features
group2: NumPy array
dimension is n_samples * n_features
    Returns: float
        Cohen's d
"""
diff = group1.mean(axis=0) - group2.mean(axis=0)
n1, n2 = len(group1), len(group2)
var1 = group1.var(axis=0)
var2 = group2.var(axis=0)
    pooled_var = ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2)
d = diff / np.sqrt(pooled_var)
    return d
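# Minimal usage sketch (added for illustration; synthetic data, not from the original script):
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    g1 = rng.normal(loc=0.5, scale=1.0, size=(30, 4))  # n_samples * n_features
    g2 = rng.normal(loc=0.0, scale=1.0, size=(25, 4))
    print(CohenEffectSize(g1, g2))  # one Cohen's d per feature
|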
dongmengshi/easylearn | eslearn/SSD_classification/ML/lc_pca_svc_firstepisodeunmedicated.py | <reponame>dongmengshi/easylearn
# -*- coding: utf-8 -*-
"""
Created on 2019/11/20
This script is used to train a linear SVC model on training data,
and to test the model on test data with a k-fold cross-validation strategy.
@author: <NAME>
"""
import sys
import numpy as np
from sklearn import svm
from sklearn.model_selection import KFold
from sklearn import preprocessing
import eslearn.utils.el_preprocessing as elprep
import eslearn.utils.lc_dimreduction as dimreduction
from eslearn.utils.lc_evaluation_model_performances import eval_performance
class PCASVCPooling():
"""
Parameters:
----------
    data : path str
        path of the dataset
        NOTE: The first column of the dataset is the subject's unique index, the second
        is the diagnosis label (0/1), and the remaining columns are features.
    is_dim_reduction: bool
        whether to perform dimension reduction (PCA)
    components: float
        The fraction of cumulative explained variance to retain; used to select
        the number of top principal components.
    cv: int
        Number of cross-validation folds.
    out_name: str
        The name of the output results.
Returns:
--------
Classification results, such as accuracy, sensitivity, specificity, AUC and figures that used to report.
"""
def __init__(sel,
data=None,
is_dim_reduction=True,
components=0.95,
cv=5):
        sel.data = data
        sel.is_dim_reduction = is_dim_reduction
        sel.components = components
        sel.cv = cv
def main_function(sel):
"""
        The training data, validation data and test data are randomly split
"""
print('training model and testing...\n')
# load data
        data = np.load(sel.data)
        # Extracting features and label
        features_our_center_550 = data[:, 2:]
        label_our_center_550 = data[:, 1]
# Generate training data and test data
data_all = features_our_center_550
label_all = label_our_center_550
# Unique ID
# KFold Cross Validation
sel.label_test_all = np.array([], dtype=np.int16)
train_index = np.array([], dtype=np.int16)
test_index = np.array([], dtype=np.int16)
sel.decision = np.array([], dtype=np.int16)
sel.prediction = np.array([], dtype=np.int16)
sel.accuracy = np.array([], dtype=np.float16)
sel.sensitivity = np.array([], dtype=np.float16)
sel.specificity = np.array([], dtype=np.float16)
sel.AUC = np.array([], dtype=np.float16)
sel.coef = []
kf = KFold(n_splits=sel.cv, shuffle=True, random_state=0)
for i, (tr_ind , te_ind) in enumerate(kf.split(data_all)):
print(f'------{i+1}/{sel.cv}...------\n')
train_index = np.int16(np.append(train_index, tr_ind))
test_index = np.int16(np.append(test_index, te_ind))
feature_train = data_all[tr_ind,:]
label_train = label_all[tr_ind]
feature_test = data_all[te_ind,:]
label_test = label_all[te_ind]
sel.label_test_all = np.int16(np.append(sel.label_test_all, label_test))
# resampling training data
# feature_train, label_train = sel.re_sampling(feature_train, label_train)
# normalization
prep = elprep.Preprocessing(data_preprocess_method='StandardScaler', data_preprocess_level='subject')
feature_train, feature_test = prep.data_preprocess(feature_train, feature_test)
# dimension reduction
if sel.is_dim_reduction:
                feature_train, feature_test, model_dim_reduction = sel.dimReduction(feature_train, feature_test, sel.components)
                print(f'After dimension reduction, the feature number is {feature_train.shape[1]}')
            else:
                print('No dimension reduction performed\n')
# train and test
print('training and testing...\n')
model = sel.training(feature_train,label_train)
weight = model.coef_
if sel.is_dim_reduction:
sel.coef.append(model_dim_reduction.inverse_transform(weight)) # save coef
else:
sel.coef.append(weight) # save coef
pred, dec = sel.testing(model,feature_test)
sel.prediction = np.append(sel.prediction, np.array(pred))
sel.decision = np.append(sel.decision, np.array(dec))
# Evaluating classification performances
acc, sens, spec, auc = eval_performance(label_test, pred, dec,
accuracy_kfold=None, sensitivity_kfold=None, specificity_kfold=None, AUC_kfold=None,
verbose=1, is_showfig=0)
sel.accuracy = np.append(sel.accuracy,acc)
sel.sensitivity = np.append(sel.sensitivity,sens)
sel.specificity = np.append(sel.specificity,spec)
sel.AUC = np.append(sel.AUC,auc)
sel.special_result = np.concatenate([sel.label_test_all, sel.decision, sel.prediction], axis=0).reshape(3, -1).T
print('Done!')
return sel
def re_sampling(sel,feature, label):
"""
        Used to over-sample unbalanced data
"""
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=0)
feature_resampled, label_resampled = ros.fit_resample(feature, label)
from collections import Counter
print(sorted(Counter(label).items()))
print(sorted(Counter(label_resampled).items()))
return feature_resampled, label_resampled
    def dimReduction(sel, train_X, test_X, pca_n_component):
        train_X, trained_pca = dimreduction.pca(train_X, pca_n_component)
        test_X = trained_pca.transform(test_X)
        return train_X, test_X, trained_pca
    def training(sel, train_X, train_y):
        # linear SVC with fixed hyperparameters (no grid search is performed here)
svc = svm.SVC(kernel='linear', C=1, class_weight='balanced', max_iter=5000, random_state=0)
svc.fit(train_X, train_y)
return svc
    def testing(sel, model, test_X):
        predict = model.predict(test_X)
        decision = model.decision_function(test_X)
        return predict, decision
def save_results(sel, data, name):
import pickle
with open(name, 'wb') as f:
pickle.dump(data, f)
def save_fig(sel, out_name):
# Save ROC and Classification 2D figure
acc, sens, spec, auc = eval_performance(sel.label_test_all, sel.prediction, sel.decision,
sel.accuracy, sel.sensitivity, sel.specificity, sel.AUC,
verbose=0, is_showfig=1, legend1='HC', legend2='SSD', is_savefig=1,
out_name=out_name)
#
if __name__ == '__main__':
    sel = PCASVCPooling(data=r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\dataset_firstepisode_and_unmedicated_550.npy')
    sel = sel.main_function()
# sel.save_fig(out_name=r'D:\WorkStation_2018\SZ_classification\Figure\Classification_performances_unmedicated.pdf')
results=sel.__dict__
sel.save_results(results, r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\results_unmedicated_and_firstepisode_550.npy')
print(np.mean(sel.accuracy))
print(np.std(sel.accuracy))
print(np.mean(sel.sensitivity))
print(np.std(sel.sensitivity))
print(np.mean(sel.specificity))
print(np.std(sel.specificity))
print(np.mean(sel.AUC))
print(np.std(sel.AUC))
|