repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
efrain2010/matchms | matchms/filtering/add_precursor_mz.py | <reponame>efrain2010/matchms<gh_stars>0
from ..typing import SpectrumType
def add_precursor_mz(spectrum_in: SpectrumType) -> SpectrumType:
    """Add precursor_mz to correct field and make it a float.

    For missing precursor_mz field: check if there is a "pepmass" entry instead.
    For string parsed as precursor_mz: convert to float.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    precursor_mz = spectrum.get("precursor_mz", None)
    if isinstance(precursor_mz, str):
        # Harmonize string-typed entries (e.g. from file parsing) to float.
        spectrum.set("precursor_mz", float(precursor_mz.strip()))
    elif precursor_mz is None:
        # Fall back to the "pepmass" field; guard against a missing entry,
        # which would otherwise raise TypeError on pepmass[0].
        pepmass = spectrum.get("pepmass")
        if pepmass is not None and isinstance(pepmass[0], float):
            spectrum.set("precursor_mz", pepmass[0])
        else:
            print("No precursor_mz found in metadata.")

    return spectrum
|
efrain2010/matchms | tests/test_spectrum_similarity_functions.py | """Test function to collect matching peaks. Run tests both on numba compiled and
pure Python version."""
import numpy
from matchms.similarity.spectrum_similarity_functions import collect_peak_pairs
def test_collect_peak_pairs_no_shift():
    """Test finding expected peak matches within tolerance=0.2 and shift=0.0."""
    import pytest  # local import: the module import header lies outside this block

    spec1 = numpy.array([[100, 200, 300, 500],
                         [0.1, 0.1, 1.0, 1.0]], dtype="float").T
    spec2 = numpy.array([[105, 205.1, 300, 500.1],
                         [0.1, 0.1, 1.0, 1.0]], dtype="float").T
    matching_pairs = collect_peak_pairs(spec1, spec2, tolerance=0.2, shift=0.0)
    # Peaks 300/300 and 500/500.1 lie within tolerance; intensity products are 1.0 * 1.0.
    expected_pairs = [(2, 2, 1.0), (3, 3, 1.0)]
    assert len(matching_pairs) == 2, "Expected different number of matching peaks"
    assert matching_pairs == [pytest.approx(x, 1e-9) for x in expected_pairs], "Expected different pairs."
def test_collect_peak_pairs_shift_min5():
    """Test finding expected peak matches when given a mass shift of -5.0.

    Uses .py_func to exercise the pure-Python (non-numba-compiled) version.
    """
    import pytest  # local import: the module import header lies outside this block

    spec1 = numpy.array([[100, 200, 300, 500],
                         [0.1, 0.1, 1.0, 1.0]], dtype="float").T
    spec2 = numpy.array([[105, 205.1, 300, 500.1],
                         [0.1, 0.1, 1.0, 1.0]], dtype="float").T
    matching_pairs = collect_peak_pairs.py_func(spec1, spec2, tolerance=0.2, shift=-5.0)
    # With shift=-5.0, peaks 100/105 and 200/205.1 match; intensity products are 0.1 * 0.1.
    expected_pairs = [(0, 0, 0.01), (1, 1, 0.01)]
    assert len(matching_pairs) == 2, "Expected different number of matching peaks"
    assert matching_pairs == [pytest.approx(x, 1e-9) for x in expected_pairs], "Expected different pairs."
|
efrain2010/matchms | matchms/similarity/CosineGreedy.py | <filename>matchms/similarity/CosineGreedy.py
from typing import Tuple
from matchms.typing import SpectrumType
from .spectrum_similarity_functions import collect_peak_pairs
from .spectrum_similarity_functions import get_peaks_array
from .spectrum_similarity_functions import score_best_matches
class CosineGreedy:
    """Calculate 'cosine similarity score' between two spectra.

    The cosine score aims at quantifying the similarity between two mass spectra.
    It is computed by finding the best possible matches between peaks of the two
    spectra: two peaks are considered a potential match if their m/z ratios lie
    within the given 'tolerance'.

    The underlying peak assignment problem is solved in a 'greedy' way.
    This can perform notably faster, but does occasionally deviate slightly from
    a fully correct solution (as with the Hungarian algorithm). In practice this
    will rarely affect similarity scores notably, in particular for smaller
    tolerances.

    For example

    .. testcode::

        import numpy as np
        from matchms import Spectrum
        from matchms.similarity import CosineGreedy

        spectrum_1 = Spectrum(mz=np.array([100, 150, 200.]),
                              intensities=np.array([0.7, 0.2, 0.1]))
        spectrum_2 = Spectrum(mz=np.array([100, 140, 190.]),
                              intensities=np.array([0.4, 0.2, 0.1]))

        # Use factory to construct a similarity function
        cosine_greedy = CosineGreedy(tolerance=0.2)
        score, n_matches = cosine_greedy(spectrum_1, spectrum_2)

        print(f"Cosine score is {score:.2f} with {n_matches} matched peaks")

    Should output

    .. testoutput::

        Cosine score is 0.83 with 1 matched peaks
    """
    def __init__(self, tolerance: float = 0.1, mz_power: float = 0.0,
                 intensity_power: float = 1.0):
        """
        Parameters
        ----------
        tolerance:
            Peaks will be considered a match when <= tolerance apart. Default is 0.1.
        mz_power:
            The power to raise m/z to in the cosine function. The default is 0, in which
            case the peak intensity products will not depend on the m/z ratios.
        intensity_power:
            The power to raise intensity to in the cosine function. The default is 1.
        """
        self.tolerance = tolerance
        self.mz_power = mz_power
        self.intensity_power = intensity_power

    def __call__(self, spectrum1: SpectrumType, spectrum2: SpectrumType) -> Tuple[float, int]:
        """Calculate cosine score between two spectra.

        Parameters
        ----------
        spectrum1:
            Input spectrum 1.
        spectrum2:
            Input spectrum 2.

        Returns
        -------
        Tuple with cosine score and number of matched peaks.
        """
        peaks1 = get_peaks_array(spectrum1)
        peaks2 = get_peaks_array(spectrum2)
        candidate_pairs = collect_peak_pairs(peaks1, peaks2, self.tolerance, shift=0.0,
                                             mz_power=self.mz_power,
                                             intensity_power=self.intensity_power)
        # Greedy assignment consumes pairs best-first, so order by the
        # (weighted) intensity product stored in the third tuple entry.
        pairs_best_first = sorted(candidate_pairs, key=lambda pair: pair[2], reverse=True)
        return score_best_matches(pairs_best_first, peaks1, peaks2,
                                  self.mz_power, self.intensity_power)
|
efrain2010/matchms | matchms/filtering/harmonize_undefined_inchikey.py | from ..typing import SpectrumType
def harmonize_undefined_inchikey(spectrum_in: SpectrumType, undefined="", aliases=None):
    """Replace a missing or alias-valued "inchikey" metadata entry by `undefined`."""
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    # Default spellings that all mean "no inchikey available".
    if aliases is None:
        aliases = ["", "N/A", "NA", "n/a", "no data"]

    inchikey = spectrum.get("inchikey")
    if inchikey is None or inchikey in aliases:
        # No usable inchikey in the metadata -> harmonize to the chosen placeholder.
        spectrum.set("inchikey", undefined)
    return spectrum
|
efrain2010/matchms | matchms/filtering/make_ionmode_lowercase.py | from ..typing import SpectrumType
def make_ionmode_lowercase(spectrum_in: SpectrumType) -> SpectrumType:
    """Harmonize the "ionmode" metadata entry to lowercase (when present)."""
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    ionmode = spectrum.get("ionmode")
    if ionmode is not None:
        spectrum.set("ionmode", ionmode.lower())
    return spectrum
|
efrain2010/matchms | matchms/similarity/spectrum_similarity_functions.py | from typing import Tuple
import numba
import numpy
from matchms.typing import SpectrumType
@numba.njit
def collect_peak_pairs(spec1, spec2, tolerance, shift=0,
                       mz_power=0.0, intensity_power=1.0):
    # pylint: disable=too-many-arguments
    """Find matching pairs between two spectra.

    Args
    ----
    spec1: numpy array
        Spectrum peaks and intensities as numpy array (rows of [mz, intensity]).
    spec2: numpy array
        Spectrum peaks and intensities as numpy array (rows of [mz, intensity]).
    tolerance : float
        Peaks will be considered a match when <= tolerance apart.
    shift : float, optional
        Shift spectra peaks by shift. The default is 0.
    mz_power: float, optional
        The power to raise mz to in the cosine function. The default is 0, in which
        case the peak intensity products will not depend on the m/z ratios.
    intensity_power: float, optional
        The power to raise intensity to in the cosine function. The default is 1.

    Returns
    -------
    matching_pairs : list
        List of (idx1, idx2, weighted intensity product) tuples, one per
        peak pair found within tolerance.
    """
    matching_pairs = []
    for idx in range(len(spec1)):
        intensity = spec1[idx, 1]
        mz = spec1[idx, 0]
        # All spec2 peaks whose (shifted) m/z lies within tolerance of this spec1 peak.
        matches = numpy.where((numpy.abs(spec2[:, 0] - spec1[idx, 0] + shift) <= tolerance))[0]
        for match in matches:
            # Weight each peak as mz**mz_power * intensity**intensity_power before
            # multiplying; the defaults reduce this to the plain intensity product.
            power_prod_spec1 = ((mz ** mz_power) * (intensity ** intensity_power))
            power_prod_spec2 = ((spec2[match][0] ** mz_power) * (spec2[match][1] ** intensity_power))
            matching_pairs.append((idx, match, power_prod_spec1 * power_prod_spec2))
    return matching_pairs
def get_peaks_array(spectrum: SpectrumType) -> numpy.ndarray:
    """Return spectrum peaks as an (n_peaks, 2) array of [mz, intensity] rows."""
    return numpy.column_stack((spectrum.peaks.mz, spectrum.peaks.intensities))
def score_best_matches(matching_pairs: list, spec1: numpy.ndarray,
                       spec2: numpy.ndarray, mz_power: float = 0.0,
                       intensity_power: float = 1.0) -> Tuple[float, int]:
    """Calculate cosine-like score by multiplying matches. Does require a sorted
    list of matching peaks (sorted by intensity product)."""
    matched1 = set()
    matched2 = set()
    accumulated = 0.0
    n_used = 0
    for pair in matching_pairs:
        # Greedy assignment: accept a pair only while both peaks are still unpaired.
        if pair[0] not in matched1 and pair[1] not in matched2:
            accumulated += pair[2]
            matched1.add(pair[0])  # Every peak can only be paired once
            matched2.add(pair[1])  # Every peak can only be paired once
            n_used += 1

    # Normalize by the product of the weighted norms of both spectra.
    weights1 = numpy.power(spec1[:, 0], mz_power) * numpy.power(spec1[:, 1], intensity_power)
    weights2 = numpy.power(spec2[:, 0], mz_power) * numpy.power(spec2[:, 1], intensity_power)
    norm_product = numpy.sqrt(numpy.sum(weights1 ** 2)) * numpy.sqrt(numpy.sum(weights2 ** 2))
    return accumulated / norm_product, n_used
|
efrain2010/matchms | matchms/similarity/ParentmassMatch.py | <reponame>efrain2010/matchms
from matchms.typing import SpectrumType
class ParentmassMatch:
    """Return True if spectrums match in parent mass (within tolerance), and False otherwise."""
    def __init__(self, tolerance: float = 0.1):
        """
        Parameters:
        ----------
        tolerance
            Specify tolerance below which two masses are counted as match.
        """
        self.tolerance = tolerance

    def __call__(self, spectrum: SpectrumType, reference_spectrum: SpectrumType) -> bool:
        """Report whether the two parent masses agree within tolerance."""
        mass = spectrum.get("parent_mass")
        mass_ref = reference_spectrum.get("parent_mass")
        # Both spectra must carry a parent mass for a meaningful comparison.
        assert mass is not None and mass_ref is not None, "Missing parent mass."
        return abs(mass - mass_ref) <= self.tolerance
|
efrain2010/matchms | matchms/calculate_scores_parallel.py | <reponame>efrain2010/matchms
from typing import Callable
from typing import List
from .Scores import Scores
def calculate_scores_parallel(references: List[object], queries: List[object], similarity_function: Callable):
    """Build a Scores object and run the all-vs-all score calculation in parallel."""
    scores = Scores(references=references,
                    queries=queries,
                    similarity_function=similarity_function)
    return scores.calculate_parallel()
|
efrain2010/matchms | matchms/similarity/vector_similarity_functions.py | """Collection of functions for calculating vector-vector similarities."""
import numba
import numpy
@numba.njit
def jaccard_similarity_matrix(references: numpy.ndarray, queries: numpy.ndarray) -> numpy.ndarray:
    """Returns matrix of jaccard indices between all-vs-all vectors of references
    and queries.

    Parameters
    ----------
    references
        Reference vectors as 2D numpy array. Expects that vector_i corresponds to
        references[i, :].
    queries
        Query vectors as 2D numpy array. Expects that vector_i corresponds to
        queries[i, :].

    Returns
    -------
    scores
        Matrix of all-vs-all similarity scores. scores[i, j] will contain the score
        between the vectors references[i, :] and queries[j, :].
    """
    size1 = references.shape[0]
    size2 = queries.shape[0]
    scores = numpy.zeros((size1, size2))
    # Plain double loop; numba compiles it to native code.
    for i in range(size1):
        for j in range(size2):
            scores[i, j] = jaccard_index(references[i, :], queries[j, :])
    return scores
@numba.njit
def dice_similarity_matrix(references: numpy.ndarray, queries: numpy.ndarray) -> numpy.ndarray:
    """Returns matrix of dice similarity scores between all-vs-all vectors of references
    and queries.

    Parameters
    ----------
    references
        Reference vectors as 2D numpy array. Expects that vector_i corresponds to
        references[i, :].
    queries
        Query vectors as 2D numpy array. Expects that vector_i corresponds to
        queries[i, :].

    Returns
    -------
    scores
        Matrix of all-vs-all similarity scores. scores[i, j] will contain the score
        between the vectors references[i, :] and queries[j, :].
    """
    size1 = references.shape[0]
    size2 = queries.shape[0]
    scores = numpy.zeros((size1, size2))
    # Plain double loop; numba compiles it to native code.
    for i in range(size1):
        for j in range(size2):
            scores[i, j] = dice_similarity(references[i, :], queries[j, :])
    return scores
@numba.njit
def cosine_similarity_matrix(references: numpy.ndarray, queries: numpy.ndarray) -> numpy.ndarray:
    """Returns matrix of cosine similarity scores between all-vs-all vectors of
    references and queries.

    Parameters
    ----------
    references
        Reference vectors as 2D numpy array. Expects that vector_i corresponds to
        references[i, :].
    queries
        Query vectors as 2D numpy array. Expects that vector_i corresponds to
        queries[i, :].

    Returns
    -------
    scores
        Matrix of all-vs-all similarity scores. scores[i, j] will contain the score
        between the vectors references[i, :] and queries[j, :].
    """
    size1 = references.shape[0]
    size2 = queries.shape[0]
    scores = numpy.zeros((size1, size2))
    # Plain double loop; numba compiles it to native code.
    for i in range(size1):
        for j in range(size2):
            scores[i, j] = cosine_similarity(references[i, :], queries[j, :])
    return scores
@numba.njit
def jaccard_index(u: numpy.ndarray, v: numpy.ndarray) -> numpy.float64:
    r"""Computes the Jaccard-index (or Jaccard similarity coefficient) of two boolean
    1-D arrays.

    The Jaccard index between 1-D boolean arrays `u` and `v`,
    is defined as

    .. math::

       J(u,v) = \frac{u \cap v}
                {u \cup v}

    Parameters
    ----------
    u :
        Input array. Expects boolean vector.
    v :
        Input array. Expects boolean vector.

    Returns
    -------
    jaccard_similarity
        The Jaccard similarity coefficient between vectors `u` and `v`.
    """
    u_or_v = numpy.bitwise_or(u != 0, v != 0)    # union of nonzero positions
    u_and_v = numpy.bitwise_and(u != 0, v != 0)  # intersection of nonzero positions
    jaccard_score = 0
    if u_or_v.sum() != 0:  # guard: both vectors all-zero -> score stays 0
        jaccard_score = numpy.float64(u_and_v.sum()) / numpy.float64(u_or_v.sum())
    return jaccard_score
@numba.njit
def dice_similarity(u: numpy.ndarray, v: numpy.ndarray) -> numpy.float64:
    r"""Computes the Dice similarity coefficient (DSC) between two boolean 1-D arrays.

    The Dice similarity coefficient between `u` and `v`, is

    .. math::

         DSC(u,v) = \frac{2|u \cap v|}
                    {|u| + |v|}

    Parameters
    ----------
    u
        Input array. Expects boolean vector.
    v
        Input array. Expects boolean vector.

    Returns
    -------
    dice_similarity
        The Dice similarity coefficient between 1-D arrays `u` and `v`.
    """
    u_and_v = numpy.bitwise_and(u != 0, v != 0)
    # Denominator uses sums of absolute values; for boolean input this equals
    # the number of set entries in u plus those in v.
    u_abs_and_v_abs = numpy.abs(u).sum() + numpy.abs(v).sum()
    dice_score = 0
    if u_abs_and_v_abs != 0:  # guard: both vectors all-zero -> score stays 0
        dice_score = 2.0 * numpy.float64(u_and_v.sum()) / numpy.float64(u_abs_and_v_abs)
    return dice_score
@numba.njit
def cosine_similarity(u: numpy.ndarray, v: numpy.ndarray) -> numpy.float64:
    """Calculate cosine similarity score.

    Parameters
    ----------
    u
        Input vector.
    v
        Input vector.

    Returns
    -------
    cosine_similarity
        The Cosine similarity score between vectors `u` and `v`.
    """
    assert u.shape[0] == v.shape[0], "Input vector must have same shape."
    # Manual accumulation loop (rather than numpy.dot) compiles to efficient
    # native code under numba's nopython mode.
    uv = 0
    uu = 0
    vv = 0
    for i in range(u.shape[0]):
        uv += u[i] * v[i]
        uu += u[i] * u[i]
        vv += v[i] * v[i]
    cosine_score = 0
    if uu != 0 and vv != 0:  # guard: zero vector -> score stays 0
        cosine_score = uv / numpy.sqrt(uu * vv)
    return numpy.float64(cosine_score)
|
efrain2010/matchms | matchms/filtering/require_minimum_number_of_peaks.py | from math import ceil
from typing import Optional
from ..typing import SpectrumType
def require_minimum_number_of_peaks(spectrum_in: SpectrumType,
                                    n_required: int = 10,
                                    ratio_required: Optional[float] = None) -> SpectrumType:
    """Spectrum will be set to None when it has fewer peaks than required.

    Args:
    -----
    spectrum_in:
        Input spectrum.
    n_required:
        Number of minimum required peaks. Spectra with fewer peaks will be set
        to 'None'.
    ratio_required:
        Set desired ratio between minimum number of peaks and parent mass.
        Default is None.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    # When a parent mass is known and a ratio is requested, the threshold
    # may scale up with the parent mass.
    threshold = n_required
    parent_mass = spectrum.get("parent_mass", None)
    if parent_mass and ratio_required:
        threshold = max(n_required, int(ceil(ratio_required * parent_mass)))

    return spectrum if spectrum.peaks.intensities.size >= threshold else None
|
efrain2010/matchms | tests/test_spectrum.py | import numpy
from matplotlib import pyplot as plt
from matchms import Spectrum
def _assert_plots_ok(fig, n_plots):
    """Check that a spectrum plot figure has the expected axes and line artists."""
    # Validate the figure object itself before touching its attributes, so a
    # None figure fails with a clear AssertionError rather than a TypeError
    # from len(fig.axes).
    assert fig is not None
    assert hasattr(fig, "axes")
    assert isinstance(fig.axes, list)
    assert len(fig.axes) == n_plots
    assert isinstance(fig.axes[0], plt.Axes)
    assert hasattr(fig.axes[0], "lines")
    assert isinstance(fig.axes[0].lines, list)
    assert len(fig.axes[0].lines) == 11  # one line artist per test-spectrum peak
    assert isinstance(fig.axes[0].lines[0], plt.Line2D)
    assert hasattr(fig.axes[0].lines[0], "_x")
def _create_test_spectrum():
    """Build the default 11-peak test spectrum."""
    default_intensities = numpy.array([1, 1, 5, 5, 5, 5, 7, 7, 7, 9, 9], dtype="float")
    return _create_test_spectrum_with_intensities(default_intensities)
def _create_test_spectrum_with_intensities(intensities):
    """Build a test spectrum with fixed m/z positions and the given intensities."""
    mz_values = numpy.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110], dtype="float")
    return Spectrum(mz=mz_values, intensities=intensities)
def test_spectrum_plot_same_peak_height():
    """Plot with histogram should also work when all peaks have equal height."""
    flat_intensities = numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype="float")
    spectrum = _create_test_spectrum_with_intensities(flat_intensities)
    figure = spectrum.plot(with_histogram=True, intensity_to=10.0)
    _assert_plots_ok(figure, n_plots=2)
def test_spectrum_plot_with_histogram_false():
    """Plot without histogram should produce a single axes."""
    spectrum = _create_test_spectrum()
    figure = spectrum.plot(with_histogram=False)
    _assert_plots_ok(figure, n_plots=1)
def test_spectrum_plot_with_histogram_true():
    """Plot with histogram should produce two axes."""
    spectrum = _create_test_spectrum()
    figure = spectrum.plot(with_histogram=True)
    _assert_plots_ok(figure, n_plots=2)
def test_spectrum_plot_with_histogram_true_and_intensity_limit():
    """Plot with histogram and an explicit intensity limit should produce two axes."""
    spectrum = _create_test_spectrum()
    figure = spectrum.plot(with_histogram=True, intensity_to=10.0)
    _assert_plots_ok(figure, n_plots=2)
def test_spectrum_plot_with_histogram_unspecified():
    """Default plot (no histogram argument) should produce a single axes."""
    spectrum = _create_test_spectrum()
    figure = spectrum.plot()
    _assert_plots_ok(figure, n_plots=1)
|
efrain2010/matchms | matchms/similarity/FingerprintSimilarityParallel.py | from typing import List
from typing import Union
import numpy
from matchms.similarity.vector_similarity_functions import \
cosine_similarity_matrix
from matchms.similarity.vector_similarity_functions import \
dice_similarity_matrix
from matchms.similarity.vector_similarity_functions import \
jaccard_similarity_matrix
from matchms.typing import SpectrumType
class FingerprintSimilarityParallel:
    """Calculate similarity between molecules based on their fingerprints.

    For this similarity measure to work, fingerprints are expected to be derived
    by running the "add_fingerprint()" filter function.
    """
    def __init__(self, similarity_measure: str = "jaccard",
                 set_empty_scores: Union[float, int, str] = "nan"):
        """
        Parameters
        ----------
        similarity_measure:
            Choose similarity measure from "cosine", "dice", "jaccard".
            The default is "jaccard".
        set_empty_scores:
            Define what should be given instead of a similarity score in cases
            where fingerprints are missing. The default is "nan", which will return
            numpy.nan's in such cases.
        """
        self.set_empty_scores = set_empty_scores
        assert similarity_measure in ["cosine", "dice", "jaccard"], "Unknown similarity measure."
        self.similarity_measure = similarity_measure

    def __call__(self, references: List[SpectrumType], queries: List[SpectrumType]) -> numpy.ndarray:
        """Calculate matrix of fingerprint based similarity scores.

        Parameters
        ----------
        references:
            List of reference spectrums.
        queries:
            List of query spectrums.

        Returns
        -------
        Matrix of all-vs-all similarity scores; entries for spectra without a
        fingerprint keep the `set_empty_scores` fill value.
        """
        def collect_fingerprints(spectrums):
            """Collect fingerprints and the indices of spectrums that have one."""
            idx_fingerprints = []
            fingerprints = []
            for index, spectrum in enumerate(spectrums):
                fingerprint = spectrum.get("fingerprint")
                if fingerprint is not None:
                    idx_fingerprints.append(index)
                    fingerprints.append(fingerprint)
            return numpy.asarray(fingerprints), numpy.asarray(idx_fingerprints)

        def create_full_matrix():
            """Create the output matrix, pre-filled with the empty-score value."""
            similarity_matrix = numpy.zeros((len(references), len(queries)))
            if self.set_empty_scores == "nan":
                similarity_matrix[:] = numpy.nan
            elif isinstance(self.set_empty_scores, (float, int)):
                similarity_matrix[:] = self.set_empty_scores
            return similarity_matrix

        fingerprints1, idx_fingerprints1 = collect_fingerprints(references)
        fingerprints2, idx_fingerprints2 = collect_fingerprints(queries)
        assert idx_fingerprints1.size > 0 and idx_fingerprints2.size > 0, \
            "Not enough molecular fingerprints. Apply 'add_fingerprint' filter first."

        # Dispatch to the chosen vectorized similarity and fill only the
        # rows/columns of spectra that actually carry a fingerprint.
        matrix_function = {"jaccard": jaccard_similarity_matrix,
                           "dice": dice_similarity_matrix,
                           "cosine": cosine_similarity_matrix}[self.similarity_measure]
        similarity_matrix = create_full_matrix()
        similarity_matrix[numpy.ix_(idx_fingerprints1,
                                    idx_fingerprints2)] = matrix_function(fingerprints1,
                                                                          fingerprints2)
        return similarity_matrix
|
efrain2010/matchms | matchms/importing/load_from_msp.py | from typing import Generator
import numpy
from ..Spectrum import Spectrum
def parse_msp_file(filename: str) -> Generator[dict, None, None]:
    """Read msp file and parse info in list of spectrum dictionaries.

    Yields one dict per spectrum with keys 'params' (metadata dict),
    'm/z array' and 'intensity array' (numpy arrays).
    """
    # Accumulators for the spectrum currently being read.
    params = {}
    masses = []
    intensities = []

    # Peaks counter. Used to track and count the number of peaks
    peakscount = 0

    with open(filename, 'r') as f:
        for line in f:
            rline = line.rstrip()
            if len(rline) == 0:
                continue
            if ':' in rline:
                # Metadata line of the form "Key: value".
                splitted_line = rline.split(":", 1)
                if splitted_line[0].lower() == 'comments':
                    # The comments value packs extra 'key=value' pairs, each wrapped
                    # in double quotes and separated by spaces; [2:-1] strips the
                    # surrounding ' "' and trailing '"'.
                    for s in splitted_line[1][2:-1].split('" "'):
                        splitted_line = s.split("=", 1)
                        if splitted_line[0].lower() in params.keys() and splitted_line[0].lower() == 'smiles':
                            # Keep a duplicate 'smiles' entry under 'smiles_2'
                            # instead of overwriting the first one.
                            params[splitted_line[0].lower()+"_2"] = splitted_line[1].strip()
                        else:
                            params[splitted_line[0].lower()] = splitted_line[1].strip()
                else:
                    params[splitted_line[0].lower()] = splitted_line[1].strip()
            else:
                # Peak line: "<mz> <intensity>" separated by a space.
                peakscount += 1
                splitted_line = rline.split(" ")
                masses.append(float(splitted_line[0]))
                intensities.append(float(splitted_line[1]))
                # A spectrum is complete once 'num peaks' peak lines were read:
                # yield it and reset the accumulators for the next spectrum.
                # NOTE(review): assumes the 'Num Peaks' metadata line precedes the
                # peak list — a KeyError would follow otherwise; confirm for all inputs.
                if int(params['num peaks']) == peakscount:
                    peakscount = 0
                    yield {
                        'params': (params),
                        'm/z array': numpy.array(masses),
                        'intensity array': numpy.array(intensities)
                    }
                    params = {}
                    masses = []
                    intensities = []
def load_from_msp(filename: str) -> Generator[Spectrum, None, None]:
    """
    MSP file to a :py:class:`~matchms.Spectrum.Spectrum` objects

    Function that reads a .msp file and converts the info
    in :py:class:`~matchms.Spectrum.Spectrum` objects.

    Args:
        filename: path of the msp file

    Yields:
        Yield a spectrum object with the data of the msp file

    Example:

    .. code-block:: python

        from matchms.importing import load_from_msp

        # Download msp file from MassBank of North America repository at https://mona.fiehnlab.ucdavis.edu/
        spectrum = next(load_from_msp("MoNA-export-GC-MS-first10.msp"))
    """
    for parsed in parse_msp_file(filename):
        metadata = parsed.get("params", None)
        mz_values = parsed["m/z array"]
        intensity_values = parsed["intensity array"]

        # Ensure peaks are ordered by m/z before constructing the Spectrum.
        if not numpy.all(mz_values[:-1] <= mz_values[1:]):
            order = numpy.argsort(mz_values)
            mz_values = mz_values[order]
            intensity_values = intensity_values[order]

        yield Spectrum(mz=mz_values, intensities=intensity_values, metadata=metadata)
|
efrain2010/matchms | matchms/similarity/CosineGreedyVectorial.py | from typing import Tuple
import numpy
from matchms.typing import SpectrumType
class CosineGreedyVectorial:
    """Calculate 'cosine similarity score' between two spectra.

    The cosine score aims at quantifying the similarity between two mass spectra.
    The score is calculated by finding best possible matches between peaks
    of two spectra. Two peaks are considered a potential match if their
    m/z ratios lie within the given 'tolerance'.
    The underlying peak assignment problem is here solved in a 'greedy' way.
    This can perform notably faster, but does occasionally deviate slightly from
    a fully correct solution (as with the Hungarian algorithm). In practice this
    will rarely affect similarity scores notably, in particular for smaller
    tolerances.

    For example

    .. testcode::

        import numpy as np
        from matchms import Spectrum
        from matchms.similarity import CosineGreedyVectorial

        spectrum_1 = Spectrum(mz=np.array([100, 150, 200.]),
                              intensities=np.array([0.7, 0.2, 0.1]))
        spectrum_2 = Spectrum(mz=np.array([100, 140, 190.]),
                              intensities=np.array([0.4, 0.2, 0.1]))

        # Use factory to construct a similarity function
        cosine_greedy = CosineGreedyVectorial(tolerance=0.2)
        score, n_matches = cosine_greedy(spectrum_1, spectrum_2)

        print(f"Cosine score is {score:.2f} with {n_matches} matched peaks")

    Should output

    .. testoutput::

        Cosine score is 0.52 with 1 matched peaks
    """
    def __init__(self, tolerance: float = 0.1):
        """
        Parameters
        ----------
        tolerance
            Peaks will be considered a match when <= tolerance apart.
        """
        self.tolerance = tolerance

    def __call__(self, spectrum: SpectrumType, reference_spectrum: SpectrumType) -> Tuple[float, int]:
        """Calculate 'greedy cosine score' between mass spectra.

        Args:
        -----
        spectrum
            First spectrum
        reference_spectrum
            Second spectrum

        Returns:
        --------
        Tuple with cosine score and number of matched peaks.
        """
        def calc_mz_distance():
            # Pairwise m/z difference matrix of shape (n_rows, n_cols):
            # entry [r, c] = spectrum mz[c] - reference mz[r].
            mz_row_vector = spectrum.peaks.mz
            mz_col_vector = numpy.reshape(reference_spectrum.peaks.mz, (n_rows, 1))

            mz1 = numpy.tile(mz_row_vector, (n_rows, 1))
            mz2 = numpy.tile(mz_col_vector, (1, n_cols))

            return mz1 - mz2

        def calc_intensities_product():
            # Pairwise intensity products, same (n_rows, n_cols) layout as above.
            intensities_row_vector = spectrum.peaks.intensities
            intensities_col_vector = numpy.reshape(reference_spectrum.peaks.intensities, (n_rows, 1))

            intensities1 = numpy.tile(intensities_row_vector, (n_rows, 1))
            intensities2 = numpy.tile(intensities_col_vector, (1, n_cols))

            return intensities1 * intensities2

        def calc_intensities_product_within_tolerance():
            # Zero out every pair whose m/z distance exceeds the tolerance.
            mz_distance = calc_mz_distance()
            intensities_product = calc_intensities_product()
            within_tolerance = numpy.absolute(mz_distance) <= self.tolerance
            return numpy.where(within_tolerance, intensities_product, numpy.zeros_like(intensities_product))

        def calc_score():
            # Greedy assignment: visit candidate pairs in order of decreasing
            # intensity product; accepting a pair zeroes its whole row and
            # column so each peak is used at most once.
            r_unordered, c_unordered = intensities_product_within_tolerance.nonzero()
            v_unordered = intensities_product_within_tolerance[r_unordered, c_unordered]
            sortorder = numpy.argsort(v_unordered)[::-1]
            r_sorted = r_unordered[sortorder]
            c_sorted = c_unordered[sortorder]
            score = 0
            n_matches = 0
            for r, c in zip(r_sorted, c_sorted):
                if intensities_product_within_tolerance[r, c] > 0:
                    score += intensities_product_within_tolerance[r, c]
                    n_matches += 1
                    intensities_product_within_tolerance[r, :] = 0
                    intensities_product_within_tolerance[:, c] = 0
            # NOTE(review): normalizes by the larger of the two intensity
            # self-products rather than the geometric mean used by the standard
            # cosine score — this matches the documented example output (0.52).
            return score / max(sum(squared1), sum(squared2)), n_matches

        n_rows = reference_spectrum.peaks.mz.size
        n_cols = spectrum.peaks.mz.size
        intensities_product_within_tolerance = calc_intensities_product_within_tolerance()
        squared1 = numpy.power(spectrum.peaks.intensities, 2)
        squared2 = numpy.power(reference_spectrum.peaks.intensities, 2)
        return calc_score()
|
efrain2010/matchms | matchms/filtering/select_by_mz.py | <filename>matchms/filtering/select_by_mz.py
import numpy
from ..Spikes import Spikes
from ..typing import SpectrumType
def select_by_mz(spectrum_in: SpectrumType, mz_from=0.0, mz_to=1000.0) -> SpectrumType:
    """Keep only peaks whose m/z lies within [mz_from, mz_to]."""
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    assert mz_from <= mz_to, "'mz_from' should be smaller than or equal to 'mz_to'."

    keep = (mz_from <= spectrum.peaks.mz) & (spectrum.peaks.mz <= mz_to)
    spectrum.peaks = Spikes(mz=spectrum.peaks.mz[keep],
                            intensities=spectrum.peaks.intensities[keep])
    return spectrum
|
efrain2010/matchms | tests/test_modified_cosine.py | <gh_stars>0
import numpy
import pytest
from matchms import Spectrum
from matchms.filtering import normalize_intensities
from matchms.similarity import ModifiedCosine
def test_modified_cosine_without_precursor_mz():
    """Test without precursor-m/z. Should raise assertion error."""
    spectrum_a = Spectrum(mz=numpy.array([100, 150, 200, 300, 500, 510, 1100], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"))
    spectrum_b = Spectrum(mz=numpy.array([100, 140, 190, 300, 490, 510, 1090], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"))

    similarity = ModifiedCosine()
    with pytest.raises(AssertionError) as excinfo:
        similarity(normalize_intensities(spectrum_a), normalize_intensities(spectrum_b))

    assert str(excinfo.value) == "Precursor_mz missing. Apply 'add_precursor_mz' filter first."
def test_modified_cosine_with_mass_shift_5():
    """Test modified cosine on two spectra with mass set shift."""
    spectrum_a = Spectrum(mz=numpy.array([100, 150, 200, 300, 500, 510, 1100], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"),
                          metadata={"precursor_mz": 1000.0})
    spectrum_b = Spectrum(mz=numpy.array([55, 105, 205, 304.5, 494.5, 515.5, 1045], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"),
                          metadata={"precursor_mz": 1005.0})

    similarity = ModifiedCosine()
    score, n_matches = similarity(normalize_intensities(spectrum_a),
                                  normalize_intensities(spectrum_b))

    assert score == pytest.approx(0.081966, 0.0001), "Expected different cosine score."
    assert n_matches == 2, "Expected 2 matching peaks."
def test_modified_cosine_with_mass_shift_5_tolerance_2():
    """Test modified cosine on two spectra with mass set shift and tolerance."""
    spectrum_a = Spectrum(mz=numpy.array([100, 200, 299, 300, 301, 500, 510], dtype="float"),
                          intensities=numpy.array([10, 10, 500, 100, 200, 20, 100], dtype="float"),
                          metadata={"precursor_mz": 1000.0})
    spectrum_b = Spectrum(mz=numpy.array([105, 205, 305, 306, 505, 517], dtype="float"),
                          intensities=numpy.array([10, 10, 500, 100, 20, 100], dtype="float"),
                          metadata={"precursor_mz": 1005})

    similarity = ModifiedCosine(tolerance=2.0)
    score, n_matches = similarity(normalize_intensities(spectrum_a),
                                  normalize_intensities(spectrum_b))

    assert score == pytest.approx(0.96788, 0.0001), "Expected different modified cosine score."
    assert n_matches == 6, "Expected 6 matching peaks."
def test_modified_cosine_order_of_input_spectrums():
    """Test modified cosine on two spectra in changing order."""
    spectrum_a = Spectrum(mz=numpy.array([100, 150, 200, 300, 500, 510, 1100], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"),
                          metadata={"precursor_mz": 1000.0})
    spectrum_b = Spectrum(mz=numpy.array([55, 105, 205, 304.5, 494.5, 515.5, 1045], dtype="float"),
                          intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"),
                          metadata={"precursor_mz": 1005.0})
    normalized_a = normalize_intensities(spectrum_a)
    normalized_b = normalize_intensities(spectrum_b)

    similarity = ModifiedCosine(tolerance=2.0)
    score_ab, n_matches_ab = similarity(normalized_a, normalized_b)
    score_ba, n_matches_ba = similarity(normalized_b, normalized_a)

    assert score_ab == score_ba, "Expected that the order of the arguments would not matter."
    assert n_matches_ab == n_matches_ba, "Expected that the order of the arguments would not matter."
|
efrain2010/matchms | matchms/filtering/normalize_intensities.py | <reponame>efrain2010/matchms
import numpy
from matchms.typing import SpectrumType
from ..Spikes import Spikes
def normalize_intensities(spectrum_in: SpectrumType) -> SpectrumType:
    """Normalize intensities of peaks (and losses) to unit height.

    Peak (and, if present, loss) intensities are divided by the maximum peak
    intensity, so the highest peak gets intensity 1.0.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    if len(spectrum.peaks) == 0:
        return spectrum

    max_intensity = numpy.max(spectrum.peaks.intensities)
    if max_intensity <= 0:
        # Avoid division by zero (all intensities zero); the resulting NaN
        # intensities would silently poison downstream similarity scores.
        return spectrum

    # Normalize peak intensities
    mz, intensities = spectrum.peaks
    spectrum.peaks = Spikes(mz=mz, intensities=intensities / max_intensity)

    # Normalize loss intensities using the same peak maximum
    if spectrum.losses is not None and len(spectrum.losses) > 0:
        losses_mz, losses_intensities = spectrum.losses
        spectrum.losses = Spikes(mz=losses_mz, intensities=losses_intensities / max_intensity)

    return spectrum
|
efrain2010/matchms | tests/test_cosine_greedy.py | import numpy
import pytest
from matchms import Spectrum
from matchms.similarity import CosineGreedy
def test_cosine_greedy_without_parameters():
    """Compare output cosine score with own calculation on simple dummy spectrums."""
    spectrum_1 = Spectrum(mz=numpy.array([100, 200, 300, 500, 510], dtype="float"),
                          intensities=numpy.array([0.1, 0.2, 1.0, 0.3, 0.4], dtype="float"))
    spectrum_2 = Spectrum(mz=numpy.array([100, 200, 290, 490, 510], dtype="float"),
                          intensities=numpy.array([0.1, 0.2, 1.0, 0.3, 0.4], dtype="float"))

    score, n_matches = CosineGreedy()(spectrum_1, spectrum_2)

    # Peaks at indices 0, 1 and 4 share mz values within the default tolerance.
    matches = [0, 1, 4]
    numerator = numpy.sum(spectrum_1.peaks.intensities[matches]
                          * spectrum_2.peaks.intensities[matches])
    norm_1 = numpy.sqrt(numpy.sum(spectrum_1.peaks.intensities ** 2))
    norm_2 = numpy.sqrt(numpy.sum(spectrum_2.peaks.intensities ** 2))
    expected_score = numerator / (norm_1 * norm_2)

    assert score == pytest.approx(expected_score, 0.0001), "Expected different cosine score."
    assert n_matches == len(matches), "Expected different number of matching peaks."
def test_cosine_score_greedy_with_tolerance_0_2():
    """Compare output cosine score for tolerance 0.2 with own calculation on simple dummy spectrums."""
    spectrum_1 = Spectrum(mz=numpy.array([100, 299, 300, 301, 510], dtype="float"),
                          intensities=numpy.array([0.1, 1.0, 0.2, 0.3, 0.4], dtype="float"))
    spectrum_2 = Spectrum(mz=numpy.array([100, 300, 301, 511], dtype="float"),
                          intensities=numpy.array([0.1, 1.0, 0.3, 0.4], dtype="float"))

    score, n_matches = CosineGreedy(tolerance=0.2)(spectrum_1, spectrum_2)

    # Matching index pairs (spectrum_1, spectrum_2) within the tolerance of 0.2.
    idx1, idx2 = [0, 2, 3], [0, 1, 2]
    numerator = numpy.sum(spectrum_1.peaks.intensities[idx1]
                          * spectrum_2.peaks.intensities[idx2])
    denominator = numpy.sqrt((spectrum_1.peaks.intensities ** 2).sum()) \
        * numpy.sqrt((spectrum_2.peaks.intensities ** 2).sum())
    expected_score = numerator / denominator

    assert score == pytest.approx(expected_score, 0.0001), "Expected different cosine score."
    assert n_matches == len(idx1), "Expected different number of matching peaks."
def test_cosine_score_greedy_with_tolerance_2_0():
    """Compare output cosine score for tolerance 2.0 with own calculation on simple dummy spectrums."""
    spectrum_1 = Spectrum(mz=numpy.array([100, 299, 300, 301, 510], dtype="float"),
                          intensities=numpy.array([0.1, 1.0, 0.2, 0.3, 0.4], dtype="float"))
    spectrum_2 = Spectrum(mz=numpy.array([100, 300, 301, 511], dtype="float"),
                          intensities=numpy.array([0.1, 1.0, 0.3, 0.4], dtype="float"))

    score, n_matches = CosineGreedy(tolerance=2.0)(spectrum_1, spectrum_2)

    # Matching index pairs (spectrum_1, spectrum_2) within the tolerance of 2.0.
    idx1, idx2 = [0, 1, 3, 4], [0, 1, 2, 3]
    numerator = numpy.sum(spectrum_1.peaks.intensities[idx1]
                          * spectrum_2.peaks.intensities[idx2])
    denominator = numpy.sqrt((spectrum_1.peaks.intensities ** 2).sum()) \
        * numpy.sqrt((spectrum_2.peaks.intensities ** 2).sum())
    expected_score = numerator / denominator

    assert score == pytest.approx(expected_score, 0.0001), "Expected different cosine score."
    assert n_matches == len(idx1), "Expected different number of matching peaks."
def test_cosine_score_greedy_order_of_arguments():
    """Compare cosine scores for A,B versus B,A, which should give the same score."""
    spectrum_1 = Spectrum(mz=numpy.array([100, 200, 299, 300, 301, 500, 510], dtype="float"),
                          intensities=numpy.array([0.02, 0.02, 1.0, 0.2, 0.4, 0.04, 0.2], dtype="float"),
                          metadata=dict())
    spectrum_2 = Spectrum(mz=numpy.array([100, 200, 300, 301, 500, 512], dtype="float"),
                          intensities=numpy.array([0.02, 0.02, 1.0, 0.2, 0.04, 0.2], dtype="float"),
                          metadata=dict())
    similarity = CosineGreedy(tolerance=2.0)

    forward_score, forward_matches = similarity(spectrum_1, spectrum_2)
    backward_score, backward_matches = similarity(spectrum_2, spectrum_1)

    assert forward_score == backward_score, "Expected that the order of the arguments would not matter."
    assert forward_matches == backward_matches, "Expected that the order of the arguments would not matter."
def test_cosine_greedy_with_peak_powers():
    """Compare output cosine score with own calculation on simple dummy spectrums.

    Here testing the options to raise peak intensities to given powers.
    """
    mz_power = 0.5
    intensity_power = 2.0
    spectrum_1 = Spectrum(mz=numpy.array([100, 200, 300, 500, 510], dtype="float"),
                          intensities=numpy.array([0.1, 0.2, 1.0, 0.3, 0.4], dtype="float"))
    spectrum_2 = Spectrum(mz=numpy.array([100, 200, 290, 490, 510], dtype="float"),
                          intensities=numpy.array([0.1, 0.2, 1.0, 0.3, 0.4], dtype="float"))
    cosine_greedy = CosineGreedy(tolerance=1.0, mz_power=mz_power, intensity_power=intensity_power)

    score, n_matches = cosine_greedy(spectrum_1, spectrum_2)

    # Each peak is weighted as (mz ** mz_power) * (intensity ** intensity_power).
    weights_1 = (spectrum_1.peaks.mz ** mz_power) * (spectrum_1.peaks.intensities ** intensity_power)
    weights_2 = (spectrum_2.peaks.mz ** mz_power) * (spectrum_2.peaks.intensities ** intensity_power)
    matches = [0, 1, 4]  # Peaks with matching mz values (within given tolerance)
    numerator = (weights_1[matches] * weights_2[matches]).sum()
    denominator = numpy.sqrt((weights_1 ** 2).sum()) * numpy.sqrt((weights_2 ** 2).sum())
    expected_score = numerator / denominator

    assert score == pytest.approx(expected_score, 0.0001), "Expected different cosine score."
    assert n_matches == len(matches), "Expected different number of matching peaks."
|
efrain2010/matchms | matchms/exporting/__init__.py | from .save_as_json import save_as_json
from .save_as_mgf import save_as_mgf
__all__ = [
"save_as_json",
"save_as_mgf",
]
|
efrain2010/matchms | tests/test_vector_similarity_functions.py | <gh_stars>10-100
"""Test for vector similarity functions. Will run test on both numba compiled
and fully python-based versions of functions."""
import numpy
import pytest
from matchms.similarity.vector_similarity_functions import cosine_similarity
from matchms.similarity.vector_similarity_functions import \
cosine_similarity_matrix
from matchms.similarity.vector_similarity_functions import dice_similarity
from matchms.similarity.vector_similarity_functions import \
dice_similarity_matrix
from matchms.similarity.vector_similarity_functions import jaccard_index
from matchms.similarity.vector_similarity_functions import \
jaccard_similarity_matrix
def test_cosine_similarity_compiled():
    """Test cosine similarity score calculation."""
    va = numpy.array([1, 1, 0, 0])
    vb = numpy.array([1, 1, 1, 1])

    assert cosine_similarity(va, vb) == 2 / numpy.sqrt(2 * 4), "Expected different score."
    assert cosine_similarity(va, va) == 1.0, "Expected different score."
    assert cosine_similarity(vb, vb) == 1.0, "Expected different score."
def test_cosine_similarity_all_zeros_compiled():
    """Test cosine similarity score calculation with empty vector."""
    zeros = numpy.array([0, 0, 0, 0])
    ones = numpy.array([1, 1, 1, 1])

    assert cosine_similarity(zeros, zeros) == 0.0, "Expected different score."
    assert cosine_similarity(zeros, ones) == 0.0, "Expected different score."
    assert cosine_similarity(ones, ones) == 1.0, "Expected different score."
def test_cosine_similarity_matrix_compiled():
    """Test cosine similarity scores calculation."""
    references = numpy.array([[1, 1, 0, 0],
                              [1, 0, 1, 1]])
    queries = numpy.array([[0, 1, 1, 0],
                           [0, 0, 1, 1]])

    scores = cosine_similarity_matrix(references, queries)

    expected = numpy.array([[0.5, 0.],
                            [0.40824829, 0.81649658]])
    assert scores == pytest.approx(expected, 1e-7), "Expected different scores."
def test_cosine_similarity():
    """Test cosine similarity score calculation (pure-python path)."""
    va = numpy.array([1, 1, 0, 0])
    vb = numpy.array([1, 1, 1, 1])

    assert cosine_similarity.py_func(va, vb) == 2 / numpy.sqrt(2 * 4), "Expected different score."
    assert cosine_similarity.py_func(va, va) == 1.0, "Expected different score."
    assert cosine_similarity.py_func(vb, vb) == 1.0, "Expected different score."
def test_cosine_similarity_all_zeros():
    """Test cosine similarity score calculation with empty vector (pure-python path)."""
    zeros = numpy.array([0, 0, 0, 0])
    ones = numpy.array([1, 1, 1, 1])

    assert cosine_similarity.py_func(zeros, zeros) == 0.0, "Expected different score."
    assert cosine_similarity.py_func(zeros, ones) == 0.0, "Expected different score."
    assert cosine_similarity.py_func(ones, ones) == 1.0, "Expected different score."
def test_cosine_similarity_matrix():
    """Test cosine similarity scores calculation (pure-python path)."""
    references = numpy.array([[1, 1, 0, 0],
                              [1, 0, 1, 1]])
    queries = numpy.array([[0, 1, 1, 0],
                           [0, 0, 1, 1]])

    scores = cosine_similarity_matrix.py_func(references, queries)

    expected = numpy.array([[0.5, 0.],
                            [0.40824829, 0.81649658]])
    assert scores == pytest.approx(expected, 1e-7), "Expected different scores."
def test_dice_similarity_compiled():
    """Test dice similarity score calculation."""
    va = numpy.array([1, 1, 0, 0])
    vb = numpy.array([1, 1, 1, 1])

    assert dice_similarity(va, vb) == 2 * 2 / 6, "Expected different score."
    assert dice_similarity(va, va) == 1.0, "Expected different score."
    assert dice_similarity(vb, vb) == 1.0, "Expected different score."
def test_dice_similarity_all_zeros_compiled():
    """Test dice similarity score calculation with empty vector."""
    zeros = numpy.array([0, 0, 0, 0])
    ones = numpy.array([1, 1, 1, 1])

    assert dice_similarity(zeros, zeros) == 0.0, "Expected different score."
    assert dice_similarity(zeros, ones) == 0.0, "Expected different score."
    assert dice_similarity(ones, ones) == 1.0, "Expected different score."
def test_dice_similarity_matrix_compiled():
    """Test dice similarity scores calculation."""
    references = numpy.array([[1, 1, 0, 0],
                              [0, 0, 1, 1]])
    queries = numpy.array([[0, 1, 1, 0],
                           [1, 0, 1, 1]])

    scores = dice_similarity_matrix(references, queries)

    expected = numpy.array([[0.5, 0.4],
                            [0.5, 0.8]])
    assert scores == pytest.approx(expected, 1e-7), "Expected different scores."
def test_dice_similarity():
    """Test dice similarity score calculation (pure-python path)."""
    va = numpy.array([1, 1, 0, 0])
    vb = numpy.array([1, 1, 1, 1])

    assert dice_similarity.py_func(va, vb) == 2 * 2 / 6, "Expected different score."
    assert dice_similarity.py_func(va, va) == 1.0, "Expected different score."
    assert dice_similarity.py_func(vb, vb) == 1.0, "Expected different score."
def test_dice_similarity_all_zeros():
    """Test dice similarity score calculation with empty vector (pure-python path)."""
    zeros = numpy.array([0, 0, 0, 0])
    ones = numpy.array([1, 1, 1, 1])

    assert dice_similarity.py_func(zeros, zeros) == 0.0, "Expected different score."
    assert dice_similarity.py_func(zeros, ones) == 0.0, "Expected different score."
    assert dice_similarity.py_func(ones, ones) == 1.0, "Expected different score."
def test_dice_similarity_matrix():
    """Test dice similarity scores calculation (pure-python path)."""
    references = numpy.array([[1, 1, 0, 0],
                              [0, 0, 1, 1]])
    queries = numpy.array([[0, 1, 1, 0],
                           [1, 0, 1, 1]])

    scores = dice_similarity_matrix.py_func(references, queries)

    expected = numpy.array([[0.5, 0.4],
                            [0.5, 0.8]])
    assert scores == pytest.approx(expected, 1e-7), "Expected different scores."
def test_jaccard_index_compiled():
    """Test jaccard similarity score calculation."""
    va = numpy.array([1, 1, 0, 0])
    vb = numpy.array([1, 1, 1, 1])

    assert jaccard_index(va, vb) == 2 / 4, "Expected different score."
    assert jaccard_index(va, va) == 1.0, "Expected different score."
    assert jaccard_index(vb, vb) == 1.0, "Expected different score."
def test_jaccard_index_all_zeros_compiled():
    """Test jaccard similarity score calculation with empty vector."""
    zeros = numpy.array([0, 0, 0, 0])
    ones = numpy.array([1, 1, 1, 1])

    assert jaccard_index(zeros, zeros) == 0.0, "Expected different score."
    assert jaccard_index(zeros, ones) == 0.0, "Expected different score."
    assert jaccard_index(ones, ones) == 1.0, "Expected different score."
def test_jaccard_similarity_matrix_compiled():
    """Test jaccard similarity scores calculation."""
    references = numpy.array([[1, 1, 0, 0],
                              [0, 0, 1, 1]])
    queries = numpy.array([[0, 1, 1, 0],
                           [1, 0, 1, 1]])

    scores = jaccard_similarity_matrix(references, queries)

    expected = numpy.array([[1/3, 1/4],
                            [1/3, 2/3]])
    assert scores == pytest.approx(expected, 1e-7), "Expected different scores."
def test_jaccard_index():
    """Test jaccard similarity score calculation (pure-python path)."""
    va = numpy.array([1, 1, 0, 0])
    vb = numpy.array([1, 1, 1, 1])

    assert jaccard_index.py_func(va, vb) == 2 / 4, "Expected different score."
    assert jaccard_index.py_func(va, va) == 1.0, "Expected different score."
    assert jaccard_index.py_func(vb, vb) == 1.0, "Expected different score."
def test_jaccard_index_all_zeros():
    """Test jaccard similarity score calculation with empty vector (pure-python path)."""
    zeros = numpy.array([0, 0, 0, 0])
    ones = numpy.array([1, 1, 1, 1])

    assert jaccard_index.py_func(zeros, zeros) == 0.0, "Expected different score."
    assert jaccard_index.py_func(zeros, ones) == 0.0, "Expected different score."
    assert jaccard_index.py_func(ones, ones) == 1.0, "Expected different score."
def test_jaccard_similarity_matrix():
    """Test jaccard similarity scores calculation (pure-python path)."""
    references = numpy.array([[1, 1, 0, 0],
                              [0, 0, 1, 1]])
    queries = numpy.array([[0, 1, 1, 0],
                           [1, 0, 1, 1]])

    scores = jaccard_similarity_matrix.py_func(references, queries)

    expected = numpy.array([[1/3, 1/4],
                            [1/3, 2/3]])
    assert scores == pytest.approx(expected, 1e-7), "Expected different scores."
|
efrain2010/matchms | matchms/importing/load_from_mgf.py | <filename>matchms/importing/load_from_mgf.py<gh_stars>0
from typing import Generator
import numpy
from pyteomics.mgf import MGF
from ..Spectrum import Spectrum
def load_from_mgf(filename: str) -> Generator[Spectrum, None, None]:
    """Load spectrum(s) from mgf file.

    Yields one Spectrum per entry of the given mgf file, with peaks sorted
    by m/z and the mgf "params" section passed on as metadata.
    """
    for record in MGF(filename, convert_arrays=1):
        mz = record["m/z array"]
        intensities = record["intensity array"]
        metadata = record.get("params", None)

        # Only re-order the peaks when they are not already sorted by m/z.
        already_sorted = numpy.all(mz[:-1] <= mz[1:])
        if not already_sorted:
            order = numpy.argsort(mz)
            mz = mz[order]
            intensities = intensities[order]

        yield Spectrum(mz=mz, intensities=intensities, metadata=metadata)
|
efrain2010/matchms | matchms/filtering/harmonize_undefined_inchi.py | <reponame>efrain2010/matchms
from ..typing import SpectrumType
def harmonize_undefined_inchi(spectrum_in: SpectrumType, undefined="", aliases=None):
    """Harmonize missing or placeholder "inchi" entries to `undefined`.

    A missing "inchi" key, or any value listed in `aliases`, is replaced
    by the `undefined` marker on a clone of the input spectrum.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    if aliases is None:
        aliases = [
            "",
            "N/A",
            "NA",
            "n/a"
        ]

    inchi = spectrum.get("inchi")
    if inchi is None or inchi in aliases:
        # Either no "inchi" key at all, or a known placeholder for "undefined".
        spectrum.set("inchi", undefined)

    return spectrum
|
efrain2010/matchms | matchms/filtering/harmonize_undefined_smiles.py | <filename>matchms/filtering/harmonize_undefined_smiles.py
from ..typing import SpectrumType
def harmonize_undefined_smiles(spectrum_in: SpectrumType, undefined="", aliases=None):
    """Harmonize missing or placeholder "smiles" entries to `undefined`.

    A missing "smiles" key, or any value listed in `aliases`, is replaced
    by the `undefined` marker on a clone of the input spectrum.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    if aliases is None:
        aliases = [
            "",
            "N/A",
            "NA",
            "n/a",
            "no data"
        ]

    smiles = spectrum.get("smiles")
    if smiles is None or smiles in aliases:
        # Either no "smiles" key at all, or a known placeholder for "undefined".
        spectrum.set("smiles", undefined)

    return spectrum
|
efrain2010/matchms | matchms/typing.py | <gh_stars>0
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy
from .Spectrum import Spectrum
# A spectrum argument that may also be None (e.g. after a filter dropped it).
SpectrumType = Optional[Spectrum]

# Collections of reference/query objects accepted by score calculations.
ReferencesType = QueriesType = Union[List[object], Tuple[object], numpy.ndarray]

"""Input for a similarity function"""
Sample = Union[Spectrum, None, object]

"""Result of a similarity function"""
Score = Union[float, Tuple[float, int]]

"""Signature of a similarity function"""
SimilarityFunction = Callable[[Sample, Sample], Score]
|
efrain2010/matchms | tests/test_add_parent_mass.py | <gh_stars>0
import numpy
from matchms import Spectrum
from matchms.filtering import add_parent_mass
def test_add_parent_mass():
    """Test if parent mass is correctly derived."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype='float'),
                           intensities=numpy.array([], dtype='float'),
                           metadata={"pepmass": (444.0, 10),
                                     "charge": -1})

    spectrum = add_parent_mass(spectrum_in)

    # Singly deprotonated ion: parent mass = 444.0 + mass of one proton.
    assert numpy.abs(spectrum.get("parent_mass") - 445.0) < .01, "Expected parent mass of about 445.0."
|
efrain2010/matchms | tests/test_repair_inchi_inchikey_smiles.py | import numpy
from matchms import Spectrum
from matchms.filtering import repair_inchi_inchikey_smiles
def test_repair_inchi_inchikey_smiles_clean_inchi_entered_as_inchi():
    given_inchi = "InChI=1/C2H4N4/c3-2-4-1-5-6-2/h1H,(H3,3,4,5,6)/f/h6H,3H2"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"inchi": given_inchi})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchikey") == ""
    assert spectrum.get("smiles") == ""
    assert spectrum.get("inchi") == given_inchi
def test_repair_inchi_inchikey_smiles_clean_inchi_entered_as_inchikey():
    given_inchi = "InChI=1/C2H4N4/c3-2-4-1-5-6-2/h1H,(H3,3,4,5,6)/f/h6H,3H2"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"inchikey": given_inchi})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchikey") == ""
    assert spectrum.get("smiles") == ""
    assert spectrum.get("inchi") == given_inchi
def test_repair_inchi_inchikey_smiles_clean_inchi_entered_as_smiles():
    given_inchi = "InChI=1/C2H4N4/c3-2-4-1-5-6-2/h1H,(H3,3,4,5,6)/f/h6H,3H2"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"smiles": given_inchi})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchikey") == ""
    assert spectrum.get("smiles") == ""
    assert spectrum.get("inchi") == given_inchi
def test_repair_inchi_inchikey_smiles_clean_inchikey_entered_as_inchi():
    given_inchikey = "ABTNALLHJFCFRZ-UHFFFAOYSA-N"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"inchi": given_inchikey})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchi") == ""
    assert spectrum.get("smiles") == ""
    assert spectrum.get("inchikey") == given_inchikey
def test_repair_inchi_inchikey_smiles_clean_inchikey_entered_as_inchikey():
    given_inchikey = "ABTNALLHJFCFRZ-UHFFFAOYSA-N"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"inchikey": given_inchikey})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchi") == ""
    assert spectrum.get("smiles") == ""
    assert spectrum.get("inchikey") == given_inchikey
def test_repair_inchi_inchikey_smiles_clean_inchikey_entered_as_smiles():
    given_inchikey = "ABTNALLHJFCFRZ-UHFFFAOYSA-N"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"smiles": given_inchikey})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchi") == ""
    assert spectrum.get("smiles") == ""
    assert spectrum.get("inchikey") == given_inchikey
def test_repair_inchi_inchikey_smiles_clean_smiles_entered_as_inchi():
    given_smiles = "C[C@H](Cc1ccccc1)N(C)CC#C"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"inchi": given_smiles})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchi") == ""
    assert spectrum.get("inchikey") == ""
    assert spectrum.get("smiles") == given_smiles
def test_repair_inchi_inchikey_smiles_clean_smiles_entered_as_inchikey():
    given_smiles = "C[C@H](Cc1ccccc1)N(C)CC#C"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"inchikey": given_smiles})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchi") == ""
    assert spectrum.get("inchikey") == ""
    assert spectrum.get("smiles") == given_smiles
def test_repair_inchi_inchikey_smiles_clean_smiles_entered_as_smiles():
    given_smiles = "C[C@H](Cc1ccccc1)N(C)CC#C"
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"smiles": given_smiles})

    spectrum = repair_inchi_inchikey_smiles(spectrum_in)

    assert spectrum is not spectrum_in
    assert spectrum.get("inchi") == ""
    assert spectrum.get("inchikey") == ""
    assert spectrum.get("smiles") == given_smiles
def test_repair_inchi_inchikey_smiles_various_inchi_entered_as_smiles():
    """Test a wider variety of different inchis."""
    test_inchis = [
        "1S/C4H11N5.ClH/c1-7-3(5)9-4(6)8-2;/h1-2H3,(H5,5,6,7,8,9);1H",
        "InChI=1S/C11H15N3O2.ClH/c1-12-11(15)16-10-6-4-5-9(7-10)13-8-14(2)3;/h4-8H,1-3H3,(H,12,15);1H/b13-8+;",
        '"InChI=1S/C17O8/c1-9-7-12(19)14(16(20)21)13(8-9)25-15(10(2)23-3)11(5-6-18)17(22)24-4"',
        "InChI=1S/CH3/h1H3",
        "1/2C17H18N3O3S.Mg/c2*1-10-8-18-15(11(2)16(10)23-4)9-24(21)17-19-13-6-5-12(22-3)7-14(13)20-17;/h2*5-8H,9H2,1-4H3;/q2*-1;+2"
    ]
    for given in test_inchis:
        spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                               intensities=numpy.array([], dtype="float"),
                               metadata={"smiles": given})

        spectrum = repair_inchi_inchikey_smiles(spectrum_in)

        # Repaired inchi is prefixed with "InChI=" and stripped of quotes.
        expected_inchi = "InChI=" + given.replace("InChI=", "").replace('"', "")
        assert spectrum is not spectrum_in
        assert spectrum.get("inchi") == expected_inchi
        assert spectrum.get("inchikey") == ""
        assert spectrum.get("smiles") == ""
|
efrain2010/matchms | tests/test_reduce_to_number_of_peaks.py | import numpy
from matchms import Spectrum
from matchms.filtering import reduce_to_number_of_peaks
def test_reduce_to_number_of_peaks_no_params():
    """Without parameters the filter should leave the spectrum untouched."""
    spectrum_in = Spectrum(mz=numpy.array([10, 20, 30, 40], dtype="float"),
                           intensities=numpy.array([0, 1, 10, 100], dtype="float"))

    spectrum = reduce_to_number_of_peaks(spectrum_in)

    assert spectrum == spectrum_in, "Expected no changes."
def test_reduce_to_number_of_peaks_n_max_4():
    """With n_max=4 the lowest-intensity peak should be dropped."""
    spectrum_in = Spectrum(mz=numpy.array([10, 20, 30, 40, 50], dtype="float"),
                           intensities=numpy.array([1, 1, 10, 20, 100], dtype="float"))

    spectrum = reduce_to_number_of_peaks(spectrum_in, n_max=4)

    assert len(spectrum.peaks) == 4, "Expected that only 4 peaks remain."
    assert spectrum.peaks.mz.tolist() == [20., 30., 40., 50.], "Expected different peaks to remain."
def test_reduce_to_number_of_peaks_ratio_given_but_no_parent_mass():
    """A ratio_desired given without parent_mass should not result in changes."""
    spectrum_in = Spectrum(mz=numpy.array([10, 20, 30, 40], dtype="float"),
                           intensities=numpy.array([0, 1, 10, 100], dtype="float"))

    spectrum = reduce_to_number_of_peaks(spectrum_in, n_required=4, ratio_desired=0.1)

    assert spectrum == spectrum_in, "Expected the spectrum to remain unchanged."
def test_reduce_to_number_of_peaks_required_2_desired_2():
    """Here: ratio_desired * parent_mass is 2, same as n_required."""
    spectrum_in = Spectrum(mz=numpy.array([10, 20, 30, 40], dtype="float"),
                           intensities=numpy.array([0, 1, 10, 100], dtype="float"),
                           metadata={"parent_mass": 20})

    spectrum = reduce_to_number_of_peaks(spectrum_in, n_required=2, n_max=4, ratio_desired=0.1)

    assert len(spectrum.peaks) == 2, "Expected that only 2 peaks remain."
    assert spectrum.peaks.mz.tolist() == [30., 40.], "Expected different peaks to remain."
def test_reduce_to_number_of_peaks_required_2_desired_3():
    """Here: ratio_desired * parent_mass is 3, more than n_required."""
    spectrum_in = Spectrum(mz=numpy.array([10, 20, 30, 40], dtype="float"),
                           intensities=numpy.array([0, 1, 10, 100], dtype="float"),
                           metadata={"parent_mass": 20})

    spectrum = reduce_to_number_of_peaks(spectrum_in, n_required=3, n_max=4, ratio_desired=0.1)

    assert len(spectrum.peaks) == 3, "Expected that only 3 peaks remain."
    assert spectrum.peaks.mz.tolist() == [20., 30., 40.], "Expected different peaks to remain."
def test_reduce_to_number_of_peaks_desired_5_check_sorting():
    """Check if mz and intensities order is sorted correctly """
    spectrum_in = Spectrum(mz=numpy.array([10, 20, 30, 40, 50, 60], dtype="float"),
                           intensities=numpy.array([5, 1, 4, 3, 100, 2], dtype="float"))

    spectrum = reduce_to_number_of_peaks(spectrum_in, n_max=5)

    # The weakest peak (mz=20) is removed; remaining peaks keep mz order.
    assert spectrum.peaks.mz.tolist() == [10., 30., 40., 50., 60.], "Expected different peaks to remain."
    assert spectrum.peaks.intensities.tolist() == [5., 4., 3., 100., 2.], "Expected different intensities."
|
efrain2010/matchms | matchms/utils.py | import re
from typing import Optional
import numpy
from rdkit import Chem
from rdkit.Chem import AllChem
def convert_smiles_to_inchi(smiles: str) -> Optional[str]:
    """Return InChI derived from given smiles, or None if conversion fails."""
    return mol_converter(smiles, input_type="smiles", output_type="inchi")
def convert_inchi_to_smiles(inchi: str) -> Optional[str]:
    """Return smiles derived from given InChI, or None if conversion fails."""
    return mol_converter(inchi, input_type="inchi", output_type="smiles")
def convert_inchi_to_inchikey(inchi: str) -> Optional[str]:
    """Return inchikey derived from given InChI, or None if conversion fails."""
    return mol_converter(inchi, input_type="inchi", output_type="inchikey")
def mol_converter(mol_input: str, input_type: str, output_type: str) -> Optional[str]:
    """Convert molecular representations using rdkit.

    Convert from "smiles" or "inchi" to "inchi", "smiles", or "inchikey".

    Parameters
    ----------
    mol_input
        Input data in "inchi" or "smiles" molecular representation.
    input_type
        Define input type: "smiles" for smiles and "inchi" for inchi.
    output_type
        Define output type: "smiles", "inchi", or "inchikey".

    Returns
    -------
    Mol string in output type or None when conversion failure occurs.
    """
    readers = {"inchi": Chem.MolFromInchi,
               "smiles": Chem.MolFromSmiles}
    writers = {"inchi": Chem.MolToInchi,
               "smiles": Chem.MolToSmiles,
               "inchikey": Chem.MolToInchiKey}

    # Surrounding quotes (as found in some metadata fields) are stripped first.
    mol = readers[input_type](mol_input.strip('"'))
    if mol is None:
        return None

    converted = writers[output_type](mol)
    return converted if converted else None
def is_valid_inchi(inchi: str) -> bool:
    """Return True if input string is valid InChI.

    This functions test if string can be read by rdkit as InChI.

    Parameters
    ----------
    inchi
        Input string to test if it has format of InChI.
    """
    if inchi is None:
        return False

    # Cheap regex pre-check before the more expensive rdkit parse.
    candidate = inchi.strip('"')
    if re.search(r"(InChI=1|1)(S\/|\/)[0-9, A-Z, a-z,\.]{2,}\/(c|h)[0-9]", candidate) is None:
        return False

    # Proper chemical test
    return Chem.MolFromInchi(candidate) is not None
def is_valid_smiles(smiles: str) -> bool:
    """Return True if input string is valid smiles.

    This functions test if string can be read by rdkit as smiles.

    Parameters
    ----------
    smiles
        Input string to test if it can be imported as smiles.
    """
    if smiles is None:
        return False

    # Cheap regex pre-check before the more expensive rdkit parse.
    smiles_pattern = r"^([^J][0-9BCOHNSOPIFKcons@+\-\[\]\(\)\\\/%=#$,.~&!|Si|Se|Br|Mg|Na|Cl|Al]{3,})$"
    if re.match(smiles_pattern, smiles) is None:
        return False

    return Chem.MolFromSmiles(smiles) is not None
def is_valid_inchikey(inchikey: str) -> bool:
    """Return True if string has format of inchikey.

    Parameters
    ----------
    inchikey
        Input string to test if it format of an inchikey.
    """
    if inchikey is None:
        return False
    # InChIKey layout: 14 uppercase letters, dash, 10 letters, dash, 1 letter.
    return re.fullmatch(r"[A-Z]{14}-[A-Z]{10}-[A-Z]", inchikey) is not None
def derive_fingerprint_from_smiles(smiles: str, fingerprint_type: str, nbits: int) -> numpy.ndarray:
    """Calculate molecule fingerprint based on given smiles (using rdkit).

    Parameters
    ----------
    smiles
        Input smiles to derive fingerprint from.
    fingerprint_type
        Determine method for deriving molecular fingerprints. Supported choices are 'daylight',
        'morgan1', 'morgan2', 'morgan3'.
    nbits
        Dimension or number of bits of generated fingerprint.

    Returns
    -------
    fingerprint
        Molecular fingerprint, or None when the smiles cannot be parsed.
    """
    mol = Chem.MolFromSmiles(smiles)
    return None if mol is None else mol_to_fingerprint(mol, fingerprint_type, nbits)
def derive_fingerprint_from_inchi(inchi: str, fingerprint_type: str, nbits: int) -> numpy.ndarray:
    """Calculate molecule fingerprint based on given inchi (using rdkit).

    Parameters
    ----------
    inchi
        Input InChI to derive fingerprint from.
    fingerprint_type
        Determine method for deriving molecular fingerprints. Supported choices are 'daylight',
        'morgan1', 'morgan2', 'morgan3'.
    nbits
        Dimension or number of bits of generated fingerprint.

    Returns
    -------
    fingerprint: numpy.array
        Molecular fingerprint, or None when the InChI cannot be parsed.
    """
    mol = Chem.MolFromInchi(inchi)
    return None if mol is None else mol_to_fingerprint(mol, fingerprint_type, nbits)
def mol_to_fingerprint(mol: Chem.rdchem.Mol, fingerprint_type: str, nbits: int) -> numpy.ndarray:
    """Convert rdkit mol (molecule) to molecular fingerprint.

    Parameters
    ----------
    mol
        Input rdkit molecule.
    fingerprint_type
        Determine method for deriving molecular fingerprints.
        Supported choices are 'daylight', 'morgan1', 'morgan2', 'morgan3'.
    nbits
        Dimension or number of bits of generated fingerprint.

    Returns
    -------
    fingerprint
        Molecular fingerprint, or None for an empty fingerprint.
    """
    assert fingerprint_type in ["daylight", "morgan1", "morgan2", "morgan3"], "Unkown fingerprint type given."

    if fingerprint_type == "daylight":
        fp = Chem.RDKFingerprint(mol, fpSize=nbits)
    else:
        # "morganN" encodes the Morgan radius N.
        radius = {"morgan1": 1, "morgan2": 2, "morgan3": 3}[fingerprint_type]
        fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nbits)

    return numpy.array(fp) if fp else None
def looks_like_adduct(adduct):
    """Return True if input string has expected format of an adduct."""
    if not isinstance(adduct, str):
        return False

    cleaned = adduct.strip().replace("*", "")
    # Format 1, e.g. "[2M-H]" or "[2M+Na]+"
    bracketed = r"^\[(([0-9]M)|(M[0-9])|(M)|(MBr)|(MCl))[+-0-9][A-Z0-9\+\-\(\)|(Na)|(Ca)|(Mg)|(Cl)|(Li)|(Br)|(Ser)]{1,}[\]0-9+-]{1,4}"
    # Format 2, e.g. "M+Na+K" or "M+H-H20"
    plain = r"^(([0-9]M)|(M[0-9])|(M)|(MBr)|(MCl))[+-0-9][A-Z0-9\+\-\(\)|(Na)|(Ca)|(Mg)|(Cl)|(Li)|(Br)|(Ser)]{1,}"
    return any(re.search(pattern, cleaned) is not None for pattern in (bracketed, plain))
|
efrain2010/matchms | matchms/filtering/add_parent_mass.py | from ..constants import PROTON_MASS
from ..typing import SpectrumType
def add_parent_mass(spectrum_in: SpectrumType) -> SpectrumType:
    """Add parentmass to metadata (if not present yet).

    Method to calculate the parent mass from given precursor m/z ("pepmass")
    and charge: parent_mass = precursor_mz * |charge| - PROTON_MASS * charge.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    if spectrum.get("parent_mass", None) is None:
        try:
            charge = spectrum.get("charge")
            protons_mass = PROTON_MASS * charge
            precursor_mz = spectrum.get("pepmass")[0]
            precursor_mass = precursor_mz * abs(charge)
            parent_mass = precursor_mass - protons_mass
            if parent_mass:
                spectrum.set("parent_mass", parent_mass)
        except (KeyError, TypeError):
            # Missing metadata getters return None, which makes the arithmetic
            # above raise TypeError (not KeyError) -- catch both so a spectrum
            # without "charge" or "pepmass" is handled gracefully.
            print("Not sufficient spectrum metadata to derive parent mass.")

    return spectrum
|
efrain2010/matchms | tests/test_derive_smiles_from_inchi.py | import numpy
from matchms import Spectrum
from matchms.filtering import derive_smiles_from_inchi
def test_derive_smiles_from_inchi():
    """Smiles should be derived from the inchi field when smiles is empty."""
    metadata = {"inchi": '"InChI=1S/C6H12/c1-2-4-6-5-3-1/h1-6H2"',
                "smiles": ""}
    spectrum_in = Spectrum(mz=numpy.array([], dtype='float'),
                           intensities=numpy.array([], dtype='float'),
                           metadata=metadata)

    spectrum = derive_smiles_from_inchi(spectrum_in)

    assert spectrum.get("smiles") == "C1CCCCC1", "Expected different smiles"
|
efrain2010/matchms | tests/test_normalize_intensities.py | import numpy
from matchms import Spectrum
from matchms.filtering import add_losses
from matchms.filtering import normalize_intensities
def test_normalize_intensities():
    """Peak intensities should be scaled so that the maximum equals 1."""
    mz = numpy.array([10, 20, 30, 40], dtype='float')
    intensities = numpy.array([0, 1, 10, 100], dtype='float')

    spectrum = normalize_intensities(Spectrum(mz=mz, intensities=intensities))

    assert max(spectrum.peaks.intensities) == 1.0, "Expected the spectrum to be scaled to 1.0."
    assert numpy.array_equal(spectrum.peaks.intensities, intensities / 100), "Expected different intensities"
    assert numpy.array_equal(spectrum.peaks.mz, mz), "Expected different peak mz."
def test_normalize_intensities_losses_present():
    """Losses (when present) should be normalized alongside the peaks."""
    mz = numpy.array([10, 20, 30, 40], dtype='float')
    intensities = numpy.array([0, 1, 10, 100], dtype='float')
    spectrum_in = Spectrum(mz=mz, intensities=intensities,
                           metadata={"precursor_mz": 45.0})

    spectrum = normalize_intensities(add_losses(spectrum_in))

    expected_loss_intensities = numpy.array([1., 0.1, 0.01, 0.], dtype='float')
    assert max(spectrum.peaks.intensities) == 1.0, "Expected the spectrum to be scaled to 1.0."
    assert numpy.array_equal(spectrum.peaks.intensities, intensities / 100), "Expected different intensities"
    assert max(spectrum.losses.intensities) == 1.0, "Expected the losses to be scaled to 1.0."
    assert numpy.all(spectrum.losses.intensities == expected_loss_intensities), "Expected different loss intensities"
def test_normalize_intensities_empty_peaks():
    """A spectrum without any peaks should pass through unchanged."""
    empty = numpy.array([], dtype='float')
    spectrum_in = Spectrum(mz=empty, intensities=empty)

    spectrum = normalize_intensities(spectrum_in)

    assert spectrum == spectrum_in, "Spectrum should remain unchanged."
def test_normalize_intensities_empty_spectrum():
    """Passing None should simply return None."""
    assert normalize_intensities(None) is None, "Expected spectrum to be None."
|
efrain2010/matchms | tests/test_ParentmassMatch.py | <gh_stars>0
import numpy
import pytest
from matchms import Spectrum
from matchms.similarity import ParentmassMatch
def test_parentmass_match():
    """With the default tolerance, parent masses 1.0 apart should not match."""
    empty = numpy.array([], dtype="float")
    spectrum_1 = Spectrum(mz=empty, intensities=empty,
                          metadata={"parent_mass": 100.0})
    spectrum_2 = Spectrum(mz=empty, intensities=empty,
                          metadata={"parent_mass": 101.0})

    score = ParentmassMatch()(spectrum_1, spectrum_2)

    assert not score, "Expected different score."
def test_parentmass_match_tolerance2():
    """With a tolerance larger than the difference, the masses should match."""
    empty = numpy.array([], dtype="float")
    spectrum_1 = Spectrum(mz=empty, intensities=empty,
                          metadata={"parent_mass": 100.0})
    spectrum_2 = Spectrum(mz=empty, intensities=empty,
                          metadata={"parent_mass": 101.0})

    score = ParentmassMatch(tolerance=2.0)(spectrum_1, spectrum_2)

    assert score, "Expected different score."
def test_parentmass_match_missing_parentmass():
    """A missing parent mass should raise an informative AssertionError."""
    empty = numpy.array([], dtype="float")
    spectrum_1 = Spectrum(mz=empty, intensities=empty,
                          metadata={"parent_mass": 100.0})
    spectrum_2 = Spectrum(mz=empty, intensities=empty,
                          metadata={})

    with pytest.raises(AssertionError) as msg:
        _ = ParentmassMatch(tolerance=2.0)(spectrum_1, spectrum_2)

    assert "Missing parent mass." in str(msg.value), "Expected particular error message."
|
efrain2010/matchms | tests/test_harmonize_undefined_inchi.py | <filename>tests/test_harmonize_undefined_inchi.py
import numpy
from matchms import Spectrum
from matchms.filtering import harmonize_undefined_inchi
def test_harmonize_undefined_inchi_empty_string():
    """An already-empty inchi entry should stay empty."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"inchi": ""})

    assert harmonize_undefined_inchi(spectrum_in).get("inchi") == ""
def test_harmonize_undefined_inchi_na_1():
    """A lower-case "n/a" inchi entry should be replaced by an empty string."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"inchi": "n/a"})

    assert harmonize_undefined_inchi(spectrum_in).get("inchi") == ""
def test_harmonize_undefined_inchi_na_2():
    """An upper-case "N/A" inchi entry should be replaced by an empty string."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"inchi": "N/A"})

    assert harmonize_undefined_inchi(spectrum_in).get("inchi") == ""
def test_harmonize_undefined_inchi_na_3():
    """An "NA" inchi entry should be replaced by an empty string."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"inchi": "NA"})

    assert harmonize_undefined_inchi(spectrum_in).get("inchi") == ""
def test_harmonize_undefined_inchi_alias_nan():
    """Entries listed in *aliases* should also be treated as undefined."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"inchi": "nan"})

    spectrum = harmonize_undefined_inchi(spectrum_in,
                                         aliases=["nodata", "NaN", "Nan", "nan"])
    assert spectrum.get("inchi") == ""
def test_harmonize_undefined_inchi_alias_nan_undefined_is_na():
    """Matched aliases should be replaced by the custom *undefined* marker."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"inchi": "nan"})

    spectrum = harmonize_undefined_inchi(spectrum_in,
                                         aliases=["nodata", "NaN", "Nan", "nan"],
                                         undefined="n/a")
    assert spectrum.get("inchi") == "n/a"
|
efrain2010/matchms | tests/test_scores.py | import numpy
import pytest
from matchms import Scores
class DummySimilarityFunction:
    """Toy similarity function: concatenate the inputs and report the length."""

    def __init__(self):
        """constructor"""

    def __call__(self, reference, query):
        """Return (reference + query, length of that concatenation)."""
        combined = reference + query
        return combined, len(combined)
class DummySimilarityFunctionParallel:
    """Toy pairwise similarity: (concatenation, length) for every pair."""

    def __init__(self):
        """constructor"""

    def __call__(self, references, queries):
        """Return a references-by-queries matrix of (concatenation, length)."""
        scores = numpy.empty((len(references), len(queries)), dtype="object")
        for row, reference in enumerate(references):
            for col, query in enumerate(queries):
                combined = reference + query
                scores[row, col] = combined, len(combined)
        return scores
def test_scores_calculate():
    """Scores.calculate should fill in scores for every reference/query pair."""
    scores = Scores(references=["r0", "r1", "r2"],
                    queries=["q0", "q1"],
                    similarity_function=DummySimilarityFunction())
    scores.calculate()

    expected = [(r, q, r + q, 4)
                for r in ["r0", "r1", "r2"] for q in ["q0", "q1"]]
    assert list(scores) == expected
def test_scores_calculate_parallel():
    """calculate_parallel should yield the same pairwise results as calculate."""
    scores = Scores(references=["r0", "r1", "r2"],
                    queries=["q0", "q1"],
                    similarity_function=DummySimilarityFunctionParallel())
    scores.calculate_parallel()

    expected = [(r, q, r + q, 4)
                for r in ["r0", "r1", "r2"] for q in ["q0", "q1"]]
    assert list(scores) == expected
def test_scores_init_with_list():
    """List inputs should give a scores matrix of matching shape."""
    scores = Scores(references=["r0", "r1", "r2"],
                    queries=["q0", "q1"],
                    similarity_function=DummySimilarityFunction())

    assert scores.scores.shape == (3, 2)
def test_scores_init_with_numpy_array():
    """numpy array inputs should give a scores matrix of matching shape."""
    scores = Scores(references=numpy.asarray(["r0", "r1", "r2"]),
                    queries=numpy.asarray(["q0", "q1"]),
                    similarity_function=DummySimilarityFunction())

    assert scores.scores.shape == (3, 2)
def test_scores_init_with_queries_dict():
    """Passing a dict as queries should raise an informative AssertionError."""
    with pytest.raises(AssertionError) as msg:
        _ = Scores(references=["r0", "r1", "r2"],
                   queries=dict(k0="q0", k1="q1"),
                   similarity_function=DummySimilarityFunction())

    assert str(msg.value) == "Expected input argument 'queries' to be list or tuple or numpy.ndarray."
def test_scores_init_with_references_dict():
    """Passing a dict as references should raise an informative AssertionError."""
    with pytest.raises(AssertionError) as msg:
        _ = Scores(references=dict(k0="r0", k1="r1", k2="r2"),
                   queries=["q0", "q1"],
                   similarity_function=DummySimilarityFunction())

    assert str(msg.value) == "Expected input argument 'references' to be list or tuple or numpy.ndarray."
def test_scores_init_with_tuple():
    """Tuple inputs should give a scores matrix of matching shape."""
    scores = Scores(references=("r0", "r1", "r2"),
                    queries=("q0", "q1"),
                    similarity_function=DummySimilarityFunction())

    assert scores.scores.shape == (3, 2)
def test_scores_next():
    """Iterating over Scores should yield (reference, query, *score) tuples."""
    scores = Scores(references=["r", "rr", "rrr"],
                    queries=["q", "qq"],
                    similarity_function=DummySimilarityFunction()).calculate()

    expected = [(r, q, r + q, len(r + q))
                for r in ["r", "rr", "rrr"] for q in ["q", "qq"]]
    assert list(scores) == expected
|
efrain2010/matchms | matchms/filtering/select_by_intensity.py | import numpy
from ..Spikes import Spikes
from ..typing import SpectrumType
def select_by_intensity(spectrum_in: SpectrumType, intensity_from=10.0, intensity_to=200.0) -> SpectrumType:
    """Keep only peaks whose intensity lies within [intensity_from, intensity_to]."""
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    assert intensity_from <= intensity_to, "'intensity_from' should be smaller than or equal to 'intensity_to'."
    keep = numpy.logical_and(intensity_from <= spectrum.peaks.intensities,
                             spectrum.peaks.intensities <= intensity_to)
    spectrum.peaks = Spikes(mz=spectrum.peaks.mz[keep],
                            intensities=spectrum.peaks.intensities[keep])
    return spectrum
|
efrain2010/matchms | matchms/filtering/default_filters.py | from ..typing import SpectrumType
from .add_compound_name import add_compound_name
from .add_precursor_mz import add_precursor_mz
from .clean_compound_name import clean_compound_name
from .correct_charge import correct_charge
from .derive_adduct_from_name import derive_adduct_from_name
from .derive_formula_from_name import derive_formula_from_name
from .derive_ionmode import derive_ionmode
from .make_charge_scalar import make_charge_scalar
from .make_ionmode_lowercase import make_ionmode_lowercase
from .set_ionmode_na_when_missing import set_ionmode_na_when_missing
def default_filters(spectrum: SpectrumType) -> SpectrumType:
    """
    Collection of filters that are considered default and that do no require any (factory) arguments.

    Applied in order:

    1. :meth:`~matchms.filtering.make_charge_scalar`
    2. :meth:`~matchms.filtering.make_ionmode_lowercase`
    3. :meth:`~matchms.filtering.set_ionmode_na_when_missing`
    4. :meth:`~matchms.filtering.add_compound_name`
    5. :meth:`~matchms.filtering.derive_adduct_from_name`
    6. :meth:`~matchms.filtering.derive_formula_from_name`
    7. :meth:`~matchms.filtering.clean_compound_name`
    8. :meth:`~matchms.filtering.add_precursor_mz`
    9. :meth:`~matchms.filtering.derive_ionmode`
    10. :meth:`~matchms.filtering.correct_charge`
    """
    spectrum = make_charge_scalar(spectrum)
    spectrum = make_ionmode_lowercase(spectrum)
    spectrum = set_ionmode_na_when_missing(spectrum)
    spectrum = add_compound_name(spectrum)
    spectrum = derive_adduct_from_name(spectrum)
    spectrum = derive_formula_from_name(spectrum)
    spectrum = clean_compound_name(spectrum)
    spectrum = add_precursor_mz(spectrum)
    spectrum = derive_ionmode(spectrum)
    spectrum = correct_charge(spectrum)
    return spectrum
|
efrain2010/matchms | tests/test_fingerprint_similarity_parallel.py | <reponame>efrain2010/matchms<filename>tests/test_fingerprint_similarity_parallel.py
import numpy
import pytest
from matchms import Spectrum
from matchms.similarity import FingerprintSimilarityParallel
def test_fingerprint_similarity_parallel_cosine_empty_fingerprint():
    """Cosine score against an all-zero fingerprint should be 0."""
    def make_spectrum(fingerprint):
        return Spectrum(mz=numpy.array([], dtype="float"),
                        intensities=numpy.array([], dtype="float"),
                        metadata={"fingerprint": numpy.array(fingerprint)})

    spectrum1 = make_spectrum([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    spectrum2 = make_spectrum([0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1])

    similarity_measure = FingerprintSimilarityParallel(similarity_measure="cosine")
    score_matrix = similarity_measure([spectrum1, spectrum2],
                                      [spectrum1, spectrum2])
    assert score_matrix == pytest.approx(numpy.array([[0, 0],
                                                      [0, 1.]]), 0.001), "Expected different values."
def test_fingerprint_similarity_parallel_jaccard_empty_fingerprint():
    """Jaccard score against an all-zero fingerprint should be 0."""
    def make_spectrum(fingerprint):
        return Spectrum(mz=numpy.array([], dtype="float"),
                        intensities=numpy.array([], dtype="float"),
                        metadata={"fingerprint": numpy.array(fingerprint)})

    spectrum1 = make_spectrum([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    spectrum2 = make_spectrum([0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1])

    similarity_measure = FingerprintSimilarityParallel(similarity_measure="jaccard")
    score_matrix = similarity_measure([spectrum1, spectrum2],
                                      [spectrum1, spectrum2])
    assert score_matrix == pytest.approx(numpy.array([[0, 0],
                                                      [0, 1.]]), 0.001), "Expected different values."
def test_fingerprint_similarity_parallel_dice_empty_fingerprint():
    """Dice score against an all-zero fingerprint should be 0."""
    def make_spectrum(fingerprint):
        return Spectrum(mz=numpy.array([], dtype="float"),
                        intensities=numpy.array([], dtype="float"),
                        metadata={"fingerprint": numpy.array(fingerprint)})

    spectrum1 = make_spectrum([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    spectrum2 = make_spectrum([0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1])

    similarity_measure = FingerprintSimilarityParallel(similarity_measure="dice")
    score_matrix = similarity_measure([spectrum1, spectrum2],
                                      [spectrum1, spectrum2])
    assert score_matrix == pytest.approx(numpy.array([[0, 0],
                                                      [0, 1.]]), 0.001), "Expected different values."
def test_fingerprint_similarity_parallel_cosine():
    """Cosine score matrix; spectra without fingerprint should yield nan."""
    def make_spectrum(fingerprint=None):
        metadata = {} if fingerprint is None else {"fingerprint": numpy.array(fingerprint)}
        return Spectrum(mz=numpy.array([], dtype="float"),
                        intensities=numpy.array([], dtype="float"),
                        metadata=metadata)

    spectrum0 = make_spectrum()
    spectrum1 = make_spectrum([0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0])
    spectrum2 = make_spectrum([0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1])

    similarity_measure = FingerprintSimilarityParallel(similarity_measure="cosine")
    score_matrix = similarity_measure([spectrum0, spectrum1, spectrum2],
                                      [spectrum0, spectrum1, spectrum2])
    assert score_matrix[1:, 1:] == pytest.approx(numpy.array([[1., 0.84515425],
                                                              [0.84515425, 1.]]), 0.001), "Expected different values."
    assert numpy.all(numpy.isnan(score_matrix[:, 0])), "Expected 'nan' entries."
    assert numpy.all(numpy.isnan(score_matrix[0, :])), "Expected 'nan' entries."
def test_fingerprint_similarity_parallel_jaccard():
    """Jaccard index matrix; spectra without fingerprint should yield nan."""
    def make_spectrum(fingerprint=None):
        metadata = {} if fingerprint is None else {"fingerprint": numpy.array(fingerprint)}
        return Spectrum(mz=numpy.array([], dtype="float"),
                        intensities=numpy.array([], dtype="float"),
                        metadata=metadata)

    spectrum0 = make_spectrum()
    spectrum1 = make_spectrum([0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0])
    spectrum2 = make_spectrum([0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1])

    similarity_measure = FingerprintSimilarityParallel(similarity_measure="jaccard")
    score_matrix = similarity_measure([spectrum0, spectrum1, spectrum2],
                                      [spectrum0, spectrum1, spectrum2])
    assert score_matrix[1:, 1:] == pytest.approx(numpy.array([[1., 0.71428571],
                                                              [0.71428571, 1.]]), 0.001), "Expected different values."
    assert numpy.all(numpy.isnan(score_matrix[:, 0])), "Expected 'nan' entries."
    assert numpy.all(numpy.isnan(score_matrix[0, :])), "Expected 'nan' entries."
def test_fingerprint_similarity_parallel_dice():
    """Dice score matrix; spectra without fingerprint should yield nan."""
    def make_spectrum(fingerprint=None):
        metadata = {} if fingerprint is None else {"fingerprint": numpy.array(fingerprint)}
        return Spectrum(mz=numpy.array([], dtype="float"),
                        intensities=numpy.array([], dtype="float"),
                        metadata=metadata)

    spectrum0 = make_spectrum()
    spectrum1 = make_spectrum([0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0])
    spectrum2 = make_spectrum([0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1])

    similarity_measure = FingerprintSimilarityParallel(similarity_measure="dice")
    score_matrix = similarity_measure([spectrum0, spectrum1, spectrum2],
                                      [spectrum0, spectrum1, spectrum2])
    assert score_matrix[1:, 1:] == pytest.approx(numpy.array([[1., 0.83333333],
                                                              [0.83333333, 1.]]), 0.001), "Expected different values."
    assert numpy.all(numpy.isnan(score_matrix[:, 0])), "Expected 'nan' entries."
    assert numpy.all(numpy.isnan(score_matrix[0, :])), "Expected 'nan' entries."
def test_fingerprint_similarity_parallel_cosine_set_empty_to_0():
    """Missing fingerprints should score 0 when set_empty_scores=0."""
    def make_spectrum(fingerprint=None):
        metadata = {} if fingerprint is None else {"fingerprint": numpy.array(fingerprint)}
        return Spectrum(mz=numpy.array([], dtype="float"),
                        intensities=numpy.array([], dtype="float"),
                        metadata=metadata)

    spectrum0 = make_spectrum()
    spectrum1 = make_spectrum([0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0])
    spectrum2 = make_spectrum([0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1])

    similarity_measure = FingerprintSimilarityParallel(set_empty_scores=0, similarity_measure="cosine")
    score_matrix = similarity_measure([spectrum0, spectrum1, spectrum2],
                                      [spectrum0, spectrum1, spectrum2])
    assert score_matrix == pytest.approx(numpy.array([[0, 0, 0],
                                                      [0, 1., 0.84515425],
                                                      [0, 0.84515425, 1.]]), 0.001), "Expected different values."
|
efrain2010/matchms | tests/test_harmonize_undefined_smiles.py | import numpy
from matchms import Spectrum
from matchms.filtering import harmonize_undefined_smiles
def test_harmonize_undefined_smiles_empty_string():
    """An already-empty smiles entry should stay empty."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"smiles": ""})

    assert harmonize_undefined_smiles(spectrum_in).get("smiles") == ""
def test_harmonize_undefined_smiles_na_1():
    """A lower-case "n/a" smiles entry should be replaced by an empty string."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"smiles": "n/a"})

    assert harmonize_undefined_smiles(spectrum_in).get("smiles") == ""
def test_harmonize_undefined_smiles_na_2():
    """An upper-case "N/A" smiles entry should be replaced by an empty string."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"smiles": "N/A"})

    assert harmonize_undefined_smiles(spectrum_in).get("smiles") == ""
def test_harmonize_undefined_smiles_na_3():
    """An "NA" smiles entry should be replaced by an empty string."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"smiles": "NA"})

    assert harmonize_undefined_smiles(spectrum_in).get("smiles") == ""
def test_harmonize_undefined_smiles_no_data():
    """A "no data" smiles entry should be replaced by an empty string."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"smiles": "no data"})

    assert harmonize_undefined_smiles(spectrum_in).get("smiles") == ""
def test_harmonize_undefined_smiles_alias_nan():
    """Entries listed in *aliases* should also be treated as undefined."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"smiles": "nan"})

    spectrum = harmonize_undefined_smiles(spectrum_in,
                                          aliases=["nodata", "NaN", "Nan", "nan"])
    assert spectrum.get("smiles") == ""
def test_harmonize_undefined_smiles_alias_nan_undefined_is_na():
    """Matched aliases should be replaced by the custom *undefined* marker."""
    empty = numpy.array([], dtype="float")
    spectrum_in = Spectrum(mz=empty, intensities=empty,
                           metadata={"smiles": "nan"})

    spectrum = harmonize_undefined_smiles(spectrum_in,
                                          aliases=["nodata", "NaN", "Nan", "nan"],
                                          undefined="n/a")
    assert spectrum.get("smiles") == "n/a"
|
efrain2010/matchms | matchms/filtering/add_compound_name.py | from ..typing import SpectrumType
def add_compound_name(spectrum_in: SpectrumType) -> SpectrumType:
    """Add compound_name to correct field: "compound_name" in metadata."""
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    if spectrum.get("compound_name", None) is None:
        # Fall back to "name" or "title" metadata entries (first one found wins).
        for alternative_field in ("name", "title"):
            value = spectrum.get(alternative_field, None)
            if isinstance(value, str):
                spectrum.set("compound_name", value)
                return spectrum
        print("No compound name found in metadata.")
    return spectrum
|
efrain2010/matchms | tests/test_add_fingerprint.py | import numpy
from matchms import Spectrum
from matchms.filtering import add_fingerprint
def test_add_fingerprint_from_smiles():
    """A fingerprint should be derived from the smiles entry."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"smiles": "[C+]#C[O-]"})

    spectrum = add_fingerprint(spectrum_in, nbits=16)

    expected = numpy.array([0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0])
    assert numpy.all(spectrum.get("fingerprint") == expected), "Expected different fingerprint."
def test_add_fingerprint_from_inchi():
    """A fingerprint should be derived from the inchi entry."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={"inchi": "InChI=1S/C2O/c1-2-3"})

    spectrum = add_fingerprint(spectrum_in, nbits=16)

    expected = numpy.array([0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0])
    assert numpy.all(spectrum.get("fingerprint") == expected), "Expected different fingerprint."
def test_add_fingerprint_no_smiles_no_inchi():
    """Without smiles or inchi, no fingerprint can be derived."""
    spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
                           intensities=numpy.array([], dtype="float"),
                           metadata={})

    spectrum = add_fingerprint(spectrum_in)

    assert spectrum.get("fingerprint", None) is None, "Expected None."
def test_add_fingerprint_empty_spectrum():
    """Passing None should simply return None."""
    assert add_fingerprint(None) is None, "Expected None."
|
efrain2010/matchms | matchms/similarity/__init__.py | <reponame>efrain2010/matchms
"""similarity module"""
from .CosineGreedy import CosineGreedy
from .CosineHungarian import CosineHungarian
from .FingerprintSimilarityParallel import FingerprintSimilarityParallel
from .IntersectMz import IntersectMz
from .ModifiedCosine import ModifiedCosine
from .ParentmassMatch import ParentmassMatch
from .ParentmassMatchParallel import ParentmassMatchParallel
__all__ = [
"CosineGreedy",
"CosineHungarian",
"FingerprintSimilarityParallel",
"IntersectMz",
"ModifiedCosine",
"ParentmassMatch",
"ParentmassMatchParallel",
]
|
efrain2010/matchms | matchms/filtering/derive_formula_from_name.py | <filename>matchms/filtering/derive_formula_from_name.py
import re
from ..typing import SpectrumType
def derive_formula_from_name(spectrum_in: SpectrumType, remove_formula_from_name=True) -> SpectrumType:
    """Detect and remove misplaced formula in compound name and add to metadata.

    A molecular formula at the end of the compound name is detected via a
    regular expression (see looks_like_formula).

    Args:
    ----
    spectrum_in: SpectrumType
        Input spectrum.
    remove_formula_from_name: bool
        Remove found formula from compound name if set to True. Default is True.
    """
    if spectrum_in is None:
        return None

    spectrum = spectrum_in.clone()

    name = spectrum.get("compound_name", None)
    if name is None:
        assert spectrum.get("name", None) in [None, ""], ("Found 'name' but not 'compound_name' in metadata",
                                                          "Apply 'add_compound_name' filter first.")
        return spectrum

    # Only the last whitespace-separated token is considered a formula candidate.
    name_parts = name.split(" ")
    candidate = name_parts[-1]
    if not looks_like_formula(candidate):
        return spectrum

    if remove_formula_from_name:
        spectrum.set("compound_name", " ".join(name_parts[:-1]))
        print("Removed formula {} from compound name.".format(candidate))

    # Add found formula to metadata (if not present yet)
    if spectrum.get("formula", None) is None:
        spectrum.set("formula", candidate)
        print("Added formula {} to metadata.".format(candidate))

    return spectrum
def looks_like_formula(formula):
    """Return True if the input string is shaped like a molecular formula.

    A formula is taken to be two or more element tokens, each an upper-case
    letter, an optional lower-case letter, and an optional count.
    """
    element_pattern = r"^(?:[A-Z][a-z]?(?:[1-9][0-9]*)?){2,}$"
    return re.search(element_pattern, formula) is not None
|
rjsmethurst/noticite | noticite.py | #############
### Flask app for citation notification
#############
from __future__ import print_function, division, unicode_literals

# Standard library
import gzip
import httplib
import json
import os
import re
import tarfile
import urllib
from collections import Counter

# Third party
import numpy as np
from flask import Flask, jsonify, render_template, request, send_file, make_response, json

# Module specific
import ads
app = Flask(__name__)
__author__ = ["<NAME> <<EMAIL>>", "<NAME> <<EMAIL>>"]
@app.route('/')
def homepage():
    """Render the landing page for a hard-coded demo paper."""
    bibcode = "2015MNRAS.450..435S"  # ADS bibcode of the showcased paper
    paper, papers, scraped_text, no_citations = get_citations(bibcode)
    return render_template('index.html', paper=paper, text=scraped_text,
                           citations=papers, num=no_citations)
def get_citations(bibcode):
    """Collect citing papers of *bibcode* and scrape their citation contexts.

    Returns a tuple of (paper label, list of citing-paper labels, list of
    scraped tex snippets, citation count).
    """
    papers = list(ads.SearchQuery(q=bibcode))
    paper = papers[0]
    no_citations = paper.citation_count
    cite_list = []
    cite_arxivid = []
    # Was never initialised in the original, causing a NameError on first use.
    text = []
    for cite in paper.citation:
        paper_cite = list(ads.SearchQuery(q=cite))
        # Original used a py2 print statement (a SyntaxError under the file's
        # print_function future import) and the typo ".indentifier".
        print(paper_cite[0].identifier)
        cite_list.append(paper_cite[0].first_author.split(',')[0] + ' et al. ' + paper_cite[0].year)
        for entry in paper_cite[0].identifier:
            # arXiv ids look like "1234.56789".
            if re.match(r"^[0-9]{4}\.+[0-9]{5}", entry):
                cite_arxivid.append(entry)
    for entry in cite_arxivid:
        # NOTE(review): the original passed paper[0].first_author, but paper is
        # already a single result object.
        text.append(get_article(entry, first_author=paper.first_author, clobber=False))
    return paper.first_author.split(',')[0] + ' et al. ' + paper.year, cite_list, text, no_citations
def get_article(arxiv_id, first_author, clobber=False):
    """Download the arXiv source of *arxiv_id* and return the tex snippet
    surrounding the reference to *first_author*.

    The tarball is cached in DATA_DIR; pass clobber=True to force a fresh
    download. Returns None when the download fails.

    NOTE(review): DATA_DIR and requests are used here but never defined or
    imported anywhere in this file -- confirm where they are meant to come
    from; the function cannot run as-is.
    """
    # Try to load cached file.
    fn = "{0}.tar.gz".format(arxiv_id)
    local = os.path.join(DATA_DIR, fn)

    # Download the remote file, preferring the v2 source and falling back to v1.
    if clobber or not os.path.exists(local):
        url = "http://arxiv.org/e-print/{0}v2".format(arxiv_id)
        r = requests.get(url)
        code = r.status_code
        if code != requests.codes.ok:
            url = "http://arxiv.org/e-print/{0}v1".format(arxiv_id)
            r = requests.get(url)
            code = r.status_code
            if code != requests.codes.ok:
                print("Download of {0} failed with code: {1}".format(url, code))
                return None
        with open(local, "wb") as f:
            f.write(r.content)

    # Collect the .tex sources from the archive (or the single gzipped file).
    tex = []
    if tarfile.is_tarfile(local):
        with tarfile.open(local) as f:
            for member in f:
                if os.path.splitext(member.name)[1] == ".tex":
                    tex.append(f.extractfile(member).read())
    else:
        with gzip.open(local) as f:
            tex.append(f.read())

    # Collect the bibliography (.bbl/.bib) files the same way.
    bib = []
    if tarfile.is_tarfile(local):
        with tarfile.open(local) as f:
            for member in f:
                if os.path.splitext(member.name)[1] in (".bbl", ".bib"):
                    bib.append(f.extractfile(member).read())
    else:
        with gzip.open(local) as f:
            bib.append(f.read())

    # Locate the citation key for first_author (in the bibliography when one
    # was found, otherwise in the tex itself) and cut out ~2000 characters of
    # tex around the place where that key is used.
    source = bib[0] if len(bib) != 0 else tex[0]
    idx = source.find(first_author)
    bibtext = source[idx:idx + 1000].split(']')[1]
    bibref = re.findall("{.*?}", bibtext)[0]
    idxbib = tex[0].find(bibref[1:-1])
    splicetex = tex[0][idxbib - 1000:idxbib + 1000]
    # An unreachable trailing block that referenced an undefined parse_tex()
    # was removed here.
    return splicetex
@app.route('/showSignUp')
def showsignup():
    """Serve the static sign-up page."""
    return render_template('signup.html')
@app.route('/signUp', methods=['POST'])
def signup():
    """Validate the sign-up form and answer with a JSON status snippet."""
    _name = request.form['inputName']
    _email = request.form['inputEmail']
    # NOTE(review): the original key was lost to anonymisation ("<PASSWORD>");
    # "inputPassword" follows the naming of the other form fields -- confirm
    # against templates/signup.html.
    _password = request.form['inputPassword']
    if _name and _email and _password:
        return json.dumps({'html': '<span>All fields good !!</span>'})
    return json.dumps({'html': '<span>Enter the required fields</span>'})
if __name__ == '__main__':
    # Development entry point; debug mode must not stay enabled in production.
    app.debug = True
    app.run()
|
rjsmethurst/noticite | beers_for_citations.py | # coding: utf-8
""" Beers for citations. The new underground currency. """
__author__ = "<NAME> <<EMAIL>>"
# Standard library
import httplib
import json
import os
import urllib
from collections import Counter
# Module specific
import ads
# Couple of mutable variables for the reader
author_query = "^Casey, <NAME>."
records_filename = "citations.json"

my_papers = ads.search(author_query)

# How many citations did we have last time this ran?
if not os.path.exists(records_filename):
    all_citations_last_time = {"total": 0}
else:
    with open(records_filename, "r") as fp:
        all_citations_last_time = json.load(fp)

# Build a dictionary with all of our citations
bibcodes, citations = zip(*[(paper.bibcode, paper.citation_count)
                            for paper in my_papers])
all_citations = dict(zip(bibcodes, citations))
all_citations["total"] = sum(citations)

# Check if we have more citations than last time, but only if we have run
# this script beforehand, too. Otherwise we'll get 1,000 notifications on
# the first time the script has been run
if all_citations["total"] > all_citations_last_time["total"] \
        and len(all_citations_last_time) > 1:

    # Someone has cited us since the last time we checked.
    newly_cited_papers = {}
    for bibcode, citation_count in zip(bibcodes, citations):
        # Papers that did not exist on the previous run default to zero old
        # citations (direct indexing raised KeyError for new papers).
        new_citations = citation_count - all_citations_last_time.get(bibcode, 0)
        if new_citations > 0:
            # Who were the first authors for the new papers that cited us?
            citing_papers = ads.search("citations(bibcode:{0})"
                                       .format(bibcode), rows=new_citations)
            newly_cited_papers[bibcode] = [paper.author[0] for paper in citing_papers]

    # Ok, so now we have a dictionary (called 'newly_cited_papers') that
    # contains the bibcodes and names of authors who we owe beers to. Turn
    # it into a count of beers owed per author.
    beers_owed = Counter(sum(newly_cited_papers.values(), []))

    # Let's not buy ourself beers.
    if my_papers[0].author[0] in beers_owed:
        del beers_owed[my_papers[0].author[0]]

    for author, num_of_beers_owed in beers_owed.iteritems():
        readable_name = " ".join([name.strip() for name in author.split(",")[::-1]])
        this_many_beers = "{0} beers".format(num_of_beers_owed) \
            if num_of_beers_owed > 1 else "a beer"
        # The original split this statement across two lines without any
        # continuation, which is a SyntaxError.
        message = "You owe {0} {1} because they just cited you!".format(
            readable_name, this_many_beers)
        print(message)

        if "PUSHOVER_TOKEN" not in os.environ \
                or "PUSHOVER_USER" not in os.environ:
            print("No pushover.net notification sent because PUSHOVER_TOKEN or"
                  " PUSHOVER_USER environment variables not found.")
            continue

        conn = httplib.HTTPSConnection("api.pushover.net:443")
        conn.request("POST", "/1/messages.json",
                     urllib.urlencode({
                         "token": os.environ["PUSHOVER_TOKEN"],
                         "user": os.environ["PUSHOVER_USER"],
                         "message": message
                     }), {"Content-type": "application/x-www-form-urlencoded"})
        conn.getresponse()

else:
    print("No new citations!")

# Save these citations
with open(records_filename, "w") as fp:
    json.dump(all_citations, fp)
|
l4rz/first-order-model | webrtc/webrtc-server-cursed.py | <filename>webrtc/webrtc-server-cursed.py
import argparse
import asyncio
import json
import logging
import os
import ssl
import uuid
from torchvision import transforms
from hashlib import sha1
from PIL import Image
import cv2
from aiohttp import web
from av import VideoFrame
from aiortc import MediaStreamTrack, RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder
import matplotlib
matplotlib.use('Agg')
import os, sys
import yaml
from argparse import ArgumentParser
from tqdm import tqdm
import imageio
import numpy as np
from skimage.transform import resize
from skimage import img_as_ubyte
import torch
from sync_batchnorm import DataParallelWithCallback
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from animate import normalize_kp
from scipy.spatial import ConvexHull
ROOT = os.path.dirname(__file__)
logger = logging.getLogger("pc")
pcs = set()
idx = 0
# cursed routines
def load_checkpoints(config_path, checkpoint_path):
    """Build the generator and keypoint detector from a YAML config and
    restore their weights from *checkpoint_path*.

    Returns:
        (generator, kp_detector): both on CUDA, wrapped in
        DataParallelWithCallback and switched to eval mode.
    """
    with open(config_path) as f:
        # yaml.load() without an explicit Loader is deprecated and can execute
        # arbitrary Python on untrusted input; the model config is plain data,
        # so the safe loader is sufficient and equivalent.
        config = yaml.safe_load(f)

    generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
                                        **config['model_params']['common_params'])
    generator.cuda()

    kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
                             **config['model_params']['common_params'])
    kp_detector.cuda()

    checkpoint = torch.load(checkpoint_path)
    generator.load_state_dict(checkpoint['generator'])
    kp_detector.load_state_dict(checkpoint['kp_detector'])

    generator = DataParallelWithCallback(generator)
    kp_detector = DataParallelWithCallback(kp_detector)

    generator.eval()
    kp_detector.eval()

    return generator, kp_detector
class VideoTransformTrack(MediaStreamTrack):
    """
    A video stream track that transforms frames from an another track.

    For transform == "cartoon" each incoming frame is cropped to 256x256 and,
    once 100 frames have been seen, animated through the first-order-motion
    generator; other transform values pass the cropped frame through unchanged.
    """
    kind = "video"

    def __init__(self, track, transform):
        # track: upstream MediaStreamTrack supplying raw frames.
        # transform: selector string sent by the client ("cartoon" or other).
        super().__init__()  # don't forget this!
        self.track = track
        self.transform = transform

    async def recv(self):
        # NOTE(review): relies on module-level globals set up in __main__
        # (models, source image and its keypoints) plus a shared frame counter;
        # with more than one peer connection idx/kp1 would be shared between
        # clients -- confirm single-client use is intended.
        global generator
        global kp_detector
        global source
        global kp_source
        global kp1
        global idx
        frame = await self.track.recv()
        idx+=1
        if self.transform == "cartoon":
            img = frame.to_ndarray(format="rgb24")
            rimg = cv2.resize(img,(int(342),int(256))) # downsample 640x480 to 342x256 while preserving the AR
            # crop the 256x256 square (x offset 43 centers it) for the models
            simg = rimg[0:0+256, 43:43+256]
            # sharpen?
            #kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
            #simg = cv2.filter2D(simg, -1, kernel)
            # enhance?
            #lab= cv2.cvtColor(simg, cv2.COLOR_BGR2LAB)
            #l, a, b = cv2.split(lab)
            #clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
            #cl = clahe.apply(l)
            #limg = cv2.merge((cl,a,b))
            #simg = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
            # prepare unprocessed frame
            new_frame = VideoFrame.from_ndarray(simg, format="rgb24")
            new_frame.pts = frame.pts
            new_frame.time_base = frame.time_base
            # driving frame should be of torch.Size([1, 3, 256, 256])
            im = torch.tensor(simg[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2).cuda()
            driving_frame = im
            # use 100th frame for kp_driving_initial
            if idx == 100:
                print ('shapes: img', img.shape, 'rimg', rimg.shape, 'simg:', simg.shape )
                kp1 = kp_detector(driving_frame)
                #imageio.imsave(os.path.join('tmp', 'driving.png'), simg)
            #imageio.imsave(os.path.join('tmp', 'real'+str(idx)+'.png'), simg)
            if idx > 100: # apply xform after the 100th frame
                kp_driving = kp_detector(driving_frame)
                kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
                                       #kp_driving_initial=kp_driving_initial,
                                       kp_driving_initial=kp1, # None doesnt work when adapt_movement_scale or use_relative_movement set to True
                                       use_relative_movement=False,
                                       use_relative_jacobian=False,
                                       adapt_movement_scale=False)
                # out['prediction']: torch.Size([1, 3, 256, 256]) dtype torch.float32
                with torch.no_grad():
                    out = generator(source, kp_source=kp_source, kp_driving=kp_norm) #was kp_norm
                prediction = np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]
                newimg = (255 * prediction).astype(np.uint8)
                # just to test conversion of image to tensor and back, like latency of the code
                #newimg = np.transpose(im.data.cpu().numpy(), [0, 2, 3, 1])[0].astype(np.uint8)
                #newimg = cv2.resize(newimg,(int(256),int(256)))
                new_frame = VideoFrame.from_ndarray(newimg, format="rgb24")
                new_frame.pts = frame.pts
                new_frame.time_base = frame.time_base
                return new_frame
            else:
                return new_frame
        # NOTE(review): when transform != "cartoon" the method falls through
        # and implicitly returns None -- presumably the client only ever
        # requests "cartoon"; confirm.
async def index(request):
    """Serve the demo HTML page."""
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(os.path.join(ROOT, "index.html"), "r") as fp:
        content = fp.read()
    return web.Response(content_type="text/html", text=content)
async def javascript(request):
    """Serve the browser-side WebRTC client script."""
    # Context manager closes the file handle promptly (the original leaked it
    # until garbage collection).
    with open(os.path.join(ROOT, "client.js"), "r") as fp:
        content = fp.read()
    return web.Response(content_type="application/javascript", text=content)
async def offer(request):
    """HTTP POST /offer: perform the WebRTC SDP offer/answer exchange.

    Creates an RTCPeerConnection for the client, attaches the transforming
    video track, and returns the SDP answer as JSON.
    """
    params = await request.json()
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

    pc = RTCPeerConnection()
    pc_id = "PeerConnection(%s)" % uuid.uuid4()
    pcs.add(pc)

    def log_info(msg, *args):
        # Prefix every log line with this connection's id.
        logger.info(pc_id + " " + msg, *args)

    log_info("Created for %s", request.remote)

    # prepare local media
    #player = MediaPlayer(os.path.join(ROOT, "demo-instruct.wav"))
    #if args.write_audio:
    #    recorder = MediaRecorder(args.write_audio)
    #else:
    # incoming audio is discarded; only video is processed
    recorder = MediaBlackhole()

    @pc.on("datachannel")
    def on_datachannel(channel):
        @channel.on("message")
        def on_message(message):
            # simple keep-alive: echo "ping..." back as "pong..."
            if isinstance(message, str) and message.startswith("ping"):
                channel.send("pong" + message[4:])

    @pc.on("iceconnectionstatechange")
    async def on_iceconnectionstatechange():
        log_info("ICE connection state is %s", pc.iceConnectionState)
        if pc.iceConnectionState == "failed":
            await pc.close()
            pcs.discard(pc)

    @pc.on("track")
    def on_track(track):
        log_info("Track %s received", track.kind)
        if track.kind == "audio":
            #pc.addTrack(player.audio)
            recorder.addTrack(track)
        elif track.kind == "video":
            # echo the client's video back through the face-animation filter
            local_video = VideoTransformTrack(
                track, transform=params["video_transform"]
            )
            pc.addTrack(local_video)

        @track.on("ended")
        async def on_ended():
            log_info("Track %s ended", track.kind)
            await recorder.stop()

    # handle offer
    await pc.setRemoteDescription(offer)
    await recorder.start()

    # send answer
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    return web.Response(
        content_type="application/json",
        text=json.dumps(
            {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
        ),
    )
async def on_shutdown(app):
    """Close every open peer connection when the web app shuts down."""
    await asyncio.gather(*(conn.close() for conn in pcs))
    pcs.clear()
if __name__ == "__main__":
    # NOTE(review): `global` at module scope is a no-op -- these names are
    # module globals anyway; VideoTransformTrack.recv() reads them.
    global generator
    global kp_detector
    global source
    global kp_source
    parser = argparse.ArgumentParser(
        description="WebRTC audio / video / data-channels demo"
    )
    parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)")
    parser.add_argument("--key-file", help="SSL key file (for HTTPS)")
    parser.add_argument(
        "--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
    )
    parser.add_argument("--verbose", "-v", action="count")
    parser.add_argument("--write-audio", help="Write received audio to a file")
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    if args.cert_file:
        ssl_context = ssl.SSLContext()
        ssl_context.load_cert_chain(args.cert_file, args.key_file)
    else:
        ssl_context = None
    # kp1 (initial driving keypoints) is filled in by recv() at frame 100
    kp1 = None
    app = web.Application()
    app.on_shutdown.append(on_shutdown)
    app.router.add_get("/", index)
    app.router.add_get("/client.js", javascript)
    app.router.add_post("/offer", offer)
    # initialize the model
    parser.set_defaults(relative=False)
    parser.set_defaults(adapt_scale=False)
    # still image that will be animated by the client's facial motion
    source_image = imageio.imread('mlma.jpg')
    source_image = resize(source_image, (256, 256))[..., :3]
    print('Loading ckpt')
    generator, kp_detector = load_checkpoints(config_path='config/vox-adv-256.yaml', checkpoint_path='vox-adv-cpk.pth.tar')
    source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2).cuda()
    kp_source = kp_detector(source)
    print('Loading ckpt done')
    # pass generator, kp_detector, source, kp_source as globals
    web.run_app(app, access_log=None, port=args.port, ssl_context=ssl_context)
|
WhiteD3vil/Multi-Camera-Person-Re-Identification | mtmct_reid/model.py | import torch
import torch.nn as nn
from torch.nn import init
# Replace with custom model
from torchvision.models import resnet50
def weights_init_kaiming(layer):
    """He-initialize conv/linear weights; standard affine init for BatchNorm1d.

    Exact type checks (not isinstance) are deliberate so subclasses keep
    their own initialization.
    """
    layer_type = type(layer)
    if layer_type in (nn.Conv1d, nn.Conv2d):
        init.kaiming_normal_(layer.weight.data, mode='fan_in')
    elif layer_type is nn.Linear:
        init.kaiming_normal_(layer.weight.data, mode='fan_out')
        init.constant_(layer.bias.data, 0.0)
    elif layer_type is nn.BatchNorm1d:
        init.normal_(layer.weight.data, mean=1.0, std=0.02)
        init.constant_(layer.bias.data, 0.0)
def weights_init_classifier(layer):
    """Initialize classifier Linear layers: tiny-variance normal weights, zero bias."""
    if type(layer) is nn.Linear:
        init.normal_(layer.weight.data, std=0.001)
        init.constant_(layer.bias.data, 0.0)
class ClassifierBlock(nn.Module):
    """Bottleneck + classifier head: Linear -> BN -> [activation] -> [dropout] -> Linear.

    Args:
        input_dim: size of the incoming feature vector.
        num_classes: number of output classes.
        dropout: append Dropout(p=0.5) after the bottleneck when True.
        activation: 'relu', 'lrelu', or None for no activation.
        num_bottleneck: width of the bottleneck layer.
    """

    def __init__(self, input_dim: int, num_classes: int,
                 dropout: bool = True, activation: str = None,
                 num_bottleneck=512):
        super().__init__()
        self._layers(input_dim, num_classes, dropout,
                     activation, num_bottleneck)

    def _layers(self, input_dim, num_classes, dropout,
                activation, num_bottleneck):
        block = [
            nn.Linear(input_dim, num_bottleneck),
            nn.BatchNorm1d(num_bottleneck)
        ]
        if activation == 'relu':
            # BUG FIX: was `nn.ReLU` (the class object, not an instance);
            # nn.Sequential requires Module instances, so activation='relu'
            # raised TypeError in the original.
            block += [nn.ReLU()]
        elif activation == 'lrelu':
            block += [nn.LeakyReLU(0.1)]
        if dropout:
            block += [nn.Dropout(p=0.5)]
        block = nn.Sequential(*block)
        block.apply(weights_init_kaiming)

        classifier = nn.Linear(num_bottleneck, num_classes)
        classifier.apply(weights_init_classifier)

        self.block = block
        self.classifier = classifier

    def forward(self, x):
        x = self.block(x)
        x = self.classifier(x)
        return x
class PCB(nn.Module):
    """Part-based Convolutional Baseline for person re-identification.

    A ResNet-50 backbone whose final feature map is pooled into
    ``num_parts`` horizontal stripes; during training each stripe is fed
    through its own ClassifierBlock.
    """

    def __init__(self, num_classes):
        super().__init__()
        self.num_parts = 6  # Parameterize?
        self._layers(num_classes)

    def _layers(self, num_classes):
        # NOTE: downloads ImageNet weights on first use.
        self.model = resnet50(pretrained=True)
        # Delete final fc layer
        del self.model.fc
        # Remove downsampling by changing stride to 1
        self.model.layer4[0].downsample[0].stride = (1, 1)
        self.model.layer4[0].conv2.stride = (1, 1)
        # Replace final layers
        self.model.avgpool = nn.AdaptiveAvgPool2d((self.num_parts, 1))
        self.dropout = nn.Dropout(p=0.5)
        self.model = nn.Sequential(*list(self.model.children()))
        # Define 6 separate layers for 6 parts!
        for i in range(self.num_parts):
            name = 'classifier'+str(i)
            setattr(self, name, ClassifierBlock(
                2048, num_classes, True, 'lrelu', 256))

    def forward(self, x, training=False):
        x = self.model(x)
        # NOTE(review): squeeze() also drops the batch axis when batch size
        # is 1 -- confirm callers never feed single-sample batches.
        x = torch.squeeze(x)
        # Create a hook to identify whether
        # it is training phase or testing phase
        if training:
            x = self.dropout(x)
            part = []
            strips_out = []
            for i in range(self.num_parts):
                part.append(x[:, :, i])
                name = 'classifier' + str(i)
                classifier = getattr(self, name)
                part_out = classifier(part[i])
                strips_out.append(part_out)
            return strips_out
        return x # return fc features
|
krig/utils | gravatar.py | <reponame>krig/utils
# gravatar.py Copyright (C) 2008 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os, time, urllib
try:
from hashlib import md5
except ImportError:
import md5 as md5lib
md5 = md5lib.new
_BASEPATH = "~/.cache/pygravatar/%s_%d"
_OLDAGE = 24 # age in hours
def _makemd5(email):
return md5(email.lower()).hexdigest()
def _makename(emailmd5, size):
    """Return the cache-file path for the given hash/size pair."""
    relative = _BASEPATH % (emailmd5, size)
    return os.path.expanduser(relative)
def _older(filename, hours):
mtime = os.path.getmtime(filename)
return (time.time() - mtime) > (hours * 3600)
def _dirty(filename):
    """True when the cached file is missing or older than _OLDAGE hours."""
    if not os.path.exists(filename):
        return True
    return _older(filename, _OLDAGE)
def _makeurl(emailmd5, size):
    """Construct the gravatar.com URL for the given hash and pixel size.

    'd=monsterid' asks Gravatar for a generated fallback image.
    """
    query = urllib.urlencode({'d': 'monsterid', 's': str(size)})
    return "http://www.gravatar.com/avatar/" + emailmd5 + '?' + query
def get(email = "<EMAIL>",
        size = 80):
    """Looks in local cache if file exists
    and is newer than TIMEOUT. If not, fetches
    a new image and puts it in the cache.
    Returns local path to image."""
    emailmd5 = _makemd5(email)
    filename = _makename(emailmd5, size)
    if _dirty(filename):
        # ensure the cache directory exists; ignore "already exists" errors
        try:
            os.makedirs(os.path.split(filename)[0])
        except os.error:
            pass
        url = _makeurl(emailmd5, size)
        # Python 2 API; on Python 3 this lives in urllib.request
        urllib.urlretrieve(url, filename)
    return filename
if __name__ == "__main__":
    # requires PIL installed
    # NOTE: Python 2 only (print statement, top-level `import Image`).
    import Image
    im = Image.open(get(email="<EMAIL>"))
    print im.format, im.size, im.mode
|
krig/utils | multidict.py | # multidict.py, Copyright (c) <NAME>, 2009
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
# a map over a pile of objects with multiple indices
# an index is defined by an index function
# each index gets a by_<id> member in the set, which
# is simply a dict using a particular key function to map the object
class _index(dict):
def __init__(self, mdict):
dict.__init__(self)
self._mdict = mdict
def __setitem__(self, name, value):
raise AttributeError("setting values is not allowed on multidict indices, use add() on the multidict")
def __delitem__(self, item):
itm = dict.__getitem__(self, item)
self._mdict.remove(itm)
def priv_set(self, name, value):
dict.__setitem__(self, name, value)
def priv_del(self, item):
dict.__delitem__(self, item)
class multidict(object):
    """A collection of objects reachable through several named indices.

    Each index is declared by assigning a key function: ``md['name'] = fn``
    creates an attribute ``md.by_name`` mapping ``fn(obj) -> obj``. The first
    index declared becomes the primary one; iteration and len() go through it.
    NOTE: Python 2 code (print statement, iteritems/itervalues).
    """

    def __init__(self):
        # name -> key function; the _index dicts live as by_<name> attributes
        self._indices = {}
        self._primary = None

    def __add_index(self, name, key_fun):
        """Create index *name*; backfill it from the primary index if any."""
        if name in self._indices:
            raise IndexError('index %s already defined' % (name))
        idx = _index(self)
        if self._primary is not None:
            for itm in self._primary.values():
                idx.priv_set(key_fun(itm), itm)
        else:
            # NOTE(review): py2 print with a comma prints the literal "%s"
            # followed by the repr -- the format is never applied.
            print "Setting primary index to %s", idx
            self._primary = idx
        setattr(self, 'by_'+name, idx)
        self._indices[name] = key_fun

    def __setitem__(self, name, value):
        """``md[name] = key_fun`` declares a new index."""
        if not callable(value):
            raise ValueError('Index function is not callable')
        self.__add_index(name, value)

    def add(self, item):
        """Insert *item* into every index (primary must exist first)."""
        if self._primary is None:
            raise IndexError("A primary index must be defined before adding items")
        for name, fn in self._indices.iteritems():
            idx = getattr(self, 'by_'+name)
            idx.priv_set(fn(item), item)

    def remove(self, item):
        """Remove *item* from every index."""
        for name, fn in self._indices.iteritems():
            idx = getattr(self, 'by_'+name)
            idx.priv_del(fn(item))

    def set_primary(self, name):
        """Make index *name* primary and rebuild the others from it."""
        if self._primary == getattr(self, 'by_'+name):
            return
        self._primary = getattr(self, 'by_'+name)
        # rebuild all other indices to drop all items
        # not referred from the new primary
        vals = self._primary.values()
        for nam, fn in self._indices.iteritems():
            if nam != name:
                idx = getattr(self, 'by_'+nam)
                idx.clear()
                for itm in vals:
                    idx.priv_set(fn(itm), itm)

    def __iter__(self):
        if self._primary is None:
            raise IndexError('No index defined on multidict')
        return self._primary.itervalues()

    def __len__(self):
        if self._primary is None:
            raise IndexError('No index defined on multidict')
        return len(self._primary)

    def __delattr__(self, name):
        # `del md.by_foo` drops the whole index (primary cannot be dropped)
        if name.startswith('by_'):
            if self._primary == getattr(self, name):
                raise IndexError('Removing the primary index is illegal')
            del self._indices[name[3:]]
            object.__delattr__(self, name)
        else:
            return object.__delattr__(self, name)

    def __repr__(self):
        if self._primary is None:
            return "multidict()"
        else:
            return "multidict(%s)" % (self._primary)
def testing():
    """Smoke-test of multidict: index creation, deletion, and primary swap.

    NOTE: Python 2 only (print statements, iteritems, `except X, e`).
    """
    import random
    d2 = multidict()

    class obj(object):
        def __init__(self, name, size):
            self.name = name
            self.size = size
            self.rnd = random.randint(0, 100)
        def __repr__(self):
            return "<%s, %s>" % (self.name, self.size)

    # first index declared ('name') becomes the primary one
    d2['name'] = lambda x: x.name
    d2['size'] = lambda x: x.size
    d2.add(obj('bill', 3))
    d2.add(obj('jane', 100))
    d2.add(obj('alice', 190))
    d2.add(obj('bob', 3))
    d2.add(obj('charlie', 100))
    d2.add(obj('foo', 100))
    for n, o in d2.by_name.iteritems():
        print "%s is %s" % (n, o)
    # NOTE(review): 'size' keys collide (3/100 repeat), so by_size keeps only
    # the last object per size -- presumably accepted for this demo.
    for s, o in d2.by_size.iteritems():
        print "%s is %s" % (s, o)
    for o in d2:
        print o
    # deleting through an index removes the object everywhere
    del d2.by_name['bill']
    print d2
    for o in d2:
        print o
    # dropping a whole (non-primary) index
    del d2.by_size
    print d2
    try:
        for s, o in d2.by_size.iteritems():
            print "%s is %s" % (s, o)
    except AttributeError, e:
        print e
    d2['rand'] = lambda x: x.rnd
    for r, o in d2.by_rand.iteritems():
        print "%s is %s" % (r, o)
    # the primary index must not be removable
    try:
        del d2.by_name
    except IndexError, e:
        print e
    d2['size'] = lambda x: x.size
    print d2
    d2.set_primary('size')
    print d2

if __name__=="__main__":
    testing()
|
rafaelcpalmeida/schokotron-caralho | schokotron/app.py | from flask import Flask
from time import sleep
from candy import Candy
app = Flask(__name__)
@app.route('/')
def index():
    """Landing page greeting."""
    welcome = 'Bem vindo ao Schokotron, caralho'
    return welcome
@app.route('/candies')
def candies():
    """Dispense one candy: pulse the servo open (5.0) then closed (2.0)."""
    dispenser = Candy()
    print("Esta a abrir, caralho")
    dispenser.set_duty_cycle(5.0)
    print("Esta fechar, caralho")
    dispenser.set_duty_cycle(2.0)
    return 'Pintarolas, caralho!'
if __name__ == '__main__':
    # Listen on all interfaces so the robot is reachable on the LAN;
    # debug mode is for development only.
    app.run(debug=True, host='0.0.0.0')
|
rafaelcpalmeida/schokotron-caralho | schokotron-slack/schokotron.py | <filename>schokotron-slack/schokotron.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import time
import urllib
# `urllib.request` must be imported explicitly -- a bare `import urllib` does
# not load the submodule, so serve_chocolat()'s urllib.request.urlopen() call
# raised AttributeError.
import urllib.request
class SchokotronCaralho(object):
    """Slack bot that dispenses chocolate via the Schokotron robot's HTTP API."""

    ROBOT_ADDRESS = "http://127.0.0.1:5000"
    API_ADDRESS = ""

    def __init__(self, logger, slack_client, slack_username):
        """Store the logger, the connected SlackClient, and the bot's own username."""
        self.logger = logger
        self.slack_client = slack_client
        self.slack_username = slack_username

    def run(self):
        """Connect to Slack RTM and poll for events forever (1 s interval)."""
        if self.slack_client.rtm_connect(with_team_state=False):
            self.logger.info("Successfully connected, listening for commands")
            while True:
                events = self.slack_client.rtm_read()
                if events and len(events) > 0:
                    for event in events:
                        self.parse_event(event)
                time.sleep(1)
        else:
            exit("Error, Connection Failed")

    def parse_event(self, event):
        """Handle one RTM event: help text, chocolate requests, or a brush-off."""
        if event and "text" in event and "user" in event:
            # ignore the bot's own messages to avoid reply loops
            if not self.self_sent(event["user"]):
                channel_id = event["channel"]
                user_info = self.parse_user_info(event['user'])
                self.logger.info("Recebi uma mensagem do " + user_info["username"])
                if re.match(r'(ajuda).*(caralho)', event["text"], re.M | re.I):
                    self.send_message("Quié caralho?", channel_id)
                    self.send_message("Podes enviar-me:", channel_id)
                    self.send_message("`Dá-me um chocolate, caralho!`", channel_id)
                    self.send_message("`Dá-me a puta de um chocolate seu filho da puta, caralho!`", channel_id)
                    return
                if re.match(r'(dá-me|quero).*(chocolate).*(caralho)', event["text"], re.M | re.I):
                    self.perform_action("serve_chocolat", channel_id)
                    #self.send_message("O(A) gordo(a) do(a) @" + user_details["username"] + " acabou de comer mais um chocolate!", "#random")
                    return
                self.send_message("Vai chatear o caralho! :middle_finger:", channel_id)

    def self_sent(self, user_id):
        """Return True when *user_id* belongs to the bot's own Slack account."""
        users = self.slack_client.api_call(
            "users.list"
        )
        if users and "ok" in users and "members" in users:
            for user in users["members"]:
                if user["name"] == self.slack_username and user["id"] == user_id:
                    return True
        return False

    def send_message(self, message, channel):
        """Post *message* to *channel* with @-mentions resolved."""
        self.slack_client.api_call(
            "chat.postMessage",
            channel=channel,
            link_names=True,
            parse="full",
            text=message
        )

    def parse_user_info(self, user_id):
        """Fetch and flatten the Slack profile for *user_id*."""
        user_data = {}
        user_details = self.slack_client.api_call(
            "users.info",
            user=user_id
        )
        user_data["provider_id"] = user_details["user"]["id"]
        user_data["username"] = user_details["user"]["name"]
        user_data["full_name"] = user_details["user"]["real_name"]
        user_data["email"] = user_details["user"]["profile"]["email"]
        return user_data

    def perform_action(self, action, channel_id):
        """Dispatch *action* by name.

        BUG FIX: the original used ``self.serve_chocolat(channel_id)`` as the
        dict *value*, which executed the action while building the dict (and
        would run every action once more were added). Map to the bound method
        and call it only after the lookup.
        """
        actions = {
            'serve_chocolat': self.serve_chocolat,
        }
        return actions[action](channel_id)

    def serve_chocolat(self, channel_id):
        """Ask the robot to dispense a chocolate and report the outcome on Slack."""
        try:
            urllib.request.urlopen(self.ROBOT_ADDRESS + "/candies").read()
            self.send_message("Toma o teu chocolate gordo(a).", channel_id)
        except Exception as description:
            self.logger.error("Something went wrong! Here are some details:")
            self.logger.error(description)
            self.send_message("Ocorreu um erro! :cry: Contacta os Super Administradores desta merda.", channel_id)
|
rafaelcpalmeida/schokotron-caralho | schokotron/candy.py | import RPi.GPIO as GPIO
from time import sleep
class Candy:
    """Drives the candy-dispenser servo on physical pin 3 via 50 Hz PWM."""

    # NOTE(review): these run once at class-definition time (import), not per
    # instance -- presumably intentional so the GPIO mode is set exactly once.
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BOARD)

    def __init__(self):
        # Configure pin 3 as a 50 Hz PWM output, initially idle (0% duty).
        print("Starting the Mighty Schokotron, caralho")
        GPIO.setup(3, GPIO.OUT)
        self.pwm = GPIO.PWM(3, 50)
        self.pwm.start(0)

    def set_duty_cycle(self, duty):
        # Pulse the servo at *duty* percent for 150 ms, then stop driving it
        # (duty 0) so the servo does not jitter while holding position.
        GPIO.output(3, True)
        self.pwm.ChangeDutyCycle(duty)
        sleep(0.15)
        GPIO.output(3, False)
        self.pwm.ChangeDutyCycle(0)
|
rafaelcpalmeida/schokotron-caralho | schokotron-slack/main.py | <reponame>rafaelcpalmeida/schokotron-caralho
#!/usr/bin/python
# -*- coding: utf-8 -*-
from slackclient import SlackClient
from schokotron import SchokotronCaralho
import os
import logging
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    slack_username = os.getenv("SLACK_API_USERNAME", "")
    slack_token = os.getenv("SLACK_API_TOKEN", 0)
    # BUG FIX: `is 0` / `is ""` compare object identity, not value, and only
    # worked by accident of CPython interning. Truthiness also rejects an
    # empty token string from the environment.
    if not slack_token or not slack_username:
        exit("Error, couldn't connect to Slack")
    slack_client = SlackClient(slack_token)
    sc = SchokotronCaralho(logger, slack_client, slack_username)
    sc.run()
|
buildthedocs/autoapi | examples/apinode.py | <reponame>buildthedocs/autoapi<filename>examples/apinode.py<gh_stars>10-100
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example usage of the APINode class.
"""
import readline # noqa
from code import InteractiveConsole
from logging import basicConfig, DEBUG
from autoapi.apinode import APINode
if __name__ == '__main__':
    # Verbose logging so APINode's tree discovery is visible on the console.
    basicConfig(level=DEBUG)
    # Build the API tree for the installed 'sphinx' package.
    m = APINode('sphinx')
    for node, leaves in m.walk():
        print(
            '{} node has leaves: {}'.format(
                node.name, ', '.join([l.name for l in leaves])
            )
        )
    # Drop into an interactive shell with `m` available for exploration.
    InteractiveConsole(globals()).interact()
|
buildthedocs/autoapi | doc/conf.py | <reponame>buildthedocs/autoapi
# -*- coding: utf-8 -*-
#
# autoapi documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
from sys import path
from pathlib import Path
from datetime import date
from autoapi import __version__
from sphinx_readable_theme import get_html_theme_path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Allow to find the 'documented.py' example
path.insert(0, str(Path(__file__).resolve().parent))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'autoapi.sphinx',
'plantweb.directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'AutoAPI'
author = '<NAME>'
years = '2015-{}'.format(date.today().year)
copyright = '{}, {}'.format(years, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# -- Options for HTML output ----------------------------------------------
html_theme = 'readable'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# Add style overrides
def setup(app):
app.add_stylesheet('styles/custom.css')
# -- Plugins options ----------------------------------------------------------
# AutoAPI configuration
autoapi_modules = {
'autoapi': {'prune': True},
'documented': {'output': 'autoapi'}
}
# Plantweb configuration
plantweb_defaults = {
'use_cache': True,
'format': 'svg',
}
# Configure Graphviz
graphviz_output_format = 'svg'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None)
}
|
roansong/osu-replay-parser | tests/__init__.py | from .replay_test import TestStandardReplay
|
roansong/osu-replay-parser | tests/replay_test.py | <reponame>roansong/osu-replay-parser<gh_stars>1-10
import unittest, datetime
from osrparse.replay import parse_replay, parse_replay_file, ReplayEvent
from osrparse.enums import GameMode, Mod
class TestStandardReplay(unittest.TestCase):
    """Checks osrparse against two known .osr fixtures.

    replay.osr is parsed twice (from bytes and from file) to verify both
    entry points agree; replay2.osr covers a Hidden+HardRock mod combination.
    """

    @classmethod
    def setUpClass(cls):
        # parse once per class -- the fixtures are immutable
        with open('tests/resources/replay.osr', 'rb') as f:
            data = f.read()
        cls._replays = [parse_replay(data), parse_replay_file('tests/resources/replay.osr')]
        cls._combination_replay = parse_replay_file('tests/resources/replay2.osr')

    def test_replay_mode(self):
        for replay in self._replays:
            self.assertEqual(replay.game_mode, GameMode.Standard, "Game mode is incorrect")

    def test_game_version(self):
        for replay in self._replays:
            self.assertEqual(replay.game_version, 20140226, "Game version is incorrect")

    def test_beatmap_hash(self):
        for replay in self._replays:
            self.assertEqual(replay.beatmap_hash, "da8aae79c8f3306b5d65ec951874a7fb", "Beatmap hash is incorrect")

    def test_player_name(self):
        for replay in self._replays:
            self.assertEqual(replay.player_name, "Cookiezi", "Player name is incorrect")

    def test_number_hits(self):
        for replay in self._replays:
            self.assertEqual(replay.number_300s, 1982, "Number of 300s is wrong")
            self.assertEqual(replay.number_100s, 1, "Number of 100s is wrong")
            self.assertEqual(replay.number_50s, 0, "Number of 50s is wrong")
            self.assertEqual(replay.gekis, 250, "Number of gekis is wrong")
            self.assertEqual(replay.katus, 1, "Number of katus is wrong")
            self.assertEqual(replay.misses, 0, "Number of misses is wrong")

    def test_max_combo(self):
        for replay in self._replays:
            self.assertEqual(replay.max_combo, 2385, "Max combo is wrong")

    def test_is_perfect_combo(self):
        for replay in self._replays:
            self.assertEqual(replay.is_perfect_combo, True, "is_perfect_combo is wrong")

    def test_nomod(self):
        for replay in self._replays:
            self.assertEqual(replay.mod_combination, frozenset([Mod.NoMod]), "Mod combination is wrong")

    def test_mod_combination(self):
        # replay2.osr was played with Hidden + HardRock
        self.assertEqual(self._combination_replay.mod_combination, frozenset([Mod.Hidden, Mod.HardRock]), "Mod combination is wrong")

    def test_timestamp(self):
        for replay in self._replays:
            self.assertEqual(replay.timestamp, datetime.datetime(2013, 2, 1, 16, 31, 34), "Timestamp is wrong")

    def test_play_data(self):
        for replay in self._replays:
            self.assertIsInstance(replay.play_data[0], ReplayEvent, "Replay data is wrong")
            self.assertEqual(len(replay.play_data), 17500, "Replay data is wrong")
|
roansong/osu-replay-parser | setup.py | <filename>setup.py
"""Packaging script for osrparse."""
# FIX: the original mixed `distutils.core.setup` with setuptools'
# find_packages; distutils is deprecated (removed in Python 3.12) and
# setuptools.setup is the drop-in, feature-complete replacement.
from setuptools import find_packages, setup

version = '3.0.0'

setup(
    name = 'osrparse',
    version = version,
    description = "Python implementation of osu! rhythm game replay parser.",
    classifiers = [
        'Topic :: Games/Entertainment',
        'Topic :: Software Development :: Libraries',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ],
    keywords = ['osu!, osr, replay, replays, parsing, parser, python'],
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = 'https://github.com/kszlim/osu-replay-parser',
    download_url = 'https://github.com/kszlim/osu-replay-parser/tarball/' + version,
    license = 'MIT',
    test_suite="tests",
    packages = find_packages()
)
|
roansong/osu-replay-parser | osrparse/__init__.py | from .replay import parse_replay_file, parse_replay
|
mingzhaochina/obspyh5 | setup.py | # Copyright 2013-2017 <NAME>, MIT license
import os.path
import re
from setuptools import setup
def find_version(*paths):
    """Extract the __version__ string from the file at *paths* (joined
    relative to this script's directory)."""
    target = os.path.join(os.path.dirname(__file__), *paths)
    with open(target) as handle:
        source = handle.read()
    found = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", source, re.M)
    if not found:
        raise RuntimeError("Unable to find version string.")
    return found.group(1)
version = find_version('obspyh5.py')
with open('README.rst') as f:
README = f.read()
DESCRIPTION = README.split('\n')[2]
LONG_DESCRIPTION = '\n'.join(README.split('\n')[20:])
ENTRY_POINTS = {
'obspy.plugin.waveform': ['H5 = obspyh5'],
'obspy.plugin.waveform.H5': [
'isFormat = obspyh5:is_obspyh5',
'readFormat = obspyh5:readh5',
'writeFormat = obspyh5:writeh5']}
CLASSIFIERS = [
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Physics'
]
setup(name='obspyh5',
version=version,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url='https://github.com/trichter/obspyh5',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
py_modules=['obspyh5'],
install_requires=['h5py', 'numpy', 'obspy', 'setuptools'],
entry_points=ENTRY_POINTS,
zip_safe=False,
include_package_data=True,
classifiers=CLASSIFIERS
)
|
mingzhaochina/obspyh5 | test_obspyh5.py | <reponame>mingzhaochina/obspyh5<filename>test_obspyh5.py<gh_stars>1-10
# Copyright 2013-2016 <NAME>, MIT license
import unittest
import warnings
import h5py
import numpy as np
from obspy import read
from obspy.core import UTCDateTime as UTC
from obspy.core.util import NamedTemporaryFile
from obspyh5 import readh5, writeh5, trace2group, iterh5, set_index
import obspyh5
class HDF5TestCase(unittest.TestCase):
    def setUp(self):
        """Build the fixture: obspy's example stream, decimated and
        differentiated so processing info plus custom headers are present."""
        self.stream = read().sort()
        # add processing info
        self.stream.decimate(2)
        self.stream.differentiate()
        self.stream[0].stats.onset = UTC()
        self.stream[0].stats.header = 42
        self.stream[0].stats.header2 = 'Test entry'
        self.stream[0].stats.header3 = u'Test entry unicode'
        # response objects are not round-tripped through HDF5; drop them so
        # stream comparisons succeed
        for tr in self.stream:
            if 'response' in tr.stats:
                del tr.stats.response
    def test_is_utc(self):
        """_is_utc recognises UTCDateTime instances and nothing else."""
        self.assertTrue(obspyh5._is_utc(UTC()))
        self.assertFalse(obspyh5._is_utc(110))
def test_hdf5_plugin(self):
stream = self.stream
with NamedTemporaryFile(suffix='.h5') as ft:
fname = ft.name
stream.write(fname, 'H5')
stream2 = read(fname).sort()
for tr in stream2:
del tr.stats._format
self.assertEqual(stream, stream2)
def test_hdf5_plugin_and_xcorr_index(self):
set_index('xcorr')
stream = self.stream.copy()
for i, tr in enumerate(stream): # manipulate stats object
station1, station2 = 'ST1', 'ST%d' % i
channel1, channel2 = 'HHZ', u'HHN'
s = tr.stats
# we manipulate seed id so that important information gets
# printed by obspy
s.network, s.station = s.station1, s.channel1 = station1, channel1
s.location, s.channel = s.station2, s.channel2 = station2, channel2
s.network1 = s.network2 = 'BW'
s.location1 = s.location2 = ''
stream.sort()
with NamedTemporaryFile(suffix='.h5') as ft:
fname = ft.name
stream.write(fname, 'H5')
stream2 = read(fname).sort()
for tr in stream2:
del tr.stats._format
set_index()
self.assertEqual(stream, stream2)
def test_hdf5_basic(self):
stream = self.stream
with NamedTemporaryFile(suffix='.h5') as ft:
fname = ft.name
# write stream and read again, append data
writeh5(stream[:1], fname)
self.assertTrue(obspyh5.is_obspyh5(fname))
stream2 = readh5(fname)
writeh5(stream[1:], fname, mode='a')
stream3 = readh5(fname)
self.assertEqual(stream[:1], stream2)
self.assertEqual(stream, stream3)
# read only header
stream3 = readh5(fname, headonly=True)
self.assertEqual(stream2[0].stats, stream3[0].stats)
self.assertEqual(len(stream3[0].data), 0)
# test if group was really created
with h5py.File(fname) as f:
self.assertTrue('waveforms' in f)
# # test numpy headers
stream[0].stats.num = np.array([[5, 4, 3], [1, 2, 3.]])
writeh5(stream, fname)
stream2 = readh5(fname)
# stream/stats comparison not working for arrays
# therefore checking directly
np.testing.assert_array_equal(stream[0].stats.num,
stream2[0].stats.num)
del stream[0].stats.num
# check for warning for unsupported types
stream[0].stats.toomuch = {1: 3}
with warnings.catch_warnings(record=True) as w:
writeh5(stream, fname)
warnings.simplefilter("always")
self.assertEqual(len(w), 1)
del stream[0].stats.toomuch
def test_hdf5_interface(self):
stream = self.stream
with NamedTemporaryFile(suffix='.h5') as ft:
with h5py.File(ft.name) as f:
trace2group(stream[0], f)
# test override
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
trace2group(stream[0], f)
self.assertEqual(len(w), 1)
with self.assertRaises(KeyError):
trace2group(stream[0], f, key=None, override='raise')
# is_obspyh5 is only working with file names
self.assertFalse(obspyh5.is_obspyh5(f))
def test_hdf5_iter(self):
stream = self.stream
with NamedTemporaryFile(suffix='.h5') as ft:
fname = ft.name
stream.write(fname, 'H5')
traces = []
for tr in iterh5(fname):
traces.append(tr)
self.assertEqual(stream.traces, traces)
def test_hdf5_readonly(self):
stream = self.stream
with NamedTemporaryFile(suffix='.h5') as ft:
fname = ft.name
stream.write(fname, 'H5')
ro = {'network': 'BW', 'station': u'RJOB', 'location': '',
'channel': 'EHE'}
stream2 = read(fname, 'H5', readonly=ro)
self.assertEqual(stream[0].id, stream2[0].id)
ro = {'network': 'BW', 'station': 'RJOB'}
stream2 = read(fname, 'H5', readonly=ro)
self.assertEqual(len(stream2), 3)
def test_hdf5_headonly(self):
stream = self.stream
with NamedTemporaryFile(suffix='.h5') as ft:
fname = ft.name
stream.write(fname, 'H5')
stream2 = read(fname, 'H5', headonly=True)
stream2[0].stats.header = -42
self.assertEqual(len(stream2[0]), 0)
stream2.write(fname, 'H5', mode='a', headonly=True)
stream2 = read(fname, 'H5')
self.assertEqual(stream2[0].stats.header, -42)
stream2[0].stats.header = 42
for tr in stream2:
del tr.stats._format
self.assertEqual(stream, stream2)
def test_stored_index(self):
stream = self.stream
with NamedTemporaryFile(suffix='.h5') as ft:
fname = ft.name
stream.write(fname, 'H5')
set_index('nonesens')
stream.write(fname, 'H5', mode='a', override='ignore')
set_index()
def test_read_files_saved_prior_version_0_3(self):
stream = self.stream
index_v_0_2 = ('{network}.{station}/{location}.{channel}/'
'{starttime.datetime:%Y-%m-%dT%H:%M:%S}_'
'{endtime.datetime:%Y-%m-%dT%H:%M:%S}')
with NamedTemporaryFile(suffix='.h5') as ft:
fname = ft.name
set_index(index_v_0_2)
stream.write(fname, 'H5', group='waveforms')
stream2 = read(fname, 'H5', group='waveforms')
stream3 = read(fname, 'H5')
for tr in stream2:
del tr.stats._format
for tr in stream3:
del tr.stats._format
self.assertEqual(stream, stream2)
self.assertEqual(stream, stream3)
set_index()
def suite():
    """Return the test suite with all test_* methods of HDF5TestCase.

    unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13;
    TestLoader with the default 'test' method prefix is the direct equivalent
    of makeSuite(HDF5TestCase, 'test').
    """
    return unittest.TestLoader().loadTestsFromTestCase(HDF5TestCase)
# Allow running this module directly; executes the suite defined above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
uhh-lt/triframes | utils.py | <gh_stars>1-10
from collections import namedtuple, defaultdict
from itertools import zip_longest
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # n references to the *same* iterator advance in lockstep, so zip_longest
    # emits consecutive chunks, padding the final one with fillvalue.
    chunk_iterators = (iter(iterable),) * n
    return zip_longest(*chunk_iterators, fillvalue=fillvalue)
# One weighted subject--predicate--object triple.
Triple = namedtuple('Triple', 'subject predicate object weight')


def triples(f, min_weight=None, build_index=True):
    """Parse tab-separated '(predicate, subject, object, weight)' lines.

    Each term may carry a '#'-suffix (e.g. a POS tag like 'run#VB'), which is
    stripped. Lines with an empty term or a weight below `min_weight` are
    skipped.

    Returns a pair (list of Triple, defaultdict mapping predicate -> set of
    row indices); the index stays empty when build_index is False.
    """
    def _strip_suffix(term):
        # 'run#VB' -> 'run'; a term without '#' is returned unchanged.
        head, _, tail = term.rpartition('#')
        return head if head else tail

    spos, index = [], defaultdict(set)
    for line in f:
        predicate, subject, obj, weight = line.strip().split('\t', 3)
        predicate = _strip_suffix(predicate)
        subject = _strip_suffix(subject)
        # renamed from `object`: the original shadowed the builtin
        obj = _strip_suffix(obj)
        weight = float(weight)
        if (min_weight is not None and weight < min_weight) or not subject or not predicate or not obj:
            continue
        spos.append(Triple(subject, predicate, obj, weight))
        if build_index:
            index[predicate].add(len(spos) - 1)
    return spos, index
def word_vectors(args, fallback=lambda x: None):
    """Load word embeddings from a word2vec binary (args.w2v) or connect to a
    Pyro proxy serving them (args.pyro); otherwise delegate to `fallback`."""
    if args.w2v:
        from gensim.models import KeyedVectors
        model = KeyedVectors.load_word2vec_format(args.w2v, binary=True, unicode_errors='ignore')
        model.init_sims(replace=True)
        return model
    if args.pyro:
        import Pyro4
        Pyro4.config.SERIALIZER = 'pickle'
        return Pyro4.Proxy(args.pyro)
    return fallback(args)
def words_vec(w2v, words, use_norm=False):
    """
    Return a dict that maps the given words to their embeddings.
    """
    # Prefer the model's own batched lookup when it provides one (e.g. a
    # remote proxy); otherwise fall back to per-word lookups, silently
    # skipping out-of-vocabulary words.
    batch_lookup = getattr(w2v, 'words_vec', None)
    if callable(batch_lookup):
        return batch_lookup(words, use_norm)
    return {w: w2v.wv.word_vec(w, use_norm) for w in words if w in w2v.wv}
|
uhh-lt/triframes | fi/hosg/convert2counts.py | <gh_stars>1-10
#!/usr/bin/python3
import sys
def main():
    """Filter stdin lines of '(verb, subject, object, frequency)' into
    'frequency verb object subject' count records on stdout, with lemmas
    stripped of their '#POS' suffix; malformed lines are counted to stderr."""
    bad = 0
    for raw in sys.stdin:
        fields = raw.strip().split()
        if len(fields) != 4:
            bad += 1
            continue
        verb, subject, obj, frequency = fields
        count = int(float(frequency))
        print(count, verb.split('#')[0], obj.split('#')[0], subject.split('#')[0])
    print('Errors:', bad, file=sys.stderr)
# Script entry point: filters stdin to stdout.
if __name__ == '__main__':
    main()
|
uhh-lt/triframes | graph_embeddings/generate_graph.py | import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from nxviz.plots import CircosPlot
import numpy as np
import itertools
import pickle
# Number of rows to load from the input triples CSV (caps the graph size).
lines_to_read = 100000
def get_pairs(df, col1, col2):
    """Return all index pairs of rows that share the same (col1, col2) values.

    Rows are first restricted to those duplicated on the two columns, then
    grouped; within each group every 2-combination of row indices becomes an
    edge candidate. KeyError from get_group is reported and skipped.
    """
    duplicated_rows = df[df.duplicated(subset=[col1, col2], keep=False)]
    grouped = duplicated_rows.groupby([col1, col2])
    pairs = []
    # fix: the original enumerated the groups but never used the counter
    for key in grouped.groups:
        try:
            members = grouped.get_group(key)
            if len(members) > 1:
                pairs += list(itertools.combinations(members.index.values, 2))
        except KeyError:
            print("KeyError")
    return pairs
# read data: tab-separated (verb, subject, object, score) triples
df = pd.read_csv("vso-1.3m-pruned-strict.csv", delimiter="\t", header=None, nrows=lines_to_read)
df.columns = ['verb', 'subject', 'object', 'score']
df = df.reset_index()
df = df.fillna('')
# init graph: one node per triple row
G = nx.Graph()
edges = []
dictionary_id_to_name = {}
# add vertices, remembering the row behind every node id
print("Adding vertices...")
for index, row in df.iterrows():
    G.add_node(index, verb=row['verb'], subject=row['subject'], object=row['object'])
    dictionary_id_to_name[str(index)] = row
print("Done")
# add edges between rows that agree on any two of the three fields
edges += get_pairs(df, 'verb', 'subject')
edges += get_pairs(df, 'verb', 'object')
edges += get_pairs(df, 'object', 'subject')
G.add_edges_from(edges)
# graph info
print("nodes: ", G.number_of_nodes())
print("edges: ", G.number_of_edges())
# save graph in both adjacency-list and edge-list form
nx.write_adjlist(G, "triframes.adjlist")
nx.write_edgelist(G, "triframes.edgelist")
# save dictionary with id mapping (read back by check_similarity.py)
with open('id_to_name.pkl', 'wb') as f:
    pickle.dump(dictionary_id_to_name, f, pickle.HIGHEST_PROTOCOL)
# plot graph
# c = CircosPlot(G, node_labels=True)
# c.draw()
# plt.show()
|
uhh-lt/triframes | graph_embeddings/check_similarity.py | import pickle
import sys
import gensim
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import Word2Vec, KeyedVectors
# Inspect the nearest neighbours of one graph node in a trained
# node-embedding space, mapping node ids back to their (verb, subject,
# object) rows.
embeddings_name = "input100_connection2_dim200_windowsize70.emb"
id_to_check = "27236"

# id -> original triple row mapping written by generate_graph.py.
# NOTE(review): pickle.load executes code from the file -- only open
# trusted pickles.
with open('id_to_name.pkl', 'rb') as f:
    pkl = pickle.load(f)

vectors = KeyedVectors.load_word2vec_format(embeddings_name, binary=False)

node_to_check = pkl[id_to_check]
# BUG FIX: the original referenced the undefined name `wv_from_text`
# (NameError at runtime); the loaded model is bound to `vectors`.
similar = vectors.most_similar(positive=id_to_check)

print("similar to: ", node_to_check["verb"], node_to_check["subject"], node_to_check["object"])
print("--------------")
for index, score in similar:
    node = pkl[index]
    print(node["verb"], node["subject"], node["object"], score)
uhh-lt/triframes | pkl2edges.py | #!/usr/bin/env python
import argparse
import pickle
# Convert a pickled edge list (as produced by trihosg.py --pickle) to TSV.
parser = argparse.ArgumentParser()
parser.add_argument('--no-header', dest='header', action='store_false')
parser.add_argument('pickle', type=argparse.FileType('rb'))
args = parser.parse_args()

# NOTE(review): pickle.load executes arbitrary code from the file -- only
# run this on trusted pickles.
data = pickle.load(args.pickle)

if args.header:
    print('\t'.join(('source', 'target', 'weight')))

# Each edge is (source_tuple, target_tuple, {'weight': w}); the node tuples
# are flattened into single '|'-joined columns.
for edge in data:
    source = '|'.join(edge[0])
    target = '|'.join(edge[1])
    weight = str(edge[2]['weight'])
    print('\t'.join((source, target, weight)))
|
uhh-lt/triframes | fi/eval/noac2eval.py | <reponame>uhh-lt/triframes<gh_stars>1-10
from glob import glob
from os.path import join
from pandas import read_csv
import codecs
import argparse
sample_input = """
Density Variance Average Coverage Objects coverage Attributes coverage Conditions coverage Extent Intent Modus
31.23 7899693.28 1286.43 0.48 0.64 0.33 0.14 <take#VB, see#VB, think#VB, make#VB, have#VB, know#VB, say#VB, tell#VB, do#VB, get#VB> <person, man, player, fan, nobody, anyone, lot, be, everyone, friend, many, guy, everybody, reader, God, other, child, people, someone, one, eye, kid> <picture, anything, video, movie, something, thing, lot>
"""
sample_output = """
# Cluster 23
Predicates: dred, donna, hank, ggg, kardashian, haff, jill, santa, julie, luke, bday, lizzie, beatle, publi, eather, sasha, lauren, bernie, kern, 0ne, lfe, monica, kristen, hanna, sherman, suckin, kate, hud, zack, kirsten, einstein, ^_^
Subjects: hun, hoo
Objects: tape, Tape
# Cluster 23
Predicates: dred, donna, hank, ggg, kardashian, haff, jill, santa, julie, luke, bday, lizzie, beatle, publi, eather, sasha, lauren, bernie, kern, 0ne, lfe, monica, kristen, hanna, sherman, suckin, kate, hud, zack, kirsten, einstein, ^_^
Subjects: hun, hoo
Objects: tape, Tape
"""
def delete_header(input_fpath, output_fpath):
    """Copy a UTF-16LE text file to UTF-8, dropping its first line (header)."""
    with codecs.open(input_fpath, 'r', "utf-16le") as src:
        lines = src.read().splitlines(True)
    with codecs.open(output_fpath, 'w', "utf-8") as dst:
        dst.writelines(lines[1:])
def exract_bow(df, old_field, new_field):
    """Parse a '<a#POS, b#POS>'-style cell of `old_field` into a list of bare,
    stripped lemmas stored in `new_field`; returns the mutated frame.

    (Name keeps the original's typo for compatibility with callers. The
    intermediate set makes the list order unspecified.)
    """
    def _to_lemmas(cell):
        items = cell.replace("<", "").replace(">", "").split(",")
        unique = set(item.split("#")[0] for item in items)
        return [lemma.strip() for lemma in unique if lemma.strip() != ""]

    df[new_field] = df[old_field].apply(_to_lemmas)
    return df
def run(noac_dir):
    """Convert every NOAC output *.txt in `noac_dir` to the evaluation format."""
    for noac_fpath in glob(join(noac_dir,"*.txt")):
        # normalise the file name for the intermediate UTF-8 TSV copy
        csv_fpath = noac_fpath.replace(" ", "").replace(",","-") + ".tsv"
        delete_header(noac_fpath, csv_fpath)
        df = read_csv(csv_fpath, sep="\t", encoding="utf8")
        # turn the '<...>' concept columns into lemma lists
        df = exract_bow(df, "Extent", "predicates")
        df = exract_bow(df, "Intent", "subjects")
        df = exract_bow(df, "Modus", "objects")
        output_fpath = csv_fpath + ".arguments.tsv"
        # one '# Cluster N' section per row, as expected by the eval script
        with codecs.open(output_fpath, "w", "utf-8") as out:
            for i, row in df.iterrows():
                out.write("# Cluster {:d}\n\n".format(i))
                out.write("Predicates: {}\n".format(", ".join(row.predicates)))
                out.write("Subjects: {}\n".format(", ".join(row.subjects)))
                out.write("Objects: {}\n\n".format(", ".join(row.objects)))
        print("Output:", output_fpath)
def main():
    """CLI entry point: convert every NOAC output file in one directory."""
    argp = argparse.ArgumentParser(description='Converts NOAC outputs to the input of the evaluation script. ')
    argp.add_argument('noac_dir', help='A directory with the NOAC output files.')
    opts = argp.parse_args()
    print("Input:", opts.noac_dir)
    run(opts.noac_dir)
# Script entry point.
if __name__ == '__main__':
    main()

# Example paths of ltcpu3
# noac_dir = "/home/panchenko/tmp/triclustering-results/"
|
uhh-lt/triframes | trihosg.py | <reponame>uhh-lt/triframes
#!/usr/bin/env python
import argparse
import gzip
import re
import sys
from collections import Counter
from functools import partial
from itertools import zip_longest
import faiss
import networkx as nx
import numpy as np
from chinese_whispers import chinese_whispers, aggregate_clusters
from gensim.models import KeyedVectors
from utils import grouper
parser = argparse.ArgumentParser()
parser.add_argument('--neighbors', '-n', type=int, default=10)
parser.add_argument('--pickle', type=argparse.FileType('wb'))
parser.add_argument('words', type=argparse.FileType('rb'))
parser.add_argument('contexts', type=argparse.FileType('rb'))
parser.add_argument('relations', type=argparse.FileType('rb'))
parser.add_argument('triples', type=argparse.FileType('rb'))
args = parser.parse_args()

# Three separate embedding spaces: verbs (words), subjects (contexts) and
# objects (relations).
wordmodel = KeyedVectors.load_word2vec_format(args.words, binary=False)
contextmodel = KeyedVectors.load_word2vec_format(args.contexts, binary=False)
relationmodel = KeyedVectors.load_word2vec_format(args.relations, binary=False)

# Unique triples whose three parts are all in-vocabulary.
spos = set()
POS = r'\#\w+$'  # trailing '#POS' suffix, e.g. 'run#VB'
extract = partial(re.sub, POS, '')
with gzip.open(args.triples) as f:
    for line in f:
        _, verb, subject, object = line.decode('utf-8').strip().split(' ', 3)
        if verb in wordmodel and subject in contextmodel and object in relationmodel:
            spos.add((verb, subject, object))
spos = list(spos)

# One row per triple: concatenated subject|verb|object embeddings.
index2triple = {}
X = np.empty((len(spos), wordmodel.vector_size + contextmodel.vector_size + relationmodel.vector_size), 'float32')
for i, (verb, subject, object) in enumerate(spos):
    # This changes order from VSO to SVO because I use it everywhere.
    j = 0
    X[i, j:j + contextmodel.vector_size] = contextmodel[subject]
    j += contextmodel.vector_size
    X[i, j:j + wordmodel.vector_size] = wordmodel[verb]
    j += wordmodel.vector_size
    X[i, j:j + relationmodel.vector_size] = relationmodel[object]
    index2triple[i] = (extract(subject), extract(verb), extract(object))

# Exact inner-product nearest-neighbour index over all triple vectors.
knn = faiss.IndexFlatIP(X.shape[1])
knn.add(X)

# Build a k-NN graph in 2048-row batches; edge weight is the inner-product
# similarity to the neighbour.
G, maximal_distance = nx.Graph(), -1
for slice in grouper(range(X.shape[0]), 2048):
    slice = [j for j in slice if j is not None]  # drop grouper's padding
    D, I = knn.search(X[slice, :], args.neighbors + 1)  # +1: self-match included
    last = min(slice)
    print('%d / %d' % (last, X.shape[0]), file=sys.stderr)
    for i, (_D, _I) in enumerate(zip(D, I)):
        source = index2triple[last + i]
        words = Counter()
        for d, j in zip(_D.ravel(), _I.ravel()):
            if last + i != j:  # skip the self-match
                words[index2triple[j]] = float(d)
        for target, distance in words.most_common(args.neighbors):
            G.add_edge(source, target, weight=distance)
            maximal_distance = distance if distance > maximal_distance else maximal_distance

# Rescale weights relative to the largest similarity seen.
# NOTE(review): divides by the stored weight -- a zero similarity would raise
# ZeroDivisionError; confirm similarities are always positive here.
for _, _, d in G.edges(data=True):
    d['weight'] = maximal_distance / d['weight']

# Optionally dump the weighted edge list and stop (consumed by pkl2edges.py).
if args.pickle is not None:
    import pickle
    pickle.dump(list(G.edges(data=True)), args.pickle, protocol=3)
    sys.exit(0)

# Cluster the triple graph and print one frame per cluster, largest first.
chinese_whispers(G, weighting='top', iterations=20)
clusters = aggregate_clusters(G)
for label, cluster in sorted(aggregate_clusters(G).items(), key=lambda e: len(e[1]), reverse=True):
    print('# Cluster %d\n' % label)
    subjects = {subject for subject, _, _ in cluster}
    predicates = {predicate for _, predicate, _ in cluster}
    objects = {object for _, _, object in cluster}
    print('Predicates: %s' % ', '.join(predicates))
    print('Subjects: %s' % ', '.join(subjects))
    print('Objects: %s\n' % ', '.join(objects))
|
uhh-lt/triframes | fi/eval/extract_conll_framenet_roles.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
from glob import glob
from collections import defaultdict
from collections import namedtuple
from os.path import join
import codecs
import re
from traceback import format_exc
import operator
import codecs
from collections import Counter
import networkx as nx
from networkx import NetworkXNoPath
test_sentence = """
# RID: 2492
# Frame "Claim_ownership"
# FEE: 5
# Property: 3
# Claimant: 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
# Frame "Leadership"
# FEE: 18
# Leader: 18
# Governed: 19, 20
# Mapped annotation onto parse #1 with average recall of 0.792
1 Note note nn NN _ _ _ _ _ _ _ _ _
2 : : : : _ _ _ _ _ _ _ _ _
3 Taiwan Taiwan NP NNP _ _ _ 5 nsubjpass _ Property _ _
4 is be VBZ VBZ _ _ _ 5 auxpass _ _ _ _
5 claimed claim VVN VBN _ _ _ 1|1 ccomp|parataxis Claim_ownership _ _ _
6 by by in IN _ _ _ _ _ _ _ _ _
7 both both pdt PDT _ _ _ 9 predet _ _ _ _
8 the the dt DT _ _ _ 9 det _ _ _ _
9 Government government nn NN _ _ _ 5 agent _ Claimant _ _
10 of of in IN _ _ _ _ _ _ _ _ _
11 the the dt DT _ _ _ 13 det _ _ _ _
12 People's People's NP NNP _ _ _ 13 nn _ _ _ _
13 Republic Republic NP NNP _ _ _ 9 prep_of _ _ _ _
14 of of in IN _ _ _ _ _ _ _ _ _
15 China China NP NNP _ _ _ 13 prep_of _ _ _ _
16 and and cc CC _ _ _ _ _ _ _ _ _
17 the the dt DT _ _ _ 18 det _ _ _ _
18 authorities authority nns NNS _ _ _ 9|13 prep_of|conj_and _ _ Leadership Leader
19 on on in IN _ _ _ _ _ _ _ _ _
20 Taiwan Taiwan NP NNP _ _ _ 18 prep_on _ _ _ Governed
21 . . sent . _ _ _ _ _ _ _ _ _"""
# Separator used when a token lists multiple heads/labels, e.g. '9|13'.
sep_deps_list = "|"
# Placeholder marking a token without a syntactic head.
no_parent = "_"
# Sentinel returned by get_path() when two nodes are not connected.
NO_PATH = ("","","")
# Toggle for debug printing throughout this module.
verbose = False
# Comment-line markers of the frame annotations embedded in the CoNLL files.
frame_prefix = "# Frame "
role_prefix = "# "
framename_prefix = '"'
role_key_value_sep = ":"
# A frame annotation: its name plus the list of parsed roles.
Frame = namedtuple('Frame', 'name roles')
# A role: its name, the joined lexical-unit string and the token ids it spans.
Role = namedtuple('Role', 'name lu ids')
# One CoNLL token: id, surface form, lemma, POS, dependency label(s), head id(s).
Dependency = namedtuple('Dependency', 'src token lemma pos dep dst')
def file_split(fpath, delim='\n\n', bufsize=1024):
    """Lazily yield `delim`-separated records from a UTF-8 text file.

    Reads the file in `bufsize` chunks so arbitrarily large files can be
    streamed. BUG FIX: the original only split each chunk on its own, so a
    delimiter that straddled two reads was never detected and adjacent
    records were silently merged; the carry is now re-split after every read.
    """
    with codecs.open(fpath, "r", "utf-8") as handle:
        pending = ''
        while True:
            chunk = handle.read(bufsize)
            if not chunk:
                break
            pending += chunk
            pieces = pending.split(delim)
            # the last piece may be incomplete -- keep it for the next read
            pending = pieces.pop()
            for piece in pieces:
                yield piece
        if pending:
            yield pending
def parse_role(role_string, deps):
    """Parse a comment line like '# FEE: 5' or '# Claimant: 6, 7' into a Role.

    `deps` maps token ids to Dependency records; the role's lexical unit is
    the space-joined lemmas of the listed token ids. Returns None for lines
    that are not (valid) roles.
    """
    if not role_string.startswith(role_prefix):
        return None  # not a role
    try:
        role_name, dst_ids = role_string[len(role_prefix):].split(role_key_value_sep)
        role_name = role_name.strip()
        dst_ids = [int(dst_id) for dst_id in dst_ids.split(",")]
        dst = " ".join([deps[dst_id].lemma for dst_id in dst_ids])
        return Role(role_name, dst, dst_ids)
    except Exception:
        # narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate;
        # malformed ids or missing tokens are treated as "not a role"
        if verbose: print(format_exc())
        return None
def parse_sentence(sentence_str):
    """ Gets a CoNLL sentence with comments
    and returns a list of frames. """
    # token id -> Dependency for every well-formed (>= 10 column) token line
    deps = defaultdict()
    comment_str = ""
    for line in sentence_str.split("\n"):
        if line.startswith("# "):
            if len(line) > 1: comment_str += line + "\n"
        else:
            fields = line.split("\t")
            if len(fields) >= 10:
                dep = parse_dep(fields)
                deps[dep.src] = dep
            else:
                if verbose: print("Bad line:", line)
    # Every '# Frame "Name"' comment opens one frame annotation; the role
    # comment lines that follow it are parsed into Role records.
    frames = []
    for frame_str in comment_str.split(frame_prefix):
        frame_lines = frame_str.split("\n")
        if not frame_lines[0].startswith(framename_prefix):
            continue
        frame_name = frame_lines[0].replace('"','')
        del frame_lines[0]
        frame = Frame(frame_name, list())
        for fl in frame_lines:
            if fl.startswith(role_prefix):
                role = parse_role(fl, deps)
                if role is not None:
                    frame.roles.append(role)
                else:
                    if verbose: print("Bad role:", fl)
        frames.append(frame)
    return frames, deps
def parse_dep(fields):
    """Build a Dependency record from the columns of one CoNLL token line.

    Columns used: 0=id, 1=token, 2=lemma, 3=POS, 8=head id(s), 9=label(s).
    Raises ValueError for lines with fewer than 10 columns. (The original
    returned `Dependency()` with no arguments there, which itself raised
    TypeError -- callers already guard on the column count, so this path is
    only reached on programming errors.)
    """
    if len(fields) < 10:
        print("Bad dependency:", fields)
        raise ValueError("CoNLL token line with fewer than 10 columns: %r" % (fields,))
    return Dependency(
        src = int(fields[0]),
        token = fields[1],
        lemma = fields[2],
        pos = fields[3],
        dst = fields[8],
        dep = fields[9])
def extract_frames(conll_dir):
    """Parse all *.conll files in `conll_dir`; return the frames grouped by
    name plus FEE-to-role and role-to-role dependency-path counters."""
    conll_fpaths = glob(join(conll_dir,"*.conll"))
    # frame name -> every parsed Frame instance carrying that name
    name2frame = defaultdict(lambda: list())
    paths_fee2role = Counter()
    paths_role2role = Counter()
    sentences_total = 0
    frames_total = 0
    # NOTE(review): `file_num` is read after the loop, so an empty input
    # directory raises NameError -- confirm callers guarantee input files.
    for file_num, conll_fpath in enumerate(conll_fpaths):
        if verbose: print("\n", "="*50, "\n", conll_fpath, "\n")
        for sent_num, sent_str in enumerate(file_split(conll_fpath)):
            frames, deps = parse_sentence(sent_str)
            for f in frames: name2frame[f.name].append(f)
            fee2role, role2role = get_syn_paths(frames, deps)
            paths_fee2role.update(fee2role)
            paths_role2role.update(role2role)
            frames_total += len(frames)
            sentences_total += 1
    print("Sentences total:", sentences_total)
    print("Files total:", file_num + 1)
    print("Frames total:", frames_total)
    return name2frame, paths_fee2role, paths_role2role
def aggregate_frames(name2frame, output_fpath):
    """Count lexical-unit realisations per (frame, role) and write the table."""
    frame2role2lu = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    roles_count = 0
    for frame_list in name2frame.values():
        for frame in frame_list:
            for role in frame.roles:
                # update the per-(frame, role, lexical-unit) count
                frame2role2lu[frame.name][role.name][role.lu] += 1
                roles_count += 1
    print("Roles count:", roles_count)
    save_frame2role2lu(frame2role2lu, output_fpath)
def save_frame2role2lu(frame2role2lu, output_fpath):
    """Write one 'frame<TAB>role<TAB>lu:count, ...' line per (frame, role).

    Lexical units are listed most frequent first; ',' and ':' inside a
    lexical unit are replaced by spaces so they cannot break the format.
    """
    with codecs.open(output_fpath, "w", "utf-8") as out:
        uniq_roles = 0
        for frame in frame2role2lu:
            for role in frame2role2lu[frame]:
                try:
                    lus = sorted(frame2role2lu[frame][role].items(), key=operator.itemgetter(1), reverse=True)
                    uniq_roles += 1
                    out.write(u"{}\t{}\t{}\n".format(
                        frame,
                        role,
                        u", ".join(u"{}:{}".format(
                            lu.replace(",", " ").replace(":", " "), count) for lu, count in lus)))
                except Exception:
                    # BUG FIX: the original bare `except:` printed `lus`, which
                    # is undefined when sorted() itself raised, masking the
                    # real error with a NameError; also narrowed the handler.
                    print("Bad entry:", frame, role)
                    print(format_exc())
    print("Uniq. roles count:", uniq_roles)
    print("Output:", output_fpath)
def get_path(g, src, dst):
    """ Returns a list of labels between the src
    and the dst nodes. """
    try:
        path = nx.shortest_path(g, src, dst)
    except NetworkXNoPath:
        # the graph is directed, so also try the opposite direction
        try:
            if verbose: print("Warning: trying inverse path.")
            path = nx.shortest_path(g, dst, src)
        except NetworkXNoPath:
            return NO_PATH
    # annotate each hop as (source label, edge label, target label)
    labeled_path = []
    for i, src in enumerate(path):
        dst_index = i + 1
        if dst_index >= len(path): break
        dst = path[dst_index]
        labeled_path.append((
            g.nodes[src]["label"],
            g[src][dst]["label"],
            g.nodes[dst]["label"]))
    # three parallel renderings: lemmas only, labels only, and interleaved
    words = "-->".join([s for s, t, d in labeled_path])
    deps = "-->".join([t for s, t, d in labeled_path])
    deps_n_words = ":".join(["%s--%s--%s" % (s, t,d ) for s, t, d in labeled_path])
    return words, deps, deps_n_words
def build_nx_graph(list_of_nodes, list_of_edges):
    """Build a labelled DiGraph from (id, label) nodes and (src, dst, label)
    edge triples."""
    graph = nx.DiGraph()
    for node_id, node_label in list_of_nodes:
        graph.add_node(node_id, label=node_label)
    for src_id, dst_id, edge_label in list_of_edges:
        graph.add_edge(src_id, dst_id, label=edge_label)
    return graph
def build_graph(deps):
    """Convert a {token id: Dependency} map into a labelled dependency graph.

    Tokens without a head ('_') become isolated nodes; a token with several
    heads ('9|13' with labels 'a|b') contributes one labelled edge per head.
    """
    nodes, edges = [], []
    for dep in deps.values():
        nodes.append((dep.src, dep.lemma))
        if dep.dst == no_parent or dep.dep == no_parent:
            continue
        head_ids = [int(head) for head in dep.dst.split(sep_deps_list)]
        head_labels = dep.dep.split(sep_deps_list)
        for head_id, head_label in zip(head_ids, head_labels):
            edges.append((dep.src, head_id, head_label))
    return build_nx_graph(nodes, edges)
def get_syn_paths(frames, deps):
    """ Returns shortest paths between FEE and roles and
    roles in the parse trees. """
    # NOTE(review): the debug printing below is not guarded by `verbose` --
    # confirm this is intended.
    fee2role = Counter()
    role2role = Counter()
    for frame in frames:
        # split the frame's roles into the frame-evoking element and the rest
        fee_ids = []
        role_ids_list = []
        for role in frame.roles:
            if role.name == "FEE":
                fee_ids = role.ids
            else:
                role_ids_list.append(role.ids)
        g = build_graph(deps)
        print("="*50, "\n", "fee2role","\n")
        # count the dependency-label path (path[1]) from every FEE token to
        # every token of every other role
        for fee_id in fee_ids:
            for role_id_list in role_ids_list:
                for role_id in role_id_list:
                    if fee_id != role_id:
                        path = get_path(g, fee_id, role_id)
                        if path != NO_PATH:
                            print("\t".join(path), "\n")
                            fee2role.update({path[1]:1})
        print("\n","="*50, "\n", "role2role","\n")
        # and between tokens belonging to two different roles
        for role_id_list_i in role_ids_list:
            for role_id_list_j in role_ids_list:
                if role_id_list_i == role_id_list_j: continue
                for role_id_i in role_id_list_i:
                    for role_id_j in role_id_list_j:
                        if role_id_i == role_id_j: continue
                        path = get_path(g, role_id_i, role_id_j)
                        if path != NO_PATH:
                            print("\t".join(path), "\n")
                            role2role.update({path[1]:1})
    return fee2role, role2role
def save_path_stats(x2role, output_fpath):
    """Write 'path<TAB>frequency' lines for paths seen more than twice,
    most frequent first."""
    ranked = sorted(x2role.items(), key=operator.itemgetter(1), reverse=True)
    with codecs.open(output_fpath, "w", "utf-8") as out:
        for path, freq in ranked:
            if freq > 2:
                out.write("{}\t{}\n".format(path, freq))
    print("Output:", output_fpath)
def run(conll_dir, output_dir):
    """Extract frames from the CoNLL directory and write the result tables."""
    name2frame, fee2role, role2role = extract_frames(conll_dir)
    # paths between the frame-evoking element and its role tokens
    save_path_stats(fee2role, join(output_dir, "fee2role.csv"))
    # paths between tokens of two different roles
    save_path_stats(role2role, join(output_dir, "role2role.csv"))
    # per-(frame, role) lexical-unit counts
    aggregate_frames(name2frame, join(output_dir, "slots.csv"))
def main():
    """CLI entry point: extract the role-induction evaluation dataset."""
    argp = argparse.ArgumentParser(description='Extracts an evaluation dataset for role induction'
                                               'from the conll framenet files.')
    argp.add_argument('conll_dir', help='Directory with the .conll files (dep.parsed framenet).')
    argp.add_argument('output_dir', help='Output directory.')
    opts = argp.parse_args()
    print("Input: ", opts.conll_dir)
    print("Output: ", opts.output_dir)
    run(opts.conll_dir, opts.output_dir)
|
uhh-lt/triframes | fi/hosg/cluster_triples.py | #!/usr/bin/python3
import sys
import os
import gzip
import numpy as np
import gensim
import logging
from sklearn.cluster import AffinityPropagation, KMeans
from sklearn.decomposition import PCA
import matplotlib
matplotlib.use('Agg')
import pylab as plot
def embed(contexts, vec_matrix, cluster_labels, goldclusters=None):
    """Project vectors to 2-D with PCA and save a scatter plot to 'pca.png'.

    Colour encodes the predicted cluster, marker shape the gold cluster
    (all-zero when no gold clustering is given). Returns the 2-D coordinates.
    """
    if goldclusters is None:
        goldclusters = [0] * len(cluster_labels)
    embedding = PCA(n_components=2)
    y = embedding.fit_transform(vec_matrix)
    xpositions = y[:, 0]
    ypositions = y[:, 1]
    plot.clf()
    colors = ['black', 'cyan', 'red', 'lime', 'brown', 'yellow', 'magenta', 'goldenrod', 'navy', 'purple', 'silver']
    markers = ['.', 'o', '*', '+', 'x', 'D']
    for label, x, y, cluster_label, goldcluster in zip(contexts, xpositions, ypositions, cluster_labels, goldclusters):
        plot.scatter(x, y, 20, marker=markers[int(float(goldcluster))], color=colors[cluster_label])
        # plot.annotate(label, xy=(x, y), size='small', color=colors[cluster_label])
    # hide both axes' ticks and labels
    plot.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
    plot.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
    plot.legend(loc='best')
    plot.savefig('pca.png', dpi=300)
    plot.close()
    plot.clf()
    return y
# Configure logging for the gensim loaders.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# CLI: cluster_triples.py <directory with embedding matrices + counts.gz> <k>
directory2process = sys.argv[1]
n_clusters = int(sys.argv[2])

contextfile = os.path.join(directory2process, 'context-matrix')
wordfile = os.path.join(directory2process, 'word-matrix')
relationfile = os.path.join(directory2process, 'relation-matrix')
triplesfile = os.path.join(directory2process, 'counts.gz')

contextmodel = gensim.models.KeyedVectors.load_word2vec_format(contextfile, binary=False)
wordmodel = gensim.models.KeyedVectors.load_word2vec_format(wordfile, binary=False)
relationmodel = gensim.models.KeyedVectors.load_word2vec_format(relationfile, binary=False)

# Unique triples from columns 1-3 of the counts file (column 0 is the count).
triples = set()
with gzip.open(triplesfile) as triplesdata:
    for line in triplesdata:
        res = line.decode('utf-8').strip().split()
        triples.add((res[1], res[2], res[3]))
triples = list(triples)

# One matrix row per embeddable triple: concatenated word|context|relation
# vectors. BUG FIX: the original pre-allocated len(triples) rows but skipped
# out-of-vocabulary triples, so the tail of the matrix kept uninitialised
# garbage that was clustered, and `zip(triples, predicted)` misaligned the
# labels with the triples. We now remember exactly which triples were
# embedded and trim the matrix to the filled rows.
matrix = np.empty((len(triples), wordmodel.vector_size * 3))
embedded_triples = []
missing = 0
for triple in triples:
    (word, context, relation) = triple
    if word in wordmodel and context in contextmodel and relation in relationmodel:
        conc_vector = np.concatenate((wordmodel[word], contextmodel[context], relationmodel[relation]))
    else:
        missing += 1
        continue
    matrix[len(embedded_triples), :] = conc_vector
    embedded_triples.append(triple)
matrix = matrix[:len(embedded_triples), :]
print('Skipped %d triples' % missing, file=sys.stderr)
print('Final matrix shape:', matrix.shape, file=sys.stderr)

clustering = KMeans(n_clusters=n_clusters, n_init=20, n_jobs=8).fit(matrix)
predicted = clustering.labels_.tolist()

# Aggregate each cluster's members into predicate/subject/object lemma sets
# (the downstream unpacking assumes (predicate, object, subject) order).
clusters = {}
for triple, p_cluster in zip(embedded_triples, predicted):
    (predicate, obj, subject) = triple
    if str(p_cluster) not in clusters:
        clusters[str(p_cluster)] = {"predicates": set(), 'subjects': set(), 'objects': set()}
    clusters[str(p_cluster)]['predicates'].add(predicate.split('#')[0])
    clusters[str(p_cluster)]['subjects'].add(subject.split('#')[0])
    clusters[str(p_cluster)]['objects'].add(obj.split('#')[0])

for cluster in sorted(clusters):
    print('\n# Cluster %s \n' % cluster)
    print('Predicates:', ', '.join(clusters[cluster]['predicates']))
    print('Subjects:', ', '.join(clusters[cluster]['subjects']))
    print('Objects:', ', '.join(clusters[cluster]['objects']))

# embed(embedded_triples, matrix, predicted)
|
uhh-lt/triframes | fi/preprocessing/prune_svo_triples.py | from __future__ import print_function
import gzip
from fi.exact_full.exact_full import skip, generalize_by_pos
import codecs
from traceback import format_exc
def prune_word_features(svo_fpath, strict=True, generalize=True, verbose=False):
    """Filter a gzipped 'verb<TAB>subj#POS_obj#POS<TAB>freq' file.

    Writes the surviving triples (with POS-generalised arguments) to
    '<svo_fpath>-pruned-strictN.csv'. Unparseable lines are skipped; set
    `verbose` to see them. (`generalize` is accepted for interface
    compatibility but is not consulted, matching the original.)
    """
    output_fpath = svo_fpath + "-pruned-strict{}.csv".format(int(strict))
    with gzip.open(svo_fpath, "rb") as svo_bytes, codecs.open(output_fpath, "w", "utf-8") as out:
        svo = codecs.getreader("utf-8")(svo_bytes)
        for i, line in enumerate(svo):
            try:
                verb, subj_obj, freq = line.split("\t")
                subj, obj = subj_obj.split("_")
                subj_lemma, subj_pos = subj.split("#")
                obj_lemma, obj_pos = obj.split("#")
                freq = float(freq)
                if skip(subj_lemma, subj_pos, obj_lemma, obj_pos, not strict):
                    if verbose: print("Skipping:", line.strip())
                    continue
                out.write(u"{}\t{}\t{}\t{}\n".format(
                    verb,
                    generalize_by_pos(subj_lemma, subj_pos),
                    generalize_by_pos(obj_lemma, obj_pos),
                    freq))
            except:
                if verbose:
                    print(line.strip())
                    print(format_exc())
    print("Output:", output_fpath)
# Produce both the strict and the lenient pruning of the same input file.
svo_fpath = "wf.csv.gz" # e.g. "ltcpu3:/home/panchenko/tmp/verbs/frames/2.6t/wf.csv.gz"
prune_word_features(svo_fpath, strict=True, generalize=True, verbose=False)
prune_word_features(svo_fpath, strict=False, generalize=True, verbose=False)
|
uhh-lt/triframes | triclustering.py | <reponame>uhh-lt/triframes
#!/usr/bin/env python
import argparse
from collections import defaultdict
import numpy as np
from sklearn.cluster import KMeans, SpectralClustering, DBSCAN
from utils import word_vectors, triples, grouper, words_vec
# Pronouns and other function words excluded from the embedding vocabulary.
STOP = {'i', 'he', 'she', 'it', 'they', 'you', 'this', 'we', 'them', 'their', 'us', 'my', 'those', 'who', 'what',
        'that', 'which', 'each', 'some', 'me', 'one', 'the'}
# Supported clustering backends, selected with --method.
METHODS = {
    'kmeans': KMeans,
    'spectral': SpectralClustering,
    'dbscan': DBSCAN
}
parser = argparse.ArgumentParser()
parser.add_argument('--min-weight', type=float, default=1000.)
parser.add_argument('--method', choices=METHODS.keys(), default='kmeans')
parser.add_argument('-k', type=int, default=10)
parser.add_argument('triples', type=argparse.FileType('r', encoding='UTF-8'))
group = parser.add_mutually_exclusive_group()
group.add_argument('--w2v', default=None, type=argparse.FileType('rb'))
group.add_argument('--pyro', default=None, type=str)
args = parser.parse_args()

w2v = word_vectors(args, lambda args: parser.error('Please set the --w2v or --pyro option.'))

# Load the triples and fetch embeddings for their vocabulary in batches of
# 512 words; keep only triples whose three parts all have a vector.
spos, _ = triples(args.triples, min_weight=args.min_weight, build_index=False)
vocabulary = {word for triple in spos for word in (triple.subject, triple.predicate, triple.object)} - STOP
vectors = {}
for words in grouper(vocabulary, 512):
    vectors.update(words_vec(w2v, words))
spos = [triple for triple in spos if
        triple.subject in vectors and triple.predicate in vectors and triple.object in vectors]

# One row per triple: concatenated subject|predicate|object embeddings.
X, index2triple = np.empty((len(spos), w2v.vector_size * 3), 'float32'), {}
for i, triple in enumerate(spos):
    X[i] = np.concatenate((vectors[triple.subject], vectors[triple.predicate], vectors[triple.object]))
    index2triple[i] = triple

# DBSCAN determines the number of clusters itself; the others need k.
clustering = METHODS[args.method](n_jobs=-2) if args.method == 'dbscan' else METHODS[args.method](n_clusters=args.k, n_jobs=-2)
clusters = clustering.fit_predict(X)

# Group triples by predicted label and print one frame per cluster.
frames = defaultdict(set)
for i, cluster in enumerate(clusters):
    frames[int(cluster)].add(index2triple[i])
for label, cluster in frames.items():
    print('# Cluster %d\n' % label)
    predicates = {triple.predicate for triple in cluster}
    subjects = {triple.subject for triple in cluster}
    objects = {triple.object for triple in cluster}
    print('Predicates: %s' % ', '.join(predicates))
    print('Subjects: %s' % ', '.join(subjects))
    print('Objects: %s\n' % ', '.join(objects))
|
uhh-lt/triframes | fi/hosg/generate_pairs_from_counts.py | #!/usr/bin/python3
import sys
def main():
    """Read '<count> <verb> <obj> <subject>' rows from stdin and print each
    triple once per (scaled) count unit; malformed rows are counted as errors
    and reported on stderr."""
    scale = True
    errors = 0
    for line in sys.stdin:
        fields = line.strip().split()
        if len(fields) != 4:
            errors += 1
            continue
        frequency, verb, obj, subject = fields
        frequency = int(frequency)
        if scale:
            # Downscale by an order of magnitude to keep the output manageable.
            frequency = int(frequency / 10)
        for _ in range(frequency):
            print(verb, obj, subject)
    print('Errors:', errors, file=sys.stderr)


if __name__ == '__main__':
    main()
|
uhh-lt/triframes | fi/eval/extract_framenet_verb_clusters.py | # Use framenet to generate the lexical unit clusters of frames
from __future__ import print_function
import argparse
import codecs
from collections import defaultdict
import sys
import csv
from src.builder import *
from src.ecg_utilities import ECGUtilities as utils
# from src.valence_data import *
from src.hypothesize_constructions import *
from scripts import *
# Module-wide switch for debug printing in the helpers below.
verbose = False
def load_framenet(data_path):
    """Build the framenet object from a data directory laid out as
    frame/, frRelation.xml and lu/.

    Returns:
        (fn, fnb): the loaded framenet with relations and the type system
        built, and the builder that produced it.
    """
    frame_path = data_path + "frame/"
    relation_path = data_path + "frRelation.xml"
    lu_path = data_path + "lu/"
    fnb = FramenetBuilder(frame_path, relation_path, lu_path)
    fn = fnb.read()
    fn.build_relations()
    fn.build_typesystem()
    return fn, fnb
def add_children(lus, f_target, f):
    """Recursively union the lexical units of all descendant frames of `f`
    into lus[f_target.name].

    NOTE(review): reads a module-global `fn` (the loaded framenet) that is
    not defined at module level in this file -- verify the caller binds it
    globally before use.
    """
    if verbose: print(f.name, ">>>", end="")
    if len(f.children) > 0:
        for c in f.children:
            fc = fn.get_frame(c)
            lus[f_target.name] = lus[f_target.name].union(set(fc.lexicalUnits))
            add_children(lus, f_target, fc)
def get_verbs(lu_set):
    """Keep only the verb lexical units, i.e. names containing '.v'."""
    return {lu for lu in lu_set if ".v" in unicode(lu)}
def get_framenet_clusters(use_children=False, verbs_only=False):
    """Collect one lexical-unit cluster per frame.

    Args:
        use_children: also merge in the LUs of all descendant frames.
        verbs_only: keep only '.v' lexical units.

    Returns:
        dict: frame name -> set of lexical units.

    NOTE(review): iterates over a module-global `fn` that is not defined at
    module level in this file -- verify the caller binds it globally.
    """
    lus = defaultdict(lambda: set())
    lus_count = 0  # total number of LUs over all frames, for the summary print
    for i, f in enumerate(fn.frames):
        lus[f.name] = lus[f.name].union(set(f.lexicalUnits))
        if use_children:
            if verbose: print("\n")
            add_children(lus, f, f)
        if verbs_only:
            lus[f.name] = get_verbs(lus[f.name])
        lus_count += len(lus[f.name])
    print(len(lus), lus_count)
    return lus
def save_framenet_clusters(framenet_clusters, output_fpath):
    """Write the frame clusters as TSV: cluster id, size, comma-separated LUs.

    Args:
        framenet_clusters: mapping frame name -> set of lexical units.
        output_fpath: destination path of the TSV file.
    """
    with codecs.open(output_fpath, "w", "utf-8") as out:
        out.write("cid\tsize\tcluster\n")
        for f in framenet_clusters:
            fc = ", ".join([unicode(x) for x in framenet_clusters[f]])
            # Bug fix: the original called .format() on a %-style template
            # ("%s\t%s\t%s\n"), which has no {} placeholders and therefore
            # wrote the literal template instead of the values.
            out.write("{}\t{}\t{}\n".format(f, len(framenet_clusters[f]), fc))
    print("Output:", output_fpath)
def run(framenet_dir, output_dir):
    """Build and save four variants of framenet verb clusters: with and
    without descendant frames, each with and without a verbs-only filter.

    NOTE(review): `fn` and `fnb` are bound locally here, but
    get_framenet_clusters()/add_children() read a module-global `fn`; as
    written this would raise NameError unless `fn` is set globally -- verify.
    """
    fn, fnb = load_framenet(framenet_dir)
    save_framenet_clusters(
        get_framenet_clusters(use_children=False),
        join(output_dir,"lus-wo-ch.csv"))
    save_framenet_clusters(
        get_framenet_clusters(use_children=False, verbs_only=True),
        join(output_dir, "lus-wo-ch-verbs.csv"))
    save_framenet_clusters(
        get_framenet_clusters(use_children=True),
        join(output_dir, "lus-with-ch-r.csv"))
    save_framenet_clusters(
        get_framenet_clusters(use_children=True, verbs_only=True),
        join(output_dir, "lus-with-ch-r-verbs.csv"))
def main():
    """CLI entry point: parse the input/output paths and run the extraction."""
    parser = argparse.ArgumentParser(description='Extracts verb clusters from the framenet.')
    parser.add_argument('framenet_dir', help='Directory with the framenet files.')
    parser.add_argument('output_dir', help='Output directory.')
    args = parser.parse_args()
    for label, value in (("Input: ", args.framenet_dir), ("Output: ", args.output_dir)):
        print(label, value)
    run(args.framenet_dir, args.output_dir)


if __name__ == '__main__':
    main()
|
uhh-lt/triframes | fi/exact_full/exact_full.py | import codecs
from pandas import read_csv
from jnt.pcz.sense_clusters import SenseClusters
from time import time
import operator
from traceback import format_exc
import sys
# POS/lemma stoplists used by skip() below.
SKIP_POS = set(["-lrb-", "-LRB-", "-RRB-","-rrb-", "-lsb-", "-LSB-", "-rsb-", "-RSB-", "SYM", "SY"])  # bracket/symbol tags
FUNCTIONAL_LEMMAS = set(["whom","that","which","where","what"])  # relative/interrogative words
FUNCTIONAL_POS = set(["PRP","PR","FW","DT","DET","CC","CONJ","CD","NUM","WH","WP"])  # closed-class tags
# Overly generic hypernyms, not useful as labels.
SKIP_HYPERS = set(["form","member","condition","task","service","sector", "topic", "source","part","modification","feature", "thing", "object", "", "stuff", "item","issue", "information", "medium", "tag", "work","material"])
min_common = 20  # minimal number of shared features to accept a related verb sense
verbose = False  # enable per-feature warning output
max_nn = 5  # related senses considered per verb sense
err_f_num = 0  # running count of skipped/bad features (updated in parse_features)
f_num = 0  # running count of all features seen (updated in parse_features)
def skip(subj_lemma, subj_pos, obj_lemma, obj_pos, one_stopword_is_ok=False):
    """ Returns true if the triple should be skipped on the basis
    of the fact that a lemma/pos of its subject/object is in a stoplist. """
    # A slot is POS-filtered when its lemma (the lemma column sometimes holds
    # a POS tag) or its POS is in the skip list.
    pos_filtered = (
        subj_lemma in SKIP_POS or subj_pos in SKIP_POS,
        obj_lemma in SKIP_POS or obj_pos in SKIP_POS,
    )
    lemma_filtered = (
        subj_lemma in FUNCTIONAL_LEMMAS or subj_pos in FUNCTIONAL_POS,
        obj_lemma in FUNCTIONAL_LEMMAS or obj_pos in FUNCTIONAL_POS,
    )
    # With one_stopword_is_ok, BOTH slots must be filtered to skip the triple;
    # otherwise a single filtered slot suffices.
    combine = all if one_stopword_is_ok else any
    return combine(pos_filtered) or combine(lemma_filtered)
def generalize_by_pos(lemma, pos):
    """ Takes a lemma and a POS and generalizes it if possible. """
    if pos in ("PRP", "PR", "FW"):
        # Pronoun-like tokens: 'it'/'its' denote things, the rest persons.
        result = "THING" if lemma.lower() in ("it", "its") else "PERSON"
    elif pos in ("DT", "DET"):
        result = "DET"
    elif pos in ("CC", "CONJ"):
        result = "CONJ"
    elif pos in ("CD", "NU", "NUM"):
        result = "NUM"
    else:
        result = lemma
    # 'who' always refers to a person, regardless of its POS tag.
    return "PERSON" if lemma == "who" else result
def parse_subj_obj(f):
    """ Performs parsing of a string like 'it#PR_woman#NN:10.0'. """
    subj_obj, score_str = f.split(":")
    subj, obj = subj_obj.split("_")
    subj_lemma, subj_pos = subj.split("#")
    obj_lemma, obj_pos = obj.split("#")
    return subj_lemma, subj_pos, obj_lemma, obj_pos, float(score_str)
def parse_features(features_str):
    """ Parses a string of verb features e.g. at#WD_VALID#NN:5.0, at#WD_CENTER#NN:5.0
    into a structured representation (a set).

    Updates the module counters `f_num` (features seen) and `err_f_num`
    (features skipped or malformed).
    """
    global err_f_num
    global f_num
    features = [f.strip() for f in features_str.split(",")]
    res = set()
    for f in features:
        try:
            f_num += 1
            subj_lemma, subj_pos, obj_lemma, obj_pos, score = parse_subj_obj(f)
            if skip(subj_lemma, subj_pos, obj_lemma, obj_pos):
                if verbose: print("Warning: skipping feature '{}'".format(f))
                err_f_num += 1
                continue
            s = generalize_by_pos(subj_lemma, subj_pos)
            o = generalize_by_pos(obj_lemma, obj_pos)
            # Bug fix: the original added this pair twice; the second add was a
            # no-op on the set and has been removed.
            res.add(s + "_" + o)
        except Exception:  # narrowed from bare except so Ctrl-C still works
            if verbose: print("Warning: bad feature '{}'".format(f))
            err_f_num += 1
    return res
def featurize(output_fpath):
    """Write, for each verb sense shared by the inventory and the feature map,
    the related senses it was merged with and their common features.

    NOTE(review): reads module-global `sc` (sense inventory) and `features`
    (verb -> feature set); compute_exact_full() binds these as locals, so as
    written this would raise NameError -- verify the intended scope.
    """
    with codecs.open(output_fpath, "w", "utf-8") as out:
        inventory_voc = set(sc.data)
        features_voc = set(features)
        common_voc = inventory_voc.intersection(features_voc)
        print("Vocabulary of verbs (full and common with inv.):", len(features_voc), len(common_voc))
        for verb in common_voc:
            for sense_id in sc.data[verb]:
                verb_features = features[verb]
                # Seed the merged group with the verb sense itself.
                used_rverbs = [(verb + "#" + unicode(int(float(sense_id))), len(verb_features))]
                c = sc.data[verb][sense_id]["cluster"]
                i = 0
                # Walk the most similar related senses, intersecting features
                # until the overlap drops below min_common.
                for rverb_sense, _ in sorted(c.items(), key=operator.itemgetter(1), reverse=True)[:max_nn]:
                    try:
                        i += 1
                        rverb, pos, rsense_id = rverb_sense.split("#")
                        rverb_pos = rverb + "#VB"
                        if rverb_pos not in features:
                            rverb_pos = rverb.lower() + "#VB"
                        if rverb_pos not in features:
                            if verbose: print("Warning: related verb '{}' is OOV.".format(rverb_sense))
                            continue
                        rverb_features = features[rverb_pos]
                        common_features = verb_features.intersection(rverb_features)
                        if len(common_features) >= min_common:
                            used_rverbs.append((rverb_sense, len(common_features)))
                            verb_features = common_features
                        else:
                            break
                    except KeyboardInterrupt:
                        break
                    except:
                        if verbose: print("Warning: bad related verb '{}'".format(rverb_sense))
                        print(format_exc())
                # Emit only groups that actually merged at least one related sense.
                if i > 0 and len(used_rverbs) > 1 and len(verb_features) > 0:
                    out.write("{}\t{}\n".format(
                        ", ".join("{}:{}".format(w, f) for w, f in used_rverbs),
                        ", ".join(verb_features)))
def compute_exact_full(wf_w_fpath, pcz_fpath, output_fpath):
    """End-to-end pipeline: load verb features and a sense inventory, then
    write the featurized verb senses to `output_fpath`.

    Args:
        wf_w_fpath: TSV with 'verb<TAB>features' rows.
        pcz_fpath: path to the sense-cluster inventory.
        output_fpath: destination of the featurized output.
    """
    # Bug fix: featurize() reads `sc` and `features` as module globals, but the
    # original bound them as locals here, leaving featurize() with a NameError.
    global sc, features
    tic = time()
    wfw = read_csv(wf_w_fpath, sep="\t", encoding="utf-8", names=["verb", "features"])
    sc = SenseClusters(pcz_fpath, strip_dst_senses=False, load_sim=True)
    features = {row.verb: parse_features(row.features) for i, row in wfw.iterrows()}
    print("Loaded features in", time() - tic, "sec.")  # typo fix: "featrues"
    print("Number of skipped features:", err_f_num, "of", f_num)
    tic = time()
    featurize(output_fpath)
    print("Featurized in", time() - tic, "sec.")
    sys.stdout.flush()
# example:
#compute_exact_full(
# "/home/panchenko/tmp/verbs/2.6t/wf-w.csv.gz",
# "/home/panchenko/tmp/originalinventories/wiki-n30.csv.gz",
# "/home/panchenko/tmp/verbs/exact-full.txt")
|
uhh-lt/triframes | fi/preprocessing/find_verbs_lmi.py | from __future__ import print_function
import gzip
from time import time
import codecs
from os.path import join
from traceback import format_exc
import re
def find_verbs(lmi_fpath, voc=()):
    """Filter a gzipped LMI file, keeping only rows whose word column is a
    verb (optionally a phrasal verb), and write them next to the input.

    Args:
        lmi_fpath: path to the gzipped input with 'word<TAB>feature<TAB>score' rows.
        voc: optional vocabulary; when non-empty, only single-token words whose
            lemma (text before '#') is in `voc` are considered.
            (Default changed from a mutable [] to an immutable tuple.)

    Returns:
        The path of the file the verb rows were written to.
    """
    use_voc = "" if len(voc) == 0 else ".voc"
    output_fpath = lmi_fpath + use_voc + ".verbs.csv"
    wfc_num = 0
    verbs_num = 0
    tic = time()
    # A verb, optionally followed by a particle/adverb/preposition (phrasal verbs).
    re_verb = re.compile(r"^[a-zA-Z]+#VB( +[a-zA-Z]+#(VB|RP|RB|IN))?$")
    # Bug fix: open in text mode so lines are str, matching the str regex and
    # "\t" splitting below (binary mode made every line hit the except branch
    # on Python 3).
    with gzip.open(lmi_fpath, "rt") as lmi, codecs.open(output_fpath, "w") as out:
        for line in lmi:
            try:
                wfc_num += 1
                word, feature, score = line.split("\t")
                word = word.strip()
                if len(voc) > 0 and (" " in word or word.split("#")[0] not in voc):
                    continue
                if not re_verb.match(word):
                    continue
                out.write(line)
                verbs_num += 1
            except Exception:
                # Bug fix: the original passed the template and the line as two
                # print() arguments, so '{}' was never substituted.
                print("Warning: bad line '{}'".format(line))
                print(format_exc())
    print("Time: {} sec.".format(time() - tic))
    print("Found verbs:", verbs_num, "of", wfc_num)
    print("Input:", lmi_fpath)
    print("Output:", output_fpath)
    return output_fpath
# Driver: first restrict to a small demo vocabulary, then process the full file.
# NOTE(review): these calls run on import; wrap in `if __name__ == '__main__':`
# if this module is ever imported -- verify intent.
lmi_fpath = "lmi-culwg-coarse.csv.gz"
voc = ["run", "jog", "train", "execute", "launch", "eat", "consume", "chew"]
find_verbs(lmi_fpath, voc=voc)
find_verbs(lmi_fpath)
|
uhh-lt/triframes | graph_embeddings/remove_duplicates.py | <reponame>uhh-lt/triframes
import pandas as pd
# I/O paths for the deduplication step.
filename_input = "full.csv"
filename_output = "full_clean.csv"

# Read the raw triples: tab-separated, no header row.
df = pd.read_csv(filename_input, delimiter="\t", header=None)
df.columns = ['verb', 'subject', 'object', 'score']

# Sum the scores of identical (verb, subject, object) triples.
df['score'] = df.groupby(['verb', 'subject', 'object'])['score'].transform('sum')

# Keep a single row per triple, highest-scoring triples first.
df = df.drop_duplicates(subset=['verb', 'subject', 'object', 'score'])
df = df.sort_values(['score'], ascending=False)

# Bug fix: pass the separator by keyword; positional `sep` in DataFrame.to_csv
# is deprecated and removed in modern pandas.
df.to_csv(filename_output, sep="\t", header=False, index=False)
uhh-lt/triframes | fi/exact_full/build_global_graph.py | from __future__ import print_function
from pandas import read_csv
from traceback import format_exc
import codecs
def build_graph(preframes_fpath, output_fpath):
    """Convert pre-frame rows into an undirected sense graph.

    Each input row's 'cluster' column lists 'sense:similarity' entries; the
    first sense is linked to every other sense with the given (integer)
    similarity, and each edge is written in both directions.

    Args:
        preframes_fpath: TSV with columns 'cluster' and 'features'.
        output_fpath: destination for 'sense<TAB>sense<TAB>sim' edge rows.
    """
    df = read_csv(preframes_fpath, sep="\t", encoding='utf8', names=["cluster","features"])
    with codecs.open(output_fpath, "w", "utf-8") as out:
        for i, row in df.iterrows():
            try:
                senses = [s.strip() for s in row.cluster.split(",")]
                if len(senses) <= 1: continue  # no edges from a singleton
                sense_0, _ = senses[0].split(":")
                for sense_sim_j in senses[1:]:
                    sense_j, sim_0j = sense_sim_j.split(":")
                    out.write("{}\t{}\t{}\n".format(sense_0, sense_j, int(sim_0j)))
                    # Bug fix: the reverse edge was written without a trailing
                    # newline, gluing it to the next record.
                    out.write("{}\t{}\t{}\n".format(sense_j, sense_0, int(sim_0j)))
            except Exception:
                print(format_exc())
    print("Output graph:", output_fpath)
# Bug fix: the original referenced `preframes_fpath` inside the call's own
# keyword-argument list, which raises a NameError; bind the path first.
PREFRAMES_FPATH = "exact-full.txt"  # e.g. tmp/verbs/exact-full.txt
build_graph(
    preframes_fpath=PREFRAMES_FPATH,
    output_fpath=PREFRAMES_FPATH + ".graph")
|
uhh-lt/triframes | fi/eval/generate_triples.py | <filename>fi/eval/generate_triples.py
#!/usr/bin/env python
from __future__ import print_function
import argparse
from pandas import read_csv
from traceback import format_exc
from itertools import combinations, product
import codecs
# Module-wide switch for verbose warning output in the parsers below.
verbose = False
def extract_verb_lus(lus_str):
    """Extract the single-token verb lexical units (names containing '.v')
    from a comma-separated LU string; returns an empty set on bad input."""
    try:
        verbs = set()
        for raw in lus_str.split(","):
            try:
                candidate = raw.strip()
                if " " not in candidate and ".v" in candidate:
                    verbs.add(candidate)
            except:
                if verbose: print(format_exc())
        return verbs
    except:
        if verbose: print(lus_str)
        return set()
def extract_lus(lus_str):
    """Parse 'name:freq' pairs from a comma-separated string into a dict,
    skipping multi-word names and reporting malformed entries."""
    frequencies = {}
    for entry in lus_str.split(","):
        try:
            name, freq = entry.split(":")
            name = name.strip()
            if " " in name: continue
            frequencies[name] = int(freq)
        except:
            print(entry)
    return frequencies
def count_lus(lus_str):
    """Return the total frequency over all single-word 'name:freq' entries in
    a comma-separated LU string; malformed entries are printed and skipped.

    Improvement over the original: the unused name->freq dict it also built
    has been removed, and the bare except is narrowed to ValueError (raised by
    both the split unpacking and int()).
    """
    total_freq = 0
    for lu in lus_str.split(","):
        try:
            lu_name, lu_freq = lu.split(":")
            lu_name = lu_name.strip()
            if " " in lu_name: continue  # skip multi-word lexical units
            total_freq += int(lu_freq)
        except ValueError:
            print(lu)
    return total_freq
def generate_triples(roles, frame2lus, min_lus_size = 10):
    """Emit SVO triples for every frame: each verb LU combined with the
    cross-product of the LUs of the frame's two most frequent non-FEE roles.

    NOTE(review): writes to a module-global `output_fpath` that is not a
    parameter and not defined in this module -- verify where it is bound.
    NOTE(review): `frame_roles.keys()[0]` indexes a dict view, which only
    works on Python 2 -- verify the target interpreter.
    """
    # Consider only roles with sufficiently many lexical units.
    top_roles = roles[roles.lus_size >= min_lus_size]
    with codecs.open(output_fpath, "w", "utf-8") as out:
        frame_num = 0
        frame_num_written = 0
        svo_triples_num = 0
        for frame_name, role_groups in top_roles.groupby("frame"):
            frame_num += 1
            if len(role_groups) < 3: continue  # need FEE plus two other roles
            # Save two most frequent roles
            frame_roles = {}
            for i, role in role_groups.iterrows():
                if role.role != "FEE" and len(frame_roles) < 2:
                    frame_roles[role.role] = role.lus_bow
            if len(frame_roles) != 2: continue
            label = u"\n# {}: {} FEE {}".format(frame_name,
                frame_roles.keys()[0], frame_roles.keys()[1])
            # Generate the triples from all roles plus the verb cluster
            fees = frame2lus.get(frame_name, set())
            if len(fees) == 0: continue
            out.write("{}\n".format(label))
            frame_num_written += 1
            for verb in fees:
                v = verb.replace(".v", "")
                for role1, role2 in combinations(frame_roles.keys(), 2):
                    role1_lus = frame_roles[role1]
                    role2_lus = frame_roles[role2]
                    for role1_lu, role2_lu in product(role1_lus, role2_lus):
                        out.write(u"{}\t{}\t{}\n".format(role1_lu, v, role2_lu))
                        svo_triples_num += 1
            #if frame_num > 10: break
    print("Number of frames:", frame_num)
    print("Number of written frames:", frame_num_written)
    print("Number of written triples:", svo_triples_num)
    print("Output:", output_fpath)
def run(roles_fpath, verbs_fpath, output_fpath, min_lus_size = 10):
    """Load the parsed framenet verb clusters and roles, then generate triples.

    NOTE(review): generate_triples() writes to a module-global `output_fpath`;
    the `output_fpath` argument here is never passed through -- verify.
    """
    # Load verbs
    verbs = read_csv(verbs_fpath, sep="\t", encoding="utf-8")
    verbs["lus_bow"] = verbs.lus.apply(extract_verb_lus)
    frame2lus = {row.frame: row.lus_bow for i, row in verbs.iterrows() if len(row.lus_bow) > 0}
    # Load roles
    roles = read_csv(roles_fpath, sep="\t", encoding="utf-8", names=["frame", "role", "lus"])
    roles["lus_size"] = roles.lus.apply(count_lus)
    roles["lus_bow"] = roles.lus.apply(extract_lus)
    roles = roles.sort_values(["frame","lus_size"], ascending=False)
    # Generate triples from verbs and roles
    generate_triples(roles, frame2lus, min_lus_size)
def main():
    """CLI entry point: load roles and verb clusters, then emit SVO triples."""
    parser = argparse.ArgumentParser(description='Generates SVO triples from the framenet data.')
    parser.add_argument('roles_fpath', help='Path to a CSV file with the parsed framenet roles.')
    parser.add_argument('verbs_fpath', help='Path to a CSV file with the parsed framenet verb clusters.')
    parser.add_argument('output_fpath', help='Output file with the triples.')
    parser.add_argument('min_lus_size', help='Minimum size of the LU of the roles.', type=int)
    args = parser.parse_args()
    for label, value in (("Roles:", args.roles_fpath),
                         ("Verbs:", args.verbs_fpath),
                         ("Output:", args.output_fpath),
                         ("Min LUs size:", args.min_lus_size)):
        print(label, value)
    run (args.roles_fpath, args.verbs_fpath, args.output_fpath, args.min_lus_size)


if __name__ == '__main__':
    main()
# Example files on ltcpu3
# roles_fpath = "/home/panchenko/verbs/fi/fi/eval/output/slots.csv"
# verbs_fpath = "/home/panchenko/verbs/fi/fi/eval/lus-without-children.tsv"
|
uhh-lt/triframes | graph_embeddings/convert.py | <filename>graph_embeddings/convert.py<gh_stars>1-10
import pickle
import sys
import argparse
if __name__ == "__main__":
    # Replace numeric node ids in an embedding file with their
    # "(verb subject object)" triples, using a pickled id->triple mapping.
    parser = argparse.ArgumentParser(description='Replace numerical ids with triples')
    parser.add_argument('--inputfile', default='input.emb', help='Input embedding')
    parser.add_argument('--outputfile', default='output.emb', help='Output embedding')
    parser.add_argument('--dict', default='id_to_name.pkl', help='pkl file with dictionary')
    args = parser.parse_args()
    # id -> {'verb': ..., 'subject': ..., 'object': ...} mapping pickled beforehand.
    with open(args.dict, 'rb') as f:
        pkl = pickle.load(f)
    with open(args.inputfile) as f, open(args.outputfile, "w") as f1:
        #copy first line with dimensions
        f1.write(f.readline())
        for line in f:
            node_index = line.split(" ")[0]
            # NOTE(review): node_index is a str here; this assumes the pickled
            # keys are strings too, and that every id is present (a missing key
            # makes pkl.get return None and the next line raise) -- verify
            # against the producer of the pkl file.
            triple = pkl.get(node_index)
            output_str = "(" + triple['verb'] + " " + triple['subject'] + " " + triple['object'] + ")"
            # Keep the rest of the original line (the vector values) untouched.
            f1.write(output_str + line[len(node_index):])
|
uhh-lt/triframes | fi/hosg/counts2vocab.py | <filename>fi/hosg/counts2vocab.py<gh_stars>1-10
#!/usr/bin/python3
import gzip
from collections import Counter
from docopt import docopt
from representations.matrix_serializer import save_count_vocabulary
def main():
    """Count word/context/relation frequencies in a gzipped counts file and
    save the three frequency-sorted vocabularies next to the input file."""
    args = docopt("""
    Usage:
        counts2pmi.py <counts>
    """)
    counts_path = args['<counts>']
    words = Counter()
    contexts = Counter()
    relations = Counter()
    with gzip.open(counts_path) as f:
        for line in f:
            split = line.decode('utf-8').strip().split()
            if len(split) == 4:
                count, word, context, relation = split
            else:
                # Three-column rows carry no relation; counted under key None.
                count, word, context = split
                relation = None
            count = int(count)
            words[word] += count
            contexts[context] += count
            relations[relation] += count
    # Sort each vocabulary by frequency, most frequent first.
    words = sorted(list(words.items()), key=lambda x_y: x_y[1], reverse=True)
    contexts = sorted(list(contexts.items()), key=lambda x_y1: x_y1[1], reverse=True)
    relations = sorted(list(relations.items()), key=lambda x_y2: x_y2[1], reverse=True)
    save_count_vocabulary(counts_path + '.words.vocab', words)
    save_count_vocabulary(counts_path + '.contexts.vocab', contexts)
    save_count_vocabulary(counts_path + '.relations.vocab', relations)


if __name__ == '__main__':
    main()
|
uhh-lt/triframes | triw2v.py | #!/usr/bin/env python
import argparse
import sys
from collections import Counter
import faiss
import networkx as nx
import numpy as np
from chinese_whispers import chinese_whispers, aggregate_clusters
from utils import triples, grouper, word_vectors, words_vec
# Command-line interface: kNN size, minimal triple weight, optional pickle dump
# of the graph, the triple file, and one word-vector source (--w2v or --pyro).
parser = argparse.ArgumentParser()
parser.add_argument('--neighbors', '-n', type=int, default=10)
parser.add_argument('--min-weight', type=float, default=0.)
parser.add_argument('--pickle', type=argparse.FileType('wb'))
parser.add_argument('triples', type=argparse.FileType('r', encoding='UTF-8'))
group = parser.add_mutually_exclusive_group()
group.add_argument('--w2v', default=None, type=argparse.FileType('rb'))
group.add_argument('--pyro', default=None, type=str)
args = parser.parse_args()
w2v = word_vectors(args, lambda args: parser.error('Please set the --w2v or --pyro option.'))
spos, _ = triples(args.triples, min_weight=args.min_weight, build_index=False)
vocabulary = {word for triple in spos for word in (triple.subject, triple.predicate, triple.object)}
# Fetch vectors in batches; keep only triples fully covered by the vocabulary.
vectors = {}
for words in grouper(vocabulary, 512):
    vectors.update(words_vec(w2v, words))
spos = [triple for triple in spos if
        triple.subject in vectors and triple.predicate in vectors and triple.object in vectors]
# One matrix row per triple: subject, predicate and object vectors concatenated.
# noinspection PyUnboundLocalVariable
X, index2triple = np.empty((len(spos), w2v.vector_size * 3), 'float32'), {}
for i, triple in enumerate(spos):
    X[i] = np.concatenate((vectors[triple.subject], vectors[triple.predicate], vectors[triple.object]))
    index2triple[i] = (triple.subject, triple.predicate, triple.object)
# Exact inner-product kNN index over all triple vectors.
knn = faiss.IndexFlatIP(X.shape[1])
knn.add(X)
# Build the kNN graph in batches of 2048 query rows.
# NOTE(review): the loop variable `slice` shadows the builtin of the same name.
G, maximal_distance = nx.Graph(), -1
for slice in grouper(range(X.shape[0]), 2048):
    slice = [j for j in slice if j is not None]  # the last batch may be padded with None
    # +1 neighbor because each row retrieves itself as its own nearest match.
    D, I = knn.search(X[slice, :], args.neighbors + 1)
    last = min(slice)
    print('%d / %d' % (last, X.shape[0]), file=sys.stderr)
    for i, (_D, _I) in enumerate(zip(D, I)):
        source = index2triple[last + i]
        words = Counter()
        for d, j in zip(_D.ravel(), _I.ravel()):
            if last + i != j:  # drop the self-match
                words[index2triple[j]] = float(d)
        for target, distance in words.most_common(args.neighbors):
            G.add_edge(source, target, weight=distance)
            maximal_distance = distance if distance > maximal_distance else maximal_distance
# Rescale all edge weights relative to the maximal similarity found.
for _, _, d in G.edges(data=True):
    d['weight'] = maximal_distance / d['weight']
# With --pickle, dump the weighted edge list and stop before clustering.
if args.pickle is not None:
    import pickle
    pickle.dump(list(G.edges(data=True)), args.pickle, protocol=3)
    sys.exit(0)
# Cluster the triple graph with Chinese Whispers and print each frame.
chinese_whispers(G, weighting='top', iterations=20)
clusters = aggregate_clusters(G)  # NOTE(review): unused; recomputed in the loop below
for label, cluster in sorted(aggregate_clusters(G).items(), key=lambda e: len(e[1]), reverse=True):
    print('# Cluster %d\n' % label)
    predicates = {predicate for predicate, _, _ in cluster}
    subjects = {subject for _, subject, _ in cluster}
    objects = {object for _, _, object in cluster}
    print('Predicates: %s' % ', '.join(predicates))
    print('Subjects: %s' % ', '.join(subjects))
    print('Objects: %s\n' % ', '.join(objects))
|
uhh-lt/triframes | fi/eval/extract_xml_framenet_roles.py | #!/usr/bin/env python
from __future__ import print_function
from glob import glob
import argparse
import xml.etree.ElementTree as et
from collections import defaultdict
from os.path import join
import codecs
from traceback import format_exc
from extract_conll_framenet_roles import save_frame2role2lu
# Illustrative excerpt of the framenet full-text XML this module parses; kept
# only as in-file documentation of the expected element/attribute layout
# (sentence -> text, annotationSet with frameID/frameName, FE layer labels).
sample_xml = """
<sentence corpID="246" docID="25616" sentNo="1" paragNo="1" aPos="0" ID="4164976">
<text>Forest fires continue to rage in Spain</text>
<annotationSet cDate="07/17/2014 11:43:11 PDT Thu" status="UNANN" ID="6669146">
<layer rank="1" name="PENN">
<label end="5" start="0" name="nn"/>
<label end="11" start="7" name="nns"/>
<label end="20" start="13" name="VVP"/>
<label end="23" start="22" name="to"/>
<label end="28" start="25" name="VV"/>
<label end="31" start="30" name="in"/>
<label end="37" start="33" name="NP"/>
</layer>
<layer rank="1" name="NER">
<label end="37" start="33" name="location"/>
</layer>
<layer rank="1" name="WSL">
<label end="23" start="22" name="NT"/>
<label end="31" start="30" name="NT"/>
<label end="37" start="33" name="NT"/>
</layer>
</annotationSet>
<annotationSet cDate="07/17/2014 01:15:28 PDT Thu" luID="17455" luName="fire.n" frameID="2824" frameName="Fire_burning" status="MANUAL" ID="6669245">
<layer rank="1" name="Target">
<label cBy="WMcQ" end="11" start="7" name="Target"/>
</layer>
<layer rank="1" name="FE">
<label cBy="WMcQ" feID="16020" bgColor="800080" fgColor="FFFFFF" end="5" start="0" name="Fuel"/>
<label cBy="WMcQ" feID="16018" bgColor="0000FF" fgColor="FFFFFF" end="11" start="7" name="Fire"/>
</layer>
<layer rank="1" name="GF">
<label end="5" start="0" name="Dep"/>
</layer>
<layer rank="1" name="PT">
<label end="5" start="0" name="N"/>
</layer>
<layer rank="1" name="Other"/>
<layer rank="1" name="Sent"/>
<layer rank="1" name="Noun"/>
</annotationSet>
</sentence>
"""
def extact_frame2role2lu(xml_dir, output_dir, verbose = False):
    """ Count frame - role - lu frequencies in the sentences.

    Walks every *.xml file under `xml_dir`, and for each annotated sentence
    counts the frame-evoking element under the role 'FEE' and each frame
    element's surface string under its role name.

    Returns:
        nested defaultdict: frame name -> role name -> LU string -> count.

    NOTE(review): `output_dir` is accepted but never used here.
    """
    xml_fpaths = join(xml_dir, "*xml")
    frame2role2lu = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for i, text_fpath in enumerate(glob(xml_fpaths)):
        if verbose: print(text_fpath)
        tree = et.parse(text_fpath)
        root = tree.getroot()
        for child in root:
            if child.tag.endswith("sentence"):
                text = ""
                for gchild in child:
                    if gchild.tag.endswith("text"):
                        # Sentence surface text; role fillers are sliced out of it.
                        text = gchild.text
                        if verbose: print("="*50, "\n", text)
                    elif gchild.tag.endswith("annotationSet") and "frameID" in gchild.attrib:
                        fee = gchild.attrib["luName"]
                        frame_id = gchild.attrib["frameID"]
                        frame_name = gchild.attrib["frameName"]
                        # The frame-evoking element is stored under the pseudo-role FEE.
                        frame2role2lu[frame_name]["FEE"][fee] += 1
                        if verbose: print("\n>>>",fee, frame_id, frame_name)
                        for ggchild in gchild:
                            if ggchild.tag.endswith("layer") and ggchild.attrib["name"] == "FE":
                                for gggchild in ggchild:
                                    if gggchild.tag.endswith("label") and "end" in gggchild.attrib:
                                        # Character offsets are inclusive; +1 for slicing.
                                        role_beg = int(gggchild.attrib["start"])
                                        role_end = int(gggchild.attrib["end"]) + 1
                                        role_name = gggchild.attrib["name"]
                                        role_lu = text[role_beg:role_end]
                                        if verbose: print(role_name, ":", role_lu)
                                        frame2role2lu[frame_name][role_name][role_lu] += 1
    return frame2role2lu
def run(xml_dir, output_fpath):
    """ Gets the path to the framenet root directory and output the csv file with role lus. """
    # NOTE(review): output_fpath is passed into the (unused) output_dir
    # parameter of extact_frame2role2lu; only save_frame2role2lu writes to it.
    frame2role2lu = extact_frame2role2lu(xml_dir, output_fpath)
    save_frame2role2lu(frame2role2lu, output_fpath)
def main():
    """CLI entry point for the XML framenet role extraction."""
    parser = argparse.ArgumentParser(description='Extracts an evaluation dataset for role induction'
                                     'from the xml framenet files.')
    parser.add_argument('xml_dir', help='Directory with the xml files of framenet.')
    parser.add_argument('output_fpath', help='Output directory.')
    args = parser.parse_args()
    for label, value in (("Input: ", args.xml_dir), ("Output: ", args.output_fpath)):
        print(label, value)
    run(args.xml_dir, args.output_fpath)


if __name__ == '__main__':
    main()
|
uhh-lt/triframes | fi/eval/extract_common_svo_triples.py | <filename>fi/eval/extract_common_svo_triples.py
from __future__ import print_function
import argparse
import gzip
from os.path import join
# Module-wide switch for per-line warning output in the loaders/filters below.
verbose = False
def load_framenet_svo(framenet_svo_fpath):
    """ Load the framenet svo: SVO + comment lines """
    fn_svo = set()
    with gzip.open(framenet_svo_fpath, "rt", encoding="utf-8") as fn:
        for i, line in enumerate(fn):
            try:
                if i % 1000000 == 0: print(i / 1000000, "million of svo triples")
                # Skip comment lines and lines too short to hold a triple.
                if line.startswith("# ") or len(line) < 5: continue
                subject, verb, obj = line.split("\t")
                fn_svo.add((subject, verb, obj.strip()))
            except KeyboardInterrupt:
                break
            except:
                if verbose: print("Bad line:", line)
    return fn_svo
def extract_lemma(lemma_pos):
    """Strip the '#POS' suffix from 'lemma#POS' and lowercase the lemma."""
    word, _tag = lemma_pos.split("#")
    return word.lower()
def load_depcc_svo(depcc_svo_fpath):
    """Load the depcc svo in the format 'help#VB you#PR_I#PR 72040.0' """
    dep_svo = set()
    with gzip.open(depcc_svo_fpath, "rt", encoding="utf-8") as dep:
        for i, line in enumerate(dep):
            try:
                # '_' joins the subject/object pair; rewriting it as a tab
                # yields four tab-separated columns.
                fields = line.replace("_", "\t")
                if i % 1000000 == 0: print(i / 1000000, "million of svo triples")
                v_pos, s_pos, o_pos, freq = fields.split("\t")
                dep_svo.add((extract_lemma(s_pos), extract_lemma(v_pos), extract_lemma(o_pos)))
            except KeyboardInterrupt:
                break
            except:
                if verbose: print("Bad line:", line)
    return dep_svo
def filter_depcc_svo(depcc_svo_fpath, output_fpath, common_svo):
    """Filter the depcc svo in the format 'help#VB you#PR_I#PR 72040.0'
    according to the common triples with the framenet. """
    with gzip.open(depcc_svo_fpath, "rt", encoding="utf-8") as dep_in, gzip.open(output_fpath, "wt", encoding="utf-8") as dep_out:
        svo_num = 0
        common_svo_num = 0
        for i, line in enumerate(dep_in):
            try:
                # '_' joins the subject/object pair; rewriting it as a tab
                # yields four tab-separated columns.
                line = line.replace("_", "\t")
                if i % 1000000 == 0: print(i / 1000000, "million of svo triples")
                v_pos, s_pos, o_pos, freq = line.split("\t")
                v = extract_lemma(v_pos)
                s = extract_lemma(s_pos)
                o = extract_lemma(o_pos)
                svo_num += 1
                if (s,v,o) in common_svo:
                    # NOTE: the rewritten (tab-separated) line is written out,
                    # not the original '_'-joined one.
                    dep_out.write(line)
                    common_svo_num += 1
            except KeyboardInterrupt:
                break
            except:
                if verbose: print("Bad line:", line)
    print("Number of frames svo triples:", svo_num)
    print("Number of common frame + dep svo triples:", common_svo_num)
    print("Output:", output_fpath)
def filter_framenet_svo(framenet_svo_fpath, output_fpath, dep_svo):
    """Filter the framenet SVO file (SVO rows plus '# ' comment lines),
    copying comments verbatim and keeping only the triples also present in
    `dep_svo`.

    Args:
        framenet_svo_fpath: gzipped input with 's<TAB>v<TAB>o' rows.
        output_fpath: gzipped output path.
        dep_svo: set of (subject, verb, object) triples to keep.
    """
    with gzip.open(framenet_svo_fpath, "rt", encoding="utf-8") as fn_in, gzip.open(output_fpath, "wt", encoding="utf-8") as fn_out:
        svo_num = 0
        common_svo_num = 0
        for i, line in enumerate(fn_in):
            try:
                if i % 1000000 == 0: print(i / 1000000, "million of svo triples")
                if line.startswith("# ") or len(line) < 5:
                    fn_out.write(line)
                    # Bug fix: the original fell through here and also tried to
                    # split the comment/short line as a triple.
                    continue
                s, v, o = line.split("\t")
                o = o.strip()
                svo_num += 1
                if (s,v,o) in dep_svo:
                    fn_out.write(line)
                    common_svo_num += 1
            except KeyboardInterrupt:
                break
            except:
                if verbose: print("Bad line:", line)
    print("Number of frames svo triples:", svo_num)
    print("Number of common frame + dep svo triples:", common_svo_num)
    print("Output:", output_fpath)
def run(framenet_svo_fpath, depcc_svo_fpath, output_dir):
    """Intersect the framenet and depcc triple sets and write both filtered
    files into `output_dir`."""
    # Load triples
    fn_svo = load_framenet_svo(framenet_svo_fpath)
    dep_svo = load_depcc_svo(depcc_svo_fpath)
    # Make the intersection
    common_svo = fn_svo.intersection(dep_svo)
    print("Number of common svo triples:", len(common_svo))
    # Save the intersection
    output_fn_fpath = join(output_dir, "framenet-common-triples.tsv.gz")
    filter_framenet_svo(framenet_svo_fpath, output_fn_fpath, common_svo)
    output_depcc_fpath = join(output_dir, "depcc-common-triples.tsv.gz")
    filter_depcc_svo(depcc_svo_fpath, output_depcc_fpath, common_svo)
def main():
    """CLI entry point: intersect the framenet and depcc SVO triple files."""
    parser = argparse.ArgumentParser(description='Extracts common SVO triples: framenet + depcc. ')
    parser.add_argument('framenet_svo_fpath', help='Path to the framenet SVO triples.')
    parser.add_argument('depcc_svo_fpath', help='Path the the depcc SVO triples.')
    parser.add_argument('output_dir', help='Output directory.')
    args = parser.parse_args()
    for label, value in (("Framenet SVOs:", args.framenet_svo_fpath),
                         ("DepCC SVOs:", args.depcc_svo_fpath),
                         ("Output:", args.output_dir)):
        print(label, value)
    run(args.framenet_svo_fpath, args.depcc_svo_fpath, args.output_dir)


if __name__ == '__main__':
    main()
# Example paths of ltcpu3
# framenet_svo_fpath = "/home/panchenko/verbs/fi/fi/eval/framenet-triples-full-lus1.tsv.gz"
# depcc_svo_fpath = "/home/panchenko/tmp/verbs/frames/2.6t/vso-26m.csv.gz"
|
xiay-nv/bazel-compilation-database | clang-tidy.bzl | <filename>clang-tidy.bzl
# Copyright 2020 NVIDIA, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(":aspects.bzl", "compilation_database_aspect")
# Shell template run by the test: symlink the generated compilation database
# into place, then invoke clang-tidy on the listed sources; extra CLI
# arguments are forwarded via "$@".
_clang_tidy_script = """\
#!/bin/bash
pwd
cmd="ln -sf {compdb_file} compile_commands.json"
echo $cmd
eval $cmd
cmd="clang-tidy {options} $@ {sources}"
echo $cmd
eval $cmd
"""
def _clang_tidy_check_impl(ctx):
    """Implementation of clang_tidy_test: generates a shell script that runs
    clang-tidy over the sources of `src`, wired to the compilation database
    produced by compilation_database_aspect."""
    compdb_file = ctx.attr.src[OutputGroupInfo].compdb_file.to_list()[0]
    src_files = ctx.attr.src[OutputGroupInfo].source_files.to_list()
    hdr_files = ctx.attr.src[OutputGroupInfo].header_files.to_list()
    if len(src_files) == 0:
        # Nothing to lint: either fail (mandatory) or emit a no-op test script.
        if ctx.attr.mandatory:
            fail("`src` must be a target with at least one source or header.")
        else:
            test_script = ctx.actions.declare_file(ctx.attr.name + ".sh")
            ctx.actions.write(output = test_script, content = "#noop", is_executable = True)
            return DefaultInfo(executable = test_script)
    sources = " ".join([ src.short_path for src in src_files ])
    # NOTE(review): build_path is computed and passed to format(), but the
    # template above has no {build_path} placeholder -- verify it is still needed.
    build_path = compdb_file.dirname.replace(compdb_file.root.path + "/", "")
    options = " ".join(ctx.attr.options)
    content = _clang_tidy_script.format(
        compdb_file = compdb_file.short_path,
        build_path = build_path,
        sources = sources,
        options = options,
    )
    test_script = ctx.actions.declare_file(ctx.attr.name + ".sh")
    ctx.actions.write(output = test_script, content = content, is_executable = True)
    # The test needs the sources, headers and the compdb at runtime.
    runfiles = src_files + hdr_files + [compdb_file]
    if ctx.attr.config != None:
        files = ctx.attr.config.files.to_list()
        if len(files) != 1:
            fail("`config` attribute in rule `clang_tidy_test` must be single file/target")
        runfiles.append(files[0])
    return DefaultInfo(
        files = depset([test_script, compdb_file]),
        runfiles = ctx.runfiles(files = runfiles),
        executable = test_script,
    )
# Test rule: runs clang-tidy over the sources of `src`, using the compilation
# database collected by compilation_database_aspect.
clang_tidy_test = rule(
    attrs = {
        "src": attr.label(
            aspects = [compilation_database_aspect],
            doc = "Source target to run clang-tidy on.",
        ),
        "mandatory": attr.bool(
            default = False,
            doc = "Throw error if `src` is not eligible for linter check, e.g. have no C/C++ source or header.",
        ),
        "config": attr.label(
            doc = "Clang tidy configuration file",
            allow_single_file = True,
        ),
        "options": attr.string_list(
            doc = "options given to clang-tidy",
        )
    },
    test = True,
    implementation = _clang_tidy_check_impl,
)
|
thinkmoore/blight | test/test_util.py | <gh_stars>0
import pytest
from blight import util
from blight.actions import Record
from blight.exceptions import BlightError
def test_die():
    """util.die must abort the process by raising SystemExit."""
    message = ":("
    with pytest.raises(SystemExit):
        util.die(message)
def test_rindex():
    """util.rindex returns the LAST index of a value, or None when absent."""
    haystack = [1, 1, 2, 3, 4, 5]
    assert util.rindex(haystack, 1) == 1
    assert util.rindex(haystack, 5) == 5
    assert util.rindex(haystack, 6) is None
def test_load_actions(monkeypatch):
    """A configured action is instantiated with its parsed key=value config."""
    monkeypatch.setenv("BLIGHT_ACTIONS", "Record")
    monkeypatch.setenv("BLIGHT_ACTION_RECORD", "key=value")
    actions = util.load_actions()
    assert len(actions) == 1
    action = actions[0]
    assert type(action) is Record
    assert action._config == {"key": "value"}
def test_load_actions_nonexistent(monkeypatch):
    """Requesting an action class that does not exist must raise BlightError."""
    monkeypatch.setenv("BLIGHT_ACTIONS", "ThisActionDoesNotExist")
    with pytest.raises(BlightError):
        util.load_actions()
def test_load_actions_empty_config(monkeypatch):
    """An action listed without a BLIGHT_ACTION_* variable gets an empty config."""
    monkeypatch.setenv("BLIGHT_ACTIONS", "Record")
    actions = util.load_actions()
    assert len(actions) == 1
    assert actions[0].__class__ == Record
    assert actions[0]._config == {}
|
thinkmoore/blight | src/blight/exceptions.py | <gh_stars>0
"""
Exceptions for blight.
"""
class BlightError(ValueError):
    """
    The root of blight's exception hierarchy; raised whenever an internal
    condition isn't met.
    """

    # NOTE: the redundant `pass` after the docstring was removed; the
    # docstring alone is a valid class body.
class BuildError(BlightError):
    """
    Raised whenever a wrapped tool fails.
    """

    # NOTE: the redundant `pass` after the docstring was removed; the
    # docstring alone is a valid class body.
|
thinkmoore/blight | src/blight/protocols.py | <reponame>thinkmoore/blight
"""
Substructural typing protocols for blight.
These are, generally speaking, an implementation detail.
"""
from typing import Dict, List
from typing_extensions import Protocol
from blight.enums import Lang
class ArgsProtocol(Protocol):
    """Structural type for any object that exposes its command-line arguments."""

    @property
    def args(self) -> List[str]:
        ... # pragma: no cover
class LangProtocol(ArgsProtocol, Protocol):
    """An `ArgsProtocol` that additionally reports its source language (`Lang`)."""

    @property
    def lang(self) -> Lang:
        ... # pragma: no cover
class IndexedUndefinesProtocol(ArgsProtocol, Protocol):
    """An `ArgsProtocol` that exposes a name -> int mapping of "undefines".

    NOTE(review): presumably maps undefined-symbol names to their index in
    `args` — confirm against the implementing tool classes.
    """

    @property
    def indexed_undefines(self) -> Dict[str, int]:
        ... # pragma: no cover
|
thinkmoore/blight | src/blight/actions/benchmark.py | <reponame>thinkmoore/blight<filename>src/blight/actions/benchmark.py
"""
The `Benchmark` action.
"""
import json
import time
from pathlib import Path
from blight.action import Action
from blight.util import flock_append
class Benchmark(Action):
    """
    Measures the wall-clock runtime of each tool run and appends one JSON
    record (`{"tool": ..., "elapsed": ...}`) to the file named by this
    action's `output` configuration key.
    """

    def before_run(self, tool):
        # Monotonic clock: the delta is immune to system clock adjustments.
        self._t0_ns = time.monotonic_ns()

    def after_run(self, tool):
        # ns // 1000 == microseconds elapsed.
        elapsed_us = (time.monotonic_ns() - self._t0_ns) // 1000
        record = {"tool": tool.asdict(), "elapsed": elapsed_us}
        dest = Path(self._config["output"])
        with flock_append(dest) as sink:
            print(json.dumps(record), file=sink)
|
thinkmoore/blight | src/blight/actions/record.py | <filename>src/blight/actions/record.py
"""
The `Record` action.
"""
import json
from pathlib import Path
from blight.action import Action
from blight.util import flock_append
class Record(Action):
    """
    Logs each tool invocation, one JSON line per run, to the file named by
    this action's `output` configuration key.
    """

    def before_run(self, tool):
        dest = Path(self._config["output"])
        with flock_append(dest) as sink:
            print(json.dumps(tool.asdict()), file=sink)
|
thinkmoore/blight | src/blight/action.py | <gh_stars>0
"""
The different actions supported by blight.
"""
import blight.tool
class Action:
    """
    A generic action, run with every tool (both before and after the tool's
    execution).
    """

    def __init__(self, config):
        self._config = config

    def _should_run_on(self, tool):
        # The base action applies to every tool; subclasses narrow this.
        return True

    def _before_run(self, tool):
        if self._should_run_on(tool):
            self.before_run(tool)

    def before_run(self, tool):  # pragma: no cover
        """
        Invoked right before the underlying tool is run.

        Args:
            tool (`blight.tool.Tool`): The tool about to run
        """

    def _after_run(self, tool):
        if self._should_run_on(tool):
            self.after_run(tool)

    def after_run(self, tool):  # pragma: no cover
        """
        Invoked right after the underlying tool is run.

        Args:
            tool (`blight.tool.Tool`): The tool that just ran
        """
class CCAction(Action):
    """
    A `cc` action, run whenever the tool is a `blight.tool.CC` instance.
    """
    def _should_run_on(self, tool):
        # Restrict this action to C-compiler invocations only.
        return isinstance(tool, blight.tool.CC)
class CXXAction(Action):
    """
    A `c++` action, run whenever the tool is a `blight.tool.CXX` instance.
    """
    def _should_run_on(self, tool):
        # Restrict this action to C++-compiler invocations only.
        return isinstance(tool, blight.tool.CXX)
class CompilerAction(CCAction, CXXAction):
    """
    A generic compiler action, run whenever the tool is a `blight.tool.CC`
    or `blight.tool.CXX` instance.

    **NOTE:** Action writers should generally prefer this over `CCAction` and `CXXAction`,
    as messy builds may use `cc` to compile C++ sources (via `-x c`) and vice versa.
    """
    def _should_run_on(self, tool):
        # `isinstance` accepts a tuple of classes: one check instead of two
        # `or`-joined calls.
        return isinstance(tool, (blight.tool.CC, blight.tool.CXX))
class CPPAction(Action):
    """
    A `cpp` action, run whenever the tool is a `blight.tool.CPP` instance.
    """
    def _should_run_on(self, tool):
        # Restrict this action to C-preprocessor invocations only.
        return isinstance(tool, blight.tool.CPP)
class LDAction(Action):
    """
    An `ld` action, run whenever the tool is a `blight.tool.LD` instance.
    """
    def _should_run_on(self, tool):
        # Restrict this action to linker invocations only.
        return isinstance(tool, blight.tool.LD)
class ASAction(Action):
    """
    An `as` action, run whenever the tool is a `blight.tool.AS` instance.
    """
    def _should_run_on(self, tool):
        # Restrict this action to assembler invocations only.
        return isinstance(tool, blight.tool.AS)
|
thinkmoore/blight | src/blight/enums.py | <filename>src/blight/enums.py
"""
Enumerations for blight.
"""
import enum
class CompilerStage(enum.Enum):
    """
    Models the known stages that a compiler tool can be in.
    """

    # TODO(ww): Maybe handle -v, -###, --help, --help=..., etc.
    # NOTE: `enum.auto()` values depend on declaration order; append new
    # stages at the end to keep existing values stable.
    Preprocess = enum.auto()
    """
    Preprocess only (e.g., `cc -E`)
    """
    SyntaxOnly = enum.auto()
    """
    Preprocess, parse, and typecheck only (e.g., `cc -fsyntax-only`)
    """
    Assemble = enum.auto()
    """
    Compile to assembly but don't run the assembler (e.g., `cc -S`)
    """
    CompileObject = enum.auto()
    """
    Compile and assemble to an individual object (e.g. `cc -c`)
    """
    AllStages = enum.auto()
    """
    All stages, including the linker (e.g. `cc`)
    """
    Unknown = enum.auto()
    """
    An unknown or unqualified stage.
    """
class Lang(enum.Enum):
    """
    Models the known languages for a tool.
    """

    # TODO(ww): Maybe add each of the following:
    # * Asm (assembly)
    # * PreprocessedC (C that's already gone through the preprocessor)
    # * PreprocessedCxx (C++ that's already gone through the preprocessor)
    # NOTE: `enum.auto()` values depend on declaration order; append new
    # languages at the end to keep existing values stable.
    C = enum.auto()
    """
    The C programming language.
    """
    Cxx = enum.auto()
    """
    The C++ programming language.
    """
    Unknown = enum.auto()
    """
    An unknown language.
    """
class Std(enum.Enum):
    """
    Models the various language standards for a tool.
    """

    def is_unknown(self) -> bool:
        """
        Returns:
            `True` if the standard is unknown
        """
        # Tuple membership instead of building a throwaway list per call.
        return self in (
            Std.CUnknown,
            Std.CxxUnknown,
            Std.GnuUnknown,
            Std.GnuxxUnknown,
            Std.Unknown,
        )

    # NOTE: `enum.auto()` values depend on declaration order; append new
    # standards at the end to keep existing values stable.
    C89 = enum.auto()
    """
    C89 (a.k.a. C90, iso9899:1990)
    """
    C94 = enum.auto()
    """
    C94 (a.k.a. iso9899:199409)
    """
    C99 = enum.auto()
    """
    C99 (a.k.a. C9x, iso9899:1999, iso9899:199x)
    """
    C11 = enum.auto()
    """
    C11 (a.k.a. C1x, iso9899:2011)
    """
    C17 = enum.auto()
    """
    C17 (a.k.a. C18, iso9899:2017, iso9899:2018)
    """
    C2x = enum.auto()
    """
    C2x
    """
    Gnu89 = enum.auto()
    """
    GNU C89 (a.k.a. GNU C 90)
    """
    Gnu99 = enum.auto()
    """
    GNU C99 (a.k.a. GNU C 9x)
    """
    Gnu11 = enum.auto()
    """
    GNU C11 (a.k.a. GNU C1x)
    """
    Gnu17 = enum.auto()
    """
    GNU C17 (a.k.a. GNU C18)
    """
    Gnu2x = enum.auto()
    """
    GNU C2x
    """
    Cxx03 = enum.auto()
    """
    C++03 (a.k.a. C++98)
    """
    Cxx11 = enum.auto()
    """
    C++11 (a.k.a. C++0x)
    """
    Cxx14 = enum.auto()
    """
    C++14 (a.k.a. C++1y)
    """
    Cxx17 = enum.auto()
    """
    C++17 (a.k.a. C++1z)
    """
    Cxx2a = enum.auto()
    """
    C++2a
    """
    Gnuxx03 = enum.auto()
    """
    GNU C++03 (a.k.a. GNU C++98)
    """
    Gnuxx11 = enum.auto()
    """
    GNU C++11 (a.k.a. GNU C++0x)
    """
    Gnuxx14 = enum.auto()
    """
    GNU C++14 (a.k.a. GNU C++1y)
    """
    Gnuxx17 = enum.auto()
    """
    GNU C++17 (a.k.a. GNU C++1z)
    """
    Gnuxx2a = enum.auto()
    """
    GNU C++2a
    """
    CUnknown = enum.auto()
    """
    Standard C, but an unknown version.
    """
    CxxUnknown = enum.auto()
    """
    Standard C++, but an unknown version.
    """
    GnuUnknown = enum.auto()
    """
    GNU C, but an unknown version.
    """
    GnuxxUnknown = enum.auto()
    """
    GNU C++, but an unknown version.
    """
    Unknown = enum.auto()
    """
    A completely unknown language standard.
    """
class OptLevel(enum.Enum):
    """
    Models the known optimization levels.
    """

    def for_size(self) -> bool:
        """
        Returns:
            `True` if the optimization is for compiled size
        """
        # Tuple membership, consistent with the other predicates, instead of
        # two `==` comparisons joined by `or`.
        return self in (OptLevel.OSize, OptLevel.OSizeZ)

    def for_performance(self) -> bool:
        """
        Returns:
            `True` if the optimization is for performance
        """
        # Tuple instead of a throwaway list built on every call.
        return self in (OptLevel.O1, OptLevel.O2, OptLevel.O3, OptLevel.OFast)

    def for_debug(self) -> bool:
        """
        Returns:
            `True` if the optimization is for debugging experience
        """
        return self == OptLevel.ODebug

    # NOTE: `enum.auto()` values depend on declaration order; append new
    # levels at the end to keep existing values stable.
    O0 = enum.auto()
    """
    No optimizations.
    """
    O1 = enum.auto()
    """
    Minimal performance optimizations.
    """
    O2 = enum.auto()
    """
    Some performance optimizations.
    """
    O3 = enum.auto()
    """
    Aggressive performance optimizations.
    """
    OFast = enum.auto()
    """
    Aggressive, possibly standards-breaking performance optimizations.
    """
    OSize = enum.auto()
    """
    Size optimizations.
    """
    OSizeZ = enum.auto()
    """
    More aggressive size optimizations (Clang only).
    """
    ODebug = enum.auto()
    """
    Debugging experience optimizations.
    """
    Unknown = enum.auto()
    """
    An unknown optimization level.
    """
|
thinkmoore/blight | test/test_enums.py | from blight import enums
def test_optlevel_predictates():
    """Each OptLevel predicate accepts its own family and rejects others."""
    # NOTE: "predictates" is a typo for "predicates"; the name is kept so
    # any tooling selecting this test by name keeps working.
    assert enums.OptLevel.OSize.for_size()
    assert enums.OptLevel.OSizeZ.for_size()
    assert enums.OptLevel.O1.for_performance()
    assert enums.OptLevel.O2.for_performance()
    assert enums.OptLevel.O3.for_performance()
    assert enums.OptLevel.OFast.for_performance()
    assert enums.OptLevel.ODebug.for_debug()
    # Negative cases: the original test never checked that the predicates
    # reject levels outside their family.
    assert not enums.OptLevel.O0.for_size()
    assert not enums.OptLevel.OSize.for_performance()
    assert not enums.OptLevel.O1.for_debug()
|
thinkmoore/blight | test/conftest.py | import shutil
import pytest
@pytest.fixture(autouse=True)
def blight_env(monkeypatch):
    """Point every test at the system toolchain via BLIGHT_WRAPPED_* variables."""
    # NOTE(review): shutil.which() returns None when a tool is absent, and
    # monkeypatch.setenv(name, None) would then raise — this assumes a full
    # cc/c++/cpp/ld/as toolchain is installed; confirm for minimal CI images.
    monkeypatch.setenv("BLIGHT_WRAPPED_CC", shutil.which("cc"))
    monkeypatch.setenv("BLIGHT_WRAPPED_CXX", shutil.which("c++"))
    monkeypatch.setenv("BLIGHT_WRAPPED_CPP", shutil.which("cpp"))
    monkeypatch.setenv("BLIGHT_WRAPPED_LD", shutil.which("ld"))
    monkeypatch.setenv("BLIGHT_WRAPPED_AS", shutil.which("as"))
|
thinkmoore/blight | src/blight/util.py | """
Helper utilities for blight.
"""
import contextlib
import fcntl
import os
import shlex
import sys
from typing import Any, List, Optional, Sequence
from blight.exceptions import BlightError
def die(message):
    """
    Aborts the program with a final message.

    Args:
        message (str): The message to print
    """
    sys.stderr.write(f"Fatal: {message}\n")
    sys.exit(1)
def rindex(items: Sequence[Any], needle: Any) -> Optional[int]:
    """
    Args:
        items (sequence): The items to search
        needle (object): The object to search for

    Returns:
        The rightmost index of `needle`, or `None`.
    """
    # Walk the indices from the end; the first hit is the rightmost one.
    for pos in range(len(items) - 1, -1, -1):
        if items[pos] == needle:
            return pos
    return None
def rindex_prefix(items: Sequence[str], prefix: str) -> Optional[int]:
    """
    Args:
        items (sequence of str): The items to search
        prefix (str): The prefix to find

    Returns:
        The rightmost index of the element that starts with `prefix`, or `None`
    """
    # Walk the indices from the end; the first hit is the rightmost one.
    for pos in range(len(items) - 1, -1, -1):
        if items[pos].startswith(prefix):
            return pos
    return None
def insert_items_at_idx(parent_items: Sequence[Any], idx: int, items: Sequence[Any]) -> List[Any]:
    """
    Inserts `items` at `idx` in `parent_items`, before the element currently
    at that index.

    Args:
        parent_items (sequence of any): The parent sequence to insert within
        idx (int): The index to insert at
        items (sequence of any): The items to insert

    Returns:
        A new list containing both the parent and inserted items
    """
    # Fixes two defects in the previous generator-based version: a leftover
    # debug `print(item)` on every non-insertion element, and the silent
    # *dropping* of `parent_items[idx]` (the inserted items replaced it
    # instead of being spliced in before it, contradicting "inserts").
    result = list(parent_items)
    # Slice assignment splices `items` in before index `idx`; it also handles
    # idx >= len(parent_items) by appending (the old version dropped `items`
    # entirely in that case).
    result[idx:idx] = list(items)
    return result
@contextlib.contextmanager
def flock_append(filename):
    """
    Open the given file for appending, acquiring an exclusive lock on it in
    the process.

    Args:
        filename (str): The file to open for appending

    Yields:
        An open fileobject for `filename`
    """
    with open(filename, "a") as io:
        try:
            # LOCK_EX blocks until no other process holds the lock, so
            # concurrent tool runs append whole records without interleaving.
            fcntl.flock(io, fcntl.LOCK_EX)
            yield io
        finally:
            # Always release the lock, even if the caller's block raised.
            fcntl.flock(io, fcntl.LOCK_UN)
def load_actions():
    """
    Loads any blight actions requested via the environment.

    Each action is loaded from the `BLIGHT_ACTIONS` environment variable,
    separated by colons.

    For example, the following loads the `Record` and `Benchmark` actions:

    ```bash
    BLIGHT_ACTIONS="Record:Benchmark"
    ```

    Each action additionally receives a configuration dictionary from
    `BLIGHT_ACTION_{UPPERCASE_NAME}`. The contents of each of these variables
    is shell-quoted, in `key=value` format.

    For example, the following:

    ```bash
    BLIGHT_ACTION_RECORD="output=/tmp/whatever.jsonl"
    ```

    yields the following configuration dictionary:

    ```python
    {"output": "/tmp/whatever.jsonl"}
    ```

    Returns:
        A list of `blight.action.Action`s.
    """
    # NOTE(review): imported at call time rather than at module top,
    # presumably to avoid a circular import — confirm before hoisting.
    import blight.actions

    action_names = os.getenv("BLIGHT_ACTIONS")
    if action_names is None:
        return []

    actions = []
    for action_name in action_names.split(":"):
        action_class = getattr(blight.actions, action_name, None)
        if action_class is None:
            raise BlightError(f"Unknown action: {action_name}")

        action_config = os.getenv(f"BLIGHT_ACTION_{action_name.upper()}", None)
        if action_config is not None:
            action_config = shlex.split(action_config)
            # split("=", 1): only the *first* "=" separates key from value,
            # so values may themselves contain "=" (e.g. `flags=--opt=1`);
            # the old unbounded split raised ValueError on such configs.
            action_config = dict(c.split("=", 1) for c in action_config)
        else:
            action_config = {}

        actions.append(action_class(action_config))

    return actions
|
thinkmoore/blight | src/blight/__init__.py | <filename>src/blight/__init__.py
from blight.version import __version__  # noqa: F401

# These don't need library documentation.
# (`__pdoc__` is pdoc's per-module override mapping; a False value hides
# the named member from the generated docs.)
__pdoc__ = {"cli": False, "version": False}
|
thinkmoore/blight | setup.py | #!/usr/bin/env python3
from setuptools import find_packages, setup

# Read the package version by exec-ing src/blight/version.py into a scratch
# dict, avoiding an import of the not-yet-installed package at build time.
version = {}
with open("./src/blight/version.py") as f:
    exec(f.read(), version)

# The README doubles as the PyPI long description.
with open("./README.md") as f:
    long_description = f.read()

setup(
    name="blight",
    version=version["__version__"],
    license="Apache-2.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A catch-all compile-tool wrapper",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/trailofbits/blight",
    project_urls={"Documentation": "https://trailofbits.github.io/blight/"},
    # src-layout: packages live under src/, hence the package_dir remap.
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    entry_points={
        # One console script per wrapped tool; all dispatch through cli:tool.
        "console_scripts": [
            "blight-env = blight.cli:env",
            "blight-cc = blight.cli:tool",
            "blight-c++ = blight.cli:tool",
            "blight-cpp = blight.cli:tool",
            "blight-ld = blight.cli:tool",
            "blight-as = blight.cli:tool",
        ]
    },
    platforms="any",
    python_requires=">=3.7",
    install_requires=["click ~= 7.1", "typing_extensions"],
    extras_require={
        "dev": [
            "flake8",
            "black",
            "isort",
            "pytest",
            "pytest-cov",
            "coverage[toml]",
            "twine",
            "pdoc3",
            "mypy",
        ]
    },
)
|
thinkmoore/blight | src/blight/actions/find_outputs.py | <filename>src/blight/actions/find_outputs.py
"""
The `FindOutputs` action.
"""
import enum
import json
from collections import defaultdict
from pathlib import Path
from blight.action import Action
from blight.tool import CC, CXX, LD
from blight.util import flock_append
@enum.unique
class OutputKind(enum.Enum):
    """
    A collection of common output kinds for build tools.

    This enumeration is not exhaustive.
    """

    # The string values are the stable keys used in the JSON records that
    # FindOutputs emits.
    Object: str = "object"
    SharedLibrary: str = "shared"
    StaticLibrary: str = "static"
    Executable: str = "executable"
    KernelModule: str = "kernel"
    Unknown: str = "unknown"
OUTPUT_SUFFIX_KIND_MAP = {
".o": OutputKind.Object,
".obj": OutputKind.Object,
".so": OutputKind.SharedLibrary,
".dylib": OutputKind.SharedLibrary,
".dll": OutputKind.SharedLibrary,
".a": OutputKind.StaticLibrary,
".lib": OutputKind.StaticLibrary,
"": OutputKind.Executable,
".exe": OutputKind.Executable,
".bin": OutputKind.Executable,
".elf": OutputKind.Executable,
".com": OutputKind.Executable,
".ko": OutputKind.KernelModule,
".sys": OutputKind.KernelModule,
}
"""
A mapping of common output suffixes to their (expected) file kinds.
This mapping is not exhaustive.
"""
class FindOutputs(Action):
    """
    Categorizes each output a tool will produce (object, shared library,
    executable, ...) by suffix and appends one JSON record per run to the
    file named by this action's `output` configuration key.
    """

    def before_run(self, tool):
        outputs_by_kind = defaultdict(list)
        for raw_output in tool.outputs:
            path = Path(raw_output)
            if not path.is_absolute():
                path = tool.cwd / path

            # Special case: a.out is produced by both the linker
            # and compiler tools by default.
            if path.name == "a.out" and tool.__class__ in [CC, CXX, LD]:
                outputs_by_kind[OutputKind.Executable.value].append(str(path))
            else:
                kind = OUTPUT_SUFFIX_KIND_MAP.get(path.suffix, OutputKind.Unknown)
                outputs_by_kind[kind.value].append(str(path))

        # Distinct name from the loop variable: the original reused `output`
        # for both the tool outputs and the record file.
        record_path = Path(self._config["output"])
        with flock_append(record_path) as sink:
            record = {"tool": tool.asdict(), "outputs": outputs_by_kind}
            print(json.dumps(record), file=sink)

    # TODO(ww): Could do after_run here and check whether each output
    # in output_map was actually created.
|
thinkmoore/blight | test/actions/test_inject_flags.py | import shlex
from blight.actions import InjectFlags
from blight.tool import CC, CXX
def test_inject_flags():
    """CFLAGS and CPPFLAGS are appended for CC; CXXFLAGS are ignored."""
    action = InjectFlags(
        {"CFLAGS": "-more -flags", "CXXFLAGS": "-these -are -ignored", "CPPFLAGS": "-foo"}
    )
    cc = CC(["-fake", "-flags"])
    action.before_run(cc)
    assert cc.args == shlex.split("-fake -flags -more -flags -foo")
def test_inject_flags_cxx():
    """CXXFLAGS and CPPFLAGS are appended for CXX; CFLAGS are ignored."""
    action = InjectFlags(
        {"CFLAGS": "-these -are -ignored", "CXXFLAGS": "-more -flags", "CPPFLAGS": "-bar"}
    )
    cxx = CXX(["-fake", "-flags"])
    action.before_run(cxx)
    assert cxx.args == shlex.split("-fake -flags -more -flags -bar")
def test_inject_flags_unknown_lang():
    """No flags are injected when the tool's language cannot be determined."""
    action = InjectFlags(
        {"CFLAGS": "-these -are -ignored", "CXXFLAGS": "-so -are -these", "CPPFLAGS": "-and -this"}
    )
    cxx = CXX(["-x", "-unknownlanguage"])
    action.before_run(cxx)
    assert cxx.args == shlex.split("-x -unknownlanguage")
|
thinkmoore/blight | test/test_action.py | <filename>test/test_action.py<gh_stars>0
import pytest
from blight import action, tool
@pytest.mark.parametrize(
    ("action_class", "tool_class", "should_run_on"),
    [
        (action.Action, tool.CC, True),
        (action.CCAction, tool.CC, True),
        (action.CXXAction, tool.CC, False),
        (action.CompilerAction, tool.CC, True),
        (action.CPPAction, tool.CC, False),
        (action.LDAction, tool.CC, False),
        (action.ASAction, tool.CC, False),
    ],
)
def test_should_run_on(action_class, tool_class, should_run_on):
    """Each Action subclass runs only on its matching tool class."""
    # Renamed locals: the previous `action`/`tool` names shadowed the
    # imported `blight.action`/`blight.tool` modules inside this function.
    action_instance = action_class({})
    tool_instance = tool_class([])
    assert action_instance._should_run_on(tool_instance) == should_run_on
|
thinkmoore/blight | test/actions/test_find_outputs.py | <gh_stars>0
import json
from blight.actions import FindOutputs
from blight.actions.find_outputs import OutputKind
from blight.tool import CC
def test_find_outputs(tmp_path):
    """A `-o` output with no suffix is recorded under the executable kind."""
    record_file = tmp_path / "outputs.jsonl"
    action = FindOutputs({"output": record_file})
    cc = CC(["-o", "foo", "foo.c"])
    action.before_run(cc)
    recorded = json.loads(record_file.read_text())["outputs"]
    assert recorded[OutputKind.Executable.value] == [str(cc.cwd / "foo")]
def test_find_outputs_multiple(tmp_path):
    """Each `-c` input yields a matching `.o` object output record."""
    fake_cs = [tmp_path / fake_c for fake_c in ["foo.c", "bar.c", "baz.c"]]
    # A plain loop, not a list comprehension: touch() is called purely for
    # its side effect and the comprehension's result list was discarded.
    for fake_c in fake_cs:
        fake_c.touch()
    output = tmp_path / "outputs.jsonl"
    find_outputs = FindOutputs({"output": output})
    cc = CC(["-c", *[str(fake_c) for fake_c in fake_cs]])
    find_outputs.before_run(cc)
    outputs = json.loads(output.read_text())["outputs"]
    assert outputs[OutputKind.Object.value] == [
        str(cc.cwd / fake_c.with_suffix(".o").name) for fake_c in fake_cs
    ]
def test_find_outputs_handles_a_out(tmp_path):
    """Without `-o`, the default `a.out` is categorized as an executable."""
    record_file = tmp_path / "outputs.jsonl"
    action = FindOutputs({"output": record_file})
    cc = CC(["foo.c"])
    action.before_run(cc)
    recorded = json.loads(record_file.read_text())["outputs"]
    assert recorded[OutputKind.Executable.value] == [str(cc.cwd / "a.out")]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.