class Fly:
@staticmethod
def move(unit):
unit.position += 10
class Walk:
@staticmethod
def move(unit):
unit.position += 1
class Viking:
def __init__(self):
self.move_behavior = Walk()
self.position = 0
def move(self):
self.move_behavior.move(self)
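# Minimal usage sketch (added for illustration, not part of the original snippet):
# the strategy object can be swapped at runtime to change how the unit moves.
viking = Viking()
viking.move()                 # uses Walk: position becomes 1
viking.move_behavior = Fly()  # switch strategy at runtime
viking.move()                 # uses Fly: position becomes 11
print(viking.position)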
|
import contextlib
import functools
import importlib
import inspect
import itertools
import os
import pathlib
import platform
import random
import shutil
import string
import struct
import tarfile
import unittest
import unittest.mock
import zipfile
from collections import defaultdict
from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
import PIL
import PIL.Image
import pytest
import torch
import torchvision.datasets
import torchvision.io
from common_utils import disable_console_output, get_tmp_dir
from torch.utils._pytree import tree_any
from torchvision.transforms.functional import get_dimensions
__all__ = [
"UsageError",
"lazy_importer",
"test_all_configs",
"DatasetTestCase",
"ImageDatasetTestCase",
"VideoDatasetTestCase",
"create_image_or_video_tensor",
"create_image_file",
"create_image_folder",
"create_video_file",
"create_video_folder",
"make_tar",
"make_zip",
"create_random_string",
]
class UsageError(Exception):
"""Should be raised in case an error happens in the setup rather than the test."""
class LazyImporter:
r"""Lazy importer for additional dependencies.
Some datasets require additional packages that are not direct dependencies of torchvision. Instances of this class
provide modules listed in MODULES as attributes. They are only imported when accessed.
"""
MODULES = (
"av",
"lmdb",
"pycocotools",
"requests",
"scipy.io",
"scipy.sparse",
"h5py",
)
def __init__(self):
modules = defaultdict(list)
for module in self.MODULES:
module, *submodules = module.split(".", 1)
if submodules:
modules[module].append(submodules[0])
else:
# This introduces the module so that it is known when we later iterate over the dictionary.
modules.__missing__(module)
for module, submodules in modules.items():
# We need the quirky 'module=module' and 'submodules=submodules' arguments to the lambda since otherwise the
# lookup for these names would happen at call time rather than at definition time. Without them, every property
# would try to import the last item in 'modules'.
setattr(
type(self),
module,
property(lambda self, module=module, submodules=submodules: LazyImporter._import(module, submodules)),
)
@staticmethod
def _import(package, subpackages):
try:
module = importlib.import_module(package)
except ImportError as error:
raise UsageError(
f"Failed to import module '{package}'. "
f"This probably means that the current test case needs '{package}' installed, "
f"but it is not a dependency of torchvision. "
f"You need to install it manually, for example 'pip install {package}'."
) from error
for name in subpackages:
importlib.import_module(f".{name}", package=package)
return module
lazy_importer = LazyImporter()
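# Hedged usage sketch (not part of the original module): accessing an attribute of
# `lazy_importer` imports the corresponding package on first use and raises
# UsageError with an installation hint if it is missing, e.g.:
#
#   av = lazy_importer.av        # imports the 'av' package lazily
#   scipy = lazy_importer.scipy  # also imports the 'scipy.io' and 'scipy.sparse' submodules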
def requires_lazy_imports(*modules):
def outer_wrapper(fn):
@functools.wraps(fn)
def inner_wrapper(*args, **kwargs):
for module in modules:
getattr(lazy_importer, module.replace(".", "_"))
return fn(*args, **kwargs)
return inner_wrapper
return outer_wrapper
def test_all_configs(test):
"""Decorator to run test against all configurations.
Add this as decorator to an arbitrary test to run it against all configurations. This includes
:attr:`DatasetTestCase.DEFAULT_CONFIG` and :attr:`DatasetTestCase.ADDITIONAL_CONFIGS`.
The current configuration is provided as the first parameter for the test:
.. code-block::
@test_all_configs
def test_foo(self, config):
pass
.. note::
This will try to remove duplicate configurations. During this process it will not preserve a potential
ordering of the configurations or an inner ordering of a configuration.
"""
def maybe_remove_duplicates(configs):
try:
return [dict(config_) for config_ in {tuple(sorted(config.items())) for config in configs}]
except TypeError:
# A TypeError will be raised if a value of any config is not hashable, e.g. a list. In that case duplicate
# removal would be a lot more elaborate, and we simply bail out.
return configs
@functools.wraps(test)
def wrapper(self):
configs = []
if self.DEFAULT_CONFIG is not None:
configs.append(self.DEFAULT_CONFIG)
if self.ADDITIONAL_CONFIGS is not None:
configs.extend(self.ADDITIONAL_CONFIGS)
if not configs:
configs = [self._KWARG_DEFAULTS.copy()]
else:
configs = maybe_remove_duplicates(configs)
for config in configs:
with self.subTest(**config):
test(self, config)
return wrapper
class DatasetTestCase(unittest.TestCase):
"""Abstract base class for all dataset testcases.
You have to overwrite the following class attributes:
- DATASET_CLASS (torchvision.datasets.VisionDataset): Class of dataset to be tested.
- FEATURE_TYPES (Sequence[Any]): Types of the elements returned by index access of the dataset. Instead of
providing these manually, you can instead subclass ``ImageDatasetTestCase`` or ``VideoDatasetTestCase`` to
get a reasonable default that should work for most cases. Each entry of the sequence may be a tuple,
to indicate multiple possible values.
Optionally, you can overwrite the following class attributes:
- DEFAULT_CONFIG (Dict[str, Any]): Config that will be used by default. If omitted, this defaults to all
keyword arguments of the dataset minus ``transform``, ``target_transform``, ``transforms``, and
``download``. Overwrite this if you want to use a default value for a parameter for which the dataset does
not provide one.
- ADDITIONAL_CONFIGS (Sequence[Dict[str, Any]]): Additional configs that should be tested. Each dictionary can
contain an arbitrary combination of dataset parameters that are **not** ``transform``, ``target_transform``,
``transforms``, or ``download``.
- REQUIRED_PACKAGES (Iterable[str]): Additional dependencies to use the dataset. If these packages are not
available, the tests are skipped.
Additionally, you need to overwrite the ``inject_fake_data()`` method that provides the data that the tests rely on.
The fake data should resemble the original data as closely as necessary, while containing only a few examples. During
the creation of the dataset, the check, download, and extract functions from ``torchvision.datasets.utils`` are
disabled.
Without further configuration, the testcase will test if
1. the dataset raises a :class:`FileNotFoundError` or a :class:`RuntimeError` if the data files are not found or
corrupted,
2. the dataset inherits from `torchvision.datasets.VisionDataset`,
3. the dataset can be turned into a string,
4. the feature types of a returned example match ``FEATURE_TYPES``,
5. the number of examples matches the injected fake data, and
6. the dataset calls ``transform``, ``target_transform``, or ``transforms`` if available when accessing data.
Cases 3. to 6. are tested against all configurations (``DEFAULT_CONFIG`` and ``ADDITIONAL_CONFIGS``).
To add dataset-specific tests, create a new method that takes no arguments with ``test_`` as a name prefix:
.. code-block::
def test_foo(self):
pass
If you want to run the test against all configs, add the ``@test_all_configs`` decorator to the definition and
accept a single argument:
.. code-block::
@test_all_configs
def test_bar(self, config):
pass
Within the test you can use the ``create_dataset()`` method that yields the dataset as well as additional
information provided by the ``inject_fake_data()`` method:
.. code-block::
def test_baz(self):
with self.create_dataset() as (dataset, info):
pass
"""
DATASET_CLASS = None
FEATURE_TYPES = None
DEFAULT_CONFIG = None
ADDITIONAL_CONFIGS = None
REQUIRED_PACKAGES = None
# These keyword arguments are checked by test_transforms in case they are available in DATASET_CLASS.
_TRANSFORM_KWARGS = {
"transform",
"target_transform",
"transforms",
}
# These keyword arguments get a 'special' treatment and should not be set in DEFAULT_CONFIG or ADDITIONAL_CONFIGS.
_SPECIAL_KWARGS = {
*_TRANSFORM_KWARGS,
"download",
}
# These fields are populated during setUpClass() within _populate_private_class_attributes().
# This will be a dictionary containing all keyword arguments with their respective default values extracted from
# the dataset constructor.
_KWARG_DEFAULTS = None
# This will be a set of all _SPECIAL_KWARGS that the dataset constructor takes.
_HAS_SPECIAL_KWARG = None
# These functions are disabled during dataset creation in create_dataset().
_CHECK_FUNCTIONS = {
"check_md5",
"check_integrity",
}
_DOWNLOAD_EXTRACT_FUNCTIONS = {
"download_url",
"download_file_from_google_drive",
"extract_archive",
"download_and_extract_archive",
}
def dataset_args(self, tmpdir: str, config: Dict[str, Any]) -> Sequence[Any]:
"""Define positional arguments passed to the dataset.
.. note::
The default behavior is only valid if the dataset to be tested has ``root`` as the only required parameter.
Otherwise, you need to overwrite this method.
Args:
tmpdir (str): Path to a temporary directory. For most cases this acts as root directory for the dataset
to be created and in turn also for the fake data injected here.
config (Dict[str, Any]): Configuration that will be passed to the dataset constructor. It provides at least
fields for all dataset parameters with default values.
Returns:
(Tuple[str]): ``tmpdir`` which corresponds to ``root`` for most datasets.
"""
return (tmpdir,)
def inject_fake_data(self, tmpdir: str, config: Dict[str, Any]) -> Union[int, Dict[str, Any]]:
"""Inject fake data for dataset into a temporary directory.
During the creation of the dataset the download and extract logic is disabled. Thus, the fake data injected
here needs to resemble the raw data, i.e. the state of the dataset directly after the files are downloaded and
potentially extracted.
Args:
tmpdir (str): Path to a temporary directory. For most cases this acts as root directory for the dataset
to be created and in turn also for the fake data injected here.
config (Dict[str, Any]): Configuration that will be passed to the dataset constructor. It provides at least
fields for all dataset parameters with default values.
Needs to return one of the following:
1. (int): Number of examples in the dataset to be created, or
2. (Dict[str, Any]): Additional information about the injected fake data. Must contain the field
``"num_examples"`` that corresponds to the number of examples in the dataset to be created.
"""
raise NotImplementedError("You need to provide fake data in order for the tests to run.")
@contextlib.contextmanager
def create_dataset(
self,
config: Optional[Dict[str, Any]] = None,
inject_fake_data: bool = True,
patch_checks: Optional[bool] = None,
**kwargs: Any,
) -> Iterator[Tuple[torchvision.datasets.VisionDataset, Dict[str, Any]]]:
r"""Create the dataset in a temporary directory.
The configuration passed to the dataset is populated to contain at least all parameters with default values.
For this the following order of precedence is used:
1. Parameters in :attr:`kwargs`.
2. Configuration in :attr:`config`.
3. Configuration in :attr:`~DatasetTestCase.DEFAULT_CONFIG`.
4. Default parameters of the dataset.
Args:
config (Optional[Dict[str, Any]]): Configuration that will be used to create the dataset.
inject_fake_data (bool): If ``True`` (default) inject the fake data with :meth:`.inject_fake_data` before
creating the dataset.
patch_checks (Optional[bool]): If ``True`` disable integrity check logic while creating the dataset. If
omitted defaults to the same value as ``inject_fake_data``.
**kwargs (Any): Additional parameters passed to the dataset. These parameters take precedence in case they
overlap with ``config``.
Yields:
dataset (torchvision.datasets.VisionDataset): Dataset.
info (Dict[str, Any]): Additional information about the injected fake data. See :meth:`.inject_fake_data`
for details.
"""
if patch_checks is None:
patch_checks = inject_fake_data
special_kwargs, other_kwargs = self._split_kwargs(kwargs)
complete_config = self._KWARG_DEFAULTS.copy()
if self.DEFAULT_CONFIG:
complete_config.update(self.DEFAULT_CONFIG)
if config:
complete_config.update(config)
if other_kwargs:
complete_config.update(other_kwargs)
if "download" in self._HAS_SPECIAL_KWARG and special_kwargs.get("download", False):
# override the download param to False if it was passed as truthy
special_kwargs["download"] = False
patchers = self._patch_download_extract()
if patch_checks:
patchers.update(self._patch_checks())
with get_tmp_dir() as tmpdir:
args = self.dataset_args(tmpdir, complete_config)
info = self._inject_fake_data(tmpdir, complete_config) if inject_fake_data else None
with self._maybe_apply_patches(patchers), disable_console_output():
dataset = self.DATASET_CLASS(*args, **complete_config, **special_kwargs)
yield dataset, info
@classmethod
def setUpClass(cls):
cls._verify_required_public_class_attributes()
cls._populate_private_class_attributes()
cls._process_optional_public_class_attributes()
super().setUpClass()
@classmethod
def _verify_required_public_class_attributes(cls):
if cls.DATASET_CLASS is None:
raise UsageError(
"The class attribute 'DATASET_CLASS' needs to be overwritten. "
"It should contain the class of the dataset to be tested."
)
if cls.FEATURE_TYPES is None:
raise UsageError(
"The class attribute 'FEATURE_TYPES' needs to be overwritten. "
"It should contain a sequence of types that the dataset returns when accessed by index."
)
@classmethod
def _populate_private_class_attributes(cls):
defaults = []
for cls_ in cls.DATASET_CLASS.__mro__:
if cls_ is torchvision.datasets.VisionDataset:
break
argspec = inspect.getfullargspec(cls_.__init__)
if not argspec.defaults:
continue
defaults.append(
{
kwarg: default
for kwarg, default in zip(argspec.args[-len(argspec.defaults) :], argspec.defaults)
if not kwarg.startswith("_")
}
)
if not argspec.varkw:
break
kwarg_defaults = dict()
for config in reversed(defaults):
kwarg_defaults.update(config)
has_special_kwargs = set()
for name in cls._SPECIAL_KWARGS:
if name not in kwarg_defaults:
continue
del kwarg_defaults[name]
has_special_kwargs.add(name)
cls._KWARG_DEFAULTS = kwarg_defaults
cls._HAS_SPECIAL_KWARG = has_special_kwargs
@classmethod
def _process_optional_public_class_attributes(cls):
def check_config(config, name):
special_kwargs = tuple(f"'{name}'" for name in cls._SPECIAL_KWARGS if name in config)
if special_kwargs:
raise UsageError(
f"{name} contains a value for the parameter(s) {', '.join(special_kwargs)}. "
f"These are handled separately by the test case and should not be set here. "
f"If you need to test some custom behavior regarding these parameters, "
f"you need to write a custom test (*not* test case), e.g. test_custom_transform()."
)
if cls.DEFAULT_CONFIG is not None:
check_config(cls.DEFAULT_CONFIG, "DEFAULT_CONFIG")
if cls.ADDITIONAL_CONFIGS is not None:
for idx, config in enumerate(cls.ADDITIONAL_CONFIGS):
check_config(config, f"CONFIGS[{idx}]")
if cls.REQUIRED_PACKAGES:
missing_pkgs = []
for pkg in cls.REQUIRED_PACKAGES:
try:
importlib.import_module(pkg)
except ImportError:
missing_pkgs.append(f"'{pkg}'")
if missing_pkgs:
raise unittest.SkipTest(
f"The package(s) {', '.join(missing_pkgs)} are required to load the dataset "
f"'{cls.DATASET_CLASS.__name__}', but are not installed."
)
def _split_kwargs(self, kwargs):
special_kwargs = kwargs.copy()
other_kwargs = {key: special_kwargs.pop(key) for key in set(special_kwargs.keys()) - self._SPECIAL_KWARGS}
return special_kwargs, other_kwargs
def _inject_fake_data(self, tmpdir, config):
info = self.inject_fake_data(tmpdir, config)
if info is None:
raise UsageError(
"The method 'inject_fake_data' needs to return at least an integer indicating the number of "
"examples for the current configuration."
)
elif isinstance(info, int):
info = dict(num_examples=info)
elif not isinstance(info, dict):
raise UsageError(
f"The additional information returned by the method 'inject_fake_data' must be either an "
f"integer indicating the number of examples for the current configuration or a dictionary with "
f"the same content. Got {type(info)} instead."
)
elif "num_examples" not in info:
raise UsageError(
"The information dictionary returned by the method 'inject_fake_data' must contain a "
"'num_examples' field that holds the number of examples for the current configuration."
)
return info
def _patch_download_extract(self):
module = inspect.getmodule(self.DATASET_CLASS).__name__
return {unittest.mock.patch(f"{module}.{function}") for function in self._DOWNLOAD_EXTRACT_FUNCTIONS}
def _patch_checks(self):
module = inspect.getmodule(self.DATASET_CLASS).__name__
return {unittest.mock.patch(f"{module}.{function}", return_value=True) for function in self._CHECK_FUNCTIONS}
@contextlib.contextmanager
def _maybe_apply_patches(self, patchers):
with contextlib.ExitStack() as stack:
mocks = {}
for patcher in patchers:
with contextlib.suppress(AttributeError):
mocks[patcher.target] = stack.enter_context(patcher)
yield mocks
def test_not_found_or_corrupted(self):
with pytest.raises((FileNotFoundError, RuntimeError)):
with self.create_dataset(inject_fake_data=False):
pass
def test_smoke(self):
with self.create_dataset() as (dataset, _):
assert isinstance(dataset, torchvision.datasets.VisionDataset)
@test_all_configs
def test_str_smoke(self, config):
with self.create_dataset(config) as (dataset, _):
assert isinstance(str(dataset), str)
@test_all_configs
def test_feature_types(self, config):
with self.create_dataset(config) as (dataset, _):
example = dataset[0]
if len(self.FEATURE_TYPES) > 1:
actual = len(example)
expected = len(self.FEATURE_TYPES)
assert (
actual == expected
), f"The number of returned features does not match the number of elements in FEATURE_TYPES: {actual} != {expected}"
else:
example = (example,)
for idx, (feature, expected_feature_type) in enumerate(zip(example, self.FEATURE_TYPES)):
with self.subTest(idx=idx):
assert isinstance(feature, expected_feature_type)
@test_all_configs
def test_num_examples(self, config):
with self.create_dataset(config) as (dataset, info):
assert len(list(dataset)) == len(dataset) == info["num_examples"]
@test_all_configs
def test_transforms(self, config):
mock = unittest.mock.Mock(wraps=lambda *args: args[0] if len(args) == 1 else args)
for kwarg in self._TRANSFORM_KWARGS:
if kwarg not in self._HAS_SPECIAL_KWARG:
continue
mock.reset_mock()
with self.subTest(kwarg=kwarg):
with self.create_dataset(config, **{kwarg: mock}) as (dataset, _):
dataset[0]
mock.assert_called()
@test_all_configs
def test_transforms_v2_wrapper(self, config):
from torchvision import tv_tensors
from torchvision.datasets import wrap_dataset_for_transforms_v2
try:
with self.create_dataset(config) as (dataset, info):
for target_keys in [None, "all"]:
if target_keys is not None and self.DATASET_CLASS not in {
torchvision.datasets.CocoDetection,
torchvision.datasets.VOCDetection,
torchvision.datasets.Kitti,
torchvision.datasets.WIDERFace,
}:
with self.assertRaisesRegex(ValueError, "`target_keys` is currently only supported for"):
wrap_dataset_for_transforms_v2(dataset, target_keys=target_keys)
continue
wrapped_dataset = wrap_dataset_for_transforms_v2(dataset, target_keys=target_keys)
assert isinstance(wrapped_dataset, self.DATASET_CLASS)
assert len(wrapped_dataset) == info["num_examples"]
wrapped_sample = wrapped_dataset[0]
assert tree_any(
lambda item: isinstance(item, (tv_tensors.TVTensor, PIL.Image.Image)), wrapped_sample
)
except TypeError as error:
msg = f"No wrapper exists for dataset class {type(dataset).__name__}"
if str(error).startswith(msg):
pytest.skip(msg)
raise error
except RuntimeError as error:
if "currently not supported by this wrapper" in str(error):
pytest.skip("Config is currently not supported by this wrapper")
raise error
class ImageDatasetTestCase(DatasetTestCase):
"""Abstract base class for image dataset testcases.
- Overwrites the FEATURE_TYPES class attribute to expect a :class:`PIL.Image.Image` and an integer label.
"""
FEATURE_TYPES = (PIL.Image.Image, int)
@contextlib.contextmanager
def create_dataset(
self,
config: Optional[Dict[str, Any]] = None,
inject_fake_data: bool = True,
patch_checks: Optional[bool] = None,
**kwargs: Any,
) -> Iterator[Tuple[torchvision.datasets.VisionDataset, Dict[str, Any]]]:
with super().create_dataset(
config=config,
inject_fake_data=inject_fake_data,
patch_checks=patch_checks,
**kwargs,
) as (dataset, info):
# PIL.Image.open() only loads the image metadata upfront and keeps the file open until the first access
# to the pixel data occurs. Trying to delete such a file results in a PermissionError on Windows. Thus, we
# force-load opened images.
# This problem only occurs during testing since some tests, e.g. DatasetTestCase.test_feature_types, open an
# image but never use the underlying data. During normal operation it is reasonable to assume that the
# user wants to work with the image they just opened rather than deleting the underlying file.
with self._force_load_images():
yield dataset, info
@contextlib.contextmanager
def _force_load_images(self):
open = PIL.Image.open
def new(fp, *args, **kwargs):
image = open(fp, *args, **kwargs)
if isinstance(fp, (str, pathlib.Path)):
image.load()
return image
with unittest.mock.patch("PIL.Image.open", new=new):
yield
class VideoDatasetTestCase(DatasetTestCase):
"""Abstract base class for video dataset testcases.
- Overwrites the 'FEATURE_TYPES' class attribute to expect two :class:`torch.Tensor` s for the video and audio as
well as an integer label.
- Overwrites the 'REQUIRED_PACKAGES' class attribute to require PyAV (``av``).
- Adds the 'FRAMES_PER_CLIP' class attribute. If no 'frames_per_clip' is provided by 'dataset_args()' and it is
the last parameter without a default value in the dataset constructor, the value of the
'FRAMES_PER_CLIP' class attribute is appended to the output.
"""
FEATURE_TYPES = (torch.Tensor, torch.Tensor, int)
REQUIRED_PACKAGES = ("av",)
FRAMES_PER_CLIP = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_args = self._set_default_frames_per_clip(self.dataset_args)
def _set_default_frames_per_clip(self, dataset_args):
argspec = inspect.getfullargspec(self.DATASET_CLASS.__init__)
args_without_default = argspec.args[1 : (-len(argspec.defaults) if argspec.defaults else None)]
frames_per_clip_last = args_without_default[-1] == "frames_per_clip"
@functools.wraps(dataset_args)
def wrapper(tmpdir, config):
args = dataset_args(tmpdir, config)
if frames_per_clip_last and len(args) == len(args_without_default) - 1:
args = (*args, self.FRAMES_PER_CLIP)
return args
return wrapper
def test_output_format(self):
for output_format in ["TCHW", "THWC"]:
with self.create_dataset(output_format=output_format) as (dataset, _):
for video, *_ in dataset:
if output_format == "TCHW":
num_frames, num_channels, *_ = video.shape
else: # output_format == "THWC":
num_frames, *_, num_channels = video.shape
assert num_frames == self.FRAMES_PER_CLIP
assert num_channels == 3
@test_all_configs
def test_transforms_v2_wrapper(self, config):
# `output_format == "THWC"` is not supported by the wrapper. Thus, we skip the `config` if it is set explicitly
# or use the supported `"TCHW"`
if config.setdefault("output_format", "TCHW") == "THWC":
return
super().test_transforms_v2_wrapper.__wrapped__(self, config)
def _no_collate(batch):
return batch
def check_transforms_v2_wrapper_spawn(dataset):
# On Linux and Windows, the DataLoader forks the main process by default. This is not available on macOS, so new
# subprocesses are spawned. This requires the whole pipeline including the dataset to be pickleable, which is what
# we are enforcing here.
if platform.system() != "Darwin":
pytest.skip("Multiprocessing spawning is only checked on macOS.")
from torch.utils.data import DataLoader
from torchvision import tv_tensors
from torchvision.datasets import wrap_dataset_for_transforms_v2
wrapped_dataset = wrap_dataset_for_transforms_v2(dataset)
dataloader = DataLoader(wrapped_dataset, num_workers=2, multiprocessing_context="spawn", collate_fn=_no_collate)
for wrapped_sample in dataloader:
assert tree_any(
lambda item: isinstance(item, (tv_tensors.Image, tv_tensors.Video, PIL.Image.Image)), wrapped_sample
)
def create_image_or_video_tensor(size: Sequence[int]) -> torch.Tensor:
r"""Create a random uint8 tensor.
Args:
size (Sequence[int]): Size of the tensor.
"""
return torch.randint(0, 256, size, dtype=torch.uint8)
def create_image_file(
root: Union[pathlib.Path, str], name: Union[pathlib.Path, str], size: Union[Sequence[int], int] = 10, **kwargs: Any
) -> pathlib.Path:
"""Create an image file from random data.
Args:
root (Union[str, pathlib.Path]): Root directory the image file will be placed in.
name (Union[str, pathlib.Path]): Name of the image file.
size (Union[Sequence[int], int]): Size of the image in ``(num_channels, height, width)`` format. If
scalar, the value is used for both height and width. If no channel dimension is given, three channels are assumed.
kwargs (Any): Additional parameters passed to :meth:`PIL.Image.Image.save`.
Returns:
pathlib.Path: Path to the created image file.
"""
if isinstance(size, int):
size = (size, size)
if len(size) == 2:
size = (3, *size)
if len(size) != 3:
raise UsageError(
f"The 'size' argument should either be an int or a sequence of length 2 or 3. Got {len(size)} instead"
)
image = create_image_or_video_tensor(size)
file = pathlib.Path(root) / name
# torch (num_channels x height x width) -> PIL (width x height x num_channels)
image = image.permute(2, 1, 0)
# For grayscale images PIL doesn't use a channel dimension
if image.shape[2] == 1:
image = torch.squeeze(image, 2)
PIL.Image.fromarray(image.numpy()).save(file, **kwargs)
return file
def create_image_folder(
root: Union[pathlib.Path, str],
name: Union[pathlib.Path, str],
file_name_fn: Callable[[int], str],
num_examples: int,
size: Optional[Union[Sequence[int], int, Callable[[int], Union[Sequence[int], int]]]] = None,
**kwargs: Any,
) -> List[pathlib.Path]:
"""Create a folder of random images.
Args:
root (Union[str, pathlib.Path]): Root directory the image folder will be placed in.
name (Union[str, pathlib.Path]): Name of the image folder.
file_name_fn (Callable[[int], str]): Should return a file name if called with the file index.
num_examples (int): Number of images to create.
size (Optional[Union[Sequence[int], int, Callable[[int], Union[Sequence[int], int]]]]): Size of the images. If
callable, will be called with the index of the corresponding file. If omitted, a random height and width
between 3 and 10 pixels is selected on a per-image basis.
kwargs (Any): Additional parameters passed to :func:`create_image_file`.
Returns:
List[pathlib.Path]: Paths to all created image files.
.. seealso::
- :func:`create_image_file`
"""
if size is None:
def size(idx: int) -> Tuple[int, int, int]:
num_channels = 3
height, width = torch.randint(3, 11, size=(2,), dtype=torch.int).tolist()
return (num_channels, height, width)
root = pathlib.Path(root) / name
os.makedirs(root, exist_ok=True)
return [
create_image_file(root, file_name_fn(idx), size=size(idx) if callable(size) else size, **kwargs)
for idx in range(num_examples)
]
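# Hedged usage sketch (assumed file names, not part of the original module):
#
#   create_image_folder(
#       root=tmpdir,
#       name="images",
#       file_name_fn=lambda idx: f"img_{idx}.png",
#       num_examples=3,
#   )
#
# creates tmpdir/images/img_0.png ... img_2.png filled with random pixel data.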
def shape_test_for_stereo(
left: PIL.Image.Image,
right: PIL.Image.Image,
disparity: Optional[np.ndarray] = None,
valid_mask: Optional[np.ndarray] = None,
):
left_dims = get_dimensions(left)
right_dims = get_dimensions(right)
c, h, w = left_dims
# check that left and right are the same size
assert left_dims == right_dims
assert c == 3
# check that the disparity has the same spatial dimensions
# as the input
if disparity is not None:
assert disparity.ndim == 3
assert disparity.shape == (1, h, w)
if valid_mask is not None:
# check that valid mask is the same size as the disparity
_, dh, dw = disparity.shape
mh, mw = valid_mask.shape
assert dh == mh
assert dw == mw
@requires_lazy_imports("av")
def create_video_file(
root: Union[pathlib.Path, str],
name: Union[pathlib.Path, str],
size: Union[Sequence[int], int] = (1, 3, 10, 10),
fps: float = 25,
**kwargs: Any,
) -> pathlib.Path:
"""Create a video file from random data.
Args:
root (Union[str, pathlib.Path]): Root directory the video file will be placed in.
name (Union[str, pathlib.Path]): Name of the video file.
size (Union[Sequence[int], int]): Size of the video in ``(num_frames, num_channels, height, width)`` format.
If scalar, the value is used for both height and width. If ``num_frames`` or ``num_channels`` are not
provided, ``num_frames=1`` and ``num_channels=3`` are assumed.
fps (float): Frame rate in frames per second.
kwargs (Any): Additional parameters passed to :func:`torchvision.io.write_video`.
Returns:
pathlib.Path: Path to the created video file.
Raises:
UsageError: If PyAV is not available.
"""
if isinstance(size, int):
size = (size, size)
if len(size) == 2:
size = (3, *size)
if len(size) == 3:
size = (1, *size)
if len(size) != 4:
raise UsageError(
f"The 'size' argument should either be an int or a sequence of length 2, 3, or 4. Got {len(size)} instead"
)
video = create_image_or_video_tensor(size)
file = pathlib.Path(root) / name
torchvision.io.write_video(str(file), video.permute(0, 2, 3, 1), fps, **kwargs)
return file
@requires_lazy_imports("av")
def create_video_folder(
root: Union[str, pathlib.Path],
name: Union[str, pathlib.Path],
file_name_fn: Callable[[int], str],
num_examples: int,
size: Optional[Union[Sequence[int], int, Callable[[int], Union[Sequence[int], int]]]] = None,
fps=25,
**kwargs,
) -> List[pathlib.Path]:
"""Create a folder of random videos.
Args:
root (Union[str, pathlib.Path]): Root directory the video folder will be placed in.
name (Union[str, pathlib.Path]): Name of the video folder.
file_name_fn (Callable[[int], str]): Should return a file name if called with the file index.
num_examples (int): Number of videos to create.
size (Optional[Union[Sequence[int], int, Callable[[int], Union[Sequence[int], int]]]]): Size of the videos. If
callable, will be called with the index of the corresponding file. If omitted, a random even height and
width between 4 and 10 pixels is selected on a per-video basis.
fps (float): Frame rate in frames per second.
kwargs (Any): Additional parameters passed to :func:`create_video_file`.
Returns:
List[pathlib.Path]: Paths to all created video files.
Raises:
UsageError: If PyAV is not available.
.. seealso::
- :func:`create_video_file`
"""
if size is None:
def size(idx):
num_frames = 1
num_channels = 3
# The 'libx264' video codec, which is the default of torchvision.io.write_video, requires the height and
# width of the video to be divisible by 2.
height, width = (torch.randint(2, 6, size=(2,), dtype=torch.int) * 2).tolist()
return (num_frames, num_channels, height, width)
root = pathlib.Path(root) / name
os.makedirs(root, exist_ok=True)
return [
create_video_file(root, file_name_fn(idx), size=size(idx) if callable(size) else size, **kwargs)
for idx in range(num_examples)
]
def _split_files_or_dirs(root, *files_or_dirs):
files = set()
dirs = set()
for file_or_dir in files_or_dirs:
path = pathlib.Path(file_or_dir)
if not path.is_absolute():
path = root / path
if path.is_file():
files.add(path)
else:
dirs.add(path)
for sub_file_or_dir in path.glob("**/*"):
if sub_file_or_dir.is_file():
files.add(sub_file_or_dir)
else:
dirs.add(sub_file_or_dir)
if root in dirs:
dirs.remove(root)
return files, dirs
def _make_archive(root, name, *files_or_dirs, opener, adder, remove=True):
archive = pathlib.Path(root) / name
if not files_or_dirs:
# We need to invoke `Path.with_suffix("")` in a loop, since each call only removes the last suffix if multiple
# suffixes are present. For example, `pathlib.Path("foo.tar.gz").with_suffix("")` results in `foo.tar`.
file_or_dir = archive
for _ in range(len(archive.suffixes)):
file_or_dir = file_or_dir.with_suffix("")
if file_or_dir.exists():
files_or_dirs = (file_or_dir,)
else:
raise ValueError("No file or dir provided.")
files, dirs = _split_files_or_dirs(root, *files_or_dirs)
with opener(archive) as fh:
for file in sorted(files):
adder(fh, file, file.relative_to(root))
if remove:
for file in files:
os.remove(file)
for dir in dirs:
shutil.rmtree(dir, ignore_errors=True)
return archive
def make_tar(root, name, *files_or_dirs, remove=True, compression=None):
# TODO: detect compression from name
return _make_archive(
root,
name,
*files_or_dirs,
opener=lambda archive: tarfile.open(archive, f"w:{compression}" if compression else "w"),
adder=lambda fh, file, relative_file: fh.add(file, arcname=relative_file),
remove=remove,
)
def make_zip(root, name, *files_or_dirs, remove=True):
return _make_archive(
root,
name,
*files_or_dirs,
opener=lambda archive: zipfile.ZipFile(archive, "w"),
adder=lambda fh, file, relative_file: fh.write(file, arcname=relative_file),
remove=remove,
)
def create_random_string(length: int, *digits: str) -> str:
"""Create a random string.
Args:
length (int): Number of characters in the generated string.
*digits (str): Characters to sample from. If omitted defaults to :attr:`string.ascii_lowercase`.
"""
if not digits:
digits = string.ascii_lowercase
else:
digits = "".join(itertools.chain(*digits))
return "".join(random.choice(digits) for _ in range(length))
def make_fake_pfm_file(h, w, file_name):
values = list(range(3 * h * w))
# Note: everything is packed in little endian; the -1.0 scale factor in the PFM header and the "<" struct
# format both indicate this.
content = f"PF \n{w} {h} \n-1.0\n".encode() + struct.pack("<" + "f" * len(values), *values)
with open(file_name, "wb") as f:
f.write(content)
def make_fake_flo_file(h, w, file_name):
"""Creates a fake flow file in .flo format."""
# Everything needs to be in little Endian according to
# https://vision.middlebury.edu/flow/code/flow-code/README.txt
values = list(range(2 * h * w))
content = (
struct.pack("<4c", *(c.encode() for c in "PIEH"))
+ struct.pack("<i", w)
+ struct.pack("<i", h)
+ struct.pack("<" + "f" * len(values), *values)
)
with open(file_name, "wb") as f:
f.write(content)
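# Hedged sketch (illustrative only, not part of this module) of how these utilities are typically combined:
# a concrete test case subclasses ImageDatasetTestCase, points DATASET_CLASS at the dataset under test, and
# lays out fake files in inject_fake_data(); the class and file names below are placeholders.
#
#   class MyImageFolderTestCase(ImageDatasetTestCase):
#       DATASET_CLASS = torchvision.datasets.ImageFolder
#
#       def inject_fake_data(self, tmpdir, config):
#           create_image_folder(tmpdir, "class_a", lambda i: f"{i}.png", num_examples=3)
#           create_image_folder(tmpdir, "class_b", lambda i: f"{i}.png", num_examples=2)
#           return 5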
|
# -*-coding:utf-8-*-
"""
Merge sort algorithms
"""
import numpy as np
def create_array(a):
"""产生随机数组"""
return np.random.randint(0, 10, size=10)
"""方法1:对数组整体排序 """
def merge1(list1, list2):
ls = []
i = j = 0
while i < len(list1) and j < len(list2):
if list1[i] > list2[j]:
ls.append(list2[j])
j += 1
else:
ls.append(list1[i])
i += 1
if i < len(list1):
ls.extend(list1[i:len(list1)])
if j < len(list2):
ls.extend(list2[j:len(list2)])
return ls
def merge_sort1(lists):
if len(lists) <= 1:
return lists
middle = (len(lists)) // 2
left = merge_sort1(lists[0:middle])
right = merge_sort1(lists[middle:])
return merge1(left, right)
"""方法2:对数组部分元素排序 """
def merge2(a, left, middle, right):
l1 = [0] * (middle - left + 1)
r1 = [0] * (right - middle)
for i in range(0, middle - left + 1):
l1[i] = a[left + i]
for j in range(0, right - middle):
r1[j] = a[middle + 1 + j]
i = j = 0
k = left
while i < len(l1) and j < len(r1):
if l1[i] < r1[j]:
a[k] = l1[i]
i += 1
else:
a[k] = r1[j]
j += 1
k += 1
while i < len(l1):
a[k] = l1[i]
k += 1
i += 1
while j < len(r1):
a[k] = r1[j]
k += 1
j += 1
def merge_sort2(a, left, right):
if left < right:
s = (left + right) // 2
merge_sort2(a, left, s)
merge_sort2(a, s + 1, right)
merge2(a, left, s, right)
if __name__ == '__main__':
arr1 = create_array(10)
print('Original array:', arr1)
arr1 = merge_sort1(arr1)
print('After sorting:', arr1)
arr2 = create_array(10)
print('Original array:', arr2)
merge_sort2(arr2, 0, len(arr2) - 1)
print('After sorting:', arr2)
|
from django.urls import path
from .views import MyApiView, ReadUpdateView, ItemApiView, ReadUpdateItemView, ReadShopItemView
urlpatterns = [
path('', MyApiView.as_view(), name="myapiview"),
path('<int:id>', ReadUpdateView.as_view(), name="readUpdate"),
path('item', ItemApiView.as_view(), name="itemapiview"),
path('item/<int:id>', ReadUpdateItemView.as_view(), name="readUpdateItem"),
path('item/byStore', ReadShopItemView.as_view(), name="readShopUpdateItem"),
]
|
from django.contrib.auth.models import User
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from django.http import JsonResponse
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from garmin.serializers import UserLastSyncedSerializer
from fitbit.serializers import UserFitbitLastSyncedSerializer
from apple.serializers import UserAppleLastSyncedSerializer
import json
from garmin.models import UserLastSynced,GarminConnectToken
from fitbit.models import UserFitbitLastSynced,FitbitConnectToken
from users.models import GarminToken
from quicklook.calculations.calculation_driver import which_device
from .models import UserDataBackfillRequest
from .serializers import UserBackfillRequestSerializer,AACustomRangesSerializer
from apple.models import AppleUser,UserAppleLastSynced
class UserLastSyncedItemview(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (IsAuthenticated,)
serializer_class = UserLastSyncedSerializer
def get(self,request, format=None):
user = self.request.user
device_type = which_device(user)
if device_type == 'garmin':
queryset = UserLastSynced.objects.all()
try:
last_synced_obj = queryset.get(user=self.request.user)
if last_synced_obj:
serializer = UserLastSyncedSerializer(last_synced_obj)
return Response(serializer.data,status=status.HTTP_200_OK)
else:
return Response({})
except UserLastSynced.DoesNotExist as e:
return Response({})
elif device_type == 'fitbit':
queryset = UserFitbitLastSynced.objects.all()
try:
last_synced_obj = queryset.get(user=self.request.user)
if last_synced_obj:
serializer = UserFitbitLastSyncedSerializer(last_synced_obj)
new_data = {}
new_data["last_synced"] = serializer.data["last_synced_fitbit"]
new_data.update(serializer.data)
new_data.pop("last_synced_fitbit")
return Response(new_data,status=status.HTTP_200_OK)
else:
return Response({})
except UserFitbitLastSynced.DoesNotExist as e:
return Response({})
elif device_type == 'apple':
queryset = UserAppleLastSynced.objects.all()
try:
last_synced_obj = queryset.get(user=self.request.user)
if last_synced_obj:
serializer = UserAppleLastSyncedSerializer(last_synced_obj)
new_data = {}
new_data["last_synced"] = serializer.data["last_synced_apple"]
new_data.update(serializer.data)
new_data.pop("last_synced_apple")
return Response(new_data,status=status.HTTP_200_OK)
else:
return Response({})
except UserAppleLastSynced.DoesNotExist:
return Response({})
else:
return Response({})
class HaveTokens(APIView):
'''
Check availability of garmin connect, garmin health token
and fitbit tokens for current user
'''
permission_classes = (IsAuthenticated,)
def get(self,request,format="json"):
have_tokens = {
"linked_devices":False,
"have_garmin_health_token":False,
"have_garmin_connect_token":False,
"have_fitbit_token":False,
"have_apple_token": False
}
if GarminToken.objects.filter(user=request.user).exists():
have_tokens['have_garmin_health_token'] = True
have_tokens['linked_devices'] = True
if GarminConnectToken.objects.filter(user=request.user).exists():
have_tokens['have_garmin_connect_token'] = True
have_tokens['linked_devices'] = True
if FitbitConnectToken.objects.filter(user=request.user).exists():
have_tokens['have_fitbit_token'] = True
have_tokens['linked_devices'] = True
if AppleUser.objects.filter(user=request.user).exists():
have_tokens['have_apple_token'] = True
have_tokens['linked_devices'] = True
return Response(have_tokens,status=status.HTTP_200_OK)
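# Example response shape (illustrative values, added for documentation):
# {
#     "linked_devices": true,
#     "have_garmin_health_token": false,
#     "have_garmin_connect_token": true,
#     "have_fitbit_token": false,
#     "have_apple_token": false
# }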
class UserBackfillRequestView(generics.ListCreateAPIView):
permission_classes = (IsAuthenticated,)
serializer_class = UserBackfillRequestSerializer
def get(self,request,*args,**kwargs):
userrequestmodel=UserDataBackfillRequest.objects.all()
serializer=UserBackfillRequestSerializer(userrequestmodel,many=True)
return Response(serializer.data)
def post(self,request,*args,**kwargs):
serializer = UserBackfillRequestSerializer(data=request.data,
context={'user_id':request.user.id})
if serializer.is_valid():
serializer.save()
return Response(serializer.data,status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class AACustomRangesView(generics.ListCreateAPIView):
permission_classes = (IsAuthenticated,)
serializer_class = AACustomRangesSerializer
def post(self,request,*args,**kwargs):
serializer = AACustomRangesSerializer(data=request.data,
context={'user_id':request.user.id})
if serializer.is_valid():
serializer.save()
return Response(serializer.data,status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_user_id(request):
user = request.user
print(user,"user")
user = User.objects.get(username=user)
return JsonResponse({'user_id':user.id})
|
"""268"""
import codecs
from sklearn import svm
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn versions
vectors = {}
with codecs.open('features', 'r', 'utf8') as reader:
for line in reader:
values = line.strip().split(' ')
word = values[0]
vector = [float(v) for v in values[1:]]  # materialize the list (map() is lazy in Python 3)
vectors[word] = vector
topics = {}
with codecs.open('topics', 'r', 'utf8') as reader:
for line in reader:
topics[line.strip()] = True
emotions = {}
with codecs.open('emotions', 'r', 'utf8') as reader:
for line in reader:
word, rank = line.strip().split(' ')
emotions[word] = int(rank)
X, y = [], []
for key, val in vectors.items():
X.append(val)
if key in emotions:
y.append(2)
elif key in topics:
y.append(1)
else:
y.append(0)
clf = svm.NuSVC(nu=0.005)
clf.fit(X, y)
joblib.dump(clf, 'attr_clf.pkl')
X, y = [], []
for key, val in vectors.items():
if key in emotions:
X.append(val)
y.append(emotions[key])
clf = svm.NuSVC(nu=0.005)
clf.fit(X, y)
joblib.dump(clf, 'rank_clf.pkl')
|
# -*- coding:utf8 -*-
import gspread
import httplib2
from .drive import get_file_list, get_credentials_from_file
from apiclient import discovery
from oauth2client.service_account import ServiceAccountCredentials
from . import healthservice_blueprint as healthservice
from sqlalchemy import create_engine, MetaData, Table, select, func
from flask import jsonify
from flask_cors import cross_origin
from collections import defaultdict
from models import ServicedCustomers, ServiceCenter
engine = create_engine('postgresql+psycopg2://likit@localhost/healthdw_dev',
convert_unicode=True)
metadata = MetaData(bind=engine)
con = engine.connect()
facts = Table('facts', metadata, autoload=True)
dates = Table('dates', metadata, autoload=True)
companies = Table('companies', metadata, autoload=True)
@healthservice.route('/customers/count/')
@cross_origin(origin='*')
def get_annual_customers():
data = []
for year in range(2007,2018):
s = select([func.count(facts.c.customer_id.distinct())])
s = s.select_from(facts.join(dates)).where(facts.c.service_date_id==dates.c.date_id)
s = s.where(dates.c.gregorian_year==year)
rp = con.execute(s)
data.append(dict(year=year, count=rp.scalar()))
return jsonify({'data': data})
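# Example response shape (illustrative counts, added for documentation):
#   {"data": [{"year": 2007, "count": 1200}, ..., {"year": 2017, "count": 1850}]}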
@healthservice.route('/customers/companies/engagement/')
@cross_origin(origin='*')
def get_companies_engagement_rate():
data = []
total_counts = defaultdict(int)
for year in range(2008, 2018):
counts = []
s = select([companies.c.name.distinct()])
s = s.select_from(facts.join(dates).join(companies))
s = s.where(facts.c.service_date_id==dates.c.date_id)
s = s.where(facts.c.company_id==companies.c.company_id)
s = s.where(dates.c.gregorian_year==year)
rp = con.execute(s)
for c in rp:
total_counts[c[companies.c.name]] += 1
counts.append({
'company': c[companies.c.name],
'count': total_counts[c[companies.c.name]]
})
data.append(dict(year=year, value=counts))
return jsonify(data=data)
@healthservice.route('/gdrive/customers/update/')
def update_wrs():
'''Load data from Wellrounded scholar spreadsheet to DB'''
cred = get_credentials_from_file() # get_credentials func cannot run
# inside flask this way
http = cred.authorize(httplib2.Http())
service = discovery.build('drive', 'v3', http=http)
folder_id = '0B45WRw4HPnk_cnhXZDhOaGlrejQ'
files = get_file_list(folder_id, cred)
service_key_file = 'api/AcademicAffairs-420cd46d6400.json'
scope = ['https://spreadsheets.google.com/feeds']
gc_credentials = \
ServiceAccountCredentials.from_json_keyfile_name(service_key_file, scope)
gc = gspread.authorize(gc_credentials)
records = {}
medilab = ServiceCenter.objects(slug="medilab-center").first()
toxicology = ServiceCenter.objects(slug="toxicology").first()
chromosome = ServiceCenter.objects(slug="chromosome").first()
mobile = ServiceCenter.objects(slug="mobile-unit").first()
gjmt = ServiceCenter.objects(slug="gjmt").first()
gjrt = ServiceCenter.objects(slug="gjrt").first()
for f in files:
file_name, file_id = f['name'], f['id']
print('Loading data from file: {} {}'.format(file_name, file_id))
wks = gc.open_by_key(file_id).get_worksheet(0)
for c in range(2,10):
data = wks.col_values(c)[2:11]
for i in range(len(data)):
try:
data[i] = float(data[i])
except (ValueError, TypeError):
data[i] = 0
_ = ServicedCustomers(year=int(data[0]), center=medilab,
customers=data[1])
_.save()
_ = ServicedCustomers(year=int(data[0]), center=mobile,
customers=data[3])
_.save()
_ = ServicedCustomers(year=int(data[0]), center=toxicology,
customers=data[4])
_.save()
_ = ServicedCustomers(year=int(data[0]), center=chromosome,
customers=data[5])
_.save()
_ = ServicedCustomers(year=int(data[0]), center=gjmt,
customers=data[7])
_.save()
_ = ServicedCustomers(year=int(data[0]), center=gjrt,
customers=data[8])
_.save()
return jsonify(message='update completed')
@healthservice.route('/gdrive/files/')
@cross_origin()
def get_gdrive_file_list():
cred = get_credentials_from_file() # get_credentials func cannot run inside flask this way
http = cred.authorize(httplib2.Http())
service = discovery.build('drive', 'v3', http=http)
results = service.files().list(pageSize=10,
fields="nextPageToken, files(id, name, parents)").execute()
items = results.get('files', [])
if not items:
return jsonify({'files': [], 'message': 'No files found.'})
else:
files = []
for item in items:
files.append({'id': item['id'], 'name': item['name'], 'parents': item.get('parents', '')})
#print('{0} ({1})'.format(item['name'], item['id']))
return jsonify({'files': files})
@healthservice.route('/gdrive/customers/stats/')
@cross_origin()
def get_customers_stat():
data = []
years = defaultdict(list)
for s in ServicedCustomers.objects:
d = {
'customers' : s.customers,
'year': s.year,
'center_slug': s.center.slug
}
years[d['year']].append(d)
for yrs in years:
data.append({
'year': yrs,
'data': years[yrs]
})
return jsonify(data)
|
import requests
r = requests.get("https://stepic.org/media/attachments/course67/3.6.2/316.txt")
s = r.text.splitlines()
print(len(s))
|
from flask_wtf import FlaskForm
from ..models import User
from .. import db
from wtforms import StringField, DateField, SubmitField, TextAreaField, PasswordField,ValidationError, validators
from wtforms.validators import Required, Optional, Email, EqualTo
from wtforms import RadioField
class BlogForm(FlaskForm):
title = StringField('Title', validators = [Required()])
blog = TextAreaField('Content', validators = [Required()])
submit = SubmitField('Submit')
class LoginForm(FlaskForm):
username = StringField("Username", validators = [Required()])
password = PasswordField("Password", validators = [Required()])
submit = SubmitField('Log In')
class RegistrationForm(FlaskForm):
username = StringField("Username", validators = [Required()])
email_address =StringField("Email Address", validators = [Required(), Email()])
password = PasswordField("Password", validators = [Required(),EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField("Confirm Password")
def validate_username(self, username):
if User.query.filter_by(username = username.data).first():
raise ValidationError(f'That username is no longer available, try {username.data}xy2')
def validate_email_address(self, email_field):  # WTForms inline validators must be named validate_<field name>
if User.query.filter_by(email = email_field.data).first():
raise ValidationError("That email is already registered here. If it's your email, please log in")
submit = SubmitField('Register')
class CommentForm(FlaskForm):
comment =StringField("Comment", validators = [Required()])
submit =SubmitField("Submit Comment")
class DeleteForm(FlaskForm):
delete = SubmitField("Delete")
class UpdateForm(FlaskForm):
update = SubmitField("Update")
class SubscribeForm(FlaskForm):
name = StringField("Name", validators = [Required()])
email =StringField("Email", validators = [Required(), Email()])
submit = SubmitField("Subscribe")
|
try:
1 / 0
except Exception as E:
raise TypeError('Bad') from E # Explicitly chained exceptions
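# Running this prints the ZeroDivisionError traceback first, then the line
# "The above exception was the direct cause of the following exception:",
# followed by the traceback for TypeError('Bad'). Using "from E" sets E as the
# new exception's __cause__ (implicit chaining would only set __context__).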
|
password="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
print("Goodbye, World!")
|
import json
from tqdm import tqdm
import pdb
from analysis.map_condition_phrases import read_embeddings
import numpy as np
import jsonlines
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize
import os
import sys
import random
import re
import scipy.cluster.hierarchy as hcluster
from shutil import copyfile
from sklearn.feature_extraction.text import TfidfVectorizer
import argparse
import pickle
import copy
import spacy
import time
sys.path.append("/data/rsg/nlp/darsh/pytorch-pretrained-BERT")
from nltk.tokenize import sent_tokenize
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
import matplotlib.pyplot as plt
from nltk.translate.meteor_score import meteor_score
from itertools import count, permutations
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from examples.run_fusion import InputExample
from examples.run_fusion import convert_examples_to_features
from allennlp.predictors.predictor import Predictor
torch.manual_seed(0)
random.seed(42)
lower_limit = 20
upper_limit = 25
#if len(sys.argv) > 1:
# THRESHOLD = float(sys.argv[1])
#else:
THRESHOLD = 0.7
lemmatizer = WordNetLemmatizer()
spacy_nlp = spacy.load("en_core_web_sm")
class PolicyChoices(nn.Module):
def __init__(self, inputs):
super(PolicyChoices, self).__init__()
self.representation = nn.Sequential(
nn.Linear(400,30),
nn.Tanh(),
nn.Linear(30,1)
)
self.affine1 = nn.Linear(inputs, 2*inputs)
self.hidden = nn.Linear(2*inputs, 2*inputs)
self.affine2 = nn.Linear(2*inputs, 1)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = torch.cat((x[:,:2],\
#x[:,202:204], x[:,-1].unsqueeze(1)),1)
self.representation(torch.cat((x[:,2:202],x[:,2:202]),1)),\
x[:,202:204], x[:,-1].unsqueeze(1)),1)
#x[:,202:204]),1)
x = self.affine1(x)
x = torch.tanh(x)
x = self.hidden(x)
x = torch.relu(x)
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=0)
class Policy(nn.Module):
def __init__(self, inputs):
super(Policy, self).__init__()
self.affine1 = nn.Linear(inputs, 8)
self.dropout = nn.Dropout(p=0.6)
self.affine2 = nn.Linear(8, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = self.affine1(x).clamp(max=1e5)
x = self.dropout(x).clamp(max=1e5)
x = F.relu(x)
action_scores = self.affine2(x).clamp(max=1e5)
return F.softmax(action_scores, dim=1)
eps = np.finfo(np.float32).eps.item()
gamma=0.99
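# The helpers below implement a REINFORCE-style policy gradient (a summary comment
# added for clarity, not part of the original file): the select_action* functions
# run the policy, pick an action from its softmax output, and record the
# log-probability; finish_episode then folds the collected rewards into discounted
# returns (R_t = r_t + gamma * R_{t+1}) and accumulates the loss -log pi(a_t|s_t)
# scaled by the episode return before stepping the optimizer in batches.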
def select_action(state, policy):
state = torch.from_numpy(state).float().unsqueeze(0)
probs = policy(state)
m = Categorical(probs)
action = m.sample()
policy.saved_log_probs.append(m.log_prob(action))
return action.item()
def select_action_choice(states, policy, epoch):
states = torch.from_numpy(states).float()
probs = policy(states).squeeze()
if len(probs.size()) == 0:
action = 0
policy.saved_log_probs.append(torch.log(probs.unsqueeze(0)))
return 0
if epoch < 0:
n = Categorical(torch.cat((\
torch.max(probs[:-1]).unsqueeze(0),probs[-1:]),0).squeeze())
if n.sample().item() == 0:
m = Categorical(probs[:-1])
action = m.sample()
else:
action = torch.tensor(len(probs)-1)
else:
m = Categorical(probs)
action = torch.argmax(probs)#m.sample()
m = Categorical(probs)
policy.saved_log_probs.append(\
m.log_prob(action).unsqueeze(0))
return action.item()
def select_action_choice_pretrained(states, policy, fixed_choice):
states = torch.from_numpy(states).float()
probs = policy(states).squeeze()
if len(probs.size()) == 0:
policy.saved_log_probs.append(torch.log(probs.unsqueeze(0)))
return 0
policy.saved_log_probs.append(torch.log(probs[fixed_choice]).unsqueeze(0))
return fixed_choice
def finish_episode(policy, optimizer, prev_loss=[], batch_size=8, \
do_policy=True):
if prev_loss is None:
prev_loss = []
R = 0
policy_loss = []
returns = []
for r in policy.rewards[::-1]:
R = r + gamma * R
returns.insert(0, R)
returns = torch.tensor(returns)
#if len(returns) > 1:
# returns = (returns - returns.mean()) / (returns.std() + eps)
for log_prob, R in zip(policy.saved_log_probs, returns):
policy_loss.append(-log_prob * returns[0])
policy_loss = torch.cat(policy_loss).sum()
#print(policy_loss)
#policy_loss = sum(policy.saved_log_probs) * sum(returns)
if do_policy:
prev_loss.append(policy_loss/len(returns))
if len(prev_loss) == batch_size:
optimizer.zero_grad()
loss = 0
for prev_l in prev_loss:
if prev_l > 0 or True:
loss += prev_l
if loss > 0 or True:
loss.backward()
optimizer.step()
#loss = sum(prev_loss)
#loss.backward()
#optimizer.step()
prev_loss = []
del policy.rewards[:]
del policy.saved_log_probs[:]
return prev_loss
class Environment:
def __init__(self, gold_relations, gold_populations, embeddings, \
cause_style_rationales_representations, sentence_representations,\
all_gold_sentence_extracted_representations,\
all_pubmed_sentence_representations,\
rhs, gold_sentence,\
pubmed_sentences=None, max_phrases=5):
self.gold_relations = gold_relations
self.gold_populations = gold_populations
self.embeddings = embeddings if embeddings else read_embeddings()
self.cause_style_rationales_representations = \
cause_style_rationales_representations
self.sentence_representations = sentence_representations
self.all_gold_sentence_extracted_representations = \
all_gold_sentence_extracted_representations
self.all_pubmed_sentence_representations = \
all_pubmed_sentence_representations
self.max_phrases = max_phrases
self.rhs = rhs
self.gold_sentence = gold_sentence
self.pubmed_sentences=pubmed_sentences
def step(self, state, action, structures_selected, remaining_structures,\
remaining_types, corresponding_sentiment, gold_sentiment):
if action == 0 or len(remaining_structures)==0 or state[1]+1\
>= self.max_phrases:
state[1] += 1
return state, self.get_reward(structures_selected), \
True, structures_selected
else:
if len(structures_selected) > 0:
existing_reward = self.get_reward(structures_selected)
else:
existing_reward = 0.0
best_structure = None
best_reward = -1e10
for additional,remaining_type in \
zip(remaining_structures,remaining_types):
if remaining_type == 1:
new_reward = self.get_reward(structures_selected + \
[additional]) - existing_reward
else:
new_reward = 0
#assert new_reward >= 0
if new_reward >= best_reward:
best_reward = new_reward
best_structure = additional
state[0] += 1
state[1] += 1
assert best_structure is not None
new_structures_selected = structures_selected + [best_structure]
if state[1] >= self.max_phrases:
import sys; sys.exit()
return state, -1/self.max_phrases, True, new_structures_selected
#-1/self.max_phrases, True, \
#self.get_reward(new_structures_selected),\
#True, new_structures_selected
return state, -1/self.max_phrases,\
len(new_structures_selected) >= self.max_phrases,\
new_structures_selected
def get_reward(self, structures):
overall_score = _best_overal_score(self.gold_relations,\
[(x,y) for x,y,a,b,c,d in structures], self.embeddings)
return overall_score
def get_relation_sentence_annotations(article_name, healthline_sentence,\
metadata):
summary_inputs = metadata[article_name]['summary_inputs']
summary_annotations = summary_inputs\
['summary_healthline_relation_annotations'][healthline_sentence\
.strip()]
pubmed_inputs = summary_inputs['summary_pubmed_articles']\
[healthline_sentence]
pubmed_annotations = {}
pubmed_relation_sentiments = {}
for pubmed in pubmed_inputs:
if pubmed not in metadata['pubmed_sentences_annotations']:
pubmed_annotations = {}
break
if 'pubmed_sentences_relation_annotations' in \
metadata['pubmed_sentences_annotations'][pubmed]:
pubmed_annotations[pubmed] = metadata[\
'pubmed_sentences_annotations'][pubmed]\
['pubmed_sentences_relation_annotations']
if 'relation_structure_sentiment' in \
metadata['pubmed_sentences_annotations'][pubmed]:
pubmed_relation_sentiments[pubmed] = {}
for element in metadata['pubmed_sentences_annotations'][pubmed]\
['relation_structure_sentiment']:
if element[0][0].endswith("/") or element[0][0].endswith("\\"):
element[0][0] = element[0][0][:-1]
if element[0][1].endswith("/") or element[0][1].endswith("\\"):
element[0][1] = element[0][1][:-1]
pubmed_relation_sentiments[pubmed][tuple(element[0])] =\
element[2:]
new_relations = []
for p in pubmed_annotations[pubmed][0]:
relation = []
for q in p:
if q.endswith("/"):
q = q[:len(q)-1]
relation.append(q)
new_relations.append(relation)
new_contains = []
for p in pubmed_annotations[pubmed][4]:
contains = []
for q in p:
if q.endswith("/") or q.endswith("\\"):
q = q[:len(q)-1]
contains.append(q)
new_contains.append(contains)
new_modifies = []
for p in pubmed_annotations[pubmed][7]:
modifies = []
for q in p:
if q.endswith("/") or q.endswith("\\"):
q = q[:len(q)-1]
modifies.append(q)
new_modifies.append(modifies)
pubmed_annotations[pubmed][0] = new_relations
pubmed_annotations[pubmed][4] = new_contains
pubmed_annotations[pubmed][7] = new_modifies
return summary_annotations, pubmed_annotations, pubmed_relation_sentiments
def get_entity_sentence_annotations(article_name, healthline_sentence, \
metadata):
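    """Collect Food/Condition/Nutrition entity annotations for a summary
    sentence and its linked pubmed articles, plus Population modifiers.

    Returns (summary_annotations, pubmed_annotations, summary_modifiers,
    pubmed_modifiers), each mapping an entity string to its type.
    """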
summary_inputs = metadata[article_name]['summary_inputs']
summary_annotations = {}
summary_modifiers = {}
for sentence, tag in summary_inputs[\
'summary_healthline_entity_annotations']:
if sentence == healthline_sentence.strip():
food_entities = _get_entities(sentence.split(),tag.split(),'Food')
for food_entity in food_entities:
summary_annotations[food_entity] = 'Food'
condition_entities = _get_entities(sentence.split(),tag.split(),\
'Condition')
for condition_entity in condition_entities:
summary_annotations[condition_entity] = 'Condition'
nutrition_entities = _get_entities(sentence.split(),tag.split(),\
'Nutrition')
for nutrition_entity in nutrition_entities:
summary_annotations[nutrition_entity] = 'Nutrition'
population_entities = _get_entities(sentence.split(),tag.split(),\
'Population')
for population_entity in population_entities:
summary_modifiers[population_entity] = 'Population'
pubmed_annotations = {}
pubmed_modifiers = {}
pubmed_inputs = summary_inputs['summary_pubmed_articles']\
[healthline_sentence]
for pubmed in pubmed_inputs:
if pubmed not in metadata['pubmed_sentences_annotations']:
pubmed_annotations = {}
break
pubmed_sentence_tuples = metadata['pubmed_sentences_annotations']\
[pubmed]['pubmed_sentences_entity_annotations']
for sentence,tags in pubmed_sentence_tuples:
food_entities = _get_entities(sentence.split(),\
tags.split(),'Food')
for entity in food_entities:
pubmed_annotations[entity] = 'Food'
condition_entities = _get_entities(sentence.split(),\
tags.split(),'Condition')
for entity in condition_entities:
pubmed_annotations[entity] = 'Condition'
nutrition_entities = _get_entities(sentence.split(),\
tags.split(),'Nutrition')
for entity in nutrition_entities:
pubmed_annotations[entity] = 'Nutrition'
population_entities = _get_entities(sentence.split(),\
tags.split(),'Population')
for entity in population_entities:
pubmed_modifiers[entity] = 'Population'
pubmed_entities = list(pubmed_annotations.keys())
for entity in pubmed_entities:
if entity.endswith("/"):
pubmed_annotations[entity[:entity.find("/")]] = \
pubmed_annotations[entity]
if entity.endswith(","):
pubmed_annotations[entity[:entity.find(",")]] = \
pubmed_annotations[entity]
summary_entities = list(summary_annotations.keys())
for entity in summary_entities:
if entity.endswith(","):
summary_annotations[entity[:entity.find(",")]] = \
summary_annotations[entity]
    # snapshot keys so adding trimmed variants does not break iteration
    for entity in list(pubmed_modifiers.keys()):
if entity.endswith("/"):
pubmed_modifiers[entity[:entity.find("/")]] = \
pubmed_modifiers[entity]
if entity.endswith(","):
pubmed_modifiers[entity[:entity.find(",")]] = \
pubmed_modifiers[entity]
    for entity in list(summary_modifiers.keys()):
if entity.endswith(","):
summary_modifiers[entity[:entity.find(",")]] = \
summary_modifiers[entity]
return summary_annotations, pubmed_annotations, \
summary_modifiers, pubmed_modifiers
def paired_annotations(metadata):
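    """Pair every healthline summary sentence with its pubmed-side annotations.

    Builds, per stripped summary sentence: relation annotations (summary vs.
    merged pubmed), entity annotations, Population-modifier annotations, the
    source file name, and relation-structure sentiments.
    """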
sentence_relation_annotations = {}
sentence_entity_annotations = {}
sentence_modifier_entity_annotations = {}
sentence_file_names = {}
sentence_pubmed_relations_sentiments = {}
for file_name in tqdm(metadata):
if 'summary_inputs' not in metadata[file_name]:
continue
for healthline_sentence in tqdm(metadata[file_name]['summary_inputs']\
['summary_pubmed_articles']):
summary,pubmed,relation_sentiments = \
get_relation_sentence_annotations(file_name,\
healthline_sentence, metadata)
sentence_pubmed_relations_sentiments[healthline_sentence.strip()] =\
{}
for pubmed_name in relation_sentiments:
for relation in relation_sentiments[pubmed_name]:
sentence_pubmed_relations_sentiments[\
healthline_sentence.strip()][relation] = \
relation_sentiments[pubmed_name][relation]
summary_entities,\
pubmed_entities,\
summary_modifiers, pubmed_modifiers = \
get_entity_sentence_annotations(file_name, healthline_sentence,\
metadata)
if len(pubmed) == 0:
continue
input = None
for pubmed_file in pubmed:
if input is None:
input = pubmed[pubmed_file]
else:
for i in range(len(input)):
for x in pubmed[pubmed_file][i]:
input[i].append(x)
sentence_relation_annotations[healthline_sentence.strip()] = \
[summary,list.copy(input)]
sentence_entity_annotations[healthline_sentence.strip()] = \
[summary_entities,pubmed_entities]
sentence_modifier_entity_annotations[healthline_sentence.strip()]\
= [summary_modifiers,pubmed_modifiers]
sentence_file_names[healthline_sentence.strip()] = file_name
return sentence_relation_annotations, sentence_file_names,\
sentence_entity_annotations, sentence_modifier_entity_annotations,\
sentence_pubmed_relations_sentiments
def _get_conjunction(fusion_model, tokenizer, sentence, label_list):
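    """Predict a discourse conjunction for a ' ### '-separated sentence pair.

    Runs the (presumably BERT-based) fusion_model on a single example and
    returns the highest-scoring label from label_list.
    """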
examples = []
examples.append(InputExample(guid=0, text_a=\
sentence, text_b=None, label="", weight=0.0))
eval_features = convert_examples_to_features(examples, \
label_list, 128, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in eval_features],\
dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features],\
dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],\
dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features],\
dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask,\
all_segment_ids, all_label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
input_ids = input_ids.cuda()
input_mask = input_mask.cuda()
segment_ids = segment_ids.cuda()
label_ids = label_ids.cuda()
tmp_eval_loss, logits = fusion_model(\
input_ids, segment_ids, input_mask, label_ids)
logits = list(logits.view(-1).detach().cpu().numpy())
print(label_list[logits.index(max(logits))])
return label_list[logits.index(max(logits))]
    # no conjunction prediction available (empty dataloader): use none
    return ""
def combine_individuals(sentences, fusion_model, tokenizer, label_list):
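    """Fuse individual sentences into one, inserting predicted conjunctions."""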
if len(sentences) == 0:
return ""
final_sentence = sentences[0]
for sentence in sentences[1:]:
conjunction= _get_conjunction(fusion_model, tokenizer, \
final_sentence + " ### " + sentence, label_list)
if conjunction != "":
final_sentence += " " + conjunction + " "+ sentence
else:
final_sentence += " " + sentence
#final_sentence = final_sentence.replace("<X>","").replace("</X>","")\
# .replace("<Y>","").replace("</Y>","")
#final_sentence = final_sentence.replace(" "," ")
return final_sentence
def _compare_causes(input_cause, output_causes, embeddings, file_name):
for output_cause in output_causes:
        if equal_strings(output_cause[0],input_cause[0],embeddings) and\
                equal_strings(output_cause[1],input_cause[1],embeddings):
            return 1.0
return 0.0
def _compare_contains(input_contains, output_contains, embeddings, file_name):
f = jsonlines.open("contains_correct.jsonl","a")
for output_contain in output_contains:
if equal_strings(output_contain[0],input_contains[0],embeddings)\
and equal_strings(output_contain[1],input_contains[1],embeddings):
dict = {'article':file_name,'input':output_contains,\
'output':input_contains}
f.write(dict)
f.close()
return 1
dict = {'article':file_name,'input':output_contains,\
'output':input_contains}
f = jsonlines.open("contains_wrong.jsonl","a")
f.write(dict)
f.close()
return 0
def _best_overal_score(input_causes, output_causes, embeddings):
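    """Greedily match each gold cause to its best unused predicted cause and
    sum the matching scores that clear THRESHOLD."""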
selected_output_causes = {}
total_score = 0
for input_cause in input_causes:
current_score = 0
current_selection = None
for output_cause in output_causes:
if selected_output_causes.get(tuple(output_cause),0) > 0:
continue
score = matching_score(input_cause[0].lower(),\
output_cause[0].lower(),embeddings)\
+ matching_score(input_cause[1].lower(),\
output_cause[1].lower(),embeddings)
score /= 2
if score > current_score:
current_score = score
current_selection = tuple(output_cause)
        if current_selection is not None:
            # mark the matched output cause as used so it cannot be paired
            # with another gold cause
            selected_output_causes[current_selection] = \
                selected_output_causes.setdefault(current_selection, 0) + 1
        if current_score > 0:
            total_score += (current_score>=THRESHOLD)*current_score
return total_score
def _best_causes_score(input_cause, output_causes, embeddings):
best_score = -1
for output_cause in output_causes:
score = matching_score(input_cause[0],output_cause[0],embeddings)\
+ matching_score(input_cause[1],output_cause[1],embeddings)
score /= 2.0
if score > best_score:
best_score = score
return best_score
def _best_causes(input_cause, output_causes, embeddings):
best_score = -1
best_causes = None
for output_cause in output_causes:
score = matching_score(input_cause[0],output_cause[0],embeddings)\
+ matching_score(input_cause[1],output_cause[1],embeddings)
if score > best_score:
best_score = score
best_causes = output_cause
return best_causes
def _just_causes(input_cause, output_causes, embeddings):
best_score = -1
just_causes= None
for output_cause in output_causes:
score = matching_score(input_cause[1],output_cause[1],embeddings)
if score > best_score:
best_score = score
just_causes= output_cause
return just_causes
def _best_contains(input_contains, output_contains, embeddings):
best_score = -1
best_contains = None
for output_contain in output_contains:
score = matching_score(input_contains[0],output_contain[0],\
embeddings) +\
matching_score(input_contains[1],output_contain[1],\
embeddings)
if score > best_score:
best_score = score
best_contains = output_contain
return best_contains
def _get_entity_embeddings(string, embeddings):
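    """Return a normalised bag-of-words embedding for a phrase (assumed to use
    50-dimensional word vectors, e.g. small GloVe vectors)."""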
    # accumulate word vectors in a numpy array so += is element-wise
    representation = np.zeros(50)
    for word in string.split():
        if word in embeddings:
            representation += embeddings[word]
    norm = np.linalg.norm(representation)
    if norm != 0.0:
        representation /= norm
    return representation
def _get_phrase_embeddings(string, embeddings):
return _get_entity_embeddings(string, embeddings)
def _get_all_entity_embeddings(entity_names, embeddings):
entity_vectors = {}
for entity in entity_names:
entity_vectors[entity] = {}
entity_vectors[entity]['names'] = []
entity_vectors[entity]['vectors'] = []
for phrase in entity_names[entity]:
entity_vectors[entity]['names'].append(phrase)
entity_vectors[entity]['vectors'].append(\
_get_entity_embeddings(phrase, embeddings))
entity_vectors[entity]['vectors'] = \
np.array(entity_vectors[entity]['vectors'])
return entity_vectors
def equal_strings(string1, string2, embeddings):
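    """Soft string equality: 1 if the phrases share a lemma or their phrase
    embeddings have cosine similarity above THRESHOLD, else 0."""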
    # first check for shared lemmas, then fall back to embedding similarity
    lemmatized_words1 = [lemmatizer.lemmatize(word) for word in string1.split()]
    lemmatized_words2 = [lemmatizer.lemmatize(word) for word in string2.split()]
for word in lemmatized_words1:
if word in lemmatized_words2:
return 1
representation1 = _get_entity_embeddings(string1, embeddings)
representation2 = _get_entity_embeddings(string2, embeddings)
if np.dot(representation1,representation2) > THRESHOLD:
return 1
return 0
def matching_score(string1, string2, embeddings):
representation1 = _get_entity_embeddings(string1, embeddings)
representation2 = _get_entity_embeddings(string2, embeddings)
return np.dot(representation1,representation2)
def get_population_annotations(sentence_relation_annotations,\
sentence_modifier_entity_annotations):
sentence_population_relations = {}
sentence_population_entities = {}
for sentence in sentence_relation_annotations:
summary, pubmed = sentence_relation_annotations[sentence]
summary_modifiers, pubmed_modifiers = \
sentence_modifier_entity_annotations[sentence]
summary_population_relations = []
pubmed_population_relations = []
summary_population_entities = []
pubmed_population_entities = []
for r in summary[7]:
#if r[0] in summary_modifiers and \
# summary_modifiers[r[0]] == 'Population':
if not any([x in r[0] for x in ['male','female','men','women',\
'human','child','patient','individual','mammal','rat',\
'mice','animal']]):
continue
summary_population_relations.append(r)
summary_population_entities.append(r[0])
for r in pubmed[7]:
#if r[0] in pubmed_modifiers and \
# pubmed_modifiers[r[0]] == 'Population':
if not any([x in r[0] for x in ['male','female','men','women',\
'human','child','patient','individual','mammal','rat',\
'mice','animal']]):
continue
pubmed_population_relations.append(r)
pubmed_population_entities.append(r[0])
sentence_population_relations[sentence] = [\
summary_population_relations, pubmed_population_relations]
sentence_population_entities[sentence] = [\
summary_population_entities, pubmed_population_entities]
return sentence_population_relations, sentence_population_entities
def get_sentiment_annotations(metadata):
sentence_sentiment = {}
for file_name in metadata:
if 'summary_inputs' not in metadata[file_name]:
continue
if 'summary_healthline_sentiment' not in \
metadata[file_name]['summary_inputs']:
continue
for sentence, sentiment in metadata[file_name]['summary_inputs']\
['summary_healthline_sentiment'].items():
sentence_sentiment[sentence] = sentiment
return sentence_sentiment
def get_population_correlation(sentence_population_entities, \
sentence_sentiment):
non_human_neutral = 0
non_human_total = 0
human_neutral = 0
human_total = 0
only_animal_total = 0
only_animal_neutral = 0
for sentence in sentence_population_entities:
summary, pubmeds = sentence_population_entities[sentence]
any_human = False
only_animal = True
for p in pubmeds:
if any ([x in p for x in ['male','female','men','women',\
'human','child','patient','individual']]):
any_human = True
only_animal = False
break
if any_human:
if sentence_sentiment[sentence] == 'Neutral':
human_neutral += 1
human_total += 1
else:
if len(pubmeds) > 0:
only_animal_total += 1
if sentence_sentiment[sentence] == 'Neutral':
only_animal_neutral += 1
if sentence_sentiment[sentence] == 'Neutral':
non_human_neutral += 1
non_human_total += 1
    print("non-human neutral/total:", non_human_neutral, non_human_total,
        "only-animal neutral/total:", only_animal_neutral, only_animal_total,
        "human neutral/total:", human_neutral, human_total)
def get_sentiment_statistics(sentence_sentiment, sentence_file_names,\
metadata, splits):
print(splits)
sentiment_files = {}
sentiment_sentences = {}
for sentence,file_name in sentence_file_names.items():
if metadata[file_name]['split'] not in splits:
continue
sentiment = sentence_sentiment[sentence]
sentiment_files[sentiment] = \
sentiment_files.setdefault(sentiment,[]) + \
[file_name]
sentiment_sentences[sentiment] = \
sentiment_sentences.setdefault(sentiment,[]) +\
[sentence]
sentiment_files[sentiment] = list(set(sentiment_files[sentiment]))
sentiment_sentences[sentiment] = \
list(set(sentiment_sentences[sentiment]))
print("Files " + str(sum([len(x) for x in sentiment_files.values()])) + " "+\
str([[x,len(y)] for x,y in sentiment_files.items()]))
print("Sentences " + str(sum([len(x) for x in sentiment_sentences.values()]))\
+ " "+ str([[x,len(y)] for x,y in sentiment_sentences.items()]))
def expand_pubmed_information(input_pubmed_information):
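    """Expand pubmed annotations: take the transitive closure of the
    'contains' pairs (index 4) and copy each causal relation (index 0) up to
    the entities that contain its subject."""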
pubmed_information = list.copy(input_pubmed_information)
contains_information = pubmed_information[4]
updated_information = []
entity_edges = {}
entity_counts= {}
for pair in contains_information:
entity_edges[pair[0]] = set()
entity_edges[pair[1]] = set()
entity_counts[pair[0]]=0
entity_counts[pair[1]]=0
for pair in contains_information:
entity_edges[pair[1]].add(pair[0])
entity_counts[pair[1]] += 1
for entity in sorted(entity_counts, key=entity_counts.get, reverse=True):
stack = []
visited = set()
stack = list(entity_edges.get(entity,[]))
candidates = set()
while len(stack) - len(visited) > 0:
current = stack[len(visited)]
visited.add(current)
parents = entity_edges.get(current, [])
for parent in parents:
if parent not in stack:
stack.append(parent)
candidates.add(parent)
for candidate in candidates:
pair = [candidate,entity]
if pair not in updated_information+contains_information:
updated_information.append(pair)
output_pubmed_information = list.copy(pubmed_information)
for pair in updated_information:
output_pubmed_information[4].append(pair)
current_relations = pubmed_information[0]
updated_relations = []
for relation in current_relations:
for pair in output_pubmed_information[4]:
if pair[1] == relation[0]:
new_relation = [pair[0],relation[1],relation[2]]
if new_relation not in updated_relations + current_relations:
updated_relations.append(new_relation)
    # relations propagated through 'contains' chains belong with the causal
    # relations (index 0); return the expanded copy rather than the input
    output_pubmed_information[0] = list.copy(pubmed_information[0]) + \
        list.copy(updated_relations)
    return output_pubmed_information
def _get_entities(tokens, labels, entity):
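    """Extract contiguous spans tagged with BIO labels for `entity`.

    Example (illustrative):
        _get_entities("green tea reduces stress".split(),
                      "B-Food I-Food O B-Condition".split(), "Food")
        returns ["green tea"]
    """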
entities_found = []
current = ""
for token,label in zip(tokens,labels):
if label == 'B-'+entity:
if current != "":
entities_found.append(current)
current = token
elif label == 'I-'+entity:
current += " " + token
else:
if current != "":
entities_found.append(current)
current = ""
if current != "":
entities_found.append(current)
return entities_found
def get_corpus_entity_types(metadata):
entity_types = {}
for pubmed in metadata['pubmed_sentences_annotations']:
if 'pubmed_sentences_entity_annotations' not in \
metadata['pubmed_sentences_annotations'][pubmed]:
continue
for tokens, labels in \
metadata['pubmed_sentences_annotations'][pubmed]\
['pubmed_sentences_entity_annotations']:
food_entities = _get_entities(tokens.split(),labels.split(),'Food')
for food in food_entities:
entity_types[food] = entity_types.setdefault(food,[])+['Food']
condition_entities = \
_get_entities(tokens.split(),labels.split(),'Condition')
for condition in condition_entities:
entity_types[condition] = \
entity_types.setdefault(condition,[])+['Condition']
nutrition_entities = \
_get_entities(tokens.split(),labels.split(),'Nutrition')
for nutrition in nutrition_entities:
entity_types[nutrition] = \
entity_types.setdefault(nutrition,[])+['Nutrition']
return entity_types
def get_title_entities(metadata):
file_name_food = {}
for file_name in metadata:
        if 'title_entities' in metadata[file_name]:
            title_tokens = metadata[file_name]['title_entities'][0]
            title_labels = metadata[file_name]['title_entities'][1]
            file_name_food[file_name] = _get_entities(title_tokens.split(),
                title_labels.split(), 'Food')
        else:
            file_name_food[file_name] = []
return file_name_food
def add_title_contains(input, food_entities, embeddings):
extra_contains = []
for pair in input[4]:
for food_entity in food_entities:
if matching_score(food_entity,pair[0],embeddings) < 0.75:
extra_contains.append([food_entity,pair[0]])
    output = list.copy(input)
    # copy the contains list so the caller's annotations are not mutated
    output[4] = list.copy(input[4])
    for pair in extra_contains:
        output[4].append(pair)
return output
def get_target_entities(metadata, entities = ['Food','Condition','Nutrition']):
entities_phrases = {}
for file_name in metadata:
if 'summary_inputs' not in metadata[file_name]:
continue
if 'summary_healthline_entity_annotations' not in \
metadata[file_name]['summary_inputs']:
continue
for sentence in metadata[file_name]['summary_inputs']\
['summary_healthline_entity_annotations']:
tokens = sentence[0].split()
labels = sentence[1].split()
assert len(tokens) == len(labels)
for entity in entities:
phrases = set(_get_entities(tokens, labels, entity))
entities_phrases[entity] = \
entities_phrases.setdefault(entity,set()).union(phrases)
return entities_phrases
def get_source_entities(metadata, entities = ['Food','Condition','Nutrition']):
entities_phrases = {}
ctr = 0
for pubmed in metadata['pubmed_sentences_annotations']:
if 'pubmed_sentences_entity_annotations' not in \
metadata['pubmed_sentences_annotations'][pubmed]:
continue
ctr += 1
for sentence in metadata\
['pubmed_sentences_annotations'][pubmed]\
['pubmed_sentences_entity_annotations']:
tokens = sentence[0].split()
labels = sentence[1].split()
assert len(tokens) == len(labels)
for entity in entities:
phrases = set(_get_entities(tokens, labels, entity))
entities_phrases[entity] = \
entities_phrases.setdefault(entity,set()).union(phrases)
return entities_phrases
def get_best_entity(entity, target_type_embeddings, embeddings):
embedding = _get_entity_embeddings(entity, embeddings).reshape(1,-1)
dot_products = np.dot(embedding, target_type_embeddings.T)\
.reshape(-1)
return np.argmax(dot_products.reshape(-1))
def rewrite_from_target_dictionary(inputs, target_embeddings, embeddings):
modified_inputs = list.copy(inputs)
len1 = min(1000,len(inputs[0]))
len2 = min(1000,len(inputs[4]))
ctr = 0
for i in range(len1):
input = inputs[0][i]
new_input = list.copy(input)
new_input[0] = (target_embeddings['Food']['names']+\
target_embeddings['Nutrition']['names'])\
[get_best_entity(input[0], \
np.concatenate((target_embeddings['Food']['vectors'], \
target_embeddings['Nutrition']['vectors'])),\
embeddings)]
new_input[1] = target_embeddings['Condition']['names']\
[get_best_entity(input[1], \
target_embeddings['Condition']['vectors'], \
embeddings)]
modified_inputs[0].append(new_input)
for i in range(len2):
input = inputs[4][i]
new_input = list.copy(input)
new_input[0] = (target_embeddings['Food']['names']+\
target_embeddings['Nutrition']['names'])\
[get_best_entity(input[0], \
np.concatenate((target_embeddings['Food']['vectors'], \
target_embeddings['Nutrition']['vectors'])),\
embeddings)]
new_input[1] = target_embeddings['Nutrition']['names'][\
get_best_entity(input[1],
target_embeddings['Nutrition']['vectors'],
embeddings)]
        modified_inputs[4].append(new_input)
return modified_inputs
def rewrite_from_target_with_type_dictionary(inputs, type_dictionary, target_embeddings, embeddings):
modified_inputs = list.copy(inputs)
len1 = min(1000,len(inputs[0]))
len2 = min(1000,len(inputs[4]))
ctr = 0
for i in range(len1):
input = inputs[0][i]
new_input = list.copy(input)
if new_input[0] in type_dictionary:
new_input[0] = target_embeddings[type_dictionary[new_input[0]]]['names']\
[get_best_entity(input[0], target_embeddings[type_dictionary[new_input[0]]]['vectors'],\
embeddings)]
else:
new_input[0] = (target_embeddings['Food']['names']+\
target_embeddings['Nutrition']['names']+target_embeddings['Condition']['names'])\
[get_best_entity(input[0], \
np.concatenate((target_embeddings['Food']['vectors'], \
target_embeddings['Nutrition']['vectors'],\
target_embeddings['Condition']['vectors'])),\
embeddings)]
new_input[1] = target_embeddings['Condition']['names']\
[get_best_entity(input[1], \
target_embeddings['Condition']['vectors'], \
embeddings)]
modified_inputs[0].append(new_input)
for i in range(len2):
input = inputs[4][i]
new_input = list.copy(input)
if new_input[0] in type_dictionary:
new_input[0] = target_embeddings[type_dictionary[new_input[0]]]['names']\
[get_best_entity(input[0], target_embeddings[type_dictionary[new_input[0]]]['vectors'],\
embeddings)]
else:
new_input[0] = (target_embeddings['Food']['names']+\
target_embeddings['Nutrition']['names'])\
[get_best_entity(input[0], \
np.concatenate((target_embeddings['Food']['vectors'], \
target_embeddings['Nutrition']['vectors'])),\
embeddings)]
new_input[1] = target_embeddings['Nutrition']['names'][\
get_best_entity(input[1],\
target_embeddings['Nutrition']['vectors'],\
embeddings)]
        modified_inputs[4].append(new_input)
return modified_inputs
def get_causes_graph(causes_tuples, num_x=20, num_y=20, num_z=5):
    # np.ndarray() returns uninitialised memory; start the counts at zero
    graph = np.zeros(shape=(num_z,), dtype=float)
list_x = []
list_y = []
list_z = ['increases','decreases','controls','satisfies']
for causes in causes_tuples:
list_x.append(causes[0])
list_y.append(causes[1])
    list_x = list(set(list_x))
    list_y = list(set(list_y))
    # keep list_z in its declared order so relation indices stay stable
for ind,causes in enumerate(causes_tuples):
z = list_z.index(causes[2])
graph[z] += 1.0
graph_norm = np.linalg.norm(graph.reshape(-1))
if graph_norm > 0.0:
return graph.reshape(-1)/graph_norm
return graph.reshape(-1)
def get_causes_dict(causes, entity_types):
dictionary = {}
for causes_triplet in causes:
if causes_triplet[2] not in dictionary:
dictionary[causes_triplet[2]] = {}
if causes_triplet[1] not in dictionary[causes_triplet[2]]:
dictionary[causes_triplet[2]][causes_triplet[1]] = []
if causes_triplet[0] not in entity_types:
if causes_triplet[0].lower() in entity_types:
causes_triplet[0] = causes_triplet[0].lower()
if causes_triplet[0] not in entity_types and len(causes_triplet[0])>0:
causes_triplet[0] = causes_triplet[0][0].upper() +\
causes_triplet[0][1:]
if causes_triplet[0] not in entity_types:
dictionary[causes_triplet[2]][causes_triplet[1]].append('Food')
else:
dictionary[causes_triplet[2]][causes_triplet[1]].append(\
entity_types.get(causes_triplet[0],'Food'))
return dictionary
def get_named_causes_dict(causes):
dictionary = {}
for causes_triplet in causes:
if causes_triplet[2] not in dictionary:
dictionary[causes_triplet[2]] = {}
if causes_triplet[1] not in dictionary[causes_triplet[2]]:
dictionary[causes_triplet[2]][causes_triplet[1]] = []
if causes_triplet[0] not in \
dictionary[causes_triplet[2]][causes_triplet[1]]:
dictionary[causes_triplet[2]][causes_triplet[1]].append(\
causes_triplet[0])
return dictionary
def get_modified_sentence(sentence_annotations, sentence_entity_annotations,\
sentence, causes, entities_type, current_sentence):
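    """Rewrite `sentence` (lower-cased) by replacing its gold relation
    entities/conditions with same-typed entities drawn from `causes`."""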
output, _ = sentence_annotations[sentence]
for contain in output[4]:
output[0].append(contain + ['contains'])
z_list = ['increases','decreases','controls','satisfies','contains']
causes_type_dict = get_causes_dict(causes, \
sentence_entity_annotations[sentence][0])
get_original_type_dict = get_causes_dict(output[0], entities_type)
causes_dict = get_named_causes_dict(causes)
get_original_dict = get_named_causes_dict(output[0])
modified_sentence = sentence.lower()
selected_tuples = set()
for cause in get_original_dict:
if cause not in causes_dict:
continue
tuple_counts = {}
for condition2 in causes_dict[cause].keys():
for input2 in causes_dict[cause][condition2]:
tuple_counts[tuple([condition2,input2])] = 0
for condition1 in get_original_dict[cause].keys():
for input1 in get_original_dict[cause][condition1]:
for cond2,in2 in sorted(tuple_counts, \
key=tuple_counts.get, reverse=False):
condition2 = cond2
input2 = in2
if input2 not in entities_type:
input2 = input2.lower()
if input1 not in sentence_entity_annotations\
[sentence][0]:
input1 = input1.lower()
if input1 not in sentence_entity_annotations\
[sentence][0]:
pass
if tuple([condition1,input1]) \
in selected_tuples:
continue
if entities_type.get(input2,"Food") == \
sentence_entity_annotations[sentence][0].get(input1,"Food"):
modified_sentence = modified_sentence.replace(\
input1.lower(),input2.lower())
modified_sentence = modified_sentence.replace(\
condition1.lower(),condition2.lower())
tuple_counts[tuple([cond2,in2])] += 1
selected_tuples.add(tuple([condition1,input1]))
return modified_sentence
def compare_annotations(sentence_annotations, sentence_entity_annotations,\
embeddings, target_embeddings,\
sentence_file_names, title_entities):
causes_similar = []
contains_similar=[]
for sentence in tqdm(sentence_annotations):
output, input = sentence_annotations[sentence]
if len(input[0]) > 5000 or len(input[4]) > 5000:
final_input = input
else:
new_input = add_title_contains(input, \
title_entities[sentence_file_names[sentence]],\
embeddings)
final_input = expand_pubmed_information(new_input)
input_entity_types = sentence_entity_annotations[sentence][1]
#input = rewrite_from_target_dictionary(input, \
# target_embeddings, embeddings)
#input = rewrite_from_target_with_type_dictionary(input, \
# input_entity_types, target_embeddings, embeddings)
causes_scores = [_compare_causes(x, final_input[0], embeddings, \
sentence_file_names[sentence]) for x in output[0]]
contains_scores=[_compare_contains(x, final_input[4], embeddings,\
sentence_file_names[sentence]) for x in output[4]]
if len(causes_scores) > 0:
causes_similar.append(sum(causes_scores)/(len(causes_scores)))
if len(contains_scores) > 0:
contains_similar.append(sum(contains_scores)/\
(len(contains_scores)))
return causes_similar, contains_similar
def follow_up_annotations(sentence_annotations, embeddings, target_embeddings,\
sentence_file_names, title_entities):
sentence_causes = {}
sentence_contains = {}
sentence_all_causes = {}
sentence_all_contains = {}
gold_sentence_causes = {}
gold_sentence_contains = {}
for sentence in tqdm(sentence_annotations):
output, input = list.copy(sentence_annotations[sentence])
if len(input[0]) > 5000 or len(input[4]) > 5000:
final_input = input
else:
new_input = add_title_contains(list.copy(input), \
title_entities[sentence_file_names[sentence]],\
embeddings)
final_input = expand_pubmed_information(new_input)
#current_causes = [_best_causes(x, final_input[0], embeddings) for x in \
# output[0]]
#current_contains=[_best_contains(x, final_input[4], embeddings) for x in \
# output[4]]
current_causes = [_just_causes(x, final_input[0], embeddings) for x in \
output[0]]
current_contains=[_just_causes(x, final_input[4], embeddings) for x in \
output[4]]
constraint_causes = [x for x in current_causes if x is not None]
constraint_contains = [x for x in current_contains if x is not None]
sentence_causes[sentence] = constraint_causes
sentence_contains[sentence]= constraint_contains
sentence_all_causes[sentence] = list.copy(final_input[0])
sentence_all_contains[sentence] = list.copy(final_input[4])
gold_sentence_causes[sentence] = list.copy(output[0])
gold_sentence_contains[sentence]= list.copy(output[4])
return sentence_causes, sentence_contains, sentence_all_causes,\
sentence_all_contains, gold_sentence_causes, gold_sentence_contains
def get_property_rationales(metadata, sentence_relation_annotations,\
sentence_file_names, sentence_entity_annotations,\
sentence_relation_sentiments, splits=['train'], to_write=True):
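    """Build rationale templates for the chosen splits.

    For every gold cause/contain pair found verbatim in the summary sentence,
    the sentence is tagged with <X>/<Y> markers; sub-sentences not covered by
    any pair are kept as 'sentiment' rationales. Optionally writes the
    train_annotations_* txt/jsonl files.
    """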
sentence_properties_annotations = {}
for sentence in sentence_relation_annotations:
sub_sentences = sent_tokenize(sentence)
possible_sub_sentences = [1] * len(sub_sentences)
if metadata[sentence_file_names[sentence]]['split'] not \
in splits:
continue
pubmed, input = sentence_relation_annotations[sentence]
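        # NOTE: despite the name, `pubmed` holds the summary-side annotations
        # (first element of the pair built in paired_annotations)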
sentence_properties_annotations[sentence] = {}
for cause_triplet in pubmed[0]:
x = cause_triplet[0]
y = cause_triplet[1]
if x.lower() not in sentence.lower() or y.lower() not in\
sentence.lower():
continue
for ind in range(len(sub_sentences)):
if x.lower() in sub_sentences[ind].lower() or \
y.lower() in sub_sentences[ind].lower():
possible_sub_sentences[ind]=0
template = sentence.lower()
template = template.replace(x.lower(), "<X> "+x.lower()+" </X>")
template = template.replace(y.lower(), "<Y> "+y.lower()+" </Y>")
sentence_properties_annotations[sentence]['causes'] = \
sentence_properties_annotations[sentence].setdefault('causes',\
[]) + [template]
for contain_triplet in pubmed[4]:
x = contain_triplet[0]
y = contain_triplet[1]
if x.lower() not in sentence.lower() or y.lower() not in\
sentence.lower():
continue
for ind in range(len(sub_sentences)):
if x.lower() in sub_sentences[ind].lower()\
or y.lower() in sub_sentences[ind].lower():
possible_sub_sentences[ind] = 0
template = sentence.lower()
template = template.replace(x.lower(), "<X> "+x.lower()+" </X>")
template = template.replace(y.lower(), "<Y> "+y.lower()+" </Y>")
sentence_properties_annotations[sentence]['contains'] = \
sentence_properties_annotations[sentence].setdefault('contains',\
[]) + [template]
sentence_properties_annotations[sentence]['sentiment'] = [\
sub_sentences[ind] for ind in range(len(sub_sentences))\
if possible_sub_sentences[ind]==1]
if to_write:
text_files = {'causes':open("train_annotations_causes.txt","w"),\
'contains':open("train_annotations_contains.txt","w"),\
'sentiment':open("train_annotations_sentiment.txt","w")}
json_files = {'causes':\
jsonlines.open("train_annotations_causes.jsonl","w"),\
'contains':\
jsonlines.open("train_annotations_contains.jsonl","w"),\
'sentiment':\
jsonlines.open("train_annotations_sentiment.jsonl","w")}
for sentence in sorted(sentence_properties_annotations):
for property in sorted(sentence_properties_annotations[sentence]):
for case in sorted(sentence_properties_annotations[sentence]\
[property]):
text_files[property].write("0\t0\t"+case+"\n")
dict = {'sentence':case,'original':sentence}
json_files[property].write(dict)
for property in text_files.keys():
text_files[property].close()
json_files[property].close()
return sentence_properties_annotations
def get_predicted_property_rationales():
    text_files = {'causes':open("train_annotations_causes.txt","r")\
        .readlines()[1:],\
        'contains':open("train_annotations_contains.txt","r")\
        .readlines()[1:],\
        'sentiment':open("train_annotations_sentiment.txt","r")\
        .readlines()[1:]}
json_files = {'causes':\
jsonlines.open("train_annotations_causes.jsonl","r"),\
'contains':\
jsonlines.open("train_annotations_contains.jsonl","r"),\
'sentiment':\
jsonlines.open("train_annotations_sentiment.jsonl","r")}
predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/"\
"allennlp/models/elmo-constituency-parser-2018.03.14.tar.gz")
sentence_properties_rationale_predictions = {}
sentence_verb_phrase_rationale= {}
sentence_constituency_parsing = {}
sentence_verb_phrases = {}
if os.path.exists("target_constituency_parse.p"):
sentence_constituency_parsing = pickle.load(open(\
"target_constituency_parse.p","rb"))
sentence_verb_phrases = pickle.load(open(\
"target_verb_phrases.p","rb"))
for property in tqdm(['causes', 'contains', 'sentiment']):
for r,line in tqdm(zip(json_files[property],text_files[property])):
line = line.strip()
sentence = r['original']
if sentence not in sentence_properties_rationale_predictions:
sentence_properties_rationale_predictions[sentence] = {}
sentence_verb_phrase_rationale[sentence] = {}
if property not in \
sentence_properties_rationale_predictions[sentence]:
sentence_properties_rationale_predictions[sentence]\
[property] = []
sentence_verb_phrase_rationale[sentence]\
[property] = []
parts = line.split('\t')
tokens= parts[2].split()
take = [1] * len(tokens)#[int(x) for x in parts[3].split()]
span = ""
for token,t in zip(tokens,take):
if t == 1:
span += token + " "
span = span.strip()
#if (span in sentence_properties_rationale_predictions[sentence]\
# .get('causes',[]) or \
# sentence_properties_rationale_predictions[sentence]\
# .get('contains',[])) and property=='sentiment':
# continue
if sentence not in sentence_constituency_parsing:
sentence_constituency_parsing[sentence] = \
predictor.predict(sentence=sentence)
stack = []
if 'children' in sentence_constituency_parsing\
[sentence]['hierplane_tree']['root']:
stack += sentence_constituency_parsing[sentence]\
['hierplane_tree']['root']['children']
verb_phrases = []
while len(stack) > 0:
child = stack[0]
if 'children' in child:
stack += child['children']
if child['nodeType'] == 'VP':
verb_phrases.append(child['word'])
del stack[0]
sentence_verb_phrases[sentence] = verb_phrases
tokened_sentence = ' '.join(tokens).strip()
y_string = r['sentence'][r['sentence'].find("<Y>")+3:\
r['sentence'].find("</Y>")].strip()
best_rationale = sentence
for verb_phrase in sentence_verb_phrases[sentence]:
if y_string not in verb_phrase:
continue
if len(verb_phrase) < len(best_rationale):
best_rationale = verb_phrase
assert y_string in verb_phrase
sentence_properties_rationale_predictions[sentence]\
[property].append(r['sentence'] + " ### " + best_rationale)
sentence_verb_phrase_rationale[sentence][property]\
.append(best_rationale)
pickle.dump(sentence_constituency_parsing, \
open("target_constituency_parse.p","wb"))
pickle.dump(sentence_verb_phrases, \
open("target_verb_phrases.p","wb"))
return sentence_properties_rationale_predictions
#return sentence_verb_phrase_rationale
def get_fusion_training_data(sentence_extracted_rationales,\
sentence_relation_annotations, sentence_file_names, title_entities, \
embeddings):
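    """Generate fusion training pairs (source structures '###'-joined ->
    target sentence) and write them to train_fusion.jsonl.

    Title food entities and condition entities are swapped with their nearest
    embedding neighbours to augment the data.
    """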
num_data_points = 0
all_title_entities = set()
for entities in title_entities.values():
for entity in entities:
all_title_entities.add(entity)
title_closest_entities = {}
for entity in all_title_entities:
current_entities = []
current_scores = []
for entity2 in all_title_entities:
if entity2 == entity:
continue
current_entities.append(entity2)
current_scores.append(matching_score(entity, entity2, embeddings))
indices = np.argsort(-np.array(current_scores))[:30]
title_closest_entities[entity] = [current_entities[x] for x in indices]
all_condition_entities = set()
for sentence in sentence_relation_annotations:
input,_ = sentence_relation_annotations[sentence]
for cause in input[0]:
all_condition_entities.add(cause[1])
condition_closest_entities = {}
for entity in all_condition_entities:
current_entities = []
current_scores = []
for entity2 in all_condition_entities:
if entity2 == entity:
continue
current_entities.append(entity2)
current_scores.append(matching_score(entity, entity2, embeddings))
indices = np.argsort(-np.array(current_scores))[:200]
condition_closest_entities[entity] = [current_entities[x] for x in \
indices]
training_jsonl = jsonlines.open("train_fusion.jsonl","w")
training_instances = []
for sentence in tqdm(sentence_extracted_rationales):
if sentence_extracted_rationales[sentence].get('causes',[]) == []:
continue
sentence_causes = sentence_extracted_rationales[sentence]['causes']
sentence_contain_relations = sentence_relation_annotations[sentence]\
[0][4]
sentence_conditions = [cause[cause.find("<Y>")+3:\
cause.find("</Y>")].strip() for cause in sentence_causes]
sentence_r_causes = [x[x.find("###")+3:].strip() for x in \
sentence_causes]
assert len(sentence_causes) == len(sentence_conditions)
food_title = ""
if len(title_entities[sentence_file_names[sentence]]) > 0:
food_title = title_entities[sentence_file_names[sentence]][0]\
.lower()
for current_title in [food_title] + list(title_closest_entities.get(\
food_title,[])):
possible_sentences = sent_tokenize(sentence.lower())
useful_sentences = []
for possible_sentence in possible_sentences:
tokenized_possible_sentence = ' '.join([\
token.text for token in \
spacy_nlp(possible_sentence.lower())]).strip()
for cause_sentence in sentence_causes:
cause_sentence = cause_sentence[\
cause_sentence.find("###")+3:].strip()
if cause_sentence.lower() in tokenized_possible_sentence:
useful_sentences.append(tokenized_possible_sentence)
break
target_sentence = ' '.join(useful_sentences).strip()
if target_sentence.strip() == "":
continue
if food_title in target_sentence and len(food_title) > 0:
target_sentence = target_sentence.replace(food_title,\
current_title)
source_sentence = current_title + " ### "
ignore_data = False
for contain_relation in sentence_contain_relations:
if contain_relation[0].lower() in target_sentence:
target_sentence = target_sentence.replace(\
contain_relation[0].lower(), current_title.lower())
contain_relation[0] = current_title
else:
ignore_data = True
break
source_sentence += ' contains '.join(contain_relation) + " ### "
if ignore_data:
break
reduced_sentence_causes = []
#if len(sentence_contain_relations) > 0:
meta_source_sentence = source_sentence
short_sentence_causes = list(set([sentence_cause[sentence_cause.find("###")\
+3:].strip() for sentence_cause in sentence_causes if \
sentence_cause[sentence_cause.find("###")+3:].strip()\
!= sentence]))
for sent in short_sentence_causes:
reduced_sentence_causes.append(sent)
for sen in short_sentence_causes:
if sen == sent:
continue
if sent in sen:
del reduced_sentence_causes[-1]
break
if any([sent.lower() not in target_sentence for sent in \
reduced_sentence_causes]):
continue
cause_indices = [target_sentence.index(sent.lower()) for sent in \
reduced_sentence_causes]
sorted_indices= np.argsort(np.array(cause_indices))
reduced_sentence_causes = [reduced_sentence_causes[ind] for ind in\
sorted_indices]
reduced_sentence_conditions = [sentence_conditions[sentence_r_causes\
.index(sentence_cause)] for sentence_cause in \
reduced_sentence_causes]
for ind, (cause_sentence, cause_condition) in enumerate(\
zip(reduced_sentence_causes,\
reduced_sentence_conditions)):
possible_replacements = [cause_condition] + \
condition_closest_entities.get(cause_condition, [])
for possible_replacement in possible_replacements:
source_sentence = meta_source_sentence
target_sentence = ' '.join(useful_sentences).strip()
for i, (c_s, c_c) in enumerate(\
zip(reduced_sentence_causes,\
reduced_sentence_conditions)):
if i == ind:
c_s = c_s.replace(cause_condition, \
possible_replacement)
target_sentence = target_sentence.replace(\
cause_condition,\
possible_replacement)
source_sentence += c_s + " ### "
else:
source_sentence += c_s + " ### "
fraction = sum([word in source_sentence for word\
in target_sentence.split()])/\
len(target_sentence.split())
if not( fraction >= 0.8 or (fraction >= 0.5 and \
'contains' in meta_source_sentence) ):
continue
fusion_dict = {'target': target_sentence}
fusion_dict['source'] = source_sentence
training_instances.append(fusion_dict)
num_data_points += 1
print("Created %d training data points" %num_data_points)
random.shuffle(training_instances)
for instance in training_instances:
training_jsonl.write(instance)
training_jsonl.close()
def get_mapped_cosine_similarities(metadata, sentence_extracted_rationales,\
sentence_file_names, sentence_all_causes, embeddings):
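    """Map extracted rationales to their closest pubmed sentences.

    Combines TF-IDF cosine similarity with entity matching scores (and, when
    args.run_features is set, importance probabilities from an external BERT
    run) to pick supporting pubmed sentences, then writes the importance and
    T5 rationale train/dev files.
    """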
vectorizer = TfidfVectorizer(max_features=5000)
corpus_sentences = []
sentence_pubmed_articles = {}
file_name_pubmed = {}
pubmed_sentences = []
sentence_pubmeds = {}
for file_name in metadata:
if 'summary_inputs' not in metadata[file_name]:
continue
if 'summary_pubmed_articles' not in metadata[file_name]\
['summary_inputs']:
continue
for sentence,pubmeds in metadata[file_name]\
['summary_inputs']['summary_pubmed_articles'].items():
corpus_sentences += [sentence]
sentence_pubmed_articles[sentence.strip()] = pubmeds
file_name_pubmed[file_name] = []
if 'pubmed_sentences' not in metadata[file_name]:
continue
for pubmed in metadata[file_name]["pubmed_sentences"]:
sentence_pubmeds[pubmed] = []
for pubmed_text,label_token in metadata[file_name]\
["pubmed_sentences"][pubmed][1:]:
corpus_sentences.append(' '.join(pubmed_text).strip())
file_name_pubmed[file_name].append(corpus_sentences[-1])
sentence_pubmeds[pubmed].append(' '.join(pubmed_text).strip())
f_train = jsonlines.open("importance_train.jsonl","w")
f_dev = jsonlines.open("importance_dev.jsonl","w")
f_train_rationale = {'causes':open('T5_train_general.txt','w'),\
'sentiment':open('T5_train_sentiment.txt','w')}
f_dev_rationale = {'causes':open('T5_dev_general.txt','w'),\
'sentiment':open('T5_dev_sentiment.txt','w')}
f_train_rationale_jsonl = {'causes':jsonlines.open('T5_train_general.jsonl','w'),\
'sentiment':jsonlines.open('T5_train_sentiment.jsonl','w')}
f_dev_rationale_jsonl = {'causes':jsonlines.open('T5_dev_general.jsonl','w'),\
'sentiment':jsonlines.open('T5_dev_sentiment.jsonl','w')}
sentence_pubmed_causes = {}
sentence_pubmed_causes_importance = {}
for sentence in sentence_extracted_rationales:
for pubmed in sentence_pubmed_articles[sentence]:
if pubmed not in sentence_pubmed_causes:
sentence_pubmed_causes[pubmed] = []
sentence_pubmed_causes_importance[pubmed] = []
for pubmed_sentence in sentence_pubmeds[pubmed]:
for cause_triplet in sentence_all_causes[sentence]:
if cause_triplet[0] in pubmed_sentence and \
cause_triplet[1] in pubmed_sentence:
modified_sentence = pubmed_sentence.replace(\
cause_triplet[0], " <X> " + cause_triplet[0]\
+ " </X> ")
modified_sentence = modified_sentence.replace(\
cause_triplet[1], " <Y> " + cause_triplet[1]\
+ " </Y> ")
if modified_sentence not in sentence_pubmed_causes[pubmed]:
sentence_pubmed_causes[pubmed].append(modified_sentence)
sentence_pubmed_causes_importance[pubmed].append(0.0)
if args.run_features:
line_importance_file = jsonlines.open("T4_dev_general.jsonl","w")
input_dicts = []
for pubmed in sentence_pubmed_causes:
for pubmed_sentence in sentence_pubmed_causes[pubmed]:
dict = {'sentence':pubmed_sentence, 'gold_label': 'increases',\
'uid': 0, 'pubmed': pubmed}
input_dicts.append(dict)
if args.run_features:
line_importance_file.write(dict)
if args.run_features:
line_importance_file.close()
if args.run_features:
os.chdir("/data/rsg/nlp/darsh/pytorch-pretrained-BERT")
os.system("python examples/run_causes.py --task_name re_task --do_eval --do_lower_case --data_dir /data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/ --bert_model bert-base-uncased --max_seq_length 128 --train_batch_size 32 --learning_rate 5e-5 --num_train_epochs 3.0 --output_dir t4_general_causes_output --output_preds")
os.chdir("/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT")
copyfile("/data/rsg/nlp/darsh/"\
"pytorch-pretrained-BERT/t4_general_causes_output/"\
"preds.jsonl","pubmed_line_importance_preds.jsonl")
prediction_file = jsonlines.open(\
"pubmed_line_importance_preds.jsonl","r")
for p,input_dict in zip(prediction_file,input_dicts):
pubmed_file = input_dict['pubmed']
sentence = input_dict['sentence']
pred_prob = float(p['increases'])
sentence_pubmed_causes_importance[pubmed_file][\
sentence_pubmed_causes[pubmed_file].index(sentence)] = \
pred_prob
prediction_file.close()
healthline_sentence_pubmed_causes = {}
healthline_sentence_pubmed_causes_importance = {}
for sentence in sentence_extracted_rationales:
healthline_sentence_pubmed_causes[sentence] = []
healthline_sentence_pubmed_causes_importance[sentence] = []
for pubmed in sentence_pubmed_articles[sentence]:
healthline_sentence_pubmed_causes[sentence] += \
sentence_pubmed_causes[pubmed]
healthline_sentence_pubmed_causes_importance[sentence] += \
sentence_pubmed_causes_importance[pubmed]
vectorizer.fit_transform(corpus_sentences)
for sentence in tqdm(sentence_extracted_rationales):
for property in sentence_extracted_rationales[sentence]:
if property in ['contains']:
continue
accepted_indices = []
for case in tqdm(sentence_extracted_rationales[sentence]\
.get(property,[])):
sent_representation = vectorizer.transform([case])
pubmed_sentences = []
pubmed_sentences_entities = {}
for pubmed in sentence_pubmed_articles[sentence]:
if pubmed in sentence_pubmeds:
pubmed_sentences += sentence_pubmeds[pubmed]
pubmed_representations = vectorizer.transform(pubmed_sentences)
dot_products = np.dot(sent_representation, \
pubmed_representations.transpose()).toarray().flatten()
if property == 'causes':
for pubmed_sentence in pubmed_sentences:
for cause in sentence_all_causes.get(sentence,[]):
if cause[0] in pubmed_sentence and cause[1] in \
pubmed_sentence:
pubmed_sentences_entities[pubmed_sentence] = \
pubmed_sentences_entities.setdefault(pubmed_sentence,\
[]) + [cause]
entity_sums = []
                    # the <X>/<Y> tags live in the extracted rationale
                    # template (`case`), not in the raw healthline sentence
                    x_string = case[case.find("<X>")+3:\
                        case.find("</X>")].strip()
                    y_string = case[case.find("<Y>")+3:\
                        case.find("</Y>")].strip()
enlarged_sentences = []
enlarged_original = []
full_pubmed_representations = \
pubmed_representations.toarray()
for pubmed_sentence, pubmed_representation in \
tqdm(zip(pubmed_sentences, \
full_pubmed_representations)):
max_score = 0.0
for cause in pubmed_sentences_entities.get(pubmed_sentence,[]):
modified_sentence = pubmed_sentence.replace(\
cause[0], ' <X> ' + cause[0] + ' </X> ')
modified_sentence = modified_sentence.replace(\
cause[1], ' <Y> ' + cause[1] + ' </Y> ')
existing_general_prob = 0.1
if sentence in healthline_sentence_pubmed_causes:
if modified_sentence in \
healthline_sentence_pubmed_causes\
[sentence]:
existing_general_prob = \
healthline_sentence_pubmed_causes_importance[sentence]\
[healthline_sentence_pubmed_causes[sentence].index(modified_sentence)]
enlarged_original.append(pubmed_sentence)
enlarged_sentences.append(modified_sentence)
current_score = matching_score(cause[0],\
x_string, embeddings) + \
matching_score(cause[1],\
y_string, embeddings) * existing_general_prob
entity_sums.append(current_score)
if current_score > max_score:
max_score = current_score
dot_products = np.array(entity_sums)
pubmed_sentences = enlarged_sentences
if len(enlarged_original) == 0:
continue
pubmed_representations = vectorizer.transform(enlarged_original)
dot_products += np.dot(sent_representation, \
pubmed_representations.transpose()).toarray().flatten()
accepted_indices.append(np.argmax(dot_products))
accepted_indices = list(set(accepted_indices))
for ind,p_sentence in enumerate(pubmed_sentences):
label = 'increases' if ind in accepted_indices else 'NA'
if label == 'NA':
if random.choice([i for i in range(15)]) != 0:
continue
dict = {'sentence':p_sentence, 'gold_label':label, 'uid':0}
if random.choice([0,0,0,0,1]) == 1:
f_dev.write(dict)
f_dev_rationale_jsonl[property].write(dict)
f_dev_rationale[property].write(str(ind)+"\t"+\
str(int(label=='increases'))+"\t"+p_sentence+"\n")
else:
f_train.write(dict)
f_train_rationale_jsonl[property].write(dict)
f_train_rationale[property].write(str(ind)+"\t"+\
str(int(label=='increases'))+"\t"+p_sentence+"\n")
f_dev.close()
f_train.close()
for property in f_train_rationale:
f_train_rationale[property].close()
f_train_rationale_jsonl[property].close()
f_dev_rationale[property].close()
f_dev_rationale_jsonl[property].close()
return vectorizer
def match_dicts(input_dict, output_dict):
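    """Symmetric overlap score between two nested relation->condition->types
    dicts: the average of matched/total computed from each side."""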
total = 0
for r in output_dict:
for c in output_dict[r]:
total += len(output_dict[r][c])
if total == 0:
return 0
total2 = 0
for r in input_dict:
for c in input_dict[r]:
total2 += len(input_dict[r][c])
if total2 == 0:
return 0
matching = 0
for r in output_dict:
if r not in input_dict:
continue
types1 = sorted([output_dict[r][c] for c in output_dict[r]])
types2 = sorted([input_dict[r][c] for c in input_dict[r]])
indices_selected = set()
for xs in types1:
best_ind = -1
best_len = 0
for ind,ys in enumerate(types2):
if ind in indices_selected:
continue
ctr = 0
set_xs = set(xs)
for v in set_xs:
ctr += min(xs.count(v), ys.count(v))
len1 = ctr
ctr = 0
if len1 > best_len:
best_len = len1
best_ind = ind
if best_ind != -1:
indices_selected.add(best_ind)
matching += best_len
assert matching <= total
assert matching <= total2
return (matching/total + matching/total2)/2
#return (matching/total - 100*((total-matching)/total))
def create_importance_classification_data(sentence_file_names, metadata, \
sentence_causes, sentence_all_causes, sentence_contains,\
sentence_all_contains, split_name):
train_data = jsonlines.open("importance_train.jsonl","w")
dev_data = jsonlines.open("importance_dev.jsonl","w")
test_data = jsonlines.open("importance_test.jsonl","w")
dictionary = {'train':train_data, 'dev':dev_data}
for sentence in sentence_causes:
file_name = sentence_file_names[sentence]
split = metadata[file_name]["split"]
if split in ['train','dev']:
candidates = sentence_all_causes[sentence] + sentence_all_contains[sentence]
random.shuffle(candidates)
for s1,s2 in zip(sentence_causes[sentence]+sentence_contains[sentence],\
candidates):
dict1 = {'sentence':s1[0]+" # "+s1[1],'gold_label':'increases'}
dictionary[split].write(dict1)
if s2 != s1:
dict2 = {'sentence':s2[0]+" # "+s2[1],'gold_label':'NA'}
dictionary[split].write(dict2)
elif split == split_name:
for s1 in sentence_causes[sentence]+sentence_contains[sentence]:
dict1 = {'sentence':s1[0]+" # "+s1[1],'gold_label':'increases',\
'original_sentence':sentence,'structure':s1}
test_data.write(dict1)
if file_name == '11-proven-benefits-of-bananas':
if s1[0].lower() == "salmon":
assert False
for s2 in sentence_all_causes[sentence]+sentence_all_contains[sentence]:
if s2 not in sentence_causes[sentence]+sentence_contains[sentence]:
dict2 = {'sentence':s2[0]+" # "+s2[1],'gold_label':'NA',\
'original_sentence':sentence,'structure':s2}
test_data.write(dict2)
if file_name == '11-proven-benefits-of-bananas':
if s1[0].lower() == "salmon":
assert False
train_data.close()
dev_data.close()
test_data.close()
def get_predicted_structures(input_file, predicted_file, embeddings,\
sentence_file_names, title_entities,\
sentence_relation_sentiments, metadata, cluster_threshold=1.5,\
prob_threshold=0.2):
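    """Post-process importance predictions into per-sentence structures.

    Structures are filtered by probability, boosted for title-food matches and
    'Bad' sentiments, chained (contains -> causes), and de-duplicated by
    hierarchical clustering of their embedding representations.
    """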
sentence_structures = {}
sentence_probabilities={}
reader1 = jsonlines.open(input_file,"r")
reader2 = jsonlines.open(predicted_file,"r")
for r1,r2 in tqdm(zip(reader1,reader2)):
#if not (1 - float(r2['NA']) > prob_threshold) and \:
# continue
causes_structures = []
contains_structures=[]
causes_representations = []
contains_representations=[]
if r2['pred_label'] != 'NA' or True:
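            # the `or True` disables the NA filter here, so every predicted
            # structure is kept and thresholded later by probability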
sentence_structures[r1['original_sentence'].strip()] = \
sentence_structures.setdefault(\
r1['original_sentence'].strip(),[])\
+ [r1['structure']]
sentence_probabilities[r1['original_sentence'].strip()] = \
sentence_probabilities.setdefault(\
r1['original_sentence'].strip(),[])\
+ [1-float(r2['NA'])]
for sentence in tqdm(sentence_structures):
causes_structures = []
contains_structures=[]
causes_representations = []
contains_representations=[]
causes_probabilities = []
contains_probabilities = []
causes_sentiments= []
for structure,prob in zip(sentence_structures[sentence],\
sentence_probabilities[sentence]):
if len(title_entities[sentence_file_names[sentence]]) > 0:
rep1 = _get_entity_embeddings(structure[0], embeddings)
rep2 = _get_entity_embeddings(title_entities[\
sentence_file_names[sentence]][0], embeddings)
if np.dot(rep1,rep2) > 0.95:
prob = 1.0
if prob < prob_threshold:
continue
if len(structure) == 2:
contains_structures.append(structure)
rep1 = _get_entity_embeddings(structure[0], embeddings)
rep2 = _get_entity_embeddings(structure[1], embeddings)
contains_representations.append(np.concatenate((rep1,rep2)))
contains_representations[-1] = \
np.concatenate((contains_representations[-1],[0,0,0]))
contains_probabilities.append(prob)
else:
causes_structures.append(structure)
rep1 = _get_entity_embeddings(structure[0], embeddings)
rep2 = _get_entity_embeddings(structure[1], embeddings)
causes_representations.append(np.concatenate((rep1,rep2)))
causes_probabilities.append(prob)
causes_sentiments.append(\
sentence_relation_sentiments[sentence]\
.get(tuple(structure),['Good'])[0])
if causes_sentiments[-1] == 'Bad':
causes_probabilities[-1] += 100.0 * \
float(sentence_relation_sentiments\
[sentence][tuple(structure)][1])
if tuple(structure) in sentence_relation_sentiments[sentence]\
and sentence_relation_sentiments[sentence]\
[tuple(structure)][-1] == True:
causes_probabilities[-1] *= 1.0
sentiment_feature = [0,0,0]
sentiment_feature[['Good','Bad','Neutral']\
.index(causes_sentiments[-1])] = 1
causes_representations[-1] = \
np.concatenate((causes_representations[-1],\
sentiment_feature))
chain_structures = []
chain_representations = []
chain_probabilities = []
chain_sentiments = []
for i in range(len(contains_structures)):
rep1 = _get_entity_embeddings(contains_structures[i][0], embeddings)
rep2 = _get_entity_embeddings(contains_structures[i][1], embeddings)
if np.dot(rep1,rep2) > 0.75:
continue
if [contains_structures[i]] not in chain_structures:
chain_structures.append([contains_structures[i]])
chain_probabilities.append(contains_probabilities[i])
chain_representations.append(contains_representations[i])
for j in range(len(causes_structures)):
if [causes_structures[j]] not in chain_structures:
chain_structures.append([causes_structures[j]])
chain_probabilities.append(causes_probabilities[j])
chain_representations.append(causes_representations[j])
rep1 = _get_entity_embeddings(contains_structures[i][1],\
embeddings)
rep2 = _get_entity_embeddings(causes_structures[j][0],\
embeddings)
if np.dot(rep1,rep2) > 0.95:
chain_rep = (contains_representations[i] +\
causes_representations[j])/2
chain_prob= contains_probabilities[i] + \
causes_probabilities[j]
chain_structures.append([contains_structures[i],\
causes_structures[j]])
chain_representations.append(chain_rep)
chain_probabilities.append(chain_prob)
if len(contains_structures) == 0:
chain_structures = [[x] for x in causes_structures]
chain_representations = list.copy(causes_representations)
chain_probabilities = list.copy(causes_probabilities)
chain_clusters = []
if len(chain_representations) > 1:
chain_clusters = hcluster.fclusterdata(chain_representations,\
cluster_threshold, criterion="distance").tolist()
chain_structures_dict = {}
if chain_clusters != []:
for ind, cluster_index in enumerate(chain_clusters):
if cluster_index not in chain_structures_dict:
chain_structures_dict[cluster_index] = ind
else:
if chain_probabilities[ind] > \
chain_probabilities[chain_structures_dict[cluster_index]]:
chain_structures_dict[cluster_index] = ind
sentence_structures[sentence] = []
for cluster_index,ind in chain_structures_dict.items():
assert len(chain_structures[ind]) <= 2
for structure in chain_structures[ind]:
sentence_structures[sentence].append(structure)
if sentence_file_names[sentence] == 'legumes-good-or-bad':
#pass
if ['raw red kidney beans', 'acute gastroenteritis', 'increases']\
in sentence_structures[sentence]:
print("Our friend is found")
else:
assert len(chain_representations) <= 1
sentence_structures[sentence] = []
for chain_structure in chain_structures:
for structure in chain_structure:
sentence_structures[sentence].append(structure)
return sentence_structures
def get_causes_contains_structures(sentence_structures, sentence_all_causes,\
sentence_all_contains):
sentence_learnt_causes = {}
sentence_learnt_contains = {}
for sentence in sentence_structures:
sentence_learnt_causes[sentence] = []
sentence_learnt_contains[sentence]= []
for structure in sentence_structures[sentence]:
if len(structure) == 3:
sentence_learnt_causes[sentence] = sentence_learnt_causes\
.setdefault(sentence,[]) + [structure]
else:
sentence_learnt_contains[sentence] = sentence_learnt_contains\
.setdefault(sentence,[]) + [structure]
return sentence_learnt_causes, sentence_learnt_contains
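# Example of the split performed above, on hypothetical inputs: a length-3 structure
# like ['spinach', 'anemia', 'decreases'] lands in the "causes" dict, anything else
# (e.g. the pair ['spinach', 'iron']) lands in the "contains" dict:
#   causes, contains = get_causes_contains_structures(
#       {'s': [['spinach', 'anemia', 'decreases'], ['spinach', 'iron']]}, None, None)
#   causes['s']   -> [['spinach', 'anemia', 'decreases']]
#   contains['s'] -> [['spinach', 'iron']]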
def predict_importance_sentences(metadata, split_name, args):
input_file = jsonlines.open("importance_dev.jsonl","w")
sentiment_file = jsonlines.open("dev_sentiment.jsonl","w")
all_pubmeds = {}
pubmed_sentences = {}
pubmed_sentences_labels = {}
pubmed_file_name = {}
for file_name in metadata:
if 'split' not in metadata[file_name]:
continue
if metadata[file_name]['split'] != split_name:
continue
if 'summary_inputs' not in metadata[file_name]:
continue
if 'summary_pubmed_articles' not in metadata[file_name]\
['summary_inputs']:
continue
for sentence,pubmeds in metadata[file_name]['summary_inputs']\
['summary_pubmed_articles'].items():
for pubmed in pubmeds:
all_pubmeds[file_name] = all_pubmeds.setdefault(file_name,[])+\
[pubmed]
pubmed_file_name[pubmed] = file_name
for file_name in all_pubmeds:
for pubmed in all_pubmeds[file_name]:
if pubmed not in pubmed_sentences:
pubmed_sentences[pubmed] = []
if pubmed in metadata\
['pubmed_sentences_annotations']:
title_sentence = metadata['pubmed_sentences_annotations']\
[pubmed]['pubmed_sentences_entity_annotations'][0]\
[0]
for sentence,labels in metadata\
['pubmed_sentences_annotations'][pubmed]\
['pubmed_sentences_entity_annotations'][1:]:
if sentence in pubmed_sentences[pubmed] or \
sentence == title_sentence:
continue
pubmed_sentences[pubmed].append(sentence)
pubmed_sentences_labels[sentence] = labels
input_sentences = []
sentiment_sentences = []
for pubmed in pubmed_sentences:
for sentence in pubmed_sentences[pubmed]:
dict = {'sentence':sentence, 'pubmed':pubmed, 'uid':0, \
'gold_label':'NA', 'file_name':pubmed_file_name[pubmed],\
'entity_string':pubmed_sentences_labels[sentence]}
sentiment_dict = {'sentence':sentence, 'pubmed':pubmed, 'uid':0, \
'gold_label':'Good'}
sentiment_sentences.append(sentiment_dict)
input_file.write(dict)
input_sentences.append(dict)
sentiment_file.write(sentiment_dict)
input_file.close()
sentiment_file.close()
# get sentence sentiments
if args.run_features:
from evaluation.create_sentiment_outputs import produce_sentiment_outputs
results = produce_sentiment_outputs("dev_sentiment.jsonl", \
"full_sentiment_classification")
copyfile("/data/rsg/nlp/darsh/"\
"pytorch-pretrained-BERT/full_sentiment_classification/"\
"preds.jsonl","pubmed_"+split_name+"_preds.jsonl")
else:
#results = jsonlines.open("/data/rsg/nlp/darsh/"\
# "pytorch-pretrained-BERT/full_sentiment_classification/"\
# "preds.jsonl","r")
results = jsonlines.open("pubmed_"+split_name+"_preds.jsonl")
# get_importance_probabilities
if args.run_features:
os.chdir("/data/rsg/nlp/darsh/pytorch-pretrained-BERT")
os.system("python examples/run_importance.py --task_name re_task --do_eval --do_lower_case --data_dir /data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/ --bert_model bert-base-uncased --max_seq_length 128 --train_batch_size 32 --learning_rate 5e-5 --num_train_epochs 3.0 --output_dir Importance_Classification --output_preds")
os.chdir("/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT")
copyfile("/data/rsg/nlp/darsh/"\
"pytorch-pretrained-BERT/Importance_Classification/"\
"preds.jsonl","pubmed_importance_"+split_name+"_preds.jsonl")
prediction_file = jsonlines.open(\
"pubmed_importance_"+split_name+"_preds.jsonl","r")
output_sentences= []
for p,r in zip(prediction_file,results):
p['sentiment'] = r['pred_label']
output_sentences.append(p)
prediction_file.close()
print("Sending %d pubmed sentences for downstream fun" \
%len(input_sentences))
return input_sentences, output_sentences
def _get_baseline_sentences(env, sentences):
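    # Greedy coverage baseline: for each gold relation not already matched by the kept
    # set, keep the first candidate sentence that matches it; no policy is involved.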
kept_sentences = set()
attended_relations = set()
for g_r in env.gold_relations:
if _compare_causes(g_r,list(kept_sentences),env.embeddings,""):
continue
for sentence in sentences:
if _compare_causes(g_r,[sentence],env.embeddings,""):
kept_sentences.add(tuple(sentence))
break
return list(kept_sentences), 0, 0
def _get_policy_sentences(env, sentences, policy, optimizer, prev_loss=[],\
batch_size=8):
rewards = []
for I in range(1):
state = np.array([0,0])
selected_sentences = []
for t in range(env.max_phrases):
remaining_sentences = [x for x in sentences if x not \
in selected_sentences]
action= select_action(state, policy)
state, reward, done, selected_sentences = env.step(\
state, action, selected_sentences, remaining_sentences)
policy.rewards.append(reward)
if done:
rewards.append(sum(policy.rewards))
break
prev_loss = \
finish_episode(policy, optimizer, prev_loss, batch_size)
return selected_sentences, (sum(rewards)/len(rewards)), prev_loss
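# The policy helpers above assume a REINFORCE-style interface: select_action() samples
# an action and records its log-probability on the policy, rewards accumulate in
# policy.rewards, and finish_episode() turns them into a policy-gradient update (the
# real versions, defined elsewhere in this script, also batch losses via prev_loss and
# batch_size). The sketch below only shows the standard shape of that interface; the
# function names, the saved_log_probs attribute, and gamma are assumptions.
def _sketch_select_action(state, policy):
    import torch
    from torch.distributions import Categorical
    # policy is assumed to map a state vector to action probabilities.
    probs = policy(torch.from_numpy(state).float())
    dist = Categorical(probs)
    action = dist.sample()
    policy.saved_log_probs.append(dist.log_prob(action))
    return action.item()

def _sketch_finish_episode(policy, optimizer, gamma=0.99):
    import torch
    # Discounted returns, computed backwards over the episode.
    returns, R = [], 0.0
    for r in reversed(policy.rewards):
        R = r + gamma * R
        returns.insert(0, R)
    returns = torch.tensor(returns)
    # REINFORCE loss: -log pi(a|s) * return, summed over the episode.
    loss = torch.stack([-log_p * ret for log_p, ret in
                        zip(policy.saved_log_probs, returns)]).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    del policy.rewards[:]
    del policy.saved_log_probs[:]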
def _get_threshold_policy_sentences(env, sentences, importances, policy, \
optimizer, prev_loss=[], batch_size=8, threshold=0.4):
rewards = []
for I in range(1):
state = np.array([0,0,1.0,1.0])
selected_sentences = []
for t in range(env.max_phrases):
if len(selected_sentences) != 0:
remaining_sentences = [x for x in sentences if \
not _compare_causes(x,selected_sentences,env.embeddings,"")]
else:
remaining_sentences = sentences
remaining_importances = [y for x,y in zip(\
sentences,importances) if x in remaining_sentences]
assert len(remaining_sentences) == len(remaining_importances)
if len(remaining_sentences) > 0:
max_importance = max(remaining_importances)
max_ind = remaining_importances.index(max_importance)
state[2] = max_importance
action = select_action(state, policy)
state, reward, done, selected_sentences = env.step(\
state, action, selected_sentences, [remaining_sentences\
[max_ind]])
policy.rewards.append(reward)
else:
done = True
if done:
rewards.append(sum(policy.rewards))
break
prev_loss = \
finish_episode(policy, optimizer, prev_loss, batch_size)
return selected_sentences, (sum(rewards)/len(rewards)), prev_loss
def _get_choice_policy_sentences(env, sentences, importances, sentiments, types,\
sentence_representations, policy, optimizer, gold_sentiment, vectorizer, \
cause_style_rationales_representations,\
prev_loss=[], batch_size=8, epoch_number=-1, pretrain_until=-1, \
repeat_instance=5):
    # Load word embeddings lazily and cache them at module level so repeated calls
    # inside the training loop do not re-read the embedding file.
    global embeddings
    try:
        embeddings
    except NameError:
        embeddings = read_embeddings()
    do_pretrain = False  # epoch_number < pretrain_until (supervised pretraining disabled)
rewards = []
sentence_full_representations = []
for sentence, sentence_representation in zip(sentences,\
sentence_representations):
sentence_full_representations.append(\
list(_get_entity_embeddings(sentence[0],embeddings))+\
list(_get_entity_embeddings(sentence[1],embeddings))+\
list(_get_entity_embeddings(sentence[0],embeddings))+\
list(_get_entity_embeddings(sentence[1],embeddings)))
#list(_get_entity_embeddings(sentence[5],embeddings))+\
#list(sentence_representation))
sentences_index = {}
for ind,sent in enumerate(sentences):
sentences_index[tuple(sent[:3]+sent[4:])] = ind
for I in range(repeat_instance):
#probs = [env.get_reward([sentence]) for sentence in sentences]
selected_sentences = []
selected_representations = []
selected_full_representations = []
for t in range(env.max_phrases):
#remaining_sentences = [x for x in sentences if x not in \
# selected_sentences]
remaining_sentences = []
for i,x in enumerate(sentences):
found=False
for s_s in selected_sentences:
found = True
for a,b in zip(x,s_s):
if ((type(a)!=np.ndarray and a!=b) or \
(type(a)==np.ndarray and not \
all([a1==b1 for a1,b1 in zip(a,b)]))):
found = False
break
if found:
break
if found:
continue
remaining_sentences.append(x)
remaining_importances=[]
remaining_sentiments =[]
remaining_types = []
remaining_representations=[]
remaining_counts = []
for ind in range(len(sentences)):
found = False
for r_s in remaining_sentences:
found=True
for a,b in zip(sentences[ind],r_s):
if ((type(a)!=np.ndarray and a!=b) or \
(type(a)==np.ndarray and not \
all([a1==b1 for a1,b1 in zip(a,b)]))):
found=False
break
if found:
break
if found:
remaining_importances.append(importances[ind])
remaining_sentiments.append(sentiments[ind])
remaining_types.append(types[ind])
remaining_representations.append(\
list(_get_entity_embeddings(sentences[ind][0],embeddings))+\
list(_get_entity_embeddings(sentences[ind][1],embeddings))+\
list(_get_entity_embeddings(sentences[ind][0],embeddings))+\
list(_get_entity_embeddings(sentences[ind][1],embeddings)))
#list(_get_entity_embeddings(sentences[ind][5],embeddings))+\
#list(sentence_representations[ind]))
remaining_counts.append(env.pubmed_sentences[sentences[ind][-2]].lower()\
.count(sentences[ind][0].lower())+\
env.pubmed_sentences[sentences[ind][-2]].lower()\
.count(sentences[ind][1].lower())+\
5*env.pubmed_sentences[sentences[ind][-2]].lower()\
[:env.pubmed_sentences[sentences[ind][-2]].lower().find("###")]\
.count(sentences[ind][0].lower())+\
5*env.pubmed_sentences[sentences[ind][-2]].lower()\
[:env.pubmed_sentences[sentences[ind][-2]].lower().find("###")]\
.count(sentences[ind][1].lower()))
#remaining_importances=[y for x,y in zip(sentences,importances)\
# if x in remaining_sentences]
#remaining_sentiments =[y for x,y in zip(sentences,sentiments)\
# if x in remaining_sentences]
#remaining_types =[y for x,y in zip(sentences,types)\
# if x in remaining_sentences]
#remaining_representations = [list(_get_entity_embeddings(x[0],embeddings))+\
# list(_get_entity_embeddings(x[1],embeddings))+list(y) for x,y in zip(sentences,\
# sentence_representations) if x in remaining_sentences]
assert len(remaining_sentences) == len(remaining_importances)
minimum_differences= [1] * len(remaining_sentences)
min_text_differences=[1] * len(remaining_sentences)
overall_similarity =[0] * len(remaining_sentences)
like_target =[0] * len(remaining_sentences)
#for remaining_sentence,remaining_type,remaining_representation\
# in zip(remaining_sentences,remaining_types,\
# remaining_representations):
# min_difference = 0
# text_difference=0
# if remaining_type == 0:
# min_difference = 0.5
# text_difference = 0.5
# else:
# assert len(selected_sentences) == len(selected_representations)
# for sentence,sentence_representation in \
# zip(selected_sentences,selected_representations):
# difference = (matching_score(remaining_sentence[0],\
# sentence[0],env.embeddings) + \
# matching_score(remaining_sentence[1],\
# sentence[1],env.embeddings))/2
# sentence_difference = np.dot(sentence_representation,\
# remaining_representation)
# #difference = (difference+sentence_difference)/2
# if difference > min_difference:
# min_difference = difference
# if sentence_difference > text_difference:
# text_difference = sentence_difference
# minimum_differences.append(min_difference)
# min_text_differences.append(text_difference)
if len(selected_representations) > 0 and \
len(remaining_representations) > 0:
sent_remaining_representations = [x[100:150] for x in \
remaining_representations]
max_similarity_with_selected = \
np.max(np.dot(np.array(sent_remaining_representations),\
np.array(selected_representations).transpose()),axis=1)
minimum_differences = [1 - x for x in max_similarity_with_selected]
min_text_differences =[1 - x for x in max_similarity_with_selected]
words_selected = sum([len(structure[2][\
structure[2].find("###")+3:].strip().split()) \
for structure in selected_sentences])
all_lhs=[]
if len(remaining_representations)>0:
sent_remaining_representations = [x[100:150] for x in \
remaining_representations]
if env.cause_style_rationales_representations\
!= []:
all_lhs= np.sum(np.dot(np.array(sent_remaining_representations),\
env.cause_style_rationales_representations.transpose()),axis=1)
all_max_lhs = np.max(np.dot(np.array(sent_remaining_representations),\
env.cause_style_rationales_representations.transpose()),axis=1)
else:
all_lhs = np.sum(np.dot(np.array(sent_remaining_representations),\
np.array([[0]*50]).transpose()),axis=1)
all_max_lhs = np.max(np.dot(np.array(sent_remaining_representations),\
np.array([[0]*50]).transpose()),axis=1)
if len(selected_full_representations) == 0:
selected_full_representations = [[0] * 200]
selected_full_representations = np.array(\
selected_full_representations)
selected_full_representations = np.sum(selected_full_representations,\
axis=0)
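            # Each candidate's state row is 405-dimensional (assuming the 50-dim
            # embeddings used elsewhere in this file):
            #   [len(selected_sentences), t]            step bookkeeping          (2)
            #   + representation                        4 x 50-dim embeddings     (200)
            #   + [int(1 - min_diff <= 0.9), 0]         redundancy flag + a
            #                                           disabled feature slot     (2)
            #   + summed embeddings of the already-selected sentences             (200)
            #   + [remaining_count]                     entity mention counts     (1)
            # Two padding rows (marked -1,-1 and -2,-2) are appended once t > 2 or when
            # no candidates remain, giving the policy a "no new candidate" action
            # (see the action == len(states)-1 handling further down).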
states =[[len(selected_sentences),t] + \
#importance +\
representation + [int(1-min_diff<=0.9)]\
#+[int(lhs>env.rhs)]\
+[0]\
+list(selected_full_representations)\
+[remaining_count]\
#+ [min_diff] + [int(sim<=0.9)]\
#+ sentiment\
#+ [type]\ +
#+ [l_t]\
#+[int(words_selected<=lower_limit)] + [int(words_selected+\
#len(remaining_sentence[2][\
#remaining_sentence[2].find("###")+3:].strip()\
#.split()) < upper_limit)] + \
#[len(selected_sentences) > 1]
for remaining_sentence, importance,\
representation, min_diff,\
min_text_diff, sentiment, type, sim, l_t, \
remaining_count in zip(remaining_sentences,\
remaining_importances, remaining_representations,\
minimum_differences,\
min_text_differences,remaining_sentiments,\
remaining_types, overall_similarity, like_target,\
remaining_counts)\
]# + \
if t > 2 or len(states)==0:
states += \
[[len(selected_sentences),t]+\
[0]*200 +[-1,-1] + [0]*200+[0]]\
+[[len(selected_sentences),t]+\
[0]*200 + [-2,-2] + [0]*200+[0]]
assert len(remaining_sentences) == len(states)-2 or t<=2
if do_pretrain:
fixed_choice = len(states)-1
original_reward = env.get_reward(selected_sentences)
best_reward = 0.0
for i in range(len(states)-1):
assert i < len(remaining_sentences)
new_reward = env.get_reward(selected_sentences + \
[remaining_sentences[i]])
if new_reward-original_reward> best_reward:
best_reward = new_reward-original_reward
fixed_choice= i
action = select_action_choice_pretrained(np.array(states), policy,\
fixed_choice)
target_label = torch.LongTensor([fixed_choice])
criterion = nn.CrossEntropyLoss()
prev_loss.append(criterion(\
policy(torch.from_numpy(\
np.array(states)).float()).unsqueeze(0), \
target_label.unsqueeze(0)))
else:
action = select_action_choice(np.array(states), policy, t)
corresponding_action = action < len(remaining_sentences) or t==0
corresponding_extra = []
corresponding_sentiment = []
corresponding_type = []
if corresponding_action:
corresponding_extra = remaining_sentences[action]
corresponding_sentiment = remaining_sentiments[action]
corresponding_type = remaining_types[action]
if action == len(states)-1:
n_selected_sentences = []
for selected_sentence in selected_sentences:
n_selected_sentence = list(selected_sentence)
n_selected_sentence[-1] = ''
n_selected_sentences.append(tuple(n_selected_sentence))
else:
n_selected_sentences = selected_sentences
_, reward, done, n_selected_sentences = env.step(\
[len(selected_sentences),t,-1,-1,-1], \
corresponding_action, n_selected_sentences,\
[corresponding_extra], [corresponding_type],\
corresponding_sentiment, \
gold_sentiment)
if len(selected_sentences) == len(n_selected_sentences):
pass
else:
selected_sentences += [n_selected_sentences[-1]]
selected_representations = []
selected_full_representations = []
for selected_sentence in selected_sentences:
selected_full_representations.append(\
sentence_full_representations[\
sentences_index[tuple(selected_sentence[:3]+\
selected_sentence[4:])]])
selected_representations.append(\
sentence_representations[\
sentences_index[tuple(selected_sentence[:3]+\
selected_sentence[4:])]])
policy.rewards.append(reward)
if done:
if t==0:
pass
break
env_reward = env.get_reward(selected_sentences)
rewards.append(sum(policy.rewards))
prev_loss = \
finish_episode(policy, optimizer, prev_loss, batch_size,\
not do_pretrain)
return selected_sentences, (sum(rewards)/len(rewards)), prev_loss
def _get_clustered_sentences(sentences, sentence_probs,\
vectorizer, cluster_threshold=1.0):
all_representations = vectorizer.transform(sentences)
all_clusters = hcluster.fclusterdata(all_representations.toarray(),\
cluster_threshold, criterion="distance").tolist()
cluster_sentences = {}
cluster_probabilities = {}
assert len(all_clusters) == len(sentences)
for ind,cluster in enumerate(all_clusters):
cluster_sentences[cluster] = cluster_sentences.setdefault(cluster,\
[]) + [sentences[ind]]
if type(sentence_probs[ind]) == float:
cluster_probabilities[cluster] = \
cluster_probabilities.setdefault(\
cluster,[]) + [sentence_probs[ind]]
else:
cluster_probabilities[cluster] = \
cluster_probabilities.setdefault(\
cluster,[]) + [sentence_probs[ind][0]]
selected_cluster_sentences = {}
for cluster in cluster_sentences:
max_i = cluster_probabilities[cluster].index(\
max(cluster_probabilities[cluster]))
selected_cluster_sentences[cluster] = cluster_sentences[cluster][max_i]
return list(selected_cluster_sentences.values())
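# The <X> ... </X> / <Y> ... </Y> slicing convention below is repeated many times in
# get_causes_contains_from_pubmed; the helper here is an illustrative sketch (not
# called by the pipeline) that captures the same extraction in one place.
def _sketch_extract_tagged_pair(tagged_sentence):
    # Returns the food/nutrient string (X) and the condition string (Y).
    x_string = tagged_sentence[tagged_sentence.find("<X>") + 3:
                               tagged_sentence.find("</X>")].strip()
    y_string = tagged_sentence[tagged_sentence.find("<Y>") + 3:
                               tagged_sentence.find("</Y>")].strip()
    return x_string, y_string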
def get_causes_contains_from_pubmed(input_sentences, output_sentences,\
sentence_all_causes, sentence_all_contains, \
sentence_file_names, metadata, sentence_sentiment, \
vectorizer, embeddings, gold_sentence_causes, policy,\
optimizer, args, sentence_extracted_rationales,\
fusion_model, tokenizer, label_list,\
pubmed_entity_types,\
property_style_rationales,\
cluster_threshold=1.0, split_name='test'):
#cause_style_rationales_representations = \
# np.array([_get_entity_embeddings(sentence, embeddings)\
# for sentence in property_style_rationales['causes']])
property_style_rationales_representations = {}
for property in property_style_rationales:
property_style_rationales_representations[property] = \
np.array([_get_entity_embeddings(sentence, embeddings)\
                for sentence in property_style_rationales[property]])
sentence_extracted_rationale_representations = {}
for sentence in sentence_extracted_rationales:
for property,spans in sentence_extracted_rationales[sentence].items():
if len(spans) == 0:
continue
reps = np.array([_get_entity_embeddings(span, embeddings) for span \
in spans])
if sentence not in sentence_extracted_rationale_representations:
sentence_extracted_rationale_representations[sentence] = reps
else:
sentence_extracted_rationale_representations[sentence] = \
np.concatenate((\
sentence_extracted_rationale_representations[sentence]\
, reps), axis=0)
all_gold_sentence_extracted_representations = []
for file_name in metadata:
if 'summary_inputs' not in metadata[file_name]:
continue
if 'summary_healthline_entity_annotations' not in \
metadata[file_name]['summary_inputs']:
continue
for sentence,labels in \
metadata[file_name]['summary_inputs']\
['summary_healthline_entity_annotations']:
all_gold_sentence_extracted_representations.append(\
_get_entity_embeddings(sentence, embeddings))
all_gold_sentence_extracted_representations = np.array(\
all_gold_sentence_extracted_representations)
all_pubmed_sentence_representations = []
all_pubmed_rationale_representations= []
pubmed_sentence_verb_phrases = pickle.load(open(\
"pubmed_sentence_verb_phrases.p","rb"))
for pubmed in metadata['pubmed_sentences_annotations']:
if 'pubmed_sentences_entity_annotations' not in \
metadata['pubmed_sentences_annotations'][pubmed]:
continue
for sentence,label in metadata['pubmed_sentences_annotations']\
[pubmed]['pubmed_sentences_entity_annotations']:
all_pubmed_sentence_representations.append(\
_get_entity_embeddings(sentence, embeddings))
verb_phrases = pubmed_sentence_verb_phrases.get(sentence,[])
for verb_phrase in verb_phrases:
all_pubmed_rationale_representations.append(\
_get_entity_embeddings(verb_phrase,embeddings))
all_pubmed_sentence_representations = np.array(\
all_pubmed_sentence_representations)
all_pubmed_rationale_representations= np.array(\
all_pubmed_rationale_representations)
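    # 'rhs' below is the mean alignment between every pubmed verb-phrase rationale and
    # the pooled causes/contains/sentiment style-template embeddings; it is passed into
    # the Environment (env.rhs) as a corpus-level baseline for rationale alignment.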
rhs =\
np.sum(np.dot(all_pubmed_rationale_representations,\
(np.concatenate((property_style_rationales_representations['causes'],\
property_style_rationales_representations['contains'],\
property_style_rationales_representations['sentiment']))).transpose()))\
/len(all_pubmed_rationale_representations)
gold_sentiment_file = jsonlines.open("dev_sentiment.jsonl","w")
gold_sentence_sentiments = {}
gold_sentence_inputs= []
for sentence in sentence_all_causes:
dict = {'sentence':sentence,'uid':0,'gold_label':'Good'}
gold_sentence_inputs.append(dict)
gold_sentiment_file.write(dict)
gold_sentiment_file.close()
if args.run_features:
from evaluation.create_sentiment_outputs import produce_sentiment_outputs
results = produce_sentiment_outputs("dev_sentiment.jsonl", \
"full_sentiment_classification")
copyfile("/data/rsg/nlp/darsh/"\
"pytorch-pretrained-BERT/full_sentiment_classification/"\
"preds.jsonl","healthline_sentiment.jsonl")
else:
results = jsonlines.open("healthline_sentiment.jsonl","r")
for gold_input,result in zip(gold_sentence_inputs,results):
gold_sentence_sentiments[gold_input['sentence']] = result['pred_label']
all_cause_tuples = set()
all_cause_sentence_tuples = {}
for sentence,causes in sentence_all_causes.items():
if sentence not in all_cause_sentence_tuples:
all_cause_sentence_tuples[sentence] = set()
for cause in causes:
all_cause_sentence_tuples[sentence].add(tuple(cause[:2]))
all_cause_tuples.add(tuple(cause[:2]))
pubmed_important_sentences = {}
all_important_sentences = {}
all_important_sentences_entities = {}
all_important_sentences_importance_probabilities = {}
all_important_sentences_pubmed = {}
number_of_sentences = 0
pubmed_all_sentences_considered = set()
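    # Pair each importance-model input with its prediction and index the candidate
    # pubmed sentences by (pubmed, sentiment), keeping their entity tag strings and
    # 'increases' probabilities (note the pred_label filter is disabled by 'or True').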
for input,output in zip(input_sentences, output_sentences):
if output['pred_label'] != "NA" or True:
pubmed = input['pubmed']
sentence=input['sentence']
pubmed_all_sentences_considered.add(sentence)
label_str=input['entity_string']
all_important_sentences_importance_probabilities[sentence] = \
float(output['increases'])
all_important_sentences_pubmed[sentence] = pubmed
if pubmed not in pubmed_important_sentences:
pubmed_important_sentences[pubmed] = {}
if output['sentiment'] not in pubmed_important_sentences[pubmed]:
pubmed_important_sentences[pubmed][output['sentiment']] = []
if sentence not in \
pubmed_important_sentences[pubmed][output['sentiment']]:
pubmed_important_sentences[pubmed][output['sentiment']]\
.append(sentence)
all_important_sentences[sentence] = label_str
all_important_sentences_entities[sentence] = {}
for entity_type in ['Food','Nutrition','Condition']:
all_important_sentences_entities[sentence]\
[entity_type] = _get_entities(sentence.split(),\
label_str.split(), entity_type)
number_of_sentences += 1
sentence_pubmeds = {}
sentence_structures = {}
to_annotate_sentence_original = {}
relation_sentences_considered = set()
if not os.path.exists(\
split_name + "_importance_pubmed_general.txt"\
".rationale.machine_readable.tsv")\
or not \
os.path.exists(\
split_name + "_importance_pubmed_sentiment."\
"txt.rationale.machine_readable.tsv") or True:
f_causes = open(split_name + "_importance_pubmed_general.txt","w")
f_sentiment = open(split_name + "_importance_pubmed_sentiment.txt","w")
pubmed_sentence_verb_phrases = \
pickle.load(open("pubmed_sentence_verb_phrases.p","rb"))
for sentence in all_important_sentences_entities:
f_sentiment.write("0\t0\t"+sentence+"\n")
#if sentence.startswith("This could explain"):
#"This could explain in part why the severe deficiency in <X> omega-3 </X> intake pointed by numerous epidemiologic studies may increase the <Y> brain </Y> 's vulnerability representing an important risk factor in the development and/or deterioration of certain cardio- and neuropathologies ."
pubmed = all_important_sentences_pubmed[sentence]
if pubmed not in metadata['pubmed_sentences_annotations'] or \
'pubmed_sentences_relation_annotations' not in \
metadata['pubmed_sentences_annotations'][pubmed]:
continue
relations = metadata['pubmed_sentences_annotations'][pubmed]\
['pubmed_sentences_relation_annotations'][0]
added_sentences = set()
for r in relations:
if r[0] in sentence and r[1] in sentence:
x = r[0]
y = r[1]
test_sentence = sentence.replace(x, ' <X> '+x+' </X> ')
test_sentence = test_sentence.replace(y, ' <Y> '\
+y+' </Y> ').strip()
test_sentence = ' '.join(test_sentence.split()).strip()
f_causes.write("0\t0\t"+test_sentence+"\n")
to_annotate_sentence_original[test_sentence] = sentence
relation_sentences_considered.add(sentence)
if 'pubmed_sentences_entity_annotations' not in \
metadata['pubmed_sentences_annotations'][pubmed]:
continue
for pubmed_sentence, sentence_entities in \
metadata['pubmed_sentences_annotations'][pubmed]\
['pubmed_sentences_entity_annotations']:
if sentence != pubmed_sentence:
continue
x_entities = _get_entities(pubmed_sentence.split(),\
sentence_entities.split(), 'Food') + _get_entities(\
pubmed_sentence.split(), sentence_entities.split(),\
'Nutrition')
y_entities = _get_entities(pubmed_sentence.split(),\
sentence_entities.split(), 'Condition')
for x in x_entities:
for y in y_entities:
test_sentence = pubmed_sentence.replace(x, \
' <X> '+x+' </X> ')
test_sentence = test_sentence.replace(y, ' <Y> '\
+y+' </Y> ').strip()
test_sentence = ' '.join(test_sentence.split()).strip()
to_annotate_sentence_original[test_sentence] = \
pubmed_sentence
if test_sentence not in relation_sentences_considered:
f_causes.write("0\t0\t"+test_sentence+"\n")
relation_sentences_considered.add(test_sentence)
f_causes.close()
f_sentiment.close()
all_important_pubmed_phrases = {}
lines = open(split_name + "_importance_pubmed_general.txt"\
".rationale.machine_prob_readable.tsv","r").readlines()[1:]#\
#+ open(split_name + "_importance_pubmed_sentiment.txt"\
#".rationale.machine_prob_readable.tsv","r").readlines()[1:]
pubmed_sentence_causal_rationales = {}
pubmed_sentence_causal_rationales_importance = {}
if args.run_features:
predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/"\
"allennlp/models/elmo-constituency-parser-2018.03.14.tar.gz")
not_part_of_to_annotations = 0
pubmed_sentence_verb_phrase_rationales = {}
pubmed_sentence_verb_phrases = {}
pubmed_sentence_constituency_parser = {}
if os.path.exists("/tmp/pubmed_sentence_constituency_parse.p"):
pubmed_sentence_constituency_parser = \
pickle.load(open("/tmp/pubmed_sentence_constituency_parse.p","rb"))
pubmed_sentence_verb_phrases = \
pickle.load(open("pubmed_sentence_verb_phrases.p","rb"))
parsing_sentences_considered = set()
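    # Each TSV row is (id, importance prob, tagged tokens, 0/1 rationale mask). The
    # tagged sentence is mapped back to its original pubmed sentence, an AllenNLP
    # constituency parse supplies verb phrases, and the shortest VP containing the
    # <Y> entity becomes that sentence's "best_verb_phrase" rationale.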
for line in tqdm(lines):
line = line.strip()
parts= line.split("\t")
if float(parts[1]) == 0.0 and False:
continue
importance_prob = float(parts[1])
selected_tokens = []
for token, rationale in zip(parts[2].split(), parts[3].split()):
if int(rationale) == 1 or True:
selected_tokens.append(token)
selected_tokens = ' '.join(selected_tokens).strip()
span = selected_tokens
#span = selected_tokens.replace('<X>','').replace('</X>','')\
# .replace('<Y>','').replace('</Y>','')
if ' '.join(parts[2].strip().split()).strip() \
not in to_annotate_sentence_original:
not_part_of_to_annotations += 1
continue
x_string = span[span.find("<X>")+3:span.find("</X>")].strip()
y_string = span[span.find("<Y>")+3:span.find("</Y>")].strip()
#if tuple([x_string,y_string]) not in all_cause_tuples:
# not_part_of_to_annotations += 1
# continue
original_sentence = \
' '.join(to_annotate_sentence_original[\
' '.join(parts[2].strip().split()).strip()].split())\
.strip()
parsing_sentences_considered.add(original_sentence)
if original_sentence not in pubmed_sentence_constituency_parser\
and args.run_features:
pubmed_sentence_constituency_parser[original_sentence] = \
predictor.predict(sentence=original_sentence)
if original_sentence not in pubmed_sentence_verb_phrases\
and args.run_features:
verb_phrases = []
stack = []
if 'children' in pubmed_sentence_constituency_parser\
[original_sentence]['hierplane_tree']['root']:
stack += pubmed_sentence_constituency_parser[\
original_sentence]['hierplane_tree']\
['root']['children']
while len(stack) > 0:
child = stack[0]
if 'children' in child:
stack += child['children']
if child['nodeType'] == 'VP':
verb_phrases.append(child['word'])
del stack[0]
pubmed_sentence_verb_phrases[original_sentence] = verb_phrases
best_verb_phrase = original_sentence
for verb_phrase in pubmed_sentence_verb_phrases[original_sentence]:
if y_string not in verb_phrase:
continue
if len(verb_phrase) < len(best_verb_phrase):
best_verb_phrase = verb_phrase
pubmed_sentence_verb_phrase_rationales[original_sentence] = \
pubmed_sentence_verb_phrase_rationales.setdefault(original_sentence,\
[]) + [best_verb_phrase]
new_importance_prob = all_important_sentences_importance_probabilities\
[original_sentence]
if 'NCBI' in span:
span = span.replace('NCBI','')
if 'PubMed' in span:
span = span.replace('PubMed','')
if 'NCBI' in best_verb_phrase:
best_verb_phrase = best_verb_phrase.replace('NCBI','')
if 'PubMed' in best_verb_phrase:
best_verb_phrase = best_verb_phrase.replace('PubMed','')
pubmed_sentence_causal_rationales[original_sentence] = \
pubmed_sentence_causal_rationales.setdefault(original_sentence,\
[]) + [span + " ### " + best_verb_phrase]\
#[parts[2] + " ### " + span]#[span + " ### " + best_verb_phrase]
pubmed_sentence_causal_rationales_importance[original_sentence] = \
pubmed_sentence_causal_rationales_importance.setdefault(original_sentence,\
[]) + [new_importance_prob]#[importance_prob]
print("%d not found" %not_part_of_to_annotations)
print(len(relation_sentences_considered), len(parsing_sentences_considered))
if args.run_features:
pickle.dump(pubmed_sentence_constituency_parser,\
open("/tmp/pubmed_sentence_constituency_parse.p","wb"))
pickle.dump(pubmed_sentence_verb_phrases,\
open("pubmed_sentence_verb_phrases.p","wb"))
sentence_importance_file\
= jsonlines.open("T4_dev_general.jsonl","w")
character_importance_file\
= jsonlines.open("importance_dev.jsonl","w")
input_dicts = []
for pubmed_sentence in pubmed_sentence_causal_rationales:
for rationale in pubmed_sentence_causal_rationales[pubmed_sentence]:
x_string = rationale[rationale.find("<X>")+3:\
rationale.find("</X>")].strip()
y_string = rationale[rationale.find("<Y>")+3:\
rationale.find("</Y>")].strip()
c_dict = {'sentence':x_string + " # " + y_string,\
'rationale': rationale, 'pubmed_sentence':\
pubmed_sentence, 'gold_label':'increases'}
character_importance_file.write(c_dict)
dict = {'sentence':rationale, 'rationale': rationale,\
'pubmed_sentence': pubmed_sentence, 'gold_label':\
'increases'}
input_dicts.append(dict)
sentence_importance_file.write(dict)
character_importance_file.close()
sentence_importance_file.close()
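    # Two BERT classifiers re-score every rationale: run_causes.py on the full tagged
    # rationale and run_importance.py on the "x # y" entity pair; their 'increases'
    # probabilities are stored together below as [character_prob, importance_prob].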
if args.run_features:
os.chdir("/data/rsg/nlp/darsh/pytorch-pretrained-BERT")
os.system("python examples/run_causes.py --task_name re_task "\
"--do_eval --do_lower_case --data_dir "\
"/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/"\
" --bert_model bert-base-uncased --max_seq_length 128"\
" --train_batch_size 32 --learning_rate 5e-5"\
" --num_train_epochs 3.0 --output_dir "\
"t4_general_causes_output --output_preds")
os.system("python examples/run_importance.py --task_name re_task "\
"--do_eval --do_lower_case --data_dir "\
"/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT/"\
" --bert_model bert-base-uncased --max_seq_length 128"\
" --train_batch_size 32 --learning_rate 5e-5"\
" --num_train_epochs 3.0 --output_dir "\
"importance_classification --output_preds")
os.chdir("/data/rsg/nlp/darsh/aggregator/crawl_websites/NUT")
copyfile("/data/rsg/nlp/darsh/"\
"pytorch-pretrained-BERT/t4_general_causes_output/preds.jsonl",\
"sentence_importance_"+split_name+"_preds.jsonl")
copyfile("/data/rsg/nlp/darsh/"\
"pytorch-pretrained-BERT/importance_classification/preds.jsonl",\
"character_importance_"+split_name+"_preds.jsonl")
predictions = jsonlines.open("sentence_importance_"+split_name+\
"_preds.jsonl","r")
predictions2= jsonlines.open("character_importance_"+split_name+\
"_preds.jsonl","r")
for input_dict, prediction, prediction2 in zip(input_dicts, \
predictions, predictions2):
pubmed_sentence = input_dict['pubmed_sentence']
rationale = input_dict['rationale']
x_string = rationale[rationale.find("<X>")+3:\
rationale.find("</X>")].strip()
y_string = rationale[rationale.find("<Y>")+3:\
rationale.find("</Y>")].strip()
#assert tuple([x_string, y_string]) in all_cause_tuples
importance_prob = float(prediction['increases'])
character_prob = float(prediction2['increases'])
rationale_index = pubmed_sentence_causal_rationales[pubmed_sentence]\
.index(rationale)
pubmed_sentence_causal_rationales_importance[pubmed_sentence]\
[rationale_index] = [character_prob,\
importance_prob]#character_prob#importance_prob * character_prob
print("Found %d sentences from the pubmed articles." %number_of_sentences)
sentence_pubmed_sentences = {}
sentence_pubmed_pubmeds = {}
sentence_causes = {}
sentence_contains={}
main_keep_sentences = set()
darsh_pubmed_causes = pickle.load(open("pubmed_causes.p","rb"))
extractive_output_summaries = jsonlines.open(\
"extractive_multi_summaries.jsonl","w")
extractive_permuted_output_summaries = \
jsonlines.open("extractive_perm_multi_summaries.jsonl","w")
epoch_rewards = []
epoch_missed = []
split_sentences = set()
sentences_with_causes = set()
total_epochs = args.epochs
sentence_keys= list(sentence_all_causes.keys())
all_cause_templates = []
for x in sentence_extracted_rationales.values():
if 'causes' in x:
if len(x['causes']) != 1 or \
                x['causes'][0][x['causes'][0].find("<X>")+3:\
                x['causes'][0].find("</X>")].strip() == "" or\
                x['causes'][0][x['causes'][0].find("<Y>")+3:\
                x['causes'][0].find("</Y>")].strip() == "" or\
x['causes'][0].count('<X>') != 1 or \
x['causes'][0].count('</X>') != 1 or \
x['causes'][0].count('<Y>') != 1 or \
x['causes'][0].count('</Y>') != 1:
continue
all_cause_templates += x['causes']
f = open("missed.txt","w")
current_input_causes = {}
inferred_causes = {}
cached_inputs = {}
for I in tqdm(range(total_epochs)):
current_considered = 0
average_sentences = []
avg_max_candidates = []
if split_name != 'train' or I == total_epochs-1:
policy.eval()
gold_sentence_consider_sentences = {}
rewards = []
bad_sentences = 0
correctly_bad = 0
missed_both_cases = 0
policy_misses = 0
prev_loss = []
gold_causes_count = []
min_templates_required = {}
new_min_templates_required = {}
number_successful = {}
if args.data_points == -1 and args.split_name == 'train':
random.shuffle(sentence_keys)
else:
            sentence_keys.sort()
for darsh in range(1):
for sentence in tqdm(sentence_keys):
title_food_name = ""
if len(title_entities.get(sentence_file_names[sentence],[""]))>0:
title_food_name = title_entities.get(sentence_file_names[sentence],[""])[0]
dict = {'file_name':sentence_file_names[sentence], 'gold':sentence,\
'output':'', 'outputs':[], 'consider_tuples':None, \
'food_name':title_food_name}
if len(average_sentences) == args.data_points:
break
if metadata[sentence_file_names[sentence]]['split'] != split_name:
continue
split_sentences.add(sentence)
gold_sentence_consider_sentences[sentence] = dict
sentence_causes[sentence] = []
sentence_pubmed_sentences[sentence] = {}
sentence_pubmed_pubmeds[sentence] = {}
for sent,pubmeds in metadata[sentence_file_names[sentence]]\
['summary_inputs']['summary_pubmed_articles'].items():
if sent.strip() != sentence:
continue
for pubmed in pubmeds:
if pubmed not in pubmed_important_sentences:
continue
for sentiment in pubmed_important_sentences[pubmed]:
if pubmed_important_sentences[pubmed][sentiment] in \
sentence_pubmed_sentences[sentence].get(sentiment,[]):
continue
sentence_pubmed_sentences[sentence][sentiment] = \
sentence_pubmed_sentences[sentence].setdefault(\
sentiment,[]) + pubmed_important_sentences[pubmed]\
[sentiment]
sentence_pubmed_pubmeds[sentence][sentiment] = \
sentence_pubmed_pubmeds[sentence].setdefault(\
sentiment,[]) + [pubmed] * \
                            len(pubmed_important_sentences[pubmed][sentiment])
for sentiment in ['Bad','Good','Neutral'][:1]:
consider_sentences = []
for i in range(1):
if len(sentence_pubmed_sentences[sentence].get('Bad',[]))+\
len(\
sentence_pubmed_sentences[sentence].get(\
'Good',[])) +\
len(sentence_pubmed_sentences[sentence].get(\
'Neutral',[])) >= 1:
batch_sentences = []
batch_importance= []
batch_sentiment = []
batch_pubmed = []
for sent,pubmed in\
zip(sentence_pubmed_sentences[sentence]\
.get('Good',[]),\
sentence_pubmed_pubmeds[sentence].get('Good',[])):
batch_sentences += \
pubmed_sentence_causal_rationales.get(sent,[])
batch_importance += \
pubmed_sentence_causal_rationales_importance\
.get(sent,[])
batch_sentiment += [[1,0,0]]*len(\
pubmed_sentence_causal_rationales_importance.get(\
sent,[]))
batch_pubmed += [pubmed]*len(\
pubmed_sentence_causal_rationales_importance.get(\
sent,[]))
for sent,pubmed in zip(\
sentence_pubmed_sentences[sentence].get(\
'Bad',[]),\
sentence_pubmed_pubmeds[sentence].get('Bad',[])):
batch_sentences += \
pubmed_sentence_causal_rationales.get(sent,[])
batch_importance += \
pubmed_sentence_causal_rationales_importance\
.get(sent,[])
batch_sentiment += [[0,1,0]]*len(\
pubmed_sentence_causal_rationales_importance.get(\
sent,[]))
batch_pubmed += [pubmed]*len(\
pubmed_sentence_causal_rationales_importance.get(\
sent,[]))
for sent,pubmed in zip(sentence_pubmed_sentences[sentence].get(\
'Neutral',[]),\
sentence_pubmed_pubmeds[sentence].get('Neutral',[])):
batch_sentences += \
pubmed_sentence_causal_rationales.get(sent,[])
batch_importance += \
[x for x in pubmed_sentence_causal_rationales_importance\
.get(sent,[])]
batch_sentiment += [[0,0,1]]*len(\
pubmed_sentence_causal_rationales_importance.get(\
sent,[]))
batch_pubmed += [pubmed]*len(\
pubmed_sentence_causal_rationales_importance.get(\
sent,[]))
b_sentences = []
b_importance= []
b_sentiment = []
b_type = []
b_pubmed = []
current_input_causes[sentence] = set()
assert len(batch_sentences) == len(batch_pubmed)
for b_s, b_i, b_senti, b_pub in zip(batch_sentences,\
batch_importance, batch_sentiment, batch_pubmed):
if b_s is not None and b_s not in b_sentences:
if "<X>" in b_s and "</X>" in b_s and "<Y>"\
in b_s and "</Y>" in b_s:
x_string = b_s[b_s.find("<X>")+3:\
b_s.find("</X>")].strip()
y_string = b_s[b_s.find("<Y>")+3:\
b_s.find("</Y>")].strip()
if tuple([x_string,y_string]) not in \
all_cause_sentence_tuples[sentence]:
f.write(b_s + "\n")
continue
b_sentences.append(b_s)
b_importance.append(b_i)
b_sentiment.append(b_senti)
b_pubmed.append(b_pub)
if "<X>" in b_s and "</X>" in b_s and "<Y>"\
in b_s and "</Y>" in b_s:
b_type.append(1)
current_input_causes[sentence].add(tuple([x_string,y_string]))
else:
continue
b_type.append(0)
batch_sentences = b_sentences
batch_importance= b_importance
batch_sentiment = b_sentiment
batch_type = b_type
batch_pubmed = b_pubmed
assert len(batch_sentiment) == len(batch_sentences)
assert not any([x is None for x in batch_sentences])
if len(batch_sentences) == 0:
if len(sentence_all_causes[sentence]) > 0:
possible_things = set()
pubmed_candidates = metadata[sentence_file_names[sentence]]\
["pubmed_sentences"].keys()
for pubmed_candidate in pubmed_candidates:
if pubmed_candidate not in metadata['pubmed_sentences_annotations']:
continue
if 'pubmed_sentences_relation_annotations' not in \
metadata['pubmed_sentences_annotations'][pubmed_candidate]\
or 'pubmed_sentences_entity_annotations' not in \
metadata['pubmed_sentences_annotations'][pubmed_candidate]:
continue
relations = metadata\
['pubmed_sentences_annotations']\
[pubmed_candidate]\
['pubmed_sentences_relation_annotations'][0]
for relation in relations:
for sentence_p,labels in metadata['pubmed_sentences_annotations']\
[pubmed_candidate]['pubmed_sentences_entity_annotations']:
if relation[0] in sentence and relation[1] in sentence:
possible_things.add(tuple(relation + \
[sentence_p]))
break
assert len(batch_sentences) > 0
if len(batch_sentences) == 1:
consider_sentences = batch_sentences
else:
not_1 = False
batch_structures = []
for b_s,b_t,b_p in zip(batch_sentences,batch_type,\
batch_pubmed):
if b_t == 1:
x_string = b_s[b_s.find("<X>")+3:\
b_s.find("</X>")].strip()
y_string = b_s[b_s.find("<Y>")+3:\
b_s.find("</Y>")].strip()
batch_structures.append([x_string,\
y_string, b_s, b_p])
causes_count = [_compare_causes(g_r, batch_structures,\
embeddings, "") for g_r in \
gold_sentence_causes[sentence]]
causes_2d = [[_compare_causes(g_r, [structure],\
embeddings, "") for g_r in \
gold_sentence_causes[sentence]] \
for structure in \
batch_structures]
if any(causes_count) and len(causes_count) > 0:
if any([sum(x) == sum(causes_count)\
for x in causes_2d]):
min_templates_required[1] = \
min_templates_required.\
setdefault(1, 0) + 1
else:
min_templates_required["not 1"] = \
min_templates_required.\
setdefault("not 1", 0) + 1
not_1 = True
if args.split_name == 'train' and \
sum(causes_count) > 1:
ignore_indices = set()
for cause_ind in range(len(causes_2d)):
if causes_2d[cause_ind].count(1.0) == \
len(causes_2d[cause_ind]):
pass
n_batch_structures = []
n_batch_importance = []
n_batch_sentiment = []
n_batch_type = []
n_causes_2d = []
for cause_ind in range(len(batch_structures)):
if cause_ind in ignore_indices and \
len(ignore_indices) < len(batch_structures)-2:
continue
n_batch_structures.append(batch_structures[cause_ind])
n_batch_importance.append(batch_importance[cause_ind])
n_batch_sentiment.append(batch_sentiment[cause_ind])
n_batch_type.append(batch_type[cause_ind])
n_causes_2d.append(causes_2d[cause_ind])
batch_structures = n_batch_structures
batch_importance = n_batch_importance
batch_sentiment = n_batch_sentiment
batch_type = n_batch_type
causes_2d = n_causes_2d
if any(causes_count) and len(causes_count) > 0:
if any([sum(x) == sum(causes_count)\
for x in causes_2d]):
new_min_templates_required['1'] = \
new_min_templates_required.setdefault('1',0)+1
else:
new_min_templates_required['not 1'] = \
new_min_templates_required.setdefault('not 1',\
0)+1
if len(gold_sentence_causes[dict['gold']]) > 0 and\
any(causes_count) and args.task=='policy':
gold_causes_count.append(len(gold_sentence_causes[dict['gold']]))
pubmed_sentences = {}
for pubmed,p_sentence_tuples in metadata[dict['file_name']]\
['pubmed_sentences'].items():
text = ""
p_ind = 0
for p_s,p_t in p_sentence_tuples:
text += " ".join(p_s).strip() + " "
if p_ind == 0:
text+="### "
p_ind += 1
pubmed_sentences[pubmed] = text.strip()
env = Environment(gold_sentence_causes[\
sentence], \
sentence_population_entities\
[sentence][0],embeddings,\
np.concatenate(\
((property_style_rationales_representations['causes'],\
property_style_rationales_representations['contains'],\
property_style_rationales_representations['sentiment'])\
),axis=0),
sentence_extracted_rationale_representations.get(sentence,\
np.zeros((1,50))),
all_gold_sentence_extracted_representations,\
all_pubmed_rationale_representations, rhs,\
dict['gold'],pubmed_sentences=pubmed_sentences)
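                                # Rebuild the candidates as tuples of (x entity,
                                # y entity, tagged sentence, pubmed id, population
                                # string) so the policy environment can also see the
                                # study population behind each claim.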
batch_structures = []
n_batch_importance = []
n_batch_type = []
n_batch_sentiment = []
structures_sentences= {}
for b_s,b_t,b_p,b_i,b_sent in zip(batch_sentences,\
batch_type,batch_pubmed,batch_importance,\
batch_sentiment):
pubmed_population = []
if b_p in \
metadata['pubmed_sentences_annotations']:
entity_annotations = \
metadata['pubmed_sentences_annotations']\
[b_p]['pubmed_sentences_entity_annotations']
all_populations = []
for element in entity_annotations:
element_population = _get_entities(\
element[0].split(),\
element[1].split(),\
'Population')
if element[0] == b_s and len(element_population)>0:
pubmed_population = element_population
break
else:
pubmed_population += element_population
pubmed_population = list(set(pubmed_population))
pubmed_population = " ".join(pubmed_population).strip()
if b_t == 1:
x_string = b_s[b_s.find("<X>")+3:\
b_s.find("</X>")].strip()
y_string = b_s[b_s.find("<Y>")+3:\
b_s.find("</Y>")].strip()
batch_structures.append(tuple([x_string,\
y_string, b_s, b_p, \
pubmed_population]))
structures_sentences[tuple([x_string,\
y_string, b_s])] = b_s
n_batch_importance.append(b_i)
n_batch_sentiment.append(b_sent)
n_batch_type.append(b_t)
else:
batch_structures.append(tuple(b_s))
structures_sentences[tuple(b_s)] = b_s
batch_importance = n_batch_importance
batch_type = n_batch_type
batch_sentiment = n_batch_sentiment
assert len(batch_structures) > 0
if True:
if split_name != 'train' or \
I == total_epochs-1:
policy.eval()
batch_extractions = [\
x[2][x[2].find("###")+3:].strip()\
.replace('NCBI', '').\
replace('PubMed','')for x in \
batch_structures]
batch_representations = \
[_get_phrase_embeddings(extraction\
, embeddings) for extraction in \
batch_extractions]
assert len(batch_structures) == \
len(batch_representations)
for ind,batch_rep in enumerate(\
batch_representations):
batch_structures[ind] =\
tuple(list(batch_structures[ind][:3]) + \
[batch_rep] + \
list(batch_structures[ind][3:]))
main_keep_sentences.add(sentence)
d_batch_structures = []
d_batch_importance = []
d_batch_sentiment = []
d_batch_type = []
d_batch_representations = []
darsh_pubmed_considered = metadata[dict['file_name']]\
['summary_inputs']\
['summary_pubmed_articles']
darsh_pubmed_considered = {x.strip():y for x,y in \
darsh_pubmed_considered.items()}
pubmed_considered = darsh_pubmed_considered[dict['gold']]
for p_c in pubmed_considered:
c_c = darsh_pubmed_causes.get(p_c,[])
for d_b in c_c:
if d_b[2] == 'None':
continue
d_batch_structure = [d_b[0],d_b[1],\
d_b[2]+" <X>"+\
d_b[0]+"</X> <Y>"+d_b[1]+"</Y> ",\
np.array([0]*50),p_c,'']
d_batch_structures.append(tuple(\
d_batch_structure))
d_batch_importance.append(1.0)
d_batch_sentiment.append([1,0,0])
d_batch_type.append(1)
d_batch_representations.append(np.array([0]*50))
batch_structures = d_batch_structures
batch_importance = d_batch_importance
batch_sentiment = d_batch_sentiment
batch_type = d_batch_type
batch_representations = d_batch_representations
if len(batch_structures) == 0:
continue
consider_tuples, reward, prev_loss =\
_get_choice_policy_sentences(env,\
batch_structures,\
batch_importance,\
batch_sentiment,\
batch_type,\
batch_representations,\
policy, optimizer,\
gold_sentence_sentiments[sentence],\
vectorizer,\
property_style_rationales_representations['causes'],\
prev_loss, batch_size=args.batch_size,\
epoch_number=I, pretrain_until=0,\
repeat_instance=\
args.repeat_instance if not_1\
else args.repeat_instance)
dict['consider_tuples'] = \
consider_tuples
inferred_causes[sentence] = consider_tuples
rewards.append(reward)
current_considered += 1
average_sentences.append(len(consider_tuples))
avg_max_candidates.append\
(min(5,len(batch_structures)))
consider_sentences = []
for consider_tuple in consider_tuples:
if tuple(consider_tuple[:3]) in \
structures_sentences:
consider_sentences.append(\
structures_sentences\
[tuple(consider_tuple[:3])])
break
if len(consider_sentences) == 0:
missed_both_cases += 1
policy_misses += 1
else:
consider_sentences = batch_sentences
consider_sentences = _get_clustered_sentences(\
batch_sentences, batch_importance, vectorizer, \
(2-i/10)*cluster_threshold)
for consider_sentence in consider_sentences:
consider_sentence = consider_sentence.lower()
x_string = consider_sentence[consider_sentence.find("<x>")+3:\
consider_sentence.find("</x>")].strip()
y_string = consider_sentence[consider_sentence.find("<y>")+3:\
consider_sentence.find("</y>")].strip()
inferred_causes[sentence] = \
inferred_causes.get(sentence,[]) + \
[[x_string,y_string]]
else:
consider_sentences = sentence_pubmed_sentences[sentence]\
.get('Good',[]) + \
sentence_pubmed_sentences[sentence].get('Bad',[]) + \
sentence_pubmed_sentences[sentence].get('Neutral',[])
candidates = []
for cause_triplet in sentence_all_causes[sentence]:
if cause_triplet[1].lower() in \
' '.join(consider_sentences).strip():
if any([" <x> " + cause_triplet[0].lower() + " </x> "\
in x.lower() and \
" <y> " + cause_triplet[1].lower() + " </y> " in x.lower()\
for x in consider_sentences]):
sentence_causes[sentence] = sentence_causes.setdefault(\
sentence,[]) + [cause_triplet]
if len(sentence_causes[sentence]) == 0:
sentence_causes[sentence] = candidates
if len(sentence_causes[sentence]) > 1:
sentence_pubmed_sentences[sentence] = consider_sentences
if sentiment == 'Bad':
bad_sentences += 1
if sentence_sentiment[sentence] == sentiment:
correctly_bad += 1
break
if len(consider_sentences) > 0:
dict['outputs'] = list(consider_sentences)
dict['output'] = ' '.join(\
list(consider_sentences)).strip()
assert sentence in split_sentences
gold_sentence_consider_sentences[sentence] = dict
if len(sentence_causes.get(sentence,[])) > 1:
dict['outputs'] = list(consider_sentences)
dict['output'] = ' '.join(\
list(consider_sentences)).strip()
assert sentence in split_sentences
gold_sentence_consider_sentences[sentence] = dict
break
consider_sentences_set = set()
new_consider_sentences = []
for consider_sentence in consider_sentences:
consider_clean_sentence = re.sub('<[^>]+>', '', \
consider_sentence)
consider_clean_sentence = consider_clean_sentence.replace(\
'<','').replace('>','')
consider_clean_sentence = re.sub(' +', ' ',\
consider_clean_sentence)
if consider_clean_sentence not in consider_sentences_set:
new_consider_sentences.append(consider_sentence)
consider_sentences = new_consider_sentences
dict['outputs'] = list(consider_sentences)
dict['output'] = ' '.join(\
list(consider_sentences)).strip()
assert sentence in split_sentences
gold_sentence_consider_sentences[sentence] = dict
epoch_rewards.append(sum(rewards)/len(rewards) if len(rewards)>0 else 0)
print("Epoch Rewards ", epoch_rewards)
epoch_missed.append(policy_misses)
print(min_templates_required)
print(new_min_templates_required)
if args.task == 'policy':
print(policy_misses, len(average_sentences), \
sum(average_sentences)/len(average_sentences),\
sum(avg_max_candidates)/len(avg_max_candidates),\
sum(gold_causes_count)/len(gold_causes_count))
if split_name != 'train':
break
if split_name == 'train':
torch.save(policy.state_dict(),open('choice_policy_%d.pt' %(I+1),'wb'))
print(len(split_sentences), len(gold_sentence_consider_sentences))
torch.save(policy.state_dict(),open('choice_policy.pt','wb'))
f.close()
plt.plot(epoch_rewards[:-1])
plt.ylabel("Rewards")
plt.savefig('rewards.png')
plt.plot(epoch_missed[:-1])
plt.ylabel("Missed Cases")
plt.savefig("missed.png")
matching_counts = 0
missed_counts = 0
print("Trained on %d main sentences" %len(main_keep_sentences))
for sentence in sentence_all_contains:
if sentence not in gold_sentence_consider_sentences:
continue
if metadata[sentence_file_names[sentence]]['split'] != split_name:
continue
if sentence == "Raw legumes harbor antinutrients, which may cause harm."\
" However, proper preparation methods get rid of most of them.":
pass
if sentence in gold_sentence_consider_sentences:
dict = gold_sentence_consider_sentences[sentence]
consider_sentences = dict['outputs']
predicted_populations = None if dict['consider_tuples'] is None else\
[x[5] for x in dict['consider_tuples']]
gold_populations = sentence_population_entities[\
dict['gold']][0]
if predicted_populations is None or \
all([len(x)==0 for x in predicted_populations]):
if len(gold_populations) == 0:
matching_counts += 1
else:
missed_counts += 1
else:
if len(gold_populations) > 0:
matching_counts += 1
else:
missed_counts += 1
sentence_specific_contains = {}
for consider_sentence in consider_sentences:
x_string = consider_sentence[consider_sentence.find("<X>")+3:\
consider_sentence.find("</X>")].strip()
candidate_contains = []
for contain in sentence_all_contains.get(sentence,[]):
if matching_score(contain[1].lower(),\
x_string.lower(),embeddings) > 0.85:
candidate_contains.append(contain)
best_candidate_contains = None
best_cosine_score = -1.0
for candidate_contain in candidate_contains:
if \
len(title_entities.get(sentence_file_names[sentence],\
[''])) == 0:
continue
current_cosine = matching_score(title_entities.get(\
sentence_file_names[sentence],[''])\
[0].lower(),\
candidate_contain[0].lower(),embeddings)
if current_cosine > best_cosine_score and current_cosine > 0.85:
best_cosine_score = current_cosine
best_candidate_contains = candidate_contain
if best_candidate_contains is None and\
len(title_entities.get(\
sentence_file_names[sentence],[])) > 0\
and matching_score(title_entities.get(\
sentence_file_names[sentence],[''])\
[0].lower(),\
x_string, embeddings) < 0.85:
best_candidate_contains = [title_entities.get(\
sentence_file_names[sentence],[''])[0],\
x_string]
sentence_specific_contains[consider_sentence] = best_candidate_contains
new_consider_sentences = []
new_tupled_consider_sentences = []
for consider_sentence in \
consider_sentences:
new_consider_sentences.append(consider_sentence)
new_tupled_consider_sentences.append([consider_sentence])
gold_sentence_consider_sentences[sentence]['outputs'] = \
new_consider_sentences
gold_sentence_consider_sentences[sentence]['perm_outputs'] = \
new_tupled_consider_sentences
gold_sentence_consider_sentences[sentence]['output'] = \
(' '.join(new_consider_sentences)).strip()
sentence_contains[sentence] = []
already_computed = False
for sent,pubmeds in metadata[sentence_file_names[sentence]]\
['summary_inputs']['summary_pubmed_articles'].items():
if sent.strip() != sentence:
continue
if sentence in sentence_pubmed_sentences:
already_computed = True
break
for pubmed in pubmeds:
sentence_pubmed_sentences[sentence] = \
sentence_pubmed_sentences.setdefault(sentence,[]) + \
pubmed_important_sentences.get(pubmed, [])
for sentiment in ['Bad','Good']:
if sentiment in sentence_pubmed_sentences[sentence]\
and not already_computed:
sentence_pubmed_sentences[sentence] = _get_clustered_sentences(\
sentence_pubmed_sentences[sentence][sentiment], \
                [1.0] * len(sentence_pubmed_sentences[sentence][sentiment]), \
vectorizer, cluster_threshold)
for contains_tuple in sentence_all_contains[sentence]:
if contains_tuple[1].lower() in \
' '.join(sentence_pubmed_sentences[sentence]).strip():
sentence_contains[sentence] = sentence_contains.setdefault(\
sentence,[]) + [contains_tuple]
if len(sentence_contains[sentence]) > 0:
break
print("Matching population counts %d , missed %d" \
%(matching_counts,missed_counts))
print("Missed %d sentences for not having any importance stuff "\
%missed_both_cases)
print("Selected %d of actual as negative sentiment sentences" %correctly_bad)
print("Selected %d of all as negative sentiment sentences." %bad_sentences)
all_cause_templates_representations = vectorizer.transform(\
all_cause_templates).todense()
all_causes_tuples = []
for cause_template in all_cause_templates:
        x_string = cause_template[cause_template.find("<X>")+3:\
                        cause_template.find("</X>")].strip()
        y_string = cause_template[cause_template.find("<Y>")+3:\
                        cause_template.find("</Y>")].strip()
all_causes_tuples.append([x_string,y_string])
templated_summaries_jsonl = jsonlines.open(\
"templated_extractive_summaries.jsonl", "w")
handwritten_summaries_jsonl = jsonlines.open(\
"handwritten_summaries.jsonl","w")
recalls = []
all_input_recalls = []
oracle_recalls = []
s_ind = 0
for sentence,dicti in tqdm(gold_sentence_consider_sentences.items()):
s_ind += 1
gold_causes = gold_sentence_causes.get(sentence,[])
oracle_causes = sentence_all_causes.get(sentence,[])
input_causes = current_input_causes.get(sentence,[])
infered_structures = inferred_causes.get(sentence,[])
for output in dicti['outputs']:
x_string = output[output.find("<X>")+3:\
output.find("</X>")].strip()
y_string = output[output.find("<Y>")+3:\
output.find("<Y>")].strip()
if x_string == "" or y_string == ""\
or "<X>" in x_string or "<Y>" in x_string\
or "<Y>" in y_string or "<Y>" in y_string:
continue
if '<X>' not in output or '<Y>' not in output:
if 'contains' in output:
infered_structures.append(output.split('contains'))
else:
pass
else:
infered_structures.append([x_string,y_string])
old_consider_tuples = dicti['consider_tuples']
if old_consider_tuples is None:
dicti['consider_tuples'] = []
for output in dicti['outputs']:
x_string = output[output.find("<X>")+3:\
output.find("</X>")].strip()
y_string = output[output.find("<Y>")+3:\
output.find("</Y>")].strip()
dicti['consider_tuples'].append(tuple([x_string,y_string,\
output]))
old_consider_tuples = dicti['consider_tuples']
o_consider_tuples = []
for consider_tuple in old_consider_tuples:
if pubmed_entity_types.get(consider_tuple[0],\
[]).count('Nutrition')>\
pubmed_entity_types.get(consider_tuple[0],\
[]).count('Food') and len(title_entities.get(\
sentence_file_names.get(dicti['gold'],[])\
,[]))\
>0:
o_consider_tuples.append([title_entities[\
sentence_file_names[dicti['gold']]][0],\
consider_tuple[0]])
old_consider_tuples += o_consider_tuples
dicti['consider_tuples'] = old_consider_tuples
correct_cases = 0
oracle_cases = 0
input_cases = 0
#for gold_cause in gold_causes:
# correct_cases += _compare_causes(gold_cause, infered_structures,\
# embeddings,"")
# oracle_cases += _compare_causes(gold_cause, oracle_causes, \
# embeddings, "")
# input_cases += _compare_causes(gold_cause, input_causes,\
# embeddings, "")
if len(gold_causes) > 0:
recalls.append(correct_cases/len(gold_causes))
oracle_recalls.append(oracle_cases/len(gold_causes))
all_input_recalls.append(input_cases/len(gold_causes))
output_sentences = dicti['outputs']
extractive_dicti = copy.copy(dicti)
rationale_sentence = ""
rationale_sentence_candidates = []
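        # Assemble the extractive "rationale_output" string: rationales are joined with
        # " _ ", while 'contains'-style sentences are split and re-joined with " | "
        # (Nutrition) or " * " (Food) so the separators keep the structure recoverable
        # downstream.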
for INd,p_output_sentences in enumerate(\
[[[x] for x in dicti['outputs']]]):
p_output_sentences = sum(p_output_sentences,[])
rationale_sentence = ""
added_nutrients = set()
for ind,sent in enumerate(p_output_sentences):
if "###" in sent:
rationale = sent[sent.find("###")+3:].strip()
x_string = sent[sent.find("<X>")+3:sent.find("</X>")]\
.strip()
if x_string.lower() not in rationale.lower() and \
(ind == 0 or x_string not in output_sentences[ind-1]):
rationale = x_string + " _ " + rationale + " . "
else:
parts = [sent]
entity_type = None
if ' contains ' in sent:
parts = sent.split(" contains ")
entity_type = 'Nutrition'
elif ' a ' in sent:
parts = sent.split(" a ")
entity_type = 'Food'
if len(parts) > 1 and np.dot(_get_phrase_embeddings(parts[0].lower(),\
embeddings),_get_phrase_embeddings(parts[1].lower(),embeddings\
)) < 0.85 and parts[1] not in added_nutrients:
nutrient = parts[1]
added_nutrients.add(nutrient)
if "###" in output_sentences[ind+1] and \
output_sentences[ind+1].split(\
"###")[1].strip().lower()\
.startswith(nutrient.lower()):
if entity_type == 'Nutrition':
rationale = parts[0] + " | "
else:
rationale = parts[0] + " * "
else:
if entity_type == "Nutrition":
rationale = parts[0] + " | " + parts[1]
else:
rationale = parts[0] + " * " + parts[1]
else:
rationale = parts[0]
if rationale_sentence is not "":
if rationale_sentence.strip().endswith("|"):
rationale_sentence += rationale
else:
rationale_sentence += " _ " + rationale
else:
rationale_sentence = rationale
extractive_dicti['rationale_output'] = rationale_sentence
extractive_dicti['consider_tuples'] = \
[x[:2]+\
x[4:] if len(x)>3 else x for x in \
extractive_dicti['consider_tuples']]
extractive_permuted_output_summaries.write(extractive_dicti)
rationale_sentence_candidates.append(rationale_sentence)
extractive_dicti['rationale_output'] = rationale_sentence
extractive_dicti['rationale_output_candidates'] = \
rationale_sentence_candidates
extractive_output_summaries.write(extractive_dicti)
extractive_output_summaries.close()
if args.split_name == 'train':
time_stamp = str(time.time())
torch.save(policy.state_dict(),open('choice_policy.pt_'+time_stamp,'wb'))
print("Recall", sum(recalls)/len(recalls), \
sum(oracle_recalls)/len(oracle_recalls),\
sum(all_input_recalls)/len(all_input_recalls), len(recalls))
return sentence_causes, sentence_contains
def get_templated_sentences(sentence_relation_annotations, \
sentence_entity_annotations, sentence_causes, sentence_contains,\
title_entities, sentence_file_names, metadata):
output_file = jsonlines.open("templated_summaries.jsonl","w")
healthline_graphs = []
healthline_sentences = []
for sentence in sentence_relation_annotations:
output, _ = sentence_relation_annotations[sentence]
data_points = output[0]
for data_point in output[4]:
data_points.append(data_point + ['contains'])
healthline_graphs.append(get_causes_dict(data_points,\
sentence_entity_annotations[sentence][0]))
healthline_sentences.append(sentence)
healthline_graphs = np.array(healthline_graphs)
modified_sentences = {}
template_scores = []
for sentence in tqdm(sentence_causes):
assert sentence in sentence_causes
assert sentence in sentence_relation_annotations
assert sentence in sentence_entity_annotations
entity_types = {}
if metadata[sentence_file_names[sentence]]['split'] != 'test':
continue
for entity in title_entities[sentence_file_names[sentence]]:
entity_types[entity] = 'Food'
data_points = sentence_causes[sentence]
for data_point in sentence_contains[sentence]:
data_points.append(data_point + ['contains'])
causes_graph = get_causes_dict(data_points,\
{**sentence_entity_annotations[sentence][1],\
**entity_types})
matching_scores = [match_dicts(causes_graph,ref_dict) \
for ref_dict in healthline_graphs]
matched_sentence = \
healthline_sentences[matching_scores.index(max(matching_scores))]
template_scores.append(max(matching_scores))
if template_scores[-1] == 0.0:
continue
modified_sentences[sentence] = get_modified_sentence(\
sentence_relation_annotations,\
sentence_entity_annotations,\
matched_sentence, data_points,\
{**sentence_entity_annotations[sentence][1],\
**entity_types}, sentence)
dict = {}
dict['gold'] = sentence
dict['output']= modified_sentences[sentence]
        dict['output_dictionary'] = data_points
dict['retrieved_dict'] = healthline_graphs[\
matching_scores.index(max(matching_scores))]
output_file.write(dict)
output_file.close()
print(sum(template_scores)/len(template_scores))
return modified_sentences
def get_rationale_entities(file_name):
templates = []
entities = []
lines = open(file_name, "r").readlines()[1:]
for line in lines:
line = line.strip()
parts= line.split("\t")
if float(parts[1]) < 0.5:
continue
tokens = parts[2].split()
extracts= parts[3].split()
template = []
for token,keep in zip(tokens,extracts):
if int(keep) == 1:
template.append(token)
template = ' '.join(template).strip()
if any([x not in template for x in ['<X>','</X>','<Y>','</Y>']]):
continue
templates.append(template)
str1 = template[template.index("<X>"):template.index("</X>")+4]
str2 = template[template.index("<Y>"):template.index("</Y>")+4]
entities.append([str1,str2])
return templates, entities
def get_simple_templated_sentences(sentence_relation_annotations,\
sentence_entity_annotations, sentence_causes, sentence_contains,\
title_entities, sentence_file_names, metadata, embeddings):
healthline_graphs = []
healthline_sentences = []
for sentence in sentence_relation_annotations:
output, _ = sentence_relation_annotations[sentence]
data_points = output[0]
for data_point in output[4]:
data_points.append(data_point + ['contains'])
healthline_graphs.append(get_causes_dict(data_points,\
sentence_entity_annotations[sentence][0]))
healthline_sentences.append(sentence)
f = open("summary_train_template_sentences.txt", "w")
f_contains = open("summary_train_template_sentences_contains.txt", "w")
label_indices = {}
label_entities = {}
label_templates = {}
label_entities = {}
for ind,graph in enumerate(healthline_graphs):
if len(graph.keys()) == 1 and sum([len(graph[x]) for x in graph]) == 1\
and len(sentence_entity_annotations[healthline_sentences[ind]][0].keys()) == 2:
if metadata[sentence_file_names[healthline_sentences[ind]]]\
['split'] == 'train':
rev_entities = sentence_entity_annotations\
[healthline_sentences[ind]][0]
entities = {x:y for y,x in rev_entities.items()}
entity_names = list(rev_entities.keys())
index1 = healthline_sentences[ind].index(entity_names[0])
index2 = healthline_sentences[ind].index(entity_names[1])
if index1 < index2:
if rev_entities[entity_names[0]] != 'Condition':
updated_sentence = healthline_sentences[ind][:index1] + \
" <X> " + entity_names[0] + " </X> " + \
healthline_sentences[ind]\
[index1+len(entity_names[0]):index2] + " <Y> " + \
entity_names[1] + " </Y> " + \
healthline_sentences[ind]\
[index2+len(entity_names[1]):]
f.write("0\t0\t" + updated_sentence + "\n")
if rev_entities[entity_names[0]] != 'Condition' and \
rev_entities[entity_names[1]] != 'Condition' and \
rev_entities[entity_names[1]] != 'Food':
f_contains.write("0\t0\t" + updated_sentence + "\n")
#f.write(updated_sentence + "\n")
if rev_entities[entity_names[1]] != 'Condition':
updated_sentence = healthline_sentences[ind][:index1] + \
" <Y> " + entity_names[0] + " </Y> " + \
healthline_sentences[ind]\
[index1+len(entity_names[0]):index2] + " <X> " + \
entity_names[1] + " </X> " + \
healthline_sentences[ind]\
[index2+len(entity_names[1]):]
f.write("0\t0\t" + updated_sentence + "\n")
#f.write(updated_sentence + "\n")
if rev_entities[entity_names[0]] != 'Condition' and \
rev_entities[entity_names[1]] != 'Condition'\
and rev_entities[entity_names[0]] != 'Food':
f_contains.write("0\t0\t" + updated_sentence + "\n")
else:
if rev_entities[entity_names[1]] != 'Condition':
updated_sentence = healthline_sentences[ind][:index2] + \
" <X> " + entity_names[1] + " </X> " + \
healthline_sentences[ind]\
[index2+len(entity_names[1]):index1] + " <Y> " + \
entity_names[0] + " </Y> " + \
healthline_sentences[ind]\
[index1+len(entity_names[0]):]
f.write("0\t0\t" + updated_sentence + "\n")
#f.write(updated_sentence + "\n")
if rev_entities[entity_names[0]] != 'Condition' and \
rev_entities[entity_names[1]] != 'Condition'\
                                and rev_entities[entity_names[0]] != 'Food':
f_contains.write("0\t0\t" + updated_sentence + "\n")
if rev_entities[entity_names[0]] != 'Condition':
updated_sentence = healthline_sentences[ind][:index2] + \
" <Y> " + entity_names[1] + " </Y> " + \
healthline_sentences[ind]\
[index2+len(entity_names[1]):index1] + " <X> " + \
entity_names[0] + " </X> " + \
healthline_sentences[ind]\
[index1+len(entity_names[0]):]
f.write("0\t0\t" + updated_sentence + "\n")
if rev_entities[entity_names[0]] != 'Condition' and \
rev_entities[entity_names[1]] != 'Condition'\
                                and rev_entities[entity_names[1]] != 'Food':
f_contains.write("0\t0\t" + updated_sentence + "\n")
#f.write(updated_sentence + "\n")
label_indices[list(graph.keys())[0]] = \
label_indices.setdefault(list(graph.keys())[0],[]) + [ind]
label_entities[healthline_sentences[ind]] = \
sentence_relation_annotations[healthline_sentences[ind]][0]\
[0][0] if \
sentence_relation_annotations[healthline_sentences[ind]][0][0][0]\
                    != [] else \
sentence_relation_annotations[healthline_sentences[ind]][0][4][0]
f.close()
f_contains.close()
copyfile("summary_train_template_sentences.txt",\
"summary_train_template_sentences_increases.txt")
copyfile("summary_train_template_sentences.txt",\
"summary_train_template_sentences_decreases.txt")
copyfile("summary_train_template_sentences.txt",\
"summary_train_template_sentences_controls.txt")
copyfile("summary_train_template_sentences.txt",\
"summary_train_template_sentences_satisfies.txt")
label_templates['increases'], label_entities['increases'] = \
get_rationale_entities(''\
'summary_train_template_sentences_increases.txt.'\
'rationale.machine_readable.tsv')
label_templates['decreases'], label_entities['decreases'] = \
get_rationale_entities(''\
'summary_train_template_sentences_decreases.txt.'\
'rationale.machine_readable.tsv')
label_templates['controls'], label_entities['controls'] = \
get_rationale_entities(''\
'summary_train_template_sentences_controls.txt.'\
'rationale.machine_readable.tsv')
label_templates['satisfies'], label_entities['satisfies'] = \
get_rationale_entities(''\
'summary_train_template_sentences_satisfies.txt.'\
'rationale.machine_readable.tsv')
label_templates['contains'], label_entities['contains'] = \
get_rationale_entities(''\
'summary_train_template_sentences_contains.txt.'\
'rationale.machine_readable.tsv')
modified_sentences = {}
templated_scores = []
output_file = jsonlines.open("templated_multi_summaries.jsonl","w")
input_sentiment_file = jsonlines.open("test1_sentiment.jsonl","w")
output_sentiment_file = jsonlines.open("test2_sentiment.jsonl","w")
for sentence in sentence_causes:
entity_types = {}
for entity in title_entities[sentence_file_names[sentence]]:
entity_types[entity] = 'Food'
if metadata[sentence_file_names[sentence]]['split'] != split_name:
all_relations = sentence_causes[sentence]
for contain_relation in sentence_contains[sentence]:
all_relations.append(contain_relation + ['contains'])
else:
all_relations = metadata[sentence_file_names[sentence]]\
['summary_inputs']\
['summary_healthline_relation_annotations'][sentence.strip()][0]
unique_relations = []
added_relations = set()
for relation in all_relations:
if tuple([x.lower() for x in relation]) in added_relations:
continue
unique_relations.append(relation)
added_relations.add(tuple([x.lower() for x in relation]))
all_relations = unique_relations
if len(all_relations) == 0:
continue
dict = {}
dict['file_name'] = sentence_file_names[sentence]
dict['gold'] = sentence
dict['outputs'] = []
dict['output'] = ''
        dict['output_dictionary'] = all_relations
dict['split'] = metadata[sentence_file_names[sentence]]['split']
dict1 = {'gold_label':'Good','sentence':sentence}
for relation in all_relations:
cause_type = relation[2]
cosine_score = -1e100
best_modified_sentence = ""
if False:
for template_index in label_indices.get(cause_type,[]):
modified_sentence = get_modified_sentence(\
sentence_relation_annotations,\
sentence_entity_annotations,\
healthline_sentences[template_index],\
[relation],\
{**sentence_entity_annotations[sentence][1],\
**entity_types}, sentence)
if relation[0].lower() in modified_sentence and \
relation[1].lower() in modified_sentence:
current_cosine = np.dot(_get_entity_embeddings(\
relation[0],embeddings),\
_get_entity_embeddings(\
label_entities[healthline_sentences[template_index]][0]\
,embeddings)) + \
np.dot(_get_entity_embeddings(\
relation[1],embeddings),\
_get_entity_embeddings(\
label_entities[healthline_sentences[template_index]][1]\
,embeddings))
if current_cosine > cosine_score:
cosine_score = current_cosine
best_modified_sentence = modified_sentence
if True:
for template, original_entities in zip(\
label_templates[cause_type], label_entities[cause_type]):
x_substr = template[template.index('<X>'):template.index('</X>')+4]
y_substr = template[template.index('<Y>'):template.index('</Y>')+4]
if len(template) <= len(x_substr) + len(y_substr) + 3:
continue
if x_substr == "" or y_substr == "":
continue
modified_sentence = template.replace(x_substr, relation[0])\
.replace(y_substr, relation[1])
if len(modified_sentence) <= len(relation[0]) +\
len(relation[1]) + 3:
continue
current_cosine = np.dot(_get_entity_embeddings(\
relation[0],embeddings),\
_get_entity_embeddings(\
original_entities[0]\
,embeddings)) + \
np.dot(_get_entity_embeddings(\
relation[1],embeddings),\
_get_entity_embeddings(\
original_entities[1]\
,embeddings))\
- len(modified_sentence.split())/15
if current_cosine > cosine_score:
cosine_score = current_cosine
best_modified_sentence = modified_sentence
modified_sentence = best_modified_sentence
#if relation[0].lower() not in modified_sentence or \
# relation[1].lower() not in modified_sentence:
# continue
#relation1_index = modified_sentence.index(relation[0].lower())
#relation2_index = modified_sentence.index(relation[1].lower())
#if relation1_index < relation2_index:
# modified_sentence = modified_sentence[relation1_index:\
# relation2_index+len(relation[1])]
#else:
# modified_sentence = modified_sentence[relation2_index:\
# relation1_index+len(relation[0])]
dict['outputs'].append(modified_sentence)
        dict['output'] = ' '.join(dict['outputs']).strip()
        dict2 = {'gold_label': 'Good', 'sentence': dict['output']}
output_file.write(dict)
input_sentiment_file.write(dict1)
output_sentiment_file.write(dict2)
output_file.close()
input_sentiment_file.close()
output_sentiment_file.close()
if __name__ == "__main__":
if os.path.exists("contains_wrong.jsonl"):
os.remove("contains_wrong.jsonl")
os.remove("contains_correct.jsonl")
if os.path.exists("causes_wrong.jsonl"):
os.remove("causes_wrong.jsonl")
os.remove("causes_correct.jsonl")
parser = argparse.ArgumentParser(description='Read features.')
parser.add_argument('--run_features', action='store_true')
parser.add_argument('--split_name', default='train', type=str)
parser.add_argument('--fusion_model', default=\
'transformer-abstractive-summarization/fusion/'\
'discofuse_v1/wikipedia/classifier_large/pytorch_model.bin',\
type=str)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--repeat_instance', default=5, type=int)
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--data_points', default=-1, type=int)
parser.add_argument('--load_model', default='choice_policy.pt', type=str)
parser.add_argument('--lr', default=0.05, type=float)
parser.add_argument('--task', default='policy', type=str)
args = parser.parse_args()
metadata = json.load(open("annotated_metadata5.json","r"))
embeddings = read_embeddings()
#fusion_state_dict = torch.load(args.fusion_model)
#fusion_model = \
# BertForSequenceClassification.from_pretrained(\
# "bert-base-uncased", state_dict=fusion_state_dict, num_labels=5)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased",\
do_lower_case=True)
#fusion_model.cuda()
fusion_model = None
label_list = ["","and","but","although","however"]
categorized_sentences = pickle.load(open("categorized_summaries.p","rb"))
#policy = Policy(2)
#policy = Policy(4)
policy = PolicyChoices(2+2+2)
if os.path.exists(args.load_model) and args.split_name != 'train':
print("Loading saved model %s" %args.load_model)
policy.load_state_dict(torch.load(open(args.load_model,"rb")))
optimizer = optim.Adam(policy.parameters(), lr=args.lr, weight_decay=1e-4)
target_entities = get_target_entities(metadata)
target_embeddings= _get_all_entity_embeddings(target_entities, embeddings)
sentence_relation_annotations, sentence_file_names, \
sentence_entity_annotations,\
sentence_modifier_entity_annotations,\
sentence_relation_sentiments = paired_annotations(metadata)
pickle.dump(sentence_relation_annotations, open(""\
"sentence_relation_annotations.p","wb"))
pickle.dump(sentence_file_names, open(""\
"sentence_file_names.p","wb"))
if (args.split_name == 'train' and args.run_features )\
or (not os.path.exists("sentence_extracted_rationales.p")):
get_property_rationales(metadata, sentence_relation_annotations,\
sentence_file_names, sentence_entity_annotations, \
sentence_relation_sentiments, splits=['train'])
sentence_extracted_rationales = get_predicted_property_rationales()
pickle.dump(sentence_extracted_rationales, open("\
sentence_extracted_rationales.p", "wb"))
else:
sentence_extracted_rationales = \
pickle.load(open("sentence_extracted_rationales.p", "rb"))
property_style_rationales = {}
for sentence in sentence_extracted_rationales:
for property in sentence_extracted_rationales[sentence]:
property_style_rationales[property] = \
property_style_rationales.setdefault(property,[]) + \
sentence_extracted_rationales[sentence][property]
cause_style_rationales = \
[x[x.find("###")+3:].strip() for x in \
sum([sentence_extracted_rationales[x].get(\
'causes',[]) for x in sentence_extracted_rationales],[])]
title_entities = get_title_entities(metadata)
if args.split_name == 'train' and args.run_features:
get_fusion_training_data(sentence_extracted_rationales,\
sentence_relation_annotations, sentence_file_names, \
title_entities, embeddings)
print("Found %d sentences with annotations paired." \
%len(sentence_relation_annotations))
sentence_population_relations, \
sentence_population_entities = get_population_annotations(\
sentence_relation_annotations,\
sentence_modifier_entity_annotations)
sentence_sentiment = get_sentiment_annotations(\
metadata)
old_keys = list(sentence_population_relations.keys())
sentence_population_relations_a = {}
sentence_population_entities_a = {}
sentence_relation_annotations_a = {}
for x in old_keys:
sentence_population_relations_a[x.replace(' ','')] = \
sentence_population_relations[x]
old_keys = list(sentence_population_entities.keys())
for x in old_keys:
sentence_population_entities_a[x.replace(' ','')] = \
sentence_population_entities[x]
old_keys = list(sentence_relation_annotations.keys())
for x in old_keys:
sentence_relation_annotations_a[x.replace(' ','')] = \
sentence_relation_annotations[x]
sentence_pubmed_articles = {}
for file_name in metadata:
if 'summary_inputs' not in metadata[file_name]:
continue
if 'summary_pubmed_articles' not in metadata[file_name]\
['summary_inputs']:
continue
for sentence,pubmeds in metadata[file_name]\
['summary_inputs']['summary_pubmed_articles'].items():
sentence_pubmed_articles[sentence.strip().replace(' ','')] = pubmeds
pubmed_new_causes = pickle.load(open("pubmed_causes.p","rb"))
pubmed_entity_types = get_corpus_entity_types(metadata)
#get_population_correlation(sentence_population_entities,sentence_sentiment)
#get_sentiment_statistics(sentence_sentiment, sentence_file_names,\
# metadata, ['train','dev','test'])
#get_sentiment_statistics(sentence_sentiment, sentence_file_names,\
# metadata, ['train'])
#get_sentiment_statistics(sentence_sentiment, sentence_file_names,\
# metadata, ['dev'])
#get_sentiment_statistics(sentence_sentiment, sentence_file_names,\
# metadata, ['test'])
sentence_causes, sentence_contains,\
sentence_all_causes, sentence_all_contains,\
gold_sentence_causes, gold_sentence_contains,\
= follow_up_annotations(\
sentence_relation_annotations,\
embeddings, target_embeddings, sentence_file_names,\
title_entities)
pickle.dump(sentence_causes, open("sentence_causes.p","wb"))
pickle.dump(sentence_contains,open("sentence_contains.p","wb"))
pickle.dump(sentence_relation_annotations,open("\
sentence_relation_annotations.p","wb"))
split_name = args.split_name
if split_name == 'train':
vectorizer\
= get_mapped_cosine_similarities(metadata,\
sentence_extracted_rationales, sentence_file_names,\
sentence_all_causes, embeddings)
pickle.dump(vectorizer,open("vectorizer.p","wb"))
else:
vectorizer = pickle.load(open("vectorizer.p","rb"))
sentence_extracted_rationales = pickle.load(open(\
"sentence_extracted_rationales.p","rb"))
if split_name == 'train':
create_importance_classification_data(sentence_file_names, metadata,\
sentence_causes, sentence_all_causes, sentence_contains,\
sentence_all_contains, split_name)
if False:
sentence_structures = get_predicted_structures("importance_test.jsonl",\
"/data/rsg/nlp/darsh/"+\
"pytorch-pretrained-BERT/importance_classification/preds.jsonl",\
embeddings, sentence_file_names, title_entities,\
sentence_relation_sentiments, metadata,\
cluster_threshold=1.5, prob_threshold=0.4)
sentence_causes, sentence_contains = get_causes_contains_structures(\
sentence_structures, sentence_all_causes, sentence_all_contains)
else:
input_sentences, output_sentences = \
predict_importance_sentences(metadata,split_name,args)
assert input_sentences is not None
assert output_sentences is not None
assert sentence_all_causes is not None
assert sentence_all_contains is not None
assert sentence_file_names is not None
assert metadata is not None
sentence_causes, sentence_contains = get_causes_contains_from_pubmed(\
input_sentences, output_sentences, sentence_all_causes,\
sentence_all_contains, sentence_file_names, metadata,\
sentence_sentiment, vectorizer, embeddings, \
gold_sentence_causes, policy, optimizer, args,\
sentence_extracted_rationales,\
fusion_model, tokenizer, label_list,\
pubmed_entity_types,\
property_style_rationales,\
cluster_threshold=1.5, split_name=split_name)
get_simple_templated_sentences(sentence_relation_annotations,\
sentence_entity_annotations, sentence_causes, sentence_contains,\
title_entities, sentence_file_names, metadata, embeddings)
#causes,contains = compare_annotations(sentence_relation_annotations,\
# sentence_entity_annotations, embeddings,\
# target_embeddings, sentence_file_names, title_entities)
#print(sum(causes)/len(causes),sum(contains)/len(contains))
|
from ctypes import *
user32 = windll.LoadLibrary('user32.dll')
user32.LockWorkStation()
|
#coding=utf-8
import os
from flask import Flask
import config
from flask_mongoengine import MongoEngine
from flask_bootstrap import Bootstrap
bootstrap = Bootstrap()
db = MongoEngine()
def create_app():
app = Flask(__name__)
app.config.from_object(config)
bootstrap.init_app(app)
db.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
|
#!/usr/bin/env python
REDIS_DB = 0
REDIS_PORT = 6379
REDIS_HOST = 'localhost'
SALT = 'retwitter'
r =None
|
# -*- coding: utf-8 -*-
#a=int(input())
#b=int(input())
#c=int(input())
#x1=0
#x2=0
#x1=(-1*b+(b**2-(4*a*c))**0.5)/2*a
#x1=(-1*b-(b**2-(4*a*c))**0.5)/2*a
#print(x1)
#print(x2)
def compute(a,b,c):
x1=0
x2=0
if((b**2-4*a*c)<0):
print("Your equation has no root.")
else:
x1=((-1)*b+(b**2-4*a*c)**0.5)/(2*a)
x2=((-1)*b-(b**2-4*a*c)**0.5)/(2*a)
print("%.1f,"%x1,x2)
a=int(input())
b=int(input())
c=int(input())
compute(a,b,c)
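# Quick check of the corrected formula above (illustrative, not part of the
# original exercise): for a=1, b=-3, c=2 the discriminant is 9-8=1, so
# compute(1, -3, 2) would report the roots 2.0 and 1.0.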
|
from tkinter import *
from tkinter import messagebox
root = Tk()
root.title("Tic-Tac-Toe")
w=17
h=4
global i
i=1
status = [-1,-2,-3,-4,-5,-6,-7,-8,-9]
def resetBoard():
global i
i=1
btn1 = Button(root, text="", width=w, height=h, command= lambda: clicked(1))
btn2 = Button(root, text="", width=w, height=h, command= lambda: clicked(2))
btn3 = Button(root, text="", width=w, height=h, command= lambda: clicked(3))
btn4 = Button(root, text="", width=w, height=h, command= lambda: clicked(4))
btn5 = Button(root, text="", width=w, height=h, command= lambda: clicked(5))
btn6 = Button(root, text="", width=w, height=h, command= lambda: clicked(6))
btn7 = Button(root, text="", width=w, height=h, command= lambda: clicked(7))
btn8 = Button(root, text="", width=w, height=h, command= lambda: clicked(8))
btn9 = Button(root, text="", width=w, height=h, command= lambda: clicked(9))
v2=3
player_1.grid(row=0, column=0, padx=v2, pady=v2)
player_2.grid(row=1, column=0, padx=v2, pady=v2)
player_1_name.grid(row=0, column=1, padx=v2, pady=v2)
player_2_name.grid(row=1, column=1, padx=v2, pady=v2)
new_game_btn.grid(row=0, column=2, padx=v2, pady=v2)
ins_game_btn.grid(row=1, column=2, padx=v2, pady=v2)
btn1.grid(row=2, column=0)
btn2.grid(row=2, column=1)
btn3.grid(row=2, column=2)
btn4.grid(row=3, column=0)
btn5.grid(row=3, column=1)
btn6.grid(row=3, column=2)
btn7.grid(row=4, column=0)
btn8.grid(row=4, column=1)
btn9.grid(row=4, column=2)
j=-1
for x in range(0,9):
status[x]=j
j-=1
def check_win():
    win_lines = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]
    if any(status[a]==status[b] and status[b]==status[c] for a,b,c in win_lines):
if int(i)%2==0:
messagebox.showinfo(" ", "Congratulations!!! " + str(player_1_name.get()) + " won.")
resetBoard()
elif int(i)%2==1:
messagebox.showinfo(" ", "Congratulations!!! " + str(player_2_name.get()) + " won.")
resetBoard()
else:
return
def clicked(n):
    global i
    # Map button number 1-9 onto the 3x3 board grid (rows 2-4, columns 0-2).
    row, col = 2 + (n - 1) // 3, (n - 1) % 3
    if int(i) % 2 == 1:
        mark, value = "X", 1
    else:
        mark, value = "O", 0
    btn = Button(root, text=mark, width=w, height=h, relief=SUNKEN)
    btn.grid(row=row, column=col)
    status[n - 1] = value
    i += 1
    check_win()
def ins():
messagebox.showinfo("Instructions", "INSTRUCTIONS\n1. The game is played on a grid that's 3 squares by 3 squares.\n2. Player 1 is X, Player 2 is O. Players take turns putting their marks in empty squares.\n3. The first player to get 3 of her marks in a row (up, down, across, or diagonally) is the winner.\n4. When all 9 squares are full, the game is over. If no player has 3 marks in a row, the game ends in a tie.")
player_1 = Label(root, text="Player 1", anchor=W, font=("Verdana", 10))
player_2 = Label(root, text="Player 2", anchor=W, font=("Verdana", 10))
player_1_name = Entry(root, text="Name1")
player_2_name = Entry(root, text="Name2")
new_game_btn = Button(root, text="New Game", command=resetBoard)
ins_game_btn = Button(root, text="Instructions", command=ins)
btn1 = Button(root, text="", width=w, height=h, command= lambda: clicked(1))
btn2 = Button(root, text="", width=w, height=h, command= lambda: clicked(2))
btn3 = Button(root, text="", width=w, height=h, command= lambda: clicked(3))
btn4 = Button(root, text="", width=w, height=h, command= lambda: clicked(4))
btn5 = Button(root, text="", width=w, height=h, command= lambda: clicked(5))
btn6 = Button(root, text="", width=w, height=h, command= lambda: clicked(6))
btn7 = Button(root, text="", width=w, height=h, command= lambda: clicked(7))
btn8 = Button(root, text="", width=w, height=h, command= lambda: clicked(8))
btn9 = Button(root, text="", width=w, height=h, command= lambda: clicked(9))
v2=3
player_1.grid(row=0, column=0, padx=v2, pady=v2)
player_2.grid(row=1, column=0, padx=v2, pady=v2)
player_1_name.grid(row=0, column=1, padx=v2, pady=v2)
player_2_name.grid(row=1, column=1, padx=v2, pady=v2)
new_game_btn.grid(row=0, column=2, padx=v2, pady=v2)
ins_game_btn.grid(row=1, column=2, padx=v2, pady=v2)
btn1.grid(row=2, column=0)
btn2.grid(row=2, column=1)
btn3.grid(row=2, column=2)
btn4.grid(row=3, column=0)
btn5.grid(row=3, column=1)
btn6.grid(row=3, column=2)
btn7.grid(row=4, column=0)
btn8.grid(row=4, column=1)
btn9.grid(row=4, column=2)
root.mainloop()
|
# Find the maximum total from top to bottom of the triangle below:
# Comments Section:
# - My first try was simply a brute-force algorithm, but since the problem states that a
#   better algorithm exists I took some time to think about it. From the forum I found that this was exactly
#   what we should aim for. It consists of treating the triangle as a binary tree and, starting from the bottom,
#   adding the larger of a node's two successors to the node itself.
#   Doing this recursively, the top node ends up holding the sum of the best path.
triangle = [[75],
[95,64],
[17,47,82],
[18,35,87,10],
[20,4,82,47,65],
[19,1,23,75,3,34],
[88,2,77,73,7,63,67],
[99,65,4,28,6,16,70,92],
[41,41,26,56,83,40,80,70,33],
[41,48,72,33,47,32,37,16,94,29],
[53,71,44,65,25,43,91,52,97,51,14],
[70,11,33,28,77,73,17,78,39,68,17,57],
[91,71,52,38,17,14,91,43,58,50,27,29,48],
[63,66,4,68,89,53,67,30,73,16,69,87,40,31],
[4,62,98,27,23,9,70,98,73,93,38,53,60,4,23]]
def problem18():
for i in range(13,-1, -1):
for j in range(len(triangle[i])):
if triangle[i+1][j] >= triangle[i+1][j+1]:
triangle[i][j] += triangle[i+1][j]
else:
triangle[i][j] += triangle[i+1][j+1]
return triangle[0][0]
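# Usage sketch (not in the original file): run the bottom-up reduction described in the
# comments above. Note that problem18() mutates `triangle` in place, so it should be
# called on a fresh copy of the data; for this triangle the maximum total is 1074
# (the published Project Euler 18 answer).
if __name__ == '__main__':
    print(problem18())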
|
from __future__ import absolute_import, unicode_literals
import logging
import requests
from requests import Response, RequestException
from django.conf import settings
from common_services.errors import *
try:
# Load Python3 urljoin
from urllib.parse import urljoin
except:
# If failed, load Python2.7 urljoin
from urlparse import urljoin
logger = logging.getLogger(__name__)
class CommonServicesBaseClient(object):
SUCCESS_STATUS_CODE_RANGE = range(200, 300)
BAD_GATEWAY_STATUS_CODE = 502
COMMON_SERVICES_CONNECTION_TIMEOUT_S = 5
COMMON_SERVICES_RESPONSE_TIMEOUT_S = 30
BAD_GATEWAY_MAX_RETRIES = 3
def __init__(self):
# type: () -> None
self.BASE_URL = settings.COMMON_SERVICES_ENDPOINT
def _get_timeout(self):
# type: () -> Tuple
return self.COMMON_SERVICES_CONNECTION_TIMEOUT_S, self.COMMON_SERVICES_RESPONSE_TIMEOUT_S
def _make_get_request(self, rel_url):
# type: (unicode) -> Response
return self._make_request('get', rel_url)
def _make_post_request(self, rel_url, data):
# type: (unicode, dict) -> Response
return self._make_request('post', rel_url, json=data)
def _make_put_request(self, rel_url, data):
# type: (unicode, dict) -> Response
return self._make_request('put', rel_url, data=data)
def _make_request(self, method, rel_url, **kwargs):
# type: (unicode, unicode, dict) -> Response
url = urljoin(self.BASE_URL, rel_url)
# Pop the `retry_count` before sending kwargs to `requests.request`
retry_count = kwargs.pop('retry_count', 0)
try:
resp = requests.request(method, url, timeout=self._get_timeout(), **kwargs)
except RequestException as exc:
raise CommonServicesError(repr(exc))
if resp.status_code == self.BAD_GATEWAY_STATUS_CODE:
if retry_count < self.BAD_GATEWAY_MAX_RETRIES:
kwargs['retry_count'] = retry_count + 1
return self._make_request(method, rel_url, **kwargs)
return resp
def _extract_err_msg(self, resp):
# type: (Response) -> unicode
try:
decoded_resp = resp.json()
code = decoded_resp.get('code', '')
desc = decoded_resp.get('description', '')
return '{}: {}'.format(code, desc)
except ValueError:
return ''
def _handle_service_response(self, resp, raise_exc):
# type: (Response, bool) -> None
if resp.status_code in self.SUCCESS_STATUS_CODE_RANGE:
return
error_msg = self._extract_err_msg(resp)
if raise_exc:
if resp.status_code == 404:
raise ResourceNotFound(error_msg)
elif resp.status_code == 401 or resp.status_code == 403:
raise AuthenticationError(error_msg)
elif resp.status_code == 500:
raise ServerError(error_msg)
elif resp.status_code == 502:
raise BadGatewayError(error_msg)
elif resp.status_code == 503:
raise ServiceUnavailableError(error_msg)
else:
raise CommonServicesError(error_msg)
else:
logger.error('CommonServicesError: {}'.format(error_msg))
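# Hedged usage sketch (not part of the original module): a hypothetical concrete
# client built on top of CommonServicesBaseClient. The class name `PingClient` and
# the relative URL '/v1/ping' are illustrative assumptions, not a real endpoint.
class PingClient(CommonServicesBaseClient):
    def ping(self, raise_exc=True):
        # type: (bool) -> Response
        resp = self._make_get_request('/v1/ping')
        # Maps 4xx/5xx responses onto the exception hierarchy above when
        # raise_exc is True; otherwise the error is only logged.
        self._handle_service_response(resp, raise_exc)
        return resp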
|
import json
import math
import requests
from recipes.models import Good, Pharmacy, Medicine
from recipes.serializers import PharmacySerializer, MedicineSerializer
def get_coordinates(address: str):
response = requests.get('https://geocode-maps.yandex.ru/1.x/?format=json&geocode={}'.format(address))
data = json.loads(response.content.decode(response.encoding))
try:
coordinates_str = data['response']['GeoObjectCollection']['featureMember'][0]['GeoObject']['Point']['pos']
x, y = map(lambda s: float(s), coordinates_str.split())
return y, x
except:
pass
def get_pharmacies_and_medicines(data):
city = data['city_name']
print(city)
medicines = data['medicines']
print(medicines)
coordinates = data['coordinates'] if 'coordinates' in data else None
result = find_pharmacies(city, medicines, coordinates)
return [{'pharmacy':PharmacySerializer(k).data, 'medicines': [MedicineSerializer(i).data for i in v]} for k, v in result.items()]
def find_pharmacies(city_name, medicine_ids, coordinates=None):
result = dict()
if not coordinates:
coordinates = get_coordinates(city_name)
pharmacies = Pharmacy.objects.filter(city__name=city_name)
medicines = [Medicine.objects.get(id=m) for m in medicine_ids]
pharmacies_goods = dict()
for p in pharmacies:
pharmacies_goods[p] = [[], 0]
for m in medicines:
if Good.objects.filter(pharmacy=p, medicine=m).count():
pharmacies_goods[p][0].append(m)
pharmacies_goods[p][1] = get_distance(*coordinates, p.latitude, p.longitude)
found_medicines = 0
required_medicines = len(medicines)
pharmacies_goods_list = pharmacies_goods.items()
    while found_medicines < required_medicines:
        if not pharmacies_goods_list:
            break
        pharmacies_goods_list = sorted(pharmacies_goods_list, key=lambda x: (-len(x[1][0]), x[1][1]))
if len(pharmacies_goods_list[0][1][0]) == 0:
break
pharmacy = pharmacies_goods_list[0]
pharmacies_goods_list = pharmacies_goods_list[1:]
result[pharmacy[0]] = []
for m in pharmacy[1][0]:
result[pharmacy[0]].append(m)
found_medicines += 1
for k, v in pharmacies_goods_list:
if m in v[0]:
v[0].remove(m)
return result
def get_distance(x1, y1, x2, y2):
lon1, lat1, lon2, lat2 = map(math.radians, [x1, y1, x2, y2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
c = 2 * math.asin(math.sqrt(a))
r = 6371 # radius of Earth in kilometers
return c * r
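# Illustrative sanity check for the haversine helper above (kept commented out so the
# module's behaviour is unchanged). get_distance expects (lon1, lat1, lon2, lat2);
# Moscow (37.62 E, 55.75 N) to Saint Petersburg (30.31 E, 59.94 N) should come out
# at roughly 630-640 km.
# print(get_distance(37.62, 55.75, 30.31, 59.94))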
|
# Python script to create the index containing frame-number and timestamp
import os
import sys
import argparse
# PyAV - wrapper for FFMPEG
import av
# Local imports
def process(video_fname, imdb_key):
"""Generate the matidx file.
"""
if imdb_key is None:
movie_name = '.'.join(video_fname.split('/')[-1].split('.')[:-1])
else:
movie_name = imdb_key
# create mat-idx template
matidx_fname = os.path.join(args.base_dir, movie_name, movie_name + '.matidx')
if os.path.exists(matidx_fname):
print ('matidx for {} already exists'.format(movie_name))
return
else:
# check directory exists
out_dir = os.path.join(args.base_dir, movie_name)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
fid = open(matidx_fname, 'w')
# open the video
try:
container = av.open(video_fname)
except:
        raise RuntimeError('Failed to open the video!')
# video stream time-base
v_time_base = container.streams.video[0].time_base
real_v_time_base = 1.0 * v_time_base.numerator / v_time_base.denominator
# run for each frame, and get info
for fn, frame in enumerate(container.decode(video=0)):
ts = frame.pts * real_v_time_base
fid.write('%d %.3f\n' %(fn, ts))
fid.close()
print('Completed writing to', matidx_fname)
parser = argparse.ArgumentParser(description='Process video file inputs')
parser.add_argument('--video_fname', type=str, help='Video file path')
parser.add_argument('--imdb_key', type=str, help='IMDb key')
parser.add_argument('--base_dir', type=str, help='Base directory')
if __name__ == '__main__':
args = parser.parse_args()
print (args)
if args.imdb_key is not None:
assert args.imdb_key.startswith('tt'), 'Invalid IMDb key'
print ('Running for {}\n{}'.format(args.imdb_key, args.video_fname))
else:
print ('Running for {}'.format(args.video_fname))
process(args.video_fname, args.imdb_key)
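# Example invocation (script name and paths are illustrative, not from the original):
#   python make_matidx.py --video_fname /path/to/movie.mp4 --imdb_key tt0133093 --base_dir ./matidx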
|
import os
import sys
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
path = '/backend'
if path not in sys.path:
sys.path.append(path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
application = Cling(get_wsgi_application())
|
# IMPORTANT
#run in StanfordCoreNLP folder:
# java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer
import os
import re
import logging as lo
from pycorenlp import StanfordCoreNLP
from gensim.models import word2vec
import sys
import pickle
import pandas as pd
import numpy as np
#Class that parses input sentences using StanfordCoreNLP
class Parser():
def __init__(self, folder_name):
self.folder_name = folder_name
try:
self.nlp = StanfordCoreNLP('http://localhost:9000')
except:
print("INSTRUCTIONS")
print("in folder ./word2vec/stanford-corenlp-full-2018-02-27, open terminal and perform:")
print("java -mx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer")
raise
#First remove punctuation from sentences and convert to lower case
def clean_sentence( self, s ):
try:
s = s.replace('...',' ')
s = s.lower()
s = re.sub('<[A-Za-z0-9/]>','',s)
s = re.sub('[^A-Za-z0-9 \',:;.?!]+', '', s)
s = s.replace('.','.')
s = s.replace('?','.')
s = s.replace('!','.')
while ' ' in s: s = s.replace(' ',' ')
s = s.lstrip().rstrip()
if s[-1] == ';':
s = s[:-1] + s[-1:].replace(';','.')
return s.lstrip().rstrip()
except KeyboardInterrupt:
raise
except:
print("Error with",s)
raise
#Split up the line in sentences by splitting on '.' then parse each sentence separately and extract POS tags
def parse_sentence( self, s ):
sentences = s.lower().split('.')
parsed_sentence = []
for sentence in sentences:
if len(sentence)>0:
try:
output = self.nlp.annotate(sentence, properties={
'annotators': 'parse',
'outputFormat': 'json',
'timeout': 100000
})
json = output['sentences'][0]['parse']
                    words = re.findall('\\([A-Z:.]+ [a-z0-9\',.?:;!]+\\)', json)
for w in words:
parsed_sentence.append(w)
except KeyboardInterrupt:
raise
except:
continue
return parsed_sentence
#Parse the entire cornell movie line dataset
def parse_data(self):
#sys.exit(0) # I don't really want to run this again
lines = {}
with open('../res/cornell/movie_lines.txt','rb') as f:
total = 304713.
i = 0
for line in f:
print(i/total*100,'% ',end='\r')
i += 1
line = line.decode("ISO-8859-1",errors='ignore')
l_data = line.split('+++$+++')
sentence = self.clean_sentence( l_data[4] )
parsed = self.parse_sentence( sentence )
lines[l_data[0]] = {'user':l_data[1],
'movie':l_data[2],
'name':l_data[3],
'line':parsed}
with open('movie_lines.p','w+b') as fp:
pickle.dump(lines,fp,protocol=pickle.HIGHEST_PROTOCOL)
#Parse MSR dataset. Currently unused because we changed sentence representation to a more comprehensible model
def parse_sem_spacer(self):
sentences = []
train_data = pd.read_csv('../res/dataset/msr/train_data.csv',sep='\t',error_bad_lines=False).dropna()
test_data = pd.read_csv('../res/dataset/msr/test_data.csv',sep='\t',error_bad_lines=False).dropna()
#train_labels = np.array(train_data['Quality'])
#train_inputs = np.array( [[sent2vecs(r[1]['#1 String']),sent2vecs(r[1]['#2 String'])] for r in train_data.iterrows()])
#no train-test set separation here since the dataset is quite small compared to the cornell dataset
total = train_data.shape[0]+test_data.shape[0]
i = 0.
for r in train_data.iterrows():
print( i/total*100,'% ',end='\r')
i += 1
qu = r[1]['Quality']
s1 = self.parse_sentence(self.clean_sentence(r[1]['#1 String']))
s2 = self.parse_sentence(self.clean_sentence(r[1]['#2 String']))
sentences.append({'Quality':qu,
'#1 String':s1,
'#2 String':s2})
for r in test_data.iterrows():
print( i/total*100, '% ', end='\r')
i += 1
qu = r[1]['Quality']
s1 = self.parse_sentence(self.clean_sentence(r[1]['#1 String']))
s2 = self.parse_sentence(self.clean_sentence(r[1]['#2 String']))
sentences.append({'Quality':qu,
'#1 String':s1,
'#2 String':s2})
data_array = np.array(sentences)
np.save('../res/word2vec/msr_dataset.npy', data_array)
if __name__ == '__main__':
parser = Parser('../res')
parser.parse_data()
parser.parse_sem_spacer()
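    # Hedged usage sketch: parsing a single cleaned sentence (assumes a CoreNLP server
    # is already running on localhost:9000 as described in the header comment).
    # print(parser.parse_sentence(parser.clean_sentence("The quick brown fox jumps over the lazy dog!")))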
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import re
from typing import Callable
def assert_equal_with_printing(
expected, actual, uniform_formatter: Callable[[str], str] | None = None
):
"""Asserts equality, but also prints the values so they can be compared on failure."""
str_actual = str(actual)
print("Expected:")
print(expected)
print("Actual:")
print(str_actual)
if uniform_formatter is not None:
expected = uniform_formatter(expected)
str_actual = uniform_formatter(str_actual)
assert expected == str_actual
def remove_locations_from_traceback(trace: str) -> str:
location_pattern = re.compile(r'"/.*", line \d+')
address_pattern = re.compile(r"0x[0-9a-f]+")
new_trace = location_pattern.sub("LOCATION-INFO", trace)
new_trace = address_pattern.sub("0xEEEEEEEEE", new_trace)
return new_trace
|
from flask import Flask, Response
from flask_accept import accept_fallback
from flask_restplus import Resource, Api
from flask_weasyprint import render_pdf, HTML
from config import Config
from formatter.report_formatter import ReportFormatter
from model.report import Report
from model.report import db
app = Flask(__name__)
app.config.from_object(Config)
db.init_app(app)
api = Api(app)
class ReportResource(Resource):
@accept_fallback
def get(self, id):
return ReportFormatter(Report.query.get_or_404(id)).as_dict()
@get.support('application/xml')
def get_xml(self, id):
return Response(ReportFormatter(Report.query.get_or_404(id)).as_xml(), mimetype='application/xml')
@get.support('application/pdf')
def get_pdf(self, id):
html_render = ReportFormatter(Report.query.get_or_404(id)).as_html(template='report.html')
return render_pdf(HTML(string=html_render))
api.add_resource(ReportResource, '/report/<int:id>')
if __name__ == '__main__':
app.run(debug=True)
|
class MyClass:
def __init__(self, firstname = "Vivek", lastname = "Khimani"):
self.name1 = firstname
self.name2 = lastname
def getName (self,age):
self.name2 = age
return self.name1
myObject = MyClass("Bhargav","Khimani")
print(myObject.name1)
print(myObject.name2)
print (myObject.getName("25"))
|
import ROOT
ROOT.gROOT.SetBatch(True)
import json
from array import array
# choose which year's eta-phi ROOT files to make!
year2017 = False
year2018 = False
year2016 = True
# "Average" : 0.5485,
# "NonPixelProblemBarrel" : 0.5570,
# "EndCap" : 0.5205,
# "PixelProblemBarrel" : 0.3589
def fillH2( trigger, wp, dm, sample, info_map, h2 ) :
print "Filling: ",h2
for x in range( 1, h2.GetXaxis().GetNbins()+1 ) :
for y in range( 1, h2.GetYaxis().GetNbins()+1 ) :
#print x,y
if x == 1 or x == 6 : # beyond eta range, abs(eta)>2.1
h2.SetBinContent( x, y, 0.0 )
elif x == 2 or x == 5 : # end cap, 1.5<abs(eta)<2.1
h2.SetBinContent( x, y, info_map[ trigger ][ sample ][ wp ][ dm ][ "EndCap" ] )
            elif x == 3 or (x == 4 and y != 2) : # barrel, outside the pixel problem region
h2.SetBinContent( x, y, info_map[ trigger ][ sample ][ wp ][ dm ][ "NonPixelProblemBarrel" ] )
            elif x == 4 and y == 2 : # barrel pixel problem region
h2.SetBinContent( x, y, info_map[ trigger ][ sample ][ wp ][ dm ][ "PixelProblemBarrel" ] )
else :
print "Didn't we cover all the values?",x,y
def fillH2_2018( trigger, wp, dm, sample, info_map, h2 ) :
print "Filling: ",h2
for x in range( 1, h2.GetXaxis().GetNbins()+1 ) :
for y in range( 1, h2.GetYaxis().GetNbins()+1 ) :
#print x,y
if x == 1 or x == 6 : # beyond eta range, abs(eta)>2.1
h2.SetBinContent( x, y, 0.0 )
elif x == 2 and y == 2 : # end cap, broken HCAL modules region, 1.5< eta <2.1 and -1.6 < phi< -0.8
h2.SetBinContent( x, y, info_map[ trigger ][ sample ][ wp ][ dm ][ "HCALProblemEndCap" ] )
elif((x == 2 and y !=2) or x == 5) : # end cap rest
h2.SetBinContent( x, y, info_map[ trigger ][ sample ][ wp ][ dm ][ "NonHCALProblemEndCap" ] )
elif x == 3 or x == 4 : # barrel
h2.SetBinContent( x, y, info_map[ trigger ][ sample ][ wp ][ dm ][ "Barrel" ] )
else :
print "Didn't we cover all the values?",x,y
def fillH2_2016( trigger, wp, dm, sample, info_map, h2 ) :
print "Filling: ",h2
for x in range( 1, h2.GetXaxis().GetNbins()+1 ) :
for y in range( 1, h2.GetYaxis().GetNbins()+1 ) :
if x == 1 or x == 3 : # beyond eta range, abs(eta)>2.1
h2.SetBinContent( x, y, 0.0 )
elif x == 2 : # the rest of the eta phi region, no eta phi separation applied for 2016
h2.SetBinContent( x, y, info_map[ trigger ][ sample ][ wp ][ dm ][ "Average" ] )
else :
print "Didn't we cover all the values?",x,y
def fillAvgH2( trigger, wp, dm, sample, info_map, h2 ) :
print "Filling: ",h2
for x in range( 1, h2.GetXaxis().GetNbins()+1 ) :
for y in range( 1, h2.GetYaxis().GetNbins()+1 ) :
#print x,y
if x == 1 or x == 3 : # beyond eta range, abs(eta)>2.1
h2.SetBinContent( x, y, 0.0 )
elif x == 2 : # abs(eta)<2.1
h2.SetBinContent( x, y, info_map[ trigger ][ sample ][ wp ][ dm ][ "Average" ] )
else :
print "Didn't we cover all the values?",x,y
if(year2017):
with open('data/tauTriggerEfficienciesEtaPhiMap2017_FINAL.json') as etaPhiInfo :
info_map = json.load( etaPhiInfo )
elif(year2018):
with open('data/tauTriggerEfficienciesEtaPhiMap2018_pre.json') as etaPhiInfo :
info_map = json.load( etaPhiInfo )
elif(year2016):
with open('data/tauTriggerEfficienciesEtaPhiMap2016_pre.json') as etaPhiInfo :
info_map = json.load( etaPhiInfo )
print "Making Eta Phi Map"
#saveDir = '/afs/cern.ch/user/t/truggles/www/tau_fits_Feb13v2/'
#c = ROOT.TCanvas( 'c1', 'c1', 600, 600 )
#p = ROOT.TPad( 'p1', 'p1', 0, 0, 1, 1 )
#p.Draw()
#p.SetLeftMargin( ROOT.gPad.GetLeftMargin() * 1.5 )
#p.SetRightMargin( ROOT.gPad.GetRightMargin() * 1.5 )
#p.Draw()
if(year2017):
oFile = ROOT.TFile( 'data/tauTriggerEfficienciesEtaPhi2017_FINAL.root', 'RECREATE' )
oFile.cd()
elif(year2018):
oFile = ROOT.TFile( 'data/tauTriggerEfficienciesEtaPhi2018_pre.root', 'RECREATE' )
oFile.cd()
elif(year2016):
oFile = ROOT.TFile( 'data/tauTriggerEfficienciesEtaPhi2016_pre.root', 'RECREATE' )
oFile.cd()
xBinning = array('f', [-2.5, -2.1, -1.5, 0, 1.5, 2.1, 2.5] )
if(year2017):
yBinning = array('f', [-3.2, 2.8, 3.2] )
elif(year2018):
yBinning = array('f', [-3.2, -1.6, -0.8, 3.2] )
elif(year2016):
yBinning = array('f', [-3.2, 3.2] )
xBinning = array('f', [-2.5, -2.1, 2.1, 2.5] )
xBinningAvg = array('f', [-2.5, -2.1, 2.1, 2.5] )
yBinningAvg = array('f', [-3.2, 3.2] )
for trigger in ['ditau', 'etau', 'mutau'] :
for wp in ['vvloose', 'vloose', 'loose', 'medium', 'tight', 'vtight', 'vvtight' ] :
for dm in ['dm0', 'dm1', 'dm10', 'dmCmb'] :
print trigger, wp, dm
h_data = ROOT.TH2F( '%s_%sMVAv2_%s_DATA' % (trigger, wp, dm), '%s_%sMVAv2_%s_DATA;#tau #eta;#tau #phi;Efficiency' % (trigger, wp, dm), len(xBinning)-1, xBinning, len(yBinning)-1, yBinning)
h_mc = ROOT.TH2F( '%s_%sMVAv2_%s_MC' % (trigger, wp, dm), '%s_%sMVAv2_%s_MC;#tau #eta;#tau #phi;Efficiency' % (trigger, wp, dm), len(xBinning)-1, xBinning, len(yBinning)-1, yBinning)
h_data_avg = ROOT.TH2F( '%s_%sMVAv2_%s_DATA_AVG' % (trigger, wp, dm), '%s_%sMVAv2_%s_AVG_DATA;#tau #eta;#tau #phi;Efficiency' % (trigger, wp, dm), len(xBinningAvg)-1, xBinningAvg, len(yBinningAvg)-1, yBinningAvg)
h_mc_avg = ROOT.TH2F( '%s_%sMVAv2_%s_MC_AVG' % (trigger, wp, dm), '%s_%sMVAv2_%s_AVG_MC;#tau #eta;#tau #phi;Efficiency' % (trigger, wp, dm), len(xBinningAvg)-1, xBinningAvg, len(yBinningAvg)-1, yBinningAvg)
if(year2017):
fillH2( trigger, wp, dm, 'data', info_map, h_data )
fillH2( trigger, wp, dm, 'mc', info_map, h_mc )
elif(year2018):
fillH2_2018( trigger, wp, dm, 'data', info_map, h_data )
fillH2_2018( trigger, wp, dm, 'mc', info_map, h_mc )
elif(year2016):
fillH2_2016( trigger, wp, dm, 'data', info_map, h_data )
fillH2_2016( trigger, wp, dm, 'mc', info_map, h_mc )
fillAvgH2( trigger, wp, dm, 'data', info_map, h_data_avg )
fillAvgH2( trigger, wp, dm, 'mc', info_map, h_mc_avg )
oFile.cd()
h_data.Write()
h_mc.Write()
h_data_avg.Write()
h_mc_avg.Write()
#p.cd()
#h_data.Draw('COLZ TEXT')
#c.SaveAs( saveDir+'%s_%s_%s_DM%s.png' % (trigger, wp, 'DATA', dm) )
#h_mc.Draw('COLZ TEXT')
#c.SaveAs( saveDir+'%s_%s_%s_DM%s.png' % (trigger, wp, 'MC', dm) )
#h_data_avg.Draw('COLZ TEXT')
#c.SaveAs( saveDir+'%s_%s_%s_DM%s_AVG.png' % (trigger, wp, 'DATA', dm) )
#h_mc_avg.Draw('COLZ TEXT')
#c.SaveAs( saveDir+'%s_%s_%s_DM%s_AVG.png' % (trigger, wp, 'MC', dm) )
|
# Copyright (C) 2014 Yellow Feather Ltd
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
# With thanks to Lance: http://raspberrypi.stackexchange.com/users/1237/lance
# and this answer on StackExchange: http://raspberrypi.stackexchange.com/a/1685/19199
import usb.core
import usb.util
class Aviosys8800(object):
def __init__(self):
self.handle = None
self.dev = None
def open(self):
self.dev = usb.core.find(idVendor=0x067b, idProduct=0x2303)
if self.dev is None:
raise ValueError('USB switch not found')
self.dev.set_configuration()
usb.util.claim_interface(self.dev, 0)
def close(self):
self.USB1(0x21,34,2,0,0)
self.USB1(0x21,34,0,0,0)
usb.util.release_interface(self.dev, 0)
def init(self):
self.USB2(0x40,1,1028,0)
self.USB2(0x40,1,1028,1)
self.USB2(0x40,1,1028,32)
self.USB2(0x40,1,1028,33)
self.USB2(0x40,1,1028,64)
self.USB2(0x40,1,1028,65)
self.USB2(0x40,1,1028,96)
self.USB2(0x40,1,1028,97)
self.USB2(0x40,1,1028,128)
self.USB2(0x40,1,1028,129)
self.USB2(0x40,1,1028,160)
self.USB2(0x40,1,1028,161)
self.USB1(0x40,1,0,1)
self.USB1(0x40,1,1,0)
self.USB1(0x40,1,2,68)
#800
self.USB1(0,1,1,0,0)
self.USB1(0x21,32,0,0,7)
self.USB1(0xc0,1,128,0,2)
self.USB1(0xc0,1,129,0,2)
#804
self.USB1(0x40,1,1,0)
self.USB1(0x40,1,0,1)
self.USB1(0x21,34,1,0,0)
#807
self.USB1(0xc0,1,128,0,2)
self.USB1(0xc0,1,129,0,2)
self.USB1(0x40,1,1,0,0)
#810
self.USB1(0x40,1,0,1,0)
self.USB1(0x21,34,3,0,0)
self.USB1(0xc0,1,128,0,2)
self.USB1(0x40,1,0,1,0)
self.USB1(0xc0,1,128,0,2)
#815
self.USB1(0x40,1,0,1,0)
self.USB1(0x21,32,0,0,7)
self.USB1(0x40,1,2827,2,0)
#818
self.USB1(0x40,1,2313,0,0)
self.USB1(0x40,1,2056,0,0)
#820
self.USB1(0xc0,1,129,0,2)
self.USB1(0x40,1,1,32)
self.USB1(0xc0,1,36237,0,4)
def getStatus(self):
status = self.USB1(0xc0,1,129,0,2)
if status[0] == 0x20:
return False
if status[0] == 0xA0:
return True
raise ValueError('Unrecognised status')
def turnOn(self):
self.USB1(0x40,1,1,160)
def turnOff(self):
self.USB1(0x40,1,1,32)
def USB1 (self, utype=0xC0, ureq=1, uvalue=0, uindex=1028, ubytes=0):
if ubytes != 7:
status=self.dev.ctrl_transfer(int(utype), int(ureq), wValue = uvalue, wIndex = uindex, data_or_wLength = ubytes, timeout = 100)
else:
ubuffer = 0xB0,0x4,0x0,0x0,0x2,0x7
status=self.dev.ctrl_transfer(int(utype), int(ureq), wValue = uvalue, wIndex = uindex, data_or_wLength = ubuffer, timeout = 100)
return status
def USB2 (self, vtype, vreq=1, vvalue=1028, vindex=0, vbytes=0):
self.USB1(0xc0,1,33924,0,1)
self.USB1(vtype,vreq,vvalue,vindex,vbytes)
self.USB1(0xc0,1,33924,0,1)
self.USB1(0xc0,1,33667,0,1)
return
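# Hedged usage sketch (not part of the original module): a typical open/init/toggle
# sequence for the class above. Requires pyusb and permission to access the device.
if __name__ == '__main__':
    switch = Aviosys8800()
    switch.open()
    switch.init()
    switch.turnOn()
    print(switch.getStatus())
    switch.turnOff()
    switch.close()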
|
import board
import neopixel
import time
pixels = neopixel.NeoPixel(board.D18, 20)
for i in range(14):
pixels[i] = (10,0,0)
|
import pytest
@pytest.mark.asyncio
async def test_exists(redis):
redis._redis.set('foo', 'bar')
redis._redis.set('baz', 'blub')
val = await redis.exists('blargh')
assert 0 == val
val = await redis.exists('foo')
assert 1 == val
val = await redis.exists('foo', 'baz')
assert 2 == val
val = await redis.exists('foo', 'baz', 'foo')
assert 3 == val
val = await redis.exists('foo', 'baz', 'blargh')
assert 2 == val
@pytest.mark.asyncio
async def test_delete(redis):
redis._redis.set('foo', 'bar')
redis._redis.set('baz', 'blub')
redis._redis.set('blargh', 'blurgh')
val = await redis.delete('foo')
assert 1 == val
assert False == redis._redis.exists('foo')
val = await redis.delete('foo', 'baz', 'blargh')
assert 2 == val
assert False == redis._redis.exists('baz')
assert False == redis._redis.exists('blargh')
|
#14. Write a Python program that accepts a string and calculate the number of digits and letters. Go to the editor
#Sample Data : Python 3.2
#Expected Output :
#Letters 6
#Digits 2
s = input("Input a string: ")
d = 0
l = 0
limit = 0
while len(s) > limit:
if s[limit].isdigit():
d = d + 1
elif s[limit].isalpha():
l = l + 1
#else:
# pass
limit = limit + 1
print("Letters", l)
print("Digits", d)
#print(limit)
#Tutor solution
#for c in s:
# if c.isdigit():
# d=d+1
# elif c.isalpha():
# l=l+1
# else:
# pass
#print("Letters", l)
#print("Digits", d)
#string = input("Calculate the number an digits of this string:")
#index = [string]
#len(string)
#digits = [0,1,2,3,4,5,6,7,8,9]
#stop = 0
#" " = -1
#while index > stop:
# stop = stop + 1
#print(index)
#print(string)
#print("\n")
|
from __future__ import print_function
import sys
import os
import requests
import logging
import json
from os.path import dirname
from jsonschema import validate
import importlib
import pkgutil
from halocli.util import Util
logger = logging.getLogger(__name__)
logging.root.setLevel(logging.INFO)
class PluginError(Exception):
pass
class Plugin():
def __init__(self, halo):
# init vars
self.halo = halo
# init work on halo config
# if self.halo.config ...
self.name = 'setup'
self.desc = 'config settings file'
# set commands
self.commands = {
'config': {
'usage': "Configure project settings file",
'lifecycleEvents': ['config'],
'options': {
'service_name': {
'usage': 'Name of service',
'required': True,
'shortcut': 's'
},
'swagger_path': {
'usage': 'Path to swagger file',
'required': True,
'shortcut': 'p'
}
},
},
}
# set hooks
self.hooks = {
'before:setup:config': self.before_setup_config,
'setup:config': self.setup_config,
'after:setup:config': self.after_setup_config,
}
# logger.info('finished plugin')
def run_plugin(self, options):
self.options = options
# do more
def before_setup_config(self):
pass
def setup_config(self):
if hasattr(self, 'options'):
for o in self.options:
if 'service_name' in o:
service_name = o['service_name']
if 'swagger_path' in o:
swagger_path = o['swagger_path']
else:
return
ret = Util.config_settings(self.halo.settings, service_name,swagger_path)
if ret == 0:
self.halo.cli.log("finished config successfully")
return ret
def after_setup_config(self):
pass
|
# -*- coding=utf-8 -*-
from __future__ import unicode_literals
"""
Excel Reader
~~~~~~~~~~~~~
"""
import xlrd
from .base import cached_property, _missing
__all__ = ['ExcelReader']
class ExcelReader(object):
def __init__(self, filename=None, file_contents=None, file_point=None):
self.filename = filename
self.file_contents = file_contents
self.file_point = file_point
@cached_property
def workbook(self):
if self.file_point:
self.file_contents = self.file_point.read()
return xlrd.open_workbook(filename=self.filename,
file_contents=self.file_contents)
def parse_header(self, field_descs, header):
"""Parse the header row.
:param field_descs: field descriptors, e.g. (field1, field2, field3)
:param header: header row
:return: list of (field, xl_index) tuples, e.g.
[(<Field0 object>, 0), (<Field1 object>, 1), ...];
if a field is missing from the header, xl_index is _missing
"""
header_descs = []
col_index_mapping = {c.value: i for i, c in enumerate(header)}
for field in field_descs:
header_descs.append(
(field, col_index_mapping.get(field.xl_name, _missing)))
return header_descs
def parse_sheet_data(self, field_descs, sheet_index=0):
"""Parse the data rows of a sheet.
:param field_descs: field descriptors
:param sheet_index: sheet index
"""
records = []
sheet = self.workbook.sheets()[sheet_index]
nrows = sheet.nrows
headers = self.parse_header(field_descs, sheet.row(0))
for i_row in range(1, nrows):
row = sheet.row(i_row)
record = {}
for field, i_col in headers:
if i_col is not _missing:
record[field.key] = field(row[i_col])
else:
record[field.key] = field()
records.append(record)
return records
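# A hedged usage sketch (not from the original module): parse_sheet_data expects
# "field descriptor" objects exposing `xl_name` (the column header text), `key`
# (the output dict key), and being callable on a cell (or with no argument when
# the column is missing). The `SimpleField` name below is purely illustrative.
#
#     class SimpleField(object):
#         def __init__(self, key, xl_name, default=None):
#             self.key, self.xl_name, self.default = key, xl_name, default
#         def __call__(self, cell=None):
#             return self.default if cell is None else cell.value
#
#     reader = ExcelReader(filename='people.xls')
#     records = reader.parse_sheet_data((SimpleField('name', u'Name'),
#                                        SimpleField('age', u'Age', default=0)))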
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
import math
def main():
radius_pub = rospy.Publisher('radius', Float64, queue_size=10)
rospy.init_node('radius_pub_node', anonymous=True)
loop_rate = rospy.Rate(5)
radius = 1.0
PI = math.pi
angular_speed = 1.0
distance = 2*PI*radius
current_distance = 0
# while not rospy.is_shutdown():
# t0 = rospy.Time.now().to_sec() # Start Time
for i in range(2):
current_distance = 0
t0 = rospy.Time.now().to_sec() # Start Time
while current_distance < distance:
t1 = rospy.Time.now().to_sec() # Current time
t = t1 - t0 # Time valuetracker
current_distance = angular_speed*t*abs(radius) # distance = Radius*angular_speed*time
msg1 = Float64(radius)
rospy.loginfo("Publishing: ")
rospy.loginfo(msg1)
radius_pub.publish(msg1)
radius = -radius
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
for row in range(1,6):
for col in range(1,6):
if (col==1 and row!=1) or (col==5 and row!=1) or(( row==1 or row==3)and(col!=1)) and (col>1 and col<5):
print("0",end=" ")
else:
print(" ",end=" ")
print()
for row_b in range(1,6):
for col_b in range(1,6):
if col_b==1 or( row_b==1 and col_b!=5) or (col_b==5 and row_b!=1) or (row_b==3 and col_b!=5) or (row_b==5 and col_b!=5) :
print("b",end=" ")
else:
print(" ",end=" ")
print()
for row_c in range(1,6):
for col_c in range(1,6):
if (col_c==1 and row_c!=1 and row_c!=5) or(( row_c==1 and col_c!=1) or (row_c==5 and col_c!=1)):
print("C",end=" ")
else:
print(" ",end=" ")
print()
for row_d in range(1,6):
for col_d in range(1,6):
if (row_d==1 and col_d!=5) or col_d==1 or (col_d==5 and row_d!=1) or row_d==5:
print("D",end="")
else:
print(" ",end="")
print()
for row_e in range(1,6):
for col_e in range(1,6):
if (row_e==1 or col_e==1 or row_e==5 or row_e==3):
print("0",end="")
else:
print(" ",end="")
print()
|
# coding:utf-8
from __future__ import absolute_import, unicode_literals
__author__ = "golden"
__date__ = '2018/6/21'
from cleo import Command
class GreetCommand(Command):
"""
Greets someone
greet
{name? : Who do you want to greet?}
{--y|yell : If set, the task will yell in uppercase letters}
"""
def handle(self):
name = self.argument('name')
if name:
text = 'Hello %s' % name
else:
text = 'Hello'
if self.option('yell'):
text = text.upper()
self.line(text)
class GreetCommand1(Command):
"""
Greets someone
greet1
{name? : Who do you want to greet?}
{--y|yell : If set, the task will yell in uppercase letters}
"""
def handle(self):
name = self.argument('name')
if name:
text = 'Hello %s' % name
else:
text = 'Hello'
if self.option('yell'):
text = text.upper()
self.line(text)
from cleo import Application
application = Application()
application.add(GreetCommand())
application.add(GreetCommand1())
if __name__ == '__main__':
application.run()
|
import unittest
from katas.beta.who_took_the_car_key import who_took_the_car_key
class WhoTookTheCarKeyTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(who_took_the_car_key(
['01000001', '01101100', '01100101', '01111000', '01100001',
'01101110', '01100100', '01100101', '01110010']
), 'Alexander')
def test_equal_2(self):
self.assertEqual(who_took_the_car_key(
['01001010', '01100101', '01110010', '01100101', '01101101',
'01111001']
), 'Jeremy')
def test_equal_3(self):
self.assertEqual(who_took_the_car_key(
['01000011', '01101000', '01110010', '01101001', '01110011']
), 'Chris')
def test_equal_4(self):
self.assertEqual(who_took_the_car_key(
['01001010', '01100101', '01110011', '01110011', '01101001',
'01100011', '01100001']
), 'Jessica')
def test_equal_5(self):
self.assertEqual(who_took_the_car_key(
['01001010', '01100101', '01110010', '01100101', '01101101',
'01111001']
), 'Jeremy')
|
#!/usr/bin/env python3
import pickle
with open('temp.txt', 'w') as ff:
ff.write('this is test file')
with open('temp.txt', 'r') as fr:
print(fr.read())
data1 = {'a': [1, 2.0, 3, 4+6j],
'b': ('string', u'Unicode string'),
'c': None}
selfref_list = [1, 2, 3]
selfref_list.append(selfref_list)
print(selfref_list)
output = open('data.pkl', 'wb')
pickle.dump(data1, output)
pickle.dump(selfref_list, output, -1)
output.close()
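# Read-back sketch (assumes the data.pkl written just above): pickle.load returns
# the objects in the order they were dumped, one call per object.
with open('data.pkl', 'rb') as pkl_in:
    restored_data = pickle.load(pkl_in)
    restored_list = pickle.load(pkl_in)
print(restored_data == data1)             # True: the dict round-trips unchanged
print(restored_list[3] is restored_list)  # True: the self-reference is preserved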
|
import unittest
from src.card import Card
from src.card_game import CardGame
class TestCardGame(unittest.TestCase):
def setUp(self):
# Cards
self.card1 = Card("Hearts", 7)
self.card2 = Card("Spades", 2)
self.card3 = Card("Diamonds", 1)
self.cards = [self.card1, self.card2, self.card3]
def test_check_for_ace(self):
check_if_ace = CardGame.check_for_ace(self, self.card3)
self.assertEqual(True, check_if_ace)
def test_highest_card(self):
highest_card = CardGame.highest_card(self, self.card1, self.card2)
self.assertEqual(7, highest_card.value)
def test_cards_total(self):
cards_total = CardGame.cards_total(self, self.cards)
self.assertEqual("You have a total of 10", cards_total)
|
#Christopher Hansen
#Programming for Data Science with Python - Udacity
import time
import pandas as pd
import numpy as np
import datetime
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
months = ('january', 'february', 'march', 'april', 'may', 'june')
days = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')  # Monday first to match pandas dayofweek (Monday == 0)
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
while True:
prompt = '\nPlease enter the city you would like to examine (Chicago, New York City, Washington):\n'
choice = input(prompt).lower().strip()
if choice in CITY_DATA.keys():
city = choice
break
else:
print('Incorrect input. Please try again.')
# get user input for month (all, january, february, ... june)
while True:
prompt = '\nPlease enter the month you would like to examine (All, January, February, ... June):\n'
choice = input(prompt).lower().strip()
if (choice in months) or (choice == 'all'):
month = choice
break
else:
print('Incorrect input. Please try again.')
# get user input for day of week (all, monday, tuesday, ... sunday)
while True:
prompt = '\nPlease enter the day you would like to examine (All, Monday, Tuesday, ... Sunday):\n'
choice = input(prompt).lower().strip()
if (choice in days) or (choice == 'all'):
day = choice
break
else:
print('Incorrect input. Please try again.')
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
# load the csv file into dataframe and choose the correct columns
#df = pd.read_csv(CITY_DATA[city], usecols=['Start Time', 'End Time', 'Trip Duration', 'Start Station', 'End Station', 'User Type'])
df = pd.read_csv(CITY_DATA[city])
# add columns to make filtering by month and day of week easier
df['Month'] = pd.DatetimeIndex(df['Start Time']).month
df['DayOfWeek'] = pd.DatetimeIndex(df['Start Time']).dayofweek
df['Hour'] = pd.DatetimeIndex(df['Start Time']).hour
# filter by month and day of week (pandas month is 1-based; pandas dayofweek is 0-based with Monday == 0)
if month != 'all':
df = df[df['Month'] == (months.index(month) + 1)]
if day != 'all':
df = df[df['DayOfWeek'] == days.index(day)]
return df
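# Worked example of the index mapping used in load_data (values assumed from the
# tuples defined at the top of this file): months.index('march') + 1 == 3 matches
# pandas' 1-based DatetimeIndex.month, and with the Monday-first `days` tuple,
# days.index('wednesday') == 2 matches pandas' 0-based dayofweek (Monday == 0).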
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# display the most common month
most_common_month = df['Month'].mode()[0]
print(months[most_common_month - 1] + ' is the month with the most trips taken in the filtered data.')
# display the most common day of week
most_common_day = df['DayOfWeek'].mode()[0]
print(days[most_common_day] + ' is the day of the week with the most trips taken in the filtered data.')
# display the most common start hour
start_hour = df['Hour'].mode()[0]
if start_hour < 12:
am_time = True
else:
am_time = False
if am_time:
print(str(start_hour) + ' am is the hour with the most trips taken in the filtered data.')
else:
print(str(start_hour - 12) + ' pm is the hour with the most trips taken in the filtered data.')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
# display most commonly used start station
most_common_start_station = str(df['Start Station'].mode()[0])
print(most_common_start_station + ' is the most common start station in the filtered data.')
# display most commonly used end station
most_common_end_station = str(df['End Station'].mode()[0])
print(most_common_end_station + ' is the most common end station in the filtered data.')
# display most frequent combination of start station and end station trip
df['Trip'] = (df['Start Station'] + ' to ' + df['End Station'])
most_common_trip = str(df['Trip'].mode()[0])
print(most_common_trip + ' is the most common trip made in the filtered data.')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# display total travel time
total_travel_seconds = df['Trip Duration'].sum()
years = int(total_travel_seconds // 31536000)
days = int((total_travel_seconds % 31536000) // 86400)
hours = int(((total_travel_seconds % 31536000) % 86400) // 3600)
minutes = int((((total_travel_seconds % 31536000) % 86400) % 3600) // 60)
seconds = int(((((total_travel_seconds % 31536000) % 86400) % 3600) % 60))
if (years > 0):
print('Total travel time: ' + str(years) + ' years, ' + str(days) + ' days, ' + str(hours) + ' hours, ' + str(minutes) + ' minutes, and ' + str(seconds) + ' seconds.')
else:
print('Total travel time: ' + str(days) + ' days, ' + str(hours) + ' hours, ' + str(minutes) + ' minutes, and ' + str(seconds) + ' seconds.')
# display mean travel time
mean_travel_seconds = df['Trip Duration'].mean()
days = int(mean_travel_seconds // 86400)
hours = int((mean_travel_seconds % 86400) // 3600)
minutes = int(((mean_travel_seconds % 86400) % 3600) // 60)
seconds = int((((mean_travel_seconds % 86400) % 3600) % 60))
print('Mean travel time: ' + str(minutes) + ' minutes and ' + str(seconds) + ' seconds.')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
if ('Gender' not in df.columns) or ('Birth Year' not in df.columns):
print('\nUser Stats not available for this data set.\n')
return
print('\nCalculating User Stats...\n')
start_time = time.time()
# Display counts of user types
user_types = df['User Type'].value_counts()
print("\nDistribution of user types:")
#print(user_types)
for name, val in user_types.items():
print(name + ': ' + str(val))
# Display counts of gender
genders = df['Gender'].value_counts()
print("\nDistribution of genders:")
#print(genders)
for name, val in genders.items():
print(name + ': ' + str(val))
# Display earliest, most recent, and most common year of birth
earliest = int(df['Birth Year'].min())
print('\nEarliest Birth Year: ' + str(earliest))
most_recent = int(df['Birth Year'].max())
print('\nMost Recent Birth Year: ' + str(most_recent))
most_common = int(df['Birth Year'].mode()[0])
print('\nMost Common Birth Year: ' + str(most_common))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def raw_data(df):
"""Displays five rows of raw data for every request by the user"""
prompt = '\nWould you like to print 5 more lines of raw data? Enter yes or no.\n'
place = 0
while True:
choice = input(prompt).lower().strip()
if choice not in ('yes', 'ye', 'y'):
break
print(df.iloc[place:place + 5].to_string())
place += 5
print('-'*40)
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
raw_data(df)
time_stats(df)
station_stats(df)
trip_duration_stats(df)
user_stats(df)
restart = input('\nWould you like to restart? Enter yes or no.\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main()
|
from .disability import DisabilityObserver
from .mortality import MortalityObserver
from .risk import CategoricalRiskObserver
from .disease import DiseaseObserver
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 21 07:45:14 2018
@author: jeremy.meyer
"""
import pandas as pd
#Series. s.index are row names. Can mix variable types
s = pd.Series([1,2,3,4,5,6])
s.index = ['label1', 'l2', 'l3', 'l4', 'l5', 'l6']
print(s)
#Subsetting
s[1]
s[:2] #First 2 elements
s[1:4]
s[-2:] #Last two elements
s[[0,2]] #First and third elements
s['label1'] #Also dictionary-like
#Will subset according to boolean T/Fs
s[[True, False, True, True, False, False]]
s[[1,4,2,3]] #And subset out of order
s[s > s.mean()] #Elements greater than the mean
sum(s > 2) #Number of elements greater than 2
#Vector Math. Adds according to labels
s*3
s == 3
s + s*s
s[1:] + s[:-1]
#Data Frames
d = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']), \
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
d = pd.DataFrame(d)
d.index #Row Names
d.columns #Column Names
#Change row names
d.index = range(1,5)
#Rename last column
d.rename(columns={d.columns[-1]: '2'}, inplace=True)
#DataFrames built from a list of tuples/Series stack them as rows (like rbind in R)
df = pd.DataFrame([('A', 'B', 'C'), (1,2,3), ('THis', 'is', 'aTest')], columns = ['Vectors', 'are_stored', 'as_rows'])
df2 = pd.DataFrame([pd.Series([10,20,30]), pd.Series([4,5,6])]) #From Series
#Column-wise concatenation (like cbind in R) uses axis=1; axis=0 would stack rows
df3 = pd.concat([df, df2], axis=1)
# This is by column name, not position
df3[0] = [0, 10, 100]
sleep = pd.read_csv('sleep.csv', header=None, index_col=0)
sleep.columns #Column names
sleep.index #Row Names
sleep[8] #Subsetting by column
sleep[sleep[8] > 8] #Only rows with sleep quality > 8
|
data = ""
with open("1day5data.txt") as f:
data = f.read()
data = data.split("\n")
for i in range(0, len(data)):
data[i] = int(data[i])
def cycle(data):
i = 0
previ = 0
stillInLoop = True
steps = 0
while stillInLoop:
if i > (len(data) - 1):
break
previ = i
i += data[i]
data[previ] += 1
steps += 1
return steps
print(str(cycle(data)))
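# Hedged sanity check (example input assumed, not from the original): with jump
# offsets [0, 3, 0, 1, -3] and the increment-after-jump rule above, escape takes 5 steps.
# Note that cycle() mutates its argument, so pass a fresh list.
assert cycle([0, 3, 0, 1, -3]) == 5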
|
"""
Various utils to retreive from database and export to file
"""
import config
from lib.Database import Database
import os
import shutil
from uuid import UUID
from dateutil.parser import parse as dateparse
import logging
from lib.pymot.pymot import MOTEvaluation
from mpyx.F import EZ, As, By, F
from mpyx.F import Serial, Parallel, Broadcast, S, P, B
from mpyx.F import Iter, Const, Print, Map, Filter, Batch, Seq, Zip, Read, Write
async def main(args):
if len(args) == 0: print("What evaluation would you like to run? [pymot]")
else:
if args[0] == "pymot":
    await evaluatePymot(args[1:])
elif args[0] == "pymotMethod":
    await evaluateMethod(args[1:])
elif args[0] == "pymotSegment":
    await evaluatePymotBySegment2(args[1:])
elif args[0] == "pymotMethodSegment":
    await evaluateMethodSegment(args[1:])
else:
    print("Invalid export sub-command")
async def evaluateMethodSegment(args):
groundTruth = args[0]
method = args[1]
db = Database()
s = """
SELECT experiment, method
FROM experiment
WHERE method LIKE '{method}%'
ORDER BY method ASC
"""
q = s.format(method=method)
logFile = os.path.join(config.data_dir, "pymot", "eval_"+method+"_"+groundTruth+".log")
logger = logging.getLogger('pyMotEval')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(logFile)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
async for result in db.query(q):
experimentA = groundTruth
experimentB = str(result["experiment"])
print("Evaluating Experiment:", experimentB)
logger.info("Evaluating Experiment:"+ str(experimentB))
print("Method:", result["method"])
logger.info("Method:"+ str(result["method"]) )
await evaluatePymotBySegment2([experimentA, experimentB])
# logger.info("MOTA "+str(mota))
async def evaluateMethod(args):
groundTruth = args[0]
method = args[1]
db = Database()
s = """
SELECT experiment, method
FROM experiment
WHERE method LIKE '{method}%'
ORDER BY method ASC
"""
q = s.format(method=method)
logFile = os.path.join(config.data_dir, "pymot", "eval_"+method+"_"+groundTruth+".log")
logger = logging.getLogger('pyMotEval')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(logFile)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
async for result in db.query(q):
experimentA = groundTruth
experimentB = str(result["experiment"])
print("Evaluating Experiment:", experimentB)
logger.info("Evaluating Experiment:"+ str(experimentB))
print("Method:", result["method"])
logger.info("Method:"+ str(result["method"]) )
mota = await evaluatePymot([experimentA, experimentB])
logger.info("MOTA "+str(mota))
async def evaluatePymot(args):
'''
A will be considered ground truth
B will be up for evaluation
feature request:
- evaluate by segment
'''
db = Database()
experimentA = args[0]
experimentB = args[1]
jsonA = {"frames": [], "class": "video", "filename": experimentA}
jsonB = {"frames": [], "class": "video", "filename": experimentB}
q = """
SELECT frame, number
FROM frame
WHERE experiment = '{experiment}'
ORDER BY number ASC
"""
sA = q.format(experiment = experimentA)
# print("converting experiment A to pymot json...")
async for result in db.query(sA):
jsonA["frames"].append({"timestamp": result["number"],
"num": result["number"],
"class": "frame",
"annotations": []})
sB = q.format(experiment = experimentB)
#
# print("converting experiment B to pymot json...")
async for result in db.query(sB):
jsonB["frames"].append({"timestamp": result["number"],
"num": result["number"],
"class": "frame",
"hypotheses": []})
q = """
SELECT p.particle, f.number, t.location, p.radius
FROM track t, particle p, frame f
WHERE t.particle=p.particle
AND t.frame = f.frame
AND f.experiment = '{experiment}'
"""
sA = q.format(experiment=experimentA)
async for result in db.query(sA):
r = {"dco": False,
"height": result["radius"]*2.0,
"width": result["radius"]*2.0,
"id": result["particle"],
"x": result["location"].x,
"y": result["location"].y
}
jsonA["frames"][result["number"]]["annotations"].append(r)
sB = q.format(experiment=experimentB)
async for result in db.query(sB):
r = {"dco": False,
"height": result["radius"]*2.0,
"width": result["radius"]*2.0,
"id": result["particle"],
"x": result["location"].x,
"y": result["location"].y
}
jsonB["frames"][result["number"]]["hypotheses"].append(r)
# print('lA:',len(jsonA["frames"]), 'lB:',len(jsonB["frames"]))
evaluator = MOTEvaluation(jsonA, jsonB, 5)
evaluator.evaluate()
evaluator.printResults()
return evaluator.getMOTA()
async def evaluatePymotBySegment(args):
'''
A will be considered ground truth
B will be up for evaluation
feature request:
- evaluate by segment
'''
db = Database()
experimentA = args[0]
experimentB = args[1]
jsonA = {"frames": [], "class": "video", "filename": experimentA}
jsonB = {"frames": [], "class": "video", "filename": experimentB}
q = """
SELECT sA.segment as segmentA, sB.segment as segmentB, sA.number as number
FROM segment sA, segment sB
WHERE sA.experiment = '{experimentA}'
AND sB.experiment = '{experimentB}'
AND sA.number = sB.number
ORDER BY sA.number ASC
"""
s = q.format(experimentA = experimentA,
experimentB = experimentB)
async for result in db.query(s):
segmentA = result["segmenta"]
segmentB = result["segmentb"]
segmentNumber = result["number"]
print("Evaluating segment " + str(segmentNumber) + " ...")
q = """
SELECT frame, number
FROM frame
WHERE segment = '{segment}'
ORDER BY number ASC
"""
sA = q.format(segment=segmentA)
# print("converting experiment A to pymot json...")
async for result in db.query(sA):
jsonA["frames"].append({"timestamp": result["number"],
"num": result["number"],
"class": "frame",
"annotations": []})
sB = q.format(segment=segmentB)
#
# print("converting experiment B to pymot json...")
async for result in db.query(sB):
jsonB["frames"].append({"timestamp": result["number"],
"num": result["number"],
"class": "frame",
"hypotheses": []})
q = """
SELECT p.particle, f.number, t.location, p.radius
FROM track t, particle p, frame f
WHERE t.particle=p.particle
AND t.frame = f.frame
AND f.segment = '{segment}'
"""
sA = q.format(segment=segmentA)
async for result in db.query(sA):
r = {"dco": False,
"height": result["radius"]*2.0,
"width": result["radius"]*2.0,
"id": result["particle"],
"x": result["location"].x,
"y": result["location"].y
}
jsonA["frames"][result["number"]]["annotations"].append(r)
sB = q.format(segment=segmentB)
async for result in db.query(sB):
r = {"dco": False,
"height": result["radius"]*2.0,
"width": result["radius"]*2.0,
"id": result["particle"],
"x": result["location"].x,
"y": result["location"].y
}
jsonB["frames"][result["number"]]["hypotheses"].append(r)
# print('lA:',len(jsonA["frames"]), 'lB:',len(jsonB["frames"]))
evaluator = MOTEvaluation(jsonA, jsonB, 5)
evaluator.evaluate()
evaluator.printResults()
return
async def evaluatePymotBySegment2(args):
logger = None
if True:
logger = logging.getLogger('pyMotEval')
class SomeDataSource(F):
def setup(self):
self.myAsyncFoo = self.myAsync(self.foo)
self.stop()
async def foo(self):
'''
A will be considered ground truth
B will be up for evaluation
feature request:
- evaluate by segment
'''
db = Database()
experimentA = args[0]
experimentB = args[1]
q = """
SELECT sA.segment as segmentA, sB.segment as segmentB, sA.number as number
FROM segment sA, segment sB
WHERE sA.experiment = '{experimentA}'
AND sB.experiment = '{experimentB}'
AND sA.number = sB.number
ORDER BY sA.number ASC
"""
s = q.format(experimentA = experimentA,
experimentB = experimentB)
async for result in db.query(s):
jsonA = {"frames": [], "class": "video", "filename": experimentA}
jsonB = {"frames": [], "class": "video", "filename": experimentB}
minFrameInSegment = None
segmentA = result["segmenta"]
segmentB = result["segmentb"]
segmentNumber = result["number"]
# print("Evaluating segment " + str(segmentNumber) + " ...")
q = """
SELECT frame, number
FROM frame
WHERE segment = '{segment}'
ORDER BY number ASC
"""
sA = q.format(segment=segmentA)
# print("converting experiment A to pymot json...")
async for result in db.query(sA):
if minFrameInSegment is None:
minFrameInSegment = result["number"]
jsonA["frames"].append({"timestamp": result["number"],
"num": result["number"],
"class": "frame",
"annotations": []})
sB = q.format(segment=segmentB)
#
# print("converting experiment B to pymot json...")
async for result in db.query(sB):
jsonB["frames"].append({"timestamp": result["number"],
"num": result["number"],
"class": "frame",
"hypotheses": []})
q = """
SELECT p.particle, f.number, t.location, p.radius
FROM track t, particle p, frame f
WHERE t.particle=p.particle
AND t.frame = f.frame
AND f.segment = '{segment}'
"""
sA = q.format(segment=segmentA)
async for result in db.query(sA):
r = {"dco": False,
"height": result["radius"]*2.0,
"width": result["radius"]*2.0,
"id": result["particle"],
"x": result["location"].x,
"y": result["location"].y
}
jsonA["frames"][result["number"]-minFrameInSegment]["annotations"].append(r)
sB = q.format(segment=segmentB)
async for result in db.query(sB):
r = {"dco": False,
"height": result["radius"]*2.0,
"width": result["radius"]*2.0,
"id": result["particle"],
"x": result["location"].x,
"y": result["location"].y
}
jsonB["frames"][result["number"]-minFrameInSegment]["hypotheses"].append(r)
self.put((jsonA, jsonB))
class FirstStageProcessing(F):
def do(self, i):
evaluator = MOTEvaluation(i[0], i[1], 5)
evaluator.evaluate()
mota = evaluator.getMOTA()
if logger is not None:
logger.info("MOTA "+str(mota))
self.put(mota)
# Set up a simple data source.
def Src(n = 5):
return Iter(range(1, (n+1)))
# Print info for each demo
def demo(description, ez):
print("\n")
print(description)
# print("\n")
# ez.printLayout()
# print(ez.graph())
#ez.watch(0.1)
ez.start().join()
# demo("Sanity.",
# EZ(Src(), Print("Serial"))
# )
# async for i in SomeDataSource():
# print(i)
# demo("Do pymot stuff",
# EZ(SomeDataSource(),
# FirstStageProcessing(),
# Print()
# )
# )
EZ(SomeDataSource(),
Seq(As(32, FirstStageProcessing)),
Print()
).start().join()
# # print('lA:',len(jsonA["frames"]), 'lB:',len(jsonB["frames"]))
# evaluator = MOTEvaluation(jsonA, jsonB, 5)
# evaluator.evaluate()
# evaluator.printResults()
return
|
from django.db import models
# Create your models here.
class class10(models.Model):
name = models.CharField(max_length=30, blank=True)
class Meta:
db_table = '_App1_class10'
class class2(models.Model):
name = models.CharField(max_length=30, blank=True)
class Meta:
db_table = '_App1_class2'
class class3(models.Model):
name = models.CharField(max_length=30, blank=True)
|
"""
剑指 Offer 39. The number that appears more than half the time in an array.
One number appears more than half of the array's length; find that number.
You may assume the array is non-empty and always contains a majority element.
"""
"""
Brute force first: one pass over the array, using a hash table to count each value.
"""
def majorityElement( nums: list) -> int:
hash = {}
for i in nums:
if i not in hash:
hash[i] = 1
else:
hash[i]+=1
for i in hash:
if hash[i] > len(nums)//2:
return i
"""
Although this is an easy problem, several algorithms apply, so there is still something to learn.
Method 2: sort the array; the element in the middle must be the majority element, because the problem guarantees it occupies more than half of the array.
"""
def majorityElement2( nums: list) -> int:
nums = sorted(nums)
return nums[len(nums)//2]
"""
The third method is the most interesting: the Boyer-Moore majority vote algorithm, which determines which value holds the most "votes".
The idea: assume the first number is the majority candidate; on seeing the same number, vote += 1, otherwise vote -= 1.
When the vote drops to 0, the next number becomes the new candidate.
Boyer-Moore majority vote:
Let x be the majority element of the input array nums and n its length.
Corollary 1: if the majority element counts as +1 and every other element as -1, the total vote over all numbers is > 0.
Corollary 2: if the vote over the first a numbers is 0, the vote over the remaining (n - a) numbers is still > 0, i.e. the majority element of those (n - a) numbers is still x.
Author: jyd
Link: https://leetcode-cn.com/problems/shu-zu-zhong-chu-xian-ci-shu-chao-guo-yi-ban-de-shu-zi-lcof/solution/mian-shi-ti-39-shu-zu-zhong-chu-xian-ci-shu-chao-3/
Source: LeetCode (力扣). Copyright belongs to the author; contact the author for commercial reuse and credit the source for non-commercial reuse.
"""
def majorityElement3( nums: list) -> int:
vote = 1
candidate = nums[0]
for i in range(1,len(nums)):
if vote == 0:
candidate = nums[i]
vote += 1
continue
if nums[i] == candidate:
vote += 1
else:
vote -= 1
return candidate
if __name__ == '__main__':
res = majorityElement3( [10,9,9,9,10])
print(res)
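# Hedged cross-check (sample input assumed): all three implementations should agree
# whenever the input really does contain a strict majority element.
sample = [2, 2, 1, 1, 2]
assert majorityElement(sample) == majorityElement2(sample) == majorityElement3(sample) == 2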
|
#! /usr/bin/python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rc('axes', titlesize=16) # fontsize of the axes title
plt.rc('axes', labelsize=16) # fontsize of the x and y labels
plt.rc('xtick', labelsize=12) # fontsize of the tick labels
plt.rc('ytick', labelsize=12) # fontsize of the tick labels
plt.rc('legend', fontsize=12) # legend fontsize
#plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
base_runtime=[1.4022, 9.7264, 3.8728, 2.1245, 8.6928]
raw_data = {'graph': ['Youtube', 'LiveJournal', 'Pokec', 'RMAT-19-32', 'RMAT-21-32'],
'2-pipeline-baseline': [1.19384, 9.46781, 4.02860, 2.21747, 8.83400],
'2-pipeline-balanced': [1.14972, 9.98414, 4.28803, 2.21594, 8.82374],
'3-pipeline-baseline': [1.15480, 9.67210, 4.16424, 2.22288, 8.87396],
'3-pipeline-balanced': [67.604, 61.359, 68.470, 96.326, 85.312]
}
df = pd.DataFrame(raw_data, columns = ['graph', '2-pipeline-baseline', '2-pipeline-balanced', '3-pipeline-baseline', '3-pipeline-balanced'])
label=('2-pipeline-baseline', '2-pipeline-balanced', '3-pipeline-base', '3-pipeline-balanced')
# Setting the positions and width for the bars
pos = list(range(len(df['graph'])))
width = 0.1
ecolor='k'
lw=0.5
print(pos)
#cmap = plt.get_cmap('jet')
#colors = cmap(np.linspace(0, 1.0, len(label)))
# Plotting the bars
fig, ax = plt.subplots(figsize=(10,5))
# Create a bar with mid_score data,
# in position pos + some width buffer,
plt.bar([p + width for p in pos],
#using df['mid_score'] data,
df['2-pipeline-baseline'],
# of width
width,
linewidth = lw,
edgecolor = ecolor,
# with alpha 0.5
alpha=0.5,
hatch=4*'.',
# with color
color='w',
# with label the second value in first_name
label=label[0])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*2 for p in pos],
#using df['post_score'] data,
df['2-pipeline-balanced'],
# of width
width,
linewidth = lw,
edgecolor = ecolor,
# with alpha 0.5
alpha=0.5,
# with color
color='w',
hatch=4*'x',
# with label the third value in first_name
label=label[1])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*3 for p in pos],
#using df['post_score'] data,
df['3-pipeline-baseline'],
# of width
width,
linewidth = lw,
edgecolor = ecolor,
# with alpha 0.5
alpha=0.5,
# with color
color='w',
hatch=4*'-',
# with label the third value in first_name
label=label[2])
# Create a bar with post_score data,
# in position pos + some width buffer,
plt.bar([p + width*4 for p in pos],
#using df['post_score'] data,
df['3-pipeline-balanced'],
# of width
width,
linewidth = lw,
edgecolor = ecolor,
# with alpha 0.5
alpha=0.5,
# with color
color='w',
hatch=4*'o',
# with label the third value in first_name
label=label[3])
# Set the y axis label
ax.set_ylabel('Normalized Performance')
# Set the chart's title
#ax.set_title('Test Subject Scores')
# Set the position of the x ticks
ax.set_xticks([p + 3 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(df['graph'])
#vals=ax.get_yticks()
#ax.set_yticklabels(['{:3.2f}%'.format(x) for x in vals])
ax.grid(linewidth=0.5)
ax.xaxis.grid(False)
# Setting the x-axis and y-axis limits
#plt.xlim(min(pos)-width, max(pos)+width*4)
#plt.ylim([0, max(df['c1'] + df['c2'] + df['c3'] + df['c4'] + df['c5'] + df['c6'] + df['c7'] + df['c8'] + df['c9'])] )
# Adding the legend and showing the plot
plt.legend(['2-pipeline-baseline', '2-pipeline-balanced', '3-pipeline-baseline', '3-pipeline-balanced'],
loc='upper left',
ncol=2)
#bbox_to_anchor=(0.1, 0.3))
#plt.grid()
#plt.show()
plt.savefig("../pipeline-duplicate.pdf", bbox_inches='tight')
|
"""Generic operation class. """
class Operation(object):
languages = None
tasks = None
seed = 0
heavy = False
def __init__(self, seed=0, verbose=False):
self.seed = seed
self.verbose = verbose
if self.verbose:
print(f"Loading Operation {self.name()}")
@classmethod
def is_heavy(cls):
return cls.heavy
@classmethod
def domain(cls):
return cls.tasks, cls.languages
@classmethod
def name(cls):
return cls.__name__
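# A minimal subclass sketch (hypothetical names, not part of the original module)
# showing how the class-level metadata is surfaced by the helper classmethods.
class LowercaseOp(Operation):
    languages = ["en"]
    tasks = ["text-classification"]
    heavy = False

    def __call__(self, text):
        return text.lower()


if __name__ == "__main__":
    op = LowercaseOp(seed=1, verbose=True)   # prints "Loading Operation LowercaseOp"
    print(LowercaseOp.name())                # -> LowercaseOp
    print(LowercaseOp.domain())              # -> (['text-classification'], ['en'])
    print(LowercaseOp.is_heavy())            # -> False
    print(op("Hello World"))                 # -> hello world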
|
# Detect faces from the webcam, based on facerec_from_webcam_faster.py
# Show the stream in real time, watermark it, and save the annotated frames to disk
import face_recognition
import cv2
video_capture = cv2.VideoCapture(0)
# Known faces to look for
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
lijiawei_image = face_recognition.load_image_file("lijiawei.jpg")
lijiawei_face_encoding = face_recognition.face_encodings(lijiawei_image)[0]
known_face_encodings = [
obama_face_encoding,
lijiawei_face_encoding]
known_face_names = [
"Obama",
"lijiawei"]
# num is used to number the saved screenshots
num = 0
# process_this_frame controls whether recognition runs on the current frame
process_this_frame = True
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Shrink the frame to 1/4 size and convert from BGR to RGB
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
# Run recognition only on every other frame to save time
if process_this_frame:
    face_locations = face_recognition.face_locations(rgb_small_frame)
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
# Toggle outside the if so recognition alternates frames instead of stopping after the first pass
process_this_frame = not process_this_frame
# Display and save the results
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# Scale the face locations back up, since the frame was shrunk to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Match the detected face against the known faces to find a name
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
for i, matched in enumerate(matches):
    if matched:
        name = known_face_names[i]
# Watermark the frame and save a numbered screenshot
cv2.putText(frame, "cam", (50,100), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 1)
num = num + 1
filename = "output/frames_%s.jpg" % num
cv2.imwrite(filename, frame)
# Draw a box around the face and a label with the matched name below it
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Show the live frame
cv2.imshow('Video', frame)
#Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#Release
video_capture.release()
cv2.destroyAllWindows()
|
# Template matching (NCC) program
# Reference URL = https://algorithm.joho.info/programming/python/opencv-template-matching-ncc-py/
# Draws every detection whose matching score is above a configurable threshold
# Added in ver1:
#   Rotation handling (done); the rotated image is padded so the whole image stays visible.
# Added in ver2:
#   Run template matching only on the non-black part of the rotated image (not achieved)
#   Q. Could this be solved by rotating the input image and matching against that?
#   A. Yes. The results differ slightly between the pure-Python matcher and the OpenCV function.
# Added in ver4:
#   Added template matching via OpenCV; it is faster than the pure-Python version
#   (the whole run finishes within about a minute).
# Added in ver5:
#   Switched to handling front-facing images only.
#   Removed the pure-Python template matching function, the rotation code,
#   the rotation helper, and the drawing helper.
#   Added a colour-histogram comparison function
#   (reference URL https://qiita.com/best_not_best/items/c9497ffb5240622ede01)
#   and the histogram similarity score is now printed.
# 3511.jpg
import cv2
import numpy as np
# Rescale the template image; `scale` is interpreted in tenths (e.g. 14 -> 1.4x)
def get_scale_template(scale, template):
    scale = scale/10
    # As a stopgap, the original template size is hard-coded here
    w_original = 195
    h_original = 101
template = cv2.resize(template, dsize=(int(w_original*scale), int(h_original*scale)))
return template
# Similarity matching using colour histograms
def color_hist_compare(temp_img, crop_img):
    # Resize both images to a common size (needed once the scale has been changed)
IMG_SIZE = (195,101)
temp_img = cv2.resize(temp_img, IMG_SIZE)
cv2.imwrite("./output/temp.png",temp_img)
crop_img = cv2.resize(crop_img, IMG_SIZE)
cv2.imwrite("./output/hikaku_taisyou.png",crop_img)
# Compute the histograms of the template and the cropped image and compare them (method 0 = correlation)
target_hist = cv2.calcHist([temp_img], [0], None, [256], [0, 256])
comparing_hist = cv2.calcHist([crop_img], [0], None, [256], [0, 256])
ret = cv2.compareHist(target_hist, comparing_hist, 0)
return ret
def main(): #=====================================
FileName = "3511"
# Load the input image and the template image
img_input = cv2.imread("../../vm_full_img/IMG_{}.jpg".format(FileName))
#img_input = cv2.imread("../../vm_1911_same_button/IMG_3514.jpg")
temp = cv2.imread("../../temp_img/IMG_1911.jpg")
# Convert both to grayscale
gray_input = cv2.cvtColor(img_input, cv2.COLOR_RGB2GRAY)
temp = cv2.cvtColor(temp, cv2.COLOR_RGB2GRAY)
# Height and width of the template image
h, w = temp.shape
# Apply the scale conversion to the template
scale = 14
temp = get_scale_template(scale,temp)
ht,wt = temp.shape
print(ht,wt)
# Template matching with OpenCV
res = cv2.matchTemplate(gray_input,temp,cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(img_input, pt, (pt[0] + wt, pt[1] + ht), (0,0,255), 2)
# Save the colour image with all above-threshold matches drawn on it
cv2.imwrite("./output/ikiti_ijou_{}_{}_{}.jpg".format(scale,FileName,threshold), img_input)
# Reload the colour image and take the location with the maximum similarity score
img_input = cv2.imread("../../vm_full_img/IMG_{}.jpg".format(FileName))
#img_input = cv2.imread("../../vm_1911_same_button/IMG_3514.jpg")
min_value, max_value, min_pt, max_pt = cv2.minMaxLoc(res)
print(max_value)
pt = max_pt
# (commented out) draw a rectangle at the location with the highest similarity
#cv2.rectangle(img_input, (pt[0], pt[1] ), (pt[0] + wt, pt[1] + ht), (0,0,200), 3)
#cv2.imwrite("./output/whre_max_pt.png", img_input)
# Crop the region with the highest similarity score
similar_max_img = img_input[ pt[1] : pt[1]+ht, pt[0] : pt[0]+wt] # img[top : bottom, left : right]
cv2.imwrite("./output/max_crop.png", similar_max_img)
# (commented out) test with an arbitrary crop instead
#similar_max_img_t = img_input[ 644 : 794, 871 : 1159]
# Compare colour histograms between the template and the best match
ret = color_hist_compare(temp, similar_max_img)
print("Similarity: {}".format(ret))
#=====================================#
if __name__ == "__main__":
main()
|
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
TABLE_SCHEMA = (
'idkey:STRING, '
'fecha:STRING, '
'CLIENTE:STRING, '
'NOMBRE_CLIENTE:STRING, '
'DOCUMENTO:STRING, '
'T_DCTO:STRING, '
'F_EXPEDIC:STRING, '
'F_VENCIM:STRING, '
'DIASVC:STRING, '
'DEUDA:STRING, '
'PAGADO:STRING, '
'POR_VENC:STRING, '
'VENC_0_30:STRING, '
'VENC_31_60:STRING, '
'VENC_61_90:STRING, '
'VENC_91:STRING, '
'SALDO:STRING, '
'TIPO_CARTERA:STRING, '
'NOMBRE_TIPO_CARTERA:STRING, '
'NOMBRE_VENDEDOR:STRING, '
'CENTRO_COSTOS:STRING, '
'NOMBRE_CENTRO_DE_COSTOS:STRING '
)
class formatearData(beam.DoFn):
def __init__(self, mifecha):
super(formatearData, self).__init__()
self.mifecha = mifecha
def process(self, element):
arrayCSV = element.split(';')
tupla= {'idkey' : str(uuid.uuid4()),
'fecha' : self.mifecha,
'CLIENTE' : arrayCSV[0],
'NOMBRE_CLIENTE' : arrayCSV[1],
'DOCUMENTO' : arrayCSV[2],
'T_DCTO' : arrayCSV[3],
'F_EXPEDIC' : arrayCSV[4],
'F_VENCIM' : arrayCSV[5],
'DIASVC' : arrayCSV[6],
'DEUDA' : arrayCSV[7],
'PAGADO' : arrayCSV[8],
'POR_VENC' : arrayCSV[9],
'VENC_0_30' : arrayCSV[10],
'VENC_31_60' : arrayCSV[11],
'VENC_61_90' : arrayCSV[12],
'VENC_91' : arrayCSV[13],
'SALDO' : arrayCSV[14],
'TIPO_CARTERA' : arrayCSV[15],
'NOMBRE_TIPO_CARTERA' : arrayCSV[16],
'NOMBRE_VENDEDOR' : arrayCSV[17],
'CENTRO_COSTOS' : arrayCSV[18],
'NOMBRE_CENTRO_DE_COSTOS' : arrayCSV[19]
}
return [tupla]
def run(archivo, mifecha):
gcs_path = "gs://ct-tech-tof"
gcs_project = "contento-bi"
mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
pipeline = beam.Pipeline(runner=mi_runer, argv=[
"--project", gcs_project,
"--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
"--temp_location", ("%s/dataflow_files/temp" % gcs_path),
"--output", ("%s/dataflow_files/output" % gcs_path),
"--setup_file", "./setup.py",
"--max_num_workers", "5",
"--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
])
lines = pipeline | 'Lectura de Archivo PFC' >> ReadFromText(archivo, skip_header_lines=1)
transformed = (lines | 'Formatear Data PFC' >> beam.ParDo(formatearData(mifecha)))
transformed | 'Escritura a BigQuery PFC' >> beam.io.WriteToBigQuery(
gcs_project + ":Contento_Tech.profitto_bd_carteras",
schema=TABLE_SCHEMA,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
)
jobObject = pipeline.run()
return ("Corrio Full HD")
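# Hypothetical invocation (the object path and date below are illustrative only; the
# real caller is not shown in this file): the first argument is the semicolon-delimited
# CSV in GCS, the second is the load date stamped into every row as `fecha`.
#
#     run("gs://ct-tech-tof/cartera/archivo_20190101.csv", "2019-01-01")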
|
from qcodes import Instrument  # needed for Instrument.close_all() below
from qcodes.instrument.visa import VisaInstrument
from qcodes.utils import validators as vals
import numpy as np
class Weinschel_8320(VisaInstrument):
'''
QCodes driver for the stepped attenuator
Weinschel is formerly known as Aeroflex/Weinschel
'''
def __init__(self, name, address, **kwargs):
super().__init__(name, address, terminator='\r', **kwargs)
self.add_parameter('attenuation', unit='dB',
set_cmd='CHAN1;ATTN {};',
get_cmd='CHAN1;ATTN?',
vals=vals.Enum(*np.arange(0, 100.1, 2).tolist()),
get_parser=float)
self.connect_message()
if __name__ == "__main__":
try:
Instrument.close_all()
except KeyError:
pass
Aeroflex = Weinschel_8320(name = "Aeroflex", address = "GPIB::10::INSTR")
Aeroflex.attenuation.set(20)
print( Aeroflex.attenuation.get() )
|
from channels.routing import route
from . import consumers
routes = [
route('websocket.connect', consumers.data_entry_connect, path=r'^(?P<game_id>\d+)/score/$'),
route('websocket.receive', consumers.data_entry_receive, path=r'^(?P<game_id>\d+)/score/$'),
route('websocket.disconnect', consumers.data_entry_disconnect, path=r'^(?P<game_id>\d+)/score/$'),
]
|
#!/usr/bin/python3
# pip3 install matplotlib
from matplotlib import pyplot as plt
import numpy as np
x,y=np.loadtxt('exm2.csv',unpack=True,delimiter=',')
plt.scatter(x,y,color='r',linewidth=10,label='today')
#add labels
plt.grid(True,color='k')
plt.title("My Chart")
plt.ylabel("y label")
plt.xlabel("x label")
plt.legend()
plt.show()
|
'''
Created on Feb 2, 2016
@author: henry
'''
# A program to print a multiplication table
num = int (input("Display multiplication table of?" ))
# Loop to iterate 15 times
for i in range(1,16):
print(num, 'x',i,'=',num*i)
|
from model.models import GRUMultiTask
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
import defines as df
from time import gmtime, strftime
from datetime import datetime
from sklearn.metrics import f1_score
try:
import cPickle as pickle
except ImportError: # Python 3.x
import pickle
# Preprocessed data paths
preprocessed_data_paths = {
'training_rumors_tweets path': os.path.join('data', 'preprocessed data', 'training', 'rumors_tweets.npy'),
'training_rumors_labels path': os.path.join('data', 'preprocessed data', 'training', 'rumors_labels.npy'),
'training_stances_tweets path': os.path.join('data', 'preprocessed data', 'training', 'stances_tweets.npy'),
'training_stances_labels path': os.path.join('data', 'preprocessed data', 'training', 'stances_labels.npy'),
'validation_rumors_tweets path': os.path.join('data', 'preprocessed data', 'validation', 'rumors_tweets.npy'),
'validation_rumors_labels path': os.path.join('data', 'preprocessed data', 'validation', 'rumors_labels.npy'),
'validation_stances_tweets path': os.path.join('data', 'preprocessed data', 'validation', 'stances_tweets.npy'),
'validation_stances_labels path': os.path.join('data', 'preprocessed data', 'validation', 'stances_labels.npy'),
}
batch_size_training_rumors = 5
batch_size_training_stances = 5
batch_size_validation_rumors = 4
batch_size_validation_stances = 12
loss_function = 'BCELoss' # supported options: CrossEntropyLoss | BCELoss | L1Loss | MSELoss
learning_rate = 0.0005 # learning rate
epochs = 50
is_dropout = True # can be True or False
drop_prob = 0.2
def main():
# create 'TensorDataset's for rumors
train_data_rumors = TensorDataset(torch.from_numpy(np.load(preprocessed_data_paths['training_rumors_tweets path'])),
torch.from_numpy(np.load(preprocessed_data_paths['training_rumors_labels path'])))
val_data_rumors = TensorDataset(torch.from_numpy(np.load(preprocessed_data_paths['validation_rumors_tweets path'])),
torch.from_numpy(np.load(preprocessed_data_paths['validation_rumors_labels path'])))
train_loader_rumors = DataLoader(train_data_rumors, shuffle=True, batch_size=batch_size_training_rumors, drop_last=True)
val_loader_rumors = DataLoader(val_data_rumors, shuffle=False, batch_size=batch_size_validation_rumors, drop_last=True)
# create 'TensorDataset's for stances
train_data_stances = TensorDataset(torch.from_numpy(np.load(preprocessed_data_paths['training_stances_tweets path'])),
torch.from_numpy(np.load(preprocessed_data_paths['training_stances_labels path'])))
val_data_stances = TensorDataset(torch.from_numpy(np.load(preprocessed_data_paths['validation_stances_tweets path'])),
torch.from_numpy(np.load(preprocessed_data_paths['validation_stances_labels path'])))
# create 'DataLoader's for stances
train_loader_stances = DataLoader(train_data_stances, shuffle=True, batch_size=batch_size_training_stances, drop_last=False)
val_loader_stances = DataLoader(val_data_stances, shuffle=False, batch_size=batch_size_validation_stances, drop_last=True)
# torch.cuda.is_available() checks and returns a Boolean True if a GPU is available, else it'll return False
is_cuda = torch.cuda.is_available()
# if we have a GPU available, we'll set our device to GPU. We'll use this device variable later in our code.
if is_cuda:
device = torch.device('cuda')
else:
device = torch.device('cpu')
# create the model
model = GRUMultiTask(input_length=df.input_length,
hidden_length_rumors=df.hidden_length_rumors,
hidden_length_stances=df.hidden_length_stances,
hidden_length_shared=df.hidden_length_shared,
loss_func=loss_function,
is_dropout=is_dropout,
drop_prob=drop_prob
)
model.to(device)
# Loss
if loss_function == 'BCELoss':
criterion = nn.BCELoss()
elif loss_function == 'L1Loss':
criterion = nn.L1Loss()
elif loss_function == 'MSELoss':
criterion = nn.MSELoss()
else: # the default
criterion = nn.CrossEntropyLoss()
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# train the model
model.train() # set the model to train mode
validation_min_loss = {
'min loss': np.Inf
}
last_save = {
'last save': 0,
}
# check time before training
start_time = gmtime()
start_time = strftime('%H:%M:%S', start_time)
h = model.init_hidden()
h_training = model.init_hidden()
for i in range(epochs):
print('\nEpoch: {}'.format(i + 1))
counter_batches = 0
sum_loss_training_rumors = 0
sum_loss_training_stances = 0
accuracy_r_avg_val = 0
accuracy_s_avg_val = 0
accuracy_r_avg_train = 0
accuracy_s_avg_train = 0
# iterate through all the batch
for (inputs_rumors, labels_rumors), (inputs_stances, labels_stances) \
in zip(train_loader_rumors, train_loader_stances):
counter_batches += 1
# make training
loss_rumors, accuracy, h_r = training_batch_iter(model, 'rumor', criterion, optimizer, device,
inputs_rumors, labels_rumors, h)
accuracy_r_avg_train = (accuracy_r_avg_train * (counter_batches - 1) + accuracy) / counter_batches
sum_loss_training_rumors += loss_rumors.item()
loss_stances, accuracy, h_s = training_batch_iter(model, 'stance', criterion, optimizer, device,
inputs_stances, labels_stances, h)
accuracy_s_avg_train = (accuracy_s_avg_train * (counter_batches - 1) + accuracy) / counter_batches
sum_loss_training_stances += loss_stances.item()
h_rumors, _, _ = h_r
_, h_stances, h_shared = h_s
h_training = h_rumors.clone(), h_stances.clone(), h_shared.clone()
# make validation and save the model if it is the best until now
if i > 3: # start validation only from epoch 5
if 1 == counter_batches:
print('Validation of model: ')
accuracy_r, accuracy_s = validation_or_testing(model, val_loader_rumors, val_loader_stances,
criterion, device, h_training, i+1,
validation_min_loss, loss_rumors, loss_stances,
counter_batches, last_save)
accuracy_r_avg_val = (accuracy_r_avg_val * (counter_batches - 1) + accuracy_r) / counter_batches
accuracy_s_avg_val = (accuracy_s_avg_val * (counter_batches - 1) + accuracy_s) / counter_batches
# print accuracy and loss of the training
training_loss_rumors = sum_loss_training_rumors / counter_batches
print('Training loss rumors: {:.3f}'.format(training_loss_rumors))
print('Training accuracy rumors: {:.3f}%'.format(accuracy_r_avg_train))
training_loss_stances = sum_loss_training_stances / counter_batches
print('Training loss stances: {:.3f}'.format(training_loss_stances))
print('Training accuracy stances: {:.3f}%'.format(accuracy_s_avg_train))
# print accuracy of the validation
if i > 3:
print('-----------------------------------------')
print('Validation accuracy rumors: {:.3f}%'.format(accuracy_r_avg_val))
print('Validation accuracy stances: {:.3f}%'.format(accuracy_s_avg_val))
print('-----------------------------------------')
print('Last save for model: epoch ' + str(last_save['last save']))
# check time so far
finish_time = gmtime()
finish_time = strftime('%H:%M:%S', finish_time)
formats = "%H:%M:%S"
time_so_far = datetime.strptime(finish_time, formats) - datetime.strptime(start_time, formats)
print('-----------------------------------------')
print('Total runtime: ', time_so_far)
print('-----------------------------------------')
torch.save(model.state_dict(), os.path.join('model', 'training_state_dict.pt'))
h_r, h_s, h_sh = h_training
h_dict = {'h_1': h_r.to('cpu').detach().numpy(), 'h_2': h_s.to('cpu').detach().numpy(), 'h_3': h_sh.to('cpu').detach().numpy()}
with open(os.path.join('model', 'h_prevs_training.pickle'), 'wb') as fp:
pickle.dump(h_dict, fp, protocol=pickle.HIGHEST_PROTOCOL)
def training_batch_iter(model, task_name, criterion, optimizer, device, inputs_batch, labels_batch, h):
"""
Makes the forward step of specific task and returns the loss and number of correct predictions
:param model: the multi-task model
:param task_name: 'rumor' or 'stances'
:param criterion: the loss function
:param optimizer: the optimizer
:param device: 'cpu' or 'gpu'
:param inputs_batch: the inputs batch
:param labels_batch: the target labels batch
:param h: the initial 'h_t's
:return: loss of the batch, accuracy (in percent), and the updated hidden states
"""
# set initial 'h' vectors of model's GRUs
h_prev_task_rumors, h_prev_task_stances, h_prev_shared = h
inputs_batch, labels_batch = inputs_batch.to(device), labels_batch.to(device)
# Clear gradients parameters
optimizer.zero_grad()
# Forward pass to get outputs of the model
if 'rumor' == task_name:
outputs, h_prev_shared, h_prev_task_rumors = model(inputs_batch, h_prev_shared, df.task_rumors_no,
h_prev_rumors=h_prev_task_rumors)
else: # 'stance' == task_name
outputs, h_prev_shared, h_prev_task_stances = model(inputs_batch, h_prev_shared, df.task_stances_no,
h_prev_stances=h_prev_task_stances)
# Calculate Loss
if loss_function == 'BCELoss' or loss_function == 'MSELoss':
loss = criterion(outputs, labels_batch.float())
elif loss_function == 'L1Loss':
loss = criterion(outputs, labels_batch)
else: # the default
loss = criterion(outputs, (torch.max(labels_batch, 1)[1]).to(device))
# Getting gradients parameters
loss.backward()
# Updating parameters
optimizer.step()
# count the number of correct outputs
num_correct = count_correct(outputs, labels_batch, task_name, device)
h = h_prev_task_rumors, h_prev_task_stances, h_prev_shared
accuracy = (num_correct / len(outputs)) * 100
return loss, accuracy, h
def validation_or_testing(model, data_loader_rumors, data_loader_stances, criterion, device, h, epoch_no=None,
min_loss_dict=None, loss_train_r=None, loss_train_s=None, batch_no=None, last_save_dict=None,
operation='validation'):
"""
Makes validation on specific task. Saves the dict of the model if it gave the best results so far.
Returns the number of correct predictions
:param model: the multi-task model
:param data_loader_rumors: DataLoader of rumor detection task
:param data_loader_stances: DataLoader of stance detection task
:param criterion: the loss function
:param device: 'cpu' or 'gpu'
:param h: h_prev_task_rumors, h_prev_task_stances, h_prev_shared
:param epoch_no: epoch no
:param min_loss_dict: dictionary that contains the min losses of each task
:param loss_train_r : the loss of the training at this point of time for rumor detection task
:param loss_train_s : the loss of the training at this point of time for stance detection task
:param batch_no: batch no
:param last_save_dict: dictionary containing the last epoch where a save happened for each task
:param operation: 'validation' or 'testing'
:return: accuracy (in percent) for the rumor task and for the stance task
"""
all_losses_r = [] # for rumor detection task
all_losses_s = [] # for stance detection task
model.eval() # set the model to evaluation mode
sum_correct_r = 0 # for rumor detection task
sum_correct_s = 0 # for stance detection task
total_r = 0
total_s = 0
total_out_r = [] # for rumor detection task
total_lab_r = [] # for rumor detection task
total_out_s = [] # for stance detection task
total_lab_s = [] # for stance detection task
# get initial 'h' vectors of model's GRUs
h_prev_task_rumors, h_prev_task_stances, h_prev_shared = h
# iterate through the batch
for (inputs_rumors, labels_rumors), (inputs_stances, labels_stances) \
in zip(data_loader_rumors, data_loader_stances):
inputs_rumors, labels_rumors = inputs_rumors.to(device), labels_rumors.to(device)
inputs_stances, labels_stances = inputs_stances.to(device), labels_stances.to(device)
# Forward pass for rumor task, to get outputs of the model
out_r, h_prev_shared, h_prev_task_rumors = model(inputs_rumors, h_prev_shared, df.task_rumors_no,
h_prev_rumors=h_prev_task_rumors)
# Forward pass for stance task, to get outputs of the model
out_s, h_prev_shared, h_prev_task_stances = model(inputs_stances, h_prev_shared, df.task_stances_no,
h_prev_stances=h_prev_task_stances)
# we need this for calculation of F1 scores. we do it only for testing
if 'testing' == operation:
total_out_r += [element.item() for element in (torch.max(out_r, 1)[1])]
total_lab_r += [element.item() for element in (torch.max(labels_rumors, 1)[1])]
total_out_s += [element.item() for element in (torch.max(out_s, 1)[1])]
total_lab_s += [element.item() for element in (torch.max(labels_stances, 1)[1])]
# count the number of correct outputs
sum_correct_r += count_correct(out_r, labels_rumors, 'rumor', device)
sum_correct_s += count_correct(out_s, labels_stances, 'stance', device)
total_r += len(out_r)
total_s += len(out_s)
# Calculate Loss
if loss_function == 'BCELoss' or loss_function == 'MSELoss':
loss_r = criterion(out_r, labels_rumors.float())
loss_s = criterion(out_s, labels_stances.float())
elif loss_function == 'L1Loss':
loss_r = criterion(out_r, labels_rumors)
loss_s = criterion(out_s, labels_stances)
else: # the default
loss_r = criterion(out_r, (torch.max(labels_rumors, 1)[1]).to(device))
loss_s = criterion(out_s, (torch.max(labels_stances, 1)[1]).to(device))
all_losses_r.append(loss_r.item())
all_losses_s.append(loss_s.item())
# calculation of F1 scores
if 'testing' == operation:
# print F1 micro and macro scores for rumor detection
score_f1_micro = f1_score(total_lab_r, total_out_r, average='micro')
score_f1_macro = f1_score(total_lab_r, total_out_r, average='macro')
print('For rumor detection:')
print('F1 micro score: {:.3f}'.format(score_f1_micro))
print('F1 macro score: {:.3f}\n'.format(score_f1_macro))
# print F1 micro and macro scores for stance detection
score_f1_micro = f1_score(total_lab_s, total_out_s, average='micro')
score_f1_macro = f1_score(total_lab_s, total_out_s, average='macro')
print('For stance detection:')
print('F1 micro score: {:.3f}'.format(score_f1_micro))
print('F1 macro score: {:.3f}'.format(score_f1_macro))
if 'validation' == operation:
print_and_save(model, epoch_no, batch_no, loss_train_r, loss_train_s, all_losses_r, all_losses_s, min_loss_dict,
last_save_dict, h)
accuracy_r = (sum_correct_r/total_r)*100
accuracy_s = (sum_correct_s/total_s)*100
return accuracy_r, accuracy_s
def print_and_save(model, epoch_no, batch_no, loss_train_r, loss_train_s, all_losses_r, all_losses_s, min_loss_dict,
last_save_dict, h):
"""
Prints the details of the validation and saves the dict of the model if it gave the best results so far.
:param model: the multi-task model
:param epoch_no: epoch no
:param batch_no: batch no
:param loss_train_r: the loss of the training for rumor detection task
:param loss_train_s: the loss of the training for stance detection task
:param all_losses_r: list with all the losses of the validation for rumor detection task
:param all_losses_s: list with all the losses of the validation for stance detection task
:param min_loss_dict: dictionary that contains the min losses of each task
:param last_save_dict: dictionary containing the last epoch where a save happened for each task
:param h: h_prev_task_rumors, h_prev_task_stances, h_prev_shared
:return: void
"""
model.train() # set the model to train mode
val_loss_avg = (np.mean(all_losses_r) + np.mean(all_losses_s)) / 2
print('Epoch: {}/{}...'.format(epoch_no, epochs),
'batch: {}\n'.format(batch_no),
'Loss train for rumors: {:.6f}...'.format(loss_train_r.item()),
'Loss train for stances: {:.6f}\n'.format(loss_train_s.item()),
'Val Loss for rumors: {:.6f}'.format(np.mean(all_losses_r)),
'Val Loss for stances: {:.6f}\n'.format(np.mean(all_losses_s)),
'Val Loss avg: {:.6f}'.format(val_loss_avg))
if val_loss_avg <= min_loss_dict['min loss']:
torch.save(model.state_dict(), os.path.join('model', 'model_state_dict.pt'))
# save the h_prev_task_rumors, h_prev_task_stances, h_prev_shared to file
h_r, h_s, h_sh = h
h_dict = {'h_1': h_r.to('cpu').detach().numpy(), 'h_2': h_s.to('cpu').detach().numpy(), 'h_3': h_sh.to('cpu').detach().numpy()}
with open(os.path.join('model', 'h_prevs.pickle'), 'wb') as fp:
pickle.dump(h_dict, fp, protocol=pickle.HIGHEST_PROTOCOL)
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...\n'.format(min_loss_dict['min loss'],
val_loss_avg))
min_loss_dict['min loss'] = val_loss_avg
last_save_dict['last save'] = epoch_no
else:
print()
def count_correct(outputs, labels_batch, task_name, device):
"""
Counts the number of correct outputs (predictions)
:param outputs: the predictions of the model
:param labels_batch: the labels (targets)
:param task_name: 'rumor' or 'stance'
:param device: 'cpu' or 'gpu'
:return: the number of correct predictions in the batch
"""
num_correct = 0
for out, label in zip(outputs, labels_batch):
max_idx = torch.argmax(out, dim=0)  # index of the highest class score for this example
if 'rumor' == task_name:
one_hot = torch.nn.functional.one_hot(torch.tensor([max_idx]), df.output_dim_rumors)
else: # 'stance' == task_name
one_hot = torch.nn.functional.one_hot(torch.tensor([max_idx]), df.output_dim_stances)
one_hot = torch.squeeze(one_hot, 0)
one_hot = one_hot.to(device)
if torch.equal(label, one_hot):
num_correct += 1
return num_correct
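# Illustrative sketch (added for clarity, never called by the script): the comparison
# count_correct performs, in isolation. Assumes the torch import at the top of this
# file; the tensors below are made-up values.
def _one_hot_comparison_example():
    outputs = torch.tensor([[0.1, 0.7, 0.2], [0.8, 0.1, 0.1]])  # fake class scores
    labels = torch.tensor([[0, 1, 0], [0, 0, 1]])               # fake one-hot targets
    preds = torch.nn.functional.one_hot(outputs.argmax(dim=1), num_classes=3)
    # argmax of row 0 is class 1 (matches its label), argmax of row 1 is class 0 (does not)
    return sum(int(torch.equal(p, l)) for p, l in zip(preds, labels))  # -> 1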
if __name__ == '__main__':
main()
|
from flask import g, jsonify, request, current_app, url_for
from .. import db  # assumed import path for the SQLAlchemy instance used in createPost below
from . import api
from ..model import kPost, User, Permission
from .decorators import permission_required
from .errors import forbidden
@api.route('/post/<int:userId>', methods=['GET' ,'POST'])
def get_userPost(userId):
kPost_ = kPost()
userPost = kPost_.display_userPost(userId=userId)
return jsonify(userPost)
@api.route('/post/<title>', methods=['GET' ,'POST'])
def searchPost_bytitle(title):
kPost_ = kPost()
returnPost = kPost_.readRow(title=title)
return jsonify(returnPost)
@api.route('/posts/<int:num>', methods=['GET' ,'POST'])
@api.route('/posts/', methods=['GET' ,'POST'])
def searchPost(num = None):
if num is None:
num = 3
kPost_ = kPost()
returnPost = kPost_.readRow(total=num)
return jsonify(returnPost)
@api.route('/createPost/', methods=['POST'])
@permission_required(Permission.WRITE)
def createPost():
post = kPost.from_json(request.json)
post.author = g.current_user
db.session.add(post)
db.session.commit()
return jsonify(post.to_json()), 201, {'Location': url_for('api.get_post', id=post.id)}
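# Example requests against these endpoints (illustrative only; the exact response shape
# depends on kPost.readRow/display_userPost, which are defined elsewhere):
#   GET  /post/42        -> posts belonging to user 42
#   GET  /posts/5        -> posts limited by the given number (defaults to 3)
#   POST /createPost/    -> create a post from a JSON body; requires WRITE permission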
|
l=[]
while (1):
print(" press 1 for add a person in a list\n press 2 for go in the room\n press 3 to exit ")
n=int(input(""))
if n==1:
name=input("")
l.append(name)
elif n==2:
if len(l)>0:
print(l.pop(0), ", now it's your turn")
else:
print("Ther is no person in the list for interview")
elif n==3:
break
else:
print("wrong input")
|
import os
import funcy
import requests
from mixpanel import Mixpanel
from bs4 import BeautifulSoup
from telegraph import Telegraph
mp = Mixpanel(os.environ.get('mix_token'))
telegraph = Telegraph()
telegraph.create_account(short_name='1337')
accents = {
'uk': {'class': 'sound audio_play_button pron-uk icon-audio',
'name': 'United Kingdom'},
'us': {'class': 'sound audio_play_button pron-us icon-audio',
'name': 'United States'}
}
audio_url = os.environ.get('audio_url')
search_url = os.environ.get('search_url')
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36'
}
def track_action(user_id, word):
mp.track(str(user_id), 'search', {'word': word})
def search(query):
result = requests.get(search_url.format(query),
timeout=1000,
headers=headers).json()
return [x['searchtext'] for x in result['results']]
def get_soup(text):
response = requests.get(audio_url.format(text.replace(" ", '+'), text),
headers=headers,
allow_redirects=False,
timeout=10000)
if response.status_code == 200:
return BeautifulSoup(response.text, features='lxml')
@funcy.ignore(AttributeError)
def get_audio(word, accent):
html = get_soup(word.replace(' ', '+'))
audio = html.find('div', class_=accents[accent]['class']).get('data-src-mp3')
if audio:
return {
'audio': audio,
'examples': get_examples(word, html)
}
def get_examples(word, soup):
examples = [x.text for x in soup.find_all('span', class_='x')]
if not examples:
return False
return make_telegraph(word, '\n-\n'.join(examples))
def make_telegraph(word, text):
response = telegraph.create_page(
f'Examples for [{word}]',
html_content=text
)
return 'http://telegra.ph/{}'.format(response['path'])
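# Illustrative usage (not executed on import): requires the audio_url/search_url
# environment variables above to point at a real dictionary site; the word is arbitrary.
def example_lookup():
    return get_audio('serendipity', 'uk')  # -> {'audio': ..., 'examples': ...} or None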
|
from lib.list import List
def check_node_and_size(head, k):
if head == None:
return (None, 1)
(node, size) = check_node_and_size(head.next, k)
returned_node = head if (size == k) else node
return (returned_node, size + 1)
def find_kth_to_last_elem(items, k):
node = check_node_and_size(items.head, k)[0]
return node.data if node else None
items = List()
items.add(1)
items.add(2)
items.add(3)
assert(find_kth_to_last_elem(items, 1)) == 3
assert(find_kth_to_last_elem(items, 2)) == 2
assert(find_kth_to_last_elem(items, 3)) == 1
assert(find_kth_to_last_elem(items, 4)) == None
assert(find_kth_to_last_elem(items, -4)) == None
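# An equivalent two-pointer sketch (added for comparison, using the same List interface
# assumed above): advance a lead pointer k nodes, then walk both pointers until the lead
# falls off the end; the trailing pointer then sits on the k-th node from the last.
def find_kth_to_last_elem_iterative(items, k):
    if k < 1:
        return None
    lead = items.head
    for _ in range(k):
        if lead is None:
            return None
        lead = lead.next
    trail = items.head
    while lead is not None:
        lead = lead.next
        trail = trail.next
    return trail.data if trail else None
assert(find_kth_to_last_elem_iterative(items, 1)) == 3
assert(find_kth_to_last_elem_iterative(items, 4)) == None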
|
from rest_framework import serializers
from main_app.models import Movie, Profile
from django.contrib.auth.models import User
from rest_framework_simplejwt.tokens import RefreshToken
class MovieSerializer(serializers.ModelSerializer):
class Meta:
model = Movie
fields = [
'id',
'title',
'year',
'imdbID',
'movie_type',
'poster',
'trailer',
'rating',
'runtime',
'genre',
'writer',
'director',
'cast',
'plot',
'language']
class SearchResultSerializer(serializers.ModelSerializer):
class Meta:
model = Movie
fields = [
'id',
'title',
'year',
'poster'
]
class UserSerializer(serializers.ModelSerializer):
name = serializers.SerializerMethodField(read_only=True)
isAdmin = serializers.SerializerMethodField(read_only=True)
savedMovies = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = ['id', 'username', 'email', 'name', 'isAdmin', 'savedMovies']
def get_name(self, obj):
name = obj.first_name
if name =='':
name = obj.email
return name
def get_isAdmin(self, obj):
return obj.is_staff
def get_savedMovies(self, obj):
profile = Profile.objects.all().filter(user=obj)
return profile[0].savedMovies
class UserSerializerWithToken(UserSerializer):
token = serializers.SerializerMethodField(read_only=True)
class Meta:
model = User
fields = ['id', 'username', 'email', 'name', 'isAdmin', 'token', 'savedMovies']
def get_token(self, obj):
token = RefreshToken.for_user(obj)
return str(token.access_token)
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTED BY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
#!/usr/bin/env python
#
import os
import os.path
import shutil
import stat
import fileinput
import sys
import re
from subprocess import Popen, PIPE
################################################################################################################
# Various file-related utility functions.
################################################################################################################
################################################################################################################
# Removes all contents of a folder. Exceptions can be added as a list (full path).
################################################################################################################
def remove_folder_contents(folder_path, exceptions=[]):
for file_name in os.listdir(folder_path):
file_path = os.path.join(folder_path, file_name)
if os.path.isfile(file_path) and file_path not in exceptions:
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
################################################################################################################
# Removes a folder and its contents (if they exist), and then creates the folder.
################################################################################################################
def recreate_folder(folder_path):
# First remove it, if it exists.
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
# Now create it.
create_folder_if_new(folder_path)
################################################################################################################
# Creates a folder path only if it does not exist.
################################################################################################################
def create_folder_if_new(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
################################################################################################################
# Protects a VM Image by making it read-only for all users.
################################################################################################################
def make_read_only_all(file_path):
if os.path.exists(file_path) and os.path.isfile(file_path):
os.chmod(file_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
################################################################################################################
# Makes the files of a VM Image available (read and write) to all users.
################################################################################################################
def make_read_write_all(file_path):
if os.path.exists(file_path) and os.path.isfile(file_path):
os.chmod(file_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
################################################################################################################
# Changes ownership of the given file to the user running the script.
# NOTE: needs sudo permissions.
################################################################################################################
def chown_to_current_user(file_path):
curr_user = os.geteuid()
curr_group = os.getegid()
# Execute sudo process to change ownership of potentially root owned file to the current user.
p = Popen(['sudo', 'chown', str(curr_user) + ":" + str(curr_group), file_path], stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
rc = p.returncode
if rc != 0:
print "Error getting ownership of file:\n%s" % err
raise Exception("Error getting ownersip of file:\n%s" % err)
##############################################################################################################
# Replaces all occurrences of a given regular expression "original_text" with a new one "new_text" in the given file.
##############################################################################################################
def replace_in_file(original_text, new_text, file_path):
# Iterate over all lines in the file, modifying it in place.
regex = re.compile(original_text, re.IGNORECASE)
if os.path.isfile(file_path):
for line in fileinput.input(file_path, inplace=True):
# Replace the string, if found in the current line.
line = regex.sub(new_text, line)
# Writes to stdout while using fileinput will replace the contents of the original file.
sys.stdout.write(line)
else:
print 'File ' + file_path + ' not found, not replacing text.'
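################################################################################################################
# Illustrative usage of replace_in_file (not called in this module): flips a hypothetical
# DEBUG flag in a settings file, case-insensitively, editing the file in place.
################################################################################################################
def example_flip_debug_flag(settings_path):
    replace_in_file(r'DEBUG\s*=\s*True', 'DEBUG = False', settings_path)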
|
import random
from past.builtins import range
import numpy as np
class Candidate(object):
""" A candidate solutions to the Sudoku puzzle. """
def __init__(self, Nd, sqrtVal):
self.Nd = Nd
self.sqrtVal = sqrtVal
self.values = np.zeros((self.Nd, self.Nd))
self.fitness = None
return
def update_fitness(self):
""" The fitness of a candidate solution is determined by how close it is to being the actual solution to the puzzle.
The actual solution (i.e. the 'fittest') is defined as a NdxNd grid of numbers in the range [1, Nd]
where each row, column and sqrtValxsqrtVal block contains the numbers [1, Nd] without any duplicates (see e.g. http://www.sudoku.com/);
if there are any duplicates then the fitness will be lower. """
column_count = np.zeros(self.Nd)
block_count = np.zeros(self.Nd)
column_sum = 0
block_sum = 0
self.values = self.values.astype(int)
# For each column....
for j in range(0, self.Nd):
for i in range(0, self.Nd):
column_count[self.values[i][j] - 1] += 1
for k in range(len(column_count)):
if column_count[k] == 1:
column_sum += (1/self.Nd)/self.Nd
column_count = np.zeros(self.Nd)
# For each block...
for i in range(0, self.Nd, self.sqrtVal):
for j in range(0, self.Nd, self.sqrtVal):
for k in range(0, self.sqrtVal):
for l in range(0, self.sqrtVal):
block_count[self.values[i+k][j+l] - 1] += 1
for k in range(len(block_count)):
if block_count[k] == 1:
block_sum += (1/self.Nd)/self.Nd
block_count = np.zeros(self.Nd)
# Calculate overall fitness.
if int(column_sum) == 1 and int(block_sum) == 1:
fitness = 1.0
else:
fitness = column_sum * block_sum
self.fitness = fitness
return
def mutate(self, mutation_rate, given):
""" Mutate a candidate by picking a row, and then picking two values within that row to swap. """
r = random.uniform(0, 1.1)
while r > 1: # Outside [0, 1] boundary - choose another
r = random.uniform(0, 1.1)
success = False
if r < mutation_rate: # Mutate.
while not success:
row1 = random.randint(0, 8)
row2 = row1  # the swap happens within this same row, as the docstring describes
from_column = random.randint(0, 8)
to_column = random.randint(0, 8)
while from_column == to_column:
from_column = random.randint(0, 8)
to_column = random.randint(0, 8)
# Check if the two places are free to swap
if given.values[row1][from_column] == 0 and given.values[row1][to_column] == 0:
# ...and that we are not causing a duplicate in the rows' columns.
if not given.is_column_duplicate(to_column, self.values[row1][from_column]) and not given.is_column_duplicate(from_column, self.values[row2][to_column]) and not given.is_block_duplicate(row2, to_column, self.values[row1][from_column]) and not given.is_block_duplicate(row1, from_column, self.values[row2][to_column]):
# Swap values.
temp = self.values[row2][to_column]
self.values[row2][to_column] = self.values[row1][from_column]
self.values[row1][from_column] = temp
success = True
return success
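def example_fitness_check():
    """ Small sanity check added for illustration (not used by the GA itself): a solved
    4x4 grid should score fitness 1.0, since every column and every 2x2 block holds
    the numbers 1..4 exactly once. """
    cand = Candidate(4, 2)
    cand.values = np.array([[1, 2, 3, 4],
                            [3, 4, 1, 2],
                            [2, 1, 4, 3],
                            [4, 3, 2, 1]])
    cand.update_fitness()
    return cand.fitness  # -> 1.0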
|
from django.conf.urls import url
from . import views
# URLs barril | lote | movimientos
urlpatterns = [
# Create object URLs
url(r'^lote/create/$', views.LoteCreate.as_view(), name='lote_create'),
url(r'^barril/create/$', views.BarrilCreate.as_view(),
name='barril_create'),
url(r'^movimiento/create/$', views.MovimientosBarrilCreate.as_view(),
name='movimiento_create'),
url(r'^$', views.LoteView.as_view(), name='lotelist'),
url(r'^lote/$', views.LoteView.as_view(), name='lotelist'),
url(r'^barril/$', views.BarrilView.as_view(), name='barrillist'),
url(r'^movimiento/$', views.MovimientosBarrilView.as_view(),
name='movimientoslist'),
url(r'^movimiento/(?P<lote>\d+)',
views.LoteMovimientosBarrilView.as_view(),
name='movimientoslistlote'),
url(r'^movimiento/(?P<barril>[-\w]+)/$',
views.BarrilMovimientosBarrilView.as_view(),
name='movimientoslistbarril'),
url(r'^lote/(?P<pk>\d+)$', views.LoteSeguimientosView.as_view(),
name='lote_seguimientos_list'),
url(r'^movimiento/update/(?P<pk>\d+)/$',
views.UpdateMovimientosBarrilView.as_view(),
name='movimientosupdate'),
url(r'^movimiento/ingresarbarril/(?P<slug>[-\w]+)/$',
views.IngresarMovimientosBarrilView.as_view(),
name='movimientosingreso'),
]
# URLs planillas
urlpatterns += [
# Planillas
url(r'^lote/(?P<pk>\d+)/BatchMaceracionCoccion/$',
views.BatchMaceracionCoccionlist.as_view(),
name='batch_maceracion_coccion_list'),
url(r'^lote/(?P<pk>\d+)/Fermentacion/$',
views.FermentacionUpdate.as_view(),
name='fermentacion_list'),
url(r'^lote/(?P<pk>\d+)/ClarificacionFiltracion/$',
views.ClarificacionFiltracionUpdate.as_view(),
name='clarificacion_filtracion_list'),
url(r'^lote/(?P<pk>\d+)/(?P<batch>\d+)/updateMaceracion/$',
views.MaceracionUpdate.as_view(), name='maceracion_update'),
url(r'^lote/(?P<pk>\d+)/(?P<batch>\d+)/updateCoccion/$',
views.CoccionUpdate.as_view(), name='coccion_update'),
url(r'^lote/(?P<pk>\d+)/createMaceracionCoccion/$',
views.SeguimientoMaceracionCoccionCreate,
name='maceracion_coccion_create'),
url(r'^lote/(?P<pk>\d+)/createFermentacion/$',
views.SeguimientoFermentacionCreate,
name='fermentacion_create'),
url(r'^lote/(?P<pk>\d+)/createClarificacionFiltracion/$',
views.SeguimientoClarificacionFiltracionCreate,
name='clarificacion_filtracion_create'),
]
|
import sys
import csv
import tweepy
import matplotlib.pyplot as plt
import os
import json
from collections import Counter
from aylienapiclient import textapi
def getSentiment(subject):
filepath = os.path.dirname(os.path.realpath(__file__))
# print filepath
oldFilename = "eventdata_"+subject+'.json'
filename = os.path.join(filepath,oldFilename)
# print filename
filelist = os.listdir(filepath)
# print 'oldFilename',oldFilename
# print 'filelist',filelist
if oldFilename in filelist:
print "File exists"
data = json.load(open(filename))
# print data
return data
# if sys.version_info[0] < 3:
# input = raw_input
## Twitter credentials
consumer_key = "qkSwIveFpaFGd2PkJXx8q45II"
consumer_secret = "AKZY0u4faQYituN9rqsp5T4dVOegRuRgn3ZKtKrey77zDBIvO4"
access_token = "1925790914-pDKSVrgpQ1XYDdI6sxGQziMmPFdDj9v8Cytxycp"
access_token_secret = "lPIMEi97wlk7tqd6eFtXb6NVMco6EftJfQd10JOucHM45"
## AYLIEN credentials
application_id = "c25cbda9"
application_key = "b12b250de88a56695ea787b3bf5b2993"
## set up an instance of Tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
## set up an instance of the AYLIEN Text API
client = textapi.Client(application_id, application_key)
## search Twitter for something that interests you
# query = input("What subject do you want to analyze for this example? \n")
# number = input("How many Tweets do you want to analyze? \n")
query = subject
number = 5
results = api.search(
lang="en",
q=query + " -rt",
count=number,
result_type="recent",
geocode = "1.352083,103.819836,25km"
)
print("--- Gathered Tweets \n")
## open a csv file to store the Tweets and their sentiment
# file_name = 'Sentiment_Analysis_of_{}_Tweets_About_{}.csv'.format(number, query)
# with open(file_name, 'w') as csvfile:
# csv_writer = csv.DictWriter(
# f=csvfile,
# fieldnames=["Tweet", "Sentiment"]
# )
# csv_writer.writeheader()
# print("--- Opened a CSV file to store the results of your sentiment analysis... \n")
listOfTweets = []
## tidy up the Tweets and send each to the AYLIEN Text API
for c, result in enumerate(results, start=1):
tweet = result.text
tidy_tweet = tweet.strip().encode('ascii', 'ignore')
if len(tweet) == 0:
print('Empty Tweet')
continue
response = client.Sentiment({'text': tidy_tweet})
listOfTweets.append((str(response['text']).replace('\n', ' '), str(response['polarity'])))
print("Analyzed Tweet {}".format(c))
sum = 0
for tweet in listOfTweets:
if tweet[1]=='positive':
sum+=1
elif tweet[1]=='neutral':
sum+=0.5
else:
sum+=0
jsondata = {'tweets':listOfTweets, 'overall':sum/float(number)}
with open(filename, 'w') as f:
json.dump(jsondata, f)
# return listOfTweets,sum/float(number)
return jsondata
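def overall_score(polarities):
    # Added sketch of the aggregation used inside getSentiment, as a pure function:
    # 'positive' counts 1, 'neutral' counts 0.5, anything else 0, averaged over the batch.
    if not polarities:
        return 0.0
    points = {'positive': 1.0, 'neutral': 0.5}
    return sum(points.get(p, 0.0) for p in polarities) / float(len(polarities))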
if __name__=="__main__":
print getSentiment('River Hongbao 2018')
|
"""
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
"""
############################### Need to be fixed ###################################
from typing import List
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
start = 0
end = len(nums) - 1 # 5
# target_index = -1 # target =8
required_list = [-1, -1]
while start < end:
mid = (start + end) // 2 # 2
print("start: "+str(start))
print("end: "+str(end))
print(mid)
print("1111111111111")
if nums[mid] == target:
start = mid
end = mid
print("mid: "+str(mid))
while start > 0 and end < len(nums) and (nums[start] == target or nums[end] == target):
print("#############")
print("start2: "+str(start))
print("end2: " + str(end))
if nums[start] == target and start > 0:
start -= 1
elif nums[end] == target and end < len(nums):
end += 1
else:
break
required_list = [start+1 , end-1]
break
elif nums[mid] < target:
start = mid + 1
else:
end = mid
return required_list
my_solution = Solution()
print(my_solution.searchRange([5,8,8,8,8], 8))
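# The class above is marked as needing a fix; a common O(log n) alternative runs two
# binary searches, one biased toward the leftmost match and one toward the rightmost
# (independent sketch, not a drop-in replacement for Solution.searchRange):
def search_range_binary(nums, target):
    def bound(left_bias):
        lo, hi, found = 0, len(nums) - 1, -1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                found = mid
                if left_bias:
                    hi = mid - 1
                else:
                    lo = mid + 1
            elif nums[mid] < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return found
    return [bound(True), bound(False)]
print(search_range_binary([5, 7, 7, 8, 8, 10], 8))  # -> [3, 4]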
|
import numpy as np
import open3d as o3d
from PyNomaly import loop
import time
# from numba import jit
class PointSet:
ply_=[]
len_=[]
tree_=[]
def __init__( self, path):
self.ply_ = o3d.io.read_point_cloud(path)
self.len_=len(self.ply_.points)
self.tree_=o3d.geometry.KDTreeFlann(self.ply_)
if len(self.ply_.colors)==0:
self.ply_.paint_uniform_color([0,1.0,0])
def write(self,path):
o3d.io.write_point_cloud(path, self.ply_)  # write_point_cloud needs the geometry as well as the path
def show(self):
o3d.visualization.draw_geometries([self.ply_])
# @jit
def point_cloud_outlier_removal(ps):
for i in range(ps.len_):
[_, idx, dist] = ps.tree_.search_knn_vector_3d(ps.ply_.points[i], k)
X=np.asarray(ps.ply_.points)[idx,:]
m=loop.LocalOutlierProbability(X).fit()
XScores=m.local_outlier_probabilities
idx_outlier=np.where(XScores>0.8)[0]
idx=np.asarray(idx)
# for j in range(len(idx_outlier)):
# np.asarray(ps.ply_.colors)[idx[idx_outlier[j]],:]=[255,0,0]
np.asarray(ps.ply_.colors)[idx[idx_outlier], :] = [1.0, 0, 0]  # Open3D colors are floats in [0, 1]
# Parameter
k=32
starttime = time.time()
# Read the point cloud
ps=PointSet("/home/llg/dataset/8_Couple.ply")
point_cloud_outlier_removal(ps)
# ps.write("llg-loop.ply")
endtime = time.time()
elapsed = endtime - starttime
print(elapsed)
ps.show()
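# For comparison, Open3D also ships a built-in statistical filter that removes points
# whose mean neighbour distance is unusually large; the parameters below are
# illustrative and not tuned for this dataset.
def builtin_outlier_removal(ps, nb_neighbors=32, std_ratio=2.0):
    filtered, kept_indices = ps.ply_.remove_statistical_outlier(
        nb_neighbors=nb_neighbors, std_ratio=std_ratio)
    return filtered, kept_indices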
|
import torch
import time
import torch.nn as nn
from IPython import embed
from . import losses as losses_lib
class PrimedBackpropper(object):
def __init__(self, initial, final, initial_num_images):
self.initial = initial
self.final = final
self.initial_num_images = initial_num_images
self.num_trained = 0
def next_partition(self, partition_size):
self.num_trained += partition_size
def get_backpropper(self):
return self.initial if self.num_trained < self.initial_num_images else self.final
@property
def optimizer(self):
return self.initial.optimizer if self.num_trained < self.initial_num_images else self.final.optimizer
def backward_pass(self, *args, **kwargs):
return self.get_backpropper().backward_pass(*args, **kwargs)
class SamplingBackpropper(object):
def __init__(self, device, net, optimizer, loss_fn):
self.optimizer = optimizer
self.net = net
self.device = device
self.loss_fn = loss_fn
def _get_chosen_examples(self, batch):
return [em for em in batch if em.example.select]
def _get_chosen_data_tensor(self, batch):
chosen_data = [em.example.datum for em in batch]
return torch.stack(chosen_data)
def _get_chosen_targets_tensor(self, batch):
chosen_targets = [em.example.target for em in batch]
return torch.stack(chosen_targets)
# def _get_chosen_targets_tensorRicap(self, batch):
# chosen_targets = [em.example.target for em in batch]
# chosen_W_ = [em.example.W_ for em in batch]
# return (torch.stack(chosen_targets), torch.stack(chosen_W_))
def _get_chosen_targets_tensor_ricap(self, examples):
# chosen_targets = [em.example.target for em in examples]
c_ = [torch.LongTensor([em.example.c_0,
em.example.c_1,
em.example.c_2,
em.example.c_3]) for em in examples]
# w_ = [em.example.W_ for em in examples]
return torch.stack(c_)#, torch.stack(w_))
def _get_chosen_targets_tensor_mixup(self, examples):
# chosen_targets = [em.example.target for em in examples]
c_ = [torch.LongTensor([em.example.c_0,
em.example.c_1]) for em in examples]
# w_ = [em.example.W_ for em in examples]
return torch.stack(c_)#, torch.stack(w_))
def backward_pass(self, batch):
# if len(batch) == 1420:
# embed()
self.net.train()
chosen_batch = self._get_chosen_examples(batch)
data = self._get_chosen_data_tensor(chosen_batch).to(self.device)
targets = self._get_chosen_targets_tensor(chosen_batch).to(self.device)
# Run forward pass
# print("embed() in backward pass")
outputs = self.net(data)
if self.loss_fn == losses_lib.CrossEntropyLossRicap:
targets_ricap = self._get_chosen_targets_tensor_ricap(chosen_batch).to(self.device)
losses = self.loss_fn(reduce=False)(outputs, targets_ricap, batch[0].example.W_)
elif self.loss_fn == losses_lib.CrossEntropyLossMixup:
targets_mixup = self._get_chosen_targets_tensor_mixup(chosen_batch).to(self.device)
losses = self.loss_fn(reduce=False)(outputs, targets_mixup, batch[0].example.W_)
else:
losses = self.loss_fn(reduce=False)(outputs, targets)
softmax_outputs = nn.Softmax()(outputs) # OPT: not necessary when logging is off
_, predicted = outputs.max(1)
is_corrects = predicted.eq(targets)
# Scale each loss by image-specific select probs
#losses = torch.div(losses, probabilities.to(self.device))
# Reduce loss
loss = losses.mean()
# Run backwards pass
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Add for logging selected loss
for em, loss, is_correct in zip(chosen_batch,
losses,
is_corrects):
em.example.loss = loss.item()
em.example.correct = is_correct.item()
em.metadata["loss"] = em.example.loss
return batch
class ReweightedBackpropper(SamplingBackpropper):
def __init__(self, device, net, optimizer, loss_fn):
super(ReweightedBackpropper, self).__init__(device,
net,
optimizer,
loss_fn)
def _get_chosen_weights_tensor(self, batch):
chosen_weights = [torch.tensor(em.example.weight, dtype=torch.float) for em in batch]
return torch.stack(chosen_weights)
def backward_pass(self, batch):
self.net.train()
chosen_batch = self._get_chosen_examples(batch)
data = self._get_chosen_data_tensor(chosen_batch).to(self.device)
targets = self._get_chosen_targets_tensor(chosen_batch).to(self.device)
weights = self._get_chosen_weights_tensor(chosen_batch).to(self.device)
# Run forward pass
outputs = self.net(data)
losses = self.loss_fn(reduce=False)(outputs, targets)
softmax_outputs = nn.Softmax()(outputs) # OPT: not necessary when logging is off
_, predicted = outputs.max(1)
is_corrects = predicted.eq(targets)
# Scale each loss by image-specific select probs
losses = torch.mul(losses, weights)
# Reduce loss
loss = losses.mean()
# Run backwards pass
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Add for logging selected loss
for em, loss, is_correct in zip(chosen_batch,
losses,
is_corrects):
em.example.loss = loss.item()
em.example.correct = is_correct.item()
em.metadata["loss"] = em.example.loss
return batch
class AlwaysOnBackpropper(SamplingBackpropper):
    # Backprops every example: inherits from SamplingBackpropper (where the original
    # super() call pointed) and treats the whole batch as "chosen".
    def __init__(self, device, net, optimizer, loss_fn):
        super(AlwaysOnBackpropper, self).__init__(device,
                                                  net,
                                                  optimizer,
                                                  loss_fn)
def _get_chosen_examples(self, batch):
return batch
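# Illustrative wiring of PrimedBackpropper (not used elsewhere in this module): prime
# training with the plain sampling backpropper, then hand off to the reweighted one
# after the given number of images; callers drive it via next_partition/backward_pass.
def build_primed_backpropper(device, net, optimizer, loss_fn, initial_num_images=50000):
    return PrimedBackpropper(
        initial=SamplingBackpropper(device, net, optimizer, loss_fn),
        final=ReweightedBackpropper(device, net, optimizer, loss_fn),
        initial_num_images=initial_num_images)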
|
import unittest
import numpy.testing as testing
import numpy as np
import hpgeom as hpg
from numpy import random
import healsparse
class BuildMapsTestCase(unittest.TestCase):
def test_build_maps_single(self):
"""
Test building a map for a single-value field
"""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
# Create an empty map
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, np.float64)
# Look up all the values, make sure they're all UNSEEN
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True), hpg.UNSEEN)
# Fail to append because of wrong dtype
pixel = np.arange(4000, 20000)
values = np.ones_like(pixel, dtype=np.float32)
self.assertRaises(ValueError, sparse_map.update_values_pix, pixel, values)
# Append a bunch of pixels
values = np.ones_like(pixel, dtype=np.float64)
sparse_map.update_values_pix(pixel, values)
# Make a healpix map for comparison
hpmap = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN
hpmap[pixel] = values
ipnest_test = hpg.angle_to_pixel(nside_map, ra, dec, nest=True)
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True), hpmap[ipnest_test])
# Replace the pixels
values += 1
sparse_map.update_values_pix(pixel, values)
hpmap[pixel] = values
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True), hpmap[ipnest_test])
# Replace and append more pixels
# Note that these are lower-number pixels, so the map is out of order
pixel2 = np.arange(3000) + 2000
values2 = np.ones_like(pixel2, dtype=np.float64)
sparse_map.update_values_pix(pixel2, values2)
hpmap[pixel2] = values2
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True), hpmap[ipnest_test])
# Test making empty maps
sparse_map2 = healsparse.HealSparseMap.make_empty_like(sparse_map)
self.assertEqual(sparse_map2.nside_coverage, sparse_map.nside_coverage)
self.assertEqual(sparse_map2.nside_sparse, sparse_map.nside_sparse)
self.assertEqual(sparse_map2.dtype, sparse_map.dtype)
self.assertEqual(sparse_map2.sentinel, sparse_map.sentinel)
sparse_map2b = healsparse.HealSparseMap.make_empty_like(sparse_map, cov_pixels=[0, 2])
self.assertEqual(sparse_map2b.nside_coverage, sparse_map.nside_coverage)
self.assertEqual(sparse_map2b.nside_sparse, sparse_map.nside_sparse)
self.assertEqual(sparse_map2b.dtype, sparse_map.dtype)
self.assertEqual(sparse_map2b.sentinel, sparse_map.sentinel)
self.assertEqual(len(sparse_map2b._sparse_map),
sparse_map2._cov_map.nfine_per_cov*3)
testing.assert_array_equal(sparse_map2b._sparse_map, sparse_map.sentinel)
sparse_map2 = healsparse.HealSparseMap.make_empty_like(sparse_map, nside_coverage=16)
self.assertEqual(sparse_map2.nside_coverage, 16)
self.assertEqual(sparse_map2.nside_sparse, sparse_map.nside_sparse)
self.assertEqual(sparse_map2.dtype, sparse_map.dtype)
self.assertEqual(sparse_map2.sentinel, sparse_map.sentinel)
sparse_map2 = healsparse.HealSparseMap.make_empty_like(sparse_map, nside_sparse=128)
self.assertEqual(sparse_map2.nside_coverage, sparse_map.nside_coverage)
self.assertEqual(sparse_map2.nside_sparse, 128)
self.assertEqual(sparse_map2.dtype, sparse_map.dtype)
self.assertEqual(sparse_map2.sentinel, sparse_map.sentinel)
sparse_map2 = healsparse.HealSparseMap.make_empty_like(sparse_map, dtype=np.int32, sentinel=0)
self.assertEqual(sparse_map2.nside_coverage, sparse_map.nside_coverage)
self.assertEqual(sparse_map2.nside_sparse, sparse_map.nside_sparse)
self.assertEqual(sparse_map2.dtype, np.int32)
def test_build_maps_recarray(self):
"""
Testing building a map for a recarray
"""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
# Create an empty map
dtype = [('col1', 'f4'), ('col2', 'f8')]
self.assertRaises(RuntimeError, healsparse.HealSparseMap.make_empty, nside_coverage,
nside_map, dtype)
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype, primary='col1')
# Look up all the values, make sure they're all UNSEEN
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True)['col1'], hpg.UNSEEN)
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True)['col2'], hpg.UNSEEN)
pixel = np.arange(4000, 20000)
values = np.zeros_like(pixel, dtype=dtype)
values['col1'] = 1.0
values['col2'] = 2.0
sparse_map.update_values_pix(pixel, values)
# Make healpix maps for comparison
hpmapCol1 = np.zeros(hpg.nside_to_npixel(nside_map), dtype=np.float32) + hpg.UNSEEN
hpmapCol2 = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN
hpmapCol1[pixel] = values['col1']
hpmapCol2[pixel] = values['col2']
ipnest_test = hpg.angle_to_pixel(nside_map, ra, dec, nest=True)
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True)['col1'],
hpmapCol1[ipnest_test])
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True)['col2'],
hpmapCol2[ipnest_test])
# Replace the pixels
values['col1'] += 1
values['col2'] += 1
sparse_map.update_values_pix(pixel, values)
hpmapCol1[pixel] = values['col1']
hpmapCol2[pixel] = values['col2']
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True)['col1'],
hpmapCol1[ipnest_test])
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True)['col2'],
hpmapCol2[ipnest_test])
# Replace and append more pixels
# Note that these are lower-number pixels, so the map is out of order
pixel2 = np.arange(3000) + 2000
values2 = np.zeros_like(pixel2, dtype=dtype)
values2['col1'] = 1.0
values2['col2'] = 2.0
sparse_map.update_values_pix(pixel2, values2)
hpmapCol1[pixel2] = values2['col1']
hpmapCol2[pixel2] = values2['col2']
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True)['col1'],
hpmapCol1[ipnest_test])
testing.assert_almost_equal(sparse_map.get_values_pos(ra, dec, lonlat=True)['col2'],
hpmapCol2[ipnest_test])
# Test making empty maps
sparse_map2 = healsparse.HealSparseMap.make_empty_like(sparse_map)
self.assertEqual(sparse_map2.nside_coverage, sparse_map.nside_coverage)
self.assertEqual(sparse_map2.nside_sparse, sparse_map.nside_sparse)
self.assertEqual(sparse_map2.dtype, sparse_map.dtype)
self.assertEqual(sparse_map2.sentinel, sparse_map.sentinel)
sparse_map2b = healsparse.HealSparseMap.make_empty_like(sparse_map, cov_pixels=[0, 2])
self.assertEqual(sparse_map2b.nside_coverage, sparse_map.nside_coverage)
self.assertEqual(sparse_map2b.nside_sparse, sparse_map.nside_sparse)
self.assertEqual(sparse_map2b.dtype, sparse_map.dtype)
self.assertEqual(sparse_map2b.sentinel, sparse_map.sentinel)
self.assertEqual(len(sparse_map2b._sparse_map),
sparse_map2._cov_map.nfine_per_cov*3)
testing.assert_array_equal(sparse_map2b._sparse_map['col1'], sparse_map.sentinel)
testing.assert_array_equal(sparse_map2b._sparse_map['col2'], hpg.UNSEEN)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
from insertion_sort.insertion_sort import InsertionSort
from selection_sort.selection_sort import SelectionSort
import time
import copy
import random
sizes = [
1000,
10000,
50000
]
for size in sizes:
# random generation of items to be sorted
print "-"*10 + "sorting numbers" + "-"*10
items = []
for i in range(0,size):
items.append(random.randint(2,99999))
#print "original items: %r" % items
# the worse case
items_worse = range (size-1,-1,-1)
# the best case
items_best = range(0,size)
to_be_sorted = [
("random case",items),
("worse case",items_worse),
("best case",items_best)
]
def duration(sort_method):
# calculate execution time for our selection sort method
start = time.clock()
sort_method.sort()
end = time.clock()
duration = end - start
return duration
for item in to_be_sorted:
temp = copy.deepcopy(item) # for reversing use after a certain sort
print "-"*10 + item[0] + "-"*10
# calculate duration for insertion sort
insertion_sort = InsertionSort(item[1])
dinsertion = duration(insertion_sort)
item = temp
# calculate duration for selection sort
selection_sort = SelectionSort(item[1])
dselection = duration(selection_sort)
item = temp
# calculate duration for python builtin sort
dpython = duration(item[1])
print "%s: %ds" % ("insertion sort",dinsertion)
print "%s: %ds" % ("selection sort",dselection)
print "%s: %ds" % ("python built-in",dpython)
|
# coding: utf-8
import os
import requests
import json
from PIL import Image
from pymongo import MongoClient
from StringIO import StringIO
FLASK_BIND_PORT = int(os.environ.get('FLASK_BIND_PORT', '5000'))
mongodb_host = os.environ.get('MONGODB_HOST', 'localhost')
mongodb_port = int(os.environ.get('MONGODB_PORT', '27017'))
client = MongoClient(mongodb_host, mongodb_port)
db = client.resizephoto_db
app_dir = os.path.dirname(os.path.abspath(__file__))
media_path = os.path.join(app_dir, 'images/')
WEBSERVICE_ENDPOINT = os.environ.get('WEBSERVICE_ENDPOINT')
SIZES = {
'small': (320, 240),
'medium': (384, 288),
'large': (640, 480),
}
class Resizer(object):
def __init__(self, webservice_endpoint=WEBSERVICE_ENDPOINT, json=None):
self.webservice_endpoint = webservice_endpoint
self.raw_json = json
@classmethod
def create_document(cls, img_url):
# make a request to img's url in order to get its content later
img_response = requests.get(img_url)
# creates a PIL's Image object with response content
opened_img = Image.open(StringIO(img_response.content))
# get name and extension of the image
name, ext = img_url.split('/')[-1].split('.')
# create empty document for insertion in image collection
img_document = {
"image_url": img_url,
"resized_images_dict": {}
}
# loop over sizes
for label, size in SIZES.items():
width, height = size
new_img = opened_img.resize((width, height), Image.ANTIALIAS)
filename = '{}_{}.{}'.format(name, label, ext)
img_document['resized_images_dict'][label] = 'http://localhost:{}/images/{}'.format(
FLASK_BIND_PORT,
filename
)
new_img.save(os.path.join(media_path, filename))
return db.image_collection.insert_one(img_document).inserted_id
def resize_images(self):
if self.raw_json is None:
response = requests.get(self.webservice_endpoint)
images = response.json()['images']
else:
images = json.loads(self.raw_json)['images']
# loop over all images from specified endpoint
for img in images:
# get the image's url
img_url = img['url']
# search for already resized images with this url on MongoDB
img_document = db.image_collection.find_one({u'image_url': img_url})
# if nothing was found we should do the conversions and save at the end
if img_document is None:
inserted_id = Resizer.create_document(img_url)
print('-------> Created a new document - {} (id)'.format(inserted_id))
else:
print('-------> Using existing document - {} (id)'.format(img_document['_id']))
if __name__ == '__main__':
resizer = Resizer()
resizer.resize_images()
|
a = 1
b = 1
for i in range(100000000):
#print(b)
c = a
a = b + a
b = c
print(a)
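# The loop above takes time linear in the index; a fast-doubling sketch (added for
# comparison, not a replacement for the running total printed above) computes the n-th
# Fibonacci number with O(log n) multiplications, using F(2k) = F(k)*(2*F(k+1) - F(k))
# and F(2k+1) = F(k)^2 + F(k+1)^2.
def fib_fast_doubling(n):
    def helper(m):
        if m == 0:
            return (0, 1)  # (F(0), F(1))
        a, b = helper(m // 2)
        c = a * (2 * b - a)   # F(2k)
        d = a * a + b * b     # F(2k+1)
        if m % 2 == 0:
            return (c, d)
        return (d, c + d)
    return helper(n)[0]
print(fib_fast_doubling(10))  # -> 55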
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 10:44:49 2018
This is Will's PSF notebook put into code so that I can understand what it does
@author: ppxee
"""
from __future__ import print_function, division
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
plt.close('all')
import sys
sem = str(sys.argv[1]) #1st arguement from call is the semester
def norm(array):
return array/np.nansum(array)
# reads in file with stamps
data = fits.open(sem+'_star_stamps_table.fits')[1].data
# extract stamps column and set non source regions to nans
stamps = data['VIGNET']
stamps[stamps<-5e29] = np.nan
# Plot some stars- not sure why, just for demonstration?
#plt.figure()
#plt.imshow(np.log(data['VIGNET'][24]))
#plt.figure()
#plt.imshow(np.log(stamps[1000,:,:]))
# Normalise all of the images so that each stamp sums to 1.
# Note: stampsnorm = stamps just binds a second name to the same array, so 'stamps' is
# normalised in place too; use stamps.copy() if the originals need to be preserved.
stampsnorm = stamps
for i in range(stamps.shape[0]):
stampsnorm[i,:,:] = norm(stamps[i,:,:])
# find the median image
stack = np.nanmedian(stampsnorm, axis=0)
# print the shape as a sanity check
print(stack.shape)
#normalise and plot the median image
stack = norm(stack)
plt.figure()
plt.imshow(np.log(stack))
hdu = fits.PrimaryHDU(stack)
hdu.writeto(sem+'_K_PSF.fits', overwrite=True)
# This image is the typical PSF of the stack - can be used to create a matching kernel
|
# Solve with heapq (a max/min value problem)
# Lesson learned: instead of indexing into the heapified list directly, pop a value,
# adjust it, and push it back.
from heapq import *
def solution(n, works):
if sum(works) <= n:
return 0
answer = 0
works = [-i for i in works]
heapify(works)
for _ in range(n):
A = heappop(works)
A += 1
heappush(works, A)
for i in works:
answer += i ** 2
return answer
# Incorrect solution
# We need to keep taking 1 off the current largest task, but the formula below
# redistributes work across tasks, which the problem does not allow.
# [1, 7, 8, 2], n = 3
# should become [1, 6, 6, 2]
# but the calculation below effectively produces [4, 4, 4, 5]; work cannot be moved
# between tasks, so this approach is wrong.
def solution(n, works):
answer = 0
SumWorks = sum(works)
# if n is at least the total remaining work, return 0
if SumWorks <= n:
return 0
else:
# Larger values contribute more once squared, so keeping every task's value low is optimal.
# Subtract the remaining hours n from the total work and split the rest evenly across the tasks.
# If there is a remainder, add 1 to that many of the tasks.
x = (SumWorks - n) // len(works)
y = (SumWorks - n) % len(works)
print(x, y, 1 // 10, 1 % 10, 0 ** 2)
answer += x ** 2 * (len(works) - y)
answer += (x + 1) ** 2 * (y)
return answer
43333928
|
from django.shortcuts import render, redirect, HttpResponse
from bbs.models import Comments, CommentsReply, UserInfo, User, Article, FriendShip
from notifications.models import Notification, NotificationQuerySet
from django.contrib.auth.views import login_required
from django.db import transaction
import json
@login_required(login_url='/bbs/signup')
def get_comment_list(request):
if request.method == 'GET':
user_info = UserInfo.objects.get(user=request.user)
notice_comments = Notification.objects.filter(recipient=request.user, verb__in=['评论了你', '回复了你'])
unread_count_comment = notice_comments.filter(unread=True).count()
notice_follows = Notification.objects.filter(recipient=request.user, verb='关注了你')
unread_count_follow = notice_follows.filter(unread=True).count()
context = {"notice_comments": notice_comments, "user_info": user_info,
"unread_count_comment": unread_count_comment, "unread_count_follow": unread_count_follow}
return render(request, 'notice/notice_comments.html', context)
def comment_notice_update(request):
if request.method == 'GET':
notice_id = request.GET.get('notice_id')
if notice_id:
notice = Notification.objects.get(id=notice_id)
notice.mark_as_read()
if notice.verb == '评论了你':
comment = Comments.objects.get(article=notice.target, content=notice.description, author=notice.actor)
return redirect('/bbs/article/' + str(notice.target.id) + '#comment-' + str(comment.id))
else:
return redirect('/bbs/article/' + str(notice.target.article.id) + '#comment-' + str(notice.target.id))
else:
notices = Notification.objects.filter(recipient=request.user, verb__in=['评论了你', '回复了你'], unread=True)
for notice in notices:
notice.mark_as_read()
return redirect("notice:notice_comment_list")
def get_likes_and_thumbs_up_list(request):
return render(request, 'notice/notice_likes.html')
@login_required(login_url='/bbs/signup')
def get_follow_list(request):
if request.method == 'GET':
user_info = UserInfo.objects.get(user=request.user)
notice_comments = Notification.objects.filter(recipient=request.user, verb__in=['评论了你', '回复了你'])
unread_count_comment = notice_comments.filter(unread=True).count()
notice_follows = Notification.objects.filter(recipient=request.user, verb='关注了你')
unread_count_follow = notice_follows.filter(unread=True).count()
has_followed = FriendShip.objects.filter(following=request.user).values_list("followed_id")
has_followed = [x[0] for x in has_followed]
context = {"notice_follows": notice_follows, "user_info": user_info,
"unread_count_follow": unread_count_follow,
"unread_count_comment": unread_count_comment, "has_followed": has_followed}
return render(request, 'notice/notice_follows.html', context)
def follow_notice_update(request):
notices = Notification.objects.filter(recipient=request.user, verb='关注了你', unread=True)
for notice in notices:
notice.mark_as_read()
return redirect("notice:notice_follow_list")
def set_following(request):
following = request.user
if following.is_anonymous:
return redirect("bbs:signup")
else:
followed_id = request.POST.get('followed_id')
followed = User.objects.get(id=followed_id)
like = UserInfo.objects.get(user=following)
fans = UserInfo.objects.get(user=followed)
with transaction.atomic():
FriendShip.objects.create(following=following, followed=followed)
if like == fans:
like.like += 1
like.fans += 1
like.save()
else:
like.like += 1
like.save()
fans.fans += 1
fans.save()
status = 1
content = {
"status": status,
}
return HttpResponse(json.dumps(content))
def cancel_following(request):
following = request.user
if following.is_anonymous:
return redirect("bbs:signup")
followed_id = request.POST.get('followed_id')
followed = User.objects.get(id=followed_id)
like = UserInfo.objects.get(user=following)
fans = UserInfo.objects.get(user=followed)
with transaction.atomic():
FriendShip.objects.get(following=following, followed=followed).delete()
if like == fans:
like.like -= 1
like.fans -= 1
like.save()
else:
like.like -= 1
like.save()
fans.fans -= 1
fans.save()
status = 1
content = {
"status": status
}
return HttpResponse(json.dumps(content))
|
import claripy
import hashlib
def findAns(ind):
possible = s.eval(x[ind], 17, extra_constraints=ext)
if ind > 15:
ruleAry.append(ext[:])
print 'add new ext'
return
for i in possible:
ext.append(x[ind] == i)
findAns(ind + 1)
ext.pop()
ruleAry = []
s = claripy.Solver()
x = []
ans = [17, 0, 0, 10, 0, 0, 0, 6, 0, 18, 1, 0,
0, 0, 0, 0, 0, 0, 21, 2, 0, 16, 0, 0, 0]
for i in xrange(17):
x.append(claripy.BVS(str(i), 8))
for j in ans:
s.add(x[i] != j)
s.add(claripy.ULE(x[i], 25))
# add rule !=
for i, vali in enumerate(x):
for j, valj in enumerate(x):
if(i != j):
s.add(vali != valj)
# add x to ans
ind = 0
for i, val in enumerate(ans):
if val == 0:
ans[i] = x[ind]
ind += 1
# total(row), total(col) == 65
rule = [0 for i in range(10)]
for i in xrange(5):
for j in xrange(5):
rule[i] += ans[i * 5 + j]
rule[5 + j] += ans[i * 5 + j]
for i in rule:
s.add(i == 65)
s.add((x[4] - x[11] + x[16] - x[5]+0x15 - x[13] + 0x11 - x[2]) == 0)
ext = []
findAns(0)
for i in ruleAry:
# print i
curAns = []
for j in x:
curAns.append(s.eval(j, 1, extra_constraints=i)[0])
ind = 0
curAnsAry = ans[:]
for j, val in enumerate(curAnsAry):
if not isinstance(val, int):
curAnsAry[j] = curAns[ind]
ind += 1
curAnsAryChr = [chr(k) for k in curAnsAry]
ha = hashlib.sha256(''.join(curAnsAryChr)).hexdigest()
print ha
if ha == 'cf252238dc5077b46d45cf941d09d925' \
'd141cc55bb7a8f96a8648b594af3a6a5':
data = curAnsAry
break
f = [0x70, 0x7E, 0x77, 0x39, 0x70,
0x51, 0x5A, 0x65, 0x6D, 0x7C,
0x5E, 0x74, 0x62, 0x7F, 0x6F,
0x6D, 0x51, 0x21, 0x6D, 0x37,
0x2E, 0x31, 0x68, 0x7D, 0x74]
s = ''
for i, val in enumerate(data):
s += chr(val ^ f[i])
print s
|
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'make_global_settings': [
['LINK_wrapper', './check-ldflags.py'],
],
'targets': [
{
'target_name': 'test',
'type': 'executable',
'ldflags': [
'-Wl,--whole-archive <(PRODUCT_DIR)/lib1.a',
'-Wl,--no-whole-archive',
'-Wl,--whole-archive <(PRODUCT_DIR)/lib2.a',
'-Wl,--no-whole-archive',
],
'dependencies': [
'lib1',
'lib2',
],
'sources': [
'main.c',
],
},
{
'target_name': 'lib1',
'type': 'static_library',
'standalone_static_library': 1,
'sources': [
'lib1.c',
],
},
{
'target_name': 'lib2',
'type': 'static_library',
'standalone_static_library': 1,
'sources': [
'lib2.c',
],
},
],
}
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
from typing import cast
from pants.base.specs import AddressLiteralSpec, FileLiteralSpec, RawSpecs, Specs
from pants.base.specs_parser import SpecsParser
from pants.core.util_rules.environments import determine_bootstrap_environment
from pants.core.util_rules.system_binaries import GitBinary
from pants.engine.addresses import AddressInput
from pants.engine.environment import EnvironmentName
from pants.engine.internals.scheduler import SchedulerSession
from pants.engine.internals.selectors import Params
from pants.engine.rules import QueryRule
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.util.frozendict import FrozenDict
from pants.vcs.changed import ChangedAddresses, ChangedOptions, ChangedRequest
from pants.vcs.git import GitWorktreeRequest, MaybeGitWorktree
logger = logging.getLogger(__name__)
class InvalidSpecConstraint(Exception):
"""Raised when invalid constraints are given via specs and arguments like --changed*."""
def calculate_specs(
options_bootstrapper: OptionsBootstrapper,
options: Options,
session: SchedulerSession,
working_dir: str,
) -> Specs:
"""Determine the specs for a given Pants run."""
global_options = options.for_global_scope()
unmatched_cli_globs = global_options.unmatched_cli_globs
specs = SpecsParser(working_dir=working_dir).parse_specs(
options.specs,
description_of_origin="CLI arguments",
unmatched_glob_behavior=unmatched_cli_globs,
)
changed_options = ChangedOptions.from_options(options.for_scope("changed"))
logger.debug("specs are: %s", specs)
logger.debug("changed_options are: %s", changed_options)
if specs and changed_options.provided:
changed_name = "--changed-since" if changed_options.since else "--changed-diffspec"
specs_description = specs.arguments_provided_description()
assert specs_description is not None
raise InvalidSpecConstraint(
f"You used `{changed_name}` at the same time as using {specs_description}. You can "
f"only use `{changed_name}` or use normal arguments."
)
if not changed_options.provided:
return specs
bootstrap_environment = determine_bootstrap_environment(session)
(git_binary,) = session.product_request(GitBinary, [Params(bootstrap_environment)])
(maybe_git_worktree,) = session.product_request(
MaybeGitWorktree, [Params(GitWorktreeRequest(), git_binary, bootstrap_environment)]
)
if not maybe_git_worktree.git_worktree:
raise InvalidSpecConstraint(
"The `--changed-*` options are only available if Git is used for the repository."
)
changed_files = tuple(changed_options.changed_files(maybe_git_worktree.git_worktree))
file_literal_specs = tuple(FileLiteralSpec(f) for f in changed_files)
changed_request = ChangedRequest(changed_files, changed_options.dependents)
(changed_addresses,) = session.product_request(
ChangedAddresses,
[Params(changed_request, options_bootstrapper, bootstrap_environment)],
)
logger.debug("changed addresses: %s", changed_addresses)
address_literal_specs = []
for address in cast(ChangedAddresses, changed_addresses):
address_input = AddressInput.parse(address.spec, description_of_origin="`--changed-since`")
address_literal_specs.append(
AddressLiteralSpec(
path_component=address_input.path_component,
target_component=address_input.target_component,
generated_component=address_input.generated_component,
parameters=FrozenDict(address_input.parameters),
)
)
return Specs(
includes=RawSpecs(
# We need both address_literals and file_literals to cover all our edge cases, including
# target-aware vs. target-less goals, e.g. `list` vs `count-loc`.
address_literals=tuple(address_literal_specs),
file_literals=file_literal_specs,
unmatched_glob_behavior=unmatched_cli_globs,
filter_by_global_options=True,
from_change_detection=True,
description_of_origin="`--changed-since`",
),
ignores=RawSpecs(description_of_origin="`--changed-since`"),
)
def rules():
return [
QueryRule(ChangedAddresses, [ChangedRequest, EnvironmentName]),
QueryRule(GitBinary, [EnvironmentName]),
QueryRule(MaybeGitWorktree, [GitWorktreeRequest, GitBinary, EnvironmentName]),
]
|
p1_total += 5 * (p1_table.count('Tempura') / 2)
p1_total += 10 * (p1_table.count('Sashimi') / 3)
p1_total += (p1_table.count('Dumpling') * (p1_table.count('Dumpling') + 1)) / 2
maki += p1_table.count('SingleMaki') + 2 * p1_table.count('DoubleMaki') + 3 * p1_table.count('TripleMaki')
p1_total += p1_table.count('EggNigiri')
p1_total += 2 * p1_table.count('SalmonNigiri')
p1_total += 3 * p1_table.count('SquidNigiri')
p1_total += 3 * p1_table.count('EggNigiriWasabi')
p1_total += 6 * p1_table.count('SalmonNigiriWasabi')
p1_total += 9 * p1_table.count('SquidNigiriWasabi')
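# The fragment above assumes p1_total and maki are numeric accumulators and p1_table is
# a list of card-name strings. A self-contained sketch of the nigiri portion for clarity
# (point values follow the lines above, not an official rules reference):
def nigiri_points(table):
    values = {'EggNigiri': 1, 'SalmonNigiri': 2, 'SquidNigiri': 3,
              'EggNigiriWasabi': 3, 'SalmonNigiriWasabi': 6, 'SquidNigiriWasabi': 9}
    return sum(values.get(card, 0) for card in table)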
|
from dotenv import load_dotenv
def load():
envFilePath = '/root/.env'
load_dotenv(envFilePath)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
import ujson
import time
from sanic.log import logger
# from decimal import Decimal
from sanic.request import Request
# from shapely.geometry import Point, LineString, Polygon
# NVL POINT IMPORTS
from .specification.get_nvl_position_specification import (
get_nvl_position_list_query, get_nvl_position_list_count_query,
get_nvl_distance_query
)
from web_backend.nvlserver.helper.asyncpg_types import decode_geometry, encode_geometry
__all__ = [
# SERVICES WORKING ON LANGUAGE TABLE
'get_nvl_position_list', 'get_nvl_position_list_count', 'get_nvl_distance'
]
# NVL POINT SERVICES
async def get_nvl_position_list(
request: Request,
user_id: int = 0,
traceable_object_id: int = 0,
date_from: object = None,
date_to: object = None,
limit: int = 0,
offset: int = 0) -> list:
""" Get get_nvl_position_list .
:param request:
:param user_id:
:param traceable_object_id:
:param date_from:
:param date_to:
:param limit:
:param offset:
:return:
"""
ret_val = []
query_str = get_nvl_position_list_query
try:
async with request.app.pg.acquire() as connection:
await connection.set_type_codec(
'json',
encoder=ujson.dumps,
decoder=ujson.loads,
schema='pg_catalog'
)
await connection.set_type_codec(
'geometry',
encoder=encode_geometry,
decoder=decode_geometry,
format='binary',
)
# print('--------------------------------------------------------------')
# print(user_id, traceable_object_id, date_from, date_to, limit, offset)
# print('--------------------------------------------------------------')
# print(time.time())
# print(user_id, traceable_object_id, date_from, date_to, limit, offset)
if limit > 0:
# print('--------------------------------------------------------------')
query_str += ' ORDER BY hmup.id DESC LIMIT $5 OFFSET $6;'
# print('đđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđđ')
# print(query_str)
rows = await connection.fetch(
query_str, user_id, traceable_object_id, date_from, date_to, limit, offset)
# print('eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee')
# print(rows, type(rows))
# print('sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss')
else:
# print('-----------++++++++++++++++++++++++++++++++++++++++++++--------')
query_str += ' ORDER BY hmup.id DESC'
rows = await connection.fetch(query_str, user_id, traceable_object_id, date_from, date_to)
# print(rows)
if rows:
ret_val = [dict(x) for x in rows]
# print(ret_val)
# print(time.time())
except Exception as gclerr:
logger.error('get_nvl_position_list service erred with: {}'.format(gclerr))
return ret_val
async def get_nvl_position_list_count(
request: Request,
user_id: int = 0,
traceable_object_id: int = 0,
date_from: object = None,
date_to: object = None) -> int:
""" Get nvl_point list count.
:param request:
:param user_id:
:param traceable_object_id:
:param date_from:
:param date_to:
:return:
"""
ret_val = 0
query_str = get_nvl_position_list_count_query
try:
async with request.app.pg.acquire() as connection:
row = await connection.fetchval(query_str, user_id, traceable_object_id, date_from, date_to)
if row is not None:
ret_val = row
except Exception as gclcerr:
logger.error('get_nvl_position_list_count service erred with: {}'.format(gclcerr))
return ret_val
async def get_nvl_distance(
request: Request,
user_id: int = 0,
traceable_object_id: int = 0,
date_from: object = None,
date_to: object = None) -> float:
""" Get get_nvl_position_list .
:param request:
:param user_id:
:param traceable_object_id:
:param date_from:
:param date_to:
:return:
"""
ret_val = 0.0
query_str = get_nvl_distance_query
try:
async with request.app.pg.acquire() as connection:
await connection.set_type_codec(
'json',
encoder=ujson.dumps,
decoder=ujson.loads,
schema='pg_catalog'
)
await connection.set_type_codec(
'geometry',
encoder=encode_geometry,
decoder=decode_geometry,
format='binary',
)
row = await connection.fetchval(query_str, user_id, traceable_object_id, date_from, date_to)
if row is not None:
ret_val = row
except Exception as gclerr:
logger.error('get_nvl_distance service erred with: {}'.format(gclerr))
return ret_val
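# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): how a Sanic
# route handler might call the services above. The blueprint name, URL prefix
# and query-parameter names are assumptions made only for illustration.
# ---------------------------------------------------------------------------
# from sanic import Blueprint
# from sanic.response import json as json_response
#
# nvl_position_bp = Blueprint('nvl_position', url_prefix='/api/nvl/position')
#
# @nvl_position_bp.route('/', methods=['GET'])
# async def nvl_position_list_handler(request):
#     filters = dict(
#         user_id=int(request.args.get('user_id', 0)),
#         traceable_object_id=int(request.args.get('traceable_object_id', 0)),
#         date_from=request.args.get('date_from'),
#         date_to=request.args.get('date_to'))
#     positions = await get_nvl_position_list(
#         request, limit=int(request.args.get('limit', 0)),
#         offset=int(request.args.get('offset', 0)), **filters)
#     count = await get_nvl_position_list_count(request, **filters)
#     return json_response({'data': positions, 'count': count})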
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 21:41:41 2018
@author: bolof
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv('Position_Salaries.csv')
# create your independent and dependent variables of X and y
X = dataset.iloc[:,1:2].values
y = dataset.iloc[:,2].values
# no need to split the data into train and test sets since we have very few data points
#feature scaling
# fitting the linear regression to the dataset
"""from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X,y) """
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
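# Illustration only (not part of the original script): use the fitted model to
# predict the salary for an intermediate position level, e.g. level 6.5.
print(lin_reg.predict([[6.5]]))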
|
import logging
import Currency
from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup  # explicit imports for the names used below
from telegram.ext import *
from DBMS import *
from sms import SMS
import unidecode
from bot_info import *
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
# DBMS
database = DBMS()
all_users = database.get_users()
users_dict = {}
for each in all_users:
users_dict[each[0]] = list(each)
all_users = users_dict
print(all_users)
prices = Currency.Currency()
def start(update: Update, context: CallbackContext) -> None:
person = update.message.from_user
print(f' New Thread with {person.username} at : {get_duration(update.message.date)} ago')
context.user_data['username'] = person.username
context.bot.send_message(update.message.chat_id, wellcome_text)
reply_markup = InlineKeyboardMarkup(main_keyboard)
update.message.reply_text(main_text, reply_markup=reply_markup)
return FIRST
def main_menu(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
reply_markup = InlineKeyboardMarkup(main_keyboard)
query.edit_message_text(main_text, reply_markup=reply_markup)
return FIRST
def menu_handler(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
if query.data == 'phone':
query.edit_message_text('👈 شماره همراه خود را وارد کنید:')
return FIFTH
if query.data == 'increase' or query.data == 'decrease':
context.user_data['wallet_action'] = query.data
reply_markup = InlineKeyboardMarkup(keyboards["wallet_action"][0])
query.edit_message_text(keyboards["wallet_action"][1], reply_markup=reply_markup)
return SIXTH
reply_markup = InlineKeyboardMarkup(keyboards[query.data][0])
if query.data == 'market':
query.edit_message_text('در حال ارزیابی قیمت ها ... ', reply_markup=reply_markup)
c = Currency.Currency()
c.get_prices()
query.edit_message_text(c.post_reporter(), reply_markup=reply_markup)
elif query.data == 'cash' or query.data == 'crypto':
context.user_data['d_type'] = query.data
query.edit_message_text(keyboards[query.data][1], reply_markup=reply_markup)
return SECOND
elif query.data == 'rules':
query.edit_message_text(keyboards[query.data][1], reply_markup=reply_markup)
else:
query.edit_message_text(keyboards[query.data][1], reply_markup=reply_markup)
return FIRST
def deal_handler(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
keyboard = [
[
InlineKeyboardButton("💎خرید از ما ", callback_data='buy'),
InlineKeyboardButton("💎فروش به ما ", callback_data='sell'),
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
if query.data == 'deal':
reply_markup = InlineKeyboardMarkup(keyboards[query.data][0])
query.edit_message_text(keyboards[query.data][1], reply_markup=reply_markup)
return FIRST
else:
query.edit_message_text(
text="منوی خرید و فروش", reply_markup=reply_markup)
context.user_data['currency'] = query.data
return THIRD
def wallet_handler(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
if query.data == 'wallet':
reply_markup = InlineKeyboardMarkup(keyboards[query.data][0])
query.edit_message_text(keyboards[query.data][1], reply_markup=reply_markup)
return FIRST
else:
query.edit_message_text(
text="👈 لطفا، مقدار را با اعداد وارد نمایید: ")
context.user_data['currency'] = query.data
return THIRD
def amount(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
context.user_data['T_type'] = query.data
c = Currency.Currency()
if context.user_data['d_type'] == 'crypto':
if query.data == 'buy':
query.edit_message_text(text="در حال محاسبه قیمت")
c.get_prices()
c.minimum_calc()
query.edit_message_text(text=c.minimum_reporter(context.user_data['currency'], True))
context.user_data['unit'] = c.min_prices[context.user_data['currency']]
else:
query.edit_message_text(text="در حال محاسبه قیمت")
c.get_prices()
c.minimum_calc()
query.edit_message_text(text=c.minimum_reporter(context.user_data['currency'], False))
context.user_data['unit'] = c.min_prices[context.user_data['currency']]
return THIRD
def transaction(update: Update, context: CallbackContext) -> None:
print(update.message.text, context.user_data)
unit = float(unidecode.unidecode(update.message.text))
unit = int(unit * context.user_data['unit'][0])
keyboard = [
[
InlineKeyboardButton(" تایید ", callback_data='yes'),
InlineKeyboardButton(" لغو ", callback_data='no'),
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
c_type = context.user_data['currency']
if context.user_data['unit'][2] < unit:
update.message.reply_text(
text=f'هزینه معامله ارز {persian[c_type]} شما برابر : {Currency.separator_int(unit)} تومان',
reply_markup=reply_markup)
context.user_data['unit'] = unit
else:
update.message.reply_text(text='مقدار وارد شده کمتر از حد معاملات است',
reply_markup=reply_markup)
return FORTH
def make_deal(update: Update, context: CallbackContext) -> None:
# need to be continued
print(context.user_data)
pass
def look_up(username):
global all_users
if username in all_users:
return True
else:
return False
def authenticate(update: Update, context: CallbackContext) -> None:
code = unidecode.unidecode(update.message.text)
print(code)
if code == context.user_data['v_code']:
print('User Authenticated')
update.message.reply_text('✅ شماره ی شما با موفقیت تایید و ثبت شد')
return FIRST
else:
print('User Not Authenticated')
update.message.reply_text('❌ کد ارسالی مطابقت ندارد ❌')
return SECOND
def sign_up(update: Update, context: CallbackContext) -> None:
person = update.message.from_user
phone = unidecode.unidecode(update.message.text)
print("Phone of {}: {}".format(person.first_name, phone))
keyboard = [
[
InlineKeyboardButton("📥ارسال مجدد", callback_data='retry'),
InlineKeyboardButton("↩️بازگشت", callback_data='account')
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text("پس از دریافت پیامک کد تایید وارد نمایید: ", reply_markup=reply_markup)
sms_api = SMS()
code = sms_api.send(update.message.text)
context.user_data['v_code'] = code
context.user_data['phone'] = phone
return FIFTH
def wrong_input(update: Update, context: CallbackContext) -> None:
update.message.reply_text(text="در وارد کردن یا صحت شماره خطایی صورت گرفته فرآیند لغو گردید")
return FIRST
def start_over(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
reply_markup = InlineKeyboardMarkup(main_keyboard)
query.edit_message_text(text="معامله خود را شروع کنید", reply_markup=reply_markup)
return FIRST
def cash(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
print('ARZ HAVALEH ', query.data)
keyboard = [
[
InlineKeyboardButton("دلار", callback_data='dollar'),
InlineKeyboardButton("یورو", callback_data='euro'),
InlineKeyboardButton("پوند", callback_data='pond')
],
[
InlineKeyboardButton("یوان", callback_data=str('yuan')),
InlineKeyboardButton("لیر", callback_data=str('leer'))
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
query.edit_message_text(
text="برای ادامه معامله ارز مورد نظر را انتخاب کنید", reply_markup=reply_markup
)
return THIRD
def crypto(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
print('ارز دیجیتال مورد نظر خود را انتخاب کنید: ', query.data)
keyboard = [
[
InlineKeyboardButton("Bitcoin", callback_data=currency_name['Bitcoin']),
InlineKeyboardButton("Ethereum", callback_data=currency_name['Ethereum']),
InlineKeyboardButton("Monero", callback_data=currency_name['Monero']),
],
[
InlineKeyboardButton("Dash", callback_data=currency_name['Dash']),
InlineKeyboardButton("Litecoin", callback_data=currency_name['Litecoin']),
InlineKeyboardButton("Tether", callback_data=currency_name['Tether']),
],
[
InlineKeyboardButton("Cardano", callback_data=currency_name['Cardano']),
InlineKeyboardButton('TRON', callback_data=currency_name['TRON'])
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
query.edit_message_text(
text="اارز دیجیتال مورد نظر خود را انتخاب کنید: ", reply_markup=reply_markup
)
return THIRD
def other(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
print('other: ', query.data)
keyboard = [
[
InlineKeyboardButton("خرید از ما ", callback_data='buy'),
InlineKeyboardButton("فروش به ما ", callback_data='sell'),
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
query.edit_message_text(
text="منوی خرید و فروش", reply_markup=reply_markup
)
context.user_data['currency'] = query.data
return THIRD
def end(update: Update, context: CallbackContext) -> None:
query = update.callback_query
query.answer()
query.edit_message_text(text='به امید دیدار')
return ConversationHandler.END
def main():
updater = Updater("1441929878:AAF7R_YIbI9y3hQdGyyeyWUv4LYELA0TOho")
dispatcher = updater.dispatcher
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
FIRST: [
CallbackQueryHandler(main_menu, pattern='^' + 'main' + '$'),
CallbackQueryHandler(menu_handler, pattern='^' + '.+' + '$'),
],
SECOND: [
CallbackQueryHandler(deal_handler, pattern='^' + '.+' + '$')
],
THIRD: [
CallbackQueryHandler(amount, pattern='^buy$|^sell$'),
MessageHandler(Filters.regex('^.+$'), transaction),
],
FORTH: [
CallbackQueryHandler(make_deal, pattern='^yes$'),
CallbackQueryHandler(end, pattern='^no$'),
],
FIFTH: [
                MessageHandler(Filters.regex(r'(^(\+98)\d{10}$)|(^\d{11}$)'), sign_up),
                MessageHandler(Filters.regex(r'^\d{1,5}$'), authenticate),
                MessageHandler(Filters.regex(r'^.+$'), wrong_input)
],
SIXTH: [
CallbackQueryHandler(wallet_handler, pattern='^' + '.+' + '$')
]
},
fallbacks=[CommandHandler('start', start)],
)
dispatcher.add_handler(conv_handler)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
"""CallbackQueryHandler(test, pattern='^' + 'wallet' + '$'),
CallbackQueryHandler(test, pattern='^' + 'market' + '$'),
CallbackQueryHandler(test, pattern='^' + 'recommend' + '$'),
CallbackQueryHandler(test, pattern='^' + 'account' + '$'),
CallbackQueryHandler(test, pattern='^' + 'rules' + '$'),
CallbackQueryHandler(test, pattern='^' + 'service' + '$'),
CallbackQueryHandler(other, pattern='^.*$'),
MessageHandler(Filters.regex('^\d{11}\d*$'), sign_up),
MessageHandler(Filters.regex('^\d{1,5}$'), authenticate)
"""
|
from si7021 import getTempC, getHumidity
print("***Test SI7021 Sensor***")
print ("Temperature in Celsius is : %.2f C" %getTempC())
print ("Relative Humidity is : %.2f %%" %getHumidity())
|
import tkinter
import tkinter as tk
from tkinter import *
from tkinter import messagebox
#Properties for the window/canvas
window = Tk()
window.title("Login Screen")
window.geometry("200x200")
#Creating the login screen
lbl = Label(window, text="Please Login to Continue", font=("Arial Bold", 10))
lbl.grid(column=0, row=0)
#Username and Password
Username = ("Will")
Password = ("Will")
def clicked():
print("Trying to login...")
if UsernameText.get() == Username and PasswordText.get() == Password:
messagebox.showinfo("Success", "You are now Logged in!")
else:
messagebox.showerror("Warning","Username or Password is Incorrect")
#Login Button
btn = Button(window, text="Login", bg="black", fg="white", command=clicked)
btn.place(relx=0.5, rely=0.9, anchor=CENTER)
#Username
lbl = Label(window, text = "Username", font=("Arial Bold", 10))
lbl.place(relx=0.18, rely=0.35, anchor=CENTER)
UsernameText = txt = Entry(window,width=10)
txt.place(relx=0.5, rely=0.35, anchor=CENTER)
#Password
lbl = Label(window, text = "Password", font=("Arial Bold", 10))
lbl.place(relx=.17, rely=0.5, anchor=CENTER)
PasswordText = txt = Entry(window,width=10, show="*")
txt.place(relx=.5, rely=0.5, anchor=CENTER)
window.mainloop()
|
# 一. 类型和运算
# 1 --简单的列出对象obj所包含的方法和名称, 返回一个字符串列表
# print(dir(obj))
# 查询obj.func的具体介绍和方法
# help(obj.func)
# 2--测试类型的三种方法
# L = list()
# if type(L) == type([]):
# print("L is list")
#
# if type(L) == list:
# print("L is list")
#
# if isinstance(L, list):
# print("L is list")
# 3--python数据类型: 哈希类型, 不哈希类型
# 哈希类型, 即在原地不能改变的变量类型, 不可变类型. 函数hash()查看其hash值, 也可以作为字典的key
# "数字类型: int, float, decimal.Decimal, fractions.Fraction, complex"
# import decimal
# a = decimal.Decimal(12.5) # Decimal常用于金融,货币
# print(a, type(a))
# import fractions
# a = fractions.Fraction(12.5) # 分数
# print(a, type(a))
# print(complex(1,2.1))
# print(complex("1"))
# print(complex("1+2.1j"))
# "字符串类型: str, bytes"
# "元组: tuple"
# "冻结类型: frozenset"
# "布尔类型: True, False"
# "None"
# 不可hash类型: 原地可变类型, list, dict和set. 它们不可作为字典的key
# 4--数字常量
# 整数: 1234, -1234, 0, 9999999
# 浮点数: 1.23, 1., 3.14e-10(表示3.14乘以10的-10次方), 4E210(表示4乘以10的210次方), 4e+210(表示4乘以10的210次方),
# 八进制, 十六进制, 二进制 0o177, 0x9ff, 0X9FF, 0b101010
# 复数常量, 也可以用complex(real, image)来创建 3+4j, 3.0+4.0j, 3J, 4j
# 将十进制数转化为十六进制, 八进制, 二进制 hex(100), oct(100), bin(100)
# 将字符串转化为整数,base为进制数 int(string, base)
# print(int('100',16))
# 正无穷, 负无穷, 非数, 非数a!=a为True, 0乘以无穷等于nan, 正无穷+负无穷等于nan, Numpy里用列表表示分数分母填0转换过去是inf float('inf'), float('-inf'), float('nan')
# if 0 > float('-inf'): print(True)
# if 0 < float('inf'): print(True)
# print(0 * float('nan'))
# a = float('nan')
# print(a!=a)
# print(0*float('inf'))
# print(float('inf')+float('-inf'))
# 5--数字的表达式操作符
# yield x # 生成器函数发送协议
# lamda args: expression # 生成匿名函数
# x if y else z # 三元表达式
# x and y, x or y, not x # 逻辑与, 逻辑或, 逻辑非
# x in y, x not in y # 成员对象测试
# x is y, ix is not y # 对象实体测试
# x<y, x<=y, x>y, x>=y, x==y, x!=y # 大小比较, 集合子集或超集值相等性操作符
# a = {1,2,3}
# b = {1,2,3,4}
# print(a <= b) # 集合子集
# print(1<2<3) # python中允许连续比较
# x|y, x&y, x^y # 位或, 位与, 位异或
# a = 0b00000000
# b = 0b11111111
# print(a|b)
# print(a&b)
# print(a^b)
# x<<y, x>>y # 位操作:x左移/右移y位
# x = 0b00000001
# y = 0b10000000
# print(x<<7==y)
# +, -, *, /, //, %, ** # 加减乘除取整取余幂运算
# -x, +x, ~x # 一元减法, 识别, 按位求补
# x = 0b10000000
# print(-x)
# print(+x)
# print(~x)
# 取反函数
# foo = lambda x: ~x+1
# print(foo(5))
# x[i], x[i:j:k] # 索引, 切片
# int(3.14), float(3) # 强制类型转换
# 6--整数可以利用bit_length函数测试所占的位数
# a = 1
# print(a.bit_length())
# a = 1024
# print(a.bit_length())
# 7--repr和str显示格式的区别
"""
repr格式: 默认的交互模式的回显, 产生的结果看起来它们就像是代码
str格式: 打印语句, 转换成一种对用户更加友好的格式
"""
# a = "hello"
# print(a)
# print(repr(a))
# 8--数字相关的模块
# math模块
# Decimal模块: 小数模块
# from decimal import Decimal,getcontext
# getcontext().prec = 3 # 设置精度, 最大为3
# value = Decimal(1) / Decimal(3)
# print(value)
# Fraction模块: 分数模块
# from fractions import Fraction
# value = Fraction(4,6)
# value2 = Fraction(0.25)
# print(value,value2)
# 9--集合set
"""
set是一个无序不重复元素集, 基本功能包括关系测试和消除重复元素
set支持union(联合),intersection(交),difference(差)和symmetric difference(对称差集)等数学运算
set支持x in set, len(set), for x in set
set不记录元素位置或者插入点, 因此不支持indexing,slicing,或其它类序列的操作
"""
# s = set([3,5,9,10,"h"]) # 传入的是可迭代对象就ok
# print(s)
# t = set("hello")
# print(t)
# a = t | s # 并集, 等价于t.union(s)
# print(a)
# print(t.union(s))
# b = t & s # 交集, 等价于t.intersection(s)
# print(b)
# print(t.intersection(s))
# c = t - s # 求差集, t不在s中的项, 等价于t.difference(s)
# print(c)
# print(t.difference(s))
# d = t ^ s # 对称差集(项在t或s中, 但不会同时出现在二者中), 等价于t.symmetric_difference(s)
# print(d)
# print(t.symmetric_difference(s))
# t.add('x'), t.remove('x') # 增加/删除一个item
# s.update([10,37,42]) # 利用[......]更新集合
# print(s)
# x in s, x not in s # 集合是否存在某个值
# print(s.issubset(t)) # s<=t, 测试是否s中的每一个元素都在t中
# print(s.issuperset(t)) # s>=t, 测试是否t中的每一个元素都在s中
# s.copy() # 拷贝
# x = 3
# s.discard(x) # 删除s中的x
# print(s)
# s.clear() # 清空s
# {x**2 for x in [1,2,3,5]}
# {x for x in 'spam'}
# 10--集合fronzenset, 不可变对象
"""
set是可变对象, 即不存在hash值, 不能作为字典的值. 同样的还有list等(tuple是可以作为字典的key的)
frozenset是不可变对象, 即存在hash值, 可作为字典的键值
frozenset对象没有add, remove等方法, 但有union/intersection/difference/等方法
"""
# a = set([1,2,3])
# b = set()
# b.add(a) # 报错, set是不可哈希类型
# b.add(frozenset(a)) # ok, 将set变为frozenset, 可哈希
# print(b)
# 11--布尔类型bool
# type(True)
# isinstance(False,int)
# True == 1; True is 1; 输出(True, False)
# 12--动态类型简介
"""
变量名通过引用, 指向对象
python中的"类型"属于对象, 而不是变量, 每个对象都包含有头部信息,比如:"类型标示符","引用计数器等"
"""
# 共享引用及在原处修改: 对于可变对象, 要注意尽量不要共享引用
# 共享引用和相等测试:
# L = [1]
# M = [1]
# print(L is M) # 返回False
# L = M = [1,2,3]
# print(L is M) # 返回True, 共享引用
# 增强赋值和共享引用:普通+号会生成新的对象, 而增强赋值+=会在原处修改
# L = M = [1,2]
# L = L + [3,4] # +号进行运算, 运算完成后生成个新对象, L在引用这个对象
# print(L,M) # L = [1,2,3,4], M = [1,2]
# L += [3,4] # += 不生成新对象, 直接对原对象操作, 也不会重新进行引用
# print(L,M) # L =[1,2,3,4], M = [1,2,3,4]
# 13--常见字符串常量和表达式
# S = '' # 空字符串
# S = "spam's" # 双引号和单引号相同
# S = "s\np\ta\x00m" # 转移字符, \x00表示空格
# S = r'\temp' # Raw字符串, 不会进行转移, 抑制转义
# S = b'Spam' # python3中的字节字符串
# S = u'spam' # python2中的Unicode字符串
# s1+s2, s1*3, s[i], s[i:j], len(s) # 字符串操作
# s = 'a %s parrot' %'kind' # 字符串格式化表达
# s = 'a {1} {0} parrot'.format('kind','red') #字符串格式化方法
# print(s)
# for x in s:print(x) # 字符串迭代, 成员关系
# [x*2 for x in s] # 字符串列表解析
# s = ",".join(['a','b','c']) # 字符串输出, 结果: a,b,c
# print(s)
# 14--内置str处理函数
str1 = "string object"
str1.upper(), str1.lower(), str1.swapcase(), str1.capitalize(), str1.title() # 全部大写, 全部小写, 大小写转换, 首字母大写, 每个单词的首字母大写 str是不可变类型, 生成新对象, 需要变量引用它
# str1.ljust(width) # 获取固定长度, 左对齐, 右边不够用空格补齐
# str1.rjust(width) # 获取固定长度, 右对齐, 左边不够用空格补齐
# str1.center(width) # 获取固定长度, 中间对齐, 两边不够用空格补齐
# str1.zfill(width) # 获取固定长度, 右对齐, 左边不够用0补齐
# print(str1.ljust(30)+'end')
# print(str1.rjust(30))
# print(str1.center(30))
# print(str1.zfill(30))
# str1.find('t',start,end) # 查找字符串, 可指定起始及结束位置搜索
# str1.rfind('t') # 从右边开始查找字符串
# 上面所有的方法都可以用index代替, 不同的是使用index查找不到会抛异常, 而find返回-1
# str1.count('t') # 查找字符串出现的次数
# str1.replace('old','new') # 替换函数, 替换old为new
# str1.strip() # 默认首尾删除空白符, 左边和右边都删
# s = " a hello b "
# print(s.strip()+'a')
# s = "helllllllllllllllo hello qweqwqhelllllllll"
# s = "qwertyuiophhhheeeelllll"
# print(s.strip('hel')) # 删除字符串中开头和结尾指定的字符串, 不删除中间; 最后一个可重复? 对, 重复任意次数
str1.lstrip() # 去除左边的空格
str1.lstrip('d') # 删除str1字符串左边的字符串
str1.rstrip() # 去除右边的空格
str1.rstrip('d') # strip the given characters from the right side of str1
str1.startswith("start") # 是否以start开头
str1.endswith("end") # 是否以end结尾
str1.isalpha(), str1.isalnum(), str1.isdigit(), str1.islower(), str1.isupper() # 判断字符串是否全为字母, 字母或数字(不包含下划线), 数字, 小写, 大写
# 数字分为Unicode数字, byte数字, 全角数字, 罗马数字, 汉字数字, 小数
str1.isdigit(), str1.isdecimal(), str1.isnumeric() # 判断是否全是数字 # 其中, 全角数字全为True, 小数全为False
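# A few concrete checks (added for illustration, in the spirit of the commented examples elsewhere in these notes):
# '１２３'.isdigit(), '１２３'.isdecimal(), '１２３'.isnumeric()   # full-width digits -> True, True, True
# '四'.isdigit(), '四'.isdecimal(), '四'.isnumeric()             # Chinese numeral  -> False, False, True
# '3.14'.isdigit()                                               # decimals         -> False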
# 15--三重引号编写多行字符串块, 并且在代码折行出嵌入换行字符\n
# mantra = """
# hello world
# hello python
# hello my friend
# """
# print(repr(mantra))
# 16--索引和切片:
# S[0], S[len(S)-1], S[-1] # 索引
# S[1:3], S[1:], S[:-1], S[1:10:2] # 分片, 第三个参数指定步长
# 17--字符串转换工具:
int('42'), str(42) # 返回42, '42'
float('4.13'), str(4.13) # 返回4.13, '4.13'
ord('s'), chr(115) # 返回115, 's' # ASCII编码
int('1001',2) # 将字符串作为二进制数编码, 转换为数字, 返回9, 前面必须是字符串格式
bin(13), oct(13), hex(13) # 将整数转换为二进制, 八进制, 十六进制字符串
# 18--另类字符串连接
# name = "wang" "hong" # 单行, name = "wanghong"
name = "wang"\
"hong" # 多行, name = "wanghong", \的行, 后面不能加东西, 比如注释和空格
# print(name)
# 19--python中的字符串格式化实现 字符串格式化表达式
"""
基于C语言的'print'模型, 并且在大多数的现有的语言中使用
通用结构: %[(name)][flag][width].[precision]typecode
"""
# s = "this is %d %s" % (1,'dead')
# s = "%s----%s----%s" % (42, 3.14, [1,2,3])
# s = "%d...%6d...%-6d...%06d" % (1234, 1234, 1234, 1234)
# f = 1.23456789
# s = "%e | %f | %g" %(f,f,f) # 浮点数字(科学计数法), 浮点数字(用小数点符号), 浮点数字(根据值自动确定用小数点还是科学计数法)
# s = "%c"%123 # 把ASCII码转换成字符, 然后输出
# s = "%(name1)d---%(name2)s" % {"name1":23, "name2":"value2"} # 基于字典的格式化表达式
# def foo():
# name = "zhangsan"
# age = 18
# print(vars()) # vars()函数返回一个字典, 包含了所有本函数调用时存在的变量(包括形参)
# s = "%(name)s is %(age)d" % vars()
# print(s)
# foo()
# 20--python中的字符串格式化实现 字符串格式化调用方法
# 普通调用
"{0},{1} and {2}".format('spam','ham','eggs') # 基于位置的调用
"{motto} and {pork}".format(motto="spam",pork="ham") # 基于key的调用
"{motto} and {0}".format('ham',motto="spam") # 混合调用
# 添加键 属性 偏移量(import sys)
# import sys
# s = "my {1[spam]} runs {0.platform}".format(sys,{"spam":"laptop"}) # 基于key的键和属性
# print(s)
# print(sys.platform) # 系统类型
# s = "first = {0[0]}, second = {0[1]}".format(['A','B','C'])
# print(s)
# 具体格式化
# s = "{0:e}, {1:.3e}, {2:g}".format(3.14159, 3.14159, 3.14159) # 输出'3.141590e+00, 3.142e+00, 3.14159'
# print(s)
# "fieldname:format_spec".format(......)
# 说明
"""
fieldname是指定参数的一个数字或关键字, 后面可跟可选的".name"或"[index]"成分引用
fill ::= <any character> #填充字符
align ::= "<" | ">" | "=" | "^" #对齐方式
sign ::= "+" | "-" | " " #符号说明
width ::= integer #字符串宽度
precision ::= integer #浮点数精度
type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"
"""
# 例子
# s = "={0:10}={1:10}".format("spam","123.456") # 指定长度
# s = "={0:>10}=".format('test') # 右对齐
# s = "={0:<10}=".format('test') # 左对齐
# s = "={0:^10}=".format('test') # 居中对齐
# s = "{0:x},{1:o},{2:b}".format(255, 255, 255) # 十六进制, 八进制, 二进制
# s = "My name is {0:{1}}.".format("Luke",8) # 动态指定长度
# print(s)
# 21--常用列表常量和操作
# L = [[1,2], 'string', {}] # 嵌套列表
# L = list('spam') # 列表初始化
# L = list(range(0, 4)) # 列表初始化 # 只要是可迭代的, 都可以用list初始化吧
# L = list({"A":90,"B":80,"C":70}.items())
# print(L)
# L = list(map(ord,'spam')) # 列表解析
# len(L) # 求列表长度
# L.count(value) # 求列表中某个值的个数
# L.append(obj) # 向列表尾部添加数据
# L.insert(index, obj) # 向列表的指定index位置添加数据,index及其之后的数据右移
# L.extend(interable) # 通过添加interable中的元素来扩展列表
# L = [1,2,3].extend("hello")
# L.index(value, [start, [stop]]) # 返回列表中value值的第一个索引
# L.pop(index) # 删除并返回index处的元素, 默认为删除并返回最后一个元素
# L.remove(value) # 删除列表中的value值, 只删除第一次出现的value的值
# L.reverse() # 翻转列表
# L.sort(cmp=None,key=None,reverse=False) # 排序列表
# a = [1, 2, 3],
# b = a[10:] # 注意, 这里不会引发IndexError异常, 只会返回一个空的列表[]
a = []
a += [1] # 在原有列表的基础上进行操作, 即列表的id没有改变
# a = []
# a = a + [1] # 构建了一个新的列表, a引用新的列表, 列表id发生了改变
# 22--用切片来删除序列的某一段
a = [1,2,3,4,5,6,7]
a[1:4] = []
a = [0,1,2,3,4,5,6,7]
del a [::2] # 去除偶数项(偶数索引的)
# 23--常用字典常量和操作
# D = {}
# D = {"spam":2, "tol":{"ham":1}} # 嵌套字典
# D = dict.fromkeys(['s','d'],[8,9]) # {"s":8, "d":"9"}
# D = dict.fromkeys(['s','d'],8) # {"s":8, "d":"8"}
# D = dict(name="tom", age="12")
# D.keys(), D.values(), D.items() # 字典键, 值, 以及键值对
# D.get(key, default) # get函数通过键去获取键值, 如果获取不到返回default
# D = {"a":90, "b":80, "c":70}
# D_other = {"a":90, "b":80, "c":60}
# D.update(D_other) # 合并字典, 如果存在相同的键值, D_other会覆盖掉数据
# D.pop("key",ret) # 删除字典中键值为key的项, 并返回其键值, 如果不存在此键, 返回ret
# D.popitem() # pop字典中随机的一项
# ret = D.setdefault("d",60) # 如果键d存在, 返回60; 如果键d不存在, 设置键d的值为60, 并返回60
# del D # 删除字典
# del D['key'] # 删除字典的某一项
# if key in D: if key not in D: # 测试字典键是否存在
# 字典注意事项: 1. 对新索引会添加一项 2. 字典键不一定非得是字符串, 也可以为任何的不可变对象
# 不可变对象: 调用对象自身的任意方法, 也不会改变该对象自身的内容,这些方法会创建新的对象并返回
# D[(1,2,3)] = 2 # tuple作为字典的key
# 24--字典解析
# D = {k:8 for k in ['s','d']}
# D = {k:v for (k,v) in zip(['name',"age"], ["tom",12])}
# 25--字典的特殊方法__missing__: 当查找不到该key时, 会执行该方法
# class dict(dict):
# def __missing__(self, key):
# return "hahaha"
# d = dict()
# print(d["foo"])
# 26--元组和列表的唯一区别在于元组是不可变对象, 列表是可变对象
# a = [1,2,3] # a[1] = 0, OK
# a = (1,2,3) # a[1] = 0, Error
# a = ([1,2],) # a[0][1] = 0, OK # 没有逗号, 元组只有一个元素, 不能索引
# a = [(1,2)] # a[0][1] = 0, Error
# 27--元组的特殊用法: 逗号和圆括号
# D = (12) # 此时D为一个整数, 即D = 12
# D = (12,) # 此时D为一个元组, 即D = (12,) len(D)仍然为1
# 28--文件的基本操作
# fw = open(r'C:\Luke\spam.html',"w")
# fw.write("hello\nworld\nhahahaha")
# fw.close()
# fr = open(r'C:\Luke\spam.html',"r")
# print(fr.read())
# fr.close()
# output = open(r'C:\Luke\spam.html',"w") # 打开输出文件, 用于写,
# output.write("hello\nqweqwe")
# input = open('C:\Luke\spam.html','r') # 打开输入文件, 用于读
# 写文件不存在的话默认新建, 读文件不存在的话会报错. 打开的方式可以为 "r", "w", "a", "rb", "wb", "ab"等
# fp = open('C:\Luke\spam.html','r')
# fp.read(size) # size为读取的长度, 以byte为单位, 一个字符占用一个byte, r和rb都是这样
# fr.readline(size) # 读取第一行前size个字符
# fr.readlines(size) # 把文件的每一行作为list的一个元素, 返回这个list. 它的内部是通过循环调用readline()来实现的, 如果提供size参数, size是表示读取内容的总长
# fr.readable() # 是否可读
# fw.write(str) # 把str写到文件中, write()并不会在str后面加上一个换行符
# fw.writelines(seq) # 把sql的内容全部写到文件中(多行一次性写入)
# fw.writable() # 是否可写, 注意:没有e
# fr.close() # 关闭文件
# fw.flush() # 把缓冲区的内容写入硬盘
# fw.fileno() # 返回一个长整型的"文件标签", 一个数字
# s = fr.isatty() # 文件是否是一个终端设备文件(unix系统中的)
# fr.tell() # 返回文件操作标记的当前位置, 以文件的开头为原点
# 文件操作标记, 光标位置, 之能用于读, 不能用于写, 也不能追加
# fr.next() # 返回下一行, 并将文件操作标记位移到下一行. 把file用于 for .. in file这样的语句时, 就是调用next()函数来实现遍历的
# fr.seek(offset, whence) # 将文件打开操作标记移到offset位置, whence为0表示从头开始计算, 1表示以当前位置为原点计算, 2表示以文件末尾为原点进行计算
# fr.seek(0,0)
# fr.seekable() # 是否可以seek
# fr.truncate(size) # 将文件裁成规定的大小, 默认是裁到当前文件操作标记的位置
# for line in open('data'): # 使用for语句, 比较适用于打开比较大的文件
# print(line)
# with open('data') as file:
# print(file.readline()) # 使用with语句, 可以保证文件关闭
# with open('data') as f:
# lines = f.readlines() # 一次性读入文件所有行, 并关闭文件
# open('f.txt', encoding="latin-1") # python3.x Unicode文本文件
# open('f.bin','rb') # python3.x 二进制bytes文件
# 29--其它
# python中的真假值含义: 1. 数字如果非零, 则为真; 0为假. 2. 其它对象如果非空, 则为真
# 通常意义下的类型分类: 1.数字, 序列, 映射 2. 可变类型和不可变类型
# id()和hash()
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
fruits = ['mango', 'kiwi', 'strawberry', 'guava', 'pineapple', 'mandarin orange']
numbers = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 17, 19, 23, 256, -8, -4, -2, 5, -9]
# Example for loop solution to add 1 to each number in the list
numbers_plus_one = []
for number in numbers:
numbers_plus_one.append(number + 1)
# Example of using a list comprehension to create a list of the numbers plus one.
numbers_plus_one = [number + 1 for number in numbers]
# Example code that creates a list of all of the list of strings in fruits and uppercases every string
output = []
for fruit in fruits:
output.append(fruit.upper())
# In[4]:
# Exercise 1 - rewrite the above example code using list comprehension syntax. Make a variable named uppercased_fruits to hold the output of the list comprehension. Output should be ['MANGO', 'KIWI', etc...]
uppercased_fruits = [fruit.upper() for fruit in fruits]
uppercased_fruits
# In[51]:
# Exercise 2 - create a variable named capitalized_fruits and use list comprehension syntax to produce output like ['Mango', 'Kiwi', 'Strawberry', etc...]
capitalized_fruits = [fruit.capitalize()
for fruit in fruits]
capitalized_fruits
# In[50]:
# Exercise 3 - Use a list comprehension to make a variable named fruits_with_more_than_two_vowels. Hint: You'll need a way to check if something is a vowel.
fruit1 = 'guava'
len([letter
for letter in fruit1
if letter in 'aeiou'])
# In[20]:
fruits_with_more_than_two_vowels = [fruit for fruit in fruits if len([letter for letter in fruit if letter in 'aeiou']) > 2]
# In[21]:
fruits_with_more_than_two_vowels
# In[23]:
# Exercise 4 - make a variable named fruits_with_only_two_vowels. The result should be ['mango', 'kiwi', 'strawberry']
[fruit
for fruit in fruits
if len([letter for letter in fruit if letter in 'aeiou']) == 2]
# In[31]:
# Exercise 5 - make a list that contains each fruit with more than 5 characters
[fruit
for fruit in fruits
if len(fruit) > 5]
# In[33]:
# Exercise 6 - make a list that contains each fruit with exactly 5 characters
[fruit
for fruit in fruits
if len(fruit) == 5]
# In[54]:
# Exercise 7 - Make a list that contains fruits that have less than 5 characters
[fruit
for fruit in fruits
if len(fruit) <5]
# In[43]:
# Exercise 8 - Make a list containing the number of characters in each fruit. Output would be [5, 4, 10, etc... ]
numbers
# In[44]:
[len(fruit)
for fruit in fruits]
# In[47]:
# Exercise 9 - Make a variable named fruits_with_letter_a that contains a list of only the fruits that contain the letter "a"
fruits_with_letter_a = [fruit
for fruit in fruits
if 'a' in fruit]
# In[48]:
fruits_with_letter_a
# In[66]:
# Exercise 10 - Make a variable named even_numbers that holds only the even numbers
fruits_with_letter_a = [num
for num in numbers
if num % 2 == 0]
# In[55]:
# Exercise 11 - Make a variable named odd_numbers that holds only the odd numbers
odd_numbers = [num
for num in numbers
if num % 2 == 1]
# In[56]:
# Exercise 12 - Make a variable named positive_numbers that holds only the positive numbers
positive_numbers = [num
for num in numbers
if num > 0]
# In[65]:
# Exercise 13 - Make a variable named negative_numbers that holds only the negative numbers
negative_numbers = [num
for num in numbers
if num < 0]
# In[67]:
# Exercise 14 - use a list comprehension w/ a conditional in order to produce a list of numbers with 2 or more numerals
[num
for num in numbers
if not (-10 < num < 10)]
# In[91]:
# Exercise 15 - Make a variable named numbers_squared that contains the numbers list with each element squared. Output is [4, 9, 16, etc...]
numbers_squared = [num ** 2 for num in numbers]
# In[79]:
numbers_squared
# In[80]:
# Exercise 16 - Make a variable named odd_negative_numbers that contains only the numbers that are both odd and negative.
odd_negative_numbers = [num for num in numbers if num < 0 and num % 2 == 1]
# In[81]:
odd_negative_numbers
# In[82]:
# Exercise 17 - Make a variable named numbers_plus_5. In it, return a list containing each number plus five.
numbers_plus_5 = [num + 5
for num in numbers]
# In[83]:
numbers_plus_5
# In[ ]:
|
import imp
import json
import logging
import time
from slackclient import SlackClient
from .brain import Brain
from .listener import Listener
from .listener import ListenerType
from .message import Message
from .repl import EspressoConsole
from .user import User
from .plugin_api import PluginAPI
class Espresso(PluginAPI, object):
"""The bot's main class, responsible for event loop, callbacks, and messaging connection.
The plugin API's decorators are mixed in to this class.
"""
def __init__(self, config):
self.config = config
self.api_token = config['api_token']
self.debug = config['debug']
self.slack_client = None
self.listeners = []
self.user = None
self.brain = Brain(config['brainstate_location'])
def connect(self):
"""Connects to Slack.
Creates a new SlackClient with ``self.api_token``.
"""
# TODO: Refactor for abstract MessagingServices.
self.slack_client = SlackClient(self.api_token)
self.slack_client.rtm_connect() # connect to the real-time messaging system
slack_test = json.loads(self.slack_client.api_call('auth.test'))
self.user = User(slack_test['user_id'], slack_test['user'])
logging.info("I am @%s, uid %s", self.user.name, self.user.uid)
def load_plugins(self, plugins, plugindir):
"""Loads specified plugins from the specified plugin directory.
Args:
plugins: plugins to load
plugindir: directory to load plugins from
"""
if plugins is not None:
for plugin in plugins:
logging.debug('loading plugin %s from %s', plugin, plugindir)
file_descriptor, path, desc = imp.find_module(plugin, [plugindir])
try:
imp.load_module(plugin, file_descriptor, path, desc)
finally:
if file_descriptor:
file_descriptor.close()
def brew(self):
"""Run the bot.
Starts an infinite processing/event loop.
"""
logging.info("starting the bot")
self.connect()
self.load_plugins(self.config['plugins'], self.config['plugin_dir'])
if self.config['debug_console']:
espresso_console = EspressoConsole(locals())
espresso_console.interact()
while True:
for msg in self.slack_client.rtm_read():
logging.debug("Raw message: %s", msg)
if 'type' in msg:
if msg['type'] == 'message' and 'subtype' not in msg and not msg['user'] == self.user.uid:
message = Message(User(msg['user'],
self.slack_client.server.users.find(msg['user']).name),
self.slack_client.server.channels.find(msg['channel']),
msg['text'])
for listener in self.listeners:
listener.call(message)
# sleep for 1/10 sec to not peg the cpu
# with this basic async implementation
time.sleep(.1)
def add_listener(self, ltype, regex, function, options):
"""Adds a listener listening for something from the messaging system.
Args:
ltype: the type of the regex.
regex: a regex string identifying what to listen for.
function: the callback to call if the regex matches.
options: a dict of options to pass on.
"""
if ltype == ListenerType.heard:
self.listeners.append(Listener(self, regex, function, options))
elif ltype == ListenerType.heard_with_name:
regex = r"^(?:\<\@{uid}\>|{uname})\s*:?\s*".format(
uid=self.user.uid,
uname=self.user.name) + regex
self.listeners.append(Listener(self, regex, function, options))
logging.debug("Added listener of type %s with regex %s calling %s",
ltype, regex, function.__name__)
def send(self, message, channel):
"""Send a message to the messaging system."""
logging.debug("Send message %s to #%s", message, channel)
self.slack_client.server.channels.find(channel).send_message(message)
|
import asyncio
import time
from aiokafka import AIOKafkaConsumer, AIOKafkaClient
from aiokafka.conn import AIOKafkaConnection
from aiokafka.cluster import ClusterMetadata
from aiokafka.errors import ConnectionError
from kafka.errors import KafkaError
from typing import Dict
def is_connected(conns: Dict[str, AIOKafkaConnection]):
ok = 0
for conn in conns.items():
if conn[1].connected():
ok += 1
if ok == 0:
return False
return True
async def check_if_kafka_is_alive(my_client: AIOKafkaClient):
while 1:
conns: Dict[str, AIOKafkaConnection] = my_client.__getattribute__('_conns')
print(my_client._bootstrap_servers)
print('Host = ', my_client.hosts)
if not is_connected(conns):
print('RENEW CONNECTION')
try:
my_client.__setattr__('cluster', ClusterMetadata(metadata_max_age_ms=300000))
my_client.__setattr__('_topics', set())
my_client.__setattr__('_conns', {})
my_client.__setattr__('_sync_task', None)
loop = asyncio.get_event_loop()
my_client.__setattr__('_md_update_fut', None)
my_client.__setattr__('_md_update_waiter', loop.create_future())
my_client.__setattr__('_get_conn_lock', asyncio.Lock(loop=loop))
await my_client.bootstrap()
except ConnectionError:
pass
else:
for conn in conns.items():
print(conn)
print(conn[1].connected())
try:
if not conn[1].connected():
print('TRY RE CONNECTION')
await conn[1].connect()
if not conn[1].connected():
print('RENEW CONNECTION')
await my_client.bootstrap()
except Exception as err:
print(err)
await asyncio.sleep(1)
async def consume():
loop = asyncio.get_event_loop()
consumer = AIOKafkaConsumer('my_favorite_topic', bootstrap_servers='infra-cp-kafka', auto_offset_reset='earliest',
loop=loop)
await consumer.start()
# client = consumer.__getattribute__('_client')
# asyncio.ensure_future(check_if_kafka_is_alive(client))
try:
async for msg in consumer:
print(msg)
except KafkaError as err:
print(err)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
asyncio.ensure_future(consume())
try:
loop.run_forever()
except Exception:
loop.stop()
|
from django.urls import include, re_path
from rest_framework import routers
from .views import (
ElectionSubTypeViewSet,
ElectionTypeViewSet,
ElectionViewSet,
OrganisationViewSet,
)
class EERouter(routers.DefaultRouter):
def get_lookup_regex(self, viewset, lookup_prefix=""):
# we identify organisations by
# (organisation_type, official_identifier, start_date)
# but DRF doesn't do composite keys
# (much like the rest of django)
# so it needs a bit of... gentle persuasion
if viewset == OrganisationViewSet:
return r"(?P<organisation_type>[-\w]+)/(?P<official_identifier>[-\w]+)/(?P<date>\d{4}-\d{2}-\d{2})"
return super().get_lookup_regex(viewset, lookup_prefix)
router = EERouter()
router.register(r"elections", ElectionViewSet)
router.register(r"election_types", ElectionTypeViewSet)
router.register(r"election_subtypes", ElectionSubTypeViewSet)
router.register(r"organisations", OrganisationViewSet)
routes = router.get_urls()
urlpatterns = [
re_path(r"^", include(routes)),
re_path(
r"^organisations/(?P<organisation_type>[-\w]+)/$",
OrganisationViewSet.as_view({"get": "filter"}),
),
re_path(
r"^organisations/(?P<organisation_type>[-\w]+)\.(?P<format>[a-z0-9]+)/?$",
OrganisationViewSet.as_view({"get": "filter"}),
),
re_path(
r"^organisations/(?P<organisation_type>[-\w]+)/(?P<official_identifier>[-\w]+)/$",
OrganisationViewSet.as_view({"get": "filter"}),
),
re_path(
r"^organisations/(?P<organisation_type>[-\w]+)/(?P<official_identifier>[-\w]+)\.(?P<format>[a-z0-9]+)/?$",
OrganisationViewSet.as_view({"get": "filter"}),
),
]
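# For illustration (hypothetical identifiers): with the composite-key lookup above,
# a single organisation is addressed by a path such as
#   /organisations/local-authority/BIR/2016-04-01/
# while the extra filter routes registered here also answer the shorter forms
#   /organisations/local-authority/ and /organisations/local-authority/BIR/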
|
import pandas as pd
import math
def selectData(df, newData):
for i, rowAux in newData.iterrows():
#Initialize counts for each column in each Category
countRating=0
countSize=0
countInstalls=0
for j, row in df.iterrows():
if rowAux['Category'] == row['Category']:
if math.isnan(row['Rating'])==False:
rowAux['Rating']+=float(row['Rating'])
countRating += 1
if math.isnan(row['Installs'])==False:
rowAux['Installs'] += int(row['Installs'])
countInstalls += 1
if math.isnan(row['Size'])==False:
rowAux['Size'] += float(row['Size'])
countSize += 1
#Calculate the mean for each variable of a given Category
newData.iloc[i, newData.columns.get_loc('Rating')]=rowAux['Rating']/countRating
newData.iloc[i, newData.columns.get_loc('Installs')]=rowAux['Installs']/countInstalls
newData.iloc[i, newData.columns.get_loc('Size')]=rowAux['Size']/countSize
return newData
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
df = pd.read_csv('../dataset/pre-processed/cleanSize.csv')
d = {'Category': ['ART_AND_DESIGN', 'AUTO_AND_VEHICLES', 'BEAUTY', 'BOOKS_AND_REFERENCE', 'BUSINESS', 'COMICS', 'COMMUNICATION', 'DATING', 'EDUCATION', 'ENTERTAINMENT', 'EVENTS', 'FINANCE', 'FOOD_AND_DRINK', 'HEALTH_AND_FITNESS', 'HOUSE_AND_HOME', 'LIBRARIES_AND_DEMO', 'LIFESTYLE', 'GAME', 'FAMILY', 'MEDICAL', 'SOCIAL', 'SHOPPING', 'PHOTOGRAPHY', 'SPORTS', 'TRAVEL_AND_LOCAL', 'TOOLS', 'PERSONALIZATION', 'PRODUCTIVITY', 'PARENTING', 'WEATHER', 'MAPS_AND_NAVIGATION', 'NEWS_AND_MAGAZINES', 'VIDEO_PLAYERS'], 'Installs':0, 'Rating':0, 'Size':0}
newData = pd.DataFrame(data=d)
wData = selectData(df, newData)
wData.to_csv('../dataset/pre-processed/dataBubbleChart.csv')
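# Alternative sketch (illustration only, assuming the same column names): pandas'
# groupby computes the same per-category means in one vectorised call instead of
# the nested iterrows() loops above:
#
# means = df.groupby('Category')[['Installs', 'Rating', 'Size']].mean().reset_index()
# means.to_csv('../dataset/pre-processed/dataBubbleChart.csv')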
|
"""
Exercise 1
print(sum([1,2,3])/len([1,2,3]))
"""
"""
Exercise 2
i = input()
score = int(i)
if score < 0:
print(0)
else:
print(score)
"""
"""
i = input()
score = int(i)
print(max(0,score))
print(min(100,score))
"""
"""
i = input()
score = int(i)
#print(max(0,score))
print(min(100,max(0,score)))
"""
"""
def plus1(a):
return a + 1
result1 = plus1(100)
print(result1)
"""
"""
def zettai(x):
if x < 0:
return -x
else:
return x
print(zettai(-1))
print(zettai(100))
"""
"""
def mysum(vs):
total = 0
for x in vs:
total += x
return total
#values = [1,2,3]
values = eval(input("リスト:"))
print(f"total = {mysum(values)}")
"""
"""
def mymax(x,y):
if x > y:
return x
else:
return y
a = 200
b = 100
print(f"max={mymax(a,b)}")
"""
"""
def myprint(x,y):
for i in range(y):
print(x)
myprint("Hello",10)
"""
"""
def f1(a):
print(a)
print(locals()) #ローカル領域の変数群の値を辞書型で返す
# return
def f2(a,b):
print(a+b)
print(locals())
a = 100
f1(10)
f2(20,30)
print(a)
print(locals())
"""
"""
def is_positive(x):
if x >= 0:
return True
else:
return False
if is_positive(100):
print("OK")
if is_positive(-100):
print("NG")
"""
#
|
#!/usr/bin/env python3
#
# This example shows how to set up a self-consistent fluid DREAM run,
# where no kinetic equations are solved, but the electric field and
# temperature are evolved self-consistently.
#
# Run as
#
# $ ./basic.py
#
# ###################################################################
import numpy as np
import sys
sys.path.append('../../py/')
from DREAM import runiface
from DREAM.DREAMSettings import DREAMSettings
import DREAM.Settings.Equations.IonSpecies as Ions
import DREAM.Settings.Solver as Solver
import DREAM.Settings.CollisionHandler as Collisions
import DREAM.Settings.Equations.ColdElectronTemperature as T_cold
import DREAM.Settings.Equations.ElectricField as Efield
import DREAM.Settings.Equations.HotElectronDistribution as FHot
from DREAM.Settings.Equations.ElectricField import ElectricField
from DREAM.Settings.Equations.ColdElectronTemperature import ColdElectronTemperature
ds = DREAMSettings()
# set collision settings
ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_FULL
ds.collisions.collfreq_type = Collisions.COLLFREQ_TYPE_PARTIALLY_SCREENED
ds.collisions.bremsstrahlung_mode = Collisions.BREMSSTRAHLUNG_MODE_STOPPING_POWER
ds.collisions.lnlambda = Collisions.LNLAMBDA_ENERGY_DEPENDENT
#############################
# Set simulation parameters #
#############################
# time resolution of restarted simulation
Tmax_restart = 1e-3 # simulation time in seconds
Nt_restart = 20 # number of time steps
B0 = 5 # magnetic field strength in Tesla
E_initial = 60 # initial electric field in V/m
E_wall = 0.0 # boundary electric field in V/m
T_initial = 4 # initial temperature in eV
Tmax_init2 = 1e-3 # simulation time in seconds
Nt_init2 = 10 # number of time steps
Tmax_init1 = 5e-5 # simulation time in seconds
Nt_init1 = 7 # number of time steps
Nr = 4 # number of radial grid points
Np = 200 # number of momentum grid points
Nxi = 5 # number of pitch grid points
pMax = 0.03 # maximum momentum in m_e*c
times = [0] # times at which parameters are given
radius = [0, 1] # span of the radial grid
radius_wall = 1.5 # location of the wall
T_selfconsistent = True
hotTailGrid_enabled = True
# Set up radial grid
ds.radialgrid.setB0(B0)
ds.radialgrid.setMinorRadius(radius[-1])
ds.radialgrid.setWallRadius(radius_wall)
ds.radialgrid.setNr(Nr)
# Set ions
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_DYNAMIC_FULLY_IONIZED, n=1e20)
ds.eqsys.n_i.addIon(name='Ar', Z=18, iontype=Ions.IONS_DYNAMIC_NEUTRAL, n=1e20)
# Set E_field
efield = E_initial*np.ones((len(times), len(radius)))
ds.eqsys.E_field.setPrescribedData(efield=efield, times=times, radius=radius)
# Set initial temperature
temperature = T_initial * np.ones((len(times), len(radius)))
ds.eqsys.T_cold.setPrescribedData(temperature=temperature, times=times, radius=radius)
# Hot-tail grid settings
# Set initial Maxwellian @ T = 1 keV, n = 5e19, uniform in radius
if not hotTailGrid_enabled:
ds.hottailgrid.setEnabled(False)
else:
ds.hottailgrid.setNxi(Nxi)
ds.hottailgrid.setNp(Np)
ds.hottailgrid.setPmax(pMax)
nfree_initial, rn0 = ds.eqsys.n_i.getFreeElectronDensity()
ds.eqsys.f_hot.setInitialProfiles(rn0=rn0, n0=nfree_initial, rT0=0, T0=T_initial)
ds.eqsys.f_hot.setBoundaryCondition(bc=FHot.BC_F_0)
ds.eqsys.f_hot.enableIonJacobian(False)
# Disable runaway grid
ds.runawaygrid.setEnabled(False)
# Use the new nonlinear solver
ds.solver.setType(Solver.NONLINEAR)
ds.solver.tolerance.set(reltol=1e-4)
ds.solver.setMaxIterations(maxiter = 100)
ds.solver.setVerbose(True)
ds.other.include('fluid', 'lnLambda','nu_s','nu_D')
# Set time stepper
ds.timestep.setTmax(Tmax_init1)
ds.timestep.setNt(Nt_init1)
ds.save('init_settings.h5')
ds.output.setFilename('output_init.h5')
# Save settings to HDF5 file
runiface(ds, 'output_init2.h5', quiet=False)
# Set time stepper
ds.timestep.setTmax(Tmax_init2)
ds.timestep.setNt(Nt_init2)
if T_selfconsistent:
ds.eqsys.T_cold.setType(ttype=T_cold.TYPE_SELFCONSISTENT)
ds.solver.setLinearSolver(Solver.LINEAR_SOLVER_LU)
ds.fromOutput('output_init2.h5')
ds.save('init_settings.h5')
runiface(ds, 'output_init3.h5', quiet=False)
###########
# RESTART #
###########
ds2 = DREAMSettings(ds)
ds2.fromOutput('output_init3.h5')
ds2.eqsys.E_field.setType(Efield.TYPE_SELFCONSISTENT)
ds2.eqsys.E_field.setBoundaryCondition(bctype = Efield.BC_TYPE_PRESCRIBED, inverse_wall_time = 0, V_loop_wall_R0 = E_wall*2*np.pi)
ds2.timestep.setTmax(Tmax_restart)
ds2.timestep.setNt(Nt_restart)
ds2.save('restart_settings.h5')
runiface(ds2, 'output.h5', quiet=False)
|
def add(*args):
return round(sum(x/(c+1) for c,x in enumerate(args)))
'''
This kata is all about adding numbers.
You will create a function named add. It will return the sum of all the arguments.
Sounds easy, doesn't it?
Well Here's the Twist. The inputs will gradually decrease with their index as
parameter to the function.
add(3,4,6) #returns (3/1)+(4/2)+(6/3)=7
Remember the function will return 0 if no arguments are passed and it must round
the result if sum is a float.
Example
add() #=> 0
add(1,2,3) #=> 3
add(1,4,-6,20) #=> 6
'''
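# Quick self-check of the examples quoted in the kata description above
# (added for illustration; expected values are taken from the docstring).
if __name__ == '__main__':
    assert add() == 0
    assert add(3, 4, 6) == 7
    assert add(1, 2, 3) == 3
    assert add(1, 4, -6, 20) == 6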
|
n = int(input())
scores = [int(x) for x in input().split()]
M = max(scores)
sum_new_scores = 0
for s in scores:
sum_new_scores += s / M * 100
print(sum_new_scores / n)
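# Worked example (illustration): for n = 3 and scores "70 80 100", M = 100, so the
# rescaled scores are 70.0, 80.0 and 100.0 and the printed mean is 250 / 3 ≈ 83.33.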
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.classifier import GNNClassifier
class GAT(GNNClassifier):
def __init__(self, input_dim, hidden_dim, num_labels, num_layers, num_heads=2, merge='mean', dropout=0.6):
super().__init__(input_dim, hidden_dim, num_labels, num_layers)
self.num_heads = num_heads
self.merge = merge
self.dropout = dropout
self.build_layers()
def build_layers(self):
layers = []
h_dim = self.hidden_dim * self.num_heads if self.merge == 'cat' else self.hidden_dim
for i in range(self.num_layers):
if i == 0:
layers.append(MultiHeadGATLayer(self.input_dim, self.hidden_dim, self.num_heads, self.merge, self.dropout))
elif i == self.num_layers - 1:
layers.append(MultiHeadGATLayer(h_dim, self.num_labels, 1, self.merge, self.dropout))
else:
layers.append(MultiHeadGATLayer(h_dim, self.hidden_dim, self.num_heads, self.merge, self.dropout))
self.gnn_layers = nn.ModuleList(layers)
class MultiHeadGATLayer(nn.Module):
def __init__(self, in_dim, out_dim, num_heads=2, merge='mean', dropout=0.5):
super(MultiHeadGATLayer, self).__init__()
self.merge = merge
self.dropout = dropout
self.heads = nn.ModuleList()
for i in range(num_heads):
self.heads.append(GATLayer(in_dim, out_dim, self.dropout))
def forward(self, g, h):
head_outs = [attn_head(g, h) for attn_head in self.heads]
if self.merge == 'cat':
# concat on the output feature dimension (dim=1)
return torch.cat(head_outs, dim=1)
else:
# merge using average
return torch.mean(torch.stack(head_outs), 0)
class GATLayer(nn.Module):
def __init__(self, in_dim, out_dim, dropout=0.6):
super().__init__()
self.linear = nn.Linear(in_dim, out_dim, bias=False)
self.attn_linear = nn.Linear(out_dim * 2, 1, bias=False)
self.dropout = dropout
def edge_attention(self, edges):
cat = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
a = self.attn_linear(cat)
return {'e': F.leaky_relu(a)}
def message_func(self, edges):
return {'z': edges.src['z'], 'e': edges.data['e']}
def reduce_func(self, nodes):
# alpha = F.softmax(nodes.mailbox['e'], dim=1)
alpha = F.dropout(F.softmax(nodes.mailbox['e'], dim=1), self.dropout, self.training)
h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
return {'h': h}
def forward(self, g, h):
z = self.linear(F.dropout(h, self.dropout, self.training))
g.ndata['z'] = z
g.apply_edges(self.edge_attention)
g.update_all(self.message_func, self.reduce_func)
return g.ndata.pop('h')
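# Minimal, hypothetical smoke test for GATLayer (assumes a DGL graph, since the
# layer relies on g.ndata / apply_edges / update_all; GNNClassifier's forward is
# defined elsewhere, so only the single layer is exercised here):
#
# import dgl
# g = dgl.graph(([0, 1, 2], [1, 2, 0]))   # 3-node directed cycle
# h = torch.randn(3, 8)                   # 3 nodes, 8 input features each
# layer = GATLayer(in_dim=8, out_dim=4)
# out = layer(g, h)                       # expected shape: (3, 4)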
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import jsonpickle
import dateutil.parser
from .controller_test_base import ControllerTestBase
from ..test_helper import TestHelper
from ytelapi.api_helper import APIHelper
class TranscriptionControllerTests(ControllerTestBase):
@classmethod
def setUpClass(cls):
super(TranscriptionControllerTests, cls).setUpClass()
cls.controller = cls.api_client.transcription
# Retrieve a list of transcription objects for your Ytel account.
def test_test_list_transcriptions(self):
# Parameters for the API call
page = None
pagesize = None
status = None
date_transcribed = None
# Perform the API call through the SDK function
result = self.controller.create_list_transcriptions(page, pagesize, status, date_transcribed)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 200)
# Test headers
expected_headers = {}
expected_headers['content-type'] = 'application/json'
self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))
|
# cmds = {
# 'query_comm_temp_unit':':UNIT:TEMPERATURE?',
# 'set_comm_unit_f':':UNIT:TEMPERATURE F',
# 'set_comm_unit_c':':UNIT:TEMPERATURE C',
# 'query_temp_disp':':UNIT:TEMPERATURE:DISPLAY?',
# 'set_disp_unit_f':':UNIT:TEMPERATURE:DISPLAY F',
# 'set_disp_unit_c':':UNIT:TEMPERATURE:DISPLAY C',
# 'read_temp_pv':':SOURCE:CLOOP#:PVALUE?',
# 'query_input_err':':SOURCE:CLOOP{}:ERROR?',
# 'read_s_point':':SOURCE:CLOOP{}:SPOINT?',
# 'write_s_pount':':SOURCE:CLOOP{}:SPOINT {}',
# '':':SOURCE:CLOOP#:IDLE?',
# '':':SOURCE:CLOOP#:IDLE <value>',
# '':':SOURCE:CASCADE1:SPOINT?',
# '':':SOURCE:CASCADE1:SPOINT <value>',
# '':':SOURCE:CASCADE1:OUTER:PVALUE?',
# '':':SOURCE:CASCADE1:OUTER:ERROR?',
# '':':SOURCE:CASCADE1:INNER:PVALUE?<floating point value>',
# '':':SOURCE:CASCADE1:INNER:ERROR?',
# '':':SOURCE:CASCADE1:OUTER:SPOINT?',
# '':':SOURCE:CASCADE1:INNER:SPOINT?',
# '':':SOURCE:CLOOP#:RACTION OFF',
# '':':SOURCE:CLOOP#:RACTION STARTUP',
# '':':SOURCE:CLOOP#:RACTION SETPOINT',
# '':':SOURCE:CLOOP#:RACTION BOTH',
# '':':SOURCE:CLOOP#:RSCALE MINUTES',
# '':':SOURCE:CLOOP#:RSCALE HOURS',
# '':':SOURCE:CLOOP#:RRATE?',
# '':':SOURCE:CLOOP#:RTIME?',
# '':':SOURCE:CLOOP#:RRATE <value>',
# '':':SOURCE:CLOOP#:RTIME <value>',
# '':':OUTPUT#:STATE ON',
# '':':OUTPUT#:STATE OFF',
# '':':OUTPUT#:STATE?',
# '':':PROGRAM:NUMBER <value>',
# 'query_prog_name':':PROGRAM:NAME?',
# 'sel_prog_step':':PROGRAM:STEP <value>',
# '':':PROGRAM:SELECTED:STATE START',
# '':':PROGRAM:SELECTED:STATE STOP',
# '':':PROGRAM:SELECTED:STATE PAUSE',
# '':':PROGRAM:SELECTED:STATE RESUME',
# 'identify':'*IDN?',
# }
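# Hypothetical usage sketch (illustration only; the VISA address is invented and
# the command table above would need to be uncommented): issuing these SCPI
# commands through pyvisa.
#
# import pyvisa
# rm = pyvisa.ResourceManager()
# inst = rm.open_resource('TCPIP0::192.168.0.10::5025::SOCKET', read_termination='\n')
# print(inst.query(cmds['identify']))                 # *IDN?  -> instrument identity string
# print(inst.query(cmds['read_s_point'].format(1)))   # :SOURCE:CLOOP1:SPOINT?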
|
from django.views.generic import TemplateView
from generic.mixins import CategoryListMixin
class ContactsView(TemplateView, CategoryListMixin):
template_name = "contacts.html"
|
# TODO: Still need to write the parser
import macropy.activate
from language import *
from gen import *
from sympy import *
import shac
# This is the single-dimension (in "x") example of an artificially paced cell,
# without any value for f(lambda).
ode1 = Ode(S("diff(x(t))+0.1*x(t)"), S("x(t)"), 0.0001, {})
ode2 = Ode(S("diff(x(t))-800"), S("x(t)"), 20, {})
ode3 = Ode(S("diff(x(t))-200.0*x(t)"), S("x(t)"), 20, {})
ode4 = Ode(S("diff(x(t))+0.002*x(t)"), S("x(t)"), 138, {})
# The locations of the hybrid automaton
t1 = Loc("t1", [ode1], [],
{S("x(t)"): [Guard(S("x<=20"))]})
t2 = Loc("t2", [ode2], [],
{S("x(t)"): [Guard(S("x < 20"))]})
t3 = Loc("t3", [ode3], [],
{S("x(t)"): [Guard(S("x>=20")), Guard(S("x < 138"))]})
t4 = Loc("t4", [ode4], [],
{S("x(t)"): [Guard(S("x>20")), Guard(S("x <= 138"))]})
# The edges
e1 = Edge('t1', 't2', {S("x(t)"): [Guard(S("x < 20"))]},
[Update.Update2(Symbol('x'), Symbol('x'))],
[Event("VS")])
e2 = Edge('t2', 't1', {S("x(t)"): [Guard(S("x < 20"))]},
[Update.Update2(Symbol('x'), Symbol('x'))],
[Event("VSP")])
e3 = Edge('t2', 't3', {S("x(t)"): [Guard(S("x <= 20")),
Guard(S("x >= 20"))]},
[Update.Update2(Symbol('x'), Symbol('x'))],
[])
e4 = Edge('t3', 't4', {S("x(t)"): [Guard(S("x <= 138")),
Guard(S("x >= 138"))]},
[Update.Update2(Symbol('x'), Symbol('x'))],
[])
e5 = Edge('t4', 't1', {S("x(t)"): [Guard(S("x <= 20")),
Guard(S("x >= 20"))]},
[Update.Update2(Symbol('x'), Symbol('x'))],
[])
cell1D = Ha("cell1D", [t1, t2, t3, t4], t1,
[e1, e2, e3, e4, e5], [], [])
# Compile
shac.compile(cell1D)
|