hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a84bbb52eda077f44cc7b316828fb87f5e2b4397 | 3,371 | py | Python | ansible-devel/test/units/galaxy/test_role_requirements.py | satishcarya/ansible | ed091e174c26316f621ac16344a95c99f56bdc43 | [
"MIT"
] | null | null | null | ansible-devel/test/units/galaxy/test_role_requirements.py | satishcarya/ansible | ed091e174c26316f621ac16344a95c99f56bdc43 | [
"MIT"
] | null | null | null | ansible-devel/test/units/galaxy/test_role_requirements.py | satishcarya/ansible | ed091e174c26316f621ac16344a95c99f56bdc43 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.playbook.role.requirement import RoleRequirement
def test_null_role_url():
    """An empty requirement string yields an empty, SCM-less role spec."""
    parsed = RoleRequirement.role_yaml_parse('')
    for field, expected in (('src', ''), ('name', '')):
        assert parsed[field] == expected
    for field in ('scm', 'version'):
        assert parsed[field] is None
def test_git_file_role_url():
    """A git+file:// URL is split into an scm prefix and a local file source."""
    parsed = RoleRequirement.role_yaml_parse('git+file:///home/bennojoy/nginx')
    expectations = {
        'src': 'file:///home/bennojoy/nginx',
        'name': 'nginx',
        'scm': 'git',
    }
    for field, expected in expectations.items():
        assert parsed[field] == expected
    assert parsed['version'] is None
def test_https_role_url():
    """A plain https URL is kept verbatim and no scm is detected."""
    parsed = RoleRequirement.role_yaml_parse('https://github.com/bennojoy/nginx')
    assert parsed['src'] == 'https://github.com/bennojoy/nginx'
    assert parsed['name'] == 'nginx'
    assert parsed['scm'] is None
    assert parsed['version'] is None
def test_git_https_role_url():
    """A git+https URL loses its scm prefix and names the role after the repo."""
    url = 'git+https://github.com/geerlingguy/ansible-role-composer.git'
    parsed = RoleRequirement.role_yaml_parse(url)
    assert parsed['src'] == url[len('git+'):]
    assert parsed['name'] == 'ansible-role-composer'
    assert parsed['scm'] == 'git'
    assert parsed['version'] is None
def test_git_version_role_url():
    """A trailing ',<ref>' on the URL is parsed out as the requested version."""
    parsed = RoleRequirement.role_yaml_parse(
        'git+https://github.com/geerlingguy/ansible-role-composer.git,main')
    assert parsed['scm'] == 'git'
    assert parsed['src'] == 'https://github.com/geerlingguy/ansible-role-composer.git'
    assert parsed['name'] == 'ansible-role-composer'
    assert parsed['version'] == 'main'
@pytest.mark.parametrize("url", [
    'https://some.webserver.example.com/files/main.tar.gz',
    'https://some.webserver.example.com/files/main.tar.bz2',
    'https://some.webserver.example.com/files/main.tar.xz',
])
def test_tar_role_url(url):
    """Archive URLs pass through untouched; the name derives from the stem."""
    parsed = RoleRequirement.role_yaml_parse(url)
    assert parsed['scm'] is None
    assert parsed['version'] is None
    assert parsed['src'] == url
    assert parsed['name'].startswith('main')
def test_git_ssh_role_url():
    """SCP-style git SSH addresses are kept verbatim with no scm prefix."""
    address = 'git@gitlab.company.com:mygroup/ansible-base.git'
    parsed = RoleRequirement.role_yaml_parse(address)
    assert parsed['src'] == address
    assert parsed['name'].startswith('ansible-base')
    assert parsed['scm'] is None
    assert parsed['version'] is None
def test_token_role_url():
    """Deploy-token credentials embedded in the URL survive parsing."""
    token_url = ('https://gitlab+deploy-token-312644:'
                 '_aJQ9c3HWzmRR4knBNyx@gitlab.com/akasurde/ansible-demo')
    parsed = RoleRequirement.role_yaml_parse('git+' + token_url)
    assert parsed['src'] == token_url
    assert parsed['name'].startswith('ansible-demo')
    assert parsed['scm'] == 'git'
    assert parsed['version'] is None
def test_token_new_style_role_url():
    """Dict-style ('new style') requirements accept the same token URLs."""
    token_url = ('https://gitlab+deploy-token-312644:'
                 '_aJQ9c3HWzmRR4knBNyx@gitlab.com/akasurde/ansible-demo')
    parsed = RoleRequirement.role_yaml_parse({"src": "git+" + token_url})
    assert parsed['src'] == token_url
    assert parsed['name'].startswith('ansible-demo')
    assert parsed['scm'] == 'git'
    assert parsed['version'] == ''
| 37.876404 | 147 | 0.703055 | 0 | 0 | 0 | 0 | 438 | 0.129932 | 0 | 0 | 1,571 | 0.466034 |
a84c51e00e3c06ed946f2d73ff195ec6335ee4c9 | 8,007 | py | Python | argo/workflows/client/models/v1_pod_log_options.py | fvdnabee/argo-client-python | 0caa743442d37f2f2e3b30867398ed2708c1bf4d | [
"Apache-2.0"
] | 35 | 2019-10-25T09:19:36.000Z | 2022-03-04T11:22:27.000Z | argo/workflows/client/models/v1_pod_log_options.py | fvdnabee/argo-client-python | 0caa743442d37f2f2e3b30867398ed2708c1bf4d | [
"Apache-2.0"
] | 17 | 2019-10-30T03:49:20.000Z | 2020-07-02T15:54:50.000Z | argo/workflows/client/models/v1_pod_log_options.py | fvdnabee/argo-client-python | 0caa743442d37f2f2e3b30867398ed2708c1bf4d | [
"Apache-2.0"
] | 9 | 2019-11-06T13:30:08.000Z | 2021-06-12T03:00:05.000Z | # coding: utf-8
"""
Argo
Python client for Argo Workflows # noqa: E501
OpenAPI spec version: master
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1PodLogOptions(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Swagger model for Kubernetes pod-log query options (container selection,
    follow/tail behaviour, time filters).
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
      and the value is attribute type.
      attribute_map (dict): The key is attribute name
      and the value is json key in definition.
    """
    # python attribute name -> declared swagger type (consumed by to_dict()).
    swagger_types = {
        'container': 'str',
        'follow': 'bool',
        'limit_bytes': 'str',
        'previous': 'bool',
        'since_seconds': 'str',
        'since_time': 'V1Time',
        'tail_lines': 'str',
        'timestamps': 'bool'
    }
    # python attribute name -> camelCase JSON key used on the wire.
    attribute_map = {
        'container': 'container',
        'follow': 'follow',
        'limit_bytes': 'limitBytes',
        'previous': 'previous',
        'since_seconds': 'sinceSeconds',
        'since_time': 'sinceTime',
        'tail_lines': 'tailLines',
        'timestamps': 'timestamps'
    }
    def __init__(self, container=None, follow=None, limit_bytes=None, previous=None, since_seconds=None, since_time=None, tail_lines=None, timestamps=None):  # noqa: E501
        """V1PodLogOptions - a model defined in Swagger

        All fields are optional; only explicitly supplied (non-None) values
        are assigned through the property setters below.
        """  # noqa: E501
        self._container = None
        self._follow = None
        self._limit_bytes = None
        self._previous = None
        self._since_seconds = None
        self._since_time = None
        self._tail_lines = None
        self._timestamps = None
        self.discriminator = None
        if container is not None:
            self.container = container
        if follow is not None:
            self.follow = follow
        if limit_bytes is not None:
            self.limit_bytes = limit_bytes
        if previous is not None:
            self.previous = previous
        if since_seconds is not None:
            self.since_seconds = since_seconds
        if since_time is not None:
            self.since_time = since_time
        if tail_lines is not None:
            self.tail_lines = tail_lines
        if timestamps is not None:
            self.timestamps = timestamps
    @property
    def container(self):
        """Gets the container of this V1PodLogOptions. # noqa: E501
        :return: The container of this V1PodLogOptions. # noqa: E501
        :rtype: str
        """
        return self._container
    @container.setter
    def container(self, container):
        """Sets the container of this V1PodLogOptions.
        :param container: The container of this V1PodLogOptions. # noqa: E501
        :type: str
        """
        self._container = container
    @property
    def follow(self):
        """Gets the follow of this V1PodLogOptions. # noqa: E501
        :return: The follow of this V1PodLogOptions. # noqa: E501
        :rtype: bool
        """
        return self._follow
    @follow.setter
    def follow(self, follow):
        """Sets the follow of this V1PodLogOptions.
        :param follow: The follow of this V1PodLogOptions. # noqa: E501
        :type: bool
        """
        self._follow = follow
    @property
    def limit_bytes(self):
        """Gets the limit_bytes of this V1PodLogOptions. # noqa: E501
        :return: The limit_bytes of this V1PodLogOptions. # noqa: E501
        :rtype: str
        """
        return self._limit_bytes
    @limit_bytes.setter
    def limit_bytes(self, limit_bytes):
        """Sets the limit_bytes of this V1PodLogOptions.
        :param limit_bytes: The limit_bytes of this V1PodLogOptions. # noqa: E501
        :type: str
        """
        self._limit_bytes = limit_bytes
    @property
    def previous(self):
        """Gets the previous of this V1PodLogOptions. # noqa: E501
        :return: The previous of this V1PodLogOptions. # noqa: E501
        :rtype: bool
        """
        return self._previous
    @previous.setter
    def previous(self, previous):
        """Sets the previous of this V1PodLogOptions.
        :param previous: The previous of this V1PodLogOptions. # noqa: E501
        :type: bool
        """
        self._previous = previous
    @property
    def since_seconds(self):
        """Gets the since_seconds of this V1PodLogOptions. # noqa: E501
        :return: The since_seconds of this V1PodLogOptions. # noqa: E501
        :rtype: str
        """
        return self._since_seconds
    @since_seconds.setter
    def since_seconds(self, since_seconds):
        """Sets the since_seconds of this V1PodLogOptions.
        :param since_seconds: The since_seconds of this V1PodLogOptions. # noqa: E501
        :type: str
        """
        self._since_seconds = since_seconds
    @property
    def since_time(self):
        """Gets the since_time of this V1PodLogOptions. # noqa: E501
        :return: The since_time of this V1PodLogOptions. # noqa: E501
        :rtype: V1Time
        """
        return self._since_time
    @since_time.setter
    def since_time(self, since_time):
        """Sets the since_time of this V1PodLogOptions.
        :param since_time: The since_time of this V1PodLogOptions. # noqa: E501
        :type: V1Time
        """
        self._since_time = since_time
    @property
    def tail_lines(self):
        """Gets the tail_lines of this V1PodLogOptions. # noqa: E501
        :return: The tail_lines of this V1PodLogOptions. # noqa: E501
        :rtype: str
        """
        return self._tail_lines
    @tail_lines.setter
    def tail_lines(self, tail_lines):
        """Sets the tail_lines of this V1PodLogOptions.
        :param tail_lines: The tail_lines of this V1PodLogOptions. # noqa: E501
        :type: str
        """
        self._tail_lines = tail_lines
    @property
    def timestamps(self):
        """Gets the timestamps of this V1PodLogOptions. # noqa: E501
        :return: The timestamps of this V1PodLogOptions. # noqa: E501
        :rtype: bool
        """
        return self._timestamps
    @timestamps.setter
    def timestamps(self, timestamps):
        """Sets the timestamps of this V1PodLogOptions.
        :param timestamps: The timestamps of this V1PodLogOptions. # noqa: E501
        :type: bool
        """
        self._timestamps = timestamps
    def to_dict(self):
        """Returns the model properties as a dict

        Recursively converts nested models (anything exposing to_dict),
        lists and dicts of models into plain python values.
        """
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch: if the model ever subclasses dict, merge
        # its own items into the result as well.
        if issubclass(V1PodLogOptions, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1PodLogOptions):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.869128 | 170 | 0.585488 | 7,758 | 0.968902 | 0 | 0 | 4,032 | 0.503559 | 0 | 0 | 3,842 | 0.47983 |
a84d0a4ba61d358039b7a1e74466919b134e36f7 | 4,158 | py | Python | tests/test_html.py | unfoldingWord-dev/USFM-Utils | c3c4b5df5636076a4f64f8ec63358df13dc814ed | [
"MIT"
] | 4 | 2016-09-26T09:42:47.000Z | 2020-05-16T05:14:33.000Z | tests/test_html.py | unfoldingWord-dev/USFM-Utils | c3c4b5df5636076a4f64f8ec63358df13dc814ed | [
"MIT"
] | 1 | 2020-09-24T19:29:26.000Z | 2020-09-24T19:29:26.000Z | tests/test_html.py | unfoldingWord-dev/USFM-Utils | c3c4b5df5636076a4f64f8ec63358df13dc814ed | [
"MIT"
] | 2 | 2017-07-27T05:13:02.000Z | 2019-12-19T23:45:18.000Z | import itertools
import unittest
from usfm_utils.elements.document import Document
from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote
from usfm_utils.elements.footnote_utils import AutomaticFootnoteLabel, CustomFootnoteLabel
from usfm_utils.html.html_visitor import HtmlVisitor, non_span_formatting
from tests import test_utils
class HtmlRenderingTest(unittest.TestCase):
    """Checks that document elements survive HTML rendering: element text,
    footnote labels and kind-specific CSS classes/tags must all appear in
    the rendered output."""
    @staticmethod
    def render_elements(*elements):
        # Convenience wrapper: pack loose elements into a Document and render.
        return HtmlRenderingTest.render(Document(elements))
    @staticmethod
    def render(document):
        # Render `document` through HtmlVisitor into an in-memory TestFile
        # and return the accumulated HTML string.
        test_file = HtmlRenderingTest.TestFile()
        visitor = HtmlVisitor(test_file)
        visitor.write(document)
        return test_file.content()
    def test_footnotes(self):
        # Automatic labels: the footnote kind name and body text must render.
        for kind in list(Footnote.Kind):
            word = test_utils.word()
            footnote = Footnote(kind, [Text(word)], AutomaticFootnoteLabel())
            paragraph = Paragraph([footnote])
            rendered = self.render_elements(paragraph)
            self.assertIn(kind.name, rendered)
            self.assertIn(word, rendered)
        # Custom labels: the explicit label text must render too.
        for kind in list(Footnote.Kind):
            word = test_utils.word()
            label = test_utils.word(allow_empty=False)
            footnote = Footnote(kind, [Text(word)], CustomFootnoteLabel(label))
            paragraph = Paragraph([footnote])
            rendered = self.render_elements(paragraph)
            self.assertIn(kind.name, rendered)
            self.assertIn(word, rendered)
            self.assertIn(label, rendered)
    def test_formatted_text(self):
        # Formatting kinds either map to dedicated open/close tags
        # (non_span_formatting) or to a <span> carrying the kind as a class.
        for kind in list(FormattedText.Kind):
            text = " ".join(test_utils.word(allow_empty=False)
                            for _ in range(10))
            formatted_text = FormattedText(kind, [Text(text)])
            rendered = self.render_elements(formatted_text)
            self.assertIn(text, rendered)
            if kind in non_span_formatting:
                open_tag, close_tag = non_span_formatting[kind]
                self.assertIn(open_tag, rendered)
                self.assertIn(close_tag, rendered)
            else:
                self.assertIn(kind.name, rendered)  # kind.name should appear as a class
    def test_heading(self):
        # A document-level heading must be emitted alongside the body text.
        word = test_utils.word()
        heading = test_utils.word()
        elements = [Paragraph([Text(word)])]
        document = Document(elements, heading=heading)
        rendered = self.render(document)
        self.assertIn(word, rendered)
        self.assertIn(heading, rendered)
    def test_paragraph(self):
        # Exercise every combination of the four paragraph flags; each flag
        # should appear as a CSS class exactly when it is set.
        bools = (False, True)
        for embedded, poetic, introductory, continuation \
                in itertools.product(bools, bools, bools, bools):
            word = test_utils.word()
            text = Text(word)
            paragraph = Paragraph([text],
                                  embedded=embedded,
                                  poetic=poetic,
                                  introductory=introductory,
                                  continuation=continuation)
            rendered = self.render_elements(paragraph)
            self.assertIn(word, rendered)
            if embedded:
                self.assertIn("embedded", rendered)  # should appear as a class
            else:
                self.assertNotIn("embedded", rendered)
            if poetic:
                self.assertIn("poetic", rendered)
            else:
                self.assertNotIn("poetic", rendered)
            if introductory:
                self.assertIn("introductory", rendered)
            else:
                self.assertNotIn("introductory", rendered)
            if continuation:
                self.assertIn("continuation", rendered)
            else:
                self.assertNotIn("continuation", rendered)
    class TestFile(object):
        """
        A file-like string object used for mocking text files
        """
        def __init__(self):
            # Accumulated output; grows via write().
            self._content = ""
        def content(self):
            # Everything written so far.
            return self._content
        def write(self, p_str):
            self._content += p_str
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 37.125 | 90 | 0.597162 | 3,740 | 0.899471 | 0 | 0 | 305 | 0.073353 | 0 | 0 | 246 | 0.059163 |
a84f0d0f3749f18aaa14888aa7b0b39c8a1c4cf5 | 1,703 | py | Python | src/tokenization/train_tokenizer.py | saridormi/commits_dataset | 8dc5309c3b55385c0455ff91974704e18ab7cfdc | [
"Apache-2.0"
] | 1 | 2022-02-22T07:05:46.000Z | 2022-02-22T07:05:46.000Z | src/tokenization/train_tokenizer.py | saridormi/commits_dataset | 8dc5309c3b55385c0455ff91974704e18ab7cfdc | [
"Apache-2.0"
] | null | null | null | src/tokenization/train_tokenizer.py | saridormi/commits_dataset | 8dc5309c3b55385c0455ff91974704e18ab7cfdc | [
"Apache-2.0"
] | null | null | null | import hydra
import os
import logging
from hydra.utils import instantiate, to_absolute_path
from omegaconf import DictConfig, OmegaConf
from tokenizers import Tokenizer
from src.tokenization.utils import Lexer
@hydra.main(config_path="configs", config_name="train_tokenizer_config")
def main(cfg: DictConfig) -> None:
    """Train a tokenizer on commit-diff text, driven by a hydra config.

    For each dataset split, pretokenizes the raw CSV into a plain-text diff
    file (only if that file does not exist yet), then trains the configured
    tokenizer on all diff files and saves it to cfg.paths.tokenizer_fname.
    """
    logging.info("Tokenizer config")
    logging.info(OmegaConf.to_yaml(cfg))
    tokenizer = Tokenizer(instantiate(cfg.tokenizer))
    lexer = Lexer(sep_token=cfg.pre_tokenizer.pattern)
    fnames = []
    for part in ["train", "val", "test", "val_original", "test_original"]:
        # Target diff file for this split; hydra changes the cwd, so paths
        # are resolved back to the project root with to_absolute_path.
        part_fname = to_absolute_path(os.path.join(cfg.paths.data_dir, f"diffs/{part}.txt"))
        if not os.path.exists(part_fname):
            logging.info(f"Pretokenizing {part}")
            lexer(
                input_filename=to_absolute_path(os.path.join(cfg.paths.data_dir, f"{part}_final.csv")),
                output_filename=to_absolute_path(os.path.join(cfg.paths.data_dir, f"{part}_final_pretokenized.csv")),
                save_diffs=True,
                diff_filename=to_absolute_path(os.path.join(cfg.paths.data_dir, f"diffs/{part}.txt")),
                chunksize=cfg.chunksize,
            )
        fnames.append(part_fname)
    tokenizer.pre_tokenizer = instantiate(cfg.pre_tokenizer)
    trainer = instantiate(cfg.trainer)
    tokenizer.train(fnames, trainer)
    tokenizer.save(to_absolute_path(cfg.paths.tokenizer_fname))
if __name__ == "__main__":
    # Log to both a file and the console before handing control to hydra.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[logging.FileHandler("tokenizer_training.log"), logging.StreamHandler()],
    )
    main()
| 37.021739 | 117 | 0.688197 | 0 | 0 | 0 | 0 | 1,241 | 0.728714 | 0 | 0 | 285 | 0.167352 |
a84f521165b609606547850404c87694075d7414 | 4,653 | py | Python | xfdnn/rt/xdnn_rt.py | jebtang/ml-suite | 33dadbbd1a98acbab353a379bbc7e96547a0c2e3 | [
"Apache-2.0"
] | 1 | 2018-11-22T01:59:26.000Z | 2018-11-22T01:59:26.000Z | xfdnn/rt/xdnn_rt.py | jebtang/ml-suite | 33dadbbd1a98acbab353a379bbc7e96547a0c2e3 | [
"Apache-2.0"
] | null | null | null | xfdnn/rt/xdnn_rt.py | jebtang/ml-suite | 33dadbbd1a98acbab353a379bbc7e96547a0c2e3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import tensorflow as tf
import numpy as np
from xfdnn_compiler_tensorflow import TFFrontend
#from xfdnn.tools.compile.frontends.frontend_caffe import CaffeFrontend
from tensorflow.python.platform import gfile
import xdnn_opt
class xdnnRT:
    """Base runtime: compiles a network once, then executes it layer by layer.

    Subclasses supply the framework-specific hooks ``list_inputs_of_graph``
    and ``preprocess`` (see TFxdnnRT below).
    """
    def __init__(self, compiler, rtargs):
        """Compile the network and build the executable layer pipeline.

        :param compiler: frontend whose compile() returns
                         (pydotGraph, schedule, output name, _)
        :param rtargs: runtime args; .device selects "CPU" or "FPGA",
                       and .xclbin must be set when device == "FPGA"
        :raises AttributeError: on a missing xclbin or unknown device
        """
        self._inputs = self.list_inputs_of_graph()
        pydotGraph, schedule, self._out, _ = compiler.compile()
        if rtargs.device == "CPU":
            opt = xdnn_opt.CPUTransform(self._inputs, pydotGraph, schedule)
        elif rtargs.device == "FPGA":
            if not rtargs.xclbin:
                raise AttributeError("Must specify path to xclbin when device = FPGA")
            opt = xdnn_opt.FPGATransform(self._inputs, pydotGraph, schedule, rtargs.xclbin)
        else:
            raise AttributeError("Unsupported device type", rtargs.device)
        # variables hold the inputs/consts of the graph
        self._variables = opt.variables
        self._layers = opt.getLayers()
        for layer in self._layers:
            layer.setup()

    def list_inputs_of_graph(self):
        """Return the graph's input names; implemented by subclasses."""
        pass

    def preprocess(self, inputs):
        """Convert raw inputs into network-ready arrays; subclass hook."""
        pass

    def batch_classify(self, img_list, batch, preprocess):
        """Run feed_forward over img_list in chunks of `batch` images.

        Returns [] for an empty input, the single chunk's prediction when
        only one chunk was needed, otherwise the concatenated predictions.
        """
        print(len(img_list))  # kept: pre-existing progress/debug output
        preds = []
        start = 0
        while start < len(img_list):
            end = min(start + batch, len(img_list))
            preds.append(self.feed_forward(img_list[start:end], preprocess=preprocess))
            start = end
        if not preds:
            return []
        if len(preds) == 1:
            return preds[0]
        return np.concatenate(preds)

    def feed_forward(self, inputs, out=None, preprocess=None):
        """Execute the compiled layers on `inputs`.

        :param inputs: raw batch, passed through `preprocess` (defaults to
                       self.preprocess) before being bound to the first
                       graph input
        :param out: optional name of the variable to return; defaults to
                    the compiler-reported output
        """
        if not preprocess:
            preprocess = self.preprocess
        # Seed the variable store with the (preprocessed) graph input,
        # then run each layer on its resolved input variables.
        self._variables[self._inputs[0]] = preprocess(inputs)
        for layer in self._layers:
            layer_inputs = [self._variables[inp] for inp in layer.inputs]
            self._variables[layer.output] = layer.forward_exec(layer_inputs)
        return self._variables[self._out if out is None else out]
class TFxdnnRT(xdnnRT):
    """TensorFlow-backed runtime: loads a frozen GraphDef and feeds it
    through the xdnnRT compile/execute pipeline."""
    def __init__ ( self, cargs):
        # Parse the serialized GraphDef protobuf from cargs.networkfile,
        # then compile it via the TensorFlow frontend.
        self._tfGraph = tf.GraphDef()
        with gfile.FastGFile(cargs.networkfile, 'rb') as f:
            self._tfGraph.ParseFromString(f.read())
        compiler = TFFrontend(cargs)
        xdnnRT.__init__(self, compiler, cargs)
    def list_inputs_of_graph(self) :
        # Graph inputs are exactly the Placeholder ops in the GraphDef.
        res = []
        for node in self._tfGraph.node :
            if node.op == 'Placeholder' :
                res.append(node.name)
        return res
    def preprocess(self, inputs):
        # Non-array inputs are treated as an image path: decode, then move
        # channels first (NHWC -> NCHW).
        if type(inputs) is not np.ndarray:
            inputs = np.transpose(self.read_tensor_from_image_file(inputs), [0,3,1,2]) # assuming that there is only one input
        return inputs
    def read_tensor_from_image_file(self, file_name,
                input_height=299,
                input_width=299,
                input_mean=0,
                input_std=255):
        """Decode an image file (png/gif/bmp/jpeg by extension), resize it
        bilinearly to (input_height, input_width) and normalize with
        (x - input_mean) / input_std; returns a batched numpy array."""
        input_name = "file_reader"
        file_reader = tf.read_file(file_name, input_name)
        if file_name.endswith(".png"):
            image_reader = tf.image.decode_png(file_reader, channels=3, name="png_reader")
        elif file_name.endswith(".gif"):
            image_reader = tf.squeeze(
                tf.image.decode_gif(file_reader, name="gif_reader"))
        elif file_name.endswith(".bmp"):
            image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
        else:
            image_reader = tf.image.decode_jpeg(
                file_reader, channels=3, name="jpeg_reader")
        float_caster = tf.cast(image_reader, tf.float32)
        dims_expander = tf.expand_dims(float_caster, 0)
        resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
        normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
        # TF1-style eager-less API: evaluate the normalization graph now.
        with tf.Session() as sess :
            result = sess.run(normalized)
        return result
| 36.351563 | 127 | 0.588438 | 4,313 | 0.926929 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.121427 |
a85085ad48ffb4df3ce6c1f54c3962d5699ea21f | 8,821 | py | Python | bot/cogs/personal.py | issuemeaname/rammus-discord-bot | 23bcb02af11b8b764d75fa974149164f0086c1ea | [
"MIT"
] | 1 | 2020-07-07T05:02:40.000Z | 2020-07-07T05:02:40.000Z | bot/cogs/personal.py | issuemeaname/rammus-discord-bot | 23bcb02af11b8b764d75fa974149164f0086c1ea | [
"MIT"
] | null | null | null | bot/cogs/personal.py | issuemeaname/rammus-discord-bot | 23bcb02af11b8b764d75fa974149164f0086c1ea | [
"MIT"
] | 2 | 2019-02-07T18:26:43.000Z | 2021-07-04T16:58:41.000Z | """
This is a cog used for personal commands. For now they exist for people I know
but they will be removed eventually, as they hold no other use to the community
or other users of Rammus. Again, this is temporary and will be removed shortly.
Thank you for reading
"""
import random
import discord
from discord.ext import commands
import bot.checks
from bot.resources import PACER_TEST
class Personal(commands.Cog):
    """Temporary per-person joke commands (see the module docstring).

    Every command is hidden and gated to one or more specific Discord user
    ids via bot.checks.is_member; replies go through msg(), which appends
    the cog's smiley suffix exactly once.

    Fix over the previous revision: several commands passed
    `... + self.append` into msg(), which appends the suffix itself, so the
    smiley was doubled (akey, beem, orcles, xero); menmis/traf appended a
    pointless empty string.
    """
    def __init__(self, bot):
        self.bot = bot
        # Suffix appended to every reply by msg().
        self.append = " `:^)`"

    async def msg(self, ctx, message):
        """Send `message` to the invoking channel with the smiley suffix."""
        await ctx.send(message + self.append)

    # ace
    @commands.command(hidden=True)
    @bot.checks.is_member(155625382748356608)
    async def ace(self, ctx):
        await self.msg(ctx, "is it because im chinese?")

    # akey
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(474170410213048331)
    async def akey(self, ctx):
        if ctx.author.display_name != "ASIAN":
            await ctx.author.edit(nick="ASIAN")
            await self.msg(ctx, ":white_check_mark: Successfully changed this "
                                "Asian's name")
        else:
            # msg() already appends the suffix; don't append it again here.
            await self.msg(ctx, ":x: No need to change this Asian's name")

    # archy
    @commands.command(hidden=True)
    @bot.checks.is_member(205107664533848065)
    async def archy(self, ctx):
        options = [
            f"{ctx.author.mention} is a lesbian",
            PACER_TEST
        ]
        option = random.choice(options)
        await self.msg(ctx, option)

    # astaris
    @commands.command(hidden=True)
    @bot.checks.is_member(192974987513036800)
    async def astaris(self, ctx):
        options = [
            "Astaris is big bolly today",
            "Astaris isn't a big bolly today"
        ]
        option = random.choice(options)
        await self.msg(ctx, option)

    # azey
    @commands.command(hidden=True)
    @bot.checks.is_member(239276819918880769)
    async def azey(self, ctx):
        options = [
            "Yes I’m aze pls don’t touch",
            "Archy abuses me"
        ]
        option = random.choice(options)
        await self.msg(ctx, option)

    # beem
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(336336895711772693)
    async def beem(self, ctx):
        if ctx.author.display_name != "Baam":
            await ctx.author.edit(nick="Baam")
            await self.msg(ctx, "Changed stupid Baam's name")
        else:
            # msg() already appends the suffix; don't append it again here.
            await self.msg(ctx, "No need to change stupid Baam's name")

    # cat
    @commands.command(hidden=True)
    @bot.checks.is_member(440802535301709827)
    async def cat(self, ctx):
        options = [
            "meow",
            "wat",
            "noni",
            "send help"
        ]
        option = random.choice(options)
        await self.msg(ctx, option)

    # catsis
    @commands.command(hidden=True)
    @bot.checks.is_member(440802535301709827)
    async def catsis(self, ctx):
        options = [
            "You got no jams",
            "Infires",
            "Jjang jjang man bbong bbong",
            "Kkaepjang",
        ]
        option = random.choice(options)
        await self.msg(ctx, option)

    # char
    @commands.command(hidden=True)
    @bot.checks.is_member(473457198207467522)
    async def char(self, ctx):
        await self.msg(ctx, "Char is a lolicon")

    # chun
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(202373732067442690)
    async def chun(self, ctx):
        options = [
            "Immature Chun",
            "Mature Chun",
            "ChunDaBooly",
            "ChunHelpful"
        ]
        option = random.choice(options)
        await ctx.author.edit(nick=option)
        await self.msg(ctx, "Changed Chun's name")

    # fcb
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(283204260781490176)
    async def fcb(self, ctx):
        if ctx.author.display_name != ctx.author.name:
            try:
                await ctx.author.edit(nick=None)
            except discord.errors.Forbidden:
                # Best-effort nickname reset; keep the reply either way.
                pass
        await self.msg(ctx, "FCB is h0t")

    # hunter
    @commands.command(hidden=True)
    @bot.checks.is_member(285908956570976259)
    async def hunter(self, ctx):
        await self.msg(ctx, "hunter is gay lol")

    # jackie
    @commands.command(hidden=True)
    @bot.checks.is_member(293025979880833024)
    async def jackie(self, ctx):
        options = [
            "Handsome as **FUCK!**",
            "Jackie is {:,} pounds today."
        ]
        rint = random.randint
        # Random "weight"; format() is a no-op for the placeholder-free option.
        weight = round(rint(1, 100) * rint(1, 100) / (rint(1, 100) /
                                                      rint(1, 100)), 2)
        option = random.choice(options).format(weight)
        await self.msg(ctx, option)

    # joe
    @commands.command(hidden=True)
    @bot.checks.is_member(433662145268547585)
    async def joe(self, ctx):
        await self.msg(ctx, "The Cool One")

    # kroy
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(346115225625296897, 475068807912620042)
    async def kroy(self, ctx):
        if ctx.author.display_name != ctx.author.name:
            try:
                await ctx.author.edit(nick=ctx.author.name)
            except discord.errors.Forbidden:
                pass
            await self.msg(ctx, "Changed Kroyburger's name")
        else:
            await self.msg(ctx, "No need to change Kroyburger's name")

    # menmis
    @commands.command(hidden=True)
    @bot.checks.is_member(286573603368206347)
    async def menmis(self, ctx):
        options = [
            "Menmis is a good mod",
            "Menmis is getting demoted"
        ]
        option = random.choice(options)
        await self.msg(ctx, option)

    # orcles
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(301638410815406081)
    async def orcles(self, ctx):
        if ctx.author.display_name != ctx.author.name:
            await ctx.author.edit(nick=None)
            # msg() already appends the suffix; don't append it again here.
            await self.msg(ctx, "Changed obnoxious Orcles's stupid name")
        else:
            await self.msg(ctx, "Can't ~~ stand ~~ change Orcles's name.")

    # Rage
    @commands.command(hidden=True)
    @bot.checks.is_member(447187805106339864)
    async def Rage(self, ctx):
        await self.msg(ctx, "Rage dies faster than light")

    # rory
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(353180156883632128)
    async def rory(self, ctx):
        options = [
            "rory",
            "dinorory rex"
        ]
        option = random.choice(options)
        if ctx.author.display_name != option:
            await ctx.author.edit(nick=option)
            await self.msg(ctx, f":white_check_mark: Successfully changed fat "
                                f"rory's name to \"**{option}**\"")
        else:
            await self.msg(ctx, f":x: No need to change fat rory's name to "
                                f"\"**{option}**\"")

    # sharky
    # sh4rky
    @commands.command(hidden=True)
    @bot.checks.is_member(254759884367724554)
    async def sh4rky(self, ctx):
        await self.msg(ctx, "Below gay")

    # traf
    @commands.command(hidden=True)
    @bot.checks.is_member(311514087639089162)
    async def traf(self, ctx):
        options = [
            "**TRAF IS A MONKEY** :monkey_face::monkey::banana: ooh ooh ooh "
            "ah ah ah!!",
            "**TRAF IS THE OPEST**"
        ]
        option = random.choice(options)
        await self.msg(ctx, option)

    # xero
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(257239037721444353)
    async def xero(self, ctx):
        if ctx.author.display_name != ctx.author.name:
            await ctx.author.edit(nick=None)
            await self.msg(ctx, "Changed noob Xero's name")
        else:
            # msg() already appends the suffix; don't append it again here.
            await self.msg(ctx, "No need to change *this* loser's name")

    # zogic
    @commands.command(hidden=True)
    @commands.bot_has_permissions(manage_nicknames=True)
    @bot.checks.is_member(397628415085379584)
    async def zogic(self, ctx):
        await ctx.author.edit(nick=None)
        await self.msg(ctx, "Don't call me zoggy")
def setup(bot):
    # discord.py extension entry point: register the Personal cog.
    bot.add_cog(Personal(bot))
| 29.800676 | 79 | 0.594831 | 8,384 | 0.950028 | 0 | 0 | 7,769 | 0.88034 | 5,453 | 0.617904 | 1,678 | 0.190142 |
a853dba576efbfdcdc009b4074ffba7b0557a19b | 503 | py | Python | EducationalRound101/red_and_blue/red_and_blue_test.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | [
"MIT"
] | null | null | null | EducationalRound101/red_and_blue/red_and_blue_test.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | [
"MIT"
] | null | null | null | EducationalRound101/red_and_blue/red_and_blue_test.py | tqa236/codeforces | 81ad7bdb7786455f83d48d59a8884f62ded66caf | [
"MIT"
] | null | null | null | import unittest
from red_and_blue import red_and_blue
class Test(unittest.TestCase):
    """Fixed examples for red_and_blue (Codeforces Educational Round 101)."""
    def test_1(self):
        result = red_and_blue([6, -5, 7, -3], [2, 3, -4])
        self.assertEqual(result, 13)
    def test_2(self):
        result = red_and_blue([1, 1], [10, -3, 2, 2])
        self.assertEqual(result, 13)
    def test_3(self):
        negatives = [-1, -2, -3, -4, -5]
        self.assertEqual(red_and_blue(negatives, list(negatives)), 0)
    def test_4(self):
        self.assertEqual(red_and_blue([0], [0]), 0)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 23.952381 | 85 | 0.586481 | 397 | 0.789264 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.019881 |
a854b2655b1079804920d693717c0a025c1256f9 | 129,043 | py | Python | benchmarks/Generators/Parser/SygusParser.py | yangdinglou/DryadSynth | 0e80c08c3f8a95c16c75b413b53b10fa30c945ce | [
"MIT"
] | 19 | 2020-07-06T01:01:28.000Z | 2022-03-09T19:22:25.000Z | benchmarks/Generators/Parser/SygusParser.py | yangdinglou/DryadSynth | 0e80c08c3f8a95c16c75b413b53b10fa30c945ce | [
"MIT"
] | 2 | 2020-09-20T12:26:54.000Z | 2021-11-22T21:48:08.000Z | benchmarks/Generators/Parser/SygusParser.py | yangdinglou/DryadSynth | 0e80c08c3f8a95c16c75b413b53b10fa30c945ce | [
"MIT"
] | null | null | null | # Generated from Sygus.g4 by ANTLR 4.7.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3%")
buf.write("\u01ca\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
buf.write("\4\65\t\65\4\66\t\66\3\2\3\2\5\2o\n\2\3\3\3\3\3\3\3\3")
buf.write("\5\3u\n\3\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\7")
buf.write("\3\7\3\7\3\7\5\7\u0085\n\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\b\5\b\u0092\n\b\3\t\3\t\3\t\3\t\3\t\3\t")
buf.write("\3\n\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\5\13\u00b4\n\13\3\f\3\f\3\r\3\r\3\16\3")
buf.write("\16\3\17\3\17\3\17\3\17\3\20\3\20\3\21\3\21\3\21\3\21")
buf.write("\3\22\3\22\3\22\3\23\3\23\3\23\3\23\5\23\u00cd\n\23\3")
buf.write("\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\26\3\26")
buf.write("\3\26\3\27\3\27\3\27\3\27\5\27\u00df\n\27\3\30\3\30\3")
buf.write("\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33")
buf.write("\3\33\3\33\5\33\u00fb\n\33\3\34\3\34\3\34\3\34\3\35\3")
buf.write("\35\3\35\3\35\5\35\u0105\n\35\3\36\3\36\3\36\3\36\3\36")
buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\5\37\u0114\n")
buf.write("\37\3 \3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3\"\5")
buf.write("\"\u0125\n\"\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\5$\u0131\n")
buf.write("$\3%\3%\3%\3%\3%\5%\u0138\n%\3&\3&\3&\3\'\3\'\3\'\3\'")
buf.write("\5\'\u0141\n\'\3(\3(\3(\3(\3(\3(\3(\3(\3)\3)\3)\3*\3*")
buf.write("\3*\3*\5*\u0152\n*\3+\3+\3+\3+\3,\3,\3,\3,\3,\3-\3-\3")
buf.write("-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\5-\u016e\n")
buf.write("-\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3")
buf.write(".\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\5.\u018c\n.\3/\3/\3/\3")
buf.write("/\3/\3/\3/\3/\3\60\3\60\3\60\3\61\3\61\3\61\3\61\5\61")
buf.write("\u019d\n\61\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3")
buf.write("\63\3\63\5\63\u01a9\n\63\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\5\64\u01ba")
buf.write("\n\64\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\66")
buf.write("\3\66\3\66\3\66\3\66\3\66\2\2\67\2\4\6\b\n\f\16\20\22")
buf.write("\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPR")
buf.write("TVXZ\\^`bdfhj\2\3\3\2\16\17\2\u01c1\2n\3\2\2\2\4t\3\2")
buf.write("\2\2\6v\3\2\2\2\bx\3\2\2\2\n}\3\2\2\2\f\u0084\3\2\2\2")
buf.write("\16\u0091\3\2\2\2\20\u0093\3\2\2\2\22\u0099\3\2\2\2\24")
buf.write("\u00b3\3\2\2\2\26\u00b5\3\2\2\2\30\u00b7\3\2\2\2\32\u00b9")
buf.write("\3\2\2\2\34\u00bb\3\2\2\2\36\u00bf\3\2\2\2 \u00c1\3\2")
buf.write("\2\2\"\u00c5\3\2\2\2$\u00cc\3\2\2\2&\u00ce\3\2\2\2(\u00d3")
buf.write("\3\2\2\2*\u00d7\3\2\2\2,\u00de\3\2\2\2.\u00e0\3\2\2\2")
buf.write("\60\u00e5\3\2\2\2\62\u00ed\3\2\2\2\64\u00fa\3\2\2\2\66")
buf.write("\u00fc\3\2\2\28\u0104\3\2\2\2:\u0106\3\2\2\2<\u0113\3")
buf.write("\2\2\2>\u0115\3\2\2\2@\u011d\3\2\2\2B\u0124\3\2\2\2D\u0126")
buf.write("\3\2\2\2F\u0130\3\2\2\2H\u0137\3\2\2\2J\u0139\3\2\2\2")
buf.write("L\u0140\3\2\2\2N\u0142\3\2\2\2P\u014a\3\2\2\2R\u0151\3")
buf.write("\2\2\2T\u0153\3\2\2\2V\u0157\3\2\2\2X\u016d\3\2\2\2Z\u018b")
buf.write("\3\2\2\2\\\u018d\3\2\2\2^\u0195\3\2\2\2`\u019c\3\2\2\2")
buf.write("b\u019e\3\2\2\2d\u01a8\3\2\2\2f\u01b9\3\2\2\2h\u01bb\3")
buf.write("\2\2\2j\u01c1\3\2\2\2lo\5\4\3\2mo\3\2\2\2nl\3\2\2\2nm")
buf.write("\3\2\2\2o\3\3\2\2\2pq\5\b\5\2qr\5\n\6\2ru\3\2\2\2su\5")
buf.write("\n\6\2tp\3\2\2\2ts\3\2\2\2u\5\3\2\2\2vw\7%\2\2w\7\3\2")
buf.write("\2\2xy\7\3\2\2yz\7\4\2\2z{\5\6\4\2{|\7\5\2\2|\t\3\2\2")
buf.write("\2}~\5\16\b\2~\177\5\f\7\2\177\13\3\2\2\2\u0080\u0081")
buf.write("\5\16\b\2\u0081\u0082\5\f\7\2\u0082\u0085\3\2\2\2\u0083")
buf.write("\u0085\3\2\2\2\u0084\u0080\3\2\2\2\u0084\u0083\3\2\2\2")
buf.write("\u0085\r\3\2\2\2\u0086\u0092\5\60\31\2\u0087\u0092\5\62")
buf.write("\32\2\u0088\u0092\5X-\2\u0089\u0092\5T+\2\u008a\u0092")
buf.write("\5V,\2\u008b\u0092\5\22\n\2\u008c\u0092\5&\24\2\u008d")
buf.write("\u0092\5\20\t\2\u008e\u0092\5f\64\2\u008f\u0092\5h\65")
buf.write("\2\u0090\u0092\5j\66\2\u0091\u0086\3\2\2\2\u0091\u0087")
buf.write("\3\2\2\2\u0091\u0088\3\2\2\2\u0091\u0089\3\2\2\2\u0091")
buf.write("\u008a\3\2\2\2\u0091\u008b\3\2\2\2\u0091\u008c\3\2\2\2")
buf.write("\u0091\u008d\3\2\2\2\u0091\u008e\3\2\2\2\u0091\u008f\3")
buf.write("\2\2\2\u0091\u0090\3\2\2\2\u0092\17\3\2\2\2\u0093\u0094")
buf.write("\7\3\2\2\u0094\u0095\7\6\2\2\u0095\u0096\5\6\4\2\u0096")
buf.write("\u0097\5\24\13\2\u0097\u0098\7\5\2\2\u0098\21\3\2\2\2")
buf.write("\u0099\u009a\7\3\2\2\u009a\u009b\7\7\2\2\u009b\u009c\5")
buf.write("\6\4\2\u009c\u009d\5\24\13\2\u009d\u009e\7\5\2\2\u009e")
buf.write("\23\3\2\2\2\u009f\u00a0\7\3\2\2\u00a0\u00a1\7\b\2\2\u00a1")
buf.write("\u00a2\5\26\f\2\u00a2\u00a3\7\5\2\2\u00a3\u00b4\3\2\2")
buf.write("\2\u00a4\u00b4\7\t\2\2\u00a5\u00b4\7\n\2\2\u00a6\u00b4")
buf.write("\7\13\2\2\u00a7\u00a8\7\3\2\2\u00a8\u00a9\7\f\2\2\u00a9")
buf.write("\u00aa\5 \21\2\u00aa\u00ab\7\5\2\2\u00ab\u00b4\3\2\2\2")
buf.write("\u00ac\u00ad\7\3\2\2\u00ad\u00ae\7\r\2\2\u00ae\u00af\5")
buf.write("\24\13\2\u00af\u00b0\5\24\13\2\u00b0\u00b1\7\5\2\2\u00b1")
buf.write("\u00b4\3\2\2\2\u00b2\u00b4\5\6\4\2\u00b3\u009f\3\2\2\2")
buf.write("\u00b3\u00a4\3\2\2\2\u00b3\u00a5\3\2\2\2\u00b3\u00a6\3")
buf.write("\2\2\2\u00b3\u00a7\3\2\2\2\u00b3\u00ac\3\2\2\2\u00b3\u00b2")
buf.write("\3\2\2\2\u00b4\25\3\2\2\2\u00b5\u00b6\7!\2\2\u00b6\27")
buf.write("\3\2\2\2\u00b7\u00b8\t\2\2\2\u00b8\31\3\2\2\2\u00b9\u00ba")
buf.write("\7\"\2\2\u00ba\33\3\2\2\2\u00bb\u00bc\5\6\4\2\u00bc\u00bd")
buf.write("\7\20\2\2\u00bd\u00be\5\6\4\2\u00be\35\3\2\2\2\u00bf\u00c0")
buf.write("\7#\2\2\u00c0\37\3\2\2\2\u00c1\u00c2\7\3\2\2\u00c2\u00c3")
buf.write("\5\"\22\2\u00c3\u00c4\7\5\2\2\u00c4!\3\2\2\2\u00c5\u00c6")
buf.write("\5\6\4\2\u00c6\u00c7\5$\23\2\u00c7#\3\2\2\2\u00c8\u00c9")
buf.write("\5\6\4\2\u00c9\u00ca\5$\23\2\u00ca\u00cd\3\2\2\2\u00cb")
buf.write("\u00cd\3\2\2\2\u00cc\u00c8\3\2\2\2\u00cc\u00cb\3\2\2\2")
buf.write("\u00cd%\3\2\2\2\u00ce\u00cf\7\3\2\2\u00cf\u00d0\7\21\2")
buf.write("\2\u00d0\u00d1\5(\25\2\u00d1\u00d2\7\5\2\2\u00d2\'\3\2")
buf.write("\2\2\u00d3\u00d4\7\3\2\2\u00d4\u00d5\5*\26\2\u00d5\u00d6")
buf.write("\7\5\2\2\u00d6)\3\2\2\2\u00d7\u00d8\5.\30\2\u00d8\u00d9")
buf.write("\5,\27\2\u00d9+\3\2\2\2\u00da\u00db\5.\30\2\u00db\u00dc")
buf.write("\5,\27\2\u00dc\u00df\3\2\2\2\u00dd\u00df\3\2\2\2\u00de")
buf.write("\u00da\3\2\2\2\u00de\u00dd\3\2\2\2\u00df-\3\2\2\2\u00e0")
buf.write("\u00e1\7\3\2\2\u00e1\u00e2\5\6\4\2\u00e2\u00e3\7$\2\2")
buf.write("\u00e3\u00e4\7\5\2\2\u00e4/\3\2\2\2\u00e5\u00e6\7\3\2")
buf.write("\2\u00e6\u00e7\7\22\2\2\u00e7\u00e8\5\6\4\2\u00e8\u00e9")
buf.write("\5\66\34\2\u00e9\u00ea\5\24\13\2\u00ea\u00eb\5<\37\2\u00eb")
buf.write("\u00ec\7\5\2\2\u00ec\61\3\2\2\2\u00ed\u00ee\7\3\2\2\u00ee")
buf.write("\u00ef\7\23\2\2\u00ef\u00f0\5\6\4\2\u00f0\u00f1\7\3\2")
buf.write("\2\u00f1\u00f2\5\64\33\2\u00f2\u00f3\7\5\2\2\u00f3\u00f4")
buf.write("\5\24\13\2\u00f4\u00f5\7\5\2\2\u00f5\63\3\2\2\2\u00f6")
buf.write("\u00f7\5\24\13\2\u00f7\u00f8\5\64\33\2\u00f8\u00fb\3\2")
buf.write("\2\2\u00f9\u00fb\3\2\2\2\u00fa\u00f6\3\2\2\2\u00fa\u00f9")
buf.write("\3\2\2\2\u00fb\65\3\2\2\2\u00fc\u00fd\7\3\2\2\u00fd\u00fe")
buf.write("\58\35\2\u00fe\u00ff\7\5\2\2\u00ff\67\3\2\2\2\u0100\u0101")
buf.write("\5:\36\2\u0101\u0102\58\35\2\u0102\u0105\3\2\2\2\u0103")
buf.write("\u0105\3\2\2\2\u0104\u0100\3\2\2\2\u0104\u0103\3\2\2\2")
buf.write("\u01059\3\2\2\2\u0106\u0107\7\3\2\2\u0107\u0108\5\6\4")
buf.write("\2\u0108\u0109\5\24\13\2\u0109\u010a\7\5\2\2\u010a;\3")
buf.write("\2\2\2\u010b\u010c\7\3\2\2\u010c\u010d\5\6\4\2\u010d\u010e")
buf.write("\5F$\2\u010e\u010f\7\5\2\2\u010f\u0114\3\2\2\2\u0110\u0114")
buf.write("\5H%\2\u0111\u0114\5\6\4\2\u0112\u0114\5> \2\u0113\u010b")
buf.write("\3\2\2\2\u0113\u0110\3\2\2\2\u0113\u0111\3\2\2\2\u0113")
buf.write("\u0112\3\2\2\2\u0114=\3\2\2\2\u0115\u0116\7\3\2\2\u0116")
buf.write("\u0117\7\24\2\2\u0117\u0118\7\3\2\2\u0118\u0119\5@!\2")
buf.write("\u0119\u011a\7\5\2\2\u011a\u011b\5<\37\2\u011b\u011c\7")
buf.write("\5\2\2\u011c?\3\2\2\2\u011d\u011e\5D#\2\u011e\u011f\5")
buf.write("B\"\2\u011fA\3\2\2\2\u0120\u0121\5D#\2\u0121\u0122\5B")
buf.write("\"\2\u0122\u0125\3\2\2\2\u0123\u0125\3\2\2\2\u0124\u0120")
buf.write("\3\2\2\2\u0124\u0123\3\2\2\2\u0125C\3\2\2\2\u0126\u0127")
buf.write("\7\3\2\2\u0127\u0128\5\6\4\2\u0128\u0129\5\24\13\2\u0129")
buf.write("\u012a\5<\37\2\u012a\u012b\7\5\2\2\u012bE\3\2\2\2\u012c")
buf.write("\u012d\5<\37\2\u012d\u012e\5F$\2\u012e\u0131\3\2\2\2\u012f")
buf.write("\u0131\3\2\2\2\u0130\u012c\3\2\2\2\u0130\u012f\3\2\2\2")
buf.write("\u0131G\3\2\2\2\u0132\u0138\5\26\f\2\u0133\u0138\5\30")
buf.write("\r\2\u0134\u0138\5\32\16\2\u0135\u0138\5\34\17\2\u0136")
buf.write("\u0138\5\36\20\2\u0137\u0132\3\2\2\2\u0137\u0133\3\2\2")
buf.write("\2\u0137\u0134\3\2\2\2\u0137\u0135\3\2\2\2\u0137\u0136")
buf.write("\3\2\2\2\u0138I\3\2\2\2\u0139\u013a\5N(\2\u013a\u013b")
buf.write("\5L\'\2\u013bK\3\2\2\2\u013c\u013d\5N(\2\u013d\u013e\5")
buf.write("L\'\2\u013e\u0141\3\2\2\2\u013f\u0141\3\2\2\2\u0140\u013c")
buf.write("\3\2\2\2\u0140\u013f\3\2\2\2\u0141M\3\2\2\2\u0142\u0143")
buf.write("\7\3\2\2\u0143\u0144\5\6\4\2\u0144\u0145\5\24\13\2\u0145")
buf.write("\u0146\7\3\2\2\u0146\u0147\5P)\2\u0147\u0148\7\5\2\2\u0148")
buf.write("\u0149\7\5\2\2\u0149O\3\2\2\2\u014a\u014b\5Z.\2\u014b")
buf.write("\u014c\5R*\2\u014cQ\3\2\2\2\u014d\u014e\5Z.\2\u014e\u014f")
buf.write("\5R*\2\u014f\u0152\3\2\2\2\u0150\u0152\3\2\2\2\u0151\u014d")
buf.write("\3\2\2\2\u0151\u0150\3\2\2\2\u0152S\3\2\2\2\u0153\u0154")
buf.write("\7\3\2\2\u0154\u0155\7\25\2\2\u0155\u0156\7\5\2\2\u0156")
buf.write("U\3\2\2\2\u0157\u0158\7\3\2\2\u0158\u0159\7\26\2\2\u0159")
buf.write("\u015a\5<\37\2\u015a\u015b\7\5\2\2\u015bW\3\2\2\2\u015c")
buf.write("\u015d\7\3\2\2\u015d\u015e\7\27\2\2\u015e\u015f\5\6\4")
buf.write("\2\u015f\u0160\5\66\34\2\u0160\u0161\5\24\13\2\u0161\u0162")
buf.write("\7\3\2\2\u0162\u0163\5J&\2\u0163\u0164\7\5\2\2\u0164\u0165")
buf.write("\7\5\2\2\u0165\u016e\3\2\2\2\u0166\u0167\7\3\2\2\u0167")
buf.write("\u0168\7\27\2\2\u0168\u0169\5\6\4\2\u0169\u016a\5\66\34")
buf.write("\2\u016a\u016b\5\24\13\2\u016b\u016c\7\5\2\2\u016c\u016e")
buf.write("\3\2\2\2\u016d\u015c\3\2\2\2\u016d\u0166\3\2\2\2\u016e")
buf.write("Y\3\2\2\2\u016f\u018c\5\6\4\2\u0170\u018c\5H%\2\u0171")
buf.write("\u0172\7\3\2\2\u0172\u0173\5\6\4\2\u0173\u0174\5d\63\2")
buf.write("\u0174\u0175\7\5\2\2\u0175\u018c\3\2\2\2\u0176\u0177\7")
buf.write("\3\2\2\u0177\u0178\7\30\2\2\u0178\u0179\5\24\13\2\u0179")
buf.write("\u017a\7\5\2\2\u017a\u018c\3\2\2\2\u017b\u017c\7\3\2\2")
buf.write("\u017c\u017d\7\31\2\2\u017d\u017e\5\24\13\2\u017e\u017f")
buf.write("\7\5\2\2\u017f\u018c\3\2\2\2\u0180\u0181\7\3\2\2\u0181")
buf.write("\u0182\7\32\2\2\u0182\u0183\5\24\13\2\u0183\u0184\7\5")
buf.write("\2\2\u0184\u018c\3\2\2\2\u0185\u0186\7\3\2\2\u0186\u0187")
buf.write("\7\33\2\2\u0187\u0188\5\24\13\2\u0188\u0189\7\5\2\2\u0189")
buf.write("\u018c\3\2\2\2\u018a\u018c\5\\/\2\u018b\u016f\3\2\2\2")
buf.write("\u018b\u0170\3\2\2\2\u018b\u0171\3\2\2\2\u018b\u0176\3")
buf.write("\2\2\2\u018b\u017b\3\2\2\2\u018b\u0180\3\2\2\2\u018b\u0185")
buf.write("\3\2\2\2\u018b\u018a\3\2\2\2\u018c[\3\2\2\2\u018d\u018e")
buf.write("\7\3\2\2\u018e\u018f\7\24\2\2\u018f\u0190\7\3\2\2\u0190")
buf.write("\u0191\5^\60\2\u0191\u0192\7\5\2\2\u0192\u0193\5Z.\2\u0193")
buf.write("\u0194\7\5\2\2\u0194]\3\2\2\2\u0195\u0196\5b\62\2\u0196")
buf.write("\u0197\5`\61\2\u0197_\3\2\2\2\u0198\u0199\5b\62\2\u0199")
buf.write("\u019a\5`\61\2\u019a\u019d\3\2\2\2\u019b\u019d\3\2\2\2")
buf.write("\u019c\u0198\3\2\2\2\u019c\u019b\3\2\2\2\u019da\3\2\2")
buf.write("\2\u019e\u019f\7\3\2\2\u019f\u01a0\5\6\4\2\u01a0\u01a1")
buf.write("\5\24\13\2\u01a1\u01a2\5Z.\2\u01a2\u01a3\7\5\2\2\u01a3")
buf.write("c\3\2\2\2\u01a4\u01a5\5Z.\2\u01a5\u01a6\5d\63\2\u01a6")
buf.write("\u01a9\3\2\2\2\u01a7\u01a9\3\2\2\2\u01a8\u01a4\3\2\2\2")
buf.write("\u01a8\u01a7\3\2\2\2\u01a9e\3\2\2\2\u01aa\u01ab\7\3\2")
buf.write("\2\u01ab\u01ac\7\34\2\2\u01ac\u01ad\5\6\4\2\u01ad\u01ae")
buf.write("\5\66\34\2\u01ae\u01af\7\3\2\2\u01af\u01b0\5J&\2\u01b0")
buf.write("\u01b1\7\5\2\2\u01b1\u01b2\7\5\2\2\u01b2\u01ba\3\2\2\2")
buf.write("\u01b3\u01b4\7\3\2\2\u01b4\u01b5\7\34\2\2\u01b5\u01b6")
buf.write("\5\6\4\2\u01b6\u01b7\5\66\34\2\u01b7\u01b8\7\5\2\2\u01b8")
buf.write("\u01ba\3\2\2\2\u01b9\u01aa\3\2\2\2\u01b9\u01b3\3\2\2\2")
buf.write("\u01bag\3\2\2\2\u01bb\u01bc\7\3\2\2\u01bc\u01bd\7\35\2")
buf.write("\2\u01bd\u01be\5\6\4\2\u01be\u01bf\5\24\13\2\u01bf\u01c0")
buf.write("\7\5\2\2\u01c0i\3\2\2\2\u01c1\u01c2\7\3\2\2\u01c2\u01c3")
buf.write("\7\36\2\2\u01c3\u01c4\5\6\4\2\u01c4\u01c5\5\6\4\2\u01c5")
buf.write("\u01c6\5\6\4\2\u01c6\u01c7\5\6\4\2\u01c7\u01c8\7\5\2\2")
buf.write("\u01c8k\3\2\2\2\26nt\u0084\u0091\u00b3\u00cc\u00de\u00fa")
buf.write("\u0104\u0113\u0124\u0130\u0137\u0140\u0151\u016d\u018b")
buf.write("\u019c\u01a8\u01b9")
return buf.getvalue()
class SygusParser ( Parser ):
grammarFileName = "Sygus.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'('", "'set-logic'", "')'", "'declare-var'",
"'define-sort'", "'BitVec'", "'Int'", "'Bool'", "'Real'",
"'Enum'", "'Array'", "'true'", "'false'", "'::'", "'set-options'",
"'define-fun'", "'declare-fun'", "'let'", "'check-synth'",
"'constraint'", "'synth-fun'", "'Constant'", "'Variable'",
"'InputVariable'", "'LocalVariable'", "'synth-inv'",
"'declare-primed-var'", "'inv-constraint'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "WS", "COMMENT", "INTEGER", "BVCONST",
"REALCONST", "QUOTEDLIT", "SYMBOL" ]
RULE_start = 0
RULE_prog = 1
RULE_symbol = 2
RULE_setLogicCmd = 3
RULE_cmdPlus = 4
RULE_cmdPlusTail = 5
RULE_cmd = 6
RULE_varDeclCmd = 7
RULE_sortDefCmd = 8
RULE_sortExpr = 9
RULE_intConst = 10
RULE_boolConst = 11
RULE_bVConst = 12
RULE_enumConst = 13
RULE_realConst = 14
RULE_eCList = 15
RULE_symbolPlus = 16
RULE_symbolPlusTail = 17
RULE_setOptsCmd = 18
RULE_optList = 19
RULE_symbolPairPlus = 20
RULE_symbolPairPlusTail = 21
RULE_symbolPair = 22
RULE_funDefCmd = 23
RULE_funDeclCmd = 24
RULE_sortStar = 25
RULE_argList = 26
RULE_symbolSortPairStar = 27
RULE_symbolSortPair = 28
RULE_term = 29
RULE_letTerm = 30
RULE_letBindingTermPlus = 31
RULE_letBindingTermPlusTail = 32
RULE_letBindingTerm = 33
RULE_termStar = 34
RULE_literal = 35
RULE_nTDefPlus = 36
RULE_nTDefPlusTail = 37
RULE_nTDef = 38
RULE_gTermPlus = 39
RULE_gTermPlusTail = 40
RULE_checkSynthCmd = 41
RULE_constraintCmd = 42
RULE_synthFunCmd = 43
RULE_gTerm = 44
RULE_letGTerm = 45
RULE_letBindingGTermPlus = 46
RULE_letBindingGTermPlusTail = 47
RULE_letBindingGTerm = 48
RULE_gTermStar = 49
RULE_synthInvCmd = 50
RULE_declarePrimedVar = 51
RULE_invConstraintCmd = 52
ruleNames = [ "start", "prog", "symbol", "setLogicCmd", "cmdPlus",
"cmdPlusTail", "cmd", "varDeclCmd", "sortDefCmd", "sortExpr",
"intConst", "boolConst", "bVConst", "enumConst", "realConst",
"eCList", "symbolPlus", "symbolPlusTail", "setOptsCmd",
"optList", "symbolPairPlus", "symbolPairPlusTail", "symbolPair",
"funDefCmd", "funDeclCmd", "sortStar", "argList", "symbolSortPairStar",
"symbolSortPair", "term", "letTerm", "letBindingTermPlus",
"letBindingTermPlusTail", "letBindingTerm", "termStar",
"literal", "nTDefPlus", "nTDefPlusTail", "nTDef", "gTermPlus",
"gTermPlusTail", "checkSynthCmd", "constraintCmd", "synthFunCmd",
"gTerm", "letGTerm", "letBindingGTermPlus", "letBindingGTermPlusTail",
"letBindingGTerm", "gTermStar", "synthInvCmd", "declarePrimedVar",
"invConstraintCmd" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
T__15=16
T__16=17
T__17=18
T__18=19
T__19=20
T__20=21
T__21=22
T__22=23
T__23=24
T__24=25
T__25=26
T__26=27
T__27=28
WS=29
COMMENT=30
INTEGER=31
BVCONST=32
REALCONST=33
QUOTEDLIT=34
SYMBOL=35
    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        """Create the parser over *input*, wiring up the shared ATN simulator."""
        super().__init__(input, output)
        self.checkVersion("4.7.1")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class StartContext(ParserRuleContext):
        """Parse-tree node for the 'start' rule: an optional 'prog'."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def prog(self):
            # Child accessor: the single 'prog' subtree, or None if absent.
            return self.getTypedRuleContext(SygusParser.ProgContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_start
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStart" ):
                listener.enterStart(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStart" ):
                listener.exitStart(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStart" ):
                return visitor.visitStart(self)
            else:
                return visitor.visitChildren(self)
    def start(self):
        """Parse rule: start : prog | <EOF> (empty program is accepted)."""
        localctx = SygusParser.StartContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_start)
        try:
            self.state = 108
            self._errHandler.sync(self)
            # One-token lookahead decides between a program and an empty input.
            token = self._input.LA(1)
            if token in [SygusParser.T__0]:
                self.enterOuterAlt(localctx, 1)
                self.state = 106
                self.prog()
                pass
            elif token in [SygusParser.EOF]:
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ProgContext(ParserRuleContext):
        """Parse-tree node for the 'prog' rule: [setLogicCmd] cmdPlus."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def setLogicCmd(self):
            return self.getTypedRuleContext(SygusParser.SetLogicCmdContext,0)
        def cmdPlus(self):
            return self.getTypedRuleContext(SygusParser.CmdPlusContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_prog
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterProg" ):
                listener.enterProg(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitProg" ):
                listener.exitProg(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitProg" ):
                return visitor.visitProg(self)
            else:
                return visitor.visitChildren(self)
    def prog(self):
        """Parse rule: prog : setLogicCmd cmdPlus | cmdPlus."""
        localctx = SygusParser.ProgContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_prog)
        try:
            self.state = 114
            self._errHandler.sync(self)
            # Adaptive prediction: both alternatives start with '(', so an
            # LL(1) token test is not enough to choose between them.
            la_ = self._interp.adaptivePredict(self._input,1,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 110
                self.setLogicCmd()
                self.state = 111
                self.cmdPlus()
                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 113
                self.cmdPlus()
                pass

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SymbolContext(ParserRuleContext):
        """Parse-tree node for the 'symbol' rule: a single SYMBOL token."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def SYMBOL(self):
            return self.getToken(SygusParser.SYMBOL, 0)
        def getRuleIndex(self):
            return SygusParser.RULE_symbol
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSymbol" ):
                listener.enterSymbol(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSymbol" ):
                listener.exitSymbol(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSymbol" ):
                return visitor.visitSymbol(self)
            else:
                return visitor.visitChildren(self)
    def symbol(self):
        """Parse rule: symbol : SYMBOL."""
        localctx = SygusParser.SymbolContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_symbol)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 116
            self.match(SygusParser.SYMBOL)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SetLogicCmdContext(ParserRuleContext):
        """Parse-tree node for the 'setLogicCmd' rule: '(' 'set-logic' symbol ')'."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_setLogicCmd
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSetLogicCmd" ):
                listener.enterSetLogicCmd(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSetLogicCmd" ):
                listener.exitSetLogicCmd(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSetLogicCmd" ):
                return visitor.visitSetLogicCmd(self)
            else:
                return visitor.visitChildren(self)
    def setLogicCmd(self):
        """Parse rule: setLogicCmd : '(' 'set-logic' symbol ')'."""
        localctx = SygusParser.SetLogicCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_setLogicCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 118
            self.match(SygusParser.T__0)
            self.state = 119
            self.match(SygusParser.T__1)
            self.state = 120
            self.symbol()
            self.state = 121
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class CmdPlusContext(ParserRuleContext):
        """Parse-tree node for 'cmdPlus': one cmd followed by cmdPlusTail."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def cmd(self):
            return self.getTypedRuleContext(SygusParser.CmdContext,0)
        def cmdPlusTail(self):
            return self.getTypedRuleContext(SygusParser.CmdPlusTailContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_cmdPlus
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCmdPlus" ):
                listener.enterCmdPlus(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCmdPlus" ):
                listener.exitCmdPlus(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmdPlus" ):
                return visitor.visitCmdPlus(self)
            else:
                return visitor.visitChildren(self)
    def cmdPlus(self):
        """Parse rule: cmdPlus : cmd cmdPlusTail (right-recursive 'one or more')."""
        localctx = SygusParser.CmdPlusContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_cmdPlus)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 123
            self.cmd()
            self.state = 124
            self.cmdPlusTail()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class CmdPlusTailContext(ParserRuleContext):
        """Parse-tree node for 'cmdPlusTail': cmd cmdPlusTail or empty."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def cmd(self):
            return self.getTypedRuleContext(SygusParser.CmdContext,0)
        def cmdPlusTail(self):
            return self.getTypedRuleContext(SygusParser.CmdPlusTailContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_cmdPlusTail
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCmdPlusTail" ):
                listener.enterCmdPlusTail(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCmdPlusTail" ):
                listener.exitCmdPlusTail(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmdPlusTail" ):
                return visitor.visitCmdPlusTail(self)
            else:
                return visitor.visitChildren(self)
    def cmdPlusTail(self):
        """Parse rule: cmdPlusTail : cmd cmdPlusTail | <empty> (ends at EOF)."""
        localctx = SygusParser.CmdPlusTailContext(self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_cmdPlusTail)
        try:
            self.state = 130
            self._errHandler.sync(self)
            # '(' starts another command; EOF takes the empty alternative.
            token = self._input.LA(1)
            if token in [SygusParser.T__0]:
                self.enterOuterAlt(localctx, 1)
                self.state = 126
                self.cmd()
                self.state = 127
                self.cmdPlusTail()
                pass
            elif token in [SygusParser.EOF]:
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class CmdContext(ParserRuleContext):
        """Parse-tree node for 'cmd': exactly one of the eleven SyGuS commands.

        Only the accessor matching the parsed alternative returns a child;
        the others return None.
        """
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def funDefCmd(self):
            return self.getTypedRuleContext(SygusParser.FunDefCmdContext,0)
        def funDeclCmd(self):
            return self.getTypedRuleContext(SygusParser.FunDeclCmdContext,0)
        def synthFunCmd(self):
            return self.getTypedRuleContext(SygusParser.SynthFunCmdContext,0)
        def checkSynthCmd(self):
            return self.getTypedRuleContext(SygusParser.CheckSynthCmdContext,0)
        def constraintCmd(self):
            return self.getTypedRuleContext(SygusParser.ConstraintCmdContext,0)
        def sortDefCmd(self):
            return self.getTypedRuleContext(SygusParser.SortDefCmdContext,0)
        def setOptsCmd(self):
            return self.getTypedRuleContext(SygusParser.SetOptsCmdContext,0)
        def varDeclCmd(self):
            return self.getTypedRuleContext(SygusParser.VarDeclCmdContext,0)
        def synthInvCmd(self):
            return self.getTypedRuleContext(SygusParser.SynthInvCmdContext,0)
        def declarePrimedVar(self):
            return self.getTypedRuleContext(SygusParser.DeclarePrimedVarContext,0)
        def invConstraintCmd(self):
            return self.getTypedRuleContext(SygusParser.InvConstraintCmdContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_cmd
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCmd" ):
                listener.enterCmd(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCmd" ):
                listener.exitCmd(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd" ):
                return visitor.visitCmd(self)
            else:
                return visitor.visitChildren(self)
    def cmd(self):
        """Parse rule: cmd — dispatch to one of the eleven SyGuS command rules.

        All commands begin with '(', so ANTLR uses adaptive prediction on
        the second token to pick the alternative.
        """
        localctx = SygusParser.CmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_cmd)
        try:
            self.state = 143
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 132
                self.funDefCmd()
                pass

            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 133
                self.funDeclCmd()
                pass

            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 134
                self.synthFunCmd()
                pass

            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 135
                self.checkSynthCmd()
                pass

            elif la_ == 5:
                self.enterOuterAlt(localctx, 5)
                self.state = 136
                self.constraintCmd()
                pass

            elif la_ == 6:
                self.enterOuterAlt(localctx, 6)
                self.state = 137
                self.sortDefCmd()
                pass

            elif la_ == 7:
                self.enterOuterAlt(localctx, 7)
                self.state = 138
                self.setOptsCmd()
                pass

            elif la_ == 8:
                self.enterOuterAlt(localctx, 8)
                self.state = 139
                self.varDeclCmd()
                pass

            elif la_ == 9:
                self.enterOuterAlt(localctx, 9)
                self.state = 140
                self.synthInvCmd()
                pass

            elif la_ == 10:
                self.enterOuterAlt(localctx, 10)
                self.state = 141
                self.declarePrimedVar()
                pass

            elif la_ == 11:
                self.enterOuterAlt(localctx, 11)
                self.state = 142
                self.invConstraintCmd()
                pass

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class VarDeclCmdContext(ParserRuleContext):
        """Parse-tree node for 'varDeclCmd': '(' 'declare-var' symbol sortExpr ')'."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)
        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_varDeclCmd
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterVarDeclCmd" ):
                listener.enterVarDeclCmd(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitVarDeclCmd" ):
                listener.exitVarDeclCmd(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitVarDeclCmd" ):
                return visitor.visitVarDeclCmd(self)
            else:
                return visitor.visitChildren(self)
    def varDeclCmd(self):
        """Parse rule: varDeclCmd : '(' 'declare-var' symbol sortExpr ')'."""
        localctx = SygusParser.VarDeclCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 14, self.RULE_varDeclCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 145
            self.match(SygusParser.T__0)
            self.state = 146
            self.match(SygusParser.T__3)
            self.state = 147
            self.symbol()
            self.state = 148
            self.sortExpr()
            self.state = 149
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SortDefCmdContext(ParserRuleContext):
        """Parse-tree node for 'sortDefCmd': '(' 'define-sort' symbol sortExpr ')'."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)
        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_sortDefCmd
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSortDefCmd" ):
                listener.enterSortDefCmd(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSortDefCmd" ):
                listener.exitSortDefCmd(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSortDefCmd" ):
                return visitor.visitSortDefCmd(self)
            else:
                return visitor.visitChildren(self)
    def sortDefCmd(self):
        """Parse rule: sortDefCmd : '(' 'define-sort' symbol sortExpr ')'."""
        localctx = SygusParser.SortDefCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 16, self.RULE_sortDefCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 151
            self.match(SygusParser.T__0)
            self.state = 152
            self.match(SygusParser.T__4)
            self.state = 153
            self.symbol()
            self.state = 154
            self.sortExpr()
            self.state = 155
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SortExprContext(ParserRuleContext):
        """Parse-tree node for 'sortExpr': BitVec/Int/Bool/Real/Enum/Array or a named sort."""
        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def intConst(self):
            return self.getTypedRuleContext(SygusParser.IntConstContext,0)
        def eCList(self):
            return self.getTypedRuleContext(SygusParser.ECListContext,0)
        def sortExpr(self, i:int=None):
            # Array sorts contain two nested sortExpr children (index and element);
            # with i=None return all of them, otherwise the i-th.
            if i is None:
                return self.getTypedRuleContexts(SygusParser.SortExprContext)
            else:
                return self.getTypedRuleContext(SygusParser.SortExprContext,i)
        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)
        def getRuleIndex(self):
            return SygusParser.RULE_sortExpr
        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSortExpr" ):
                listener.enterSortExpr(self)
        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSortExpr" ):
                listener.exitSortExpr(self)
        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSortExpr" ):
                return visitor.visitSortExpr(self)
            else:
                return visitor.visitChildren(self)
def sortExpr(self):
localctx = SygusParser.SortExprContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_sortExpr)
try:
self.state = 177
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 157
self.match(SygusParser.T__0)
self.state = 158
self.match(SygusParser.T__5)
self.state = 159
self.intConst()
self.state = 160
self.match(SygusParser.T__2)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 162
self.match(SygusParser.T__6)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 163
self.match(SygusParser.T__7)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 164
self.match(SygusParser.T__8)
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 165
self.match(SygusParser.T__0)
self.state = 166
self.match(SygusParser.T__9)
self.state = 167
self.eCList()
self.state = 168
self.match(SygusParser.T__2)
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 170
self.match(SygusParser.T__0)
self.state = 171
self.match(SygusParser.T__10)
self.state = 172
self.sortExpr()
self.state = 173
self.sortExpr()
self.state = 174
self.match(SygusParser.T__2)
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 176
self.symbol()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IntConstContext(ParserRuleContext):
    """Parse-tree node for the 'intConst' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INTEGER(self):
        # The single INTEGER terminal of this rule.
        return self.getToken(SygusParser.INTEGER, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_intConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterIntConst" ):
            listener.enterIntConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitIntConst" ):
            listener.exitIntConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitIntConst" ):
            return visitor.visitIntConst(self)
        else:
            return visitor.visitChildren(self)


def intConst(self):
    """Parse the 'intConst' rule: a single INTEGER token.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.IntConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_intConst)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 179
        self.match(SygusParser.INTEGER)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BoolConstContext(ParserRuleContext):
    """Parse-tree node for the 'boolConst' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SygusParser.RULE_boolConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterBoolConst" ):
            listener.enterBoolConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitBoolConst" ):
            listener.exitBoolConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitBoolConst" ):
            return visitor.visitBoolConst(self)
        else:
            return visitor.visitChildren(self)


def boolConst(self):
    """Parse the 'boolConst' rule: T__11 | T__12 (presumably 'true' | 'false').

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.BoolConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_boolConst)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 181
        # One-token set match: consume if the lookahead is in the allowed set,
        # otherwise attempt single-token inline recovery.
        _la = self._input.LA(1)
        if not(_la==SygusParser.T__11 or _la==SygusParser.T__12):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BVConstContext(ParserRuleContext):
    """Parse-tree node for the 'bVConst' (bit-vector constant) rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def BVCONST(self):
        # The single BVCONST terminal of this rule.
        return self.getToken(SygusParser.BVCONST, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_bVConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterBVConst" ):
            listener.enterBVConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitBVConst" ):
            listener.exitBVConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitBVConst" ):
            return visitor.visitBVConst(self)
        else:
            return visitor.visitChildren(self)


def bVConst(self):
    """Parse the 'bVConst' rule: a single BVCONST token.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.BVConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_bVConst)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 183
        self.match(SygusParser.BVCONST)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class EnumConstContext(ParserRuleContext):
    """Parse-tree node for the 'enumConst' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self, i:int=None):
        # With no index, return BOTH symbol children (enum type and value);
        # otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SygusParser.SymbolContext)
        else:
            return self.getTypedRuleContext(SygusParser.SymbolContext,i)

    def getRuleIndex(self):
        return SygusParser.RULE_enumConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterEnumConst" ):
            listener.enterEnumConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitEnumConst" ):
            listener.exitEnumConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitEnumConst" ):
            return visitor.visitEnumConst(self)
        else:
            return visitor.visitChildren(self)


def enumConst(self):
    """Parse the 'enumConst' rule: symbol T__13 symbol (T__13 presumably '::').

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.EnumConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_enumConst)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 185
        self.symbol()
        self.state = 186
        self.match(SygusParser.T__13)
        self.state = 187
        self.symbol()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RealConstContext(ParserRuleContext):
    """Parse-tree node for the 'realConst' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def REALCONST(self):
        # The single REALCONST terminal of this rule.
        return self.getToken(SygusParser.REALCONST, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_realConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRealConst" ):
            listener.enterRealConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRealConst" ):
            listener.exitRealConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRealConst" ):
            return visitor.visitRealConst(self)
        else:
            return visitor.visitChildren(self)


def realConst(self):
    """Parse the 'realConst' rule: a single REALCONST token.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.RealConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_realConst)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 189
        self.match(SygusParser.REALCONST)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ECListContext(ParserRuleContext):
    """Parse-tree node for the 'eCList' (enum-constructor list) rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolPlus(self):
        return self.getTypedRuleContext(SygusParser.SymbolPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_eCList

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterECList" ):
            listener.enterECList(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitECList" ):
            listener.exitECList(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitECList" ):
            return visitor.visitECList(self)
        else:
            return visitor.visitChildren(self)


def eCList(self):
    """Parse the 'eCList' rule: '(' symbolPlus ')'.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.ECListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_eCList)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 191
        self.match(SygusParser.T__0)
        self.state = 192
        self.symbolPlus()
        self.state = 193
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPlusContext(ParserRuleContext):
    """Parse-tree node for the 'symbolPlus' (one-or-more symbols) rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def symbolPlusTail(self):
        return self.getTypedRuleContext(SygusParser.SymbolPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPlus

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolPlus" ):
            listener.enterSymbolPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPlus" ):
            listener.exitSymbolPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPlus" ):
            return visitor.visitSymbolPlus(self)
        else:
            return visitor.visitChildren(self)


def symbolPlus(self):
    """Parse the 'symbolPlus' rule: symbol symbolPlusTail (right-recursive '+' list).

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.SymbolPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 32, self.RULE_symbolPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 195
        self.symbol()
        self.state = 196
        self.symbolPlusTail()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPlusTailContext(ParserRuleContext):
    """Parse-tree node for the 'symbolPlusTail' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def symbolPlusTail(self):
        return self.getTypedRuleContext(SygusParser.SymbolPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPlusTail

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolPlusTail" ):
            listener.enterSymbolPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPlusTail" ):
            listener.exitSymbolPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPlusTail" ):
            return visitor.visitSymbolPlusTail(self)
        else:
            return visitor.visitChildren(self)


def symbolPlusTail(self):
    """Parse the 'symbolPlusTail' rule: symbol symbolPlusTail | <empty>.

    On SYMBOL, recurse for another element; on T__2 (presumably ')') take
    the empty alternative. ANTLR-generated; states index the serialized ATN.
    """
    localctx = SygusParser.SymbolPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 34, self.RULE_symbolPlusTail)
    try:
        self.state = 202
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.SYMBOL]:
            self.enterOuterAlt(localctx, 1)
            self.state = 198
            self.symbol()
            self.state = 199
            self.symbolPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # Empty alternative: the list has ended.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SetOptsCmdContext(ParserRuleContext):
    """Parse-tree node for the 'setOptsCmd' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def optList(self):
        return self.getTypedRuleContext(SygusParser.OptListContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_setOptsCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSetOptsCmd" ):
            listener.enterSetOptsCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSetOptsCmd" ):
            listener.exitSetOptsCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSetOptsCmd" ):
            return visitor.visitSetOptsCmd(self)
        else:
            return visitor.visitChildren(self)


def setOptsCmd(self):
    """Parse the 'setOptsCmd' rule: '(' T__14 optList ')'.

    T__14 is a grammar literal (presumably "set-options"/"set-opts") — confirm
    against the .g4. ANTLR-generated; states index the serialized ATN.
    """
    localctx = SygusParser.SetOptsCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 36, self.RULE_setOptsCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 204
        self.match(SygusParser.T__0)
        self.state = 205
        self.match(SygusParser.T__14)
        self.state = 206
        self.optList()
        self.state = 207
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OptListContext(ParserRuleContext):
    """Parse-tree node for the 'optList' (options list) rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolPairPlus(self):
        return self.getTypedRuleContext(SygusParser.SymbolPairPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_optList

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterOptList" ):
            listener.enterOptList(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitOptList" ):
            listener.exitOptList(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitOptList" ):
            return visitor.visitOptList(self)
        else:
            return visitor.visitChildren(self)


def optList(self):
    """Parse the 'optList' rule: '(' symbolPairPlus ')'.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.OptListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 38, self.RULE_optList)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 209
        self.match(SygusParser.T__0)
        self.state = 210
        self.symbolPairPlus()
        self.state = 211
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPairPlusContext(ParserRuleContext):
    """Parse-tree node for the 'symbolPairPlus' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolPair(self):
        return self.getTypedRuleContext(SygusParser.SymbolPairContext,0)

    def symbolPairPlusTail(self):
        return self.getTypedRuleContext(SygusParser.SymbolPairPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPairPlus

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolPairPlus" ):
            listener.enterSymbolPairPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPairPlus" ):
            listener.exitSymbolPairPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPairPlus" ):
            return visitor.visitSymbolPairPlus(self)
        else:
            return visitor.visitChildren(self)


def symbolPairPlus(self):
    """Parse the 'symbolPairPlus' rule: symbolPair symbolPairPlusTail.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.SymbolPairPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 40, self.RULE_symbolPairPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 213
        self.symbolPair()
        self.state = 214
        self.symbolPairPlusTail()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPairPlusTailContext(ParserRuleContext):
    """Parse-tree node for the 'symbolPairPlusTail' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolPair(self):
        return self.getTypedRuleContext(SygusParser.SymbolPairContext,0)

    def symbolPairPlusTail(self):
        return self.getTypedRuleContext(SygusParser.SymbolPairPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPairPlusTail

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolPairPlusTail" ):
            listener.enterSymbolPairPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPairPlusTail" ):
            listener.exitSymbolPairPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPairPlusTail" ):
            return visitor.visitSymbolPairPlusTail(self)
        else:
            return visitor.visitChildren(self)


def symbolPairPlusTail(self):
    """Parse the 'symbolPairPlusTail' rule: symbolPair symbolPairPlusTail | <empty>.

    On T__0 (presumably '(') another pair follows; on T__2 (presumably ')')
    take the empty alternative. ANTLR-generated; states index the serialized ATN.
    """
    localctx = SygusParser.SymbolPairPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 42, self.RULE_symbolPairPlusTail)
    try:
        self.state = 220
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            self.enterOuterAlt(localctx, 1)
            self.state = 216
            self.symbolPair()
            self.state = 217
            self.symbolPairPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # Empty alternative: the list has ended.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPairContext(ParserRuleContext):
    """Parse-tree node for the 'symbolPair' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def QUOTEDLIT(self):
        # The quoted string literal paired with the symbol.
        return self.getToken(SygusParser.QUOTEDLIT, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPair

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolPair" ):
            listener.enterSymbolPair(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPair" ):
            listener.exitSymbolPair(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPair" ):
            return visitor.visitSymbolPair(self)
        else:
            return visitor.visitChildren(self)


def symbolPair(self):
    """Parse the 'symbolPair' rule: '(' symbol QUOTEDLIT ')'.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.SymbolPairContext(self, self._ctx, self.state)
    self.enterRule(localctx, 44, self.RULE_symbolPair)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 222
        self.match(SygusParser.T__0)
        self.state = 223
        self.symbol()
        self.state = 224
        self.match(SygusParser.QUOTEDLIT)
        self.state = 225
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FunDefCmdContext(ParserRuleContext):
    """Parse-tree node for the 'funDefCmd' (function definition command) rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def argList(self):
        return self.getTypedRuleContext(SygusParser.ArgListContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def term(self):
        return self.getTypedRuleContext(SygusParser.TermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_funDefCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterFunDefCmd" ):
            listener.enterFunDefCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitFunDefCmd" ):
            listener.exitFunDefCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitFunDefCmd" ):
            return visitor.visitFunDefCmd(self)
        else:
            return visitor.visitChildren(self)


def funDefCmd(self):
    """Parse the 'funDefCmd' rule: '(' T__15 symbol argList sortExpr term ')'.

    T__15 is a grammar literal (presumably "define-fun") — confirm against
    the .g4. ANTLR-generated; states index the serialized ATN.
    """
    localctx = SygusParser.FunDefCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 46, self.RULE_funDefCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 227
        self.match(SygusParser.T__0)
        self.state = 228
        self.match(SygusParser.T__15)
        self.state = 229
        self.symbol()
        self.state = 230
        self.argList()
        self.state = 231
        self.sortExpr()
        self.state = 232
        self.term()
        self.state = 233
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FunDeclCmdContext(ParserRuleContext):
    """Parse-tree node for the 'funDeclCmd' (function declaration command) rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortStar(self):
        return self.getTypedRuleContext(SygusParser.SortStarContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_funDeclCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterFunDeclCmd" ):
            listener.enterFunDeclCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitFunDeclCmd" ):
            listener.exitFunDeclCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitFunDeclCmd" ):
            return visitor.visitFunDeclCmd(self)
        else:
            return visitor.visitChildren(self)


def funDeclCmd(self):
    """Parse the 'funDeclCmd' rule: '(' T__16 symbol '(' sortStar ')' sortExpr ')'.

    T__16 is a grammar literal (presumably "declare-fun") — confirm against
    the .g4. ANTLR-generated; states index the serialized ATN.
    """
    localctx = SygusParser.FunDeclCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 48, self.RULE_funDeclCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 235
        self.match(SygusParser.T__0)
        self.state = 236
        self.match(SygusParser.T__16)
        self.state = 237
        self.symbol()
        self.state = 238
        # Inner parenthesized list of argument sorts.
        self.match(SygusParser.T__0)
        self.state = 239
        self.sortStar()
        self.state = 240
        self.match(SygusParser.T__2)
        self.state = 241
        self.sortExpr()
        self.state = 242
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SortStarContext(ParserRuleContext):
    """Parse-tree node for the 'sortStar' (zero-or-more sorts) rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def sortStar(self):
        return self.getTypedRuleContext(SygusParser.SortStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_sortStar

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSortStar" ):
            listener.enterSortStar(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSortStar" ):
            listener.exitSortStar(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSortStar" ):
            return visitor.visitSortStar(self)
        else:
            return visitor.visitChildren(self)


def sortStar(self):
    """Parse the 'sortStar' rule: sortExpr sortStar | <empty>.

    Any token that can start a sortExpr continues the list; T__2 (presumably
    ')') selects the empty alternative. ANTLR-generated; states index the
    serialized ATN.
    """
    localctx = SygusParser.SortStarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 50, self.RULE_sortStar)
    try:
        self.state = 248
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0, SygusParser.T__6, SygusParser.T__7, SygusParser.T__8, SygusParser.SYMBOL]:
            self.enterOuterAlt(localctx, 1)
            self.state = 244
            self.sortExpr()
            self.state = 245
            self.sortStar()
            pass
        elif token in [SygusParser.T__2]:
            # Empty alternative: the list has ended.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ArgListContext(ParserRuleContext):
    """Parse-tree node for the 'argList' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolSortPairStar(self):
        return self.getTypedRuleContext(SygusParser.SymbolSortPairStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_argList

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterArgList" ):
            listener.enterArgList(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitArgList" ):
            listener.exitArgList(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitArgList" ):
            return visitor.visitArgList(self)
        else:
            return visitor.visitChildren(self)


def argList(self):
    """Parse the 'argList' rule: '(' symbolSortPairStar ')'.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.ArgListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 52, self.RULE_argList)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 250
        self.match(SygusParser.T__0)
        self.state = 251
        self.symbolSortPairStar()
        self.state = 252
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolSortPairStarContext(ParserRuleContext):
    """Parse-tree node for the 'symbolSortPairStar' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolSortPair(self):
        return self.getTypedRuleContext(SygusParser.SymbolSortPairContext,0)

    def symbolSortPairStar(self):
        return self.getTypedRuleContext(SygusParser.SymbolSortPairStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolSortPairStar

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolSortPairStar" ):
            listener.enterSymbolSortPairStar(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolSortPairStar" ):
            listener.exitSymbolSortPairStar(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolSortPairStar" ):
            return visitor.visitSymbolSortPairStar(self)
        else:
            return visitor.visitChildren(self)


def symbolSortPairStar(self):
    """Parse the 'symbolSortPairStar' rule: symbolSortPair symbolSortPairStar | <empty>.

    On T__0 (presumably '(') another pair follows; on T__2 (presumably ')')
    take the empty alternative. ANTLR-generated; states index the serialized ATN.
    """
    localctx = SygusParser.SymbolSortPairStarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 54, self.RULE_symbolSortPairStar)
    try:
        self.state = 258
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            self.enterOuterAlt(localctx, 1)
            self.state = 254
            self.symbolSortPair()
            self.state = 255
            self.symbolSortPairStar()
            pass
        elif token in [SygusParser.T__2]:
            # Empty alternative: the list has ended.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolSortPairContext(ParserRuleContext):
    """Parse-tree node for the 'symbolSortPair' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolSortPair

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolSortPair" ):
            listener.enterSymbolSortPair(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolSortPair" ):
            listener.exitSymbolSortPair(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolSortPair" ):
            return visitor.visitSymbolSortPair(self)
        else:
            return visitor.visitChildren(self)


def symbolSortPair(self):
    """Parse the 'symbolSortPair' rule: '(' symbol sortExpr ')'.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.SymbolSortPairContext(self, self._ctx, self.state)
    self.enterRule(localctx, 56, self.RULE_symbolSortPair)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 260
        self.match(SygusParser.T__0)
        self.state = 261
        self.symbol()
        self.state = 262
        self.sortExpr()
        self.state = 263
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TermContext(ParserRuleContext):
    """Parse-tree node for the 'term' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def termStar(self):
        return self.getTypedRuleContext(SygusParser.TermStarContext,0)

    def literal(self):
        return self.getTypedRuleContext(SygusParser.LiteralContext,0)

    def letTerm(self):
        return self.getTypedRuleContext(SygusParser.LetTermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_term

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterTerm" ):
            listener.enterTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitTerm" ):
            listener.exitTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitTerm" ):
            return visitor.visitTerm(self)
        else:
            return visitor.visitChildren(self)


def term(self):
    """Parse the 'term' rule (4 alternatives, chosen by adaptive prediction):

      1: '(' symbol termStar ')'   function application
      2: literal
      3: symbol                    variable/constant reference
      4: letTerm                   let-binding
    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.TermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 58, self.RULE_term)
    try:
        self.state = 273
        self._errHandler.sync(self)
        # Full LL(*) prediction over decision 9 picks the alternative
        # (needed since alternatives 1 and 4 both start with '(').
        la_ = self._interp.adaptivePredict(self._input,9,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 265
            self.match(SygusParser.T__0)
            self.state = 266
            self.symbol()
            self.state = 267
            self.termStar()
            self.state = 268
            self.match(SygusParser.T__2)
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 270
            self.literal()
            pass
        elif la_ == 3:
            self.enterOuterAlt(localctx, 3)
            self.state = 271
            self.symbol()
            pass
        elif la_ == 4:
            self.enterOuterAlt(localctx, 4)
            self.state = 272
            self.letTerm()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetTermContext(ParserRuleContext):
    """Parse-tree node for the 'letTerm' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingTermPlus(self):
        return self.getTypedRuleContext(SygusParser.LetBindingTermPlusContext,0)

    def term(self):
        return self.getTypedRuleContext(SygusParser.TermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letTerm

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLetTerm" ):
            listener.enterLetTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetTerm" ):
            listener.exitLetTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetTerm" ):
            return visitor.visitLetTerm(self)
        else:
            return visitor.visitChildren(self)


def letTerm(self):
    """Parse the 'letTerm' rule: '(' T__17 '(' letBindingTermPlus ')' term ')'.

    T__17 is a grammar literal (presumably "let") — confirm against the .g4.
    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.LetTermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 60, self.RULE_letTerm)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 275
        self.match(SygusParser.T__0)
        self.state = 276
        self.match(SygusParser.T__17)
        self.state = 277
        # Inner parenthesized list of bindings.
        self.match(SygusParser.T__0)
        self.state = 278
        self.letBindingTermPlus()
        self.state = 279
        self.match(SygusParser.T__2)
        self.state = 280
        # Body term in which the bindings are in scope.
        self.term()
        self.state = 281
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingTermPlusContext(ParserRuleContext):
    """Parse-tree node for the 'letBindingTermPlus' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingTerm(self):
        return self.getTypedRuleContext(SygusParser.LetBindingTermContext,0)

    def letBindingTermPlusTail(self):
        return self.getTypedRuleContext(SygusParser.LetBindingTermPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingTermPlus

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLetBindingTermPlus" ):
            listener.enterLetBindingTermPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingTermPlus" ):
            listener.exitLetBindingTermPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingTermPlus" ):
            return visitor.visitLetBindingTermPlus(self)
        else:
            return visitor.visitChildren(self)


def letBindingTermPlus(self):
    """Parse the 'letBindingTermPlus' rule: letBindingTerm letBindingTermPlusTail.

    ANTLR-generated; ``self.state`` values index the serialized ATN.
    """
    localctx = SygusParser.LetBindingTermPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 62, self.RULE_letBindingTermPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 283
        self.letBindingTerm()
        self.state = 284
        self.letBindingTermPlusTail()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingTermPlusTailContext(ParserRuleContext):
    """Parse-tree node for the 'letBindingTermPlusTail' rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingTerm(self):
        return self.getTypedRuleContext(SygusParser.LetBindingTermContext,0)

    def letBindingTermPlusTail(self):
        return self.getTypedRuleContext(SygusParser.LetBindingTermPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingTermPlusTail

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLetBindingTermPlusTail" ):
            listener.enterLetBindingTermPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingTermPlusTail" ):
            listener.exitLetBindingTermPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingTermPlusTail" ):
            return visitor.visitLetBindingTermPlusTail(self)
        else:
            return visitor.visitChildren(self)


def letBindingTermPlusTail(self):
    """Parse the 'letBindingTermPlusTail' rule: letBindingTerm letBindingTermPlusTail | <empty>.

    On T__0 (presumably '(') another binding follows; on T__2 (presumably ')')
    take the empty alternative. ANTLR-generated; states index the serialized ATN.
    """
    localctx = SygusParser.LetBindingTermPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 64, self.RULE_letBindingTermPlusTail)
    try:
        self.state = 290
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            self.enterOuterAlt(localctx, 1)
            self.state = 286
            self.letBindingTerm()
            self.state = 287
            self.letBindingTermPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # Empty alternative: the list has ended.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
    class LetBindingTermContext(ParserRuleContext):
        """ANTLR parse-tree node for a single term-level let binding:
        a (symbol sortExpr term) group."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            # Bound variable name.
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)

        def sortExpr(self):
            # Declared sort (type) of the bound variable.
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)

        def term(self):
            # Term the variable is bound to.
            return self.getTypedRuleContext(SygusParser.TermContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_letBindingTerm

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLetBindingTerm" ):
                listener.enterLetBindingTerm(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLetBindingTerm" ):
                listener.exitLetBindingTerm(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLetBindingTerm" ):
                return visitor.visitLetBindingTerm(self)
            else:
                return visitor.visitChildren(self)

    def letBindingTerm(self):
        """Parse `letBindingTerm : T__0 symbol sortExpr term T__2`.

        T__0/T__2 are anonymous literal tokens (presumably '(' and ')' —
        confirm against the grammar file).
        """

        localctx = SygusParser.LetBindingTermContext(self, self._ctx, self.state)
        self.enterRule(localctx, 66, self.RULE_letBindingTerm)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 292
            self.match(SygusParser.T__0)
            self.state = 293
            self.symbol()
            self.state = 294
            self.sortExpr()
            self.state = 295
            self.term()
            self.state = 296
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class TermStarContext(ParserRuleContext):
        """ANTLR parse-tree node for a (possibly empty) sequence of terms,
        encoded right-recursively."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def term(self):
            return self.getTypedRuleContext(SygusParser.TermContext,0)

        def termStar(self):
            return self.getTypedRuleContext(SygusParser.TermStarContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_termStar

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTermStar" ):
                listener.enterTermStar(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTermStar" ):
                listener.exitTermStar(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTermStar" ):
                return visitor.visitTermStar(self)
            else:
                return visitor.visitChildren(self)

    def termStar(self):
        """Parse `termStar : term termStar | /* empty */`.

        Alternative 1 fires when the lookahead can begin a term (paren,
        boolean/int/BV/real constant, or symbol); alternative 2 (empty)
        fires on T__2, which closes the enclosing list.
        """

        localctx = SygusParser.TermStarContext(self, self._ctx, self.state)
        self.enterRule(localctx, 68, self.RULE_termStar)
        try:
            self.state = 302
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.T__0, SygusParser.T__11, SygusParser.T__12, SygusParser.INTEGER, SygusParser.BVCONST, SygusParser.REALCONST, SygusParser.SYMBOL]:
                self.enterOuterAlt(localctx, 1)
                self.state = 298
                self.term()
                self.state = 299
                self.termStar()
                pass
            elif token in [SygusParser.T__2]:
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LiteralContext(ParserRuleContext):
        """ANTLR parse-tree node for a literal: exactly one of int, bool,
        bit-vector, enum, or real constant."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def intConst(self):
            return self.getTypedRuleContext(SygusParser.IntConstContext,0)

        def boolConst(self):
            return self.getTypedRuleContext(SygusParser.BoolConstContext,0)

        def bVConst(self):
            return self.getTypedRuleContext(SygusParser.BVConstContext,0)

        def enumConst(self):
            return self.getTypedRuleContext(SygusParser.EnumConstContext,0)

        def realConst(self):
            return self.getTypedRuleContext(SygusParser.RealConstContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_literal

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLiteral" ):
                listener.enterLiteral(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLiteral" ):
                listener.exitLiteral(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLiteral" ):
                return visitor.visitLiteral(self)
            else:
                return visitor.visitChildren(self)

    def literal(self):
        """Parse a literal by dispatching on one token of lookahead.

        INTEGER -> intConst; T__11/T__12 (the two boolean literal tokens)
        -> boolConst; BVCONST -> bVConst; SYMBOL -> enumConst;
        REALCONST -> realConst.  Anything else is a syntax error.
        """

        localctx = SygusParser.LiteralContext(self, self._ctx, self.state)
        self.enterRule(localctx, 70, self.RULE_literal)
        try:
            self.state = 309
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.INTEGER]:
                self.enterOuterAlt(localctx, 1)
                self.state = 304
                self.intConst()
                pass
            elif token in [SygusParser.T__11, SygusParser.T__12]:
                self.enterOuterAlt(localctx, 2)
                self.state = 305
                self.boolConst()
                pass
            elif token in [SygusParser.BVCONST]:
                self.enterOuterAlt(localctx, 3)
                self.state = 306
                self.bVConst()
                pass
            elif token in [SygusParser.SYMBOL]:
                self.enterOuterAlt(localctx, 4)
                self.state = 307
                self.enumConst()
                pass
            elif token in [SygusParser.REALCONST]:
                self.enterOuterAlt(localctx, 5)
                self.state = 308
                self.realConst()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class NTDefPlusContext(ParserRuleContext):
        """ANTLR parse-tree node for one-or-more non-terminal definitions
        (the grammar body of a synth-fun/synth-inv)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def nTDef(self):
            # First (mandatory) non-terminal definition.
            return self.getTypedRuleContext(SygusParser.NTDefContext,0)

        def nTDefPlusTail(self):
            # Remaining definitions (right-recursive tail).
            return self.getTypedRuleContext(SygusParser.NTDefPlusTailContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_nTDefPlus

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNTDefPlus" ):
                listener.enterNTDefPlus(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNTDefPlus" ):
                listener.exitNTDefPlus(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitNTDefPlus" ):
                return visitor.visitNTDefPlus(self)
            else:
                return visitor.visitChildren(self)

    def nTDefPlus(self):
        """Parse `nTDefPlus : nTDef nTDefPlusTail`."""

        localctx = SygusParser.NTDefPlusContext(self, self._ctx, self.state)
        self.enterRule(localctx, 72, self.RULE_nTDefPlus)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 311
            self.nTDef()
            self.state = 312
            self.nTDefPlusTail()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class NTDefPlusTailContext(ParserRuleContext):
        """ANTLR parse-tree node for the (possibly empty) tail of an
        `nTDefPlus` list."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def nTDef(self):
            return self.getTypedRuleContext(SygusParser.NTDefContext,0)

        def nTDefPlusTail(self):
            return self.getTypedRuleContext(SygusParser.NTDefPlusTailContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_nTDefPlusTail

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNTDefPlusTail" ):
                listener.enterNTDefPlusTail(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNTDefPlusTail" ):
                listener.exitNTDefPlusTail(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitNTDefPlusTail" ):
                return visitor.visitNTDefPlusTail(self)
            else:
                return visitor.visitChildren(self)

    def nTDefPlusTail(self):
        """Parse `nTDefPlusTail : nTDef nTDefPlusTail | /* empty */`.

        Recurses while the lookahead is T__0 (another definition opens);
        takes the empty production on T__2 (closing the list).
        """

        localctx = SygusParser.NTDefPlusTailContext(self, self._ctx, self.state)
        self.enterRule(localctx, 74, self.RULE_nTDefPlusTail)
        try:
            self.state = 318
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.T__0]:
                self.enterOuterAlt(localctx, 1)
                self.state = 314
                self.nTDef()
                self.state = 315
                self.nTDefPlusTail()
                pass
            elif token in [SygusParser.T__2]:
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class NTDefContext(ParserRuleContext):
        """ANTLR parse-tree node for a single non-terminal definition:
        name, sort, and its list of grammar-term productions."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            # Non-terminal name.
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)

        def sortExpr(self):
            # Sort produced by this non-terminal.
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)

        def gTermPlus(self):
            # One-or-more production rules.
            return self.getTypedRuleContext(SygusParser.GTermPlusContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_nTDef

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterNTDef" ):
                listener.enterNTDef(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitNTDef" ):
                listener.exitNTDef(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitNTDef" ):
                return visitor.visitNTDef(self)
            else:
                return visitor.visitChildren(self)

    def nTDef(self):
        """Parse `nTDef : T__0 symbol sortExpr T__0 gTermPlus T__2 T__2`
        (a nested group: the production list is itself parenthesised)."""

        localctx = SygusParser.NTDefContext(self, self._ctx, self.state)
        self.enterRule(localctx, 76, self.RULE_nTDef)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 320
            self.match(SygusParser.T__0)
            self.state = 321
            self.symbol()
            self.state = 322
            self.sortExpr()
            self.state = 323
            self.match(SygusParser.T__0)
            self.state = 324
            self.gTermPlus()
            self.state = 325
            self.match(SygusParser.T__2)
            self.state = 326
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GTermPlusContext(ParserRuleContext):
        """ANTLR parse-tree node for one-or-more grammar terms."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def gTerm(self):
            # First (mandatory) grammar term.
            return self.getTypedRuleContext(SygusParser.GTermContext,0)

        def gTermPlusTail(self):
            # Remaining grammar terms (right-recursive tail).
            return self.getTypedRuleContext(SygusParser.GTermPlusTailContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_gTermPlus

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGTermPlus" ):
                listener.enterGTermPlus(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGTermPlus" ):
                listener.exitGTermPlus(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGTermPlus" ):
                return visitor.visitGTermPlus(self)
            else:
                return visitor.visitChildren(self)

    def gTermPlus(self):
        """Parse `gTermPlus : gTerm gTermPlusTail`."""

        localctx = SygusParser.GTermPlusContext(self, self._ctx, self.state)
        self.enterRule(localctx, 78, self.RULE_gTermPlus)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 328
            self.gTerm()
            self.state = 329
            self.gTermPlusTail()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GTermPlusTailContext(ParserRuleContext):
        """ANTLR parse-tree node for the (possibly empty) tail of a
        `gTermPlus` list."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def gTerm(self):
            return self.getTypedRuleContext(SygusParser.GTermContext,0)

        def gTermPlusTail(self):
            return self.getTypedRuleContext(SygusParser.GTermPlusTailContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_gTermPlusTail

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGTermPlusTail" ):
                listener.enterGTermPlusTail(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGTermPlusTail" ):
                listener.exitGTermPlusTail(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGTermPlusTail" ):
                return visitor.visitGTermPlusTail(self)
            else:
                return visitor.visitChildren(self)

    def gTermPlusTail(self):
        """Parse `gTermPlusTail : gTerm gTermPlusTail | /* empty */`.

        Recurses while the lookahead can begin a grammar term (paren or any
        constant/symbol token); takes the empty production on T__2.
        """

        localctx = SygusParser.GTermPlusTailContext(self, self._ctx, self.state)
        self.enterRule(localctx, 80, self.RULE_gTermPlusTail)
        try:
            self.state = 335
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.T__0, SygusParser.T__11, SygusParser.T__12, SygusParser.INTEGER, SygusParser.BVCONST, SygusParser.REALCONST, SygusParser.SYMBOL]:
                self.enterOuterAlt(localctx, 1)
                self.state = 331
                self.gTerm()
                self.state = 332
                self.gTermPlusTail()
                pass
            elif token in [SygusParser.T__2]:
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class CheckSynthCmdContext(ParserRuleContext):
        """ANTLR parse-tree node for the (check-synth) command.

        The command has no children beyond its literal tokens.
        """

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return SygusParser.RULE_checkSynthCmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterCheckSynthCmd" ):
                listener.enterCheckSynthCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitCheckSynthCmd" ):
                listener.exitCheckSynthCmd(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCheckSynthCmd" ):
                return visitor.visitCheckSynthCmd(self)
            else:
                return visitor.visitChildren(self)

    def checkSynthCmd(self):
        """Parse `checkSynthCmd : T__0 T__18 T__2` — the opening paren, the
        check-synth keyword token, and the closing paren."""

        localctx = SygusParser.CheckSynthCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 82, self.RULE_checkSynthCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 337
            self.match(SygusParser.T__0)
            self.state = 338
            self.match(SygusParser.T__18)
            self.state = 339
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ConstraintCmdContext(ParserRuleContext):
        """ANTLR parse-tree node for a (constraint <term>) command."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def term(self):
            # The constraint body.
            return self.getTypedRuleContext(SygusParser.TermContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_constraintCmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterConstraintCmd" ):
                listener.enterConstraintCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitConstraintCmd" ):
                listener.exitConstraintCmd(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitConstraintCmd" ):
                return visitor.visitConstraintCmd(self)
            else:
                return visitor.visitChildren(self)

    def constraintCmd(self):
        """Parse `constraintCmd : T__0 T__19 term T__2` (T__19 is the
        constraint keyword token)."""

        localctx = SygusParser.ConstraintCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 84, self.RULE_constraintCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 341
            self.match(SygusParser.T__0)
            self.state = 342
            self.match(SygusParser.T__19)
            self.state = 343
            self.term()
            self.state = 344
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SynthFunCmdContext(ParserRuleContext):
        """ANTLR parse-tree node for a (synth-fun ...) command: function
        name, argument list, return sort, and an optional grammar."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            # Name of the function to synthesize.
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)

        def argList(self):
            return self.getTypedRuleContext(SygusParser.ArgListContext,0)

        def sortExpr(self):
            # Return sort of the function.
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)

        def nTDefPlus(self):
            # Grammar body; None when the grammar-less alternative matched.
            return self.getTypedRuleContext(SygusParser.NTDefPlusContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_synthFunCmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSynthFunCmd" ):
                listener.enterSynthFunCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSynthFunCmd" ):
                listener.exitSynthFunCmd(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSynthFunCmd" ):
                return visitor.visitSynthFunCmd(self)
            else:
                return visitor.visitChildren(self)

    def synthFunCmd(self):
        """Parse a synth-fun command (T__20 is the synth-fun keyword token).

        Two alternatives, disambiguated by the ALL(*) adaptive predictor
        (both start identically, so one token of lookahead is not enough):
        alt 1 includes a parenthesised grammar (`nTDefPlus`); alt 2 omits
        the grammar entirely.
        """

        localctx = SygusParser.SynthFunCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 86, self.RULE_synthFunCmd)
        try:
            self.state = 363
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,15,self._ctx)
            if la_ == 1:
                # (synth-fun name (args) sort ( grammar ) )
                self.enterOuterAlt(localctx, 1)
                self.state = 346
                self.match(SygusParser.T__0)
                self.state = 347
                self.match(SygusParser.T__20)
                self.state = 348
                self.symbol()
                self.state = 349
                self.argList()
                self.state = 350
                self.sortExpr()
                self.state = 351
                self.match(SygusParser.T__0)
                self.state = 352
                self.nTDefPlus()
                self.state = 353
                self.match(SygusParser.T__2)
                self.state = 354
                self.match(SygusParser.T__2)
                pass
            elif la_ == 2:
                # (synth-fun name (args) sort ) — no grammar.
                self.enterOuterAlt(localctx, 2)
                self.state = 356
                self.match(SygusParser.T__0)
                self.state = 357
                self.match(SygusParser.T__20)
                self.state = 358
                self.symbol()
                self.state = 359
                self.argList()
                self.state = 360
                self.sortExpr()
                self.state = 361
                self.match(SygusParser.T__2)
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GTermContext(ParserRuleContext):
        """ANTLR parse-tree node for a grammar term (right-hand side of a
        non-terminal production)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)

        def literal(self):
            return self.getTypedRuleContext(SygusParser.LiteralContext,0)

        def gTermStar(self):
            return self.getTypedRuleContext(SygusParser.GTermStarContext,0)

        def sortExpr(self):
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)

        def letGTerm(self):
            return self.getTypedRuleContext(SygusParser.LetGTermContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_gTerm

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGTerm" ):
                listener.enterGTerm(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGTerm" ):
                listener.exitGTerm(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGTerm" ):
                return visitor.visitGTerm(self)
            else:
                return visitor.visitChildren(self)

    def gTerm(self):
        """Parse a grammar term; eight alternatives chosen by the ALL(*)
        adaptive predictor:

        1. bare symbol                      2. literal
        3. function application (symbol gTermStar)
        4-7. (T__21..T__24 sortExpr) — four keyword-plus-sort forms
             (presumably the Constant/Variable/InputVariable/LocalVariable
             generators of the SyGuS grammar; confirm against the grammar)
        8. let grammar term
        """

        localctx = SygusParser.GTermContext(self, self._ctx, self.state)
        self.enterRule(localctx, 88, self.RULE_gTerm)
        try:
            self.state = 393
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 365
                self.symbol()
                pass
            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 366
                self.literal()
                pass
            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 367
                self.match(SygusParser.T__0)
                self.state = 368
                self.symbol()
                self.state = 369
                self.gTermStar()
                self.state = 370
                self.match(SygusParser.T__2)
                pass
            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 372
                self.match(SygusParser.T__0)
                self.state = 373
                self.match(SygusParser.T__21)
                self.state = 374
                self.sortExpr()
                self.state = 375
                self.match(SygusParser.T__2)
                pass
            elif la_ == 5:
                self.enterOuterAlt(localctx, 5)
                self.state = 377
                self.match(SygusParser.T__0)
                self.state = 378
                self.match(SygusParser.T__22)
                self.state = 379
                self.sortExpr()
                self.state = 380
                self.match(SygusParser.T__2)
                pass
            elif la_ == 6:
                self.enterOuterAlt(localctx, 6)
                self.state = 382
                self.match(SygusParser.T__0)
                self.state = 383
                self.match(SygusParser.T__23)
                self.state = 384
                self.sortExpr()
                self.state = 385
                self.match(SygusParser.T__2)
                pass
            elif la_ == 7:
                self.enterOuterAlt(localctx, 7)
                self.state = 387
                self.match(SygusParser.T__0)
                self.state = 388
                self.match(SygusParser.T__24)
                self.state = 389
                self.sortExpr()
                self.state = 390
                self.match(SygusParser.T__2)
                pass
            elif la_ == 8:
                self.enterOuterAlt(localctx, 8)
                self.state = 392
                self.letGTerm()
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LetGTermContext(ParserRuleContext):
        """ANTLR parse-tree node for a `let` construct at the grammar-term
        level: bindings plus a body grammar term."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def letBindingGTermPlus(self):
            # One-or-more bindings.
            return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusContext,0)

        def gTerm(self):
            # Body of the let.
            return self.getTypedRuleContext(SygusParser.GTermContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_letGTerm

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLetGTerm" ):
                listener.enterLetGTerm(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLetGTerm" ):
                listener.exitLetGTerm(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLetGTerm" ):
                return visitor.visitLetGTerm(self)
            else:
                return visitor.visitChildren(self)

    def letGTerm(self):
        """Parse
        `letGTerm : T__0 T__17 T__0 letBindingGTermPlus T__2 gTerm T__2`
        (T__17 is the let keyword token; the binding list is itself
        parenthesised)."""

        localctx = SygusParser.LetGTermContext(self, self._ctx, self.state)
        self.enterRule(localctx, 90, self.RULE_letGTerm)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 395
            self.match(SygusParser.T__0)
            self.state = 396
            self.match(SygusParser.T__17)
            self.state = 397
            self.match(SygusParser.T__0)
            self.state = 398
            self.letBindingGTermPlus()
            self.state = 399
            self.match(SygusParser.T__2)
            self.state = 400
            self.gTerm()
            self.state = 401
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LetBindingGTermPlusContext(ParserRuleContext):
        """ANTLR parse-tree node for one-or-more grammar-term let
        bindings."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def letBindingGTerm(self):
            # First (mandatory) binding.
            return self.getTypedRuleContext(SygusParser.LetBindingGTermContext,0)

        def letBindingGTermPlusTail(self):
            # Remaining bindings (right-recursive tail).
            return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusTailContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_letBindingGTermPlus

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLetBindingGTermPlus" ):
                listener.enterLetBindingGTermPlus(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLetBindingGTermPlus" ):
                listener.exitLetBindingGTermPlus(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLetBindingGTermPlus" ):
                return visitor.visitLetBindingGTermPlus(self)
            else:
                return visitor.visitChildren(self)

    def letBindingGTermPlus(self):
        """Parse `letBindingGTermPlus : letBindingGTerm letBindingGTermPlusTail`."""

        localctx = SygusParser.LetBindingGTermPlusContext(self, self._ctx, self.state)
        self.enterRule(localctx, 92, self.RULE_letBindingGTermPlus)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 403
            self.letBindingGTerm()
            self.state = 404
            self.letBindingGTermPlusTail()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LetBindingGTermPlusTailContext(ParserRuleContext):
        """ANTLR parse-tree node for the (possibly empty) tail of a
        `letBindingGTermPlus` list."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def letBindingGTerm(self):
            return self.getTypedRuleContext(SygusParser.LetBindingGTermContext,0)

        def letBindingGTermPlusTail(self):
            return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusTailContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_letBindingGTermPlusTail

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLetBindingGTermPlusTail" ):
                listener.enterLetBindingGTermPlusTail(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLetBindingGTermPlusTail" ):
                listener.exitLetBindingGTermPlusTail(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLetBindingGTermPlusTail" ):
                return visitor.visitLetBindingGTermPlusTail(self)
            else:
                return visitor.visitChildren(self)

    def letBindingGTermPlusTail(self):
        """Parse `letBindingGTermPlusTail : letBindingGTerm letBindingGTermPlusTail | /* empty */`.

        Recurses on T__0 (another binding opens); empty production on T__2.
        """

        localctx = SygusParser.LetBindingGTermPlusTailContext(self, self._ctx, self.state)
        self.enterRule(localctx, 94, self.RULE_letBindingGTermPlusTail)
        try:
            self.state = 410
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.T__0]:
                self.enterOuterAlt(localctx, 1)
                self.state = 406
                self.letBindingGTerm()
                self.state = 407
                self.letBindingGTermPlusTail()
                pass
            elif token in [SygusParser.T__2]:
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LetBindingGTermContext(ParserRuleContext):
        """ANTLR parse-tree node for a single grammar-term let binding:
        a (symbol sortExpr gTerm) group."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            # Bound variable name.
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)

        def sortExpr(self):
            # Declared sort of the bound variable.
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)

        def gTerm(self):
            # Grammar term the variable is bound to.
            return self.getTypedRuleContext(SygusParser.GTermContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_letBindingGTerm

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLetBindingGTerm" ):
                listener.enterLetBindingGTerm(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLetBindingGTerm" ):
                listener.exitLetBindingGTerm(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLetBindingGTerm" ):
                return visitor.visitLetBindingGTerm(self)
            else:
                return visitor.visitChildren(self)

    def letBindingGTerm(self):
        """Parse `letBindingGTerm : T__0 symbol sortExpr gTerm T__2`."""

        localctx = SygusParser.LetBindingGTermContext(self, self._ctx, self.state)
        self.enterRule(localctx, 96, self.RULE_letBindingGTerm)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 412
            self.match(SygusParser.T__0)
            self.state = 413
            self.symbol()
            self.state = 414
            self.sortExpr()
            self.state = 415
            self.gTerm()
            self.state = 416
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GTermStarContext(ParserRuleContext):
        """ANTLR parse-tree node for a (possibly empty) sequence of grammar
        terms, encoded right-recursively."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def gTerm(self):
            return self.getTypedRuleContext(SygusParser.GTermContext,0)

        def gTermStar(self):
            return self.getTypedRuleContext(SygusParser.GTermStarContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_gTermStar

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGTermStar" ):
                listener.enterGTermStar(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGTermStar" ):
                listener.exitGTermStar(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGTermStar" ):
                return visitor.visitGTermStar(self)
            else:
                return visitor.visitChildren(self)

    def gTermStar(self):
        """Parse `gTermStar : gTerm gTermStar | /* empty */`.

        Recurses while the lookahead can begin a grammar term; takes the
        empty production on T__2 (end of the enclosing list).
        """

        localctx = SygusParser.GTermStarContext(self, self._ctx, self.state)
        self.enterRule(localctx, 98, self.RULE_gTermStar)
        try:
            self.state = 422
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [SygusParser.T__0, SygusParser.T__11, SygusParser.T__12, SygusParser.INTEGER, SygusParser.BVCONST, SygusParser.REALCONST, SygusParser.SYMBOL]:
                self.enterOuterAlt(localctx, 1)
                self.state = 418
                self.gTerm()
                self.state = 419
                self.gTermStar()
                pass
            elif token in [SygusParser.T__2]:
                self.enterOuterAlt(localctx, 2)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class SynthInvCmdContext(ParserRuleContext):
        """ANTLR parse-tree node for a (synth-inv ...) command: invariant
        name, argument list, and an optional grammar.  Unlike synth-fun,
        no return sort is parsed."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            # Name of the invariant to synthesize.
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)

        def argList(self):
            return self.getTypedRuleContext(SygusParser.ArgListContext,0)

        def nTDefPlus(self):
            # Grammar body; None when the grammar-less alternative matched.
            return self.getTypedRuleContext(SygusParser.NTDefPlusContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_synthInvCmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterSynthInvCmd" ):
                listener.enterSynthInvCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitSynthInvCmd" ):
                listener.exitSynthInvCmd(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSynthInvCmd" ):
                return visitor.visitSynthInvCmd(self)
            else:
                return visitor.visitChildren(self)

    def synthInvCmd(self):
        """Parse a synth-inv command (T__25 is the synth-inv keyword token).

        Two alternatives resolved by the ALL(*) adaptive predictor:
        alt 1 with a parenthesised grammar (`nTDefPlus`), alt 2 without.
        """

        localctx = SygusParser.SynthInvCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 100, self.RULE_synthInvCmd)
        try:
            self.state = 439
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,19,self._ctx)
            if la_ == 1:
                # (synth-inv name (args) ( grammar ) )
                self.enterOuterAlt(localctx, 1)
                self.state = 424
                self.match(SygusParser.T__0)
                self.state = 425
                self.match(SygusParser.T__25)
                self.state = 426
                self.symbol()
                self.state = 427
                self.argList()
                self.state = 428
                self.match(SygusParser.T__0)
                self.state = 429
                self.nTDefPlus()
                self.state = 430
                self.match(SygusParser.T__2)
                self.state = 431
                self.match(SygusParser.T__2)
                pass
            elif la_ == 2:
                # (synth-inv name (args) ) — no grammar.
                self.enterOuterAlt(localctx, 2)
                self.state = 433
                self.match(SygusParser.T__0)
                self.state = 434
                self.match(SygusParser.T__25)
                self.state = 435
                self.symbol()
                self.state = 436
                self.argList()
                self.state = 437
                self.match(SygusParser.T__2)
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class DeclarePrimedVarContext(ParserRuleContext):
        """ANTLR parse-tree node for a (declare-primed-var <name> <sort>)
        command."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self):
            # Declared variable name.
            return self.getTypedRuleContext(SygusParser.SymbolContext,0)

        def sortExpr(self):
            # Sort of the declared variable.
            return self.getTypedRuleContext(SygusParser.SortExprContext,0)

        def getRuleIndex(self):
            return SygusParser.RULE_declarePrimedVar

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterDeclarePrimedVar" ):
                listener.enterDeclarePrimedVar(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitDeclarePrimedVar" ):
                listener.exitDeclarePrimedVar(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitDeclarePrimedVar" ):
                return visitor.visitDeclarePrimedVar(self)
            else:
                return visitor.visitChildren(self)

    def declarePrimedVar(self):
        """Parse `declarePrimedVar : T__0 T__26 symbol sortExpr T__2`
        (T__26 is the declare-primed-var keyword token)."""

        localctx = SygusParser.DeclarePrimedVarContext(self, self._ctx, self.state)
        self.enterRule(localctx, 102, self.RULE_declarePrimedVar)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 441
            self.match(SygusParser.T__0)
            self.state = 442
            self.match(SygusParser.T__26)
            self.state = 443
            self.symbol()
            self.state = 444
            self.sortExpr()
            self.state = 445
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class InvConstraintCmdContext(ParserRuleContext):
        """ANTLR parse-tree node for an (inv-constraint s1 s2 s3 s4)
        command, which references exactly four symbols."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def symbol(self, i:int=None):
            # With i=None return all four symbol children; otherwise the
            # i-th one (0-based).
            if i is None:
                return self.getTypedRuleContexts(SygusParser.SymbolContext)
            else:
                return self.getTypedRuleContext(SygusParser.SymbolContext,i)

        def getRuleIndex(self):
            return SygusParser.RULE_invConstraintCmd

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterInvConstraintCmd" ):
                listener.enterInvConstraintCmd(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitInvConstraintCmd" ):
                listener.exitInvConstraintCmd(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitInvConstraintCmd" ):
                return visitor.visitInvConstraintCmd(self)
            else:
                return visitor.visitChildren(self)

    def invConstraintCmd(self):
        """Parse `invConstraintCmd : T__0 T__27 symbol symbol symbol symbol T__2`
        (T__27 is the inv-constraint keyword token; the four symbols are
        stored as indexed children of the context)."""

        localctx = SygusParser.InvConstraintCmdContext(self, self._ctx, self.state)
        self.enterRule(localctx, 104, self.RULE_invConstraintCmd)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 447
            self.match(SygusParser.T__0)
            self.state = 448
            self.match(SygusParser.T__27)
            self.state = 449
            self.symbol()
            self.state = 450
            self.symbol()
            self.state = 451
            self.symbol()
            self.state = 452
            self.symbol()
            self.state = 453
            self.match(SygusParser.T__2)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
| 34.319947 | 166 | 0.586897 | 113,928 | 0.882869 | 0 | 0 | 0 | 0 | 0 | 0 | 15,196 | 0.117759 |
a8556da33787e277786e6a3792b993ea18601c17 | 1,870 | py | Python | keitaro/utils.py | Infvmous/keitaro | aeb7555bd8443da995705f26fd42e6e882f64dd4 | [
"MIT"
] | 1 | 2021-07-03T16:40:37.000Z | 2021-07-03T16:40:37.000Z | keitaro/utils.py | ysomad/keitaro | aeb7555bd8443da995705f26fd42e6e882f64dd4 | [
"MIT"
] | 1 | 2021-01-28T13:06:33.000Z | 2021-01-28T13:06:36.000Z | keitaro/utils.py | ysomad/keitaro | aeb7555bd8443da995705f26fd42e6e882f64dd4 | [
"MIT"
] | 1 | 2021-02-23T08:10:29.000Z | 2021-02-23T08:10:29.000Z | import random
from string import ascii_letters, digits
from urllib.parse import urlparse
def list_to_string(list_items, separator='\n'):
"""
Converts list items to string with separator
"""
return separator.join(list_items)
def string_to_list(string, separator='\n'):
"""
Converts string with separator to a list
"""
return [word for word in string.split(separator)]
def generate_random_string(length: int = 8) -> str:
"""
Generates random string of letters and digits with length
"""
symbols = ascii_letters + digits
return ''.join(random.choice(symbols) for letter in range(length))
def build_host_url(url: str, scheme: str = 'https') -> str:
"""
Parses url and adding http scheme if it doesn't exist
"""
parse_result = urlparse(url, scheme)
if parse_result.netloc:
netloc = parse_result.netloc
path = parse_result.path
else:
netloc = parse_result.path
path = ''
host = parse_result._replace(netloc=netloc, path=path)
return host.geturl()
def remove_key_values(dictionary, keys=['self', '__class__']):
"""
Removes key values from dictionary
"""
new_dict = dictionary
for key in keys:
del new_dict[key]
return new_dict
def filter_resource_entities_by_key_value(resource_entities, key, value):
"""
Filters all resource entities by key and values,
returns list of resource entities
"""
found_dicts = [d for d in resource_entities if d[key] == value]
if not found_dicts:
raise KeyError(f'resource entities with {key} "{value}" not found')
return found_dicts
def set_resource_default_fields(args_to_set, query_params, resource_instances):
for key, value in args_to_set.items():
if value is None:
query_params[key] = resource_instances[key]
| 26.338028 | 79 | 0.675401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.265775 |
a855889fb82fea703cc4439aa3a13845ae7ffaa9 | 1,903 | py | Python | examples/ADT.py | SophiaZhyrovetska/Music_analizer | 9454aa1df9a75b25526a972c620a4aea3f30541f | [
"MIT"
] | 2 | 2018-06-26T21:49:49.000Z | 2018-06-26T21:49:53.000Z | examples/ADT.py | SophiaZhyrovetska/Music_analizer | 9454aa1df9a75b25526a972c620a4aea3f30541f | [
"MIT"
] | 1 | 2018-06-20T23:17:52.000Z | 2018-06-27T08:43:49.000Z | examples/ADT.py | SophiaZhyrovetska/Music_analizer | 9454aa1df9a75b25526a972c620a4aea3f30541f | [
"MIT"
] | 1 | 2018-06-26T21:49:52.000Z | 2018-06-26T21:49:52.000Z | class Song:
"A class for representing a song"
def __init__(self, name, singer):
"""
Initialize a new song with it's name and singer
:param name: str
:param singer: str
"""
self.name = name
self.singer = singer
self.mood = self.mood()
def text(self):
"""
Returns a text of a song
:return: str
"""
pass
def mood(self):
"""
Returns a mood of a song
:return: str
"""
pass
def theme(self):
"""
Returns a theme of a song
:return: str
"""
pass
def key_words(self):
"""
Returns key words of a song
:return: list
"""
pass
class Singer:
"A class for representing a singer"
def __init__(self, name):
"""
Initialize a new singer with it's name
:param name: str
"""
self.name = name
class Discography:
"A class for representing a discography of a singer. Uses Singer() and Song() instances"
def __init__(self, singer):
"""
Initialize a new discography
:param singer: Singer() instance
"""
self.singer = singer
self.songs = []
def add_song(self, song):
"""
Adds a song to discography (self.songs)
:param song: Song() instance
:return: None
"""
pass
def number_of_songs(self):
"""
Returns a number of songs in this discography
:return: int
"""
pass
def mood(self):
"""
Returns a a dictionary, with moods as keys and number of songs as values
:return: dict
"""
pass
def themes(self):
"""
Returns most popular themes of songs in this discography
:return: list
"""
pass
| 20.462366 | 92 | 0.504467 | 1,896 | 0.996322 | 0 | 0 | 0 | 0 | 0 | 0 | 1,172 | 0.61587 |
a8584dc2e1d7252f7eee5bae7caafe98b9817a57 | 5,304 | py | Python | tensorflow_1_x/7_kaggle/learntools/pandas/indexing_selecting_and_assigning.py | amitbcp/machine_learning_with_Scikit_Learn_and_TensorFlow | 37dda063e316503d53ac45f3b104a5cf1aaa4d78 | [
"MIT"
] | 11 | 2019-12-19T08:55:52.000Z | 2021-10-01T13:07:13.000Z | tensorflow_1_x/7_kaggle/learntools/pandas/indexing_selecting_and_assigning.py | amitbcp/Scikit_Learn_TensorFlow_Examples | 37dda063e316503d53ac45f3b104a5cf1aaa4d78 | [
"MIT"
] | 5 | 2019-10-09T01:41:19.000Z | 2022-02-10T00:19:01.000Z | tensorflow_1_x/7_kaggle/learntools/pandas/indexing_selecting_and_assigning.py | amitbcp/Scikit_Learn_TensorFlow_Examples | 37dda063e316503d53ac45f3b104a5cf1aaa4d78 | [
"MIT"
] | 7 | 2019-10-08T06:10:14.000Z | 2020-12-01T07:49:21.000Z | import pandas as pd
from learntools.core import *
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
# 1
class SelectDescCol(EqualityCheckProblem):
_var = 'desc'
_expected = (
reviews.description
)
#_solution = CS("desc = reviews.description")
# This behaviour really should have been opt-in, rather than opt-out :/
show_solution_on_correct = False
_hint = "As an example, say we would like to select the column `column` from a DataFrame `table`. Then we have two options: we can call either `table.column` or `table[\"column\"]`."
_solution = """
```python
desc = reviews.description
```
or
```python
desc = reviews["description"]
```
`desc` is a pandas `Series` object, with an index matching the `reviews` DataFrame.
In general, when we select a single column from a DataFrame, we'll get a Series.
"""
# 2
class FirstDesc(EqualityCheckProblem):
_var = 'first_description'
_expected = (
reviews.description.iloc[0]
)
_hint = "To obtain a specific entry (corresponding to column `column` and row `i`) in a DataFrame `table`, we can call `table.column.iloc[i]`. Remember that Python indexing starts at 0!"
_solution = """
```python
first_description = reviews.description.iloc[0]
```
Note that while this is the preferred way to obtain the entry in the DataFrame, many other options will return a valid result, such as `reviews.description.loc[0]`, `reviews.description[0]`, and more!
"""
# 3
class FirstRow(EqualityCheckProblem):
_var = 'first_row'
_expected = (
reviews.iloc[0]
)
_hint = "To obtain a specific row of a DataFrame, we can use the `iloc` operator. For more information, see the section on **Index-based selection** in the [reference component](https://www.kaggle.com/residentmario/indexing-selecting-assigning-reference)."
_solution = CS("first_row = reviews.iloc[0]")
# 4
class FirstDescs(EqualityCheckProblem):
_var = 'first_descriptions'
_expected = (
reviews.description.iloc[:10]
)
_hint = "We can use either the `loc` or `iloc` operator to solve this problem. For more information, see the sections on **Index-based selection** and **Label-based selection** in the [reference component](https://www.kaggle.com/residentmario/indexing-selecting-assigning-reference)."
_solution = """
```python
first_descriptions = reviews.description.iloc[:10]
```
Note that many other options will return a valid result, such as `desc.head(10)` and `reviews.loc[:9, "description"]`.
"""
# 5
class SampleReviews(EqualityCheckProblem):
_var = 'sample_reviews'
indices = [1, 2, 3, 5, 8]
_expected = (
reviews.loc[indices],
)
_hint = "Use either the `loc` or `iloc` operator to select rows of a DataFrame."
_solution = CS("""\
indices = [1, 2, 3, 5, 8]
sample_reviews = reviews.loc[indices]""")
# 6
class RowColSelect(EqualityCheckProblem):
_var = 'df'
cols = ['country', 'province', 'region_1', 'region_2']
indices = [0, 1, 10, 100]
_expected = (
reviews.loc[indices, cols],
)
_hint = "Use the `loc` operator. (Note that it is also *possible* to solve this problem using the `iloc` operator, but this would require extra effort to convert each column name to a corresponding integer-valued index.)"
_solution = CS("""\
cols = ['country', 'province', 'region_1', 'region_2']
indices = [0, 1, 10, 100]
df = reviews.loc[indices, cols]""")
# 7
class RowColSelect2(EqualityCheckProblem):
_var = 'df'
cols = ['country', 'variety']
_expected = (
reviews.head(100).loc[:,cols],
)
_hint = "It is most straightforward to solve this problem with the `loc` operator. (However, if you decide to use `iloc`, remember to first convert each column into a corresponding integer-valued index.)"
_solution = """
```python
cols = ['country', 'variety']
df = reviews.loc[:99, cols]
```
or
```python
cols_idx = [0, 11]
df = reviews.iloc[:100, cols_idx]
```
"""
# 8
class ItalianWines(EqualityCheckProblem):
_var = 'italian_wines'
_expected = (
reviews[reviews.country == 'Italy'],
)
_hint = "For more information, see the section on **Conditional selection** in the [reference component](https://www.kaggle.com/residentmario/indexing-selecting-assigning-reference)."
_solution = CS("italian_wines = reviews[reviews.country == 'Italy']")
# 9
class TopOceanicWines(EqualityCheckProblem):
_var = 'top_oceania_wines'
cols = ['country', 'variety']
_expected = reviews[
(reviews.country.isin(['Australia', 'New Zealand']))
& (reviews.points >= 95)
]
_hint = "For more information, see the section on **Conditional selection** in the [reference component](https://www.kaggle.com/residentmario/indexing-selecting-assigning-reference)."
_solution = CS("""\
top_oceania_wines = reviews.loc[
(reviews.country.isin(['Australia', 'New Zealand']))
& (reviews.points >= 95)
]""")
qvars = bind_exercises(globals(), [
SelectDescCol,
FirstDesc,
FirstRow,
FirstDescs,
SampleReviews,
RowColSelect,
RowColSelect2,
ItalianWines,
TopOceanicWines,
],
tutorial_id=46,
)
__all__ = list(qvars)
| 35.36 | 289 | 0.6727 | 4,862 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 3,404 | 0.64178 |
a85abfde0966e5dcea2b53ffa4e2160f0a3fdb66 | 6,392 | py | Python | openseis/sd5.py | whamlyn/openseis | 40d9f7af9eb172bc94ed3e45f66b563565906c96 | [
"Apache-2.0"
] | null | null | null | openseis/sd5.py | whamlyn/openseis | 40d9f7af9eb172bc94ed3e45f66b563565906c96 | [
"Apache-2.0"
] | null | null | null | openseis/sd5.py | whamlyn/openseis | 40d9f7af9eb172bc94ed3e45f66b563565906c96 | [
"Apache-2.0"
] | null | null | null | """
OpenSeis module
"""
import numpy as np
import os
import openseis as ops
try:
import h5py
except:
print('h5py package does not seem to be installed on this system.')
class Seis3D():
"""
OpenSeis class for 3D seismic datasets
"""
def __init__(self, sg5file):
"""
Constructor method for Seis3D class
"""
self.sg5file = sg5file
if os.path.exists(self.sg5file):
self.retreive_attributes()
else:
print('No SG5 file found.')
def retreive_attributes(self):
"""
Read file attributes from the sd5 file:
"""
with h5py.File(self.sg5file, 'r') as fd:
self.il_min = fd.attrs['il_min']
self.il_max = fd.attrs['il_max']
self.xl_min = fd.attrs['xl_min']
self.xl_max = fd.attrs['xl_max']
self.z_min = fd.attrs['z_min']
self.z_max = fd.attrs['z_max']
self.z_type = fd.attrs['z_type']
self.nil = fd.attrs['nil']
self.nxl = fd.attrs['nxl']
self.nz = fd.attrs['nz']
def get_il(self, il):
with h5py.File(self.sg5file, 'r') as fd:
ili = il - fd.attrs['il_min']
tdata = fd['seis']['tdata'][ili, :]
return tdata
def get_xl(self, xl):
with h5py.File(self.sg5file, 'r') as fd:
xli = xl - fd.attrs['xl_min']
tdata = fd['seis']['tdata'][:, xli]
return tdata
def get_zslice(self, zval):
with h5py.File(self.sg5file, 'r') as fd:
z_min = fd.attrs['z_min']
dz = fd.attrs['dz']
zi = round((zval - z_min)/dz)
tdata = fd['seis']['tdata'][:, :, zi]
return tdata
def get_thead(self, key):
with h5py.File(self.sg5file, 'r') as fd:
thead = fd['seis']['thead'][key][:]
return thead
def open_sd5file(self):
self.fd = h5py.File(self.sg5file, 'r')
return self.fd
def close_sd5file(self, fd=None):
if fd==None:
self.fd.close()
else:
fd.close()
def load_from_sgy(self, sgyfile, def_thead):
"""
Function to create an sg5 (i.e. HDF5) format seismic file by loading trace
and header data from a SEG-Y datafile.
"""
# create an auralib Segy object instance
buf = ops.segy.Segy(sgyfile, def_thead)
# read all trace data and header data and store in list/dictionary
tdata = buf.read_tdata_multi(0, buf.num_traces, verbose=1000)
thead = buf.read_thead_multi(0, buf.num_traces, verbose=1000)
# convert trace headers to a dictionary of numpy arrays for fast calcuation
tdata = np.array(tdata)
for key in thead.keys():
thead[key] = np.array(thead[key])
# calculate some statistics from the trace headers and use these to build
# the 3D geometry for the sg5 file
il_min = np.min(thead['il'])
il_max = np.max(thead['il'])
xl_min = np.min(thead['xl'])
xl_max = np.max(thead['xl'])
nil = il_max - il_min + 1
nxl = xl_max - xl_min + 1
dz = buf.bhead['samp_rate']*1e-6
nz = buf.bhead['num_samp']
z_min = 0
z_max = z_min + nz*dz
# create HDF5 file and create basic file structure to hold SEGY trace
# and header data
with h5py.File(self.sg5file,'w') as fd:
fd.attrs.create('data_type', 'trace')
fd.attrs.create('data_units', 'amp')
fd.attrs.create('il_min', il_min)
fd.attrs.create('il_max', il_max)
fd.attrs.create('xl_min', xl_min)
fd.attrs.create('xl_max', xl_max)
fd.attrs.create('nil', nil)
fd.attrs.create('nxl', nxl)
fd.attrs.create('dz', dz)
fd.attrs.create('nz', nz)
fd.attrs.create('z_min', z_min)
fd.attrs.create('z_max', z_max)
fd.attrs.create('z_type', 'TWT')
# create an HDF5 "group" called "seis" to contain seismic trace datasets
g0 = fd.create_group('seis')
# create an HDF5 "dataset" called "tdata" to actually store the traces
d0 = g0.create_dataset('tdata', (nil, nxl, nz))
# create an HDF5 "group" called "seis" to contain trace header datasets
g1 = g0.create_group('thead')
# create multiple HDF% "datasets", one for each trace heder to be loaded
for key in thead.keys():
d1 = g1.create_dataset(key, (nil, nxl))
# Create the inline and crossline indicies
ili = thead['il'] - il_min
xli = thead['xl'] - xl_min
# Now, write the traces read from the SEG-Y into the HDF5 'tdata' dataset
for i in range(buf.num_traces):
# print a status message to command line
if i%nxl == 0:
print('Writing trace inline %i' % (thead['il'][i]))
# writing each trace individually, there must be a faster way to do
# this but the HDF5 indexing isn't as flexible as numpy's; requires
# further investigation, but at least this works
d0[ili[i], xli[i], :] = tdata[i]
# Now, write the trace headers in the HDF5 'thead' group
for key in thead.keys():
# print a status message to the command line
print('Writing header key=%s' % (key))
# the trace headers were easier to set up as an indexed writing
# operation than the trace data (2d arrays vs 3D arrays). Trace
# headers more easily fit into memory and could be transformed from a
# 1D array to a 2D array which was then easily written to a 2D HDF5
# dataset. This was much faster than writing each individual header
# for each individual trace.
# make 2D numpy array of current trace header field
tmp = np.zeros([nil, nxl])
tmp[:,:] = np.nan
tmp[ili, xli] = thead[key]
# write 2D numpy array to HDF5 file in a single operation without
# requiring loops
g1[key][:, :] = tmp[:, :]
self.retreive_attributes()
| 32.779487 | 85 | 0.546151 | 6,210 | 0.971527 | 0 | 0 | 0 | 0 | 0 | 0 | 2,408 | 0.376721 |
a85bc4180769fa993ff4097080b1f134fa71c099 | 762 | py | Python | Heaps: Find the Running Median/Heaps: Find the Running Median.py | chmielak90/HackerRank_Coding_Interview_Challenges | 0e9fb79402a3c404719fa8411e8e32137e02fe07 | [
"MIT"
] | 1 | 2021-02-27T19:15:39.000Z | 2021-02-27T19:15:39.000Z | Heaps: Find the Running Median/Heaps: Find the Running Median.py | chmielak90/HackerRank_Coding_Interview_Challenges | 0e9fb79402a3c404719fa8411e8e32137e02fe07 | [
"MIT"
] | null | null | null | Heaps: Find the Running Median/Heaps: Find the Running Median.py | chmielak90/HackerRank_Coding_Interview_Challenges | 0e9fb79402a3c404719fa8411e8e32137e02fe07 | [
"MIT"
] | null | null | null | # n = int(input().strip())
#
# a = []
# a_i = 0
#
# for a_i in range(n):
# a_t = int(input().strip())
# a.append(a_t)
#
# a = sorted(a, key=int)
#
# if len(a) % 2 == 0:
# print((a[int(len(a)/2)] + a[int(len(a)/2)-1])/2)
#
# else:
# print(float(a[int(len(a)/2)]))
# this up working but to slow
from bisect import insort
# n = int(input().strip())
a = []
# a_i = 0
def median(a):
med = 0
if len(a) % 2 == 0:
l = a[len(a) // 2];
r = a[(len(a) // 2) - 1]
med = (l + r) / 2.0
elif len(a) % 2 != 0:
med = a[len(a) // 2]
return med
if __name__ == '__main__':
heap = []
for _ in range(int(input())):
insort(heap, int(input()))
print(float(median(heap))) | 17.318182 | 58 | 0.450131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.468504 |
a85ce1a483cfb29a350fd20c5803ae08cb276570 | 4,119 | py | Python | gost/ui/commands/reporting.py | sixy6e/gost | 43c99952bb3d5a7f668981477b4b3522889ac4db | [
"Apache-2.0"
] | null | null | null | gost/ui/commands/reporting.py | sixy6e/gost | 43c99952bb3d5a7f668981477b4b3522889ac4db | [
"Apache-2.0"
] | null | null | null | gost/ui/commands/reporting.py | sixy6e/gost | 43c99952bb3d5a7f668981477b4b3522889ac4db | [
"Apache-2.0"
] | 2 | 2020-08-16T04:38:59.000Z | 2021-09-09T05:58:58.000Z | """
Command line interface for creating the LaTeX documents.
"""
from pathlib import Path, PurePosixPath as PPath
from typing import Union
import click
import h5py # type: ignore
import pandas
import structlog # type: ignore
import geopandas # type: ignore
from wagl.hdf5 import read_h5_table # type: ignore
from gost.constants import (
CsvFileNames,
DatasetGroups,
DatasetNames,
DirectoryNames,
FileNames,
LOG_PROCESSORS,
LogNames,
)
from gost.collate import create_general_csvs, create_csv
from gost.report_utils import latex_documents
from ._shared_commands import io_dir_options
_LOG = structlog.get_logger()
def _extract_proc_info_results(fid: h5py.File, outdir: Path) -> None:
"""
Extracts the ancillary, gqa, software versions results tables
and converts to CSV for the LaTeX document.
"""
def _read_table(fid: h5py.File, dataset_name: PPath) -> pandas.DataFrame:
"""Small proxy to read the H5 table dataset."""
_LOG.info("reading dataset", dataset_name=str(dataset_name))
dataframe = read_h5_table(fid, str(dataset_name))
return dataframe
dataset_name = PPath(
DatasetGroups.SUMMARY.value, DatasetNames.ANCILLARY_SUMMARISED.value
)
ancillary_df = _read_table(fid, dataset_name)
dataset_name = PPath(DatasetGroups.SUMMARY.value, DatasetNames.GQA_SUMMARISED.value)
gqa_df = _read_table(fid, dataset_name)
dataset_name = PPath(DatasetNames.SOFTWARE_VERSIONS.value)
software_df = _read_table(fid, dataset_name)
out_fname = outdir.joinpath(
DirectoryNames.RESULTS.value, CsvFileNames.ANCILLARY.value
)
create_csv(ancillary_df, out_fname)
out_fname = outdir.joinpath(DirectoryNames.RESULTS.value, CsvFileNames.GQA.value)
create_csv(gqa_df, out_fname)
out_fname = outdir.joinpath(DirectoryNames.RESULTS.value, CsvFileNames.SOFTWARE.value)
_LOG.info("writing CSV", out_fname=str(out_fname))
software_df.to_csv(out_fname, index=False)
@click.command()
@io_dir_options
def reporting(
outdir: Union[str, Path],
) -> None:
"""
Produce the LaTeX reports, and final pass/fail summary.
"""
outdir = Path(outdir)
log_fname = outdir.joinpath(DirectoryNames.LOGS.value, LogNames.REPORTING.value)
if not log_fname.parent.exists():
log_fname.parent.mkdir(parents=True)
with open(log_fname, "w") as fobj:
structlog.configure(
logger_factory=structlog.PrintLoggerFactory(fobj), processors=LOG_PROCESSORS
)
comparison_results_fname = outdir.joinpath(
DirectoryNames.RESULTS.value, FileNames.RESULTS.value
)
_LOG.info(
"opening intercomparison results file", fname=str(comparison_results_fname)
)
with h5py.File(str(comparison_results_fname), "r") as fid:
# read intercomparison general measurements summary
dataset_name = PPath(
DatasetGroups.SUMMARY.value, DatasetNames.GENERAL_SUMMARISED.value
)
_LOG.info("reading dataset", dataset_name=str(dataset_name))
dataframe = read_h5_table(fid, str(dataset_name))
n_datasets = fid[DatasetNames.QUERY.value].attrs["nrows"]
# read and convert metadata tables
_extract_proc_info_results(fid, outdir)
_LOG.info("creating CSV's of the general measurements intercomparison summary")
create_general_csvs(dataframe, outdir.joinpath(DirectoryNames.RESULTS.value))
results_fname = outdir.joinpath(
DirectoryNames.RESULTS.value, FileNames.GENERAL_FRAMING.value
)
_LOG.info(
"opening geometry framing general results file", fname=str(results_fname)
)
gdf = geopandas.read_file(results_fname)
reports_outdir = outdir.joinpath(DirectoryNames.REPORT.value)
_LOG.info("producing LaTeX documents of general results")
latex_documents(gdf, dataframe, reports_outdir, n_datasets)
# TODO GQA and ancillary
_LOG.info("finished writing the LaTeX documents")
| 32.179688 | 90 | 0.705268 | 0 | 0 | 0 | 0 | 2,109 | 0.512017 | 0 | 0 | 769 | 0.186696 |
a85d28a147c2851074f1bb71e893b4e8755ebe06 | 660 | py | Python | aws_lambda_typing/events/config.py | curekoshimizu/aws-lambda-typing | ea5649e084f598f02dadad1a60927034ea46ae5d | [
"MIT"
] | null | null | null | aws_lambda_typing/events/config.py | curekoshimizu/aws-lambda-typing | ea5649e084f598f02dadad1a60927034ea46ae5d | [
"MIT"
] | null | null | null | aws_lambda_typing/events/config.py | curekoshimizu/aws-lambda-typing | ea5649e084f598f02dadad1a60927034ea46ae5d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import typing
class ConfigEvent(typing.TypedDict):
"""
MQEvent https://docs.aws.amazon.com/lambda/latest/dg/services-config.html
Attributes:
----------
invokingEvent: str
ruleParameters: str
resultToken: str
eventLeftScope: bool
executionRoleArn: str
configRuleArn: str
configRuleName: str
configRuleId: str
accountId: str
version: str
"""
invokingEvent: str
ruleParameters: str
resultToken: str
eventLeftScope: bool
executionRoleArn: str
configRuleArn: str
configRuleName: str
configRuleId: str
accountId: str
version: str
| 15.348837 | 77 | 0.660606 | 620 | 0.939394 | 0 | 0 | 0 | 0 | 0 | 0 | 375 | 0.568182 |
a85d3f5542b43f9fe833a1ef99744fb9819e5e1a | 4,078 | py | Python | Incident-Response/Tools/grr/grr/test/grr_response_test/end_to_end_tests/tests/timeline.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/grr/grr/test/grr_response_test/end_to_end_tests/tests/timeline.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/grr/grr/test/grr_response_test/end_to_end_tests/tests/timeline.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | #!/usr/bin/env python
# Lint as: python3
"""E2E tests for the timeline flow."""
import csv
import io
from typing import Sequence
from typing import Text
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.util import temp
from grr_response_proto.api import timeline_pb2
from grr_response_test.end_to_end_tests import test_base
class TestTimelineLinux(test_base.EndToEndTest):
"""A class with Linux-specific timeline tests."""
platforms = [test_base.EndToEndTest.Platform.LINUX]
def testUsrBin(self):
args = self.grr_api.types.CreateFlowArgs("TimelineFlow")
args.root = "/bin/".encode("utf-8")
flow = self.RunFlowAndWait("TimelineFlow", args=args)
with temp.AutoTempFilePath(suffix=".body") as temp_filepath:
timeline_format = timeline_pb2.ApiGetCollectedTimelineArgs.Format.BODY
body = flow.GetCollectedTimeline(timeline_format)
body.WriteToFile(temp_filepath)
with io.open(temp_filepath, mode="r", encoding="utf-8") as temp_filedesc:
entries = list(csv.reader(temp_filedesc, delimiter="|"))
paths = [entry[1] for entry in entries]
self.assertIn("/bin/bash", paths)
self.assertIn("/bin/cat", paths)
self.assertIn("/bin/chmod", paths)
self.assertIn("/bin/cp", paths)
self.assertIn("/bin/rm", paths)
self.assertIn("/bin/sleep", paths)
for entry in entries:
assertBodyEntrySanity(self, entry)
class TestTimelineWindows(test_base.EndToEndTest):
"""A class with Windows-specific timeline tests."""
platforms = [test_base.EndToEndTest.Platform.WINDOWS]
def testWindows(self):
args = self.grr_api.types.CreateFlowArgs("TimelineFlow")
args.root = "C:\\Windows".encode("utf-8")
flow = self.RunFlowAndWait("TimelineFlow", args=args)
with temp.AutoTempFilePath(suffix=".body") as temp_filepath:
timeline_format = timeline_pb2.ApiGetCollectedTimelineArgs.Format.BODY
body = flow.GetCollectedTimeline(timeline_format)
body.WriteToFile(temp_filepath)
with io.open(temp_filepath, mode="r", encoding="utf-8") as temp_filedesc:
entries = list(csv.reader(temp_filedesc, delimiter="|"))
paths = [entry[1].lower() for entry in entries]
self.assertIn("C:\\Windows\\explorer.exe".lower(), paths)
self.assertIn("C:\\Windows\\notepad.exe".lower(), paths)
self.assertIn("C:\\Windows\\regedit.exe".lower(), paths)
self.assertIn("C:\\Windows\\System32\\dwm.exe".lower(), paths)
for entry in entries:
assertBodyEntrySanity(self, entry)
def testWindowsBackslashEscape(self):
args = self.grr_api.types.CreateFlowArgs("TimelineFlow")
args.root = "C:\\Windows".encode("utf-8")
flow = self.RunFlowAndWait("TimelineFlow", args=args)
with temp.AutoTempFilePath(suffix=".body") as temp_filepath:
body = flow.GetCollectedTimelineBody(backslash_escape=True)
body.WriteToFile(temp_filepath)
with io.open(temp_filepath, mode="r", encoding="utf-8") as temp_filedesc:
content = temp_filedesc.read().lower()
self.assertIn("|C:\\\\Windows\\\\explorer.exe|".lower(), content)
self.assertIn("|C:\\\\Windows\\\\notepad.exe|".lower(), content)
self.assertIn("|C:\\\\Windows\\\\regedit.exe|".lower(), content)
self.assertIn("|C:\\\\Windows\\\\System32\\\\dwm.exe|".lower(), content)
def assertBodyEntrySanity( # pylint: disable=invalid-name
test: absltest.TestCase,
entry: Sequence[Text],
) -> None:
"""Asserts that given row of a body file is sane."""
# Size should be non-negative (some files might be empty, though).
test.assertGreaterEqual(int(entry[6]), 0)
# All timestamps should be positive.
test.assertGreater(int(entry[7]), 0)
test.assertGreater(int(entry[8]), 0)
test.assertGreater(int(entry[9]), 0)
# All timestamps should be older than now.
now = rdfvalue.RDFDatetime.Now()
test.assertLessEqual(int(entry[7]), now.AsSecondsSinceEpoch())
test.assertLessEqual(int(entry[8]), now.AsSecondsSinceEpoch())
test.assertLessEqual(int(entry[9]), now.AsSecondsSinceEpoch())
| 35.46087 | 79 | 0.712114 | 2,962 | 0.726336 | 0 | 0 | 0 | 0 | 0 | 0 | 909 | 0.222903 |
a85f307dc35b10cea0708b8963b5efd033be967d | 121 | py | Python | app/tests/test_init.py | MerrybyPractice/flask-comics-api | f5d833d83a8e488e1f783ce3314bd2d4b87f5143 | [
"MIT"
] | null | null | null | app/tests/test_init.py | MerrybyPractice/flask-comics-api | f5d833d83a8e488e1f783ce3314bd2d4b87f5143 | [
"MIT"
] | 2 | 2019-07-30T03:32:09.000Z | 2019-07-31T13:44:36.000Z | app/tests/test_init.py | MerrybyPractice/flask-comics-api | f5d833d83a8e488e1f783ce3314bd2d4b87f5143 | [
"MIT"
] | null | null | null | import sys
sys.path.append("flask-comics-api")
from app import app
def test_all_comics():
print(app)
assert False | 13.444444 | 35 | 0.735537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.14876 |
a861266c9aad15803a8ee7f2f5de7cca2abf2cc8 | 9,231 | py | Python | metric/metric.py | wyu-du/MultiTurnDialogZoo | ac6a5d4fee31aef9db86ffef599d70f099d93897 | [
"MIT"
] | 145 | 2020-02-13T02:59:08.000Z | 2022-03-31T06:09:16.000Z | metric/metric.py | wyu-du/MultiTurnDialogZoo | ac6a5d4fee31aef9db86ffef599d70f099d93897 | [
"MIT"
] | 14 | 2020-02-19T09:32:45.000Z | 2022-03-22T19:18:30.000Z | metric/metric.py | wyu-du/MultiTurnDialogZoo | ac6a5d4fee31aef9db86ffef599d70f099d93897 | [
"MIT"
] | 28 | 2020-02-19T02:47:22.000Z | 2022-03-25T06:11:31.000Z | from nltk.translate.bleu_score import sentence_bleu, corpus_bleu
from nltk.translate.bleu_score import SmoothingFunction
from nltk.collocations import BigramCollocationFinder
from nltk.probability import FreqDist
from .bleu import Bleu
import argparse
import codecs
import numpy as np
import math
from bert_score import score
from rouge import Rouge
import os, re
import ipdb
import numpy as np
# BLEU of NLTK
def cal_BLEU_nltk(refer, candidate, ngram=1):
    '''
    Sentence-level BLEU-n computed with NLTK.

    :param refer: list of reference token lists
    :param candidate: candidate token list
    :param ngram: highest n-gram order to use (1..4)
    :return: smoothed sentence BLEU score

    SmoothingFunction refer to https://github.com/PaddlePaddle/models/blob/a72760dff8574fe2cb8b803e01b44624db3f3eff/PaddleNLP/Research/IJCAI2019-MMPMS/mmpms/utils/metrics.py
    '''
    smoothie = SmoothingFunction().method7
    # Explicit weight table (kept byte-identical to the historical values,
    # note 0.33 rather than 1/3 for ngram=3 to preserve prior results).
    weights_by_ngram = {
        1: (1, 0, 0, 0),
        2: (0.5, 0.5, 0, 0),
        3: (0.33, 0.33, 0.33, 0),
        4: (0.25, 0.25, 0.25, 0.25),
    }
    try:
        weight = weights_by_ngram[ngram]
    except KeyError:
        # previously an unsupported ngram crashed later with UnboundLocalError
        raise ValueError(f'ngram must be in 1..4, got {ngram}')
    return sentence_bleu(refer, candidate,
                         weights=weight,
                         smoothing_function=smoothie)
# BLEU of nlg-eval
def cal_BLEU(refs, tgts):
    """Corpus-level BLEU-1..4 via the bundled nlg-eval Bleu scorer.

    Both inputs are sequences of sentences; the scorer expects
    {index: [sentence]} dicts, so wrap each sentence in a singleton list.
    Returns the list of four BLEU scores.
    """
    wrapped_refs = dict(enumerate([sentence] for sentence in refs))
    wrapped_tgts = dict(enumerate([sentence] for sentence in tgts))
    result = Bleu(4).compute_score(wrapped_refs, wrapped_tgts)
    return result[0]
# BLEU of multibleu.perl
def cal_BLEU_perl(dataset, model):
    """Run the multi-bleu perl wrapper script and parse its BLEU-1..4 output.

    Shells out to ./metric/perl-bleu.py with the dataset and model names,
    then extracts the four slash-separated scores from its stdout.
    Returns (bleu1, bleu2, bleu3, bleu4) as floats.
    """
    output = os.popen(f'python ./metric/perl-bleu.py {dataset} {model}').read()
    print(f'[!] multi-perl: {output}')
    matcher = re.compile(r'(\w+\.\w+)/(\w+\.\w+)/(\w+\.\w+)/(\w+\.\w+)')
    # findall yields one 4-tuple of score strings; convert each to float
    return tuple(float(value) for value in matcher.findall(output)[0])
def cal_Distinct(corpus):
"""
Calculates unigram and bigram diversity
Args:
corpus: tokenized list of sentences sampled
Returns:
uni_diversity: distinct-1 score
bi_diversity: distinct-2 score
"""
bigram_finder = BigramCollocationFinder.from_words(corpus)
bi_diversity = len(bigram_finder.ngram_fd) / bigram_finder.N
dist = FreqDist(corpus)
uni_diversity = len(dist) / len(corpus)
return uni_diversity, bi_diversity
def cal_ROUGE(refer, candidate):
if len(candidate) == 0:
candidate = ['<unk>']
elif len(candidate) == 1:
candidate.append('<unk>')
if len(refer) == 0:
refer = ['<unk>']
elif len(refer) == 1:
refer.append('<unk>')
rouge = Rouge()
scores = rouge.get_scores(' '.join(candidate), ' '.join(refer))
return scores[0]['rouge-2']['f']
def cal_BERTScore(refer, candidate):
# too slow, fuck it
_, _, bert_scores = score(candidate, refer, lang='en', rescale_with_baseline=True)
bert_scores = bert_scores.tolist()
bert_scores = [0.5 if math.isnan(score) else score for score in bert_scores]
return np.mean(bert_scores)
# ========== embedding-based metrics ========== #
# ========== in-house implementations, independent of nlg-eval ========== #
def cal_vector_extrema(x, y, dic):
    """Embedding-based Vector Extrema cosine similarity.

    :param x: token list of the first sentence
    :param y: token list of the second sentence
    :param dic: mapping word -> embedding vector (e.g. a gensim
                word2vec model with 300-d Google News vectors)
    :return: cosine similarity of the component-wise max vectors

    Fixes over the previous version:
    - membership was tested on ``w`` but the lookup used ``dic[w.lower()]``,
      which raised KeyError for mixed-case vocabularies; the same key is now
      used for both (lookup casing intent was ambiguous — confirmed to match
      the membership test).
    - the all-zero guard compared ``vec.all() == zeros.all()``, which
      spuriously returned 1.0/0.0 whenever *any* component was zero; it now
      tests for a genuinely all-zero vector.
    """
    def vecterize(tokens):
        # skip OOV tokens; a fully-OOV sentence falls back to one random
        # 300-d vector (matching the model's embedding size)
        vectors = [dic[w] for w in tokens if w in dic]
        if not vectors:
            vectors.append(np.random.randn(300))
        return np.stack(vectors)

    vec_x = np.max(vecterize(x), axis=0)
    vec_y = np.max(vecterize(y), axis=0)
    assert len(vec_x) == len(vec_y), "len(vec_x) != len(vec_y)"
    # an all-zero extrema vector has no direction: cosine is undefined
    if not vec_x.any() or not vec_y.any():
        return float(1) if np.array_equal(vec_x, vec_y) else float(0)
    num = float(np.dot(vec_x, vec_y))
    denom = np.sqrt(float(np.dot(vec_x, vec_x))) * np.sqrt(float(np.dot(vec_y, vec_y)))
    return num / denom
def cal_embedding_average(x, y, dic):
    """
    Embedding-average cosine similarity between two token lists.

    Each side is summed into a single vector; the cosine of the two sums
    is returned (normalizing the sums first, as the original did, does not
    change the cosine).

    Args:
        x, y: lists of word tokens
        dic: word -> embedding lookup (e.g. a gensim word2vec model)
    """
    def vecterize(p):
        # fetch the same key that was membership-tested (the original
        # looked up dic[w.lower()] after testing `w in dic`, which could
        # raise KeyError for cased words)
        vectors = [dic[w] for w in p if w in dic]
        if not vectors:
            # no known word: fall back to a random 300-d vector
            vectors.append(np.random.randn(300))
        return np.stack(vectors)

    x = vecterize(x)
    y = vecterize(y)
    sum_x = np.sum(x, axis=0)
    sum_y = np.sum(y, axis=0)
    assert len(sum_x) == len(sum_y), "len(vec_x) != len(vec_y)"
    # guard BEFORE dividing: the original normalized first, producing NaN
    # for an all-zero sum, and its zero check compared .all() booleans
    if not sum_x.any() or not sum_y.any():
        return 1.0 if np.array_equal(sum_x, sum_y) else 0.0
    vec_x = sum_x / math.sqrt(np.sum(np.square(sum_x)))
    vec_y = sum_y / math.sqrt(np.sum(np.square(sum_y)))
    # plain arrays instead of the deprecated np.mat
    cos = float(np.dot(vec_x, vec_y) / (np.linalg.norm(vec_x) * np.linalg.norm(vec_y)))
    return cos
def cal_greedy_matching(x, y, dic):
    """
    Greedy-matching embedding metric between two token lists.

    Every vector on one side is matched with its most similar vector on
    the other side; the per-side averages of those best cosines are then
    averaged over both directions.

    Args:
        x, y: lists of word tokens
        dic: word -> embedding lookup (e.g. a gensim word2vec model)
    """
    def vecterize(p):
        # fetch the same key that was membership-tested (the original
        # looked up dic[w.lower()] after testing `w in dic`, which could
        # raise KeyError for cased words)
        vectors = [dic[w] for w in p if w in dic]
        if not vectors:
            # no known word: fall back to a random 300-d vector
            vectors.append(np.random.randn(300))
        return np.stack(vectors)

    def _cosine(u, v):
        assert len(u) == len(v), "len(x_v) != len(y_v)"
        # cosine is undefined for an all-zero vector; two identical zero
        # vectors count as a perfect match, anything else as no match
        # (the original used two different, inconsistent zero checks)
        if not u.any() or not v.any():
            return 1.0 if np.array_equal(u, v) else 0.0
        return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

    def _one_direction(src, tgt):
        # average over src of the best cosine match found in tgt
        best_sum = 0.0
        for u in src:
            best_sum += max(_cosine(u, v) for v in tgt)
        return best_sum / len(src)

    x = vecterize(x)
    y = vecterize(y)
    score = (_one_direction(x, y) + _one_direction(y, x)) / 2
    return score
def cal_greedy_matching_matrix(x, y, dic):
    """
    Vectorized greedy-matching metric (same definition as cal_greedy_matching).

    Builds the full cosine-similarity matrix between the two embedding sets
    in one shot, then averages the row-wise and column-wise maxima.

    Args:
        x, y: lists of word tokens
        dic: word -> embedding lookup (e.g. a gensim word2vec model)
    """
    def vecterize(p):
        # fetch the same key that was membership-tested (the original
        # looked up dic[w.lower()] after testing `w in dic`, which could
        # raise KeyError for cased words)
        vectors = [dic[w] for w in p if w in dic]
        if not vectors:
            # no known word: fall back to a random 300-d vector
            vectors.append(np.random.randn(300))
        return np.stack(vectors)

    x = vecterize(x)  # [len_x, dim]
    y = vecterize(y)  # [len_y, dim]
    sim = np.dot(x, y.T)  # [len_x, len_y] of raw dot products
    sim = sim / np.linalg.norm(x, axis=1, keepdims=True)  # divide by row norms
    sim = sim / np.linalg.norm(y, axis=1).reshape(1, -1)  # divide by column norms
    x_best = np.mean(np.max(sim, axis=1))  # best match for each x vector
    y_best = np.mean(np.max(sim, axis=0))  # best match for each y vector
    return (x_best + y_best) / 2
# ========== End of our own embedding-based metric ========== #
if __name__ == "__main__":
    path = './processed/dailydialog/GatedGCN-no-correlation/pred.txt'

    def _clean_tokens(raw_line, marker):
        # strip speaker tags, the line marker and the special tokens,
        # in the same order as the original chained .replace() calls
        for noise in ("user1", "user0", marker, '<sos>', '<eos>'):
            raw_line = raw_line.replace(noise, "")
        return raw_line.strip().split()

    ref, tgt = [], []
    with open(path) as f:
        for idx, raw_line in enumerate(f.readlines()):
            # the prediction file repeats every 4 lines: line 1 holds the
            # reference, line 2 the model output
            if idx % 4 == 1:
                ref.append(_clean_tokens(raw_line, "- ref: "))
            elif idx % 4 == 2:
                tgt.append(_clean_tokens(raw_line, "- tgt: "))

    # Distinct-1, Distinct-2
    candidates, references = [], []
    for tgt_tokens, ref_tokens in zip(tgt, ref):
        candidates.extend(tgt_tokens)
        references.extend(ref_tokens)
    distinct_1, distinct_2 = cal_Distinct(candidates)
    rdistinct_1, rdistinct_2 = cal_Distinct(references)
    print(distinct_1, distinct_2)
| 32.276224 | 173 | 0.551945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,563 | 0.169321 |
a862a94126909233a9d159eed285bf366dafeca1 | 1,739 | py | Python | distfromplane_demo.py | johnmgregoire/PythonCompositionPlots | e105c575463b7d4512d9aac18c7330d1a0dc2c14 | [
"BSD-3-Clause"
] | 4 | 2018-03-05T09:34:49.000Z | 2022-02-01T15:33:54.000Z | distfromplane_demo.py | johnmgregoire/PythonCompositionPlots | e105c575463b7d4512d9aac18c7330d1a0dc2c14 | [
"BSD-3-Clause"
] | null | null | null | distfromplane_demo.py | johnmgregoire/PythonCompositionPlots | e105c575463b7d4512d9aac18c7330d1a0dc2c14 | [
"BSD-3-Clause"
] | 2 | 2016-01-24T19:09:21.000Z | 2019-10-11T12:43:07.000Z | import pylab, numpy
from myquaternaryutility import QuaternaryPlot
q=QuaternaryPlot(211)
q2=QuaternaryPlot(212)
#t=numpy.linspace(0,1.,5)
#comps=[[a,b,c,d] for a in t for b in t for c in t for d in t if a+b+c+d==1.]
#comps=numpy.float32(comps)
t=numpy.linspace(0,1.,30)
comps=[[a,b,1.-a-b-(2.*a**2+b),2.*a**2+b] for a in t for b in t[:10] if a+b+(2.*a**2+b)<=1.]
comps=numpy.float32(comps)
examplenum=0
if examplenum==0:
compvert2=numpy.array([0.125, .125, .6, .15])
compvert0=numpy.array([.2, .2, 0., .6])
compvert1=numpy.array([1., 0., 0., 0])
critdist=.04
withintriangle=False
elif examplenum==1:
compvert2=numpy.array([0.125, .125, .6, .15])
compvert0=numpy.array([.2, .2, 0., .6])
compvert1=numpy.array([1., 0., 0., 0])
critdist=.04
withintriangle=True
q.scatter(comps,c=comps[:,3])
q.label(ha='center', va='center', fontsize=16)
q.set_projection(azim=-17, elev=-6)
inds, distfromplane, xyparr, xyp_verts,intriangle=q2.filterbydistancefromplane(comps, compvert0, compvert1, compvert2, critdist, withintriangle=withintriangle, invlogic=False, returnall=True)
indsnot=q2.filterbydistancefromplane(comps, compvert0, compvert1, compvert2, critdist, withintriangle=withintriangle, invlogic=True)
print len(inds), ' points'
q2.scatter(comps[inds],c=comps[inds,3])
q2.scatter(comps[indsnot],c='grey', marker='.', s=5)
q2.line(compvert0, compvert1)
q2.line(compvert1, compvert2)
q2.line(compvert2, compvert0)
q2.label(ha='center', va='center', fontsize=16)
q2.set_projection(azim=-17, elev=-6)
pylab.figure()
ax=pylab.subplot(111)
q2.plotfominselectedplane(ax, xyparr[inds], comps[inds, -1], xyp_verts=xyp_verts, vertcomps_labels=[compvert0, compvert1, compvert2], s=20)
pylab.show()
| 31.618182 | 191 | 0.703278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.102933 |
a862bfc859f5b5184921c0883870e3487e3d9e30 | 4,399 | py | Python | config/config.py | GeneralOrae/botrae | d14f26e47f4c906233d778ce5e0ce3e67a9f5b2c | [
"MIT"
] | null | null | null | config/config.py | GeneralOrae/botrae | d14f26e47f4c906233d778ce5e0ce3e67a9f5b2c | [
"MIT"
] | null | null | null | config/config.py | GeneralOrae/botrae | d14f26e47f4c906233d778ce5e0ce3e67a9f5b2c | [
"MIT"
] | null | null | null | BOT_TOKEN: str = "ODg4MzAyMzkwNTMxNDg1Njk2.YUQuEQ.UO4oyY9Zk4u1W5f-VpPLkkQ70TM"
SPOTIFY_ID: str = ""
SPOTIFY_SECRET: str = ""
BOT_PREFIX = "$"
EMBED_COLOR = 0x4dd4d0 #replace after'0x' with desired hex code ex. '#ff0188' >> '0xff0188'
SUPPORTED_EXTENSIONS = ('.webm', '.mp4', '.mp3', '.avi', '.wav', '.m4v', '.ogg', '.mov')
MAX_SONG_PRELOAD = 5 #maximum of 25
COOKIE_PATH = "/config/cookies/cookies.txt"
GLOBAL_DISABLE_AUTOJOIN_VC = False
VC_TIMEOUT = 600 #seconds
VC_TIMOUT_DEFAULT = True #default template setting for VC timeout true= yes, timeout false= no timeout
ALLOW_VC_TIMEOUT_EDIT = True #allow or disallow editing the vc_timeout guild setting
STARTUP_MESSAGE = "Starting Bot..."
STARTUP_COMPLETE_MESSAGE = "Startup Complete"
NO_GUILD_MESSAGE = 'Error: Please join a voice channel or enter the command in guild chat'
USER_NOT_IN_VC_MESSAGE = "Error: Please join the active voice channel to use commands"
WRONG_CHANNEL_MESSAGE = "Error: Please use configured command channel"
NOT_CONNECTED_MESSAGE = "Error: Bot not connected to any voice channel"
ALREADY_CONNECTED_MESSAGE = "Error: Already connected to a voice channel"
CHANNEL_NOT_FOUND_MESSAGE = "Error: Could not find channel"
DEFAULT_CHANNEL_JOIN_FAILED = "Error: Could not join the default voice channel"
INVALID_INVITE_MESSAGE = "Error: Invalid invitation link"
ADD_MESSAGE= "To add this bot to your own Server, click [here]" #brackets will be the link text
INFO_HISTORY_TITLE = "Songs Played:"
MAX_HISTORY_LENGTH = 10
MAX_TRACKNAME_HISTORY_LENGTH = 15
SONGINFO_UPLOADER = "Uploader: "
SONGINFO_DURATION = "Duration: "
SONGINFO_SECONDS = "s"
SONGINFO_LIKES = "Likes: "
SONGINFO_DISLIKES = "Dislikes: "
SONGINFO_NOW_PLAYING = "Now Playing"
SONGINFO_QUEUE_ADDED = "Added to queue"
SONGINFO_SONGINFO = "Song info"
SONGINFO_UNKNOWN_SITE = "Unknown site :question:"
SONGINFO_PLAYLIST_QUEUED = "Queued playlist :page_with_curl:"
SONGINFO_UNKNOWN_DURATION = "Unknown"
HELP_ADDBOT_SHORT = "Add Bot to another server"
HELP_ADDBOT_LONG = "Gives you the link for adding this bot to another server of yours."
HELP_CONNECT_SHORT = "Connect bot to voicechannel"
HELP_CONNECT_LONG = "Connects the bot to the voice channel you are currently in"
HELP_DISCONNECT_SHORT = "Disonnect bot from voicechannel"
HELP_DISCONNECT_LONG = "Disconnect the bot from the voice channel and stop audio."
HELP_SETTINGS_SHORT = "View and set bot settings"
HELP_SETTINGS_LONG = "View and set bot settings in the server. Usage: {}settings setting_name value".format(BOT_PREFIX)
HELP_HISTORY_SHORT = "Show history of songs"
HELP_HISTORY_LONG = "Shows the " + str(MAX_TRACKNAME_HISTORY_LENGTH) + " last played songs."
HELP_PAUSE_SHORT = "Pause Music"
HELP_PAUSE_LONG = "Pauses the AudioPlayer. Playback can be continued with the resume command."
HELP_VOL_SHORT = "Change volume %"
HELP_VOL_LONG = "Changes the volume of the AudioPlayer. Argument specifies the % to which the volume should be set."
HELP_PREV_SHORT = "Go back one Song"
HELP_PREV_LONG = "Plays the previous song again."
HELP_RESUME_SHORT = "Resume Music"
HELP_RESUME_LONG = "Resumes the AudioPlayer."
HELP_SKIP_SHORT = "Skip a song"
HELP_SKIP_LONG = "Skips the currently playing song and goes to the next item in the queue."
HELP_SONGINFO_SHORT = "Info about current Song"
HELP_SONGINFO_LONG = "Shows details about the song currently being played and posts a link to the song."
HELP_STOP_SHORT = "Stop Music"
HELP_STOP_LONG = "Stops the AudioPlayer and clears the songqueue"
HELP_YT_SHORT = "Play a supported link or search on youtube"
HELP_YT_LONG = ("$p [link/video title/key words/playlist-link/soundcloud link/spotify link/bandcamp link/twitter link]")
HELP_PING_SHORT = "Pong"
HELP_PING_LONG = "Test bot response status"
HELP_CLEAR_SHORT = "Clear the queue."
HELP_CLEAR_LONG = "Clears the queue and skips the current song."
HELP_LOOP_SHORT = "Loops the currently playing song, toggle on/off."
HELP_LOOP_LONG = "Loops the currently playing song and locks the queue. Use the command again to disable loop."
HELP_QUEUE_SHORT = "Shows the songs in queue."
HELP_QUEUE_LONG = "Shows the number of songs in queue, up to 10."
HELP_SHUFFLE_SHORT = "Shuffle the queue"
HELP_SHUFFLE_LONG = "Randomly sort the songs in the current queue"
HELP_CHANGECHANNEL_SHORT = "Change the bot channel"
HELP_CHANGECHANNEL_LONG = "Change the bot channel to the VC you are in"
ABSOLUTE_PATH = '' #do not modify | 47.301075 | 120 | 0.784042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,637 | 0.599454 |
a865318eb5fd14ad1923e91f2a7b9f0fc0c823de | 10,709 | py | Python | fromage/utils/mol/_cell_operations.py | Yulin832/fromage | f6c84d5684ca5abfcc979540bb97cc8f105f963d | [
"MIT"
] | null | null | null | fromage/utils/mol/_cell_operations.py | Yulin832/fromage | f6c84d5684ca5abfcc979540bb97cc8f105f963d | [
"MIT"
] | null | null | null | fromage/utils/mol/_cell_operations.py | Yulin832/fromage | f6c84d5684ca5abfcc979540bb97cc8f105f963d | [
"MIT"
] | null | null | null | from copy import deepcopy
import numpy as np
def complete_mol(self, labels):
"""
Take a cell and complete certain molecules
The objective is to end up with a unit cell where the molecules of interest
are complete. The rest of the atoms of the cell must remain intact. Note that
the input atoms are transformed and are the same as are present in the
output.
Parameters
----------
labels : int or list of ints
The number of the atoms from which the molecules are generated
Returns
-------
new_mol : Mol object
The now complete molecule
new_cell : Mol object
The cell with the completed molecule
"""
new_mol, scattered_mol = self.per_select(labels, old_pos=True)
new_cell_atoms = deepcopy(
[a for a in self.atoms if a not in scattered_mol])
new_cell = self.copy()
new_cell.atoms = new_cell_atoms
for atom in new_mol:
new_cell.append(atom.copy())
return new_mol, new_cell
def complete_cell(self):
"""
Return a cell where atoms have been translated to complete all molecules of
the cell
Returns
-------
out_cell : Mol object
The new untruncated cell
full_mol_l : list of Mol objects
Each molecule in the untruncated cell
"""
full_mol_l = []
remaining = self.copy()
while len(remaining) != 0:
full_mol, cell = remaining.complete_mol(0)
full_mol_l.append(full_mol)
remaining = cell
for atom in full_mol:
if atom in remaining:
remaining.remove(atom)
# Convinently, remaining is now an empty Mol
out_cell = remaining
for mol in full_mol_l:
out_cell.extend(mol)
return out_cell, full_mol_l
def supercell(self, trans):
"""
Return a supercell of I x J x K
Parameters
----------
trans : array-like of length 3
Multiplications of the primitive cell
Returns
-------
supercell : Mol object
New supercell with adjusted lattice vectors
"""
import fromage.utils.mol as mol_init
# make the input into a np array
trans = np.array(trans)
new_cell = self.empty_mol()
for a_mult in range(trans[0]):
for b_mult in range(trans[1]):
for c_mult in range(trans[2]):
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def centered_supercell(self, trans, from_origin=False):
"""
Make a bigger supercell out of an input cell.
The cell is multiplied positively and negatively through each lattice
vector so that the supercluster ends up being
(1+2*trans[0])*(1+2*trans[1])*(1+2*trans[2]) times larger. For example if the
input is 1,1,1 for a cubic unit cell, the output will be the original unit
cell surrounded by 26 other unit cells forming a total 3x3x3 cube.
Alternatively, the multiplication can be centered around the origin, a corner of the
unit cell, instead of the centre. In that case the supercluster ends up being
only (2*trans[0])*(2*trans[1])*(2*trans[2])
Parameters
----------
trans : numpy array of length 3
Multiplications of the primitive cell
from_origin : bool
Determines the kind of multiplication. True is corner of the cell as
the center, False is middle of the cell.
Returns
-------
mega_cell : Mol object
The resulting supercell
"""
import fromage.utils.mol as mol_init
trans_series = [0, 0, 0]
for i, tra in enumerate(trans):
if from_origin:
trans_series[i] = list(range(-tra, tra))
else:
trans_series[i] = list(range(-tra, tra + 1))
trans_series = np.array(trans_series)
new_cell = self.empty_mol()
for a_mult in trans_series[0]:
for b_mult in trans_series[1]:
for c_mult in trans_series[2]:
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def trans_from_rad(self, clust_rad):
"""
Generate the translations necessary to encapsulate a sphere of given rad
Parameters
----------
clust_rad : float
Radius defining a sphere
Returns
-------
trans_count : 3 x 1 numpy array
The translations required for the unit cell to contain the sphere
"""
# determine how many unit cells we need
vectors = deepcopy(self.vectors)
# vectors normal to faces
a_perp = np.cross(vectors[1], vectors[2])
b_perp = np.cross(vectors[2], vectors[0])
c_perp = np.cross(vectors[0], vectors[1])
# the three normalised unit vectors
perp = np.array([a_perp / np.linalg.norm(a_perp), b_perp /
np.linalg.norm(b_perp), c_perp / np.linalg.norm(c_perp)])
trans_count = np.array([1, 1, 1])
# distances from faces
distances = np.array([0.0, 0.0, 0.0])
new_vectors = deepcopy(vectors)
for comp in range(3):
while True:
trans_count[comp] += 1
distances[comp] = np.dot(new_vectors[comp], perp[comp])
new_vectors[comp] = trans_count[comp] * vectors[comp]
if distances[comp] > clust_rad:
break
trans_count -= np.array([1, 1, 1])
return trans_count
def make_cluster(self, clust_rad, mode='exc', central_mol=None):
"""
Generate a cluster of molecules from a primitive cell
This first makes a supercell of the correct size which will contain with
one additional buffer shell. Then the sphere is generated from this new
supercell by connectivity.
A central molecule can also be supplied which will turn the spheres
defining the clusters into the union of spheres stemming from each atom
of the central molecule.
Parameters
----------
clust_rad : float
Radius defining a sphere. All molecules with atoms in the sphere are
to be grabbed
mode : str
Switches between inclusive and exclusive selecting. Inclusive,
'inc', selects all molecules which have atoms within the radius.
Exclusive, 'exc', selects all molecules fully in the radius.
Default: false
central_mol : Mol
If this is supplied, the central molecule will act as a kernel for
the cluster which will end up being of the appropriate shape.
Returns
-------
cluster : Mol object
Spherical cluster of molecules from their crystal positions
"""
import fromage.utils.mol as mol_init
# if there is a central mol, account for nearest neighbour molecules
# bleeding out of the original radius
if central_mol:
central_rad = 0
for atom in central_mol:
dis = atom.v_dist([0, 0, 0])
if dis < central_rad:
central_rad = dis
trans = self.trans_from_rad(clust_rad + central_rad)
# get the translations necessary to enclose the required mols
else:
trans = self.trans_from_rad(clust_rad)
# if the cluster is inclusive, then extra mols might be required from
# an additional layer of the supercell
if mode == 'inc':
trans += np.array([1, 1, 1]) # one buffer cell layer
supercell = self.centered_supercell(trans, from_origin=True)
seed_atoms = mol_init.Mol([])
# get seedatoms in the shape of the central mol if pertinent
if central_mol:
for atom_i in supercell:
for atom_j in central_mol:
if atom_i.dist(atom_j) < clust_rad:
seed_atoms.append(atom_i)
break
# get spherical seedatoms
else:
for atom in supercell:
if atom.v_dist([0, 0, 0]) < clust_rad:
seed_atoms.append(atom)
max_mol_len = 0
if mode == 'exc':
while len(seed_atoms) > 0:
mol = seed_atoms.select(0)
if len(mol) > max_mol_len:
max_mol_len = len(mol)
clust_atoms = mol_init.Mol([])
if len(mol) == max_mol_len:
clust_atoms += mol
for atom in mol:
seed_atoms.remove(atom)
if mode == 'inc':
clust_atoms = mol_init.Mol([])
max_mol_len = len(supercell.select(supercell.index(seed_atoms[0])))
while len(seed_atoms) > 0:
# The part of the mol detected in seed_atoms
mol_tmp = seed_atoms.select(0)
if len(mol_tmp) < max_mol_len:
# The whole mol, which could potentially include even more
# seed_atoms
mol = supercell.select(supercell.index(seed_atoms[0]))
else:
mol = mol_tmp
clust_atoms += mol
for atom in mol_tmp:
seed_atoms.remove(atom)
for atom in mol:
supercell.remove(atom)
# remove all atoms of the mol which are part of seed_atoms
try:
seed_atoms.remove(atom)
except ValueError:
pass
return clust_atoms
def centered_mols(self, labels, return_trans=False):
"""
Return the molecules translated at the origin with a corresponding cell
Parameters
----------
labels : int or list of ints
The labels of the atoms to select
print_centro : bool
Print the translation vector which was detected as -centroid
Returns
-------
mol : Mol object
The selected molecules with their centroid at the origin
mod_cell : Mol object
The new confined cell corresponding to the now translated molecules
"""
mol, mod_cell = self.complete_mol(labels)
centro = mol.centroid()
mol.translate(-centro)
mod_cell.translate(-centro)
mod_cell = mod_cell.confined()
if return_trans:
return mol, mod_cell, -centro
else:
return mol, mod_cell
def confined(self):
"""Move all atoms to fit inside the primitive cell"""
frac_mol = self.dir_to_frac_pos()
out_mol = frac_mol.frac_to_dir_pos()
return out_mol
| 31.683432 | 88 | 0.614343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,869 | 0.454664 |
a865732e6447bac58e649e009ab20b43ed2f1cea | 6,260 | py | Python | tests/test_pvpc.py | r-jordan/aiopvpc | 9b38c02e184368121f725599d4768fc1ca4d6218 | [
"MIT"
] | null | null | null | tests/test_pvpc.py | r-jordan/aiopvpc | 9b38c02e184368121f725599d4768fc1ca4d6218 | [
"MIT"
] | null | null | null | tests/test_pvpc.py | r-jordan/aiopvpc | 9b38c02e184368121f725599d4768fc1ca4d6218 | [
"MIT"
] | null | null | null | """Tests for aiopvpc."""
import logging
from asyncio import TimeoutError
from datetime import datetime, timedelta
from unittest.mock import patch
import pytest
from aiohttp import ClientError
from aiopvpc import ESIOS_TARIFFS, PVPCData, REFERENCE_TZ
from .conftest import MockAsyncSession, TZ_TEST
@pytest.mark.parametrize(
"day_str, timezone, num_prices, num_calls, num_prices_8h, available_8h, last_hour",
(
("2019-10-26 00:00:00+08:00", TZ_TEST, 0, 1, 0, False, None),
("2019-10-26 00:00:00", TZ_TEST, 24, 1, 24, True, 23),
("2019-10-27 00:00:00", TZ_TEST, 25, 1, 25, True, 23),
("2019-03-31 20:00:00", TZ_TEST, 23, 2, 23, False, 23),
("2019-03-31 20:00:00+04:00", TZ_TEST, 23, 1, 23, False, 23),
("2019-10-26 21:00:00", TZ_TEST, 49, 2, 26, True, 23),
("2019-10-26 21:00:00+01:00", TZ_TEST, 49, 2, 26, True, 23),
("2019-10-26 00:00:00", REFERENCE_TZ, 24, 1, 24, True, 23),
("2019-10-27 00:00:00", REFERENCE_TZ, 25, 1, 25, True, 23),
("2019-03-31 20:00:00", REFERENCE_TZ, 23, 2, 23, False, 23),
("2019-10-26 21:00:00", REFERENCE_TZ, 49, 2, 25, True, 23),
("2021-06-01 06:00:00", REFERENCE_TZ, 24, 1, 24, True, 23),
),
)
@pytest.mark.asyncio
async def test_price_extract(
day_str, timezone, num_prices, num_calls, num_prices_8h, available_8h, last_hour
):
"""Test data parsing of official API files."""
day = datetime.fromisoformat(day_str)
mock_session = MockAsyncSession()
pvpc_data = PVPCData(
local_timezone=timezone,
tariff="discrimination",
websession=mock_session,
)
pvpc_data.source_available = True
assert not pvpc_data.process_state_and_attributes(day)
assert mock_session.call_count == 0
await pvpc_data.async_update_prices(day)
has_prices = pvpc_data.process_state_and_attributes(day)
assert len(pvpc_data._current_prices) == num_prices
assert mock_session.call_count == num_calls
has_prices = pvpc_data.process_state_and_attributes(day + timedelta(hours=10))
assert len(pvpc_data._current_prices) == num_prices_8h
assert has_prices == available_8h
if has_prices:
last_dt, last_p = list(pvpc_data._current_prices.items())[-1]
assert last_dt.astimezone(timezone).hour == last_hour
@pytest.mark.parametrize(
"available, day_str, num_log_msgs, status, exception",
(
(False, "2032-10-26 00:00:00+00:00", 0, 200, None),
(False, "2032-10-26 00:00:00+00:00", 0, 500, None),
(True, "2032-10-26 00:00:00+00:00", 1, 200, TimeoutError),
(False, "2032-10-26 00:00:00+00:00", 0, 200, TimeoutError),
(True, "2032-10-26 00:00:00+00:00", 1, 200, ClientError),
(False, "2032-10-26 00:00:00+00:00", 0, 200, ClientError),
),
)
@pytest.mark.asyncio
async def test_bad_downloads(
available,
day_str,
num_log_msgs,
status,
exception,
caplog,
):
"""Test data parsing of official API files."""
day = datetime.fromisoformat(day_str)
mock_session = MockAsyncSession(status=status, exc=exception)
with caplog.at_level(logging.INFO):
pvpc_data = PVPCData(
local_timezone=REFERENCE_TZ,
tariff="normal",
websession=mock_session,
)
pvpc_data.source_available = available
assert not pvpc_data.process_state_and_attributes(day)
prices = await pvpc_data.async_update_prices(day)
assert not prices
assert not pvpc_data.process_state_and_attributes(day)
assert len(caplog.messages) == num_log_msgs
assert mock_session.call_count == 1
assert len(prices) == 0
@pytest.mark.parametrize(
"timezone, start, end",
(
(
TZ_TEST,
datetime(2019, 10, 26, 15, tzinfo=TZ_TEST),
datetime(2019, 10, 27, 13, tzinfo=TZ_TEST),
),
(
REFERENCE_TZ,
datetime(2019, 10, 26, 15, tzinfo=REFERENCE_TZ),
datetime(2019, 10, 27, 13, tzinfo=REFERENCE_TZ),
),
),
)
def test_full_data_download_range(timezone, start, end):
"""Test retrieval of full PVPC data in a day range."""
with patch("aiohttp.ClientSession", MockAsyncSession):
pvpc_data = PVPCData(local_timezone=timezone)
prices = pvpc_data.download_prices_for_range(start, end)
assert len(prices) == 24
first_price = min(prices)
last_price = max(prices)
data_first_hour = prices[first_price]
# Check full PVPC data is retrieved
assert len(data_first_hour) == 30
assert all(tag in data_first_hour for tag in ESIOS_TARIFFS)
# Check units have not changed in full data retrieval (they are in €/MWh)
assert all(data_first_hour[tag] > 1 for tag in ESIOS_TARIFFS)
# check tz-alignment (price at 15h is tz-independent)
assert prices[first_price]["NOC"] == 119.16
assert first_price.astimezone(timezone).hour == 15
assert last_price.astimezone(timezone).hour == 13
@pytest.mark.asyncio
async def test_download_range(caplog):
"""Test retrieval of full PVPC data in a day range."""
start = datetime(2019, 10, 26, 15)
end = datetime(2019, 10, 28, 13)
mock_session = MockAsyncSession()
with caplog.at_level(logging.WARNING):
pvpc_data = PVPCData(
tariff="electric_car", local_timezone=TZ_TEST, websession=mock_session
)
prices = await pvpc_data.async_download_prices_for_range(start, end)
assert mock_session.call_count == 3
assert len(prices) == 34
assert len(caplog.messages) == 2
no_prices = await pvpc_data.async_download_prices_for_range(
datetime(2010, 8, 27, tzinfo=TZ_TEST),
datetime(2010, 8, 27, 22, tzinfo=TZ_TEST),
)
assert len(no_prices) == 0
assert len(caplog.messages) == 4
assert not await pvpc_data.async_download_prices_for_range(
datetime(2010, 8, 27), datetime(2010, 8, 27, 23)
)
assert len(caplog.messages) == 7
first_price = min(prices)
assert first_price.hour == 14 and first_price.tzname() == "UTC"
# Check only tariff values are retrieved
assert isinstance(prices[first_price], float)
assert prices[first_price] < 1
| 36.395349 | 87 | 0.65607 | 0 | 0 | 0 | 0 | 5,950 | 0.950176 | 3,145 | 0.502236 | 1,087 | 0.173587 |
a866618eea370a8203f4080686647eec5ef1844f | 1,438 | py | Python | validation.py | Jianxiang-Wang/Pytorch-code-for-time-series-classification | 94151ba27345e7b7fd57e6622c45997f25e7aca2 | [
"MIT"
] | 2 | 2021-10-08T01:50:17.000Z | 2022-03-15T02:56:48.000Z | validation.py | Jianxiang-Wang/Pytorch-code-for-time-series-classification | 94151ba27345e7b7fd57e6622c45997f25e7aca2 | [
"MIT"
] | 1 | 2020-12-07T07:22:21.000Z | 2020-12-07T07:22:21.000Z | validation.py | Jianxiang-Wang/Pytorch-code-for-time-series-classification | 94151ba27345e7b7fd57e6622c45997f25e7aca2 | [
"MIT"
] | 1 | 2021-05-27T03:14:37.000Z | 2021-05-27T03:14:37.000Z | from sklearn import metrics
import torch
from models import *
import torch.backends.cudnn as cudnn
import seaborn as sns
import matplotlib.pyplot as plt
from dataset import load
#define the net
device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = LSTM(3, 10, 2, 3)
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net.load_state_dict(torch.load('./checkpoint/ckpt.pth'))
net = net.module
#loading data
_, _, valloader, classes = load()
def validation():
print(net.classifier)
#print(net)
net.eval()
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(valloader):
inputs, targets = inputs.to(device).float(), targets.to(device)
inputs = inputs.view(-1,300,3)
outputs = net(inputs)
# Confusion Matrix
print("Confusion Matrix...")
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
Accuracy = 100.*correct/total
predicted = predicted.cpu().numpy()
targets = targets.data.cpu().numpy()
cm = metrics.confusion_matrix(targets, predicted)
print(cm)
print('Accuracy=',Accuracy,"%")
figure = plt.figure(figsize=(8, 8))
sns.heatmap(cm, annot=True, cmap='Blues')
plt.ylim(0, 10)
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.show()
if __name__=='__main__':
validation()
| 23.966667 | 71 | 0.66064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.125174 |
a867335bf7412afcc85944e531249bfc53f2c724 | 813 | py | Python | mopidy_spotify_tunigo/__init__.py | trygveaa/mopidy-spotify-tunigo | b33d6df81b8b827859c9288ebedfa71304b98d07 | [
"Apache-2.0"
] | 34 | 2015-02-28T14:19:22.000Z | 2019-09-19T14:55:32.000Z | mopidy_spotify_tunigo/__init__.py | trygveaa/mopidy-spotify-tunigo | b33d6df81b8b827859c9288ebedfa71304b98d07 | [
"Apache-2.0"
] | 6 | 2015-01-12T12:07:13.000Z | 2019-12-12T23:01:37.000Z | mopidy_spotify_tunigo/__init__.py | trygveaa/mopidy-spotify-tunigo | b33d6df81b8b827859c9288ebedfa71304b98d07 | [
"Apache-2.0"
] | 3 | 2015-05-22T17:24:43.000Z | 2017-04-11T17:57:42.000Z | from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '1.0.0'
class Extension(ext.Extension):
dist_name = 'Mopidy-Spotify-Tunigo'
ext_name = 'spotify_tunigo'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['region'] = config.String(optional=True)
schema['sub_genres'] = config.Boolean()
schema['cache_time'] = config.Integer(minimum=0, optional=True)
return schema
def setup(self, registry):
from .backend import SpotifyTunigoBackend
registry.add('backend', SpotifyTunigoBackend)
| 26.225806 | 71 | 0.688807 | 703 | 0.864699 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.119311 |
a8677db0cad4db9e12e3834fb3fdc9ca873604ad | 3,647 | py | Python | apps/log_search/tasks/project.py | yiqiwang-17/bk-log | 7b356fced63b667baea300cfd194ad70a842c3ee | [
"MIT"
] | null | null | null | apps/log_search/tasks/project.py | yiqiwang-17/bk-log | 7b356fced63b667baea300cfd194ad70a842c3ee | [
"MIT"
] | null | null | null | apps/log_search/tasks/project.py | yiqiwang-17/bk-log | 7b356fced63b667baea300cfd194ad70a842c3ee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
项目同步
1. 从CC拉取业务列表及人员信息
2. 更新项目信息
3. 更新用户组信息
"""
from celery.schedules import crontab # noqa
from celery.task import periodic_task # noqa
from django.conf import settings # noqa
from apps.utils.log import logger # noqa
from apps.log_search.handlers.biz import BizHandler # noqa
from apps.log_search.models import ProjectInfo # noqa
from apps.utils.db import array_chunk # noqa
from apps.utils.lock import share_lock # noqa
@periodic_task(run_every=crontab(minute="*/1"), queue="sync")
@share_lock()
def sync():
    """Periodic entry point: mirror CMDB business info into projects.

    Runs every minute under a shared lock. Returns True when a sync was
    performed, False when the feature is disabled via
    ``settings.USING_SYNC_BUSINESS``.
    """
    if not settings.USING_SYNC_BUSINESS:
        return False
    # Sync CMDB business information.
    sync_projects()
    return True
def sync_projects():
    """
    Sync CMDB business information into ProjectInfo records.

    Creates rows for newly discovered businesses, restores soft-deleted
    rows, keeps project names in sync, and deletes projects whose
    business no longer exists in CMDB.

    Returns:
        bool: True on success, False when the business list is empty.
    """
    businesses = BizHandler.list()
    if not businesses:
        logger.error("[log_search][tasks]get business error")
        return False
    objs = []
    # Known CMDB projects: mapping of bk_biz_id -> project_id.
    projects = ProjectInfo.get_cmdb_projects()
    # Walk every business returned by CMDB.
    for business in businesses:
        bk_biz_id = int(business["bk_biz_id"])
        if not projects.get(bk_biz_id):
            # New business: queue a ProjectInfo row for bulk creation.
            objs.append(
                ProjectInfo(
                    project_name=business["bk_biz_name"],
                    bk_biz_id=business["bk_biz_id"],
                    bk_app_code=settings.APP_CODE,
                    time_zone=business.get("time_zone", settings.TIME_ZONE),
                )
            )
        else:
            # Existing business: undelete if it was soft-deleted.
            has_deleted = ProjectInfo.objects.filter(bk_biz_id=bk_biz_id, is_deleted=True)
            if has_deleted:
                has_deleted.update(is_deleted=False)
            # Keep project_name in sync with CMDB.
            ProjectInfo.objects.filter(bk_biz_id=bk_biz_id).exclude(project_name=business["bk_biz_name"]).update(
                project_name=business["bk_biz_name"]
            )
            # Whatever remains in `projects` after the loop is stale.
            del projects[int(business["bk_biz_id"])]
    if objs:
        # Insert in chunks to avoid oversized bulk statements.
        chunks = array_chunk(objs)
        for chunk in chunks:
            ProjectInfo.objects.bulk_create(chunk)
        logger.info("[log_search][tasks]sync business nums: {}".format(len(objs)))
    if projects:
        # Businesses that disappeared from CMDB: drop their projects.
        ProjectInfo.objects.filter(project_id__in=projects.values()).delete()
    logger.info(
        "[sync_projects] businesses=>{}, sync=>{}, delete=>{}".format(len(businesses), len(objs), len(projects))
    )
    return True
| 37.597938 | 113 | 0.675898 | 0 | 0 | 0 | 0 | 218 | 0.057565 | 0 | 0 | 1,900 | 0.501716 |
a86ab6894db4939b7b687a294448c9e44447c480 | 8,421 | py | Python | hawkdet/dataset/transformers.py | itisianlee/hawk-facedet | 55774ac5619f9a4c76a3a872ff11940a874b32d1 | [
"Apache-2.0"
] | null | null | null | hawkdet/dataset/transformers.py | itisianlee/hawk-facedet | 55774ac5619f9a4c76a3a872ff11940a874b32d1 | [
"Apache-2.0"
] | null | null | null | hawkdet/dataset/transformers.py | itisianlee/hawk-facedet | 55774ac5619f9a4c76a3a872ff11940a874b32d1 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
import random
from ..lib.box_utils import matrix_iof
class RandomCrop:
def __init__(self, image_size=(640, 640), iof_factor=1.0, min_face=16):
self.image_size = image_size
self.iof_factor = iof_factor # iof(IoF(forgrand))
self.min_face = min_face
self.pre_scales = [0.3, 0.45, 0.6, 0.8, 1.0]
def __call__(self, item):
img = item.get('image')
bboxes = item.get('bboxes')
labels = item.get('labels')
lmks = item.get('landmarks', None)
img_h, img_w, _ = img.shape
for _ in range(250):
scale = random.choice(self.pre_scales)
short_side = min(img_h, img_w)
side_len = int(scale * short_side)
l = np.random.randint(0, img_w-side_len+1)
t = np.random.randint(0, img_h-side_len+1)
roi = np.array((l, t, l+side_len, t+side_len))
value = matrix_iof(bboxes, roi[np.newaxis])
flag = (value >= self.iof_factor)
if not flag.any():
continue
centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2
mask = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
bboxes_t = bboxes[mask].copy()
labels_t = labels[mask].copy()
lmks_t = lmks[mask].copy()
lmks_t = lmks_t.reshape([-1, 5, 2])
if bboxes_t.shape[0] == 0:
continue
img_t = img[roi[1]:roi[3], roi[0]:roi[2]]
bboxes_t[:, :2] = np.maximum(bboxes_t[:, :2], roi[:2])
bboxes_t[:, :2] -= roi[:2]
bboxes_t[:, 2:] = np.minimum(bboxes_t[:, 2:], roi[2:])
bboxes_t[:, 2:] -= roi[:2]
# landm
lmks_t[:, :, :2] = lmks_t[:, :, :2] - roi[:2]
lmks_t[:, :, :2] = np.maximum(lmks_t[:, :, :2], np.array([0, 0]))
lmks_t[:, :, :2] = np.minimum(lmks_t[:, :, :2], roi[2:] - roi[:2])
lmks_t = lmks_t.reshape([-1, 10])
# make sure that the cropped image contains at least one face > 16 pixel at training image scale
b_w_t = (bboxes_t[:, 2] - bboxes_t[:, 0] + 1) / side_len * self.image_size[0]
b_h_t = (bboxes_t[:, 3] - bboxes_t[:, 1] + 1) / side_len * self.image_size[1]
mask = np.minimum(b_w_t, b_h_t) > self.min_face
bboxes_t = bboxes_t[mask]
labels_t = labels_t[mask]
lmks_t = lmks_t[mask]
if bboxes_t.shape[0] == 0:
continue
return {
'image': img_t,
'bboxes': bboxes_t,
'labels': labels_t,
'landmarks': lmks_t
}
return {
'image': img,
'bboxes': bboxes,
'labels': labels,
'landmarks': lmks
}
class RandomDistort:
    """Photometric augmentation: random brightness/contrast/saturation/hue.

    Follows the SSD-style distortion recipe: contrast is applied either
    before (first branch) or after (second branch) the HSV distortions,
    each individual distortion firing with probability 0.5.
    """

    def __call__(self, item):
        img = item.get('image')

        def _convert(image, alpha=1, beta=0):
            # In-place linear transform with clamping to [0, 255].
            tmp = image.astype(float) * alpha + beta
            tmp[tmp < 0] = 0
            tmp[tmp > 255] = 255
            image[:] = tmp

        image = img.copy()
        if random.randrange(2):
            #brightness distortion
            if random.randrange(2):
                _convert(image, beta=random.uniform(-32, 32))
            #contrast distortion
            if random.randrange(2):
                _convert(image, alpha=random.uniform(0.5, 1.5))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            #saturation distortion
            if random.randrange(2):
                _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
            #hue distortion
            if random.randrange(2):
                tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
                tmp %= 180
                image[:, :, 0] = tmp
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            #brightness distortion
            if random.randrange(2):
                _convert(image, beta=random.uniform(-32, 32))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            #saturation distortion
            if random.randrange(2):
                _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
            #hue distortion
            if random.randrange(2):
                tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
                tmp %= 180
                image[:, :, 0] = tmp
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
            #contrast distortion
            if random.randrange(2):
                _convert(image, alpha=random.uniform(0.5, 1.5))
        item['image'] = image
        return item
class Pad:
    """Pad a non-square image to a square canvas filled with ``img_mean``.

    The image is placed at the top-left corner of a ``long_side`` square
    canvas; the padded area holds the per-channel mean colour so that
    later mean-subtraction/normalisation is neutral there.
    """

    def __init__(self, img_mean=(104, 111, 120)):
        # Tuple default avoids the shared-mutable-default pitfall of a list.
        self.img_mean = img_mean

    def __call__(self, item):
        img = item.get('image')
        height, width, _ = img.shape
        if height == width:
            # Already square: nothing to do.
            return item
        long_side = max(width, height)
        image_t = np.empty((long_side, long_side, 3), dtype=img.dtype)
        image_t[:, :] = self.img_mean
        image_t[0:0 + height, 0:0 + width] = img
        # Bug fix: store the padded canvas, not the original image
        # (previously `item['image'] = img` discarded the padding).
        item['image'] = image_t
        return item
class RandomFlip:
    """Horizontally mirror the image, boxes and landmarks with prob. 0.5.

    After mirroring, left/right landmark pairs (eyes: indices 0/1,
    mouth corners: indices 3/4) are swapped so semantics are preserved.
    """

    def __call__(self, item):
        img = item.get('image')
        bboxes = item.get('bboxes')
        lmks = item.get('landmarks', None)
        _, width, _ = img.shape
        if random.randrange(2):
            img = cv2.flip(img, 1)
            # Mirror boxes: x coordinates reflect around the image width.
            bboxes = bboxes.copy()
            bboxes[:, 0::2] = width - bboxes[:, 2::-2]
            # Mirror landmarks, then swap left/right point pairs.
            lmks = lmks.copy()
            lmks = lmks.reshape([-1, 5, 2])
            lmks[:, :, 0] = width - lmks[:, :, 0]
            tmp = lmks[:, 1, :].copy()
            lmks[:, 1, :] = lmks[:, 0, :]
            lmks[:, 0, :] = tmp
            tmp1 = lmks[:, 4, :].copy()
            lmks[:, 4, :] = lmks[:, 3, :]
            lmks[:, 3, :] = tmp1
            lmks = lmks.reshape([-1, 10])
        item['image'] = img
        item['bboxes'] = bboxes
        item['landmarks'] = lmks
        return item
class Resize:
    """Resize the image to ``image_size`` and rescale boxes/landmarks."""

    def __init__(self, image_size=(640, 640)):  # (height, width)
        self.image_size = image_size

    def box_resize(self, img_h, img_w, bboxes=None):
        """Scale xyxy boxes in place from (img_h, img_w) to image_size."""
        scale_x = self.image_size[1] / img_w
        scale_y = self.image_size[0] / img_h
        if bboxes is None:
            return None
        bboxes *= [scale_x, scale_y, scale_x, scale_y]
        return bboxes

    def lmk_resize(self, img_h, img_w, lmks=None):
        """Scale flattened 5-point landmarks in place to image_size."""
        scale_x = self.image_size[1] / img_w
        scale_y = self.image_size[0] / img_h
        if lmks is None:
            return None
        lmks *= ([scale_x, scale_y] * 5)
        return lmks

    def __call__(self, item):
        img = item.get('image')
        src_h, src_w, _ = img.shape
        # Randomly pick one of five interpolation methods (augmentation).
        methods = (cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA,
                   cv2.INTER_NEAREST, cv2.INTER_LANCZOS4)
        chosen = methods[random.randrange(5)]
        resized = cv2.resize(img, self.image_size[::-1], interpolation=chosen)
        item['image'] = resized.astype(np.uint8)
        item['bboxes'] = self.box_resize(src_h, src_w, item.get('bboxes'))
        item['landmarks'] = self.lmk_resize(src_h, src_w, item.get('landmarks', None))
        return item
class ImageT:
    """Convert an HWC image to CHW layout (channels first)."""

    def __call__(self, item):
        # (H, W, C) -> (C, H, W)
        item['image'] = item.get('image').transpose(2, 0, 1)
        return item
class Normalize:
    """Standardise pixel values: (image - mean) / std."""

    def __init__(self, image_mean, image_std):
        self.image_mean = image_mean
        self.image_std = image_std

    def __call__(self, item):
        normalised = (item.get('image') - self.image_mean) / self.image_std
        item['image'] = normalised
        return item
class Compose:
    """Apply a sequence of transforms to an item, left to right."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, item):
        result = item
        for step in self.transforms:
            result = step(result)
        return result
def build_transforms(image_size, image_mean, image_std, iof_factor=1.0, min_face=16):
    """Build the training augmentation pipeline for face detection.

    Order: crop -> photometric distort -> pad square -> flip ->
    normalize -> resize -> channels-first. Note that Normalize runs
    before Resize in this pipeline.
    """
    transforms = Compose([
        RandomCrop(image_size, iof_factor, min_face),
        RandomDistort(),
        Pad(image_mean),
        RandomFlip(),
        Normalize(image_mean, image_std),
        Resize(image_size),
        ImageT(),
    ])
    return transforms
a86bd19cc05762dfd9e1c34a405531b5248d3abb | 7,553 | py | Python | mwlib/utoken.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 53 | 2015-02-17T16:20:06.000Z | 2022-03-18T09:22:00.000Z | mwlib/utoken.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 23 | 2015-01-30T16:26:20.000Z | 2022-03-11T23:26:03.000Z | mwlib/utoken.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 19 | 2015-01-21T13:55:46.000Z | 2019-02-23T23:14:31.000Z | #! /usr/bin/env python
# Copyright (c) 2007-2009 PediaPress GmbH
# See README.rst for additional licensing information.
# unified/universal token
import sys
import re
import _uscan as _mwscan
from mwlib.refine.util import resolve_entity, parseParams
def walknode(node, filt=lambda x: True):
    """Recursively yield every token in *node* matching *filt*.

    *node* may be a single token or a nested sequence of tokens; a
    matching token is yielded before its children.
    """
    if not isinstance(node, token):
        # A sequence of nodes: recurse into each element.
        for x in node:
            for k in walknode(x):
                if filt(k):
                    yield k
        return
    if filt(node):
        yield node
    if node.children:
        for x in node.children:
            for k in walknode(x):
                if filt(k):
                    yield k
def walknodel(node, filt=lambda x: True):
    """Like walknode, but return the matching tokens as a list."""
    return list(walknode(node, filt=filt))
def show(node, out=None, indent=0, verbose=False):
    """Pretty-print a token tree to *out* (default stdout), one node per line."""
    if node is None:
        return
    if out is None:
        out = sys.stdout
    if not isinstance(node, token):
        # A sequence of nodes: print each at the current indent level.
        for x in node:
            show(x, out=out, indent=indent, verbose=verbose)
        return
    out.write("%s%r\n" % ("    " * indent, node))
    children = node.children
    if children:
        for x in children:
            show(x, out=out, indent=indent + 1, verbose=verbose)
class _show(object):
    """Descriptor making ``show`` usable from both the class and an instance.

    ``token.show(node)`` prints an arbitrary node; ``some_token.show()``
    prints that token's own subtree.
    """
    def __get__(self, obj, type=None):
        if obj is None:
            return lambda node, out=None: show(node, out=out)
        else:
            return lambda out=None: show(obj, out=out)
class token(object):
    """Unified/universal token produced by the MediaWiki scanner.

    Default attribute values live on the class; instances override them
    via ``__init__`` keyword arguments. The lazily-sliced ``text``
    property reads from ``source[start:start+len]`` on first access.
    """
    caption = ''
    vlist = None
    target = None
    level = None
    children = None
    rawtagname = None
    tagname = None
    ns = None
    lineprefix = None
    interwiki = None
    langlink = None
    namespace = None
    blocknode = False
    # image attributes
    align = None
    thumb = False
    frame = None
    # Token type constants (some have two aliases).
    t_end = 0
    t_text = 1
    t_entity = 2
    t_special = 3
    t_magicword = 4
    t_comment = 5
    t_2box_open = 6
    t_2box_close = 7
    t_http_url = 8
    t_break = 9
    t_begintable = t_begin_table = 10
    t_endtable = t_end_table = 11
    t_html_tag = 12
    t_singlequote = 13
    t_pre = 14
    t_section = 15
    t_endsection = t_section_end = 16
    t_item = 17
    t_colon = 18
    t_semicolon = 19
    t_hrule = 20
    t_newline = 21
    t_column = 22
    t_row = 23
    t_tablecaption = 24
    t_urllink = 25
    t_uniq = 26
    t_html_tag_end = 100
    # Filled in at module import time with a type-number -> name mapping.
    token2name = {}
    _text = None
    @staticmethod
    def join_as_text(tokens):
        """Concatenate the text of *tokens*, treating None text as empty."""
        return u"".join([x.text or u"" for x in tokens])
    def _get_text(self):
        # Lazily slice the token text out of the source string.
        if self._text is None and self.source is not None:
            self._text = self.source[self.start:self.start + self.len]
        return self._text
    def _set_text(self, t):
        self._text = t
    text = property(_get_text, _set_text)
    def __init__(self, type=None, start=None, len=None, source=None, text=None, **kw):
        self.type = type
        self.start = start
        self.len = len
        self.source = source
        if text is not None:
            self.text = text
        # Any extra keyword arguments become instance attributes.
        self.__dict__.update(kw)
    def __repr__(self):
        if isinstance(self, token):
            r = [self.token2name.get(self.type, self.type)]
        else:
            r = [self.__class__.__name__]
        if self.text is not None:
            r.append(repr(self.text)[1:])
        if self.tagname:
            r.append(" tagname=")
            r.append(repr(self.tagname))
        if self.rawtagname:
            r.append(" rawtagname=")
            r.append(repr(self.rawtagname))
        if self.vlist:
            r.append(" vlist=")
            r.append(repr(self.vlist))
        if self.target:
            r.append(" target=")
            r.append(repr(self.target))
        if self.level:
            r.append(" level=")
            r.append(repr(self.level))
        if self.ns is not None:
            r.append(" ns=")
            r.append(repr(self.ns))
        if self.lineprefix is not None:
            r.append(" lineprefix=")
            r.append(self.lineprefix)
        if self.interwiki:
            r.append(" interwiki=")
            r.append(repr(self.interwiki))
        if self.langlink:
            r.append(" langlink=")
            r.append(repr(self.langlink))
        # NOTE(review): t_complex_style is not defined in this file; if it
        # is not injected elsewhere, this line raises AttributeError --
        # confirm against the full mwlib sources.
        if self.type == self.t_complex_style:
            r.append(repr(self.caption))
        elif self.caption:
            r.append("->")
            r.append(repr(self.caption))
        return u"".join(r)
    show = _show()
# Populate token.token2name with a number -> "t_..." name mapping by
# reflecting over the class's t_* constants, then clean up the helpers.
token2name = token.token2name
for d in dir(token):
    if d.startswith("t_"):
        token2name[getattr(token, d)] = d
del d, token2name
def _split_tag(txt):
m = re.match(" *(\w+)(.*)", txt, re.DOTALL)
assert m is not None, "could not match tag name"
name = m.group(1)
values = m.group(2)
return name, values
def _analyze_html_tag(t):
    """Parse an html-tag token in place: tag name, attributes, end/self-closing flags.

    Sets ``rawtagname``, ``vlist``, ``tag_selfClosing`` and
    ``tag_isEndToken`` on *t*, and rewrites the token type to
    ``t_html_tag_end`` for closing tags.
    """
    text = t.text
    selfClosing = False
    if text.startswith(u"</"):
        # Closing tag: "</name>"
        name = text[2:-1]
        isEndToken = True
    elif text.endswith("/>"):
        # Self-closing tag: "<name/>"
        name = text[1:-2]
        selfClosing = True
        isEndToken = False  # ???
    else:
        # Opening tag: "<name ...>"
        name = text[1:-1]
        isEndToken = False
    name, values = _split_tag(name)
    t.vlist = parseParams(values)
    name = name.lower()
    # <br> never acts as an end token.
    if name == 'br':
        isEndToken = False
    t.rawtagname = name
    t.tag_selfClosing = selfClosing
    t.tag_isEndToken = isEndToken
    if isEndToken:
        t.type = t.t_html_tag_end
def dump_tokens(text, tokens):
    """Debug helper: print each (type, text-slice) pair. Python 2 syntax."""
    for type, start, len in tokens:
        print type, repr(text[start:start + len])
def scan(text):
    """Run the C scanner over *text*; pads with NULs as a sentinel for the scanner."""
    text += u"\0" * 32
    return _mwscan.scan(text)
class _compat_scanner(object):
    """Wrap the raw scanner, converting its output into token objects.

    Resolves entities to text, parses html tags (keeping only an
    allow-list of tags, downgrading the rest to plain text), and splits
    leading colons off table-begin tokens.
    """
    # Lazily initialised allow-list of html tag names.
    allowed_tags = None
    def _init_allowed_tags(self):
        self.allowed_tags = set("""
abbr b big blockquote br center cite code del div em endfeed font h1 h2 h3
h4 h5 h6 hr i index inputbox ins kbd li ol p pages references rss s small span
startfeed strike strong sub sup caption table td th tr tt u ul var dl dt dd
""".split())
    def __call__(self, text, uniquifier=None):
        if self.allowed_tags is None:
            self._init_allowed_tags()
        # Python 2: coerce byte strings to unicode.
        if isinstance(text, str):
            text = unicode(text)
        tokens = scan(text)
        res = []
        def g():
            # Text slice for the token currently being processed.
            return text[start:start + tlen]
        for type, start, tlen in tokens:
            if type == token.t_begintable:
                # Leading colons before "{|" become a separate colon token.
                txt = g()
                count = txt.count(":")
                if count:
                    res.append(token(type=token.t_colon, start=start, len=count, source=text))
                    tlen -= count
                    start += count
            t = token(type=type, start=start, len=tlen, source=text)
            if type == token.t_entity:
                # Entities are resolved and become plain text.
                t.text = resolve_entity(g())
                t.type = token.t_text
                res.append(t)
            elif type == token.t_html_tag:
                s = g()
                if uniquifier:
                    s = uniquifier.replace_uniq(s)
                t.text = s
                _analyze_html_tag(t)
                tagname = t.rawtagname
                if tagname in self.allowed_tags:
                    res.append(t)
                else:
                    # Disallowed tag: keep it as literal text.
                    res.append(token(type=token.t_text, start=start, len=tlen, source=text))
            else:
                res.append(t)
        return res
# Module-level singleton scanner instance.
compat_scan = _compat_scanner()
def tokenize(input, name="unknown", uniquifier=None):
    """Tokenize wikitext *input* into a list of token objects."""
    assert input is not None, "must specify input argument in tokenize"
    return compat_scan(input, uniquifier=uniquifier)
| 24.683007 | 94 | 0.558983 | 4,988 | 0.6604 | 388 | 0.05137 | 100 | 0.01324 | 0 | 0 | 643 | 0.085132 |
a86d0966eb30256383853d63973d591b55852258 | 6,565 | py | Python | MaskDetection/video.py | fossabot/IoT_COVID19-Detector_CO-vision | 8f45ddcbbbc17f24f7629fa344e3f375c3bd879b | [
"MIT"
] | null | null | null | MaskDetection/video.py | fossabot/IoT_COVID19-Detector_CO-vision | 8f45ddcbbbc17f24f7629fa344e3f375c3bd879b | [
"MIT"
] | null | null | null | MaskDetection/video.py | fossabot/IoT_COVID19-Detector_CO-vision | 8f45ddcbbbc17f24f7629fa344e3f375c3bd879b | [
"MIT"
] | null | null | null | #Data Set을 만들어서 학습.
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
import json
import requests
# KakaoTalk custom-template message endpoint: https://kapi.kakao.com/v2/api/talk/memo/send
talk_url = "https://kapi.kakao.com/v2/api/talk/memo/send"
get_friend_list_url = 'https://kapi.kakao.com/v1/api/talk/friends'
send_me_url = 'https://kapi.kakao.com/v2/api/talk/memo/default/send'
send_friend_url = 'https://kapi.kakao.com/v1/api/talk/friends/message/default/send'
# Kakao user tokens (placeholder values)
token = 'REST API' #REST API
accessToken = 'accessToken'
headers = {
    "Authorization": 'Bearer ' + str(accessToken).format(
        token=token
    )
}
# Fetch the Kakao friend list. Needs revision.
def getFriendList(accessToken) :
    """Query the Kakao friends API and return the first friend's UUID."""
    payload = ''
    headers = {
        'Content-Type' : "application/x-www-form-urlencoded",
        'Cache-Control' : "no-cache",
        'Authorization' : "Bearer " + str(accessToken),
    }
    response = requests.request("GET",get_friend_list_url,data=payload, headers=headers)
    #print(response)
    friend_List = json.loads(((response.text).encode('utf-8')))
    friend_UUID_List = []
    elements = response.json().get('elements')
    for element in elements:
        #print(element.get("uuid"))
        friend_UUID_List.append(element.get("uuid"))
    #print(friend_UUID_List)
    return friend_UUID_List[0]
facenet = cv2.dnn.readNet('models/deploy.prototxt', 'models/res10_300x300_ssd_iter_140000.caffemodel')
# Face detector model > OpenCV DNN
model = load_model('models/mask_detector.model')
# Mask detector model > Keras model
cap = cv2.VideoCapture('imgs/03.mp4')
# Load the video
# Pass 0 instead of a path to capture the laptop webcam in real time!
ret, img = cap.read()
# ret is True when a frame was read
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
out = cv2.VideoWriter('output.mp4', fourcc, 1, (img.shape[1], img.shape[0]))
# cv2.VideoWriter(outputFile, fourcc, frame, size): fourcc is the codec info, frame is the fps to save at, size is the output size; use e.g. cv2.VideoWriter_fourcc('D','I','V','X')
# The current test video runs at 25 fps
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break
    h, w = img.shape[:2]
    blob = cv2.dnn.blobFromImage(img, scalefactor=1., size=(300, 300), mean=(104., 177., 123.))
    # Preprocessing: use the same params OpenCV's FaceNet was trained with; converts the image into the form the DNN expects.
    # cv2.dnn.blobFromImage does: 1. mean subtraction / 2. scaling (resize) / 3. optionally channel swapping.
    # (104.0, 177.0, 123.0) are the empirically optimal mean-subtraction values; mean subtraction removes part of the RGB signal so the DNN can analyse a simpler input.
    # (300, 300): the input size the model is fixed to for CNN processing.
    facenet.setInput(blob) # feed the converted image into FaceNet
    dets = facenet.forward() # store the face-detection results
    result_img = img.copy()
    # After detecting faces, compute each region's mask probability and draw it.
    for i in range(dets.shape[2]): # loop over the stored detections; dets.shape[2] is the model's max box count (200), so up to 200 faces can be recognised
        confidence = dets[0, 0, i, 2]
        # How confident the detection is.
        # dets[0, 0] holds the attributes of the candidate boxes;
        # i is the current box, and attribute index 2 is the probability this is a face.
        if confidence < 0.5:
            continue
        x1 = int(dets[0, 0, i, 3] * w) # compute the bounding box
        y1 = int(dets[0, 0, i, 4] * h)
        x2 = int(dets[0, 0, i, 5] * w)
        y2 = int(dets[0, 0, i, 6] * h)
        #print(i, confidence, x1, y1, x2, y2)  # i: face index, confidence: probability it is a face, then the coordinates
        face = img[y1:y2, x1:x2] # keep only the face via the bounding box
        # Predict whether a mask is worn.
        # Preprocessing steps:
        face_input = cv2.resize(face, dsize=(224, 224)) # resize to the model input size
        face_input = cv2.cvtColor(face_input, cv2.COLOR_BGR2RGB) # convert the colour system
        face_input = preprocess_input(face_input) # same preprocessing as MobileNetV2
        face_input = np.expand_dims(face_input, axis=0) # shape is (224,224,3); the model needs (1,224,224,3), so add a batch dim
        mask, nomask = model.predict(face_input).squeeze() # get mask/no-mask probabilities from the loaded model
        if mask > nomask:
            color = (0, 255, 0)
            label = 'Mask %d%%' % (mask * 100)
        else:
            color = (0, 0, 255)
            label = 'No Mask %d%%' % (nomask * 100)
        # Draw the result for this face; earlier faces are already drawn on result_img.
        cv2.rectangle(result_img, pt1=(x1, y1), pt2=(x2, y2), thickness=2, color=color, lineType=cv2.LINE_AA)
        # Write the mask probability (stored in label) above the current face region.
        cv2.putText(result_img, text=label, org=(x1, y1 - 10), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8, color=color, thickness=2, lineType=cv2.LINE_AA)
        # When the no-mask probability exceeds the threshold:
        if nomask >= 0.75:
            # Save a snapshot of the person.
            cv2.imwrite(str(i)+'_'+str('No Mask %d%%' % (nomask * 100)) + '.jpg', result_img)
            temperature = 36.5 # placeholder: no temperature sensor variable yet
            # Build the message payload as JSON and send it.
            message_description = '해당인원 온도 :' + str(temperature) + '\n마스크 미착용 확률 : ' + str('%d%%' % (nomask * 100))
            template = {
                "object_type": "feed",
                "content": {
                    "image_url": "IMAGE_URL, 클라이언트의 사진을 가져오거나 서버의 사진을 가져오기가 아닌 URL상에서 가져와야함",
                    "title": "이상증상자 및 마스크 미착용자 식별",
                    "description": message_description,
                    "image_width": 640,
                    "image_height": 640,
                    "link": {
                        "web_url": "http://www.daum.net",
                        "mobile_web_url": "http://m.daum.net",
                    }
                }
            }
            data = {
                # Example UUIDs:
                # MAIwCT4JPggkFiAVJhIhFCMbNwM6CzsLPnY
                # MAIzAjYFNQcxHSgaLh8qHi4aNgI7CjoKP28
                # TODO: should use UUIDs obtained from the friend list instead of a hard-coded value.
                'receiver_uuids': '["MAIzAjYFNQcxHSgaLh8qHi4aNgI7CjoKP28"]',
                "template_object": json.dumps(template)
            }
            # Send the message and check for errors.
            response = requests.post(send_friend_url, headers=headers, data=data)
            print(response.status_code)
            if response.json().get('result_code') == 0:
                print('메시지를 성공적으로 보냈습니다.')
            else:
                print('메시지를 성공적으로 보내지 못했습니다. 오류메시지 : ' + str(response.json()))
    out.write(result_img)
    cv2.imshow('result', result_img) # show the live monitoring window
    if cv2.waitKey(1) == ord('q'): # press q to stop
        break
out.release()
cap.release()
a86d609155584012cf3856785ca737b87484f6b4 | 3,387 | py | Python | objects_detector.py | AlejandroGonzalR/image-object-detection | a1320190902e9157f34f82f6b7bf46921ee92fe4 | [
"MIT"
] | null | null | null | objects_detector.py | AlejandroGonzalR/image-object-detection | a1320190902e9157f34f82f6b7bf46921ee92fe4 | [
"MIT"
] | null | null | null | objects_detector.py | AlejandroGonzalR/image-object-detection | a1320190902e9157f34f82f6b7bf46921ee92fe4 | [
"MIT"
] | null | null | null | import getopt
import sys
import cv2
import numpy as np
# Module-level accumulators shared between detect_objects() and
# show_detected_objects(); they are not reset between runs.
class_ids = []
confidences = []
boxes = []
min_confidence = 0.5  # minimum score to keep a detection
target_name = "truck"  # COCO class name to highlight
def main(argv):
    """CLI entry point: parse -i/--input-file, run detection, show results."""
    input_image = ''
    try:
        opts, args = getopt.getopt(argv, 'i:', ["input-file"])
    except getopt.GetoptError:
        print('python3 objects_detector.py -i <input-file>')
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-i", "--input-file"):
            input_image = arg
    net, output_layers, classes = load_net()
    image, blob, height, width, channels = load_image(input_image)
    detect_objects(net, blob, output_layers, height, width)
    show_detected_objects(image, classes)
# Load YOLO network into CV2 with COCO names
def load_net():
    """Load the YOLOv3 network, its output layer names and COCO class list."""
    # Weights are available in YOLO website, please check README.md for more information
    net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
    with open("coco.names", "r") as f:
        classes = [line.strip() for line in f]
    layer_names = net.getLayerNames()
    # Unconnected output layers are the YOLO detection layers (1-based indices).
    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    return net, output_layers, classes
# Loads input image, resize them and generate Blob
def load_image(input_file):
    """Read and downscale the input image; return it with its YOLO input blob."""
    img = cv2.imread(input_file)
    img = cv2.resize(img, None, fx=0.4, fy=0.4)
    height, width, channels = img.shape
    # 1/255 scale, 416x416 network input, BGR->RGB swap, no crop.
    blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    return img, blob, height, width, channels
# Performs object detection based on Blob
def detect_objects(net, blob, output_layers, height, width):
    """Run YOLO on *blob*, appending boxes/scores/class ids to the module globals."""
    # Debug display of each blob channel.
    for b in blob:
        for n, img_blob in enumerate(b):
            cv2.imshow(str(n), img_blob)
    net.setInput(blob)
    outs = net.forward(output_layers)
    for out in outs:
        for detection in out:
            # detection = [cx, cy, w, h, objectness, class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > min_confidence:
                # Object detected position and size
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # Rectangle object delimiter coordinates
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)
# Show obtained results in input image
def show_detected_objects(image, classes):
    """Draw NMS-filtered detections of the target class and display the image."""
    # Performs non maximum suppression given boxes and corresponding scores
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    for i in range(len(boxes)):
        if i in indexes:
            # Only annotate detections matching target_name.
            if str(classes[class_ids[i]]) == target_name:
                target_label = "{0} ({1} %)".format("Carga pesada", round(confidences[i] * 100, 2))
                x, y, w, h = boxes[i]
                label = target_label
                color = (255, 0, 0)
                cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
                cv2.putText(image, label, (x, y + 30), font, 1, color, 1)
                print(label)
    cv2.imshow("Image", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Skip the program name when forwarding CLI arguments.
    main(sys.argv[1:])
| 31.073394 | 99 | 0.600827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 578 | 0.170652 |
a86d72d1fa9d273bd8fa78ace8fa385b2f849513 | 4,826 | py | Python | pynmmso/listeners/trace_listener.py | wood-chris/pynmmso | e13f8139160421a9d3f7e650ad6f988c9244ca69 | [
"MIT"
] | 5 | 2019-06-01T06:21:25.000Z | 2021-11-17T18:43:43.000Z | pynmmso/listeners/trace_listener.py | wood-chris/pynmmso | e13f8139160421a9d3f7e650ad6f988c9244ca69 | [
"MIT"
] | null | null | null | pynmmso/listeners/trace_listener.py | wood-chris/pynmmso | e13f8139160421a9d3f7e650ad6f988c9244ca69 | [
"MIT"
] | 3 | 2019-10-01T11:24:06.000Z | 2021-09-23T17:20:03.000Z | from pynmmso.listeners.base_listener import BaseListener
class TraceListener(BaseListener):
"""
Listener used to trace the progress of the Nmmso algorithm.
Arguments
---------
level : int
The amount of detail to output in the trace. Level 1 is the least information and
level 5 is the most. Default is level 2.
"""
def __init__(self, level=2):
self.nmmso = None
self.iteration_number = 1
self.evaluations = 0
self.level = level
super().__init__()
def set_nmmso(self, nmmso):
self.nmmso = nmmso
def iteration_started(self):
if self.level >= 3:
print(80*"=")
print("Starting iteration {}".format(self.iteration_number))
def location_evaluated(self, location, value):
self.evaluations += 1
if self.level >= 5:
print("Evaluation {}: location {}, value is {}".format(
self.evaluations, location, value))
def swarm_peak_changed(self, swarm, old_location, old_value):
if self.level >= 3:
print("Swarm {} has found a new peak at location {} with value {}, "
"old location was {} old value was {}".format(
swarm.id, swarm.mode_location, swarm.mode_value, old_location, old_value))
def swarm_created_at_random(self, new_swarm):
if self.level >= 3:
print("Created swarm {} at random location {}, value is {}".format(
new_swarm.id, new_swarm.mode_location, new_swarm.mode_value))
def swarm_created_from_crossover(self, new_swarm, parent_swarm1, parent_swarm2):
if self.level >= 3:
print("Created swarm {} by crossover of swarms {} and {} at location {}, "
"value is {}".format(
new_swarm.id, parent_swarm1.id, parent_swarm2.id,
new_swarm.mode_location, new_swarm.mode_value, ))
def merging_started(self):
if self.level >= 4:
print("Merging swarms...")
def merged_close_swarms(self, swarm1, swarm2):
if self.level >= 3:
print("Merged swarm {} into swarm {} as they were close".format(swarm2.id, swarm1.id))
def merged_saddle_swarms(self, swarm1, swarm2):
if self.level >= 3:
print("Merged swarm {} into swarm {} as midpoint was fitter".format(
swarm2.id, swarm1.id))
def merging_ended(self):
if self.level >= 4:
print("Finished merging swarms")
def incrementing_swarms_started(self):
if self.level >= 4:
print("Incrementing swarms...")
def swarm_added_particle(self, swarm):
if self.level >= 4:
print("Added particle to swarm {}, it now has {} particles".format(
swarm.id, swarm.number_of_particles))
def swarm_moved_particle(self, swarm):
if self.level >= 4:
print("Moved particle of swarm {}".format(swarm.id))
def incrementing_swarms_ended(self):
if self.level >= 4:
print("Finished incrementing swarms")
def hiving_swams_started(self):
if self.level >= 4:
print("Hiving swarms...")
def hiving_new_swarm(self, new_swarm, parent_swarm):
if self.level >= 3:
print("Hiving new swarm {} from swarm {}".format(new_swarm.id, parent_swarm.id))
def hiving_swarms_ended(self):
if self.level >= 4:
print("Finishing hiving swarms")
def iteration_ended(
self, n_new_locations, n_mid_evals, n_evol_modes, n_rand_modes, n_hive_samples):
total_this_iteration = \
n_new_locations + n_mid_evals + n_evol_modes + n_rand_modes + n_hive_samples
if self.level >= 1:
print("Finished iteration {}, evaluations this iteration: {}, total evaluations: {}, "
"number of swarms: {}".format(
self.iteration_number,
total_this_iteration,
self.nmmso.evaluations,
len(self.nmmso.swarms)))
if self.level >= 3:
print(" This iteration: new location evals = {} mid evals = {} evol modes = {} "
"rand modes = {} hive samples = {}".format(
n_new_locations, n_mid_evals, n_evol_modes, n_rand_modes, n_hive_samples))
if self.level >= 2:
for swarm in self.nmmso.swarms:
print("Swarm {} : location: {} value {}".format(
swarm.id, swarm.mode_location, swarm.mode_value))
self.iteration_number += 1
def max_evaluations_reached(self):
if self.level >= 1:
print("Maximum number of evaluations reached. Total evaluations: {}".format(
self.nmmso.evaluations))
| 37.123077 | 98 | 0.588065 | 4,766 | 0.987567 | 0 | 0 | 0 | 0 | 0 | 0 | 1,234 | 0.255698 |
a86da01bc5deadfbef4d95de97a5e5217a078c02 | 500 | py | Python | spotify_dashboard/spotify/models.py | timmyomahony/spotify-picture-frame | 259799b27da331341b0d860885be0aec091e32ff | [
"MIT"
] | 1 | 2020-11-03T11:04:22.000Z | 2020-11-03T11:04:22.000Z | spotify_dashboard/spotify/models.py | timmyomahony/spotify-picture-frame | 259799b27da331341b0d860885be0aec091e32ff | [
"MIT"
] | null | null | null | spotify_dashboard/spotify/models.py | timmyomahony/spotify-picture-frame | 259799b27da331341b0d860885be0aec091e32ff | [
"MIT"
] | null | null | null | import time
from django.db import models
from django.utils.timesince import timesince
class Track(models.Model):
    """A Spotify track record cached for display."""
    # The Spotify track ID is used directly as the primary key.
    id = models.CharField(max_length=30, primary_key=True)
    artist = models.CharField(max_length=500)
    album = models.CharField(max_length=500)
    title = models.CharField(max_length=500)
    image = models.URLField()  # artwork image URL
    href = models.URLField()  # link to the track
    data = models.JSONField()  # raw API payload for this track
    published = models.BooleanField(default=True)
    def __str__(self):
        return self.title
| 26.315789 | 58 | 0.722 | 410 | 0.82 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a86f5448dbeefa6a311488498e7648ee7614b136 | 2,758 | py | Python | tests/test_controller.py | yehzhang/dscraper | 6fd1a4238795e9eb01b9dd8329a84495a70979d1 | [
"Apache-2.0"
] | 1 | 2017-08-13T09:50:06.000Z | 2017-08-13T09:50:06.000Z | tests/test_controller.py | yehzhang/dscraper | 6fd1a4238795e9eb01b9dd8329a84495a70979d1 | [
"Apache-2.0"
] | null | null | null | tests/test_controller.py | yehzhang/dscraper | 6fd1a4238795e9eb01b9dd8329a84495a70979d1 | [
"Apache-2.0"
] | null | null | null | import unittest
import logging
import asyncio
import datetime
from pytz import timezone
import dscraper
from dscraper.utils import FrequencyController
logger = logging.getLogger(__name__)
from .utils import Test
# Small offset used to nudge time-window boundaries in the test configs.
EPS = 1e-6
class TestController(Test):
    """Tests for FrequencyController's wait/allow behaviour."""

    INVERTAL = 0.2
    # (?, interval, start_hour, end_hour, ?) controller configs.
    CONFIG_NONE = (0, 0, 0, EPS, None)
    CONFIG_ALL_DAY_NONE = (0, 0, 0, 0, None)
    CONFIG_INVALID = (0, INVERTAL, -0.1, 22, None)
    CONFIG_INVALID2 = (0, INVERTAL, 0, 24.1, None)

    def setUp(self):
        # Controller whose active window covers the whole day.
        self.all_time = FrequencyController((0, self.INVERTAL, 0, 0, None))

    def wait_once(self, controller):
        """Run one wait() call to completion; returns whether it blocked."""
        return self.loop_until_complete(controller.wait())

    def test_wait(self):
        none_time = FrequencyController(self.CONFIG_NONE)
        all_none_time = FrequencyController(self.CONFIG_ALL_DAY_NONE)
        for cont in (self.all_time, none_time, all_none_time):
            self.assertFalse(self.wait_once(cont), 'First wait blocked')
        self.assertTrue(self.wait_once(self.all_time), 'False negative')
        self.assertFalse(self.wait_once(none_time), 'False positive')
        self.assertFalse(self.wait_once(all_none_time), 'False positive')

    def test_now_wait(self):
        # Build windows around the current wall-clock time.
        now = datetime.datetime.now()
        start = end = now.hour + now.minute / 60 + now.second / 3600
        current = FrequencyController((0, self.INVERTAL, start - EPS, end + EPS, None))
        pos_offset = FrequencyController((0, self.INVERTAL, start - EPS, end - EPS, None))
        neg_offset = FrequencyController((0, self.INVERTAL, start + EPS, end + EPS, None))
        for cont in (current, pos_offset, neg_offset):
            self.assertFalse(self.wait_once(cont), 'First wait blocked')
        self.assertTrue(self.wait_once(current), 'False negative')
        self.assertFalse(self.wait_once(pos_offset), 'False positive')
        self.assertFalse(self.wait_once(neg_offset), 'False positive')

    # NOTE(review): this method previously shared the name test_sequential
    # with the definition below, so it was silently shadowed and never ran.
    # Renamed with a leading underscore to keep runtime behaviour identical
    # while making the dead test visible; it uses release()/gather(), which
    # may be a stale API -- confirm before re-enabling it as a test.
    def _shadowed_test_sequential_release(self):
        self.wait_once(self.all_time)
        self.all_time.release()
        self.assertCountEqual(self.gather(self.all_time.wait(), self.all_time.wait()), [
            True, False], 'not released and acquired')

    def test_sequential(self):
        self.wait_once(self.all_time)
        self.assertTrue(self.wait_once(self.all_time), 'unblock before freed')
        self.all_time.free()
        self.assertFalse(self.wait_once(self.all_time), 'not freed')

    def test_invalid(self):
        def create_invalid(config):
            # Constructing with a bad config must raise ValueError.
            try:
                FrequencyController(config)
            except ValueError:
                pass
            else:
                self.fail('Incorrect value check')
        create_invalid(self.CONFIG_INVALID)
        create_invalid(self.CONFIG_INVALID2)
| 36.289474 | 90 | 0.662437 | 2,530 | 0.917331 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.079405 |
a8702cc446c2c224995b88f883f3be48de00d5d0 | 4,510 | py | Python | src/tools/dev/scripts/hooks/exceptions.py | cstatz/visit | f352f3984fa77392e81acbaa6943778a779f0435 | [
"BSD-3-Clause"
] | null | null | null | src/tools/dev/scripts/hooks/exceptions.py | cstatz/visit | f352f3984fa77392e81acbaa6943778a779f0435 | [
"BSD-3-Clause"
] | null | null | null | src/tools/dev/scripts/hooks/exceptions.py | cstatz/visit | f352f3984fa77392e81acbaa6943778a779f0435 | [
"BSD-3-Clause"
] | 1 | 2020-03-18T23:17:43.000Z | 2020-03-18T23:17:43.000Z | #!/usr/bin/python
# (Small) Modification of some code from Cyrus which ensures we have a proper
# number of TRY/ENDTRY lines.
# There is some code here to search for invalid characters which isn't
# enabled/used right now.
import sys
import subprocess
import re
#
# Helper Methods
#
def parse_extension(file):
"Returns the extension of a file name"
if file.rfind(".") >=0:
return file[file.rfind(".")+1:]
def subexe(cmd):
"Executes a system command and returns the output"
sys.stderr.write("cmd: " + cmd)
return subprocess.Popen(cmd.split(), stdout=subprocess.PIPE).communicate()[0]
#
# svnlook wrapper class
#
class SvnLook(object):
"Wraps svnlook useful commands"
def __init__(self,repo_path,svnlook_path):
self.repo = repo_path
self.look = svnlook_path
def changed_files(self,rev):
"""
Returns files changed/added at a given rev.
Supports both transaction and revision numbers.
"""
cmd = self.look + " changed " + self.repo + " "
if rev.find("-") >=0 :
cmd += "--transaction "
else:
cmd += "--revision "
cmd += rev
lines = [ l.strip() for l in subexe(cmd).split("\n") ]
files = []
stats = []
for l in lines:
if len(l) >0 :
tok = l.split();
stats.append(tok[0])
files.append(tok[1])
return files, stats
def file_contents(self,file, rev = None):
"""
Returns the contents of a file at a given rev.
Supports both transation and revision numbers.
"""
cmd = self.look + " cat " + self.repo + " "
if not rev is None:
if rev.find("-") >=0 :
cmd += "--transaction " + rev + " "
else:
cmd += "--revision " + rev + " "
cmd += file
return subexe(cmd)
#
# Tests
#
class SvnTest(object):
"Test base class"
def __init__(self,svnlook,rev):
self.svnlook = svnlook
self.rev = rev
def execute(self,file):
"Base test method"
return False
class TestInvalidChars(SvnTest):
"Test that checks for invalid characters in *.C files"
def __init__(self,svnlook,rev):
SvnTest.__init__(self,svnlook,rev)
def execute(self,file):
"Checks for tabs & windows newlines in *.C files"
if not parse_extension(file) == "C":
return True
ok = True
line = 0
lines = self.svnlook.file_contents(file,self.rev).split("\n")
for l in lines:
if l.find("\t") >=0:
sys.stderr.write("[%s line %d] error: found tab char\n" % (file,line))
ok = False
if l[-1:] == "\r":
sys.stderr.write("[%s line %d] error: found windows newline\n" % (file,line))
ok = False
line +=1
return ok
class TestExceptions(SvnTest):
"Test that checks for proper try/endtry blocks in *.C files"
def __init__(self,svnlook,rev):
SvnTest.__init__(self,svnlook,rev)
def execute(self,file):
"Checks for proper try/endtry blocks in *.C files"
if not parse_extension(file) == "C":
return True
# read file contents
lines = self.svnlook.file_contents(file,self.rev)
# count TRYs & ENDTRYs
trys = len( [m.start() for m in re.finditer(re.escape("TRY"), lines)])
endtrys = len( [m.start() for m in re.finditer(re.escape("ENDTRY"), lines)])
# TRYs are counted with ENDTRYS, correct for this:
trys += -endtrys
if trys == endtrys:
return True
else:
sys.stderr.write("[%s] error: # of TRYs (%d) != ENDTRYs (%d)\n" %(file,trys,endtrys))
return False
if __name__ == "__main__":
# check for proper # of args
if len(sys.argv) < 3:
sys.stderr.write("usage: pre-commit [repo_path] [transaction_id]\n")
sys.exit(-1)
# get the repo path and transaction id
repo_path = sys.argv[1]
trans_id = sys.argv[2]
# create svnlook wrapper
svnlook = SvnLook(repo_path,"/usr/bin/svnlook")
# get changed files
files, status = svnlook.changed_files(trans_id)
ok = True
for f in files:
if not TestExceptions(svnlook, trans_id).execute(f):
ok = False
if not ok:
sys.stderr.write("error: failed one or more svn pre-commit tests!")
sys.exit(-1)
| 30.066667 | 97 | 0.567184 | 3,171 | 0.703104 | 0 | 0 | 0 | 0 | 0 | 0 | 1,485 | 0.329268 |
a870e6bfb15f35b40b1115c9d96dd6062d8a09ff | 513 | py | Python | mmdet/models/utils/__init__.py | dandelin/mmdetection | 03e1c72f1bb6222bdf9af3bfe60946cf008c7143 | [
"Apache-2.0"
] | null | null | null | mmdet/models/utils/__init__.py | dandelin/mmdetection | 03e1c72f1bb6222bdf9af3bfe60946cf008c7143 | [
"Apache-2.0"
] | null | null | null | mmdet/models/utils/__init__.py | dandelin/mmdetection | 03e1c72f1bb6222bdf9af3bfe60946cf008c7143 | [
"Apache-2.0"
] | null | null | null | from .conv_ws import conv_ws_2d, ConvWS2d
from .conv_module import build_conv_layer, ConvModule
from .norm import build_norm_layer
from .scale import Scale
from .weight_init import (
xavier_init,
normal_init,
uniform_init,
kaiming_init,
bias_init_with_prob,
)
__all__ = [
"conv_ws_2d",
"ConvWS2d",
"build_conv_layer",
"ConvModule",
"build_norm_layer",
"xavier_init",
"normal_init",
"uniform_init",
"kaiming_init",
"bias_init_with_prob",
"Scale",
]
| 19.730769 | 53 | 0.692008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.296296 |
a87166a60d65966ddedb5ce1324d6d79ae3b0c7c | 17,050 | py | Python | arghphot/arghphot.py | balbinot/arghphot | ebfa8da444d2b43d68248bb450dfdc49890e0794 | [
"MIT"
] | 1 | 2016-05-21T21:11:21.000Z | 2016-05-21T21:11:21.000Z | arghphot/arghphot.py | balbinot/arghphot | ebfa8da444d2b43d68248bb450dfdc49890e0794 | [
"MIT"
] | null | null | null | arghphot/arghphot.py | balbinot/arghphot | ebfa8da444d2b43d68248bb450dfdc49890e0794 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- coding: utf-8 -*-
## temp imports
from matplotlib import pyplot as p
from matplotlib import cm
import tempfile
import numpy as np
from astropy.io import fits, ascii
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy import wcs
import aplpy
from pyraf import iraf
# Logger
from logutil import *
# Loading necessary IRAF packages and configurations
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
iraf.apphot(_doprint=0)
iraf.reset(min_lenuserarea='200000')
__all__ = ['Frame']
def index_by_last_column_entry(M, keys):
colkeys = M[:,-1]
sorter = np.argsort(colkeys)
index = np.searchsorted(colkeys, keys, sorter = sorter)
return M[sorter[index]]
class Frame(mylogger):
def __init__(self, fname, ext, mask, logfn='last.log'):
## Initialize DAOPHOT
self.base = './'
import daophot
daophot.set_default(self.base)
## Initiate logger utility
self.sdict={}
self.log = mylogger(self.sdict, logfn)
self.fname = fname
self.pfname = fname.split('/')[-1]
self.iname = "%s[%i]" % (fname, ext)
self.ext = ext
# Read image and maks
self.hdu = self.read_fits(fname, ext)
self.mask = self.read_fits(mask, ext)
self.read_prim(fname) # Some infos are only found in the primary HDU
self.read_wcs()
# Change with fwhm estimator routine. Bellow for processed DECAM data
self.fwhm = self.hdup.header['FWHMAV']
self.fwhmph = self.hdup.header['FWHMAV']*(3600*self.hdup.header['CDELT2'])
self.high = 35000
## Utility names
self.daofindfn = '%s%d.coo.1' % (self.pfname, ext)
self.photfn = '%s%d.mag.2' % (self.pfname, ext)
self.pstfile = '%s%d.pst.1' % (self.pfname, ext)
self.fitpsffn = '%s%d.fitpsf.1' % (self.pfname, ext)
self.guess = '%s%d.guess.1' % (self.pfname, ext)
self.psfgridname = '%s%d.psfgrid.png' % (self.pfname, ext)
self.psf = '%s%d.psf.1.fits' % (self.pfname, ext)
self.psfimg = '%s%d.psf.1.img.fits' % (self.pfname, ext)
self.psfselectplot = '%s%d.psfselect.png' % (self.pfname, ext)
def pix2sky(self, x, y):
return self.wcs.wcs_pix2world(np.array([x,y]).T, 1)
def read_fits(self, fname, ext):
self.log(1, 'READ', 1, 'Reading %s[%i]' % (fname, ext))
return fits.open(fname, memmap=True)[ext]
def read_prim(self, fname):
self.log(1, 'READP', 1, 'Reading %s header' % (fname))
self.hdup = fits.open(fname, memmap=True)[0]
def read_wcs(self):
self.log(1, 'WCS READ', 1, 'Reading WCS for %s[%i]' % (self.fname, self.ext))
self.wcs = wcs.WCS(self.hdu.header)
def findsky(self, scl, nwin, rerun=False):
"""
Find sky statistics at random windows.
Window size set by scl (square) and number of windows by nwin
"""
a = self.hdu.data
m = self.mask.data
avoid = 100*scl
s = scl*0.5
xsize = a.shape[1]
ysize = a.shape[1]
b = np.empty((nwin,4))
for i in xrange(nwin):
x = y = -10
while x < avoid or x > xsize-avoid or y < avoid or y > ysize-avoid:
x = 1 + int(np.random.rand()*xsize)
y = 1 + int(np.random.rand()*ysize)
tmp = a[y-s:y+s, x-s:x+s]
tmpm = m[y-s:y+s, x-s:x+s]
if np.any(tmpm < 1):
b[i,0] = np.nan
b[i,1] = np.nan
b[i,2] = np.nan
b[i,3] = np.nan
else:
b[i,0] = np.sum(tmp)/(scl*scl)
b[i,1] = np.mean(tmp)
b[i,2] = np.median(tmp)
b[i,3] = np.std(tmp)
sigma = np.nanmedian(b[:,3])
sky = np.nanmedian(b[:,0])
self.sigma = sigma
self.sky = sky
self.log(1, 'SKY', sky, 'Sky value median: %lf' % sky)
self.log(1, 'SKYSIGMA', sigma, 'Sky variance median: %lf' % sigma)
iraf.datapars.setParam('sigma', sigma)
iraf.fitskypars.setParam('skyvalu', sky)
def run_daofind(self, coofn):
iraf.daofind.setParam('image',self.iname)
iraf.datapars.setParam('fwhmpsf',self.fwhm)
iraf.daofind.setParam('output', coofn)
iraf.daofind(mode='h',Stdout=1)
return coofn
def run_phot(self, coofn, photfn):
iraf.phot.setParam('coords', coofn)
iraf.phot.setParam('output', photfn)
iraf.phot.setParam('image',self.iname)
iraf.fitskypars.setParam('skyvalue',self.sky)
iraf.fitskypars.setParam('annulus',4.*self.fwhm)
iraf.fitskypars.setParam('dannulus',2.*self.fwhm)
iraf.photpars.setParam('zmag', self.hdup.header['MAGZPT']) # Use DECAM estimate of zeropoint
iraf.phot(mode='h',Stdout=1)
def trim_phot(self, photfn, outfn):
a = ascii.read(photfn)
std = a['STDEV']
sum = a['SUM']/a['AREA']
sky = a['MSKY']
sn = np.abs(sum-sky)/std
i = sn > 3
tmp = a[i]
tmp.write(outfn, format='ascii')
return outfn, np.where(i)[0]
def run_fitpsf(self, coofn, outfn, guessfn):
# select some guess stars for PSF building
# Based on median magnitude from apperture phot
daofind = np.loadtxt(coofn, usecols=(0,1,2))
i = (daofind[:,2] < np.median(daofind[:,2]) + 0.12)&(daofind[:,2] > np.median(daofind[:,2]) - 0.12)
np.savetxt(guessfn, daofind[i,0:2], fmt=['%-10.3f','%-10.3f'])
iraf.fitpsf.setParam('image', self.iname)
iraf.fitpsf.setParam('output', outfn) # preliminary psf fit
iraf.fitpsf.setParam('coords', guessfn)
iraf.fitpsf(mode='h',Stdout=1)
def merge(self, trimphotfn, daofindfn, fitpsffn):
self.log(1, 'MERGED', '1', '%d Will merge %s, %s, %s by ID' % (self.ext, trimphotfn, daofindfn, fitpsffn))
## Use trimmed photometry to avoid variable sky background spurious
## detections
# x,y, msky, stdev, sum, area, mag, merr, id
# mags = np.loadtxt(self.photfn+'trim', usecols=(6,7,14,15,26,27,29,30,3), skiprows=1)
mags = np.genfromtxt(trimphotfn, usecols=(6,7,14,15,26,27,29,30,3), skip_header=1, dtype='|S5')
j = True
for i in np.arange(mags.shape[1]):
j *= (mags[:,i] != '--')
mags = mags[j]
mags = mags.astype(np.float64)
tf = tempfile.NamedTemporaryFile(dir=self.base)
iraf.txdump(textfile=daofindfn,
fields='sharpness,sround,ground,id',
expr='sharpness!=INDEF && sround!=INDEF && ground!=INDEF',
Stdout=tf.name+'coo.meh')
daofind = np.loadtxt(tf.name+'coo.meh')
tf = tempfile.NamedTemporaryFile(dir=self.base)
iraf.txdump(textfile=fitpsffn,
fields='rsigma,id',
expr='rsigma!=INDEF && rsigma < 7.0',
Stdout=tf.name+'psf.meh')
fitpsf = np.loadtxt(tf.name+'psf.meh')
## I have no idea how this works
I = reduce(lambda l,r: np.intersect1d(l,r,False), (i[:,-1] for i in
(mags, daofind,
fitpsf)))
mags = index_by_last_column_entry(mags, I)
fitpsf = index_by_last_column_entry(fitpsf, I)
daofind = index_by_last_column_entry(daofind, I)
oo = np.c_[mags[np.searchsorted(mags[:,-1], I)],
daofind[np.searchsorted(daofind[:,-1], I)],
fitpsf[np.searchsorted(fitpsf[:,-1], I)]]
tf = 'joinedforpsf%s.%d.dat' % ('DEBUG', self.ext)
np.savetxt(tf, oo, fmt='%lf')
return oo
def select_psf(self, trimphotfn, coofn, fitpsffn, outfn, mlimt=1.2, sepmult=12, checkcom=True):
f = self.merge(trimphotfn, coofn, fitpsffn)
w = self.pix2sky(f[:,0], f[:,1])
coo = SkyCoord(w[:,0]*u.deg, w[:,1]*u.deg)
nid, nsep2, _ = coo.match_to_catalog_sky(coo, nthneighbor=2)
x = f[:,0]
y = f[:,1]
id = f[:,-1]
sky = f[:,2]
skystd = f[:,3]
mag = f[:,6]
merr = f[:,7]
sharp = f[:,9]
fwhm = f[:,13]
self.maglim = np.mean(mag) - mlimt
self.merrlim = 0.08
self.sharplim = 0.04
#self.fwhmlimup = 1.15*self.fwhm/2.355
#self.fwhmlimlow = 0.55*self.fwhm/2.355
self.fwhmlimup = np.mean(fwhm) + 0.2*np.std(fwhm)
self.fwhmlimlow = np.mean(fwhm) - np.std(fwhm)
p.figure(figsize=(12,12))
p.subplot(331)
p.xlabel('mag')
p.hist(mag, range=[12,32], bins=30, color='k', alpha=0.6)
p.axvline(x=self.maglim, c='k')
p.subplot(332)
p.xlabel('merr')
p.hist(merr, range=[0,0.5], bins=30, color='k', alpha=0.6)
p.axvline(x=self.merrlim, c='k')
p.subplot(333)
p.xlabel('sharpness')
p.hist(sharp, range=[-0.5,0.5], bins=30, color='k', alpha=0.6)
p.axvline(x=np.median(sharp), c='k')
p.axvline(x=np.median(sharp)+self.sharplim, ls='--', c='k')
p.axvline(x=np.median(sharp)-self.sharplim, ls='--', c='k')
p.subplot(334)
p.xlabel('fwhm [px]')
p.hist(fwhm, range=[1,10], bins=30, color='k', alpha=0.6)
p.axvline(x=self.fwhmlimup, ls='--', c='k')
p.axvline(x=self.fwhmlimlow, ls='--', c='k')
p.subplot(335)
p.xlabel('separation [arcsec]')
p.hist(nsep2.arcsec, bins=30, color='k', alpha=0.6)
p.axvline(x=12*self.fwhmph, ls='--', c='k')
p.subplot(336)
p.xlabel('sky std [counts]')
p.hist(skystd, bins=30, color='k', alpha=0.6)
p.axvline(x=np.median(skystd) - np.std(skystd), ls='--', c='k')
p.axvline(x=np.median(skystd) + np.std(skystd), ls='--', c='k')
p.axvline(x=np.median(skystd), ls='-', c='k')
p.subplot(337)
p.xlabel('sky [counts]')
p.hist(sky, bins=30, color='k', alpha=0.6)
p.axvline(x=np.mean(sky) - np.std(sky), ls='--', c='k')
p.axvline(x=np.mean(sky) + np.std(sky), ls='--', c='k')
p.axvline(x=np.mean(sky), ls='-', c='k')
p.savefig(self.psfselectplot)
## Set of constrains for PSF stars
i = (mag < self.maglim)
i *= (merr < self.merrlim)
i *= (np.abs(sharp-np.median(sharp)) < self.sharplim)
i *= (fwhm < self.fwhmlimup)
i *= (fwhm > self.fwhmlimlow)
i *= (nsep2.arcsec > sepmult*self.fwhmph)
i *= (x > 60*self.fwhm)*(y > 60*self.fwhm)
i *= (x < self.hdu.data.shape[1] - 60*self.fwhm)
i *= (y < self.hdu.data.shape[0] - 60*self.fwhm)
#i *= (np.abs(skystd - np.median(skystd)) < np.std(skystd))
#i *= (np.abs(sky - np.mean(sky)) < np.std(sky))
if len(id[i]) <= 2:
self.log(3, 'NPSF', len(id[i]), 'Number of PSF stars less than 2')
else:
self.log(1, 'NPSF', len(id[i]), '%d Number of PSF stars is %i' % (self.ext, len(id[i])))
fid, fx, fy, fmag, fsky = self.cutbad(id[i], x[i], y[i], mag[i], sky[i], checkcom)
self._parse_pst(fid, fx, fy, fmag, fsky, outfn)
return (fid, fx, fy, fmag, fsky), f
# self._parse_pst(id[i], x[i], y[i], mag[i], sky[i], outfn)
# return (id[i], x[i], y[i], mag[i], sky[i]), f
def tvmark(self, ra, dec):
gc = aplpy.FITSFigure(self.hdu)
gc.show_grayscale(stretch='arcsinh')
gc.set_tick_labels_font(size='small')
gc.show_markers(ra,dec,layer='scatter',edgecolor='red',
facecolor='none',marker='o',s=10,alpha=0.5)
def _parse_pst(self, id, x, y, mag, msky, pstfile):
pstfile = open(pstfile, 'w')
pstfile.write("#N ID XCENTER YCENTER MAG MSKY \\\n")
pstfile.write("#U ## pixels pixels magnitudes counts \\\n")
pstfile.write("#F %-9d %-10.3f %-10.3f %-12.3f %-15.7g \n")
pstfile.write("#\n")
np.savetxt(pstfile, np.array([id, x,y, mag, msky]).T,
fmt=['%-9d','%-10.3f','%-10.3f','%-12.3f','%-15.7g'])
pstfile.close()
def cutbad(self, id, x, y, mag, sky, checkcom=True):
rad = int(6*self.fwhm)
ID = []
X = []
Y = []
MAG = []
SKY = []
for i in np.arange(len(x)):
xbox = int(x[i] - rad)
Xbox = int(x[i] + rad)
ybox = int(y[i] - rad)
Ybox = int(y[i] + rad)
block = self.hdu.data[ybox:Ybox,xbox:Xbox]
xx = np.arange(block.shape[1])
yy = np.arange(block.shape[0])
xc = block.shape[1]/2.
yc = block.shape[0]/2.
rr = np.sqrt((xx[:, None]-xc)**2 + (yy[None, :]-yc)**2) # None is a trick to increase dimensions of boolean array
j = (rr > 3*self.fwhm)
if np.any(block > self.high):
print 'star %d at %d %d eliminated: global high value nearby' % (id[i], x[i], y[i])
elif np.any(block < self.sky - 6*self.sigma):
print 'star %d at %d %d eliminated: global low value nearby' % (id[i], x[i], y[i])
elif np.any(block[j] > self.sky + 5*self.sigma) & checkcom==True:
print 'star %d at %d %d eliminated: contaminating object' % (id[i], x[i], y[i])
else:
ID.append(id[i])
X.append(x[i])
Y.append(y[i])
MAG.append(mag[i])
SKY.append(sky[i])
ID = np.array(ID)
X = np.array(X)
Y = np.array(Y)
MAG = np.array(MAG)
SKY = np.array(SKY)
return (ID, X, Y, MAG, SKY)
def grid_psf(self, pstfile, gridname):
from mpl_toolkits.axes_grid1 import ImageGrid
id, x, y = np.loadtxt(pstfile, usecols=(0,1,2), unpack=True)
side = int(np.ceil(np.sqrt(len(x))))
rad = int(6*self.fwhm)
fig = p.figure(figsize=(12,12))
grid = ImageGrid(fig, 111,
nrows_ncols = (side, side),
axes_pad = 0.0,
share_all=True,
label_mode = "L",
cbar_location = "right",
cbar_mode=None,
# cbar_size="5%",
# cbar_pad="5%",
aspect = True
)
for i in np.arange(len(x)):
xbox = int(x[i] - rad)
Xbox = int(x[i] + rad)
ybox = int(y[i] - rad)
Ybox = int(y[i] + rad)
block = self.hdu.data[ybox:Ybox,xbox:Xbox]
grid[i].imshow(block.T, origin='lower', cmap=cm.gray,
vmin=self.sky-5*self.sigma, vmax=300,
interpolation='nearest')
p.savefig(gridname)
def run_psf(self, base, ext, photfn):
fwhm = self.fwhm
iraf.daopars.setParam('matchra',fwhm)
iraf.daopars.setParam('psfrad',4*fwhm+1)
iraf.daopars.setParam('fitrad',fwhm)
iraf.daopars.setParam('sannulu',2*fwhm)
iraf.daopars.setParam('wsannul',4*fwhm)
iraf.psf.setParam('image',self.iname)
iraf.psf.setParam('photfile', photfn)
iraf.psf.setParam('pstfile', '%s.%d.pst.1' % (base, ext))
iraf.psf.setParam('psfimage', '%s.%d.psf.1' % (base, ext))
iraf.psf.setParam('opstfile', '%s.%d.psj.1' % (base, ext))
iraf.psf.setParam('groupfil', '%s.%d.psg.1' % (base, ext))
iraf.psf(mode='h')
iraf.seepsf(psfimage='%s.%d.psf.1.fits'%(base, ext),
image='%s.%d.psf.1.img.fits'%(base, ext), magnitu='18.0')
def run_allstar(self, base, ext):
fwhm = self.fwhm
iraf.daopars.setParam('matchra',fwhm)
iraf.daopars.setParam('psfrad',4*fwhm+1)
iraf.daopars.setParam('fitrad',fwhm)
iraf.daopars.setParam('sannulu',2*fwhm)
iraf.daopars.setParam('wsannul',4*fwhm)
iraf.allstar.setParam('image',self.iname)
iraf.allstar.setParam('photfile', '%s.%d.mag.1' % (base, ext))
iraf.allstar.setParam('psfimage', '%s.%d.psf.1' % (base, ext))
iraf.allstar.setParam('allstarf', '%s.%d.als.1' % (base, ext))
iraf.allstar.setParam('rejfile', '%s.%d.arj.1' % (base, ext))
iraf.allstar.setParam('subimage', '%s.%d.sub.1' % (base, ext))
iraf.allstar(mode='h',verbose='no')
if __name__=='__main__':
fname = "/scratch/gc_survey/raw_data/c4d_150715_013102_osi_g_v1.fits"
tmp = Frame(fname, 2, 'bunda.log')
tmp.findsky(10, 1000)
#tmp.run_daofind()
#tmp.run_phot()
#tmp.trim_phot()
#tmp.run_fitpsf()
#f = tmp.select_psf()
#tmp.grid_psf()
#tmp.run_psf()
tmp.run_allstar()
#t = tmp.pix2sky(f[1], f[2])
#tmp.tvmark(t[:,0], t[:,1])
#p.show()
| 37.888889 | 125 | 0.528504 | 15,893 | 0.932141 | 0 | 0 | 0 | 0 | 0 | 0 | 3,531 | 0.207097 |
a8719a8d286f6a06b3cf89fb0e6e20d2209d3663 | 74,138 | py | Python | core/domain/exp_domain_test.py | imrk51/oppia | 615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c | [
"Apache-2.0"
] | null | null | null | core/domain/exp_domain_test.py | imrk51/oppia | 615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c | [
"Apache-2.0"
] | null | null | null | core/domain/exp_domain_test.py | imrk51/oppia | 615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c | [
"Apache-2.0"
] | 1 | 2021-08-04T13:03:16.000Z | 2021-08-04T13:03:16.000Z | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
import os
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import param_domain
from core.tests import test_utils
import feconf
import utils
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with the YAML generation
# methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom: []
states:
%s:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents: {}
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_YAML_CONTENT_WITH_GADGETS = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom:
- customization_args:
adviceObjects:
value:
- adviceTitle: b
adviceHtml: <p>c</p>
gadget_type: TestGadget
gadget_name: ATestGadget
visible_in_states:
- New state
- Second state
states:
%s:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
Second state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: Second state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
TEST_GADGETS = {
'TestGadget': {
'dir': os.path.join(feconf.GADGETS_DIR, 'TestGadget')
}
}
TEST_GADGET_CUSTOMIZATION_ARGS = {
'adviceObjects': {
'value': [{
'adviceTitle': 'b',
'adviceHtml': '<p>c</p>'
}]
}
}
TEST_GADGET_DICT = {
'gadget_type': 'TestGadget',
'gadget_name': 'ATestGadget',
'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS,
'visible_in_states': ['First state']
}
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
# TODO(bhenning): The validation tests below should be split into separate
# unit tests. Also, all validation errors should be covered in the tests.
def test_validation(self):
"""Test validation of explorations."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.init_state_name = ''
exploration.states = {}
exploration.title = 'Hello #'
self._assert_validation_error(exploration, 'Invalid character #')
exploration.title = 'Title'
exploration.category = 'Category'
# Note: If '/' ever becomes a valid state name, ensure that the rule
# editor frontend tenplate is fixed -- it currently uses '/' as a
# sentinel for an invalid state name.
bad_state = exp_domain.State.create_default_state('/')
exploration.states = {'/': bad_state}
self._assert_validation_error(
exploration, 'Invalid character / in a state name')
new_state = exp_domain.State.create_default_state('ABC')
new_state.update_interaction_id('TextInput')
# The 'states' property must be a non-empty dict of states.
exploration.states = {}
self._assert_validation_error(
exploration, 'exploration has no states')
exploration.states = {'A string #': new_state}
self._assert_validation_error(
exploration, 'Invalid character # in a state name')
exploration.states = {'A string _': new_state}
self._assert_validation_error(
exploration, 'Invalid character _ in a state name')
exploration.states = {'ABC': new_state}
self._assert_validation_error(
exploration, 'has no initial state name')
exploration.init_state_name = 'initname'
self._assert_validation_error(
exploration,
r'There is no state in \[\'ABC\'\] corresponding to '
'the exploration\'s initial state name initname.')
# Test whether a default outcome to a non-existing state is invalid.
exploration.states = {exploration.init_state_name: new_state}
self._assert_validation_error(
exploration, 'destination ABC is not a valid')
# Restore a valid exploration.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
exploration.validate()
# Ensure an answer group with two classifier rules is invalid
init_state.interaction.answer_groups.append(
exp_domain.AnswerGroup.from_dict({
'outcome': {
'dest': exploration.init_state_name,
'feedback': ['Feedback'],
'param_changes': [],
},
'rule_specs': [{
'inputs': {
'training_data': ['Test']
},
'rule_type': 'FuzzyMatches'
}, {
'inputs': {
'training_data': ['Test']
},
'rule_type': 'FuzzyMatches'
}],
'correct': False,
})
)
self._assert_validation_error(
exploration, 'AnswerGroups can only have one classifier rule.')
# Restore a valid exploration.
init_state.interaction.answer_groups.pop()
exploration.validate()
# Ensure an invalid destination can also be detected for answer groups.
# Note: The state must keep its default_outcome, otherwise it will
# trigger a validation error for non-terminal states needing to have a
# default outcome. To validate the outcome of the answer group, this
# default outcome must point to a valid state.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
init_state.interaction.answer_groups.append(
exp_domain.AnswerGroup.from_dict({
'outcome': {
'dest': exploration.init_state_name,
'feedback': ['Feedback'],
'param_changes': [],
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'correct': False,
})
)
exploration.validate()
interaction = init_state.interaction
answer_groups = interaction.answer_groups
answer_group = answer_groups[0]
answer_group.outcome.dest = 'DEF'
self._assert_validation_error(
exploration, 'destination DEF is not a valid')
# Restore a valid exploration.
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
answer_group.outcome.dest = exploration.init_state_name
exploration.validate()
# Validate RuleSpec.
rule_spec = answer_group.rule_specs[0]
rule_spec.inputs = {}
self._assert_validation_error(
exploration, 'RuleSpec \'Contains\' is missing inputs')
rule_spec.inputs = 'Inputs string'
self._assert_validation_error(
exploration, 'Expected inputs to be a dict')
rule_spec.inputs = {'x': 'Test'}
rule_spec.rule_type = 'FakeRuleType'
self._assert_validation_error(exploration, 'Unrecognized rule type')
rule_spec.inputs = {'x': 15}
rule_spec.rule_type = 'Contains'
with self.assertRaisesRegexp(
Exception, 'Expected unicode string, received 15'
):
exploration.validate()
rule_spec.inputs = {'x': '{{ExampleParam}}'}
self._assert_validation_error(
exploration,
'RuleSpec \'Contains\' has an input with name \'x\' which refers '
'to an unknown parameter within the exploration: ExampleParam')
# Restore a valid exploration.
exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
'UnicodeString')
exploration.validate()
# Validate Outcome.
outcome = answer_group.outcome
destination = exploration.init_state_name
outcome.dest = None
self._assert_validation_error(
exploration, 'Every outcome should have a destination.')
# Try setting the outcome destination to something other than a string.
outcome.dest = 15
self._assert_validation_error(
exploration, 'Expected outcome dest to be a string')
outcome.dest = destination
outcome.feedback = 'Feedback'
self._assert_validation_error(
exploration, 'Expected outcome feedback to be a list')
outcome.feedback = [15]
self._assert_validation_error(
exploration, 'Expected outcome feedback item to be a string')
outcome.feedback = ['Feedback']
exploration.validate()
outcome.param_changes = 'Changes'
self._assert_validation_error(
exploration, 'Expected outcome param_changes to be a list')
outcome.param_changes = []
exploration.validate()
# Validate InteractionInstance.
interaction.id = 15
self._assert_validation_error(
exploration, 'Expected interaction id to be a string')
interaction.id = 'SomeInteractionTypeThatDoesNotExist'
self._assert_validation_error(exploration, 'Invalid interaction id')
interaction.id = 'TextInput'
exploration.validate()
interaction.customization_args = []
self._assert_validation_error(
exploration, 'Expected customization args to be a dict')
interaction.customization_args = {15: ''}
self._assert_validation_error(
exploration, 'Invalid customization arg name')
interaction.customization_args = {'placeholder': ''}
exploration.validate()
interaction.answer_groups = {}
self._assert_validation_error(
exploration, 'Expected answer groups to be a list')
interaction.answer_groups = answer_groups
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have a default outcome.')
interaction.id = 'TextInput'
interaction.default_outcome = None
self._assert_validation_error(
exploration,
'Non-terminal interactions must have a default outcome.')
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have any answer groups.')
# A terminal interaction without a default outcome or answer group is
# valid. This resets the exploration back to a valid state.
interaction.answer_groups = []
exploration.validate()
interaction.fallbacks = {}
self._assert_validation_error(
exploration, 'Expected fallbacks to be a list')
# Restore a valid exploration.
interaction.id = 'TextInput'
interaction.answer_groups = answer_groups
interaction.default_outcome = default_outcome
interaction.fallbacks = []
exploration.validate()
# Validate AnswerGroup.
answer_group.rule_specs = {}
self._assert_validation_error(
exploration, 'Expected answer group rules to be a list')
answer_group.rule_specs = []
self._assert_validation_error(
exploration,
'There must be at least one rule for each answer group.')
exploration.states = {
exploration.init_state_name: exp_domain.State.create_default_state(
exploration.init_state_name)
}
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
exploration.validate()
exploration.language_code = 'fake_code'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'English'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'en'
exploration.validate()
exploration.param_specs = 'A string'
self._assert_validation_error(exploration, 'param_specs to be a dict')
exploration.param_specs = {
'@': param_domain.ParamSpec.from_dict({
'obj_type': 'UnicodeString'
})
}
self._assert_validation_error(
exploration, 'Only parameter names with characters')
exploration.param_specs = {
'notAParamSpec': param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
exploration.validate()
def test_fallbacks_validation(self):
    """Test validation of state fallbacks.

    Exercises trigger-type checking, outcome-dict checking, and the
    silent normalization of trigger customization args (defaults added,
    unknown keys stripped).
    """
    exploration = exp_domain.Exploration.create_default_exploration('eid')
    exploration.objective = 'Objective'
    init_state = exploration.states[exploration.init_state_name]
    init_state.update_interaction_id('TextInput')
    exploration.validate()

    # A minimal, well-formed outcome reused by the fallbacks below.
    base_outcome = {
        'dest': exploration.init_state_name,
        'feedback': [],
        'param_changes': [],
    }

    # An unrecognized trigger type must fail validation.
    init_state.update_interaction_fallbacks([{
        'trigger': {
            'trigger_type': 'FakeTriggerName',
            'customization_args': {
                'num_submits': {
                    'value': 42,
                },
            },
        },
        'outcome': base_outcome,
    }])
    self._assert_validation_error(exploration, 'Unknown trigger type')

    # An outcome dict missing its required keys raises immediately on
    # update (KeyError), before validate() is even reached.
    with self.assertRaises(KeyError):
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': {},
        }])

    init_state.update_interaction_fallbacks([{
        'trigger': {
            'trigger_type': 'NthResubmission',
            'customization_args': {},
        },
        'outcome': base_outcome,
    }])
    # Default values for the customization args will be added silently.
    exploration.validate()
    self.assertEqual(len(init_state.interaction.fallbacks), 1)
    self.assertEqual(
        init_state.interaction.fallbacks[0].trigger.customization_args,
        {
            'num_submits': {
                'value': 3,
            }
        })

    init_state.update_interaction_fallbacks([{
        'trigger': {
            'trigger_type': 'NthResubmission',
            'customization_args': {
                'num_submits': {
                    'value': 42,
                },
                'bad_key_that_will_get_stripped_silently': {
                    'value': 'unused_value',
                }
            },
        },
        'outcome': base_outcome,
    }])
    # Unused customization arg keys will be stripped silently.
    exploration.validate()
    self.assertEqual(len(init_state.interaction.fallbacks), 1)
    self.assertEqual(
        init_state.interaction.fallbacks[0].trigger.customization_args,
        {
            'num_submits': {
                'value': 42,
            }
        })

    # A fully well-formed fallback validates cleanly.
    init_state.update_interaction_fallbacks([{
        'trigger': {
            'trigger_type': 'NthResubmission',
            'customization_args': {
                'num_submits': {
                    'value': 2,
                },
            },
        },
        'outcome': base_outcome,
    }])
    exploration.validate()
def test_tag_validation(self):
    """Test validation of exploration tags.

    Checks type, emptiness, character-set, whitespace and uniqueness
    constraints on the tags list, then confirms a valid set passes.
    """
    exploration = exp_domain.Exploration.create_default_exploration('eid')
    exploration.objective = 'Objective'
    init_state = exploration.states[exploration.init_state_name]
    init_state.update_interaction_id('EndExploration')
    # Terminal interactions may not carry a default outcome; clear it so
    # the baseline exploration validates.
    init_state.interaction.default_outcome = None
    exploration.validate()

    # Tags must be a list of strings.
    exploration.tags = 'this should be a list'
    self._assert_validation_error(
        exploration, 'Expected \'tags\' to be a list')

    exploration.tags = [123]
    self._assert_validation_error(exploration, 'to be a string')
    exploration.tags = ['abc', 123]
    self._assert_validation_error(exploration, 'to be a string')

    # Each tag must be non-empty, lowercase-and-spaces only, with no
    # leading/trailing or repeated internal whitespace.
    exploration.tags = ['']
    self._assert_validation_error(exploration, 'Tags should be non-empty')

    exploration.tags = ['123']
    self._assert_validation_error(
        exploration, 'should only contain lowercase letters and spaces')
    exploration.tags = ['ABC']
    self._assert_validation_error(
        exploration, 'should only contain lowercase letters and spaces')

    exploration.tags = [' a b']
    self._assert_validation_error(
        exploration, 'Tags should not start or end with whitespace')
    exploration.tags = ['a b ']
    self._assert_validation_error(
        exploration, 'Tags should not start or end with whitespace')

    exploration.tags = ['a    b']
    self._assert_validation_error(
        exploration, 'Adjacent whitespace in tags should be collapsed')

    # Duplicates are rejected.
    exploration.tags = ['abc', 'abc']
    self._assert_validation_error(
        exploration, 'Some tags duplicate each other')

    # A well-formed tag list passes validation.
    exploration.tags = ['computer science', 'analysis', 'a b c']
    exploration.validate()
def test_exploration_skin_and_gadget_validation(self):
    """Test that Explorations including gadgets validate properly.

    Covers: unknown gadget types, gadgets requiring states that are
    missing, gadget-name collisions, repeated visibility entries, and
    invalid skin panel names.
    """
    exploration = exp_domain.Exploration.from_yaml(
        'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)

    # A gadget whose type is not registered must fail its own validate().
    invalid_gadget_instance = exp_domain.GadgetInstance(
        'bad_type', 'aUniqueGadgetName', [], {})
    with self.assertRaisesRegexp(
        utils.ValidationError,
        'Unknown gadget with type bad_type is not in the registry.'
    ):
        invalid_gadget_instance.validate()

    with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
        gadget_instance = exploration.skin_instance.panel_contents_dict[
            'bottom'][0]

        # Force a GadgetInstance to require certain state names.
        gadget_instance.visible_in_states.extend(['DEF', 'GHI'])
        self._assert_validation_error(
            exploration, 'Exploration missing required states: DEF, GHI')

        # Adding one of the two required states narrows the error.
        def_state = exp_domain.State.create_default_state('DEF')
        def_state.update_interaction_id('TextInput')
        exploration.states['DEF'] = def_state
        self._assert_validation_error(
            exploration, 'Exploration missing required state: GHI')

        # Adding the second required state makes the exploration valid.
        ghi_state = exp_domain.State.create_default_state('GHI')
        ghi_state.update_interaction_id('TextInput')
        exploration.states['GHI'] = ghi_state
        exploration.validate()

        # Force a gadget name collision.
        gadget_instance.visible_in_states = ['DEF']
        exploration.add_gadget(TEST_GADGET_DICT, 'bottom')
        exploration.skin_instance.panel_contents_dict[
            'bottom'][1].visible_in_states = ['GHI']
        self._assert_validation_error(
            exploration,
            'ATestGadget gadget instance name must be unique.')
        # Drop the colliding gadget again.
        exploration.skin_instance.panel_contents_dict['bottom'].pop()

        # Listing the same state twice in visible_in_states is an error.
        gadget_instance.visible_in_states.extend(['DEF'])
        self._assert_validation_error(
            exploration,
            'TestGadget specifies visibility repeatedly for state: DEF')

        # Remove duplicate state.
        gadget_instance.visible_in_states.pop()

        # Adding a panel that doesn't exist in the skin.
        exploration.skin_instance.panel_contents_dict[
            'non_existent_panel'] = []
        self._assert_validation_error(
            exploration,
            'The panel name \'non_existent_panel\' is invalid.')
def test_gadget_name_validation(self):
    """Test that gadget naming conditions validate properly.

    Names must be non-empty strings, within the maximum length, and
    consist only of alphanumerics and single internal spaces.
    """
    exploration = exp_domain.Exploration.from_yaml(
        'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)

    with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
        gadget_instance = exploration.skin_instance.panel_contents_dict[
            'bottom'][0]
        # The gadget loaded from the sample YAML starts out valid.
        gadget_instance.validate()

        # Empty and non-string names are rejected.
        gadget_instance.name = ''
        self._assert_validation_error(
            gadget_instance, 'Gadget name must not be an empty string.')

        gadget_instance.name = 0
        self._assert_validation_error(
            gadget_instance,
            'Gadget name must be a string. Received type: int')

        # Names longer than the class-level maximum are rejected.
        gadget_instance.name = 'ASuperLongGadgetNameThatExceedsTheLimit'
        max_length = exp_domain.GadgetInstance._MAX_GADGET_NAME_LENGTH  # pylint: disable=protected-access
        self._assert_validation_error(
            gadget_instance,
            'ASuperLongGadgetNameThatExceedsTheLimit gadget name'
            ' exceeds maximum length of %d' % max_length)

        # Punctuation, tabs, newlines and repeated/leading/trailing
        # spaces are all rejected by the alphanumeric rule.
        gadget_instance.name = 'VERYGADGET!'
        self._assert_validation_error(
            gadget_instance,
            'Gadget names must be alphanumeric. Spaces are allowed. '
            'Received: VERYGADGET!')

        gadget_instance.name = 'Name with \t tab'
        self._assert_validation_error(
            gadget_instance,
            'Gadget names must be alphanumeric. Spaces are allowed. '
            'Received: Name with \t tab')

        gadget_instance.name = 'Name with \n newline'
        self._assert_validation_error(
            gadget_instance,
            'Gadget names must be alphanumeric. Spaces are allowed. '
            'Received: Name with \n newline')

        gadget_instance.name = 'Name with   3 space'
        self._assert_validation_error(
            gadget_instance,
            'Gadget names must be alphanumeric. Spaces are allowed. '
            'Received: Name with   3 space')

        gadget_instance.name = ' untrim whitespace '
        self._assert_validation_error(
            gadget_instance,
            'Gadget names must be alphanumeric. Spaces are allowed. '
            'Received:  untrim whitespace ')

        # Names with spaces and number should pass.
        gadget_instance.name = 'Space and 1'
        gadget_instance.validate()
def test_exploration_get_gadget_types(self):
    """Verify get_gadget_types() reports the sorted set of gadget types
    present across all skin panels.
    """
    # An exploration with no gadgets reports an empty list.
    plain_exp = exp_domain.Exploration.from_yaml(
        'An Exploration ID', SAMPLE_YAML_CONTENT)
    self.assertEqual(plain_exp.get_gadget_types(), [])

    # The gadget-bearing sample exploration reports its single type.
    gadget_exp = exp_domain.Exploration.from_yaml(
        'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
    self.assertEqual(gadget_exp.get_gadget_types(), ['TestGadget'])

    # After appending a second gadget of a different type, both types
    # appear, in sorted order.
    extra_gadget = exp_domain.GadgetInstance(
        'AnotherGadget', 'GadgetUniqueName1', [], {}
    )
    gadget_exp.skin_instance.panel_contents_dict['bottom'].append(
        extra_gadget)
    self.assertEqual(
        gadget_exp.get_gadget_types(),
        ['AnotherGadget', 'TestGadget']
    )
def test_title_category_and_objective_validation(self):
    """Test that titles, categories and objectives are validated only in
    'strict' mode.
    """
    self.save_new_valid_exploration(
        'exp_id', 'user@example.com', title='', category='',
        objective='', end_state_name='End')
    exploration = exp_services.get_exploration_by_id('exp_id')

    # Non-strict validation tolerates the empty metadata fields.
    exploration.validate()

    # Strict validation requires each field in turn; fill them in one at
    # a time and confirm the next missing field is reported.
    with self.assertRaisesRegexp(
        utils.ValidationError, 'title must be specified'
    ):
        exploration.validate(strict=True)
    exploration.title = 'A title'

    with self.assertRaisesRegexp(
        utils.ValidationError, 'category must be specified'
    ):
        exploration.validate(strict=True)
    exploration.category = 'A category'

    with self.assertRaisesRegexp(
        utils.ValidationError, 'objective must be specified'
    ):
        exploration.validate(strict=True)
    exploration.objective = 'An objective'

    # All three fields present: strict validation now passes.
    exploration.validate(strict=True)
def test_is_demo_property(self):
    """Test the is_demo property.

    An exploration created with id '0' reports is_demo True; ids 'a'
    and 'abcd' report False.
    """
    # Use the idiomatic boolean assertions instead of
    # assertEqual(x, True/False).
    demo = exp_domain.Exploration.create_default_exploration('0')
    self.assertTrue(demo.is_demo)

    notdemo1 = exp_domain.Exploration.create_default_exploration('a')
    self.assertFalse(notdemo1.is_demo)

    notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
    self.assertFalse(notdemo2.is_demo)
def test_exploration_export_import(self):
    """Round-trip an exploration through to_dict()/from_dict() and
    check that no data is lost or altered.
    """
    original = exp_domain.Exploration.create_default_exploration('0')
    exported = original.to_dict()
    reimported = exp_domain.Exploration.from_dict(exported)
    self.assertEqual(reimported.to_dict(), exported)
def test_interaction_with_none_id_is_not_terminal(self):
    """An interaction whose id is None must not be treated as terminal."""
    # Default exploration has a default interaction with an ID of None.
    default_exp = exp_domain.Exploration.create_default_exploration('0')
    first_state = default_exp.states[feconf.DEFAULT_INIT_STATE_NAME]
    self.assertFalse(first_state.interaction.is_terminal)
class StateExportUnitTests(test_utils.GenericTestBase):
    """Test export of states."""

    def test_export_state_to_dict(self):
        """Test exporting a state to a dict.

        A freshly-added state should have empty text content, no
        interaction id, and a default outcome that loops back to itself.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['New state'])

        state_dict = exploration.states['New state'].to_dict()
        expected_dict = {
            'classifier_model_id': None,
            'content': [{
                'type': 'text',
                'value': u''
            }],
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    # The default outcome self-loops to the new state.
                    'dest': 'New state',
                    'feedback': [],
                    'param_changes': [],
                },
                'fallbacks': [],
                'id': None,
            },
            'param_changes': [],
        }
        self.assertEqual(expected_dict, state_dict)
class YamlCreationUnitTests(test_utils.GenericTestBase):
    """Test creation of explorations from YAML files."""

    # Shared exploration id used by the tests below.
    EXP_ID = 'An exploration_id'

    def test_yaml_import_and_export(self):
        """Test the from_yaml() and to_yaml() methods."""
        exploration = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='Title', category='Category')
        exploration.add_states(['New state'])
        self.assertEqual(len(exploration.states), 2)

        exploration.states['New state'].update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': {
                'dest': 'New state',
                'feedback': [],
                'param_changes': [],
            },
        }])

        exploration.validate()

        # Export matches the module-level sample exactly.
        yaml_content = exploration.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)

        # Round trip: re-importing the export reproduces it byte-for-byte.
        exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
        self.assertEqual(len(exploration2.states), 2)
        yaml_content_2 = exploration2.to_yaml()
        self.assertEqual(yaml_content_2, yaml_content)

        # Malformed YAML inputs must raise.
        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')

        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'Invalid\ninit_state_name:\nMore stuff')

        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'State1:\n(\nInvalid yaml')

        # from_yaml() only accepts schema versions >= 10 ...
        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version >= 10, received: 9'
        ):
            exp_domain.Exploration.from_yaml(
                'exp4', SAMPLE_UNTITLED_YAML_CONTENT)

        # ... while from_untitled_yaml() only accepts versions <= 9.
        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version <= 9'
        ):
            exp_domain.Exploration.from_untitled_yaml(
                'exp4', 'Title', 'Category', SAMPLE_YAML_CONTENT)

    def test_yaml_import_and_export_without_gadgets(self):
        """Test from_yaml() and to_yaml() methods without gadgets."""
        exploration_without_gadgets = exp_domain.Exploration.from_yaml(
            self.EXP_ID, SAMPLE_YAML_CONTENT)
        yaml_content = exploration_without_gadgets.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)

    def test_yaml_import_and_export_with_gadgets(self):
        """Test from_yaml() and to_yaml() methods including gadgets."""
        exploration_with_gadgets = exp_domain.Exploration.from_yaml(
            self.EXP_ID, SAMPLE_YAML_CONTENT_WITH_GADGETS)

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            generated_yaml = exploration_with_gadgets.to_yaml()

            # Compare as parsed dicts so key ordering differences in the
            # serialized text do not matter.
            generated_yaml_as_dict = utils.dict_from_yaml(generated_yaml)
            sample_yaml_as_dict = utils.dict_from_yaml(
                SAMPLE_YAML_CONTENT_WITH_GADGETS)
            self.assertEqual(generated_yaml_as_dict, sample_yaml_as_dict)
class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
    """Checks that the Exploration domain class defines exactly the
    schema-conversion methods implied by the current schema versions:
    one per upgrade step, and none past the latest version.
    """

    def test_correct_states_schema_conversion_methods_exist(self):
        """Test that the right states schema conversion methods exist."""
        latest_version = feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION

        # Every step up to the current version must have a converter.
        for old_version in range(latest_version):
            method_name = '_convert_states_v%s_dict_to_v%s_dict' % (
                old_version, old_version + 1)
            self.assertTrue(hasattr(exp_domain.Exploration, method_name))

        # No converter should exist beyond the latest version.
        overshoot_name = '_convert_states_v%s_dict_to_v%s_dict' % (
            latest_version, latest_version + 1)
        self.assertFalse(hasattr(exp_domain.Exploration, overshoot_name))

    def test_correct_exploration_schema_conversion_methods_exist(self):
        """Test that the right exploration schema conversion methods exist."""
        latest_version = exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION

        # Exploration schema versions start at 1.
        for old_version in range(1, latest_version):
            method_name = '_convert_v%s_dict_to_v%s_dict' % (
                old_version, old_version + 1)
            self.assertTrue(hasattr(exp_domain.Exploration, method_name))

        # No converter should exist beyond the latest version.
        overshoot_name = '_convert_v%s_dict_to_v%s_dict' % (
            latest_version, latest_version + 1)
        self.assertFalse(hasattr(exp_domain.Exploration, overshoot_name))
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
    """Test migration methods for yaml content."""

    # Each YAML_CONTENT_Vn constant below is a snapshot of the same small
    # exploration serialized at exploration schema version n. The test
    # methods load each snapshot and assert that it migrates cleanly to
    # _LATEST_YAML_CONTENT.
    # NOTE(review): the indentation inside these YAML literals was
    # reconstructed from standard block-YAML nesting — confirm against the
    # canonical fixtures before relying on byte-exact comparisons.

    # v1: states are a YAML list; each state carries its own 'name'.
    YAML_CONTENT_V1 = ("""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
  - type: text
    value: ''
  name: (untitled state)
  param_changes: []
  widget:
    customization_args: {}
    handlers:
    - name: submit
      rule_specs:
      - definition:
          inputs:
            x: InputString
          name: Equals
          rule_type: atomic
        dest: END
        feedback:
        - Correct!
        param_changes: []
      - definition:
          rule_type: default
        dest: (untitled state)
        feedback: []
        param_changes: []
    sticky: false
    widget_id: TextInput
- content:
  - type: text
    value: ''
  name: New state
  param_changes: []
  widget:
    customization_args: {}
    handlers:
    - name: submit
      rule_specs:
      - definition:
          rule_type: default
        dest: END
        feedback: []
        param_changes: []
    sticky: false
    widget_id: TextInput
""")

    # v2: states become a dict keyed by state name; init_state_name added.
    YAML_CONTENT_V2 = ("""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
  (untitled state):
    content:
    - type: text
      value: ''
    param_changes: []
    widget:
      customization_args: {}
      handlers:
      - name: submit
        rule_specs:
        - definition:
            inputs:
              x: InputString
            name: Equals
            rule_type: atomic
          dest: END
          feedback:
          - Correct!
          param_changes: []
        - definition:
            rule_type: default
          dest: (untitled state)
          feedback: []
          param_changes: []
      sticky: false
      widget_id: TextInput
  New state:
    content:
    - type: text
      value: ''
    param_changes: []
    widget:
      customization_args: {}
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      sticky: false
      widget_id: TextInput
""")

    # v3: adds author_notes, blurb, language_code, objective, skill_tags,
    # and populated widget customization_args.
    YAML_CONTENT_V3 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
  (untitled state):
    content:
    - type: text
      value: ''
    param_changes: []
    widget:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            inputs:
              x: InputString
            name: Equals
            rule_type: atomic
          dest: END
          feedback:
          - Correct!
          param_changes: []
        - definition:
            rule_type: default
          dest: (untitled state)
          feedback: []
          param_changes: []
      sticky: false
      widget_id: TextInput
  New state:
    content:
    - type: text
      value: ''
    param_changes: []
    widget:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      sticky: false
      widget_id: TextInput
""")

    # v4: 'widget' is renamed to 'interaction'; sticky/widget_id replaced
    # by 'id'.
    YAML_CONTENT_V4 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            inputs:
              x: InputString
            name: Equals
            rule_type: atomic
          dest: END
          feedback:
          - Correct!
          param_changes: []
        - definition:
            rule_type: default
          dest: (untitled state)
          feedback: []
          param_changes: []
      id: TextInput
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      id: TextInput
    param_changes: []
""")

    # v5: skill_tags becomes tags; skin_customizations introduced.
    YAML_CONTENT_V5 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
  panels_contents: {}
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            inputs:
              x: InputString
            name: Equals
            rule_type: atomic
          dest: END
          feedback:
          - Correct!
          param_changes: []
        - definition:
            rule_type: default
          dest: (untitled state)
          feedback: []
          param_changes: []
      id: TextInput
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      id: TextInput
    param_changes: []
tags: []
""")

    # v6: explicit END state with an EndExploration interaction; triggers
    # and states_schema_version introduced.
    YAML_CONTENT_V6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
  panels_contents: {}
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            inputs:
              x: InputString
            name: Equals
            rule_type: atomic
          dest: END
          feedback:
          - Correct!
          param_changes: []
        - definition:
            rule_type: default
          dest: (untitled state)
          feedback: []
          param_changes: []
      id: TextInput
      triggers: []
    param_changes: []
  END:
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      customization_args:
        recommendedExplorationIds:
          value: []
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      id: EndExploration
      triggers: []
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      handlers:
      - name: submit
        rule_specs:
        - definition:
            rule_type: default
          dest: END
          feedback: []
          param_changes: []
      id: TextInput
      triggers: []
    param_changes: []
states_schema_version: 3
tags: []
""")

    # v7: handlers replaced by answer_groups + default_outcome.
    YAML_CONTENT_V7 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
  panels_contents: {}
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
          - Correct!
          param_changes: []
        rule_specs:
        - inputs:
            x: InputString
          rule_type: Equals
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback: []
        param_changes: []
      id: TextInput
      triggers: []
    param_changes: []
  END:
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      answer_groups: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      id: EndExploration
      triggers: []
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback: []
        param_changes: []
      id: TextInput
      triggers: []
    param_changes: []
states_schema_version: 4
tags: []
""")

    # v8: triggers replaced by fallbacks.
    YAML_CONTENT_V8 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
  panels_contents: {}
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
          - Correct!
          param_changes: []
        rule_specs:
        - inputs:
            x: InputString
          rule_type: Equals
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
  END:
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      answer_groups: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      fallbacks: []
      id: EndExploration
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
states_schema_version: 5
tags: []
""")

    # v9: adds confirmed_unclassified_answers to each interaction.
    YAML_CONTENT_V9 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
  panels_contents: {}
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
          - Correct!
          param_changes: []
        rule_specs:
        - inputs:
            x: InputString
          rule_type: Equals
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
  END:
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      fallbacks: []
      id: EndExploration
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
states_schema_version: 6
tags: []
""")

    # v10: first 'titled' schema — title and category move into the YAML;
    # default_skin is dropped and the skin gains a 'bottom' panel.
    YAML_CONTENT_V10 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
  panels_contents:
    bottom: []
states:
  (untitled state):
    content:
    - type: text
      value: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
          - Correct!
          param_changes: []
        rule_specs:
        - inputs:
            x: InputString
          rule_type: Equals
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
  END:
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      fallbacks: []
      id: EndExploration
    param_changes: []
  New state:
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
states_schema_version: 7
tags: []
title: Title
""")

    # v11: adds classifier_model_id to every state.
    YAML_CONTENT_V11 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 11
skin_customizations:
  panels_contents:
    bottom: []
states:
  (untitled state):
    classifier_model_id: null
    content:
    - type: text
      value: ''
    interaction:
      answer_groups:
      - outcome:
          dest: END
          feedback:
          - Correct!
          param_changes: []
        rule_specs:
        - inputs:
            x: InputString
          rule_type: Equals
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
  END:
    classifier_model_id: null
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      fallbacks: []
      id: EndExploration
    param_changes: []
  New state:
    classifier_model_id: null
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
states_schema_version: 8
tags: []
title: Title
""")

    # v12: each answer group gains a 'correct' flag.
    YAML_CONTENT_V12 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 12
skin_customizations:
  panels_contents:
    bottom: []
states:
  (untitled state):
    classifier_model_id: null
    content:
    - type: text
      value: ''
    interaction:
      answer_groups:
      - correct: false
        outcome:
          dest: END
          feedback:
          - Correct!
          param_changes: []
        rule_specs:
        - inputs:
            x: InputString
          rule_type: Equals
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: (untitled state)
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
  END:
    classifier_model_id: null
    content:
    - type: text
      value: Congratulations, you have finished!
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        recommendedExplorationIds:
          value: []
      default_outcome: null
      fallbacks: []
      id: EndExploration
    param_changes: []
  New state:
    classifier_model_id: null
    content:
    - type: text
      value: ''
    interaction:
      answer_groups: []
      confirmed_unclassified_answers: []
      customization_args:
        placeholder:
          value: ''
        rows:
          value: 1
      default_outcome:
        dest: END
        feedback: []
        param_changes: []
      fallbacks: []
      id: TextInput
    param_changes: []
states_schema_version: 9
tags: []
title: Title
""")

    # Every older snapshot should migrate to this latest form.
    _LATEST_YAML_CONTENT = YAML_CONTENT_V12

    def test_load_from_v1(self):
        """Test direct loading from a v1 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v2(self):
        """Test direct loading from a v2 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v3(self):
        """Test direct loading from a v3 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v4(self):
        """Test direct loading from a v4 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v5(self):
        """Test direct loading from a v5 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v6(self):
        """Test direct loading from a v6 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v7(self):
        """Test direct loading from a v7 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v8(self):
        """Test direct loading from a v8 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v9(self):
        """Test direct loading from a v9 yaml file."""
        exploration = exp_domain.Exploration.from_untitled_yaml(
            'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v10(self):
        """Test direct loading from a v10 yaml file."""
        # v10+ snapshots include the title, so from_yaml() is used.
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V10)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v11(self):
        """Test direct loading from a v11 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V11)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)

    def test_load_from_v12(self):
        """Test direct loading from a v12 yaml file."""
        exploration = exp_domain.Exploration.from_yaml(
            'eid', self.YAML_CONTENT_V12)
        self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
class ConversionUnitTests(test_utils.GenericTestBase):
    """Test conversion methods."""

    def test_convert_exploration_to_player_dict(self):
        """Checks the full structure of Exploration.to_player_dict() for a
        two-state default exploration.
        """
        exp_title = 'Title'
        second_state_name = 'first state'

        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', title=exp_title, category='Category')
        exploration.add_states([second_state_name])

        def _get_default_state_dict(content_str, dest_name):
            # Builds the dict expected for a default state whose text
            # content is content_str and whose default outcome points at
            # dest_name.
            return {
                'classifier_model_id': None,
                'content': [{
                    'type': 'text',
                    'value': content_str,
                }],
                'interaction': {
                    'answer_groups': [],
                    'confirmed_unclassified_answers': [],
                    'customization_args': {},
                    'default_outcome': {
                        'dest': dest_name,
                        'feedback': [],
                        'param_changes': [],
                    },
                    'fallbacks': [],
                    'id': None,
                },
                'param_changes': [],
            }

        self.assertEqual(exploration.to_player_dict(), {
            'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'title': exp_title,
            'states': {
                feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
                    feconf.DEFAULT_INIT_STATE_CONTENT_STR,
                    feconf.DEFAULT_INIT_STATE_NAME),
                second_state_name: _get_default_state_dict(
                    '', second_state_name),
            },
            'param_changes': [],
            'param_specs': {},
            'skin_customizations': (
                exp_domain.SkinInstance._get_default_skin_customizations()  # pylint: disable=protected-access
            ),
            'language_code': 'en',
        })
class StateOperationsUnitTests(test_utils.GenericTestBase):
    """Test methods operating on states."""

    def test_delete_state(self):
        """Test deletion of states."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.add_states(['first state'])

        # The initial state may never be removed.
        with self.assertRaisesRegexp(
            ValueError, 'Cannot delete initial state'
            ):
            exploration.delete_state(exploration.init_state_name)

        exploration.add_states(['second state'])
        exploration.delete_state('second state')

        # Deleting a non-existent state is an error.
        with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
            exploration.delete_state('fake state')

    def test_state_operations(self):
        """Test adding, updating and checking existence of states."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        self.assertNotIn('invalid_state_name', exploration.states)

        self.assertEqual(len(exploration.states), 1)

        default_state_name = exploration.init_state_name
        exploration.rename_state(default_state_name, 'Renamed state')
        self.assertEqual(len(exploration.states), 1)
        self.assertEqual(exploration.init_state_name, 'Renamed state')

        # Add a new state.
        exploration.add_states(['State 2'])
        self.assertEqual(len(exploration.states), 2)

        # It is OK to rename a state to the same name.
        exploration.rename_state('State 2', 'State 2')

        # But it is not OK to add or rename a state using a name that already
        # exists.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.add_states(['State 2'])
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'Renamed state')

        # And it is OK to rename a state to 'END' (old terminal pseudostate). It
        # is tested throughout this test because a lot of old behavior used to
        # be specific to states named 'END'. These tests validate that is no
        # longer the situation.
        exploration.rename_state('State 2', 'END')

        # Should successfully be able to name it back.
        exploration.rename_state('END', 'State 2')

        # The exploration now has exactly two states.
        self.assertNotIn(default_state_name, exploration.states)
        self.assertIn('Renamed state', exploration.states)
        self.assertIn('State 2', exploration.states)

        # Can successfully add 'END' state
        exploration.add_states(['END'])

        # Should fail to rename like any other state
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exploration.rename_state('State 2', 'END')

        # Ensure the other states are connected to END
        exploration.states[
            'Renamed state'].interaction.default_outcome.dest = 'State 2'
        exploration.states['State 2'].interaction.default_outcome.dest = 'END'

        # Ensure the other states have interactions
        exploration.states['Renamed state'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')

        # Other miscellaneous requirements for validation
        exploration.title = 'Title'
        exploration.category = 'Category'
        exploration.objective = 'Objective'

        # The exploration should NOT be terminable even though it has a state
        # called 'END' and everything else is connected to it.
        with self.assertRaises(Exception):
            exploration.validate(strict=True)

        # Renaming the node to something other than 'END' and giving it an
        # EndExploration is enough to validate it, though it cannot have a
        # default outcome or answer groups.
        exploration.rename_state('END', 'AnotherEnd')
        another_end_state = exploration.states['AnotherEnd']
        another_end_state.update_interaction_id('EndExploration')
        another_end_state.interaction.default_outcome = None
        exploration.validate(strict=True)

        # Name it back for final tests
        exploration.rename_state('AnotherEnd', 'END')

        # Should be able to successfully delete it
        exploration.delete_state('END')
        self.assertNotIn('END', exploration.states)
class GadgetOperationsUnitTests(test_utils.GenericTestBase):
    """Test methods operating on gadgets."""

    def test_gadget_operations(self):
        """Test deletion of gadgets."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            exploration.add_gadget(TEST_GADGET_DICT, 'bottom')

        # The new gadget lands in the requested panel with the dict's values.
        self.assertEqual(exploration.skin_instance.panel_contents_dict[
            'bottom'][0].type, TEST_GADGET_DICT['gadget_type'])
        self.assertEqual(exploration.skin_instance.panel_contents_dict[
            'bottom'][0].name, TEST_GADGET_DICT['gadget_name'])

        # Renaming an unknown gadget fails.
        with self.assertRaisesRegexp(
            ValueError, 'Gadget NotARealGadget does not exist.'
            ):
            exploration.rename_gadget('NotARealGadget', 'ANewName')

        exploration.rename_gadget(
            TEST_GADGET_DICT['gadget_name'], 'ANewName')
        self.assertEqual(exploration.skin_instance.panel_contents_dict[
            'bottom'][0].name, 'ANewName')

        # Add another gadget.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            exploration.add_gadget(TEST_GADGET_DICT, 'bottom')

        self.assertEqual(
            exploration.get_all_gadget_names(),
            ['ANewName', 'ATestGadget']
        )

        # Renaming onto an already-used name fails.
        with self.assertRaisesRegexp(
            ValueError, 'Duplicate gadget name: ANewName'
            ):
            exploration.rename_gadget('ATestGadget', 'ANewName')

        gadget_instance = exploration.get_gadget_instance_by_name(
            'ANewName')
        self.assertIs(
            exploration.skin_instance.panel_contents_dict['bottom'][0],
            gadget_instance
        )

        panel = exploration._get_panel_for_gadget('ANewName')  # pylint: disable=protected-access
        self.assertEqual(panel, 'bottom')

        exploration.delete_gadget('ANewName')
        exploration.delete_gadget('ATestGadget')
        self.assertEqual(exploration.skin_instance.panel_contents_dict[
            'bottom'], [])

        # Deleting the same gadget twice fails.
        with self.assertRaisesRegexp(
            ValueError, 'Gadget ANewName does not exist.'
            ):
            exploration.delete_gadget('ANewName')
class SkinInstanceUnitTests(test_utils.GenericTestBase):
    """Test methods for SkinInstance."""

    # Canonical serialized form of a skin holding one TestGadget in the
    # bottom panel; used for the to_dict()/from_dict() round-trip below.
    _SAMPLE_SKIN_INSTANCE_DICT = {
        'skin_id': 'conversation_v1',
        'skin_customizations': {
            'panels_contents': {
                'bottom': [
                    {
                        'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS,
                        'gadget_type': 'TestGadget',
                        'gadget_name': 'ATestGadget',
                        'visible_in_states': ['New state', 'Second state']
                    }
                ]
            }
        }
    }

    def test_get_state_names_required_by_gadgets(self):
        """Test accurate computation of state_names_required_by_gadgets."""
        skin_instance = exp_domain.SkinInstance(
            'conversation_v1',
            self._SAMPLE_SKIN_INSTANCE_DICT['skin_customizations'])
        self.assertEqual(
            skin_instance.get_state_names_required_by_gadgets(),
            ['New state', 'Second state'])

    def test_generation_of_get_default_skin_customizations(self):
        """Tests that default skin customizations are created properly."""
        # Passing None for the customizations triggers the default layout.
        skin_instance = exp_domain.SkinInstance(feconf.DEFAULT_SKIN_ID, None)

        self.assertEqual(
            skin_instance.panel_contents_dict,
            {'bottom': []}
        )

    def test_conversion_of_skin_to_and_from_dict(self):
        """Tests conversion of SkinInstance to and from dict representations."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        skin_instance = exploration.skin_instance

        skin_instance_as_dict = skin_instance.to_dict()

        self.assertEqual(
            skin_instance_as_dict,
            self._SAMPLE_SKIN_INSTANCE_DICT)

        skin_instance_as_instance = exp_domain.SkinInstance.from_dict(
            skin_instance_as_dict)

        self.assertEqual(skin_instance_as_instance.skin_id, 'conversation_v1')
        self.assertEqual(
            sorted(skin_instance_as_instance.panel_contents_dict.keys()),
            ['bottom'])
class GadgetInstanceUnitTests(test_utils.GenericTestBase):
    """Tests methods instantiating and validating GadgetInstances."""

    def test_gadget_instantiation(self):
        """Test instantiation of GadgetInstances."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)

        # The sample YAML declares exactly one gadget, in the bottom panel.
        self.assertEqual(len(exploration.skin_instance.panel_contents_dict[
            'bottom']), 1)

    def test_gadget_instance_properties(self):
        """Test accurate representation of gadget properties."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            test_gadget_instance = panel_contents_dict['bottom'][0]

        self.assertEqual(test_gadget_instance.height, 50)
        self.assertEqual(test_gadget_instance.width, 60)
        self.assertIn('New state', test_gadget_instance.visible_in_states)

    def test_gadget_instance_validation(self):
        """Test validation of GadgetInstance."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            test_gadget_instance = panel_contents_dict['bottom'][0]

        # Validation against sample YAML should pass without error.
        exploration.validate()

        # Assert size exceeded error triggers when a gadget's size exceeds
        # a panel's capacity.
        with self.swap(
            test_gadget_instance.gadget,
            'width_px',
            4600):
            self._assert_validation_error(
                exploration,
                'Width 4600 of panel \'bottom\' exceeds limit of 350')

        # Assert internal validation against CustomizationArgSpecs.
        test_gadget_instance.customization_args[
            'adviceObjects']['value'].extend(
                [
                    {'adviceTitle': 'test_title', 'adviceHtml': 'test html'},
                    {'adviceTitle': 'another_title', 'adviceHtml': 'more html'},
                    {'adviceTitle': 'third_title', 'adviceHtml': 'third html'}
                ]
            )
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'TestGadget is limited to 3 tips, found 4.'
            ):
            test_gadget_instance.validate()
        # Restore a valid number of tips before the next check.
        test_gadget_instance.customization_args[
            'adviceObjects']['value'].pop()

        # Assert that too many gadgets in a panel raise a ValidationError.
        panel_contents_dict['bottom'].append(test_gadget_instance)
        with self.assertRaisesRegexp(
            utils.ValidationError,
            '\'bottom\' panel expected at most 1 gadget, but 2 gadgets are '
            'visible in state \'New state\'.'
            ):
            exploration.validate()

        # Assert that an error is raised when a gadget is not visible in any
        # states.
        test_gadget_instance.visible_in_states = []
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'TestGadget gadget not visible in any states.'):
            test_gadget_instance.validate()

    def test_conversion_of_gadget_instance_to_and_from_dict(self):
        """Test conversion of GadgetInstance to and from dict. """
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict
        test_gadget_instance = panel_contents_dict['bottom'][0]

        test_gadget_as_dict = test_gadget_instance.to_dict()

        self.assertEqual(
            test_gadget_as_dict,
            {
                'gadget_type': 'TestGadget',
                'gadget_name': 'ATestGadget',
                'visible_in_states': ['New state', 'Second state'],
                'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS
            }
        )

        test_gadget_as_instance = exp_domain.GadgetInstance.from_dict(
            test_gadget_as_dict)

        self.assertEqual(test_gadget_as_instance.width, 60)
        self.assertEqual(test_gadget_as_instance.height, 50)
class GadgetVisibilityInStatesUnitTests(test_utils.GenericTestBase):
    """Tests methods affecting gadget visibility in states."""

    def test_retrieving_affected_gadgets(self):
        """Only gadgets visible in the queried state should be returned."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        visible_gadgets = (
            exploration._get_gadget_instances_visible_in_state('Second state'))  # pylint: disable=protected-access
        self.assertEqual(len(visible_gadgets), 1)
        self.assertEqual(visible_gadgets[0].name, 'ATestGadget')
| 31.216 | 115 | 0.605614 | 67,683 | 0.912933 | 0 | 0 | 0 | 0 | 0 | 0 | 36,474 | 0.491974 |
a871c5a3f2744ffc0ab85831207e950cede0cd8c | 3,160 | py | Python | Infection_vs_Inflammation/Code/Process_Data_V3.py | jdatascientist/Machine_Learning_4_MRI | 973196063d69115048bfa97f213dd6ff0400f74d | [
"MIT"
] | null | null | null | Infection_vs_Inflammation/Code/Process_Data_V3.py | jdatascientist/Machine_Learning_4_MRI | 973196063d69115048bfa97f213dd6ff0400f74d | [
"MIT"
] | null | null | null | Infection_vs_Inflammation/Code/Process_Data_V3.py | jdatascientist/Machine_Learning_4_MRI | 973196063d69115048bfa97f213dd6ff0400f74d | [
"MIT"
] | null | null | null | # Import Modules as needed
import numpy as np
from mylocal_functions import *
import matplotlib.pyplot as plt
# ======== CEST============= #
# Integrate the fitted CEST Z-spectrum signal for each file / tissue ROI.
CEST_list=get_ipython().getoutput('ls ../Study_03_CBA/*CEST.txt')  # shell glob via IPython
CEST_Int_matrix=np.zeros((len(CEST_list),4))  # rows: files, columns: the 4 ROIs
ppm=np.linspace(-8,8,101);  # chemical-shift axis (ppm) of the Z-spectrum
for i in range( len(CEST_list) ):
    D=txt_2_array(CEST_list[i]); #Convert txt file to array
    Zn=normalize_data(D.T,8); Zn=Zn[:,9::]  # normalize, then drop the first 9 columns
    M=np.zeros([1,4])
    for j in range(4):
        # fit_L2_scale/Lscale come from mylocal_functions — presumably a
        # 7-parameter Lorentzian fit and its evaluation; TODO confirm.
        p=fit_L2_scale(ppm,Zn[j,:])
        L=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
        #CEST_centered[i,:]=L
        #CEST_integral[i,0]=np.sum(L)
        M[0,j]=np.sum(L)  # area under the fitted curve
    CEST_Int_matrix[i,:]=M
# ======== T2 MSME============= #
# Fit a T2 relaxation time per file / tissue ROI.
# Make list of all T2.txt files
T2_list = get_ipython().getoutput('ls ../Study_03_CBA/*T2.txt')
T2_matrix=np.zeros( (len(T2_list),4) )  # rows: files, columns: the 4 ROIs
TR=np.linspace(.012,.012*12,12)  # 12 echo times, 12 ms apart
# Fit T2
for i in range(len(T2_list)):
    YDataMatrix=txt_2_array(T2_list[i])
    #Estimate T2 (fitT2 comes from mylocal_functions — assumed
    #mono-exponential decay fit; TODO confirm)
    T2time=fitT2(TR,YDataMatrix)
    T2_matrix[i,:]=T2time.T
# ======== T2ex DCE============= #
# Make list of all *T2exDCE.txt files
T2ex_list = get_ipython().getoutput('ls ../Study_03_CBA/*T2exDCE.txt')
T2ex_Int_matrix=np.zeros( (len(T2ex_list),4) )  # rows: files, columns: the 4 ROIs
# T2ex integral: sum of the baseline-subtracted (signal - 1) time course
for i in range( len(T2ex_list) ):
    D=txt_2_array(T2ex_list[i]); #Convert txt file to array
    Zn=normalize_data(D.T,0); Zn=Zn[:,9::]  # drop the first 9 time points
    T2ex_Int_matrix[i,:]=np.sum(Zn-1,axis=1)
# ======== create violin plots ============= #
# One violin subplot per metric (CEST, T2, T2ex), grouped by tissue ROI.
Tissues = ["Infected", "Healthy R", "Sterile Infl.", "Healthy K"]
# Set dimensions of plot
fig = plt.figure(1, figsize=(10, 10))

# CEST integrals
ax = fig.add_subplot(3, 1, 1); ax.set_xticks([1, 2, 3, 4]); ax.set_xticklabels(Tissues)
plt.violinplot(CEST_Int_matrix, showextrema=True, showmedians=True)
plt.ylabel("CEST Integral")

# T2 relaxation times. A single violinplot call per dataset suffices: the
# original repeated identical calls (3x here, 2x below), which only stacked
# duplicate artists on top of each other.
ax = fig.add_subplot(3, 1, 2); ax.set_xticks([1, 2, 3, 4]); ax.set_xticklabels(Tissues)
plt.violinplot(T2_matrix, showextrema=True, showmedians=True)
plt.ylabel("T2 time")

# T2ex integrals
ax = fig.add_subplot(3, 1, 3); ax.set_xticks([1, 2, 3, 4]); ax.set_xticklabels(Tissues)
plt.violinplot(T2ex_Int_matrix, showextrema=True, showmedians=True)
plt.ylabel("T2ex Integral")
# plot non_neg only
# ======== T2ex DCE ALL============= #
# Make list of all T2.txt files
def plotvio(slice_num):
    """Draw a violin plot of per-ROI T2ex DCE integrals for one slice.

    Globs the slice's ``*T2exDCE.txt`` exports via the IPython shell,
    integrates each normalized, baseline-subtracted curve for the four
    tissue ROIs, and plots the distributions on the current axes.
    """
    pattern = 'ls ../Study_03_CBA/*S' + str(slice_num) + '*T2exDCE.txt'
    file_list = get_ipython().getoutput(pattern)
    integrals = np.zeros((len(file_list), 4))
    for idx, fname in enumerate(file_list):
        data = txt_2_array(fname)            # raw txt export -> array
        normalized = normalize_data(data.T, 0)
        integrals[idx, :] = np.sum(normalized - 1, axis=1)
    plt.violinplot(integrals, showextrema=True, showmedians=True)
# Draw the per-slice T2ex violin plots (slices 1-5) into one tall figure.
for i in range(5):
    n=i+1
    plt.figure(99,figsize=(15,15));
    plt.subplot(5,1,n); plt.title("Slice_0"+str(n))
    plotvio(n)
| 30.980392 | 87 | 0.644937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 773 | 0.24462 |
a8723c707610ff2d371e0e72d391d934a953ce69 | 238 | py | Python | LAMARCK_ML/metrics/__init__.py | JonasDHomburg/LAMARCK | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2019-09-20T08:03:47.000Z | 2021-05-10T11:02:09.000Z | LAMARCK_ML/metrics/__init__.py | JonasDHomburg/LAMARCK_ML | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | LAMARCK_ML/metrics/__init__.py | JonasDHomburg/LAMARCK_ML | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | from .implementations import Accuracy, \
FlOps, \
Nodes, \
TimeMetric, \
MemoryMetric, \
Parameters, \
LayoutCrossingEdges, \
LayoutDistanceX, \
LayoutDistanceY, \
CartesianFitness
from .interface import MetricInterface
| 19.833333 | 40 | 0.731092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a8725cd6af64baf27731158994ab2c4a10a983eb | 2,684 | py | Python | Confidence_Calibration/main.py | heatherwan/Automatic-Validation-of-Simulation-Results | a39b049aa7c835abb39f4501a4ee3db20cd84672 | [
"MIT"
] | null | null | null | Confidence_Calibration/main.py | heatherwan/Automatic-Validation-of-Simulation-Results | a39b049aa7c835abb39f4501a4ee3db20cd84672 | [
"MIT"
] | null | null | null | Confidence_Calibration/main.py | heatherwan/Automatic-Validation-of-Simulation-Results | a39b049aa7c835abb39f4501a4ee3db20cd84672 | [
"MIT"
] | null | null | null | # Functions for Dirichlet parameter tuning main class for CIFAR-100
import numpy as np
import argparse
from calibration.calibration_functions import tune_dir_nn_heather, cal_TS_results
if __name__ == "__main__":
    # CLI: pick validation/test logit files and the calibration method.
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_val', type=str, default='exp000', help='input val logit name')
    parser.add_argument('--file_test', type=str, default='exp000', help='input test logit name')
    parser.add_argument('--method', type=str, default='TS', help='calibration method: MS-ODIR, DIR-ODIR, TS')
    args = parser.parse_args()

    files = [f'Input_logit/{args.file_val}', f'Input_logit/{args.file_test}']
    # Experiment name, e.g. 'exp120' from 'exp120_val_logit.txt'.
    name = args.file_val.split('_')[0]

    if args.method == "TS":
        # Temperature scaling (Guo et al.).
        df_guo = cal_TS_results(name, args.method, files, approach="other")
        df_guo.to_csv(f'result/{name}_{args.method}_result')
    else:
        # Dirichlet calibration variants. DIR-ODIR operates on logits.
        # BUG FIX: the original tested `'DIR-ODIR' in name`, but `name` is
        # the experiment id (e.g. 'exp120'), so use_logits was always
        # False; test the selected method instead.
        use_logits = args.method == 'DIR-ODIR'

        # set parameters
        model_dir = 'model_weights'
        loss_fn = 'sparse_categorical_crossentropy'
        k_folds = 3
        random_state = 15
        use_scipy = False
        comp_l2 = True
        double = True

        # Regularisation grid (single values here; uncomment for a sweep).
        # lambdas = np.array([10 ** i for i in np.arange(-2.0, -1.5)])
        # lambdas = sorted(np.concatenate([lambdas, lambdas * 0.25, lambdas * 0.5]))
        # mus = np.array([10 ** i for i in np.arange(-2.0, -1.5)])
        lambdas = [0.0025]
        mus = [0.01]

        # print out parameters
        print("Lambdas:", len(lambdas))
        print("Mus:", str(mus))
        print("Double learning:", double)
        print("Complementary L2:", comp_l2)
        print("Using logits for Dirichlet:", use_logits)
        print("Using Scipy model instead of Keras:", use_scipy)

        df_res, df_res_ensemble = tune_dir_nn_heather(name, args.method, files, lambdas=lambdas, mus=mus, verbose=False,
                                                      k_folds=k_folds,
                                                      random_state=random_state, double_learning=double,
                                                      model_dir=model_dir,
                                                      loss_fn=loss_fn, comp_l2=comp_l2,
                                                      use_logits=use_logits,
                                                      use_scipy=use_scipy)

        df_res.to_csv(f'result/{name}_{args.method}_result')
| 44 | 120 | 0.574143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 962 | 0.35842 |
a8737282035baae091f5e4cffd08da990791fcd0 | 5,593 | py | Python | neutron/tests/functional/agent/l3/test_metadata_proxy.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 1,080 | 2015-01-04T08:35:00.000Z | 2022-03-27T09:15:52.000Z | neutron/tests/functional/agent/l3/test_metadata_proxy.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 24 | 2015-02-21T01:48:28.000Z | 2021-11-26T02:38:56.000Z | neutron/tests/functional/agent/l3/test_metadata_proxy.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 1,241 | 2015-01-02T10:47:10.000Z | 2022-03-27T09:42:23.000Z | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import time
from neutron_lib import constants
import webob
import webob.dec
import webob.exc
from neutron.agent.linux import utils
from neutron.tests.common import machine_fixtures
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.l3 import framework
from neutron.tests.functional.agent.linux import helpers
# Overall curl timeout (seconds) and the delay between retries while
# waiting for the metadata proxy to come up.
METADATA_REQUEST_TIMEOUT = 60
METADATA_REQUEST_SLEEP = 5
class MetadataFakeProxyHandler(object):
    """Minimal WSGI app that answers every request with a fixed HTTP status."""

    def __init__(self, status):
        # HTTP status code (e.g. webob.exc.HTTPOk.code) for all responses.
        self.status = status

    @webob.dec.wsgify()
    def __call__(self, req):
        return webob.Response(status=self.status)
class MetadataL3AgentTestCase(framework.L3AgentTestFramework):
    """Functional tests for the l3-agent metadata proxy."""

    # Permissions used when creating the metadata proxy UNIX socket.
    SOCKET_MODE = 0o644

    def _create_metadata_fake_server(self, status):
        """Start a UNIX-domain WSGI server that always answers `status`."""
        server = utils.UnixDomainWSGIServer('metadata-fake-server')
        self.addCleanup(server.stop)

        # NOTE(cbrandily): TempDir fixture creates a folder with 0o700
        # permissions but metadata_proxy_socket folder must be readable by all
        # users
        self.useFixture(
            helpers.RecursivePermDirFixture(
                os.path.dirname(self.agent.conf.metadata_proxy_socket), 0o555))
        server.start(MetadataFakeProxyHandler(status),
                     self.agent.conf.metadata_proxy_socket,
                     workers=0, backlog=4096, mode=self.SOCKET_MODE)

    def _query_metadata_proxy(self, machine):
        """Curl the metadata IP from `machine`; return the HTTP status line.

        Retries while the connection is refused (proxy still starting),
        sleeping METADATA_REQUEST_SLEEP between attempts, for at most half
        of METADATA_REQUEST_TIMEOUT in total.
        """
        url = 'http://%(host)s:%(port)s' % {'host': constants.METADATA_V4_IP,
                                            'port': constants.METADATA_PORT}
        cmd = 'curl', '--max-time', METADATA_REQUEST_TIMEOUT, '-D-', url
        i = 0
        CONNECTION_REFUSED_TIMEOUT = METADATA_REQUEST_TIMEOUT // 2
        while i <= CONNECTION_REFUSED_TIMEOUT:
            try:
                raw_headers = machine.execute(cmd)
                break
            except RuntimeError as e:
                if 'Connection refused' in str(e):
                    # Proxy not listening yet; wait and retry.
                    time.sleep(METADATA_REQUEST_SLEEP)
                    i += METADATA_REQUEST_SLEEP
                else:
                    self.fail('metadata proxy unreachable '
                              'on %s before timeout' % url)

        if i > CONNECTION_REFUSED_TIMEOUT:
            self.fail('Timed out waiting metadata proxy to become available')
        return raw_headers.splitlines()[0]

    def test_access_to_metadata_proxy(self):
        """Test access to the l3-agent metadata proxy.

        The test creates:
         * A l3-agent metadata service:
           * A router (which creates a metadata proxy in the router namespace),
           * A fake metadata server
         * A "client" namespace (simulating a vm) with a port on router
           internal subnet.

        The test queries from the "client" namespace the metadata proxy on
        http://169.254.169.254 and asserts that the metadata proxy added
        the X-Forwarded-For and X-Neutron-Router-Id headers to the request
        and forwarded the http request to the fake metadata server and the
        response to the "client" namespace.
        """
        router_info = self.generate_router_info(enable_ha=False)
        router = self.manage_router(self.agent, router_info)
        self._create_metadata_fake_server(webob.exc.HTTPOk.code)

        # Create and configure client namespace
        router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0])
        br_int = framework.get_ovs_bridge(
            self.agent.conf.OVS.integration_bridge)

        machine = self.useFixture(
            machine_fixtures.FakeMachine(
                br_int,
                net_helpers.increment_ip_cidr(router_ip_cidr),
                router_ip_cidr.partition('/')[0]))

        # Query metadata proxy
        firstline = self._query_metadata_proxy(machine)

        # Check status code
        self.assertIn(str(webob.exc.HTTPOk.code), firstline.split())
class UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase):
    """Test metadata proxy with least privileged user.

    The least privileged user has uid=65534 and is commonly named 'nobody' but
    not always, that's why we use its uid.
    """

    # Group-writable so the unprivileged proxy user can use the socket.
    SOCKET_MODE = 0o664

    def setUp(self):
        super(UnprivilegedUserMetadataL3AgentTestCase, self).setUp()
        self.agent.conf.set_override('metadata_proxy_user', '65534')
class UnprivilegedUserGroupMetadataL3AgentTestCase(MetadataL3AgentTestCase):
    """Test metadata proxy with least privileged user/group.

    The least privileged user has uid=65534 and is commonly named 'nobody' but
    not always, that's why we use its uid.

    Its group has gid=65534 and is commonly named 'nobody' or 'nogroup', that's
    why we use its gid.
    """

    # World-writable so the unprivileged user *and* group can use the socket.
    SOCKET_MODE = 0o666

    def setUp(self):
        super(UnprivilegedUserGroupMetadataL3AgentTestCase, self).setUp()
        self.agent.conf.set_override('metadata_proxy_user', '65534')
        self.agent.conf.set_override('metadata_proxy_group', '65534')
| 37.286667 | 79 | 0.674593 | 4,569 | 0.816914 | 0 | 0 | 98 | 0.017522 | 0 | 0 | 2,255 | 0.403183 |
a874274a3bc3f8a721abc5d40fa4b47b612e790d | 680 | py | Python | ctrl/experimental/watch_remake_task_status.py | markmuetz/cosmic | f215c499bfc8f1d717dea6aa78a58632a4e89113 | [
"Apache-2.0"
] | null | null | null | ctrl/experimental/watch_remake_task_status.py | markmuetz/cosmic | f215c499bfc8f1d717dea6aa78a58632a4e89113 | [
"Apache-2.0"
] | null | null | null | ctrl/experimental/watch_remake_task_status.py | markmuetz/cosmic | f215c499bfc8f1d717dea6aa78a58632a4e89113 | [
"Apache-2.0"
] | 1 | 2021-01-26T02:25:48.000Z | 2021-01-26T02:25:48.000Z | import sys
import logging
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEventHandler
class RemakeTaskStatusEventHandler(FileSystemEventHandler):
    """Watchdog handler that echoes every filesystem event to stdout."""

    def on_any_event(self, event):
        # Keep the base-class bookkeeping, then print the event object.
        super().on_any_event(event)
        print(event)
if __name__ == "__main__":
    # Watch `path` (first CLI argument, default: cwd) recursively and
    # print every change until interrupted with Ctrl-C.
    path = sys.argv[1] if len(sys.argv) > 1 else '.'
    event_handler = RemakeTaskStatusEventHandler()
    # PollingObserver scans instead of using native notifications, so it
    # also works on filesystems without inotify support (e.g. NFS mounts).
    observer = PollingObserver()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        # join(1) wakes every second so KeyboardInterrupt is delivered.
        while observer.is_alive():
            observer.join(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| 26.153846 | 59 | 0.708824 | 151 | 0.222059 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.019118 |
a87429b2c4743e0326d5d14c4451d21d143c8edd | 1,025 | py | Python | flask_clacks.py | WilliamMayor/flask-clacks | 491e22e34f06cdd25532ab7952cf1fa90f6bae1e | [
"MIT"
] | 1 | 2019-03-21T06:18:47.000Z | 2019-03-21T06:18:47.000Z | flask_clacks.py | WilliamMayor/flask-clacks | 491e22e34f06cdd25532ab7952cf1fa90f6bae1e | [
"MIT"
] | null | null | null | flask_clacks.py | WilliamMayor/flask-clacks | 491e22e34f06cdd25532ab7952cf1fa90f6bae1e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from functools import wraps
from flask import make_response
__author__ = 'William Mayor'
__email__ = 'mail@williammayor.co.uk'
__version__ = '1.0.1'
class Clacks(object):
def __init__(self, app=None, names=None):
if names is None:
names = []
self.names = list(names) + ['Terry Pratchett']
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
app.after_request(self.add_headers)
def add_headers(self, resp):
for n in self.names:
resp.headers.add('X-Clacks-Overhead', 'GNU ' + n)
return resp
def clacks(names=None):
_names = names
def decorator(f):
c = Clacks(names=_names)
@wraps(f)
def wrapper(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
return c.add_headers(resp)
return wrapper
if callable(names):
_names = None
return decorator(names)
return decorator
| 21.808511 | 61 | 0.587317 | 473 | 0.461463 | 0 | 0 | 139 | 0.13561 | 0 | 0 | 112 | 0.109268 |
a8759683c3def40d86da4d64b460e72c87841283 | 2,980 | py | Python | utils/utils.py | wl-970925/DANN-MNIST | c781fdcc3267a664c353ae07d46000a67075e0f1 | [
"MIT"
] | null | null | null | utils/utils.py | wl-970925/DANN-MNIST | c781fdcc3267a664c353ae07d46000a67075e0f1 | [
"MIT"
] | null | null | null | utils/utils.py | wl-970925/DANN-MNIST | c781fdcc3267a664c353ae07d46000a67075e0f1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/2/15 16:10
# @Author : Dai PuWei
# @Email : 771830171@qq.com
# @File : utils.py
# @Software: PyCharm
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.core.framework import summary_pb2
class AverageMeter(object):
    """Tracks the latest value and the running mean of a metric stream."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val, self.average, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.average = self.sum / float(self.count)
def make_summary(name, val):
    """Build a TensorBoard scalar summary protobuf for one named value."""
    scalar = summary_pb2.Summary.Value(tag=name, simple_value=val)
    return summary_pb2.Summary(value=[scalar])
def plot_accuracy(x, y, path):
    """Plot training/validation accuracy curves and save them to *path*.

    Args:
        x: epoch indices (x axis).
        y: pair ``(train_accuracy, val_accuracy)`` of series to draw.
        path: output image file path.
    """
    train_acc, val_acc = y
    plt.plot(x, train_acc, 'r-')
    plt.plot(x, val_acc, 'b--')
    plt.grid(True)
    plt.xlim(0, x[-1] + 2)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend(["train_acc", "val_acc"], loc="best")
    plt.savefig(path)
    plt.close()
def plot_loss(x, y, path, mode="train"):
    """Plot total/image-classifier/domain-classifier losses and save them.

    Args:
        x: epoch indices (x axis).
        y: triple ``(total, image_cls, domain_cls)`` of loss series.
        path: output image file path.
        mode: "train" or "val"; only selects the legend labels.
    """
    prefix = "train" if mode == "train" else "val"
    labels = [prefix + "_loss",
              prefix + "_image_cls_loss",
              prefix + "_domain_cls_loss"]
    total, image_cls, domain_cls = y
    # Lower y-limit: just below the smallest observed loss, clamped at 0.
    floors = [np.max([np.min(series) - 0.1, 0])
              for series in (total, image_cls, domain_cls)]
    y_min = np.min(floors)
    plt.plot(x, total, 'r-')
    plt.plot(x, image_cls, 'b--')
    plt.plot(x, domain_cls, 'g-.')
    plt.grid(True)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.xlim(0, x[-1] + 2)
    plt.ylim(ymin=y_min)
    plt.legend(labels, loc="best")
    plt.savefig(path)
    plt.close()
def learning_rate_schedule(process, init_learning_rate=0.01, alpha=10.0, beta=0.75):
    """Annealed learning-rate schedule from the DANN paper.

    Args:
        process: training progress ratio in [0, 1].
        init_learning_rate: learning rate at process == 0 (default 0.01).
        alpha: decay speed parameter (default 10).
        beta: decay shape exponent (default 0.75).

    Returns:
        The decayed rate ``init_learning_rate / (1 + alpha * process) ** beta``.
    """
    decay = (1.0 + alpha * process) ** beta
    return init_learning_rate / decay
def grl_lambda_schedule(process, gamma=10.0):
    """Gradient-reversal-layer weight schedule from the DANN paper.

    Ramps smoothly from 0 towards 1 as *process* goes from 0 to 1.

    Args:
        process: training progress ratio in [0, 1].
        gamma: steepness of the sigmoid ramp (default 10).
    """
    sigmoid = 1.0 / (1.0 + np.exp(-gamma * process))
    return 2.0 * sigmoid - 1.0
a8767131fc92daf2b1e76889f0a46f8b914b9aa3 | 1,889 | py | Python | PyrogramBot/commands.py | Habdio/GROUP-AutoManageBot | 211ac78b0ce8a267ef9b77f881412ba87140d39b | [
"MIT"
] | null | null | null | PyrogramBot/commands.py | Habdio/GROUP-AutoManageBot | 211ac78b0ce8a267ef9b77f881412ba87140d39b | [
"MIT"
] | null | null | null | PyrogramBot/commands.py | Habdio/GROUP-AutoManageBot | 211ac78b0ce8a267ef9b77f881412ba87140d39b | [
"MIT"
] | null | null | null | from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.types import CallbackQuery
import random
# Pool of image URLs; the /start handler picks one at random for the
# reply photo.
ALL_PIC = [
    "https://telegra.ph/file/52b71d5a61c904c6a59d1.jpg",
    "https://telegra.ph/file/28a00384a3be4f6c916ba.jpg",
    "https://telegra.ph/file/eb654e5c7ff4d29eab29f.jpg",
    "https://telegra.ph/file/a4796bdcca7ff90a3a3b8.jpg",
    "https://telegra.ph/file/b7b43793368770ca4c7fb.jpg"
]
@Client.on_message(filters.command("start"))
async def start_message(bot, message):
    """Reply to /start with a random photo, a greeting caption and the
    inline navigation keyboard."""
    await message.reply_photo(
        photo=random.choice(ALL_PIC),
        # BUG FIX: must be an f-string — the original plain string rendered
        # "{message.from_user.mention}" literally instead of the mention.
        caption=f"hey {message.from_user.mention} എന്റെ പേര് <a href=https://t.me/FluffyPyroGramBot>𝙵𝙻𝚄𝙵𝙵𝚈 𝙿𝚈𝚁𝙾𝙶𝚁𝙰𝙼</a>, 🔰മച്ചാനെ എന്റെ പണി കഴിഞ്ഞിട്ടില്ല അതുകൊണ്ട് RePo✅️ പ്രൈവറ്റ് ആണ് Work കഴിഞ്ഞിട്ട് public ആക്കും ",
        reply_markup=InlineKeyboardMarkup([[
            InlineKeyboardButton("🗨️𝔾ℝ𝕆𝕌ℙ🗨️", url="https://t.me/DEVELOPERSCHANNEL2022"),
            InlineKeyboardButton("📂ℂℍ𝔸ℕℕ𝔼𝕃📂", url="https://t.me/DELCHANNEL001"),
        ], [
            InlineKeyboardButton("🔰𝔼𝔻𝕀𝕋𝔼ℝ🔰", url="t.me/TEAM_KERALA"),
            InlineKeyboardButton("©️ℙ𝔸𝕀𝔻 ℙℝ𝕆𝕄𝕆𝕋𝕀𝕆ℕ", url="t.me/pushpa_Reju"),
        ], [
            InlineKeyboardButton("👨‍💻𝔻𝔼𝕍𝔼𝕃𝕆ℙ𝔼ℝ👨‍💻", url="t.me/TEAM_KERALA"),
            InlineKeyboardButton("help", callback_data="song"),
        ], [
            InlineKeyboardButton("⚜️𝔸𝔻𝔻 𝕄𝔼 𝕋𝕆 𝔸 ℂℍ𝔸𝕋 𝔾ℝ𝕆𝕌ℙ⚜️", url="http://t.me/FluffyPyroGramBot?startgroup=true"),
        ]])
    )
@Client.on_callback_query()
async def callback(bot, query: CallbackQuery):
    """Handle inline-button presses; the "song" button shows the help text."""
    if query.data == "song":
        # BUG FIX: the original built the markup in a dead local assignment
        # after the edit_text call; pass it to edit_text so the button is
        # actually re-attached to the edited message.
        await query.message.edit_text(
            text="/tgraph",
            reply_markup=InlineKeyboardMarkup([[
                InlineKeyboardButton("song", callback_data="song")
            ]])
        )
a877a97489bec7501c25a2c46b3d75476271d8a2 | 2,666 | py | Python | graphs/finding_bridges.py | topguns837/Python-1 | e7381b513b526e2f3ca134022389832778bdf080 | [
"MIT"
] | 1 | 2021-12-07T12:57:56.000Z | 2021-12-07T12:57:56.000Z | graphs/finding_bridges.py | thanhtd91/Python-1 | a98465230f21e6ece76332eeca1558613788c387 | [
"MIT"
] | null | null | null | graphs/finding_bridges.py | thanhtd91/Python-1 | a98465230f21e6ece76332eeca1558613788c387 | [
"MIT"
] | 1 | 2022-01-01T07:30:12.000Z | 2022-01-01T07:30:12.000Z | """
An edge is a bridge if, after removing it, the count of connected components in the
graph increases by one. Bridges represent vulnerabilities in a connected network and
are useful for designing reliable networks. For example, in a wired computer network,
an articulation point indicates the critical computers and a bridge indicates the
critical wires or connections.
For more details, refer to this article:
https://www.geeksforgeeks.org/bridge-in-a-graph/
"""
def __get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
"""
Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]; ai <= bi
>>> compute_bridges(__get_demo_graph(0))
[(3, 4), (2, 3), (2, 5)]
>>> compute_bridges(__get_demo_graph(1))
[(6, 7), (0, 6), (1, 9), (3, 4), (2, 4), (2, 5)]
>>> compute_bridges(__get_demo_graph(2))
[(1, 6), (4, 6), (0, 4)]
>>> compute_bridges(__get_demo_graph(3))
[]
>>> compute_bridges({})
[]
"""
id = 0
n = len(graph) # No of vertices in graph
low = [0] * n
visited = [False] * n
def dfs(at, parent, bridges, id):
visited[at] = True
low[at] = id
id += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(to, at, bridges, id)
low[at] = min(low[at], low[to])
if id <= low[to]:
bridges.append((at, to) if at < to else (to, at))
else:
# This edge is a back edge and cannot be a bridge
low[at] = min(low[at], low[to])
bridges = []
for i in range(n):
if not visited[i]:
dfs(i, -1, bridges, id)
return bridges
| 26.39604 | 87 | 0.415604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 964 | 0.36159 |
a877cf6005c10ff86c4c28551acf7a046198d1b7 | 5,487 | py | Python | index.py | ReeLeeSama/Ticket-system | 0f68cefff0488caf537fa8810139be7d8ce203ce | [
"MIT"
] | 1 | 2021-05-15T08:40:00.000Z | 2021-05-15T08:40:00.000Z | index.py | ReeLeeSama/Ticket-system | 0f68cefff0488caf537fa8810139be7d8ce203ce | [
"MIT"
] | null | null | null | index.py | ReeLeeSama/Ticket-system | 0f68cefff0488caf537fa8810139be7d8ce203ce | [
"MIT"
] | null | null | null | import discord
import asyncio
import aiofiles
from discord.ext import commands
intents = discord.Intents.all()
client = commands.Bot(command_prefix=commands.when_mentioned_or('!'),intents=intents)
client.ticket_configs = {}
@client.command()
async def ping(ctx):
embed=discord.Embed(title="Bot Ping",description=f"My ping is {round(client.latency * 1000)}ms ",color=discord.Colour.gold())
await ctx.reply(embed=embed)
@client.event
async def on_ready():
print("Bot is online")
@client.event
async def on_raw_reaction_add(payload): #When a reaction is added
if payload.member.id != client.user.id and str(payload.emoji) == u"\U0001F3AB": #Checks if the reaction is not made by a bot an emoji is "🎫"
msg_id, channel_id, category_id = client.ticket_configs[payload.guild_id]
if payload.message_id == msg_id: #checks if the reaction message is equal to the message id in ticket_configs.txt
guild = client.get_guild(payload.guild_id)
for category in guild.categories:
if category.id == category_id:
break
channel = guild.get_channel(channel_id) #gets the channel id
ticket_channel = await category.create_text_channel(f"ticket-{payload.member.display_name}", topic=f"Ticket for {payload.member.display_name}.", permission_synced=True) #Creates a ticket as "ticket_channel"
f = open(f"tickets/{ticket_channel.id}.txt", "w") #Opens a folder called "tickets" and inside it creates a file with the channel id. Usefull for transcripts
f.close() #closes the file
await ticket_channel.set_permissions(payload.member, read_messages=True, send_messages=True) # Adds the member to the ticket
mention_member = f"{payload.member.mention}"
message = await channel.fetch_message(msg_id)
await message.remove_reaction(payload.emoji, payload.member) #Removes the reaction for the message where you react to make a ticket
creation_embed=discord.Embed(title="Ticket Created",description="Thank you for creating a ticket and make sure that the ticket follows our ticket guidelines and explain the ticket creation reason in detail so our staff can help you.",color=discord.Colour.blurple())
await ticket_channel.send(mention_member,embed=creation_embed) # Mentions the member and sends the embded to the channel where the ticket is created.
@client.command()
async def close(ctx):
channel = ctx.channel
if channel.name.startswith("ticket"): #checks if a channel name starts with "ticket"
await ctx.reply("Are you sure you want to close the ticket? Reply with ``confirm`` to close the ticket.") #Will ask the user to confirm to close the ticket
await client.wait_for("message",check=lambda m: m.channel == ctx.channel and m.author == ctx.author and m.content == "confirm",timeout=10) #Wait for a message with content "confirm" and makes sure that the command runner is the message sender and waits for reply for 10 seconds.
await channel.delete() #If the message is "confirm" it will delete the channel
closer = ctx.author.mention
transcript_chan = client.get_channel(803399751487717396) #channel to send the ticket transcript to.
await transcript_chan.send(closer,file=discord.File(f"tickets/{channel.id}.txt")) #Sends the file to the transcript channel and mentions the ticket closer there.
else:
return
@client.command()
@commands.has_permissions(administrator=True)
async def config(ctx, msg: discord.Message=None, category: discord.CategoryChannel=None): #Usage = !config "message_id category_id" to get the ids enable deveoper mode and right click the message that will be used to create tickets and the category id is the category where the tickets will be created.
if msg is None or category is None: #If a message id or category id is not provided.
error_embed=discord.Embed(title="Ticket Configuration Failed",description="Failed to configure. Either an argument is missing or an invalid argument was passed.",color=discord.Colour.red())
await ctx.channel.send(embed=error_embed)
return
client.ticket_configs[ctx.guild.id] = [msg.id, msg.channel.id, category.id] #Resets the configuration
async with aiofiles.open("ticket_configs.txt", mode="r") as file:
data = await file.readlines()
async with aiofiles.open("ticket_configs.txt", mode="w") as file:
await file.write(f"{ctx.guild.id} {msg.id} {msg.channel.id} {category.id}\n")
for line in data:
if int(line.split(" ")[0]) != ctx.guild.id:
await file.write(line)
await msg.add_reaction(u"\U0001F3AB") # Adds reaction to the message and when someone reacts to this emoji it will create a ticket.
await ctx.channel.send("Successfully configured the ticket system.") # If you get thsi it means that the ticket system has been configured successfully.
@client.event
async def on_message(message):
await client.process_commands(message)#processes the command
if message.channel.name.startswith("ticket"): #check if the channel name starts with "ticket"
f = open(f"tickets/{message.channel.id}.txt", "a") # Opens the channel id in the tickets folder
f.write(f"{message.author} : {message.content}\n") # Write the message author and the message he sent
f.close() #closesthe file
client.run("your_bot_token_here")
| 59.641304 | 303 | 0.720612 | 0 | 0 | 0 | 0 | 5,206 | 0.94827 | 5,064 | 0.922404 | 2,534 | 0.461566 |
a8782932b4cb5b62f9cdd1eb887ce299cb47652d | 1,044 | py | Python | tests/sample_runbooks/set_variable.py | tuxtof/calm-dsl | 5af67435d8304b97e170a690068f2d5975e9bfe6 | [
"Apache-2.0"
] | 37 | 2019-12-23T15:23:20.000Z | 2022-03-15T11:12:11.000Z | tests/sample_runbooks/set_variable.py | gabybeitler/calm-dsl | bac453413cfcf800eef95d89d5a7323c83654a93 | [
"Apache-2.0"
] | 144 | 2020-03-09T11:22:09.000Z | 2022-03-28T21:34:09.000Z | tests/sample_runbooks/set_variable.py | gabybeitler/calm-dsl | bac453413cfcf800eef95d89d5a7323c83654a93 | [
"Apache-2.0"
] | 46 | 2020-01-23T14:28:04.000Z | 2022-03-09T04:17:10.000Z | """
Calm Runbook Sample for set variable task
"""
from calm.dsl.runbooks import read_local_file
from calm.dsl.runbooks import runbook, runbook_json
from calm.dsl.runbooks import RunbookTask as Task
from calm.dsl.runbooks import CalmEndpoint as Endpoint, basic_cred
CRED_USERNAME = read_local_file(".tests/runbook_tests/username")
CRED_PASSWORD = read_local_file(".tests/runbook_tests/password")
VM_IP = read_local_file(".tests/runbook_tests/vm_ip")
Cred = basic_cred(CRED_USERNAME, CRED_PASSWORD, name="endpoint_cred")
endpoint = Endpoint.Linux.ip([VM_IP], cred=Cred)
@runbook
def DslSetVariableTask(endpoints=[endpoint], default=False):
"Runbook example with Set Variable Tasks"
Task.SetVariable.escript(script="print 'var1=test'", variables=["var1"])
Task.SetVariable.ssh(
filename="scripts/sample_script.sh", variables=["var2"], target=endpoints[0]
)
Task.Exec.escript(script="print '@@{var1}@@ @@{var2}@@'")
def main():
print(runbook_json(DslSetVariableTask))
if __name__ == "__main__":
main()
| 29.828571 | 84 | 0.749042 | 0 | 0 | 0 | 0 | 372 | 0.356322 | 0 | 0 | 293 | 0.280651 |
a87a09274174fa57021b101cc6600c408162ecf0 | 3,921 | py | Python | CL_tools/build_cdf2.py | NLP2CT/norm-nmt | b15e904fbfab6595c26fedf58e9023d36b563e95 | [
"MIT"
] | 13 | 2020-06-01T13:00:23.000Z | 2022-01-29T01:40:40.000Z | CL_tools/build_cdf2.py | NLP2CT/norm-nmt | b15e904fbfab6595c26fedf58e9023d36b563e95 | [
"MIT"
] | 2 | 2020-07-01T07:04:52.000Z | 2021-10-13T03:07:53.000Z | CL_tools/build_cdf2.py | NLP2CT/norm-nmt | b15e904fbfab6595c26fedf58e9023d36b563e95 | [
"MIT"
] | 3 | 2020-06-01T12:59:36.000Z | 2020-07-04T13:57:02.000Z | #!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import math
import numpy as np
def count_words(filename):
counter = collections.Counter()
with open(filename, "r") as fd:
for line in fd:
words = line.strip().split()
counter.update(words)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, counts = list(zip(*count_pairs))
return words, counts
def control_symbols(string):
if not string:
return []
else:
return string.strip().split(",")
def save_vocab(name, vocab):
if name.split(".")[-1] != "txt":
name = name + ".txt"
# pairs = sorted(vocab.items(), key=lambda x: (x[1], x[0]))
pairs = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
words, ids = list(zip(*pairs))
# total freq
T_freq = sum(ids)
with open(name, "w") as f:
for i, word in enumerate(words):
# f.write(word + " " + str(ids[i]) + "\n")
f.write(word + " " + "%.16f" % (ids[i] / T_freq) + "\n")
# write total freq
def cal_cdf_model(corpus, vocab):
pairs = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
words, ids = list(zip(*pairs))
freq_dict = {}
for word, id in zip(words, ids):
freq_dict[word] = id
T_freq = sum(ids)
data = []
debug = 0
with open(corpus, "r") as f:
for line in f.readlines():
line = line.split()
SUM = 0
for w in line:
p = freq_dict[w] / T_freq
if p != 0:
SUM += math.log(p)
SUM = -SUM
data.append(SUM)
# if SUM < 5.718:
# debug += 1
# print (SUM)
# data contains all sum log
# bins='auto'
v, base = np.histogram(data, bins=np.arange(1000))
print ("data:", data[:50])
print ("value", v[:50])
base = base.astype(np.float32)
print ("base:", base[:50])
print ("highest value:", base[-1])
print ("len of base:", len(base))
# print ("debug:", debug)
cdf = np.cumsum(v)
cdf = cdf / len(data)
cdf = cdf.astype(np.float32)
print ("cdf:", cdf, cdf.dtype)
print ("outputing cdf and bases.")
# res = {"cdf": cdf, "base": base}
np.savez(args.output + "-cdf_base.npz", cdf=cdf, base=base)
def parse_args():
parser = argparse.ArgumentParser(description="Create vocabulary")
parser.add_argument("corpus", help="input corpus")
parser.add_argument("output", default="vocab.txt",
help="Output vocabulary name")
parser.add_argument("--limit", default=0, type=int, help="Vocabulary size")
parser.add_argument("--control", type=str, default="",
help="Add control symbols to vocabulary. "
"Control symbols are separated by comma.")
return parser.parse_args()
args=parse_args()
def main():
vocab = {}
limit = args.limit
count = 0
words, counts = count_words(args.corpus)
ctrl_symbols = control_symbols(args.control)
for sym in ctrl_symbols:
vocab[sym] = len(vocab)
for word, freq in zip(words, counts):
if limit and len(vocab) >= limit:
break
if word in vocab:
print("Warning: found duplicate token %s, ignored" % word)
continue
# vocab[word] = len(vocab)
# print(word, freq)
vocab[word] = freq
count += freq
save_vocab(args.output, vocab)
cal_cdf_model(args.corpus, vocab)
print("Total words: %d" % sum(counts))
print("Unique words: %d" % len(words))
print("Vocabulary coverage: %4.2f%%" % (100.0 * count / sum(counts)))
if __name__ == "__main__":
main()
| 27.041379 | 79 | 0.566692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 849 | 0.216526 |
a87a0c58e2acdc88f9a0c4132a845c88665fa4ac | 1,531 | py | Python | stubs.min/Autodesk/Revit/DB/__init___parts/BRepBuilderGeometryId.py | denfromufa/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2017-07-07T11:15:45.000Z | 2017-07-07T11:15:45.000Z | stubs.min/Autodesk/Revit/DB/__init___parts/BRepBuilderGeometryId.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/Autodesk/Revit/DB/__init___parts/BRepBuilderGeometryId.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class BRepBuilderGeometryId(object,IDisposable):
"""
This class is used by the BRepBuilder class to identify objects it creates (faces,edges,etc.).
BRepBuilderGeometryId(other: BRepBuilderGeometryId)
"""
def Dispose(self):
""" Dispose(self: BRepBuilderGeometryId) """
pass
@staticmethod
def InvalidGeometryId():
"""
InvalidGeometryId() -> BRepBuilderGeometryId
Returns an invalid BRepBuilderGeometryId,used as a return value to indicate an
error.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: BRepBuilderGeometryId,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,other):
""" __new__(cls: type,other: BRepBuilderGeometryId) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: BRepBuilderGeometryId) -> bool
"""
| 33.282609 | 215 | 0.701502 | 1,525 | 0.996081 | 0 | 0 | 320 | 0.209014 | 0 | 0 | 1,024 | 0.668844 |
a87b6a507b0104731193175cfd2830a1d450aa86 | 873 | py | Python | tools/annotation/gPro_profile.py | hidelab/galaxy-central-hpc | 75539db90abe90377db95718f83cafa7cfa43301 | [
"CC-BY-3.0"
] | null | null | null | tools/annotation/gPro_profile.py | hidelab/galaxy-central-hpc | 75539db90abe90377db95718f83cafa7cfa43301 | [
"CC-BY-3.0"
] | null | null | null | tools/annotation/gPro_profile.py | hidelab/galaxy-central-hpc | 75539db90abe90377db95718f83cafa7cfa43301 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
from anno_lib import gPro
import sys
##
# Runs the GOst profiler from g:Profiler
####
# jje 10152011
# Oliver Hofmann
# Bioinformatics Core
# Harvard School of Public Health
####
#term = "GO:0007050"
#gost_loc = "http://biit.cs.ut.ee/gprofiler/"
try:
idfile = sys.argv[1]
outfile = sys.argv[2]
loc = sys.argv[3]
spec = sys.argv[4]
pcut = sys.argv[5]
except IOError as (errno, strerror):
print "usage: gPro_profile.py infile outfile gPro_URL p-value_cutoff"
''' open and read in ids '''
with open(idfile) as ids:
id_raw = ids.read()
# convert ids from a list to space delim string
id_form = id_raw.replace("\n"," ")
gpro_obj = gPro.Profiler(loc,spec)
content = gpro_obj.ask_pcut(id_form,pcut)
# parse for term enrichment
key_content = gpro_obj.break_GOSt(content)
output = open(outfile,"w")
output.write(key_content)
output.close()
| 19.4 | 70 | 0.710195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 395 | 0.452463 |
a87bc89cece67977cd20dc1b823775f648a5a6ea | 238 | py | Python | glue/core/exceptions.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | [
"BSD-3-Clause"
] | 550 | 2015-01-08T13:51:06.000Z | 2022-03-31T11:54:47.000Z | glue/core/exceptions.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | [
"BSD-3-Clause"
] | 1,362 | 2015-01-03T19:15:52.000Z | 2022-03-30T13:23:11.000Z | glue/core/exceptions.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | [
"BSD-3-Clause"
] | 142 | 2015-01-08T13:08:00.000Z | 2022-03-18T13:25:57.000Z |
class IncompatibleAttribute(Exception):
pass
class IncompatibleDataException(Exception):
pass
class UndefinedROI(Exception):
pass
class InvalidSubscriber(Exception):
pass
class InvalidMessage(Exception):
pass
| 11.9 | 43 | 0.752101 | 224 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a87ca034edbcf0ccd37f65c8627b5f05c80d33a3 | 5,135 | py | Python | deploy/proc_vector.py | loadwiki/insightface | a641d7c83efebbfd44e6d5ce591646a508744425 | [
"MIT"
] | null | null | null | deploy/proc_vector.py | loadwiki/insightface | a641d7c83efebbfd44e6d5ce591646a508744425 | [
"MIT"
] | null | null | null | deploy/proc_vector.py | loadwiki/insightface | a641d7c83efebbfd44e6d5ce591646a508744425 | [
"MIT"
] | null | null | null | import face_model
import argparse
import os
import cv2
import sys
import numpy as np
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--enable-gpu', default=False, type=bool, help='enable to detect and inference in GPU')
#parser.add_argument('--model', default='../models2/model-r100-sfz/model,8', help='path to load model.')
parser.add_argument('--model', default='../../model/m1-insightv3/model,0', help='path to load model.')
parser.add_argument('--ga-model', default='', help='path to load model.')
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from begining')
parser.add_argument('--mode', default=1, type=int, help='mode, 0: do nothing, 1:append or merge depends on merge threshold')
parser.add_argument('--threshold', default=0.4, type=float, help='cosine threshold')
parser.add_argument('--merge-threshold', default=0.8, type=float, help='cosine threshold for merge. 1.0: never do merging; -1.0: to merge everytime')
parser.add_argument('--append-threshold', default=0.55, type=float, help='cosine threshold for append. 1.0: never do append; -1.0: to append everytime')
parser.add_argument('--only-replace', default=False, type=bool, help='replace most similar vecotor when list is full,no merge')
parser.add_argument('--max_vector_size', default=8, type=int, help='')
parser.add_argument('--input', default='camera-video1', type=str, help='input npy file name')
args = parser.parse_args()
merge_count=0
append_count=0
full_count=0
none_count=0
print 'count init!'
X = np.load(args.input+'/X.npy')
#for i in xrange(X.shape[0]):
# if i==0:
# continue
# a = X[i]
# sims = []
# for j in xrange(0, i):
# b = X[j]
# sim = np.dot(a, b)
# sims.append(sim)
# print(i,max(sims))
def update_sim_score(vec_list):
for i,vec1 in enumerate(vec_list):
max_ids = 0
max_sims = 0.0
for j,vec2 in enumerate(vec_list):
if i == j:
next
else:
sim = np.dot(vec1[0], vec2[0])
if sim > max_sims:
max_sims = sim
max_ids = j
vec1[1] = max_ids
vec1[2] = max_sims
def insert_vec(vec_list, input_vec, input_id, input_score):
global merge_count
global append_count
global full_count
global none_count
if input_score > args.merge_threshold:
print 'do merge'
new_vec = vec_list[input_id][0] + input_vec
new_vec = new_vec / np.linalg.norm(new_vec)
vec_list[input_id][0] = new_vec
update_sim_score(vec_list)
merge_count+=1
elif input_score < args.append_threshold:
if len(vec_list) < args.max_vector_size:
vec_list.append([input_vec,input_id,input_score])
update_sim_score(vec_list)
print 'append to list'
append_count+=1
else:
max_score = 0.0
max_id = -1
print 'merge when list if full'
full_count+=1
for i,vec in enumerate(vec_list):
if max_score < vec[2]:
max_score = vec[2]
max_id = i
if max_score > input_score:
if args.only_replace==False:
vec2_id = vec_list[max_id][1]
new_vec = vec_list[max_id][0] + vec_list[vec2_id][0]
new_vec = new_vec / np.linalg.norm(new_vec)
vec_list[max_id][0] = new_vec
vec_list[vec2_id][0] = input_vec
update_sim_score(vec_list)
else:
vec_list[max_id][0] = input_vec
update_sim_score(vec_list)
else:
if args.only_replace==False:
new_vec = input_vec + vec_list[input_id][0]
new_vec = new_vec / np.linalg.norm(new_vec)
vec_list[input_id][0] = new_vec
update_sim_score(vec_list)
else:
pass
else:
none_count+=1
identities = {0: [[X[0],0,0]]}
face_img = {0:[0]}
for i in xrange(1, X.shape[0]):
a = X[i]
ids = []
sims = []
vector_indexes = []
for _id, vectors in identities.iteritems():
for vector_idx, vector in enumerate(vectors):
sim = np.dot(a, vector[0])
sims.append(sim)
ids.append(_id)
vector_indexes.append(vector_idx)
max_idx = np.argmax(sims)
max_score = sims[max_idx]
max_id = ids[max_idx]
max_vector_idx = vector_indexes[max_idx]
print(i, max_score, max_id)
if max_score<args.threshold:
new_id = len(identities)
identities[new_id] = [[a,0,0]]
face_img[new_id] = [i]
print 'append new id ', new_id
else:
if args.mode==1:
face_img[max_id].append(i)
insert_vec(identities[max_id],a,max_vector_idx,max_score)
print('final', len(identities))
print 'merge_count ', merge_count
print 'append_count ', append_count
print 'full_count', full_count
print 'none_count', none_count
os.system('mkdir ' + args.input + '/id')
for face_id,img_list in face_img.items():
new_dir = args.input + '/id/' + str(face_id)
os.system('mkdir ' + new_dir)
for img in img_list:
src = args.input + '/' + str(img) + '.jpg '
os.system('cp ' + src + new_dir)
print('face_id:%d, img idx:%d' % (face_id,img)) | 34.006623 | 152 | 0.659007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,224 | 0.238364 |
a87dc28a95aae4aa7718fabb0f98ba00c0f8f068 | 773 | py | Python | word2vec_model/BagCentroids.py | wingedRuslan/Sentiment-Analysis | 6dbc90175a2b42e33e0779f4a09b04ea99689534 | [
"MIT"
] | null | null | null | word2vec_model/BagCentroids.py | wingedRuslan/Sentiment-Analysis | 6dbc90175a2b42e33e0779f4a09b04ea99689534 | [
"MIT"
] | null | null | null | word2vec_model/BagCentroids.py | wingedRuslan/Sentiment-Analysis | 6dbc90175a2b42e33e0779f4a09b04ea99689534 | [
"MIT"
] | null | null | null |
def create_bag_of_centroids(wordlist, word_centroid_map):
"""
a function to create bags of centroids
"""
# The number of clusters is equal to the highest cluster index in the word / centroid map
num_centroids = max( word_centroid_map.values() ) + 1
# Pre-allocate the bag of centroids vector (for speed)
bag_of_centroids = np.zeros(num_centroids, dtype="float32")
# Loop over the words in the tweet. If the word is in the vocabulary,
# find which cluster it belongs to, and increment that cluster count by one
for word in wordlist:
if word in word_centroid_map:
index = word_centroid_map[word]
bag_of_centroids[index] += 1
# Return numpy array
return bag_of_centroids
| 35.136364 | 93 | 0.676585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 374 | 0.483829 |
a87de031996b8c0feaf819ed4c35d7f0c764409a | 17,590 | py | Python | build/lib/scripts/series_tools.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | 3 | 2019-09-05T14:47:02.000Z | 2021-11-12T15:31:56.000Z | build/lib/scripts/series_tools.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | 2 | 2019-11-13T21:36:22.000Z | 2019-12-16T21:16:43.000Z | build/lib/scripts/series_tools.py | nicolas998/ifis_tools | f7b06473a916324fc37937bc5e9034cc57bc1623 | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # series_tools:
#
# set of tools that work with streamflow records.
# - Identify events.
# - Identify baseflow and runoff.
#
import pandas as pd
import numpy as np
# ## Digital filters
#
# Collection of functions to separate runoff from baseflow.
# +
def DigitalFilters(Q,tipo = 'Eckhart', a = 0.98, BFI = 0.8):
    '''Digital filters to separate baseflow from runoff in a continuous time series.
    Parameters:
        - Q: pandas series with the streamflow records.
        - tipo: type of filter to be used.
            - 'Eckhart' or 1 (two parameters: a and BFI).
            - 'Nathan' or 2 (one parameter: a).
            - 'Chapman' or 3 (one parameter: a).
        - a: recession parameter of the filter.
            - Eckhart: 0.98.
            - Nathan: 0.8.
            - Chapman: 0.8.
        - BFI: maximum baseflow index (0-1); only applies for the Eckhart filter.
    Returns:
        - Pandas DataFrame with columns Runoff and Baseflow, same index as Q.
    Raises:
        - ValueError: if tipo is not one of the supported filters.'''
    #Functions definitions.
    def Nathan1990(Q, a = 0.8):
        '''One parameter digital filter of Nathan and McMahon (1990)'''
        R = np.zeros(Q.size)
        c = 1
        for q1, q2 in zip(Q[:-1], Q[1:]):
            R[c] = a*R[c-1] + ((1+a)/2.)*(q2-q1)
            # Constrain runoff to the physical range [0, streamflow].
            if R[c] < 0:
                R[c] = 0
            elif R[c] > q2:
                R[c] = q2
            c += 1
        B = Q - R
        return R, B

    def Eckhart2005(Q, BFI=0.8, a = 0.98):
        '''Two parameter Eckhart digital filter
        Parameters:
            - Q: np.ndarray with the streamflow records.
            - BFI: The maximum amount of baseflow (%).
            - a: parameter alpha (0.98)
        Output:
            - R: total runoff.
            - B: total baseflow.'''
        #Separation
        B = np.zeros(Q.size)
        B[0] = Q[0]
        c = 1
        for q in Q[1:]:
            #Separation equation
            B[c] = ((1.0-BFI)*a*B[c-1]+(1.0-a)*BFI*q)/(1.0-a*BFI)
            #Constrain baseflow to never exceed the streamflow.
            if B[c] > q:
                B[c] = q
            c += 1
        R = Q - B
        return R, B

    def ChapmanMaxwell1996(Q, a = 0.98):
        '''Digital filter proposed by Chapman and Maxwell (1996)'''
        B = np.zeros(Q.size)
        c = 1
        for q in Q[1:]:
            B[c] = (a / (2.-a))*B[c-1] + ((1.-a)/(2.-a))*q
            c += 1
        R = Q - B
        return R, B

    #Dispatch to the selected filter.
    if tipo == 'Eckhart' or tipo == 1:
        # BUG FIX: the original passed (a, BFI) positionally into
        # Eckhart2005(Q, BFI, a), swapping the two parameters.
        R, B = Eckhart2005(Q.values, BFI=BFI, a=a)
    elif tipo == 'Nathan' or tipo == 2:
        R, B = Nathan1990(Q.values, a)
    elif tipo == 'Chapman' or tipo == 3:
        R, B = ChapmanMaxwell1996(Q.values, a)
    else:
        raise ValueError('Unknown filter type: %s' % tipo)
    #Returns the series as a two-column DataFrame.
    return pd.DataFrame(np.vstack([R,B]).T, index = Q.index, columns = ['Runoff','Baseflow'])
# -
# ## Events selection functions
#
# Collection of functions to identify peaks in a series and the end of each peak recession.
# +
def Events_Get_Peaks(Q, Qmin = None, tw = pd.Timedelta('12h')):
    '''Locate the peak values of the hydrographs in a series.
    Params:
        - Q: Pandas series with the records.
        - Qmin: minimum value of Q for a record to count as a peak;
          when None, the 99th percentile of the finite records is used.
        - tw: size of the time window used to suppress surrounding maxima
          so close, lower peaks are not selected twice.'''
    # Default threshold: 99th percentile of the finite records.
    if Qmin is None:
        finite = Q.values[np.isfinite(Q.values)]
        Qmin = np.percentile(finite, 99)
    # Candidate peaks are all records above the threshold.
    candidates = Q[Q > Qmin]
    untouched = candidates.copy()
    # Iteratively take the global maximum and blank out its neighbourhood.
    selected = []
    while True:
        when = candidates.idxmax()
        selected.append(when)
        candidates[when - tw:when + tw] = -9
        if candidates.max() < Qmin:
            break
    # Report the selected peaks in chronological order.
    return untouched[selected].sort_index()
def Events_Get_End(Q, Qmax, minDif = 0.04, minDistance = None,maxSearch = 10, Window = '1h'):
    '''Find the end of each selected event in order to know the
    longitude of each recession event.
    Parameters:
        - Q: Pandas series with the records.
        - Qmax: Pandas series with the peak streamflows.
        - minDif: The minimum rise in the hourly record to consider that
          a recession is over.
    Optional:
        - minDistance: minimum temporal distance between the peak and the end.
        - maxSearch: maximum number of candidate rises inspected per peak.
        - Window: kept for interface compatibility; currently unused, the
          series is always smoothed to a 1h resolution.
    Returns:
        - Qend: series with the streamflow at the end of each recession.
        - Qmax: the peaks for which an end was actually found.'''
    #Hourly smoothing and forward difference of the record.
    X = Q.resample('1h').mean()
    dX = pd.Series(X.values[1:] - X.values[:-1], index=X.index[:-1])
    #Obtains the end points.
    DatesEnds = []
    Correct = []
    for peakIndex in Qmax.index:
        end = None
        try:
            #Differences after the peak; a rise larger than minDif marks
            #the end of the recession.
            after = dX[dX.index > peakIndex]
            rises = after[after > minDif].index
            if minDistance is None:
                end = rises[0]
            else:
                #Take the first rise far enough from the peak, inspecting
                #at most maxSearch+1 candidates (as in the original loop).
                c = 0
                while end is None and c <= maxSearch:
                    if rises[c] - peakIndex > minDistance:
                        end = rises[c]
                    c += 1
        except IndexError:
            #No rise found after the peak: fall through to the failure case.
            end = None
        if end is None:
            # BUG FIX: the original could append to Correct without appending
            # to DatesEnds (when maxSearch was exhausted), misaligning both
            # lists. Failures now consistently record the peak date itself
            # and are flagged so they are excluded from the returned peaks.
            DatesEnds.append(peakIndex)
            Correct.append(1)
        else:
            DatesEnds.append(end)
            Correct.append(0)
    #Returns the pandas series with the end values and the valid peaks.
    Correct = np.array(Correct)
    return pd.Series(Q[DatesEnds], index=DatesEnds), Qmax[Correct == 0]
# -
# ## Runoff analysis
# +
def Runoff_SeparateBaseflow(Qobs, Qsim):
    '''From observed records obtain the baseflow and runoff streamflow records.
    Parameters:
        - Qobs: Observed record, dt < 1h.
        - Qsim: Simulated record, dt < 1h.
    Returns:
        - Qh: Observed records aggregated to an hourly scale.
        - Qsh: Simulated records aggregated to an hourly scale.
        - Qsep: DataFrame with the observed flow separated into Runoff and
          Baseflow (Nathan filter, a=0.998) at an hourly scale.'''
    # Observed series to hourly scale; gaps and negatives are replaced with
    # the series mean so the digital filter receives a continuous record.
    obs_hourly = Qobs.resample('1h').mean()
    obs_hourly[np.isnan(obs_hourly)] = obs_hourly.mean()
    obs_hourly[obs_hourly < 0] = obs_hourly.mean()
    separated = DigitalFilters(obs_hourly, tipo='Nathan', a=0.998)
    # Simulated series to hourly scale; gaps become zero flow.
    sim_hourly = Qsim.resample('1h').mean()
    sim_hourly[np.isnan(sim_hourly)] = 0.0
    # Return results.
    return obs_hourly, sim_hourly, separated
def Runoff_FindEvents(Qobs, Qsim, minTime = 1, minConcav = None, minPeak = None):
    '''Separates runoff from baseflow and finds the events.
    Parameters:
        - Qobs: Hourly observed streamflow.
        - Qsim: Hourly simulated streamflow.
        - minTime: minimum duration of the event.
        - minConcav: minimum concavity of the event.
        - minPeak: minimum value of the peakflows.
    Returns:
        - pos1: pandas index list with the initial positions.
        - pos2: pandas index list with the end positions.'''
    #Runoff occurrence threshold: 20th percentile of the observations.
    Qmin = np.percentile(Qobs, 20)
    #Obtain the raw event positions and filter them.
    pos1, pos2 = __Runoff_Get_Events__(Qsim, Qmin)
    # BUG FIX: the original hard-coded minTime=1 in this call, silently
    # ignoring the minTime argument passed by the caller.
    pos1, pos2 = __Runoff_Del_Events__(Qobs, pos1, pos2, minTime=minTime,
                                       minConcav=minConcav, minPeak=minPeak)
    #Returns results.
    return pos1, pos2
def Runoff_CompleteAnalysis(Area, Qobs, Rain, Qsep, pos1, pos2, N=None, Nant = None):
    '''Obtains the DataFrame with the resume of the RC (runoff coefficient) analysis.
    Parameters:
        - Area: the area of the basin in km2.
        - Qobs: Hourly observed streamflow.
        - Rain: Hourly rainfall (assumed in mm -- TODO confirm; depths are
          divided by 1000 to obtain meters before multiplying by the area).
        - Qsep: Hourly DataFrame with the separated flows; must contain a
          'Runoff' column (as produced by Runoff_SeparateBaseflow).
        - pos1: pandas index lists with the initial positions.
        - pos2: pandas index lists with the end positions.
        - N: Number of days to eval the rainfall between p1-N: p2.
          NOTE(review): when N is provided it is used directly as the slicing
          window, so it must be a pd.Timedelta (not an int) -- confirm with
          callers.
        - Nant: Number of antecedent days to eval the rainfall between
          p1-Nant : p1-N (same type caveat as N).
    Results:
        - DataFrame indexed by event start date with the columns:
          RC, RainEvent, RainBefore, RainInt, Qmax'''
    #Search for N: if not given, derive the time window from the basin area.
    if N is None:
        #Time window based on the basin area: N = Area^0.2 rounded down to an
        #even number plus one (odd), then clipped to the range [3, 11] days.
        N = Area**0.2
        N = np.floor(N) // 2 * 2 + 1
        if N<3: N = 3
        if N>11: N = 11
        Ndays = pd.Timedelta(str(N)+'d')
        if Nant is None:
            #Antecedent window: 3 days more than the event window.
            Nant = pd.Timedelta(str(N+3)+'d')
    else:
        #User-supplied window (expected to be a pd.Timedelta, see docstring).
        Ndays = N
        if Nant is None:
            Nant = N + pd.Timedelta('3d')
    #Lists of per-event results, accumulated in the loop below.
    RC = []
    RainTot = []
    Date = []
    Qmax = []
    RainInt = []
    RainAnt = []
    #Get Values for each event delimited by (pi, pf).
    for pi,pf in zip(pos1, pos2):
        #Event runoff volume [m3]: hourly mean flows summed, times 3600 s.
        Runoff = Qsep['Runoff'][pi:pf+Ndays].sum()*3600.
        #Event rainfall volume [m3]: depth (mm -> m) times area (km2 -> m2).
        Rainfall = (Rain[pi-Ndays:pf].sum()/1000.)*(Area*1e6)
        #Runoff and streamflow List updates: peak flow and runoff coefficient.
        Qmax.append(Qobs[pi:pf].max())
        RC.append(Runoff / Rainfall)
        #Rainfall list updates: total depth, max intensity, antecedent depth.
        RainTot.append(Rain[pi-Ndays:pf].sum())
        RainInt.append(Rain[pi-Ndays:pf].max())
        RainAnt.append(Rain[pi-Ndays-Nant:pi-Ndays].sum())
        #Dates: each event is identified by its start date.
        Date.append(pi)
    #Converts to arrays for the vectorized filtering below.
    RC = np.array(RC)
    RainTot = np.array(RainTot)
    RainInt = np.array(RainInt)
    RainAnt = np.array(RainAnt)
    Date = np.array(Date)
    Qmax = np.array(Qmax)
    #Select the correct values: finite RC first, then RC inside (0.0, 1.0].
    p1 = np.where(np.isfinite(RC))[0]
    p2 = np.where((RC[p1]<=1.0) & (RC[p1]>0.0))[0]
    #Keep only the valid events.
    RC = RC[p1[p2]]
    RainTot = RainTot[p1[p2]]
    RainInt = RainInt[p1[p2]]
    RainAnt = RainAnt[p1[p2]]
    Date = Date[p1[p2]]
    Qmax = Qmax[p1[p2]]
    #Discard suspicious events: non-negligible RC with almost no rainfall
    #(RC > 0.04 while total rain < 10) -- thresholds appear empirical.
    pos = np.where((RC>0.04) & (RainTot<10))[0]
    #Filter the arrays once more.
    RC = np.delete(RC, pos)
    RainTot = np.delete(RainTot, pos)
    RainInt = np.delete(RainInt, pos)
    RainAnt = np.delete(RainAnt, pos)
    Date = np.delete(Date, pos)
    Qmax = np.delete(Qmax, pos)
    #Turns things into a DataFrame indexed by the event start dates.
    Data = pd.DataFrame(
        np.vstack([RC, RainTot, RainAnt, RainInt, Qmax]).T,
        index= Date,
        columns=['RC', 'RainEvent', 'RainBefore','RainInt','Qmax'])
    return Data
def Runoff_groupByRain(D, groupby = 'RainEvent' , bins = None,
    Vmin=None, Vmax=None, Nb = 10, logx = True):
    '''Group the values of RC in function of a variable.
    Parameters:
        - D: pandas DataFrame with the results from the RC analysis.
        - groupby: name of the column to use for the groups.
        - bins: explicit bin edges; when None they are derived from Vmin, Vmax and Nb.
        - Vmin: minimum value to set the groups (default: min of the grouping variable).
        - Vmax: max value to set the groups (default: max of the grouping variable).
        - Nb: number of bin edges used when *bins* is None.
        - logx: if True, group on the natural log of the variable.
    Results:
        - Dictionary with, per non-empty bin: the RC values (RC), the 25th/50th/75th
          percentiles (P25, P50, P75), the bin midpoint (X) and the grouping
          variable values (under the *groupby* key).'''
    #Work on the log of the grouping variable when requested.
    if logx:
        x = np.log(D[groupby])
    else:
        x = D[groupby]
    #Default limits come from the (possibly log-transformed) variable itself.
    if Vmin is None: Vmin = x.min()
    if Vmax is None: Vmax = x.max()
    #Set the bin edges.
    if bins is None:
        b = np.linspace(Vmin, Vmax, Nb)
    else:
        b = bins
    #Accumulate the statistics bin by bin; empty bins are skipped entirely.
    DicStats = {'RC':[],'P25':[],'P75':[],'P50':[], 'X': [], groupby: []}
    for i,j in zip(b[:-1], b[1:]):
        # NOTE(review): p holds *positions*; D['RC'][p] relies on pandas'
        # positional fallback for a non-integer (date) index -- confirm the
        # DataFrame always carries the date index produced by the RC analysis.
        p = np.where((x>=i) & (x<=j))[0]
        if p.size > 0:
            DicStats['RC'].append(D['RC'][p])
            DicStats['P25'].append(np.percentile(D['RC'][p], 25))
            DicStats['P50'].append(np.percentile(D['RC'][p], 50))
            DicStats['P75'].append(np.percentile(D['RC'][p], 75))
            DicStats['X'].append((i+j)/2.)
            DicStats[groupby].append(x[p])
    return DicStats
#-------------------------------------------------------------------------------------------
## Backgroudn functions.
def __Runoff_Get_Events__(Q, Umbral):
    '''Obtains the initial and end dates of the events related to
    a time series based on the results from the Asynch 190.
    Parameters:
        - Q: pandas series with the streamflow (simulated from asynch 190 no infiltration).
        - Umbral: threshold above which a time step belongs to an event.
    Returns:
        - pos1: initial date of each event.
        - pos2: end date of each event'''
    #Positions (integer) where the series exceeds the threshold.
    pos = np.where(Q.values > Umbral)[0]
    #Gaps between consecutive over-threshold positions; a gap > 1 marks an
    #event boundary.
    Dpos = pos[1:] - pos[:-1]
    #Event starts: label each gap with the *later* timestamp, keep gaps > 1,
    #prepend the very first over-threshold time, and drop the last candidate
    #so starts and ends stay paired.
    Dpos1 = pd.Series(Dpos, Q.index[pos[1:]])
    pos1 = Dpos1[Dpos1>1].index
    pos1 = pos1.insert(0, Q.index[pos][0])
    pos1 = pos1[:-1]
    #Event ends: label each gap with the *earlier* timestamp and keep gaps > 1.
    Dpos2 = pd.Series(Dpos, Q.index[pos[:-1]])
    pos2 = Dpos2[Dpos2>1].index
    #returns results
    return pos1, pos2
def __Runoff_Get_eventsPeaks__(Q, pos1, pos2):
    '''Return the peak streamflow of each selected event window.
    Parameters:
        - Q: pandas Series with the observed data.
        - pos1: iterable with the start label of each event.
        - pos2: iterable with the end label of each event.
    Returns:
        - List with the peak (nan-aware max) of every event.'''
    return [np.nanmax(Q[start:end].values) for start, end in zip(pos1, pos2)]
def __Runoff_Del_Events__(Q, pos1, pos2, minTime = 2.5, minPeak = None, minConcav = None):
    '''Eliminates events from the selected initial peaks based on different
    aspects such as min duration of the event, min peak and the concavity.
    Parameters:
        - Q: pandas series with the observed streamflow.
        - pos1: Pandas indexes with the start of the events.
        - pos2: Pandas indexes with the end of the events.
        - minTime: minimum duration (days) of the hydrographs.
        - minPeak: minimum value of the peak of the hydrographs.
        - minConcav: minimum concavity for the hydrograph (suggested: 10).
    Returns:
        - pos1: pandas index with the corrected starts.
        - pos2: pandas indexes with the corrected ends.
    NOTE(review): this helper calls ``Series_Get_eventsPeaks`` -- presumably
    defined elsewhere in the module; if it is not, these branches raise
    NameError (the sibling ``__Runoff_Get_eventsPeaks__`` looks equivalent).'''
    #Eliminates events based on their duration
    if minTime is not None:
        #Duration of each event, converted from a TimedeltaIndex to days.
        Td = pos2 - pos1
        Td = Td.total_seconds()/(3600*24)
        Td = Td.values
        #Drop starts/ends of events shorter than minTime.
        p = np.where(Td<minTime)[0]
        pos1 = pos1.delete(p)
        pos2 = pos2.delete(p)
    #Eliminates events based on the peak flow
    if minPeak is not None:
        #Peak discharge of each remaining event.
        Peaks = Series_Get_eventsPeaks(Q, pos1, pos2)
        Peaks = np.array(Peaks)
        #Drop events whose peak is below minPeak.
        p = np.where(Peaks<minPeak)[0]
        pos1 = pos1.delete(p)
        pos2 = pos2.delete(p)
    #Eliminates events based on the concavity criterion
    if minConcav is not None:
        #Second difference of the 5-hourly mean flow as a concavity proxy.
        Concav = Q.resample('5h').mean().diff(2)
        Concav = Series_Get_eventsPeaks(Concav, pos1, pos2)
        #Drop events whose maximum concavity is below minConcav.
        p = np.where(np.array(Concav)<minConcav)[0]
        pos1 = pos1.delete(p)
        pos2 = pos2.delete(p)
    #Returns the result
    return pos1, pos2
# -
# ## Recession analysis
# +
#Function to obtain a
def Recession_NDF_method(l):
    '''Fit the recession coefficient A for a fixed exponent B (NDF method).
    Parameters (packed in a single list so the function works with Pool.map):
        - l[0]: pandas Series with the recession record (datetime index,
          constant time step).
        - l[1]: recession exponent B, between 0 and 5.
    Returns:
        - A: fitted recession coefficient.
        - E: relative volume error of the fit (1000 when the simulation
          produced NaNs, i.e. the (A, B) pair is unfeasible).
        - Qsim: np.ndarray with the simulated recession.'''
    Q, B = l[0], l[1]

    def _fit_A(series, b, step):
        # Estimate A from the flow decrements for the given exponent b and
        # time step (seconds). NOTE(review): the denominator uses half the
        # *difference* of consecutive flows, not their midpoint -- kept
        # exactly as written to preserve the original fit.
        drops = series.values[:-1] - series.values[1:]
        num = np.nansum(drops)
        den = step * np.nansum((drops / 2.) ** b)
        return num / den

    def _simulate(series, b, a):
        # Closed-form NDF recession, started from the first record value,
        # over the elapsed time in hours.
        stamps = series.index.astype('int64') / 1e9
        elapsed = (stamps.values - stamps.values[0]) / 3600.
        q0 = series.values[0]
        return q0 * (1 - ((1. - b) * a * elapsed / q0 ** (1. - b))) ** (1. / (1. - b))

    # Time step of the record, in seconds.
    step = (Q.index[1] - Q.index[0]).value / 1e9
    A = _fit_A(Q, B, step)
    Qsim = _simulate(Q, B, A)
    # Any NaN in the simulation marks an unfeasible (A, B) pair.
    if np.isnan(Qsim).sum() == 0:
        E = (Qsim.sum() - Q.sum()) / Qsim.sum()
    else:
        E = 1000
    return A, E, Qsim
# search B for recession
def Recession_Search_NDF(Q,Initial = 0, Long=1 ,process = 8, Window = 1, step = 0.01):
    '''Search for the optimum value of B and A for a hydrograph recession.
    Parameters:
        - Q: pandas Series with the hydrograph (datetime index).
        - Initial: offset (hours) after the peak where the recession starts.
        - Long: recession length in days.
        - process: number of worker processes used for the B grid search.
        - Window: currently unused.
        - step: grid spacing for the candidate B values in [0, 5).
    Returns:
        - best B, best A, absolute error of the best fit, and the simulated
          recession as a pandas Series on the analysed window.
    NOTE(review): the pool maps ``NDF_method`` -- only ``Recession_NDF_method``
    is defined in this module, so this looks like a stale name after a
    rename (NameError at runtime); confirm against the rest of the project.
    ``Pool`` must come from ``multiprocessing`` -- the import is not visible
    in this chunk.'''
    #Offsets for the start and the end of the analysed window.
    dis_i = pd.Timedelta(hours = Initial)
    dis_f = pd.Timedelta(hours = 24*Long)
    #Slice of the recession curve, starting after the hydrograph peak.
    X = Q[Q.idxmax()+dis_i:Q.idxmax()+dis_f+dis_i]
    #Grid of candidate B values, each paired with the window for Pool.map.
    L = []
    B = np.arange(0, 5., step)
    for b in B:
        L.append([X, b])
    p = Pool(processes=process)
    Res = p.map(NDF_method, L)
    p.close()
    p.join()
    #Pick the candidate with the smallest absolute volume error.
    Error = np.abs([i[1] for i in Res])
    PosEr = np.argmin(Error)
    #Return: B, A, E and Qsim
    return B[PosEr], Res[PosEr][0], Error[PosEr], pd.Series(Res[PosEr][2], X.index)
# -
| 34.422701 | 107 | 0.578795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,547 | 0.485901 |
a87e40be3c08427f0c7c737de6d4a54b325082a5 | 2,851 | py | Python | readthedocs/proxito/views/mixins.py | dojutsu-user/readthedocs.org | d90f37d89902b849285d9ff8c506e0ca0f6e6fc4 | [
"MIT"
] | null | null | null | readthedocs/proxito/views/mixins.py | dojutsu-user/readthedocs.org | d90f37d89902b849285d9ff8c506e0ca0f6e6fc4 | [
"MIT"
] | null | null | null | readthedocs/proxito/views/mixins.py | dojutsu-user/readthedocs.org | d90f37d89902b849285d9ff8c506e0ca0f6e6fc4 | [
"MIT"
] | null | null | null | import logging
import mimetypes
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.http import HttpResponse
from django.shortcuts import render
from django.utils.encoding import iri_to_uri
from django.views.static import serve
log = logging.getLogger(__name__) # noqa
class ServeDocsMixin:
    """Class implementing all the logic to serve a document.

    Dispatches between serving files directly from Python (development) and
    handing them off to nginx via ``X-Accel-Redirect`` (production).
    """

    def _serve_docs(self, request, final_project, path):
        """
        Serve documentation in the way specified by settings.

        Serve from the filesystem if using PYTHON_MEDIA. We definitely shouldn't
        do this in production, but I don't want to force a check for DEBUG.
        """
        if settings.PYTHON_MEDIA:
            return self._serve_docs_python(
                request, final_project=final_project, path=path
            )
        return self._serve_docs_nginx(request, final_project=final_project, path=path)

    def _serve_docs_python(self, request, final_project, path):
        """
        Serve docs from Python.

        .. warning:: Don't do this in production!
        """
        log.info('[Django serve] path=%s, project=%s', path, final_project.slug)

        storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()
        root_path = storage.path('')
        # Serve from Python
        return serve(request, path, root_path)

    def _serve_docs_nginx(self, request, final_project, path):
        """
        Serve docs from nginx.

        Returns a response with ``X-Accel-Redirect``, which will cause nginx to
        serve it directly as an internal redirect.
        """
        log.info('[Nginx serve] path=%s, project=%s', path, final_project.slug)

        # Normalise the path under the internal /proxito/ location nginx knows.
        if not path.startswith('/proxito/'):
            if path[0] == '/':
                path = path[1:]
            path = f'/proxito/{path}'

        # Guess the content type from the file extension; fall back to a
        # generic binary type so nginx always has something to send.
        content_type, encoding = mimetypes.guess_type(path)
        content_type = content_type or 'application/octet-stream'
        response = HttpResponse(
            f'Serving internal path: {path}', content_type=content_type
        )
        if encoding:
            response['Content-Encoding'] = encoding

        # NGINX does not support non-ASCII characters in the header, so we
        # convert the IRI path to URI so it's compatible with what NGINX expects
        # as the header value.
        # https://github.com/benoitc/gunicorn/issues/1448
        # https://docs.djangoproject.com/en/1.11/ref/unicode/#uri-and-iri-handling
        x_accel_redirect = iri_to_uri(path)
        response['X-Accel-Redirect'] = x_accel_redirect

        return response

    def _serve_401(self, request, project):
        # Render the 401 page and force the status code, logging the denial.
        res = render(request, '401.html')
        res.status_code = 401
        log.debug('Unauthorized access to %s documentation', project.slug)
        return res
| 34.768293 | 86 | 0.653104 | 2,527 | 0.886356 | 0 | 0 | 0 | 0 | 0 | 0 | 1,127 | 0.3953 |
a87f024d79e42e7d806ba8c1470d48f414917c1c | 4,381 | py | Python | helper.py | whs2k/googleAlerts | 78bf38e2ff707f916e6841e37762e6a7088f9a7c | [
"MIT"
] | null | null | null | helper.py | whs2k/googleAlerts | 78bf38e2ff707f916e6841e37762e6a7088f9a7c | [
"MIT"
] | null | null | null | helper.py | whs2k/googleAlerts | 78bf38e2ff707f916e6841e37762e6a7088f9a7c | [
"MIT"
] | null | null | null | import requests
import pandas as pd
import tweepy #for twitter
import os
from bs4 import BeautifulSoup
import praw #for reddit
import requests
import requests.auth
def getTweets(search_terms=['counterfeit','amazonHelp']):
    """Search Twitter for *search_terms* and return embeddable tweet URLs.

    Returns:
        dict mapping 't0', 't1', ... to twitframe embed URLs for the matched
        tweets.
    NOTE(review): the API credentials are hard-coded below -- they should be
    moved to environment variables / a config file outside version control
    and the exposed keys revoked. The mutable default list is never mutated,
    so it is harmless, but a tuple would be the safer idiom.
    """
    consumer_key = '6CM1Yqk0Qz6KUXsDQUS8xmahS'
    consumer_secret = 'LMSBfoJWMTlder205Ihr2t1JDgwJD2XgKQeWYau25gJix4lm24'
    access_token = '753302551840198656-Qx1HSVIZlqjShSsUeWY4BhRaVEbWVAP'
    access_token_secret = 'iwtFUe30YrmDlMyGACLLNYrpZQutuW2e8QzX03YwOlz97'
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    cfit_tweets = api.search(q=search_terms, count=1000)
    # Collect tweet metadata; only the 'iframe' column feeds the return value.
    df = pd.DataFrame()
    df['text'] = [x.text for x in cfit_tweets]
    df['source'] = ['twitter: "counterfiet, amazonelp"' for x in cfit_tweets]
    df['url'] = [x.text[x.text.find('http'):].split('\n')[0] for x in cfit_tweets]
    df['retweets'] = [x.retweet_count for x in cfit_tweets]
    df['favorites'] = [x.favorite_count for x in cfit_tweets]
    df['iframe'] = ['https://twitframe.com/show?url=https://twitter.com/{}/status/{}'.format(x.user.screen_name, x.id) for x in cfit_tweets]
    keys = ['t'+str(x) for x in range(len(df['iframe'].tolist()))]
    values = df['iframe'].tolist()
    return dict(zip(keys, values))
def getReddits():
    """Fetch recent r/FulfillmentByAmazon submissions and return image posts.

    Scans up to 1000 of the newest submissions and keeps those whose URL
    contains '.png'.

    Returns:
        dict mapping 'r0', 'r1', ... to the collected .png URLs.
    NOTE(review): the credentials are hard-coded -- move them to environment
    variables / a config file outside version control and revoke the
    exposed secrets.
    """
    reddit = praw.Reddit(client_id='BXTDVNZqv8SFyw',
                         client_secret='LQtvysbgBqkh-Zjwl1XyLZMdoD4',
                         password='osrno1',
                         user_agent='testscript by /u/whs2k',
                         username='whs2k')
    if reddit.read_only:
        print('We Are Connected to Reddit!')
    # Other sort orders available: controversial, gilded, hot, new, rising, top.
    png_urls = [x.url for x in reddit.subreddit('FulfillmentByAmazon').new(limit=1000) if '.png' in x.url]
    print('We have {} png urls'.format(len(png_urls)))
    # BUG FIX: the keys were previously built from an undefined DataFrame
    # ``df`` (NameError at runtime); they must enumerate the collected URLs.
    keys = ['r'+str(x) for x in range(len(png_urls))]
    return dict(zip(keys, png_urls))
def getTweetsDF():
    """Search Twitter for counterfeit/fake mentions and return embed URLs.

    Runs two searches ("counterfeit, amazonHelp" and "fake, amazonHelp") and
    returns embeddable twitframe URLs.

    Returns:
        dict mapping 't0', 't1', ... to twitframe embed URLs for the
        "fake, amazonHelp" results (df1).
    NOTE(review): credentials are hard-coded -- move them to environment
    variables and revoke the exposed keys. Only the "fake" search feeds the
    return value; the "counterfeit" frame (df) is built but unused, which
    may not be the intent.
    """
    consumer_key = '6CM1Yqk0Qz6KUXsDQUS8xmahS'
    consumer_secret = 'LMSBfoJWMTlder205Ihr2t1JDgwJD2XgKQeWYau25gJix4lm24'
    access_token = '753302551840198656-Qx1HSVIZlqjShSsUeWY4BhRaVEbWVAP'
    access_token_secret = 'iwtFUe30YrmDlMyGACLLNYrpZQutuW2e8QzX03YwOlz97'
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    cfit_tweets = api.search(q=['counterfeit','amazonHelp'], count=1000)
    fake_tweets = api.search(q=['fake','amazonHelp'], count=1000)
    df = pd.DataFrame()
    df['text'] = [x.text for x in cfit_tweets]
    df['source'] = ['twitter: "counterfiet, amazonelp"' for x in cfit_tweets]
    df['url'] = [x.text[x.text.find('http'):].split('\n')[0] for x in cfit_tweets]
    df['retweets'] = [x.retweet_count for x in cfit_tweets]
    df['favorites'] = [x.favorite_count for x in cfit_tweets]
    df['iframe'] = ['https://twitframe.com/show?url=https://twitter.com/{}/status/{}'.format(x.user.screen_name, x.id) for x in cfit_tweets]
    df1 = pd.DataFrame()
    df1['text'] = [x.text for x in fake_tweets]
    df1['source'] = ['twitter: "fake, amazonHelp"' for x in fake_tweets]
    df1['url'] = [x.text[x.text.find('http'):].split('\n')[0] for x in fake_tweets]
    df1['retweets'] = [x.retweet_count for x in fake_tweets]
    df1['favorites'] = [x.favorite_count for x in fake_tweets]
    df1['iframe'] = ['https://twitframe.com/show?url=https://twitter.com/{}/status/{}'.format(x.user.screen_name, x.id) for x in fake_tweets]
    # Removed dead code: a combined/sorted df_final whose result was discarded
    # and a ``keys = ['t1', 't2']`` assignment that was immediately overwritten.
    keys = ['t'+str(x) for x in range(len(df1['iframe'].tolist()))]
    values = df1['iframe'].tolist()
    return dict(zip(keys, values))
| 41.330189 | 138 | 0.714905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,868 | 0.426387 |
a8803c766a451bb61117713eb202084c40d7750f | 1,435 | py | Python | students/k3343/laboratory_works/Berezhnova_Marina/laboratory_work_1/django_project_flights/flights_app/models.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 10 | 2020-03-20T09:06:12.000Z | 2021-07-27T13:06:02.000Z | students/k3343/laboratory_works/Berezhnova_Marina/laboratory_work_1/django_project_flights/flights_app/models.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 134 | 2020-03-23T09:47:48.000Z | 2022-03-12T01:05:19.000Z | students/k3343/laboratory_works/Berezhnova_Marina/laboratory_work_1/django_project_flights/flights_app/models.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 71 | 2020-03-20T12:45:56.000Z | 2021-10-31T19:22:25.000Z | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Companies(models.Model):
    """Airline company operating flights."""
    name = models.CharField(max_length=30)

    def __str__(self):
        return "{}".format(self.name)
class Gates(models.Model):
    """Airport gate that flights can be assigned to."""
    name = models.CharField(max_length=30)

    def __str__(self):
        return "{}".format(self.name)
class Flights(models.Model):
    """A flight, linked to its operating company and assigned gate."""
    company = models.ForeignKey(Companies, on_delete=models.CASCADE)
    gate = models.ForeignKey(Gates, on_delete=models.CASCADE)

    def __str__(self):
        return "Company: {} | Gate: {}".format(self.company, self.gate)
class FlightActivities(models.Model):
    """An arrival or departure event of a flight."""
    # Stored as '0'/'1'; use get_activity_display() for the human label.
    ACTIVITY = [
        ('0', 'arrival'),
        ('1', 'departure')
    ]
    flight = models.ForeignKey(Flights, on_delete=models.CASCADE)
    activity = models.CharField(choices=ACTIVITY, default='0', max_length=1)
    # NOTE(review): DateField stores a date only, while the field name "time"
    # suggests a DateTimeField may have been intended -- confirm.
    time = models.DateField()

    def __str__(self):
        return "{} | Arrival/departure: {} | Date {}".format(self.flight, self.get_activity_display(), self.time)
class FlightComments(models.Model):
    """User comment attached to a flight activity (gate change, delay, other)."""
    flight = models.ForeignKey(FlightActivities, on_delete=models.CASCADE)
    # Stored as '0'/'1'/'2'; use get_com_type_display() for the human label.
    COMMENT_TYPE = [
        ('0', 'Gate changing'),
        ('1', 'Lateness'),
        ('2', 'Other')
    ]
    com_type = models.CharField(choices=COMMENT_TYPE, default='0', max_length=1)
    com_text = models.CharField(max_length=1024)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
| 27.075472 | 110 | 0.687108 | 1,321 | 0.920557 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.11777 |
a88072a31c00b299fbd4de061916f3bdeec7c2c8 | 4,993 | py | Python | face_recognition/claudia/main_engine.py | hzitoun/TripletLossFace | 3f566d01b41fa91b924198a63905f3f58632f5a2 | [
"MIT"
] | 88 | 2020-01-18T09:47:03.000Z | 2021-12-18T22:34:18.000Z | face_recognition/claudia/main_engine.py | hzitoun/TripletLossFace | 3f566d01b41fa91b924198a63905f3f58632f5a2 | [
"MIT"
] | 4 | 2020-01-18T09:20:24.000Z | 2020-03-02T19:40:58.000Z | face_recognition/claudia/main_engine.py | aangfanboy/TripletLossFace | 3f566d01b41fa91b924198a63905f3f58632f5a2 | [
"MIT"
] | 40 | 2020-01-18T11:15:07.000Z | 2021-03-09T07:58:57.000Z | import tensorflow as tf
import sys
import json
import numpy as np
import cv2
from tqdm import tqdm
sys.path.append("../")
from deep_learning.make_better_dataset_for_deepfake.main_data_creator import FaceExtractor
class Claudia:
    """Face-identity indexer.

    Embeds detected faces with a pretrained model, keeps known embeddings in
    a JSON store (``my_data.json``), assigns each identity a stable random
    color, and can annotate images and videos with the recognised IDs.

    NOTE(review): matching uses ``tf.keras.losses.CosineSimilarity``, which
    yields -1 for identical directions -- hence the negative threshold
    ``th`` in ``index_image``.
    """

    def load_full(self, path):
        """Read an image file from disk and decode it to an RGB uint8 array."""
        image = tf.io.read_file(path)
        image = tf.image.decode_jpeg(image, channels=3)
        return image.numpy()

    def set_image(self, image):
        """Resize to the model input size and scale pixels to [0, 1]."""
        image = tf.image.resize(image, (self.input_shape[0], self.input_shape[1]), method="nearest")
        return tf.cast(image, tf.float32)/255.

    def save_json(self):
        """Persist the identity -> embedding map to disk."""
        with open(self.json_path, 'w') as f:
            json.dump(self.json, f)

    def create_color_map(self):
        """Assign a random, stable color to every identity missing one."""
        for key in self.json:
            try:
                self.colors[key]
            except KeyError:
                color = tuple(np.random.choice(range(256), size=3))
                color = (int(color[0]), int(color[1]), int(color[2]))
                self.colors[key] = color

    def __init__(self, model_path: str):
        self.model = tf.keras.models.load_model(model_path)
        self.faceExtractor = FaceExtractor()
        # Spatial input size expected by the embedding model.
        self.input_shape = self.model.layers[0].input_shape[0][1:]
        self.json_path = "my_data.json"
        # Load the existing identity store, or start a fresh one.
        try:
            with open(self.json_path, 'rb') as f:
                self.json = json.loads(f.read())
        except:
            with open(self.json_path, 'w+') as f:
                self.json = {}
        self.colors = {}
        self.create_color_map()
        self.video_writer = None
        self.cosine_loss = tf.keras.losses.CosineSimilarity()

    def get_output_from_image(self, path, get_face: bool = True, l2: bool = False):
        """Return (image, embeddings, face frames) for a path or array.

        When ``get_face`` is False the whole image is embedded as one face.
        ``l2`` additionally L2-normalises each embedding.
        """
        if type(path) == str:
            image = self.load_full(path)
        else:
            image = path
        if get_face:
            faces, all_frames = self.faceExtractor.extract([image])
            faces = faces[0]
            all_frames = all_frames[0]
        else:
            faces = [image]
            all_frames = [(0,image.shape[1], 0, image.shape[0])]
        outputs = []
        for face in faces:
            face = self.set_image(face)
            output = self.model(tf.expand_dims(face, axis=0))
            if l2:
                output = tf.nn.l2_normalize(output, 1, 1e-10)
            outputs.append(output)
        return image, outputs, all_frames

    def add_to_json(self, output):
        """Register a new identity for *output* and return its key."""
        print("ADDED")
        i = len(self.json.keys()) + 1
        self.json[str(i)] = list(output.numpy().tolist())
        self.save_json()
        self.create_color_map()
        return str(i)

    def index_image(self, path, get_face: bool = True, print_out: bool = False, th: float = -0.60):
        """Match every face in the image against the identity store.

        Returns (mins, image, all_frames) where mins holds one
        (distance, identity_key) pair per face; unknown faces are added to
        the store with distance -1.
        """
        image, output, all_frames = self.get_output_from_image(path, get_face, l2=True)
        mins = []
        assert len(all_frames) == len(output)
        for i in range(len(output)):
            founded = False
            # BUG FIX: my_min used to be re-initialised inside the key loop,
            # so it kept the *last* identity under the threshold instead of
            # the closest one. Initialise it once per face.
            my_min = (100000, "")
            for key in self.json:
                oo = self.json[key]
                dist = self.cosine_loss(tf.convert_to_tensor(oo), output[i]).numpy()
                if dist <= th:
                    if dist < my_min[0]:
                        my_min = (dist, key)
                        founded = True
            if not founded:
                new_key = self.add_to_json(output[i])
                my_min = (-1., new_key)
            mins.append(my_min)
        return mins, image, all_frames

    def mark(self, image, min_im, all_frames):
        """Draw a labelled rectangle for every recognised face; returns image."""
        for (confidance, who), frame in zip(min_im, all_frames):
            try:
                color = self.colors[str(who)]
                x1, x2, y1, y2 = frame
                cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
                cv2.putText(image, f"id: {str(who)}- conf:{abs(round(float(confidance), 2))}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA)
            except KeyError:
                continue
        return image

    def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
        """Index the image, annotate it (reusing mark) and optionally show it."""
        min_im, image, all_frames = self.index_image(path, get_face)
        # Consistency: the annotation loop duplicated mark(); delegate instead.
        image = self.mark(image, min_im, all_frames)
        if turn_rgb:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if show:
            cv2.imshow("a", image)
            cv2.waitKey(1)
        return image, min_im, all_frames

    def go_for_video(self, path, i: int = -1):
        """Annotate a video, re-indexing every 6th frame, into result.avi."""
        cap = cv2.VideoCapture(path)
        n = 0
        if i != -1:
            bar = tqdm(total=i)
        else:
            bar = tqdm()
        min_im, all_frames = None, None
        while True:
            try:
                result, frame = cap.read()
                # BUG FIX: the failed-read check used to run *after* the frame
                # was used; at end-of-video the None frame raised inside the
                # except-and-continue handler and the loop never terminated.
                if not result:
                    break
                if n % 6 == 0:
                    if self.video_writer is None:
                        h, w, c = frame.shape
                        self.video_writer = cv2.VideoWriter('result.avi', cv2.VideoWriter_fourcc(*"MJPG"), 30,(w,h))
                    frame, min_im, all_frames = self.show_who_in_image(frame, True, False, turn_rgb=False)
                    self.video_writer.write(frame)
                else:
                    # Cheap frames reuse the most recent detections.
                    frame = self.mark(frame, min_im, all_frames)
                    self.video_writer.write(frame)
                n += 1
                bar.update()
            except Exception as e:
                print(e)
                continue
        self.video_writer.release()
if __name__ == '__main__':
    # Demo: annotate a sample video with a pretrained triplet-loss model.
    claudia = Claudia("../deep_learning/models/triplet_inception_resnet_v1_0.h5")
    claudia.go_for_video("bbt_test1.mp4")
a8811200a87031d7a49f84665c7e8ea51e4ccb14 | 648 | py | Python | django_test/articles/migrations/0008_auto_20200306_1955.py | MachineLearningIsEasy/python_lesson_22 | 7fe77da5fa611a54578d092207059f65b719ed8a | [
"MIT"
] | 1 | 2020-03-12T13:07:35.000Z | 2020-03-12T13:07:35.000Z | django_test/articles/migrations/0008_auto_20200306_1955.py | MachineLearningIsEasy/python_lesson_22 | 7fe77da5fa611a54578d092207059f65b719ed8a | [
"MIT"
] | null | null | null | django_test/articles/migrations/0008_auto_20200306_1955.py | MachineLearningIsEasy/python_lesson_22 | 7fe77da5fa611a54578d092207059f65b719ed8a | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-03-06 19:55
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0007_auto_20200306_1946'),
]
operations = [
migrations.AlterField(
model_name='article',
name='article_date',
field=models.DateTimeField(default=datetime.datetime(2020, 3, 6, 19, 55, 10, 304561)),
),
migrations.AlterField(
model_name='article',
name='article_img',
field=models.ImageField(blank=True, null=True, upload_to='articles'),
),
]
| 25.92 | 98 | 0.606481 | 539 | 0.83179 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.21142 |
a881135e08ba4f471543f5c8dd4cdb32fe51f578 | 2,420 | py | Python | models/002_auth.py | ecohealthalliance/sicki | 390ab06cedc205a415d823339f5816730dcc4b72 | [
"Apache-2.0"
] | null | null | null | models/002_auth.py | ecohealthalliance/sicki | 390ab06cedc205a415d823339f5816730dcc4b72 | [
"Apache-2.0"
] | null | null | null | models/002_auth.py | ecohealthalliance/sicki | 390ab06cedc205a415d823339f5816730dcc4b72 | [
"Apache-2.0"
] | null | null | null | from gluon.tools import Auth
auth = Auth(db, hmac_key=Auth.get_or_create_key())
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
auth.define_tables ()
admin_role = auth.id_group ("Administrator")
if not admin_role:
admin_role = auth.add_group ("Administrator", "System Administrator - can access & make changes to any data")
editor_role = -1
writer_role = -1
def require_logged_in(func):
    """Decorator: allow the wrapped action only for authenticated users.

    Raises HTTP 401 when no user is signed in (``auth.user`` unset).
    """
    def guarded(*args, **kwargs):
        if auth.user:
            return func(*args, **kwargs)
        raise HTTP(401, "Unauthorized")
    return guarded
def require_logged_in_deprecated ():
    # Legacy guard: redirect anonymous users to the login page instead of
    # raising HTTP 401 (kept for older controllers).
    if not auth.user:
        redirect (URL (r = request, c = 'default', f = 'user', args = ['login']))
def require_role(role):
    """Decorator factory: restrict an action to members of *role*.

    Raises HTTP 401 for anonymous users and HTTP 403 for authenticated users
    outside the role. NOTE(review): a role of -1 short-circuits and returns
    None *without* calling the wrapped function -- preserved as-is.
    """
    def deco(func):
        def guarded(*args, **kwargs):
            if not auth.user:
                raise HTTP(401, "Unauthorized")
            if role == -1:
                return
            if not auth.has_membership(role, auth.user.id):
                raise HTTP(403, "Forbidden")
            return func(*args, **kwargs)
        return guarded
    return deco
def require_role_deprecated (role):
    # Legacy guard: redirect anonymous users to login, then enforce *role*.
    if not auth.user:
        redirect (URL (r = request, c = 'default', f = 'user', args = ['login']))
    if role == -1:
        # -1 means "any authenticated user".
        return
    elif not auth.has_membership (role, auth.user.id):
        raise HTTP (401, "Unauthorized")
    return
def logged_in ():
    # True when a user is signed in (web2py sets auth.user on login).
    return auth.user != None
def has_role (role):
    # True when the signed-in user belongs to *role*; False for anonymous.
    if not logged_in ():
        return False
    return auth.has_membership (role, auth.user.id)
def check_logged_in ():
    # Same predicate as logged_in(); kept for callers using this name.
    return auth.user != None
def check_role (role):
    # True when the signed-in user may act with *role*.
    if not auth.user:
        return False
    if role == -1:
        # -1 means "any authenticated user".
        return True
    if auth.has_membership (admin_role, auth.user.id):
        # Administrators pass every role check.
        return True
    if role == writer_role and auth.has_membership (editor_role, auth.user.id):
        # Editors implicitly hold writer permissions.
        # NOTE(review): editor_role and writer_role are both -1 in this file,
        # so this branch is currently unreachable (covered by the -1 check).
        return True
    if not auth.has_membership (role, auth.user.id):
        return False
    return True
def check_user (user_id):
    # True when the signed-in user *is* the given user id.
    if not auth.user:
        return False
    return auth.user.id == user_id
def user_name(id):
    """Return "First Last" for the given auth-user id, or 'Unknown'."""
    row = db(db[auth.settings.table_user].id == id).select().first()
    if not row:
        return 'Unknown'
    return row.first_name + ' ' + row.last_name
| 28.470588 | 113 | 0.634298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.083058 |
a881a8440fb6934709fdfc72c63ec22d17333b12 | 3,154 | py | Python | utils/data_utils.py | a514514772/Hijackgan | d98dcddd64a2f28302ded2fbe51b398db0ae4cc4 | [
"MIT"
] | 36 | 2021-03-23T21:00:24.000Z | 2022-03-30T03:08:00.000Z | utils/data_utils.py | a514514772/Hijackgan | d98dcddd64a2f28302ded2fbe51b398db0ae4cc4 | [
"MIT"
] | 4 | 2021-04-26T07:41:34.000Z | 2021-09-28T08:50:40.000Z | utils/data_utils.py | a514514772/hijackgan | d98dcddd64a2f28302ded2fbe51b398db0ae4cc4 | [
"MIT"
] | null | null | null | # +
import argparse
import os
import pickle
import sys
sys.path.append("..")
import numpy as np
import torchvision
import torchvision.transforms as T
import torch.utils.data as torch_data
from tqdm import tqdm
from models.classifiers import EvalCompoundResNet
# -
def parse_args():
    """Build and evaluate the CLI parser for the dataset utilities.

    Returns:
        argparse.Namespace with: function ('max_index' | 'count_data'),
        output_path, num_attr, sample_per_category, weight_path, data_root.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-F', '--function', type=str, required=True, choices=['max_index', 'count_data'])
    parser.add_argument('-O', '--output_path', type=str, required=True)
    # BUG FIX: num_attr is consumed as range(args.num_attr) by count_data, so
    # it must be an int; type=str crashed whenever the flag was passed.
    parser.add_argument('--num_attr', type=int, default=8)
    parser.add_argument('--sample_per_category', type=int, default=1e5)
    parser.add_argument('--weight_path', type=str, default='/home/u5397696/interpolation/celebA-hq-classifier/')
    parser.add_argument('--data_root', type=str, default='/home/u5397696/interpolation/interfacegan/data/tmp')
    return parser.parse_args()
def max_index(args):
    """Print the largest image index stored in a pickled index file.

    Parameters:
        args: namespace whose ``output_path`` points at a pickle holding, per
            attribute, a pair [positive_indices, negative_indices].
    Raises:
        ValueError: when ``args.output_path`` does not exist.
    """
    if not os.path.exists(args.output_path):
        raise ValueError(f"{args.output_path} doesn't exist.")
    with open(args.output_path, 'rb') as handle:
        data_index = pickle.load(handle)
    print(f'#attributes: {len(data_index)}')
    max_val = -1e9
    for attr, pair in enumerate(data_index):
        biggest_pos = np.max(pair[0])
        biggest_neg = np.max(pair[1])
        max_val = np.max([max_val, biggest_pos, biggest_neg])
        print(attr, biggest_pos, biggest_neg)
    print(f'Max index is {max_val}')
def count_data(args):
    """Scan an ImageFolder dataset, predict attributes for every image and
    pickle per-attribute index lists to ``args.output_path``.

    For each attribute ``data_index[attr]`` holds two lists
    [positive_image_ids, negative_image_ids], each capped at
    ``args.sample_per_category`` entries. An image gets an id only if it was
    saved for at least one attribute. Requires a CUDA device and the project
    classifier weights. Assumes ``args.num_attr`` is an int.
    """
    #if os.path.exists(args.output_path):
    #    raise ValueError(f"{args.output_path} has existed.")
    t = T.Compose([T.Resize(224), T.ToTensor()])
    dset = torchvision.datasets.ImageFolder(args.data_root, transform=t)
    loader= torch_data.DataLoader(dset, batch_size=32, shuffle=False, num_workers=4, pin_memory=True)
    print (f'Start processing {os.path.basename(args.data_root)}.')
    m = EvalCompoundResNet(args.weight_path).cuda()
    # One [positives, negatives] pair of id lists per attribute.
    data_index = [[[],[]] for _ in range(args.num_attr)]
    image_cnt = 0
    for bid, (imgs, _) in enumerate(loader):
        imgs = imgs.cuda()
        preds = m.predict_quantize(imgs)
        for iid, pred in enumerate(preds):
            is_save = False
            for ind in range(args.num_attr):
                # Record the image id for every attribute whose bucket is
                # still below the per-category cap.
                if pred[ind] == True and len(data_index[ind][0])<args.sample_per_category:
                    is_save = True
                    data_index[ind][0].append(image_cnt)
                elif pred[ind] == False and len(data_index[ind][1])<args.sample_per_category:
                    is_save = True
                    data_index[ind][1].append(image_cnt)
            # Ids are only consumed by images that were saved somewhere.
            if is_save:
                image_cnt += 1
        # Periodic progress report per attribute.
        if bid % 10 == 0:
            for i in range(args.num_attr):
                print(i, len(data_index[i][0]), len(data_index[i][1]))
            print(f'Processes {bid}/{len(loader)}.')
    with open(args.output_path, 'wb') as f:
        pickle.dump(data_index, f)
def main():
    """Entry point: dispatch to the sub-command chosen via -F/--function."""
    args = parse_args()
    dispatch = {"max_index": max_index, "count_data": count_data}
    handler = dispatch.get(args.function)
    if handler is not None:
        handler(args)
if __name__ == '__main__':
main()
| 32.183673 | 112 | 0.622384 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 552 | 0.175016 |
a883398d1013f82065fdcd6cb3f64c0a32a024f7 | 743 | py | Python | membership/urls.py | kay-han/building-blog | 2bdbee68b484193c636ed869b2de605df67b2a48 | [
"Unlicense"
] | null | null | null | membership/urls.py | kay-han/building-blog | 2bdbee68b484193c636ed869b2de605df67b2a48 | [
"Unlicense"
] | null | null | null | membership/urls.py | kay-han/building-blog | 2bdbee68b484193c636ed869b2de605df67b2a48 | [
"Unlicense"
] | null | null | null | from django.urls import path
from .views import UserRegisterView, UserEditView, PasswordsChangeView
from django.contrib.auth import views as auth_views #It allows using some of the views that come with the authentication system comes with django
from . import views
urlpatterns = [
path('registeration/', UserRegisterView.as_view(), name='registeration'),
path('edit_profile/', UserEditView.as_view(), name='edit-profile'),
#path('password/', auth_views.PasswordsChangeView.as_view(template_name='registration/change-password.html')),
path('password/', PasswordsChangeView.as_view(template_name='registration/change-password.html')),
path('password_success/', views.password_success, name='password_success.html'),
]
| 57.153846 | 146 | 0.776581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.472409 |
a884c82cc571aa5061e58f71c9e99493a0106092 | 615 | py | Python | numpan/pan03.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | null | null | null | numpan/pan03.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | 1 | 2021-11-07T04:54:55.000Z | 2021-11-07T04:54:55.000Z | numpan/pan03.py | jaywoong/learn_pandas | 972ad015c142d5e88ea821694b7a95e6aeb4e172 | [
"Apache-2.0"
] | null | null | null | import pandas as pd;
import numpy as np;
data1 = ['A',2];
data2 = ['B',4];
df1 = pd.DataFrame([data1,data2]);
print(df1);
data = {'subject' : ['math', 'comp', 'phys', 'chem'],
'score': [100, 90, 85, 95],
'students': [94, 32, 83, 17]};
df2 = pd.DataFrame(data);
print(df2);
print(len(df2));
print(df2.shape);
print(df2.shape[0]); # 행 정보
print(df2.shape[1]); # 열 정보
df3 = pd.DataFrame(df2, columns=['students','score','subject']);
print(df3);
print(df3['students'][2]);
print(df3[df3['score'] > 90]);
dic1 = {'math':{1:80,2:90,3:100},'comp':{1:90,2:100}};
df4 = pd.DataFrame(dic1);
print(df4);
| 21.964286 | 64 | 0.585366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.215311 |
a885f24e9c1a06fce54f92a0cfe74a09e9f9a42e | 190 | py | Python | astrochem_ml/__init__.py | laserkelvin/astrochem_ml | 1385e481525681943e50467af58f317401747acd | [
"MIT"
] | null | null | null | astrochem_ml/__init__.py | laserkelvin/astrochem_ml | 1385e481525681943e50467af58f317401747acd | [
"MIT"
] | 2 | 2021-11-18T01:33:22.000Z | 2021-11-18T14:04:43.000Z | astrochem_ml/__init__.py | laserkelvin/astrochem_ml | 1385e481525681943e50467af58f317401747acd | [
"MIT"
] | null | null | null | """Top-level package for Astrochem ML."""
from astrochem_ml import smiles, classes
# Package metadata; __version__ is typically kept in sync by release tooling
# (e.g. bump2version) — confirm before editing by hand.
__author__ = """Kin Long Kelvin Lee"""
__email__ = 'kin.long.kelvin.lee@gmail.com'
__version__ = '0.1.0'
| 23.75 | 43 | 0.721053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.547368 |
a88816b3f545c55918110797952195372fc30a92 | 1,929 | py | Python | hexastore/bisect.py | alexchamberlain/mutant | 3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9 | [
"MIT"
] | 3 | 2019-06-15T13:13:39.000Z | 2020-02-07T19:54:12.000Z | hexastore/bisect.py | alexchamberlain/mutant | 3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9 | [
"MIT"
] | 276 | 2019-07-03T06:18:37.000Z | 2021-07-28T05:24:59.000Z | hexastore/bisect.py | alexchamberlain/mutant | 3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9 | [
"MIT"
] | null | null | null | """Bisection algorithms."""
from typing import Callable, Optional, Sequence, TypeVar, cast
from .typing import Comparable
T = TypeVar("T")
U = TypeVar("U", bound=Comparable)
def bisect_left(
    a: Sequence[T], x: T, lo: int = 0, hi: Optional[int] = None, key: Optional[Callable[[T], U]] = None
) -> int:
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x. So if x already appears in the list, a.insert(x) will
    insert just before the leftmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched. The optional `key` callable extracts the
    comparison key from each element (and from x).

    Raises:
        ValueError: if lo is negative.
    """
    if key is None:
        key = cast(Callable[[T], U], lambda x: x)
    if lo < 0:
        raise ValueError("lo must be non-negative")
    if hi is None:
        hi = len(a)
    # Hoist the loop-invariant key of the probe value out of the loop;
    # the original recomputed key(x) on every iteration.
    key_x = key(x)
    while lo < hi:
        mid = (lo + hi) // 2
        if key(a[mid]) < key_x:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(
    a: Sequence[T], x: T, lo: int = 0, hi: Optional[int] = None, key: Optional[Callable[[T], U]] = None
) -> int:
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x. So if x already appears in the list, a.insert(x) will
    insert just after the rightmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched. The optional `key` callable extracts the
    comparison key from each element (and from x).

    Raises:
        ValueError: if lo is negative.
    """
    if key is None:
        key = cast(Callable[[T], U], lambda x: x)
    if lo < 0:
        raise ValueError("lo must be non-negative")
    if hi is None:
        hi = len(a)
    # Hoist the loop-invariant key of the probe value out of the loop;
    # the original recomputed key(x) on every iteration.
    key_x = key(x)
    while lo < hi:
        mid = (lo + hi) // 2
        if key_x < key(a[mid]):
            hi = mid
        else:
            lo = mid + 1
    return lo


# Match the stdlib convention: `bisect` is an alias for the right variant.
bisect = bisect_right
| 25.72 | 103 | 0.573354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 863 | 0.447382 |
a88879bc5d222329cf1a2ee415609fd7c5eb6c07 | 1,529 | py | Python | tests/fdb.py | OriolOMM/fdb | 561edf48a8999e58feafe84451320ccc4f45892d | [
"Apache-2.0"
] | null | null | null | tests/fdb.py | OriolOMM/fdb | 561edf48a8999e58feafe84451320ccc4f45892d | [
"Apache-2.0"
] | null | null | null | tests/fdb.py | OriolOMM/fdb | 561edf48a8999e58feafe84451320ccc4f45892d | [
"Apache-2.0"
] | 1 | 2020-02-13T16:54:19.000Z | 2020-02-13T16:54:19.000Z | import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
from datetime import datetime
import time
import paho.mqtt.client as mqtt
def button_callback(channel):
    # GPIO edge callback: log the press, publish via MQTT, then pause —
    # presumably a crude debounce so one physical press fires only once.
    pressed_at = datetime.now()
    print(f"{pressed_at}Button was pushed!")
    trigger()
    time.sleep(2)
# Callback invoked when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print(f"Connected with result code {rc}")
    # Subscribing here (rather than once at startup) means the subscription
    # is renewed automatically whenever the connection is re-established.
    client.subscribe("$SYS/#")
# Callback invoked when a PUBLISH message arrives from the server.
def on_message(client, userdata, msg):
    print("{} {}".format(msg.topic, str(msg.payload)))
def trigger():
    """Publish a one-shot 'bomb' message to the smartphone topic on the broker."""
    import json  # local import keeps the fix self-contained in this function

    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    # NOTE(review): 8080 is usually a websocket port — confirm the broker
    # accepts plain MQTT there.
    client.connect("mqtt.iglor.es", 8080, 60)
    payload = {
        "data": "bomb"
    }
    # BUG FIX: bytes(dict) raises TypeError; the payload must be serialized first.
    client.publish("3522109c644e08605c46308a880dcb7d/smartphone",
                   payload=json.dumps(payload).encode("utf-8"), qos=0, retain=False)
    # No network loop is running, so delivery is best-effort; consider
    # client.loop_start()/loop_stop() around the publish.
    time.sleep(0.5)
GPIO.setwarnings(False) # suppress "channel already in use" style warnings
GPIO.setmode(GPIO.BOARD) # address pins by physical board position
GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # pin 10 as input, pulled low (off) by default
GPIO.add_event_detect(10,GPIO.RISING,callback=button_callback) # fire button_callback on a rising edge on pin 10
message = input("Press enter to quit\n\n") # block the main thread until the user presses enter
GPIO.cleanup() # release GPIO resources before exiting
| 34.75 | 128 | 0.725965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 670 | 0.438195 |
a888a17e5cc1d1b46ed3663d418f931e372c9caf | 3,150 | py | Python | test.py | eldinsahbaz/MetaheuristicOptimization | d553c4ea791e10b64384056927502717f5009378 | [
"MIT"
] | 1 | 2019-02-22T18:26:55.000Z | 2019-02-22T18:26:55.000Z | test.py | eldinsahbaz/MetaheuristicOptimization | d553c4ea791e10b64384056927502717f5009378 | [
"MIT"
] | null | null | null | test.py | eldinsahbaz/MetaheuristicOptimization | d553c4ea791e10b64384056927502717f5009378 | [
"MIT"
] | null | null | null | import PSO
import numpy as np
from pprint import pprint
from functools import partial
# Objective: negated mixture of two Gaussian-like bumps — a wide one centred
# at (1.5, 1.5) and a tall, very narrow one at (0.5, 0.5).
def objective_one(x):
    narrow_width = 0.001
    norm = (2 * np.pi) ** 0.5
    wide_sq_dist = (x[0] - 1.5) * (x[0] - 1.5) + (x[1] - 1.5) * (x[1] - 1.5)
    narrow_sq_dist = (x[0] - 0.5) * (x[0] - 0.5) + (x[1] - 0.5) * (x[1] - 0.5)
    wide_bump = (1 / norm) * np.exp(-0.5 * (wide_sq_dist / 0.5))
    narrow_bump = (2 / norm) * np.exp(-0.5 * (narrow_sq_dist / narrow_width))
    return -(wide_bump + narrow_bump)
def sphere(x):
    # Standard sphere benchmark: sum of squared components.
    values = np.asarray(x)
    return (values * values).sum()
# ---------------------------------------------------------------------------
# Experiment 1: PSO on the robust-variance wrapper of objective_one
# (2 variables in [-10, 10], inertia decaying 0.9 -> 0.2), repeated 10 times.
# ---------------------------------------------------------------------------
num_variables = 2
upper_bounds = np.zeros(num_variables) + 10
lower_bounds = np.zeros(num_variables) - 10
# velocity is clamped to 20% of the search range in each dimension
max_velocity = (upper_bounds - lower_bounds) * 0.2
min_velocity = -max_velocity
inputs = {
    'num_variables': num_variables,
    'upper_bound': upper_bounds,
    'lower_bound': lower_bounds,
    'objective_function': partial(PSO.robust_variace_objective, objective_one),
    'num_particles': 1000,
    'max_iterations': 10,
    'max_w': 0.9,
    'min_w': 0.2,
    'c1': 2,
    'c2': 2,
    'max_velocity': max_velocity,
    'min_velocity': min_velocity,
    'tolerance': 1e-2,
    'patience': 3,
    'disp': True
}
best_solns_one = list()
for i in range(10):
    output, convergence_curve = PSO.PSO(**inputs)
    best_solns_one.append(output)
# ---------------------------------------------------------------------------
# Experiment 2: identical setup except the inertia floor min_w is 0.4.
# ---------------------------------------------------------------------------
num_variables = 2
upper_bounds = np.zeros(num_variables) + 10
lower_bounds = np.zeros(num_variables) - 10
max_velocity = (upper_bounds - lower_bounds) * 0.2
min_velocity = -max_velocity
inputs = {
    'num_variables': num_variables,
    'upper_bound': upper_bounds,
    'lower_bound': lower_bounds,
    'objective_function': partial(PSO.robust_variace_objective, objective_one),
    'num_particles': 1000,
    'max_iterations': 10,
    'max_w': 0.9,
    'min_w': 0.4,
    'c1': 2,
    'c2': 2,
    'max_velocity': max_velocity,
    'min_velocity': min_velocity,
    'tolerance': 1e-2,
    'patience': 3,
    'disp': True
}
best_solns_two = list()
for i in range(10):
    output, convergence_curve = PSO.PSO(**inputs)
    best_solns_two.append(output)
# Compare the two settings; compare_algorithms presumably returns a p-value
# (significance at 0.05) — confirm against the PSO module.
print("The difference is significant" if PSO.compare_algorithms(best_solns_one, best_solns_two) < 0.05 else "The difference is not significant")
# ---------------------------------------------------------------------------
# Experiment 3: one PSO run on the 100-dimensional sphere benchmark.
# ---------------------------------------------------------------------------
num_variables = 100
upper_bounds = np.zeros(num_variables) + 10
lower_bounds = np.zeros(num_variables) - 10
max_velocity = (upper_bounds - lower_bounds) * 0.2
min_velocity = -max_velocity
inputs = {
    'num_variables': num_variables,
    'upper_bound': upper_bounds,
    'lower_bound': lower_bounds,
    'objective_function': sphere,
    'num_particles': 1000,
    'max_iterations': 30,
    'max_w': 0.9,
    'min_w': 0.2,
    'c1': 2,
    'c2': 2,
    'max_velocity': max_velocity,
    'min_velocity': min_velocity,
    'tolerance': 1e-2,
    'patience': 3,
    'disp': True
}
output, convergence_curve = PSO.PSO(**inputs)
pprint(output)
PSO.visualize_convergence(convergence_curve)
| 29.439252 | 195 | 0.583492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 621 | 0.197143 |
a888ce23b322d5cd58ae4ab4704251709df155c9 | 193 | py | Python | autokey/data/My Phrases/viniciusban/terminal/kubectl exec -it from clipboard.py | viniciusban/dotfiles | e4048ef236e620ffffc83cce6d51e49019aa4e8e | [
"MIT"
] | null | null | null | autokey/data/My Phrases/viniciusban/terminal/kubectl exec -it from clipboard.py | viniciusban/dotfiles | e4048ef236e620ffffc83cce6d51e49019aa4e8e | [
"MIT"
] | 1 | 2020-05-08T00:37:35.000Z | 2020-05-08T00:37:35.000Z | autokey/data/My Phrases/viniciusban/terminal/kubectl exec -it from clipboard.py | viniciusban/dotfiles | e4048ef236e620ffffc83cce6d51e49019aa4e8e | [
"MIT"
] | null | null | null | # Enter script code
# AutoKey script: types "kubectl exec -it <pod> -- bash" into the active
# terminal, pasting the pod name from the clipboard in the middle.
message = "kubectl exec -it <cursor> -- bash"  # template kept for reference; appears unused below
keyboard.send_keys("kubectl exec -it ")
keyboard.send_keys("<shift>+<ctrl>+v")  # paste the clipboard (pod name)
time.sleep(0.1)  # give the paste time to land before typing the suffix
keyboard.send_keys(" -- bash")
| 24.125 | 45 | 0.694301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.523316 |
a88abbaa6de226c43d73cf509cf13cab18de2ecb | 848 | py | Python | 05. DiagonalDifference/solution.py | avishkar2001/AlgorithmHackerRank | d9489ba329121cbbbbc28b15fb8570d7696cde88 | [
"MIT"
] | 11 | 2020-09-18T16:23:06.000Z | 2022-01-22T11:59:57.000Z | 05. DiagonalDifference/solution.py | avishkar2001/AlgorithmHackerRank | d9489ba329121cbbbbc28b15fb8570d7696cde88 | [
"MIT"
] | 1 | 2020-10-02T14:33:08.000Z | 2021-10-05T02:44:19.000Z | 05. DiagonalDifference/solution.py | avishkar2001/AlgorithmHackerRank | d9489ba329121cbbbbc28b15fb8570d7696cde88 | [
"MIT"
] | 11 | 2020-09-18T16:23:13.000Z | 2022-01-22T11:59:58.000Z | '''
Topic : Algorithms
Subtopic : Diagonal Difference
Language : Python
Problem Statement : Given a square matrix, calculate the absolute difference between the sums of its diagonals.
Url : https://www.hackerrank.com/challenges/diagonal-difference/problem
'''
#!/bin/python3
# Complete the 'diagonalDifference' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY arr as parameter.
#
def diagonalDifference(arr):
    """Return the absolute difference between the two diagonal sums of a square matrix."""
    last = len(arr) - 1
    primary = 0
    secondary = 0
    # Accumulate both diagonals in a single pass over the rows.
    for idx, row in enumerate(arr):
        primary += row[idx]
        secondary += row[last - idx]
    return abs(primary - secondary)
assert diagonalDifference([[11,2,4], [4,5,6], [10,8,-12]]) == 15
assert diagonalDifference([[1,2,3], [4,5,6], [9,8,9]]) == 2
assert diagonalDifference([[1,1,1,1], [1,1,1,1], [1,1,1,1], [1,1,1,1]]) == 0
| 32.615385 | 115 | 0.665094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.557783 |
a88d07cc4a9ee4c4430ecfdb6282a384f81f2ed4 | 334 | py | Python | codeforces/dp动态规划/800/702A最大上升子列.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | codeforces/dp动态规划/800/702A最大上升子列.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | codeforces/dp动态规划/800/702A最大上升子列.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# https://codeforces.com/problemset/problem/702/A
# Greedy scan for the longest strictly increasing contiguous run.
# (Strictly speaking, this is not really dynamic programming.)
_ = input()  # first line is the element count; the list length is enough
nums = list(map(int, input().split()))  # https://codeforces.com/blog/entry/71884
best = 1
run = 1
# Walk adjacent pairs; extend the run on a strict increase, otherwise reset it.
for prev, cur in zip(nums, nums[1:]):
    if cur > prev:
        run += 1
        if run > best:
            best = run
    else:
        run = 1
print(best)
| 18.555556 | 75 | 0.601796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.390805 |
a88e1dd680b976d538070862154a5f05e9ddacef | 10,087 | py | Python | pyrobolearn/algos/fd.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 2 | 2021-01-21T21:08:30.000Z | 2022-03-29T16:45:49.000Z | pyrobolearn/algos/fd.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | null | null | null | pyrobolearn/algos/fd.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 1 | 2020-09-29T21:25:39.000Z | 2020-09-29T21:25:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide the Finite-Difference (FD) method algorithm.
This FD method is a policy gradient algorithm that explores in the parameter space of the policy in an episodic way.
"""
import numpy as np
import torch
from pyrobolearn.envs import Env
from pyrobolearn.tasks import RLTask
# from pyrobolearn.algos.rl_algo import GradientRLAlgo
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class FD(object):  # GradientRLAlgo):
    r"""Finite-Difference Policy Gradient Method.

    Type: policy gradient based (on-policy by definition) with exploration in the parameter space.

    Description
    -----------
    The goal of RL is to maximize the expected return:

    .. math:: J(\theta) = \int p(\tau) R(\tau) d\tau

    The Finite-Difference (FD) algorithm perturbs the parameter space of the policy and evaluates, for each
    perturbation, the expected return :math:`J(\theta_i + \Delta_{\theta_i})`. The gradient
    :math:`g_{FD} \approx \nabla_\theta J` is then given by:

    .. math:: g_{FD} = (\Delta\Theta^\top \Delta\Theta)^{-1} \Delta\Theta^\top \Delta J

    which is used to perform a gradient ascent step :math:`\theta_{i+1} = \theta_{i} + \eta g_{FD}`, where
    :math:`\eta` is the learning rate coefficient.

    Properties
    ----------
    Properties:
        * exploration is performed in the parameter space of the policy

    Pros:
        * easy to implement and test
        * works with deterministic and stochastic policies
        * highly efficient in simulation

    Cons:
        * the perturbation of the parameters is hard (especially with systems that can go unstable)
        * O(M^3) time complexity (because of the matrix inversion), where M is the number of parameters

    References:
        [1] "Policy Gradient Methods" (http://www.scholarpedia.org/article/Policy_gradient_methods), Peters, 2010
    """

    def __init__(self, task, policy, num_variations=None, std_dev=0.01, difference_type='central', learning_rate=0.001,
                 normalize_grad=False, num_workers=1):
        r"""
        Initialize the FD algorithm.

        Args:
            task (RLTask, Env): RL task/env to run.
            policy (Policy): the policy (model) to optimize.
            num_variations (None, int): number of times the parameters are varied by a small increment per
                episode. If None, twice the number of policy parameters is used; empirically this yields
                very accurate gradient estimates [1].
            std_dev (float): standard deviation of the zero-centered Normal distribution the parameter
                increments are sampled from.
            difference_type (str): 'forward' uses the estimator :math:`J(\theta + \Delta\theta) - J(\theta)`;
                'central' uses :math:`J(\theta + \Delta\theta) - J(\theta - \Delta\theta)`.
            learning_rate (float): learning rate (coefficient) for the gradient ascent step.
            normalize_grad (bool): if True, normalize the estimated gradient.
            num_workers (int): number of workers/jobs to run in parallel.

        Raises:
            ValueError: if `difference_type` is neither 'forward' nor 'central'.
        """
        if isinstance(task, Env):
            task = RLTask(task, policy)
        self.task = task
        self.policy = policy
        self.num_workers = num_workers

        # Number of variations (small increments applied to the parameters). From [1]: taking twice the
        # number of parameters empirically yields very accurate gradient estimates.
        if num_variations is None:
            self.num_variations = 2 * self.policy.num_parameters
        else:
            # BUG FIX: the attribute was never set when an explicit value was passed, which made
            # `train()` fail later with an AttributeError.
            self.num_variations = int(num_variations)

        # standard deviation of the parameter perturbations
        self.stddev = np.abs(std_dev)

        # difference estimator type (validated once here so the training loop can rely on it)
        if difference_type not in ('forward', 'central'):
            raise ValueError("Expecting the 'difference_type' argument to be 'forward' or 'central'. Instead got "
                             "'{}'".format(difference_type))
        self.difference_type = difference_type

        # other hyperparameters
        self.lr = learning_rate
        self.normalize_grad = bool(normalize_grad)

        # best reward / parameters seen so far (np.infty is a deprecated alias removed in NumPy 2.0)
        self.best_reward = -np.inf
        self.best_parameters = None

    def explore_and_evaluate(self, params, num_steps, num_rollouts):
        """Set the policy parameters, run a few rollouts, and return the average total reward.

        Args:
            params (np.array): vectorized policy parameters to evaluate.
            num_steps (int): maximum number of steps per rollout.
            num_rollouts (int): number of rollouts to average over.

        Returns:
            float: average reward over the rollouts.
        """
        self.policy.set_vectorized_parameters(params)
        rewards = [self.task.run(num_steps=num_steps, use_terminating_condition=True, render=False)
                   for _ in range(num_rollouts)]
        return np.mean(rewards)

    def train(self, num_steps=1000, num_rollouts=1, num_episodes=1, verbose=False, seed=None):
        """
        Train the policy.

        Args:
            num_steps (int): number of steps per rollout / episode.
            num_rollouts (int): number of rollouts per episode used to average the results.
            num_episodes (int): number of episodes.
            verbose (bool): if True, print information about the training process.
            seed (int): random seed.

        Returns:
            list of float: average rewards per episode.
        """
        if seed is not None:
            np.random.seed(seed)

        rewards = []
        for episode in range(num_episodes):
            # current parameters and buffers for the perturbed evaluations
            params = self.policy.get_vectorized_parameters()
            J_plus, J_minus = np.zeros(self.num_variations), np.zeros(self.num_variations)
            Delta_Params = np.zeros((self.num_variations, len(params)))

            # evaluate with the current (unperturbed) parameters
            J = self.explore_and_evaluate(params, num_steps, num_rollouts)
            rewards.append(J)

            # keep track of the best reward and the parameters that achieved it
            if J > self.best_reward:
                self.best_reward = J
                self.best_parameters = params

            if verbose:
                print('\nEpisode {} - expected return: {}'.format(episode + 1, J))

            # 1. Explore: evaluate the perturbed parameter vectors
            for i in range(self.num_variations):
                # sample a parameter increment vector
                delta_params = np.random.normal(loc=0.0, scale=self.stddev, size=len(params))
                Delta_Params[i] = delta_params

                # estimate J(\theta + \delta)
                J_plus[i] = self.explore_and_evaluate(params + delta_params, num_steps, num_rollouts)

                # estimate J(\theta - \delta), or reuse J for the forward estimator
                if self.difference_type == 'forward':
                    J_minus[i] = J
                else:  # 'central' (difference_type is validated in __init__)
                    J_minus[i] = self.explore_and_evaluate(params - delta_params, num_steps, num_rollouts)

            # 2. Evaluate: difference gradient estimator
            delta_J = J_plus - J_minus

            # 3. Update: least-squares gradient estimate, then one gradient ascent step
            grad = np.linalg.pinv(Delta_Params).dot(delta_J)
            if self.normalize_grad:
                grad_norm = np.linalg.norm(grad)
                if grad_norm > 0:  # avoid dividing by zero on a flat estimate
                    grad /= grad_norm
            params = params + self.lr * grad  # TODO: allow the user to choose the optimizer
            self.policy.set_vectorized_parameters(params)

        return rewards

    def test(self, num_steps=1000, dt=0, use_terminating_condition=False, render=True):
        """
        Test the policy in the environment.

        Args:
            num_steps (int): number of steps to run the episode.
            dt (float): time to sleep before the next step.
            use_terminating_condition (bool): if True, use the terminal condition to end the environment.
            render (bool): if True, render the environment.

        Returns:
            float: obtained reward
        """
        return self.task.run(num_steps=num_steps, dt=dt, use_terminating_condition=use_terminating_condition,
                             render=render)
| 42.029167 | 119 | 0.620898 | 9,439 | 0.935759 | 0 | 0 | 0 | 0 | 0 | 0 | 6,261 | 0.6207 |
a88e902897afb9409b614fb8fea1673c9e1aaf16 | 592 | py | Python | example/rest/system_rest_example.py | bitcom-exchange/bitcom-python-api | 1491481f376ba7e7d4a7d2edb2f4400b8e4d7ec3 | [
"MIT"
] | 4 | 2021-03-04T00:10:24.000Z | 2021-12-15T01:49:39.000Z | example/rest/system_rest_example.py | bitcom-exchange/bitcom-python-api | 1491481f376ba7e7d4a7d2edb2f4400b8e4d7ec3 | [
"MIT"
] | null | null | null | example/rest/system_rest_example.py | bitcom-exchange/bitcom-python-api | 1491481f376ba7e7d4a7d2edb2f4400b8e4d7ec3 | [
"MIT"
] | null | null | null | from bitcom.client.system_client import SystemClient
from bitcom.utils import *
from bitcom.constant import *
# Build an authenticated bit.com REST client and exercise the system endpoints.
system_client = SystemClient(url=USER1_HOST, access_key=USER1_ACCESS_KEY, secret_key=USER1_SECRET_KEY)
# Server-side timestamp.
timestamp_response = system_client.get_system_timestamp()
LogInfo.output("Get server timestamp: ", timestamp_response)
# Deployed API version.
version_response = system_client.get_system_version()
LogInfo.output("Get API version: ", version_response)
# Cancel-only (COD) status after maintenance windows.
cod_status_response = system_client.get_system_cod_status()
LogInfo.output("Get cancel-only status after system maintenance: ", cod_status_response)
| 42.285714 | 102 | 0.839527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.158784 |
a8908a5ec5616695737ca542359a44699f12cde8 | 6,722 | py | Python | gg_gui/gui/gg_start.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | 1 | 2022-03-10T21:46:07.000Z | 2022-03-10T21:46:07.000Z | gg_gui/gui/gg_start.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | null | null | null | gg_gui/gui/gg_start.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | 1 | 2022-03-10T21:46:09.000Z | 2022-03-10T21:46:09.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/GG_start.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from utilGui import Names
class Ui_main_window(object):
    """Generated-style UI builder for the Alchemist main window.

    NOTE: the header says this file came from the PyQt5 UI code generator; if the
    .ui file is regenerated, re-apply the "Clean All" label fix below.
    """

    def setupUi(self, main_window):
        """Create and lay out every widget of the main window on `main_window`."""
        main_window.setObjectName("main_window")
        main_window.resize(684, 828)
        main_window.setWindowTitle('Alchemist')
        self.centralwidget = QtWidgets.QWidget(main_window)
        self.centralwidget.setObjectName("centralwidget")
        # run_btn
        self.run_btn = QtWidgets.QPushButton(self.centralwidget)
        self.run_btn.setText("Run")
        self.run_btn.setGeometry(QtCore.QRect(510, 20, 121, 30))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.run_btn.setFont(font)
        self.run_btn.setStyleSheet("border-color: rgb(114, 159, 207);\n"
                                   "background-color: rgb(78, 154, 6);")
        self.run_btn.setObjectName("run_btn")
        # min_max table: one row per chemical compound, min/max bound columns
        self.min_max_table = QtWidgets.QTableWidget(self.centralwidget)
        self.min_max_table.setGeometry(QtCore.QRect(20, 100, 421, 192))
        self.min_max_table.setObjectName("min_max_table")
        self.min_max_table.setColumnCount(2)
        self.min_max_table.setRowCount(len(Names.Chemical_Compounds))
        for i in range(len(Names.Chemical_Compounds)):
            item = QtWidgets.QTableWidgetItem(Names.Chemical_Compounds[i])
            self.min_max_table.setVerticalHeaderItem(i, item)
        item = QtWidgets.QTableWidgetItem("min")
        font = QtGui.QFont()
        font.setBold(False)
        font.setItalic(True)
        font.setWeight(50)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        self.min_max_table.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem("max")
        font = QtGui.QFont()
        font.setBold(False)
        font.setItalic(True)
        font.setWeight(50)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        self.min_max_table.setHorizontalHeaderItem(1, item)
        # default search-space bounds: [0.0, 1.0] for every compound
        for i in range(len(Names.Chemical_Compounds)):
            self.min_max_table.setItem(
                i, 0, QtWidgets.QTableWidgetItem("0.0"))
            self.min_max_table.setItem(
                i, 1, QtWidgets.QTableWidgetItem("1.0"))
        self.min_max_label = QtWidgets.QLabel(self.centralwidget)
        self.min_max_label.setGeometry(QtCore.QRect(20, 70, 421, 22))
        self.min_max_label.setObjectName("min_max_label")
        self.min_max_label.setText("Search space limitation:")
        # opt_label
        self.opt_label = QtWidgets.QLabel(self.centralwidget)
        self.opt_label.setGeometry(QtCore.QRect(460, 100, 201, 22))
        self.opt_label.setObjectName("opt_label")
        self.opt_label.setText("Methods:")
        # amount
        self.amount_sp = QtWidgets.QSpinBox(self.centralwidget)
        self.amount_sp.setGeometry(QtCore.QRect(460, 270, 201, 31))
        self.amount_sp.setMinimum(1)
        self.amount_sp.setMaximum(10000)
        self.amount_sp.setValue(1)
        self.amount_sp.setObjectName("amount_sp")
        self.amount_label = QtWidgets.QLabel(self.centralwidget)
        self.amount_label.setGeometry(QtCore.QRect(460, 240, 201, 22))
        self.amount_label.setObjectName("amount_label")
        self.amount_label.setText("Amount:")
        # tg (glass transition temperature target, 0..50000, default 700)
        self.tg_dsb = QtWidgets.QDoubleSpinBox(self.centralwidget)
        self.tg_dsb.setGeometry(QtCore.QRect(460, 200, 201, 31))
        self.tg_dsb.setMaximum(50000)
        self.tg_dsb.setMinimum(0)
        self.tg_dsb.setDecimals(0)
        self.tg_dsb.setSingleStep(5)
        self.tg_dsb.setObjectName("tg_dsb")
        self.tg_dsb.setValue(700)
        self.tg_label = QtWidgets.QLabel(self.centralwidget)
        self.tg_label.setGeometry(QtCore.QRect(460, 170, 201, 22))
        self.tg_label.setObjectName("tg_label")
        self.tg_label.setText("TG:")
        # opt_cb: optimization method selector
        self.opt_cb = QtWidgets.QComboBox(self.centralwidget)
        self.opt_cb.setGeometry(QtCore.QRect(460, 130, 201, 30))
        self.opt_cb.setObjectName("opt_cb")
        self.opt_cb.addItem("SA")
        self.opt_cb.addItem("PSO")
        self.opt_cb.addItem("RS")
        # result_tb: one column per chemical element plus a trailing TG column
        self.result_label = QtWidgets.QLabel(self.centralwidget)
        self.result_label.setGeometry(QtCore.QRect(20, 350, 641, 22))
        self.result_label.setObjectName("result_label")
        self.result_label.setText("Results:")
        self.result_tb = QtWidgets.QTableWidget(self.centralwidget)
        self.result_tb.setGeometry(QtCore.QRect(20, 380, 641, 341))
        self.result_tb.setObjectName("result_table")
        self.result_tb.setColumnCount(46)
        for i in range(len(Names.Chemical_Elemnts)):
            item = QtWidgets.QTableWidgetItem(Names.Chemical_Elemnts[i])
            self.result_tb.setHorizontalHeaderItem(i, item)
            font = QtGui.QFont()
            font.setItalic(True)
            item.setFont(font)
            item.setBackground(QtGui.QColor(114, 159, 207))
        # NOTE: reuses the loop variable `i` after the loop to place the TG header
        # one past the last element column — fragile if the loop body changes.
        item = QtWidgets.QTableWidgetItem("TG")
        self.result_tb.setHorizontalHeaderItem(i+1, item)
        font = QtGui.QFont()
        font.setItalic(True)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        # discard_btn
        self.discard_btn = QtWidgets.QPushButton(self.centralwidget)
        self.discard_btn.setText("Discard")
        self.discard_btn.setGeometry(QtCore.QRect(540, 730, 122, 30))
        self.discard_btn.setObjectName("discard_btn")
        # save_btn
        self.save_btn = QtWidgets.QPushButton(self.centralwidget)
        self.save_btn.setText("Save")
        self.save_btn.setGeometry(QtCore.QRect(400, 730, 122, 30))
        self.save_btn.setToolTip("")
        self.save_btn.setObjectName("save_btn")
        # clean_all_btn
        self.clean_all_btn = QtWidgets.QPushButton(self.centralwidget)
        self.clean_all_btn.setGeometry(QtCore.QRect(20, 730, 122, 30))
        self.clean_all_btn.setObjectName("clean_all_btn")
        # BUG FIX: label typo "Clan All" -> "Clean All" (matches the object name)
        self.clean_all_btn.setText("Clean All")
        main_window.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(main_window)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 684, 27))
        self.menubar.setObjectName("menubar")
        main_window.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(main_window)
        self.statusbar.setObjectName("statusbar")
        main_window.setStatusBar(self.statusbar)
a8944f28e696185ef0651eba49869976c687f070 | 10,005 | py | Python | gbpservice/contrib/tests/unit/nfp/configurator/agents/test_firewall.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | null | null | null | gbpservice/contrib/tests/unit/nfp/configurator/agents/test_firewall.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | null | null | null | gbpservice/contrib/tests/unit/nfp/configurator/agents/test_firewall.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | 1 | 2019-12-03T15:28:24.000Z | 2019-12-03T15:28:24.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests import base
from oslo_config import cfg
from gbpservice.contrib.nfp.configurator.agents import firewall as fw
from gbpservice.contrib.nfp.configurator.lib import constants as const
from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const
from gbpservice.contrib.tests.unit.nfp.configurator.test_data import (
fw_test_data as fo)
class FWaasRpcManagerTestCase(base.BaseTestCase):
    """ Implements test cases for RPC manager methods of firewall agent.
    """
    def __init__(self, *args, **kwargs):
        super(FWaasRpcManagerTestCase, self).__init__(*args, **kwargs)
        # shared fake-data factory used by every test in this class
        self.fo = fo.FakeObjects()
    # mock.patch decorators are applied bottom-up, so the bottom-most patch
    # ('conf') is injected as the first mock argument after self.
    @mock.patch(__name__ + '.fo.FakeObjects.sc')
    @mock.patch(__name__ + '.fo.FakeObjects.conf')
    def _get_FWaasRpcManager_object(self, conf, sc):
        """ Retrieves RPC manager object of firewall agent.
        :param sc: mocked service controller object of process model framework
        :param conf: mocked OSLO configuration file
        Returns: object of firewall's RPC manager and service controller
        """
        agent = fw.FWaasRpcManager(sc, conf)
        return agent, sc
    def _test_event_creation(self, method):
        """ Tests event creation and enqueueing for create/update/delete
        operation of firewall agent's RPC manager.
        :param method: event id; also the lower-cased name of the RPC method
            to invoke on the agent (e.g. CREATE_FIREWALL -> create_firewall).
        Returns: none
        """
        agent, sc = self._get_FWaasRpcManager_object()
        context = {}
        arg_dict = {'context': context,
                    'firewall': self.fo.firewall,
                    'host': self.fo.host}
        with mock.patch.object(sc, 'new_event', return_value='foo') as (
                mock_sc_event), (
                mock.patch.object(sc, 'post_event')) as mock_sc_rpc_event:
            call_method = getattr(agent, method.lower())
            call_method(context, self.fo.firewall, self.fo.host)
            # NOTE(review): result_dict aliases arg_dict, so the update below
            # also mutates arg_dict — harmless here since arg_dict is not reused.
            result_dict = arg_dict
            result_dict['firewall'] = {
                'file_path': "/tmp/%s" % (self.fo.firewall['id'])}
            # the agent must create the event with these args and enqueue it
            mock_sc_event.assert_called_with(id=method,
                                             data=result_dict, key=None)
            mock_sc_rpc_event.assert_called_with('foo')
    def test_create_firewall_fwaasrpcmanager(self):
        """ Implements test case for create firewall method
        of firewall agent's RPC manager.
        Returns: none
        """
        self._test_event_creation(fw_const.FIREWALL_CREATE_EVENT)
    def test_update_firewall_fwaasrpcmanager(self):
        """ Implements test case for update firewall method
        of firewall agent's RPC manager.
        Returns: none
        """
        self._test_event_creation(fw_const.FIREWALL_UPDATE_EVENT)
    def test_delete_firewall_fwaasrpcmanager(self):
        """ Implements test case for delete firewall method
        of firewall agent's RPC manager.
        Returns: none
        """
        self._test_event_creation(fw_const.FIREWALL_DELETE_EVENT)
class FwaasHandlerTestCase(base.BaseTestCase):
    """ Implements test cases for event handler methods
    of firewall agent.
    """

    def __init__(self, *args, **kwargs):
        super(FwaasHandlerTestCase, self).__init__(*args, **kwargs)
        # Shared fake fixtures: fake plugin-side objects and a fake
        # firewall event that each test customizes via self.ev.id.
        self.fo = fo.FakeObjects()
        self.ev = fo.FakeEventFirewall()
        # Minimal firewall rule used when a non-empty rule list is needed.
        self.firewall_rule = {
            'id': 'rule-id', 'action': 'allow',
            'destination_ip_address': '',
            'destination_port': '80',
            'enabled': 'enabled', 'ip_version': 'v4',
            'protocol': 'tcp', 'source_ip_address': '',
            'source_port': '', 'shared': False,
            'position': 1
        }
        self.ev.data['context']['agent_info']['resource'] = 'firewall'

    @mock.patch(__name__ + '.fo.FakeObjects.rpcmgr')
    @mock.patch(__name__ + '.fo.FakeObjects.drivers')
    @mock.patch(__name__ + '.fo.FakeObjects.sc')
    def _get_FwHandler_objects(self, sc, drivers, rpcmgr):
        """ Retrieves event handler object of firewall agent.

        :param sc: mocked service controller object of process model framework
        :param drivers: list of driver objects for firewall agent
        :param rpcmgr: object of configurator's RPC manager

        Returns: object of firewall agents's event handler
        """
        # cfg.CONF is patched so the handler picks up a fake host name
        # instead of reading real oslo.config state.
        with mock.patch.object(cfg, 'CONF') as mock_cfg:
            mock_cfg.configure_mock(host='foo')
            agent = fw.FWaasEventHandler(sc, drivers, rpcmgr, mock_cfg)
        return agent

    def _test_handle_event(self, rule_list_info=True):
        """ Test handle event method of firewall agent for various
        device configuration operations.

        :param rule_list_info: an attribute of the firewall resource object
        sent from plugin which contains the firewall rules.

        Returns: None
        """
        agent = self._get_FwHandler_objects()
        driver = mock.Mock()
        # Patch both plugin RPC callbacks and the three driver entry points
        # (create/update/delete, derived from the event-name constants), and
        # force _get_driver to hand back our mock driver.
        with mock.patch.object(
            agent.plugin_rpc, 'set_firewall_status') as (
                mock_set_fw_status), (
            mock.patch.object(
                agent.plugin_rpc, 'firewall_deleted')) as (mock_fw_deleted), (
            mock.patch.object(
                driver, fw_const.FIREWALL_CREATE_EVENT.lower())) as (
                    mock_create_fw), (
            mock.patch.object(
                driver, fw_const.FIREWALL_UPDATE_EVENT.lower())) as (
                    mock_update_fw), (
            mock.patch.object(
                driver, fw_const.FIREWALL_DELETE_EVENT.lower())) as (
                    mock_delete_fw), (
            mock.patch.object(
                agent, '_get_driver', return_value=driver)):
            firewall = self.fo._fake_firewall_obj()
            if not rule_list_info:
                firewall_rule_list = []
            else:
                firewall_rule_list = [self.firewall_rule]
            # Keep the local firewall dict and the event payload in sync.
            firewall.update({'firewall_rule_list': firewall_rule_list})
            self.ev.data.get('firewall').update(
                {'firewall_rule_list': firewall_rule_list})
            agent_info = self.ev.data['context']['agent_info']
            agent.handle_event(self.ev)
            context = self.fo.neutron_context
            if 'service_info' in self.fo.context:
                self.fo.context.pop('service_info')
            if not rule_list_info:
                # With an empty rule list the handler is expected to answer
                # the plugin directly and never reach the driver.
                if self.ev.id == fw_const.FIREWALL_CREATE_EVENT:
                    mock_set_fw_status.assert_called_with(
                        agent_info,
                        firewall['id'], const.STATUS_ACTIVE, firewall)
                elif self.ev.id == fw_const.FIREWALL_UPDATE_EVENT:
                    # NOTE(review): unlike the CREATE branch this expected
                    # call omits firewall['id'] — confirm against the
                    # handler's actual update signature.
                    mock_set_fw_status.assert_called_with(
                        agent_info,
                        const.STATUS_ACTIVE, firewall)
                elif self.ev.id == fw_const.FIREWALL_DELETE_EVENT:
                    mock_fw_deleted.assert_called_with(
                        agent_info, firewall['id'], firewall)
            else:
                # With rules present the matching driver method must be
                # invoked with the neutron context, firewall and host.
                if self.ev.id == fw_const.FIREWALL_CREATE_EVENT:
                    mock_create_fw.assert_called_with(
                        context,
                        firewall, self.fo.host)
                elif self.ev.id == fw_const.FIREWALL_UPDATE_EVENT:
                    mock_update_fw.assert_called_with(
                        context,
                        firewall, self.fo.host)
                elif self.ev.id == fw_const.FIREWALL_DELETE_EVENT:
                    mock_delete_fw.assert_called_with(
                        context,
                        firewall, self.fo.host)

    def test_create_firewall_with_rule_list_info_true(self):
        """ Implements test case for create firewall method
        of firewall agent's event handler with firewall rules.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_CREATE_EVENT
        self._test_handle_event()

    def test_update_firewall_with_rule_list_info_true(self):
        """ Implements test case for update firewall method
        of firewall agent's event handler with firewall rules.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_UPDATE_EVENT
        self._test_handle_event()

    def test_delete_firewall_with_rule_list_info_true(self):
        """ Implements test case for delete firewall method
        of firewall agent's event handler with firewall rules.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_DELETE_EVENT
        self._test_handle_event()

    def test_create_firewall_with_rule_list_info_false(self):
        """ Implements test case for create firewall method
        of firewall agent's event handler without firewall rules.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_CREATE_EVENT
        self._test_handle_event(False)

    def test_update_firewall_with_rule_list_info_false(self):
        """ Implements test case for update firewall method
        of firewall agent's event handler without firewall rules.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_UPDATE_EVENT
        self._test_handle_event(False)

    def test_delete_firewall_with_rule_list_info_false(self):
        """ Implements test case for delete firewall method
        of firewall agent's event handler without firewall rules.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_DELETE_EVENT
        self._test_handle_event(False)
| 35.228873 | 78 | 0.625087 | 9,040 | 0.903548 | 0 | 0 | 1,248 | 0.124738 | 0 | 0 | 3,628 | 0.362619 |
a8945cb8801554f627e2ef91c6ad0ae2533363a4 | 603 | py | Python | deepchem/data/test_data_loader.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | 2 | 2021-04-01T01:17:53.000Z | 2021-10-04T16:46:13.000Z | deepchem/data/test_data_loader.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | 1 | 2020-07-13T18:59:49.000Z | 2020-07-13T18:59:49.000Z | deepchem/data/test_data_loader.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | 1 | 2021-04-06T20:32:02.000Z | 2021-04-06T20:32:02.000Z | import os
from unittest import TestCase
from io import StringIO
import tempfile
import shutil
import deepchem as dc
class TestCSVLoader(TestCase):
    """Tests for deepchem's CSVLoader featurization from CSV files."""

    def test_load_singleton_csv(self):
        """Featurizing a one-row CSV must yield exactly one datapoint."""
        # Write a minimal CSV with a single SMILES string and one task column.
        # delete=False is required on Windows so the file can be re-opened
        # by the loader while the handle is closed.
        fin = tempfile.NamedTemporaryFile(mode='w', delete=False)
        fin.write("smiles,endpoint\nc1ccccc1,1")
        fin.close()
        try:
            featurizer = dc.feat.CircularFingerprint(size=1024)
            tasks = ["endpoint"]
            loader = dc.data.CSVLoader(
                tasks=tasks, smiles_field="smiles", featurizer=featurizer)
            X = loader.featurize(fin.name)
            self.assertEqual(1, len(X))
        finally:
            # Always remove the temp file — the original version leaked it
            # whenever featurization or the assertion failed, and printed
            # its path as leftover debug output.
            os.remove(fin.name)
| 24.12 | 66 | 0.719735 | 483 | 0.800995 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.082919 |
a895712ceea2a7fbba14537af18f253be776f176 | 11,382 | py | Python | fiwareglancesync/glancesync_serverfacade_mock.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | null | null | null | fiwareglancesync/glancesync_serverfacade_mock.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 88 | 2015-07-21T22:13:23.000Z | 2016-11-15T21:28:56.000Z | fiwareglancesync/glancesync_serverfacade_mock.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 2 | 2015-08-12T11:19:55.000Z | 2018-05-25T19:04:43.000Z | #!/usr/bin/env python
# -- encoding: utf-8 --
#
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
import csv
import glob
import shelve
import copy
import os
import argparse
import tempfile
import sys
from glancesync_image import GlanceSyncImage
"""This module contains all the code that interacts directly with the glance
implementation. It isolates the main code from the glance interaction.
Therefore, this module may be changed if the API is upgraded or it is
invoked in a different way, without affecting the main module.
This is a mock implementation, used for testing.
"""
import logging
class ServersFacade(object):
    """Mock of the glance server facade, backed by in-memory dicts or
    (optionally) shelve files on disk, for use in tests.

    Region keys in ``images`` are the full region names: plain names for
    the master target (e.g. ``Valladolid``) and ``target:region`` for
    other targets (e.g. ``other:Madrid``).
    """

    # Path where a real facade would store image data (unused by the mock).
    images_dir = '/var/lib/glance/images'
    # Class-level store shared by every instance: region name -> dict (or
    # shelve) mapping image id -> GlanceSyncImage.
    images = dict()
    # Put this property to False to use this file as a mock in a unittest
    # when use_persistence is true, image information is preserved in disk.
    use_persistence = False
    dir_persist = './.glancesync_persist'

    def __init__(self, target):
        self.target = target

    def get_regions(self):
        """It returns the list of regions on the specified target.

        :return: a list of region names.
        """
        all_regions = ServersFacade.images.keys()
        target_name = self.target['target_name']
        regions_list = list()
        for region in all_regions:
            parts = region.split(':')
            if target_name == 'master':
                # master regions have no "target:" prefix
                if len(parts) != 1:
                    continue
                regions_list.append(region)
            else:
                # other targets: keep only "<target_name>:<region>" keys,
                # returning just the bare region part
                if len(parts) != 2:
                    continue
                if parts[0] != target_name:
                    continue
                regions_list.append(parts[1])
        return regions_list

    def get_imagelist(self, regionobj):
        """return a image list from the glance of the specified region

        :param regionobj: The GlanceSyncRegion object of the region to list
        :return: a list of GlanceSyncImage objects
        """
        # clone the object: otherwise modifying the returned object
        # modify the object in the images.
        return copy.deepcopy(ServersFacade.images[regionobj.fullname].values())

    def update_metadata(self, regionobj, image):
        """ update the metadata of the image in the specified region

        See GlanceSync.update_metadata_image for more details.

        :param regionobj: region where it is the image to update
        :param image: the image with the metadata to update
        :return: this function doesn't return anything.
        """
        images = ServersFacade.images[regionobj.fullname]
        updatedimage = images[image.id]
        updatedimage.is_public = image.is_public
        updatedimage.name = image.name
        # updatedimage.owner = image.owner
        updatedimage.user_properties = dict(image.user_properties)
        if ServersFacade.use_persistence:
            # shelve only persists on reassignment, so write back and sync
            images[image.id] = updatedimage
            images.sync()

    def upload_image(self, regionobj, image):
        """Upload the image to the glance server on the specified region.

        :param regionobj: GlanceSyncRegion object; the region where the image
          will be upload.
        :param image: GlanceSyncImage object; the image to be uploaded.
        :return: The UUID of the new image.
        """
        count = 1
        if regionobj.fullname not in ServersFacade.images:
            ServersFacade.images[regionobj.fullname] = dict()
        # Fake UUIDs are "<n>$<name>"; bump n until the id is unique.
        imageid = '1$' + image.name
        while imageid in ServersFacade.images[regionobj.fullname]:
            count += 1
            imageid = str(count) + '$' + image.name
        owner = regionobj.target['tenant'] + 'id'
        new_image = GlanceSyncImage(
            image.name, imageid, regionobj.fullname, owner, image.is_public,
            image.checksum, image.size, image.status,
            dict(image.user_properties))
        ServersFacade.images[regionobj.fullname][imageid] = new_image
        if ServersFacade.use_persistence:
            ServersFacade.images[regionobj.fullname].sync()
        return imageid

    def delete_image(self, regionobj, id, confirm=True):
        """delete a image on the specified region.

        Be careful, this action cannot be reverted and for this reason by
        default requires confirmation!

        :param regionobj: the GlanceSyncRegion object
        :param id: the UUID of the image to delete
        :param confirm: ask for confirmation (note: the mock never actually
            prompts; the parameter only mirrors the real facade's signature)
        :return: true if image was deleted, false if it was canceled by user
        """
        if regionobj.fullname not in ServersFacade.images:
            return False
        images = ServersFacade.images[regionobj.fullname]
        if id not in images:
            return False
        del images[id]
        if ServersFacade.use_persistence:
            ServersFacade.images[regionobj.fullname].sync()
        return True

    def get_tenant_id(self):
        """It returns the tenant id corresponding to the target. It is
        necessary to use the tenant_id instead of the tenant_name because the
        first is used as the owner of the images.

        :return: the tenant id
        """
        if 'tenant_id' in self.target:
            return self.target['tenant_id']
        else:
            # fake id derived from the tenant name, matching upload_image
            return self.target['tenant'] + 'id'

    @staticmethod
    def init_persistence(dir=None, clean=False):
        """Function to start using persistence: load the data from the lass
        session if it exists

        :param dir: path of the directory where the persistence files go.
           Default dir is ./.glancesync_persist
        :param clean: if path exists, discard all existing content
        :return:
        """
        # NOTE(review): when dir is None the os.path.exists(dir) call below
        # would raise; callers appear to always pass a path — confirm.
        if dir:
            ServersFacade.dir_persist = dir
        ServersFacade.use_persistence = True
        ServersFacade.images = dict()
        if os.path.exists(dir):
            for name in glob.glob(dir + '/_persist_*'):
                if clean:
                    os.unlink(name)
                else:
                    # strip the "_persist_" prefix to recover the region name
                    region = os.path.basename(name)[9:]
                    ServersFacade.images[region] = shelve.open(name)
        else:
            os.mkdir(ServersFacade.dir_persist)

    @staticmethod
    def add_image_to_mock(image):
        """Add the image to the mock

        :param image: The image to add. If can be a GlanceSyncImage or a list
        :return: This method does not return nothing.
        """
        if type(image) == list:
            image = GlanceSyncImage.from_field_list(image)
        else:
            # copy so later caller-side mutation does not leak into the mock
            image = copy.deepcopy(image)
        if image.region not in ServersFacade.images:
            if ServersFacade.use_persistence:
                ServersFacade.images[image.region] =\
                    shelve.open(ServersFacade.dir_persist + '/_persist_' +
                                image.region)
            else:
                ServersFacade.images[image.region] = dict()
        ServersFacade.images[image.region][image.id] = image
        if ServersFacade.use_persistence:
            ServersFacade.images[image.region].sync()

    @staticmethod
    def add_emptyregion_to_mock(region):
        """Add empty region to mock

        :param region: The image region (e.g. other:Madrid)
        :return: This method does not return nothing.
        """
        if ServersFacade.use_persistence:
            ServersFacade.images[region] = shelve.open(
                ServersFacade.dir_persist + '/_persist_' + region)
        else:
            ServersFacade.images[region] = dict()

    @staticmethod
    def clear_mock():
        """clear all the non-persistent content of the mock"""
        ServersFacade.images = dict()
        # When using persistence, deleting the _persist_ files is the
        # responsibility of the caller.

    @staticmethod
    def add_images_from_csv_to_mock(path):
        """Add images to the mock, reading the csv files saved by the backup
        tool.

        :param path: The directory where the csv files are.
        :return: This method does not return nothing.

        Each file in path has this pattern: backup_<regionname>.csv.
        """
        for file in glob.glob(path + '/*.csv'):
            # strip "backup_" prefix and ".csv" suffix to get the region
            region_name = os.path.basename(file)[7:-4]
            if region_name not in ServersFacade.images:
                if ServersFacade.use_persistence:
                    ServersFacade.images[region_name] =\
                        shelve.open(ServersFacade.dir_persist + '/_persist_' +
                                    region_name)
                else:
                    ServersFacade.images[region_name] = dict()
            with open(file) as f:
                for row in csv.reader(f):
                    # ignore blank lines
                    if len(row) == 0:
                        continue
                    image = GlanceSyncImage.from_field_list(row)
                    ServersFacade.images[region_name][image.id] = image
            if ServersFacade.use_persistence:
                ServersFacade.images[region_name].sync()
if __name__ == '__main__':
    # CLI entry point: seed a persistent mock session from a directory of
    # backup_<region>.csv files and print the env var pointing to it.
    parser = argparse.ArgumentParser(
        description='Start a clean persistent session'
    )
    # --path and --random are mutually exclusive ways to pick the
    # persistence directory.
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--path', default='~/.glancesync_persist/',
                       help='path where the persistent objects are created')
    group.add_argument('--random', action='store_true',
                       help='create a random path')
    parser.add_argument(
        'initial_load',
        help='directory with initial load, with files (backup_<region>.csv)')
    parser.add_argument(
        '--confirm', action='store_true',
        help='If path exists and it is not empty, this option is required')
    meta = parser.parse_args()
    meta.initial_load = os.path.normpath(os.path.expanduser(meta.initial_load))
    if not os.path.exists(meta.initial_load):
        logging.error('The directory "%s" with the initial load must exist' %
                      meta.initial_load)
        sys.exit(-1)
    if meta.random:
        meta.path = tempfile.mkdtemp(prefix='glancesync_tmp')
    else:
        meta.path = os.path.normpath(os.path.expanduser(meta.path))
    # Refuse to clobber a non-empty persistence dir unless --confirm given.
    m = 'The directory "%s" is not empty. If you are sure, pass --confirm'
    if os.path.exists(meta.path) and not meta.confirm \
            and len(glob.glob(meta.path + '/_persist_*')) != 0:
        logging.error(m % meta.path)
        sys.exit(-1)
    facade = ServersFacade(dict())
    # clean=True: start from an empty persistent store, then load the CSVs.
    facade.init_persistence(meta.path, True)
    facade.add_images_from_csv_to_mock(meta.initial_load)
    # Emit a shell-sourceable line so callers can locate the session.
    print('export GLANCESYNC_MOCKPERSISTENT_PATH=' + meta.path)
| 37.94 | 79 | 0.627833 | 8,405 | 0.738317 | 0 | 0 | 3,697 | 0.324754 | 0 | 0 | 4,716 | 0.414266 |
a896d0baf80ed99339ea2d85b7342a139e8dfc26 | 3,127 | py | Python | 0867_transpose_matrix/python_source.py | arthurdysart/LeetCode | 69f90877c5466927e8b081c4268cbcda074813ec | [
"Unlicense"
] | null | null | null | 0867_transpose_matrix/python_source.py | arthurdysart/LeetCode | 69f90877c5466927e8b081c4268cbcda074813ec | [
"Unlicense"
] | null | null | null | 0867_transpose_matrix/python_source.py | arthurdysart/LeetCode | 69f90877c5466927e8b081c4268cbcda074813ec | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Leetcode - Transpose Matrix
https://leetcode.com/problems/transpose-matrix
Created on Fri Nov 23 11:13:48 2018
@author: Arthur Dysart
"""
## REQUIRED MODULES
import sys
## MODULE DEFINITIONS
class Solution:
    """
    Transpose via argument unpacking: zip(*a) pairs up the i-th element
    of every row, yielding the columns of the original matrix.

    Time complexity: O(n * m)
    - Every element is visited once.
    Space complexity: O(n * m)
    - A full transposed copy is built.
    """

    def transpose_matrix(self, a):
        """
        Transform all rows into columns in 2D array.

        :param list[list[int]] a: input 2D array of integers
        :return: 2D array transposed
        :rtype: list[list[int]]
        """
        return list(map(list, zip(*a)))
class Solution2:
    """
    Index-based transpose written as a nested comprehension: the outer
    loop walks columns of the input, the inner loop collects that column
    from every row.

    Time complexity: O(n * m)
    - Every element is visited once.
    Space complexity: O(n * m)
    - A full transposed copy is built.
    """

    def transpose_matrix(self, a):
        """
        Transform all rows into columns in 2D array.

        :param list[list[int]] a: input 2D array of integers
        :return: 2D array transposed
        :rtype: list[list[int]]
        """
        if not a:
            return []
        num_cols = len(a[0])
        num_rows = len(a)
        return [[a[row][col] for row in range(num_rows)]
                for col in range(num_cols)]
class Solution3:
    """
    Column-wise transpose: for each column index, gather that element
    from every row and append the resulting row to the output.

    Time complexity: O(n * m)
    - Every element is visited once.
    Space complexity: O(n * m)
    - A full transposed copy is built.
    """

    def transpose_matrix(self, a):
        """
        Transform all rows into columns in 2D array.

        :param list[list[int]] a: input 2D array of integers
        :return: 2D array transposed
        :rtype: list[list[int]]
        """
        if not a:
            return []
        transposed = []
        for col in range(len(a[0])):
            transposed.append([row[col] for row in a])
        return transposed
class Input:
    """Parses matrix input of the form "[1,2],[3,4]" from standard input."""

    def stdin(self, sys_stdin):
        """
        Imports standard input.

        :param _io.TextIOWrapper sys_stdin: standard input
        :return: input 2D array of integers
        :rtype: list[list[int]]
        """
        # Strip outer brackets/newline, then split rows on the "],[" seam.
        rows = [line.strip("[]\n").split("],[") for line in sys_stdin]
        # Only the first input line is the matrix; parse each row's ints.
        return [[int(value) for value in row.split(",")] for row in rows[0]]
## MAIN MODULE
if __name__ == "__main__":
    # Import exercise parameters: read the matrix from standard input.
    a = Input()\
        .stdin(sys.stdin)
    # Evaluate solution and print the transposed matrix.
    z = Solution()\
        .transpose_matrix(a)
    print(z)
## END OF FILE | 23.870229 | 76 | 0.525424 | 2,637 | 0.8433 | 0 | 0 | 0 | 0 | 0 | 0 | 1,900 | 0.607611 |
a897a4fa8a48d750657c9f7f093c97c8595dcb6d | 3,094 | py | Python | src/test/aiml_tests/pattern_set_tests/test_pattern_set_aiml.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | src/test/aiml_tests/pattern_set_tests/test_pattern_set_aiml.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | src/test/aiml_tests/pattern_set_tests/test_pattern_set_aiml.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.sections.brain.file import BrainFileConfiguration
class BasicTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
super(BasicTestClient, self).load_configuration(arguments)
self.configuration.brain_configuration.files.aiml_files._files=os.path.dirname(__file__)
self.configuration.brain_configuration.files.set_files._files = os.path.dirname(__file__)+"/sets"
self.configuration.brain_configuration.files.set_files._extension=".txt"
class PatternsetAIMLTests(unittest.TestCase):
def setUp(cls):
PatternsetAIMLTests.test_client = BasicTestClient()
def test_patten_set_match(self):
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "MY FAVORITE COLOR IS AMBER")
self.assertEqual(response, "Amber IS A NICE COLOR.")
def test_patten_match_multi_word_set(self):
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "MY FAVORITE COLOR IS AIR FORCE BLUE")
self.assertEqual(response, "Air Force blue IS A NICE COLOR.")
def test_patten_match_mixed_word_set(self):
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "MY FAVORITE COLOR IS RED")
self.assertEqual(response, "Red IS A NICE COLOR.")
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "MY FAVORITE COLOR IS RED ORANGE")
self.assertEqual(response, "Red Orange IS A NICE COLOR.")
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "MY FAVORITE COLOR IS SACRAMENTO STATE GREEN")
self.assertEqual(response, "Sacramento State green IS A NICE COLOR.")
def test_patten_match_mixed_word_set_longer_sentence(self):
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "I DO NOT LIKE RED VERY MUCH")
self.assertEqual(response, "IT IS OK, Red IS NOT MY BEST COLOUR EITHER")
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "I DO NOT LIKE RED ORANGE AT ALL")
self.assertEqual(response, "IT IS OK, Red Orange IS NOT MY BEST COLOUR EITHER")
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "I DO NOT LIKE SACRAMENTO STATE GREEN AT ALL")
self.assertEqual(response, "IT IS OK, Sacramento State green IS NOT MY BEST COLOUR EITHER")
def test_patten_match_mixed_word_set_at_front(self):
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "RED IS A NICE COLOUR")
self.assertEqual(response, "YES Red IS A LOVELY COLOUR.")
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "RED ORANGE IS A NICE COLOUR")
self.assertEqual(response, "YES Red Orange IS A LOVELY COLOUR.")
response = PatternsetAIMLTests.test_client.bot.ask_question("test", "SACRAMENTO STATE GREEN IS A NICE COLOUR")
self.assertEqual(response, "YES Sacramento State green IS A LOVELY COLOUR.")
| 50.721311 | 123 | 0.73788 | 2,946 | 0.952165 | 0 | 0 | 0 | 0 | 0 | 0 | 867 | 0.28022 |
a89927879bad1ad4980da6b351229d2b04e26b9e | 1,885 | py | Python | Python/erect-the-fence.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2022-01-30T06:55:28.000Z | 2022-01-30T06:55:28.000Z | Python/erect-the-fence.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | null | null | null | Python/erect-the-fence.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2021-12-31T03:56:39.000Z | 2021-12-31T03:56:39.000Z | # Time: O(nlogn)
# Space: O(n)
# There are some trees, where each tree is represented by
# (x,y) coordinate in a two-dimensional garden.
# Your job is to fence the entire garden using the minimum length of rope
# as it is expensive. The garden is well fenced only if all the trees are enclosed.
# Your task is to help find the coordinates of trees which are exactly located on the fence perimeter.
#
# Example 1:
# Input: [[1,1],[2,2],[2,0],[2,4],[3,3],[4,2]]
# Output: [[1,1],[2,0],[4,2],[3,3],[2,4]]
#
# Example 2:
# Input: [[1,2],[2,2],[4,2]]
# Output: [[1,2],[2,2],[4,2]]
#
# Even you only have trees in a line, you need to use rope to enclose them.
# Note:
#
# All trees should be enclosed together.
# You cannot cut the rope to enclose trees that will separate them in more than one group.
# All input integers will range from 0 to 100.
# The garden has at least one tree.
# All coordinates are distinct.
# Input points have NO order. No order required for output.
# Definition for a point.
# class Point(object):
# def __init__(self, a=0, b=0):
# self.x = a
# self.y = b
import itertools
# Monotone Chain Algorithm
class Solution(object):
def outerTrees(self, points):
"""
:type points: List[List[int]]
:rtype: List[List[int]]
"""
def ccw(A, B, C):
return (B[0]-A[0])*(C[1]-A[1]) - (B[1]-A[1])*(C[0]-A[0])
if len(points) <= 1:
return points
hull = []
points.sort()
for i in itertools.chain(xrange(len(points)), reversed(xrange(len(points)-1))):
while len(hull) >= 2 and ccw(hull[-2], hull[-1], points[i]) < 0:
hull.pop()
hull.append(points[i])
hull.pop()
for i in xrange(1, (len(hull)+1)//2):
if hull[i] != hull[-1]:
break
hull.pop()
return hull
| 29.920635 | 102 | 0.584615 | 736 | 0.390451 | 0 | 0 | 0 | 0 | 0 | 0 | 1,180 | 0.625995 |
a899d0bde8415bf24f5d7bf27bdd0c2ee8c0fdf1 | 391 | py | Python | clase 1/quiz 1/quiz 1 exe.py | amedina14/uip-iq17-pc3 | 89f04c0670079384cee40736d5c92175a8c586a5 | [
"MIT"
] | null | null | null | clase 1/quiz 1/quiz 1 exe.py | amedina14/uip-iq17-pc3 | 89f04c0670079384cee40736d5c92175a8c586a5 | [
"MIT"
] | null | null | null | clase 1/quiz 1/quiz 1 exe.py | amedina14/uip-iq17-pc3 | 89f04c0670079384cee40736d5c92175a8c586a5 | [
"MIT"
] | null | null | null | '''
Quiz 1:
Hacer un programa que lea una temperatura en farenheit y la convierta en celsius y si es mayor
a 100°C imprima "caliente". Si es menor a 0°C imprima "frio"
'''
tempF = int(input("TempF: "))
#(tempF - 32/(5/9))
tempC = (((tempF - 32)*5)/9)
print("\nLa temperatura en Celsius es " + str(tempC))
if tempC >= 100:
print("caliente")
elif tempC < 0:
print("frio") | 23 | 95 | 0.629156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.648855 |
a89a91a25c7d71cc53adcd1adb0b082bb4aacfbb | 59 | py | Python | config.py | retry0/botTelegram-nCov | e75cac6db6ca0f3e8394c82a612bd150fc9c9d44 | [
"MIT"
] | null | null | null | config.py | retry0/botTelegram-nCov | e75cac6db6ca0f3e8394c82a612bd150fc9c9d44 | [
"MIT"
] | null | null | null | config.py | retry0/botTelegram-nCov | e75cac6db6ca0f3e8394c82a612bd150fc9c9d44 | [
"MIT"
] | null | null | null | api_key = "1108029941:AAGkHMtVFPT1-SsL5dRZn7gR65hxWD9-HH0"
| 29.5 | 58 | 0.847458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.813559 |
a89b1257c0b814198cd9f23f6ab82d1368f8a690 | 347 | py | Python | aws-computation.py | juhi04/CodeDeployGitHubDemo | 063778394a5c3adee719834b2e8116940b77b106 | [
"Apache-2.0"
] | null | null | null | aws-computation.py | juhi04/CodeDeployGitHubDemo | 063778394a5c3adee719834b2e8116940b77b106 | [
"Apache-2.0"
] | null | null | null | aws-computation.py | juhi04/CodeDeployGitHubDemo | 063778394a5c3adee719834b2e8116940b77b106 | [
"Apache-2.0"
] | 1 | 2020-09-13T21:19:01.000Z | 2020-09-13T21:19:01.000Z | import pandas as pd
import redis
def create_dummy_df():
    """Build and return a small fixed two-column DataFrame.

    Prints a progress message and the frame itself as a side effect.
    """
    print("Redis imported")
    data = {'A': [1, 2, 3], 'B': [5, 10, 15]}
    frame = pd.DataFrame(data)
    print(frame)
    return frame
if __name__=='__main__':
    # Run forever, rebuilding the dummy frame each iteration, until the
    # user interrupts (Ctrl-C) or the interpreter is asked to exit.
    try:
        while True:
            df = create_dummy_df()
    except (KeyboardInterrupt, SystemExit):
        print('Exited on user request')
| 19.277778 | 57 | 0.590778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.161383 |
a89c762bc05a26b656892179338f84a25bc98f02 | 12,651 | py | Python | botpubg-mpgh/SuperBot.py | fuckpubg/AHK | 7b41869d9692b6d207ce9d7939ea3cb6563386fd | [
"Apache-2.0"
] | 58 | 2017-10-25T17:24:15.000Z | 2022-03-06T21:50:41.000Z | botpubg-mpgh/SuperBot.py | haryandrafatwa/AHK | 7b41869d9692b6d207ce9d7939ea3cb6563386fd | [
"Apache-2.0"
] | 2 | 2017-10-28T07:08:18.000Z | 2018-01-17T03:47:49.000Z | botpubg-mpgh/SuperBot.py | haryandrafatwa/AHK | 7b41869d9692b6d207ce9d7939ea3cb6563386fd | [
"Apache-2.0"
] | 53 | 2017-11-03T06:38:36.000Z | 2021-09-20T00:38:14.000Z | # -*- coding: utf-8 -*-
import json
import os
import time
import psutil
import pyautogui
pubg_url = 'steam://rungameid/578080'  # Steam protocol URL that launches PUBG
PROCNAME = "TslGame.exe"  # name of the main game process
CRASH_PROCNAME = "BroCrashReporter.exe"  # crash reporter killed when detected
debug_directory = "debug_screenshots"  # where debug screenshots are written

# State-machine identifiers and per-state timeout budgets (seconds).
start_state = "HELLO"
play_state = "PLAYING"
play_timer_max = 60 * 3
matching_state = "MATCHING"
matching_timer_max = 60 * 3
loading_state = "LOADING"
loading_timer_max = 60 * 3
gameloading_state = "GAME IS LOADING"
gameloading_timer_max = 60 * 3

state = start_state  # current state of the bot's state machine
takeScrenshot = True  # user consent flag for saving debug screenshots
timer = 0.0  # seconds accumulated in the current state
def getConfig():
    """Load and return the bot settings from ``config.json``."""
    with open('config.json', encoding='UTF-8') as handle:
        return json.load(handle)
def getpixel(x, y):
    """Capture the screen and return the colour of the pixel at (x, y)."""
    screen = pyautogui.screenshot()
    return screen.getpixel((x, y))
def pixelMatchesColor(x, y, expectedRGBColor, tolerance=0):
    """Return True when the on-screen pixel at (x, y) matches the expected
    colour, with each channel allowed to differ by up to *tolerance*.

    Accepts 3-tuples (RGB) and 4-tuples (RGBA); if either side is length 3
    the comparison is done on RGB only.
    """
    pix = getpixel(x, y)
    if len(pix) == 3 or len(expectedRGBColor) == 3:  # RGB mode
        pairs = zip(pix[:3], expectedRGBColor[:3])
        return all(abs(got - want) <= tolerance for got, want in pairs)
    elif len(pix) == 4 and len(expectedRGBColor) == 4:  # RGBA mode
        pairs = zip(pix, expectedRGBColor)
        return all(abs(got - want) <= tolerance for got, want in pairs)
    else:
        assert False, 'Color mode was expected to be length 3 (RGB) or 4 (RGBA), but pixel is length %s and expectedRGBColor is length %s' % (
            len(pix), len(expectedRGBColor))
def printScreen(message):
    """Save a timestamped debug screenshot tagged with *message*.

    Does nothing when the user declined screenshots (takeScrenshot is
    False). Creates the debug directory on first use.
    """
    if not takeScrenshot:
        return
    if not os.path.exists(debug_directory):
        os.makedirs(debug_directory)
    stamp = time.strftime("%m.%d %H.%M.%S", time.gmtime())
    target_file = '{}/{}{}.png'.format(debug_directory, stamp, message)
    pyautogui.screenshot(target_file)
def changeState(value):
    """Switch the bot's state machine to *value* and reset its timer."""
    global state, timer
    timer = 0
    state = value
def killGame():
    """Terminate every running PUBG game process (TslGame.exe)."""
    game_processes = (p for p in psutil.process_iter()
                      if p.name() == PROCNAME)
    for proc in game_processes:
        proc.kill()
def matchesButton(position):
    """Return True when the pixel at *position* matches any known button
    colour (white, gray, super-white or golden) within the configured
    colour tolerance.

    :param position: (x, y) screen coordinates of the pixel to probe
    :return: True if any button palette colour matches, otherwise False
    """
    x, y = position[0], position[1]
    # Same candidate colours and evaluation order as before; any() replaces
    # the original four copy-pasted pixelMatchesColor calls chained with
    # `or`, and still short-circuits on the first match.
    button_colors = (white_button, gray_button, super_white_button,
                     golden_button)
    return any(pixelMatchesColor(x, y, color, tolerance=color_tolerance)
               for color in button_colors)
def isGameRunning():
    """Return True when a PUBG game process (TslGame.exe) is running.

    :return: True if any running process is named PROCNAME, else False
    """
    # any() replaces the original for/else construct, which depended on the
    # easily-misread behaviour of `else` on a loop that has no `break`.
    return any(proc.name() == PROCNAME for proc in psutil.process_iter())
def checkTimer():
    """Restart the game when the current state has exceeded its timeout.

    Looks up the timeout budget for the current state; if the global timer
    has passed it, a 'Timeout' debug screenshot is saved and the state
    machine is reset to ``start_state`` (which triggers a game restart on
    the next loop iteration). States without an entry never time out.
    """
    global state
    # Per-state timeout table replaces four identical copy-pasted branches.
    limits = {
        loading_state: loading_timer_max,
        matching_state: matching_timer_max,
        play_state: play_timer_max,
        gameloading_state: gameloading_timer_max,
    }
    limit = limits.get(state)
    if limit is not None and timer > limit:
        printScreen('Timeout')
        print('Timeout. Restarting the game')
        changeState(start_state)
config = getConfig()

# Menu: interactive server/mode selection and screenshot consent prompt.
print('By using this software you agree with license! You can find it in code.')
print('Choose a server:')
number = 1
for server in config['servers']:
    print('{}. {}'.format(number, server['title']))
    number += 1
inp = int(input('Type number: '))
inp -= 1  # convert the 1-based menu choice to a 0-based list index
server_position = (config['servers'][inp]['x'], config['servers'][inp]['y'], config['servers'][inp]['title'])
print('Choose a mod:')
number = 1
for server in config['modes']:
    print('{}. {}'.format(number, server['title']))
    number += 1
inp = int(input('Type number: '))
inp -= 1
print('Can I take screenshots if something wrong happens? (y/N)')
if input().lower() == 'y':
    print('Thanks')
else:
    print("Well, if something will go wrong, then I can't help you")
    takeScrenshot = False
# Position init
mode_position = (config['modes'][inp]['x'], config['modes'][inp]['y'], config['modes'][inp]['title'])
mode_tick_position = (config['modes'][inp]['tick']['x'], config['modes'][inp]['tick']['y'])
play_button_position = (config['play_button']['x'], config['play_button']['y'])
play_state_position = (config['play_state']['x'], config['play_state']['y'])
text_position = (config['text']['x'], config['text']['y'])
exit_position = (config['exit_to_lobby']['x'], config['exit_to_lobby']['y'])
error_position_check = (config['error_position']['x'], config['error_position']['y'])
error_ok_position = (config['error_ok_position']['x'], config['error_ok_position']['y'])
game_message_position = (config['game_message_position']['x'], config['game_message_position']['y'])
exit_button_position = (config['exit_button_position']['x'], config['exit_button_position']['y'])
reconnect_button_position = (config['reconnect_button_position']['x'], config['reconnect_button_position']['y'])
# Reading timings
refresh_rate = config["timers"]["refresh_rate"]
wait_after_killing_a_game = config["timers"]["wait_after_killing_a_game"]
start_delay = config["timers"]["start_delay"]
animation_delay = config["timers"]["animation_delay"]
wait_for_players = config["timers"]["wait_for_players"]
wait_for_plain = config["timers"]["wait_for_plain"]
exit_animation_delay = config["timers"]["exit_animation_delay"]
loading_delay = config["timers"]["loading_delay"]
# Colors
def getColor(config, name):
    """Return the (r, g, b) tuple stored under config["colors"][name]."""
    channel = config["colors"][name]
    return (channel["r"], channel["g"], channel["b"])
color_tolerance = config["color_tolerance"]
dark_play_color = getColor(config, "dark_play_color")
play_color = getColor(config, "play_color")
matching_color = getColor(config, "matching_color")
matching_tick_color = getColor(config, "matching_tick_color")
text_start_color = getColor(config, "text_start_color")
white_button = getColor(config, "white_button")
gray_button = getColor(config, "gray_button")
golden_button = getColor(config, "golden_button")
super_white_button = getColor(config, "super_white_button")
windows_background = getColor(config, "windows_background")
exit_button_color = getColor(config, "exit_button_color")
reconnect_button_color = getColor(config, "reconnect_button_color")
# Game info
print('Server: {}. Mode: {}'.format(server_position[2], mode_position[2]))
while (1):
try:
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == CRASH_PROCNAME:
print('Fucking bugs in PUBG. Trying to avoid them!')
proc.kill()
killGame()
time.sleep(wait_after_killing_a_game)
changeState(start_state)
except Exception as ex:
print('Something went wrong while killing bug reporter... Error message: {}'.format(ex))
if state == start_state:
if pixelMatchesColor(error_position_check[0], error_position_check[1], windows_background,
tolerance=color_tolerance):
pyautogui.press('enter')
pyautogui.click(error_ok_position[0], error_ok_position[1])
killGame()
time.sleep(wait_after_killing_a_game)
try:
os.startfile(pubg_url)
changeState(loading_state)
time.sleep(start_delay)
print('Loading PUBG')
except Exception as ex:
print('Something went wrong while starating PUBG... Error message: {}'.format(ex))
elif state == loading_state:
if pixelMatchesColor(play_state_position[0], play_state_position[1], play_color,
tolerance=color_tolerance) or pixelMatchesColor(play_state_position[0],
play_state_position[1],
dark_play_color,
tolerance=color_tolerance):
pyautogui.moveTo(play_button_position[0], play_button_position[1])
time.sleep(animation_delay)
# Pick a server
pyautogui.click(server_position[0], server_position[1])
time.sleep(animation_delay)
pyautogui.click(mode_position[0], mode_position[1])
time.sleep(animation_delay)
if pixelMatchesColor(mode_tick_position[0], mode_tick_position[1], matching_tick_color,
tolerance=color_tolerance):
pyautogui.click(mode_tick_position[0], mode_tick_position[1])
pyautogui.click(play_button_position[0], play_button_position[1])
changeState(matching_state)
time.sleep(loading_delay)
print('Starting matchmaking...')
elif pixelMatchesColor(text_position[0], text_position[1], text_start_color, tolerance=color_tolerance):
print('I see text, so the game is probably ready...')
changeState(play_state)
elif pixelMatchesColor(reconnect_button_position[0], reconnect_button_position[1], reconnect_button_color, tolerance=color_tolerance):
print('Nice orange button? I\'ll press it!')
pyautogui.click(reconnect_button_position[0], reconnect_button_position[1])
time.sleep(animation_delay)
elif matchesButton(game_message_position):
print("Game's message was denied")
pyautogui.click(game_message_position[0], game_message_position[1])
elif not pixelMatchesColor(exit_button_position[0], exit_button_position[1], exit_button_color, tolerance=color_tolerance) \
and not pixelMatchesColor(exit_button_position[0], exit_button_position[1], matching_tick_color, tolerance=color_tolerance)\
and timer > 30 and isGameRunning():
print('I can\'t see exit button, so the game is probably ready...')
time.sleep(wait_for_players)
changeState(play_state)
elif state == matching_state:
if pixelMatchesColor(play_state_position[0], play_state_position[1], play_color,
tolerance=color_tolerance) or pixelMatchesColor(play_state_position[0],
play_state_position[1],
dark_play_color,
tolerance=color_tolerance):
changeState(loading_state)
time.sleep(loading_delay)
if not pixelMatchesColor(play_state_position[0], play_state_position[1], matching_color,
tolerance=color_tolerance):
if pixelMatchesColor(play_state_position[0], play_state_position[1], matching_tick_color,
tolerance=color_tolerance):
changeState(gameloading_state)
time.sleep(loading_delay)
print('Session is loading')
elif state == gameloading_state:
if not pixelMatchesColor(play_state_position[0], play_state_position[1], matching_tick_color,
tolerance=color_tolerance):
print('Loading is complete')
time.sleep(wait_for_players)
changeState(play_state)
elif state == play_state:
# print(text_position[0], text_position[1])
if not pixelMatchesColor(text_position[0], text_position[1], text_start_color, tolerance=color_tolerance):
time.sleep(wait_for_plain)
pyautogui.press('esc')
time.sleep(animation_delay)
pyautogui.click(exit_position[0], exit_position[1])
time.sleep(exit_animation_delay)
pyautogui.click(exit_position[0], exit_position[1])
changeState(loading_state)
print('Going in menu. Loading again')
time.sleep(10)
time.sleep(refresh_rate)
timer += refresh_rate
checkTimer()
| 43.927083 | 142 | 0.629594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,464 | 0.194767 |
a89c866513806e569539b19ae1bf174a8a98c694 | 10,645 | py | Python | mailcheker.py | yamamototakas/mailChecker | 2798f467b96f005d2c0d657d4798720f217a113d | [
"MIT"
] | null | null | null | mailcheker.py | yamamototakas/mailChecker | 2798f467b96f005d2c0d657d4798720f217a113d | [
"MIT"
] | null | null | null | mailcheker.py | yamamototakas/mailChecker | 2798f467b96f005d2c0d657d4798720f217a113d | [
"MIT"
] | null | null | null | #! C:\bin\Python35\python.exe
# -*- coding: utf-8 -*-
'''
Modified for python3 on 2012/04/29
original python2 version is Created on 2011/10/30
@author: tyama
'''
# Standard library imports (grouped and sorted).
import email.header
import http.cookiejar
import json
import poplib
import random
import re
import socket
import string
import sys  # needed: sys.exc_info() is used in several except handlers below
import threading
import time
import urllib.error
import urllib.parse
import urllib.request
from subprocess import check_call

# Local configuration module (server/user/password lists and filter rules).
import mailcheker_data as config
'''
#sample
def decode_mime_header1(s0):
return ''.join( str(s, c or 'ascii') if isinstance(s, (bytes,)) \
else s for s,c in email.header.decode_header(s0) )
'''
def decode_mime_header(st):
    """Decode an RFC 2047 MIME-encoded header value into a plain ``str``.

    Each fragment returned by ``email.header.decode_header`` is decoded with
    its declared charset; fragments labelled ``unknown-8bit`` fall back to
    Shift_JIS (mislabelled mail in this setup is usually Japanese), and
    fragments with no charset fall back to UTF-8.  Decoding errors are
    ignored so one bad fragment never loses the whole header.
    """
    parts = []
    for fragment, charset in email.header.decode_header(st):
        try:
            if isinstance(fragment, str):
                parts.append(fragment)
            elif charset == 'unknown-8bit':
                # Mislabelled charset: assume Shift_JIS, drop undecodable bytes.
                parts.append(fragment.decode('Shift_JIS', 'ignore'))
            elif charset:
                parts.append(fragment.decode(charset, 'ignore'))
            else:
                parts.append(fragment.decode('utf-8', 'ignore'))
        except LookupError as e:
            # Unknown codec name in the header; skip this fragment.
            print('encode error:', e)
        except Exception:
            # Fix: the original printed sys.exc_info() without importing sys,
            # raising NameError here and masking the real failure.
            print('Unexpected error in decode, sleeping 8 sec')
            print(sys.exc_info())
            time.sleep(8)
    # Join once instead of repeated string concatenation.
    return ''.join(parts)
def extract_url(msg, fromkey, payloadkey, multiurl):
    """Extract URLs matching ``payloadkey`` from the text parts of ``msg``.

    Returns None when the decoded From header does not contain ``fromkey``;
    otherwise returns a (possibly empty) list of matches.  With ``multiurl``
    set, all matches are collected and de-duplicated; otherwise only the
    first capture group of the first match per part is taken.
    """
    raw_from = msg.get('From', str)
    # rakuten mail is not correctly decoded;
    # re-inserting the missing space repairs the encoded-word boundary.
    if isinstance(raw_from, str):
        raw_from = raw_from.replace('==?=<', '==?= <')
    if fromkey not in decode_mime_header(raw_from):
        return None
    matcher = re.compile(payloadkey)
    found = []
    for part in msg.walk():
        if part.get_content_maintype() != 'text':
            continue
        payload = part.get_payload()
        charset = part.get_content_charset()
        if isinstance(payload, str):
            text = payload
        elif charset == 'unknown-8bit':
            # Mislabelled charset: assume Shift_JIS.
            text = payload.decode('Shift_JIS', 'ignore')
        else:
            # Declared charset, or euc-jp when none is given.
            text = payload.decode(charset or 'euc-jp', 'ignore')
        if multiurl:
            hits = matcher.findall(text)
            if hits:
                found.extend(hits)
                # De-duplicate after each part (order is not preserved).
                found = list(set(found))
        else:
            hit = matcher.search(text)
            if hit:
                found.append(hit.group(1))
    return found
def isEmailTocheck(msg, fromkey):
    """Return True when the decoded From header of ``msg`` contains ``fromkey``."""
    raw_from = msg.get('From', str)
    # rakuten mail is not correctly decoded;
    # re-inserting the missing space repairs the encoded-word boundary.
    if isinstance(raw_from, str):
        raw_from = raw_from.replace('==?=<', '==?= <')
    return fromkey in decode_mime_header(raw_from)
class http_get(threading.Thread):
    """Fetch a single URL on a background thread using a shared opener."""

    def __init__(self, url, opener, index):
        threading.Thread.__init__(self)
        self.url = url          # URL to request
        self.opener = opener    # shared urllib opener (carries cookies/User-Agent)
        self.index = index      # message number, used only for logging

    def run(self):
        """Open ``self.url``; return True on success, None on any failure.

        NOTE: threading.Thread discards run()'s return value, so callers
        cannot observe the result; it is kept for direct (non-thread) calls.
        """
        try:
            self.opener.open(self.url)
            print(" ", self.index, self.url)
            return True
        except urllib.error.HTTPError as error:
            print('HTTP Error')
            print(error)
        except socket.timeout as error:
            print('Socket time out')
            print(error)
        except Exception:
            # Fix: the original referenced ``sys`` without importing it,
            # which raised NameError here and masked the real failure.
            print('Unexpected error in decode, sleeping 8 sec')
            print(sys.exc_info())
            time.sleep(8)
        return None
# Sample record used only by the json round-trip demo at the top of main();
# it has no effect on the mail-checking logic.
original_data = {
    'name': 'ACME',
    'shares': 100,
    'price': 542.23
}
def main():
    """Run one full pass over every configured POP3 mailbox.

    For each server: resume from the last-seen message UIDL stored in
    ``lastmsgid.dat``, scan newer messages against the sender/URL filter
    rules in ``mailcheker_data``, fetch any matching URLs on background
    threads, and finally persist the newest UIDL back to ``lastmsgid.dat``.
    """
    # json round-trip demo using the sample record above (debug output only).
    print("Base", original_data)
    json_str = json.dumps(original_data)
    print(json_str)
    json_data = json.loads(json_str)
    print(json_data)
    # Per-mailbox configuration; the three dl_list* entries are the filter
    # rule sets for the three servers, matched by index j below.
    server_list = config.server_list
    user_list = config.user_list
    pass_list = config.pass_list
    print(server_list)
    dl_list1 = config.dl_list1
    dl_list2 = config.dl_list2
    dl_list3 = config.dl_list3
    dl_list = (dl_list1, dl_list2, dl_list3)
    # lines=open('setting.dat','r').readlines()
    # for line in lines:
    #    print line[:-1]
    # One "<count> <uidl>" line per server from the previous run.
    lastuidl_lists = []
    f = open('lastmsgid.dat', 'r')
    for line in f:
        lastuidl_lists.append(line.split())
    f.close()
    out_string = []
    print(lastuidl_lists)
    print(dl_list)
    # time out
    socket.setdefaulttimeout(15.0)
    # connect to server
    cj = http.cookiejar.CookieJar()
    cjhdr = urllib.request.HTTPCookieProcessor(cj)
    opener = urllib.request.build_opener(cjhdr)
    opener.addheaders = [
        ('User-Agent', 'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko)\
             Chrome/15.0.874.120 Safari/535.2')]
    for j in range(len(server_list)):
        print('Start ')
        server = poplib.POP3_SSL(server_list[j])
        # login
        server.user(user_list[j])
        server.pass_(pass_list[j])
        # list items on server
        list_resp, list_items, list_octets = server.list()
        print(list_resp)
        # print (list_items)
        print(list_octets)
        uidl = server.uidl()
        lastuidl = lastuidl_lists[j]
        # print server.uidl()
        '''if j==1:
            lastuidl[1]='TEST'
        '''
        last_msg_id = 1
        x = int(lastuidl[0])
        if x > len(list_items):
            x = len(list_items)
        index = x
        print(x)
        if x == 0:
            # Empty mailbox: record placeholder state and move on.
            out_string.append('1')
            out_string.append('abc')
            continue
        # Walk backwards until the stored UIDL is found; ``index`` ends up
        # at the first message not yet processed.
        while x > 0:
            # print (lastuidl[1], ":>", uidl[1][x-1].split()[1].decode('utf-8','ingore'))
            if lastuidl[1] == uidl[1][x - 1].split()[1].decode('utf-8', 'ingore'):
                print('equal')
                break
            print(x)
            index = x
            x -= 1
        print(index)
        # if uidl[1][i].split()[1] == 'ANft2MsAABBhTsOb4QzFegr+jPA':
        #    print 'equal'
        #    continue
        delete_counter = 0
        last_index = index
        for i in range(index, len(list_items) + 1):
            try:
                # resp, text, octets = server.retr(i)
                # TOP fetches only headers first, to cheaply test the sender.
                t_resp, t_text, t_octets = server.top(i, 1)
            except Exception as err:
                print('Unexpected error in server.top of Main function\n')
                print('i=', i, ', index=', index)
                print(sys.exc_info())
            # print (text)'
            t_string_text = b'\n'.join(t_text)
            t_msg = email.message_from_bytes(t_string_text)
            url_list = None
            checkBody = False
            # Only download the full body when a filter rule matches the sender.
            for from_key, text_key, multiurl in dl_list[j]:
                if isEmailTocheck(t_msg, from_key):
                    checkBody = True
                    break
            if checkBody:
                try:
                    resp, text, octets = server.retr(i)
                except Exception as err:
                    print('Unexpected error in server.retr of Main function\n')
                    print('i=', i, ', index=', index)
                    print(sys.exc_info())
                string_text = b'\n'.join(text)
                msg = email.message_from_bytes(string_text)
                for from_key, text_key, multiurl in dl_list[j]:
                    url_list = extract_url(msg, from_key, text_key, multiurl)
                    if url_list:
                        break
            # print url_list
            if url_list:
                m_date = msg.get('Date')
                print(m_date)
                # Fetch each extracted URL on its own background thread.
                for each in url_list:
                    # print each
                    get = http_get(each, opener, i)
                    try:
                        get.start()
                        # server.dele(i)
                        delete_counter += 1
                        if 'r34' in each:
                            print('Call Chrome')
                            check_call(
                                ["C:\Program Files (x86)\Google\Chrome\Application\chrome.exe",
                                 " --disable-images", each])
                    except Exception as err:
                        print('Unexpected error in Main function', each, i)
                        print(sys.exc_info())
                        time.sleep(8)
                m_subject = msg.get('Subject')
                d_subject, enc = email.header.decode_header(m_subject)[0]
                if enc is None:
                    enc = 'euc-jp'
                try:
                    u_subject = str(d_subject, enc)
                except Exception as err:
                    print('Unexpected error in u_subject', d_subject, enc)
                    print(sys.exc_info())
                    time.sleep(8)
                print(i, " ", u_subject)
            else:
                print(i)
            last_index = i
            if i == 6:
                pass # quit()
        # Remember how far we got so the next run resumes from here.
        last_msg_id = len(list_items) # - delete_counter
        out_string.append(str(last_msg_id))
        out_string.append(uidl[1][last_index - 1].split()[1].decode('utf-8', 'ignore'))
        try:
            server.quit()
        except Exception as err:
            print('Unexpected error in server.quit()')
            print(sys.exc_info())
        print('End')
        print(out_string[len(out_string) - 1])
        # print out_string
        time.sleep(2)
    # out_string holds (count, uidl) pairs; dump them for debugging...
    for i in range(len(out_string)):
        if i % 2:
            continue
        print(out_string[i])
        print(out_string[i + 1])
    # ...and persist one "<count> <uidl>" line per server.
    f = open('lastmsgid.dat', 'w')
    for i in range(len(out_string)):
        if i % 2:
            continue
        f.write(out_string[i] + ' ')
        f.write(out_string[i + 1] + '\n')
    f.close()
# Script entry point: run one full pass, then pause so the console window
# stays readable before closing.
if __name__ == '__main__':
    main()
    print('END')
    time.sleep(8)
| 28.538874 | 95 | 0.513293 | 985 | 0.092532 | 0 | 0 | 0 | 0 | 0 | 0 | 2,335 | 0.219352 |
a89d5ea301daab707e4e307ee463a9e25963e7c1 | 7,626 | py | Python | tests/epc_schemes/test_giai.py | nedap/retail-epcpy | f5a454f2a06053f64bc42e6c6411fbd6cb47e745 | [
"MIT"
] | 2 | 2022-03-21T08:22:30.000Z | 2022-03-22T12:32:29.000Z | tests/epc_schemes/test_giai.py | nedap/retail-epcpy | f5a454f2a06053f64bc42e6c6411fbd6cb47e745 | [
"MIT"
] | 1 | 2022-03-28T14:48:52.000Z | 2022-03-28T14:48:52.000Z | tests/epc_schemes/test_giai.py | nedap/retail-epcpy | f5a454f2a06053f64bc42e6c6411fbd6cb47e745 | [
"MIT"
] | null | null | null | import unittest
from epcpy.epc_schemes.giai import GIAI, GIAIFilterValue
from tests.epc_schemes.test_base_scheme import (
TestEPCSchemeInitMeta,
TestGS1ElementMeta,
TestTagEncodableMeta,
)
# Data-driven tests: the metaclass generates one test method per entry in
# valid_data / invalid_data, named after each entry's "name" key.
class TestGIAIInit(
    unittest.TestCase,
    metaclass=TestEPCSchemeInitMeta,
    scheme=GIAI,
    valid_data=[
        {
            "name": "test_valid_giai_1",
            "uri": "urn:epc:id:giai:0614141.12345400",
        },
        {
            "name": "test_valid_giai_2",
            "uri": "urn:epc:id:giai:0614141.0",
        },
        {
            "name": "test_valid_giai_3",
            "uri": "urn:epc:id:giai:0614141.1ABc%2FD",
        },
        {
            "name": "test_valid_giai_4",
            "uri": "urn:epc:id:giai:061411.01ABc%2FD",
        },
        {
            "name": "test_valid_giai_5",
            "uri": "urn:epc:id:giai:012345.012345678901234567890123",
        },
        {
            "name": "test_valid_giai_6",
            "uri": "urn:epc:id:giai:012345678901.012345678901234567",
        },
    ],
    invalid_data=[
        {
            "name": "test_invalid_giai_identifier",
            "uri": "urn:epc:id:gai:061411.01ABc%2FD",
        },
        {
            "name": "test_invalid_giai_company_prefix_1",
            "uri": "urn:epc:id:giai:06141.1ABc%2FD",
        },
        {
            "name": "test_invalid_giai_company_prefix_2",
            "uri": "urn:epc:id:giai:0614111111111.1ABc%2FD",
        },
        {
            "name": "test_invalid_giai_serial_too_long_1",
            "uri": "urn:epc:id:giai:012345.0123456789012345678901234",
        },
        # NOTE(review): the entry below duplicates the one above (same name
        # and uri) -- likely a copy-paste slip; confirm and remove.
        {
            "name": "test_invalid_giai_serial_too_long_1",
            "uri": "urn:epc:id:giai:012345.0123456789012345678901234",
        },
        {
            "name": "test_invalid_giai_serial_too_long_2",
            "uri": "urn:epc:id:giai:012345678901.0123456789012345678",
        },
    ],
):
    """URI parsing tests for the GIAI scheme (generated by the metaclass)."""
    pass
# Data-driven tests: the metaclass generates one test method per entry,
# checking gs1_key / gs1_element_string derivation for each URI.
class TestGIAIGS1Key(
    unittest.TestCase,
    metaclass=TestGS1ElementMeta,
    scheme=GIAI,
    valid_data=[
        {
            "name": "test_valid_giai_gs1_key_1",
            "uri": "urn:epc:id:giai:0614141.12345400",
            "gs1_key": "061414112345400",
            "gs1_element_string": "(8004)061414112345400",
            "company_prefix_length": 7,
        },
        {
            "name": "test_valid_giai_gs1_key_2",
            "uri": "urn:epc:id:giai:0614141.0",
            "gs1_key": "06141410",
            "gs1_element_string": "(8004)06141410",
            "company_prefix_length": 7,
        },
        {
            "name": "test_valid_giai_gs1_key_3",
            "uri": "urn:epc:id:giai:0614141.1ABc%2FD",
            "gs1_key": "06141411ABc/D",
            "gs1_element_string": "(8004)06141411ABc/D",
            "company_prefix_length": 7,
        },
        {
            "name": "test_valid_giai_gs1_key_4",
            "uri": "urn:epc:id:giai:061411.01ABc%2FD",
            "gs1_key": "06141101ABc/D",
            "gs1_element_string": "(8004)06141101ABc/D",
            "company_prefix_length": 6,
        },
        {
            "name": "test_valid_giai_gs1_key_5",
            "uri": "urn:epc:id:giai:012345.012345678901234567890123",
            "gs1_key": "012345012345678901234567890123",
            "gs1_element_string": "(8004)012345012345678901234567890123",
            "company_prefix_length": 6,
        },
        {
            "name": "test_valid_giai_gs1_key_6",
            "uri": "urn:epc:id:giai:012345678901.012345678901234567",
            "gs1_key": "012345678901012345678901234567",
            "gs1_element_string": "(8004)012345678901012345678901234567",
            "company_prefix_length": 12,
        },
    ],
    invalid_data=[],
):
    """GS1 key derivation tests for the GIAI scheme (generated by the metaclass)."""
    pass
# Data-driven tests: the metaclass generates one test method per entry,
# checking tag-URI and hex binary encoding for each (scheme, filter) combo.
class TestGIAITagEncodable(
    unittest.TestCase,
    metaclass=TestTagEncodableMeta,
    scheme=GIAI,
    valid_data=[
        {
            "name": "test_valid_giai_tag_encodable_1",
            "uri": "urn:epc:id:giai:0614141.12345400",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_202,
                "filter_value": GIAIFilterValue.RAIL_VEHICLE,
            },
            "tag_uri": "urn:epc:tag:giai-202:1.0614141.12345400",
            "hex": "3834257BF58B266D1AB460C00000000000000000000000000000",
        },
        {
            "name": "test_valid_giai_tag_encodable_2",
            "uri": "urn:epc:id:giai:0614141.0",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_96,
                "filter_value": GIAIFilterValue.RAIL_VEHICLE,
            },
            "tag_uri": "urn:epc:tag:giai-96:1.0614141.0",
            "hex": "3434257BF400000000000000",
        },
        {
            "name": "test_valid_giai_tag_encodable_3",
            "uri": "urn:epc:id:giai:0614141.1ABc%2FD",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_202,
                "filter_value": GIAIFilterValue.ALL_OTHERS,
            },
            "tag_uri": "urn:epc:tag:giai-202:0.0614141.1ABc%2FD",
            "hex": "3814257BF58C1858D7C400000000000000000000000000000000",
        },
        {
            "name": "test_valid_giai_tag_encodable_4",
            "uri": "urn:epc:id:giai:061411.01ABc%2FD",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_202,
                "filter_value": GIAIFilterValue.RESERVED_4,
            },
            "tag_uri": "urn:epc:tag:giai-202:4.061411.01ABc%2FD",
            "hex": "38983BF8D831830B1AF880000000000000000000000000000000",
        },
        {
            "name": "test_valid_giai_tag_encodable_5",
            "uri": "urn:epc:id:giai:012345.012345678901234567890123",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_202,
                "filter_value": GIAIFilterValue.RAIL_VEHICLE,
            },
            "tag_uri": "urn:epc:tag:giai-202:1.012345.012345678901234567890123",
            "hex": "38380C0E583164CDA356CDDC3960C593368D5B3770E583164CC0",
        },
        {
            "name": "test_valid_giai_tag_encodable_6",
            "uri": "urn:epc:id:giai:0614141.12345400",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_96,
                "filter_value": GIAIFilterValue.RAIL_VEHICLE,
            },
            "tag_uri": "urn:epc:tag:giai-96:1.0614141.12345400",
            "hex": "3434257BF400000000BC6038",
        },
        {
            "name": "test_valid_giai_tag_encodable_7",
            "uri": "urn:epc:id:giai:0614141.02",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_202,
                "filter_value": GIAIFilterValue.RAIL_VEHICLE,
            },
            "tag_uri": "urn:epc:tag:giai-202:1.0614141.02",
            "hex": "3834257BF5832000000000000000000000000000000000000000",
        },
    ],
    invalid_data=[
        # Serials with a leading zero (or other non-numeric content) cannot
        # be encoded in the numeric-only GIAI-96 scheme.
        {
            "name": "test_invalid_giai_tag_encodable_invalid_serial_1",
            "uri": "urn:epc:id:giai:0614141.02",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_96,
                "filter_value": GIAIFilterValue.RAIL_VEHICLE,
            },
        },
        {
            "name": "test_invalid_giai_tag_encodable_invalid_serial_2",
            "uri": "urn:epc:id:giai:061411.11ABc%2FD",
            "kwargs": {
                "binary_coding_scheme": GIAI.BinaryCodingScheme.GIAI_96,
                "filter_value": GIAIFilterValue.RESERVED_4,
            },
        },
    ],
):
    """Tag encoding tests for the GIAI scheme (generated by the metaclass)."""
    pass
| 34.663636 | 80 | 0.550747 | 7,415 | 0.972331 | 0 | 0 | 0 | 0 | 0 | 0 | 3,821 | 0.501049 |
a89e3fe1a4c7cef6a1cb619cc685d9746e6605e8 | 694 | py | Python | tests/conftest.py | olist/olist-loafer | cb3bc0a46ca5a343fbd455181045212e4d032ce6 | [
"MIT"
] | 11 | 2017-10-06T18:18:16.000Z | 2021-12-23T11:34:11.000Z | tests/conftest.py | pydrinker/pydrinker-loafer | 32d2a36827f15cc2b5362ee54da7702267da24c9 | [
"MIT"
] | 7 | 2017-02-16T13:20:54.000Z | 2020-11-03T13:50:46.000Z | tests/conftest.py | pydrinker/pydrinker-loafer | 32d2a36827f15cc2b5362ee54da7702267da24c9 | [
"MIT"
] | 2 | 2017-10-06T18:18:31.000Z | 2020-09-10T15:09:45.000Z | import pytest
from loafer.providers import AbstractProvider
@pytest.fixture
def dummy_handler():
    """Fixture: a message handler that must never actually be invoked.

    Calling the returned handler fails the test immediately, so tests can
    assert that a code path does NOT dispatch to the handler.
    """
    def handler(message, *args):
        raise AssertionError("I should not be called")
    return handler
@pytest.fixture
def dummy_provider():
    """Fixture: an AbstractProvider whose every operation fails if called.

    Useful for tests that need a provider instance but must verify none of
    its methods are actually exercised.
    """
    class Dummy(AbstractProvider):
        async def fetch_messages(self):
            raise AssertionError("I should not be called")
        async def confirm_message(self):
            raise AssertionError("I should not be called")
        async def message_not_processed(self):
            raise AssertionError("I should not be called")
        def stop(self):
            raise AssertionError("I should not be called")
    return Dummy()
| 23.133333 | 58 | 0.670029 | 421 | 0.606628 | 0 | 0 | 627 | 0.903458 | 278 | 0.400576 | 120 | 0.172911 |
a89e48cf3a4bd044b3bfeec924a6e17c47c0d752 | 543 | py | Python | setup.py | anderct105/Dataset_python | 9ab17eda584b6d65e6b37eb2c377f7634d375e8a | [
"MIT"
] | null | null | null | setup.py | anderct105/Dataset_python | 9ab17eda584b6d65e6b37eb2c377f7634d375e8a | [
"MIT"
] | null | null | null | setup.py | anderct105/Dataset_python | 9ab17eda584b6d65e6b37eb2c377f7634d375e8a | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for the ``dataset`` helper package.
setup(
    name='dataset',
    version='0.0.1',
    author='Ander Cejudo',
    author_email='acejudo001@ikasle.ehu.eus',
    packages=['dataset'],
    # NOTE(review): placeholder value ("indicate a URL for the package...");
    # replace with the real project URL before publishing.
    url='Indicar una URL para el paquete...',
    license='LICENSE.txt',
    description='This package includes some basic functions to work with a dataset object',
    long_description=open('README.txt').read(),
    tests_require=['pytest'],
    install_requires=[
        "seaborn >= 0.9.0",
        "pandas >= 0.25.1",
        "matplotlib >= 3.1.1",
        "numpy >=1.17.2"
    ],
) | 27.15 | 90 | 0.635359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.519337 |
a89e666e84a477e51e3b884be7e9dbae7e3aa151 | 8,456 | py | Python | install/services/event.py | philipcwhite/monitoring2 | 05e24efbfc3caace248eb6daa1288920a9758178 | [
"Apache-2.0"
] | 10 | 2018-12-23T07:42:11.000Z | 2022-03-11T09:43:19.000Z | install/services/event.py | philipcwhite/monitoring2 | 05e24efbfc3caace248eb6daa1288920a9758178 | [
"Apache-2.0"
] | 2 | 2020-08-26T11:42:20.000Z | 2021-02-03T13:52:52.000Z | install/services/event.py | philipcwhite/monitoring2 | 05e24efbfc3caace248eb6daa1288920a9758178 | [
"Apache-2.0"
] | 5 | 2018-12-27T20:02:17.000Z | 2021-11-15T12:46:42.000Z | import datetime, configparser, os, smtplib, time, pymysql.cursors
from email.message import EmailMessage
class EventSettings:
    """Module-wide configuration holder; defaults here are overridden by
    ``EventConfig.load_config()`` from settings.ini."""
    app_path = './'  # base directory for settings.ini and output files
    availability_check = 300  # seconds without agent data before it counts as down
    availability_severity = 1  # severity assigned to availability events
    agent_retention = 2592000  # seconds (30 days) to keep agentsystem rows
    data_retention = 2592000  # seconds to keep agentdata rows
    event_retention = 2592000  # seconds to keep agentevents rows
    database = 'monitoring'  # MySQL database name
    dbhost = 'localhost'
    dbpassword = 'monitoring'
    dbuser = 'monitoring'
    mailactive = 0  # 1 enables SMTP notification delivery
    mailadmin = 'monitoring@monitoring'  # From address for notifications
    mailserver = 'localhost'  # SMTP host
    running = True  # main-loop flag; set False to stop start_server()
class EventConfig:
    """Loads settings.ini into the EventSettings class attributes."""
    def load_config():
        """Parse settings.ini sections (database/events/mail/retention) and
        copy their values onto EventSettings.

        NOTE(review): any parse/IO error is silently swallowed by the bare
        except below, leaving the defaults in place -- confirm intended.
        """
        try:
            EventSettings.running = True
            parser = configparser.ConfigParser()
            parser.read(EventSettings.app_path + 'settings.ini')
            database = dict(parser.items('database'))
            events = dict(parser.items('events'))
            mail = dict(parser.items('mail'))
            retention = dict(parser.items('retention'))
            EventSettings.dbhost = database['host']
            EventSettings.database = database['name']
            EventSettings.dbuser = database['user']
            EventSettings.dbpassword = database['password']
            EventSettings.agent_retention = int(retention['agent'])
            EventSettings.data_retention = int(retention['data'])
            EventSettings.event_retention = int(retention['event'])
            EventSettings.mailactive = int(mail['active'])
            EventSettings.mailserver = mail['server']
            EventSettings.mailadmin = mail['admin']
            EventSettings.availability_check = int(events['availability_check'])
            EventSettings.availability_severity = int(events['availability_severity'])
        except: pass
class EventData:
    """Thin data-access layer over the monitoring MySQL database.

    One connection is opened per instance and reused by every method; the
    module creates a single shared instance (``ED``) below.
    """

    def __init__(self):
        # Connection parameters come from the module-wide EventSettings.
        self.con = pymysql.connect(host=EventSettings.dbhost, user=EventSettings.dbuser,
                                   password=EventSettings.dbpassword, db=EventSettings.database,
                                   charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
        self.cursor = self.con.cursor()

    def __del__(self):
        # Best-effort close when the instance is garbage collected.
        self.con.close()

    def agent_select_id(self):
        """Return the id of the newest agentevents row, as a string."""
        sql = 'SELECT id from agentevents ORDER BY id DESC LIMIT 1'
        self.cursor.execute(sql)
        result = self.cursor.fetchone()
        return str(result['id'])

    def agent_events_processed(self, id):
        """Mark every agentevents row with id <= ``id`` as processed."""
        sql = 'UPDATE agentevents SET processed=1 WHERE id<=%s'
        self.cursor.execute(sql, str(id))
        self.con.commit()

    def agent_filter_select(self, id):
        """Return unprocessed events up to ``id`` joined with the enabled
        notification rules matching them, then mark them processed."""
        sql = '''select t1.notify_email, t1.notify_name, t2.id, t2.timestamp, t2.name, t2.monitor, t2.message, t2.severity, t2.status FROM notifyrule as t1
              INNER JOIN agentevents as t2 on
              t2.name LIKE t1.agent_name AND t2.monitor LIKE t1.agent_monitor
              AND t2.status LIKE t1.agent_status AND t2.severity LIKE t1.agent_severity AND t2.processed=0 AND T2.id<=%s AND t1.notify_enabled=1'''
        self.cursor.execute(sql, str(id))
        result = self.cursor.fetchall()
        # Fix: the original called the bare name agent_events_processed(id),
        # which raises NameError at runtime -- it is a method of this class.
        self.agent_events_processed(id)
        return result

    def agent_avail_select(self, timestamp):
        """Return names of agents whose last report is older than ``timestamp``."""
        sql = 'SELECT name FROM agentsystem WHERE timestamp < %s'
        self.cursor.execute(sql, str(timestamp))
        return self.cursor.fetchall()

    def agent_avail_event_open(self, timestamp, name, message, severity):
        """Open an availability event for ``name`` unless one is already open."""
        sql = """INSERT INTO agentevents (timestamp, name, monitor, message, status, severity, processed)
              SELECT %s, %s, 'perf.system.availability.seconds', %s, 1, %s, 0 FROM DUAL
              WHERE NOT EXISTS (SELECT name FROM agentevents WHERE name=%s AND monitor='perf.system.availability.seconds' AND status=1)"""
        self.cursor.execute(sql, (str(timestamp), name, message, str(severity), name))
        self.con.commit()

    def agent_avail_select_event_open(self, timestamp):
        """Close open availability events for agents that have reported data
        since ``timestamp``.

        NOTE(review): the UPDATE closes every open event for the agent, not
        only the availability one -- confirm that is intended.
        """
        sql = """SELECT DISTINCT t1.name FROM agentevents as t1
              INNER JOIN agentdata as t2 on t1.name = t2.name
              WHERE t1.monitor='perf.system.availability.seconds' AND t1.status=1 AND t2.timestamp >=%s"""
        self.cursor.execute(sql, str(timestamp))
        result = self.cursor.fetchall()
        if result is not None:
            for row in result:
                sql = "UPDATE agentevents SET status=0 WHERE name=%s"
                self.cursor.execute(sql, row['name'])
                self.con.commit()

    def remove_agents(self):
        """Delete agentsystem rows older than the agent retention window."""
        # Fix: bind the cutoff as a parameter instead of concatenating it
        # into the SQL string.
        sql = 'DELETE FROM agentsystem WHERE timestamp < %s'
        self.cursor.execute(sql, str(time.time() - EventSettings.agent_retention))
        self.con.commit()

    def remove_events(self):
        """Delete agentevents rows older than the event retention window."""
        sql = 'DELETE FROM agentevents WHERE timestamp < %s'
        self.cursor.execute(sql, str(time.time() - EventSettings.event_retention))
        self.con.commit()

    def remove_data(self):
        """Delete agentdata rows older than the data retention window."""
        sql = 'DELETE FROM agentdata WHERE timestamp < %s'
        self.cursor.execute(sql, str(time.time() - EventSettings.data_retention))
        self.con.commit()
ED = EventData()
class EventAvailable:
    """Availability checks: opens events for silent agents and closes events
    for agents that have resumed reporting."""
    def check_available():
        """Open an availability event for every agent with no data within the
        configured availability window.

        NOTE(review): the bare except swallows all errors silently.
        """
        try:
            check_time = str(time.time() - EventSettings.availability_check).split('.')[0]
            cur_time = str(time.time()).split('.')[0]
            hosts = ED.agent_avail_select(str(check_time))
            for i in hosts:
                name = i['name']
                message = 'Agent not responding for ' + str(int(round(EventSettings.availability_check / 60,0))) + ' minutes'
                ED.agent_avail_event_open(cur_time, name, message, str(EventSettings.availability_severity))
        except: pass
    def check_open():
        """Close open availability events for agents that reported recently."""
        try:
            check_time = str(time.time() - EventSettings.availability_check).split('.')[0]
            ED.agent_avail_select_event_open(check_time)
        except: pass
class ServerEvent:
    """Turns matched events into email notifications and an output.txt log."""
    def process_events():
        """Fetch unprocessed events joined with notification rules, email
        each one (when mail is enabled) and append it to output.txt.

        NOTE(review): the bare except at the end swallows all errors silently.
        """
        try:
            id = ED.agent_select_id()
            output = ED.agent_filter_select(id)
            for i in output:
                notify_email = i['notify_email']
                notify_name = i['notify_name']
                name = i['name']
                monitor = i['monitor']
                message = i['message']
                # Map severity code to label; assumes the driver returns the
                # column as a string ('1'..'4') -- TODO confirm column type.
                severity = ''
                if i['severity'] == '1': severity = 'critical'
                if i['severity'] == '2': severity = 'major'
                if i['severity'] == '3': severity = 'warning'
                if i['severity'] == '4': severity = 'info'
                status = ''
                if i['status'] == '0': status = 'closed'
                else: status = 'open'
                timestamp = int(i['timestamp'])
                date = datetime.datetime.fromtimestamp(timestamp)
                email_subject = name + ':' + monitor + ':' + severity + ':' + status
                # HTML body summarising the event fields.
                email_message = '''<div style='font-family:Arial, Helvetica, sans-serif;font-size: 11pt'><b>message:</b> ''' + message + '<br /><b>name:</b> ' + name + '<br /><b>monitor:</b> ' + monitor + '<br /><b>severity:</b> ' + severity + '<br /><b>status:</b> ' + status + '<br /><b>time opened:</b> ' + str(date) + '<br /><b>policy:</b> ' + notify_name + '</div>'
                if EventSettings.mailactive == 1:
                    msg = EmailMessage()
                    msg['Subject'] = email_subject
                    msg['From'] = EventSettings.mailadmin
                    msg['To'] = notify_email
                    msg.set_content(email_message, subtype='html')
                    s = smtplib.SMTP(EventSettings.mailserver)
                    s.send_message(msg)
                    s.quit()
                # Always append a colon-separated record to the local log.
                f = open(EventSettings.app_path + 'output.txt','a')
                f.write(str(time.time()).split('.')[0] + ':' + notify_email + ':' + notify_name + ':' + name + ':' + monitor + ':' + message + ':' + severity + ':' +status + ':' + str(date) + '\n')
                f.close()
        except: pass
def start_server():
    """Main loop: poll once per second and, at the top of each minute, run
    availability checks, notifications and retention cleanup."""
    EventConfig.load_config()
    while EventSettings.running == True:
        a = datetime.datetime.now().second
        if a == 0:
            EventAvailable.check_available()
            EventAvailable.check_open()
            ServerEvent.process_events()
            ED.remove_agents()
            ED.remove_data()
            ED.remove_events()
        time.sleep(1)
# Module runs the service immediately on import/execution.
start_server()
a89fa94a38be0f7ff83779a36c140bcbf11011b7 | 1,422 | py | Python | publicacion/migrations/0002_remove_publicacion_user_publicacion_autor_and_more.py | chelocastillo1/test | b783e64dbd3071c3ed074e9ce23da047e9bad97d | [
"CC0-1.0"
] | 1 | 2021-12-12T22:27:52.000Z | 2021-12-12T22:27:52.000Z | publicacion/migrations/0002_remove_publicacion_user_publicacion_autor_and_more.py | chelocastillo1/test | b783e64dbd3071c3ed074e9ce23da047e9bad97d | [
"CC0-1.0"
] | null | null | null | publicacion/migrations/0002_remove_publicacion_user_publicacion_autor_and_more.py | chelocastillo1/test | b783e64dbd3071c3ed074e9ce23da047e9bad97d | [
"CC0-1.0"
] | null | null | null | # Generated by Django 4.0 on 2021-12-15 02:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cuenta', '0001_initial'),
('publicacion', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='publicacion',
name='user',
),
migrations.AddField(
model_name='publicacion',
name='autor',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.DO_NOTHING, to='cuenta.usuario'),
),
migrations.AddField(
model_name='publicacion',
name='destacado',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='publicacion',
name='imagen',
field=models.ImageField(default=None, upload_to=''),
),
migrations.AlterField(
model_name='publicacion',
name='fechaCreacion',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='publicacion',
name='fechaEdicion',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='publicacion',
name='titulo',
field=models.CharField(max_length=100),
),
]
| 28.44 | 116 | 0.561181 | 1,298 | 0.912799 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.19128 |
a89fc07496dfb31710239a16f1bdafb72d4ca886 | 6,242 | py | Python | databricks/koalas/missing/window.py | HG1112/koalas | 580f48c81d3d2236c399063ce453f9170d88b954 | [
"Apache-2.0"
] | 1 | 2019-12-06T05:01:34.000Z | 2019-12-06T05:01:34.000Z | databricks/koalas/missing/window.py | HG1112/koalas | 580f48c81d3d2236c399063ce453f9170d88b954 | [
"Apache-2.0"
] | null | null | null | databricks/koalas/missing/window.py | HG1112/koalas | 580f48c81d3d2236c399063ce453f9170d88b954 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from databricks.koalas.missing import _unsupported_function, _unsupported_property
def unsupported_function_expanding(method_name, deprecated=False, reason=""):
    """Build a placeholder for an unsupported pandas ``Expanding`` method."""
    return _unsupported_function(class_name='pandas.core.window.Expanding', method_name=method_name,
                                 deprecated=deprecated, reason=reason)
def unsupported_property_expanding(property_name, deprecated=False, reason=""):
return _unsupported_property(
class_name='pandas.core.window.Expanding', property_name=property_name,
deprecated=deprecated, reason=reason)
def unsupported_function_rolling(method_name, deprecated=False, reason=""):
return _unsupported_function(class_name='pandas.core.window.Rolling', method_name=method_name,
deprecated=deprecated, reason=reason)
def unsupported_property_rolling(property_name, deprecated=False, reason=""):
return _unsupported_property(
class_name='pandas.core.window.Rolling', property_name=property_name,
deprecated=deprecated, reason=reason)
class _MissingPandasLikeExpanding(object):
agg = unsupported_function_expanding("agg")
aggregate = unsupported_function_expanding("aggregate")
apply = unsupported_function_expanding("apply")
corr = unsupported_function_expanding("corr")
count = unsupported_function_expanding("count")
cov = unsupported_function_expanding("cov")
kurt = unsupported_function_expanding("kurt")
max = unsupported_function_expanding("max")
mean = unsupported_function_expanding("mean")
median = unsupported_function_expanding("median")
min = unsupported_function_expanding("min")
quantile = unsupported_function_expanding("quantile")
skew = unsupported_function_expanding("skew")
std = unsupported_function_expanding("std")
sum = unsupported_function_expanding("sum")
validate = unsupported_function_expanding("validate")
var = unsupported_function_expanding("var")
exclusions = unsupported_property_expanding("exclusions")
is_datetimelike = unsupported_property_expanding("is_datetimelike")
is_freq_type = unsupported_property_expanding("is_freq_type")
ndim = unsupported_property_expanding("ndim")
class _MissingPandasLikeRolling(object):
agg = unsupported_property_rolling("agg")
aggregate = unsupported_property_rolling("aggregate")
apply = unsupported_property_rolling("apply")
corr = unsupported_property_rolling("corr")
count = unsupported_property_rolling("count")
cov = unsupported_property_rolling("cov")
kurt = unsupported_property_rolling("kurt")
max = unsupported_property_rolling("max")
mean = unsupported_property_rolling("mean")
median = unsupported_property_rolling("median")
min = unsupported_property_rolling("min")
quantile = unsupported_property_rolling("quantile")
skew = unsupported_property_rolling("skew")
std = unsupported_property_rolling("std")
sum = unsupported_property_rolling("sum")
validate = unsupported_property_rolling("validate")
var = unsupported_property_rolling("var")
exclusions = unsupported_property_rolling("exclusions")
is_datetimelike = unsupported_property_rolling("is_datetimelike")
is_freq_type = unsupported_property_rolling("is_freq_type")
ndim = unsupported_property_rolling("ndim")
class _MissingPandasLikeExpandingGroupby(object):
agg = unsupported_function_expanding("agg")
aggregate = unsupported_function_expanding("aggregate")
apply = unsupported_function_expanding("apply")
corr = unsupported_function_expanding("corr")
count = unsupported_function_expanding("count")
cov = unsupported_function_expanding("cov")
kurt = unsupported_function_expanding("kurt")
max = unsupported_function_expanding("max")
mean = unsupported_function_expanding("mean")
median = unsupported_function_expanding("median")
min = unsupported_function_expanding("min")
quantile = unsupported_function_expanding("quantile")
skew = unsupported_function_expanding("skew")
std = unsupported_function_expanding("std")
sum = unsupported_function_expanding("sum")
validate = unsupported_function_expanding("validate")
var = unsupported_function_expanding("var")
exclusions = unsupported_property_expanding("exclusions")
is_datetimelike = unsupported_property_expanding("is_datetimelike")
is_freq_type = unsupported_property_expanding("is_freq_type")
ndim = unsupported_property_expanding("ndim")
class _MissingPandasLikeRollingGroupby(object):
agg = unsupported_function_rolling("agg")
aggregate = unsupported_function_rolling("aggregate")
apply = unsupported_function_rolling("apply")
corr = unsupported_function_rolling("corr")
count = unsupported_function_rolling("count")
cov = unsupported_function_rolling("cov")
kurt = unsupported_function_rolling("kurt")
max = unsupported_function_rolling("max")
mean = unsupported_function_rolling("mean")
median = unsupported_function_rolling("median")
min = unsupported_function_rolling("min")
quantile = unsupported_function_rolling("quantile")
skew = unsupported_function_rolling("skew")
std = unsupported_function_rolling("std")
sum = unsupported_function_rolling("sum")
validate = unsupported_function_rolling("validate")
var = unsupported_function_rolling("var")
exclusions = unsupported_property_rolling("exclusions")
is_datetimelike = unsupported_property_rolling("is_datetimelike")
is_freq_type = unsupported_property_rolling("is_freq_type")
ndim = unsupported_property_rolling("ndim")
| 44.585714 | 100 | 0.768343 | 4,578 | 0.733419 | 0 | 0 | 0 | 0 | 0 | 0 | 1,341 | 0.214835 |
a8a0133356f4f20d5bb0f1a7ef3b7ad354fcc592 | 1,891 | py | Python | task/bq2bq/executor/bumblebee/loader.py | pikochip/transformers | aa14f19efd2a83aebd7b0d3a296053283b98e624 | [
"Apache-2.0"
] | 34 | 2021-06-16T17:25:13.000Z | 2021-08-13T08:21:22.000Z | task/bq2bq/executor/bumblebee/loader.py | pikochip/transformers | aa14f19efd2a83aebd7b0d3a296053283b98e624 | [
"Apache-2.0"
] | 12 | 2021-08-10T10:08:48.000Z | 2022-03-09T10:14:29.000Z | task/bq2bq/executor/bumblebee/loader.py | pikochip/transformers | aa14f19efd2a83aebd7b0d3a296053283b98e624 | [
"Apache-2.0"
] | 4 | 2021-08-10T13:10:22.000Z | 2022-02-19T14:04:21.000Z | from bumblebee.bigquery_service import BigqueryService
from datetime import datetime
from abc import ABC
from abc import abstractmethod
from bumblebee.config import LoadMethod
class BaseLoader(ABC):
@abstractmethod
def load(self, query):
pass
class PartitionLoader(BaseLoader):
def __init__(self, bigquery_service, destination: str, load_method: LoadMethod, partition: datetime):
self.bigquery_service = bigquery_service
self.destination_name = destination
self.load_method = load_method
self.partition_date = partition
def load(self, query):
partition_date_str = self.partition_date.strftime("%Y%m%d")
load_destination = "{}${}".format(self.destination_name, partition_date_str)
write_disposition = self.load_method.write_disposition
return self.bigquery_service.transform_load(query=query,
write_disposition=write_disposition,
destination_table=load_destination)
class TableLoader(BaseLoader):
def __init__(self, bigquery_service, destination: str, load_method: LoadMethod):
self.bigquery_service = bigquery_service
self.full_table_name = destination
self.load_method = load_method
def load(self, query):
return self.bigquery_service.transform_load(query=query,
write_disposition=self.load_method.write_disposition,
destination_table=self.full_table_name)
class DMLLoader(BaseLoader):
def __init__(self,bigquery_service: BigqueryService, destination: str):
self.bigquery_service = bigquery_service
self.full_table_name = destination
def load(self,query):
return self.bigquery_service.execute_query(query)
| 35.018519 | 105 | 0.673189 | 1,703 | 0.900582 | 0 | 0 | 55 | 0.029085 | 0 | 0 | 15 | 0.007932 |
a8a04bb0a9831548bd868b09fed78c535d82ee0a | 250 | py | Python | Servus/home/urls.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | Servus/home/urls.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | Servus/home/urls.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | # coding=utf-8
from django.conf.urls import url
from .views import home, positioned, tiled
urlpatterns = [
url(r'^$', home),
url(r'home/$', home),
url(r'positioned-(?P<plan_id>[0-9]{1,4})/$', positioned),
url(r'tiled/$', tiled),
]
| 20.833333 | 61 | 0.608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.305556 |
a8a2455ceee1098085ee522a4b49f599484a640e | 1,499 | py | Python | app/API/read.py | blagisquet/testNautilux | c7952b3ff1a43555d451704430f15a09d9aa574a | [
"MIT"
] | null | null | null | app/API/read.py | blagisquet/testNautilux | c7952b3ff1a43555d451704430f15a09d9aa574a | [
"MIT"
] | null | null | null | app/API/read.py | blagisquet/testNautilux | c7952b3ff1a43555d451704430f15a09d9aa574a | [
"MIT"
] | null | null | null | import mysql.connector
from mysql.connector import Error
try:
# connection au serveur mysql database interventions
connection = mysql.connector.connect(host='localhost',
database='interventions',
user='root')
# SELECT la totalité de la table Intervention
sql_select_Query = "SELECT * FROM Intervention"
cursor = connection.cursor()
cursor.execute(sql_select_Query)
records = cursor.fetchall()
print("Total number of rows in Intervention is: ", cursor.rowcount)
# Récupération de chaque entrée de la table Intervention
print("\nPrinting each intervention record")
for row in records:
print("Id = ", row[0], )
print("Title = ", row[1])
print("Description = ", row[2])
print("Name = ", row[3])
print("Localisation = ", row[4])
print("Date = ", row[5], "\n")
#donne les informations à propos du serveur mysql si connecté
if connection.is_connected():
db_Info = connection.get_server_info()
print("Connected to MySQL Server version ", db_Info)
cursor = connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
print("You're connected to database: ", record)
# renvoie les erreurs eventuelles
except Error as e:
print("Error while connecting to MySQL", e)
# finally:
# if (connection.is_connected()):
# cursor.close()
# connection.close()
# print("MySQL connection is closed") | 34.068182 | 70 | 0.648432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 729 | 0.484385 |
a8a3681ea625f23d752d8458f7791096844d0480 | 4,482 | py | Python | 2_CS_Medium/Leetcode/Interview_Easy/DLC_9_Other.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null | 2_CS_Medium/Leetcode/Interview_Easy/DLC_9_Other.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null | 2_CS_Medium/Leetcode/Interview_Easy/DLC_9_Other.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null | 1. Number of 1 Bits (HammingWeight):
https://leetcode.com/explore/interview/card/top-interview-questions-easy/99/others/565/
# Easy way
def hammingWeight(self, n: int) -> int:
return bin(n).count('1')
# Harder way - https://stackoverflow.com/questions/21237767/python-a-b-meaning
def hammingWeight(self, n):
c = 0
while n:
n &= n - 1
c += 1
return c
2. Hamming Distance:
# Easy Way
bin(x ^ y).count('1')
#Right way (Bitwise Operators): https://code.tutsplus.com/articles/understanding-bitwise-operators--active-11301
# Approach 1: Just check every bit in both numbers and increment when they are different
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
while x != 0 or y != 0:
if x % 2 != y % 2:
hamming_distance += 1
x = x >> 1
y = y >> 1
return hamming_distance
# Approach 2: Just make XOR of x and y and after that count the number of '1' bits.
# because XOR of two different bits is always 1
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
new = x ^ y
while new > 0:
if new % 2 == 1:
hamming_distance += 1
new = new >> 1
return hamming_distance
# Approach 3: Again make XOR of x and y but when we count the number of '1' bits
# we make the trick n&(n-1) which removes last '1' bit
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
new = x ^ y
while new > 0:
new = new & (new-1)
hamming_distance += 1
return hamming_distance
# Good explanation of XOR solution: https://www.youtube.com/watch?v=UP4GhCxeC4I
3. Reverse Bits (Reverse Bits of a 32 bits unsigned integer):
# https://leetcode.com/explore/featured/card/top-interview-questions-easy/99/others/648/discuss/54932/Three-different-solutions-in-python
def reverseBits(self, n):
bit_str = '{0:032b}'.format(n) # Format n into bit string (length of 32)
reverse_str = bit_str[::-1] # Reverse bit_string with slice fxnality
return int(reverse_str, 2) # Return string as int w/ 2
4. Pascals Triangle:
def generate(self, numRows):
lists = []
for i in range(numRows):
lists.append([1]*(i+1))
if i>1 :
for j in range(1,i):
lists[i][j]=lists[i-1][j-1]+lists[i-1][j]
return lists
5. Valid Parenthesis:
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
# The stack to keep track of opening brackets.
stack = []
# Hash map for keeping track of mappings. This keeps the code very clean.
# Also makes adding more types of parenthesis easier
mapping = {")": "(", "}": "{", "]": "["}
# For every bracket in the expression.
for char in s:
# If the character is an closing bracket
if char in mapping:
# Pop the topmost element from the stack, if it is non empty
# Otherwise assign a dummy value of '#' to the top_element variable
top_element = stack.pop() if stack else '#'
# The mapping for the opening bracket in our hash and the top
# element of the stack don't match, return False
if mapping[char] != top_element:
return False
else:
# We have an opening bracket, simply push it onto the stack.
stack.append(char)
# In the end, if the stack is empty, then we have a valid expression.
# The stack won't be empty for cases like ((()
return not stack
def isValid(self, s):
stack = []
mapping = {")": "(", "}": "{", "]": "["}
for char in s:
if char in mapping:
top_element = stack.pop() if stack else '#'
if mapping[char] != top_element:
return False
else:
stack.append(char)
return not stack
6. Missing Number: (Missing number in an array)
# One line
def missingNumber(self, nums):
return sum(range(len(nums)+1)) - sum(nums)
# Two lines
def missingNumber(self, nums):
n = len(nums)
return n * (n+1) / 2 - sum(nums)
| 33.699248 | 141 | 0.555779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,683 | 0.375502 |
a8a48dba10dc8bcece98d956d127819f587eacf1 | 13,796 | py | Python | src/main/java/nl/Ipsen5Server/Service/kik-bot-api-unofficial/examples/kik_unofficial/protobuf/common/v2/model_pb2.py | anthonyscheeres/Ipen5BackendGroep11 | e2675c2ac6580f0a6f1d9e5f755f19405d17e514 | [
"Apache-2.0"
] | null | null | null | src/main/java/nl/Ipsen5Server/Service/kik-bot-api-unofficial/examples/kik_unofficial/protobuf/common/v2/model_pb2.py | anthonyscheeres/Ipen5BackendGroep11 | e2675c2ac6580f0a6f1d9e5f755f19405d17e514 | [
"Apache-2.0"
] | null | null | null | src/main/java/nl/Ipsen5Server/Service/kik-bot-api-unofficial/examples/kik_unofficial/protobuf/common/v2/model_pb2.py | anthonyscheeres/Ipen5BackendGroep11 | e2675c2ac6580f0a6f1d9e5f755f19405d17e514 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: common/v2/model.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.kik_options_pb2 as kik__options__pb2
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='common/v2/model.proto',
package='common.v2',
syntax='proto3',
serialized_pb=_b('\n\x15\x63ommon/v2/model.proto\x12\tcommon.v2\x1a\x11kik_options.proto\x1a\x19protobuf_validation.proto\"K\n\tAccountId\x12>\n\nlocal_part\x18\x01 \x01(\tB*\xca\x9d%&\x08\x01\x12\"^[a-z_0-9\\.]{2,30}(_[a-z0-9]{3})?$\")\n\tPersonaId\x12\x1c\n\traw_value\x18\x01 \x01(\x0c\x42\t\xca\x9d%\x05\x08\x01\x30\x80\x01\"(\n\x06\x43hatId\x12\x1e\n\traw_value\x18\x01 \x01(\x0c\x42\x0b\xca\x9d%\x07\x08\x01(\x01\x30\x80\x04\"A\n\nOneToOneId\x12\x33\n\x08personas\x18\x01 \x03(\x0b\x32\x14.common.v2.PersonaIdB\x0b\xca\x9d%\x07\x08\x01x\x02\x80\x01\x02\"/\n\x10\x43lientInstanceId\x12\x1b\n\traw_value\x18\x01 \x01(\x0c\x42\x08\xca\x9d%\x04\x08\x01\x30\x64\"%\n\x04Uuid\x12\x1d\n\traw_value\x18\x01 \x01(\x0c\x42\n\xca\x9d%\x06\x08\x01(\x10\x30\x10\"\x82\x01\n\x05\x45mail\x12y\n\x05\x65mail\x18\x01 \x01(\tBj\xca\x9d%f\x08\x01\x12_^[\\w\\-+]+(\\.[\\w\\-+]+)*@[A-Za-z0-9][A-Za-z0-9\\-]*(\\.[A-Za-z0-9][A-Za-z0-9\\-]*)*(\\.[A-Za-z]{2,})$0\xf8\x07\"4\n\x08Username\x12(\n\x08username\x18\x02 \x01(\tB\x16\xca\x9d%\x12\x08\x01\x12\x0e^[\\w\\.]{2,30}$B~\n\x15\x63om.kik.gen.common.v2P\x01ZLgithub.com/kikinteractive/xiphias-model-common/generated/go/common/v2;common\xa0\x01\x01\xa2\x02\x0bKPBCommonV2\xaa\xa3*\x02\x08\x01\x62\x06proto3')
,
dependencies=[kik__options__pb2.DESCRIPTOR,protobuf__validation__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ACCOUNTID = _descriptor.Descriptor(
name='AccountId',
full_name='common.v2.AccountId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='local_part', full_name='common.v2.AccountId.local_part', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%&\010\001\022\"^[a-z_0-9\\.]{2,30}(_[a-z0-9]{3})?$'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=82,
serialized_end=157,
)
_PERSONAID = _descriptor.Descriptor(
name='PersonaId',
full_name='common.v2.PersonaId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='raw_value', full_name='common.v2.PersonaId.raw_value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005\010\0010\200\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=200,
)
_CHATID = _descriptor.Descriptor(
name='ChatId',
full_name='common.v2.ChatId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='raw_value', full_name='common.v2.ChatId.raw_value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001(\0010\200\004'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=202,
serialized_end=242,
)
_ONETOONEID = _descriptor.Descriptor(
name='OneToOneId',
full_name='common.v2.OneToOneId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='personas', full_name='common.v2.OneToOneId.personas', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\002\200\001\002'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=244,
serialized_end=309,
)
_CLIENTINSTANCEID = _descriptor.Descriptor(
name='ClientInstanceId',
full_name='common.v2.ClientInstanceId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='raw_value', full_name='common.v2.ClientInstanceId.raw_value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\004\010\0010d'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=311,
serialized_end=358,
)
_UUID = _descriptor.Descriptor(
name='Uuid',
full_name='common.v2.Uuid',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='raw_value', full_name='common.v2.Uuid.raw_value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\006\010\001(\0200\020'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=360,
serialized_end=397,
)
_EMAIL = _descriptor.Descriptor(
name='Email',
full_name='common.v2.Email',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='email', full_name='common.v2.Email.email', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%f\010\001\022_^[\\w\\-+]+(\\.[\\w\\-+]+)*@[A-Za-z0-9][A-Za-z0-9\\-]*(\\.[A-Za-z0-9][A-Za-z0-9\\-]*)*(\\.[A-Za-z]{2,})$0\370\007'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=400,
serialized_end=530,
)
_USERNAME = _descriptor.Descriptor(
name='Username',
full_name='common.v2.Username',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='username', full_name='common.v2.Username.username', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\022\010\001\022\016^[\\w\\.]{2,30}$'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=532,
serialized_end=584,
)
_ONETOONEID.fields_by_name['personas'].message_type = _PERSONAID
DESCRIPTOR.message_types_by_name['AccountId'] = _ACCOUNTID
DESCRIPTOR.message_types_by_name['PersonaId'] = _PERSONAID
DESCRIPTOR.message_types_by_name['ChatId'] = _CHATID
DESCRIPTOR.message_types_by_name['OneToOneId'] = _ONETOONEID
DESCRIPTOR.message_types_by_name['ClientInstanceId'] = _CLIENTINSTANCEID
DESCRIPTOR.message_types_by_name['Uuid'] = _UUID
DESCRIPTOR.message_types_by_name['Email'] = _EMAIL
DESCRIPTOR.message_types_by_name['Username'] = _USERNAME
AccountId = _reflection.GeneratedProtocolMessageType('AccountId', (_message.Message,), dict(
DESCRIPTOR = _ACCOUNTID,
__module__ = 'common.v2.model_pb2'
# @@protoc_insertion_point(class_scope:common.v2.AccountId)
))
_sym_db.RegisterMessage(AccountId)
PersonaId = _reflection.GeneratedProtocolMessageType('PersonaId', (_message.Message,), dict(
DESCRIPTOR = _PERSONAID,
__module__ = 'common.v2.model_pb2'
# @@protoc_insertion_point(class_scope:common.v2.PersonaId)
))
_sym_db.RegisterMessage(PersonaId)
ChatId = _reflection.GeneratedProtocolMessageType('ChatId', (_message.Message,), dict(
DESCRIPTOR = _CHATID,
__module__ = 'common.v2.model_pb2'
# @@protoc_insertion_point(class_scope:common.v2.ChatId)
))
_sym_db.RegisterMessage(ChatId)
OneToOneId = _reflection.GeneratedProtocolMessageType('OneToOneId', (_message.Message,), dict(
DESCRIPTOR = _ONETOONEID,
__module__ = 'common.v2.model_pb2'
# @@protoc_insertion_point(class_scope:common.v2.OneToOneId)
))
_sym_db.RegisterMessage(OneToOneId)
ClientInstanceId = _reflection.GeneratedProtocolMessageType('ClientInstanceId', (_message.Message,), dict(
DESCRIPTOR = _CLIENTINSTANCEID,
__module__ = 'common.v2.model_pb2'
# @@protoc_insertion_point(class_scope:common.v2.ClientInstanceId)
))
_sym_db.RegisterMessage(ClientInstanceId)
Uuid = _reflection.GeneratedProtocolMessageType('Uuid', (_message.Message,), dict(
DESCRIPTOR = _UUID,
__module__ = 'common.v2.model_pb2'
# @@protoc_insertion_point(class_scope:common.v2.Uuid)
))
_sym_db.RegisterMessage(Uuid)
Email = _reflection.GeneratedProtocolMessageType('Email', (_message.Message,), dict(
DESCRIPTOR = _EMAIL,
__module__ = 'common.v2.model_pb2'
# @@protoc_insertion_point(class_scope:common.v2.Email)
))
_sym_db.RegisterMessage(Email)
Username = _reflection.GeneratedProtocolMessageType('Username', (_message.Message,), dict(
DESCRIPTOR = _USERNAME,
__module__ = 'common.v2.model_pb2'
# @@protoc_insertion_point(class_scope:common.v2.Username)
))
_sym_db.RegisterMessage(Username)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.kik.gen.common.v2P\001ZLgithub.com/kikinteractive/xiphias-model-common/generated/go/common/v2;common\240\001\001\242\002\013KPBCommonV2\252\243*\002\010\001'))
_ACCOUNTID.fields_by_name['local_part'].has_options = True
_ACCOUNTID.fields_by_name['local_part']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%&\010\001\022\"^[a-z_0-9\\.]{2,30}(_[a-z0-9]{3})?$'))
_PERSONAID.fields_by_name['raw_value'].has_options = True
_PERSONAID.fields_by_name['raw_value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005\010\0010\200\001'))
_CHATID.fields_by_name['raw_value'].has_options = True
_CHATID.fields_by_name['raw_value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001(\0010\200\004'))
_ONETOONEID.fields_by_name['personas'].has_options = True
_ONETOONEID.fields_by_name['personas']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\002\200\001\002'))
_CLIENTINSTANCEID.fields_by_name['raw_value'].has_options = True
_CLIENTINSTANCEID.fields_by_name['raw_value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\004\010\0010d'))
_UUID.fields_by_name['raw_value'].has_options = True
_UUID.fields_by_name['raw_value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\006\010\001(\0200\020'))
_EMAIL.fields_by_name['email'].has_options = True
_EMAIL.fields_by_name['email']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%f\010\001\022_^[\\w\\-+]+(\\.[\\w\\-+]+)*@[A-Za-z0-9][A-Za-z0-9\\-]*(\\.[A-Za-z0-9][A-Za-z0-9\\-]*)*(\\.[A-Za-z]{2,})$0\370\007'))
_USERNAME.fields_by_name['username'].has_options = True
_USERNAME.fields_by_name['username']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\022\010\001\022\016^[\\w\\.]{2,30}$'))
# @@protoc_insertion_point(module_scope)
| 37.79726 | 1,243 | 0.711293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,085 | 0.2961 |
a8a4c55a0c99efa3899b4d5f168237ee4f5c459c | 24,112 | py | Python | scripts/fitter/pcl_fitter.py | yuki-inaho/zense_grpc_rgbd_ir | 76739df6b1a402177d2cf47c791faa252688d5b3 | [
"Apache-2.0"
] | null | null | null | scripts/fitter/pcl_fitter.py | yuki-inaho/zense_grpc_rgbd_ir | 76739df6b1a402177d2cf47c791faa252688d5b3 | [
"Apache-2.0"
] | null | null | null | scripts/fitter/pcl_fitter.py | yuki-inaho/zense_grpc_rgbd_ir | 76739df6b1a402177d2cf47c791faa252688d5b3 | [
"Apache-2.0"
] | null | null | null |
import math
from math import pi
import numpy as np
import open3d as o3d
import matplotlib.pyplot as plt
import cv2
import toml
from .cameraparam import CameraParam
from .fitted_line import FittedLine
from .ransac_fit import ransac_line_fit, ransac_ground_fit
from .util import check_all_false
# TODO: output random seed used in ransac and open3d
# PCL pre-processing (the unit of these numerics is [m])
DOWNSAMPLE_VOXEL_SIZE = 0.003
DOWNSAMPLE_VOXEL_SIZE_GROUND = 0.005
# Ground fit
X_MIN = 0.
X_MAX = +1.2
Y_MIN = -0.8
Y_MAX = +0.8
GRID_SIZE = 0.080
GROUND_SEED_Z_MAX = 0.
GROUND_SEED_MARGIN = 0.080
GROUND_MARGIN = 0.030
SMOOTHING_KERNEL = GRID_SIZE * 0.5
# Clustering
# DBSCAN_EPS : Density parameter that is used to find neighbouring points
# DBSCAN_MINPOINTS : Minimum number of points to form a cluster
DBSCAN_EPS = 0.016
DBSCAN_MINPOINTS = 10
CLUSTER_MINPOINTS = 50
CMAP_CLUSTER = plt.get_cmap("tab20")
def set_pcl_fitter(toml_path):
dict_toml = toml.load(open(toml_path))
set_roll = float(dict_toml['General']['set_roll'])
set_pitch = float(dict_toml['General']['set_pitch'])
set_yaw = float(dict_toml['General']['set_yaw'])
camera_set_param = CameraParam()
camera_set_param.set_tf_rot_and_trans([set_roll, set_pitch, set_yaw], [0., 0., 0.])
return PCLFitter(camera_set_param, dict_toml)
class PCLFitter(object):
def __init__(self, camera_set_param=None, target_attribute=None):
self.depth_img = None
self.camera_param = None
self.grid_xyzw = None
if camera_set_param is None:
self.camera_set_param = CameraParam()
else:
self.camera_set_param = camera_set_param
if target_attribute is None:
self.set_parameters()
else:
self.set_target_attribute(target_attribute)
def set_target_attribute(self, dict_toml):
self.pcl_cutoff_dist = float(dict_toml['Selection']['pcl_cutoff_dist'])
self.target_max_dist = float(dict_toml['Selection']['target_max_dist'])
self.target_min_dist = float(dict_toml['Selection']['target_min_dist'])
self.target_max_len = float(dict_toml['Selection']['target_max_len'])
self.target_min_len = float(dict_toml['Selection']['target_min_len'])
self.target_max_tilt = float(dict_toml['Selection']['target_max_tilt'])
def set_parameters(self):
self.pcl_cutoff_dist = 1.1
self.target_max_dist = 0.85
self.target_min_dist = 0.3
self.target_min_len = 0.25
self.target_max_len = 0.40
self.target_max_tilt = 30.
def get_pcd_from_depth_img(self, depth_img, camera_param):
self.depth_img = depth_img
self.camera_param = camera_param
pcl_raw = self.tfm_pcl_cam2global(self.cvt_depth2pcl(self.depth_img, self.camera_param), camera_param)
pcd = self.downsample(pcl_raw, voxel_size=DOWNSAMPLE_VOXEL_SIZE)
return pcd
def fit_pcd(self, pcd, cluster_eps=DBSCAN_EPS, cluster_min_points=DBSCAN_MINPOINTS, verbose=True):
pcd_list = []
fitgeom_list = []
pcd_array = np.array(pcd.points, dtype=np.float32)
bflg_above_ground, xy_binidx, grid_xyzw, pcd_grounds_list = self.ground_fit(pcd_array)
pcd_grounds_ary_pre_downsample = np.asarray(pcd_grounds_list[2].points) # pcd_grounds = [pcd_out_of_bin, pcd_groundseed, pcd_ground]
pcd_grounds = self.downsample(pcd_grounds_ary_pre_downsample, voxel_size=DOWNSAMPLE_VOXEL_SIZE_GROUND)
ground_points_ary = np.asarray(pcd_grounds.points)
pcd_list += [ground_points_ary]
fitgeom_list.append(self.get_mesh_ground())
# TODO debug.error() send to cloud if above ground is all false
if check_all_false(bflg_above_ground):
return [], pcd_list, fitgeom_list, pcd_array, ground_points_ary
labels, cluster_pcd = self.clustering(pcd_array[bflg_above_ground],
eps=cluster_eps, min_points=cluster_min_points)
pcd_list.append(cluster_pcd)
line_list = self.line_fit(pcd_array[bflg_above_ground], labels)
self.merge_lines(line_list)
self.mark_multiline_clusters(line_list)
self.extend_lines_to_ground(line_list, grid_xyzw)
self.check_line_truncation(line_list)
self.final_selection(line_list)
if verbose:
self.print_line_info(line_list)
self.bkg_postprocess(line_list)
self.remove_noise_lines(line_list, grid_xyzw)
mesh_cylinders = self.get_line_fit_geometry(line_list)
fitgeom_list += mesh_cylinders
return line_list, pcd_list, fitgeom_list, pcd_array, ground_points_ary
def cvt_depth2pcl(self, depth_img, camera_param):
    """Back-project a depth image into an (N, 3) camera-frame point cloud.

    Pixels with non-positive depth or depth beyond the cutoff (+0.2 m slack)
    are dropped.  Axis convention used in this module: +x forward (depth),
    +y image-down, -z image-right.
    """
    MIN_VALID_DEPTH = 1e-3
    cx, cy = camera_param.center_xy
    fx, fy = camera_param.focal_xy
    rows = np.arange(depth_img.shape[0], dtype=np.float32)
    cols = np.arange(depth_img.shape[1], dtype=np.float32)
    pix_x, pix_y = np.meshgrid(cols, rows)
    # TODO: rewrite axis conversion explicitly (i.e. zense clockwise rotation)
    keep = (depth_img > MIN_VALID_DEPTH) * (depth_img < self.pcl_cutoff_dist + 0.2)
    depth = depth_img[keep]
    comp_x = depth
    comp_y = depth * (pix_y[keep] - cy) * (1. / fy)
    comp_z = -depth * (pix_x[keep] - cx) * (1. / fx)
    return np.stack([comp_x, comp_y, comp_z], axis=-1)
def tfm_pcl_cam2global(self, pcl_camframe, camera_param):
    """Transform camera-frame points into the shared global frame.

    Applies the per-camera extrinsic rotation + translation first, then the
    rotation of the camera set as a whole.
    """
    in_set_frame = np.dot(pcl_camframe, camera_param.rot_mtx.transpose()) + camera_param.translation
    return np.dot(in_set_frame, self.camera_set_param.rot_mtx.transpose())
def cvt_to_2d_image_xyd(self, input_points, camera_param):
    """Project global-frame points to (pixel_x, pixel_y, depth) coordinates.

    The inverse of the depth back-projection: undo the camera-set rotation,
    then the per-camera extrinsics, then apply the pinhole model.  The
    output array has the same shape as `input_points`.
    """
    pts = input_points.reshape(-1, 3)
    in_set_frame = np.dot(pts, self.camera_set_param.inv_rot_mtx.transpose())
    in_cam_frame = np.dot(in_set_frame - camera_param.translation,
                          camera_param.inv_rot_mtx.transpose())
    cx, cy = camera_param.center_xy
    fx, fy = camera_param.focal_xy
    depth = in_cam_frame[:, 0]
    pix_y = in_cam_frame[:, 1] / depth * fy + cy
    pix_x = -in_cam_frame[:, 2] / depth * fx + cx
    return np.stack([pix_x, pix_y, depth], axis=-1).reshape(input_points.shape)
def downsample(self, pcl_raw, voxel_size):
    """Voxel-grid downsample a raw (N, 3) array into an Open3D point cloud."""
    # Wrap the array first (painted blue for visualization), then decimate.
    wrapped = self.cvt_numpy2open3d(pcl_raw, color=[0., 0., 1.])
    return wrapped.voxel_down_sample(voxel_size=voxel_size)
def cvt_numpy2open3d(self, pcl, color=None):
    """Wrap an (N, 3) numpy array as an Open3D PointCloud.

    Args:
        pcl: point coordinates; converted to float64 as Open3D requires.
        color: optional [r, g, b] in [0, 1]; applied uniformly when given.
    """
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pcl.astype(np.float64))
    # Idiom fix: `color is not None` instead of `not color is None`.
    if color is not None:
        pcd.paint_uniform_color(color)
    return pcd
def ground_fit(self, pcl):
    """Estimate a ground-height grid and split `pcl` into ground/above-ground.

    Returns:
        bflg_above_ground: bool mask of points above the fitted ground.
        xy_binidx: (N, 2) grid-cell index per point (-1 when out of the grid).
        grid_xyzw: (nx, ny, 4) grid of [x_center, y_center, ground_z, weight].
        pcd_all: [out-of-bin, ground-seed, ground] debug point clouds.
    """
    x_nbin = int((X_MAX - X_MIN) / float(GRID_SIZE) + 1e-3)
    y_nbin = int((Y_MAX - Y_MIN) / float(GRID_SIZE) + 1e-3)
    x_edge = np.linspace(X_MIN, X_MIN + GRID_SIZE * x_nbin, x_nbin + 1).reshape(1, -1)
    y_edge = np.linspace(Y_MIN, Y_MIN + GRID_SIZE * y_nbin, y_nbin + 1).reshape(1, -1)
    x_ctr = (x_edge[0, 1:] + x_edge[0, :-1]) * 0.5
    y_ctr = (y_edge[0, 1:] + y_edge[0, :-1]) * 0.5
    # Assign every point to a grid cell; index -1 marks out-of-grid points.
    pcl_tmp = pcl.reshape(-1, 1, 3)
    x_binflg = (pcl_tmp[:, :, 0] >= x_edge[:, :-1]) * (pcl_tmp[:, :, 0] < x_edge[:, 1:])
    y_binflg = (pcl_tmp[:, :, 1] >= y_edge[:, :-1]) * (pcl_tmp[:, :, 1] < y_edge[:, 1:])
    x_binidx = np.argmax(x_binflg, axis=-1)
    y_binidx = np.argmax(y_binflg, axis=-1)
    x_binidx[(x_binflg.sum(axis=-1) == 0)] = -1
    y_binidx[(y_binflg.sum(axis=-1) == 0)] = -1
    xy_binidx = np.concatenate([x_binidx.reshape(-1, 1), y_binidx.reshape(-1, 1)], axis=-1)
    # Bug fix: `np.bool` was removed in NumPy 1.24; use the builtin `bool`.
    bflg_out_of_bin = (xy_binidx == -1).sum(-1).astype(bool)
    bflg_in_bin = (bflg_out_of_bin == False)
    # Seed the grid with the per-cell minimum z below GROUND_SEED_Z_MAX.
    grid_xyzw = np.zeros([x_nbin, y_nbin, 4], dtype=np.float64)
    for i_x in range(x_nbin):
        for i_y in range(y_nbin):
            in_bin = (x_binidx == i_x) * (y_binidx == i_y)
            pcl_in_bin = pcl[in_bin]
            valid = (pcl_in_bin[:, 2] < GROUND_SEED_Z_MAX)
            pcl_valid = pcl_in_bin[valid]
            if pcl_valid.shape[0] == 0:
                # Empty cell: placeholder z with a low weight so it gets
                # overwritten by fill_empy_gridz / the RANSAC refit below.
                z_val = 0.
                wgt = 0.1
            else:
                z_val = pcl_valid[:, 2].min()
                wgt = 1.
            grid_xyzw[i_x, i_y] = [x_ctr[i_x], y_ctr[i_y], z_val, wgt]
    grid_xyzw = self.fill_empy_gridz(grid_xyzw, w_thres=0.1)
    pcd_groundseed = self.cvt_numpy2open3d(grid_xyzw.reshape(-1, 4)[:, :3], color=[1., 0., 1.])
    # Refit the ground with RANSAC on near-ground points, then smooth it.
    pcl_ground_seed_z = grid_xyzw[x_binidx, y_binidx, 2]
    bflg_ground_seed = (pcl[:, 2] < (pcl_ground_seed_z + GROUND_SEED_MARGIN)) * bflg_in_bin
    grid_xyzw = ransac_ground_fit(pcl[bflg_ground_seed], xy_binidx[bflg_ground_seed], grid_xyzw)
    grid_xyzw = self.fill_empy_gridz(grid_xyzw, w_thres=1.)
    grid_xyzw = self.smooth_ground(grid_xyzw, kernel_size=SMOOTHING_KERNEL)
    self.grid_xyzw = grid_xyzw
    # Final ground / above-ground split, restricted to the radial cutoff.
    bflg_in_range = (np.linalg.norm(pcl[:, :2], axis=-1) < self.pcl_cutoff_dist)
    bflg_valid_points = bflg_in_range * bflg_in_bin
    pcl_ground_z = grid_xyzw[x_binidx, y_binidx, 2]
    bflg_ground = (pcl[:, 2] < (pcl_ground_z + GROUND_MARGIN)) * bflg_valid_points
    bflg_above_ground = (bflg_ground == False) * bflg_valid_points
    pcd_out_of_bin = self.cvt_numpy2open3d(pcl[bflg_valid_points == False], color=[0.3, 0., 0.5])
    pcd_ground = self.cvt_numpy2open3d(pcl[bflg_ground], color=[0., 0., 0.5])
    pcd_all = [pcd_out_of_bin, pcd_groundseed, pcd_ground]
    return bflg_above_ground, xy_binidx, grid_xyzw, pcd_all
def fill_empy_gridz(self, grid_xyzw, w_thres=0.1):
    """Copy the z of the nearest well-weighted cell into low-weight cells.

    A cell counts as "filled" when its weight exceeds `w_thres`; every other
    cell inherits the z of the closest filled cell (by xy distance between
    cell centers).  Modifies `grid_xyzw` in place and returns it.
    """
    is_filled = grid_xyzw[:, :, 3] > w_thres
    is_empty = ~is_filled
    filled_cells = grid_xyzw[is_filled].reshape(-1, 1, 4)
    empty_cells = grid_xyzw[is_empty].reshape(1, -1, 4)
    # Pairwise xy distances: rows = filled cells, columns = empty cells.
    dists = np.linalg.norm(filled_cells[:, :, :2] - empty_cells[:, :, :2], axis=-1)
    if dists.shape[0] != 0:
        nearest = np.argmin(dists, axis=0)
        grid_xyzw[is_empty, 2] = filled_cells[nearest, 0, 2]
    return grid_xyzw
def smooth_ground(self, grid_xyzw, kernel_size):
    """Gaussian-smooth the ground z values across the grid.

    Each cell's z becomes a weighted average of every cell's z with weights
    `w * exp(-d^2 / (2 * kernel_size^2))`, where d is the xy distance between
    cell centers and w the cell's confidence weight.

    Bug fix: the original wrote the result through
    `grid_xyzw[:,:,2].reshape(-1)[:] = z_smooth`.  `grid_xyzw[:,:,2]` is a
    non-contiguous view, so `reshape` returns a *copy* and the assignment was
    silently discarded — the grid came back unsmoothed.  Assigning to the
    slice directly fixes that.
    """
    centers = grid_xyzw[:, :, :2].reshape(-1, 2)
    diff = centers.reshape(1, -1, 2) - centers.reshape(-1, 1, 2)
    dsq = (diff ** 2).sum(axis=-1)
    z_orig = grid_xyzw[:, :, 2].reshape(-1)
    wgt = grid_xyzw[:, :, 3].reshape(-1)
    coeff = 0.5 / kernel_size ** 2
    fill_wgt = wgt * np.exp(-dsq * coeff)
    z_smooth = (z_orig * fill_wgt).sum(axis=-1) / fill_wgt.sum(axis=-1)
    grid_xyzw[:, :, 2] = z_smooth.reshape(grid_xyzw.shape[0], grid_xyzw.shape[1])
    return grid_xyzw
def get_mesh_ground(self):
    """Return the fitted ground grid as a triangle mesh (None before any fit)."""
    if self.grid_xyzw is None:
        return None
    return self.cvt_gridvtx2mesh(self.grid_xyzw)
def cvt_gridvtx2mesh(self, grid_vtx, double_sided=True):
    """Triangulate an (nx, ny, >=3) vertex grid into an Open3D mesh.

    Each grid cell is split into two triangles; with `double_sided` the
    triangle list is duplicated with reversed winding so the mesh is visible
    from both sides.  The result is painted a uniform gray.
    """
    ngrid_x, ngrid_y = grid_vtx.shape[0], grid_vtx.shape[1]
    vertices = np.array(grid_vtx[:, :, :3].reshape(-1, 3))
    faces = []
    for i_x in range(ngrid_x - 1):
        for i_y in range(ngrid_y - 1):
            base = i_x * ngrid_y + i_y
            # Two triangles per cell: (base, below, right) and its complement.
            faces.append([base, base + ngrid_y, base + 1])
            faces.append([base + ngrid_y + 1, base + 1, base + ngrid_y])
    faces = np.array(faces)
    if double_sided:
        faces = np.concatenate([faces, faces[:, ::-1]], axis=0)
    mesh = o3d.geometry.TriangleMesh()
    mesh.vertices = o3d.utility.Vector3dVector(vertices)
    mesh.triangles = o3d.utility.Vector3iVector(faces)
    mesh.paint_uniform_color([0.4, 0.4, 0.4])
    mesh.compute_vertex_normals()
    return mesh
def clustering(self, pcl, eps=DBSCAN_EPS, min_points=DBSCAN_MINPOINTS):
    """DBSCAN-cluster `pcl`, relabelling clusters by descending size.

    Returns:
        labels: per-point cluster id (0 = largest cluster, -1 = noise).
        pcd: the colored Open3D point cloud (noise painted light gray).
    """
    n_points = pcl.shape[0]
    # Fix: dropped the stray trailing comma after print() — a Python 2
    # `print ...,` leftover that built a throwaway one-element tuple.
    print('Clustering {} points ...'.format(n_points))
    pcd = self.cvt_numpy2open3d(pcl)
    labels_orig = np.array(
        pcd.cluster_dbscan(eps=eps, min_points=min_points, print_progress=False))
    n_cluster = labels_orig.max() + 1
    print('Found {} clusters.'.format(n_cluster))
    # Re-map cluster ids so that id 0 is the most populated cluster.
    cls_flg = (np.arange(n_cluster).reshape(-1, 1) == labels_orig.reshape(1, -1))
    n_points_in_cls = cls_flg.sum(axis=-1)
    sortidx_cls = np.argsort(n_points_in_cls)[::-1]
    labels = np.ones(n_points, dtype=np.int32) * -1
    for i_cls in range(n_cluster):
        labels[cls_flg[sortidx_cls[i_cls]]] = i_cls
    colors = CMAP_CLUSTER(labels)
    colors[labels < 0] = 0.8
    pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
    return labels, pcd
def line_fit(self, pcl, labels):
    # Fit straight line segments to each DBSCAN cluster with RANSAC.  A
    # cluster may yield several lines: after each good fit the outliers are
    # re-fitted, up to MAX_ITER_LINEFIT times.
    MAX_ITER_LINEFIT = 3
    RANSAC_N_ITER = 500
    CUT_PERCENTILE = 0.8
    DTHRES_INLIER = 0.020
    MAX_ROOT_Z = 0.20
    line_list = []
    n_cluster = labels.max() + 1
    print("Line fit on %d clusters ..." % n_cluster)
    do_break = False
    for i_cluster in range(n_cluster):
        pcl_cluster = pcl[(labels == i_cluster)]
        print("Cluster #{} : {} points".format(i_cluster, pcl_cluster.shape[0]))
        pcl_to_fit = pcl_cluster
        for i_iter in range(MAX_ITER_LINEFIT):
            n_to_fit = pcl_to_fit.shape[0]
            # (trailing comma is a Python 2 `print ...,` leftover; harmless)
            print(" - Iteration {} : {} points".format(i_iter, n_to_fit)),
            if n_to_fit < CLUSTER_MINPOINTS:
                print(" - Too small!")
                if i_iter == 0:
                    # Clusters are ordered by decreasing size, so once one is
                    # too small on its first pass, all remaining ones are too.
                    do_break = True
                break
            # max_root_z only constrains the first fit of each cluster.
            length, tfm_mtx, is_outlier = ransac_line_fit(pcl_to_fit, n_iter=RANSAC_N_ITER, dthres_inlier=DTHRES_INLIER, cut_percentile=CUT_PERCENTILE, max_root_z=(MAX_ROOT_Z if i_iter==0 else -1.))
            if tfm_mtx is None:
                print(" - Bad fit!")
                break
            print(" - Good fit!")
            line_list.append(FittedLine(length, tfm_mtx, i_cluster))
            # Retry on the outliers in case the cluster holds several lines.
            pcl_to_fit = pcl_to_fit[is_outlier]
        if do_break:
            break
    print("Found {} lines.".format(len(line_list)))
    return line_list
def merge_lines(self, line_list):
    """Attach nearly-collinear, nearby lines to a parent line.

    Lines are scanned in ascending root-z order; a higher line whose root
    lies close to and roughly along a lower line's axis is given that line
    as its `parent`.  Mutates the line objects in place.
    """
    max_angle_cos = math.cos(15. * pi / 180.)
    max_gap = 0.10
    order = np.argsort(np.array([ln.position[2] for ln in line_list]))
    count = len(line_list)
    for idx_a in range(count):
        lower = line_list[order[idx_a]]
        for idx_b in range(idx_a + 1, count):
            upper = line_list[order[idx_b]]
            if upper.parent is not None:
                continue
            offset = upper.position - lower.position_center
            gap = np.linalg.norm(offset)
            alignment = np.dot(offset / gap, lower.direction)
            if alignment < max_angle_cos:
                continue
            if gap > max_gap + lower.length * 0.5:
                continue
            upper.parent = lower
def count_lines_in_cluster(self, line_list):
    """Return a {cluster_id: number of fitted lines} mapping for `line_list`."""
    counts = {}
    for line in line_list:
        counts[line.cluster_id] = counts.get(line.cluster_id, 0) + 1
    return counts
def mark_multiline_clusters(self, line_list):
    """Flag every line whose cluster produced more than one fitted line."""
    counts = self.count_lines_in_cluster(line_list)
    multi = {cid for cid, n in counts.items() if n > 1}
    for line in line_list:
        if line.cluster_id in multi:
            line.is_multiline_cluster = True
def extend_lines_to_ground(self, line_list, grid_xyzw):
    # Stretch each near-vertical root line down to the estimated ground and
    # mark lines whose extension was short enough as grounded.
    N_AVERAGE = 4
    MAX_R = GRID_SIZE
    MIN_SOLITARY_LEN = 0.100
    MAX_EXTEND_LEN = 0.200
    MAX_GROUNDED_EXTEND_LEN = 0.060
    COSZ_THRESHOLD = math.cos(45. * pi / 180.)
    flatten_grid_xyz = grid_xyzw[:,:,:3].reshape(-1, 3)
    for line in line_list:
        if not line.parent is None:
            # Child segments are handled through their parent line.
            continue
        if line.is_solitary and line.length < MIN_SOLITARY_LEN:
            continue
        if line.direction[2] < COSZ_THRESHOLD:
            # Too tilted (> 45 deg): extending would move mostly sideways.
            continue
        # Ground height at the line root: weighted average of the z of the
        # N_AVERAGE closest grid cells, expressed in the line's local frame
        # (so z measures height along the line axis).
        flatten_grid_local_frame = line.tfm_to_local_frame(flatten_grid_xyz)
        flatten_grid_r = np.linalg.norm(flatten_grid_local_frame[:,:2], axis=-1)
        idx_sort = np.argsort(flatten_grid_r)[0:N_AVERAGE]
        weight = np.clip((MAX_R - flatten_grid_r[idx_sort]) / MAX_R, 0., 1.)
        weight_sum = weight.sum()
        if not weight_sum > 0.:
            # All candidate cells farther than MAX_R: no reliable ground here.
            continue
        ground_z_local_frame = np.dot(flatten_grid_local_frame[idx_sort,2], weight) / weight_sum
        # idx_min = idx_sort[0]
        # if flatten_grid_r[idx_min] > MAX_R:
        # continue
        # ground_z_local_frame = flatten_grid_local_frame[idx_min, 2]
        extend_len = -ground_z_local_frame
        if extend_len > MAX_EXTEND_LEN:
            continue
        line.extend_root(extend_len)
        line.is_grounded = (extend_len <= MAX_GROUNDED_EXTEND_LEN)
def is_in_image(self, xyd, image_shape):
    """Return True when pixel (x, y) lies inside the usable image area.

    NOTE(review): the margins look swapped relative to their names (y is
    checked against the side margins, x against top/bottom) — this appears
    consistent with the rotated axis convention noted in cvt_depth2pcl, but
    confirm before "fixing" it.
    """
    TOP_MARGIN = 20
    SIDE_MARGIN = 20
    BOTTOM_MARGIN = 0
    x_val, y_val = xyd[0], xyd[1]
    inside_y = SIDE_MARGIN < y_val < image_shape[0] - SIDE_MARGIN
    inside_x = TOP_MARGIN < x_val < image_shape[1] - BOTTOM_MARGIN
    return inside_y and inside_x
def check_line_truncation(self, line_list):
    """Check, per line, whether it is inside the image and unoccluded.

    Sets on each line: `xyd_ends`, `tip_is_contained`, `is_contained`,
    `is_occluded`, plus debug masks `sector_mask` / `occlusion_mask`.
    Occlusion is tested by drawing a thin annular sector beyond the tip and
    counting pixels closer than the tip depth.

    Bug fix: `np.bool` was removed in NumPy 1.24; use the builtin `bool`.
    (Also drops a no-op `sector_mask = sector_mask` self-assignment.)
    """
    SEEK_MARGIN = [10, 50]
    OPENING_ANGLE = 4.
    SECTOR_COLOR = 1
    DEPTH_MARGIN = 0.015
    MAX_OCCLUDING_PIXELS = 5
    # One scratch mask, reused (and cleared) for every line.
    sector_mask = np.zeros(self.depth_img.shape, dtype=np.uint8)
    for line in line_list:
        line.sector_mask = {}
        line.occlusion_mask = {}
        root_is_contained = 0
        tip_is_contained = 0
        is_occluded = False
        xyd_ends = self.cvt_to_2d_image_xyd(line.position_ends, self.camera_param)
        line.xyd_ends = xyd_ends
        root_is_contained += self.is_in_image(xyd_ends[0], sector_mask.shape)
        tip_is_contained += self.is_in_image(xyd_ends[1], sector_mask.shape)
        if line.is_solitary and line.is_grounded:
            root_to_tip_xy = (xyd_ends[1] - xyd_ends[0])[:2]
            sector_angle = math.atan2(root_to_tip_xy[1], root_to_tip_xy[0]) / math.pi * 180.
            sector_radius = int(np.linalg.norm(root_to_tip_xy) * 0.5 + (SEEK_MARGIN[1] + SEEK_MARGIN[0]) * 0.5)
            center = (xyd_ends.sum(axis=0) * 0.5).astype(np.int32)
            sector_mask[:] = 0
            cv2.ellipse(sector_mask, (center[0], center[1]), (sector_radius, sector_radius), sector_angle, -OPENING_ANGLE * 0.5, +OPENING_ANGLE * 0.5, SECTOR_COLOR, SEEK_MARGIN[1] - SEEK_MARGIN[0])
            # TODO: what if tip is right on ?
            # TODO: handle cases where sector_mask goes out of image
            depth_in_sector = self.depth_img * sector_mask
            occlusion_mask = (depth_in_sector < xyd_ends[1, 2] + DEPTH_MARGIN) * (depth_in_sector > 0.)
            # TODO: Handle cases where the sector is out of frame in one camera
            if occlusion_mask.sum() > MAX_OCCLUDING_PIXELS:
                is_occluded = True
            line.sector_mask = sector_mask.astype(bool)
            line.occlusion_mask = occlusion_mask
        line.tip_is_contained = (tip_is_contained != 0)
        line.is_contained = ((root_is_contained * tip_is_contained) != 0)
        line.is_occluded = is_occluded
def final_selection(self, line_list):
    """Mark lines satisfying every target length/distance/tilt criterion."""
    min_cos_z = math.cos(self.target_max_tilt * pi / 180.)
    for line in line_list:
        length_ok = self.target_min_len < line.length < self.target_max_len
        dist_ok = self.target_min_dist < line.xy_distance < self.target_max_dist
        tilt_ok = line.direction[2] >= min_cos_z
        if length_ok and dist_ok and tilt_ok:
            line.is_final = True
def bkg_postprocess(self, line_list):
    """Stretch ambiguous near-vertical lines past the tip.

    Non-final ("not good") lines that are long and upright enough, but
    either exceed the target length or have a tip leaving the image, get
    their tip extended by a fixed amount.
    """
    EXTEND_LEN = 1.
    MIN_LEN = 0.2
    min_cos_z = math.cos(self.target_max_tilt * pi / 180.)
    for line in line_list:
        if line.is_good or line.direction[2] < min_cos_z or line.length < MIN_LEN:
            continue
        too_long = line.length >= self.target_max_len
        if too_long or not line.tip_is_contained:
            line.extend_tip(EXTEND_LEN)
def remove_noise_lines(self, line_list, grid_xyzw):
    """Mark far-and-high or very short non-final lines as ignored."""
    MIN_LEN = 0.050
    n_orig = len(line_list)
    # "High" means well above the highest ground cell; "far" means beyond
    # the maximum target distance.
    z_threshold = np.max(grid_xyzw[:, :, 2]) + 0.40
    r_threshold = self.target_max_dist
    n_remove = 0
    for line in line_list:
        if line.is_good:
            continue
        far_and_high = (line.xy_distance > r_threshold
                        and line.position[2] > z_threshold)
        if far_and_high or line.length < MIN_LEN:
            line.is_ignored = True
            n_remove += 1
    print('Noise line removal : {} -> {}'.format(n_orig, n_orig - n_remove))
def print_line_info(self, line_list):
    """Dump a diagnostic table of every candidate line longer than 20 cm."""
    header = [
        '### Candidate line info #############################',
        ' Good flg=[sol, nmlc, ground, tip, ends, unoccl, final]',
        '-----------------------------------------------------',
    ]
    for text in header:
        print(text)
    for line in line_list:
        # if not (line.is_solitary and not line.is_multiline_cluster and line.is_grounded):
        if line.length < 0.200:
            continue
        flags = [
            line.is_solitary,
            not line.is_multiline_cluster,
            line.is_grounded,
            line.tip_is_contained,
            line.is_contained,
            not line.is_occluded,
            line.is_final]
        tilt_deg = math.acos(line.direction[2]) / pi * 180.
        print(' {} flg={} len={:.3f} dist={:.3f} tilt={:.1f}deg'.format(
            line.is_good, flags, line.length, line.xy_distance, tilt_deg))
    print('#####################################################')
def get_line_fit_geometry(self, line_list):
    """Build a thin cylinder mesh for every fitted line.

    Each cylinder is colored by cluster id, registered on its line via
    `add_mesh`, and collected into the returned list.  Lines with a
    non-positive length cannot be meshed and are skipped with a warning.
    (Removed a dead `if False:` debug block that added root spheres.)
    """
    mesh_cylinders = []
    for line in line_list:
        # if line.is_ignored:
        # continue
        line_color = CMAP_CLUSTER(line.cluster_id)[:3]
        if line.length <= 0.0:
            print('`line.length` has non-positive value: {}'.format(line.length))
            continue
        mesh_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=0.005, height=line.length)
        mesh_cylinder.compute_vertex_normals()
        mesh_cylinder.paint_uniform_color(line_color)
        # Shift so the cylinder grows from its root, then move into place.
        mesh_cylinder.translate([0., 0., line.length * 0.5])
        mesh_cylinder.transform(line.tfm_mtx)
        mesh_cylinders.append(mesh_cylinder)
        line.add_mesh(mesh_cylinder)
    return mesh_cylinders
| 41.644214 | 202 | 0.610443 | 22,775 | 0.94455 | 0 | 0 | 0 | 0 | 0 | 0 | 1,958 | 0.081204 |
a8a508f6eb9b35510baa4a9373e60749c24eb598 | 463 | py | Python | parsnip/exceptions.py | timmyomahony/python-parsnip | f04426dbeaf1b186ed400d1229e5e34cbb1ffea6 | [
"MIT"
] | 2 | 2017-07-27T09:19:15.000Z | 2019-10-27T11:44:27.000Z | parsnip/exceptions.py | timmyomahony/python-parsnip | f04426dbeaf1b186ed400d1229e5e34cbb1ffea6 | [
"MIT"
] | null | null | null | parsnip/exceptions.py | timmyomahony/python-parsnip | f04426dbeaf1b186ed400d1229e5e34cbb1ffea6 | [
"MIT"
] | null | null | null | class ParsnipException(Exception):
def __init__(self, msg, webtexter=None):
    # NOTE(review): Exception.__init__ is never called; `args` is assigned
    # by hand so that both values appear in the exception's args tuple —
    # confirm this is intentional before refactoring to super().__init__.
    self.args = (msg, webtexter)
    # Human-readable error message.
    self.msg = msg
    # The webtexter (network client) instance that raised; may be None.
    self.webtexter = webtexter
def __str__(self):
    """Render as repr('[NETWORK] number - message'), as before.

    Bug fix: `webtexter` defaults to None in `__init__`, but the original
    unconditionally read `self.webtexter.NETWORK_NAME`, so formatting an
    exception created without a webtexter raised AttributeError.  Fall back
    to a message-only rendering in that case.
    """
    if self.webtexter is None:
        return repr(self.msg)
    return repr("[%s] %s - %s" % (self.webtexter.NETWORK_NAME, self.webtexter.phone_number, self.msg))
class LoginError(ParsnipException):
    """ParsnipException raised when logging in to the provider fails."""


class MessageSendingError(ParsnipException):
    """ParsnipException raised when a message could not be sent."""


# NOTE(review): this name shadows the builtin ConnectionError inside this
# module; callers importing it by name are unaffected.
class ConnectionError(ParsnipException):
    """ParsnipException raised when the provider cannot be reached."""


class ResourceError(ParsnipException):
    """ParsnipException raised when a required remote resource fails."""
| 28.9375 | 100 | 0.764579 | 452 | 0.976242 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.030238 |
a8a5e0c27d2bf91d41a039f1df6281d8a3c2cf81 | 17,867 | py | Python | dataschema/schema2sql.py | NunaInc/sql_tools | 51d63ae77c18ccbfdcab703e8ebbee7c78972892 | [
"Apache-2.0"
] | 1 | 2022-03-30T19:47:16.000Z | 2022-03-30T19:47:16.000Z | dataschema/schema2sql.py | NunaInc/sql_tools | 51d63ae77c18ccbfdcab703e8ebbee7c78972892 | [
"Apache-2.0"
] | null | null | null | dataschema/schema2sql.py | NunaInc/sql_tools | 51d63ae77c18ccbfdcab703e8ebbee7c78972892 | [
"Apache-2.0"
] | 1 | 2022-03-30T04:07:12.000Z | 2022-03-30T04:07:12.000Z | #
# nuna_sql_tools: Copyright 2022 Nuna Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Converts Schema to Clickhouse specific SQL create table statement."""
import dataclasses
import os
from google.protobuf import descriptor
from dataschema import Schema, Schema_pb2, proto2schema, python2schema, strutil
from types import ModuleType
from typing import Dict, List, Optional
def GetIndent(indent: int) -> str:
    """Return `indent` spaces, used to pad generated SQL lines."""
    return indent * ' '
def GetTimestampStr(column: Schema.Column) -> str:
    """Render a DateTime64 argument list, e.g. `(3, "UTC")`, for `column`.

    Returns the empty string when the column carries no timestamp info; the
    timezone part is omitted when it is empty.
    """
    info = column.timestamp_info()
    if info is None:
        return ''
    if info.timezone:
        return f'({info.precision}, "{info.timezone}")'
    return f'({info.precision})'
# Indentation width (spaces) used when pretty-printing nested columns.
TAB_SIZE = 2
# Maps schema proto column types to their ClickHouse SQL type names.
# Parametrized types (Decimal, DateTime64) get their arguments appended by
# TableConverter._column_to_sql; BOOLEAN/BYTES map onto UInt8/String since
# ClickHouse has no dedicated types for them here.
CLICKHOUSE_TYPE_NAME = {
    Schema_pb2.ColumnInfo.TYPE_STRING: 'String',
    Schema_pb2.ColumnInfo.TYPE_BYTES: 'String',
    Schema_pb2.ColumnInfo.TYPE_BOOLEAN: 'UInt8',
    Schema_pb2.ColumnInfo.TYPE_INT_8: 'Int8',
    Schema_pb2.ColumnInfo.TYPE_INT_16: 'Int16',
    Schema_pb2.ColumnInfo.TYPE_INT_32: 'Int32',
    Schema_pb2.ColumnInfo.TYPE_INT_64: 'Int64',
    Schema_pb2.ColumnInfo.TYPE_UINT_8: 'UInt8',
    Schema_pb2.ColumnInfo.TYPE_UINT_16: 'UInt16',
    Schema_pb2.ColumnInfo.TYPE_UINT_32: 'UInt32',
    Schema_pb2.ColumnInfo.TYPE_UINT_64: 'UInt64',
    Schema_pb2.ColumnInfo.TYPE_DECIMAL: 'Decimal',
    Schema_pb2.ColumnInfo.TYPE_FLOAT_32: 'Float32',
    Schema_pb2.ColumnInfo.TYPE_FLOAT_64: 'Float64',
    Schema_pb2.ColumnInfo.TYPE_DATE: 'Date',
    Schema_pb2.ColumnInfo.TYPE_DATETIME_64: 'DateTime64',
    Schema_pb2.ColumnInfo.TYPE_NESTED: 'Nested',
    Schema_pb2.ColumnInfo.TYPE_ARRAY: 'Array',
    Schema_pb2.ColumnInfo.TYPE_SET: 'Set',
}
class TableConverter:
    """Converts a schema Table to a SQL create table statement."""

    def __init__(self, table: Schema.Table):
        self.table = table

    def _get_decimal_str(self, column: Schema.Column) -> str:
        # Chooses the narrowest ClickHouse DecimalNN type that can hold the
        # column's precision; raises for missing info or precision > 76.
        info = column.decimal_info()
        if info is None:
            raise ValueError(
                f'No decimal info for decimal column `{column.name()}`.')
        if info.precision <= 9:
            size = 32
        elif info.precision <= 18:
            size = 64
        elif info.precision <= 38:
            size = 128
        elif info.precision <= 76:
            size = 256
        else:
            raise ValueError('Decimal precision out of range for '
                             f'`{column.name()}`: {info.precision}')
        return f'Decimal{size}({info.scale})'

    def _get_timestamp_str(self, column: Schema.Column) -> str:
        # Thin wrapper over the module-level GetTimestampStr helper.
        return GetTimestampStr(column)

    def _get_compression_name(self, value: int) -> Optional[str]:
        # Maps the proto compression enum to the ClickHouse codec name;
        # returns None for unknown / unset values.
        if value == Schema_pb2.ColumnClickhouseAnnotation.COMPRESS_LZ4:
            return 'LZ4'
        if value == Schema_pb2.ColumnClickhouseAnnotation.COMPRESS_LZ4HC:
            return 'LZ4HC'
        elif value == Schema_pb2.ColumnClickhouseAnnotation.COMPRESS_ZSTD:
            return 'ZSTD'
        elif value == Schema_pb2.ColumnClickhouseAnnotation.COMPRESS_UNCOMPRESSED:
            return 'UNCOMPRESSED'
        return None

    def _get_codec(self, column: Schema.Column,
                   is_nested: bool) -> Optional[str]:
        """Extracts the Clickhouse encoding string for `column`."""
        if column.is_low_cardinality():
            # No compression for low cardinality allowed in clickhouse.
            return None
        if is_nested:
            # No compression for descendants of nested columns.
            return None
        # TODO: Support compression for de-sugared nested columns, e.g.:
        # `field.sub_field` String CODEC(ZSTD)
        codecs = []
        delta = column.clickhouse_annotation.delta_compression_width
        if delta:
            codecs.append(f'Delta({delta})')
        compression = self._get_compression_name(
            column.clickhouse_annotation.compression_type)
        if compression is None:
            # TODO: Support different default compression for nested tables,
            # currently uses default compression from parent table.
            compression = self._get_compression_name(
                self.table.clickhouse_annotation.default_compression)
        if compression is not None and compression != 'UNCOMPRESSED':
            level = column.clickhouse_annotation.compression_level
            if level:
                compression += f'({level})'
            codecs.append(compression)
        if codecs:
            return ', '.join(codecs)
        return None

    def _column_to_sql(self, column: Schema.Column, indent: int,
                       type_only: bool, is_nested: bool,
                       is_wrapped: bool = False) -> str:
        """Returns a Clickhouse SQL column specification for `column`.

        Parameters:
          column: Column specification.
          indent: Number of indentations at previous level.
          type_only: Whether or not to return only the column type.
          is_nested: Whether or not the column is a descendant of a nested column.
          is_wrapped: Whether or not the column's parent is a wrapper, such as
            Array(...). Used to indent nested columns inside wrappers.

        Returns:
          str: Clickhouse SQL column specification for `column`.
        """
        s = ''
        if not type_only:
            s += f'{GetIndent(indent)}{column.sql_name()} '
        # `end` accumulates the closing parens of the wrappers opened below.
        end = ''
        column_type = column.info.column_type
        if (column.info.label == Schema_pb2.ColumnInfo.LABEL_REPEATED and
                column_type != Schema_pb2.ColumnInfo.TYPE_MAP and
                column_type != Schema_pb2.ColumnInfo.TYPE_NESTED):
            s += 'Array('
            end += ')'
        if column.is_low_cardinality():
            s += 'LowCardinality('
            end += ')'
        # ClickHouse nested types (Nested, Tuple) cannot be inside a Nullable.
        if (column.info.label == Schema_pb2.ColumnInfo.LABEL_OPTIONAL and
                column_type != Schema_pb2.ColumnInfo.TYPE_NESTED):
            s += 'Nullable('
            end += ')'
        if column_type == Schema_pb2.ColumnInfo.TYPE_MAP:
            # Map key/value come from the first two sub-fields.
            ktype = self._column_to_sql(
                column.fields[0], 0, type_only=True, is_nested=is_nested)
            vtype = self._column_to_sql(
                column.fields[1], 0, type_only=True, is_nested=is_nested)
            s += f'Map({ktype}, {vtype})'
        elif column_type in [
                Schema_pb2.ColumnInfo.TYPE_ARRAY, Schema_pb2.ColumnInfo.TYPE_SET
        ]:
            s += self._column_to_sql(
                column.fields[0], 0, type_only=True, is_nested=is_nested,
                is_wrapped=True)
        elif column.clickhouse_annotation.type_name:
            # Explicit per-column type override from the annotation.
            s += column.clickhouse_annotation.type_name
        elif column_type == Schema_pb2.ColumnInfo.TYPE_DECIMAL:
            s += self._get_decimal_str(column)
        else:
            if column_type not in CLICKHOUSE_TYPE_NAME:
                raise KeyError(
                    f'Unknown type to convert to clickhouse: {column_type}')
            if (column_type == Schema_pb2.ColumnInfo.TYPE_NESTED and
                    column.clickhouse_annotation.nested_type_name):
                s += column.clickhouse_annotation.nested_type_name
            else:
                s += CLICKHOUSE_TYPE_NAME[column_type]
            # NOTE(review): this DECIMAL branch looks unreachable — the
            # TYPE_DECIMAL case is already consumed by the elif above;
            # confirm before removing.
            if column_type == Schema_pb2.ColumnInfo.TYPE_DECIMAL:
                s += self._get_decimal_str(column)
            elif column_type == Schema_pb2.ColumnInfo.TYPE_DATETIME_64:
                s += self._get_timestamp_str(column)
            elif column_type == Schema_pb2.ColumnInfo.TYPE_NESTED:
                # If the nested type is within a wrapper, increase indentation.
                nested_indent = (indent + (2 * TAB_SIZE) if is_wrapped else
                                 indent + TAB_SIZE)
                wrapper_indent = indent + TAB_SIZE if is_wrapped else indent
                sub_columns = []
                for sub_column in column.fields:
                    sub_columns.append(
                        self._column_to_sql(sub_column,
                                            nested_indent,
                                            type_only=False,
                                            is_nested=True))
                sub_columns_str = ',\n'.join(sub_columns)
                s += f'(\n{sub_columns_str}\n{GetIndent(wrapper_indent)})'
        s += end
        if not type_only:
            codec = self._get_codec(column, is_nested=is_nested)
            if codec is not None:
                s += f' CODEC({codec})'
        return s

    def columns_sql(self, indent: int) -> List[str]:
        """Returns a list of Clickhouse SQL column specifications."""
        columns = []
        for column in self.table.columns:
            columns.append(self._column_to_sql(
                column, indent, type_only=False, is_nested=False))
        return columns

    def table_options(self, replication_params: str) -> str:
        """Extracts Clickhouse CREATE TABLE options for this message."""
        copt = []
        force_order_by = False
        if self.table.clickhouse_annotation.HasField('engine'):
            if (self.table.clickhouse_annotation.engine ==
                    Schema_pb2.TableClickhouseAnnotation.ENGINE_MERGE_TREE):
                force_order_by = True
                copt.append('ENGINE = MergeTree()')
            elif (self.table.clickhouse_annotation.engine ==
                  Schema_pb2.TableClickhouseAnnotation.ENGINE_LOG):
                copt.append('ENGINE = Log()')
            elif (self.table.clickhouse_annotation.engine ==
                  Schema_pb2.TableClickhouseAnnotation.ENGINE_TINY_LOG):
                copt.append('ENGINE = TinyLog()')
            elif (self.table.clickhouse_annotation.engine == Schema_pb2.
                  TableClickhouseAnnotation.ENGINE_REPLICATED_MERGE_TREE):
                force_order_by = True
                copt.append(
                    f'ENGINE = ReplicatedMergeTree({replication_params})')
        if self.table.clickhouse_annotation.order_by_fields:
            order_by = ', '.join(
                self.table.clickhouse_annotation.order_by_fields)
            copt.append(f'ORDER BY ({order_by})')
        elif force_order_by:
            # MergeTree engines require an ORDER BY clause; default to the
            # empty tuple when no fields were requested.
            copt.append('ORDER BY tuple()')
        if self.table.clickhouse_annotation.partition_by_sql_expression:
            partition_by = ', '.join(
                self.table.clickhouse_annotation.partition_by_sql_expression)
            copt.append(f'PARTITION BY ({partition_by})')
        if self.table.clickhouse_annotation.sample_by_sql_expression:
            sample_by = ', '.join(
                self.table.clickhouse_annotation.sample_by_sql_expression)
            copt.append(f'SAMPLE BY ({sample_by})')
        if self.table.clickhouse_annotation.index_granularity > 0:
            ig = self.table.clickhouse_annotation.index_granularity
            copt.append(f'SETTINGS index_granularity = {ig}')
        if self.table.data_annotation.comment:
            # Escape the comment as a SQL single-quoted string via repr():
            # repr('"' + comment) yields the safely escaped body after the
            # first two characters are stripped.
            comment = "'" + repr('"' + self.table.data_annotation.comment)[2:]
            copt.append(f'COMMENT {comment}')
        return copt

    def to_sql(self,
               table_name: Optional[str] = '${database}.${table}',
               replication_params: str = '${replicationParams}',
               if_not_exists: Optional[bool] = False) -> str:
        """Returns a CREATE TABLE SQL statement for this message."""
        s = 'CREATE TABLE '
        if if_not_exists:
            s += 'IF NOT EXISTS '
        tname_str = table_name if table_name else self.table.name()
        columns_str = ',\n'.join(self.columns_sql(2))
        s += f'{tname_str} (\n{columns_str}\n)\n'
        copts = self.table_options(replication_params)
        if copts:
            copts_str = '\n'.join(copts)
            s += f'\n{copts_str}'
        return s

    def validate(self) -> bool:
        """Validates the message as a SQL table. Raises exceptions on errors."""
        return self.table.validate()
class FileConverter:
    """Converts a proto FileDescriptor to corresponding SQL table statement."""

    def __init__(self):
        # NOTE(review): `java_package` is also set by from_proto_file but is
        # not initialized here; reading it after from_module() would raise
        # AttributeError — confirm intended.
        self.name = None
        self.basename = None
        self.package = None
        self.converters = None

    def from_proto_file(
            self,
            file_descriptor: descriptor.FileDescriptor) -> 'FileConverter':
        # Builds one TableConverter per top-level message in the proto file.
        self.name = file_descriptor.name
        self.basename = strutil.StripSuffix(
            os.path.basename(file_descriptor.name), '.proto')
        self.package = file_descriptor.package
        self.java_package = file_descriptor.GetOptions().java_package
        self.converters = [
            TableConverter(proto2schema.ConvertMessage(msg))
            for msg in file_descriptor.message_types_by_name.values()
        ]
        return self

    def from_module(self, py_module: ModuleType) -> 'FileConverter':
        # Builds one TableConverter per dataclass defined in the module.
        self.name = py_module.__name__
        self.basename = strutil.StripSuffix(
            os.path.basename(py_module.__file__), '.py')
        self.package = py_module.__name__
        self.converters = [
            TableConverter(python2schema.ConvertDataclass(datacls))
            for datacls in py_module.__dict__.values()
            if dataclasses.is_dataclass(datacls)
        ]
        return self

    def get_path(self, dir_map, basename) -> str:
        """Returns directory path for saving SQL file `basename`."""
        # The package dots become directories; the first matching prefix in
        # `dir_map` selects the output root.
        end_path = os.path.join('/'.join(self.package.split('.')), basename)
        for k, v in dir_map.items():
            if self.name.startswith(k):
                return os.path.join(v, end_path)
        return end_path

    def to_sql(self,
               table_name: str = '${database}.${table}',
               replication_params: str = '${replicationParams}',
               if_not_exists: Optional[bool] = False):
        """Converts the messages in this file to several SQL CREATE TABLE."""
        result = {}
        for conv in self.converters:
            result[conv.table.name()] = conv.to_sql(table_name,
                                                    replication_params,
                                                    if_not_exists)
        return result

    def validate(self) -> bool:
        """Validates the messages and fields in this file for SQL correctness."""
        for conv in self.converters:
            conv.validate()
        return True
def ConvertTable(table: Schema.Table,
                 table_name: str = '${database}.${table}',
                 replication_params: str = '${replicationParams}',
                 if_not_exists: Optional[bool] = False) -> str:
    """Shorthand: render a single schema table as a CREATE TABLE statement."""
    converter = TableConverter(table)
    return converter.to_sql(table_name, replication_params, if_not_exists)
class SchemaConverter:
    """Converts a list of file descriptors to SQL create statements."""

    def __init__(self):
        self.file_converters = []

    def add_descriptors(self,
                        file_descriptors: List[descriptor.FileDescriptor],
                        export_only: Optional[bool] = False):
        """Registers proto files for conversion.

        With `export_only` the files are still converted (so schema errors
        surface) but not kept for SQL output.
        """
        for fd in file_descriptors:
            try:
                fc = FileConverter().from_proto_file(fd)
                if not export_only:
                    self.file_converters.append(fc)
            except ValueError as e:
                raise ValueError(f'Processing proto file: {fd.name}') from e

    def add_modules(self,
                    py_modules: List[ModuleType],
                    export_only: Optional[bool] = False):
        """Registers python dataclass modules; see add_descriptors."""
        for pym in py_modules:
            try:
                fc = FileConverter().from_module(pym)
                if not export_only:
                    self.file_converters.append(fc)
            except ValueError as e:
                # (fixed "pyton" typo in the error message)
                raise ValueError(
                    f'Processing input python module: {pym.__name__}'
                    f' / {pym.__file__}') from e

    def to_sql_files(self,
                     dir_map: Dict[str, str],
                     table_name: Optional[str] = '${database}.${table}',
                     replication_params: str = '${replicationParams}',
                     if_not_exists: Optional[bool] = False) -> Dict[str, str]:
        """Renders every registered file to an {output_path: sql_text} map."""
        files = {}
        for fc in self.file_converters:
            contents_map = fc.to_sql(table_name, replication_params,
                                     if_not_exists)
            for (crt_table_name, content) in contents_map.items():
                basename = f'{fc.basename}_{crt_table_name}.sql'
                path = fc.get_path(dir_map, basename)
                full_contents = f"""
--------------------------------------------------------------------------------
--
-- {path}
-- Generated from: {fc.name} / {crt_table_name}
--
{content}
"""
                files[path] = full_contents
        return files

    def validate(self) -> List[str]:
        """Validates the files in for SQL correctness. Returns a list of errors."""
        errors = []
        for fc in self.file_converters:
            try:
                fc.validate()
            except ValueError as e:
                # Bug fix: FileConverter stores the source name as `fc.name`;
                # the original read the nonexistent `fc.file_descriptor.name`,
                # raising AttributeError while formatting the error report.
                errors.extend([f'{fc.name}: ERROR: {arg}' for arg in e.args])
        if errors:
            return errors
        return None
| 41.745327 | 83 | 0.597022 | 15,342 | 0.858678 | 0 | 0 | 0 | 0 | 0 | 0 | 4,123 | 0.230761 |
a8a60a167cdfc1752d84114af1252a56c3863ffe | 4,701 | py | Python | fca/algorithms/incremental.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | 3 | 2015-09-07T00:16:16.000Z | 2019-01-11T20:27:56.000Z | fca/algorithms/incremental.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | null | null | null | fca/algorithms/incremental.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Holds implementation of Norris' algorithm"""
from copy import copy
from fca import Concept, ConceptSystem,ConceptLink
#from fca.algorithms.incremental import derivation
def add_object(object, intent, lattice):
    """Incrementally insert one new object into an existing concept lattice.

    Godin-style incremental update: buckets the current concepts by intent
    cardinality, marks "modified" concepts whose intent is contained in the
    new object's intent, and creates "generator" concepts (plus edges) for
    the rest.  The lattice is mutated in place; nothing is returned.

    Parameters:
        object  -- the new object x* (name shadows the builtin ``object``).
        intent  -- set of attributes f({x*}) of the new object.
        lattice -- the ConceptLattice to update in place.

    NOTE(review): this function uses the Python 2 ``print`` statement and
    therefore will not parse under Python 3 as written.
    """
    sup_g = lattice.get_bottom_concept()
    # NOTE(review): the union below is computed and then discarded -- this
    # line is a no-op as written; presumably ``sup_g.intent |= intent`` was
    # intended.
    sup_g.intent | intent #{Adjust (sup(G)) for new elements in E'}
    if len(sup_g.intent) == 0 and len(sup_g.extent) == 0: # IF sup(G) = (Ø, Ø) THEN
        # NOTE(review): ``set(object)`` iterates the object -- for a string
        # like 'snake' this yields its characters; ``set([object])`` was
        # probably intended (compare the set([object]) used further below).
        sup_g = Concept(intent=intent, extent=set(object))#Replace sup(G) by: ({x*},f({x*}))
    else:
        if not intent <= sup_g.intent: #IF NOT (f*({x*}) ⊆ X'(sup(G))) THEN
            if len(sup_g.extent) == 0: # IF X(sup(G)) = Ø THEN X'(sup(G)) := X'(sup(G)) ∪ f({x*})
                sup_g.intent |= intent
            else:
                h = Concept(extent=set(), intent=(sup_g.intent | intent)) #Add new pair H {becomes sup(G*)}: (Ø,X'(sup(G)) ∪ f({x*}));
                lattice._concepts.append(h)
                cl = ConceptLink(_parent=sup_g, _child=h) #Add new edge sup(G)->H
                lattice._links.append(cl)
    # Bucket all existing concepts by the size of their intent.
    #C[i] <- {H: ||X'(H)||=i}; {Class pairs in buckets with same cardinality of the X' sets}
    # NOTE(review): the indexing ``card[i]`` below assumes every cardinality
    # from 0..max occurs at least once; a missing cardinality would shift the
    # buckets out of alignment -- verify against real lattices.
    card = []
    sorted_card = sorted(lattice._concepts, key=lambda c: len(c.intent))
    cur_card = 0;
    row = []
    for c in sorted_card:
        if cur_card == len(c.intent):
            row.append(c)
        else:
            cur_card = len(c.intent)
            card.append(row)
            row = [c] #TODO does it empty ??
    card.append(row)
    max_card = len(sorted_card[-1].intent)
    #card_prime = []# C'[i] <- Ø; {Initialize the C' sets}
    card_prime = [ [] for i in range(0,max_card+1)]
    for i in range(0,max_card+1): # TODO does it take the max? # FOR i : 0 TO maximum cardinality DO
        for h in card[i]:#FOR each pair H in C[i]
            if h.intent <= intent: #IF X'(H) ⊆ f({x*}) THEN
                print "modified pair:"+str(h.intent) # {modified pair}
                h.extent.add(object)#Add x* to X(H);
                # NOTE(review): ``insert`` creates a NEW bucket at position i
                # and shifts the rest right; the indexed append used for
                # generators below suggests ``card_prime[i].append(h)`` was
                # intended instead.
                card_prime.insert(i,[h]) # Add H to C'[i] ;
                if h.intent == intent: #IF X'(H) = f({x*}) THEN exit algorithm
                    return
            else:#{old pair}
                # NOTE(review): ``int`` shadows the builtin type for the rest
                # of this scope.
                int = h.intent & intent# int <- X'(H) ∩ f({x*});
                #IF ¬∃ H1 ∈ C'[||int||] such that X'(H1)=Int THEN {H is a generator}
                exists_h1 = False
                for c_p in card_prime[len(int)]:
                    if c_p.intent == int:
                        exists_h1 = True
                        break
                if not exists_h1:
                    h_n = Concept(extent=(h.extent | set([object])), intent=int)#Create new pair Hn= (X(H) ∪{x*},int) and add to C'[||int||];
                    if not card_prime[len(int)]:
                        card_prime[len(int)] = []
                    card_prime[len(int)].append(h_n)
                    cl2 = ConceptLink(_parent=h_n, _child=h) #Add edge Hn -> H;
                    lattice._links.append(cl2)
                    #{Modify edges}
                    for j in range(0,len(int)):#FOR j : 0 TO ||int||-1
                        for h_a in card_prime[j]:#FOR each Ha ∈ C'[j]
                            if h_a.intent < int:#IF X'(Ha ) ⊂ int {Ha is a potential parent of Hn}
                                parent = True
                                for h_d in lattice.children(h_a): #FOR each Hd child of Ha
                                    if h_d.intent < int: #IF X'(Hd ) ⊂ Int parent<-false; exit FOR END IF
                                        parent = False
                                        break
                                if parent: #IF parent
                                    # NOTE(review): list.index raises ValueError
                                    # when h_a is absent; a membership test
                                    # (``h_a in lattice.parents(h)``) was
                                    # probably intended.
                                    if lattice.parents(h).index(h_a) >= 0: #IF Ha is a parent of H
                                        lattice.unlink(h_a, h)#eliminate edge Ha->H END IF;
                                    cl3 = ConceptLink(_parent=h_a, _child=h_n) #Add edge Ha->Hn
                                    lattice._links.append(cl3)
                    if int == intent: # IF Int=f*({x*}) THEN exit algorithm END IF
                        return
def test_incremental():
    """Smoke test: build a small animal/attribute lattice, then add 'snake'.

    The context has 5 objects x 4 attributes; the added object carries one
    existing attribute ('preying') and one brand-new one ('reptile').
    """
    from fca import ConceptLattice, Context
    ct = [[True, True, False, False], [False, False, True, True],
          [True, False, True, True], [False, True, False, False],
          [False, False, False, True]]
    objs = ['lion', 'finch', 'eagle', 'hare', 'ostrich']
    attrs = ['preying', 'mammal', 'flying', 'bird']
    c = Context()
    c._table = ct
    c._attributes = attrs
    c._objects = objs
    cl = ConceptLattice(c, None)
    cl._context = c
    cl.compute_lattice()
    # BUG FIX: add_object is declared as add_object(object, intent, lattice);
    # the original call passed the Context as a spurious 4th argument, which
    # raises TypeError before the algorithm even runs.
    add_object('snake', set(['preying', 'reptile']), cl)
| 43.12844 | 137 | 0.496703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,385 | 0.292874 |
a8a7d8e6afe65edaedc8265f8a3b5df9ef15cdd9 | 2,714 | py | Python | adaptive/tests/test_pickling.py | AlvaroGI/adaptive | 7b316c9f2972fe9ba768def86117903b90b140e5 | [
"BSD-3-Clause"
] | null | null | null | adaptive/tests/test_pickling.py | AlvaroGI/adaptive | 7b316c9f2972fe9ba768def86117903b90b140e5 | [
"BSD-3-Clause"
] | null | null | null | adaptive/tests/test_pickling.py | AlvaroGI/adaptive | 7b316c9f2972fe9ba768def86117903b90b140e5 | [
"BSD-3-Clause"
] | null | null | null | import pickle
import pytest
from adaptive.learner import (
AverageLearner,
BalancingLearner,
DataSaver,
IntegratorLearner,
Learner1D,
Learner2D,
SequenceLearner,
)
from adaptive.runner import simple
try:
import cloudpickle
with_cloudpickle = True
except ModuleNotFoundError:
with_cloudpickle = False
try:
import dill
with_dill = True
except ModuleNotFoundError:
with_dill = False
def goal_1(learner):
    """Stopping condition: the learner holds exactly 10 points."""
    target = 10
    return learner.npoints == target
def goal_2(learner):
    """Stopping condition: the learner holds exactly 20 points."""
    target = 20
    return learner.npoints == target
def pickleable_f(x):
    """Deterministically map *x* to a float in [-1, 1).

    A plain ``def`` at module level, so the stdlib ``pickle`` can serialize
    it by reference.
    """
    key = str(x)
    return hash(key) / 2 ** 63
# Deliberately a lambda: the stdlib ``pickle`` cannot serialize lambdas, so
# this function is only paired with cloudpickle/dill in ``serializers`` below.
nonpickleable_f = lambda x: hash(str(x)) / 2 ** 63  # noqa: E731
def identity_function(x):
    """Return *x* unchanged (used as the DataSaver ``arg_picker``)."""
    return x
def datasaver(f, learner_type, learner_kwargs):
    """Construct a learner and wrap it in a DataSaver."""
    inner = learner_type(f, **learner_kwargs)
    return DataSaver(learner=inner, arg_picker=identity_function)
def balancing_learner(f, learner_type, learner_kwargs):
    """Build a BalancingLearner over two identically-configured children."""
    children = [learner_type(f, **learner_kwargs) for _ in range(2)]
    return BalancingLearner(children)
# (learner class/factory, constructor kwargs) pairs covering every learner
# variant exercised by the serialization test below.
learners_pairs = [
    (Learner1D, dict(bounds=(-1, 1))),
    (Learner2D, dict(bounds=[(-1, 1), (-1, 1)])),
    (SequenceLearner, dict(sequence=list(range(100)))),
    (IntegratorLearner, dict(bounds=(0, 1), tol=1e-3)),
    (AverageLearner, dict(atol=0.1)),
    (datasaver, dict(learner_type=Learner1D, learner_kwargs=dict(bounds=(-1, 1)))),
    (
        balancing_learner,
        dict(learner_type=Learner1D, learner_kwargs=dict(bounds=(-1, 1))),
    ),
]
# Each serializer is paired with a function it can serialize: stdlib pickle
# needs the plain ``def``; cloudpickle/dill (when installed) also handle the
# lambda defined above.
serializers = [(pickle, pickleable_f)]
if with_cloudpickle:
    serializers.append((cloudpickle, nonpickleable_f))
if with_dill:
    serializers.append((dill, nonpickleable_f))
# Cartesian product of available serializers with all learner variants, in
# the argument order expected by @pytest.mark.parametrize below.
learners = [
    (learner_type, learner_kwargs, serializer, f)
    for serializer, f in serializers
    for learner_type, learner_kwargs in learners_pairs
]
@pytest.mark.parametrize("learner_type, learner_kwargs, serializer, f", learners)
def test_serialization_for(learner_type, learner_kwargs, serializer, f):
    """Test serializing a learner using different serializers.

    Round-trips a partially-run learner through ``serializer`` and checks
    that npoints, loss, data, and the next batch of asked points all survive
    the round trip.
    """
    learner = learner_type(f, **learner_kwargs)
    simple(learner, goal_1)  # run the learner until it holds 10 points
    # Snapshot is taken BEFORE ask(): the deserialized copy must reproduce
    # the same ask() result from the same internal state.
    learner_bytes = serializer.dumps(learner)
    loss = learner.loss()
    asked = learner.ask(10)
    data = learner.data
    # Drop the originals so the assertions below can only be satisfied by
    # state reconstructed from the serialized bytes.
    del f
    del learner
    learner_loaded = serializer.loads(learner_bytes)
    assert learner_loaded.npoints == 10
    assert loss == learner_loaded.loss()
    assert data == learner_loaded.data
    assert asked == learner_loaded.ask(10)
    # load again to undo the ask
    learner_loaded = serializer.loads(learner_bytes)
    simple(learner_loaded, goal_2)  # the restored learner can keep running
    assert learner_loaded.npoints == 20
| 23.396552 | 83 | 0.70339 | 0 | 0 | 0 | 0 | 830 | 0.305822 | 0 | 0 | 146 | 0.053795 |