blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5ab5578bf208327a1eff085990fe8f1e60f3c230 | 05abb78c60a69422ae3e00a542bbd4573faf8174 | /exercicios-pythonBrasil/estrutura-de-decisao/ex4.py | af8c4d8e04260c01583f8f17167d8136b44e919b | [] | no_license | xuting1108/Programas-de-estudo | 72b812d52f5b130a95103c38dbe9e471dc5aa6f9 | 01fe21097055d69c2115cff3da2199429e87dead | refs/heads/master | 2022-10-20T17:06:14.517643 | 2019-04-08T11:16:12 | 2019-04-08T11:16:12 | 179,678,721 | 0 | 1 | null | 2022-10-09T13:13:57 | 2019-04-05T12:38:23 | Python | UTF-8 | Python | false | false | 228 | py | #Faça um Programa que verifique se uma letra digitada é vogal ou consoante.
letra = input('digite uma letra: ')
vogal = ['a','e','i','o','u']
if letra in vogal:
print('a letra é vogal')
else:
print('a letra é consoante') | [
"xuting1108@hotmail.com"
] | xuting1108@hotmail.com |
09b99da8cdfe5943e6af2c2b2a60573ceaa7b344 | 446688fabdd0f2932ff4d23a41221b21277cae38 | /tests/nodes/styles/test_manager.py | 356d5c7f4010f497bffe8f0de251ed102e62a0ee | [] | no_license | eyelidsup/Houdini-Toolbox | 529f5a8c135dfdc6996085149ebb5f4aa71ceafd | f5e8729c2d9ac7dd296ec3aaa6cac702e37b8100 | refs/heads/master | 2020-12-08T06:20:26.310716 | 2020-01-09T15:16:45 | 2020-01-09T15:16:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,850 | py | """Tests for ht.nodes.styles.module module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library Imports
import imp
import os
# Third Party Imports
import pytest
# Houdini Toolbox Imports
from ht.nodes.styles import constants as consts
from ht.nodes.styles import manager
# Houdini Imports
import hou
# Reload the module to test to capture load evaluation since it has already
# been loaded.
imp.reload(manager)
# =============================================================================
# FIXTURES
# =============================================================================
@pytest.fixture
def init_manager(mocker):
"""Fixture to initialize a style constant."""
mocker.patch.object(manager.StyleManager, "__init__", lambda x: None)
def _create():
return manager.StyleManager()
return _create
# =============================================================================
# CLASSES
# =============================================================================
class Test_StyleManager(object):
"""Test ht.nodes.styles.manager.StyleManager."""
def test___init__(self, mocker):
"""Test the constructor."""
mock_build = mocker.patch.object(manager.StyleManager, "_build")
mgr = manager.StyleManager()
assert mgr._constants == {}
assert mgr._name_rules == {}
assert mgr._node_type_rules == {}
assert mgr._tool_rules == {}
mock_build.assert_called()
# Properties
def test_constants(self, init_manager, mocker):
"""Test the 'constants' property."""
mock_value = mocker.MagicMock(spec=dict)
mgr = init_manager()
mgr._constants = mock_value
assert mgr.constants == mock_value
def test_name_rules(self, init_manager, mocker):
"""Test the 'name_rules' property."""
mock_value = mocker.MagicMock(spec=dict)
mgr = init_manager()
mgr._name_rules = mock_value
assert mgr.name_rules == mock_value
def test_node_type_rules(self, init_manager, mocker):
"""Test the 'node_type_rules' property."""
mock_value = mocker.MagicMock(spec=dict)
mgr = init_manager()
mgr._node_type_rules = mock_value
assert mgr.node_type_rules == mock_value
def test_tool_rules(self, init_manager, mocker):
"""Test the 'tool_rules' property."""
mock_value = mocker.MagicMock(spec=dict)
mgr = init_manager()
mgr._tool_rules = mock_value
assert mgr.tool_rules == mock_value
# Non-Public Methods
# _build
def test__build(self, init_manager, mocker):
"""Test building all the data from files."""
mock_find = mocker.patch("ht.nodes.styles.manager._find_files")
mock_load = mocker.patch("ht.nodes.styles.manager.json.load")
mock_build_consts = mocker.patch.object(manager.StyleManager, "_build_constants_from_data")
mock_build_rules = mocker.patch.object(manager.StyleManager, "_build_rules_from_data")
path1 = mocker.MagicMock(spec=str)
path2 = mocker.MagicMock(spec=str)
# Put files in reversed alphabetical order so they will be sorted
# opposite of their current order in the function.
mock_files = (path2, path1)
mock_find.return_value = mock_files
mgr = init_manager()
mock_load.side_effect = ({"key1": "value1"}, {"key2": "value2"})
expected_data = [
{"key1": "value1", consts.PATH_KEY: path1},
{"key2": "value2", consts.PATH_KEY: path2},
]
mock_handle = mocker.mock_open()
mocker.patch("__builtin__.open", mock_handle)
mgr._build()
mock_load.assert_called_with(mock_handle())
mock_handle.assert_any_call(path1)
mock_handle.assert_any_call(path2)
mock_build_consts.assert_called_with(expected_data)
mock_build_rules.assert_called_with(expected_data)
# _build_constants_from_data
def test__build_constants_from_data(self, init_manager, mocker):
"""Test building StyleConstants from data."""
mock_build_color = mocker.patch("ht.nodes.styles.manager._build_color")
mock_build_shape = mocker.patch("ht.nodes.styles.manager._build_shape")
mock_constant = mocker.patch("ht.nodes.styles.manager.StyleConstant", autospec=True)
mock_rule1 = mocker.MagicMock(spec=dict)
mock_rule2 = mocker.MagicMock(spec=dict)
mock_color1 = mocker.MagicMock(spec=hou.Color)
mock_color2 = mocker.MagicMock(spec=hou.Color)
color_type1 = mocker.MagicMock(spec=str)
color_type2 = mocker.MagicMock(spec=str)
colors = {
mock_rule1: (mock_color1, color_type1),
mock_rule2: (mock_color2, color_type2)
}
mock_build_color.side_effect = lambda e: colors[e]
shape1 = mocker.MagicMock(spec=str)
shape2 = mocker.MagicMock(spec=str)
shapes = {
mock_rule1: shape1,
mock_rule2: shape2
}
mock_build_shape.side_effect = lambda e: shapes[e]
path = mocker.MagicMock(spec=str)
name1 = mocker.MagicMock(spec=str)
name2 = mocker.MagicMock(spec=str)
all_data = [
{
consts.PATH_KEY: path,
consts.CONSTANT_DEFINITION_KEY: {
name1: mock_rule1,
name2: mock_rule2,
}
}
]
constants = {}
mgr = init_manager()
type(mgr).constants = mocker.PropertyMock(return_value=constants)
mgr._build_constants_from_data(all_data)
mock_build_color.assert_has_calls([mocker.call(mock_rule1), mocker.call(mock_rule2)], any_order=True)
mock_build_shape.assert_has_calls([mocker.call(mock_rule1), mocker.call(mock_rule2)], any_order=True)
calls = [
mocker.call(name1, mock_color1, color_type1, shape1, path),
mocker.call(name2, mock_color2, color_type2, shape2, path),
]
mock_constant.assert_has_calls(calls, any_order=True)
assert constants[name1] == mock_constant.return_value
assert constants[name2] == mock_constant.return_value
def test__build_constants_from_data__no_constants(self, init_manager, mocker):
"""Test building StyleConstants from data when there are no constant definitions."""
mock_build_color = mocker.patch("ht.nodes.styles.manager._build_color")
mock_build_shape = mocker.patch("ht.nodes.styles.manager._build_shape")
mock_constant = mocker.patch("ht.nodes.styles.manager.StyleConstant", autospec=True)
all_data = [{consts.PATH_KEY: mocker.MagicMock(spec=str)}]
mgr = init_manager()
mgr._build_constants_from_data(all_data)
mock_build_color.assert_not_called()
mock_build_shape.assert_not_called()
mock_constant.assert_not_called()
# _build_rules_from_data
def test__build_rules_from_data__no_rules(self, init_manager, mocker):
"""Test building rules from data when the data contains no rules."""
mock_build = mocker.patch("ht.nodes.styles.manager._build_category_rules")
all_data = [{consts.PATH_KEY: mocker.MagicMock(spec=str)}]
mgr = init_manager()
mgr._build_rules_from_data(all_data)
mock_build.assert_not_called()
def test__build_rules_from_data__names(self, init_manager, mocker):
"""Test building rules from data when the data contains name rules."""
mock_build = mocker.patch("ht.nodes.styles.manager._build_category_rules")
mock_constants = mocker.patch.object(manager.StyleManager, "constants", new_callable=mocker.PropertyMock)
mock_rule1 = mocker.MagicMock(spec=dict)
mock_rule2 = mocker.MagicMock(spec=dict)
path = mocker.MagicMock(spec=str)
all_data = [
{
consts.PATH_KEY: path,
consts.RULES_KEY: {
"name_rules": {"Sop": [mock_rule1, mock_rule2]},
}
}
]
mgr = init_manager()
mgr._name_rules = {}
mgr._build_rules_from_data(all_data)
assert mgr.name_rules["Sop"] == {}
mock_build.assert_called_with([mock_rule1, mock_rule2], {}, path, mock_constants.return_value)
def test__build_rules_from_data__nodes(self, init_manager, mocker):
"""Test building rules from data when the data contains node type rules."""
mock_build = mocker.patch("ht.nodes.styles.manager._build_category_rules")
mock_constants = mocker.patch.object(manager.StyleManager, "constants", new_callable=mocker.PropertyMock)
mock_rule1 = mocker.MagicMock(spec=dict)
mock_rule2 = mocker.MagicMock(spec=dict)
path = mocker.MagicMock(spec=str)
all_data = [
{
consts.PATH_KEY: path,
consts.RULES_KEY: {
"node_type_rules": {"Sop": [mock_rule1, mock_rule2]},
}
}
]
mgr = init_manager()
mgr._node_type_rules = {}
mgr._build_rules_from_data(all_data)
assert mgr.node_type_rules["Sop"] == {}
mock_build.assert_called_with([mock_rule1, mock_rule2], {}, path, mock_constants.return_value)
def test__build_rules_from_data__tools(self, init_manager, mocker):
"""Test building rules from data when the data contains tool rules."""
mock_build = mocker.patch("ht.nodes.styles.manager._build_category_rules")
mock_constants = mocker.patch.object(manager.StyleManager, "constants", new_callable=mocker.PropertyMock)
mock_rule1 = mocker.MagicMock(spec=dict)
mock_rule2 = mocker.MagicMock(spec=dict)
path = mocker.MagicMock(spec=str)
all_data = [
{
consts.PATH_KEY: path,
consts.RULES_KEY: {"tool_rules": {"Sop": [mock_rule1, mock_rule2]}}
}
]
mgr = init_manager()
mgr._tool_rules = {}
mgr._build_rules_from_data(all_data)
assert mgr.tool_rules["Sop"] == {}
mock_build.assert_called_with([mock_rule1, mock_rule2], {}, path, mock_constants.return_value)
# _get_manager_generator_style
def test__get_manager_generator_style_manager__none(self, init_manager, mocker):
"""Test getting a style for a manager when no style exists."""
mock_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
mock_rules.return_value = {"Sop": {}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.isManager.return_value = True
mgr = init_manager()
result = mgr._get_manager_generator_style(mock_type)
assert result is None
mock_resolve.assert_not_called()
def test__get_manager_generator_style_manager__category(self, init_manager, mocker):
"""Test getting a style for a manager where a specific category style exists."""
mock_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
mock_rule = mocker.MagicMock()
mock_rules.return_value = {"Sop": {consts.MANAGER_TYPE_KEY: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.isManager.return_value = True
mock_type.isGenerator.return_value = False
mgr = init_manager()
result = mgr._get_manager_generator_style(mock_type)
assert result == mock_resolve.return_value
mock_resolve.assert_called_with(mock_rule)
def test__get_manager_generator_style__manager_all(self, init_manager, mocker):
"""Test getting a style for a manager where a generic 'all' style exists."""
mock_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
mock_rule = mocker.MagicMock()
mock_rules.return_value = {"all": {consts.MANAGER_TYPE_KEY: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.isManager.return_value = True
mock_type.isGenerator.return_value = False
mgr = init_manager()
result = mgr._get_manager_generator_style(mock_type)
assert result == mock_resolve.return_value
mock_resolve.assert_called_with(mock_rule)
def test__get_manager_generator_style__generator_none(self, init_manager, mocker):
"""Test getting a style for a generator when no style exists."""
mock_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
mock_rules.return_value = {"Sop": {}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.isGenerator.return_value = True
mock_type.isManager.return_value = False
mgr = init_manager()
result = mgr._get_manager_generator_style(mock_type)
assert result is None
mock_resolve.assert_not_called()
def test__get_manager_generator_style__generator_category(self, init_manager, mocker):
"""Test getting a style for a generator where a specific category style exists."""
mock_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
mock_rule = mocker.MagicMock()
mock_rules.return_value = {"Sop": {consts.GENERATOR_TYPE_KEY: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.isGenerator.return_value = True
mock_type.isManager.return_value = False
mgr = init_manager()
result = mgr._get_manager_generator_style(mock_type)
assert result == mock_resolve.return_value
mock_resolve.assert_called_with(mock_rule)
def test__get_manager_generator_style__generator_all(self, init_manager, mocker):
"""Test getting a style for a generator where a generic 'all' style exists."""
mock_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
mock_rule = mocker.MagicMock()
mock_rules.return_value = {consts.ALL_CATEGORY_KEY: {consts.GENERATOR_TYPE_KEY: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.isGenerator.return_value = True
mock_type.isManager.return_value = False
mgr = init_manager()
result = mgr._get_manager_generator_style(mock_type)
assert result == mock_resolve.return_value
mock_resolve.assert_called_with(mock_rule)
def test__get_manager_generator_style__error(self, init_manager, mocker):
"""Test getting a style when the node type is neither a manager or generator."""
mock_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_rule = mocker.MagicMock()
mock_rules.return_value = {consts.ALL_CATEGORY_KEY: {consts.GENERATOR_TYPE_KEY: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.isGenerator.return_value = False
mock_type.isManager.return_value = False
mgr = init_manager()
with pytest.raises(ValueError):
mgr._get_manager_generator_style(mock_type)
# _get_name_style
def test__get_name_style__category(self, init_manager, mocker):
"""Test getting a node name style which matches a specific category."""
mock_name_rules = mocker.patch.object(manager.StyleManager, "name_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=True)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
style_name = mocker.MagicMock(spec=str)
node_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_name_rules.return_value = {"Sop": {node_name: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_node = mocker.MagicMock(spec=hou.Node)
mock_node.name.return_value = node_name
mock_node.type.return_value = mock_type
mgr = init_manager()
result = mgr._get_name_style(mock_node)
assert result == mock_resolve.return_value
mock_match.assert_called_with(style_name, node_name)
mock_resolve.assert_called_with(mock_rule)
def test__get_name_style__all(self, init_manager, mocker):
"""Test getting a node name style which matches the generic 'all' category."""
mock_name_rules = mocker.patch.object(manager.StyleManager, "name_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=True)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
style_name = mocker.MagicMock(spec=str)
node_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_name_rules.return_value = {consts.ALL_CATEGORY_KEY: {node_name: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_node = mocker.MagicMock(spec=hou.Node)
mock_node.name.return_value = node_name
mock_node.type.return_value = mock_type
mgr = init_manager()
result = mgr._get_name_style(mock_node)
assert result == mock_resolve.return_value
mock_match.assert_called_with(style_name, node_name)
mock_resolve.assert_called_with(mock_rule)
def test__get_name_style__no_match(self, init_manager, mocker):
"""Test getting a node name style that does not match any rules."""
mock_name_rules = mocker.patch.object(manager.StyleManager, "name_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=False)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
style_name = mocker.MagicMock(spec=str)
node_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_name_rules.return_value = {consts.ALL_CATEGORY_KEY: {node_name: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_node = mocker.MagicMock(spec=hou.Node)
mock_node.name.return_value = node_name
mock_node.type.return_value = mock_type
mgr = init_manager()
result = mgr._get_name_style(mock_node)
assert result is None
mock_match.assert_called_with(style_name, node_name)
mock_resolve.assert_not_called()
# _get_node_type_style
def test__get_node_type_style__category(self, init_manager, mocker):
"""Test getting a node type style which matches a specific category."""
mock_node_type_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=True)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
style_name = mocker.MagicMock(spec=str)
node_type_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_node_type_rules.return_value = {"Sop": {node_type_name: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.nameComponents.return_value = ("", "", node_type_name, "1")
mgr = init_manager()
result = mgr._get_node_type_style(mock_type)
assert result == mock_resolve.return_value
mock_match.assert_called_with(style_name, node_type_name)
mock_resolve.assert_called_with(mock_rule)
def test__get_node_type_style__all(self, init_manager, mocker):
"""Test getting a node type style which matches the generic 'all' category."""
mock_node_type_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=True)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
style_name = mocker.MagicMock(spec=str)
node_type_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_node_type_rules.return_value = {consts.ALL_CATEGORY_KEY: {node_type_name: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.nameComponents.return_value = ("", "", node_type_name, "1")
mgr = init_manager()
result = mgr._get_node_type_style(mock_type)
assert result == mock_resolve.return_value
mock_match.assert_called_with(style_name, node_type_name)
mock_resolve.assert_called_with(mock_rule)
def test__get_node_type_style__no_match(self, init_manager, mocker):
"""Test getting a node type style that does not match any rules."""
mock_node_type_rules = mocker.patch.object(manager.StyleManager, "node_type_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=False)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
style_name = mocker.MagicMock(spec=str)
node_type_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_node_type_rules.return_value = {consts.ALL_CATEGORY_KEY: {node_type_name: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mock_type.nameComponents.return_value = ("", "", node_type_name, "1")
mgr = init_manager()
result = mgr._get_node_type_style(mock_type)
assert result is None
mock_match.assert_called_with(style_name, node_type_name)
mock_resolve.assert_not_called()
# _get_tool_style
def test__get_tool_style__category(self, init_manager, mocker):
"""Test getting a tool style which matches a specific category."""
mock_get_locations = mocker.patch("ht.nodes.styles.manager._get_tool_menu_locations")
mock_tool_rules = mocker.patch.object(manager.StyleManager, "tool_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=True)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
location = mocker.MagicMock(spec=str)
locations = (location, )
mock_get_locations.return_value = locations
style_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_tool_rules.return_value = {"Sop": {location: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mgr = init_manager()
result = mgr._get_tool_style(mock_type)
assert result == mock_resolve.return_value
mock_get_locations.assert_called_with(mock_type)
mock_match.assert_called_with(style_name, location)
mock_resolve.assert_called_with(mock_rule)
def test__get_tool_style__all(self, init_manager, mocker):
"""Test getting a tool style which matches the generic 'all' category."""
mock_get_locations = mocker.patch("ht.nodes.styles.manager._get_tool_menu_locations")
mock_tool_rules = mocker.patch.object(manager.StyleManager, "tool_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=True)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
location = mocker.MagicMock(spec=str)
mock_get_locations.return_value = (location, )
style_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_tool_rules.return_value = {consts.ALL_CATEGORY_KEY: {location: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mgr = init_manager()
mock_match.return_value = True
result = mgr._get_tool_style(mock_type)
assert result == mock_resolve.return_value
mock_get_locations.assert_called_with(mock_type)
mock_match.assert_called_with(style_name, location)
mock_resolve.assert_called_with(mock_rule)
def test__get_tool_style__no_match(self, init_manager, mocker):
"""Test getting a tool style that does not match any rules."""
mock_get_locations = mocker.patch("ht.nodes.styles.manager._get_tool_menu_locations")
mock_tool_rules = mocker.patch.object(manager.StyleManager, "tool_rules", new_callable=mocker.PropertyMock)
mock_match = mocker.patch("hou.patternMatch", return_value=False)
mock_resolve = mocker.patch.object(manager.StyleManager, "_resolve_rule")
location = mocker.MagicMock(spec=str)
mock_locations = (location, )
mock_get_locations.return_value = mock_locations
style_name = mocker.MagicMock(spec=str)
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
type(mock_rule).name = mocker.PropertyMock(return_value=style_name)
mock_tool_rules.return_value = {consts.ALL_CATEGORY_KEY: {location: mock_rule}}
mock_category = mocker.MagicMock(spec=hou.NodeTypeCategory)
mock_category.name.return_value = "Sop"
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.category.return_value = mock_category
mgr = init_manager()
result = mgr._get_tool_style(mock_type)
assert result is None
mock_get_locations.assert_called_with(mock_type)
mock_match.assert_called_with(style_name, location)
mock_resolve.assert_not_called()
# _resolve_rule
def test__resolve_rule__style(self, init_manager, mocker):
"""Test resolving a rule which is a StyleRule."""
mock_rule = mocker.MagicMock(spec=manager.StyleRule)
mgr = init_manager()
result = mgr._resolve_rule(mock_rule)
assert result == mock_rule
def test__resolve_rule__constant(self, init_manager, mocker):
"""Test resolving a rule which is a ConstantRule."""
mock_constants = mocker.patch.object(manager.StyleManager, "constants", new_callable=mocker.PropertyMock)
constant_name = mocker.MagicMock(spec=str)
mock_constant = mocker.MagicMock(spec=manager.StyleConstant)
mock_constants.return_value = {constant_name: mock_constant}
mock_rule = mocker.MagicMock(spec=manager.ConstantRule)
type(mock_rule).constant_name = mocker.PropertyMock(return_value=constant_name)
mgr = init_manager()
result = mgr._resolve_rule(mock_rule)
assert result == mock_constant
# style_node
def test_style_node__by_type(self, init_manager, mocker):
"""Style a node by the node type."""
mock_get_type = mocker.patch.object(manager.StyleManager, "_get_node_type_style")
mock_get_tool = mocker.patch.object(manager.StyleManager, "_get_tool_style")
mock_style = mocker.MagicMock(spec=manager.StyleRule)
mock_get_type.return_value = mock_style
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.isGenerator.return_value = False
mock_type.isManager.return_value = False
mock_node = mocker.MagicMock(spec=hou.Node)
mock_node.type.return_value = mock_type
mgr = init_manager()
mgr.style_node(mock_node)
mock_style.apply_to_node.assert_called_with(mock_node)
mock_get_type.assert_called_with(mock_type)
mock_get_tool.assert_not_called()
mock_type.isManager.assert_not_called()
def test_style_node__by_tool(self, init_manager, mocker):
"""Style a node by the tool menu location."""
mock_get_type = mocker.patch.object(manager.StyleManager, "_get_node_type_style", return_value=None)
mock_get_tool = mocker.patch.object(manager.StyleManager, "_get_tool_style")
mock_style = mocker.MagicMock(spec=manager.StyleRule)
mock_get_tool.return_value = mock_style
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.isGenerator.return_value = False
mock_type.isManager.return_value = False
mock_node = mocker.MagicMock(spec=hou.Node)
mock_node.type.return_value = mock_type
mgr = init_manager()
mgr.style_node(mock_node)
mock_style.apply_to_node.assert_called_with(mock_node)
mock_get_type.assert_called_with(mock_type)
mock_get_tool.assert_called_with(mock_type)
mock_type.isManager.assert_not_called()
def test_style_node__by_manager(self, init_manager, mocker):
"""Style a node because it is a manager."""
mock_get_type = mocker.patch.object(manager.StyleManager, "_get_node_type_style", return_value=None)
mock_get_tool = mocker.patch.object(manager.StyleManager, "_get_tool_style", return_value=None)
mock_get_manager = mocker.patch.object(manager.StyleManager, "_get_manager_generator_style")
mock_style = mocker.MagicMock(spec=manager.StyleRule)
mock_get_manager.return_value = mock_style
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.isGenerator.return_value = False
mock_type.isManager.return_value = True
mock_node = mocker.MagicMock(spec=hou.Node)
mock_node.type.return_value = mock_type
mgr = init_manager()
mgr.style_node(mock_node)
mock_style.apply_to_node.assert_called_with(mock_node)
mock_get_type.assert_called_with(mock_type)
mock_get_tool.assert_called_with(mock_type)
mock_type.isGenerator.assert_not_called()
mock_get_manager.assert_called_with(mock_type)
def test_style_node__by_generator(self, init_manager, mocker):
"""Style a node because it is a generator."""
mock_get_type = mocker.patch.object(manager.StyleManager, "_get_node_type_style", return_value=None)
mock_get_tool = mocker.patch.object(manager.StyleManager, "_get_tool_style", return_value=None)
mock_get_manager = mocker.patch.object(manager.StyleManager, "_get_manager_generator_style")
mock_style = mocker.MagicMock(spec=manager.StyleRule)
mock_get_manager.return_value = mock_style
mock_type = mocker.MagicMock(spec=hou.NodeType)
mock_type.isGenerator.return_value = True
mock_type.isManager.return_value = False
mock_node = mocker.MagicMock(spec=hou.Node)
mock_node.type.return_value = mock_type
mgr = init_manager()
mgr.style_node(mock_node)
mock_style.apply_to_node.assert_called_with(mock_node)
mock_get_type.assert_called_with(mock_type)
mock_get_tool.assert_called_with(mock_type)
mock_get_manager.assert_called_with(mock_type)
def test_style_node__no_match(self, init_manager, mocker):
    """Try to style a node but do not match any rule types."""
    patched_type_lookup = mocker.patch.object(
        manager.StyleManager, "_get_node_type_style", return_value=None
    )
    patched_tool_lookup = mocker.patch.object(
        manager.StyleManager, "_get_tool_style", return_value=None
    )
    patched_mgr_lookup = mocker.patch.object(
        manager.StyleManager, "_get_manager_generator_style"
    )
    patched_mgr_lookup.return_value = mocker.MagicMock(spec=manager.StyleRule)

    fake_type = mocker.MagicMock(spec=hou.NodeType)
    fake_type.isManager.return_value = False
    fake_type.isGenerator.return_value = False

    fake_node = mocker.MagicMock(spec=hou.Node)
    fake_node.type.return_value = fake_type

    init_manager().style_node(fake_node)

    patched_type_lookup.assert_called_with(fake_type)
    patched_tool_lookup.assert_called_with(fake_type)
    # The node is neither a manager nor a generator, so that lookup never runs.
    patched_mgr_lookup.assert_not_called()
# style_node_by_name
def test_style_node_by_name(self, init_manager, mocker):
    """Style a node by its name."""
    patched_name_lookup = mocker.patch.object(manager.StyleManager, "_get_name_style")

    fake_style = mocker.MagicMock(spec=manager.StyleRule)
    patched_name_lookup.return_value = fake_style

    fake_node = mocker.MagicMock(spec=hou.Node)

    init_manager().style_node_by_name(fake_node)

    fake_style.apply_to_node.assert_called_with(fake_node)
    patched_name_lookup.assert_called_with(fake_node)
def test_style_node_by_name__no_match(self, init_manager, mocker):
    """Try to style a node by its name but do not match any rules."""
    patched_name_lookup = mocker.patch.object(
        manager.StyleManager, "_get_name_style", return_value=None
    )

    fake_node = mocker.MagicMock(spec=hou.Node)

    init_manager().style_node_by_name(fake_node)

    patched_name_lookup.assert_called_with(fake_node)
# reload
def test_reload(self, init_manager, mocker):
    """Test reloading all the data."""
    # Patch every rule-map property with a PropertyMock so clear() calls
    # can be observed, and stub out _build so no real parsing happens.
    prop_names = ("constants", "name_rules", "node_type_rules", "tool_rules")
    prop_mocks = {
        name: mocker.patch.object(
            manager.StyleManager, name, new_callable=mocker.PropertyMock
        )
        for name in prop_names
    }
    patched_build = mocker.patch.object(manager.StyleManager, "_build")

    mgr = init_manager()

    # Each property serves a dict-like mapping which reload() must clear.
    for prop_mock in prop_mocks.values():
        prop_mock.return_value = mocker.MagicMock(spec=dict)

    mgr.reload()

    mgr.constants.clear.assert_called_once()
    mgr.name_rules.clear.assert_called_once()
    mgr.node_type_rules.clear.assert_called_once()
    mgr.tool_rules.clear.assert_called_once()
    patched_build.assert_called()
class Test__build_category_rules(object):
    """Test ht.nodes.styles.manager._build_category_rules."""

    def test_invalid_constant(self, mocker):
        """Test building with an invalid constant."""
        fake_path = mocker.MagicMock(spec=str)

        # The rule references a constant that the (empty) constant map
        # does not contain.
        rule_data = [
            {
                consts.RULE_NAME_KEY: mocker.MagicMock(spec=str),
                consts.RULE_CONSTANT_KEY: mocker.MagicMock(spec=str),
            }
        ]

        with pytest.raises(manager.ConstantDoesNotExistError):
            manager._build_category_rules(rule_data, {}, fake_path, {})

    def test_constant(self, mocker):
        """Test building a ConstantRule."""
        patched_build_color = mocker.patch("ht.nodes.styles.manager._build_color")
        patched_const_rule = mocker.patch("ht.nodes.styles.manager.ConstantRule", autospec=True)

        const_name = mocker.MagicMock(spec=str)
        rule_name = mocker.MagicMock(spec=str)
        fake_path = mocker.MagicMock(spec=str)

        known_constants = {const_name: mocker.MagicMock(spec=manager.StyleConstant)}
        rule_data = [
            {consts.RULE_NAME_KEY: rule_name, consts.RULE_CONSTANT_KEY: const_name}
        ]

        result_map = {}
        manager._build_category_rules(rule_data, result_map, fake_path, known_constants)

        assert result_map == {rule_name: patched_const_rule.return_value}
        patched_const_rule.assert_called_with(rule_name, const_name, fake_path)
        # A constant rule never needs a color built.
        patched_build_color.assert_not_called()

    def test_style(self, mocker):
        """Test building a StyleRule."""
        patched_build_color = mocker.patch("ht.nodes.styles.manager._build_color")
        patched_build_shape = mocker.patch("ht.nodes.styles.manager._build_shape")
        patched_const_rule = mocker.patch("ht.nodes.styles.manager.ConstantRule", autospec=True)
        patched_style_rule = mocker.patch("ht.nodes.styles.manager.StyleRule", autospec=True)

        fake_color = mocker.MagicMock(spec=hou.Color)
        fake_color_type = mocker.MagicMock(spec=str)
        patched_build_color.return_value = (fake_color, fake_color_type)

        fake_shape = mocker.MagicMock(spec=str)
        patched_build_shape.return_value = fake_shape

        rule_name = mocker.MagicMock(spec=str)
        fake_path = mocker.MagicMock(spec=str)

        result_map = {}
        manager._build_category_rules(
            [{consts.RULE_NAME_KEY: rule_name}], result_map, fake_path, {}
        )

        assert result_map == {rule_name: patched_style_rule.return_value}
        patched_style_rule.assert_called_with(
            rule_name, fake_color, fake_color_type, fake_shape, fake_path
        )
        patched_const_rule.assert_not_called()
class Test__build_color(object):
    """Test ht.nodes.styles.manager._build_color.

    The per-color-space tests all share one flow: patch ``hou.Color``,
    feed ``_build_color`` a rule dict for a single color type, and verify
    the matching setter received the value.  That flow lives in
    ``_assert_color_built`` so each color space is a one-line test.
    """

    def _assert_color_built(self, mocker, color_type, setter_name, value, expected=None):
        """Build a color of *color_type* and check the setter call.

        :param mocker: The pytest-mock fixture.
        :param color_type: Color type name to place in the rule data.
        :param setter_name: Name of the ``hou.Color`` setter expected to
            be called (e.g. ``"setHSV"``).
        :param value: Raw value stored in the rule data.
        :param expected: Value the setter should receive; defaults to
            *value* itself.
        """
        if expected is None:
            expected = value

        mock_color = mocker.patch("hou.Color", autospec=True)

        data = {
            consts.RULE_COLOR_KEY: {
                consts.RULE_COLOR_TYPE_KEY: color_type,
                consts.RULE_COLOR_VALUE_KEY: value,
            }
        }

        result = manager._build_color(data)

        assert result == (mock_color(), color_type)
        getattr(mock_color(), setter_name).assert_called_with(expected)

    def test_no_data(self):
        """Test building a color when there is no data."""
        assert manager._build_color({}) == (None, None)

    def test_invalid_color_type(self, mocker):
        """Test building a color when the type is invalid."""
        mock_type = mocker.patch("hou.colorType")
        # Remove the attribute so looking up the color type fails.
        del mock_type.foo

        data = {consts.RULE_COLOR_KEY: {consts.RULE_COLOR_TYPE_KEY: "foo"}}

        with pytest.raises(manager.InvalidColorTypeError):
            manager._build_color(data)

    def test_rgb_single(self, mocker):
        """Test building an RGB color with a single float value."""
        value = mocker.MagicMock(spec=float)
        # A lone float is expanded to an [r, g, b] triple.
        self._assert_color_built(mocker, "RGB", "setRGB", value, expected=[value] * 3)

    def test_rgb(self, mocker):
        """Test building an RGB color with 3 float values."""
        self._assert_color_built(mocker, "RGB", "setRGB", mocker.MagicMock(spec=list))

    def test_hsl(self, mocker):
        """Test building an HSL color."""
        self._assert_color_built(mocker, "HSL", "setHSL", mocker.MagicMock(spec=list))

    def test_hsv(self, mocker):
        """Test building an HSV color."""
        self._assert_color_built(mocker, "HSV", "setHSV", mocker.MagicMock(spec=list))

    def test_lab(self, mocker):
        """Test building a LAB color."""
        self._assert_color_built(mocker, "LAB", "setLAB", mocker.MagicMock(spec=list))

    def test_xyz(self, mocker):
        """Test building an XYZ color."""
        self._assert_color_built(mocker, "XYZ", "setXYZ", mocker.MagicMock(spec=list))
class Test__build_shape(object):
    """Test ht.nodes.styles.manager._build_shape."""

    def test(self):
        """Test building a shape where there is a shape key."""
        expected_shape = "shape"

        result = manager._build_shape({consts.RULE_SHAPE_KEY: expected_shape})

        assert result == expected_shape

    def test_no_rule(self):
        """Test building a shape where there is not a shape key."""
        assert manager._build_shape({}) is None
class Test__find_files(object):
    """Test ht.nodes.styles.manager._find_files."""

    def test_no_dirs(self, mocker, mock_hou_exceptions):
        """Test finding files where there are no config/styles folders in the HOUDINI_PATH."""
        mocker.patch("hou.findDirectories", side_effect=mock_hou_exceptions.OperationFailed)
        patched_glob = mocker.patch("ht.nodes.styles.manager.glob.glob")

        assert manager._find_files() == ()

        # No directories were found, so nothing should be globbed.
        patched_glob.assert_not_called()

    def test(self, mocker):
        """Test finding files where there are valid config/styles folders in the HOUDINI_PATH."""
        patched_find = mocker.patch("hou.findDirectories")
        patched_glob = mocker.patch("ht.nodes.styles.manager.glob.glob")

        fake_dirs = (mocker.MagicMock(spec=str), mocker.MagicMock(spec=str))
        fake_files = tuple(mocker.MagicMock(spec=str) for _ in range(3))

        patched_find.return_value = fake_dirs
        # The first directory yields two files, the second yields one.
        patched_glob.side_effect = (fake_files[:2], fake_files[2:])

        assert manager._find_files() == fake_files

        patched_glob.assert_has_calls(
            [mocker.call(os.path.join(directory, "*.json")) for directory in fake_dirs]
        )
class Test__get_tool_menu_locations(object):
    """Test ht.nodes.styles.manager._get_tool_menu_locations."""

    def test_no_match(self, mocker):
        """Test getting tab menu locations when no default tool exists."""
        # No shelf tools exist at all, so the default tool lookup misses.
        mocker.patch("hou.shelves.tools", return_value={})
        patched_default_name = mocker.patch("hou.shelves.defaultToolName")

        fake_category = mocker.MagicMock(hou.NodeTypeCategory)
        fake_type = mocker.MagicMock(spec=hou.NodeType)
        fake_type.category.return_value = fake_category

        assert manager._get_tool_menu_locations(fake_type) == ()

        patched_default_name.assert_called_with(
            fake_category.name.return_value,
            fake_type.name.return_value
        )

    def test(self, mocker):
        """Test getting tab menu locations when the default tool exists."""
        patched_tools = mocker.patch("hou.shelves.tools")
        patched_default_name = mocker.patch("hou.shelves.defaultToolName")

        fake_tool = mocker.MagicMock(spec=hou.Tool)
        patched_tools.return_value = {patched_default_name.return_value: fake_tool}

        fake_category = mocker.MagicMock(hou.NodeTypeCategory)
        fake_type = mocker.MagicMock(spec=hou.NodeType)
        fake_type.category.return_value = fake_category

        result = manager._get_tool_menu_locations(fake_type)

        assert result == fake_tool.toolMenuLocations.return_value

        patched_default_name.assert_called_with(
            fake_category.name.return_value,
            fake_type.name.return_value
        )
| [
"captainhammy@gmail.com"
] | captainhammy@gmail.com |
6f0e82c805b291ff0a194472a3145feebdb5cd8f | d0c0c46bade87640425b5587cc984f24f74cad24 | /entrofy/utils.py | 86f6959c8707d44b06c324229b95896f41e2c0f6 | [] | no_license | dhuppenkothen/entrofy | 4b3b28d66321ee29e2bfa9516cc2bf681b81ee7b | d0789a9f84147f623746efabb28721dd2dc4c141 | refs/heads/master | 2023-05-26T03:59:03.695720 | 2023-05-03T17:04:12 | 2023-05-03T17:04:12 | 43,324,813 | 83 | 26 | null | 2023-05-03T16:52:11 | 2015-09-28T20:21:10 | Jupyter Notebook | UTF-8 | Python | false | false | 861 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''Utilities for entrofy'''
import numbers
import numpy as np
# The following is borrowed from scikit-learn v0.17
def check_random_state(seed):
    """Coerce *seed* into a ``np.random.RandomState`` instance.

    Accepts ``None`` (or the ``np.random`` module itself), an integer, or
    an existing ``RandomState`` object, mirroring scikit-learn's helper
    of the same name.

    :param seed: ``None``, an int, or a ``np.random.RandomState``.
    :return: A ``np.random.RandomState`` instance.
    :raises ValueError: If *seed* is none of the accepted types.
    """
    if seed is None or seed is np.random:
        # Fall back to the global RandomState singleton used by np.random.
        return np.random.mtrand._rand

    if isinstance(seed, np.random.RandomState):
        return seed

    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)

    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
| [
"brian.mcfee@nyu.edu"
] | brian.mcfee@nyu.edu |
88e1a352c0787e8f9703d43896d59f0e0f868906 | c81ea73e93df307d35191ab184a85d6c67c57112 | /dockers/rotnet/my_classify_modelnet.py | 07c915271beed83cd0979c75243fd37e5b149e2e | [] | no_license | BlenderCN-Org/diplomka | 8d0503fc5902dfede8317aed84f5a17f691f687f | 575fe3f2436b9c511496c1dc019d9cc3423ba5f0 | refs/heads/master | 2020-05-22T15:42:00.143738 | 2019-05-07T07:37:46 | 2019-05-07T07:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,136 | py | import numpy as np
import os
import sys
import argparse
import glob
import time
# Integer lookup table, filled in element by element below.
# NOTE(review): appears to map (rotation case, view index) -> permuted view
# index for RotationNet's 60 candidate poses x 20 views — confirm against
# the upstream RotationNet viewpoint setup.
ang = np.zeros([60,20],dtype=int)
ang[ 0 ][ 0 ] = 0; ang[ 0 ][ 1 ] = 1; ang[ 0 ][ 2 ] = 2; ang[ 0 ][ 3 ] = 3; ang[ 0 ][ 4 ] = 4; ang[ 0 ][ 5 ] = 5; ang[ 0 ][ 6 ] = 6; ang[ 0 ][ 7 ] = 7; ang[ 0 ][ 8 ] = 8; ang[ 0 ][ 9 ] = 9; ang[ 0 ][ 10 ] = 10; ang[ 0 ][ 11 ] = 11; ang[ 0 ][ 12 ] = 12; ang[ 0 ][ 13 ] = 13; ang[ 0 ][ 14 ] = 14; ang[ 0 ][ 15 ] = 15; ang[ 0 ][ 16 ] = 16; ang[ 0 ][ 17 ] = 17; ang[ 0 ][ 18 ] = 18; ang[ 0 ][ 19 ] = 19;
ang[ 1 ][ 0 ] = 0; ang[ 1 ][ 1 ] = 4; ang[ 1 ][ 2 ] = 1; ang[ 1 ][ 3 ] = 5; ang[ 1 ][ 4 ] = 2; ang[ 1 ][ 5 ] = 6; ang[ 1 ][ 6 ] = 3; ang[ 1 ][ 7 ] = 7; ang[ 1 ][ 8 ] = 12; ang[ 1 ][ 9 ] = 14; ang[ 1 ][ 10 ] = 13; ang[ 1 ][ 11 ] = 15; ang[ 1 ][ 12 ] = 16; ang[ 1 ][ 13 ] = 17; ang[ 1 ][ 14 ] = 18; ang[ 1 ][ 15 ] = 19; ang[ 1 ][ 16 ] = 8; ang[ 1 ][ 17 ] = 10; ang[ 1 ][ 18 ] = 9; ang[ 1 ][ 19 ] = 11;
ang[ 2 ][ 0 ] = 3; ang[ 2 ][ 1 ] = 1; ang[ 2 ][ 2 ] = 7; ang[ 2 ][ 3 ] = 5; ang[ 2 ][ 4 ] = 2; ang[ 2 ][ 5 ] = 0; ang[ 2 ][ 6 ] = 6; ang[ 2 ][ 7 ] = 4; ang[ 2 ][ 8 ] = 18; ang[ 2 ][ 9 ] = 16; ang[ 2 ][ 10 ] = 19; ang[ 2 ][ 11 ] = 17; ang[ 2 ][ 12 ] = 11; ang[ 2 ][ 13 ] = 9; ang[ 2 ][ 14 ] = 10; ang[ 2 ][ 15 ] = 8; ang[ 2 ][ 16 ] = 13; ang[ 2 ][ 17 ] = 12; ang[ 2 ][ 18 ] = 15; ang[ 2 ][ 19 ] = 14;
ang[ 3 ][ 0 ] = 6; ang[ 3 ][ 1 ] = 4; ang[ 3 ][ 2 ] = 2; ang[ 3 ][ 3 ] = 0; ang[ 3 ][ 4 ] = 7; ang[ 3 ][ 5 ] = 5; ang[ 3 ][ 6 ] = 3; ang[ 3 ][ 7 ] = 1; ang[ 3 ][ 8 ] = 19; ang[ 3 ][ 9 ] = 17; ang[ 3 ][ 10 ] = 18; ang[ 3 ][ 11 ] = 16; ang[ 3 ][ 12 ] = 10; ang[ 3 ][ 13 ] = 8; ang[ 3 ][ 14 ] = 11; ang[ 3 ][ 15 ] = 9; ang[ 3 ][ 16 ] = 14; ang[ 3 ][ 17 ] = 15; ang[ 3 ][ 18 ] = 12; ang[ 3 ][ 19 ] = 13;
ang[ 4 ][ 0 ] = 6; ang[ 4 ][ 1 ] = 2; ang[ 4 ][ 2 ] = 7; ang[ 4 ][ 3 ] = 3; ang[ 4 ][ 4 ] = 4; ang[ 4 ][ 5 ] = 0; ang[ 4 ][ 6 ] = 5; ang[ 4 ][ 7 ] = 1; ang[ 4 ][ 8 ] = 14; ang[ 4 ][ 9 ] = 12; ang[ 4 ][ 10 ] = 15; ang[ 4 ][ 11 ] = 13; ang[ 4 ][ 12 ] = 19; ang[ 4 ][ 13 ] = 18; ang[ 4 ][ 14 ] = 17; ang[ 4 ][ 15 ] = 16; ang[ 4 ][ 16 ] = 10; ang[ 4 ][ 17 ] = 8; ang[ 4 ][ 18 ] = 11; ang[ 4 ][ 19 ] = 9;
ang[ 5 ][ 0 ] = 5; ang[ 5 ][ 1 ] = 7; ang[ 5 ][ 2 ] = 1; ang[ 5 ][ 3 ] = 3; ang[ 5 ][ 4 ] = 4; ang[ 5 ][ 5 ] = 6; ang[ 5 ][ 6 ] = 0; ang[ 5 ][ 7 ] = 2; ang[ 5 ][ 8 ] = 17; ang[ 5 ][ 9 ] = 19; ang[ 5 ][ 10 ] = 16; ang[ 5 ][ 11 ] = 18; ang[ 5 ][ 12 ] = 9; ang[ 5 ][ 13 ] = 11; ang[ 5 ][ 14 ] = 8; ang[ 5 ][ 15 ] = 10; ang[ 5 ][ 16 ] = 15; ang[ 5 ][ 17 ] = 14; ang[ 5 ][ 18 ] = 13; ang[ 5 ][ 19 ] = 12;
ang[ 6 ][ 0 ] = 3; ang[ 6 ][ 1 ] = 7; ang[ 6 ][ 2 ] = 2; ang[ 6 ][ 3 ] = 6; ang[ 6 ][ 4 ] = 1; ang[ 6 ][ 5 ] = 5; ang[ 6 ][ 6 ] = 0; ang[ 6 ][ 7 ] = 4; ang[ 6 ][ 8 ] = 13; ang[ 6 ][ 9 ] = 15; ang[ 6 ][ 10 ] = 12; ang[ 6 ][ 11 ] = 14; ang[ 6 ][ 12 ] = 18; ang[ 6 ][ 13 ] = 19; ang[ 6 ][ 14 ] = 16; ang[ 6 ][ 15 ] = 17; ang[ 6 ][ 16 ] = 11; ang[ 6 ][ 17 ] = 9; ang[ 6 ][ 18 ] = 10; ang[ 6 ][ 19 ] = 8;
ang[ 7 ][ 0 ] = 5; ang[ 7 ][ 1 ] = 1; ang[ 7 ][ 2 ] = 4; ang[ 7 ][ 3 ] = 0; ang[ 7 ][ 4 ] = 7; ang[ 7 ][ 5 ] = 3; ang[ 7 ][ 6 ] = 6; ang[ 7 ][ 7 ] = 2; ang[ 7 ][ 8 ] = 15; ang[ 7 ][ 9 ] = 13; ang[ 7 ][ 10 ] = 14; ang[ 7 ][ 11 ] = 12; ang[ 7 ][ 12 ] = 17; ang[ 7 ][ 13 ] = 16; ang[ 7 ][ 14 ] = 19; ang[ 7 ][ 15 ] = 18; ang[ 7 ][ 16 ] = 9; ang[ 7 ][ 17 ] = 11; ang[ 7 ][ 18 ] = 8; ang[ 7 ][ 19 ] = 10;
ang[ 8 ][ 0 ] = 0; ang[ 8 ][ 1 ] = 2; ang[ 8 ][ 2 ] = 4; ang[ 8 ][ 3 ] = 6; ang[ 8 ][ 4 ] = 1; ang[ 8 ][ 5 ] = 3; ang[ 8 ][ 6 ] = 5; ang[ 8 ][ 7 ] = 7; ang[ 8 ][ 8 ] = 16; ang[ 8 ][ 9 ] = 18; ang[ 8 ][ 10 ] = 17; ang[ 8 ][ 11 ] = 19; ang[ 8 ][ 12 ] = 8; ang[ 8 ][ 13 ] = 10; ang[ 8 ][ 14 ] = 9; ang[ 8 ][ 15 ] = 11; ang[ 8 ][ 16 ] = 12; ang[ 8 ][ 17 ] = 13; ang[ 8 ][ 18 ] = 14; ang[ 8 ][ 19 ] = 15;
ang[ 9 ][ 0 ] = 4; ang[ 9 ][ 1 ] = 15; ang[ 9 ][ 2 ] = 16; ang[ 9 ][ 3 ] = 9; ang[ 9 ][ 4 ] = 10; ang[ 9 ][ 5 ] = 19; ang[ 9 ][ 6 ] = 12; ang[ 9 ][ 7 ] = 3; ang[ 9 ][ 8 ] = 8; ang[ 9 ][ 9 ] = 7; ang[ 9 ][ 10 ] = 0; ang[ 9 ][ 11 ] = 11; ang[ 9 ][ 12 ] = 17; ang[ 9 ][ 13 ] = 5; ang[ 9 ][ 14 ] = 2; ang[ 9 ][ 15 ] = 18; ang[ 9 ][ 16 ] = 14; ang[ 9 ][ 17 ] = 6; ang[ 9 ][ 18 ] = 1; ang[ 9 ][ 19 ] = 13;
ang[ 10 ][ 0 ] = 18; ang[ 10 ][ 1 ] = 11; ang[ 10 ][ 2 ] = 6; ang[ 10 ][ 3 ] = 15; ang[ 10 ][ 4 ] = 12; ang[ 10 ][ 5 ] = 1; ang[ 10 ][ 6 ] = 8; ang[ 10 ][ 7 ] = 17; ang[ 10 ][ 8 ] = 2; ang[ 10 ][ 9 ] = 9; ang[ 10 ][ 10 ] = 10; ang[ 10 ][ 11 ] = 5; ang[ 10 ][ 12 ] = 19; ang[ 10 ][ 13 ] = 7; ang[ 10 ][ 14 ] = 0; ang[ 10 ][ 15 ] = 16; ang[ 10 ][ 16 ] = 3; ang[ 10 ][ 17 ] = 13; ang[ 10 ][ 18 ] = 14; ang[ 10 ][ 19 ] = 4;
ang[ 11 ][ 0 ] = 14; ang[ 11 ][ 1 ] = 5; ang[ 11 ][ 2 ] = 8; ang[ 11 ][ 3 ] = 16; ang[ 11 ][ 4 ] = 19; ang[ 11 ][ 5 ] = 11; ang[ 11 ][ 6 ] = 2; ang[ 11 ][ 7 ] = 13; ang[ 11 ][ 8 ] = 6; ang[ 11 ][ 9 ] = 9; ang[ 11 ][ 10 ] = 10; ang[ 11 ][ 11 ] = 1; ang[ 11 ][ 12 ] = 4; ang[ 11 ][ 13 ] = 17; ang[ 11 ][ 14 ] = 18; ang[ 11 ][ 15 ] = 3; ang[ 11 ][ 16 ] = 15; ang[ 11 ][ 17 ] = 7; ang[ 11 ][ 18 ] = 0; ang[ 11 ][ 19 ] = 12;
ang[ 12 ][ 0 ] = 10; ang[ 12 ][ 1 ] = 18; ang[ 12 ][ 2 ] = 14; ang[ 12 ][ 3 ] = 7; ang[ 12 ][ 4 ] = 0; ang[ 12 ][ 5 ] = 13; ang[ 12 ][ 6 ] = 17; ang[ 12 ][ 7 ] = 9; ang[ 12 ][ 8 ] = 8; ang[ 12 ][ 9 ] = 3; ang[ 12 ][ 10 ] = 4; ang[ 12 ][ 11 ] = 11; ang[ 12 ][ 12 ] = 6; ang[ 12 ][ 13 ] = 19; ang[ 12 ][ 14 ] = 16; ang[ 12 ][ 15 ] = 1; ang[ 12 ][ 16 ] = 2; ang[ 12 ][ 17 ] = 12; ang[ 12 ][ 18 ] = 15; ang[ 12 ][ 19 ] = 5;
ang[ 13 ][ 0 ] = 2; ang[ 13 ][ 1 ] = 8; ang[ 13 ][ 2 ] = 13; ang[ 13 ][ 3 ] = 16; ang[ 13 ][ 4 ] = 19; ang[ 13 ][ 5 ] = 14; ang[ 13 ][ 6 ] = 11; ang[ 13 ][ 7 ] = 5; ang[ 13 ][ 8 ] = 18; ang[ 13 ][ 9 ] = 4; ang[ 13 ][ 10 ] = 3; ang[ 13 ][ 11 ] = 17; ang[ 13 ][ 12 ] = 12; ang[ 13 ][ 13 ] = 0; ang[ 13 ][ 14 ] = 7; ang[ 13 ][ 15 ] = 15; ang[ 13 ][ 16 ] = 10; ang[ 13 ][ 17 ] = 6; ang[ 13 ][ 18 ] = 1; ang[ 13 ][ 19 ] = 9;
ang[ 14 ][ 0 ] = 18; ang[ 14 ][ 1 ] = 12; ang[ 14 ][ 2 ] = 11; ang[ 14 ][ 3 ] = 1; ang[ 14 ][ 4 ] = 6; ang[ 14 ][ 5 ] = 8; ang[ 14 ][ 6 ] = 15; ang[ 14 ][ 7 ] = 17; ang[ 14 ][ 8 ] = 19; ang[ 14 ][ 9 ] = 0; ang[ 14 ][ 10 ] = 7; ang[ 14 ][ 11 ] = 16; ang[ 14 ][ 12 ] = 3; ang[ 14 ][ 13 ] = 13; ang[ 14 ][ 14 ] = 14; ang[ 14 ][ 15 ] = 4; ang[ 14 ][ 16 ] = 2; ang[ 14 ][ 17 ] = 10; ang[ 14 ][ 18 ] = 9; ang[ 14 ][ 19 ] = 5;
ang[ 15 ][ 0 ] = 9; ang[ 15 ][ 1 ] = 3; ang[ 15 ][ 2 ] = 16; ang[ 15 ][ 3 ] = 12; ang[ 15 ][ 4 ] = 15; ang[ 15 ][ 5 ] = 19; ang[ 15 ][ 6 ] = 4; ang[ 15 ][ 7 ] = 10; ang[ 15 ][ 8 ] = 5; ang[ 15 ][ 9 ] = 18; ang[ 15 ][ 10 ] = 17; ang[ 15 ][ 11 ] = 2; ang[ 15 ][ 12 ] = 1; ang[ 15 ][ 13 ] = 13; ang[ 15 ][ 14 ] = 14; ang[ 15 ][ 15 ] = 6; ang[ 15 ][ 16 ] = 11; ang[ 15 ][ 17 ] = 7; ang[ 15 ][ 18 ] = 0; ang[ 15 ][ 19 ] = 8;
ang[ 16 ][ 0 ] = 13; ang[ 16 ][ 1 ] = 18; ang[ 16 ][ 2 ] = 0; ang[ 16 ][ 3 ] = 10; ang[ 16 ][ 4 ] = 9; ang[ 16 ][ 5 ] = 7; ang[ 16 ][ 6 ] = 17; ang[ 16 ][ 7 ] = 14; ang[ 16 ][ 8 ] = 1; ang[ 16 ][ 9 ] = 19; ang[ 16 ][ 10 ] = 16; ang[ 16 ][ 11 ] = 6; ang[ 16 ][ 12 ] = 12; ang[ 16 ][ 13 ] = 2; ang[ 16 ][ 14 ] = 5; ang[ 16 ][ 15 ] = 15; ang[ 16 ][ 16 ] = 3; ang[ 16 ][ 17 ] = 11; ang[ 16 ][ 18 ] = 8; ang[ 16 ][ 19 ] = 4;
ang[ 17 ][ 0 ] = 1; ang[ 17 ][ 1 ] = 17; ang[ 17 ][ 2 ] = 11; ang[ 17 ][ 3 ] = 15; ang[ 17 ][ 4 ] = 12; ang[ 17 ][ 5 ] = 8; ang[ 17 ][ 6 ] = 18; ang[ 17 ][ 7 ] = 6; ang[ 17 ][ 8 ] = 13; ang[ 17 ][ 9 ] = 4; ang[ 17 ][ 10 ] = 3; ang[ 17 ][ 11 ] = 14; ang[ 17 ][ 12 ] = 9; ang[ 17 ][ 13 ] = 5; ang[ 17 ][ 14 ] = 2; ang[ 17 ][ 15 ] = 10; ang[ 17 ][ 16 ] = 16; ang[ 17 ][ 17 ] = 0; ang[ 17 ][ 18 ] = 7; ang[ 17 ][ 19 ] = 19;
ang[ 18 ][ 0 ] = 9; ang[ 18 ][ 1 ] = 15; ang[ 18 ][ 2 ] = 3; ang[ 18 ][ 3 ] = 19; ang[ 18 ][ 4 ] = 16; ang[ 18 ][ 5 ] = 4; ang[ 18 ][ 6 ] = 12; ang[ 18 ][ 7 ] = 10; ang[ 18 ][ 8 ] = 1; ang[ 18 ][ 9 ] = 14; ang[ 18 ][ 10 ] = 13; ang[ 18 ][ 11 ] = 6; ang[ 18 ][ 12 ] = 11; ang[ 18 ][ 13 ] = 7; ang[ 18 ][ 14 ] = 0; ang[ 18 ][ 15 ] = 8; ang[ 18 ][ 16 ] = 5; ang[ 18 ][ 17 ] = 17; ang[ 18 ][ 18 ] = 18; ang[ 18 ][ 19 ] = 2;
ang[ 19 ][ 0 ] = 14; ang[ 19 ][ 1 ] = 8; ang[ 19 ][ 2 ] = 19; ang[ 19 ][ 3 ] = 2; ang[ 19 ][ 4 ] = 5; ang[ 19 ][ 5 ] = 16; ang[ 19 ][ 6 ] = 11; ang[ 19 ][ 7 ] = 13; ang[ 19 ][ 8 ] = 15; ang[ 19 ][ 9 ] = 0; ang[ 19 ][ 10 ] = 7; ang[ 19 ][ 11 ] = 12; ang[ 19 ][ 12 ] = 6; ang[ 19 ][ 13 ] = 10; ang[ 19 ][ 14 ] = 9; ang[ 19 ][ 15 ] = 1; ang[ 19 ][ 16 ] = 4; ang[ 19 ][ 17 ] = 17; ang[ 19 ][ 18 ] = 18; ang[ 19 ][ 19 ] = 3;
ang[ 20 ][ 0 ] = 17; ang[ 20 ][ 1 ] = 0; ang[ 20 ][ 2 ] = 14; ang[ 20 ][ 3 ] = 10; ang[ 20 ][ 4 ] = 9; ang[ 20 ][ 5 ] = 13; ang[ 20 ][ 6 ] = 7; ang[ 20 ][ 7 ] = 18; ang[ 20 ][ 8 ] = 5; ang[ 20 ][ 9 ] = 12; ang[ 20 ][ 10 ] = 15; ang[ 20 ][ 11 ] = 2; ang[ 20 ][ 12 ] = 4; ang[ 20 ][ 13 ] = 8; ang[ 20 ][ 14 ] = 11; ang[ 20 ][ 15 ] = 3; ang[ 20 ][ 16 ] = 16; ang[ 20 ][ 17 ] = 1; ang[ 20 ][ 18 ] = 6; ang[ 20 ][ 19 ] = 19;
ang[ 21 ][ 0 ] = 8; ang[ 21 ][ 1 ] = 6; ang[ 21 ][ 2 ] = 17; ang[ 21 ][ 3 ] = 15; ang[ 21 ][ 4 ] = 12; ang[ 21 ][ 5 ] = 18; ang[ 21 ][ 6 ] = 1; ang[ 21 ][ 7 ] = 11; ang[ 21 ][ 8 ] = 0; ang[ 21 ][ 9 ] = 19; ang[ 21 ][ 10 ] = 16; ang[ 21 ][ 11 ] = 7; ang[ 21 ][ 12 ] = 4; ang[ 21 ][ 13 ] = 14; ang[ 21 ][ 14 ] = 13; ang[ 21 ][ 15 ] = 3; ang[ 21 ][ 16 ] = 10; ang[ 21 ][ 17 ] = 2; ang[ 21 ][ 18 ] = 5; ang[ 21 ][ 19 ] = 9;
ang[ 22 ][ 0 ] = 12; ang[ 22 ][ 1 ] = 10; ang[ 22 ][ 2 ] = 16; ang[ 22 ][ 3 ] = 4; ang[ 22 ][ 4 ] = 3; ang[ 22 ][ 5 ] = 19; ang[ 22 ][ 6 ] = 9; ang[ 22 ][ 7 ] = 15; ang[ 22 ][ 8 ] = 13; ang[ 22 ][ 9 ] = 6; ang[ 22 ][ 10 ] = 1; ang[ 22 ][ 11 ] = 14; ang[ 22 ][ 12 ] = 0; ang[ 22 ][ 13 ] = 8; ang[ 22 ][ 14 ] = 11; ang[ 22 ][ 15 ] = 7; ang[ 22 ][ 16 ] = 2; ang[ 22 ][ 17 ] = 18; ang[ 22 ][ 18 ] = 17; ang[ 22 ][ 19 ] = 5;
ang[ 23 ][ 0 ] = 16; ang[ 23 ][ 1 ] = 8; ang[ 23 ][ 2 ] = 5; ang[ 23 ][ 3 ] = 14; ang[ 23 ][ 4 ] = 13; ang[ 23 ][ 5 ] = 2; ang[ 23 ][ 6 ] = 11; ang[ 23 ][ 7 ] = 19; ang[ 23 ][ 8 ] = 1; ang[ 23 ][ 9 ] = 10; ang[ 23 ][ 10 ] = 9; ang[ 23 ][ 11 ] = 6; ang[ 23 ][ 12 ] = 17; ang[ 23 ][ 13 ] = 4; ang[ 23 ][ 14 ] = 3; ang[ 23 ][ 15 ] = 18; ang[ 23 ][ 16 ] = 0; ang[ 23 ][ 17 ] = 12; ang[ 23 ][ 18 ] = 15; ang[ 23 ][ 19 ] = 7;
ang[ 24 ][ 0 ] = 7; ang[ 24 ][ 1 ] = 9; ang[ 24 ][ 2 ] = 14; ang[ 24 ][ 3 ] = 17; ang[ 24 ][ 4 ] = 18; ang[ 24 ][ 5 ] = 13; ang[ 24 ][ 6 ] = 10; ang[ 24 ][ 7 ] = 0; ang[ 24 ][ 8 ] = 19; ang[ 24 ][ 9 ] = 1; ang[ 24 ][ 10 ] = 6; ang[ 24 ][ 11 ] = 16; ang[ 24 ][ 12 ] = 15; ang[ 24 ][ 13 ] = 5; ang[ 24 ][ 14 ] = 2; ang[ 24 ][ 15 ] = 12; ang[ 24 ][ 16 ] = 11; ang[ 24 ][ 17 ] = 3; ang[ 24 ][ 18 ] = 4; ang[ 24 ][ 19 ] = 8;
ang[ 25 ][ 0 ] = 11; ang[ 25 ][ 1 ] = 13; ang[ 25 ][ 2 ] = 5; ang[ 25 ][ 3 ] = 16; ang[ 25 ][ 4 ] = 19; ang[ 25 ][ 5 ] = 2; ang[ 25 ][ 6 ] = 14; ang[ 25 ][ 7 ] = 8; ang[ 25 ][ 8 ] = 7; ang[ 25 ][ 9 ] = 12; ang[ 25 ][ 10 ] = 15; ang[ 25 ][ 11 ] = 0; ang[ 25 ][ 12 ] = 9; ang[ 25 ][ 13 ] = 1; ang[ 25 ][ 14 ] = 6; ang[ 25 ][ 15 ] = 10; ang[ 25 ][ 16 ] = 3; ang[ 25 ][ 17 ] = 18; ang[ 25 ][ 18 ] = 17; ang[ 25 ][ 19 ] = 4;
ang[ 26 ][ 0 ] = 9; ang[ 26 ][ 1 ] = 16; ang[ 26 ][ 2 ] = 15; ang[ 26 ][ 3 ] = 4; ang[ 26 ][ 4 ] = 3; ang[ 26 ][ 5 ] = 12; ang[ 26 ][ 6 ] = 19; ang[ 26 ][ 7 ] = 10; ang[ 26 ][ 8 ] = 11; ang[ 26 ][ 9 ] = 0; ang[ 26 ][ 10 ] = 7; ang[ 26 ][ 11 ] = 8; ang[ 26 ][ 12 ] = 5; ang[ 26 ][ 13 ] = 17; ang[ 26 ][ 14 ] = 18; ang[ 26 ][ 15 ] = 2; ang[ 26 ][ 16 ] = 1; ang[ 26 ][ 17 ] = 13; ang[ 26 ][ 18 ] = 14; ang[ 26 ][ 19 ] = 6;
ang[ 27 ][ 0 ] = 19; ang[ 27 ][ 1 ] = 15; ang[ 27 ][ 2 ] = 10; ang[ 27 ][ 3 ] = 4; ang[ 27 ][ 4 ] = 3; ang[ 27 ][ 5 ] = 9; ang[ 27 ][ 6 ] = 12; ang[ 27 ][ 7 ] = 16; ang[ 27 ][ 8 ] = 18; ang[ 27 ][ 9 ] = 5; ang[ 27 ][ 10 ] = 2; ang[ 27 ][ 11 ] = 17; ang[ 27 ][ 12 ] = 6; ang[ 27 ][ 13 ] = 14; ang[ 27 ][ 14 ] = 13; ang[ 27 ][ 15 ] = 1; ang[ 27 ][ 16 ] = 7; ang[ 27 ][ 17 ] = 11; ang[ 27 ][ 18 ] = 8; ang[ 27 ][ 19 ] = 0;
ang[ 28 ][ 0 ] = 18; ang[ 28 ][ 1 ] = 6; ang[ 28 ][ 2 ] = 12; ang[ 28 ][ 3 ] = 8; ang[ 28 ][ 4 ] = 11; ang[ 28 ][ 5 ] = 15; ang[ 28 ][ 6 ] = 1; ang[ 28 ][ 7 ] = 17; ang[ 28 ][ 8 ] = 3; ang[ 28 ][ 9 ] = 14; ang[ 28 ][ 10 ] = 13; ang[ 28 ][ 11 ] = 4; ang[ 28 ][ 12 ] = 2; ang[ 28 ][ 13 ] = 10; ang[ 28 ][ 14 ] = 9; ang[ 28 ][ 15 ] = 5; ang[ 28 ][ 16 ] = 19; ang[ 28 ][ 17 ] = 7; ang[ 28 ][ 18 ] = 0; ang[ 28 ][ 19 ] = 16;
ang[ 29 ][ 0 ] = 7; ang[ 29 ][ 1 ] = 14; ang[ 29 ][ 2 ] = 18; ang[ 29 ][ 3 ] = 10; ang[ 29 ][ 4 ] = 9; ang[ 29 ][ 5 ] = 17; ang[ 29 ][ 6 ] = 13; ang[ 29 ][ 7 ] = 0; ang[ 29 ][ 8 ] = 11; ang[ 29 ][ 9 ] = 4; ang[ 29 ][ 10 ] = 3; ang[ 29 ][ 11 ] = 8; ang[ 29 ][ 12 ] = 19; ang[ 29 ][ 13 ] = 6; ang[ 29 ][ 14 ] = 1; ang[ 29 ][ 15 ] = 16; ang[ 29 ][ 16 ] = 15; ang[ 29 ][ 17 ] = 5; ang[ 29 ][ 18 ] = 2; ang[ 29 ][ 19 ] = 12;
ang[ 30 ][ 0 ] = 14; ang[ 30 ][ 1 ] = 19; ang[ 30 ][ 2 ] = 5; ang[ 30 ][ 3 ] = 11; ang[ 30 ][ 4 ] = 8; ang[ 30 ][ 5 ] = 2; ang[ 30 ][ 6 ] = 16; ang[ 30 ][ 7 ] = 13; ang[ 30 ][ 8 ] = 4; ang[ 30 ][ 9 ] = 18; ang[ 30 ][ 10 ] = 17; ang[ 30 ][ 11 ] = 3; ang[ 30 ][ 12 ] = 15; ang[ 30 ][ 13 ] = 7; ang[ 30 ][ 14 ] = 0; ang[ 30 ][ 15 ] = 12; ang[ 30 ][ 16 ] = 6; ang[ 30 ][ 17 ] = 10; ang[ 30 ][ 18 ] = 9; ang[ 30 ][ 19 ] = 1;
ang[ 31 ][ 0 ] = 7; ang[ 31 ][ 1 ] = 18; ang[ 31 ][ 2 ] = 9; ang[ 31 ][ 3 ] = 13; ang[ 31 ][ 4 ] = 14; ang[ 31 ][ 5 ] = 10; ang[ 31 ][ 6 ] = 17; ang[ 31 ][ 7 ] = 0; ang[ 31 ][ 8 ] = 15; ang[ 31 ][ 9 ] = 2; ang[ 31 ][ 10 ] = 5; ang[ 31 ][ 11 ] = 12; ang[ 31 ][ 12 ] = 11; ang[ 31 ][ 13 ] = 3; ang[ 31 ][ 14 ] = 4; ang[ 31 ][ 15 ] = 8; ang[ 31 ][ 16 ] = 19; ang[ 31 ][ 17 ] = 6; ang[ 31 ][ 18 ] = 1; ang[ 31 ][ 19 ] = 16;
ang[ 32 ][ 0 ] = 15; ang[ 32 ][ 1 ] = 6; ang[ 32 ][ 2 ] = 11; ang[ 32 ][ 3 ] = 18; ang[ 32 ][ 4 ] = 17; ang[ 32 ][ 5 ] = 8; ang[ 32 ][ 6 ] = 1; ang[ 32 ][ 7 ] = 12; ang[ 32 ][ 8 ] = 5; ang[ 32 ][ 9 ] = 10; ang[ 32 ][ 10 ] = 9; ang[ 32 ][ 11 ] = 2; ang[ 32 ][ 12 ] = 7; ang[ 32 ][ 13 ] = 19; ang[ 32 ][ 14 ] = 16; ang[ 32 ][ 15 ] = 0; ang[ 32 ][ 16 ] = 14; ang[ 32 ][ 17 ] = 4; ang[ 32 ][ 18 ] = 3; ang[ 32 ][ 19 ] = 13;
ang[ 33 ][ 0 ] = 6; ang[ 33 ][ 1 ] = 7; ang[ 33 ][ 2 ] = 4; ang[ 33 ][ 3 ] = 5; ang[ 33 ][ 4 ] = 2; ang[ 33 ][ 5 ] = 3; ang[ 33 ][ 6 ] = 0; ang[ 33 ][ 7 ] = 1; ang[ 33 ][ 8 ] = 10; ang[ 33 ][ 9 ] = 11; ang[ 33 ][ 10 ] = 8; ang[ 33 ][ 11 ] = 9; ang[ 33 ][ 12 ] = 14; ang[ 33 ][ 13 ] = 15; ang[ 33 ][ 14 ] = 12; ang[ 33 ][ 15 ] = 13; ang[ 33 ][ 16 ] = 19; ang[ 33 ][ 17 ] = 18; ang[ 33 ][ 18 ] = 17; ang[ 33 ][ 19 ] = 16;
ang[ 34 ][ 0 ] = 3; ang[ 34 ][ 1 ] = 2; ang[ 34 ][ 2 ] = 1; ang[ 34 ][ 3 ] = 0; ang[ 34 ][ 4 ] = 7; ang[ 34 ][ 5 ] = 6; ang[ 34 ][ 6 ] = 5; ang[ 34 ][ 7 ] = 4; ang[ 34 ][ 8 ] = 11; ang[ 34 ][ 9 ] = 10; ang[ 34 ][ 10 ] = 9; ang[ 34 ][ 11 ] = 8; ang[ 34 ][ 12 ] = 13; ang[ 34 ][ 13 ] = 12; ang[ 34 ][ 14 ] = 15; ang[ 34 ][ 15 ] = 14; ang[ 34 ][ 16 ] = 18; ang[ 34 ][ 17 ] = 19; ang[ 34 ][ 18 ] = 16; ang[ 34 ][ 19 ] = 17;
ang[ 35 ][ 0 ] = 5; ang[ 35 ][ 1 ] = 4; ang[ 35 ][ 2 ] = 7; ang[ 35 ][ 3 ] = 6; ang[ 35 ][ 4 ] = 1; ang[ 35 ][ 5 ] = 0; ang[ 35 ][ 6 ] = 3; ang[ 35 ][ 7 ] = 2; ang[ 35 ][ 8 ] = 9; ang[ 35 ][ 9 ] = 8; ang[ 35 ][ 10 ] = 11; ang[ 35 ][ 11 ] = 10; ang[ 35 ][ 12 ] = 15; ang[ 35 ][ 13 ] = 14; ang[ 35 ][ 14 ] = 13; ang[ 35 ][ 15 ] = 12; ang[ 35 ][ 16 ] = 17; ang[ 35 ][ 17 ] = 16; ang[ 35 ][ 18 ] = 19; ang[ 35 ][ 19 ] = 18;
ang[ 36 ][ 0 ] = 8; ang[ 36 ][ 1 ] = 17; ang[ 36 ][ 2 ] = 12; ang[ 36 ][ 3 ] = 1; ang[ 36 ][ 4 ] = 6; ang[ 36 ][ 5 ] = 15; ang[ 36 ][ 6 ] = 18; ang[ 36 ][ 7 ] = 11; ang[ 36 ][ 8 ] = 10; ang[ 36 ][ 9 ] = 5; ang[ 36 ][ 10 ] = 2; ang[ 36 ][ 11 ] = 9; ang[ 36 ][ 12 ] = 0; ang[ 36 ][ 13 ] = 16; ang[ 36 ][ 14 ] = 19; ang[ 36 ][ 15 ] = 7; ang[ 36 ][ 16 ] = 4; ang[ 36 ][ 17 ] = 14; ang[ 36 ][ 18 ] = 13; ang[ 36 ][ 19 ] = 3;
ang[ 37 ][ 0 ] = 10; ang[ 37 ][ 1 ] = 14; ang[ 37 ][ 2 ] = 0; ang[ 37 ][ 3 ] = 17; ang[ 37 ][ 4 ] = 18; ang[ 37 ][ 5 ] = 7; ang[ 37 ][ 6 ] = 13; ang[ 37 ][ 7 ] = 9; ang[ 37 ][ 8 ] = 2; ang[ 37 ][ 9 ] = 15; ang[ 37 ][ 10 ] = 12; ang[ 37 ][ 11 ] = 5; ang[ 37 ][ 12 ] = 8; ang[ 37 ][ 13 ] = 4; ang[ 37 ][ 14 ] = 3; ang[ 37 ][ 15 ] = 11; ang[ 37 ][ 16 ] = 6; ang[ 37 ][ 17 ] = 19; ang[ 37 ][ 18 ] = 16; ang[ 37 ][ 19 ] = 1;
ang[ 38 ][ 0 ] = 2; ang[ 38 ][ 1 ] = 19; ang[ 38 ][ 2 ] = 8; ang[ 38 ][ 3 ] = 14; ang[ 38 ][ 4 ] = 13; ang[ 38 ][ 5 ] = 11; ang[ 38 ][ 6 ] = 16; ang[ 38 ][ 7 ] = 5; ang[ 38 ][ 8 ] = 12; ang[ 38 ][ 9 ] = 7; ang[ 38 ][ 10 ] = 0; ang[ 38 ][ 11 ] = 15; ang[ 38 ][ 12 ] = 10; ang[ 38 ][ 13 ] = 6; ang[ 38 ][ 14 ] = 1; ang[ 38 ][ 15 ] = 9; ang[ 38 ][ 16 ] = 18; ang[ 38 ][ 17 ] = 3; ang[ 38 ][ 18 ] = 4; ang[ 38 ][ 19 ] = 17;
ang[ 39 ][ 0 ] = 12; ang[ 39 ][ 1 ] = 3; ang[ 39 ][ 2 ] = 10; ang[ 39 ][ 3 ] = 19; ang[ 39 ][ 4 ] = 16; ang[ 39 ][ 5 ] = 9; ang[ 39 ][ 6 ] = 4; ang[ 39 ][ 7 ] = 15; ang[ 39 ][ 8 ] = 0; ang[ 39 ][ 9 ] = 11; ang[ 39 ][ 10 ] = 8; ang[ 39 ][ 11 ] = 7; ang[ 39 ][ 12 ] = 2; ang[ 39 ][ 13 ] = 18; ang[ 39 ][ 14 ] = 17; ang[ 39 ][ 15 ] = 5; ang[ 39 ][ 16 ] = 13; ang[ 39 ][ 17 ] = 1; ang[ 39 ][ 18 ] = 6; ang[ 39 ][ 19 ] = 14;
ang[ 40 ][ 0 ] = 12; ang[ 40 ][ 1 ] = 16; ang[ 40 ][ 2 ] = 3; ang[ 40 ][ 3 ] = 9; ang[ 40 ][ 4 ] = 10; ang[ 40 ][ 5 ] = 4; ang[ 40 ][ 6 ] = 19; ang[ 40 ][ 7 ] = 15; ang[ 40 ][ 8 ] = 2; ang[ 40 ][ 9 ] = 17; ang[ 40 ][ 10 ] = 18; ang[ 40 ][ 11 ] = 5; ang[ 40 ][ 12 ] = 13; ang[ 40 ][ 13 ] = 1; ang[ 40 ][ 14 ] = 6; ang[ 40 ][ 15 ] = 14; ang[ 40 ][ 16 ] = 0; ang[ 40 ][ 17 ] = 8; ang[ 40 ][ 18 ] = 11; ang[ 40 ][ 19 ] = 7;
ang[ 41 ][ 0 ] = 13; ang[ 41 ][ 1 ] = 0; ang[ 41 ][ 2 ] = 9; ang[ 41 ][ 3 ] = 17; ang[ 41 ][ 4 ] = 18; ang[ 41 ][ 5 ] = 10; ang[ 41 ][ 6 ] = 7; ang[ 41 ][ 7 ] = 14; ang[ 41 ][ 8 ] = 3; ang[ 41 ][ 9 ] = 8; ang[ 41 ][ 10 ] = 11; ang[ 41 ][ 11 ] = 4; ang[ 41 ][ 12 ] = 1; ang[ 41 ][ 13 ] = 16; ang[ 41 ][ 14 ] = 19; ang[ 41 ][ 15 ] = 6; ang[ 41 ][ 16 ] = 12; ang[ 41 ][ 17 ] = 2; ang[ 41 ][ 18 ] = 5; ang[ 41 ][ 19 ] = 15;
ang[ 42 ][ 0 ] = 1; ang[ 42 ][ 1 ] = 12; ang[ 42 ][ 2 ] = 17; ang[ 42 ][ 3 ] = 8; ang[ 42 ][ 4 ] = 11; ang[ 42 ][ 5 ] = 18; ang[ 42 ][ 6 ] = 15; ang[ 42 ][ 7 ] = 6; ang[ 42 ][ 8 ] = 9; ang[ 42 ][ 9 ] = 2; ang[ 42 ][ 10 ] = 5; ang[ 42 ][ 11 ] = 10; ang[ 42 ][ 12 ] = 16; ang[ 42 ][ 13 ] = 0; ang[ 42 ][ 14 ] = 7; ang[ 42 ][ 15 ] = 19; ang[ 42 ][ 16 ] = 13; ang[ 42 ][ 17 ] = 3; ang[ 42 ][ 18 ] = 4; ang[ 42 ][ 19 ] = 14;
ang[ 43 ][ 0 ] = 16; ang[ 43 ][ 1 ] = 13; ang[ 43 ][ 2 ] = 8; ang[ 43 ][ 3 ] = 2; ang[ 43 ][ 4 ] = 5; ang[ 43 ][ 5 ] = 11; ang[ 43 ][ 6 ] = 14; ang[ 43 ][ 7 ] = 19; ang[ 43 ][ 8 ] = 17; ang[ 43 ][ 9 ] = 3; ang[ 43 ][ 10 ] = 4; ang[ 43 ][ 11 ] = 18; ang[ 43 ][ 12 ] = 0; ang[ 43 ][ 13 ] = 12; ang[ 43 ][ 14 ] = 15; ang[ 43 ][ 15 ] = 7; ang[ 43 ][ 16 ] = 1; ang[ 43 ][ 17 ] = 9; ang[ 43 ][ 18 ] = 10; ang[ 43 ][ 19 ] = 6;
ang[ 44 ][ 0 ] = 16; ang[ 44 ][ 1 ] = 5; ang[ 44 ][ 2 ] = 13; ang[ 44 ][ 3 ] = 11; ang[ 44 ][ 4 ] = 8; ang[ 44 ][ 5 ] = 14; ang[ 44 ][ 6 ] = 2; ang[ 44 ][ 7 ] = 19; ang[ 44 ][ 8 ] = 0; ang[ 44 ][ 9 ] = 15; ang[ 44 ][ 10 ] = 12; ang[ 44 ][ 11 ] = 7; ang[ 44 ][ 12 ] = 1; ang[ 44 ][ 13 ] = 9; ang[ 44 ][ 14 ] = 10; ang[ 44 ][ 15 ] = 6; ang[ 44 ][ 16 ] = 17; ang[ 44 ][ 17 ] = 4; ang[ 44 ][ 18 ] = 3; ang[ 44 ][ 19 ] = 18;
ang[ 45 ][ 0 ] = 17; ang[ 45 ][ 1 ] = 14; ang[ 45 ][ 2 ] = 9; ang[ 45 ][ 3 ] = 7; ang[ 45 ][ 4 ] = 0; ang[ 45 ][ 5 ] = 10; ang[ 45 ][ 6 ] = 13; ang[ 45 ][ 7 ] = 18; ang[ 45 ][ 8 ] = 16; ang[ 45 ][ 9 ] = 6; ang[ 45 ][ 10 ] = 1; ang[ 45 ][ 11 ] = 19; ang[ 45 ][ 12 ] = 5; ang[ 45 ][ 13 ] = 15; ang[ 45 ][ 14 ] = 12; ang[ 45 ][ 15 ] = 2; ang[ 45 ][ 16 ] = 4; ang[ 45 ][ 17 ] = 8; ang[ 45 ][ 18 ] = 11; ang[ 45 ][ 19 ] = 3;
ang[ 46 ][ 0 ] = 4; ang[ 46 ][ 1 ] = 10; ang[ 46 ][ 2 ] = 15; ang[ 46 ][ 3 ] = 19; ang[ 46 ][ 4 ] = 16; ang[ 46 ][ 5 ] = 12; ang[ 46 ][ 6 ] = 9; ang[ 46 ][ 7 ] = 3; ang[ 46 ][ 8 ] = 17; ang[ 46 ][ 9 ] = 2; ang[ 46 ][ 10 ] = 5; ang[ 46 ][ 11 ] = 18; ang[ 46 ][ 12 ] = 14; ang[ 46 ][ 13 ] = 6; ang[ 46 ][ 14 ] = 1; ang[ 46 ][ 15 ] = 13; ang[ 46 ][ 16 ] = 8; ang[ 46 ][ 17 ] = 0; ang[ 46 ][ 18 ] = 7; ang[ 46 ][ 19 ] = 11;
ang[ 47 ][ 0 ] = 8; ang[ 47 ][ 1 ] = 12; ang[ 47 ][ 2 ] = 6; ang[ 47 ][ 3 ] = 18; ang[ 47 ][ 4 ] = 17; ang[ 47 ][ 5 ] = 1; ang[ 47 ][ 6 ] = 15; ang[ 47 ][ 7 ] = 11; ang[ 47 ][ 8 ] = 4; ang[ 47 ][ 9 ] = 13; ang[ 47 ][ 10 ] = 14; ang[ 47 ][ 11 ] = 3; ang[ 47 ][ 12 ] = 10; ang[ 47 ][ 13 ] = 2; ang[ 47 ][ 14 ] = 5; ang[ 47 ][ 15 ] = 9; ang[ 47 ][ 16 ] = 0; ang[ 47 ][ 17 ] = 16; ang[ 47 ][ 18 ] = 19; ang[ 47 ][ 19 ] = 7;
ang[ 48 ][ 0 ] = 13; ang[ 48 ][ 1 ] = 9; ang[ 48 ][ 2 ] = 18; ang[ 48 ][ 3 ] = 7; ang[ 48 ][ 4 ] = 0; ang[ 48 ][ 5 ] = 17; ang[ 48 ][ 6 ] = 10; ang[ 48 ][ 7 ] = 14; ang[ 48 ][ 8 ] = 12; ang[ 48 ][ 9 ] = 5; ang[ 48 ][ 10 ] = 2; ang[ 48 ][ 11 ] = 15; ang[ 48 ][ 12 ] = 3; ang[ 48 ][ 13 ] = 11; ang[ 48 ][ 14 ] = 8; ang[ 48 ][ 15 ] = 4; ang[ 48 ][ 16 ] = 1; ang[ 48 ][ 17 ] = 16; ang[ 48 ][ 18 ] = 19; ang[ 48 ][ 19 ] = 6;
ang[ 49 ][ 0 ] = 11; ang[ 49 ][ 1 ] = 5; ang[ 49 ][ 2 ] = 19; ang[ 49 ][ 3 ] = 14; ang[ 49 ][ 4 ] = 13; ang[ 49 ][ 5 ] = 16; ang[ 49 ][ 6 ] = 2; ang[ 49 ][ 7 ] = 8; ang[ 49 ][ 8 ] = 3; ang[ 49 ][ 9 ] = 17; ang[ 49 ][ 10 ] = 18; ang[ 49 ][ 11 ] = 4; ang[ 49 ][ 12 ] = 7; ang[ 49 ][ 13 ] = 15; ang[ 49 ][ 14 ] = 12; ang[ 49 ][ 15 ] = 0; ang[ 49 ][ 16 ] = 9; ang[ 49 ][ 17 ] = 1; ang[ 49 ][ 18 ] = 6; ang[ 49 ][ 19 ] = 10;
ang[ 50 ][ 0 ] = 15; ang[ 50 ][ 1 ] = 17; ang[ 50 ][ 2 ] = 6; ang[ 50 ][ 3 ] = 8; ang[ 50 ][ 4 ] = 11; ang[ 50 ][ 5 ] = 1; ang[ 50 ][ 6 ] = 18; ang[ 50 ][ 7 ] = 12; ang[ 50 ][ 8 ] = 7; ang[ 50 ][ 9 ] = 16; ang[ 50 ][ 10 ] = 19; ang[ 50 ][ 11 ] = 0; ang[ 50 ][ 12 ] = 14; ang[ 50 ][ 13 ] = 4; ang[ 50 ][ 14 ] = 3; ang[ 50 ][ 15 ] = 13; ang[ 50 ][ 16 ] = 5; ang[ 50 ][ 17 ] = 9; ang[ 50 ][ 18 ] = 10; ang[ 50 ][ 19 ] = 2;
ang[ 51 ][ 0 ] = 4; ang[ 51 ][ 1 ] = 16; ang[ 51 ][ 2 ] = 10; ang[ 51 ][ 3 ] = 12; ang[ 51 ][ 4 ] = 15; ang[ 51 ][ 5 ] = 9; ang[ 51 ][ 6 ] = 19; ang[ 51 ][ 7 ] = 3; ang[ 51 ][ 8 ] = 14; ang[ 51 ][ 9 ] = 1; ang[ 51 ][ 10 ] = 6; ang[ 51 ][ 11 ] = 13; ang[ 51 ][ 12 ] = 8; ang[ 51 ][ 13 ] = 0; ang[ 51 ][ 14 ] = 7; ang[ 51 ][ 15 ] = 11; ang[ 51 ][ 16 ] = 17; ang[ 51 ][ 17 ] = 5; ang[ 51 ][ 18 ] = 2; ang[ 51 ][ 19 ] = 18;
ang[ 52 ][ 0 ] = 10; ang[ 52 ][ 1 ] = 0; ang[ 52 ][ 2 ] = 18; ang[ 52 ][ 3 ] = 13; ang[ 52 ][ 4 ] = 14; ang[ 52 ][ 5 ] = 17; ang[ 52 ][ 6 ] = 7; ang[ 52 ][ 7 ] = 9; ang[ 52 ][ 8 ] = 6; ang[ 52 ][ 9 ] = 16; ang[ 52 ][ 10 ] = 19; ang[ 52 ][ 11 ] = 1; ang[ 52 ][ 12 ] = 2; ang[ 52 ][ 13 ] = 12; ang[ 52 ][ 14 ] = 15; ang[ 52 ][ 15 ] = 5; ang[ 52 ][ 16 ] = 8; ang[ 52 ][ 17 ] = 4; ang[ 52 ][ 18 ] = 3; ang[ 52 ][ 19 ] = 11;
ang[ 53 ][ 0 ] = 19; ang[ 53 ][ 1 ] = 10; ang[ 53 ][ 2 ] = 3; ang[ 53 ][ 3 ] = 12; ang[ 53 ][ 4 ] = 15; ang[ 53 ][ 5 ] = 4; ang[ 53 ][ 6 ] = 9; ang[ 53 ][ 7 ] = 16; ang[ 53 ][ 8 ] = 7; ang[ 53 ][ 9 ] = 8; ang[ 53 ][ 10 ] = 11; ang[ 53 ][ 11 ] = 0; ang[ 53 ][ 12 ] = 18; ang[ 53 ][ 13 ] = 2; ang[ 53 ][ 14 ] = 5; ang[ 53 ][ 15 ] = 17; ang[ 53 ][ 16 ] = 6; ang[ 53 ][ 17 ] = 14; ang[ 53 ][ 18 ] = 13; ang[ 53 ][ 19 ] = 1;
ang[ 54 ][ 0 ] = 11; ang[ 54 ][ 1 ] = 19; ang[ 54 ][ 2 ] = 13; ang[ 54 ][ 3 ] = 2; ang[ 54 ][ 4 ] = 5; ang[ 54 ][ 5 ] = 14; ang[ 54 ][ 6 ] = 16; ang[ 54 ][ 7 ] = 8; ang[ 54 ][ 8 ] = 9; ang[ 54 ][ 9 ] = 6; ang[ 54 ][ 10 ] = 1; ang[ 54 ][ 11 ] = 10; ang[ 54 ][ 12 ] = 3; ang[ 54 ][ 13 ] = 18; ang[ 54 ][ 14 ] = 17; ang[ 54 ][ 15 ] = 4; ang[ 54 ][ 16 ] = 7; ang[ 54 ][ 17 ] = 15; ang[ 54 ][ 18 ] = 12; ang[ 54 ][ 19 ] = 0;
ang[ 55 ][ 0 ] = 1; ang[ 55 ][ 1 ] = 11; ang[ 55 ][ 2 ] = 12; ang[ 55 ][ 3 ] = 18; ang[ 55 ][ 4 ] = 17; ang[ 55 ][ 5 ] = 15; ang[ 55 ][ 6 ] = 8; ang[ 55 ][ 7 ] = 6; ang[ 55 ][ 8 ] = 16; ang[ 55 ][ 9 ] = 7; ang[ 55 ][ 10 ] = 0; ang[ 55 ][ 11 ] = 19; ang[ 55 ][ 12 ] = 13; ang[ 55 ][ 13 ] = 3; ang[ 55 ][ 14 ] = 4; ang[ 55 ][ 15 ] = 14; ang[ 55 ][ 16 ] = 9; ang[ 55 ][ 17 ] = 5; ang[ 55 ][ 18 ] = 2; ang[ 55 ][ 19 ] = 10;
ang[ 56 ][ 0 ] = 17; ang[ 56 ][ 1 ] = 9; ang[ 56 ][ 2 ] = 0; ang[ 56 ][ 3 ] = 13; ang[ 56 ][ 4 ] = 14; ang[ 56 ][ 5 ] = 7; ang[ 56 ][ 6 ] = 10; ang[ 56 ][ 7 ] = 18; ang[ 56 ][ 8 ] = 4; ang[ 56 ][ 9 ] = 11; ang[ 56 ][ 10 ] = 8; ang[ 56 ][ 11 ] = 3; ang[ 56 ][ 12 ] = 16; ang[ 56 ][ 13 ] = 1; ang[ 56 ][ 14 ] = 6; ang[ 56 ][ 15 ] = 19; ang[ 56 ][ 16 ] = 5; ang[ 56 ][ 17 ] = 15; ang[ 56 ][ 18 ] = 12; ang[ 56 ][ 19 ] = 2;
ang[ 57 ][ 0 ] = 15; ang[ 57 ][ 1 ] = 11; ang[ 57 ][ 2 ] = 17; ang[ 57 ][ 3 ] = 1; ang[ 57 ][ 4 ] = 6; ang[ 57 ][ 5 ] = 18; ang[ 57 ][ 6 ] = 8; ang[ 57 ][ 7 ] = 12; ang[ 57 ][ 8 ] = 14; ang[ 57 ][ 9 ] = 3; ang[ 57 ][ 10 ] = 4; ang[ 57 ][ 11 ] = 13; ang[ 57 ][ 12 ] = 5; ang[ 57 ][ 13 ] = 9; ang[ 57 ][ 14 ] = 10; ang[ 57 ][ 15 ] = 2; ang[ 57 ][ 16 ] = 7; ang[ 57 ][ 17 ] = 19; ang[ 57 ][ 18 ] = 16; ang[ 57 ][ 19 ] = 0;
ang[ 58 ][ 0 ] = 19; ang[ 58 ][ 1 ] = 3; ang[ 58 ][ 2 ] = 15; ang[ 58 ][ 3 ] = 9; ang[ 58 ][ 4 ] = 10; ang[ 58 ][ 5 ] = 12; ang[ 58 ][ 6 ] = 4; ang[ 58 ][ 7 ] = 16; ang[ 58 ][ 8 ] = 6; ang[ 58 ][ 9 ] = 13; ang[ 58 ][ 10 ] = 14; ang[ 58 ][ 11 ] = 1; ang[ 58 ][ 12 ] = 7; ang[ 58 ][ 13 ] = 11; ang[ 58 ][ 14 ] = 8; ang[ 58 ][ 15 ] = 0; ang[ 58 ][ 16 ] = 18; ang[ 58 ][ 17 ] = 2; ang[ 58 ][ 18 ] = 5; ang[ 58 ][ 19 ] = 17;
ang[ 59 ][ 0 ] = 2; ang[ 59 ][ 1 ] = 13; ang[ 59 ][ 2 ] = 19; ang[ 59 ][ 3 ] = 11; ang[ 59 ][ 4 ] = 8; ang[ 59 ][ 5 ] = 16; ang[ 59 ][ 6 ] = 14; ang[ 59 ][ 7 ] = 5; ang[ 59 ][ 8 ] = 10; ang[ 59 ][ 9 ] = 1; ang[ 59 ][ 10 ] = 6; ang[ 59 ][ 11 ] = 9; ang[ 59 ][ 12 ] = 18; ang[ 59 ][ 13 ] = 3; ang[ 59 ][ 14 ] = 4; ang[ 59 ][ 15 ] = 17; ang[ 59 ][ 16 ] = 12; ang[ 59 ][ 17 ] = 0; ang[ 59 ][ 18 ] = 7; ang[ 59 ][ 19 ] = 15;
# Location of the RotationNet fork of Caffe; its python/ subdirectory must be
# on sys.path before `import caffe` below succeeds.
caffe_root = '/opt/caffe/caffe-rotationnet2/'  # Change this to your path.
sys.path.insert(0, caffe_root + 'python')
import caffe
def main(argv):
    """Run RotationNet inference over a listing of rendered views and write an
    evaluation file / confusion matrix via Evaluation_tools.

    NOTE(review): several issues to confirm with the author:
      - `classify` requires a `num_classes` argument but is called below with
        only the predictions, which raises TypeError as written;
      - `batch` is already `8 * views`, yet the loop divides by
        `batch * views`, so most of the data appears to be skipped;
      - the bare `/` divisions assume Python 2 integer division.
    """
    pycaffe_dir = caffe_root + 'python/'  # NOTE(review): unused
    parser = argparse.ArgumentParser()
    # Required arguments: input and output files.
    parser.add_argument(
        "--input_file",
        default="/data/converted/testrotnet.txt",
        help="text file containg the image paths"
    )
    # Optional arguments.
    parser.add_argument(
        "--model_def",
        default="./Training/rotationnet_modelnet40_case1_solver.prototxt",
        help="Model definition file."
    )
    parser.add_argument('--weights', type=int, default=-1)
    parser.add_argument('--views', type=int, default=12)
    parser.add_argument('--log_dir', default='logs', type=str)
    parser.add_argument(
        "--center_only",
        action='store_true',
        default=False,
        help="Switch for prediction from center crop alone instead of " +
        "averaging predictions across crops (default)."
    )
    parser.add_argument(
        "--images_dim",
        default='227,227',
        help="Canonical 'height,width' dimensions of input images."
    )
    parser.add_argument(
        "--mean_file",
        default=os.path.join(caffe_root,
                             'data/ilsvrc12/imagenet_mean.binaryproto'),
        help="Data set image mean of H x W x K dimensions (np array). " +
        "Set to '' for no mean subtraction."
    )
    parser.add_argument(
        "--input_scale",
        type=float,
        default=255,
        help="Multiply input features by this scale before input to net"
    )
    parser.add_argument(
        "--channel_swap",
        default='2,1,0',
        help="Order to permute input channels. The default converts " +
        "RGB -> BGR since BGR is the Caffe default by way of OpenCV."
    )
    args = parser.parse_args()
    # Weights are located in the log directory by iteration number.
    args.pretrained_model = os.path.join(args.log_dir, 'case1_iter_' + str(args.weights) + '.caffemodel')
    image_dims = [int(s) for s in args.images_dim.split(',')]
    channel_swap = [int(s) for s in args.channel_swap.split(',')]
    if args.mean_file:
        mean = get_mean(args.mean_file)
    # NOTE(review): if --mean_file is '', `mean` is unbound when used below.
    caffe.set_mode_gpu()
    # Make classifier.
    classifier = caffe.Classifier(args.model_def, args.pretrained_model,
                                  image_dims=image_dims, mean=mean,
                                  input_scale=1.0, raw_scale=255.0, channel_swap=channel_swap)
    listfiles, labels = read_lists(args.input_file)
    #dataset = Dataset(listfiles, labels, subtract_mean=False, V=20)
    # Load image file.
    args.input_file = os.path.expanduser(args.input_file)
    preds = []
    labels = [int(label) for label in labels]
    total = len(listfiles)  # NOTE(review): unused
    views = args.views
    batch = 8 * views  # images per prediction call (8 objects x `views` views)
    for i in range(len(listfiles) / (batch * views)):
        #im_files = [line.rstrip('\n') for line in open(listfiles[views*i+j])]
        im_files = listfiles[i * batch * views: (i + 1) * batch * views]
        #labels.append(int(im_files[0]))
        #im_files = im_files[2:]
        inputs = [caffe.io.load_image(im_f) for im_f in im_files]
        predictions = classifier.predict(inputs, not args.center_only)
        # NOTE(review): classify() also requires num_classes; confirm the
        # intended value (the prototxt name suggests ModelNet40).
        classified = classify(predictions)
        preds.append(classified)
        print(classified)
    import Evaluation_tools as et
    data = '/data'
    logs = '/logs'
    eval_file = os.path.join(logs, 'rotnet.txt')
    et.write_eval_file(data, eval_file, preds, labels, 'ROTNET')
    et.make_matrix(data, eval_file, logs)
def read_lists(list_of_lists_file):
    """Parse a two-column listing file into parallel path/label sequences.

    Each line of the file holds a view-list path and an integer class
    label separated by whitespace.

    :param list_of_lists_file: path to the listing file
    :return: (listfiles, labels) tuples of equal length
    """
    rows = np.loadtxt(list_of_lists_file, dtype=str).tolist()
    paths = tuple(row[0] for row in rows)
    labels = tuple(int(row[1]) for row in rows)
    return paths, labels
def get_mean(mean_file):
    """Load a Caffe binaryproto mean image and resize it to 227x227.

    Resizing is performed on a [0,1]-normalized copy because
    caffe.io.resize_image expects H x W x K input in that range; the
    original value range is restored afterwards.

    :param mean_file: path to an imagenet-style .binaryproto mean file
    :return: K x H x W numpy array mean image
    """
    image_dims = [227, 227]
    channel_swap = [2, 1, 0]  # NOTE(review): unused in this function
    blob = caffe.proto.caffe_pb2.BlobProto()
    data = open(mean_file, 'rb').read()  # NOTE(review): file handle never closed
    blob.ParseFromString(data)
    arr = np.array(caffe.io.blobproto_to_array(blob))
    mean = arr[0]  # drop the leading batch axis -> K x H x W
    # Resize mean (which requires H x W x K input in range [0,1]).
    in_shape = image_dims
    m_min, m_max = mean.min(), mean.max()
    normal_mean = (mean - m_min) / (m_max - m_min)
    mean = caffe.io.resize_image(normal_mean.transpose((1, 2, 0)),
                                 in_shape).transpose((2, 0, 1)) * (m_max - m_min) + m_min
    return mean
def classify(prediction, num_classes, alligned=True):
    """Fuse RotationNet per-view scores into one predicted class per sample.

    Fix over the original: the divisions that produce `numR` and `nsamp`
    now use floor division (//). Under Python 3 the plain `/` yields
    floats, which breaks range() and array indexing below; under Python 2
    `//` is identical for ints, so behavior is unchanged there.

    Args:
        prediction: 2-D numpy array of raw network outputs, one row per
            rendered view. Each row holds numR blocks of
            (num_classes + 1) scores; the extra entry per block is the
            "incorrect view" score used as a normalizer.
            NOTE: this array is modified in place.
        num_classes: number of object categories.
        alligned (sic, original spelling kept for callers): if True,
            only cyclic shifts of the view order are considered;
            otherwise the precomputed orderings in the module-level
            `ang` table are enumerated.

    Returns:
        List with one predicted class id per sample.
    """
    clsnum = num_classes
    classified = []
    scores = prediction
    # Floor division keeps these usable as loop bounds / indices on Py3.
    numR = scores.shape[1] // (clsnum + 1)
    nsamp = len(scores) // numR
    # Normalize every class score by its block's "incorrect view" score,
    # then zero that entry so it cannot win the argmax below.
    for i in range(0, len(scores)):
        for j in range(0, numR):
            for k in range(0, clsnum):
                scores[i][j * (clsnum + 1) + k] = scores[i][j * (clsnum + 1) + k] / scores[i][j * (clsnum + 1) + clsnum]
            scores[i][j * (clsnum + 1) + clsnum] = 0
    # From here on the per-view stride deliberately includes the zeroed slot.
    clsnum = (clsnum + 1)
    if alligned:
        for n in range(nsamp):
            s = np.ones(clsnum * numR)
            for i in range(numR):
                for j in range(clsnum):
                    for k in range(numR):
                        # Cyclic shift of the view order by i positions.
                        idx = i + k
                        if idx > (numR - 1):
                            idx = idx - numR
                        s[i * clsnum + j] = s[i * clsnum + j] * scores[n * numR + k][idx * clsnum + j]
            classified.append(np.argmax(s) % clsnum)
    else:
        for n in range(nsamp):
            s = np.ones(clsnum * ang.shape[0])
            for i in range(ang.shape[0]):
                for j in range(clsnum):
                    for k in range(numR):
                        s[i * clsnum + j] = s[i * clsnum + j] * scores[n * numR + ang[i][k]][k * clsnum + j]
            classified.append(np.argmax(s) % clsnum)
    return classified
if __name__ == '__main__':
    # argv is forwarded but unused: main() parses sys.argv itself via argparse.
    main(sys.argv)
| [
"miroslavkrabec@seznam.cz"
] | miroslavkrabec@seznam.cz |
3208c332acc55b368ab94bdf44b3b9837d19ccc6 | 7298d1692c6948f0880e550d6100c63a64ce3ea1 | /catalog-configs/Vocab/oly_residue_feature_residue_range_granularity_term.py | 2a6871af85b0c4d5dfc6549841f414bf74e6de31 | [] | no_license | informatics-isi-edu/protein-database | b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d | ce4be1bf13e6b1c22f3fccbb513824782609991f | refs/heads/master | 2023-08-16T10:24:10.206574 | 2023-07-25T23:10:42 | 2023-07-25T23:10:42 | 174,095,941 | 2 | 0 | null | 2023-06-16T19:44:43 | 2019-03-06T07:39:14 | Python | UTF-8 | Python | false | false | 6,309 | py | import argparse
from attrdict import AttrDict
from deriva.core import ErmrestCatalog, get_credential, DerivaPathError
from deriva.utils.catalog.components.deriva_model import DerivaCatalog
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
# Globus group UUID URLs used by the ACL definitions below, keyed by role name.
groups = {
    'pdb-admin': 'https://auth.globus.org/0b98092c-3c41-11e9-a8c8-0ee7d80087ee',
    'pdb-reader': 'https://auth.globus.org/8875a770-3c40-11e9-a8c8-0ee7d80087ee',
    'pdb-writer': 'https://auth.globus.org/c94a1e5c-3c40-11e9-a5d1-0aacc65bfe9a',
    'pdb-curator': 'https://auth.globus.org/eef3e02a-3c40-11e9-9276-0edc9bdd56a6',
    'isrd-staff': 'https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b'
}
table_name = 'oly_residue_feature_residue_range_granularity_term'
schema_name = 'Vocab'
column_annotations = {
'RCT': {
chaise_tags.display: {
'name': 'Creation Time'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RMT': {
chaise_tags.display: {
'name': 'Last Modified Time'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RCB': {
chaise_tags.display: {
'name': 'Created By'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RMB': {
chaise_tags.display: {
'name': 'Modified By'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'ID': {},
'URI': {},
'Name': {},
'Description': {},
'Synonyms': {},
'Owner': {}
}
column_comment = {
'ID': 'The preferred Compact URI (CURIE) for this term.',
'URI': 'The preferred URI for this term.',
'Name': 'The preferred human-readable name for this term.',
'Description': 'A longer human-readable description of this term.',
'Synonyms': 'Alternate human-readable names for this term.',
'Owner': 'Group that can update the record.'
}
column_acls = {}
column_acl_bindings = {}
column_defs = [
em.Column.define(
'ID',
em.builtin_types['ermrest_curie'],
nullok=False,
default='PDB:{RID}',
comment=column_comment['ID'],
),
em.Column.define(
'URI',
em.builtin_types['ermrest_uri'],
nullok=False,
default='/id/{RID}',
comment=column_comment['URI'],
),
em.Column.define(
'Name', em.builtin_types['text'], nullok=False, comment=column_comment['Name'],
),
em.Column.define(
'Description',
em.builtin_types['markdown'],
nullok=False,
comment=column_comment['Description'],
),
em.Column.define('Synonyms', em.builtin_types['text[]'], comment=column_comment['Synonyms'],
),
em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
),
]
visible_columns = {
'*': [
'RID', 'Name', 'Description', 'ID', 'URI',
['Vocab', 'oly_residue_feature_residue_range_granularity_term_RCB_fkey'],
['Vocab', 'oly_residue_feature_residue_range_granularity_term_RMB_fkey'], 'RCT', 'RMT',
['Vocab', 'oly_residue_feature_residue_range_granularity_term_Owner_fkey']
]
}
table_display = {'row_name': {'row_markdown_pattern': '{{{Name}}}'}}
table_annotations = {
chaise_tags.table_display: table_display,
chaise_tags.visible_columns: visible_columns,
}
table_comment = 'A set of controlled vocabular terms.'
table_acls = {}
table_acl_bindings = {
'self_service_group': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['Owner'],
'projection_type': 'acl'
},
'self_service_creator': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['RCB'],
'projection_type': 'acl'
}
}
key_defs = [
em.Key.define(
['RID'],
constraint_names=[('Vocab', 'oly_residue_feature_residue_range_granularity_term_RIDkey1')],
),
em.Key.define(
['ID'],
constraint_names=[('Vocab', 'oly_residue_feature_residue_range_granularity_term_IDkey1')],
),
em.Key.define(
['URI'],
constraint_names=[('Vocab', 'oly_residue_feature_residue_range_granularity_term_URIkey1')],
),
]
fkey_defs = [
em.ForeignKey.define(
['Owner'],
'public',
'Catalog_Group', ['ID'],
constraint_names=[
('Vocab', 'oly_residue_feature_residue_range_granularity_term_Owner_fkey')
],
acls={
'insert': [groups['pdb-curator']],
'update': [groups['pdb-curator']]
},
acl_bindings={
'set_owner': {
'types': ['update', 'insert'],
'scope_acl': ['*'],
'projection': ['ID'],
'projection_type': 'acl'
}
},
),
em.ForeignKey.define(
['RCB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[('Vocab', 'oly_residue_feature_residue_range_granularity_term_RCB_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
),
em.ForeignKey.define(
['RMB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[('Vocab', 'oly_residue_feature_residue_range_granularity_term_RMB_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
),
]
table_def = em.Table.define(
table_name,
column_defs=column_defs,
key_defs=key_defs,
fkey_defs=fkey_defs,
annotations=table_annotations,
acls=table_acls,
acl_bindings=table_acl_bindings,
comment=table_comment,
provide_system=True
)
def main(catalog, mode, replace=False, really=False):
    """Apply this module's vocabulary table definition to the target catalog.

    :param catalog: DerivaCatalog to update
    :param mode: update mode string consumed by CatalogUpdater
        (semantics defined in deriva.utils.catalog.manage.update_catalog)
    :param replace: presumably drop-and-recreate when the table exists —
        forwarded unchanged to the updater; confirm against CatalogUpdater
    :param really: extra confirmation flag forwarded to the updater
    """
    updater = CatalogUpdater(catalog)
    updater.update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
host = 'pdb.isrd.isi.edu'
catalog_id = 5
mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
catalog = DerivaCatalog(host, catalog_id=catalog_id, validate=False)
main(catalog, mode, replace)
| [
"carl@isi.edu"
] | carl@isi.edu |
2459f55cbfc596a0348544e1082025d1c74df70f | 5d22d9b2cb5cad7970c1055aeef55d2e2a5acb8e | /leetcode/medium/Single_Number_II.py | add9ece16433795ee1263f81466aec110f3a062b | [
"MIT"
] | permissive | shhuan/algorithms | 36d70f1ab23dab881bf1a15573fbca7b2a3f4235 | 2830c7e2ada8dfd3dcdda7c06846116d4f944a27 | refs/heads/master | 2021-05-07T14:21:15.362588 | 2017-11-07T08:20:16 | 2017-11-07T08:20:16 | 109,799,698 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,869 | py | # -*- coding: utf-8 -*-
"""
created by huash06 at 2015-04-14 09:08
Given an array of integers, every element appears three times except for one. Find that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
本题目抄袭网上解答
http://blog.csdn.net/u011960402/article/details/17750993
"""
__author__ = 'huash06'
import sys
import os
class Solution:
    """LeetCode "Single Number II": every element of the array appears three
    times except one, which appears exactly once. Both solutions run in
    O(n) time and O(1) extra space.

    Fix over the original: the leftover debug ``print(count)`` inside
    singleNumber has been removed, and the explanatory comments are
    translated to English.
    """

    # @param A, a list of integer
    # @return an integer
    def singleNumber(self, A):
        """Find the unique element by per-bit counting.

        For each of the 32 bit positions, sum that bit across all numbers.
        Elements appearing three times contribute a multiple of 3 to every
        column, so (column sum % 3) reconstructs the bits of the unique
        element. The fixed-size 32-entry counter array keeps space O(1).

        Returns -1 for an empty input.
        """
        if not A:
            return -1
        count = [0] * 32
        for v in A:
            for i in range(32):
                if ((v >> i) & 1) > 0:
                    count[i] += 1
        result = 0
        for i in range(31, -1, -1):
            result <<= 1
            if count[i] % 3 != 0:
                result += 1
        # Python ints are unbounded, so interpret bit 31 as the sign bit to
        # recover negative 32-bit values.
        if result & (1 << 31):
            result -= (1 << 32)
        return result

    def singleNumber2(self, A):
        """Find the unique element with a bit-mask state machine.

        ``ones`` holds the bits seen once (mod 3), ``twos`` the bits seen
        twice. Once a bit has been seen three times it is cleared from both
        masks, so after the scan ``ones`` is exactly the unique number.
        """
        ones = 0
        twos = 0
        for v in A:
            twos |= ones & v
            ones ^= v
            threes = ones & twos
            ones &= ~threes
            twos &= ~threes
        return ones
s = Solution()
# print(s.singleNumber([1, 2, 3, 1, 1, 3, 3]))
# print(s.singleNumber([6, 3, 3, 3]))
print(s.singleNumber([-2,-2,1,1,-3,1,-3,-3,-4,-2]))
print(s.singleNumber2([-2,-2,1,1,-3,1,-3,-3,-4,-2])) | [
"shuangquanhuang@gmail.com"
] | shuangquanhuang@gmail.com |
5639785d939846e909c75cbee27aa23627cd0be5 | ff60aaabe366ebd8f60b8e0e66a86896553f32a3 | /3)printrepeatedterm.py | 6f2a348d23c6f7fac53e93f03f6698939d5cb112 | [] | no_license | ramyasutraye/Python-6 | cb2f55339f6b74dffe6f73551f3554703e17b673 | 1cc1c8a9f0045b72e1d55ef1bb3cf48d8df8612c | refs/heads/master | 2020-04-12T02:05:21.861754 | 2018-09-21T19:02:29 | 2018-09-21T19:02:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | b=[]
c=[]
n=int(input("n"))
if(1<=n<=100000):
d = 0
for i in range(0,n):
a=int(input("a"))
b.append(a)
if(i==b[i]):
if(i>1):
c.append(i)
d=d+1
if(d>0):
for i in range(0,d):
print(c[i],end='')
else:
print(-1) | [
"noreply@github.com"
] | ramyasutraye.noreply@github.com |
2632cd2dc9be90088cf3c9cb11bca712c914b92d | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_17718.py | ecbea9a552d27059af7d9e0f0c3939276c87fda1 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | # Django Admin image upload not saving on database
super(Categorias, self).save()
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
1803103160982a7a1919f6e7222975837423e07b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_74/906.py | 6098eb329012ba22538c600f690fc3242f5cc393 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py |
f=open("A-large.in.txt","r")
seq=f.readlines()
f.close()
N=eval(seq[0])
resultats=[]
for i in range(1,N+1):
print("Case #"+str(i))
l=seq[i].split()
n=eval(l[0])
j=1
tabO=[]
tabB=[]
ordre=[]
while j!=len(l):
letter=l[j]
j=j+1
number=eval(l[j])
j=j+1
ordre.append(letter)
if letter=='O':
tabO.append(number)
elif letter=='B':
tabB.append(number)
CO=1
CB=1
result=0
iO=0
iB=0
move=0
while move!=n:
letter=ordre[move]
if letter=='O':
target=tabO[iO]
if iB<len(tabB):
target2=tabB[iB]
if CB<target2:
CB=CB+1
elif CB>target2:
CB=CB-1
if CO<target:
CO=CO+1
elif CO>target:
CO=CO-1
else:
iO=iO+1
move=move+1
if letter=='B':
target=tabB[iB]
if iO<len(tabO):
target2=tabO[iO]
if CO<target2:
CO=CO+1
elif CO>target2:
CO=CO-1
if CB<target:
CB=CB+1
elif CB>target:
CB=CB-1
else:
iB=iB+1
move=move+1
result=result+1
resultats.append("Case #"+str(i)+": "+str(result)+"\n")
f=open("o.in","w")
f.writelines(resultats)
f.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a0b9911f771263a3ccb57196cec783570a84af37 | a83bafc38b514a0339a5991be15870551ac49681 | /test/test_size.py | fa8c40e84c918eb4dedd5c56839a82898118cc61 | [] | no_license | bimdata/python-api-client | 4ec2f81e404ef88d3a7e4d08e18965b598c567a2 | c9b6ea0fbb4729b2a1c10522bdddfe08d944739d | refs/heads/master | 2023-08-17T13:38:43.198097 | 2023-08-09T12:48:12 | 2023-08-09T12:48:12 | 131,603,315 | 0 | 4 | null | 2022-10-10T15:21:26 | 2018-04-30T14:06:15 | Python | UTF-8 | Python | false | false | 850 | py | """
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1 (v1)
Contact: support@bimdata.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import bimdata_api_client
from bimdata_api_client.model.size import Size
class TestSize(unittest.TestCase):
    """Size unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures required yet.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testSize(self):
        """Test Size"""
        # FIXME: construct object with mandatory attributes with example values
        # model = Size() # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this module directly through the unittest CLI.
    unittest.main()
| [
"infra@bimdata.io"
] | infra@bimdata.io |
7cd2ab7936c6d68d7f3f14fd20b81bad6a522aee | 4dd6a8d8024a72a3e2d8e71e86fd34888a149902 | /keras/keras31_GRU.py | 6f2d253b30f0ee9f5173859af80147f7d052dc83 | [] | no_license | KOOKDONGHUN/study | d483b125d349956b325bc5f4d99a4a95dd80ccbc | 517effbb19ddc820d53f0a6194463e7687467af6 | refs/heads/master | 2023-01-14T09:13:48.346502 | 2020-11-20T09:03:25 | 2020-11-20T09:03:25 | 259,818,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | from numpy import array
from keras.models import Sequential
from keras.layers import Dense, LSTM, GRU
# 1. 데이터
x = array([[1,2,3],[2,3,4],[3,4,5],[4,5,6]]) #
x = x.reshape(x.shape[0], x.shape[1], 1) #
y = array([4,5,6,7]) #
print(x.shape) #
print(y.shape) #
# 2. 모델구성
model = Sequential()
model.add(GRU(17,activation='relu',input_shape=(3,1))) #
model.add(Dense(42))
model.add(Dense(39))
model.add(Dense(41))
model.add(Dense(1)) # 아웃풋 레이어
model.summary()
# 3. 실행
from keras.callbacks import EarlyStopping
els = EarlyStopping(monitor='loss', patience=8, mode='auto')
model.compile(optimizer='adam',loss = 'mse')
model.fit(x,y,epochs=200,batch_size=1,callbacks=[els]) #
# 4. 테스트
x_predict = array([5,6,7])
x_predict = x_predict.reshape(1,3,1)
print(x_predict,"\n",x_predict.shape)
y_predict = model.predict(x_predict)
print(y_predict) | [
"dh3978@naver.com"
] | dh3978@naver.com |
a98edef9f2a80afb42119e0f7f8ae31e94f83631 | 50f16b7cfa04a71d6d3f0d525d964eb217f419d0 | /mi/instrument/wetlabs/fluorometer/flort_d/test/test_driver.py | 338aea75349b8cb19d62eff5fab6cf207397685b | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | cameron55445/mi-instrument | 25a7dd35cfedf68b4dd04fc2a7d837036e768752 | 1a74ebf7c31131c856fc06d91bef7c72fcfadb54 | refs/heads/master | 2021-01-18T16:53:25.208318 | 2015-08-05T00:15:56 | 2015-08-05T00:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,337 | py | """
@package mi.instrument.wetlabs.fluorometer.flort_d.test.test_driver
@file marine-integrations/mi/instrument/wetlabs/fluorometer/flort_d/driver.py
@author Art Teranishi
@brief Test cases for flort_d driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Art Teranishi'
__license__ = 'Apache 2.0'
import gevent
from mock import Mock
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.time_tools import get_timestamp_delayed
from mi.core.instrument.chunker import StringChunker
from mi.instrument.wetlabs.fluorometer.flort_d.driver import InstrumentDriver, FlortDMNU_Particle, FlortDSample_Particle
from mi.instrument.wetlabs.fluorometer.flort_d.driver import DataParticleType
from mi.instrument.wetlabs.fluorometer.flort_d.driver import InstrumentCommand
from mi.instrument.wetlabs.fluorometer.flort_d.driver import ProtocolState
from mi.instrument.wetlabs.fluorometer.flort_d.driver import ProtocolEvent
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Capability
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Parameter
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Protocol
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Prompt
from mi.instrument.wetlabs.fluorometer.flort_d.driver import FlortDMNU_ParticleKey
from mi.instrument.wetlabs.fluorometer.flort_d.driver import FlortDSample_ParticleKey
from mi.instrument.wetlabs.fluorometer.flort_d.driver import MNU_REGEX
from mi.instrument.wetlabs.fluorometer.flort_d.driver import RUN_REGEX
from mi.instrument.wetlabs.fluorometer.flort_d.driver import NEWLINE
from mi.core.instrument.instrument_driver import DriverProtocolState, DriverConfigKey, ResourceAgentState
# SAMPLE DATA FOR TESTING
from mi.instrument.wetlabs.fluorometer.flort_d.test.sample_data import SAMPLE_MNU_RESPONSE
from mi.instrument.wetlabs.fluorometer.flort_d.test.sample_data import SAMPLE_SAMPLE_RESPONSE
from mi.instrument.wetlabs.fluorometer.flort_d.test.sample_data import SAMPLE_MET_RESPONSE
from mi.core.exceptions import InstrumentCommandException, SampleException
###
# Driver parameters for the tests
###
# Register the FLORT-D driver metadata and startup configuration with the IDK
# test framework; the scheduled wiper / clock-sync / acquire-status jobs all
# default to a ten-minute interval.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.wetlabs.fluorometer.flort_d.driver',
    driver_class="InstrumentDriver",
    instrument_agent_resource_id='3DLE2A',
    instrument_agent_name='wetlabs_fluorometer_flort_d',
    instrument_agent_packet_config=DataParticleType(),
    driver_startup_config={
        DriverConfigKey.PARAMETERS: {Parameter.RUN_WIPER_INTERVAL: '00:10:00',
                                     Parameter.RUN_CLOCK_SYNC_INTERVAL: '00:10:00',
                                     Parameter.RUN_ACQUIRE_STATUS_INTERVAL: '00:10:00'}}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python, mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class DriverTestMixinSub(DriverTestMixin):
    """
    Mixin class used for storing data particle constants and common data assertion methods.

    The tables below drive the generic assert helpers inherited from
    DriverTestMixin: each entry maps a parameter or particle key to its expected
    type and, where applicable, whether it is read only, settable in direct
    access, a startup parameter, plus a default and a representative test value.
    """
    # Create some short names for the parameter test config keys.
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES

    # Driver class under test, used by the mixin's schema assertions.
    _Driver = InstrumentDriver

    ###
    # Parameter and Type Definitions
    ###
    # Expected driver parameter metadata (type / access / startup / defaults).
    # NOTE(review): interval-style values appear to be HH:MM:SS strings and DATE
    # is MM/DD/YY — confirm against the instrument IOS.
    _driver_parameters = {
        # Parameters defined in the IOS
        Parameter.SERIAL_NUM: {TYPE: str, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: 'Ser 123.123.12'},
        Parameter.FIRMWARE_VERSION: {TYPE: str, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: 'Ber 16.02'},
        Parameter.MEASUREMENTS_PER_REPORTED: {TYPE: int, READONLY: False, DA: False, STARTUP: False, DEFAULT: None, VALUE: 18},
        Parameter.MEASUREMENT_1_DARK_COUNT: {TYPE: int, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: 51},
        Parameter.MEASUREMENT_1_SLOPE: {TYPE: float, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: 1.814},
        Parameter.MEASUREMENT_2_DARK_COUNT: {TYPE: int, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: 67},
        Parameter.MEASUREMENT_2_SLOPE: {TYPE: float, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: .0345},
        Parameter.MEASUREMENT_3_DARK_COUNT: {TYPE: int, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: 49},
        Parameter.MEASUREMENT_3_SLOPE: {TYPE: float, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: 9.1234},
        Parameter.MEASUREMENTS_PER_PACKET: {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: None, VALUE: 7},
        Parameter.PACKETS_PER_SET: {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: 0, VALUE: 0},
        Parameter.PREDEFINED_OUTPUT_SEQ: {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: 0, VALUE: 0},
        Parameter.BAUD_RATE: {TYPE: int, READONLY: True, DA: False, STARTUP: False, DEFAULT: 1, VALUE: 1},
        Parameter.RECORDING_MODE: {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: 1, VALUE: 1},
        Parameter.DATE: {TYPE: str, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: '01/01/01'},
        Parameter.TIME: {TYPE: str, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: '12:00:03'},
        Parameter.SAMPLING_INTERVAL: {TYPE: str, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: '00:05:00'},
        Parameter.MANUAL_MODE: {TYPE: int, READONLY: True, DA: False, STARTUP: False, DEFAULT: 0, VALUE: 0},
        Parameter.MANUAL_START_TIME: {TYPE: str, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: '17:00:00'},
        Parameter.INTERNAL_MEMORY: {TYPE: int, READONLY: True, DA: False, STARTUP: False, DEFAULT: None, VALUE: 4095},
        Parameter.RUN_WIPER_INTERVAL: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: '00:00:00', VALUE: '00:01:00'},
        Parameter.RUN_CLOCK_SYNC_INTERVAL: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: '00:00:00', VALUE: '12:00:00'},
        Parameter.RUN_ACQUIRE_STATUS_INTERVAL: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: '00:00:00', VALUE: '12:00:00'}
    }

    # Each capability and the protocol state(s) from which it may be executed.
    _driver_capabilities = {
        Capability.RUN_WIPER: {STATES: [ProtocolState.COMMAND]},
        Capability.CLOCK_SYNC: {STATES: [ProtocolState.COMMAND]},
        Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND]}
    }

    # Expected fields of a parsed $mnu (menu/status) response particle.
    _flortD_mnu_parameters = {
        FlortDMNU_ParticleKey.SERIAL_NUM: {TYPE: unicode, VALUE: 'BBFL2W-993', REQUIRED: True},
        FlortDMNU_ParticleKey.FIRMWARE_VER: {TYPE: unicode, VALUE: 'Triplet5.20', REQUIRED: True},
        FlortDMNU_ParticleKey.AVE: {TYPE: int, VALUE: 1, REQUIRED: True},
        FlortDMNU_ParticleKey.PKT: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDMNU_ParticleKey.M1D: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDMNU_ParticleKey.M2D: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDMNU_ParticleKey.M3D: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDMNU_ParticleKey.M1S: {TYPE: float, VALUE: 1.000E+00, REQUIRED: True},
        FlortDMNU_ParticleKey.M2S: {TYPE: float, VALUE: 1.000E+00, REQUIRED: True},
        FlortDMNU_ParticleKey.M3S: {TYPE: float, VALUE: 1.000E+00, REQUIRED: True},
        FlortDMNU_ParticleKey.SEQ: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDMNU_ParticleKey.RAT: {TYPE: int, VALUE: 19200, REQUIRED: True},
        FlortDMNU_ParticleKey.SET: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDMNU_ParticleKey.REC: {TYPE: int, VALUE: 1, REQUIRED: True},
        FlortDMNU_ParticleKey.MAN: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDMNU_ParticleKey.INT: {TYPE: unicode, VALUE: '00:00:10', REQUIRED: True},
        FlortDMNU_ParticleKey.DAT: {TYPE: unicode, VALUE: '07/11/13', REQUIRED: True},
        FlortDMNU_ParticleKey.CLK: {TYPE: unicode, VALUE: '12:48:34', REQUIRED: True},
        FlortDMNU_ParticleKey.MST: {TYPE: unicode, VALUE: '12:48:31', REQUIRED: True},
        FlortDMNU_ParticleKey.MEM: {TYPE: int, VALUE: 4095, REQUIRED: True}
    }

    # Expected fields of a parsed autosample data particle.
    _flortD_sample_parameters = {
        FlortDSample_ParticleKey.date_string: {TYPE: unicode, VALUE: '07/16/13', REQUIRED: True},
        FlortDSample_ParticleKey.time_string: {TYPE: unicode, VALUE: '09:33:06', REQUIRED: True},
        FlortDSample_ParticleKey.wave_beta: {TYPE: int, VALUE: 700, REQUIRED: True},
        FlortDSample_ParticleKey.raw_sig_beta: {TYPE: int, VALUE: 4130, REQUIRED: True},
        FlortDSample_ParticleKey.wave_chl: {TYPE: int, VALUE: 695, REQUIRED: True},
        FlortDSample_ParticleKey.raw_sig_chl: {TYPE: int, VALUE: 1018, REQUIRED: True},
        FlortDSample_ParticleKey.wave_cdom: {TYPE: int, VALUE: 460, REQUIRED: True},
        FlortDSample_ParticleKey.raw_sig_cdom: {TYPE: int, VALUE: 4130, REQUIRED: True},
        FlortDSample_ParticleKey.raw_temp: {TYPE: int, VALUE: 525, REQUIRED: True},
        FlortDSample_ParticleKey.SIG_1_OFFSET: {TYPE: float, VALUE: 0, REQUIRED: True},
        FlortDSample_ParticleKey.SIG_2_OFFSET: {TYPE: float, VALUE: 0, REQUIRED: True},
        FlortDSample_ParticleKey.SIG_3_OFFSET: {TYPE: float, VALUE: 0, REQUIRED: True},
        FlortDSample_ParticleKey.SIG_1_SCALE_FACTOR: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDSample_ParticleKey.SIG_2_SCALE_FACTOR: {TYPE: int, VALUE: 0, REQUIRED: True},
        FlortDSample_ParticleKey.SIG_3_SCALE_FACTOR: {TYPE: int, VALUE: 0, REQUIRED: True}
    }

    ###
    # Driver Parameter Methods
    ###
    def assert_particle_mnu(self, data_particle, verify_values=False):
        """
        Verify a flortd_mnu particle against the expected key table above.
        @param data_particle: FlortDMNU_ParticleKey data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_keys(FlortDMNU_ParticleKey, self._flortD_mnu_parameters)
        self.assert_data_particle_header(data_particle, DataParticleType.FLORTD_MNU)
        self.assert_data_particle_parameters(data_particle, self._flortD_mnu_parameters, verify_values)

    def assert_particle_sample(self, data_particle, verify_values=False):
        """
        Verify a flortd_sample particle against the expected key table above.
        @param data_particle: FlortDSample_ParticleKey data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_keys(FlortDSample_ParticleKey, self._flortD_sample_parameters)
        self.assert_data_particle_header(data_particle, DataParticleType.FLORTD_SAMPLE)
        self.assert_data_particle_parameters(data_particle, self._flortD_sample_parameters, verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, DriverTestMixinSub):
    """
    Unit tests for the FLORT-D driver/protocol. These exercise parsing,
    capability filtering and command building directly, without a port agent
    or a live instrument.
    """

    def setUp(self):
        InstrumentDriverUnitTestCase.setUp(self)

    def test_driver_enums(self):
        """
        Verify that all driver enumerations have no duplicate values that might cause
        confusion. Also do a little extra validation for the capabilities.
        """
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(InstrumentCommand())

        # Test capabilities for duplicates, then verify that capabilities is a
        # subset of protocol events.
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())

    def test_driver_schema(self):
        """
        Get the driver schema and verify it is configured properly.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)

    def test_chunker(self):
        """
        Test the chunker and verify the particles created.
        """
        chunker = StringChunker(Protocol.sieve_function)

        # Every response type must chunk correctly whole, surrounded by noise,
        # fragmented into 32-byte pieces, and combined with itself.
        for sample in (SAMPLE_MNU_RESPONSE, SAMPLE_MET_RESPONSE, SAMPLE_SAMPLE_RESPONSE):
            self.assert_chunker_sample(chunker, sample)
            self.assert_chunker_sample_with_noise(chunker, sample)
            self.assert_chunker_fragmented_sample(chunker, sample, 32)
            self.assert_chunker_combined_sample(chunker, sample)

    def test_corrupt_data_sample(self):
        """
        Verify that corrupted instrument output raises SampleException when a
        particle is generated from it.
        """
        particle = FlortDMNU_Particle(SAMPLE_MNU_RESPONSE.replace('Ave 1', 'Ave foo'))
        with self.assertRaises(SampleException):
            particle.generate()

        particle = FlortDSample_Particle(SAMPLE_SAMPLE_RESPONSE.replace('700', 'foo'))
        with self.assertRaises(SampleException):
            particle.generate()

    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        # Create and initialize the instrument driver with a mock port agent
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)

        self.assert_raw_particle_published(driver, True)

        # Start validating data particles
        self.assert_particle_published(driver, SAMPLE_MNU_RESPONSE, self.assert_particle_mnu, True)
        self.assert_particle_published(driver, SAMPLE_SAMPLE_RESPONSE, self.assert_particle_sample, True)

    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.
        Iterate through available capabilities, and verify that they can pass successfully through the filter.
        Test silly made up capabilities to verify they are blocked by filter.
        """
        mock_callback = Mock(spec="PortAgentClient")
        protocol = Protocol(Prompt, NEWLINE, mock_callback)
        driver_capabilities = Capability().list()
        test_capabilities = Capability().list()

        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")

        # Verify "BOGUS_CAPABILITY" was filtered out.
        # (assertEqual: assertEquals is a deprecated alias.)
        self.assertEqual(sorted(driver_capabilities),
                         sorted(protocol._filter_capabilities(test_capabilities)))

    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected. All states defined in this dict must
        also be defined in the protocol FSM.
        """
        capabilities = {
            ProtocolState.UNKNOWN: [ProtocolEvent.DISCOVER],
            ProtocolState.COMMAND: [ProtocolEvent.GET,
                                    ProtocolEvent.SET,
                                    ProtocolEvent.START_DIRECT,
                                    ProtocolEvent.START_AUTOSAMPLE,
                                    ProtocolEvent.ACQUIRE_STATUS,
                                    ProtocolEvent.RUN_WIPER,
                                    ProtocolEvent.ACQUIRE_SAMPLE,
                                    ProtocolEvent.CLOCK_SYNC],
            ProtocolState.AUTOSAMPLE: [ProtocolEvent.STOP_AUTOSAMPLE,
                                       ProtocolEvent.RUN_WIPER_SCHEDULED,
                                       ProtocolEvent.SCHEDULED_CLOCK_SYNC,
                                       ProtocolEvent.SCHEDULED_ACQUIRE_STATUS,
                                       ProtocolEvent.GET],
            ProtocolState.DIRECT_ACCESS: [ProtocolEvent.STOP_DIRECT,
                                          ProtocolEvent.EXECUTE_DIRECT]
        }
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, capabilities)

    def test_command_response(self):
        """
        Verify general command response parsing:
        1. a valid response parses without error
        2. an 'unrecognized command' response raises InstrumentCommandException
        3. an otherwise-valid response that also contains an error raises too
        """
        mock_callback = Mock()
        protocol = Protocol(Prompt, NEWLINE, mock_callback)

        # Test response with no errors.
        protocol._parse_command_response(SAMPLE_MNU_RESPONSE, None)

        # Test response with 'unrecognized command'. Use assertRaises instead of
        # the old try/except/flag pattern, consistent with test_corrupt_data_sample.
        with self.assertRaises(InstrumentCommandException):
            protocol._parse_command_response('unrecognized command', None)

        # Test correct response with an embedded error.
        with self.assertRaises(InstrumentCommandException):
            protocol._parse_command_response(SAMPLE_MET_RESPONSE + NEWLINE + 'unrecognized command', None)

    def test_run_wiper_response(self):
        """
        Verify run-wiper response parsing:
        1. a successful wiper response ('mvs 1') parses without error
        2. an 'unrecognized command' response raises InstrumentCommandException
        3. a bad wiper status ('mvs 0') raises InstrumentCommandException
        """
        mock_callback = Mock()
        protocol = Protocol(Prompt, NEWLINE, mock_callback)

        # Test response with no errors.
        protocol._parse_run_wiper_response('mvs 1', None)

        # Test response with 'unrecognized command'.
        with self.assertRaises(InstrumentCommandException):
            protocol._parse_run_wiper_response('unrecognized command', None)

        # Test response reporting a failed wiper.
        with self.assertRaises(InstrumentCommandException):
            protocol._parse_run_wiper_response("mvs 0" + NEWLINE, None)

    def test_discover_state(self):
        """
        Test discovering the instrument in the COMMAND state and in the AUTOSAMPLE state.
        Discovery decides the state from whatever response is sitting in the buffers.
        """
        mock_callback = Mock()
        protocol = Protocol(Prompt, NEWLINE, mock_callback)

        # A menu response in the buffers means the instrument is in COMMAND.
        protocol._linebuf = SAMPLE_MNU_RESPONSE
        protocol._promptbuf = SAMPLE_MNU_RESPONSE
        next_state, next_agent_state = protocol._handler_unknown_discover()
        self.assertEqual(next_state, DriverProtocolState.COMMAND)
        self.assertEqual(next_agent_state, ResourceAgentState.IDLE)

        # A sample response in the buffers means the instrument is AUTOSAMPLE-ing.
        protocol._linebuf = SAMPLE_SAMPLE_RESPONSE
        protocol._promptbuf = SAMPLE_SAMPLE_RESPONSE
        next_state, next_agent_state = protocol._handler_unknown_discover()
        self.assertEqual(next_state, DriverProtocolState.AUTOSAMPLE)
        self.assertEqual(next_agent_state, ResourceAgentState.STREAMING)

    def test_create_commands(self):
        """
        Test creating different types of commands:
        1. command with no end of line
        2. simple command with no parameters
        3. command with a parameter
        """
        # Create the operator commands.
        mock_callback = Mock()
        protocol = Protocol(Prompt, NEWLINE, mock_callback)

        # '!!!!!' is sent raw, without a trailing newline.
        cmd = protocol._build_no_eol_command('!!!!!')
        self.assertEqual(cmd, '!!!!!')

        # Simple commands get the newline appended.
        cmd = protocol._build_simple_command('$met')
        self.assertEqual(cmd, '$met' + NEWLINE)
        cmd = protocol._build_simple_command('$mnu')
        self.assertEqual(cmd, '$mnu' + NEWLINE)
        cmd = protocol._build_simple_command('$run')
        self.assertEqual(cmd, '$run' + NEWLINE)

        # Single-parameter commands format the value after the command name.
        cmd = protocol._build_single_parameter_command('$ave', Parameter.MEASUREMENTS_PER_REPORTED, 14)
        self.assertEqual(cmd, '$ave 14' + NEWLINE)
        cmd = protocol._build_single_parameter_command('$m2d', Parameter.MEASUREMENT_2_DARK_COUNT, 34)
        self.assertEqual(cmd, '$m2d 34' + NEWLINE)
        cmd = protocol._build_single_parameter_command('$m1s', Parameter.MEASUREMENT_1_SLOPE, 23.1341)
        self.assertEqual(cmd, '$m1s 23.1341' + NEWLINE)
        cmd = protocol._build_single_parameter_command('$dat', Parameter.DATE, '041014')
        self.assertEqual(cmd, '$dat 041014' + NEWLINE)
        cmd = protocol._build_single_parameter_command('$clk', Parameter.TIME, '010034')
        self.assertEqual(cmd, '$clk 010034' + NEWLINE)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, DriverTestMixinSub):
    """
    Integration tests: exercise the real driver / instrument interaction via
    direct zeromq calls to the driver process.
    """

    def setUp(self):
        InstrumentDriverIntegrationTestCase.setUp(self)

    def test_commands(self):
        """
        Run instrument commands from command mode.
        """
        self.assert_initialize_driver(ProtocolState.COMMAND)

        # $mnu - acquire status while in command mode.
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=MNU_REGEX)
        # $run - put the instrument into autosample.
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        # !!!!! - bring the instrument back to command mode.
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, regex=MNU_REGEX)
        # $mvs - run the wiper.
        self.assert_driver_command(ProtocolEvent.RUN_WIPER, state=ProtocolState.COMMAND, regex=RUN_REGEX)
        # Sync the instrument clock.
        self.assert_driver_command(ProtocolEvent.CLOCK_SYNC, state=ProtocolState.COMMAND)

        # An unknown command must raise InstrumentCommandException.
        self.assert_driver_command_exception('ima_bad_command', exception_class=InstrumentCommandException)

    def test_autosample(self):
        """
        Verify that we can enter streaming and that all particles are produced
        properly.

        Because we have to test for different data particles we can't use
        the common assert_sample_autosample method:
        1. initialize the instrument to COMMAND state
        2. command the instrument to AUTOSAMPLE
        3. verify the sample particle coming in
        4. command the instrument back to COMMAND state
        5. verify the status particle coming in
        """
        self.assert_initialize_driver(ProtocolState.COMMAND)

        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        self.assert_async_particle_generation(DataParticleType.FLORTD_SAMPLE, self.assert_particle_sample, timeout=10)

        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
        self.assert_async_particle_generation(DataParticleType.FLORTD_MNU, self.assert_particle_mnu, timeout=10)

    def test_parameters(self):
        """
        Verify that we can set the parameters:
        1. cannot set read only parameters
        2. can set read/write parameters
        3. can set immutable parameters when flagged as startup
        """
        self.assert_initialize_driver(ProtocolState.COMMAND)

        # Plain read/write parameter.
        self.assert_set(Parameter.MEASUREMENTS_PER_REPORTED, 20)

        # Immutable parameters settable at startup.
        # NOTE: this does not use the startup config because setting a combination of
        # parameters from their default values will cause the instrument to no longer
        # break out of autosample mode. This is a safe way to test setting startup
        # params without the risk of going into autosample mode.
        startup_settable = [
            (Parameter.MEASUREMENTS_PER_PACKET, 18),
            (Parameter.PREDEFINED_OUTPUT_SEQ, 3),
            (Parameter.PACKETS_PER_SET, 10),
            (Parameter.RECORDING_MODE, 1),
            (Parameter.MANUAL_MODE, 1),
            (Parameter.RUN_WIPER_INTERVAL, '05:00:23'),
            (Parameter.RUN_CLOCK_SYNC_INTERVAL, '12:00:00'),
            (Parameter.RUN_ACQUIRE_STATUS_INTERVAL, '00:00:30'),
        ]
        for param, value in startup_settable:
            self.assert_set(param, value, startup=True, no_get=True)
            self.assert_get(param, value)

        # Read only parameters (including immutable ones when not startup):
        # the set must fail and the value must not change.
        rejected_sets = [
            (Parameter.SERIAL_NUM, '12.123.1234'),
            (Parameter.FIRMWARE_VERSION, 'VER123'),
            (Parameter.MEASUREMENTS_PER_PACKET, 16),
            (Parameter.MEASUREMENT_1_DARK_COUNT, 10),
            (Parameter.MEASUREMENT_2_DARK_COUNT, 20),
            (Parameter.MEASUREMENT_3_DARK_COUNT, 30),
            (Parameter.MEASUREMENT_1_SLOPE, 12.00),
            (Parameter.MEASUREMENT_2_SLOPE, 13.00),
            (Parameter.MEASUREMENT_3_SLOPE, 14.00),
            (Parameter.PREDEFINED_OUTPUT_SEQ, 0),
            (Parameter.BAUD_RATE, 2422),
            (Parameter.PACKETS_PER_SET, 0),
            (Parameter.RECORDING_MODE, 0),
            (Parameter.MANUAL_MODE, 0),
            (Parameter.SAMPLING_INTERVAL, "003000"),
            (Parameter.DATE, get_timestamp_delayed("%m/%d/%y")),
            (Parameter.TIME, get_timestamp_delayed("%H:%M:%S")),
            (Parameter.MANUAL_START_TIME, "15:10:45"),
            (Parameter.INTERNAL_MEMORY, 512),
            (Parameter.RUN_WIPER_INTERVAL, "00:00:00"),
            (Parameter.RUN_CLOCK_SYNC_INTERVAL, "00:00:00"),
            (Parameter.RUN_ACQUIRE_STATUS_INTERVAL, "00:00:00"),
        ]
        for param, value in rejected_sets:
            self.assert_set_exception(param, value)

    def test_direct_access(self):
        """
        Verify we can enter and leave the direct access state.
        """
        self.assert_initialize_driver(ProtocolState.COMMAND)
        self.assert_state_change(ProtocolState.COMMAND, 5)

        self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.START_DIRECT)
        self.assert_state_change(ProtocolState.DIRECT_ACCESS, 5)

        self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.STOP_DIRECT)
        self.assert_state_change(ProtocolState.COMMAND, 5)
        log.debug('leaving direct access')
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. The generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(InstrumentDriverQualificationTestCase, DriverTestMixinSub):
    """
    Qualification tests: drive the instrument through the instrument agent for
    final ION integration testing.
    """

    def setUp(self):
        InstrumentDriverQualificationTestCase.setUp(self)

    def test_direct_access_telnet_mode(self):
        """
        Verify while in Direct Access, we can manually set DA parameters. After stopping DA, the instrument
        will enter Command State and any parameters set during DA are reset to previous values. Also verifying
        timeouts with inactivity, with activity, and without activity.
        """
        self.assert_direct_access_start_telnet()
        self.assertTrue(self.tcp_client)

        log.debug("DA Server Started. Adjust DA Parameter.")
        self.tcp_client.send_data("$pkt 128" + NEWLINE)
        self.tcp_client.expect("Pkt 128")
        log.debug("DA Parameter Measurements_per_packet_value Updated")

        log.debug("DA Server Started. Adjust DA Parameter.")
        self.tcp_client.send_data("$ave 20" + NEWLINE)
        self.tcp_client.expect("Ave 20")
        log.debug("DA Parameter $ave Updated")

        log.debug("DA Server Started. Adjust DA Parameter.")
        self.tcp_client.send_data("$seq 1" + NEWLINE)
        self.tcp_client.expect("Seq 1")
        log.debug("DA Parameter $seq Updated")

        log.debug("DA Server Started. Adjust DA Parameter.")
        self.tcp_client.send_data("$man 1" + NEWLINE)
        self.tcp_client.expect("Man 1")
        log.debug("DA Parameter $man Updated")

        log.debug("DA Server Started. Adjust DA Parameter.")
        self.tcp_client.send_data("$rec 1" + NEWLINE)
        self.tcp_client.expect("Rec 1")
        log.debug("DA Parameter $rec Updated")

        log.debug("DA Server Started. Adjust DA Parameter.")
        self.tcp_client.send_data("$set 5" + NEWLINE)
        self.tcp_client.expect("Set 5")
        log.debug("DA Parameter $set Updated")

        self.assert_direct_access_stop_telnet()

        # Verify the settings got restored.
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 10)
        self.assert_get_parameter(Parameter.MEASUREMENTS_PER_PACKET, 0)
        self.assert_get_parameter(Parameter.MEASUREMENTS_PER_REPORTED, 18)
        self.assert_get_parameter(Parameter.PREDEFINED_OUTPUT_SEQ, 0)
        self.assert_get_parameter(Parameter.MANUAL_MODE, 0)
        self.assert_get_parameter(Parameter.RECORDING_MODE, 0)
        # Fix: the original asserted RECORDING_MODE twice (copy/paste) and never
        # verified $set; check that PACKETS_PER_SET was restored to its default.
        self.assert_get_parameter(Parameter.PACKETS_PER_SET, 0)

        ###
        # Test direct access inactivity timeout
        ###
        self.assert_direct_access_start_telnet(inactivity_timeout=30, session_timeout=90)
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

        ###
        # Test session timeout without activity
        ###
        self.assert_direct_access_start_telnet(inactivity_timeout=120, session_timeout=30)
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

        ###
        # Test direct access session timeout with activity
        ###
        self.assert_direct_access_start_telnet(inactivity_timeout=30, session_timeout=60)
        # Send a little activity every 15 seconds to keep DA alive.
        # Fix: the original loop was `range(1, 2, 3)`, which yields exactly one
        # value (1), so the keep-alive only ran once; iterate three times so the
        # session outlives the 30 s inactivity timeout.
        for _ in range(3):
            self.tcp_client.send_data(NEWLINE)
            log.debug("Sending a little keep alive communication, sleeping for 15 seconds")
            gevent.sleep(15)

        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 45)

    def test_direct_access_telnet_mode_autosample(self):
        """
        Verify Direct Access can start autosampling for the instrument, and if stopping DA, the
        driver will resort to Autosample State. Also, testing disconnect.
        """
        self.assert_direct_access_start_telnet()
        self.assertTrue(self.tcp_client)

        log.debug("DA Server Started. Adjust DA Parameter.")
        self.tcp_client.send_data("$run" + NEWLINE)
        self.tcp_client.expect("mvs 1")
        log.debug("DA autosample started")

        # Assert if stopping DA while autosampling, discover will put driver into Autosample state.
        self.assert_direct_access_stop_telnet()
        self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, timeout=10)

    def test_autosample(self):
        """
        Start and stop autosample through the agent.
        """
        self.assert_enter_command_mode()
        self.assert_start_autosample()
        self.assert_stop_autosample()

    def test_get_set_parameters(self):
        """
        Verify that all parameters can be get/set properly. This includes ensuring that
        read only parameters cannot be set.
        """
        self.assert_enter_command_mode()

        # Read/write parameter.
        self.assert_set_parameter(Parameter.MEASUREMENTS_PER_REPORTED, 20, verify=True)

        # Read only parameters with known defaults.
        self.assert_get_parameter(Parameter.MEASUREMENTS_PER_PACKET, 0)
        self.assert_get_parameter(Parameter.PREDEFINED_OUTPUT_SEQ, 0)
        self.assert_get_parameter(Parameter.PACKETS_PER_SET, 0)
        self.assert_get_parameter(Parameter.RECORDING_MODE, 0)
        self.assert_get_parameter(Parameter.MANUAL_MODE, 0)
        self.assert_get_parameter(Parameter.RUN_WIPER_INTERVAL, "00:10:00")
        self.assert_get_parameter(Parameter.RUN_CLOCK_SYNC_INTERVAL, "00:10:00")
        self.assert_get_parameter(Parameter.RUN_ACQUIRE_STATUS_INTERVAL, "00:10:00")

        # NOTE: these parameters have no default values and cannot be tested:
        #   MEASUREMENT_{1,2,3}_DARK_COUNT, MEASUREMENT_{1,2,3}_SLOPE,
        #   SERIAL_NUM, FIRMWARE_VERSION, SAMPLING_INTERVAL, DATE, TIME,
        #   MANUAL_START_TIME, INTERNAL_MEMORY, BAUD_RATE

    def test_get_capabilities(self):
        """
        @brief Walk through all driver protocol states and verify capabilities
               returned by get_current_capabilities
        """
        ##################
        # Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [ProtocolEvent.ACQUIRE_SAMPLE,
                                                   ProtocolEvent.ACQUIRE_STATUS,
                                                   ProtocolEvent.CLOCK_SYNC,
                                                   ProtocolEvent.START_AUTOSAMPLE,
                                                   ProtocolEvent.START_DIRECT,
                                                   ProtocolEvent.RUN_WIPER],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }
        self.assert_enter_command_mode()
        self.assert_capabilities(capabilities)

        ##################
        # Streaming Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.STREAMING),
            AgentCapabilityType.RESOURCE_COMMAND: [ProtocolEvent.STOP_AUTOSAMPLE],
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()}
        self.assert_start_autosample()
        self.assert_capabilities(capabilities)
        self.assert_stop_autosample()

        ##################
        # DA Mode
        ##################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [ProtocolEvent.STOP_DIRECT]
        self.assert_direct_access_start_telnet()
        self.assert_capabilities(capabilities)
        self.assert_direct_access_stop_telnet()

        #######################
        # Uninitialized Mode
        #######################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
        self.assert_reset()
        self.assert_capabilities(capabilities)
| [
"petercable@gmail.com"
] | petercable@gmail.com |
f94e6cb886df4a274d453c097a89b73b77ba8d77 | 36d2fcfee8932c2ce75fefe2bc9d53e4a19f88a5 | /tensorflow/contrib/distribute/python/minimize_loss_test.py | 16541c7a1e342f5636e238a301d0946d3e0c4bc4 | [
"Apache-2.0"
] | permissive | Gongxuping/tensorflow | 54a82b73b2df13056da33182c101f873350236e7 | cd174ffdb7b47cb27b22c6a96050e962eb9c2c99 | refs/heads/master | 2020-04-30T06:22:56.972721 | 2019-03-20T03:48:05 | 2019-03-20T03:54:03 | 176,650,065 | 1 | 0 | Apache-2.0 | 2019-03-20T04:00:34 | 2019-03-20T04:00:34 | null | UTF-8 | Python | false | false | 20,055 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for running legacy optimizer code with DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python.single_loss_example import batchnorm_example
from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops.losses import losses_impl
class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
  """Checks that optimizers drive a simple loss down under each strategy.

  Each test is parameterized over distribution strategies (one-device,
  mirrored, TPU) and optimizer versions via `combinations.generate`.
  """

  def _get_iterator(self, strategy, input_fn):
    """Builds and initializes a distributed iterator over `input_fn`'s dataset."""
    iterator = strategy.make_input_fn_iterator(lambda _: input_fn())
    self.evaluate(iterator.initialize())
    return iterator

  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph"], use_callable_loss=[True, False])
          + combinations.combine(mode=["eager"], use_callable_loss=[True])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=combinations.optimizers_v1,
          mode=["graph"],
          use_callable_loss=[True, False]))
  def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss):
    """Trains via `experimental_run_steps_on_iterator`; error must not increase."""
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)

      def step_fn(ctx, inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=2).run_op

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      weights, biases = [], []
      for _ in range(5):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      # The model converges toward kernel + bias == 1; verify monotone descent.
      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)

  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph"], use_callable_loss=[True, False])
          + combinations.combine(mode=["eager"], use_callable_loss=[True])))
  def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn,
                                           use_callable_loss):
    """Same convergence check, driving steps directly via `call_for_each_replica`."""
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(iterator.get_next(),)))

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      weights, biases = [], []
      for _ in range(10):
        run_step()
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)

  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers() +
          combinations.distributions_and_v2_optimizers(),
          combinations.combine(mode=["graph", "eager"])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=combinations.optimizers_v1+combinations.optimizers_v2,
          mode=["graph"]))
  def testOptimizerInsideModelFn(self, distribution, optimizer_fn):
    """Creating the optimizer inside model_fn must create the expected variables."""
    created_variables = []
    trainable_variables = []

    def appending_creator(next_creator, *args, **kwargs):
      # Records the name of every variable created under the scope.
      v = next_creator(*args, **kwargs)
      created_variables.append(v.name)
      if "trainable" in kwargs and kwargs["trainable"]:
        trainable_variables.append(v.name)
      return v

    # Creator scope needs to be set before it's used inside
    # `distribution.scope`.
    with variable_scope.variable_creator_scope(
        appending_creator), distribution.scope():
      model_fn, dataset_fn, _ = minimize_loss_example(
          optimizer_fn,
          use_bias=True,
          use_callable_loss=True,
          create_optimizer_inside_model_fn=True)

      def step_fn(ctx, inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=1).run_op

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      run_step()

      def get_expected_variables(optimizer_fn, num_parameter_devices):
        # Slot variables depend on the optimizer; each variable additionally
        # gets a per-device replica copy for parameter devices > 0.
        variables_map = {
            "GradientDescent": ["dense/kernel", "dense/bias"],
            "Adagrad": [
                "dense/kernel/Adagrad", "dense/kernel",
                "dense/bias/Adagrad", "dense/bias"
            ]
        }
        variables = variables_map[optimizer_fn().get_name()]
        variables.extend([
            v + "/replica_{}".format(replica)
            for v in variables
            for replica in range(1, num_parameter_devices)
        ])
        return set([v + ":0" for v in variables])

      self.assertEqual(
          get_expected_variables(optimizer_fn,
                                 len(distribution.extended.parameter_devices)),
          set(created_variables))

  @combinations.generate(
      combinations.times(
          combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]),
          combinations.times(
              combinations.distributions_and_v1_optimizers(),
              combinations.combine(
                  mode=["graph", "eager"],
                  # TODO(isaprykin): Allow False here. Currently subsequent
                  # replicas will re-execute UPDATE_OPS of previous replicas.
                  update_ops_in_cross_replica_mode=[True])) +
          combinations.combine(
              distribution=[combinations.tpu_strategy],
              optimizer_fn=combinations.optimizers_v1,
              mode=["graph"],
              update_ops_in_cross_replica_mode=[False])))
  def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum,
                                    renorm, update_ops_in_cross_replica_mode):
    """Verifies that moving mean updates are reduced across replicas."""
    with distribution.scope():
      num_replicas = distribution.num_replicas_in_sync
      model_fn, dataset_fn, batchnorm = batchnorm_example(
          optimizer_fn,
          batch_per_epoch=num_replicas,
          momentum=momentum,
          renorm=renorm,
          update_ops_in_replica_mode=not update_ops_in_cross_replica_mode)

      def step_fn(ctx, inputs):
        del ctx  # Unused
        fetches = distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))
        if update_ops_in_cross_replica_mode:
          fetches += tuple(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
        return control_flow_ops.group(fetches)

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=1).run_op

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      expected_moving_means = [0.] * 8

      def averaged_batch_mean(i):
        # Each batch has shape [16, 8] where the ith element in jth list is
        # (8 * j + i + replica_id * 100). So the batch mean in each replica is
        # (60 + i + replica_id * 100). So here comes its batch mean over all
        # replicas:
        return 60. + i + (num_replicas - 1.) / 2. * 100.

      for _ in range(10):
        run_step()
        moving_means = self.evaluate(batchnorm.moving_mean)
        # We make sure that the moving_mean is updated as if the sample mean is
        # calculated over all replicas.
        for i, expected_moving_mean in enumerate(expected_moving_means):
          expected_moving_means[i] -= ((
              expected_moving_mean - averaged_batch_mean(i)) * (1.0 - momentum))
          self.assertNear(expected_moving_means[i], moving_means[i], 0.0001)

  @combinations.generate(
      combinations.times(
          combinations.combine(
              optimizer_fn=[
                  combinations.gradient_descent_optimizer_v1_fn,
                  combinations.gradient_descent_optimizer_v2_fn
              ],
              loss_reduction=[
                  losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN,
                  losses_impl.Reduction.SUM_OVER_BATCH_SIZE,
                  losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS
              ]),
          combinations.times(
              combinations.combine(
                  distribution=[
                      combinations.one_device_strategy,
                      combinations.mirrored_strategy_with_gpu_and_cpu,
                      combinations.mirrored_strategy_with_two_gpus,
                      combinations.core_mirrored_strategy_with_gpu_and_cpu,
                      combinations.core_mirrored_strategy_with_two_gpus
                  ]),
              combinations.combine(
                  mode=["graph"], use_callable_loss=[True, False]) +
              combinations.combine(mode=["eager"], use_callable_loss=[True])) +
          combinations.combine(
              distribution=[combinations.tpu_strategy],
              mode=["graph"],
              use_callable_loss=[True, False])))
  def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction,
                    use_callable_loss):
    """Checks the updated weight for each loss-reduction mode after one step."""
    with distribution.scope():
      all_vars = []

      def model_fn(inputs):
        x, y = inputs
        def loss_fn():
          # Use fixed initialization to make the steps deterministic.
          w = variable_scope.get_variable("w", initializer=[[2.]])
          all_vars.append(w)
          predict = math_ops.matmul(x, w)
          return losses_impl.mean_squared_error(
              y, predict, reduction=loss_reduction)
        optimizer = optimizer_fn()  # GradientDescent with 0.2 learning rate
        if use_callable_loss:
          return optimizer.minimize(loss_fn)
        else:
          return optimizer.minimize(loss_fn())

      def dataset_fn():
        features = dataset_ops.Dataset.from_tensors([[2.], [7.]])
        labels = dataset_ops.Dataset.from_tensors([[6.], [21.]])
        return dataset_ops.Dataset.zip((features, labels)).repeat()

      def step_fn(ctx, inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=1).run_op

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      run_step()

      v = all_vars[0]
      self.assertTrue(all(v is vi for vi in all_vars[1:]))
      weight = numpy.squeeze(self.evaluate(v))
      # Our model is:
      # predict = x * w
      # loss = (predict - y)^2
      # dloss/dpredict = 2*(predict - y)
      # dloss/dw = 2 * x^T @ (predict - y)
      # For our batch size of 2, assuming sum loss reduction:
      # x = [2, 7]
      # y = [6, 21]
      # w_initial = 2
      # predict = [4, 14]
      # predict - y = [-2, -7]
      # dloss/dw = 2 <[2, 7], [-2, -7]> = - 2(4 + 49) = -106
      # So unreplicated the update to w with lr=0.2 is -0.2 * -106 = 21.2
      # with sum loss reduction, or 10.6 with mean.
      if loss_reduction == losses_impl.Reduction.SUM:
        # Note that the "distribution.num_replicas_in_sync" factor will go away
        # once we split the input across replicas, instead of pulling a complete
        # batch of input per replica.
        self.assertNear(weight, 2 + 21.2 * distribution.num_replicas_in_sync,
                        0.0001)
      else:
        # One of the mean loss reductions.
        self.assertNear(weight, 2 + 10.6, 0.0001)

  @combinations.generate(
      combinations.times(
          combinations.distributions_and_v1_optimizers(),
          combinations.combine(mode=["graph", "eager"]),
          combinations.combine(is_tpu=[False])) +
      combinations.combine(
          distribution=[combinations.tpu_strategy],
          optimizer_fn=combinations.optimizers_v1,
          mode=["graph"],
          is_tpu=[True]))
  def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu):
    """Exercises last-step / non-tensor outputs of the step-running context."""
    with distribution.scope():
      def dataset_fn():
        dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()
        # TODO(priyag): batch with drop_remainder=True causes shapes to be
        # fully defined for TPU. Remove this when XLA supports dynamic shapes.
        return dataset.batch(batch_size=1, drop_remainder=True)

      optimizer = optimizer_fn()
      layer = core.Dense(1, use_bias=True)

      key1 = "foo"
      value1 = "bar"

      def model_fn(output_context, x):
        """A very simple model written by the user."""
        def loss_fn():
          y = array_ops.reshape(layer(x), []) - constant_op.constant(1.)
          return y * y

        train_op = optimizer.minimize(loss_fn)
        loss = loss_fn()
        output_context.set_last_step_output(
            name="replica_loss_reduced",
            output=loss,
            reduce_op=reduce_util.ReduceOp.MEAN)
        output_context.set_non_tensor_output(key1, value1)
        return (train_op, loss)

      def step_fn(output_context, inputs):
        (train_op, loss) = distribution.extended.call_for_each_replica(
            model_fn, args=(output_context, inputs))
        output_context.set_last_step_output(
            name="cross_replica_loss_reduced",
            output=loss,
            reduce_op=reduce_util.ReduceOp.MEAN)
        output_context.set_last_step_output(
            name="cross_replica_loss_not_reduced",
            output=loss)
        return distribution.group(train_op)

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        initial_loss = lambda: constant_op.constant(1e7)
        # Initial values corresponding to reduced losses are just single
        # tensors. But for non reduced losses, we need to have initial
        # values that are of the same structure as non reduced losses. In
        # MirroredStrategy, this will be a list of losses, in TPUStrategy
        # it will be single tensor. Using `call_for_each_replica` followed
        # by `experimental_local_results` gives us the desired initial
        # value structure.
        not_reduced = distribution.experimental_local_results(
            distribution.extended.call_for_each_replica(initial_loss))
        initial_loop_values = {
            "replica_loss_reduced": initial_loss(),
            "cross_replica_loss_reduced": initial_loss(),
            "cross_replica_loss_not_reduced": not_reduced,
        }
        ctx = distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=2,
            initial_loop_values=initial_loop_values)

        self.assertEqual({key1: (value1,)}, ctx.non_tensor_outputs)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["replica_loss_reduced"],
            reduced=True, distribution=distribution)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["cross_replica_loss_reduced"],
            reduced=True, distribution=distribution)
        self._verify_loss_output(
            initial_loss(),
            loss_output=ctx.last_step_outputs["cross_replica_loss_not_reduced"],
            reduced=False, distribution=distribution)
        return (ctx.run_op, ctx.last_step_outputs["replica_loss_reduced"])

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())

      weights, biases, losses = [], [], []
      for _ in range(5):
        _, loss = run_step()
        losses.append(loss)
        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      loss_is_not_increasing = all(y <= x for x, y in zip(losses, losses[1:]))
      self.assertTrue(loss_is_not_increasing)

      error = abs(
          numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      error_is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(error_is_not_increasing)

  def _verify_loss_output(self, initial_loss, loss_output, reduced,
                          distribution):
    """Checks structure/dtype/shape of a last-step loss output."""
    if not reduced:
      # Unreduced output carries one loss per replica; reduce it here.
      self.assertLen(distribution.experimental_local_results(loss_output),
                     distribution.num_replicas_in_sync)
      loss_tensor = distribution.reduce(reduce_util.ReduceOp.MEAN, loss_output)
    else:
      unwrapped_output = distribution.experimental_local_results(loss_output)
      self.assertLen(unwrapped_output, 1)
      loss_tensor = unwrapped_output[0]
    self.assertEqual(initial_loss.dtype, loss_tensor.dtype)
    self.assertEqual(initial_loss.shape, loss_tensor.shape)
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
26efc3cce877f299cf32012575f07145b63c0430 | dda618067f13657f1afd04c94200711c1920ea5f | /scoop/user/views/sitemap.py | 56f977aedeaf09d8b4316a0d943c922d7c04de75 | [] | no_license | artscoop/scoop | 831c59fbde94d7d4587f4e004f3581d685083c48 | 8cef6f6e89c1990e2b25f83e54e0c3481d83b6d7 | refs/heads/master | 2020-06-17T20:09:13.722360 | 2017-07-12T01:25:20 | 2017-07-12T01:25:20 | 74,974,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | # coding: utf-8
from django.conf import settings
from django.contrib.sitemaps import Sitemap
from scoop.user.models.user import User
class ProfileSitemap(Sitemap):
    """Sitemap exposing active user profiles."""

    # Cap on entries per sitemap page; falls back to 10000 when the
    # SITEMAPS_ITEMS_PER_PAGE setting is absent.
    limit = getattr(settings, 'SITEMAPS_ITEMS_PER_PAGE', 10000)

    # Getters
    def items(self):
        """Return the queryset of profiles included in the sitemap."""
        active_profiles = User.objects.only('id', 'username').active()
        return active_profiles

    def location(self, profile):
        """Return the URL of a single profile entry."""
        return profile.get_absolute_url()
| [
"steve.kossouho@gmail.com"
] | steve.kossouho@gmail.com |
393ec2cbe1fca2ff890772f6098dac1b3cc965e2 | c397b704f6a18e5db90ef29325ee6d0564243c23 | /dashboard/templatetags/tag.py | 0ac21c9b270bdc07cb70bdcd725b394ffa58e0f5 | [] | no_license | Raza046/Learning-management-System | 47f112b3a05c3581675ff6d11f2bf95598b97b50 | 2bf29f2caff6247ca7a23cf041ca52f4f311b9b4 | refs/heads/master | 2023-02-23T14:42:27.815725 | 2021-01-29T10:25:55 | 2021-01-29T10:25:55 | 334,098,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | from django import template
register = template.Library()
@register.filter
def to_str1(value1):
    """Template filter: return the integer before the first ':' in *value1*.

    Example: "12:34-56" -> 12.
    Raises ValueError if that segment is not a valid integer.
    """
    # Debug print removed: it wrote to stdout on every template render.
    return int(value1.split(":")[0])
@register.filter
def last_str(value):
    """Template filter: return the integer after the '-' in the part past ':'.

    Example: "12:34-56" -> 56.
    Raises ValueError/IndexError if *value* does not match that shape.
    """
    # Debug print removed: it wrote to stdout on every template render.
    return int(value.split(":")[1].split("-")[1])
| [
"rulmustafa22@gmail.com"
] | rulmustafa22@gmail.com |
6b91fcb254bfed2355e297e9b6d8373cda8ba3cd | 1faf6ad07b7ed8c58577517803d7e26e97e71a7b | /setup.py | 31a093f80d58d10e4f455b5cf4c785619af955cc | [] | no_license | pizzapanther/Django-Permissions-Keeper | d6b668b1926d2c0df39617c1adacec2350a63114 | 3f77332f74d58c67464a38d960050dba31d50e17 | refs/heads/master | 2021-01-22T03:39:03.654842 | 2012-05-04T21:43:40 | 2012-05-04T21:43:40 | 4,229,019 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | import sys
from setuptools import setup, find_packages
# Packaging metadata for the `permkeep` distribution.
# NOTE(review): `sys` is imported above but unused, and `find_packages` is
# imported while the package list is maintained by hand — consider dropping
# one or the other; left unchanged here.
setup(
  name = "permkeep",
  version = '12.05.1',
  description = "Easy way to keep group permissions in sync between Development and Production environments.",
  url = "https://github.com/pizzapanther/Django-Permissions-Keeper",
  author = "Paul Bailey",
  author_email = "paul.m.bailey@gmail.com",
  license = "BSD",
  # Subpackages listed explicitly so the management command ships too.
  packages = ['permkeep', 'permkeep.management', 'permkeep.management.commands'],
  include_package_data = True,
)
| [
"paul.m.bailey@gmail.com"
] | paul.m.bailey@gmail.com |
220633df6985c7f48707704ceff0d82d9b9de024 | 9508879fcf1cff718f3fe80502baff8b82c04427 | /python_domain/strings/the_minion_game.py | 90bbaa4f8b053286f8a6493f0de4ace47d978aa8 | [] | no_license | davidozhang/hackerrank | e37b4aace7d63c8be10b0d4d2bffb4d34d401d55 | bdc40d6ff3e603949eb294bbc02a1e24a4ba5b80 | refs/heads/master | 2021-05-04T11:31:59.110118 | 2017-11-15T09:17:27 | 2017-11-15T09:17:27 | 47,906,672 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | #!/usr/bin/python
VOWELS = set(['A', 'E', 'I', 'O', 'U'])
def main():
cons_indices, vowel_indices, cons_count, vowel_count = [], [], 0, 0
word = raw_input()
length = len(word)
for i in xrange(length):
cons_indices.append(i) if word[i] not in VOWELS else vowel_indices.append(i)
for j in cons_indices:
cons_count += length - j
for k in vowel_indices:
vowel_count += length - k
if cons_count > vowel_count:
print 'Stuart ' + str(cons_count)
elif cons_count < vowel_count:
print 'Kevin ' + str(vowel_count)
else:
print 'Draw'
# Script entry point.
if __name__ == '__main__':
    main()
| [
"davzee@hotmail.com"
] | davzee@hotmail.com |
2c28c51ebc9ab2af80770363f29a38e35845b4eb | 987a68b9c196f39ba1810a2261cd4a08c35416a3 | /BitManipulation/477-total-hamming-distance.py | d72e1cdc9fb4e1437904f0b57379eaa8d1614906 | [] | no_license | xizhang77/LeetCode | c26e4699fbe1f2d2c4706b2e5ee82131be066ee5 | ce68f5af57f772185211f4e81952d0345a6d23cb | refs/heads/master | 2021-06-05T15:33:22.318833 | 2019-11-19T06:53:24 | 2019-11-19T06:53:24 | 135,076,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | # -*- coding: utf-8 -*-
'''
The Hamming distance between two integers is the number of positions at which the corresponding bits are different.
Now your job is to find the total Hamming distance between all pairs of the given numbers.
Example:
Input: 4, 14, 2
Output: 6
Explanation: In binary representation, the 4 is 0100, 14 is 1110, and 2 is 0010 (just
showing the four bits relevant in this case). So the answer will be:
HammingDistance(4, 14) + HammingDistance(4, 2) + HammingDistance(14, 2) = 2 + 2 + 2 = 6.
Note:
Elements of the given array are in the range of 0 to 10^9
Length of the array will not exceed 10^4.
'''
# Time: O(32*n) ~ O(n)
class Solution(object):
    """Per-bit counting solution: O(32 * n) time, O(1) extra space."""

    def totalHammingDistance(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        For each bit position, every (one, zero) pair of numbers differs
        at that position, contributing ones * zeros to the total.
        """
        total = 0
        size = len(nums)
        for shift in range(32):
            set_bits = 0
            for value in nums:
                set_bits += (value >> shift) & 1
            total += set_bits * (size - set_bits)
        return total
"xizhang1@cs.stonybrook.edu"
] | xizhang1@cs.stonybrook.edu |
e514d3ebe05555c553230d7c7194e58ed12483ee | 735a315ea82893f2acd5ac141f1a9b8be89f5cb9 | /pylib/mdsplus/widgets/mdsplusonoff.py | e81302052652ef5f273b89179e660d88e9edca3e | [] | no_license | drsmith48/pppl-mdsplus-python | 5ce6f7ccef4a23ea4b8296aa06f51f3a646dd36f | 0fb5100e6718c8c10f04c3aac120558f521f9a59 | refs/heads/master | 2021-07-08T02:29:59.069616 | 2017-10-04T20:17:32 | 2017-10-04T20:17:32 | 105,808,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | import gtk
import gobject
from mdspluswidget import MDSplusWidget
class MDSplusOnOff(MDSplusWidget,gtk.CheckButton):
__gtype_name__ = 'MDSplusOnOff'
__gproperties__ = MDSplusWidget.__gproperties__
def reset(self):
try:
self.set_active(not self.getNode().state)
except Exception,e:
print "onoff reset had problem: %s" % (str(e),)
raise
def apply(self):
if self.putOnApply:
try:
self.getNode().state=not self.get_active()
except Exception,e:
print "onooff apply had problem; %s" % (str(e),)
def __init__(self):
gtk.CheckButton.__init__(self)
MDSplusWidget.__init__(self)
self.putOnApply = True
self.nidOffset = 0
gobject.type_register(MDSplusOnOff)
| [
"drsmith8@wisc.edu"
] | drsmith8@wisc.edu |
b0ad84988b762ee1b062b2341ef5f74ca5aa6e21 | beea74a2a1f2445b107af411197e8b6300e715e6 | /supervised_learning/0x01-classification/18-deep_neural_network.py | c0b59e9055452ecf800ba4ef9300a59b1bbc3e1d | [] | no_license | 95ktsmith/holbertonschool-machine_learning | 0240d8fa8523b06d3353c2bffa74205b84253be8 | 2757c8526290197d45a4de33cda71e686ddcbf1c | refs/heads/master | 2023-07-26T16:02:26.399758 | 2021-09-09T15:57:57 | 2021-09-09T15:57:57 | 310,087,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,595 | py | #!/usr/bin/env python3
""" Deep Neural Network """
import numpy as np
class DeepNeuralNetwork:
    """ Class representing a deep neural network """

    def __init__(self, nx, layers):
        """ Init
        nx is the number of input features
            nx must be a positive integer
        layers is a list representing the number of nodes in each layer
            layers must be a non-empty list of positive integers
        L: number of layers in the network
        cache: dictionary to hold all intermediary values of the network
        weights: dictionary to hold all weights and biases of the network
        Weights use He et al. initialization; biases start at zero.
        """
        if type(nx) is not int:
            raise TypeError("nx must be an integer")
        if nx < 1:
            raise ValueError("nx must be a positive integer")
        if type(layers) is not list or len(layers) == 0:
            raise TypeError("layers must be a list of positive integers")
        # Validate every layer size up front instead of mid-construction,
        # so no partial weight state is built before raising.
        if not all(type(nodes) is int and nodes > 0 for nodes in layers):
            raise TypeError("layers must be a list of positive integers")
        self.__L = len(layers)
        self.__cache = {}
        self.__weights = {}
        # Single initialization path: the fan-in of layer 1 is nx, then the
        # previous layer's width (the original duplicated this per-branch).
        prev = nx
        for i, nodes in enumerate(layers, start=1):
            w = np.random.randn(nodes, prev) * np.sqrt(2 / prev)
            self.__weights['W{}'.format(i)] = w
            self.__weights['b{}'.format(i)] = np.zeros((nodes, 1))
            prev = nodes

    @property
    def L(self):
        """ L getter """
        return self.__L

    @property
    def cache(self):
        """ cache getter """
        return self.__cache

    @property
    def weights(self):
        """ weights getter """
        return self.__weights

    def forward_prop(self, X):
        """ Calculates the forward propagation of the neural network
        X with shape (nx, m) contains input data
            nx is the number of input features
            m is the number of examples
        Updates __cache with the activation of every layer ('A0' is X)
        Returns (activation of the output layer, cache)
        """
        self.__cache["A0"] = X
        for i in range(1, self.L + 1):
            W = self.weights["W" + str(i)]
            b = self.weights["b" + str(i)]
            Z = W @ self.cache["A" + str(i - 1)] + b
            # Sigmoid activation on every layer.
            self.__cache["A" + str(i)] = 1 / (1 + np.exp(-Z))
        return self.__cache["A" + str(self.L)], self.__cache
| [
"95ktsmith@gmail.com"
] | 95ktsmith@gmail.com |
6f030a71946122b5dda42fcbca479ce4f90632ec | 2614f223ad9aecddaca4358d51910e036bf0c2ff | /archived/asap/count_asap.py | 7981d5311e807c7e01362783a836046a0c2a45e3 | [] | no_license | cff874460349/tct | d615efb7485e5de3de66e2d02fff5ce9b33350a3 | 5bb4b16c4fad03a470243dd7db807a6a733d02e5 | refs/heads/master | 2021-03-14T00:55:51.556468 | 2019-06-28T03:56:54 | 2019-06-28T03:56:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,174 | py | # count the number of annotations in each category
import os
import csv
import collections
from xml.dom.minidom import parse
import xml.dom.minidom
import xlsxwriter
def scan_files(directory, prefix=None, postfix=None):
    """Recursively collect file paths under *directory*.

    When *postfix* is given, keep only names ending with it; otherwise,
    when *prefix* is given, keep only names starting with it; otherwise
    keep every file.  (postfix takes precedence if both are supplied.)
    """
    matched = []
    for root, _, filenames in os.walk(directory):
        for name in filenames:
            if postfix:
                keep = name.endswith(postfix)
            elif prefix:
                keep = name.startswith(prefix)
            else:
                keep = True
            if keep:
                matched.append(os.path.join(root, name))
    return matched
# Running total of annotations per tracked color code; only these 19 colors
# are tallied, anything else is reported by count() as a labelling mistake.
colorCounts = {"#000000": 0,
               "#aa0000": 0,
               "#aa007f": 0,
               "#aa00ff": 0,
               "#ff0000": 0,
               "#005500": 0,
               "#00557f": 0,
               "#0055ff": 0,
               "#aa5500": 0,
               "#aa557f": 0,
               "#aa55ff": 0,
               "#ff5500": 0,
               "#ff557f": 0,
               "#ff55ff": 0,
               "#00aa00": 0,
               "#00aa7f": 0,
               "#00aaff": 0,
               "#55aa00": 0,
               "#55aa7f": 0}

# Counter for annotations whose bounding box exceeds 608 px in either
# dimension.  Derived from colorCounts so the two key sets can never drift
# apart (the original duplicated the 19-key literal by hand).
largeCounts = {color: 0 for color in colorCounts}

# Color code -> diagnostic class label.  Includes codes that are not tallied
# above (e.g. "#ffffff"/NORMAL); used when writing the summary CSV.
classes = {"#aa0000": "HSIL", "#aa007f": "ASCH", "#005500": "LSIL", "#00557f": "ASCUS",
           "#0055ff": "SCC", "#aa557f": "ADC", "#aa55ff": "EC", "#ff5500": "AGC1",
           "#ff557f": "AGC2", "#ff55ff": "AGC3", "#00aa00": "FUNGI", "#00aa7f": "TRI",
           "#00aaff": "CC", "#55aa00": "ACTINO", "#55aa7f": "VIRUS", "#ffffff": "NORMAL",
           "#000000": "MC", "#aa00ff": "SC", "#ff0000": "RC", "#aa5500": "GEC"}

total = 0  # number of XML files processed by count()
large = 0  # number of oversized annotations across all files
def count(files_list):
    """Tally annotations by color across all XML annotation files.

    Increments the module-level colorCounts/largeCounts dicts and the
    total/large counters; prints a report line for every annotation whose
    color is not a tracked one.
    """
    global total  # number of files
    global large
    for path in files_list:
        dom = xml.dom.minidom.parse(path)
        root = dom.documentElement
        total += 1
        unknown = 0
        for annotation in root.getElementsByTagName("Annotation"):
            color = annotation.getAttribute("Color")
            if color not in colorCounts:
                unknown += 1
                print("position: " + annotation.getAttribute("Name"))
                continue
            colorCounts[color] += 1
            xs = []
            ys = []
            for coordinate in annotation.getElementsByTagName("Coordinate"):
                xs.append(float(coordinate.getAttribute("X")))
                ys.append(float(coordinate.getAttribute("Y")))
            # Bounding box of the annotation polygon, truncated to ints.
            width = int(max(xs)) - int(min(xs))
            height = int(max(ys)) - int(min(ys))
            if width > 608 or height > 608:
                largeCounts[color] += 1
                large += 1
        if unknown > 0:
            print("# wrong color = " + str(unknown) + " --> " + path)
            print()
def write_csv(counts, csv_path):
    """Write *counts* to *csv_path*, one row per color: code, label, count.

    Looks up the human-readable label in the module-level `classes` map.
    """
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open(csv_path, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        for color, value in counts.items():
            csv_writer.writerow([color, classes[color], value])
# count annotations from single root directory
# NOTE(review): input and output paths are hard-coded to one machine;
# consider argparse parameters before reuse.
file_path = "/home/sakulaki/yolo-yuli/xxx/data_20180922/checked"
files_list = scan_files(file_path, postfix=".xml")
count(files_list)
# Rows sorted by color code for a stable summary file.
write_csv(collections.OrderedDict(sorted(colorCounts.items())), "/home/sakulaki/yolo-yuli/xxx/data_20180922/summary.csv")
# # count annotations from multiple separated directories
# classes = ("01_ASCUS", "02_LSIL", "03_ASCH", "04_HSIL", "05_SCC", "06_AGC1", "07_AGC2", "08_ADC", "09_EC", "10_FUNGI", "11_TRI", "12_CC", "13_ACTINO", "14_VIRUS")
# file_path = "/media/tsimage/Elements/data"
# for class_i in classes:
#     file_path_i = os.path.join(file_path, class_i)
#     files_list = scan_files(file_path_i, postfix=".xml")
#     count(files_list)
# output results
print("# files: " + str(total))
print(colorCounts)
print("# labels large than 608: " + str(large))
print(largeCounts)
# write to excel
# workbook = xlsxwriter.Workbook("C:/liyu/gui/tct/res/个人统计.xlsx")
# worksheet = workbook.add_worksheet()
# worksheet.write(0, 0, file_path)
# row = 1
# for key in colorCounts.keys():
#     worksheet.write(row, 0, key)
#     worksheet.write(row, 1, colorCounts[key])
#     row += 1
# workbook.close()
| [
"yuli00986081@gmail.com"
] | yuli00986081@gmail.com |
d674af227aff1f94b7c80bd78974fba9c40fc93f | 32cf94c304c2c832595a28b49c7d9e0361d50950 | /test/re2/unicode_test.py | a88a3ad5a4bd5c1ae59908c3aef9b84135ee98f9 | [
"MIT"
] | permissive | oudream/ccxx | 11d3cd9c044c5f413ebc0735548f102a6f583114 | 26cecfb02e861ce6b821b33350493bac4793e997 | refs/heads/master | 2023-01-29T11:20:12.210439 | 2023-01-12T06:49:23 | 2023-01-12T06:49:23 | 47,005,127 | 46 | 11 | MIT | 2020-10-17T02:24:06 | 2015-11-28T01:05:30 | C | UTF-8 | Python | false | false | 6,720 | py | #!/usr/bin/python2.4
#
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Unittest for the util/regexp/re2/unicode.py module."""
import os
import StringIO
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.util.regexp.re2 import unicode
# Root of the checked-in Unicode Character Database these tests read from.
_UNICODE_DIR = os.path.join(flags.FLAGS.test_srcdir, "google3", "third_party",
                            "unicode", "ucd-5.1.0")
class ConvertTest(googletest.TestCase):
  """Test the conversion functions."""

  def testUInt(self):
    """_UInt parses valid hex code points and rejects malformed ones."""
    for text, expected in (("0000", 0x0000),
                           ("263A", 0x263A),
                           ("10FFFF", 0x10FFFF)):
      self.assertEquals(expected, unicode._UInt(text))
    for bad in ("263", "263AAAA", "110000"):
      self.assertRaises(unicode.InputError, unicode._UInt, bad)

  def testURange(self):
    """_URange expands ranges and rejects malformed or reversed ones."""
    self.assertEquals([1, 2, 3], unicode._URange("0001..0003"))
    self.assertEquals([1], unicode._URange("0001"))
    for bad in ("0001..0003..0005", "0003..0001", "0001..0001"):
      self.assertRaises(unicode.InputError, unicode._URange, bad)

  def testUStr(self):
    """_UStr formats code points and rejects out-of-range values."""
    for value, expected in ((0x263a, "0x263A"), (0x10FFFF, "0x10FFFF")):
      self.assertEquals(expected, unicode._UStr(value))
    for bad in (0x110000, -1):
      self.assertRaises(unicode.InputError, unicode._UStr, bad)
_UNICODE_TABLE = """# Commented line, should be ignored.
# The next line is blank and should be ignored.
0041;Capital A;Line 1
0061..007A;Lowercase;Line 2
1F00;<Greek, First>;Ignored
1FFE;<Greek, Last>;Line 3
10FFFF;Runemax;Line 4
0000;Zero;Line 5
"""
_BAD_TABLE1 = """
111111;Not a code point;
"""
_BAD_TABLE2 = """
0000;<Zero, First>;Missing <Zero, Last>
"""
_BAD_TABLE3 = """
0010..0001;Bad range;
"""
# Raised by Abort() to prove a callback was (wrongly) invoked.
class AbortError(Exception):
  """Function should not have been called."""
def Abort():
  """Callback that always fails; pass where no invocation is expected."""
  raise AbortError("Abort")
def StringTable(s, n, f):
  """Parses table text `s` expecting `n` fields, calling `f` per data line."""
  unicode.ReadUnicodeTable(StringIO.StringIO(s), n, f)
class ReadUnicodeTableTest(googletest.TestCase):
  """Test the ReadUnicodeTable function."""

  def testSimpleTable(self):
    """Parses _UNICODE_TABLE and checks every callback invocation in order."""
    ncall = [0]  # can't assign to ordinary int in DoLine

    def DoLine(codes, fields):
      # Invoked once per data line; ncall[0] tracks which line we're on.
      self.assertEquals(3, len(fields))
      ncall[0] += 1
      self.assertEquals("Line %d" % (ncall[0],), fields[2])
      if ncall[0] == 1:
        self.assertEquals([0x0041], codes)
        self.assertEquals("0041", fields[0])
        self.assertEquals("Capital A", fields[1])
      elif ncall[0] == 2:
        self.assertEquals(range(0x0061, 0x007A + 1), codes)
        self.assertEquals("0061..007A", fields[0])
        self.assertEquals("Lowercase", fields[1])
      elif ncall[0] == 3:
        # <Greek, First>/<Greek, Last> pair merged into one range callback.
        self.assertEquals(range(0x1F00, 0x1FFE + 1), codes)
        self.assertEquals("1F00..1FFE", fields[0])
        self.assertEquals("Greek", fields[1])
      elif ncall[0] == 4:
        self.assertEquals([0x10FFFF], codes)
        self.assertEquals("10FFFF", fields[0])
        self.assertEquals("Runemax", fields[1])
      elif ncall[0] == 5:
        self.assertEquals([0x0000], codes)
        self.assertEquals("0000", fields[0])
        self.assertEquals("Zero", fields[1])

    StringTable(_UNICODE_TABLE, 3, DoLine)
    self.assertEquals(5, ncall[0])

  def testErrorTables(self):
    """Malformed tables raise InputError before the callback runs."""
    self.assertRaises(unicode.InputError, StringTable, _UNICODE_TABLE, 4, Abort)
    self.assertRaises(unicode.InputError, StringTable, _UNICODE_TABLE, 2, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE1, 3, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE2, 3, Abort)
    self.assertRaises(unicode.InputError, StringTable, _BAD_TABLE3, 3, Abort)
class ParseContinueTest(googletest.TestCase):
"""Test the ParseContinue function."""
def testParseContinue(self):
self.assertEquals(("Private Use", "First"),
unicode._ParseContinue("<Private Use, First>"))
self.assertEquals(("Private Use", "Last"),
unicode._ParseContinue("<Private Use, Last>"))
self.assertEquals(("<Private Use, Blah>", None),
unicode._ParseContinue("<Private Use, Blah>"))
class CaseGroupsTest(googletest.TestCase):
"""Test the CaseGroups function (and the CaseFoldingReader)."""
def FindGroup(self, c):
if type(c) == str:
c = ord(c)
for g in self.groups:
if c in g:
return g
return None
def testCaseGroups(self):
self.groups = unicode.CaseGroups(unicode_dir=_UNICODE_DIR)
self.assertEquals([ord("A"), ord("a")], self.FindGroup("a"))
self.assertEquals(None, self.FindGroup("0"))
class ScriptsTest(googletest.TestCase):
"""Test the Scripts function (and the ScriptsReader)."""
def FindScript(self, c):
if type(c) == str:
c = ord(c)
for script, codes in self.scripts.items():
for code in codes:
if c == code:
return script
return None
def testScripts(self):
self.scripts = unicode.Scripts(unicode_dir=_UNICODE_DIR)
self.assertEquals("Latin", self.FindScript("a"))
self.assertEquals("Common", self.FindScript("0"))
self.assertEquals(None, self.FindScript(0xFFFE))
class CategoriesTest(googletest.TestCase):
"""Test the Categories function (and the UnicodeDataReader)."""
def FindCategory(self, c):
if type(c) == str:
c = ord(c)
short = None
for category, codes in self.categories.items():
for code in codes:
if code == c:
# prefer category Nd over N
if len(category) > 1:
return category
if short == None:
short = category
return short
def testCategories(self):
self.categories = unicode.Categories(unicode_dir=_UNICODE_DIR)
self.assertEquals("Ll", self.FindCategory("a"))
self.assertEquals("Nd", self.FindCategory("0"))
self.assertEquals("Lo", self.FindCategory(0xAD00)) # in First, Last range
self.assertEquals(None, self.FindCategory(0xFFFE))
self.assertEquals("Lo", self.FindCategory(0x8B5A))
self.assertEquals("Lo", self.FindCategory(0x6C38))
self.assertEquals("Lo", self.FindCategory(0x92D2))
self.assertTrue(ord("a") in self.categories["L"])
self.assertTrue(ord("0") in self.categories["N"])
self.assertTrue(0x8B5A in self.categories["L"])
self.assertTrue(0x6C38 in self.categories["L"])
self.assertTrue(0x92D2 in self.categories["L"])
def main():
googletest.main()
if __name__ == "__main__":
main()
| [
"oudream@126.com"
] | oudream@126.com |
a6321682c22d998495daa7c30cf51f69ad8cfdcd | fbe0d8ceb2d753596d61f948939999499767abcd | /selectivechat_project/asgi.py | 07e8be94cc5c78874d1fc5c2106161cf867f541d | [
"MIT"
] | permissive | calixo888/selectivechat | ac0929ecbdb99e7e76fdbf8e5e037de20d418e64 | 2acbabe2355590986ea838348013d6ae7ddc6210 | refs/heads/master | 2022-05-03T02:40:23.329969 | 2020-10-25T23:18:26 | 2020-10-25T23:18:26 | 238,330,840 | 0 | 0 | MIT | 2022-04-22T23:00:33 | 2020-02-04T23:49:05 | JavaScript | UTF-8 | Python | false | false | 215 | py | import os
import django
from channels.routing import get_default_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "selectivechat_project.settings")
django.setup()
application = get_default_application()
| [
"calix.huang1@gmail.com"
] | calix.huang1@gmail.com |
1a68aeae4a81ff86a15a5dc3c037aa863c913cbc | a4cab19f70365d1d5fba31f7d93ecd315ccc6268 | /0x01-python-if_else_loops_functions/8-uppercase.py | 6b69d971831179f43a3140e150e24617959a7914 | [] | no_license | cmmolanos1/holbertonschool-higher_level_programming | c2154022cb868caccb7759812c292106c3654b41 | a58c4fb3659d520b4d78631c6a00c6301f79522d | refs/heads/master | 2020-07-22T23:54:31.275115 | 2020-04-11T00:46:00 | 2020-04-11T00:46:00 | 207,374,519 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | #!/usr/bin/python3
def uppercase(str):
for i in range(len(str)):
char = ord(str[i])
if 97 <= char <= 122:
char = char - 32
print("{:c}".format(char), end='')
print()
| [
"926@holbertonschool.com"
] | 926@holbertonschool.com |
e89f9593303b5bb1ecd95e569dd8df7c44645932 | c2d2b22edac0b3c0b021ad28ad638dece18c2549 | /Python/Problems/Knapsack_Problem/solution.py | 208187de85cbc3228edbb5207e7f8c0bec3a934a | [] | no_license | dileepmenon/GeeksForGeeks | 248c69065d947f760f054dd4681063f21ca9be23 | 9b5f8955a02cae27e4677b2c3fa1578e6ad1c545 | refs/heads/master | 2021-05-23T05:24:58.876414 | 2017-12-04T05:58:47 | 2017-12-04T05:58:47 | 95,201,250 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | #!bin/python3
def KP(w_l, p_l, W):
if not w_l:
return 0
if W == 0:
return 0
try:
return d[(p_l[0], len(p_l), W)]
except:
if W-w_l[0] >= 0:
s = max(p_l[0] + KP(w_l[1:], p_l[1:], W-w_l[0]),
KP(w_l[1:], p_l[1:], W))
else:
s = KP(w_l[1:], p_l[1:], W)
d[(p_l[0], len(p_l), W)] = s
return s
num_t = int(input())
for i in range(num_t):
n = int(input())
W = int(input())
p_l = list(map(int, input().strip().split()))
w_l = list(map(int, input().strip().split()))
d = {}
print(KP(w_l, p_l, W))
| [
"dileepmenon92@yahoo.com"
] | dileepmenon92@yahoo.com |
91242f64a3dda3056dd978c5cd6248e143e8a1d6 | 3f642957664a470897b10b15ab29734bf53ffd9c | /config/settings/prod.py | 9bdb2635999a4209e918ea4ff182fcec05d179f1 | [] | no_license | birkoss/walleteur-api | da276de50f2d976bae9d749da98f80f3f6c45518 | bc16f2e5801ccddf90bff1cf1eb867c697fb9d71 | refs/heads/master | 2023-03-28T16:59:23.678986 | 2021-04-11T00:13:06 | 2021-04-11T00:13:06 | 354,413,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | from .base import *
ALLOWED_HOSTS = ['api.walleteur.app']
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': get_secret('DATABASE_NAME'),
'USER': get_secret('DATABASE_USER'),
'PASSWORD': get_secret('DATABASE_PASSWORD'),
'HOST': 'localhost',
'PORT': '3306',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
}
}
}
STATIC_URL = 'https://walleteur.app/assets/api/'
STATIC_ROOT = '/home/walleteur/domains/walleteur.app/public_html/assets/api/'
| [
"admin@birkoss.com"
] | admin@birkoss.com |
f6f3db537ccc13b21749242df396539920dc9396 | 3f2847f499878ea2a42cffea6f312ebe5fc14098 | /aiosmb/dcerpc/v5/common/even6/resultset.py | 46f925c1d14ad1eb0f7059f73a16c8520df23bf1 | [] | no_license | rvrsh3ll/aiosmb | a924b2d2be3e60b44a1522db52a1d2316fc3ba6f | a00a7d32f43e91746e74f5d72cd8f2ff4a4e8ea6 | refs/heads/master | 2023-06-07T15:33:14.701529 | 2023-05-30T16:02:35 | 2023-05-30T16:02:35 | 203,462,354 | 0 | 0 | null | 2023-05-31T07:45:14 | 2019-08-20T22:15:30 | Python | UTF-8 | Python | false | false | 1,258 | py |
import io
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-even6/762528ba-f36a-4d17-ba2b-0f0244a45f2f
class RESULT_SET:
def __init__(self):
self.totalSize = None
self.headerSize = None
self.eventOffset = None
self.bookmarkOffset = None
self.binXmlSize = None
self.eventData = None
self.numberOfSubqueryIDs = None
self.subqueryIDs = None
self.bookMarkData = None
@staticmethod
def from_bytes(data):
return RESULT_SET.from_buffer(io.BytesIO(data))
@staticmethod
def from_buffer(buff):
r = RESULT_SET()
pos = buff.tell()
r.totalSize = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
pos += r.totalSize
r.headerSize = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
r.eventOffset = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
r.bookmarkOffset = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
r.binXmlSize = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
r.eventData = buff.read(r.binXmlSize)
r.numberOfSubqueryIDs = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
r.subqueryIDs = buff.read(r.numberOfSubqueryIDs)
r.bookMarkData = buff.read(pos - buff.tell())
return r | [
"info@skelsec.com"
] | info@skelsec.com |
581191e94f960c99a337ad22746bd5278f4755a0 | bc108434d5f485a5ca593942b0fbe2f4d044ebda | /nn/pdll-v1.0/nn/functional.py | 3ad6c9bd8a6e6f47a937446ab2845ff6bcf68e11 | [] | no_license | js-ts/AI | 746a34493a772fb88aee296f463122b68f3b299d | 353e7abfa7b02b45d2b7fec096b58e07651eb71d | refs/heads/master | 2023-05-29T16:19:03.463999 | 2021-06-22T05:49:44 | 2021-06-22T05:49:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,814 | py | import math
from typing import Tuple, Union
import operator
import functools
from pdll.backend import Tensor, np
from pdll.autograd import Function, Variable, register
from .utils import im2col, col2im
class _Sigmoid(Function):
"""sigmoid
"""
def forward(self, t: Tensor) -> Tensor:
self.out = 1. / (1. + np.exp(-t))
return self.out
def backward(self, grad: Tensor) -> Tensor:
return grad * self.out * (1. - self.out)
class _ReLU(Function):
"""relu
"""
def forward(self, t: Tensor) -> Tensor:
self.mask = t > 0
return t * self.mask
def backward(self, grad: Tensor) -> Tensor:
return grad * self.mask
class _Tanh(Function):
"""
formul: (exp(x) + exp(-x)) / (exp(x) - exp(-x))
derive : 1 - tanh(x) ** 2
"""
def forward(self, t: Tensor) -> Tensor:
self.out = np.tanh(t)
return self.out
def backward(self, grad: Tensor) -> Tensor:
return grad * (1 - self.out ** 2)
@register(Variable)
def relu(self, ):
return _ReLU()(self)[0]
@register(Variable)
def tanh(self, ):
return _Tanh()(self)[0]
@register(Variable)
def sigmoid(self, ):
return _Sigmoid()(self)[0]
class _Conv2d(Function):
'''conv
'''
def __init__(self, kernel, stride, padding, dilation, groups):
super().__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, data: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
'''
n c h w
co ci kh kw
'''
# self.data = data
self.weight = weight
self.data_shape = data.shape
n, cin, _, _ = data.shape
cout, _, _, _ = weight.shape
matrix, out_h, out_w = im2col(data, self.kernel, self.stride, self.padding, self.dilation) # -> n*hout*wout cin*hk*wk
# matrix = matrix.transpose(0, 4, 5, 1, 2, 3).reshape(n * out_h * out_w, cin * self.kernel[0] * self.kernel[1])
# weight = weight.transpose(1, 2, 3, 0).reshape(-1, cout) # -> cin*hk*wk cout [groups * cout/groups * cin
# self.matrix = matrix
# output = (matrix @ weight).reshape(n, out_h, out_w, cout).transpose(0, 3, 1, 2)
matrix = matrix.reshape(n, self.groups, cin//self.groups, self.kernel[0], self.kernel[1], out_h, out_w)
matrix = matrix.transpose(1, 0, 5, 6, 2, 3, 4).reshape(self.groups, n * out_h * out_w, cin//self.groups * self.kernel[0] * self.kernel[1])
weight = weight.reshape(self.groups, cout//self.groups, cin//self.groups, self.kernel[0], self.kernel[1])
weight = weight.transpose(0, 2, 3, 4, 1).reshape(self.groups, cin//self.groups * self.kernel[0] * self.kernel[1], cout//self.groups)
# output = matrix.bmm(weight) # groups n*out_h*out_w cout/groups
self.matrix = matrix # groups n*hout*wout cin//groups*hk*wk
self.weight = weight # groups cin//groups*hk*wk cout//groups
output = (matrix @ weight).transpose(1, 0, 2).reshape(n * out_h * out_w, self.groups * cout//self.groups)
output = output.reshape(n, out_h, out_w, cout).transpose(0, 3, 1, 2)
if bias is not None:
return output + bias.reshape(1, -1, 1, 1)
else:
return output
def backward(self, grad: Tensor):
'''grad n cout hout wout
'''
n, cout, hout, wout = grad.shape
# _, cin, hk, wk = self.weight.shape
_, cin, _, _ = self.data_shape
bias_grad = grad.sum(axis=(0, 2, 3))
# indx_reverse = np.argsort([0, 3, 1, 2])
# grad_reverse = grad.transpose(0, 2, 3, 1)
# grad_reverse = grad_reverse.reshape(n * hout * wout, cout)
# weight_grad = self.matrix.T @ grad_reverse # cin hk wk cout
# weight_grad = weight_grad.reshape(cin, hk, wk, cout)
# weight_grad = weight_grad.transpose(3, 0, 1, 2)
# weight = self.weight.transpose(1, 2, 3, 0).reshape(-1, cout) # -> cin*hk*wk cout
# data_grad = grad_reverse @ weight.T # n*hout*wout cin*hk*wk
# data_grad = data_grad.reshape(n, hout, wout, cin, hk, wk)
# data_grad = data_grad.transpose(0, 3, 4, 5, 1, 2) # (n, cin, hk, wk, hout, wout)
# data_grad = col2im(data_grad, self.data_shape, self.kernel, self.stride, self.padding)
grad_reverse = grad.transpose(0, 2, 3, 1) # n, hout, wout, cout
grad_reverse = grad_reverse.reshape(n * hout * wout, self.groups, cout//self.groups)
grad_reverse = grad_reverse.transpose(1, 0, 2) # groups, n*hout*wout, cout//groups
weight_grad = self.matrix.transpose(0, 2, 1) @ grad_reverse # bmm
weight_grad = weight_grad.reshape(self.groups, cin//self.groups*self.kernel[0]*self.kernel[1], cout//self.groups)
weight_grad = weight_grad.transpose(0, 2, 1).reshape(cout, cin//self.groups, self.kernel[0], self.kernel[1])
data_grad = grad_reverse @ self.weight.transpose(0, 2, 1) # groups, n*hout*wout, cin//groups*hk*wk
data_grad = data_grad.transpose(1, 0, 2).reshape(n * hout * wout, cin * self.kernel[0] * self.kernel[1])
data_grad = data_grad.reshape(n, hout, wout, cin, self.kernel[0], self.kernel[1])
data_grad = data_grad.transpose(0, 3, 4, 5, 1, 2)
data_grad = col2im(data_grad, self.data_shape, self.kernel, self.stride, self.padding)
return data_grad, weight_grad, bias_grad
def conv2d(v: Variable, w: Variable, b: Variable, kernel: Union[int, Tuple[int, ...]], stride: Union[int, Tuple[int, ...]], padding: Union[int, Tuple[int, ...]], dilation: int, groups: int):
'''conv2d
'''
return _Conv2d(kernel, stride, padding, dilation, groups)(v, w, b)[0]
class _Pool2d(Function):
'''pooling
'''
def __init__(self, kernel, stride, padding, dilation=1, mode='max'):
super().__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.dilation = dilation
self.mode = mode
def forward(self, data: Tensor):
''''''
self.shape = data.shape
n, c, _, _ = data.shape
matrix, out_h, out_w = im2col(data, self.kernel, self.stride, self.padding, dilation=self.dilation)
matrix = matrix.reshape(n, c, self.kernel[0] * self.kernel[1], out_h, out_w)
self.matrix = matrix
if self.mode.lower() == 'max': # TODO
out = np.max(matrix, axis=2)
elif self.mode.lower() == 'avg':
out = np.average(matrix, axis=2)
else:
raise RuntimeError
return out
def backward(self, grad: Tensor):
n, c, oh, ow = grad.shape
grad = grad[:, :, np.newaxis, :, :]
if self.mode.lower() == 'max':
mask = self.matrix == np.max(self.matrix, axis=2, keepdims=True)
grad = grad * mask
elif self.mode.lower() == 'avg':
grad = grad * np.ones_like(self.matrix) / (self.kernel[0] * self.kernel[1])
else:
raise RuntimeError
grad = grad.reshape(n, c, self.kernel[0], self.kernel[1], oh, ow)
return col2im(grad, self.shape, self.kernel, self.stride, self.padding, dilation=self.dilation)
def pool2d(v: Variable, kernel: Union[int, Tuple[int, ...]], stride: Union[int, Tuple[int, ...]], padding: Union[int, Tuple[int, ...]]=0, dilation: int=1, mode: str='max') -> Variable:
'''pool2d
'''
if isinstance(kernel, int):
kernel = (kernel, kernel)
if isinstance(stride, int):
stride = (stride, stride)
if isinstance(padding, int):
padding = (padding, padding, padding, padding)
elif isinstance(padding, (tuple, list)) and len(padding) == 2:
padding = (padding[0], padding[0], padding[1], padding[1])
elif isinstance(padding, (tuple, list)) and len(padding) == 4:
padding = tuple(padding)
else:
raise RuntimeError('not suppot padding format')
return _Pool2d(kernel, stride, padding, dilation, mode)(v)[0]
max_pool2d = functools.partial(pool2d, mode='max')
avg_pool2d = functools.partial(pool2d, mode='avg')
class _Softmax(Function):
def __init__(self, axis: int):
self.axis = axis
def forward(self, data):
'''softmax(x-c)
-c: deal with overflow inf problem
'''
t = np.exp(data - data.max(axis=self.axis, keepdims=True))
a = t / (t.sum(axis=self.axis, keepdims=True))
self.a = a
return a
def backward(self, grad):
'''
dl/da_i da_i/exp(x_i)
dl/da_k da_k/sum_exp(x_k)
dl/dx_i = dl/da_i * a_i - a_i * sum_j(dl/da_j * a_j)
'''
return self.a * (grad - (grad * self.a).sum(axis=self.axis, keepdims=True))
@register(Variable)
def softmax(self: Variable, axis: int) -> Variable:
'''softmax
'''
return _Softmax(axis)(self)[0]
class _CrossEntropy(Function):
def __init__(self, axis=-1, reduction='mean'):
super().__init__()
self.axis = axis
self.reduction = reduction.lower()
def forward(self, logit, label):
'''label one-hot
'''
t = np.exp(logit - logit.max(axis=self.axis, keepdims=True))
a = t / (t.sum(axis=self.axis, keepdims=True))
self.a = a
self.label = label
if self.reduction == 'sum':
return (-label * np.log(a)).sum()
elif self.reduction == 'mean':
return (-label * np.log(a)).sum() / functools.reduce(operator.mul, logit.shape[:-1])
def backward(self, grad=1.):
'''grad = 1.
'''
if self.reduction == 'sum':
grad = grad * np.ones_like(self.a)
elif self.reduction == 'mean':
grad = grad / functools.reduce(operator.mul, self.a.shape[:-1]) * np.ones_like(self.a)
grad_logit = grad * (self.a - self.label)
return grad_logit, None
def cross_entropy(logit: Variable, label: Variable, axis: int=-1, reduction: str='mean') -> Variable:
'''
'''
return _CrossEntropy(axis, reduction)(logit, label)[0]
#
class _Dropout(Function):
'''
'''
def __init__(self, p: float, training: bool=True, inspace: bool=True):
self.p = p
self.training = training
self.inspace = inspace
def forward(self, t: Tensor) -> Tensor:
'''
'''
if not self.training:
self.p = 0.
mask = np.random.rand(*t.shape) > self.p
self.mask = mask
return t * mask / (1 - self.p)
def backward(self, grad):
'''
'''
return grad * self.mask / (1 - self.p)
def dropout(v: Variable, p: float, training: bool=True, inspace: bool=True) -> Variable:
'''dropout
'''
return _Dropout(p, training, inspace)(v)[0]
class _Padding(Function):
'''
'''
def __init__(self, pad: Union[int, Tuple[int, ...]], mode: str='constant', value: float=0):
super().__init__()
if isinstance(pad, int):
pad = (pad, )
else:
assert len(pad) % 2 == 0, ''
self.pad = tuple(pad)
self.mode = mode
self.value = value
assert self.mode in ('constant')
def forward(self, data: Tensor) -> Tensor:
'''
'''
pad = self.pad
shape = data.shape
assert len(shape) >= len(pad)//2, ''
if len(pad) == 1:
pad = tuple([pad[0], ] * (2 * len(shape)))
else:
pad = pad + (0, ) * (2 * len(shape) - len(pad))
assert len(pad) == 2 * len(shape), ''
padding = list(zip(pad[0::2][::-1], pad[1::2][::-1]))
self.shape = shape
self.padding = padding
return np.pad(data, pad_width=padding, mode=self.mode, constant_values=self.value)
def backward(self, grad: Tensor) -> Tensor:
'''
'''
slices = []
for pad in self.padding:
if pad[1] == 0:
slices.append(slice(pad[0], None))
else:
slices.append(slice(pad[0], -pad[1]))
return grad[tuple(slices)]
def zero_pad2d(data: Variable, padding: Union[int, Tuple[int, int, int, int]]):
'''zero pad2d
'''
assert len(data.shape) == 4, ''
if isinstance(padding, int):
padding = (padding, ) * 4
return _Padding(padding, mode='constant', value=0)(data)[0]
def constant_pad2d(data: Variable, padding: Union[int, Tuple[int, int, int, int]], value: float=0):
'''constant pad2d
'''
assert len(data.shape) == 4, ''
if isinstance(padding, int):
padding = (padding, ) * 4
return _Padding(padding, mode='constant', value=value)(data)[0]
| [
"wenyu.lyu@gmail.com"
] | wenyu.lyu@gmail.com |
e0e63584b29b38b3bbf8b8c4aae8e8e2e55f8837 | 13d222bc3332378d433835914da26ed16b583c8b | /tests/challenge90/test_challenge90.py | 888f1e85cb508d5239b468228512fd72a0fa717a | [] | no_license | mattjhussey/pemjh | c27a09bab09cd2ade31dc23fffac07374bea9366 | 2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99 | refs/heads/master | 2023-04-16T03:08:59.390698 | 2023-04-08T10:54:00 | 2023-04-08T10:54:00 | 204,912,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | """ Tests for challenge90 """
from robber import expect
from pemjh.challenge90 import main
def test_challenge90():
""" Regression testing challenge90 """
expect(main()).to.eq(1217)
| [
"matthew.hussey@googlemail.com"
] | matthew.hussey@googlemail.com |
36362d5255285a4d5af86585e2249648aeeb2fe9 | eadde35af2b82fe3de7bc2f1cbc579af0a1a8816 | /bookstore/migrations/0005_auto_20180910_1302.py | aca3b8ea2a485fa9413c77f0abf75b52ecb84281 | [] | no_license | hdforoozan/Shop | 425a2371d2f96baca29db0948294edb31a4504b8 | 776d36149733a38bda26715bc3b16c03f0d53a9c | refs/heads/master | 2020-04-24T06:22:14.033290 | 2019-02-20T22:51:49 | 2019-02-20T22:51:49 | 171,762,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # Generated by Django 2.1 on 2018-09-10 08:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bookstore', '0004_auto_20180909_1735'),
]
operations = [
migrations.AlterField(
model_name='book',
name='price',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=10),
),
]
| [
"hdforoozan@gmail.com"
] | hdforoozan@gmail.com |
ad362b8979bb63891013b3da09d9a8e8c9d0f089 | 0fe9717ee13b33f0a39af1a86f9a08da111f8dee | /notebooks/CharacterisingFunctions.py | 0846d356b0e8dc562122f7bae35af8aa28d641bc | [] | no_license | julieweeds/oldbailey-corpling | 6bea1ef9daec87c089fc0061652301e480260061 | 101fa6bff0e43cf91c863922159374713e04e66a | refs/heads/master | 2021-01-20T17:06:18.142997 | 2018-02-09T09:58:47 | 2018-02-09T09:58:47 | 90,863,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,514 | py | #taking the code out of CharacteristicWords.ipynb and analysis.ipynb to make available elsewhere
import matplotlib.pyplot as plt
import numpy as np
import operator
import math
# For a given set of corpora, find the frequency distribution of the k highest frequency words
# Output total size of corpus and sorted list of term, frequency pairs
def find_hfw_dist(corpora, k=100000,ftype='termfreq'):
    """Pool word counts over a set of corpora and rank terms by frequency.

    Each corpus object must expose ``allworddict`` (term frequencies) or,
    when ftype='docfreq', ``docfreq`` (document frequencies).  Terms are
    lower-cased before pooling.  Returns (total_count, top_k) where top_k
    is a list of (term, frequency) pairs sorted by descending count.
    """
    pooled = {}
    grand_total = 0
    for corpus in corpora:
        # choose which per-corpus frequency table to pool
        source = corpus.docfreq if ftype == 'docfreq' else corpus.allworddict
        for term, count in source.items():
            key = term.lower()
            pooled[key] = pooled.get(key, 0) + count
            grand_total += count
    print("Size of corpus is {}".format(grand_total))
    ranked = sorted(pooled.items(), key=operator.itemgetter(1), reverse=True)
    return grand_total, ranked[:k]
def makedict(alist):
    """Fold a list of (key, value) pairs into a dict, summing values of
    duplicate keys."""
    totals = {}
    for key, value in alist:
        if key in totals:
            totals[key] += value
        else:
            totals[key] = value
    return totals
def pmi(wordfreq, refwordfreq, corpussize, refcorpussize):
    """Pointwise mutual information of the term's probability in this corpus
    relative to the reference corpus: log((wf/cs) / (rwf/rcs)).

    Returns 0 when any count is zero, since the log would be undefined.
    """
    if wordfreq * refwordfreq * corpussize * refcorpussize == 0:
        return 0
    ratio = (wordfreq * refcorpussize) / (refwordfreq * corpussize)
    return np.log(ratio)
def rev_pmi(wordfreq, refwordfreq, corpussize, refcorpussize):
    """PMI of the term in the *rest* of the reference corpus (reference
    minus this corpus) — the complement term used by the LLR."""
    residual_freq = refwordfreq - wordfreq
    residual_size = refcorpussize - corpussize
    return pmi(residual_freq, refwordfreq, residual_size, refcorpussize)
def llr(wordfreq, refwordfreq, corpussize, refcorpussize):
    """Signed log-likelihood-ratio score of the term's frequency in this
    corpus against the reference corpus.

    The magnitude is 2*(wf*pmi + (rwf-wf)*rev_pmi); the sign is negative
    when the term is under-represented here (pmi < 0).
    """
    observed_pmi = pmi(wordfreq, refwordfreq, corpussize, refcorpussize)
    complement_pmi = rev_pmi(wordfreq, refwordfreq, corpussize, refcorpussize)
    magnitude = 2 * (wordfreq * observed_pmi + (refwordfreq - wordfreq) * complement_pmi)
    # pmi is deterministic, so reusing observed_pmi here matches the
    # original's second call to pmi exactly
    return -magnitude if observed_pmi < 0 else magnitude
def klp(p, q):
    """One-sided contribution to a KL divergence against the mixture of p
    and q: p * log(2p / (p + q)).  Used by kl and jsd."""
    ratio = (2 * p) / (p + q)
    return p * np.log(ratio)
def kl(wordfreq, refwordfreq, corpussize, refcorpussize):
    """p-side KL contribution comparing this corpus against the REST of the
    reference corpus.  The reference totals must include this corpus — the
    complement is computed by subtraction."""
    p_here = wordfreq / corpussize
    p_rest = (refwordfreq - wordfreq) / (refcorpussize - corpussize)
    return klp(p_here, p_rest)
def jsd(wordfreq, refwordfreq, corpussize, refcorpussize):
    """Signed Jensen-Shannon divergence between this corpus and the rest of
    the reference corpus; positive when the term is over-represented here,
    negative otherwise."""
    p = wordfreq / corpussize
    q = (refwordfreq - wordfreq) / (refcorpussize - corpussize)
    divergence = 0.5 * (klp(p, q) + klp(q, p))
    return divergence if p > q else -divergence
def likelihoodlift(wordfreq, refwordfreq, corpussize, refcorpussize, alpha):
    """Interpolation between raw log-likelihood and PMI.

    alpha=1 gives log(wordfreq/corpussize); alpha=0 gives plain pmi(); any
    other alpha mixes the two linearly:
    alpha*log(wf/cs) + (1-alpha)*pmi(...).
    Assumes wordfreq > 0 whenever alpha > 0 (log of zero is undefined).
    """
    if alpha == 1:
        return math.log(wordfreq / corpussize)
    if alpha == 0:
        return pmi(wordfreq, refwordfreq, corpussize, refcorpussize)
    # the original added a dead `beta = 0` offset inside the log; it had no
    # effect and has been removed
    return (alpha * math.log(wordfreq / corpussize)
            + (1 - alpha) * pmi(wordfreq, refwordfreq, corpussize, refcorpussize))
def mysurprise(wf, rwf, cs, rcs, measure, params):
    """Dispatch to the named surprise measure.

    measure is one of 'pmi', 'llr', 'kl', 'jsd' or 'likelihoodlift' (the
    last reads params['alpha'], defaulting to 0.5).

    Raises ValueError for an unknown measure name.  The original printed a
    warning and implicitly returned None, which made the downstream sort in
    improved_compute_surprises fail with an opaque TypeError.
    """
    if measure == 'pmi':
        return pmi(wf, rwf, cs, rcs)
    if measure == 'llr':
        return llr(wf, rwf, cs, rcs)
    if measure == 'kl':
        return kl(wf, rwf, cs, rcs)
    if measure == 'jsd':
        return jsd(wf, rwf, cs, rcs)
    if measure == 'likelihoodlift':
        return likelihoodlift(wf, rwf, cs, rcs, params.get('alpha', 0.5))
    raise ValueError("Unknown measure of surprise: {!r}".format(measure))
def improved_compute_surprises(corpusA, corpusB, measure, params=None, k=50, display=True):
    """Score every term of corpusA against corpusB with the given surprise
    measure and return the top-ranked (term, score) pairs.

    corpusA/corpusB are (size, [(term, freq), ...]) pairs as produced by
    find_hfw_dist.  params may carry 'threshold' (how many of corpusA's
    terms to score) plus measure-specific options.  A term absent from
    corpusB is smoothed as if it occurred freq + 1 times there.  For 'llr'
    only terms above the p<0.001 chi-square cut-off (10.828) are returned;
    for every other measure the top k are.
    """
    # `params={}` as a default is the classic mutable-default trap; use
    # None as the sentinel instead (backward-compatible for all callers)
    if params is None:
        params = {}
    corpusAsize, wordlistA = corpusA
    corpusBsize, wordlistB = corpusB
    threshold = params.get('threshold', len(wordlistA))
    dictB = makedict(wordlistB)
    scores = []
    for term, freq in wordlistA[:threshold]:
        reffreq = dictB.get(term, freq + 1)  # add-one smoothing for unseen terms
        scores.append((term, mysurprise(freq, reffreq, corpusAsize, corpusBsize, measure, params)))
    sortedscores = sorted(scores, key=operator.itemgetter(1), reverse=True)
    if display and k > 0:
        print("Top {} terms are ".format(k))
        print(sortedscores[:k])
    if measure == "llr":
        # count how many scores are significant at p < 0.001 (1 dof)
        rank = 0
        for term, score in sortedscores:
            if score > 10.828:
                rank += 1
            else:
                break
        print("{} significantly characterising terms".format(rank))
    else:
        rank = k
    return sortedscores[:rank]
def autolabel(rects, ax):
"""
Attach a text label above each bar displaying its height
"""
maxheight=np.array([rect.get_height() for rect in rects]).max()
if maxheight>1:
aformat='%1.1f'
add=math.log(maxheight,10)
else:
aformat='%.3f'
add=0.0005
#print(maxheight,aformat)
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., height + add,
aformat % height,
ha='center', va='bottom')
return (maxheight+add)*1.1
def display_list(hfw_list,cutoff=10,words=[],leg=None,title=None,ylim=10,abbrevx=True,xlabel='High Frequency Words',ylabel='Probability',colors=None):
    """Grouped bar chart of term probabilities (%) for several corpora.

    hfw_list: list of (corpussize, [(term, freq), ...]) pairs, one per
    corpus, as returned by find_hfw_dist.  With words=[] the first `cutoff`
    terms of each corpus are shown; otherwise only the listed words are.
    Counts are converted to percentages of each corpus.  Returns the
    x-axis term labels actually plotted.

    NOTE(review): when words==[] the corpora may rank different terms in
    their top `cutoff`, so the bar groups need not describe the same term
    set — the x labels come from the *last* corpus processed.
    """
    # one group of bars per corpus; split a 0.7-wide slot between them
    width=0.7/len(hfw_list)
    toplot=[]
    for hfw in hfw_list:
        corpussize=hfw[0]
        if words==[]:
            todisplay=hfw[1][:cutoff]
        else:
            todisplay=[(x,y) for (x,y) in hfw[1] if x in words]
            cutoff=len(words)
        # plot bars in alphabetical term order
        barvalues=sorted(todisplay,key=operator.itemgetter(0),reverse=False)
        #print(barvalues)
        xs,ys=[*zip(*barvalues)]
        if corpussize>0:
            # convert raw counts to percentages of the corpus
            ps=[y*100/corpussize for y in ys]
        else:
            ps=ys
        toplot.append(ps)
    #print(toplot)
    N=len(xs)
    ind=np.arange(N)
    fig,ax=plt.subplots(figsize=(2*cutoff,cutoff/2))
    rectset=[]
    if colors==None:
        colors=['r','b','y','g']
    for i,ps in enumerate(toplot):
        # offset each corpus's bars so the groups sit side by side
        rectset.append(ax.bar(ind+i*width,ps,width,color=colors[i]))
    if leg!=None:
        ax.legend(rectset,leg)
    ax.set_xticks(ind)
    if abbrevx:
        # keep only the first token of multi-word labels
        xs=[x.split(' ')[0] for x in xs]
    ax.set_xticklabels(xs)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    for rects in rectset:
        # autolabel also returns a y-limit with headroom for the labels,
        # overriding the ylim argument
        ylim=autolabel(rects,ax)
    if title!=None:
        ax.set_title(title)
    ax.set_ylim(0,ylim)
    return xs
def improved_display_list(xvalues, yvalueslist, labels={}):
    """Grouped bar chart: one group per entry of xvalues, one bar series
    per list in yvalueslist.

    `labels` may supply 'leg', 'title', 'xlabel', 'ylabel' and 'ylim';
    defaults are used otherwise.  Shows the figure.

    NOTE(review): labels={} is a mutable default; it is only read here so
    it is harmless, but None would be safer style.
    """
    # split a 0.7-wide slot between the bar series
    width = 0.7 / len(yvalueslist)
    N = len(xvalues)
    ind = np.arange(N)
    fig, ax = plt.subplots(figsize=(20, 12))
    rectset = []
    colors = ['r', 'b', 'y', 'g']
    for i, ps in enumerate(yvalueslist):
        # offset each series so the groups sit side by side
        rectset.append(ax.bar(ind + i * width, ps, width, color=colors[i]))
    leg = labels.get('leg', None)
    title = labels.get('title', None)
    xlabel = labels.get('xlabel', 'Year')
    ylabel = labels.get('ylabel', 'Probability')
    ylim = labels.get('ylim', 1)
    if leg != None:
        ax.legend(rectset, leg)
    ax.set_xticks(ind)
    ax.set_xticklabels(xvalues)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_ylim(0, ylim)
    for rects in rectset:
        # writes a value label above each bar
        autolabel(rects, ax)
    if title != None:
        ax.set_title(title)
    plt.show()
# The functions below assume a corpus (e.g. male_corpus) and a set of characterising terms for that corpus (e.g. malewords)
def find_pos(term, corpus):
    """Distribution of part-of-speech tags for `term` in `corpus`.

    Reads corpus.wordposdict[(term, tag)] for six tags of interest and
    corpus.allworddict[term] for the overall count; the remainder is
    bucketed under 'OTHER'.  Returns [(tag, proportion), ...], or [] when
    the term never occurs.
    """
    tags_of_interest = ['NOUN', 'VERB', 'ADJ', 'ADV', 'PUNCT', 'PROPN']
    tag_counts = {tag: corpus.wordposdict.get((term, tag), 0) for tag in tags_of_interest}
    overall = corpus.allworddict.get(term, 0)
    # whatever is not covered by the tags of interest counts as OTHER
    tag_counts['OTHER'] = overall - sum(tag_counts.values())
    if overall <= 0:
        return []
    return [(tag, count / overall) for tag, count in tag_counts.items()]
def analyse(termset, corpus):
    """Summarise a set of characterising terms against a corpus.

    Prints mean/sd of term frequency and probability (for the whole set
    and for the first 20 terms), then plots the part-of-speech
    distribution via improved_display_list.  termset is a list of
    (term, relevance) pairs; corpus must expose allworddict, wordposdict
    and wordtotal.
    """
    freqs = []
    somefreqs = []
    posdict = {}
    someposdict = {}
    threshold = 20
    for i, (term, relevance) in enumerate(termset):
        freq = corpus.allworddict[term]
        freqs.append(freq)
        if i < threshold:
            somefreqs.append(freq)
        # accumulate each term's fractional POS distribution
        poses = find_pos(term, corpus)
        for mypos, weight in poses:
            posdict[mypos] = posdict.get(mypos, 0) + weight
            if i < threshold:
                someposdict[mypos] = someposdict.get(mypos, 0) + weight
    freqarray = np.array(freqs)
    meanfreq = np.mean(freqarray)
    sdfreq = np.std(freqarray)
    meanprob = meanfreq / corpus.wordtotal
    sdprob = sdfreq / corpus.wordtotal
    print("Mean frequency is {}, sd is {}".format(meanfreq, sdfreq))
    print("Mean probability is {}, sd is {}".format(meanprob, sdprob))
    # same statistics restricted to the first `threshold` terms
    somefreqarray = np.array(somefreqs)
    meansomefreq = np.mean(somefreqarray)
    sdsomefreq = np.std(somefreqarray)
    meansomeprob = meansomefreq / corpus.wordtotal
    sdsomeprob = sdsomefreq / corpus.wordtotal
    print("For top {} words, mean freq is {}, sd is {}".format(threshold, meansomefreq, sdsomefreq))
    print("For top {} words, mean prob is {}, sd is {}".format(threshold, meansomeprob, sdsomeprob))
    # print(posdict)
    # convert the accumulated POS weights to percentages for plotting
    xvalues = posdict.keys()
    totaly = sum(posdict.values())
    totalz = sum(someposdict.values())
    allvalues = []
    somevalues = []
    for x in xvalues:
        allvalues.append(posdict.get(x, 0))
        somevalues.append(someposdict.get(x, 0))
    yvalues = [[100 * y / totaly for y in allvalues], [100 * z / totalz for z in somevalues]]
    labels = {'title': 'Distribution of POS in Characterising Terms', 'xlabel': 'Part of Speech',
              'ylabel': 'Proportion', 'leg': ['Whole Set', "Top {}-restricted Set".format(threshold)], 'ylim': 100}
    improved_display_list(xvalues, yvalues, labels)
def nearest_neighbours(wordset, w2vmodel):
    """Print the nearest neighbours of the first 20 scored terms and report
    the out-of-vocabulary percentage over the whole wordset.

    wordset is a list of (term, score) pairs; w2vmodel is a gensim-style
    model whose .wv.most_similar raises KeyError for OOV terms.
    """
    threshold = 20
    found = 0
    for i, (term, score) in enumerate(wordset):
        try:
            neighbours = w2vmodel.wv.most_similar([term])
            found += 1
            if i < threshold:
                print(term, neighbours)
        # the original bare `except:` swallowed every error (even
        # KeyboardInterrupt); only an OOV lookup should be tolerated
        except KeyError:
            print("{} not in vocab".format(term))
    oov = 100 - (found * 100 / len(wordset))
    print("Out of vocabulary: {}".format(oov))
def make_matrix(wordset, model, threshold=0.5):
    """Pairwise similarity matrix over the terms of wordset.

    Entry [i][j] is model.wv.similarity(term_i, term_j), floored to 0 when
    it is below `threshold` or when either term is out of vocabulary.
    wordset is a list of (term, score) pairs; returns a list of row lists.
    """
    matrix = []
    for (termA, _score) in wordset:
        row = []
        for (termB, _score) in wordset:
            try:
                sim = model.wv.similarity(termA, termB)
                if sim < threshold:
                    sim = 0
            # the original bare `except:` hid every failure; gensim raises
            # KeyError for out-of-vocabulary terms, which is all we expect
            except KeyError:
                sim = 0
            row.append(sim)
        matrix.append(row)
    return matrix
punctdict = {"\n": "_NEWLINE", ";": "_SEMICOLON", ":": "_COLON", "\"": "_QUOTE", "'s": "_GEN", "-": "_HYPHEN",
"(": "_LEFTBRACKET", ")": "_RIGHTBRACKET", ",": "_COMMA", ".": "_FULLSTOP", "..": "_DOTDOT"}
def clean(term):
# remove punctuation which will confuse Gephi
cleanterm = punctdict.get(term, term)
return cleanterm
def make_csv(wordset, model, filename, threshold=0.5):
    """Write the pairwise similarity matrix as a ';'-separated CSV file.

    The header row is an empty cell followed by the (cleaned, Gephi-safe)
    terms; each data row starts with its term. Similarities below
    `threshold` appear as 0 (see make_matrix).
    """
    matrix = make_matrix(wordset, model, threshold=threshold)
    terms = [clean(term) for (term, score) in wordset]
    with open(filename, 'w') as csvfile:
        # header: leading empty cell, then one column per term
        csvfile.write(''.join(';' + t for t in terms) + '\n')
        for term, row in zip(terms, matrix):
            csvfile.write(term + ''.join(';' + str(item) for item in row) + '\n')
def find_topk(alist, k):
    """Return the k largest values of alist, excluding the single largest one.

    The top value is skipped because, in a similarity-matrix row, the best
    match is the word itself. k == -1 means "all remaining values".
    Returns [] when the list has fewer than two elements (the original
    raised IndexError here) or when the second-best value is 0.
    """
    if len(alist) < 2:
        return []
    sortedlist = sorted(alist, reverse=True)
    if sortedlist[1] == 0:
        return []
    if k == -1:
        return sortedlist[1:]
    return sortedlist[1:k + 1]
def semantic_coherance(word_set, model, k=1, verbose=True):
    """Average of each term's top-k neighbour similarities (self-match excluded).

    Builds the similarity matrix for word_set and averages the top-k values
    of every row as selected by find_topk. Returns 0 when no neighbours
    survive the similarity threshold.
    """
    matrix = make_matrix(word_set, model)
    running, count = 0, 0
    for row in matrix:
        best = find_topk(row, k)
        running += sum(best)
        count += len(best)
    average = running / count if count else 0
    if verbose:
        print("Average semantic coherance at k={}: {}".format(k, average))
    return average
def coherance_profile(words, model, verbose=True):
    """Return coherence scores at k = 1, 2, 5, 10 and over all neighbours (k=-1)."""
    return [semantic_coherance(words, model, k=k, verbose=verbose)
            for k in (1, 2, 5, 10, -1)]
def frequency_profile(wordsets, corpus, labels=None):
    """Draw a box plot of corpus frequencies for each characteristic word set.

    wordsets -- iterable of [(word, score), ...] lists
    corpus   -- must expose `allworddict`, mapping word -> frequency (int or str)
    labels   -- optional per-set labels for the x axis

    Displays the plot (log-scaled y axis); returns None.
    """
    allfrequencies = []
    for wordset in wordsets:
        frequencies = [int(corpus.allworddict[word]) for (word, _score) in wordset]
        allfrequencies.append(np.array(frequencies))
    fig, ax = plt.subplots(figsize=(20, 10))
    # Default changed from the classic mutable-default pitfall (labels=[]) to
    # None: matplotlib treats labels=None as "no labels", while an empty list
    # mismatches the number of boxes.
    ax.boxplot(allfrequencies, showmeans=True, labels=labels)
    ax.set_title('Frequency Profile of Characteristic Words')
    ax.set_yscale('log')
    plt.show()
ft = 25  # default corpus-frequency cut-off


def frequency_threshold(csets, threshold=ft, corpus=None):
    """Filter each word set, keeping only terms whose corpus frequency exceeds threshold.

    csets  -- iterable of [(term, score), ...] lists
    corpus -- must expose `allworddict`, mapping term -> frequency (int or str)
    Returns a new list of filtered lists, preserving order.
    """
    filtered = []
    for cset in csets:
        filtered.append([(term, score) for (term, score) in cset
                         if int(corpus.allworddict[term]) > threshold])
    return filtered
if __name__=="__main__":
print("This file contains functions taken from CharacterisingWords.ipynb")
print("No tests have been written, as yet, in this file")
| [
"julie.weeds@gmail.com"
] | julie.weeds@gmail.com |
d08de54d6bc59e78633d8e6b4a520fc80e51009c | 909bb1b51213e47424ac9ccf3c3ca83764a5c04b | /semestre01/exercises_university_uri/1282.1.py | 27d805f65a4be6aee16bc016ef4043a431a27604 | [
"MIT"
] | permissive | alaanlimaa/impactaADS | 07e5d74c88ca0ec85c96960fadc2e22cbb67269b | 307d0b2c7831a5038184592afae7a825b2774498 | refs/heads/main | 2023-07-23T20:36:06.045907 | 2021-08-30T15:45:02 | 2021-08-30T15:45:02 | 387,860,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | def txt(msg):
print('-' * len(msg))
print(msg)
print('-' * len(msg))
# Beecrowd/URI-style exercise: compute the monthly "BÔNUS" paid to a list of
# channels based on subscriber count and premium status.
# Input per channel: "name;subscribers;base_amount". Spaces inside the name
# are temporarily replaced with '-' so the ';'-split stays positional.
qtdCanais = int(input())  # number of channels
lista = []
for c in range(1, qtdCanais + 1):
    num = input()
    if ' ' in num:
        # protect spaces inside the channel name before splitting on ';'
        numE = num.replace(' ', '-')
        numE = numE.replace(';', ' ').split()
        lista.append(numE)
    else:
        num = num.replace(';', ' ').split()
        lista.append(num)
premium = float(input())     # payout per complete thousand subscribers (premium)
notPremium = float(input())  # payout per complete thousand subscribers (regular)
txt('BÔNUS')  # banner helper defined earlier in this file
for s in lista:
    insc = int(s[1])    # subscriber count
    mont = float(s[2])  # base amount already earned
    div = insc % 1000   # subscribers beyond the last complete thousand
    if 'não' in s:
        # token 'não' marks a non-premium channel -> use the notPremium rate
        if insc > 1000 and div > 0:
            totInsc = insc - div
            monetizar = ((totInsc / 1000) * notPremium) + mont
        elif insc < 1000 and div > 0:
            monetizar = float(s[2])
        elif div == 0:
            monetizar = ((insc / 1000) * notPremium) + mont
    else:
        # premium channel: same rule with the premium rate
        if insc > 1000 and div > 0:
            totInsc = insc - div
            monetizar = ((totInsc / 1000) * premium) + mont
        elif insc < 1000 and div > 0:
            monetizar = float(s[2])
        elif div == 0:
            monetizar = ((insc / 1000) * premium) + mont
    # restore the spaces that were replaced with '-' before printing
    if '-' in str(s[0]):
        print(f'{str(s[0]).replace("-", " ")}: R$ {monetizar:.2f}')
    else:
        print(f'{s[0]}: R$ {monetizar:.2f}')
"alanlimabusiness@outlook.com"
] | alanlimabusiness@outlook.com |
60c39f0052992c31c1a7d737ffd1a39b574e54e5 | a742bd051641865d2e5b5d299c6bc14ddad47f22 | /algorithm/leetcode/greedy/02-无重叠区间.py | 74158138c6e570b1e20ea082b1bcb41d0e108c23 | [] | no_license | lxconfig/UbuntuCode_bak | fb8f9fae7c42cf6d984bf8231604ccec309fb604 | 3508e1ce089131b19603c3206aab4cf43023bb19 | refs/heads/master | 2023-02-03T19:10:32.001740 | 2020-12-19T07:27:57 | 2020-12-19T07:27:57 | 321,351,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py |
"""
给定一个区间的集合,找到需要移除区间的最小数量,使剩余区间互不重叠。
"""
from typing import List
class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
def OverlapIntervals(intervals: List[List[int]]) -> int:
if not intervals: return 0
items = sorted(intervals, key=lambda x: x[1])
x, count = items[0], 1
for item in items[1: ]:
if item[0] >= x[1]:
count, x = count + 1, item
return count
return len(intervals) - OverlapIntervals(intervals) | [
"525868229@qq.com"
] | 525868229@qq.com |
57938b46b9880b0ecc3b834e564bc5fe379aef20 | bd1281d7da02ace6b50aeb267b9a0cd8876eea11 | /badwolf/log/views.py | c31fb6ed5e556649a9a6c522df649952389c2021 | [
"MIT"
] | permissive | bosondata/badwolf | 6ebde680df6d3e7e7e0a7d295ce7cf6944a93d3d | c693785af101f68505769cd712bbf13e37423587 | refs/heads/master | 2023-03-08T04:51:36.911524 | 2019-07-11T02:44:22 | 2019-07-11T02:44:22 | 67,678,705 | 95 | 15 | MIT | 2018-12-19T09:05:49 | 2016-09-08T07:18:49 | Python | UTF-8 | Python | false | false | 3,039 | py | # -*- coding: utf-8 -*-
import os
import logging
import deansi
from flask import Blueprint, current_app, send_from_directory, request, abort, Response
from docker import DockerClient
logger = logging.getLogger(__name__)
blueprint = Blueprint('log', __name__)
FOLLOW_LOG_JS = '''<script type="text/javascript">
var observeDOM = (function(){
var MutationObserver = window.MutationObserver || window.WebKitMutationObserver,
eventListenerSupported = window.addEventListener;
return function(obj, callback){
if( MutationObserver ){
// define a new observer
var obs = new MutationObserver(function(mutations, observer){
if( mutations[0].addedNodes.length || mutations[0].removedNodes.length )
callback();
});
// have the observer observe foo for changes in children
obs.observe( obj, { childList:true, subtree:true });
}
else if( eventListenerSupported ){
obj.addEventListener('DOMNodeInserted', callback, false);
obj.addEventListener('DOMNodeRemoved', callback, false);
}
};
})();
window.autoFollow = true;
window.onscroll = function() {
if (window.innerHeight + window.pageYOffset >= document.body.offsetHeight - 10) {
window.autoFollow = true;
} else {
window.autoFollow = false;
}
};
observeDOM(document, function() {
if (window.autoFollow) {
window.scrollTo(window.pageXOffset, document.body.scrollHeight);
}
});
</script>'''
@blueprint.route('/build/<sha>', methods=['GET'])
def build_log(sha):
    """Serve the build log for commit `sha`.

    Resolution order:
      1. legacy on-disk log at <log_dir>/<sha>/build.html
      2. per-task on-disk log at <log_dir>/<sha>/<task_id>/build.html
      3. live HTML stream from the still-running Docker container labelled
         with the task id (404 if none is running)
    """
    task_id = request.args.get('task_id')
    log_dir = os.path.join(current_app.config['BADWOLF_LOG_DIR'], sha)
    # old log path
    if os.path.exists(os.path.join(log_dir, 'build.html')):
        return send_from_directory(log_dir, 'build.html')
    if not task_id:
        abort(404)
    # new log path
    log_dir = os.path.join(log_dir, task_id)
    if os.path.exists(os.path.join(log_dir, 'build.html')):
        return send_from_directory(log_dir, 'build.html')
    # Try realtime logs
    docker = DockerClient(
        base_url=current_app.config['DOCKER_HOST'],
        timeout=current_app.config['DOCKER_API_TIMEOUT'],
        version='auto',
    )
    containers = docker.containers.list(filters=dict(
        status='running',
        label='task_id={}'.format(task_id),
    ))
    if not containers:
        abort(404)
    # TODO: ensure only 1 container matched task_id
    container = containers[0]

    def _streaming_gen():
        # Emit a self-contained HTML page: deansi stylesheet, auto-follow JS,
        # then the ANSI-converted log lines as they arrive from Docker.
        yield '<style>{}</style>'.format(deansi.styleSheet())
        yield FOLLOW_LOG_JS
        yield '<div class="ansi_terminal">'
        buffer = []
        for log in container.logs(stdout=True, stderr=True, stream=True, follow=True):
            # NOTE(review): container.logs yields bytes; str(log) produces a
            # "b'...'" repr rather than decoded text — presumably this should
            # be log.decode(); confirm against the rendered output.
            char = str(log)
            buffer.append(char)
            if char == '\n':
                yield deansi.deansi(''.join(buffer))
                buffer = []
        if buffer:
            yield deansi.deansi(''.join(buffer))
        yield '</div>'
    return Response(_streaming_gen(), mimetype='text/html;charset=utf-8')
| [
"messense@icloud.com"
] | messense@icloud.com |
a4e5dd0c35c7a3cb860b83cb9f710b863ad829b1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1034.py | 37a51eca5168bb24b1adf080e12fe5ea12ac91a4 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | import os
input_file = open("input_large.in", "r")
output_file = open("output_large.out", "w")
cases = int(input_file.readline())
for i in range(cases):
params = input_file.readline().split(" ")
full_dist = float(params[0])
num_horses = int(params[1])
h_distances = []
h_speeds = []
for h in range(num_horses):
h_params = input_file.readline().split(" ")
h_start_pos = int(h_params[0])
h_speed = int(h_params[1])
h_distances.append(float(full_dist) - float(h_start_pos))
h_speeds.append(float(h_speed))
time_remaining = [h_distances[j] / h_speeds[j] for j in range(len(h_distances))]
annie_time = max(time_remaining)
annie_speed = full_dist / annie_time
output_file.write("Case #" + str(i + 1) + ": " + str(annie_speed) + "\n")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
0ca58964ef93d7904fda1f224bdc824f1998133f | fd180ada22150a08106fba3258ed671940e7db25 | /torch/fx/graph_module.py | bbc5c26a818213e074e424d5c8dca146f88cc453 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | xta0/pytorch | 004d1f6c9a1e59c5d9cb7d827609a0cd98eacf71 | adc21c6db278829797161ff23a92e40068f0db5c | refs/heads/master | 2022-12-21T09:31:34.213078 | 2020-10-05T18:41:12 | 2020-10-05T18:43:28 | 198,114,372 | 0 | 0 | NOASSERTION | 2019-07-21T23:17:39 | 2019-07-21T23:17:38 | null | UTF-8 | Python | false | false | 8,911 | py | import torch
import torch.overrides
import linecache
from typing import Type, Dict, List, Any, Union
from .graph import Graph
import copy
# normal exec loses the source code, however we can patch
# the linecache module to still recover it.
# using exec_with_source will add it to our local cache
# and then tools like TorchScript will be able to get source info.
_next_id = 0  # monotonically increasing id used to build unique synthetic filenames


def exec_with_source(src: str, globals: Dict[str, Any]):
    """Exec `src` under a unique synthetic filename and register its lines in
    `_eval_cache`, so inspect/linecache (and therefore TorchScript) can
    recover the source afterwards. Definitions land in `globals`.
    """
    global _next_id
    key = f'<eval_with_key_{_next_id}>'
    _next_id += 1
    # store the source lines before exec so tracebacks can already see them
    _eval_cache[key] = [line + '\n' for line in src.splitlines()]
    exec(compile(src, key, 'exec'), globals)
# patch linecache so that any code we exec using exec_with_source
# works with inspect
_eval_cache : Dict[str, List[str]] = {}  # synthetic filename -> list of source lines
_orig_getlines = linecache.getlines


def patched_getline(*args, **kwargs):
    """linecache.getlines replacement: consult _eval_cache first, then fall
    back to the original implementation for real files."""
    if args[0] in _eval_cache:
        return _eval_cache[args[0]]
    return _orig_getlines(*args, **kwargs)


linecache.getlines = patched_getline  # install the patch module-wide
def _forward_from_src(src : str):
    """Compile generated source (expected to define a function named
    `forward`) and return that function object."""
    gbls: Dict[str, Any] = {
        'torch': torch  # generated code references torch by name
    }
    exec_with_source(src, gbls)
    return gbls['forward']
def deserialize_graphmodule(body : dict) -> torch.nn.Module:
    """
    Deserialize a GraphModule given the dictionary of the original module,
    using the code to reconstruct the graph. We delete the actual graph before
    saving the dictionary so that changes to the in-memory graph format do not
    get serialized.
    """
    # We create a dummy class here because symbolic_trace pulls the forward()
    # function off of the class, rather than the instance
    class CodeOnlyModule(torch.nn.Module):
        def __init__(self, body):
            super().__init__()
            # adopt the serialized attribute dict wholesale (parameters,
            # buffers, generated code, ...)
            self.__dict__ = body

    # rebuild forward from the serialized source string
    CodeOnlyModule.forward = _forward_from_src(body['code'])
    from .symbolic_trace import Tracer

    # we shouldn't trace into any of the submodules, they were not
    # because they were not traced in the original GraphModule
    class KeepModules(Tracer):
        def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
            # treat every submodule as a leaf: never recurse into it
            return True

    com = CodeOnlyModule(body)
    return GraphModule(com, KeepModules().trace(com))
def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
    """Copy the attribute at qualified name `target` from `from_module` to
    `to_module`, installing empty intermediate Modules along the path where
    none exist yet.
    """
    *prefix, field = target.split('.')
    for item in prefix:
        f = getattr(from_module, item)
        t = getattr(to_module, item, None)
        if f is t:
            # we have already installed one of its parents
            # (e.g. target = root.linear.weight, but we have already installed root.linear)
            # once we install a parent, we no longer need to copy the children
            # since all the needed properties will already be present
            return
        if t is None:
            # materialise a placeholder Module for this path segment
            t = torch.nn.Module()
            setattr(to_module, item, t)
        from_module, to_module = f, t
    setattr(to_module, field, getattr(from_module, field))
def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
    """Install `from_obj` at the dotted qualified name `target` under
    `to_module`, creating empty intermediate Modules along the path where
    none exist yet.
    """
    *path, attr_name = target.split('.')
    for part in path:
        child = getattr(to_module, part, None)
        if child is None:
            # materialise a placeholder Module for this path segment
            child = torch.nn.Module()
            setattr(to_module, part, child)
        to_module = child
    setattr(to_module, attr_name, from_obj)
class GraphModule(torch.nn.Module):
    """
    GraphModule is an nn.Module generated from an fx.Graph. GraphModule has
    important attributes:

        graph : The graph from which this GraphModule was generated
        code : The Python source code for the function generated from `graph`
        forward : The Python method generated from `graph`

    Note that when `graph` is reassigned, `code` and `forward` will be automatically
    regenerated.
    """
    def __new__(cls: 'Type[GraphModule]', *args, **kwargs):
        # each instance of a graph module needs its own forward method
        # so create a new singleton class for each instance.
        # it is a subclass of the user-defined class, the only difference
        # is an extra layer to install the forward method
        class GraphModuleImpl(cls):  # type: ignore
            pass
        return super().__new__(GraphModuleImpl)

    def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph):
        """
        Construct a GraphModule.
        root - `root` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
            - In the case that `root` is a Module, any references to Module-based objects (via qualified
                name) in the Graph's Nodes' `target` field will be copied over from the respective place
                within `root`'s Module hierarchy into the GraphModule's module hierarchy.
            - In the case that `root` is a dict, the qualified name found in a Node's `target` will be
                looked up directly in the dict's keys. The object mapped to by the Dict will be copied
                over into the appropriate place within the GraphModule's module hierarchy.
        graph - `graph` contains the nodes this GraphModule should use for code generation
        """
        super().__init__()
        if isinstance(root, torch.nn.Module):
            if hasattr(root, 'training'):
                self.training = root.training
            # pull over every attribute the graph's nodes reference by name
            for node in graph.nodes:
                if node.op in ['get_attr', 'call_module']:
                    assert isinstance(node.target, str)
                    _copy_attr(root, self, node.target)
        elif isinstance(root, dict):
            targets_to_copy = []
            for node in graph.nodes:
                if node.op in ['get_attr', 'call_module']:
                    assert isinstance(node.target, str)
                    if node.target not in root:
                        raise RuntimeError('Node ' + str(node) + ' referenced target ' + node.target +
                                           ' but that target was not provided in `root`!')
                    targets_to_copy.append(node.target)
            # Sort targets in ascending order of the # of atoms.
            # This will ensure that less deeply nested attributes are assigned
            # before more deeply nested attributes. For example, foo.bar
            # will be assigned before foo.bar.baz. Otherwise, we might assign
            # the user-provided `foo.bar` and wipe out the previously-assigned
            # `foo.bar.baz`
            targets_to_copy.sort(key=lambda t: t.count('.'))
            for target_to_copy in targets_to_copy:
                _assign_attr(root[target_to_copy], self, target_to_copy)
        else:
            raise RuntimeError('Unsupported type ' + str(root) + ' passed for root!')
        # triggers the property setter below, generating code and forward
        self.graph = graph

    # TorchScript breaks trying to compile the graph setter because of the
    # continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
    #
    # Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
    __jit_unused_properties__ = ['graph']

    @property
    def graph(self):
        return self._graph

    @graph.setter
    def graph(self, val) -> None:
        # Reassigning the graph regenerates both `code` and `forward`;
        # forward is installed on the per-instance class made in __new__.
        self._graph = val
        self.code = self._graph.python_code(root_module='self')
        cls = type(self)
        cls.forward = _forward_from_src(self.code)

    def __reduce__(self):
        # Serialize the attribute dict without the graph object; the graph is
        # rebuilt from `code` on load by deserialize_graphmodule.
        dict_without_graph = self.__dict__.copy()
        del dict_without_graph['_graph']
        return (deserialize_graphmodule, (dict_without_graph,))

    # because __reduce__ is defined for serialization,
    # we need to define deepcopy otherwise it will call __reduce__
    # and cause symbolic tracing to occur every time we try to copy the object
    def __deepcopy__(self, memo):
        fake_mod = torch.nn.Module()
        fake_mod.__dict__ = copy.deepcopy(self.__dict__)
        return GraphModule(fake_mod, self.graph)

    def __copy__(self):
        return GraphModule(self, self.graph)

    def __str__(self) -> str:
        # standard nn.Module repr followed by the generated source
        orig_str = super().__str__()
        return '\n'.join([orig_str, self.code])
# workarounds for issues in __torch_function__
# WAR for __torch_function__ not handling tensor lists,
# fix is in https://github.com/pytorch/pytorch/pull/34725
# orig_cat = torch.cat
# def patched_cat(*args, **kwargs):
# tensors = args[0]
# for t in tensors:
# if isinstance(t, Proxy):
# return t.__torch_function__(patched_cat, (), args, kwargs)
# return orig_cat(*args, **kwargs)
# patched_cat.__module__ = 'torch'
# patched_cat.__name__ = 'cat'
# torch.cat = patched_cat
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
84cde06244cc011a3a7ae182d30b956383dc9e3b | 4cd4e5c39523f5889efb676414c5f4e58bc38991 | /point_clouds/aux.py | 1875110bb05ac37092d93f63297800da5c1efd46 | [
"MIT"
] | permissive | optas/geo_tool | 84a63c8dd9e9881234737a816a2a5b119e4368eb | 7eda787b4b9361ee6cb1601a62495d9d5c3605e6 | refs/heads/master | 2022-02-26T18:03:09.930737 | 2022-01-23T03:19:46 | 2022-01-23T03:19:46 | 67,737,034 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,712 | py | '''
Created on Aug 21, 2017
@author: optas
'''
import warnings
import numpy as np
from sklearn.neighbors import NearestNeighbors
from scipy.sparse.linalg import eigs
from numpy.linalg import norm
from .. fundamentals import Graph
from .. utils.linalg_utils import l2_norm
def greedy_match_pc_to_pc(from_pc, to_pc):
    '''Map each point of from_pc to its single nearest point in to_pc.

    Returns (indices, distances) as produced by sklearn's
    NearestNeighbors.kneighbors: both have shape (len(from_pc), 1).
    '''
    nn = NearestNeighbors(n_neighbors=1).fit(to_pc)
    distances, indices = nn.kneighbors(from_pc)
    return indices, distances
def chamfer_pseudo_distance(pc1, pc2):
    """Symmetric sum of nearest-neighbor distances between two point clouds
    (a Chamfer-like pseudo-distance: distances are summed, not averaged)."""
    _, forward = greedy_match_pc_to_pc(pc1, pc2)
    _, backward = greedy_match_pc_to_pc(pc2, pc1)
    return np.sum(forward) + np.sum(backward)
def laplacian_spectrum(pc, n_evecs, k=6):
    ''' Smallest eigen-pairs of the normalized graph Laplacian of a k-NN graph over pc.

    pc: point cloud object exposing k_nearest_neighbors(k)
    n_evecs: (int) number of eigen-pairs to return (n_evecs + 1 are computed)
    k: (int) number of nearest neighbors each point is connected with in the constructed Adjacency
       matrix that will be used to derive the Laplacian.
    Raises ValueError if the resulting graph is disconnected.
    '''
    neighbors_ids, distances = pc.k_nearest_neighbors(k)
    A = Graph.knn_to_adjacency(neighbors_ids, distances)
    if Graph.connected_components(A)[0] != 1:
        raise ValueError('Graph has more than one connected component, increase k.')
    A = (A + A.T) / 2.0  # symmetrize: a k-NN graph is not symmetric in general
    L = Graph.adjacency_to_laplacian(A, 'norm').astype('f4')
    # shift-invert around sigma=-1.0 to pull out the smallest-magnitude eigenvalues
    evals, evecs = eigs(L, n_evecs + 1, sigma=-10e-1, which='LM')
    if np.any(l2_norm(evecs.imag, axis=0) / l2_norm(evecs.real, axis=0) > 1.0 / 100):
        warnings.warn('Produced eigen-vectors are complex and contain significant mass on the imaginary part.')
    evecs = evecs.real  # eigs returns complex values by default.
    evals = evals.real
    index = np.argsort(evals)  # Sort evals from smallest to largest
    evals = evals[index]
    evecs = evecs[:, index]
    return evals, evecs
def unit_cube_grid_point_cloud(resolution, clip_sphere=False):
    '''Returns the center coordinates of each cell of a 3D grid with resolution^3 cells,
    that is placed in the unit-cube (resolution must be >= 2).
    If clip_sphere is True it drops the "corner" cells that lie outside the unit-sphere.

    Returns (grid, spacing): grid is float32 of shape (r, r, r, 3), or (n, 3)
    when clip_sphere is True.
    '''
    spacing = 1.0 / float(resolution - 1)
    # Vectorized rewrite of the original triple loop, which also used the
    # Python-2-only `xrange`. grid[i, j, k] == (i, j, k) * spacing - 0.5.
    axis = np.arange(resolution, dtype=np.float32) * spacing - 0.5
    grid = np.stack(np.meshgrid(axis, axis, axis, indexing='ij'), axis=-1)
    if clip_sphere:
        grid = grid.reshape(-1, 3)
        grid = grid[np.linalg.norm(grid, axis=1) <= 0.5]
    return grid, spacing
def point_cloud_to_volume(points, vsize, radius=1.0):
    """ input is Nx3 points.
        output is vsize*vsize*vsize occupancy grid (0.0 / 1.0)
        assumes points are in range [-radius, radius]

        Fix over the original: a point sitting exactly on the +radius
        boundary used to produce index == vsize and raise IndexError; such
        points are now clamped into the last voxel.
    """
    vol = np.zeros((vsize, vsize, vsize))
    voxel = 2 * radius / float(vsize)
    locations = ((points + radius) / voxel).astype(int)
    # clamp boundary (and any slightly out-of-contract) points into range
    locations = np.clip(locations, 0, vsize - 1)
    vol[locations[:, 0], locations[:, 1], locations[:, 2]] = 1.0
    return vol
def volume_to_point_cloud(vol):
    """ vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
        return Nx3 numpy array of occupied cell indices (C-order scan, same
        ordering as the original triple loop).

        Fixes over the original: the assertion now actually checks the third
        axis (it compared shape[1] twice), and the O(vsize^3) Python loop is
        replaced by a vectorized np.argwhere.
    """
    vsize = vol.shape[0]
    assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
    points = np.argwhere(vol == 1)
    if points.size == 0:
        return np.zeros((0, 3))
    return points
"optas@stanford.edu"
] | optas@stanford.edu |
2286d2a45d3515907c5f7eb59c5add02b7a6e530 | 76f59c245744e468577a293a0b9b078f064acf07 | /79.word-search.py | 632b1ff08a943956d5e83bffd8706832f9f80968 | [] | no_license | satoshun-algorithm-example/leetcode | c3774f07e653cf58640a6e7239705e58c5abde82 | 16b39e903755dea86f9a4f16df187bb8bbf835c5 | refs/heads/master | 2020-07-01T10:24:05.343283 | 2020-01-13T03:27:27 | 2020-01-13T03:27:27 | 201,144,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,863 | py | #
# @lc app=leetcode id=79 lang=python3
#
# [79] Word Search
#
from typing import List
# @lc code=start
class Solution:
    def exist(self, board: List[List[str]], word: str) -> bool:
        """Return True if `word` can be traced through horizontally/vertically
        adjacent cells of `board`, using each cell at most once.

        Backtracking DFS. The four copy-pasted direction blocks of the
        original are collapsed into one loop over offsets, and the crash on
        an empty board (len(board[0]) on []) is guarded.
        """
        if not word:
            return True
        if not board or not board[0]:
            return False
        rows, cols = len(board), len(board[0])
        visited = [[False] * cols for _ in range(rows)]

        def _dfs(x: int, y: int, idx: int) -> bool:
            # word[:idx] is already matched along the visited path ending at (x, y)
            if idx == len(word):
                return True
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                nx, ny = x + dx, y + dy
                if (0 <= nx < cols and 0 <= ny < rows
                        and not visited[ny][nx] and board[ny][nx] == word[idx]):
                    visited[ny][nx] = True
                    if _dfs(nx, ny, idx + 1):
                        return True
                    visited[ny][nx] = False  # backtrack
            return False

        for y in range(rows):
            for x in range(cols):
                if board[y][x] == word[0]:
                    visited[y][x] = True
                    if _dfs(x, y, 1):
                        return True
                    visited[y][x] = False
        return False
| [
"shun.sato1@gmail.com"
] | shun.sato1@gmail.com |
8668ef75a42f540aed1ffddd722eedfa41397b39 | 5e0e3d05918154f1b30b6ab54f564720494f8c17 | /Hetionet/venv/Lib/site-packages/cassandra/__init__.py | 96cc537663ce8a695b16988403dafb89b0af5951 | [
"MIT"
] | permissive | TusharMalakar/Machine-learning-using-BigData | fc3697b3e114474c5d4d70cfda4237edaa446652 | d8cd334d122662f59ca111459071a16cc74b169f | refs/heads/master | 2023-05-25T06:11:26.131265 | 2019-12-26T20:12:14 | 2019-12-26T20:12:14 | 205,404,326 | 0 | 2 | MIT | 2022-11-11T07:48:47 | 2019-08-30T15:02:05 | Python | UTF-8 | Python | false | false | 19,004 | py | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
class NullHandler(logging.Handler):
    """No-op handler attached to the 'cassandra' logger below, so the library
    produces no "no handlers could be found" warning when the application
    configures no logging.

    NOTE(review): logging.NullHandler has existed since Python 2.7 —
    presumably this local copy is kept for very old interpreters; confirm
    before replacing.
    """

    def emit(self, record):
        # intentionally discard every record
        pass
__version_info__ = (3, 20, 0)
__version__ = '.'.join(map(str, __version_info__))
class ConsistencyLevel(object):
    """
    Specifies how many replicas must respond for an operation to be considered
    a success. By default, ``ONE`` is used for all operations.
    """

    ANY = 0
    """
    Only requires that one replica receives the write *or* the coordinator
    stores a hint to replay later. Valid only for writes.
    """

    ONE = 1
    """
    Only one replica needs to respond to consider the operation a success
    """

    TWO = 2
    """
    Two replicas must respond to consider the operation a success
    """

    THREE = 3
    """
    Three replicas must respond to consider the operation a success
    """

    QUORUM = 4
    """
    ``ceil(RF/2)`` replicas must respond to consider the operation a success
    """

    ALL = 5
    """
    All replicas must respond to consider the operation a success
    """

    LOCAL_QUORUM = 6
    """
    Requires a quorum of replicas in the local datacenter
    """

    EACH_QUORUM = 7
    """
    Requires a quorum of replicas in each datacenter
    """

    SERIAL = 8
    """
    For conditional inserts/updates that utilize Cassandra's lightweight
    transactions, this requires consensus among all replicas for the
    modified data.
    """

    LOCAL_SERIAL = 9
    """
    Like :attr:`~ConsistencyLevel.SERIAL`, but only requires consensus
    among replicas in the local datacenter.
    """

    LOCAL_ONE = 10
    """
    Sends a request only to replicas in the local datacenter and waits for
    one response.
    """

    @staticmethod
    def is_serial(cl):
        """True for the serial (lightweight-transaction) consistency levels."""
        # single membership test instead of the original chained comparisons
        return cl in (ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL)
ConsistencyLevel.value_to_name = {
ConsistencyLevel.ANY: 'ANY',
ConsistencyLevel.ONE: 'ONE',
ConsistencyLevel.TWO: 'TWO',
ConsistencyLevel.THREE: 'THREE',
ConsistencyLevel.QUORUM: 'QUORUM',
ConsistencyLevel.ALL: 'ALL',
ConsistencyLevel.LOCAL_QUORUM: 'LOCAL_QUORUM',
ConsistencyLevel.EACH_QUORUM: 'EACH_QUORUM',
ConsistencyLevel.SERIAL: 'SERIAL',
ConsistencyLevel.LOCAL_SERIAL: 'LOCAL_SERIAL',
ConsistencyLevel.LOCAL_ONE: 'LOCAL_ONE'
}
ConsistencyLevel.name_to_value = {
'ANY': ConsistencyLevel.ANY,
'ONE': ConsistencyLevel.ONE,
'TWO': ConsistencyLevel.TWO,
'THREE': ConsistencyLevel.THREE,
'QUORUM': ConsistencyLevel.QUORUM,
'ALL': ConsistencyLevel.ALL,
'LOCAL_QUORUM': ConsistencyLevel.LOCAL_QUORUM,
'EACH_QUORUM': ConsistencyLevel.EACH_QUORUM,
'SERIAL': ConsistencyLevel.SERIAL,
'LOCAL_SERIAL': ConsistencyLevel.LOCAL_SERIAL,
'LOCAL_ONE': ConsistencyLevel.LOCAL_ONE
}
def consistency_value_to_name(value):
    """Human-readable name for a ConsistencyLevel value; "Not Set" for None."""
    if value is None:
        return "Not Set"
    return ConsistencyLevel.value_to_name[value]
class ProtocolVersion(object):
    """
    Defines native protocol versions supported by this driver.
    """

    V1 = 1
    """
    v1, supported in Cassandra 1.2-->2.2
    """

    V2 = 2
    """
    v2, supported in Cassandra 2.0-->2.2;
    added support for lightweight transactions, batch operations, and automatic query paging.
    """

    V3 = 3
    """
    v3, supported in Cassandra 2.1-->3.x+;
    added support for protocol-level client-side timestamps (see :attr:`.Session.use_client_timestamp`),
    serial consistency levels for :class:`~.BatchStatement`, and an improved connection pool.
    """

    V4 = 4
    """
    v4, supported in Cassandra 2.2-->3.x+;
    added a number of new types, server warnings, new failure messages, and custom payloads. Details in the
    `project docs <https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec>`_
    """

    V5 = 5
    """
    v5, in beta from 3.x+
    """

    SUPPORTED_VERSIONS = (V5, V4, V3, V2, V1)
    """
    A tuple of all supported protocol versions
    """

    BETA_VERSIONS = (V5,)
    """
    A tuple of all beta protocol versions
    """

    MIN_SUPPORTED = min(SUPPORTED_VERSIONS)
    """
    Minimum protocol version supported by this driver.
    """

    MAX_SUPPORTED = max(SUPPORTED_VERSIONS)
    """
    Maximum protocol version supported by this driver.
    """

    @classmethod
    def get_lower_supported(cls, previous_version):
        """
        Return the lower supported protocol version. Beta versions are omitted.
        """
        try:
            # highest non-beta supported version strictly below previous_version
            version = next(v for v in sorted(ProtocolVersion.SUPPORTED_VERSIONS, reverse=True) if
                           v not in ProtocolVersion.BETA_VERSIONS and v < previous_version)
        except StopIteration:
            # nothing lower is supported
            version = 0
        return version

    # The following predicates gate protocol features that apply from v5 onward.
    @classmethod
    def uses_int_query_flags(cls, version):
        return version >= cls.V5

    @classmethod
    def uses_prepare_flags(cls, version):
        return version >= cls.V5

    @classmethod
    def uses_prepared_metadata(cls, version):
        return version >= cls.V5

    @classmethod
    def uses_error_code_map(cls, version):
        return version >= cls.V5

    @classmethod
    def uses_keyspace_flag(cls, version):
        return version >= cls.V5
class WriteType(object):
    """
    For usage with :class:`.RetryPolicy`, this describes a type
    of write operation.
    """

    SIMPLE = 0
    """
    A write to a single partition key. Such writes are guaranteed to be atomic
    and isolated.
    """

    BATCH = 1
    """
    A write to multiple partition keys that used the distributed batch log to
    ensure atomicity.
    """

    UNLOGGED_BATCH = 2
    """
    A write to multiple partition keys that did not use the distributed batch
    log. Atomicity for such writes is not guaranteed.
    """

    COUNTER = 3
    """
    A counter write (for one or multiple partition keys). Such writes should
    not be replayed in order to avoid overcount.
    """

    BATCH_LOG = 4
    """
    The initial write to the distributed batch log that Cassandra performs
    internally before a BATCH write.
    """

    CAS = 5
    """
    A lightweight-transaction write, such as "DELETE ... IF EXISTS".
    """

    VIEW = 6
    """
    This WriteType is only seen in results for requests that were unable to
    complete MV operations.
    """

    CDC = 7
    """
    This WriteType is only seen in results for requests that were unable to
    complete CDC operations.
    """
WriteType.name_to_value = {
'SIMPLE': WriteType.SIMPLE,
'BATCH': WriteType.BATCH,
'UNLOGGED_BATCH': WriteType.UNLOGGED_BATCH,
'COUNTER': WriteType.COUNTER,
'BATCH_LOG': WriteType.BATCH_LOG,
'CAS': WriteType.CAS,
'VIEW': WriteType.VIEW,
'CDC': WriteType.CDC
}
WriteType.value_to_name = {v: k for k, v in WriteType.name_to_value.items()}
class SchemaChangeType(object):
    """String constants describing the kind of a schema-change event."""
    DROPPED = 'DROPPED'
    CREATED = 'CREATED'
    UPDATED = 'UPDATED'
class SchemaTargetType(object):
    """String constants naming the kind of schema object a change applies to."""
    KEYSPACE = 'KEYSPACE'
    TABLE = 'TABLE'
    TYPE = 'TYPE'
    FUNCTION = 'FUNCTION'
    AGGREGATE = 'AGGREGATE'
class SignatureDescriptor(object):
    """Identifies a function or aggregate by its name plus CQL argument types."""

    def __init__(self, name, argument_types):
        self.name = name
        self.argument_types = argument_types

    @property
    def signature(self):
        """
        function signature string in the form 'name([type0[,type1[...]]])'

        can be used to uniquely identify overloaded function names within a keyspace
        """
        return self.format_signature(self.name, self.argument_types)

    @staticmethod
    def format_signature(name, argument_types):
        """Render 'name(type0,type1,...)' for the given components."""
        # argument_types are already strings; join them directly instead of
        # the original redundant generator expression
        return "%s(%s)" % (name, ','.join(argument_types))

    def __repr__(self):
        return "%s(%s, %s)" % (self.__class__.__name__, self.name, self.argument_types)
class UserFunctionDescriptor(SignatureDescriptor):
    """
    Describes a User function by name and argument signature
    """
    # Inherits `signature` / `format_signature` from SignatureDescriptor.

    name = None
    """
    name of the function
    """

    argument_types = None
    """
    Ordered list of CQL argument type names comprising the type signature
    """
class UserAggregateDescriptor(SignatureDescriptor):
    """
    Describes a User aggregate function by name and argument signature
    """
    # Inherits `signature` / `format_signature` from SignatureDescriptor.

    name = None
    """
    name of the aggregate
    """

    argument_types = None
    """
    Ordered list of CQL argument type names comprising the type signature
    """
class DriverException(Exception):
"""
Base for all exceptions explicitly raised by the driver.
"""
pass
class RequestExecutionException(DriverException):
"""
Base for request execution exceptions returned from the server.
"""
pass
class Unavailable(RequestExecutionException):
"""
There were not enough live replicas to satisfy the requested consistency
level, so the coordinator node immediately failed the request without
forwarding it to any replicas.
"""
consistency = None
""" The requested :class:`ConsistencyLevel` """
required_replicas = None
""" The number of replicas that needed to be live to complete the operation """
alive_replicas = None
""" The number of replicas that were actually alive """
def __init__(self, summary_message, consistency=None, required_replicas=None, alive_replicas=None):
self.consistency = consistency
self.required_replicas = required_replicas
self.alive_replicas = alive_replicas
Exception.__init__(self, summary_message + ' info=' +
repr({'consistency': consistency_value_to_name(consistency),
'required_replicas': required_replicas,
'alive_replicas': alive_replicas}))
class Timeout(RequestExecutionException):
"""
Replicas failed to respond to the coordinator node before timing out.
"""
consistency = None
""" The requested :class:`ConsistencyLevel` """
required_responses = None
""" The number of required replica responses """
received_responses = None
"""
The number of replicas that responded before the coordinator timed out
the operation
"""
def __init__(self, summary_message, consistency=None, required_responses=None,
received_responses=None, **kwargs):
self.consistency = consistency
self.required_responses = required_responses
self.received_responses = received_responses
if "write_type" in kwargs:
kwargs["write_type"] = WriteType.value_to_name[kwargs["write_type"]]
info = {'consistency': consistency_value_to_name(consistency),
'required_responses': required_responses,
'received_responses': received_responses}
info.update(kwargs)
Exception.__init__(self, summary_message + ' info=' + repr(info))
class ReadTimeout(Timeout):
"""
A subclass of :exc:`Timeout` for read operations.
This indicates that the replicas failed to respond to the coordinator
node before the configured timeout. This timeout is configured in
``cassandra.yaml`` with the ``read_request_timeout_in_ms``
and ``range_request_timeout_in_ms`` options.
"""
data_retrieved = None
"""
A boolean indicating whether the requested data was retrieved
by the coordinator from any replicas before it timed out the
operation
"""
def __init__(self, message, data_retrieved=None, **kwargs):
Timeout.__init__(self, message, **kwargs)
self.data_retrieved = data_retrieved
class WriteTimeout(Timeout):
"""
A subclass of :exc:`Timeout` for write operations.
This indicates that the replicas failed to respond to the coordinator
node before the configured timeout. This timeout is configured in
``cassandra.yaml`` with the ``write_request_timeout_in_ms``
option.
"""
write_type = None
"""
The type of write operation, enum on :class:`~cassandra.policies.WriteType`
"""
def __init__(self, message, write_type=None, **kwargs):
kwargs["write_type"] = write_type
Timeout.__init__(self, message, **kwargs)
self.write_type = write_type
class CDCWriteFailure(RequestExecutionException):
"""
Hit limit on data in CDC folder, writes are rejected
"""
def __init__(self, message):
Exception.__init__(self, message)
class CoordinationFailure(RequestExecutionException):
"""
Replicas sent a failure to the coordinator.
"""
consistency = None
""" The requested :class:`ConsistencyLevel` """
required_responses = None
""" The number of required replica responses """
received_responses = None
"""
The number of replicas that responded before the coordinator timed out
the operation
"""
failures = None
"""
The number of replicas that sent a failure message
"""
error_code_map = None
"""
A map of inet addresses to error codes representing replicas that sent
a failure message. Only set when `protocol_version` is 5 or higher.
"""
def __init__(self, summary_message, consistency=None, required_responses=None,
received_responses=None, failures=None, error_code_map=None):
self.consistency = consistency
self.required_responses = required_responses
self.received_responses = received_responses
self.failures = failures
self.error_code_map = error_code_map
info_dict = {
'consistency': consistency_value_to_name(consistency),
'required_responses': required_responses,
'received_responses': received_responses,
'failures': failures
}
if error_code_map is not None:
# make error codes look like "0x002a"
formatted_map = dict((addr, '0x%04x' % err_code)
for (addr, err_code) in error_code_map.items())
info_dict['error_code_map'] = formatted_map
Exception.__init__(self, summary_message + ' info=' + repr(info_dict))
class ReadFailure(CoordinationFailure):
"""
A subclass of :exc:`CoordinationFailure` for read operations.
This indicates that the replicas sent a failure message to the coordinator.
"""
data_retrieved = None
"""
A boolean indicating whether the requested data was retrieved
by the coordinator from any replicas before it timed out the
operation
"""
def __init__(self, message, data_retrieved=None, **kwargs):
CoordinationFailure.__init__(self, message, **kwargs)
self.data_retrieved = data_retrieved
class WriteFailure(CoordinationFailure):
"""
A subclass of :exc:`CoordinationFailure` for write operations.
This indicates that the replicas sent a failure message to the coordinator.
"""
write_type = None
"""
The type of write operation, enum on :class:`~cassandra.policies.WriteType`
"""
def __init__(self, message, write_type=None, **kwargs):
CoordinationFailure.__init__(self, message, **kwargs)
self.write_type = write_type
class FunctionFailure(RequestExecutionException):
"""
User Defined Function failed during execution
"""
keyspace = None
"""
Keyspace of the function
"""
function = None
"""
Name of the function
"""
arg_types = None
"""
List of argument type names of the function
"""
def __init__(self, summary_message, keyspace, function, arg_types):
self.keyspace = keyspace
self.function = function
self.arg_types = arg_types
Exception.__init__(self, summary_message)
class RequestValidationException(DriverException):
"""
Server request validation failed
"""
pass
class ConfigurationException(RequestValidationException):
"""
Server indicated request errro due to current configuration
"""
pass
class AlreadyExists(ConfigurationException):
"""
An attempt was made to create a keyspace or table that already exists.
"""
keyspace = None
"""
The name of the keyspace that already exists, or, if an attempt was
made to create a new table, the keyspace that the table is in.
"""
table = None
"""
The name of the table that already exists, or, if an attempt was
make to create a keyspace, :const:`None`.
"""
def __init__(self, keyspace=None, table=None):
if table:
message = "Table '%s.%s' already exists" % (keyspace, table)
else:
message = "Keyspace '%s' already exists" % (keyspace,)
Exception.__init__(self, message)
self.keyspace = keyspace
self.table = table
class InvalidRequest(RequestValidationException):
"""
A query was made that was invalid for some reason, such as trying to set
the keyspace for a connection to a nonexistent keyspace.
"""
pass
class Unauthorized(RequestValidationException):
"""
The current user is not authorized to perform the requested operation.
"""
pass
class AuthenticationFailed(DriverException):
"""
Failed to authenticate.
"""
pass
class OperationTimedOut(DriverException):
"""
The operation took longer than the specified (client-side) timeout
to complete. This is not an error generated by Cassandra, only
the driver.
"""
errors = None
"""
A dict of errors keyed by the :class:`~.Host` against which they occurred.
"""
last_host = None
"""
The last :class:`~.Host` this operation was attempted against.
"""
def __init__(self, errors=None, last_host=None):
self.errors = errors
self.last_host = last_host
message = "errors=%s, last_host=%s" % (self.errors, self.last_host)
Exception.__init__(self, message)
class UnsupportedOperation(DriverException):
"""
An attempt was made to use a feature that is not supported by the
selected protocol version. See :attr:`Cluster.protocol_version`
for more details.
"""
pass
class UnresolvableContactPoints(DriverException):
"""
The driver was unable to resolve any provided hostnames.
Note that this is *not* raised when a :class:`.Cluster` is created with no
contact points, only when lookup fails for all hosts
"""
pass
| [
"35859780+TusharMalakar@users.noreply.github.com"
] | 35859780+TusharMalakar@users.noreply.github.com |
771095473e3066ad09df7493a6581d7bb5843b18 | 78b01cc5249504f067ed9a53063037818c4a2663 | /main.py | 79c4c87baff9be781702595bc7afbc06ad51483e | [
"MIT"
] | permissive | toshikurauchi/chopro_book | 4fae592b261a74b2e97782346348bba7df809879 | 73e7d1e4cd540457a1609959a4e912eba5e8278e | refs/heads/master | 2021-06-24T15:20:18.442808 | 2019-03-17T03:30:41 | 2019-03-17T03:30:41 | 135,749,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,004 | py | #!/usr/bin/python3
import sys
import re
from unicodedata import normalize
from pathlib import Path
from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from chopro import ChoPro
from config import *
SHORT_LIMIT = 100
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///chopro_book.db'
db = SQLAlchemy(app)
class Playlist(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
songs = db.relationship('PlaylistSong', backref='playlist', cascade="all,delete")
def __init__(self, id=None, name='', song_files=[]):
self.id = id
self.name = name
self.song_files = song_files
self.new_songs = []
self.deleted_songs = []
def __repr__(self):
return '<Playlist %r>' % self.name
@property
def sorted_songs(self):
return sorted(self.songs, key=lambda s: s.index)
@property
def song_files(self):
return [s.filename for s in self.sorted_songs]
@song_files.setter
def song_files(self, new_songs):
current_songs = {s.filename: s for s in self.songs}
self.new_songs = []
self.songs = []
for index, f in enumerate(new_songs):
if f in current_songs:
song = current_songs.pop(f)
song.index = index
else:
song = PlaylistSong(self, f, index)
self.new_songs.append(song)
self.songs.append(song)
self.deleted_songs = list(current_songs.values())
@property
def song_list(self):
print([(s.filename, s.index) for s in self.sorted_songs])
return [s.song for s in self.sorted_songs]
class PlaylistSong(db.Model):
id = db.Column(db.Integer, primary_key=True)
filename = db.Column(db.String(120), nullable=False)
transpose = db.Column(db.Integer)
index = db.Column(db.Integer)
playlist_id = db.Column(db.Integer, db.ForeignKey('playlist.id'), nullable=False)
def __init__(self, playlist, filename, index, transpose=0):
self.playlist = playlist
self.filename = filename
self.index = index
self.transpose = transpose
@property
def song(self):
return Song(self.filename, self.transpose, self.id)
class Song:
def __init__(self, filename, transpose=0, playlist_song_id=None):
self.filename = filename
self.transpose = transpose
self.playlist_song_id = playlist_song_id
self._lyrics = None
self.name = clean_name(self.filename)
def __eq__(self, other):
return self.filename == other.filename
def chopro(self):
full_filename = (Path(CHOPRO_DIR) / self.filename).absolute()
with open(full_filename) as cpfile:
cpstr = cpfile.read()
return ChoPro(cpstr, self.transpose)
@property
def html(self):
return self.chopro().get_html()
@property
def lyrics(self):
if self._lyrics is None:
try:
self._lyrics = self.chopro().get_lyrics()
except:
raise Exception(self.name)
return self._lyrics
@property
def short_lyrics(self):
lyrics = self.lyrics
short = ''
song_started = False
for line in lyrics.split('\n'):
clean = line.strip()
if not song_started and clean:
song_started = True
if 'intro' in clean.lower():
continue
if clean:
short += clean + '<br>'
if len(short) > SHORT_LIMIT:
break
return short
@property
def next_transpose(self):
return str(self.transpose + 1)
@property
def prev_transpose(self):
return str(self.transpose - 1)
@property
def slug(self):
# Based on: http://flask.pocoo.org/snippets/5/
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
result = []
for word in _punct_re.split(self.filename.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(''.join(map(chr, word)))
return '-'.join(result)
def clean_name(name):
regex = re.compile(r".(chopro|chordpro)$", re.IGNORECASE)
return regex.sub('', name)
def list_songs(ignore=[]):
path = Path(CHOPRO_DIR)
songfiles = sorted([Song(f.name) for f in path.iterdir()], key=lambda s: s.filename)
return [s for s in songfiles if s not in ignore]
@app.route('/')
def index():
return render_template('index.html', songs=list_songs())
@app.route('/playlists')
def playlists():
all_playlists = Playlist.query.all()
return render_template('playlists.html', playlists=all_playlists)
@app.route('/song')
def chords():
transpose = int(request.args.get('transpose', 0))
song = Song(request.args['filename'], transpose)
return render_template('chords.html', song=song)
@app.route('/playlist', methods=['GET', 'POST'])
@app.route('/playlist/<pid>/edit', methods=['GET', 'POST'], endpoint='playlist_edit')
def playlist_form(pid=None):
if (request.method == 'POST'):
name = request.form.get('name')
songs = request.form.get('songs').split(';;')
if pid is not None:
playlist = Playlist.query.get(pid)
playlist.name = name
playlist.song_files = songs
else:
playlist = Playlist(name=name, song_files=songs)
for s in playlist.new_songs:
db.session.add(s)
for s in playlist.deleted_songs:
db.session.delete(s)
db.session.add(playlist)
db.session.commit()
return redirect(url_for('playlist_view', pid=playlist.id))
# GET
playlist = Playlist()
form_action = url_for('playlist_form')
if pid is not None:
playlist = Playlist.query.get(pid)
form_action = url_for('playlist_edit', pid=pid)
selected_songs = playlist.song_list
available_songs = list_songs(ignore=selected_songs)
return render_template('playlist_form.html', playlist=playlist, selected_songs=selected_songs, available_songs=available_songs, form_action=form_action)
@app.route('/playlist/<pid>', methods=['GET'])
def playlist_view(pid):
playlist = Playlist.query.get(pid)
return render_template('playlist_view.html', playlist=playlist, songs=playlist.song_list)
@app.route('/playlist-song/<pid>', methods=['GET'])
def playlist_song(pid):
playlist_song = PlaylistSong.query.get(pid)
transpose = int(request.args.get('transpose', 0))
playlist_song.transpose = transpose
# Save
db.session.add(playlist_song)
db.session.commit()
song = playlist_song.song
return render_template('playlist_song.html', song=song)
if __name__=='__main__':
app.run(debug=True) | [
"andrew.kurauchi@gmail.com"
] | andrew.kurauchi@gmail.com |
9d3f971d92a6f6e1cd18b6d14d0e39c203776983 | a8c76e503df527445b14163f74faa8aca7e90ecd | /examples/cardless_credit_example.py | 9b76216fa16aa9281bbd570ee1371176ada29324 | [
"MIT"
] | permissive | baseup/xendit-python | c2a4f0766886f6124a86810e0831653e1ca1e1f4 | 8b677fbbad5fe3bbcd0a2b93e30e8040543b8f61 | refs/heads/master | 2023-03-09T02:56:53.513101 | 2021-03-04T08:06:44 | 2021-03-04T08:06:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,442 | py | import time
from print_running_function import print_running_function
# Hackish method to import from another directory
# Useful while xendit-python isn't released yet to the public
import importlib.machinery
loader = importlib.machinery.SourceFileLoader("xendit", "../xendit/__init__.py")
xendit = loader.load_module("xendit")
class CreateCardlessCreditPayment:
@staticmethod
def run(xendit_instance, **kwargs):
try:
cardless_credit_payment = xendit_instance.CardlessCredit.create_payment(
**kwargs
)
print(cardless_credit_payment)
except xendit.XenditError as e:
print("Error status code:", e.status_code)
print("Error message:", e)
@staticmethod
def example(xendit_instance):
cardless_credit_items = []
cardless_credit_items.append(
{
"id": "item-123",
"name": "Phone Case",
"price": 200000,
"type": "Smartphone",
"url": "http://example.com/phone/phone_case",
"quantity": 2,
}
)
customer_details = {
"first_name": "customer first name",
"last_name": "customer last name",
"email": "customer@email.com",
"phone": "0812332145",
}
shipping_address = {
"first_name": "first name",
"last_name": "last name",
"address": "Jl Teknologi No. 12",
"city": "Jakarta",
"postal_code": "12345",
"phone": "081513114262",
"country_code": "IDN",
}
args = {
"cardless_credit_type": xendit.CardlessCreditType.KREDIVO,
"external_id": f"id-{int(time.time())}",
"amount": 10000,
"payment_type": "3_months",
"items": cardless_credit_items,
"customer_details": customer_details,
"shipping_address": shipping_address,
"redirect_url": "https://my-shop.com/home",
"callback_url": "https://my-shop.com/callback",
}
print_running_function("xendit.CardlessCredit.create_payment", args)
CreateCardlessCreditPayment.run(xendit_instance, **args)
class CalculatePaymentType:
@staticmethod
def run(xendit_instance, **kwargs):
try:
cardless_credit_payment_types = xendit_instance.CardlessCredit.calculate_payment_type(
**kwargs
)
print(cardless_credit_payment_types)
except xendit.XenditError as e:
print("Error status code:", e.status_code)
print("Error message:", e)
@staticmethod
def example(xendit_instance):
cardless_credit_items = []
cardless_credit_items.append(
{
"id": "item-123",
"name": "Phone Case",
"price": 200000,
"type": "Smartphone",
"url": "http://example.com/phone/phone_case",
"quantity": 2,
}
)
args = {
"cardless_credit_type": xendit.CardlessCreditType.KREDIVO,
"amount": 10000,
"items": cardless_credit_items,
}
print_running_function("xendit.CardlessCredit.calculate_payment_type", args)
CalculatePaymentType.run(xendit_instance, **args)
def ask_cardless_credit_input():
print("Input the action that you want to use")
print("0. Exit")
print("1. Create Payment / Generate Checkout URL")
print("2. Calculate Payment Types")
try:
return int(input())
except ValueError:
print("Invalid input. Please type a number")
return ask_cardless_credit_input()
def cardless_credit_example(xendit_instance):
cardless_credit_input = ask_cardless_credit_input()
while cardless_credit_input != 0:
if cardless_credit_input == 1:
print("Running example of Create Payment / Generate Checkout URL")
CreateCardlessCreditPayment.example(xendit_instance)
elif cardless_credit_input == 2:
print("Running example of Calculate Payment Types")
CalculatePaymentType.example(xendit_instance)
cardless_credit_input = ask_cardless_credit_input()
| [
"adyaksa.wisanggeni@gmail.com"
] | adyaksa.wisanggeni@gmail.com |
3c4bf638f60768a4555ee438fa275702572a60bd | fd6af2839be96a7966925d2d4c47a5af6dffd938 | /ratatosk/pipeline/__init__.py | 35d6ef55b865e5f3ca00702f832ebc8b84f18c4c | [
"Apache-2.0"
] | permissive | percyfal/ratatosk | 429ce423100ddbf4008b51df66db99c5205f8def | 71ee4741952219c1fccf9cb6c172a354610d499c | refs/heads/master | 2020-05-16T22:43:33.212506 | 2013-09-24T11:24:13 | 2013-09-24T11:24:13 | 8,706,433 | 7 | 4 | null | 2013-04-08T16:27:07 | 2013-03-11T15:25:44 | Python | UTF-8 | Python | false | false | 1,231 | py | import os
import ratatosk
from ratatosk.pipeline import align, seqcap, haloplex
# Define configuration file locations and classes for predefined workflows
config_dict = {
'ratatosk' : {'config':os.path.join(ratatosk.__path__[0], os.pardir, "config", "ratatosk.yaml"),
'cls':None},
'Align' : {'config' : os.path.join(ratatosk.__path__[0], os.pardir, "config", "align.yaml"),
'cls' : align.Align},
'Seqcap' : {'config' : os.path.join(ratatosk.__path__[0], os.pardir, "config", "seqcap.yaml"),
'cls' : seqcap.SeqCap},
'SeqcapSummary' : {'config' : os.path.join(ratatosk.__path__[0], os.pardir, "config", "seqcap.yaml"),
'cls' : seqcap.SeqCapSummary},
'HaloPlex' : {'config' : os.path.join(ratatosk.__path__[0], os.pardir, "config", "haloplex.yaml"),
'cls' : haloplex.HaloPlex},
'HaloPlexSummary' : {'config' : os.path.join(ratatosk.__path__[0],os.pardir, "config", "haloplex.yaml"),
'cls' : haloplex.HaloPlexSummary},
'HaloPlexCombine' : {'config' : os.path.join(ratatosk.__path__[0],os.pardir, "config", "haloplex.yaml"),
'cls' : haloplex.HaloPlexCombine},
}
| [
"per.unneberg@scilifelab.se"
] | per.unneberg@scilifelab.se |
19e193b4a359c9f87d1a73dafa8fd757f7da437c | a352b01827b6b8b4cd4ae7b2d4a7427513e3326a | /ceph/datadog_checks/ceph/__about__.py | 3a5935a2d01e4ca14d98640702b7667bef2c2caa | [] | permissive | kloudfuse/integrations-core | 8e52ac61464325b297f3e62c0405cccc4a4663ac | f78a61f9898bf1c26dd70407c23267514fac0d53 | refs/heads/master | 2023-03-24T00:10:05.259573 | 2021-03-29T16:44:06 | 2021-03-29T16:51:38 | 351,222,055 | 0 | 0 | BSD-3-Clause | 2021-03-29T16:51:39 | 2021-03-24T21:01:55 | Python | UTF-8 | Python | false | false | 22 | py | __version__ = "2.3.1"
| [
"noreply@github.com"
] | kloudfuse.noreply@github.com |
211e74285c92d6c731fea774cb7f83a564920a4a | 412b699e0f497ac03d6618fe349f4469646c6f2d | /env/lib/python3.8/site-packages/websockets/http11.py | 0754ddabb5fcba68671de81516c8c494cbe83f76 | [
"MIT"
] | permissive | EtienneBrJ/Portfolio | 7c70573f02a5779f9070d6d9df58d460828176e3 | 6b8d8cf9622eadef47bd10690c1bf1e7fd892bfd | refs/heads/main | 2023-09-03T15:03:43.698518 | 2021-11-04T01:02:33 | 2021-11-04T01:02:33 | 411,076,325 | 1 | 0 | MIT | 2021-10-31T13:43:09 | 2021-09-27T23:48:59 | HTML | UTF-8 | Python | false | false | 10,688 | py | import re
from typing import Callable, Generator, NamedTuple, Optional
from .datastructures import Headers
from .exceptions import SecurityError
MAX_HEADERS = 256
MAX_LINE = 4110
def d(value: bytes) -> str:
"""
Decode a bytestring for interpolating into an error message.
"""
return value.decode(errors="backslashreplace")
# See https://tools.ietf.org/html/rfc7230#appendix-B.
# Regex for validating header names.
_token_re = re.compile(rb"[-!#$%&\'*+.^_`|~0-9a-zA-Z]+")
# Regex for validating header values.
# We don't attempt to support obsolete line folding.
# Include HTAB (\x09), SP (\x20), VCHAR (\x21-\x7e), obs-text (\x80-\xff).
# The ABNF is complicated because it attempts to express that optional
# whitespace is ignored. We strip whitespace and don't revalidate that.
# See also https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
_value_re = re.compile(rb"[\x09\x20-\x7e\x80-\xff]*")
# Consider converting to dataclasses when dropping support for Python < 3.7.
class Request(NamedTuple):
"""
WebSocket handshake request.
:param path: path and optional query
:param headers:
"""
path: str
headers: Headers
# body isn't useful is the context of this library
@classmethod
def parse(
cls, read_line: Callable[[], Generator[None, None, bytes]]
) -> Generator[None, None, "Request"]:
"""
Parse an HTTP/1.1 GET request and return ``(path, headers)``.
``path`` isn't URL-decoded or validated in any way.
``path`` and ``headers`` are expected to contain only ASCII characters.
Other characters are represented with surrogate escapes.
:func:`parse_request` doesn't attempt to read the request body because
WebSocket handshake requests don't have one. If the request contains a
body, it may be read from ``stream`` after this coroutine returns.
:param read_line: generator-based coroutine that reads a LF-terminated
line or raises an exception if there isn't enough data
:raises EOFError: if the connection is closed without a full HTTP request
:raises SecurityError: if the request exceeds a security limit
:raises ValueError: if the request isn't well formatted
"""
# https://tools.ietf.org/html/rfc7230#section-3.1.1
# Parsing is simple because fixed values are expected for method and
# version and because path isn't checked. Since WebSocket software tends
# to implement HTTP/1.1 strictly, there's little need for lenient parsing.
try:
request_line = yield from parse_line(read_line)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP request line") from exc
try:
method, raw_path, version = request_line.split(b" ", 2)
except ValueError: # not enough values to unpack (expected 3, got 1-2)
raise ValueError(f"invalid HTTP request line: {d(request_line)}") from None
if method != b"GET":
raise ValueError(f"unsupported HTTP method: {d(method)}")
if version != b"HTTP/1.1":
raise ValueError(f"unsupported HTTP version: {d(version)}")
path = raw_path.decode("ascii", "surrogateescape")
headers = yield from parse_headers(read_line)
return cls(path, headers)
def serialize(self) -> bytes:
"""
Serialize an HTTP/1.1 GET request.
"""
# Since the path and headers only contain ASCII characters,
# we can keep this simple.
request = f"GET {self.path} HTTP/1.1\r\n".encode()
request += self.headers.serialize()
return request
# Consider converting to dataclasses when dropping support for Python < 3.7.
class Response(NamedTuple):
"""
WebSocket handshake response.
"""
status_code: int
reason_phrase: str
headers: Headers
body: Optional[bytes] = None
# If processing the response triggers an exception, it's stored here.
exception: Optional[Exception] = None
@classmethod
def parse(
cls,
read_line: Callable[[], Generator[None, None, bytes]],
read_exact: Callable[[int], Generator[None, None, bytes]],
read_to_eof: Callable[[], Generator[None, None, bytes]],
) -> Generator[None, None, "Response"]:
"""
Parse an HTTP/1.1 response and return ``(status_code, reason, headers)``.
``reason`` and ``headers`` are expected to contain only ASCII characters.
Other characters are represented with surrogate escapes.
:func:`parse_request` doesn't attempt to read the response body because
WebSocket handshake responses don't have one. If the response contains a
body, it may be read from ``stream`` after this coroutine returns.
:param read_line: generator-based coroutine that reads a LF-terminated
line or raises an exception if there isn't enough data
:param read_exact: generator-based coroutine that reads the requested
number of bytes or raises an exception if there isn't enough data
:raises EOFError: if the connection is closed without a full HTTP response
:raises SecurityError: if the response exceeds a security limit
:raises LookupError: if the response isn't well formatted
:raises ValueError: if the response isn't well formatted
"""
# https://tools.ietf.org/html/rfc7230#section-3.1.2
# As in parse_request, parsing is simple because a fixed value is expected
# for version, status_code is a 3-digit number, and reason can be ignored.
try:
status_line = yield from parse_line(read_line)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP status line") from exc
try:
version, raw_status_code, raw_reason = status_line.split(b" ", 2)
except ValueError: # not enough values to unpack (expected 3, got 1-2)
raise ValueError(f"invalid HTTP status line: {d(status_line)}") from None
if version != b"HTTP/1.1":
raise ValueError(f"unsupported HTTP version: {d(version)}")
try:
status_code = int(raw_status_code)
except ValueError: # invalid literal for int() with base 10
raise ValueError(
f"invalid HTTP status code: {d(raw_status_code)}"
) from None
if not 100 <= status_code < 1000:
raise ValueError(f"unsupported HTTP status code: {d(raw_status_code)}")
if not _value_re.fullmatch(raw_reason):
raise ValueError(f"invalid HTTP reason phrase: {d(raw_reason)}")
reason = raw_reason.decode()
headers = yield from parse_headers(read_line)
# https://tools.ietf.org/html/rfc7230#section-3.3.3
if "Transfer-Encoding" in headers:
raise NotImplementedError("transfer codings aren't supported")
# Since websockets only does GET requests (no HEAD, no CONNECT), all
# responses except 1xx, 204, and 304 include a message body.
if 100 <= status_code < 200 or status_code == 204 or status_code == 304:
body = None
else:
content_length: Optional[int]
try:
# MultipleValuesError is sufficiently unlikely that we don't
# attempt to handle it. Instead we document that its parent
# class, LookupError, may be raised.
raw_content_length = headers["Content-Length"]
except KeyError:
content_length = None
else:
content_length = int(raw_content_length)
if content_length is None:
body = yield from read_to_eof()
else:
body = yield from read_exact(content_length)
return cls(status_code, reason, headers, body)
def serialize(self) -> bytes:
"""
Serialize an HTTP/1.1 GET response.
"""
# Since the status line and headers only contain ASCII characters,
# we can keep this simple.
response = f"HTTP/1.1 {self.status_code} {self.reason_phrase}\r\n".encode()
response += self.headers.serialize()
if self.body is not None:
response += self.body
return response
def parse_headers(
read_line: Callable[[], Generator[None, None, bytes]]
) -> Generator[None, None, Headers]:
"""
Parse HTTP headers.
Non-ASCII characters are represented with surrogate escapes.
:param read_line: generator-based coroutine that reads a LF-terminated
line or raises an exception if there isn't enough data
"""
# https://tools.ietf.org/html/rfc7230#section-3.2
# We don't attempt to support obsolete line folding.
headers = Headers()
for _ in range(MAX_HEADERS + 1):
try:
line = yield from parse_line(read_line)
except EOFError as exc:
raise EOFError("connection closed while reading HTTP headers") from exc
if line == b"":
break
try:
raw_name, raw_value = line.split(b":", 1)
except ValueError: # not enough values to unpack (expected 2, got 1)
raise ValueError(f"invalid HTTP header line: {d(line)}") from None
if not _token_re.fullmatch(raw_name):
raise ValueError(f"invalid HTTP header name: {d(raw_name)}")
raw_value = raw_value.strip(b" \t")
if not _value_re.fullmatch(raw_value):
raise ValueError(f"invalid HTTP header value: {d(raw_value)}")
name = raw_name.decode("ascii") # guaranteed to be ASCII at this point
value = raw_value.decode("ascii", "surrogateescape")
headers[name] = value
else:
raise SecurityError("too many HTTP headers")
return headers
def parse_line(
read_line: Callable[[], Generator[None, None, bytes]]
) -> Generator[None, None, bytes]:
"""
Parse a single line.
CRLF is stripped from the return value.
:param read_line: generator-based coroutine that reads a LF-terminated
line or raises an exception if there isn't enough data
"""
# Security: TODO: add a limit here
line = yield from read_line()
# Security: this guarantees header values are small (hard-coded = 4 KiB)
if len(line) > MAX_LINE:
raise SecurityError("line too long")
# Not mandatory but safe - https://tools.ietf.org/html/rfc7230#section-3.5
if not line.endswith(b"\r\n"):
raise EOFError("line without CRLF")
return line[:-2]
| [
"etiennebrxv@gmail.com"
] | etiennebrxv@gmail.com |
fed848079573db2088199105182feb85233e4b34 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Numerical_Eng_Python/example10_1.py | 865e519667c1c0de4e2117143d9072094cfc3225 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 355 | py | ## example10_1
from goldSearch import *
def f(x):
lam = 1.0 # Constraint multiplier
c = min(0.0, x) # Constraint function
return 1.6*x**3 + 3.0*x**2 - 2.0*x + lam*c**2
xStart = 1.0
h = 0.01
x1,x2 = bracket(f,xStart,h)
x,fMin = search(f,x1,x2)
print "x =",x
print "f(x) =",fMin
raw_input ("\nPress return to exit")
| [
"bb@b.om"
] | bb@b.om |
dfea637a01c7fc7e2e04fa69d41f970d90bacc01 | f569978afb27e72bf6a88438aa622b8c50cbc61b | /douyin_open/StarAuthorStarAuthor/models/inline_response2001_data.py | f80ce93b78c4d3185af47d06639fa3ee35248320 | [] | no_license | strangebank/swagger-petstore-perl | 4834409d6225b8a09b8195128d74a9b10ef1484a | 49dfc229e2e897cdb15cbf969121713162154f28 | refs/heads/master | 2023-01-05T10:21:33.518937 | 2020-11-05T04:33:16 | 2020-11-05T04:33:16 | 310,189,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,780 | py | # coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse2001Data(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its Swagger-declared type.
    swagger_types = {
        'error_code': 'ErrorCode',
        'description': 'Description',
        'update_timestamp': 'int',
        'nick_name': 'str',
        'unique_id': 'str',
        'spread_score': 'float',
        'cp_score': 'float',
        'growth_score': 'float',
        'cooperation_score': 'float',
        'shop_score': 'float',
        'star_score': 'float'
    }
    # Maps each model attribute to its key in the JSON payload.
    attribute_map = {
        'error_code': 'error_code',
        'description': 'description',
        'update_timestamp': 'update_timestamp',
        'nick_name': 'nick_name',
        'unique_id': 'unique_id',
        'spread_score': 'spread_score',
        'cp_score': 'cp_score',
        'growth_score': 'growth_score',
        'cooperation_score': 'cooperation_score',
        'shop_score': 'shop_score',
        'star_score': 'star_score'
    }
    def __init__(self, error_code=None, description=None, update_timestamp=None, nick_name=None, unique_id=None, spread_score=None, cp_score=None, growth_score=None, cooperation_score=None, shop_score=None, star_score=None):  # noqa: E501
        """InlineResponse2001Data - a model defined in Swagger"""  # noqa: E501
        self._error_code = None
        self._description = None
        self._update_timestamp = None
        self._nick_name = None
        self._unique_id = None
        self._spread_score = None
        self._cp_score = None
        self._growth_score = None
        self._cooperation_score = None
        self._shop_score = None
        self._star_score = None
        self.discriminator = None
        # error_code and description are required: their setters raise on None.
        self.error_code = error_code
        self.description = description
        if update_timestamp is not None:
            self.update_timestamp = update_timestamp
        if nick_name is not None:
            self.nick_name = nick_name
        if unique_id is not None:
            self.unique_id = unique_id
        if spread_score is not None:
            self.spread_score = spread_score
        if cp_score is not None:
            self.cp_score = cp_score
        if growth_score is not None:
            self.growth_score = growth_score
        if cooperation_score is not None:
            self.cooperation_score = cooperation_score
        if shop_score is not None:
            self.shop_score = shop_score
        if star_score is not None:
            self.star_score = star_score
    @property
    def error_code(self):
        """Gets the error_code of this InlineResponse2001Data.  # noqa: E501
        :return: The error_code of this InlineResponse2001Data.  # noqa: E501
        :rtype: ErrorCode
        """
        return self._error_code
    @error_code.setter
    def error_code(self, error_code):
        """Sets the error_code of this InlineResponse2001Data.
        :param error_code: The error_code of this InlineResponse2001Data.  # noqa: E501
        :type: ErrorCode
        """
        if error_code is None:
            raise ValueError("Invalid value for `error_code`, must not be `None`")  # noqa: E501
        self._error_code = error_code
    @property
    def description(self):
        """Gets the description of this InlineResponse2001Data.  # noqa: E501
        :return: The description of this InlineResponse2001Data.  # noqa: E501
        :rtype: Description
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this InlineResponse2001Data.
        :param description: The description of this InlineResponse2001Data.  # noqa: E501
        :type: Description
        """
        if description is None:
            raise ValueError("Invalid value for `description`, must not be `None`")  # noqa: E501
        self._description = description
    @property
    def update_timestamp(self):
        """Gets the update_timestamp of this InlineResponse2001Data.  # noqa: E501
        Influencer index update timestamp  # noqa: E501
        :return: The update_timestamp of this InlineResponse2001Data.  # noqa: E501
        :rtype: int
        """
        return self._update_timestamp
    @update_timestamp.setter
    def update_timestamp(self, update_timestamp):
        """Sets the update_timestamp of this InlineResponse2001Data.
        Influencer index update timestamp  # noqa: E501
        :param update_timestamp: The update_timestamp of this InlineResponse2001Data.  # noqa: E501
        :type: int
        """
        self._update_timestamp = update_timestamp
    @property
    def nick_name(self):
        """Gets the nick_name of this InlineResponse2001Data.  # noqa: E501
        Influencer nickname  # noqa: E501
        :return: The nick_name of this InlineResponse2001Data.  # noqa: E501
        :rtype: str
        """
        return self._nick_name
    @nick_name.setter
    def nick_name(self, nick_name):
        """Sets the nick_name of this InlineResponse2001Data.
        Influencer nickname  # noqa: E501
        :param nick_name: The nick_name of this InlineResponse2001Data.  # noqa: E501
        :type: str
        """
        self._nick_name = nick_name
    @property
    def unique_id(self):
        """Gets the unique_id of this InlineResponse2001Data.  # noqa: E501
        Influencer's Douyin account ID  # noqa: E501
        :return: The unique_id of this InlineResponse2001Data.  # noqa: E501
        :rtype: str
        """
        return self._unique_id
    @unique_id.setter
    def unique_id(self, unique_id):
        """Sets the unique_id of this InlineResponse2001Data.
        Influencer's Douyin account ID  # noqa: E501
        :param unique_id: The unique_id of this InlineResponse2001Data.  # noqa: E501
        :type: str
        """
        self._unique_id = unique_id
    @property
    def spread_score(self):
        """Gets the spread_score of this InlineResponse2001Data.  # noqa: E501
        Spread (reach) index  # noqa: E501
        :return: The spread_score of this InlineResponse2001Data.  # noqa: E501
        :rtype: float
        """
        return self._spread_score
    @spread_score.setter
    def spread_score(self, spread_score):
        """Sets the spread_score of this InlineResponse2001Data.
        Spread (reach) index  # noqa: E501
        :param spread_score: The spread_score of this InlineResponse2001Data.  # noqa: E501
        :type: float
        """
        self._spread_score = spread_score
    @property
    def cp_score(self):
        """Gets the cp_score of this InlineResponse2001Data.  # noqa: E501
        Cost-performance index  # noqa: E501
        :return: The cp_score of this InlineResponse2001Data.  # noqa: E501
        :rtype: float
        """
        return self._cp_score
    @cp_score.setter
    def cp_score(self, cp_score):
        """Sets the cp_score of this InlineResponse2001Data.
        Cost-performance index  # noqa: E501
        :param cp_score: The cp_score of this InlineResponse2001Data.  # noqa: E501
        :type: float
        """
        self._cp_score = cp_score
    @property
    def growth_score(self):
        """Gets the growth_score of this InlineResponse2001Data.  # noqa: E501
        Follower growth index  # noqa: E501
        :return: The growth_score of this InlineResponse2001Data.  # noqa: E501
        :rtype: float
        """
        return self._growth_score
    @growth_score.setter
    def growth_score(self, growth_score):
        """Sets the growth_score of this InlineResponse2001Data.
        Follower growth index  # noqa: E501
        :param growth_score: The growth_score of this InlineResponse2001Data.  # noqa: E501
        :type: float
        """
        self._growth_score = growth_score
    @property
    def cooperation_score(self):
        """Gets the cooperation_score of this InlineResponse2001Data.  # noqa: E501
        Cooperation index  # noqa: E501
        :return: The cooperation_score of this InlineResponse2001Data.  # noqa: E501
        :rtype: float
        """
        return self._cooperation_score
    @cooperation_score.setter
    def cooperation_score(self, cooperation_score):
        """Sets the cooperation_score of this InlineResponse2001Data.
        Cooperation index  # noqa: E501
        :param cooperation_score: The cooperation_score of this InlineResponse2001Data.  # noqa: E501
        :type: float
        """
        self._cooperation_score = cooperation_score
    @property
    def shop_score(self):
        """Gets the shop_score of this InlineResponse2001Data.  # noqa: E501
        Product-seeding (shopping recommendation) index  # noqa: E501
        :return: The shop_score of this InlineResponse2001Data.  # noqa: E501
        :rtype: float
        """
        return self._shop_score
    @shop_score.setter
    def shop_score(self, shop_score):
        """Sets the shop_score of this InlineResponse2001Data.
        Product-seeding (shopping recommendation) index  # noqa: E501
        :param shop_score: The shop_score of this InlineResponse2001Data.  # noqa: E501
        :type: float
        """
        self._shop_score = shop_score
    @property
    def star_score(self):
        """Gets the star_score of this InlineResponse2001Data.  # noqa: E501
        Xingtu (Star Map) index  # noqa: E501
        :return: The star_score of this InlineResponse2001Data.  # noqa: E501
        :rtype: float
        """
        return self._star_score
    @star_score.setter
    def star_score(self, star_score):
        """Sets the star_score of this InlineResponse2001Data.
        Xingtu (Star Map) index  # noqa: E501
        :param star_score: The star_score of this InlineResponse2001Data.  # noqa: E501
        :type: float
        """
        self._star_score = star_score
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(InlineResponse2001Data, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InlineResponse2001Data):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"strangebank@gmail.com"
] | strangebank@gmail.com |
1ec53720543cc85475eddd02f15984d387c04f94 | e095a91a3424ecc364c4532e8fc705b728a0d1b1 | /book/python_package/__init__.py | 532fd71b77070d6596ac341fb324f4c6c6ff81c6 | [] | no_license | Anakinliu/PythonProjects | caed257e71d2e52f691abc5095c4aca5c052feb2 | 2246794a88d06eaa381db1b3a72e9bc54a315dd7 | refs/heads/master | 2021-06-03T05:51:09.319613 | 2021-01-26T02:35:38 | 2021-01-26T02:35:38 | 101,546,309 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | #from . import test
| [
"gugeliuyinquan@gmail.com"
] | gugeliuyinquan@gmail.com |
caa93679d4f88d255d29bee7432afb478a73f857 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /SFE4q5pFTi8TBwj76_12.py | c1a8e0e344559ea11eaa899093dd2bb0d00074f6 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py |
def get_vote_count(votes):
    """Return (votes for the first option) minus (votes for the second).

    Relies on dict insertion order: the first two keys of ``votes`` are
    the two options being compared.
    """
    keys = list(votes)
    return votes[keys[0]] - votes[keys[1]]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
63c121a05dfeb948f6e5c1fa84cafc4740c8d4dd | 1b4abb5e310c7ae1b2928f9ea80a6b3a8c2fb8ed | /model/ml/infrastructure/parameter_search.py | 065cd4b1f932cd37ca710d1d220b8003732a721f | [] | no_license | zhang-198/ExampleDrivenErrorDetection | 2e2c708665f2b57b6ac7c785604a2ac6234f7ba9 | ae8bc24fc441957d9a29e5fa4cc247f1805d8b4d | refs/heads/master | 2023-05-23T14:49:29.628520 | 2020-04-09T14:02:28 | 2020-04-09T14:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,479 | py | import os
import glob
import shutil
import json
import numpy as np
import sys
def evaluate_check_point_json(checkpoint_json_file):
    """Return (best validation loss, its checkpoint iteration) from a torch-rnn JSON log."""
    with open(checkpoint_json_file) as fh:
        log = json.load(fh)
    losses = log['val_loss_history']
    iterations = log['val_loss_history_it']
    best = np.argmin(losses)
    return losses[best], iterations[best]
def get_latest_checkpoint(path):
    """Return the most recently created ``*.json`` checkpoint in ``path``.

    Returns None when the directory holds no JSON files (or does not exist).
    """
    candidates = glob.glob(path + '/*.json')
    if not candidates:
        return None
    return max(candidates, key=os.path.getctime)
def delete_folder_content(folder):
    """Best-effort removal of everything inside ``folder`` (the folder itself is kept).

    Errors on individual entries are printed and skipped so one locked
    file does not abort the whole cleanup.
    """
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        try:
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                # Unlink files AND symlinks: isfile/isdir follow links, so
                # without the islink check a symlink to a directory would be
                # passed to rmtree and delete the *target's* contents.
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            # Deletion is best-effort: report and continue with other entries.
            print(e)
def run_command(command, run):
    """Echo ``command`` and execute it via the shell when ``run`` is truthy.

    With ``run`` falsy this is a dry run: the command is only printed.
    """
    # `print command` was Python-2-only syntax; the parenthesized form is
    # valid in both Python 2 and 3 for a single argument, matching the
    # `print(e)` style used elsewhere in this file.
    print(command)
    if run:
        os.system(command)
def get_best_loss(best_loss, json_file):
    """Return the best loss recorded in ``json_file``.

    Falls back to the caller-supplied ``best_loss`` when the file does not
    exist. NOTE: when the file exists, the recorded value is returned
    regardless of whether it beats ``best_loss`` (original behavior kept).
    """
    if not os.path.exists(json_file):
        return best_loss
    recorded_loss, _ = evaluate_check_point_json(json_file)
    return recorded_loss
# Hyper-parameter grid search driver for per-column torch-rnn character
# models. For every data column: preprocess the raw text, train one model
# per hyper-parameter combination, and keep the checkpoint with the lowest
# validation loss under <column>/best, appending every trial to log.txt.
# When `run` is False the shell commands are only printed (dry run).
run = True
data_name = 'BlackOakUppercase'
num_columns = 12
for column_id in range(num_columns):
    # Step 1: convert this column's raw text into torch-rnn's h5/json inputs.
    command = 'python scripts/preprocess.py \\\n' + \
              '--input_txt /root/torch-rnn/storage/' + data_name + '/column_' + str(
        column_id) + '/orig_input/column_' + str(column_id) + '.txt \\\n' + \
              '--output_h5 /root/torch-rnn/storage/' + data_name + '/column_' + str(
        column_id) + '/input/my_data.h5 \\\n' + \
              '--output_json /root/torch-rnn/storage/' + data_name + '/column_' + str(
        column_id) + '/input/my_data.json\n\n'
    run_command(command, run)
    # Directory holding the best checkpoint found across the whole grid.
    directory = '/root/torch-rnn/storage/' + data_name + '/column_' + str(column_id) + '/best'
    if not os.path.exists(directory):
        os.makedirs(directory)
    best_loss = sys.float_info.max
    # check whether we need batch size of 50
    # check whether seq_length was important
    # Step 2: train one model per hyper-parameter combination.
    for units in [128]:
        for num_layers in [1]:
            for batch_size in [5, 10]:
                for learning_rate in [0.001, 0.002, 0.003]:
                    for dropout in [0.0, 0.1, 0.3]:
                        for seq_length in [15, 25, 50]:
                            command = 'th train.lua ' + \
                                      '-input_h5 /root/torch-rnn/storage/' + data_name + '/column_' + str(column_id) + '/input/my_data.h5 ' + \
                                      '-input_json /root/torch-rnn/storage/' + data_name + '/column_' + str(column_id) + '/input/my_data.json '+ \
                                      '-checkpoint_name /root/torch-rnn/storage/' + data_name + '/column_' + str(column_id) + '/cv/checkpoint '+ \
                                      '-rnn_size ' + str(units) + ' ' + \
                                      '-checkpoint_every 50 ' + \
                                      '-num_layers ' + str(num_layers) + ' ' + \
                                      '-dropout ' + str(dropout) + ' ' + \
                                      '-seq_length ' + str(seq_length) + ' ' + \
                                      '-max_epochs 100 ' + \
                                      '-batch_size ' + str(batch_size) + ' ' + \
                                      '-learning_rate ' + str(learning_rate) + \
                                      '\n\n'
                            run_command(command, run)
                            checkpoint_path = '/root/torch-rnn/storage/' + data_name + '/column_' + str(column_id) + '/cv'
                            latest_checkpoint_file = get_latest_checkpoint(checkpoint_path)
                            if latest_checkpoint_file == None:
                                # Training produced no checkpoint: record the failed config.
                                with open(directory + "/log.txt", "a") as myfile:
                                    myfile.write("rnn_size: " + str(units) + ", " + \
                                                 "num_layers: " + str(num_layers) + ", " + \
                                                 "dropout: " + str(dropout) + ", " + \
                                                 "seq_length: " + str(seq_length) + ", " + \
                                                 "batch_size: " + str(batch_size) + ", " + \
                                                 "learning_rate: " + str(learning_rate) + ", " + \
                                                 "best checkpoint id: " + "none" + ", " + \
                                                 "loss: " + "none" + "\n"
                                                 )
                            else:
                                loss, checkpoint_index = evaluate_check_point_json(latest_checkpoint_file)
                                best_loss = get_best_loss(best_loss, directory + "/best.json")
                                if best_loss > loss:
                                    # found a better parameter config
                                    best_loss = loss
                                    # save this checkpoint
                                    shutil.copy(checkpoint_path + "/checkpoint_" + str(checkpoint_index) + ".t7", directory + "/best.t7")
                                    shutil.copy(checkpoint_path + "/checkpoint_" + str(checkpoint_index) + ".json", directory + "/best.json")
                                # log everything
                                with open(directory + "/log.txt", "a") as myfile:
                                    myfile.write("rnn_size: " + str(units) + ", " + \
                                                 "num_layers: "+ str(num_layers) + ", " + \
                                                 "dropout: " + str(dropout) + ", " + \
                                                 "seq_length: " + str(seq_length) + ", " + \
                                                 "batch_size: " + str(batch_size) + ", " + \
                                                 "learning_rate: " + str(learning_rate) + ", " + \
                                                 "best checkpoint id: " + str(checkpoint_index) + ", " + \
                                                 "loss: " + str(loss) + "\n"
                                                 )
                                #clean up old checkpoints
                                delete_folder_content(checkpoint_path)
| [
"neutatz@googlemail.com"
] | neutatz@googlemail.com |
cf142b9f2fd1fb5e8cf3857fad308b0fa5003c56 | dd256415176fc8ab4b63ce06d616c153dffb729f | /aditya-works-feature-python_programming (1)/aditya-works-feature-python_programming/24-Jul-2019/nested_class_example/inner_class_1.py | ff91fb3904a8d4af0d622839083cff7ca1336443 | [] | no_license | adityapatel329/python_works | 6d9c6b4a64cccbe2717231a7cfd07cb350553df3 | 6cb8b2e7f691401b1d2b980f6d1def848b0a71eb | refs/heads/master | 2020-07-24T17:15:39.839826 | 2019-09-12T07:53:28 | 2019-09-12T07:53:28 | 207,993,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | class Outer:
    def __init__(self):
        # Each Outer instance owns its own Inner helper instance.
        self.inner = self.Inner()
    def reveal(self):
        # Delegate printing to the nested Inner object's display method.
        self.inner.display("Calling Inner class function from outer class")
    class Inner:
        """Nested ("inner") class: prints whatever message it is given."""
        def display(self,msg):
            print(msg)
# Demo: call into the inner class via the outer instance.
outer = Outer()
outer.reveal()
## both methods below are valid ways to obtain an Inner instance
inner = outer.Inner() ## inner =Outer().Inner() or inner = outer.inner
inner.display("Just print it!")
| [
"aditya.patel@1rivet.local"
] | aditya.patel@1rivet.local |
18def03de92ac213dcf488baf0b20ef2ea65a3b1 | b475baab9cdc73b104c077d48ab7053094040068 | /torchbiggraph/converters/export_to_tsv.py | 49cbff0b999d5de9eac4e65970863799a581535a | [
"BSD-3-Clause"
] | permissive | king2727/PyTorch-BigGraph | 91008349eb92d32283ced6a29d60b39229b0d276 | e3de4a3df84e4d7994477bbaa76d828592110a87 | refs/heads/main | 2023-08-26T00:32:43.912575 | 2021-10-27T14:32:52 | 2021-10-27T14:34:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,783 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
import argparse
from typing import Iterable, TextIO
from torchbiggraph.checkpoint_manager import CheckpointManager
from torchbiggraph.config import ConfigFileLoader, ConfigSchema
from torchbiggraph.graph_storages import (
ENTITY_STORAGES,
RELATION_TYPE_STORAGES,
AbstractEntityStorage,
AbstractRelationTypeStorage,
)
from torchbiggraph.model import MultiRelationEmbedder, make_model
def write(outf: TextIO, key: Iterable[str], value: Iterable[float]) -> None:
    """Emit one TSV row: the key fields, then each value to 9 decimal places."""
    key_part = "\t".join(key)
    value_part = "\t".join("%.9f" % v for v in value)
    outf.write(f"{key_part}\t{value_part}\n")
def make_tsv(
    config: ConfigSchema, entities_tf: TextIO, relation_types_tf: TextIO
) -> None:
    """Export a trained model's entity embeddings and relation-type
    parameters to the two given TSV streams."""
    print("Loading relation types and entities...")
    ent_store = ENTITY_STORAGES.make_instance(config.entity_path)
    rel_store = RELATION_TYPE_STORAGES.make_instance(config.entity_path)
    print("Initializing model...")
    model = make_model(config)
    print("Loading model check point...")
    ckpt_mgr = CheckpointManager(config.checkpoint_path)
    state_dict, _ = ckpt_mgr.read_model()
    if state_dict is not None:
        # strict=False: tolerate extra/missing keys in older checkpoints.
        model.load_state_dict(state_dict, strict=False)
    make_tsv_for_entities(model, ckpt_mgr, ent_store, entities_tf)
    make_tsv_for_relation_types(model, rel_store, relation_types_tf)
def make_tsv_for_entities(
    model: MultiRelationEmbedder,
    checkpoint_manager: CheckpointManager,
    entity_storage: AbstractEntityStorage,
    entities_tf: TextIO,
) -> None:
    """Write one TSV row per entity: its name followed by its embedding."""
    print("Writing entity embeddings...")
    for ent_t_name, ent_t_config in model.entities.items():
        for partition in range(ent_t_config.num_partitions):
            print(
                f"Reading embeddings for entity type {ent_t_name} partition "
                f"{partition} from checkpoint..."
            )
            entities = entity_storage.load_names(ent_t_name, partition)
            embeddings, _ = checkpoint_manager.read(ent_t_name, partition)
            if model.global_embs is not None:
                # Fold the shared global embedding into every row of this type.
                embeddings += model.global_embs[model.EMB_PREFIX + ent_t_name]
            print(
                f"Writing embeddings for entity type {ent_t_name} partition "
                f"{partition} to output file..."
            )
            total = len(embeddings)
            for row_idx, embedding in enumerate(embeddings):
                write(entities_tf, (entities[row_idx],), embedding)
                if (row_idx + 1) % 5000 == 0:
                    print(f"- Processed {row_idx + 1}/{total} entities so far...")
            print(f"- Processed all {total} entities")
    entities_output_filename = getattr(entities_tf, "name", "the output file")
    print(f"Done exporting entity data to {entities_output_filename}")
def make_tsv_for_relation_types(
    model: MultiRelationEmbedder,
    relation_type_storage: AbstractRelationTypeStorage,
    relation_types_tf: TextIO,
) -> None:
    """Write one TSV row per (relation type, side, operator parameter).

    Row layout: relation name, side ("lhs"/"rhs"), operator name, parameter
    name, parameter shape ("DxD..."), then the flattened parameter values.
    """
    print("Writing relation type parameters...")
    relation_types = relation_type_storage.load_names()
    if model.num_dynamic_rels > 0:
        # Dynamic mode: one shared relation config whose operators hold a
        # stacked parameter row per relation type.
        (rel_t_config,) = model.relations
        op_name = rel_t_config.operator
        (lhs_operator,) = model.lhs_operators
        (rhs_operator,) = model.rhs_operators
        for side, operator in (("lhs", lhs_operator), ("rhs", rhs_operator)):
            for param_name, stacked_params in operator.named_parameters():
                for rel_t_name, param in zip(relation_types, stacked_params):
                    dims = "x".join(str(d) for d in param.shape)
                    write(
                        relation_types_tf,
                        (rel_t_name, side, op_name, param_name, dims),
                        param.flatten(),
                    )
    else:
        # Static mode: one config and one rhs operator per relation type.
        for rel_t_name, rel_t_config, operator in zip(
            relation_types, model.relations, model.rhs_operators
        ):
            if rel_t_name != rel_t_config.name:
                raise ValueError(
                    f"Mismatch in relations names: got {rel_t_name} in the "
                    f"dictionary and {rel_t_config.name} in the config."
                )
            op_name = rel_t_config.operator
            for param_name, param in operator.named_parameters():
                dims = "x".join(str(d) for d in param.shape)
                write(
                    relation_types_tf,
                    (rel_t_name, "rhs", op_name, param_name, dims),
                    param.flatten(),
                )
    relation_types_output_filename = getattr(
        relation_types_tf, "name", "the output file"
    )
    print(f"Done exporting relation type data to {relation_types_output_filename}")
def main():
    """CLI entry point: parse arguments, load the config, export to TSV."""
    config_help = "\n\nConfig parameters:\n\n" + "\n".join(ConfigSchema.help())
    parser = argparse.ArgumentParser(
        epilog=config_help,
        # Needed to preserve line wraps in epilog.
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("config", help="Path to config file")
    parser.add_argument("-p", "--param", action="append", nargs="*")
    parser.add_argument("--entities-output", required=True)
    parser.add_argument("--relation-types-output", required=True)
    args = parser.parse_args()
    config = ConfigFileLoader().load_config(args.config, args.param)
    # "x" mode: refuse to overwrite pre-existing export files.
    with open(args.entities_output, "xt") as entities_tf, open(
        args.relation_types_output, "xt"
    ) as relation_types_tf:
        make_tsv(config, entities_tf, relation_types_tf)
if __name__ == "__main__":
main()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
64a76e03201f2dd54989a72856ef55fb03fd7f1c | e197cbe92104a328ac87c201569724b6142c34ce | /workshop1/settings.py | 2353d21f77d7fe92e0b3303f1ff34fb19057af63 | [] | no_license | chepe4pi/workshop1 | 877654782dee2f0a0d5d26aabd19356225fad59a | e744ec0520071beba3549519a04d3d728b61c0a0 | refs/heads/master | 2021-07-14T02:53:52.473856 | 2017-10-18T15:04:47 | 2017-10-18T15:04:47 | 105,813,167 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | """
Django settings for workshop1 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fi!p06auio$kb6(50g5*y1g*8#*3aojig=jn2pjsq72et6+6e6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'orders',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection is disabled (middleware commented out
    # below) — acceptable for a workshop, unsafe for production.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'workshop1.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'workshop1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): credentials are hard-coded; move to environment variables
# before deploying anywhere shared.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'workshop_1',
        'USER': 'workshop_1',
        'PASSWORD': '123456',
        'HOST': 'localhost',
        'PORT': '',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Throttling config for Django REST Framework.
# NOTE(review): 'rest_framework' does not appear in INSTALLED_APPS above —
# confirm it is installed/registered elsewhere or this setting is inert.
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_CLASSES': (
        'rest_framework.throttling.AnonRateThrottle',
        'rest_framework.throttling.UserRateThrottle'
    ),
    'DEFAULT_THROTTLE_RATES': {
        'anon': '100/day',
        'user': '5000/day'
    }
}
| [
"chepe4pi@gmail.com"
] | chepe4pi@gmail.com |
550dd7c6a02182f25d9f4efb8462999eb0161fe3 | 3ad8887aca54daa74b1fe446cb35cd0902e1e9bd | /jackdaw/gatherer/ldap/agent/common.py | 9dbba6051d5737ff4460fb9c6620d25d4d1ef4b7 | [] | no_license | huangzccn/jackdaw | 6ea5f3f7901c1c64b469ea4c25de0e77a3fc49a2 | 1a9800152fb8f19d5db43fcd235f45f6db2e3878 | refs/heads/master | 2023-08-29T11:44:46.692776 | 2021-10-23T20:00:36 | 2021-10-23T20:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import enum
class LDAPAgentCommand(enum.Enum):
    """Message/command identifiers exchanged with the LDAP gatherer agents.

    Values 0-18 identify data items or batch requests, 99 signals an
    exception, and 31-42 signal that a given collection job has finished.
    """
    SPNSERVICE = 0
    SPNSERVICES = 1
    USER = 2
    USERS = 3
    MACHINE = 4
    MACHINES = 5
    OU = 6
    OUS = 7
    DOMAININFO = 8
    GROUP = 9
    GROUPS = 10
    MEMBERSHIP = 11
    MEMBERSHIPS = 12
    SD = 13
    SDS = 14
    GPO = 15
    GPOS = 16
    TRUSTS = 17
    SCHEMA = 18
    EXCEPTION = 99
    # "Finished" notifications, one per collection job type.
    SPNSERVICES_FINISHED = 31
    USERS_FINISHED = 32
    MACHINES_FINISHED = 33
    OUS_FINISHED = 34
    GROUPS_FINISHED = 35
    MEMBERSHIPS_FINISHED = 36
    SDS_FINISHED = 37
    DOMAININFO_FINISHED = 38
    GPOS_FINISHED = 39
    TRUSTS_FINISHED = 40
    MEMBERSHIP_FINISHED = 41
    SCHEMA_FINISHED = 42
# Maps each collection job name to the enum member that signals its completion.
MSLDAP_JOB_TYPES = {
    'users' : LDAPAgentCommand.USERS_FINISHED ,
    'machines' : LDAPAgentCommand.MACHINES_FINISHED ,
    'sds' : LDAPAgentCommand.SDS_FINISHED ,
    'memberships' : LDAPAgentCommand.MEMBERSHIPS_FINISHED ,
    'ous' : LDAPAgentCommand.OUS_FINISHED ,
    'gpos' : LDAPAgentCommand.GPOS_FINISHED ,
    'groups' : LDAPAgentCommand.GROUPS_FINISHED ,
    'spns' : LDAPAgentCommand.SPNSERVICES_FINISHED ,
    'adinfo' : LDAPAgentCommand.DOMAININFO_FINISHED,
    'trusts' : LDAPAgentCommand.TRUSTS_FINISHED,
    'schema' : LDAPAgentCommand.SCHEMA_FINISHED,
}
# Reverse lookup: completion enum member -> job name.
MSLDAP_JOB_TYPES_INV = {v: k for k, v in MSLDAP_JOB_TYPES.items()}
class LDAPAgentJob:
    """Simple message container passed to/from an LDAP gatherer agent."""

    def __init__(self, command, data):
        self.command = command  # expected to be an LDAPAgentCommand member
        self.data = data        # command-specific payload
"info@skelsec.com"
] | info@skelsec.com |
816c4461ef4bd6c6665cc240da911eecc02460b1 | bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd | /neekanee/job_scrapers/plugins/com/icims/oasispetroleum.py | 02b50cd3371124e8785267ca1f23c53926380bbc | [] | no_license | thayton/neekanee | 0890dd5e5cf5bf855d4867ae02de6554291dc349 | f2b2a13e584469d982f7cc20b49a9b19fed8942d | refs/heads/master | 2021-03-27T11:10:07.633264 | 2018-07-13T14:19:30 | 2018-07-13T14:19:30 | 11,584,212 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from neekanee.jobscrapers.icims.icims2 import IcimsJobScraper
# Static company descriptor consumed by IcimsJobScraper: identity, site
# URLs, and employee-count range ([min, max]).
COMPANY = {
    'name': 'Oasis Petroleum',
    'hq': 'Houston, TX',
    'home_page_url': 'http://www.oasispetroleum.com',
    'jobs_page_url': 'https://jobs-oasispetroleum.icims.com/jobs/intro?in_iframe=1',
    'empcnt': [201,500]
}
class OasisPetroleumJobScraper(IcimsJobScraper):
    """Job scraper for Oasis Petroleum's iCIMS-hosted careers site."""
    def __init__(self):
        # All scraping logic lives in the generic iCIMS base class,
        # parameterized with this company's descriptor.
        super(OasisPetroleumJobScraper, self).__init__(COMPANY)
def get_scraper():
    """Factory hook used by the scraping framework to obtain an instance."""
    return OasisPetroleumJobScraper()
# Allow running this plugin directly for a one-off scrape.
if __name__ == '__main__':
    job_scraper = get_scraper()
    job_scraper.scrape_jobs()
| [
"thayton@neekanee.com"
] | thayton@neekanee.com |
68bca719f51fa8be7d0b4bbe77de46f4e3f08e4a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/others/Vaecf_ID2903_for_PyTorch/cornac/eval_methods/base_method.py | 1c7a05cdd6297ea47d19a7b33daedcc6023b3f63 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 26,942 | py | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import time
import numpy as np
from scipy.sparse import csr_matrix
from tqdm.auto import tqdm
from ..data import FeatureModality
from ..data import TextModality, ReviewModality
from ..data import ImageModality
from ..data import GraphModality
from ..data import SentimentModality
from ..data import Dataset
from ..metrics import RatingMetric
from ..metrics import RankingMetric
from ..experiment.result import Result
from ..utils import get_rng
def rating_eval(model, metrics, test_set, user_based=False, verbose=False):
    """Evaluate model on provided rating metrics.

    Parameters
    ----------
    model: :obj:`cornac.models.Recommender`, required
        Recommender model to be evaluated.

    metrics: :obj:`iterable`, required
        List of rating metrics :obj:`cornac.metrics.RatingMetric`.

    test_set: :obj:`cornac.data.Dataset`, required
        Dataset to be used for evaluation.

    user_based: bool, optional, default: False
        Evaluation mode. Whether results are averaging based on number of users or number of ratings.

    verbose: bool, optional, default: False
        Output evaluation progress.

    Returns
    -------
    res: (List, List)
        Tuple of two lists:
         - average result for each of the metrics
         - average result per user for each of the metrics
    """
    if len(metrics) == 0:
        return [], []

    avg_results = []
    user_results = []

    # Predict a rating for every (user, item) pair present in the test set.
    (u_indices, i_indices, r_values) = test_set.uir_tuple
    r_preds = np.fromiter(
        tqdm(
            (
                model.rate(user_idx, item_idx).item()
                for user_idx, item_idx in zip(u_indices, i_indices)
            ),
            desc="Rating",
            disable=not verbose,
            miniters=100,
            total=len(u_indices),
        ),
        # FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `float` (== float64) is the documented replacement.
        dtype=float,
    )

    # Ground-truth and prediction matrices share the same sparsity pattern.
    gt_mat = test_set.csr_matrix
    pd_mat = csr_matrix((r_preds, (u_indices, i_indices)), shape=gt_mat.shape)

    for mt in metrics:
        if user_based:  # averaging over users
            user_results.append(
                {
                    user_idx: mt.compute(
                        gt_ratings=gt_mat.getrow(user_idx).data,
                        pd_ratings=pd_mat.getrow(user_idx).data,
                    ).item()
                    for user_idx in test_set.user_indices
                }
            )
            avg_results.append(sum(user_results[-1].values()) / len(user_results[-1]))
        else:  # averaging over ratings
            user_results.append({})
            avg_results.append(mt.compute(gt_ratings=r_values, pd_ratings=r_preds))

    return avg_results, user_results
def ranking_eval(
    model,
    metrics,
    train_set,
    test_set,
    val_set=None,
    rating_threshold=1.0,
    exclude_unknowns=True,
    verbose=False,
):
    """Evaluate model on provided ranking metrics.
    Parameters
    ----------
    model: :obj:`cornac.models.Recommender`, required
        Recommender model to be evaluated.
    metrics: :obj:`iterable`, required
        List of rating metrics :obj:`cornac.metrics.RankingMetric`.
    train_set: :obj:`cornac.data.Dataset`, required
        Dataset to be used for model training. This will be used to exclude
        observations already appeared during training.
    test_set: :obj:`cornac.data.Dataset`, required
        Dataset to be used for evaluation.
    val_set: :obj:`cornac.data.Dataset`, optional, default: None
        Dataset to be used for model selection. This will be used to exclude
        observations already appeared during validation.
    rating_threshold: float, optional, default: 1.0
        The threshold to convert ratings into positive or negative feedback.
    exclude_unknowns: bool, optional, default: True
        Ignore unknown users and items during evaluation.
    verbose: bool, optional, default: False
        Output evaluation progress.
    Returns
    -------
    res: (List, List)
        Tuple of two lists:
        - average result for each of the metrics
        - average result per user for each of the metrics
    """
    if len(metrics) == 0:
        return [], []
    avg_results = []
    user_results = [{} for _ in enumerate(metrics)]
    gt_mat = test_set.csr_matrix
    train_mat = train_set.csr_matrix
    val_mat = None if val_set is None else val_set.csr_matrix

    def pos_items(csr_row):
        # Items in this row whose rating clears the positive-feedback threshold.
        return [
            item_idx
            for (item_idx, rating) in zip(csr_row.indices, csr_row.data)
            if rating >= rating_threshold
        ]

    for user_idx in tqdm(
        test_set.user_indices, desc="Ranking", disable=not verbose, miniters=100
    ):
        test_pos_items = pos_items(gt_mat.getrow(user_idx))
        if len(test_pos_items) == 0:
            continue
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # int gives the same default integer dtype.
        u_gt_pos = np.zeros(test_set.num_items, dtype=int)
        u_gt_pos[test_pos_items] = 1
        val_pos_items = [] if val_mat is None else pos_items(val_mat.getrow(user_idx))
        train_pos_items = (
            []
            if train_set.is_unk_user(user_idx)
            else pos_items(train_mat.getrow(user_idx))
        )
        # Negatives are everything that is not positive in train/val/test.
        u_gt_neg = np.ones(test_set.num_items, dtype=int)
        u_gt_neg[test_pos_items + val_pos_items + train_pos_items] = 0
        item_indices = None if exclude_unknowns else np.arange(test_set.num_items)
        item_rank, item_scores = model.rank(user_idx, item_indices)
        for i, mt in enumerate(metrics):
            mt_score = mt.compute(
                gt_pos=u_gt_pos,
                gt_neg=u_gt_neg,
                pd_rank=item_rank,
                pd_scores=item_scores,
            )
            user_results[i][user_idx] = mt_score
    # avg results of ranking metrics
    for i, mt in enumerate(metrics):
        avg_results.append(sum(user_results[i].values()) / len(user_results[i]))
    return avg_results, user_results
class BaseMethod:
    """Base Evaluation Method
    Parameters
    ----------
    data: array-like, required
        Raw preference data in the triplet format [(user_id, item_id, rating_value)].
    rating_threshold: float, optional, default: 1.0
        Threshold used to binarize rating values into positive or negative feedback for
        model evaluation using ranking metrics (rating metrics are not affected).
    seed: int, optional, default: None
        Random seed for reproducibility.
    exclude_unknowns: bool, optional, default: True
        If `True`, unknown users and items will be ignored during model evaluation.
    verbose: bool, optional, default: False
        Output running log.
    """
    def __init__(
        self,
        data=None,
        fmt="UIR",
        rating_threshold=1.0,
        seed=None,
        exclude_unknowns=True,
        verbose=False,
        **kwargs
    ):
        self._data = data
        self.fmt = fmt
        # The three splits are populated later by build()/_build_datasets().
        self.train_set = None
        self.test_set = None
        self.val_set = None
        self.rating_threshold = rating_threshold
        self.exclude_unknowns = exclude_unknowns
        self.verbose = verbose
        self.seed = seed
        self.rng = get_rng(seed)
        # Global id maps are shared by all splits so the same raw user/item id
        # maps to the same integer index in train, test and validation.
        self.global_uid_map = OrderedDict()
        self.global_iid_map = OrderedDict()
        # Optional auxiliary modalities; each assignment goes through the
        # corresponding type-validating property setter defined below.
        self.user_feature = kwargs.get("user_feature", None)
        self.user_text = kwargs.get("user_text", None)
        self.user_image = kwargs.get("user_image", None)
        self.user_graph = kwargs.get("user_graph", None)
        self.item_feature = kwargs.get("item_feature", None)
        self.item_text = kwargs.get("item_text", None)
        self.item_image = kwargs.get("item_image", None)
        self.item_graph = kwargs.get("item_graph", None)
        self.sentiment = kwargs.get("sentiment", None)
        self.review_text = kwargs.get("review_text", None)
        if verbose:
            print("rating_threshold = {:.1f}".format(rating_threshold))
            print("exclude_unknowns = {}".format(exclude_unknowns))
    @property
    def total_users(self):
        """Total number of distinct users seen across all splits."""
        return len(self.global_uid_map)
    @property
    def total_items(self):
        """Total number of distinct items seen across all splits."""
        return len(self.global_iid_map)
    # Each modality property below stores its value in a name-mangled
    # attribute (e.g. _BaseMethod__user_feature) and validates the value's
    # type when it is set, raising ValueError for a wrong modality class.
    @property
    def user_feature(self):
        return self.__user_feature
    @property
    def user_text(self):
        return self.__user_text
    @user_feature.setter
    def user_feature(self, input_modality):
        if input_modality is not None and not isinstance(
            input_modality, FeatureModality
        ):
            raise ValueError(
                "input_modality has to be instance of FeatureModality but {}".format(
                    type(input_modality)
                )
            )
        self.__user_feature = input_modality
    @user_text.setter
    def user_text(self, input_modality):
        if input_modality is not None and not isinstance(input_modality, TextModality):
            raise ValueError(
                "input_modality has to be instance of TextModality but {}".format(
                    type(input_modality)
                )
            )
        self.__user_text = input_modality
    @property
    def user_image(self):
        return self.__user_image
    @user_image.setter
    def user_image(self, input_modality):
        if input_modality is not None and not isinstance(input_modality, ImageModality):
            raise ValueError(
                "input_modality has to be instance of ImageModality but {}".format(
                    type(input_modality)
                )
            )
        self.__user_image = input_modality
    @property
    def user_graph(self):
        return self.__user_graph
    @user_graph.setter
    def user_graph(self, input_modality):
        if input_modality is not None and not isinstance(input_modality, GraphModality):
            raise ValueError(
                "input_modality has to be instance of GraphModality but {}".format(
                    type(input_modality)
                )
            )
        self.__user_graph = input_modality
    @property
    def item_feature(self):
        return self.__item_feature
    @property
    def item_text(self):
        return self.__item_text
    @item_feature.setter
    def item_feature(self, input_modality):
        if input_modality is not None and not isinstance(
            input_modality, FeatureModality
        ):
            raise ValueError(
                "input_modality has to be instance of FeatureModality but {}".format(
                    type(input_modality)
                )
            )
        self.__item_feature = input_modality
    @item_text.setter
    def item_text(self, input_modality):
        if input_modality is not None and not isinstance(input_modality, TextModality):
            raise ValueError(
                "input_modality has to be instance of TextModality but {}".format(
                    type(input_modality)
                )
            )
        self.__item_text = input_modality
    @property
    def item_image(self):
        return self.__item_image
    @item_image.setter
    def item_image(self, input_modality):
        if input_modality is not None and not isinstance(input_modality, ImageModality):
            raise ValueError(
                "input_modality has to be instance of ImageModality but {}".format(
                    type(input_modality)
                )
            )
        self.__item_image = input_modality
    @property
    def item_graph(self):
        return self.__item_graph
    @item_graph.setter
    def item_graph(self, input_modality):
        if input_modality is not None and not isinstance(input_modality, GraphModality):
            raise ValueError(
                "input_modality has to be instance of GraphModality but {}".format(
                    type(input_modality)
                )
            )
        self.__item_graph = input_modality
    @property
    def sentiment(self):
        return self.__sentiment
    @sentiment.setter
    def sentiment(self, input_modality):
        if input_modality is not None and not isinstance(
            input_modality, SentimentModality
        ):
            raise ValueError(
                "input_modality has to be instance of SentimentModality but {}".format(
                    type(input_modality)
                )
            )
        self.__sentiment = input_modality
    @property
    def review_text(self):
        return self.__review_text
    @review_text.setter
    def review_text(self, input_modality):
        if input_modality is not None and not isinstance(
            input_modality, ReviewModality
        ):
            raise ValueError(
                "input_modality has to be instance of ReviewModality but {}".format(
                    type(input_modality)
                )
            )
        self.__review_text = input_modality
    def _reset(self):
        """Reset the random number generator for reproducibility"""
        self.rng = get_rng(self.seed)
        # NOTE(review): only test_set is reset here, val_set is left untouched —
        # confirm this is intentional.
        self.test_set = self.test_set.reset()
    def _organize_metrics(self, metrics):
        """Organize metrics according to their types (rating or raking)
        Parameters
        ----------
        metrics: :obj:`iterable`
            List of metrics.
        """
        if isinstance(metrics, dict):
            self.rating_metrics = metrics.get("rating", [])
            self.ranking_metrics = metrics.get("ranking", [])
        elif isinstance(metrics, list):
            self.rating_metrics = []
            self.ranking_metrics = []
            for mt in metrics:
                if isinstance(mt, RatingMetric):
                    self.rating_metrics.append(mt)
                elif isinstance(mt, RankingMetric) and hasattr(mt.k, "__len__"):
                    # A ranking metric configured with a list of cutoffs is
                    # expanded into one metric instance per distinct k.
                    self.ranking_metrics.extend(
                        [mt.__class__(k=_k) for _k in sorted(set(mt.k))]
                    )
                else:
                    self.ranking_metrics.append(mt)
        else:
            raise ValueError("Type of metrics has to be either dict or list!")
        # sort metrics by name
        self.rating_metrics = sorted(self.rating_metrics, key=lambda mt: mt.name)
        self.ranking_metrics = sorted(self.ranking_metrics, key=lambda mt: mt.name)
    def _build_datasets(self, train_data, test_data, val_data=None):
        """Construct the train/test(/val) Dataset objects, all sharing the
        global user/item id maps so indices agree across splits."""
        # Training data never excludes unknowns: it is what defines them.
        self.train_set = Dataset.build(
            data=train_data,
            fmt=self.fmt,
            global_uid_map=self.global_uid_map,
            global_iid_map=self.global_iid_map,
            seed=self.seed,
            exclude_unknowns=False,
        )
        if self.verbose:
            print("---")
            print("Training data:")
            print("Number of users = {}".format(self.train_set.num_users))
            print("Number of items = {}".format(self.train_set.num_items))
            print("Number of ratings = {}".format(self.train_set.num_ratings))
            print("Max rating = {:.1f}".format(self.train_set.max_rating))
            print("Min rating = {:.1f}".format(self.train_set.min_rating))
            print("Global mean = {:.1f}".format(self.train_set.global_mean))
        self.test_set = Dataset.build(
            data=test_data,
            fmt=self.fmt,
            global_uid_map=self.global_uid_map,
            global_iid_map=self.global_iid_map,
            seed=self.seed,
            exclude_unknowns=self.exclude_unknowns,
        )
        if self.verbose:
            print("---")
            print("Test data:")
            print("Number of users = {}".format(len(self.test_set.uid_map)))
            print("Number of items = {}".format(len(self.test_set.iid_map)))
            print("Number of ratings = {}".format(self.test_set.num_ratings))
            print(
                "Number of unknown users = {}".format(
                    self.test_set.num_users - self.train_set.num_users
                )
            )
            print(
                "Number of unknown items = {}".format(
                    self.test_set.num_items - self.train_set.num_items
                )
            )
        if val_data is not None and len(val_data) > 0:
            self.val_set = Dataset.build(
                data=val_data,
                fmt=self.fmt,
                global_uid_map=self.global_uid_map,
                global_iid_map=self.global_iid_map,
                seed=self.seed,
                exclude_unknowns=self.exclude_unknowns,
            )
            if self.verbose:
                print("---")
                print("Validation data:")
                print("Number of users = {}".format(len(self.val_set.uid_map)))
                print("Number of items = {}".format(len(self.val_set.iid_map)))
                print("Number of ratings = {}".format(self.val_set.num_ratings))
        if self.verbose:
            print("---")
            print("Total users = {}".format(self.total_users))
            print("Total items = {}".format(self.total_items))
        self.train_set.total_users = self.total_users
        self.train_set.total_items = self.total_items
    def _build_modalities(self):
        """Build every configured modality against the training id maps, then
        attach the built modalities to all dataset splits."""
        for user_modality in [
            self.user_feature,
            self.user_text,
            self.user_image,
            self.user_graph,
        ]:
            if user_modality is None:
                continue
            # User modalities are keyed by the global user id map.
            user_modality.build(
                id_map=self.global_uid_map,
                uid_map=self.train_set.uid_map,
                iid_map=self.train_set.iid_map,
                dok_matrix=self.train_set.dok_matrix,
            )
        for item_modality in [
            self.item_feature,
            self.item_text,
            self.item_image,
            self.item_graph,
        ]:
            if item_modality is None:
                continue
            # Item modalities are keyed by the global item id map.
            item_modality.build(
                id_map=self.global_iid_map,
                uid_map=self.train_set.uid_map,
                iid_map=self.train_set.iid_map,
                dok_matrix=self.train_set.dok_matrix,
            )
        for modality in [self.sentiment, self.review_text]:
            if modality is None:
                continue
            modality.build(
                uid_map=self.train_set.uid_map,
                iid_map=self.train_set.iid_map,
                dok_matrix=self.train_set.dok_matrix,
            )
        self.add_modalities(
            user_feature=self.user_feature,
            user_text=self.user_text,
            user_image=self.user_image,
            user_graph=self.user_graph,
            item_feature=self.item_feature,
            item_text=self.item_text,
            item_image=self.item_image,
            item_graph=self.item_graph,
            sentiment=self.sentiment,
            review_text=self.review_text,
        )
    def add_modalities(self, **kwargs):
        """
        Add successfully built modalities to all datasets. This is handy for
        seperately built modalities that are not invoked in the build method.
        """
        self.user_feature = kwargs.get("user_feature", None)
        self.user_text = kwargs.get("user_text", None)
        self.user_image = kwargs.get("user_image", None)
        self.user_graph = kwargs.get("user_graph", None)
        self.item_feature = kwargs.get("item_feature", None)
        self.item_text = kwargs.get("item_text", None)
        self.item_image = kwargs.get("item_image", None)
        self.item_graph = kwargs.get("item_graph", None)
        self.sentiment = kwargs.get("sentiment", None)
        self.review_text = kwargs.get("review_text", None)
        for data_set in [self.train_set, self.test_set, self.val_set]:
            if data_set is None:
                continue
            data_set.add_modalities(
                user_feature=self.user_feature,
                user_text=self.user_text,
                user_image=self.user_image,
                user_graph=self.user_graph,
                item_feature=self.item_feature,
                item_text=self.item_text,
                item_image=self.item_image,
                item_graph=self.item_graph,
                sentiment=self.sentiment,
                review_text=self.review_text,
            )
    def build(self, train_data, test_data, val_data=None):
        """Build datasets and modalities from the given raw data splits.
        Raises ValueError if train_data or test_data is None or empty.
        Returns self to allow call chaining.
        """
        if train_data is None or len(train_data) == 0:
            raise ValueError("train_data is required but None or empty!")
        if test_data is None or len(test_data) == 0:
            raise ValueError("test_data is required but None or empty!")
        self.global_uid_map.clear()
        self.global_iid_map.clear()
        self._build_datasets(train_data, test_data, val_data)
        self._build_modalities()
        return self
    def _eval(self, model, test_set, val_set, user_based):
        """Run the organized rating and ranking metrics on `test_set` and
        collect everything into a :obj:`cornac.experiment.result.Result`."""
        metric_avg_results = OrderedDict()
        metric_user_results = OrderedDict()
        avg_results, user_results = rating_eval(
            model=model,
            metrics=self.rating_metrics,
            test_set=test_set,
            user_based=user_based,
            verbose=self.verbose,
        )
        for i, mt in enumerate(self.rating_metrics):
            metric_avg_results[mt.name] = avg_results[i]
            metric_user_results[mt.name] = user_results[i]
        avg_results, user_results = ranking_eval(
            model=model,
            metrics=self.ranking_metrics,
            train_set=self.train_set,
            test_set=test_set,
            val_set=val_set,
            rating_threshold=self.rating_threshold,
            exclude_unknowns=self.exclude_unknowns,
            verbose=self.verbose,
        )
        for i, mt in enumerate(self.ranking_metrics):
            metric_avg_results[mt.name] = avg_results[i]
            metric_user_results[mt.name] = user_results[i]
        return Result(model.name, metric_avg_results, metric_user_results)
    def evaluate(self, model, metrics, user_based, show_validation=True):
        """Evaluate given models according to given metrics
        Parameters
        ----------
        model: :obj:`cornac.models.Recommender`
            Recommender model to be evaluated.
        metrics: :obj:`iterable`
            List of metrics.
        user_based: bool, required
            Evaluation strategy for the rating metrics. Whether results
            are averaging based on number of users or number of ratings.
        show_validation: bool, optional, default: True
            Whether to show the results on validation set (if exists).
        Returns
        -------
        res: (:obj:`cornac.experiment.Result`, :obj:`cornac.experiment.Result`)
            Tuple of (test_result, val_result); val_result is None unless a
            validation set exists and show_validation is True.
        """
        if self.train_set is None:
            raise ValueError("train_set is required but None!")
        if self.test_set is None:
            raise ValueError("test_set is required but None!")
        self._reset()
        self._organize_metrics(metrics)
        ###########
        # FITTING #
        ###########
        if self.verbose:
            print("\n[{}] Training started!".format(model.name))
        start = time.time()
        model.fit(self.train_set, self.val_set)
        train_time = time.time() - start
        ##############
        # EVALUATION #
        ##############
        if self.verbose:
            print("\n[{}] Evaluation started!".format(model.name))
        start = time.time()
        test_result = self._eval(
            model=model,
            test_set=self.test_set,
            val_set=self.val_set,
            user_based=user_based,
        )
        test_time = time.time() - start
        # Timings are recorded alongside the metric results.
        test_result.metric_avg_results["Train (s)"] = train_time
        test_result.metric_avg_results["Test (s)"] = test_time
        val_result = None
        if show_validation and self.val_set is not None:
            start = time.time()
            val_result = self._eval(
                model=model, test_set=self.val_set, val_set=None, user_based=user_based
            )
            val_time = time.time() - start
            val_result.metric_avg_results["Time (s)"] = val_time
        return test_result, val_result
    @classmethod
    def from_splits(
        cls,
        train_data,
        test_data,
        val_data=None,
        fmt="UIR",
        rating_threshold=1.0,
        exclude_unknowns=False,
        seed=None,
        verbose=False,
        **kwargs
    ):
        """Constructing evaluation method given data.
        Parameters
        ----------
        train_data: array-like
            Training data
        test_data: array-like
            Test data
        val_data: array-like, optional, default: None
            Validation data
        fmt: str, default: 'UIR'
            Format of the input data. Currently, we are supporting:
            'UIR': User, Item, Rating
            'UIRT': User, Item, Rating, Timestamp
        rating_threshold: float, default: 1.0
            Threshold to decide positive or negative preferences.
        exclude_unknowns: bool, default: False
            Whether to exclude unknown users/items in evaluation.
        seed: int, optional, default: None
            Random seed for reproduce the splitting.
        verbose: bool, default: False
            The verbosity flag.
        Returns
        -------
        method: :obj:`<cornac.eval_methods.BaseMethod>`
            Evaluation method object.
        """
        method = cls(
            fmt=fmt,
            rating_threshold=rating_threshold,
            exclude_unknowns=exclude_unknowns,
            seed=seed,
            verbose=verbose,
            **kwargs
        )
        return method.build(
            train_data=train_data, test_data=test_data, val_data=val_data
        )
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
863046733c83dda48b2b4afcc90be17c2f9b0841 | 7ff0077a55f6bf4a74704e430f354aeabaae3e0b | /tensorflow_probability/python/bijectors/weibull_test.py | c1289dbf4e865ef6940993c305076a246fcf7033 | [
"Apache-2.0"
] | permissive | markaduol/probability | 50a1d97810d11c747bd9546f977b2937c9e04d78 | 8af21dff96502a5bdc01b1be2c595043a3efc5d1 | refs/heads/master | 2020-03-29T20:50:26.001297 | 2018-09-25T21:51:10 | 2018-09-25T21:51:50 | 150,333,784 | 0 | 1 | Apache-2.0 | 2018-09-25T21:54:49 | 2018-09-25T21:54:49 | null | UTF-8 | Python | false | false | 2,768 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
class WeibullBijectorTest(tf.test.TestCase):
  """Tests correctness of the weibull bijector."""
  def testBijector(self):
    # Checks forward/inverse round trip and the log-det-Jacobian against the
    # closed-form Weibull CDF/PDF from scipy.stats.
    with self.test_session():
      scale = 5.
      concentration = 0.3
      bijector = tfb.Weibull(
          scale=scale, concentration=concentration, validate_args=True)
      self.assertEqual("weibull", bijector.name)
      x = np.array([[[0.], [1.], [14.], [20.], [100.]]], dtype=np.float32)
      # Weibull distribution
      # The bijector's forward map is the Weibull CDF, so the distribution's
      # logpdf equals the forward log-det-Jacobian (asserted below).
      weibull_dist = stats.frechet_r(c=concentration, scale=scale)
      y = weibull_dist.cdf(x).astype(np.float32)
      self.assertAllClose(y, self.evaluate(bijector.forward(x)))
      self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
      self.assertAllClose(
          weibull_dist.logpdf(x),
          self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=0)))
      # Forward and inverse log-det-Jacobians must be negatives of each other.
      self.assertAllClose(
          self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=0)),
          self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=0)),
          rtol=1e-4,
          atol=0.)
  def testScalarCongruency(self):
    # Statistical sanity check that the bijector is a monotonic scalar map
    # over [1, 100].
    bijector_test_util.assert_scalar_congruency(
        tfb.Weibull(scale=20., concentration=0.3),
        lower_x=1.,
        upper_x=100.,
        eval_func=self.evaluate,
        rtol=0.02)
  def testBijectiveAndFinite(self):
    # The y grid is the image of comparable x values under the Weibull CDF
    # with scale=20, concentration=2, i.e. y = -expm1(-(x/20)^2).
    bijector = tfb.Weibull(scale=20., concentration=2., validate_args=True)
    x = np.linspace(1., 8., num=10).astype(np.float32)
    y = np.linspace(
        -np.expm1(-1 / 400.),
        -np.expm1(-16), num=10).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        bijector, x, y, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
2854d57257d3db636fb50901650e1de302aeb079 | 562522946c03d168569cd79f43140c8326441fb4 | /nn.py | c6f15358a68cd1305fd4f5a439e748f625d64abd | [] | no_license | hackingmath/puzzles | 3d5f17d037f97767e14d1c1be51e352dc35928c7 | a3fd8cc976759b655b9eb13db173c461d8ced5ca | refs/heads/master | 2021-06-08T12:55:03.290076 | 2021-06-07T16:43:55 | 2021-06-07T16:43:55 | 159,741,819 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,596 | py | """DIY Neural Net
Sept. 26, 2019"""
import math
from matrix import Matrix,transpose,multiply
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    The naive form overflows in math.exp(-x) for large negative x
    (roughly x < -709); branching on the sign keeps the exponent
    non-positive in both cases while returning identical values.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)  # safe: x < 0, so exp(x) < 1
    return z / (1 + z)
def dsigmoid(y):
    """Sigmoid derivative expressed via the sigmoid's own output y."""
    return y - y * y
class NeuralNetwork(object):
def __init__(self, input_nodes,hidden_nodes,output_nodes,
learningrate):
self.input_nodes = input_nodes #int
self.hidden_nodes = hidden_nodes #int
self.output_nodes = output_nodes #int
self.weights_ih = Matrix(self.hidden_nodes,self.input_nodes)
self.weights_ho = Matrix(self.output_nodes,self.hidden_nodes)
self.weights_ih.randomize()
self.weights_ho.randomize()
self.lr = learningrate
def activation_function(self,x):
"""The Sigmoid Function"""
out = [0]*len(x)
for i, element in enumerate(x):
#print("element:",element)
out[i] = sigmoid(x[i][0])
#print(out)
return out
#train the neural network
def train(self,inputs_list,targets_list):
#convert inputs list to 2d array
inputs = inputs_list.transpose()
targets = targets_list.transpose()
#calculate signals into hidden layer
hidden_inputs = multiply(self.weights_ih,inputs)
hidden_outputs = self.activation_function(hidden_inputs)
#calculate signals entering final output layer
final_inputs = multiply(self.weights_ho,hidden_outputs)
#calculate signals exiting final output layer
final_outputs = self.activation_function(final_inputs)
#output layer error is the target - actual
output_errors = targets - final_outputs
#hidden layer error is the output_errors, split by weights,
#recombined at hidden nodes
hidden_errors = multiply(transpose(self.weights_ho),output_errors)
#update the weights for the links between the hidden and output layers
self.weights_ho += self.lr * multiply((output_errors*final_inputs *\
(1.0 - final_outputs)),
transpose(hidden_outputs))
#update the weights for the links between the input and hidden layers
self.weights_ih += self.lr * multiply((hidden_errors * hidden_outputs *\
(1.0 - hidden_outputs)),
transpose(inputs))
def query(self,inputs_list):
#convert inputs list to 2d array
| [
"noreply@github.com"
] | hackingmath.noreply@github.com |
9ebe38d95dfb57e6e17ff420add16b4fcaac2ebe | effed4b460230e0559619e083902754fc24aae24 | /books/urls.py | 58c8958f70ab90ad40b76013cf6030790b12f86e | [] | no_license | wgoode3/books | 4a65d24fa09aad350138bce5cfcd416880843298 | 452b916880c560b9c06a690e752e11f89d311f8f | refs/heads/master | 2021-09-05T08:55:10.998792 | 2018-01-25T20:28:59 | 2018-01-25T20:28:59 | 118,942,117 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | """books URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# Delegate all routing to the two installed apps. Both patterns use the
# empty-prefix regex r'^', so Django tries user_app's URLconf first and falls
# through to book_app for anything user_app does not match.
# assumes the two app URLconfs define non-overlapping paths — TODO confirm
urlpatterns = [
    url(r'^', include('apps.user_app.urls')),
    url(r'^', include('apps.book_app.urls'))
]
| [
"wgoode3@gmail.com"
] | wgoode3@gmail.com |
93e56287fee314f5e72515d1053e8119aadf4c05 | 96e38b89fa057fa0c1cf34e498b4624041dfc6e2 | /BOJ/DFS/Python/16946.py | 9a1a8a8cdd3697ecee32131f3f4d4f30a7f49492 | [] | no_license | malkoG/polyglot-cp | 66059246b01766da3c359dbd16f04348d3c7ecd2 | 584763144afe40d73e72dd55f90ee1206029ca8f | refs/heads/master | 2021-11-24T13:33:49.625237 | 2019-10-06T07:42:49 | 2019-10-06T07:42:49 | 176,255,722 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | dx = [-1, 0, 1, 0]
dy = [0, -1, 0, 1]
area = [0] * 1010101
def dfs(field, component, x, y, max_x, max_y, num_of_components):
    """Flood-fill the empty-cell component containing (x, y).

    Labels every reachable '0' cell with num_of_components in `component`
    and accumulates the component size in the global `area` list.

    Implemented with an explicit stack: the original recursive version hits
    Python's default recursion limit (1000) on the problem's grids of up to
    1000x1000 cells, where a single component can span ~10^6 cells.
    Coordinates are 1-based; column 0 / row 0 form the sentinel border.
    """
    stack = [(x, y)]
    component[y][x] = num_of_components
    area[num_of_components] += 1
    while stack:
        cur_x, cur_y = stack.pop()
        for i in range(4):
            next_x = cur_x + dx[i]
            next_y = cur_y + dy[i]
            # Mark on push (not on pop) so each cell is counted exactly once.
            if 0 < next_x <= max_x and 0 < next_y <= max_y \
                    and component[next_y][next_x] == 0 and field[next_y][next_x] == '0':
                component[next_y][next_x] = num_of_components
                area[num_of_components] += 1
                stack.append((next_x, next_y))
# BOJ 16946: for each wall cell, print (1 + size of all distinct adjacent
# empty components) modulo 10; empty cells print 0.
import sys
# Safeguard for recursive flood-fill implementations of dfs(): a component on
# a 1000x1000 grid can be ~10^6 cells deep.
sys.setrecursionlimit(10 ** 6)
N, M = map(int, input().split())
component = [([0] * (M + 2)) for i in range(N + 2)]
# Pad the grid with a '0' sentinel border so neighbor checks need no bounds logic.
field = ["0" * (M + 2)]
for i in range(N):
    field.append("0" + input() + "0")
field.append("0" * (M + 2))
# Label every empty-cell component and record its size in area[].
number_of_components = 1
for y in range(1, N + 1):
    for x in range(1, M + 1):
        if component[y][x] == 0 and field[y][x] == "0":
            dfs(field, component, x, y, M, N, number_of_components)
            number_of_components += 1
for y in range(1, N + 1):
    for x in range(1, M + 1):
        if field[y][x] == "0":
            print("0", end="")
        else:
            # Count the wall cell itself plus each distinct neighboring
            # component once (a set avoids double-counting a component that
            # touches the wall on two sides).
            acc = 1
            set_of_components = set()
            for i in range(4):
                set_of_components.add(component[y + dy[i]][x + dx[i]])
            acc += sum([area[s] for s in set_of_components])
            # The problem asks for the count modulo 10 so every cell prints
            # exactly one digit.
            print(str(acc % 10), end="")
    print()
| [
"rijgndqw012@gmail.com"
] | rijgndqw012@gmail.com |
ab8a90598939065d1f8b2310c5f55e53c90d42aa | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/util/LaunchErrorDialog.pyi | 1f4a30077675a4950e0f64dfed58bf2573bfe952 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,780 | pyi | from typing import List
import java.awt
import java.awt.dnd
import java.awt.event
import java.awt.im
import java.awt.image
import java.beans
import java.io
import java.lang
import java.util
import javax.accessibility
import javax.swing
class LaunchErrorDialog(javax.swing.JDialog):
def __init__(self, url: java.net.URL, fileURL: java.net.URL): ...
def action(self, __a0: java.awt.Event, __a1: object) -> bool: ...
@overload
def add(self, __a0: java.awt.Component) -> java.awt.Component: ...
@overload
def add(self, __a0: java.awt.PopupMenu) -> None: ...
@overload
def add(self, __a0: java.awt.Component, __a1: int) -> java.awt.Component: ...
@overload
def add(self, __a0: unicode, __a1: java.awt.Component) -> java.awt.Component: ...
@overload
def add(self, __a0: java.awt.Component, __a1: object) -> None: ...
@overload
def add(self, __a0: java.awt.Component, __a1: object, __a2: int) -> None: ...
def addComponentListener(self, __a0: java.awt.event.ComponentListener) -> None: ...
def addContainerListener(self, __a0: java.awt.event.ContainerListener) -> None: ...
def addFocusListener(self, __a0: java.awt.event.FocusListener) -> None: ...
def addHierarchyBoundsListener(self, __a0: java.awt.event.HierarchyBoundsListener) -> None: ...
def addHierarchyListener(self, __a0: java.awt.event.HierarchyListener) -> None: ...
def addInputMethodListener(self, __a0: java.awt.event.InputMethodListener) -> None: ...
def addKeyListener(self, __a0: java.awt.event.KeyListener) -> None: ...
def addMouseListener(self, __a0: java.awt.event.MouseListener) -> None: ...
def addMouseMotionListener(self, __a0: java.awt.event.MouseMotionListener) -> None: ...
def addMouseWheelListener(self, __a0: java.awt.event.MouseWheelListener) -> None: ...
def addNotify(self) -> None: ...
@overload
def addPropertyChangeListener(self, __a0: java.beans.PropertyChangeListener) -> None: ...
@overload
def addPropertyChangeListener(self, __a0: unicode, __a1: java.beans.PropertyChangeListener) -> None: ...
def addWindowFocusListener(self, __a0: java.awt.event.WindowFocusListener) -> None: ...
def addWindowListener(self, __a0: java.awt.event.WindowListener) -> None: ...
def addWindowStateListener(self, __a0: java.awt.event.WindowStateListener) -> None: ...
def applyComponentOrientation(self, __a0: java.awt.ComponentOrientation) -> None: ...
@overload
def applyResourceBundle(self, __a0: unicode) -> None: ...
@overload
def applyResourceBundle(self, __a0: java.util.ResourceBundle) -> None: ...
def areFocusTraversalKeysSet(self, __a0: int) -> bool: ...
@overload
def checkImage(self, __a0: java.awt.Image, __a1: java.awt.image.ImageObserver) -> int: ...
@overload
def checkImage(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: java.awt.image.ImageObserver) -> int: ...
@overload
def contains(self, __a0: java.awt.Point) -> bool: ...
@overload
def contains(self, __a0: int, __a1: int) -> bool: ...
def countComponents(self) -> int: ...
@overload
def createBufferStrategy(self, __a0: int) -> None: ...
@overload
def createBufferStrategy(self, __a0: int, __a1: java.awt.BufferCapabilities) -> None: ...
@overload
def createImage(self, __a0: java.awt.image.ImageProducer) -> java.awt.Image: ...
@overload
def createImage(self, __a0: int, __a1: int) -> java.awt.Image: ...
@overload
def createVolatileImage(self, __a0: int, __a1: int) -> java.awt.image.VolatileImage: ...
@overload
def createVolatileImage(self, __a0: int, __a1: int, __a2: java.awt.ImageCapabilities) -> java.awt.image.VolatileImage: ...
def deliverEvent(self, __a0: java.awt.Event) -> None: ...
def disable(self) -> None: ...
def dispatchEvent(self, __a0: java.awt.AWTEvent) -> None: ...
def dispose(self) -> None: ...
def doLayout(self) -> None: ...
def enableInputMethods(self, __a0: bool) -> None: ...
def equals(self, __a0: object) -> bool: ...
@overload
def findComponentAt(self, __a0: java.awt.Point) -> java.awt.Component: ...
@overload
def findComponentAt(self, __a0: int, __a1: int) -> java.awt.Component: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: long, __a2: long) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: float, __a2: float) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: float, __a2: float) -> None: ...
def getAccessibleContext(self) -> javax.accessibility.AccessibleContext: ...
def getAlignmentX(self) -> float: ...
def getAlignmentY(self) -> float: ...
def getBackground(self) -> java.awt.Color: ...
def getBaseline(self, __a0: int, __a1: int) -> int: ...
def getBaselineResizeBehavior(self) -> java.awt.Component.BaselineResizeBehavior: ...
@overload
def getBounds(self) -> java.awt.Rectangle: ...
@overload
def getBounds(self, __a0: java.awt.Rectangle) -> java.awt.Rectangle: ...
def getBufferStrategy(self) -> java.awt.image.BufferStrategy: ...
def getClass(self) -> java.lang.Class: ...
def getColorModel(self) -> java.awt.image.ColorModel: ...
def getComponent(self, __a0: int) -> java.awt.Component: ...
@overload
def getComponentAt(self, __a0: java.awt.Point) -> java.awt.Component: ...
@overload
def getComponentAt(self, __a0: int, __a1: int) -> java.awt.Component: ...
def getComponentCount(self) -> int: ...
def getComponentListeners(self) -> List[java.awt.event.ComponentListener]: ...
def getComponentOrientation(self) -> java.awt.ComponentOrientation: ...
def getComponentZOrder(self, __a0: java.awt.Component) -> int: ...
def getComponents(self) -> List[java.awt.Component]: ...
def getContainerListeners(self) -> List[java.awt.event.ContainerListener]: ...
def getContentPane(self) -> java.awt.Container: ...
def getCursor(self) -> java.awt.Cursor: ...
def getDefaultCloseOperation(self) -> int: ...
def getDropTarget(self) -> java.awt.dnd.DropTarget: ...
def getFocusCycleRootAncestor(self) -> java.awt.Container: ...
def getFocusListeners(self) -> List[java.awt.event.FocusListener]: ...
def getFocusOwner(self) -> java.awt.Component: ...
def getFocusTraversalKeys(self, __a0: int) -> java.util.Set: ...
def getFocusTraversalKeysEnabled(self) -> bool: ...
def getFocusTraversalPolicy(self) -> java.awt.FocusTraversalPolicy: ...
def getFocusableWindowState(self) -> bool: ...
def getFont(self) -> java.awt.Font: ...
def getFontMetrics(self, __a0: java.awt.Font) -> java.awt.FontMetrics: ...
def getForeground(self) -> java.awt.Color: ...
def getGlassPane(self) -> java.awt.Component: ...
def getGraphics(self) -> java.awt.Graphics: ...
def getGraphicsConfiguration(self) -> java.awt.GraphicsConfiguration: ...
def getHeight(self) -> int: ...
def getHierarchyBoundsListeners(self) -> List[java.awt.event.HierarchyBoundsListener]: ...
def getHierarchyListeners(self) -> List[java.awt.event.HierarchyListener]: ...
def getIconImages(self) -> List[object]: ...
def getIgnoreRepaint(self) -> bool: ...
def getInputContext(self) -> java.awt.im.InputContext: ...
def getInputMethodListeners(self) -> List[java.awt.event.InputMethodListener]: ...
def getInputMethodRequests(self) -> java.awt.im.InputMethodRequests: ...
def getInsets(self) -> java.awt.Insets: ...
def getJMenuBar(self) -> javax.swing.JMenuBar: ...
def getKeyListeners(self) -> List[java.awt.event.KeyListener]: ...
def getLayeredPane(self) -> javax.swing.JLayeredPane: ...
def getLayout(self) -> java.awt.LayoutManager: ...
def getListeners(self, __a0: java.lang.Class) -> List[java.util.EventListener]: ...
def getLocale(self) -> java.util.Locale: ...
@overload
def getLocation(self) -> java.awt.Point: ...
@overload
def getLocation(self, __a0: java.awt.Point) -> java.awt.Point: ...
def getLocationOnScreen(self) -> java.awt.Point: ...
def getMaximumSize(self) -> java.awt.Dimension: ...
def getMinimumSize(self) -> java.awt.Dimension: ...
def getModalExclusionType(self) -> java.awt.Dialog.ModalExclusionType: ...
def getModalityType(self) -> java.awt.Dialog.ModalityType: ...
def getMostRecentFocusOwner(self) -> java.awt.Component: ...
def getMouseListeners(self) -> List[java.awt.event.MouseListener]: ...
def getMouseMotionListeners(self) -> List[java.awt.event.MouseMotionListener]: ...
@overload
def getMousePosition(self) -> java.awt.Point: ...
@overload
def getMousePosition(self, __a0: bool) -> java.awt.Point: ...
def getMouseWheelListeners(self) -> List[java.awt.event.MouseWheelListener]: ...
def getName(self) -> unicode: ...
def getOpacity(self) -> float: ...
def getOwnedWindows(self) -> List[java.awt.Window]: ...
def getOwner(self) -> java.awt.Window: ...
@staticmethod
def getOwnerlessWindows() -> List[java.awt.Window]: ...
def getParent(self) -> java.awt.Container: ...
def getPreferredSize(self) -> java.awt.Dimension: ...
@overload
def getPropertyChangeListeners(self) -> List[java.beans.PropertyChangeListener]: ...
@overload
def getPropertyChangeListeners(self, __a0: unicode) -> List[java.beans.PropertyChangeListener]: ...
def getRootPane(self) -> javax.swing.JRootPane: ...
def getShape(self) -> java.awt.Shape: ...
@overload
def getSize(self) -> java.awt.Dimension: ...
@overload
def getSize(self, __a0: java.awt.Dimension) -> java.awt.Dimension: ...
def getTitle(self) -> unicode: ...
def getToolkit(self) -> java.awt.Toolkit: ...
def getTransferHandler(self) -> javax.swing.TransferHandler: ...
def getTreeLock(self) -> object: ...
def getType(self) -> java.awt.Window.Type: ...
def getWarningString(self) -> unicode: ...
def getWidth(self) -> int: ...
def getWindowFocusListeners(self) -> List[java.awt.event.WindowFocusListener]: ...
def getWindowListeners(self) -> List[java.awt.event.WindowListener]: ...
def getWindowStateListeners(self) -> List[java.awt.event.WindowStateListener]: ...
@staticmethod
def getWindows() -> List[java.awt.Window]: ...
def getX(self) -> int: ...
def getY(self) -> int: ...
def gotFocus(self, __a0: java.awt.Event, __a1: object) -> bool: ...
def handleEvent(self, __a0: java.awt.Event) -> bool: ...
def hasFocus(self) -> bool: ...
def hashCode(self) -> int: ...
def hide(self) -> None: ...
def imageUpdate(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: int, __a4: int, __a5: int) -> bool: ...
def inside(self, __a0: int, __a1: int) -> bool: ...
def invalidate(self) -> None: ...
def isActive(self) -> bool: ...
def isAlwaysOnTop(self) -> bool: ...
def isAlwaysOnTopSupported(self) -> bool: ...
def isAncestorOf(self, __a0: java.awt.Component) -> bool: ...
def isAutoRequestFocus(self) -> bool: ...
def isBackgroundSet(self) -> bool: ...
def isCursorSet(self) -> bool: ...
@staticmethod
def isDefaultLookAndFeelDecorated() -> bool: ...
def isDisplayable(self) -> bool: ...
def isDoubleBuffered(self) -> bool: ...
def isEnabled(self) -> bool: ...
@overload
def isFocusCycleRoot(self) -> bool: ...
@overload
def isFocusCycleRoot(self, __a0: java.awt.Container) -> bool: ...
def isFocusOwner(self) -> bool: ...
def isFocusTraversable(self) -> bool: ...
def isFocusTraversalPolicyProvider(self) -> bool: ...
def isFocusTraversalPolicySet(self) -> bool: ...
def isFocusable(self) -> bool: ...
def isFocusableWindow(self) -> bool: ...
def isFocused(self) -> bool: ...
def isFontSet(self) -> bool: ...
def isForegroundSet(self) -> bool: ...
def isLightweight(self) -> bool: ...
def isLocationByPlatform(self) -> bool: ...
def isMaximumSizeSet(self) -> bool: ...
def isMinimumSizeSet(self) -> bool: ...
def isModal(self) -> bool: ...
def isOpaque(self) -> bool: ...
def isPreferredSizeSet(self) -> bool: ...
def isResizable(self) -> bool: ...
def isShowing(self) -> bool: ...
def isUndecorated(self) -> bool: ...
def isValid(self) -> bool: ...
def isValidateRoot(self) -> bool: ...
def isVisible(self) -> bool: ...
def keyDown(self, __a0: java.awt.Event, __a1: int) -> bool: ...
def keyUp(self, __a0: java.awt.Event, __a1: int) -> bool: ...
@overload
def list(self) -> None: ...
@overload
def list(self, __a0: java.io.PrintStream) -> None: ...
@overload
def list(self, __a0: java.io.PrintWriter) -> None: ...
@overload
def list(self, __a0: java.io.PrintStream, __a1: int) -> None: ...
@overload
def list(self, __a0: java.io.PrintWriter, __a1: int) -> None: ...
def locate(self, __a0: int, __a1: int) -> java.awt.Component: ...
def location(self) -> java.awt.Point: ...
def lostFocus(self, __a0: java.awt.Event, __a1: object) -> bool: ...
def mouseDown(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseDrag(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseEnter(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseExit(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseMove(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseUp(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def move(self, __a0: int, __a1: int) -> None: ...
def nextFocus(self) -> None: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def pack(self) -> None: ...
def paint(self, __a0: java.awt.Graphics) -> None: ...
def paintAll(self, __a0: java.awt.Graphics) -> None: ...
def paintComponents(self, __a0: java.awt.Graphics) -> None: ...
def postEvent(self, __a0: java.awt.Event) -> bool: ...
@overload
def prepareImage(self, __a0: java.awt.Image, __a1: java.awt.image.ImageObserver) -> bool: ...
@overload
def prepareImage(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: java.awt.image.ImageObserver) -> bool: ...
def print(self, __a0: java.awt.Graphics) -> None: ...
def printAll(self, __a0: java.awt.Graphics) -> None: ...
def printComponents(self, __a0: java.awt.Graphics) -> None: ...
@overload
def remove(self, __a0: int) -> None: ...
@overload
def remove(self, __a0: java.awt.Component) -> None: ...
@overload
def remove(self, __a0: java.awt.MenuComponent) -> None: ...
def removeAll(self) -> None: ...
def removeComponentListener(self, __a0: java.awt.event.ComponentListener) -> None: ...
def removeContainerListener(self, __a0: java.awt.event.ContainerListener) -> None: ...
def removeFocusListener(self, __a0: java.awt.event.FocusListener) -> None: ...
def removeHierarchyBoundsListener(self, __a0: java.awt.event.HierarchyBoundsListener) -> None: ...
def removeHierarchyListener(self, __a0: java.awt.event.HierarchyListener) -> None: ...
def removeInputMethodListener(self, __a0: java.awt.event.InputMethodListener) -> None: ...
def removeKeyListener(self, __a0: java.awt.event.KeyListener) -> None: ...
def removeMouseListener(self, __a0: java.awt.event.MouseListener) -> None: ...
def removeMouseMotionListener(self, __a0: java.awt.event.MouseMotionListener) -> None: ...
def removeMouseWheelListener(self, __a0: java.awt.event.MouseWheelListener) -> None: ...
def removeNotify(self) -> None: ...
@overload
def removePropertyChangeListener(self, __a0: java.beans.PropertyChangeListener) -> None: ...
@overload
def removePropertyChangeListener(self, __a0: unicode, __a1: java.beans.PropertyChangeListener) -> None: ...
def removeWindowFocusListener(self, __a0: java.awt.event.WindowFocusListener) -> None: ...
def removeWindowListener(self, __a0: java.awt.event.WindowListener) -> None: ...
def removeWindowStateListener(self, __a0: java.awt.event.WindowStateListener) -> None: ...
@overload
def repaint(self) -> None: ...
@overload
def repaint(self, __a0: long) -> None: ...
@overload
def repaint(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
@overload
def repaint(self, __a0: long, __a1: int, __a2: int, __a3: int, __a4: int) -> None: ...
@overload
def requestFocus(self) -> None: ...
@overload
def requestFocus(self, __a0: java.awt.event.FocusEvent.Cause) -> None: ...
@overload
def requestFocusInWindow(self) -> bool: ...
@overload
def requestFocusInWindow(self, __a0: java.awt.event.FocusEvent.Cause) -> bool: ...
def reshape(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
@overload
def resize(self, __a0: java.awt.Dimension) -> None: ...
@overload
def resize(self, __a0: int, __a1: int) -> None: ...
def revalidate(self) -> None: ...
def setAlwaysOnTop(self, __a0: bool) -> None: ...
def setAutoRequestFocus(self, __a0: bool) -> None: ...
def setBackground(self, __a0: java.awt.Color) -> None: ...
@overload
def setBounds(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def setBounds(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
def setComponentOrientation(self, __a0: java.awt.ComponentOrientation) -> None: ...
def setComponentZOrder(self, __a0: java.awt.Component, __a1: int) -> None: ...
def setContentPane(self, __a0: java.awt.Container) -> None: ...
def setCursor(self, __a0: java.awt.Cursor) -> None: ...
def setDefaultCloseOperation(self, __a0: int) -> None: ...
@staticmethod
def setDefaultLookAndFeelDecorated(__a0: bool) -> None: ...
def setDropTarget(self, __a0: java.awt.dnd.DropTarget) -> None: ...
def setEnabled(self, __a0: bool) -> None: ...
def setFocusCycleRoot(self, __a0: bool) -> None: ...
def setFocusTraversalKeys(self, __a0: int, __a1: java.util.Set) -> None: ...
def setFocusTraversalKeysEnabled(self, __a0: bool) -> None: ...
def setFocusTraversalPolicy(self, __a0: java.awt.FocusTraversalPolicy) -> None: ...
def setFocusTraversalPolicyProvider(self, __a0: bool) -> None: ...
def setFocusable(self, __a0: bool) -> None: ...
def setFocusableWindowState(self, __a0: bool) -> None: ...
def setFont(self, __a0: java.awt.Font) -> None: ...
def setForeground(self, __a0: java.awt.Color) -> None: ...
def setGlassPane(self, __a0: java.awt.Component) -> None: ...
def setIconImage(self, __a0: java.awt.Image) -> None: ...
def setIconImages(self, __a0: List[object]) -> None: ...
def setIgnoreRepaint(self, __a0: bool) -> None: ...
def setJMenuBar(self, __a0: javax.swing.JMenuBar) -> None: ...
def setLayeredPane(self, __a0: javax.swing.JLayeredPane) -> None: ...
def setLayout(self, __a0: java.awt.LayoutManager) -> None: ...
def setLocale(self, __a0: java.util.Locale) -> None: ...
@overload
def setLocation(self, __a0: java.awt.Point) -> None: ...
@overload
def setLocation(self, __a0: int, __a1: int) -> None: ...
def setLocationByPlatform(self, __a0: bool) -> None: ...
def setLocationRelativeTo(self, __a0: java.awt.Component) -> None: ...
def setMaximumSize(self, __a0: java.awt.Dimension) -> None: ...
def setMinimumSize(self, __a0: java.awt.Dimension) -> None: ...
def setMixingCutoutShape(self, __a0: java.awt.Shape) -> None: ...
def setModal(self, __a0: bool) -> None: ...
def setModalExclusionType(self, __a0: java.awt.Dialog.ModalExclusionType) -> None: ...
def setModalityType(self, __a0: java.awt.Dialog.ModalityType) -> None: ...
def setName(self, __a0: unicode) -> None: ...
def setOpacity(self, __a0: float) -> None: ...
def setPreferredSize(self, __a0: java.awt.Dimension) -> None: ...
def setResizable(self, __a0: bool) -> None: ...
def setShape(self, __a0: java.awt.Shape) -> None: ...
@overload
def setSize(self, __a0: java.awt.Dimension) -> None: ...
@overload
def setSize(self, __a0: int, __a1: int) -> None: ...
def setTitle(self, __a0: unicode) -> None: ...
def setTransferHandler(self, __a0: javax.swing.TransferHandler) -> None: ...
def setType(self, __a0: java.awt.Window.Type) -> None: ...
def setUndecorated(self, __a0: bool) -> None: ...
def setVisible(self, __a0: bool) -> None: ...
@overload
def show(self) -> None: ...
@overload
def show(self, __a0: bool) -> None: ...
def toBack(self) -> None: ...
def toFront(self) -> None: ...
def toString(self) -> unicode: ...
def transferFocus(self) -> None: ...
def transferFocusBackward(self) -> None: ...
def transferFocusDownCycle(self) -> None: ...
def transferFocusUpCycle(self) -> None: ...
def update(self, __a0: java.awt.Graphics) -> None: ...
def validate(self) -> None: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
74a377f3bcaf26e2e0c06cefce6c53b6c4ade061 | 42186fa6507999ce60d334a0f04d9ae2127579cd | /安恒杯10月/ezshop/payment/urls.py | 6c40abef29cdd96038a297c075c72985a8be4cf7 | [] | no_license | Imtinmin/CTF_Challenge | ef8b62b3a4a1741d814d989f795a243257ff6f2b | ea276596f9effdbe0cf9ef4457e2e676e652bb74 | refs/heads/master | 2022-12-21T12:40:40.625562 | 2020-04-30T03:27:56 | 2020-04-30T03:27:56 | 158,999,004 | 18 | 3 | null | 2022-12-10T04:34:27 | 2018-11-25T04:53:04 | PHP | UTF-8 | Python | false | false | 140 | py | from django.urls import path
from .views import checkPayment
app_name='payment'
urlpatterns = [
path('check', checkPayment, name='check')
] | [
"954093370@qq.com"
] | 954093370@qq.com |
6cbfd50513a334de077f90cd3336baae3b057561 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_log_webtrends_filter.py | bc99eab7084734fe6f4b2deb0b596cead378d899 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 13,893 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_webtrends_filter
short_description: Filters for WebTrends in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log_webtrends feature and filter category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.8"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
log_webtrends_filter:
description:
- Filters for WebTrends.
default: null
type: dict
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
type: str
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
type: str
choices:
- enable
- disable
filter:
description:
- Webtrends log filter.
type: str
filter_type:
description:
- Include/exclude logs that match the filter.
type: str
choices:
- include
- exclude
forward_traffic:
description:
- Enable/disable forward traffic logging.
type: str
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
type: str
choices:
- enable
- disable
local_traffic:
description:
- Enable/disable local in or out traffic logging.
type: str
choices:
- enable
- disable
multicast_traffic:
description:
- Enable/disable multicast traffic logging.
type: str
choices:
- enable
- disable
netscan_discovery:
description:
- Enable/disable netscan discovery event logging.
type: str
netscan_vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
type: str
severity:
description:
- Lowest severity level to log to WebTrends.
type: str
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer_traffic:
description:
- Enable/disable sniffer traffic logging.
type: str
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
type: str
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Filters for WebTrends.
fortios_log_webtrends_filter:
vdom: "{{ vdom }}"
log_webtrends_filter:
anomaly: "enable"
dns: "enable"
filter: "<your_own_value>"
filter_type: "include"
forward_traffic: "enable"
gtp: "enable"
local_traffic: "enable"
multicast_traffic: "enable"
netscan_discovery: "<your_own_value>"
netscan_vulnerability: "<your_own_value>"
severity: "emergency"
sniffer_traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_log_webtrends_filter_data(json):
option_list = ['anomaly', 'dns', 'filter',
'filter_type', 'forward_traffic', 'gtp',
'local_traffic', 'multicast_traffic', 'netscan_discovery',
'netscan_vulnerability', 'severity', 'sniffer_traffic',
'ssh', 'voip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def log_webtrends_filter(data, fos):
vdom = data['vdom']
log_webtrends_filter_data = data['log_webtrends_filter']
filtered_data = underscore_to_hyphen(filter_log_webtrends_filter_data(log_webtrends_filter_data))
return fos.set('log.webtrends',
'filter',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log_webtrends(data, fos):
if data['log_webtrends_filter']:
resp = log_webtrends_filter(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('log_webtrends_filter'))
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
def main():
mkeyname = None
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"log_webtrends_filter": {
"required": False, "type": "dict", "default": None,
"options": {
"anomaly": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"dns": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"filter": {"required": False, "type": "str"},
"filter_type": {"required": False, "type": "str",
"choices": ["include",
"exclude"]},
"forward_traffic": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"gtp": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"local_traffic": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"multicast_traffic": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"netscan_discovery": {"required": False, "type": "str"},
"netscan_vulnerability": {"required": False, "type": "str"},
"severity": {"required": False, "type": "str",
"choices": ["emergency",
"alert",
"critical",
"error",
"warning",
"notification",
"information",
"debug"]},
"sniffer_traffic": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"ssh": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"voip": {"required": False, "type": "str",
"choices": ["enable",
"disable"]}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
fos = FortiOSHandler(connection, module, mkeyname)
is_error, has_changed, result = fortios_log_webtrends(module.params, fos)
versions_check_result = connection.get_system_version()
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
7dde5a888030c37e55eb6f400d27220026f4bf54 | 9d16c9badcc3d30ec7eb1c3caa73f3ecd2d8161e | /blog_project/settings.py | 6466a803a5d69312122aebf75c641987bc947d10 | [] | no_license | Jordan-Rob/dj-blog | aca34eb2cd737ee3fd806eb360b615ceceedc9c4 | 81a1414c1213dd3c8731c1195a11f7c456b973d6 | refs/heads/master | 2021-09-27T18:55:48.685682 | 2020-02-19T07:57:49 | 2020-02-19T07:57:49 | 241,149,581 | 0 | 1 | null | 2021-09-22T18:38:44 | 2020-02-17T16:04:21 | Python | UTF-8 | Python | false | false | 3,540 | py | """
Django settings for blog_project project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+q*^r!04#6#g*pgmf#m_&)4p^v-x#hf^g4a44f$45e$mhsn5m9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'accounts.apps.AccountsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
| [
"jordanrob709@gmail.com"
] | jordanrob709@gmail.com |
7a46a14e687412faa1f5cc1ed42e79d9948cbd85 | db69daa1b517b539d78e4ab79691c33fdb410e8f | /check_friend.py | d31cd515ec1cdaad92e1d1894ae6181aa6afd5ee | [
"MIT"
] | permissive | team55/vkbot | 4cd49faefda81db4aae7db6c0bb6d2204097494f | 34705106560dbf6d96eee8b21cfd6d78e05646ef | refs/heads/master | 2021-01-21T01:39:28.846429 | 2016-06-26T11:16:15 | 2016-06-26T11:16:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | import time
import config
import accounts
fields = 'photo_50,country,last_seen'
s = open(accounts.getFile('allowed.txt'), encoding='utf-8').readlines()
noadd = set(map(int, open(accounts.getFile('noadd.txt')).read().split()))
allowed = set(s[0] + ' ')
s = s[1].split()
offline_allowed = config.get('check_friend.offline_allowed', 'i')
def writeNoadd():
with open(accounts.getFile('noadd.txt'), 'w') as f:
f.write('\n'.join(map(str, sorted(noadd))))
def appendNoadd(users):
noadd.update(users)
with open(accounts.getFile('noadd.txt'), 'a') as f:
f.write('\n' + '\n'.join(map(str, sorted(users))))
def check_char(c):
return c in allowed
checks = [
(lambda fr:'deactivated' not in fr, 'Account is deactivated'),
(lambda fr:fr['photo_50'] and not fr['photo_50'].endswith('camera_50.png'), 'No avatar'),
(lambda fr:fr.get('country', {'id':0})['id'] in [0, 1, 2, 3], 'Bad country'),
(lambda fr:all(check_char(i) for i in fr['first_name'] + fr['last_name']), 'Bad characters in name'),
(lambda fr:'last_seen' in fr and time.time() - fr['last_seen']['time'] < 3600 * 24 * offline_allowed, 'Offline too long'),
(lambda fr:not any(i in (fr['first_name'] + ' ' + fr['last_name']).lower() for i in s), 'Bad substring in name'),
(lambda fr:fr['id'] not in noadd, 'Ignored'),
(lambda fr:fr['first_name'] != fr['last_name'], 'First name equal to last name'),
]
def is_good(fr, need_reason=False):
reasons = []
for fun, msg in checks:
if not fun(fr):
if need_reason:
reasons.append(msg)
else:
return False
if need_reason:
return ', '.join(reasons) or None
else:
return True
| [
"kalinochkind@gmail.com"
] | kalinochkind@gmail.com |
7f8182d84f68de9ced11511734d8a10f9cf0764e | 9c5116ab446a0fba4dfaaa1685cbd3a1042dc054 | /kubernetes/test/test_v1_subject_rules_review_spec.py | e302de4235058403d19052db7541b474def21908 | [
"Apache-2.0"
] | permissive | caruccio/client-python | fc11a354ce15507c94308e35b6790b6776e01e6e | cb65186027ce68beedcd7752c488b8e3b5c0968e | refs/heads/master | 2021-01-25T08:18:45.601502 | 2017-06-08T13:14:06 | 2017-06-08T13:14:06 | 93,747,698 | 0 | 0 | null | 2017-06-08T12:37:32 | 2017-06-08T12:37:32 | null | UTF-8 | Python | false | false | 4,236 | py | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use kubernetes.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a kubernetes.client. By listing and beginning a watch from the returned resourceVersion, kubernetes.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so kubernetes.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. 
Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but kubernetes.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_subject_rules_review_spec import V1SubjectRulesReviewSpec
class TestV1SubjectRulesReviewSpec(unittest.TestCase):
""" V1SubjectRulesReviewSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SubjectRulesReviewSpec(self):
"""
Test V1SubjectRulesReviewSpec
"""
model = kubernetes.client.models.v1_subject_rules_review_spec.V1SubjectRulesReviewSpec()
if __name__ == '__main__':
unittest.main()
| [
"mateus.caruccio@getupcloud.com"
] | mateus.caruccio@getupcloud.com |
97b226cbbe0e31725743a250e4d4740cda6c9572 | 5c3ae39ce3964fab73959052cdece57c263ad52c | /tests/unit/test_payment_method_gateway.py | 786d846ab0f5ab21b7ff22d5d911bae42ad6b95f | [
"MIT"
] | permissive | maneeshd/braintree_python | bb072f8db300797338cf3ccbfa755a45eabb0db2 | 4aa3f4b8a376ea81bf16a053d840efe55ae13675 | refs/heads/master | 2023-08-31T12:04:24.420050 | 2020-02-29T06:40:34 | 2020-02-29T06:48:49 | 243,717,955 | 0 | 0 | MIT | 2020-10-02T20:21:05 | 2020-02-28T08:50:21 | Python | UTF-8 | Python | false | false | 6,835 | py | from tests.test_helper import *
from braintree.payment_method_gateway import PaymentMethodGateway
if sys.version_info[0] == 2:
from mock import MagicMock
else:
from unittest.mock import MagicMock
class TestPaymentMethodGateway(unittest.TestCase):
def test_create_signature(self):
actual_signature = PaymentMethod.signature("create")
expected_signature = [
"billing_address_id",
"cardholder_name",
"customer_id",
"cvv",
"device_data",
"device_session_id",
"expiration_date",
"expiration_month",
"expiration_year",
"number",
"payment_method_nonce",
"paypal_refresh_token",
# NEXT_MAJOR_VERSION remove this parameter as it's been ignored in the gateway
"paypal_vault_without_upgrade",
"token",
{
"billing_address": Address.create_signature()},
{
"options": [
"fail_on_duplicate_payment_method",
"make_default",
"us_bank_account_verification_method",
"verification_merchant_account_id",
"verify_card",
"verification_amount",
"verification_account_type",
{
"adyen":[
"overwrite_brand",
"selected_brand"
]
},
{
"paypal":[
"payee_email",
"order_id",
"custom_field",
"description",
"amount",
{
"shipping":[
"company",
"country_code_alpha2",
"country_code_alpha3",
"country_code_numeric",
"country_name",
"customer_id",
"extended_address",
"first_name",
"last_name",
"locality",
"postal_code",
"region",
"street_address"
]
},
]
},
]
}
]
self.assertEqual(expected_signature, actual_signature)
def test_update_signature(self):
actual_signature = PaymentMethod.update_signature()
expected_signature = [
"billing_address_id",
"cardholder_name",
"cvv",
"device_session_id",
"expiration_date",
"expiration_month",
"expiration_year",
"number",
"token",
"venmo_sdk_payment_method_code",
"device_data",
"fraud_merchant_id",
"payment_method_nonce",
{
"options": [
"make_default",
"us_bank_account_verification_method",
"verify_card",
"verification_amount",
"verification_merchant_account_id",
"verification_account_type",
"venmo_sdk_session",
{
"adyen":[
"overwrite_brand",
"selected_brand"
]
}
]
},
{
"billing_address" : Address.update_signature() + [{"options": ["update_existing"]}]
}
]
self.assertEqual(expected_signature, actual_signature)
def test_nonce_grant_params(self):
"""
We validate parameters to PaymentMethod.grant properly
"""
payment_method_gateway = PaymentMethodGateway(BraintreeGateway(None))
options = { "include_billing_postal_code": True }
with self.assertRaises(ValueError):
payment_method_gateway.grant("", options)
with self.assertRaises(ValueError):
payment_method_gateway.grant("\t", False)
with self.assertRaises(ValueError):
payment_method_gateway.grant(None, True)
def test_nonce_revoke_params(self):
payment_method_gateway = PaymentMethodGateway(BraintreeGateway(None))
with self.assertRaises(ValueError):
payment_method_gateway.revoke("")
with self.assertRaises(ValueError):
payment_method_gateway.revoke("\t")
with self.assertRaises(ValueError):
payment_method_gateway.revoke(None)
def test_delete_with_revoke_all_grants_value_as_true(self):
payment_method_gateway, http_mock = self.setup_payment_method_gateway_and_mock_http()
payment_method_gateway.delete("some_token", {"revoke_all_grants": True})
self.assertTrue("delete('/merchants/integration_merchant_id/payment_methods/any/some_token?revoke_all_grants=true')" in str(http_mock.mock_calls))
def test_delete_with_revoke_all_grants_value_as_false(self):
payment_method_gateway, http_mock = self.setup_payment_method_gateway_and_mock_http()
payment_method_gateway.delete("some_token", {"revoke_all_grants": False})
self.assertTrue("delete('/merchants/integration_merchant_id/payment_methods/any/some_token?revoke_all_grants=false')" in str(http_mock.mock_calls))
def test_delete_without_revoke_all_grants(self):
payment_method_gateway, http_mock = self.setup_payment_method_gateway_and_mock_http()
payment_method_gateway.delete("some_token")
self.assertTrue("delete('/merchants/integration_merchant_id/payment_methods/any/some_token')" in str(http_mock.mock_calls))
def test_delete_with_invalid_keys_to_raise_error(self):
payment_method_gateway, http_mock = self.setup_payment_method_gateway_and_mock_http()
with self.assertRaises(KeyError):
payment_method_gateway.delete("some_token", {"invalid_keys": False})
def setup_payment_method_gateway_and_mock_http(self):
braintree_gateway = BraintreeGateway(Configuration.instantiate())
payment_method_gateway = PaymentMethodGateway(braintree_gateway)
http_mock = MagicMock(name='config.http.delete')
braintree_gateway.config.http = http_mock
return payment_method_gateway, http_mock
| [
"code@getbraintree.com"
] | code@getbraintree.com |
6408cc3a3d782a6b3d7e64ff7926e7380008c136 | 16631cf7cd4a70f2cd2750851649d3eff5e17724 | /2019/day02/part2.py | ec6da2e434b6d2de88016e4776ea6087928eb9f4 | [] | no_license | kynax/AdventOfCode | 1dd609a3308d733f2dd7d4ea00508d2da73180b9 | 36a339241dd7a31ebe08a73e5efa599e5faeea1a | refs/heads/master | 2022-12-21T13:32:52.591068 | 2022-12-16T22:41:30 | 2022-12-16T22:41:30 | 48,439,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import sys
from intcode import *
m = [int(l) for l in sys.stdin.readline().split(',')]
for verb in range(100):
for noun in range(100):
c = IntCode()
c.mem_init(m)
c.mem_set(1, noun)
c.mem_set(2, verb)
c.run()
if c.result() == 19690720:
print(100 * noun + verb)
| [
"guilemay@gmail.com"
] | guilemay@gmail.com |
a91390aea6a8dc9e7b2c54cfb9d54260053f026d | 3a784e3d612cfc58d73eb017b2f1b068a570d55c | /z3/fancy.py | 1bad9d13f5828658b4960d33dcd6715690aa6461 | [] | no_license | DialloMamadou/PPC | 23f527671007952b0b7707716a367aa47b709842 | 03e508a2b2f3314fbb40eab345506b62fcb8e1da | refs/heads/master | 2020-05-23T15:06:43.588228 | 2019-05-05T19:33:09 | 2019-05-05T19:33:09 | 186,819,656 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | #!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Mr Greenguest puzzle (a.k.a fancy dress problem) in Z3
#
# Problem (and LPL) code in
#
# http://diuflx71.unifr.ch/lpl/GetModel?name=/demo/demo2
#
# """
# (** Mr. Greenfan wants to give a dress party where the male guests
# * must wear green dresses. The following rules are given:
# * 1 If someone wears a green tie he has to wear a green shirt.
# * 2 A guest may only wear green socks and a green shirt
# * if he wears a green tie or a green hat.
# * 3 A guest wearing a green shirt or a green hat or who does
# * not wear green socks must wear a green tie.
# * 4 A guest who is not dressed according to rules 1-3 must
# * pay a $11 entrance fee.
# * Mr Greenguest wants to participate but owns only a green shirt
# * (otherwise he would have to pay one for $9). He could buy
# * a green tie for $10, a green hat (used) for $2 and green socks
# * for $12.
# * What is the cheapest solution for Mr Greenguest to participate?
# *)
# """
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
sol = Solver()
# variables
# t: tie
# h: hat
# r: shirt
# s: socks
# n: entrance fee
[t,h,r,s,n] = Bools('t h r s n')
cost = makeIntVar(sol,"cost",0,100)
# constraints
# This is a straight translation from the LPL code
# ( (t->r) \/ n)
sol.add( Or(Implies(t,r), n))
# ( ((s \/ r) -> (t \/ h)) \/ n )
sol.add( Or(Implies(Or(s,r), Or(t,h)), n))
# ( ((r \/ h \/ not s) -> t) \/ n )
sol.add(Or( Implies(Or(r, h, Not(s)), t), n))
sol.add(cost == 10*t + 2*h + 12*s + 11*n)
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print "cost:", mod.eval(cost),
print [(x, mod.eval(x)) for x in [t,h,r,s,n]]
getLessSolution(sol,mod,cost)
| [
"hakank@gmail.com"
] | hakank@gmail.com |
1b3d67978f6b4b17f7e6bd2541a0308600324518 | aa9aa0868d857d64603e7b0e9e8cff0e2bbdf189 | /server/migrations/0059_machine_report_format.py | dd178bc844c43e713743a8291a09c6e2fc397df5 | [
"Apache-2.0"
] | permissive | haribert/sal | 2ecafa7d5b7c812f860465f7f4511209d8916526 | 9c80cf9b063ba1cb1fb7649dc6aba7f032de261b | refs/heads/master | 2021-09-06T05:07:46.868175 | 2018-02-02T16:12:44 | 2018-02-02T16:12:44 | 112,587,211 | 0 | 0 | null | 2017-11-30T08:44:43 | 2017-11-30T08:44:43 | null | UTF-8 | Python | false | false | 557 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-09-30 12:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0058_auto_20170822_1430'),
]
operations = [
migrations.AddField(
model_name='machine',
name='report_format',
field=models.CharField(choices=[(b'base64', b'base64'), (b'base64bz2', b'base64bz2')], default=b'base64bz2', editable=False, max_length=256),
),
]
| [
"graham@grahamgilbert.com"
] | graham@grahamgilbert.com |
a84b044c4ec14bb86cb2f2cf4cafa93abd776f37 | bc6b561958649c391c159d4dd3363c60eeabc7e4 | /mayan/apps/file_caching/tests/test_events.py | f64952560df1b1a38aab02077d4856121e0f301a | [
"Apache-2.0"
] | permissive | chrisranjana/Mayan-EDMS | 37deb105cda268768fea502491ae875ff905e0e9 | 34b414ce49a2eb156e27dc1a2915e52121c9d1b7 | refs/heads/master | 2020-12-22T13:50:41.263625 | 2020-01-28T18:45:24 | 2020-01-28T18:45:24 | 236,804,825 | 0 | 1 | NOASSERTION | 2020-01-28T18:12:53 | 2020-01-28T18:12:52 | null | UTF-8 | Python | false | false | 1,096 | py | from __future__ import unicode_literals
from actstream.models import Action
from mayan.apps.common.tests.base import BaseTestCase
from ..events import event_cache_created, event_cache_purged
from ..models import Cache
from .mixins import CacheTestMixin
class CacheEventsTestCase(CacheTestMixin, BaseTestCase):
def test_cache_create_event(self):
action_count = Action.objects.count()
self._create_test_cache()
self.assertEqual(Action.objects.count(), action_count + 1)
event = Action.objects.first()
cache = Cache.objects.last()
self.assertEqual(event.verb, event_cache_created.id)
self.assertEqual(event.target, cache)
def test_cache_purge_event(self):
self._create_test_cache()
action_count = Action.objects.count()
self.test_cache.purge()
self.assertEqual(Action.objects.count(), action_count + 1)
event = Action.objects.first()
cache = Cache.objects.last()
self.assertEqual(event.verb, event_cache_purged.id)
self.assertEqual(event.target, cache)
| [
"roberto.rosario@mayan-edms.com"
] | roberto.rosario@mayan-edms.com |
a346ab92b72621f5e8966b5d8b20365ea9816590 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02854/s614017761.py | 93c7fca03d5483bad4cda9f6e10005518c14e660 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | n = int(input())
a = list(map(int,input().split()))
r = ans = sum(a)
l = 0
ans = float('inf')
for i in a:
r -= i
l += i
ans = min(ans,abs(r-l))
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b3ca48aa937f678d46460cb290f303d6d3d024ae | d11a77c7230880973fc48cb64f86f621b642b3e9 | /solutions/video_similarity_search/quick_deploy/server/src/operations/search.py | 3087edb4814e19e9ab7cc1f6b9f0f15189c46203 | [
"Apache-2.0"
] | permissive | parsa-ra/bootcamp | 22bd6082948aad13b14537582b321ae570b4dc7a | f881a562751c673f45eaba063803adcb472fc1da | refs/heads/master | 2023-08-10T19:50:15.334127 | 2021-09-18T07:16:16 | 2021-09-18T07:16:16 | 407,830,782 | 0 | 0 | Apache-2.0 | 2021-09-18T10:36:49 | 2021-09-18T10:36:48 | null | UTF-8 | Python | false | false | 838 | py | import sys
from logs import LOGGER
sys.path.append("..")
from config import TOP_K
from config import DEFAULT_TABLE
def do_search(host, table_name, img_path, model, milvus_client, mysql_cli):
if not table_name:
table_name = DEFAULT_TABLE
try:
feat = model.resnet50_extract_feat(img_path)
vectors = milvus_client.search_vectors(table_name, [feat], TOP_K)
vids = [str(x.id) for x in vectors[0]]
paths = mysql_cli.search_by_milvus_ids(vids, table_name)
distances = [x.distance for x in vectors[0]]
for i in range(len(paths)):
tmp = "http://" + str(host) + "/data?gif_path=" + str(paths[i])
paths[i] = tmp
return paths, distances
except Exception as e:
LOGGER.error(" Error with search : {}".format(e))
sys.exit(1) | [
"shiyu.chen@zilliz.com"
] | shiyu.chen@zilliz.com |
e1bfb943e312a2e57db0c78c33fa8d0db2f45f44 | 81f999d6f8e622542212e6fc2b5e328b06ced75d | /admin/post.py | b02de7c9f90fe7b52e9071d0c01a5cb86bcc7e90 | [] | no_license | lujinda/zjypan | 37beab246b1ceb84ae24330b742d3a9bf7a635a5 | fcc2a8ff221eeaebaced84735b3e12b3584efc8c | refs/heads/master | 2021-01-01T20:49:04.984315 | 2015-07-12T08:25:33 | 2015-07-12T08:25:33 | 29,416,929 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | #!/usr/bin/env python
#coding:utf8
# Author : tuxpy
# Email : q8886888@qq.com
# Last modified : 2015-02-22 21:56:00
# Filename : admin/post.py
# Description :
from .base import AdminHandler
from page.do import write_post, get_post_list, get_post, del_post
from public.do import swith_time
from lib.wrap import auth_log_save
class AdminListPostHandler(AdminHandler):
def get(self):
self.render('post/list.html', post_list = get_post_list(),
swith_time = swith_time)
@auth_log_save
def post(self):
action = self.get_query_argument('action')
assert action in ('del', )
post_uuid_list = self.get_arguments('checked_post')
assert post_uuid_list
do_func = getattr(self, 'do_' + action, None)
self.redirect(self.referer or 'list')
return do_func(post_uuid_list)
def do_del(self, post_uuid_list):
for post_uuid in post_uuid_list:
del_post(post_uuid)
return '删除通告'
class AdminWritePostHandler(AdminHandler):
def get(self):
post_uuid = self.get_query_argument('post_uuid', '')
post = get_post(post_uuid)
self.render('post/write.html', post = post)
@auth_log_save
def post(self):
post_title = self.get_argument('post_title')
post_content = self.get_argument('post_content')
post_important = self.get_argument('post_important', None) == 'yes'
post_uuid = self.get_argument('post_uuid', None)
write_post(post_title = post_title, post_content = post_content,
post_important = post_important, post_uuid = post_uuid)
self.redirect('list')
if post_uuid:
return '编辑通告'
else:
return '发布通告'
| [
"q8886888@gmail.com"
] | q8886888@gmail.com |
4a5864386c1107faa4a2a4a9a3af41fecc137e9c | 82cba93ed3339150dcbccc1e3a245f7284edb8ed | /t.py | 7c820b15876f323d1c459455ba089bdd030e14be | [] | no_license | boyi880322/python | c10ca906cb6db7e143071f7c830eb9d9741ee05b | 375e6d532f229f5a508ca8396b6cd1aa77aa6c46 | refs/heads/main | 2023-02-12T13:42:29.393617 | 2021-01-12T02:24:35 | 2021-01-12T02:24:35 | 325,202,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | name = input("姓名:")
while name != "":
h = float(input("身高(公尺):"))
w = int(input("體重(公斤):"))
BMI = w / h ** 2
print("{}的BMI是:{}".format(name, BMI))
name = input("姓名:") | [
"skynet.tw@gmail.com"
] | skynet.tw@gmail.com |
a0545f0e647e3e695ae19d66971e2bec4f38d109 | 832b4121b7d49b56ce9446cb1437b401a037b43f | /python/part03-scripts/save_the_world_v1.py | a19df8f23e84963a54b2391eedf291aad34aa769 | [] | no_license | dleehr/gcb-academy-python | 407ad2fef3f73d7e2485c7839b20c0cf53050e07 | 3a2d191ac33b8e08c2380856e01bbc65b5fd5eec | refs/heads/master | 2021-01-18T14:01:53.320240 | 2015-02-03T19:05:02 | 2015-02-03T19:05:02 | 29,843,584 | 0 | 0 | null | 2015-02-02T18:56:08 | 2015-01-26T03:38:36 | Python | UTF-8 | Python | false | false | 479 | py | import fileinput
import re
for line in fileinput.input():
match = re.search('^(.*)\t(20\d\d)-(\d\d)-(\d\d)\t(\d+\.?\d*)$', line)
if match:
fields = [
match.group(2), # year
match.group(3), # month
match.group(4), # day
match.group(1), # site
match.group(5) # value
]
print ','.join(fields)
else:
print "Line {} did not match!".format(fileinput.lineno())
| [
"whitews@gmail.com"
] | whitews@gmail.com |
c38311c336acbeb73379afa6ba0e49ecee97a5c4 | 04875545151aa1ef547c2c47ae36b9c90254317b | /example/SyntheticParameterized/basis.py | 0b99aa22b0201ff5e38f543e5a7992c74b0cc8a5 | [
"MIT"
] | permissive | treverhines/PSGI | 5a49da0a24e2460350b280f2437229de51ea3364 | 356ca2208fc24d51062417126807d79dd79df73c | refs/heads/master | 2021-03-12T19:57:37.642196 | 2015-11-14T02:08:20 | 2015-11-14T02:08:20 | 39,227,339 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,683 | py | #!/usr/bin/env python
#
# This script defines variables and functions which are needed for
# plotting purposes and for using WriteRegularization.py which forms a
# regularization matrix by collocation
#
# Variables in all caps are required for other scripts to run and
# this script must also define the slip and fluidity basis functions
#
from __future__ import division
from rbf.bspline import augmented_knots
from rbf.bspline import natural_knots
from rbf.bspline import bspnd as bspline_nd
from modest import linear_to_array_index
from modest import Perturb
import transform as trans
import pickle
import numpy as np
## Define parameters for slip basis function geometry
######################################################################
FAULT_ANCHOR = [[-116.0,32.0]]
FAULT_LENGTH = [50000.0]
FAULT_WIDTH = [20000.0]
FAULT_STRIKE = [0.0]
FAULT_DIP = [60.0]
FAULT_NLENGTH = [10]
FAULT_NWIDTH = [4]
FAULT_ORDER = [[0,0]]
FLUIDITY_ANCHOR = [-119.25,35.0]
FLUIDITY_STRIKE = 90.0
FLUIDITY_LENGTH = 600000.0
FLUIDITY_WIDTH = 600000.0
FLUIDITY_THICKNESS = 150000.0
FLUIDITY_NLENGTH = 1
FLUIDITY_NWIDTH = 1
FLUIDITY_NTHICKNESS = 5
FLUIDITY_ORDER = [0,0,3]
######################################################################
FAULT_N = sum(l*w for l,w in zip(FAULT_NLENGTH,FAULT_NWIDTH))
FLUIDITY_N = FLUIDITY_NLENGTH*FLUIDITY_NWIDTH*FLUIDITY_NTHICKNESS
FAULT_SEGMENTS = len(FAULT_ANCHOR)
FAULT_TRANSFORMS = []
FAULT_KNOTS = []
BASEMAP = pickle.load(open('basemap.pkl','r'))
# find knots for faults
for d in range(FAULT_SEGMENTS):
xc,yc = BASEMAP(*FAULT_ANCHOR[d])
t = trans.point_stretch([FAULT_LENGTH[d],FAULT_WIDTH[d],1.0])
t += trans.point_rotation_x(FAULT_DIP[d]*np.pi/180)
t += trans.point_rotation_z(np.pi/2.0 - FAULT_STRIKE[d]*np.pi/180)
t += trans.point_translation([xc,yc,0.0])
# create knots defining B-splines for slip on a rectangle x = [0,1]
# and y = [-1,0]
fault_knots_x = natural_knots(FAULT_NLENGTH[d],
FAULT_ORDER[d][0],side='both')
fault_knots_y = natural_knots(FAULT_NWIDTH[d],
FAULT_ORDER[d][1],side='both') - 1.0
FAULT_TRANSFORMS += [t]
FAULT_KNOTS += [(fault_knots_x,fault_knots_y)]
# find knots for fluidity
xc,yc = BASEMAP(*FLUIDITY_ANCHOR)
t = trans.point_stretch([FLUIDITY_LENGTH,FLUIDITY_WIDTH,FLUIDITY_THICKNESS])
t += trans.point_rotation_z(np.pi/2.0 - FLUIDITY_STRIKE*np.pi/180)
t += trans.point_translation([xc,yc,0.0])
fluidity_knots_x = natural_knots(FLUIDITY_NLENGTH,
FLUIDITY_ORDER[0],side='both')
fluidity_knots_y = natural_knots(FLUIDITY_NWIDTH,
FLUIDITY_ORDER[1],side='both') - 1.0
fluidity_knots_z = natural_knots(FLUIDITY_NTHICKNESS,
FLUIDITY_ORDER[2],side='none') - 1.0
FLUIDITY_TRANSFORM = t
FLUIDITY_KNOTS = (fluidity_knots_x,fluidity_knots_y,fluidity_knots_z)
def slip(x,coeff,segment=None,diff=None):
'''
takes positions, x, and slip coefficients, coeff, and returns the
vaues for slip. The segment key word is specified to only use
coefficients corresponding to the specified fault segment. if no
segment is specified then all coefficients will be used
'''
minN = 0
s = segment
out = np.zeros(len(x))
assert len(coeff) == FAULT_N, (
'coefficient list must have length %s' % FAULT_N)
if s is None:
for d in range(FAULT_SEGMENTS):
t = FAULT_TRANSFORMS[d].inverse()
fx = t(x)[:,[0,1]]
shape = FAULT_NLENGTH[d],FAULT_NWIDTH[d]
order = FAULT_ORDER[d]
maxN = minN + np.prod(shape)
for n in range(minN,maxN):
idx = linear_to_array_index(n-minN,shape)
out += coeff[n]*bspline_nd(fx,FAULT_KNOTS[d],idx,order,diff=diff)
minN += np.prod(shape)
else:
for d in range(s):
shape = FAULT_NLENGTH[d],FAULT_NWIDTH[d]
maxN = minN + np.prod(shape)
minN += np.prod(shape)
shape = FAULT_NLENGTH[s],FAULT_NWIDTH[s]
maxN = minN + np.prod(shape)
t = FAULT_TRANSFORMS[s].inverse()
fx = t(x)[:,[0,1]]
order = FAULT_ORDER[s]
for n in range(minN,maxN):
idx = linear_to_array_index(n-minN,shape)
out += coeff[n]*bspline_nd(fx,FAULT_KNOTS[s],idx,order,diff=diff)
minN += np.prod(shape)
return out
def fluidity(x,coeff,diff=None):
out = np.zeros(len(x))
t = FLUIDITY_TRANSFORM.inverse()
fx = t(x)
shape = FLUIDITY_NLENGTH,FLUIDITY_NWIDTH,FLUIDITY_NTHICKNESS
order = FLUIDITY_ORDER
for n in range(FLUIDITY_N):
idx = linear_to_array_index(n,shape)
out += coeff[n]*bspline_nd(fx,FLUIDITY_KNOTS,idx,order,diff=diff)
return out
if __name__ == '__main__':
from myplot.xsection import XSection
import mayavi.mlab
bm = BASEMAP
sta_array = np.loadtxt('stations.txt',dtype=str)
sta_pos = np.array(sta_array[:,[1,2]],dtype=float)
sta_pos_x,sta_pos_y = bm(sta_pos[:,0],sta_pos[:,1])
fluidity_transforms = []
x,y = bm(*FLUIDITY_ANCHOR[:2])
length = FLUIDITY_LENGTH
width = FLUIDITY_WIDTH
thickness = FLUIDITY_THICKNESS
t = trans.point_stretch([FLUIDITY_LENGTH,
FLUIDITY_THICKNESS,
1.0])
t += trans.point_rotation_x(np.pi/2.0)
t += trans.point_translation([0.0,-width/2.0,0.0])
t += trans.point_rotation_z(np.pi/2.0 - FLUIDITY_STRIKE*np.pi/180)
t += trans.point_translation([x,y,0.0])
fluidity_transforms += [t]
t = trans.point_stretch([FLUIDITY_WIDTH,
FLUIDITY_THICKNESS,
1.0])
t += trans.point_rotation_x(np.pi/2.0)
t += trans.point_rotation_z(-np.pi/2.0)
t += trans.point_translation([FLUIDITY_LENGTH/2.0,
0.0,
0.0])
t += trans.point_rotation_z(np.pi/2.0 - FLUIDITY_STRIKE*np.pi/180)
t += trans.point_translation([x,y,0.0])
fluidity_transforms += [t]
xs1 = XSection(fluidity,
f_args=(np.random.random(FLUIDITY_N),),
base_square_y=(-1,0),
transforms = fluidity_transforms,
clim = (0,1))
xs2 = XSection(fluidity,
f_args=(np.random.random(FLUIDITY_N),),
base_square_y=(-1,0),
transforms = FAULT_TRANSFORMS)
xs1.draw()
xs2.draw(color=(0.2,0.2,0.2),opacity=0.5)
mayavi.mlab.points3d(sta_pos_x,sta_pos_y,0*sta_pos[:,1],scale_factor=10000)
xs1.view()
coeff = np.random.random(FAULT_N)
xs1 = XSection(slip,
f_args=(coeff,),
base_square_y=(-1,0),
transforms = FAULT_TRANSFORMS,
clim=(0,1))
xs1.draw()
xs1.view()
coeff = np.random.random(FLUIDITY_N)
| [
"treverhines@gmail.com"
] | treverhines@gmail.com |
27369ce439c746edb215195782810276fff77a6f | e389ca9d52230140038082e3111ce41db1c00514 | /SocialMedia_API/settings.py | ee05062973c3aadfdaccfa3fcdaaf56a19f68cc0 | [] | no_license | DharmendraB/SocialMedia_API | dbd94af3fb71ae1db6dbdaf25bb429bc4648399f | 9cc134525b90f3fbba31b7442688fc7baf41c0e1 | refs/heads/main | 2023-03-29T04:15:25.286277 | 2021-04-04T04:59:08 | 2021-04-04T04:59:08 | 354,458,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,197 | py | """
Django settings for SocialMedia_API project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2@!h#6ku@5g%d+e$0bjq%(nm%%7_%x*^ofgtkl97q6bj2cvngf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'social.apps.SocialConfig',
'registration',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'social_django', # <-- Here social-auth-app-django
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware', # <-- Here Social
]
ROOT_URLCONF = 'SocialMedia_API.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends', # <-- Here
'social_django.context_processors.login_redirect', # <-- Here
],
},
},
]
AUTHENTICATION_BACKENDS = (
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.github.GithubOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
WSGI_APPLICATION = 'SocialMedia_API.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
#Managing Media Code here
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
#Registration import Code here
ACCOUNT_ACTIVATION_DAYS=3
EMAIL_HOST= 'smtp.gmail.com'
EMAIL_HOST_USER= 'XXXXXX@gmail.com'
EMAIL_HOST_PASSWORD= 'XXXXXXXX'
EMAIL_PORT= 587
EMAIL_USE_TLS= True
#Direct Login Redirect Code here
LOGIN_REDIRECT_URL = "/"
SOCIAL_AUTH_FACEBOOK_KEY = 'XXXXXXX' # App ID
SOCIAL_AUTH_FACEBOOK_SECRET = 'XXXXXXX' # App Secret | [
"ghldharmendra@gmail.com"
] | ghldharmendra@gmail.com |
ef7fdd81b5556be3d11abd7c8ad2872d3efcf5dc | 713f9168a7ba68740bb9b4ea6994e853a56d2d5c | /python/2019-10-07/shout.py | 075131c1205615b5f917df669e9b52537c71c00b | [] | no_license | marko-knoebl/courses-code | ba7723c9a61861b037422670b98276fed41060e2 | faeaa31c9a156a02e4e9169bc16f229cdaee085d | refs/heads/master | 2022-12-29T02:13:12.653745 | 2022-12-16T09:21:18 | 2022-12-16T09:21:18 | 142,756,698 | 16 | 10 | null | 2022-03-08T22:30:11 | 2018-07-29T11:51:04 | Jupyter Notebook | UTF-8 | Python | false | false | 321 | py | def shout(phrase, end="!"):
"""Prints a phrase in capital letters.
A second optional parameter can mark the end.
"""
# .upper() is a string function that converts the
# string to uppercase
upper = phrase.upper()
print(upper + end)
shout("hello") # HELLO!
shout("hey")
shout("hi", ".") # HI.
| [
"marko.kn@gmail.com"
] | marko.kn@gmail.com |
0fc4a06e03869a0850aefd5b5e8684092faaa024 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_108/ch31_2020_03_23_21_09_21_287563.py | ae11e5f03385199948261d80790e123dffe6af98 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | def eh_primo(n):
if n < 2:
return False
elif n == 2:
return True
for x in range(2,n)
if n % x == 0:
return False
return True | [
"you@example.com"
] | you@example.com |
30efbcca2190039ffec2e1304db5a08d4fe6ba75 | 0103046cd77e9f86ccde477736de36bba766ceb6 | /tests/sentry/integrations/test_issues.py | 9869431375b09aa5b193f0373502d1f44001c9f0 | [
"BUSL-1.1",
"Apache-2.0"
] | permissive | kaozdl/sentry | ad41ada649a20300e9f2fe69050200cfbf738a63 | 63d698f5294f64a8c206b4c741e2a11be1f9a9be | refs/heads/master | 2021-06-21T18:24:21.713064 | 2021-03-04T19:45:20 | 2021-03-04T19:45:20 | 198,681,569 | 0 | 0 | BSD-3-Clause | 2019-07-24T17:32:29 | 2019-07-24T17:32:28 | null | UTF-8 | Python | false | false | 9,031 | py | from sentry.integrations.example.integration import AliasedIntegrationProvider
from sentry.models import (
ExternalIssue,
Group,
GroupStatus,
GroupLink,
Integration,
OrganizationIntegration,
)
from sentry.testutils import TestCase
class IssueSyncIntegration(TestCase):
def test_status_sync_inbound_resolve(self):
group = self.group
assert group.status == GroupStatus.UNRESOLVED
integration = Integration.objects.create(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
).update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
with self.feature("organizations:integrations-issue-sync"):
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "done"}},
)
assert Group.objects.get(id=group.id).status == GroupStatus.RESOLVED
def test_status_sync_inbound_unresolve(self):
group = self.group
group.status = GroupStatus.RESOLVED
group.save()
assert group.status == GroupStatus.RESOLVED
integration = Integration.objects.create(provider="example", external_id="123456")
integration.add_organization(group.organization, self.user)
OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
).update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
)
external_issue = ExternalIssue.objects.create(
organization_id=group.organization.id, integration_id=integration.id, key="APP-123"
)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)
installation = integration.get_installation(group.organization.id)
with self.feature("organizations:integrations-issue-sync"):
installation.sync_status_inbound(
external_issue.key,
{"project_id": "APP", "status": {"id": "12345", "category": "in_progress"}},
)
assert Group.objects.get(id=group.id).status == GroupStatus.UNRESOLVED
class IssueDefaultTest(TestCase):
def setUp(self):
self.group.status = GroupStatus.RESOLVED
self.group.save()
integration = Integration.objects.create(provider="example", external_id="123456")
integration.add_organization(self.group.organization, self.user)
self.external_issue = ExternalIssue.objects.create(
organization_id=self.group.organization.id, integration_id=integration.id, key="APP-123"
)
self.group_link = GroupLink.objects.create(
group_id=self.group.id,
project_id=self.group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=self.external_issue.id,
relationship=GroupLink.Relationship.references,
)
self.installation = integration.get_installation(self.group.organization.id)
def test_get_repository_choices(self):
default_repo, repo_choice = self.installation.get_repository_choices(self.group)
assert default_repo == "user/repo"
assert repo_choice == [("user/repo", "repo")]
def test_get_repository_choices_no_repos(self):
self.installation.get_repositories = lambda: []
default_repo, repo_choice = self.installation.get_repository_choices(self.group)
assert default_repo == ""
assert repo_choice == []
def test_get_repository_choices_default_repo(self):
self.installation.org_integration.config = {
"project_issue_defaults": {str(self.group.project_id): {"repo": "user/repo2"}}
}
self.installation.org_integration.save()
self.installation.get_repositories = lambda: [
{"name": "repo1", "identifier": "user/repo1"},
{"name": "repo2", "identifier": "user/repo2"},
]
default_repo, repo_choice = self.installation.get_repository_choices(self.group)
assert default_repo == "user/repo2"
assert repo_choice == [("user/repo1", "repo1"), ("user/repo2", "repo2")]
def test_store_issue_last_defaults_partial_update(self):
assert "project" in self.installation.get_persisted_default_config_fields()
assert "issueType" in self.installation.get_persisted_default_config_fields()
assert "assignedTo" in self.installation.get_persisted_user_default_config_fields()
assert "reportedBy" in self.installation.get_persisted_user_default_config_fields()
self.installation.store_issue_last_defaults(
self.project,
self.user,
{"project": "xyz", "issueType": "BUG", "assignedTo": "userA", "reportedBy": "userB"},
)
self.installation.store_issue_last_defaults(
self.project, self.user, {"issueType": "FEATURE", "assignedTo": "userC"}
)
# {} is commonly triggered by "link issue" flow
self.installation.store_issue_last_defaults(self.project, self.user, {})
assert self.installation.get_defaults(self.project, self.user) == {
"project": "xyz",
"issueType": "FEATURE",
"assignedTo": "userC",
"reportedBy": "userB",
}
def test_store_issue_last_defaults_multiple_projects(self):
assert "project" in self.installation.get_persisted_default_config_fields()
other_project = self.create_project(name="Foo", slug="foo", teams=[self.team])
self.installation.store_issue_last_defaults(
self.project, self.user, {"project": "xyz", "reportedBy": "userA"}
)
self.installation.store_issue_last_defaults(
other_project, self.user, {"project": "abc", "reportedBy": "userB"}
)
assert self.installation.get_defaults(self.project, self.user) == {
"project": "xyz",
"reportedBy": "userA",
}
assert self.installation.get_defaults(other_project, self.user) == {
"project": "abc",
"reportedBy": "userB",
}
def test_store_issue_last_defaults_for_user_multiple_providers(self):
other_integration = Integration.objects.create(provider=AliasedIntegrationProvider.key)
other_integration.add_organization(self.organization, self.user)
other_installation = other_integration.get_installation(self.organization.id)
self.installation.store_issue_last_defaults(
self.project, self.user, {"project": "xyz", "reportedBy": "userA"}
)
other_installation.store_issue_last_defaults(
self.project, self.user, {"project": "abc", "reportedBy": "userB"}
)
assert self.installation.get_defaults(self.project, self.user) == {
"project": "xyz",
"reportedBy": "userA",
}
assert other_installation.get_defaults(self.project, self.user) == {
"project": "abc",
"reportedBy": "userB",
}
def test_annotations(self):
label = self.installation.get_issue_display_name(self.external_issue)
link = self.installation.get_issue_url(self.external_issue.key)
assert self.installation.get_annotations_for_group_list([self.group]) == {
self.group.id: [f'<a href="{link}">{label}</a>']
}
integration = Integration.objects.create(provider="example", external_id="4444")
integration.add_organization(self.group.organization, self.user)
installation = integration.get_installation(self.group.organization.id)
assert installation.get_annotations_for_group_list([self.group]) == {self.group.id: []}
| [
"noreply@github.com"
] | kaozdl.noreply@github.com |
feb9dd83d3dcf1efadf822ae27bb091c2b222382 | b8c373c2265e894c2f6097457051290152caedc3 | /project_ini/questionnaire/migrations/0004_auto_20170312_2344.py | 52f1cb82be650eeb6d908c2d59af33d73dc22b12 | [] | no_license | alikhundmiri/ini_alpha | b17b8d4c7e6064a5c538d279453e53bbae2c042a | ef3a558287e9de8d01ddda354d850d03226a9bf4 | refs/heads/master | 2021-01-19T09:59:54.817258 | 2017-04-10T12:34:17 | 2017-04-10T12:34:17 | 87,807,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-12 23:44
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('questionnaire', '0003_auto_20170312_2327'),
]
operations = [
migrations.AlterField(
model_name='questions_info',
name='publish',
field=models.DateField(default=datetime.datetime(2017, 3, 12, 23, 44, 25, 275866, tzinfo=utc)),
),
]
| [
"salikhundmiri@gmail.com"
] | salikhundmiri@gmail.com |
9e45a9a79b2f82434136ae93f6a234460d3ce2c8 | 816232db2f21e193612eaa60eda0d5897d31caaf | /Inflearn/2일차-코드구현 능력/5.py | 75ab790ecc133625aaf2085bb608b959feb78934 | [] | no_license | Juyoung4/StudyAlgorithm | a60bfa7657eac57f59200bfa204aff1ad27c79f8 | 4b190e0bfeb268bef4be00ae9bedd9ca8946fbd6 | refs/heads/master | 2023-08-31T04:37:07.422641 | 2021-09-27T08:38:09 | 2021-09-27T08:38:09 | 282,757,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | # 점수 계산
"""
[문제]
여러 개의 OX 문제로 만들어진시험에서 연속적으로 답을 맞히는 경우에는 가산점을 주기 위해서 다음과 같이 점수 계산을 하기로 하였다
- 1번 문제가 맞는 경우에는 1점으로 계산한다.
- 앞의 문제에 대해서는 답을 틀리다가 답이 맞는 처음 문제는 1점으로 계산한다.
- 연속으로 문제의 답이 맞는 경우에서 두 번째 문제는 2점, 세 번째 문제는 3점, ..., K번째 문제는 K점으로 계산한다.
- 틀린 문제는 0점으로 계산한다.
(ex)
10 개의 OX 문제에서 답이 맞은 문제의 경우에는 1로 표시하고, 틀린 경우에는 0으로 표시하여
1011100110일때 점수 계산하는 방법은
1012300120 => 1+1+2+3+1+2=10 점이다.
[INPUT EX]
10
1 0 1 1 1 0 0 1 1 0
[OUTPUT EX]
10
"""
def solution(T):
score = list(map(int, input().split()))
if len(score) != T: return -1
add_ = 1 if score[0] else 0
total = 1 if score[0] else 0
for i in range(1, len(score)):
if not score[i]:
add_ = 0
print(add_, total)
continue
if not score[i-1] and score[i]:
add_ = 1
total += add_
print(add_, total)
continue
if score[i-1] and score[i]:
add_ += 1
total += add_
print(add_, total)
continue
return total
if __name__ == "__main__":
print(solution(int(input()))) | [
"vallot7@naver.com"
] | vallot7@naver.com |
bac92da3627b53e8162cbf94250ccf181fff620b | 21e177a4d828f4e0a003e9424c4952dbc0b47d29 | /lints/lint_sub_cert_province_must_appear.py | 0232e7991a8e0921aca00c46582fb7e3dd8ac4e6 | [] | no_license | 846468230/Plint | 1071277a55144bb3185347a58dd9787562fc0538 | c7e7ca27e5d04bbaa4e7ad71d8e86ec5c9388987 | refs/heads/master | 2020-05-15T12:11:22.358000 | 2019-04-19T11:46:05 | 2019-04-19T11:46:05 | 182,255,941 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | from lints import base
from cryptography import x509
from cryptography.x509.oid import NameOID
from util.time import Time
from util import ca
'''
'''
class subCertProvinceMustAppear(base.LintInterface):
def Initialize(self):
return 0
def CheckApplies(self,c):
return ca.IsSubscriberCert(c)
def Execute(self,c):
try:
subject = c.subject
if subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME) or subject.get_attributes_for_oid(NameOID.GIVEN_NAME) or subject.get_attributes_for_oid(NameOID.SURNAME):
if not subject.get_attributes_for_oid(NameOID.LOCALITY_NAME):
if not subject.get_attributes_for_oid(NameOID.STATE_OR_PROVINCE_NAME):
return base.LintResult(base.LintStatus.Error)
return base.LintResult(base.LintStatus.Pass)
except ValueError:
return base.LintResult(base.LintStatus.Fatal)
def init():
base.RegisterLint(base.Lint("e_sub_cert_province_must_appear","Subscriber Certificate: subject:stateOrProvinceName MUST appear if the subject:organizationName, subject:givenName, or subject:surname fields are present and subject:localityName is absent.","BRs: 7.1.4.2.2",base.LintSource.CABFBaselineRequirements,Time.CABGivenNameDate,subCertProvinceMustAppear())) | [
"846468230@qq.com"
] | 846468230@qq.com |
af39a1bf1eb073451ed5e06bcf042d850a88ea85 | edcc2f90e91cc781ed6e305daa5f6cb539533897 | /dataset/py150/utils/ast/child_only.py | b412d1e5a9d1e1bc4ae53d064ec6d2eb17c75efd | [
"MIT"
] | permissive | keiouok/naturalcc | d364639700d137720242a32b74b3ac48d0e94b76 | 7bab9a97331fafac1235fb32de829ff8d572320f | refs/heads/master | 2023-06-03T10:38:42.481107 | 2021-06-20T00:52:52 | 2021-06-20T00:52:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,323 | py | import sys
from ncc.data.constants import (
PAD,
SBT_LEFT_PARENTHESE,
SBT_RIGHT_PARENTHESE,
)
from ncc.data import tokenizer_funcs
from ..constants import (
RECURSION_DEPTH,
MAX_SUBTOKEN_LEN,
NODE_TMP,
)
from copy import deepcopy
# ignore those ast whose size is too large. Therefore set it as a small number
sys.setrecursionlimit(RECURSION_DEPTH) # recursion depth
def child_value2child_only(ast):
"""node['value'] => node['children']"""
for idx, node in ast.items():
value = node.get('value', None)
if value:
node.pop('value')
node['children'] = [value]
return ast
def pad_leaf_nodes(ast, max_len=MAX_SUBTOKEN_LEN):
'''
pad leaf node's child into [XX, [XX, ...]]
split token and pad it with PAD_TOKEN till reach MAX_TOKEN_LIST_LEN
e.g. VariableName -> [VariableName, [Variable, Name, PAD_TOKEN, PAD_TOKEN, ...]]
'''
for idx, node in ast.items():
if len(node['children']) == 1 and isinstance(node['children'][0], str):
subtokens = tokenizer_funcs._space_dpu_sub_tokenizer(node['children'][0])[:max_len]
subtokens.extend([PAD] * (max_len - len(subtokens)))
node['children'].append(subtokens)
return ast
def ast2sbt(ast, idx):
'''
build structure-based traversal SBT tree
ref: Deep Code Comment Generation
'''
if len(ast[idx]['children']) == 2 and type(ast[idx]['children'][1]) == list:
token = ast[idx]['type'] + '_' + ast[idx]['children'][0]
seq = [SBT_LEFT_PARENTHESE, token, SBT_RIGHT_PARENTHESE, token]
else:
token = ast[idx]['type']
seq = [SBT_LEFT_PARENTHESE, token]
for child_idx in ast[idx]['children']:
seq += ast2sbt(ast, str(child_idx))
seq += [SBT_RIGHT_PARENTHESE, token]
return seq
def get_root(ast):
"""get root node index"""
for idx, node in ast.items():
if node['parent'] is None:
return idx
def delete_root_with_unichild(ast):
"""
delete root node with only a child
because in such way, head node might be Program/Function/Error and its child is the code's AST
"""
for idx in sorted([idx for idx in ast.keys()], key=int):
if (ast[idx]['parent'] is None) and len(ast[idx]['children']) == 1:
child_idx = ast[idx]['children'][0]
ast[str(child_idx)]['parent'] = None
ast.pop(idx)
else:
break
return ast
def delete_nodes_with_unichild(ast):
'''
delete nodes with single child node
e.g. [1*NODEFIX1] -> [1*NODEFIX2] -> ['void'] => [1*NODEFIX1] -> ['void']
'''
def _dfs(idx):
node = ast[idx]
# get current node's children indices, if it's leaf node, ignore.
if not (len(node['children']) == 1 and isinstance(node['children'][0], str)):
child_ids = node['children']
else:
return # move to leaf node, return
# each ast tree generally is parsed from a method, so it has a "program" root node and a "method" node
# therefore, if current node is the root node with single child, we do not delete it
while (len(child_ids) == 1) and (node['parent'] is not None):
# update its parent's children
parent_node = ast[str(node['parent'])]
del_idx = parent_node['children'].index(int(idx))
parent_node['children'].pop(del_idx)
child_idx = child_ids[0]
# update its children's parent to its parent
ast[str(child_idx)]['parent'] = node['parent']
# update its parent's children
parent_node['children'].insert(del_idx, child_idx)
# delete itself
ast.pop(idx)
# update current info
idx = str(child_idx)
node = ast[idx]
# get current node's children indices, if it's leaf node, ignore.
if not (len(node['children']) == 1 and isinstance(node['children'][0], str)):
child_ids = node['children']
else:
return # move to leaf node, return
for idx in child_ids:
_dfs(str(idx))
idx = get_root(ast)
_dfs(idx)
return ast
def ast2bin_ast(ast):
'''ast tree -> binary ast tree'''
last_node_idx = sorted(ast.keys(), key=int)[-1]
def _dfs(idx):
node = ast[idx]
# get current node's children indices, if it's leaf node, ignore.
if not (len(node['children']) == 1 and isinstance(node['children'][0], str)):
child_ids = node['children']
else:
return # move to leaf node, return
if len(child_ids) > 2:
# add new node
nonlocal last_node_idx
last_node_idx = str(int(last_node_idx) + 1)
ast[last_node_idx] = {'type': NODE_TMP, 'parent': idx, 'children': child_ids[1:]}
# update node's children info
node['children'] = [child_ids[0], int(last_node_idx)]
# update other childen nodes' parent info
for child_idx in child_ids[1:]:
ast[str(child_idx)]['parent'] = last_node_idx
# update current node's children info
# get current node's children indices, if it's leaf node, ignore.
if not (len(node['children']) == 1 and isinstance(node['children'][0], str)):
child_ids = node['children']
else:
return # move to leaf node, return
for idx in child_ids:
_dfs(str(idx))
idx = get_root(ast)
_dfs(idx)
return ast
def reset_indices(ast):
'''rename ast tree's node indices with consecutive indices'''
if sorted(list(ast.keys())) == list(range(len(ast))):
return ast
# firstly, resort node index with a prefix "_", e.g. 0 => "_0"
_idx = 0
def _dfs(idx, _parent_idx):
nonlocal _idx
_new_idx, _idx = f'_{_idx}', _idx + 1 # update for next node
node = ast.pop(str(idx))
ast[_new_idx] = node
# update its parent's children
if node['parent'] is None:
pass # current node is root node, no need for update its children
else:
parent_node = ast[_parent_idx]
# update its index in its parent node
parent_node['children'][parent_node['children'].index(idx)] = _new_idx
# update parent index
node['parent'] = _parent_idx
if isinstance(node['children'][0], int): # non-leaf nodes, traverse its children nodes
# update its children nodes' parent
for child_idx in node['children']:
_dfs(child_idx, _parent_idx=_new_idx)
else:
return
root_idx = get_root(ast)
_dfs(root_idx, _parent_idx=None)
# recover name: from _* => *
node_ids = deepcopy(list(ast.keys()))
for idx in node_ids:
node = ast.pop(idx)
# update children index
if len(node['children']) > 1:
node['children'] = [int(child_idx[1:]) for child_idx in node['children']]
# update parent index
if node['parent'] == None:
pass
else:
node['parent'] = int(node['parent'][1:])
ast[int(idx[1:])] = node # _idx => idx
return ast
| [
"yanghece96@gmail.com"
] | yanghece96@gmail.com |
73f09faf8526f34d6d9fda58789407a5e7cc3123 | 5e255ad1360c90478393744586663741a9569c21 | /linebot/v3/audience/models/audience_group_type.py | 02893782b733c181cd30b2473e4ee4459e95ae99 | [
"Apache-2.0"
] | permissive | line/line-bot-sdk-python | d76268e8b542060d6eccbacc5dbfab16960ecc35 | cffd35948238ae24982173e30b1ea1e595bbefd9 | refs/heads/master | 2023-08-31T22:12:31.698183 | 2023-08-28T01:10:09 | 2023-08-28T01:10:09 | 70,553,423 | 1,898 | 1,181 | Apache-2.0 | 2023-09-11T05:14:07 | 2016-10-11T03:42:26 | Python | UTF-8 | Python | false | false | 1,017 | py | # coding: utf-8
"""
LINE Messaging API
This document describes LINE Messaging API. # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from aenum import Enum, no_arg
class AudienceGroupType(str, Enum):
"""
Audience group type
"""
"""
allowed enum values
"""
UPLOAD = 'UPLOAD'
CLICK = 'CLICK'
IMP = 'IMP'
CHAT_TAG = 'CHAT_TAG'
FRIEND_PATH = 'FRIEND_PATH'
RESERVATION = 'RESERVATION'
APP_EVENT = 'APP_EVENT'
VIDEO_VIEW = 'VIDEO_VIEW'
WEBTRAFFIC = 'WEBTRAFFIC'
IMAGE_CLICK = 'IMAGE_CLICK'
RICHMENU_IMP = 'RICHMENU_IMP'
RICHMENU_CLICK = 'RICHMENU_CLICK'
@classmethod
def from_json(cls, json_str: str) -> AudienceGroupType:
"""Create an instance of AudienceGroupType from a JSON string"""
return AudienceGroupType(json.loads(json_str))
| [
"noreply@github.com"
] | line.noreply@github.com |
21ea2e613c180d34f365b8fba3bcd8715f8abe8c | e7f814227f64aae9ea30dd7c878a9406d0c2380f | /optuna_dashboard/search_space.py | 1f740fde7e59881d31fdf4846bb86acbb835e07e | [
"MIT"
] | permissive | tktran/optuna-dashboard | 8bf5c106cc3c96470c9e281bcb28bd6c92138627 | 38e56010bf9230f8b27c7eeeb7f01a2f65cda7ac | refs/heads/main | 2023-03-31T09:54:14.289445 | 2021-04-04T06:26:46 | 2021-04-04T06:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,230 | py | import copy
import threading
from typing import Dict, List, Optional, Set, Tuple
from optuna.distributions import BaseDistribution
from optuna.trial import TrialState, FrozenTrial
SearchSpaceSetT = Set[Tuple[str, BaseDistribution]]
SearchSpaceListT = List[Tuple[str, BaseDistribution]]
# In-memory search space cache
search_space_cache_lock = threading.Lock()
search_space_cache: Dict[int, "_SearchSpace"] = {}
states_of_interest = [TrialState.COMPLETE, TrialState.PRUNED]
def get_search_space(
study_id: int, trials: List[FrozenTrial]
) -> Tuple[SearchSpaceListT, SearchSpaceListT]:
with search_space_cache_lock:
search_space = search_space_cache.get(study_id, None)
if search_space is None:
search_space = _SearchSpace()
search_space.update(trials)
search_space_cache[study_id] = search_space
return search_space.intersection, search_space.union
class _SearchSpace:
def __init__(self) -> None:
self._cursor: int = -1
self._intersection: Optional[SearchSpaceSetT] = None
self._union: SearchSpaceSetT = set()
@property
def intersection(self) -> SearchSpaceListT:
if self._intersection is None:
return []
intersection = list(self._intersection)
intersection.sort(key=lambda x: x[0])
return intersection
@property
def union(self) -> SearchSpaceListT:
union = list(self._union)
union.sort(key=lambda x: x[0])
return union
def update(self, trials: List[FrozenTrial]) -> None:
next_cursor = self._cursor
for trial in reversed(trials):
if self._cursor > trial.number:
break
if not trial.state.is_finished():
next_cursor = trial.number
if trial.state not in states_of_interest:
continue
current = set([(n, d) for n, d in trial.distributions.items()])
self._union = self._union.union(current)
if self._intersection is None:
self._intersection = copy.copy(current)
else:
self._intersection = self._intersection.intersection(current)
self._cursor = next_cursor
| [
"contact@c-bata.link"
] | contact@c-bata.link |
9f706d9d26452ede0c1df501a6d6ac04541e0c77 | ef29c31ef26815a237445b9359da00c4323717d0 | /django/orm/manytomany/models.py | 56c410dff6f951f6196132a5d64b33a8807bbe6a | [] | no_license | gvg4991/TIL-c9 | 3fe59bfe114e3af7f56a9163fa9e7ec83b3f97f0 | 2e4d6b99de2523ac4540cac2acd40342bbd6f9e3 | refs/heads/master | 2020-04-17T17:18:04.146013 | 2019-05-09T06:59:19 | 2019-05-09T06:59:19 | 166,777,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | from django.db import models
# Create your models here.
# 병원에 오는 사람들을 기록하는 시스템을 만드려고 한다.
# 필수적인 모델은 환자와 의사이다.
# 어떠한 관계로 표현할 수 있을까?
class Doctor(models.Model):
name = models.TextField()
# patients = models.ManyToManyField(Patient, through='Reservation') #아래 doctors와 둘 중 하나만 적기!
# patient1.doctor_set.all()
class Patient(models.Model):
name = models.TextField()
# Reservation을 통해서 doctor와 patient의 N:N관계를 형성
# (의사와 예약이 1:N, 환자와 예약이 1:M이므로 의사와 환자가 M:N)
doctors = models.ManyToManyField(Doctor, related_name='patients')#, through='Reservation')
#doctor1.patient_set.all(): reservation을 통하지않고 바로 의사의 환자를 불러옴
# patient_set을 patients로 이름을 지정해줌
# 중계자 역할
# class Reservation(models.Model):
# # Doctor:Reservation = 1:N 관계
# # Patient:Reservation = 1:N 관계
# doctor = models.ForeignKey(Doctor, on_delete=models.CASCADE)
# patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
# doctor1 = Doctor.objects.create(name='kim')
# doctor2 = Doctor.objects.create(name='kang')
# patient1 = Patient.objects.create(name='tom')
# patient2 = Patient.objects.create(name='jhon')
# Reservation.objects.create(doctor=doctor1, patient=patient2)
# Reservation.objects.create(doctor=doctor1, patient=patient1)
# Reservation.objects.create(doctor=doctor2, patient=patient1)
# doctor1.patients.add(patient2)
# >>> doctor1.patients.all()
# >>> patient2.doctors.all()
# doctor1.patients.remove(patient2) == patient2.doctors.remove(doctor1) | [
"14.73oo6o19@gmail.com"
] | 14.73oo6o19@gmail.com |
f73e6a719077834333f26688c0cefb3ca7a0773e | d6aed520d16b5c6d1b36ef4e21e4c0d895b751fe | /blog/models.py | 9a14f374ba058b002c2f20a38cc4888f3bd28990 | [
"MIT"
] | permissive | CoderLambert/DjangoBlog | 22bd71ed29af37847cd17542d21e1f2253975469 | a29290aadc5ace070976dd934a530c9e6fe3bb56 | refs/heads/master | 2021-05-06T02:58:57.162849 | 2017-12-16T18:11:38 | 2017-12-16T18:11:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,636 | py | from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
from uuslug import slugify
from django.contrib.sites.models import Site
from DjangoBlog.utils import cache_decorator, logger, cache
from django.utils.functional import cached_property
from django.utils.timezone import now
class BaseModel(models.Model):
slug = models.SlugField(default='no-slug', max_length=60, blank=True)
created_time = models.DateTimeField('创建时间', default=now)
last_mod_time = models.DateTimeField('修改时间', default=now)
def save(self, *args, **kwargs):
from DjangoBlog.blog_signals import article_save_signal
if not self.slug or self.slug == 'no-slug' or not self.id:
slug = self.title if 'title' in self.__dict__ else self.name
self.slug = slugify(slug)
super().save(*args, **kwargs)
# type = self.__class__.__name__
is_update_views = 'update_fields' in kwargs and len(kwargs['update_fields']) == 1 and kwargs['update_fields'][
0] == 'views'
article_save_signal.send(sender=self.__class__, is_update_views=is_update_views, id=self.id)
def get_full_url(self):
site = Site.objects.get_current().domain
url = "https://{site}{path}".format(site=site, path=self.get_absolute_url())
return url
class Meta:
abstract = True
class Article(BaseModel):
"""文章"""
STATUS_CHOICES = (
('d', '草稿'),
('p', '发表'),
)
COMMENT_STATUS = (
('o', '打开'),
('c', '关闭'),
)
TYPE = (
('a', '文章'),
('p', '页面'),
)
title = models.CharField('标题', max_length=200, unique=True)
body = models.TextField('正文')
pub_time = models.DateTimeField('发布时间', blank=True, null=True)
status = models.CharField('文章状态', max_length=1, choices=STATUS_CHOICES, default='p')
comment_status = models.CharField('评论状态', max_length=1, choices=COMMENT_STATUS, default='o')
type = models.CharField('类型', max_length=1, choices=TYPE, default='a')
views = models.PositiveIntegerField('浏览量', default=0)
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='作者', on_delete=models.CASCADE)
category = models.ForeignKey('Category', verbose_name='分类', on_delete=models.CASCADE, blank=True, null=True)
tags = models.ManyToManyField('Tag', verbose_name='标签集合', blank=True)
def __str__(self):
return self.title
class Meta:
ordering = ['-pub_time']
verbose_name = "文章"
verbose_name_plural = verbose_name
get_latest_by = 'created_time'
def get_absolute_url(self):
return reverse('blog:detailbyid', kwargs={
'article_id': self.id,
'year': self.created_time.year,
'month': self.created_time.month,
'day': self.created_time.day
})
@cache_decorator(60 * 60 * 10)
def get_category_tree(self):
tree = self.category.get_category_tree()
names = list(map(lambda c: (c.name, c.get_absolute_url()), tree))
return names
def save(self, *args, **kwargs):
if not self.slug or self.slug == 'no-slug' or not self.id:
# Only set the slug when the object is created.
self.slug = slugify(self.title)
super().save(*args, **kwargs)
def viewed(self):
self.views += 1
self.save(update_fields=['views'])
def comment_list(self):
cache_key = 'article_comments_{id}'.format(id=self.id)
value = cache.get(cache_key)
if value:
logger.info('get article comments:{id}'.format(id=self.id))
return value
else:
comments = self.comment_set.all()
cache.set(cache_key, comments)
logger.info('set article comments:{id}'.format(id=self.id))
return comments
def get_admin_url(self):
info = (self._meta.app_label, self._meta.model_name)
return reverse('admin:%s_%s_change' % info, args=(self.pk,))
@cached_property
def next_article(self):
# 下一篇
return Article.objects.filter(id__gt=self.id, status='p').order_by('id').first()
@cached_property
def prev_article(self):
# 前一篇
return Article.objects.filter(id__lt=self.id, status='p').first()
class Category(BaseModel):
"""文章分类"""
name = models.CharField('分类名', max_length=30, unique=True)
parent_category = models.ForeignKey('self', verbose_name="父级分类", blank=True, null=True)
class Meta:
ordering = ['name']
verbose_name = "分类"
verbose_name_plural = verbose_name
def get_absolute_url(self):
return reverse('blog:category_detail', kwargs={'category_name': self.slug})
def __str__(self):
return self.name
@cache_decorator(60 * 60 * 10)
def get_category_tree(self):
"""
递归获得分类目录的父级
:return:
"""
categorys = []
def parse(category):
categorys.append(category)
if category.parent_category:
parse(category.parent_category)
parse(self)
return categorys
@cache_decorator(60 * 60 * 10)
def get_sub_categorys(self):
"""
获得当前分类目录所有子集
:return:
"""
categorys = []
all_categorys = Category.objects.all()
def parse(category):
if category not in categorys:
categorys.append(category)
childs = all_categorys.filter(parent_category=category)
for child in childs:
if category not in categorys:
categorys.append(child)
parse(child)
parse(self)
return categorys
class Tag(BaseModel):
"""文章标签"""
name = models.CharField('标签名', max_length=30, unique=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('blog:tag_detail', kwargs={'tag_name': self.slug})
@cache_decorator(60 * 60 * 10)
def get_article_count(self):
return Article.objects.filter(tags__name=self.name).distinct().count()
class Meta:
ordering = ['name']
verbose_name = "标签"
verbose_name_plural = verbose_name
class Links(models.Model):
"""友情链接"""
name = models.CharField('链接名称', max_length=30, unique=True)
link = models.URLField('链接地址')
sequence = models.IntegerField('排序', unique=True)
created_time = models.DateTimeField('创建时间', default=now)
last_mod_time = models.DateTimeField('修改时间', default=now)
class Meta:
ordering = ['sequence']
verbose_name = '友情链接'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class SideBar(models.Model):
"""侧边栏,可以展示一些html内容"""
name = models.CharField('标题', max_length=100)
content = models.TextField("内容")
sequence = models.IntegerField('排序', unique=True)
is_enable = models.BooleanField('是否启用', default=True)
created_time = models.DateTimeField('创建时间', default=now)
last_mod_time = models.DateTimeField('修改时间', default=now)
class Meta:
ordering = ['sequence']
verbose_name = '侧边栏'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
| [
"liangliangyy@gmail.com"
] | liangliangyy@gmail.com |
f0e798f0316e955e18f9b5b9ff48d942cad9ac7e | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/218dae7f9abb4ae5803d0c739a05c8b6.py | 44d656f33ebed779c4fc9b02201c0d8f51e2d108 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 245 | py | #
# Skeleton file for the Python "Bob" exercise.
#
def hey(input):
if input.strip() == "" :
return "Fine. Be that way!"
if input.isupper() :
return "Whoa, chill out!"
if input[-1:] == '?':
return "Sure."
return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
27e92f18dd67804249725b15beb456cb9386f2b0 | 298dac85fcea57a7bcdd05355e85b0f154606563 | /task_openpose_reimplement/html.py | fd862f609131f96c77713a229e6f8da8a430cadf | [
"MIT"
] | permissive | liruilong940607/image-processing-pytorch | a99a094a292daa0ac596eb810faa61468bf98e37 | 535ff7160b680af727e95c74f22d402a1741e9de | refs/heads/master | 2020-03-15T03:11:58.752906 | 2018-05-03T03:26:33 | 2018-05-03T03:26:33 | 131,936,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,533 | py | import dominate
from dominate.tags import *
import os
import base64
import numpy as np
from io import BytesIO
from PIL import Image
import argparse
import random
import cv2
'''
## Simple Usage
from html import *
html = MYHTML('./web', 'simple')
lists = get_filenames('/home/dalong/data/coco2017/train2017/')
html.add_line(lists, tag='images')
lists = ['./ignoremasks/'+f.split('/')[-1].replace('jpg', 'png') for f in lists]
html.add_line(lists, tag='ignoremasks', maxid=1)
html.save()
'''
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('--ImagesDir', type=str, default=None,
help='the dir stores images')
parser.add_argument('--LabelsDir', type=str, default=None,
help='the dir stores labels')
parser.add_argument('--PredsDir', type=str, default=None,
help='the dir stores preds')
parser.add_argument('--HtmlDir', type=str, default='./web/',
help='the dir stores outputdefaulthtml file')
parser.add_argument('--HtmlName', type=str, default='index',
help='the stored html file name')
parser.add_argument('--Num', type=int, default=10,
help='the max number to show')
parser.add_argument('--MaxLabelId', type=int, default=None,
help='the Classes number')
return parser.parse_args()
# lists
def get_filenames(imagesdir, sample_N=20):
EXTs = ['jpg', 'png', 'jpeg', 'bmp']
lists = [os.path.join(imagesdir,f) for f in os.listdir(imagesdir) if f.split('.')[-1] in EXTs]
if sample_N:
sample_N = min(sample_N, len(lists))
randIdxs = random.sample(range(len(lists)), sample_N)
lists = [lists[idx] for idx in randIdxs]
return lists
# read images
def get_images_txts(lists, tag='image', maxid=None):
images = [cv2.imread(file) for file in lists]
txts = []
for i, data in enumerate(zip(images, lists)):
image, file = data
if image is None:
images[i] = np.zeros((10,10,3), dtype = np.uint8)
txts += ['%s [ID]%s not found!'%(tag,file.split('/')[-1])]
continue
txts += ['%s [ID]%s [Size]%d*%d [ValueRange]%d-%d'%(tag,
file.split('/')[-1],
image.shape[0], image.shape[1],
np.min(image), np.max(image)
)]
if maxid:
scale = int(255.0/maxid)
images = [image*scale for image in images]
return images, txts
class MYHTML:
    """Incrementally builds an HTML page of base64-embedded images.

    Images are buffered via add_image() and written out as one table row
    whenever new_line() starts a fresh row or save() renders the document.
    """

    def __init__(self, web_dir, title, reflesh=0):
        """Create the output directory and an empty dominate document.

        Args:
            web_dir: directory the .html file will be written into.
            title: document title; also used as the output file name.
            reflesh: if > 0, emit a meta tag with this value.
                NOTE(review): the name (and http_equiv value) look like a
                misspelling of "refresh"; kept as-is for compatibility.
        """
        self.title = title
        self.web_dir = web_dir
        if not os.path.exists(self.web_dir):
            os.makedirs(self.web_dir)
        self.doc = dominate.document(title=title)
        if reflesh > 0:
            with self.doc.head:
                meta(http_equiv="reflesh", content=str(reflesh))
        self.t = None        # current dominate table, created lazily
        self.height = 400    # display height (px); previously unset until
                             # new_line(), which made save() able to raise
                             # AttributeError — initialize it here
        self.images = []     # pending images for the current row
        self.txts = []       # captions matching self.images

    def add_header(self, str):
        """Append an <h3> header with the given text."""
        with self.doc:
            h3(str)

    def _flush_pending(self):
        """Render the buffered images/captions as one table row.

        Creates a table on demand so rows queued via add_image() before any
        table exists are not silently dropped.  Returns True if a row was
        written, False if nothing was pending.
        """
        if not self.images:
            return False
        if self.t is None:
            self.add_table()
        with self.t:
            with tr():
                for im, txt in zip(self.images, self.txts):
                    if len(im.shape) == 3:
                        # OpenCV arrays are BGR; reverse channels for PIL.
                        pil_image = Image.fromarray(im[:, :, ::-1])
                    else:
                        pil_image = Image.fromarray(im)
                    buff = BytesIO()
                    pil_image.save(buff, format="JPEG")
                    new_image_string = base64.b64encode(buff.getvalue()).decode("utf-8")
                    with td(style="word-wrap: break-word;", halign="center", valign="top"):
                        with p():
                            img(style="height:%dpx" % self.height,
                                src="data:image/jpg;base64,%s" % new_image_string)
                            br()
                            p(txt)
        return True

    def add_line(self, lists, tag='image', maxid=None):
        """Read the files in *lists* and queue them as a new row."""
        images, txts = get_images_txts(lists, tag, maxid)
        self.new_line()
        for image, txt in zip(images, txts):
            self.add_image(image, txt)

    def new_line(self, height=400):
        """Flush any buffered row and start a new one of the given height."""
        if self._flush_pending():
            # Historical behavior: each flushed row starts a fresh table.
            self.add_table()
        self.height = height
        self.images = []
        self.txts = []

    def add_image(self, im, txt):
        """Buffer one image and its caption for the current row."""
        self.images.append(im)
        self.txts.append(txt)

    def add_table(self, border=1):
        """Start a new table and attach it to the document."""
        self.t = table(border=border, style="table-layout: fixed;")
        self.doc.add(self.t)

    def add_images(self, ims, txts, height=400):
        """Immediately render *ims*/*txts* as one table row (no buffering)."""
        self.add_table()
        with self.t:
            with tr():
                for im, txt in zip(ims, txts):
                    if len(im.shape) == 3:
                        pil_image = Image.fromarray(im[:, :, ::-1])
                    else:
                        pil_image = Image.fromarray(im)
                    buff = BytesIO()
                    pil_image.save(buff, format="JPEG")
                    new_image_string = base64.b64encode(buff.getvalue()).decode("utf-8")
                    with td(style="word-wrap: break-word;", halign="center", valign="top"):
                        with p():
                            img(style="height:%dpx" % height,
                                src="data:image/jpg;base64,%s" % new_image_string)
                            br()
                            p(txt)

    def save(self):
        """Flush any pending row and write the rendered HTML to disk."""
        self._flush_pending()
        html_file = '%s/%s.html' % (self.web_dir, self.title)
        # 'with' guarantees the handle is closed even if render() raises.
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
def demo():
    """Build a small two-row test page under web/ to exercise MYHTML."""
    page = MYHTML('web/', 'test_html')
    page.add_header('hello world')
    page.new_line()
    for _ in range(20):
        page.add_image(np.full((100, 100), 128, dtype=np.uint8), 'yeah')
    page.new_line()
    for _ in range(20):
        page.add_image(np.full((100, 100), 128, dtype=np.uint8), 'yeah')
    page.save()
if __name__ == '__main__':
    args = parse()
    html = MYHTML(args.HtmlDir, args.HtmlName)
    lists = None
    if args.ImagesDir:
        lists = imagelists = get_filenames(args.ImagesDir)
    if args.LabelsDir:
        lists = labellists = get_filenames(args.LabelsDir)
    if args.PredsDir:
        lists = predlists = get_filenames(args.PredsDir)
    if not lists:
        # Explicit error instead of a bare assert (asserts vanish under -O).
        raise SystemExit('at least one of --ImagesDir/--LabelsDir/--PredsDir is required')
    # Pick one set of random indices and reuse it for every directory so the
    # image/label/pred rows show corresponding files.
    # NOTE(review): indices only line up if all directories yield lists of
    # the same length and ordering; get_filenames() samples each directory
    # independently — verify before relying on cross-row alignment.
    sample_N = min(args.Num, len(lists))
    randIdxs = random.sample(range(len(lists)), sample_N)
    if args.ImagesDir:
        # Fixed: get_images_txts takes 'tag', not 'split' (was a TypeError).
        images, txts = get_images_txts([imagelists[idx] for idx in randIdxs], tag='image', maxid=None)
        html.new_line()
        for image, txt in zip(images, txts):
            html.add_image(image, txt)
    if args.LabelsDir:
        images, txts = get_images_txts([labellists[idx] for idx in randIdxs], tag='label', maxid=args.MaxLabelId)
        html.new_line()
        for image, txt in zip(images, txts):
            html.add_image(image, txt)
    if args.PredsDir:
        images, txts = get_images_txts([predlists[idx] for idx in randIdxs], tag='pred', maxid=args.MaxLabelId)
        html.new_line()
        for image, txt in zip(images, txts):
            html.add_image(image, txt)
    html.save()
"397653553@qq.com"
] | 397653553@qq.com |
93e8ea6d2bca6946873359d86dfb0118ce399dff | 9095c1a0da8c6ffe914ee6dd9c4708062fd95c9a | /vtpl_api/models/gender.py | c4712ee17882c6e19c587c76af25f03479537928 | [
"MIT"
] | permissive | vtpl1/vtpl_api_py | 2e5338bd08677f12fc7304fb6ac7a32f32af1c93 | d289c92254deb040de925205c583de69802a1c6b | refs/heads/master | 2020-09-10T23:34:21.828350 | 2019-11-15T07:26:53 | 2019-11-15T07:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Gender(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
MALE = "Male"
FEMALE = "Female"
OTHER = "Other"
NA = "NA"
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""Gender - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Gender):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"monotosh.das@videonetics.com"
] | monotosh.das@videonetics.com |
3df4a905da8f0e35b240d9567e50007b3ff14528 | ef1bf421aca35681574c03014e0c2b92da1e7dca | /examples/modes/extended_selections.py | 9609014cb98132afba108e9cc0cccf699b92feb4 | [
"MIT"
] | permissive | pyQode/pyqode.core | 74e67f038455ea8cde2bbc5bd628652c35aff6eb | 0ffabebe4f0397d53429024f6f44db3fe97b0828 | refs/heads/master | 2020-04-12T06:36:33.483459 | 2020-01-18T14:16:08 | 2020-01-18T14:16:08 | 7,739,074 | 24 | 25 | MIT | 2020-01-18T14:16:10 | 2013-01-21T19:46:41 | Python | UTF-8 | Python | false | false | 631 | py | """
Minimal example showing the use of the ExtendedSelectionMode.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from pyqode.qt import QtWidgets
from pyqode.core.api import CodeEdit
from pyqode.core.backend import server
from pyqode.core.modes import ExtendedSelectionMode
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
editor = CodeEdit()
editor.backend.start(server.__file__)
editor.resize(800, 600)
print(editor.modes.append(ExtendedSelectionMode()))
editor.file.open(__file__)
editor.show()
app.exec_()
editor.close()
del editor
del app
| [
"colin.duquesnoy@gmail.com"
] | colin.duquesnoy@gmail.com |
15e97e0097131cf3461a92612e34e80735c8233d | 556e88a954cf031460ea7fdf3791eb968ca4fbdd | /fluent_python/chapter_16/ch16_coroaverager0.py | 4d18e28584350ad23568d896475d11f306ac2cae | [] | no_license | feng-hui/python_books_examples | c696243fcb8305be495f44d1a88a02e7f906b7bd | e38542db7be927cdaa5d85317a58a13b3a13ae25 | refs/heads/master | 2022-03-07T00:37:29.311687 | 2019-09-28T15:15:20 | 2019-09-28T15:15:20 | 122,941,867 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# @author FH
# @email: capricorn12032126.com
# @time: 2018/12/24 20:55
def averager():
"""
execute at terminal
example:
from chapter_16.ch16_coroaverager0 import averager
coro_avg = averager()
next(coro_avg)
coro_avg.send(10) -> 10.0
coro_avg.send(30) -> 20.0
coro_avg.send(5) -> 15.0
"""
total = 0.0
count = 0
average = None
while True:
term = yield average
total += term
count += 1
average = total / count
| [
"982698913@qq.com"
] | 982698913@qq.com |
1acf4cb9ad9106bd17384777aa04d54b258c868f | 5016d6d2eb0b66b5d1417001a40a9ec10c983e32 | /python-puka/rpc_client.py | 3ddbf03e07844cb48c3088e911794f338bf83afe | [
"Apache-2.0"
] | permissive | PoeBlu/rabbitmq-tutorials | 6466e463d5ed4e3de8ef9c3ef19ba0768d0857bd | be8f5dd34be04bb31c30d17598bb1cc0b458a2d6 | refs/heads/master | 2023-05-02T03:21:49.824907 | 2015-04-28T17:03:41 | 2015-04-28T17:03:41 | 70,755,760 | 0 | 0 | Apache-2.0 | 2023-04-14T15:25:35 | 2016-10-13T01:11:04 | C# | UTF-8 | Python | false | false | 1,291 | py | #!/usr/bin/env python
import puka
import uuid
class FibonacciRpcClient(object):
def __init__(self):
self.client = client = puka.Client("amqp://localhost/")
promise = client.connect()
client.wait(promise)
promise = client.queue_declare(exclusive=True)
self.callback_queue = client.wait(promise)['queue']
self.consume_promise = client.basic_consume(queue=self.callback_queue,
no_ack=True)
def call(self, n):
correlation_id = str(uuid.uuid4())
# We don't need to wait on promise from publish, let it happen async.
self.client.basic_publish(exchange='',
routing_key='rpc_queue',
headers={'reply_to': self.callback_queue,
'correlation_id': correlation_id},
body=str(n))
while True:
msg_result = self.client.wait(self.consume_promise)
if msg_result['headers']['correlation_id'] == correlation_id:
return int(msg_result['body'])
fibonacci_rpc = FibonacciRpcClient()
print " [x] Requesting fib(30)"
response = fibonacci_rpc.call(30)
print " [.] Got %r" % (response,)
| [
"majek04@gmail.com"
] | majek04@gmail.com |
79a64e807ffc1a3e3cf0a52eb76fe385806de058 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-ci/connectors/pipelines/pipelines/__init__.py | 371bafaa1370e03bd1b7464db2f095b34966388c | [
"LicenseRef-scancode-free-unknown",
"MIT",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 727 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
"""The pipelines package."""
import logging
import os
from rich.logging import RichHandler
from . import sentry_utils
sentry_utils.initialize()
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("httpx").setLevel(logging.WARNING)
logging_handlers = [RichHandler(rich_tracebacks=True)]
if "CI" in os.environ:
# RichHandler does not work great in the CI
logging_handlers = [logging.StreamHandler()]
logging.basicConfig(
level=logging.INFO,
format="%(name)s: %(message)s",
datefmt="[%X]",
handlers=logging_handlers,
)
main_logger = logging.getLogger(__name__)
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
1889a20824f9d1897607d0c56bd84f23b17c9ae4 | 8a2c6e45cd64ee04d8c02ea579e3396cc21f7309 | /PartitionList.py | 325957f56423ab9f55cad10c360a65764aee48f5 | [] | no_license | Danyshman/Leetcode | e665ece38cb0b22d6b5b19f15d6a73e73da1710e | aa2799e7512ea389f6dc18448da7c2b4cda75e47 | refs/heads/master | 2021-07-06T17:44:45.243915 | 2020-10-05T04:55:03 | 2020-10-05T04:55:03 | 191,697,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def partition(self, head: ListNode, x: int):
l1 = l1_head = ListNode(0)
l2 = l2_head = ListNode(0)
if head is None or head.next is None:
return head
node = head
while node:
if node.val < x:
new_node = ListNode(node.val)
l1.next = new_node
l1 = new_node
else:
new_node = ListNode(node.val)
l2.next = new_node
l2 = new_node
node = node.next
l1.next = l2_head.next
return l1_head.next
| [
"danyshman.azamatov@gmail.com"
] | danyshman.azamatov@gmail.com |
f3c58bb4f225970c8017b3ca0bf7fc03919b3db9 | fd529ba6ade52cd2a3dab94da01252d7ea90398d | /0528/foddddddppdpdoodd.py | 5014c1cbe98b6709533621c6e604f53676c551df | [] | no_license | fjfhfjfjgishbrk/AE401-Python | 4a984deb0281542c205d72695285b35c7413338f | ee80fa4588b127cff2402fd81e732ede28a66411 | refs/heads/master | 2022-06-13T13:49:39.875567 | 2022-05-28T12:40:51 | 2022-05-28T12:40:51 | 251,178,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.chrome.options import Options
import time
import datetime
import requests
now = datetime.datetime.now()
dateNow = now.strftime("%Y-%m-%d")
chrome_options = Options()
#chrome_options.add_argument('--headless')
chrome = webdriver.Chrome(options=chrome_options)
chrome.get("https://tw.eztable.com/search?country=tw&date=2020-05-30&people=2&q=%E4%B8%80%E8%B5%B7%E5%B0%8F%E9%A3%9F%E9%A4%A8&searchTab=restaurant&source=mobile.eztable.com&utm_campaign=branding_keyword&utm_medium=cpc&utm_source=marketing")
time.sleep(2)
#for i in range(5):
#chrome.execute_script("window.scrollTo(0,document.body.scrollHeight);")
#time.sleep(1)
pageSource = chrome.page_source
soup = BeautifulSoup(pageSource, "html.parser")
space = soup.find_all("div", class_="sc-gzVnrw")
restaurants = soup.find_all("h4", class_="sc-gpHHfC")
value1 = restaurants[0].text + " Time: " + space[0].text
chrome.find_elements_by_class_name("sc-fgfRvd")[0].click()
time.sleep(2)
pageSource = chrome.page_source
soup = BeautifulSoup(pageSource, "html.parser")
image = soup.find_all("div", class_="sc-ESoVU")[0]['style']
img_url = image.split("\"")[1]
chrome.back()
time.sleep(2)
chrome.find_elements_by_class_name("sc-gzVnrw")[0].click()
time.sleep(5)
pageSource = chrome.page_source
soup = BeautifulSoup(pageSource, "html.parser")
people = soup.find_all("div", class_="sc-keVrkP")
value1 += " " + people[0].text.split(",")[1].strip()
chrome.find_elements_by_class_name("sc-fHxwqH")[0].click()
webhook_key = "PI8b5ouDPVMzfDrEQlHyP"
trigger_name = "abc"
url = 'https://maker.ifttt.com/trigger/'+trigger_name+'/with/key/'+webhook_key+'?value1=' + value1 + "&value2=" + img_url
requests.get(url)
chrome.close() | [
"59891511+fjfhfjfjgishbrk@users.noreply.github.com"
] | 59891511+fjfhfjfjgishbrk@users.noreply.github.com |
352ce6bbbcce97cc0658ecd29193cba7bd06a0c6 | 111bb07459d59e16fe4ccff773c51426fdc4e3bc | /public/pages/qdsIndexPage.py | cf4cd6b2c543e872931a38346c4424ca547d9c60 | [] | no_license | Pactortester/UItestframework-master | 65486d2708a26fdd78d009bab4bdef0334a98d22 | 3f41da16e62a1ea181eca45ed33120842a324a69 | refs/heads/master | 2020-03-26T15:48:13.752371 | 2018-08-17T10:30:00 | 2018-08-17T10:30:00 | 145,065,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | #coding=utf-8
from public.common import basepage
from public.common import mytest
import time
class DDSIndexPage(basepage.Page):
def into_qds_page(self):
"""打ads首页"""
self.dr.open('https://new.quandashi.com/')
def return_title(self):
"""返回该页面的title"""
return self.dr.get_title()
def cookies(self):
"""登录"""
self.dr.add_cookie({'name': 'QDS_COOKIE',
'value': '4cee3ae144733628cc3ce396a7713a2cfe720901',
'Domain': '.quandashi.com'})
def order_info(self):
"""订单信息"""
self.dr.clear_type("name->ownerContactPerson","全大师")
self.dr.clear_type("css->body > div.myOrder-wrap > div.section-myorder.width1200 > div > table:nth-child(2) > tbody > tr:nth-child(2) > td.td-2 > input", "15624992498")
self.dr.clear_type("css->body > div.myOrder-wrap > div.section-myorder.width1200 > div > table:nth-child(2) > tbody > tr:nth-child(3) > td.td-2 > input","4564564@qq.com")
self.dr.clear_type("css->#remark","test")
def pay_check(self):
"""支付检验"""
for i in self.dr.get_elements(
"css->body > div.myOrder-wrap > div.section-myorder.width1200 > div > div > ul > li.row-sense > em > i"):
print("总价:" + i.text)
ii = i.text
self.assertIn(aa, ii)
print("价格一致")
self.dr.click(
"css->body > div.myOrder-wrap > div.section-myorder.width1200 > div > div > ul > li.row-step > a.btn-next.submitOrder")
time.sleep(2)
for o in self.dr.get_elements("class->payable"):
print("订单提交成功,应付金额:" + o.text)
oo = o.text
time.sleep(2)
self.assertIn(oo, ii)
print("测试通过")
self.dr.click("id->alisubmit")
| [
"1456470136@qq.com"
] | 1456470136@qq.com |
df54030576bca2ec55e311a0961ecbcba5fed0a7 | d5dd2ade4671b237c747b592d3635c1bc2852ca8 | /0x16-api_advanced/1-top_ten.py | 47611769b7e33e0060ef3c1b8b44200d25381a9d | [] | no_license | emna7/holberton-system_engineering-devops | ce14780a6d091ca1da37fbe26e70534081195bcb | ed4bee21409f12c12afd8d28acd121de67643789 | refs/heads/master | 2020-07-24T02:07:42.844728 | 2020-07-07T21:58:19 | 2020-07-07T21:58:19 | 207,769,072 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | #!/usr/bin/python3
"""contains 1 function: top_ten"""
import requests
def top_ten(subreddit):
user = {"User-Agent": "custom"}
request = requests.get("https://www.reddit.com/r/{}/hot.json"
.format(subreddit), headers=user)
try:
for i in range(10):
print(request.json()
.get("data").get("children")[i].get("data").get("title"))
except:
print("None")
| [
"bhmemna7@gmail.com"
] | bhmemna7@gmail.com |
c657cb2b615bc1a8d1d93d8440095c8dc3bab44a | 57aed4644e21ec53e9b3c577bc4faa6b7610d79c | /test_duration.py | 7ba8f3fab4d55ca9fedf7fc9e0c21758297860ea | [
"MIT"
] | permissive | ec500-software-engineering/exercise-2-ffmpeg-JiaruiJin | 1b9277a4ac8c0b4d09236946c0d127f07016b262 | 1b2f99a1b530c3db54c70e44f2cf09bc6c9e0c5f | refs/heads/master | 2020-04-24T21:56:37.027254 | 2019-02-28T21:40:13 | 2019-02-28T21:40:13 | 172,295,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | from pytest import approx
import subprocess
import json
def ffprobe(file) -> dict:
""" get media metadata """
meta = subprocess.check_output(['ffprobe', '-v', 'warning',
'-print_format', 'json',
'-show_streams',
'-show_format',
str(file)], universal_newlines = True)
return json.loads(meta)
def test_duration():
fnin = "./videoplayback.mp4"
fnout = "./videoplayback.mp4_480.mp4"
orig_meta = ffprobe(fnin)
orig_duration = float(orig_meta['streams'][0]['duration'])
meta_480 = ffprobe(fnout)
duration_480 = float(meta_480['streams'][0]['duration'])
assert round(orig_duration) == approx(round(duration_480))
| [
"noreply@github.com"
] | ec500-software-engineering.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.