hexsha (string, 40) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4 to 209) | max_stars_repo_name (string, 5 to 121) | max_stars_repo_head_hexsha (string, 40) | max_stars_repo_licenses (list, 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 4 to 209) | max_issues_repo_name (string, 5 to 121) | max_issues_repo_head_hexsha (string, 40) | max_issues_repo_licenses (list, 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 4 to 209) | max_forks_repo_name (string, 5 to 121) | max_forks_repo_head_hexsha (string, 40) | max_forks_repo_licenses (list, 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Each row below is rendered as one metadata line (hexsha through the fork-event datetimes), followed by the row's `content` cell (the source file, reproduced verbatim), followed by the trailing `avg_line_length | max_line_length | alphanum_fraction` cells.
bc2d44d55708c01f71e60c199094cfb461c606ed | 1,905 | py | Python | hummingbot/client/settings.py | autonomoussoftware/hummingbot | 03aaa8d2000c48f988d94a873d7e65b4714a65b9 | ["Apache-2.0"] | null | null | null | hummingbot/client/settings.py | autonomoussoftware/hummingbot | 03aaa8d2000c48f988d94a873d7e65b4714a65b9 | ["Apache-2.0"] | null | null | null | hummingbot/client/settings.py | autonomoussoftware/hummingbot | 03aaa8d2000c48f988d94a873d7e65b4714a65b9 | ["Apache-2.0"] | null | null | null |
from os.path import (
realpath,
join,
)
from typing import List
from hummingbot import get_strategy_list
from hummingbot.core.utils.trading_pair_fetcher import TradingPairFetcher
# Global variables
required_exchanges: List[str] = []
trading_pair_fetcher = TradingPairFetcher.get_instance()
# Global static values
KEYFILE_PREFIX = "key_file_"
KEYFILE_POSTFIX = ".json"
ENCYPTED_CONF_PREFIX = "encrypted_"
ENCYPTED_CONF_POSTFIX = ".json"
GLOBAL_CONFIG_PATH = "conf/conf_global.yml"
TOKEN_ADDRESSES_FILE_PATH = realpath(join(__file__, "../../wallet/ethereum/erc20_tokens.json"))
DEFAULT_KEY_FILE_PATH = "conf/"
DEFAULT_LOG_FILE_PATH = "logs/"
DEFAULT_ETHEREUM_RPC_URL = "https://mainnet.coinalpha.com/hummingbot-test-node"
TEMPLATE_PATH = realpath(join(__file__, "../../templates/"))
CONF_FILE_PATH = "conf/"
CONF_PREFIX = "conf_"
CONF_POSTFIX = "_strategy"
EXCHANGES = {
"bamboo_relay",
"binance",
"coinbase_pro",
"ddex",
"huobi",
"liquid",
"idex",
"radar_relay",
"dolomite",
"bittrex",
"bitcoin_com"
}
DEXES = {
"bamboo_relay",
"ddex",
"idex",
"radar_relay",
"dolomite"
}
STRATEGIES: List[str] = get_strategy_list()
EXAMPLE_PAIRS = {
"bamboo_relay": "ZRX-WETH",
"binance": "ZRX-ETH",
"bitcoin_com": "ETH-BCH",
"bittrex": "ZRX-ETH",
"coinbase_pro": "ETH-USDC",
"ddex": "ZRX-WETH",
"dolomite": "WETH-DAI",
"huobi": "ETH-USDT",
"idex": "ZRX-ETH",
"liquid": "ETH-USD",
"radar_relay": "ZRX-WETH",
}
EXAMPLE_ASSETS = {
"bamboo_relay": "ZRX",
"binance": "ZRX",
"bitcoin_com": "BCH",
"bittrex": "ZRX",
"coinbase_pro": "ETH",
"ddex": "ZRX",
"dolomite": "LRC",
"huobi": "eth",
"idex": "ETH",
"liquid": "ETH",
"radar_relay": "ZRX",
}
MAXIMUM_OUTPUT_PANE_LINE_COUNT = 1000
MAXIMUM_LOG_PANE_LINE_COUNT = 1000
MAXIMUM_TRADE_FILLS_DISPLAY_OUTPUT = 100
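As a quick, hypothetical illustration of how the path constants above might compose a strategy config filename; the helper function, the `.yml` suffix, and the example strategy name are assumptions for illustration, not code from the hummingbot repository.

```python
# Hypothetical sketch only: combines CONF_FILE_PATH, CONF_PREFIX and CONF_POSTFIX
# into a strategy config filename; the helper name and ".yml" suffix are assumptions.
from os.path import join

CONF_FILE_PATH = "conf/"
CONF_PREFIX = "conf_"
CONF_POSTFIX = "_strategy"

def strategy_config_filename(strategy: str, index: int = 0) -> str:
    # e.g. "conf/conf_pure_market_making_strategy_0.yml"
    return join(CONF_FILE_PATH, f"{CONF_PREFIX}{strategy}{CONF_POSTFIX}_{index}.yml")

print(strategy_config_filename("pure_market_making"))
```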
| 22.678571 | 95 | 0.662992 |
3fa621dc5c6830d0f12e27bdb25933ca6fea8be3 | 15,019 | py | Python | tests/test_wdl_aid.py | biowdl/wdl-aid | 7e2e2dd46888af48482ebd23088731ac1a72810d | ["MIT"] | 4 | 2019-07-30T16:36:44.000Z | 2021-04-18T16:07:56.000Z | tests/test_wdl_aid.py | biowdl/wdl-aid | 7e2e2dd46888af48482ebd23088731ac1a72810d | ["MIT"] | 2 | 2019-10-28T12:04:48.000Z | 2019-12-06T13:31:48.000Z | tests/test_wdl_aid.py | biowdl/wdl-aid | 7e2e2dd46888af48482ebd23088731ac1a72810d | ["MIT"] | null | null | null |
# Copyright (c) 2019 Leiden University Medical Center
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from pathlib import Path
import pytest
import WDL
import wdl_aid.wdl_aid as wa
filesdir = Path(__file__).parent / Path("files")
def test_drop_nones():
assert wa.drop_nones({1: 1, 2: 2, 3: None}) == {1: 1, 2: 2}
def test_wrap_in_list():
assert wa.wrap_in_list(1) == [1]
assert wa.wrap_in_list([1]) == [1]
def test_merge_dict_of_lists():
assert wa.merge_dict_of_lists(
{1: [1, 2], 2: [1,2]},
{1: [2, 3], 3: [1]}) == {1: [1, 2, 3], 2: [1, 2], 3: [1]}
def test_fully_qualified_inputs():
doc = WDL.load(str(filesdir / Path("workflow.wdl")))
available_inputs = doc.workflow.available_inputs
qualified_names = wa.fully_qualified_inputs(available_inputs,
doc.workflow.name)
assert {x[0] for x in qualified_names} == {'test.sw.workflowOptional',
'test.echo.shouldBeExcluded',
'test.echo.missingDescription',
'test.echo.taskOptional',
'test.input2', 'test.input1'}
def test_fully_qualified_parameter_meta():
test_dict = {"bots": "zeven dagen lang", "eluveitie": "lvgvs"}
result = wa.fully_qualified_parameter_meta(test_dict, "Son_ar_chistr")
assert result == {"Son_ar_chistr.bots": "zeven dagen lang",
"Son_ar_chistr.eluveitie": "lvgvs"}
def test_gather_parameter_meta():
doc = WDL.load(str(filesdir / Path("workflow.wdl")))
parameter_meta = wa.gather_parameter_meta(doc.workflow, doc.workflow.name)
assert parameter_meta == {"test.input1": "The first input",
"test.input2": "The second input",
"test.echo.taskOptional":
{"description": "an optional input",
"category": "advanced",
"desc": "alternative description",
"cat": "common"}
}
def test_process_meta():
meta = {"WDL_AID": {"exclude": ["A", "B"]},
"authors": ["A", "B"]}
assert wa.process_meta(meta, "trinket") == {
"exclude": ["trinket.A", "trinket.B"],
"authors": ["A", "B"]
}
meta2 = {"WDL_AID": {"exclude": ["A", "B"]},
"authors": "me! :3"}
assert wa.process_meta(meta2, "trinket") == {
"exclude": ["trinket.A", "trinket.B"],
"authors": ["me! :3"]
}
def test_gather_meta():
doc = WDL.load(str(filesdir / Path("workflow.wdl")))
meta = wa.gather_meta(doc.workflow, doc.workflow.name)
assert meta == {
"exclude": ["test.echo.shouldBeExcluded"],
"authors": [{
"name": "Percy",
"email": "PercivalFredrickSteinVonMuselKlossowskiDeRolothe3rd@whitestone.net",
"organization": "Vox Machina"
}, {
"name": "'Caleb'",
"email": "c.widowghast@example.com",
"organization": "The Mighty Nein"
}]
}
def test_get_description_defaults():
a_dict = {"Vax": {"description": "A half-elf rogue"},
"Vex": {"desc": "A half-elf ranger"},
"Keyleth": "A half-elf druid"}
assert wa.get_description(a_dict, "Vax") == "A half-elf rogue"
assert wa.get_description(a_dict, "Vex") == "???"
assert wa.get_description(a_dict, "Keyleth") == "???"
assert wa.get_description(a_dict, "Grog") == "???"
def test_get_description_description_key():
a_dict = {"Vax": {"description": "A half-elf rogue"},
"Vex": {"desc": "A half-elf ranger"},
"Keyleth": "A half-elf druid"}
assert wa.get_description(a_dict, "Vax", description_key="desc") == "???"
assert wa.get_description(a_dict, "Vex", description_key="desc") == "A half-elf ranger"
assert wa.get_description(a_dict, "Keyleth", description_key="desc") == "???"
assert wa.get_description(a_dict, "Grog", description_key="desc") == "???"
def test_get_description_fallback_description():
a_dict = {"Vax": {"description": "A half-elf rogue"},
"Vex": {"desc": "A half-elf ranger"},
"Keyleth": "A half-elf druid"}
assert wa.get_description(a_dict, "Vax", fallback_description="A VM member") == "A half-elf rogue"
assert wa.get_description(a_dict, "Vex", fallback_description="A VM member") == "A VM member"
assert wa.get_description(a_dict, "Keyleth", fallback_description="A VM member") == "A VM member"
assert wa.get_description(a_dict, "Grog", fallback_description="A VM member") == "A VM member"
def test_get_description_fallback_description_to_object():
a_dict = {"Vax": {"description": "A half-elf rogue"},
"Vex": {"desc": "A half-elf ranger"},
"Keyleth": "A half-elf druid"}
assert wa.get_description(a_dict, "Vax", fallback_description_to_object=True) == "A half-elf rogue"
assert wa.get_description(a_dict, "Vex", fallback_description_to_object=True) == {"desc": "A half-elf ranger"}
assert wa.get_description(a_dict, "Keyleth", fallback_description_to_object=True) == "A half-elf druid"
assert wa.get_description(a_dict, "Grog", fallback_description_to_object=True) == "???"
def test_get_category_defaults():
a_dict = {"Vax": {"category": "A half-elf rogue"},
"Vex": {"cat": "A half-elf ranger"},
"Keyleth": "A half-elf druid"}
assert wa.get_category(a_dict, "Vax") == "A half-elf rogue"
assert wa.get_category(a_dict, "Vex") == "other"
assert wa.get_category(a_dict, "Keyleth") == "other"
assert wa.get_category(a_dict, "Grog") == "other"
def test_get_category_category_key():
a_dict = {"Vax": {"category": "A half-elf rogue"},
"Vex": {"cat": "A half-elf ranger"},
"Keyleth": "A half-elf druid"}
assert wa.get_category(a_dict, "Vax", category_key="cat") == "other"
assert wa.get_category(a_dict, "Vex", category_key="cat") == "A half-elf ranger"
assert wa.get_category(a_dict, "Keyleth", category_key="cat") == "other"
assert wa.get_category(a_dict, "Grog", category_key="cat") == "other"
def test_get_category_fallback_category():
a_dict = {"Vax": {"category": "A half-elf rogue"},
"Vex": {"cat": "A half-elf ranger"},
"Keyleth": "A half-elf druid"}
assert wa.get_category(a_dict, "Vax", fallback_category="VM member") == "A half-elf rogue"
assert wa.get_category(a_dict, "Vex", fallback_category="VM member") == "VM member"
assert wa.get_category(a_dict, "Keyleth", fallback_category="VM member") == "VM member"
assert wa.get_category(a_dict, "Grog", fallback_category="VM member") == "VM member"
def test_gather_inputs():
doc = WDL.load(str(filesdir / Path("workflow.wdl")))
inputs, required_inputs = wa.gather_inputs(doc.workflow)
assert required_inputs == ["test.input1"]
for name, binding in inputs:
assert name in ['test.sw.workflowOptional',
'test.echo.shouldBeExcluded',
'test.echo.missingDescription',
'test.echo.taskOptional',
'test.input2',
'test.input1']
assert isinstance(binding, WDL.Env.Binding)
def test_collect_values():
values = wa.collect_values(str(filesdir / Path("workflow.wdl")), True,
"category", "other", "description", "...",
False, False)
assert isinstance(values, dict)
assert values["workflow_name"] == "test"
assert values["workflow_file"] == str(filesdir / Path("workflow.wdl"))
assert values["workflow_authors"] == [{
"name": "Percy",
"email": "PercivalFredrickSteinVonMuselKlossowskiDeRolothe3rd@whitestone.net",
"organization": "Vox Machina"
}]
assert values["workflow_all_authors"] == [{
"name": "Percy",
"email": "PercivalFredrickSteinVonMuselKlossowskiDeRolothe3rd@whitestone.net",
"organization": "Vox Machina"
}, {
"name": "'Caleb'",
"email": "c.widowghast@example.com",
"organization": "The Mighty Nein"
}]
assert values["workflow_meta"] == {
"WDL_AID": {
"exclude": ["echo.shouldBeExcluded"]
},
"authors": {
"name": "Percy",
"email": "PercivalFredrickSteinVonMuselKlossowskiDeRolothe3rd@whitestone.net",
"organization": "Vox Machina"
},
"author": "Whomever",
"email": "whatever@where-ever.meh",
"description": "Once upon a midnight dreary, while I pondered, weak and weary, over many a quant and curious volumne of forgotten lore. While I nodded, nearly napping, suddenly there came a tapping, as if some one gently rapping, rapping at my chamber door. \"'Tis some visitor,\" I muttered, \"Tapping at my chamber door. This it is and nothing more!\""
}
assert values["excluded_inputs"] == ["test.echo.shouldBeExcluded"]
assert values["wdl_aid_version"] == wa.__version__
assert all(
[entry in [
{
'default': None,
'description': '...',
'name': 'test.sw.workflowOptional',
'type': 'String?'
}, {
'default': None,
'description': '...',
'name': 'test.echo.missingDescription',
'type': 'String?'
}, {
'default': '":p"',
'description': '...',
'name': 'test.input2',
'type': 'String'
}
] for entry in values["other"]])
assert values["required"] == [{
'default': None,
'description': '...',
'name': 'test.input1',
'type': 'String'
}]
assert values["advanced"] == [{
'default': None,
'description': 'an optional input',
'name': 'test.echo.taskOptional',
'type': 'String?'
}]
def test_collect_values_strict():
with pytest.raises(ValueError):
values = wa.collect_values(str(filesdir / Path("workflow.wdl")), True,
"category", "other", "description", "...",
False, True)
def test_no_workfow():
with pytest.raises(ValueError):
values = wa.collect_values(str(filesdir / Path("no_workflow.wdl")),
True, "category", "other", "description",
"...", False, False)
def test_main_defaults(capsys):
sys.argv = ["script", str(filesdir / Path("workflow.wdl"))]
wa.main()
captured = capsys.readouterr().out.splitlines(True)
with (filesdir / Path("expected.md")).open("r") as expected_output:
expected = expected_output.readlines()[:-1]
assert captured == expected + ["> Generated using WDL AID ({})\n".format(wa.__version__)]
def test_main_no_required(capsys):
sys.argv = ["script", str(filesdir / Path("workflow.wdl")),
"--do-not-separate-required"]
wa.main()
captured = capsys.readouterr().out.splitlines(True)
with (filesdir / Path("expected_no_required.md")).open("r") as expected_output:
expected = expected_output.readlines()[:-1]
assert captured == expected + ["> Generated using WDL AID ({})\n".format(wa.__version__)]
def test_main_keys(capsys):
sys.argv = ["script", str(filesdir / Path("workflow.wdl")),
"--description-key", "desc", "--category-key", "cat"]
wa.main()
captured = capsys.readouterr().out.splitlines(True)
with (filesdir / Path("expected_keys.md")).open("r") as expected_output:
expected = expected_output.readlines()[:-1]
assert captured == expected + ["> Generated using WDL AID ({})\n".format(wa.__version__)]
def test_main_fallback(capsys):
sys.argv = ["script", str(filesdir / Path("workflow.wdl")),
"--fallback-description", "...",
"--fallback-category", "advanced",
"--fallback-description-to-object"]
wa.main()
captured = capsys.readouterr().out.splitlines(True)
with (filesdir / Path("expected_fallback.md")).open("r") as expected_output:
expected = expected_output.readlines()[:-1]
assert captured == expected + ["> Generated using WDL AID ({})\n".format(wa.__version__)]
def test_main_template(capsys):
sys.argv = ["script", str(filesdir / Path("workflow.wdl")),
"--template", str(filesdir / Path("test.template"))]
wa.main()
captured = capsys.readouterr()
with (filesdir / Path("test.template")).open("r") as expected_output:
expected = expected_output.read()
assert captured.out == expected
def test_main_output(tmpdir):
output_file = tmpdir.join("output.md")
sys.argv = ["script", str(filesdir / Path("workflow.wdl")),
"-o", output_file.strpath]
wa.main()
with output_file.open() as out_file:
result = out_file.readlines()
with (filesdir / Path("expected.md")).open("r") as expected_output:
expected = expected_output.readlines()[:-1]
assert result == expected + ["> Generated using WDL AID ({})\n".format(wa.__version__)]
def test_main_extra(capsys):
sys.argv = ["script", str(filesdir / Path("workflow.wdl")),
"-t", str(filesdir / Path("extra.template")),
"-e", str(filesdir / Path("extra.json"))]
wa.main()
captured = capsys.readouterr()
with (filesdir / Path("extra.json")).open("r") as expected_output:
expected = expected_output.read()
assert captured.out == expected
def test_main_strict():
sys.argv = ["script", str(filesdir / Path("workflow.wdl")), "--strict"]
with pytest.raises(ValueError):
wa.main()
| 42.426554
| 362
| 0.59711
|
db6d7467d781cdb78c6c7d3c109952e33e905aee
| 2,702
|
py
|
Python
|
theonionbox/tob/geoip.py
|
rainlance/theonionbox
|
6a4f81ef37bef52065a5b8fe1291e0e1e1b06265
|
[
"MIT"
] | 120
|
2015-12-30T09:41:56.000Z
|
2022-03-23T02:30:05.000Z
|
theonionbox/tob/geoip.py
|
rainlance/theonionbox
|
6a4f81ef37bef52065a5b8fe1291e0e1e1b06265
|
[
"MIT"
] | 57
|
2015-12-29T21:55:14.000Z
|
2022-01-07T09:48:51.000Z
|
theonionbox/tob/geoip.py
|
rainlance/theonionbox
|
6a4f81ef37bef52065a5b8fe1291e0e1e1b06265
|
[
"MIT"
] | 17
|
2018-02-05T08:57:46.000Z
|
2022-02-28T16:44:41.000Z
|
# Default class used if no geoip db present
class GeoIPOO(object):
def country(self, ip, default=None):
return default
def country_name(self, ip, default=None):
return default
def region_name(self, ip, default=None):
return default
def city_name(self, ip, default=None):
return default
def postal_code(self, ip, default=None):
return default
def latitude(self, ip, default=None):
return default
def longitude(self, ip, default=None):
return default
def close(self):
return
class GeoIP2(GeoIPOO):
reader = None
cache = {}
def __init__(self, path_to_db):
from geoip2.database import Reader
try:
self.reader = Reader(path_to_db)
except:
pass
self.cache = {}
def data(self, ip):
try:
return self.cache[ip]
except KeyError:
try:
response = self.reader.city(ip)
except:
return None
self.cache[ip] = response
return response
def country(self, ip, default=None):
rsp = self.data(ip)
if rsp is None:
return default
try:
return rsp.country.iso_code
except:
return default
def country_name(self, ip, default=None):
rsp = self.data(ip)
if rsp is None:
return default
try:
return rsp.country.name
except:
return default
def region_name(self, ip, default=None):
rsp = self.data(ip)
if rsp is None:
return default
try:
return rsp.subdivisions.most_specific.name
except:
return default
def city_name(self, ip, default=None):
rsp = self.data(ip)
if rsp is None:
return default
try:
return rsp.city.name
except:
return default
def postal_code(self, ip, default=None):
rsp = self.data(ip)
if rsp is None:
return default
try:
return rsp.postal.code
except:
return default
def latitude(self, ip, default=None):
rsp = self.data(ip)
if rsp is None:
return default
try:
return rsp.location.latitude
except:
return default
def longitude(self, ip, default=None):
rsp = self.data(ip)
if rsp is None:
return default
try:
return rsp.location.longitude
except:
return default
def close(self):
self.reader.close()
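A hedged usage sketch for the two classes above; the `tob.geoip` import path and the GeoLite2 database location are assumptions for illustration. `GeoIP2` needs the `geoip2` package and a city database to open successfully, while `GeoIPOO` is the no-op fallback.

```python
# Usage sketch; the import path and the .mmdb path are illustrative assumptions.
from tob.geoip import GeoIPOO, GeoIP2

fallback = GeoIPOO()                                   # no database: every lookup returns the default
assert fallback.country("8.8.8.8", default="??") == "??"

geo = GeoIP2("/path/to/GeoLite2-City.mmdb")            # hypothetical database path
print(geo.country_name("8.8.8.8", default="unknown"))  # returns "unknown" if the lookup fails
geo.close()                                            # assumes the database opened successfully
```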
| 21.790323 | 54 | 0.534049 |
bec00cdac61755e56e0b82d59bc79e4301fc10e3 | 7,083 | py | Python | projects/seg/region_net.py | ixhorse/gluon-cv | c89f9bfc1f5f4ac52ed593ff038af876c3e1dc93 | ["Apache-2.0"] | null | null | null | projects/seg/region_net.py | ixhorse/gluon-cv | c89f9bfc1f5f4ac52ed593ff038af876c3e1dc93 | ["Apache-2.0"] | null | null | null | projects/seg/region_net.py | ixhorse/gluon-cv | c89f9bfc1f5f4ac52ed593ff038af876c3e1dc93 | ["Apache-2.0"] | null | null | null |
# pylint: disable=unused-argument
"""Pyramid Scene Parsing Network"""
from mxnet.gluon import nn
from mxnet.context import cpu
from mxnet.gluon.nn import HybridBlock
from mxnet import gluon
from gluoncv.model_zoo.segbase import SegBaseModel
from gluoncv.model_zoo.fcn import _FCNHead
# pylint: disable-all
__all__ = ['RegionNet', 'get_regionnet']
class RegionNet(SegBaseModel):
r"""DeepLabV3
Parameters
----------
nclass : int
Number of categories for the training dataset.
backbone : string
Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
'resnet101' or 'resnet152').
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
for Synchronized Cross-GPU BachNormalization).
aux : bool
Auxiliary loss.
Reference:
Chen, Liang-Chieh, et al. "Rethinking atrous convolution for semantic image segmentation."
arXiv preprint arXiv:1706.05587 (2017).
"""
def __init__(self, nclass, backbone='resnet50', aux=True, ctx=cpu(), pretrained_base=True,
base_size=520, crop_size=480, **kwargs):
super(RegionNet, self).__init__(nclass, aux, backbone, ctx=ctx, base_size=base_size,
crop_size=crop_size, pretrained_base=pretrained_base, **kwargs)
with self.name_scope():
self.head = _DeepLabHead(nclass, **kwargs)
self.head.initialize(ctx=ctx)
self.head.collect_params().setattr('lr_mult', 10)
if self.aux:
self.auxlayer = _FCNHead(1024, nclass, **kwargs)
self.pool = nn.MaxPool2D(pool_size=2, strides=2)
self.auxlayer.initialize(ctx=ctx)
self.auxlayer.collect_params().setattr('lr_mult', 10)
def hybrid_forward(self, F, x):
c3, c4 = self.base_forward(x)
outputs = []
x = self.head(F.concat(c3, c4, dim=1))
outputs.append(x)
if self.aux:
auxout = self.auxlayer(c3)
auxout = self.pool(auxout)
outputs.append(auxout)
return tuple(outputs)
class _DeepLabHead(HybridBlock):
def __init__(self, nclass, norm_layer=nn.BatchNorm, norm_kwargs={}, **kwargs):
super(_DeepLabHead, self).__init__()
with self.name_scope():
self.aspp = _ASPP(2048+1024, [3, 6, 18], norm_layer=norm_layer,
norm_kwargs=norm_kwargs, **kwargs)
self.block = nn.HybridSequential()
self.block.add(nn.Conv2D(in_channels=256, channels=256,
kernel_size=3, padding=1, use_bias=False))
self.block.add(norm_layer(in_channels=256, **norm_kwargs))
self.block.add(nn.Activation('relu'))
self.block.add(nn.Dropout(0.1))
self.block.add(nn.Conv2D(in_channels=256, channels=nclass,
kernel_size=1))
self.block.add(nn.MaxPool2D(pool_size=2, strides=2))
def hybrid_forward(self, F, x):
x = self.aspp(x)
return self.block(x)
def _ASPPConv(in_channels, out_channels, atrous_rate, norm_layer, norm_kwargs):
block = nn.HybridSequential()
with block.name_scope():
block.add(nn.Conv2D(in_channels=in_channels, channels=out_channels,
kernel_size=3, padding=atrous_rate,
dilation=atrous_rate, use_bias=False))
block.add(norm_layer(in_channels=out_channels, **norm_kwargs))
block.add(nn.Activation('relu'))
return block
class _AsppPooling(nn.HybridBlock):
def __init__(self, in_channels, out_channels, norm_layer, norm_kwargs, **kwargs):
super(_AsppPooling, self).__init__()
self.gap = nn.HybridSequential()
with self.gap.name_scope():
self.gap.add(nn.GlobalAvgPool2D())
self.gap.add(nn.Conv2D(in_channels=in_channels, channels=out_channels,
kernel_size=1, use_bias=False))
self.gap.add(norm_layer(in_channels=out_channels, **norm_kwargs))
self.gap.add(nn.Activation("relu"))
def hybrid_forward(self, F, x):
_, _, h, w = x.shape
pool = self.gap(x)
return F.contrib.BilinearResize2D(pool, height=h, width=w)
class _ASPP(nn.HybridBlock):
def __init__(self, in_channels, atrous_rates, norm_layer, norm_kwargs):
super(_ASPP, self).__init__()
out_channels = 256
b0 = nn.HybridSequential()
with b0.name_scope():
b0.add(nn.Conv2D(in_channels=in_channels, channels=out_channels,
kernel_size=1, use_bias=False))
b0.add(norm_layer(in_channels=out_channels, **norm_kwargs))
b0.add(nn.Activation("relu"))
rate1, rate2, rate3 = tuple(atrous_rates)
b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer, norm_kwargs)
b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer, norm_kwargs)
# b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer, norm_kwargs)
# b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer,
# norm_kwargs=norm_kwargs)
self.concurent = gluon.contrib.nn.HybridConcurrent(axis=1)
with self.concurent.name_scope():
self.concurent.add(b0)
self.concurent.add(b1)
self.concurent.add(b2)
# self.concurent.add(b3)
# self.concurent.add(b4)
self.project = nn.HybridSequential()
with self.project.name_scope():
self.project.add(nn.Conv2D(in_channels=3*out_channels, channels=out_channels,
kernel_size=1, use_bias=False))
self.project.add(norm_layer(in_channels=out_channels, **norm_kwargs))
self.project.add(nn.Activation("relu"))
self.project.add(nn.Dropout(0.5))
def hybrid_forward(self, F, x):
return self.project(self.concurent(x))
def get_regionnet(dataset='pascal_voc', backbone='resnet50', pretrained=False,
root='~/.mxnet/models', ctx=cpu(0), **kwargs):
r"""DeepLabV3
Parameters
----------
dataset : str, default pascal_voc
The dataset that model pretrained on. (pascal_voc, ade20k)
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_fcn(dataset='pascal_voc', backbone='resnet50', pretrained=False)
>>> print(model)
"""
from gluoncv.data import datasets
# infer number of classes
model = RegionNet(datasets[dataset].NUM_CLASS, backbone=backbone, ctx=ctx, **kwargs)
return model
| 41.180233 | 100 | 0.629536 |
f711087fc49d8d2d1e4f621050569c812ce59cb7 | 123 | py | Python | CursoIntensivoPython/curso-intensivo-python-master/capitulo_09/my_electric_car.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | ["MIT"] | null | null | null | CursoIntensivoPython/curso-intensivo-python-master/capitulo_09/my_electric_car.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | ["MIT"] | null | null | null | CursoIntensivoPython/curso-intensivo-python-master/capitulo_09/my_electric_car.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | ["MIT"] | null | null | null |
import electric_car
my_tesla = electric_car.ElectricCar('tesla', 'roadster', 2016)
print(my_tesla.get_descriptive_name())
| 24.6 | 62 | 0.804878 |
42a251d21db809066dd950389905fb6f847b4b61 | 1,317 | py | Python | SQLTemplatedPythonOperator/operator.py | asdfgeoff/airflow-operators | e013b276e10e39c2b675cd4532e2ae3e30717a3f | ["MIT"] | 1 | 2021-09-06T14:47:21.000Z | 2021-09-06T14:47:21.000Z | SQLTemplatedPythonOperator/operator.py | asdfgeoff/airflow-operators | e013b276e10e39c2b675cd4532e2ae3e30717a3f | ["MIT"] | null | null | null | SQLTemplatedPythonOperator/operator.py | asdfgeoff/airflow-operators | e013b276e10e39c2b675cd4532e2ae3e30717a3f | ["MIT"] | null | null | null |
from airflow.operators.python_operator import PythonOperator
from typing import Optional
class SQLTemplatedPythonOperator(PythonOperator):
""" Extend PythonOperator to receive a templated SQL query and also to display it in the "Rendered Template" tab in Airflow's UI.
This is very helpful for troubleshooting specific task instances, since you can copy a propertly formatted query directly from
the web UI rather than copying the contents of "templates_dict" and parsing it manually.
Args:
sql (str): File path or query text containing jinja2 variables to be filled by airflow templating engine.
python_callable (func): Access final sql text from inside using kwargs['templates_dict']['query']
"""
template_ext = ('.sql',)
template_fields = ('sql', 'templates_dict')
ui_color = "#ffe5cc"
ui_fgcolor = "#000"
def __init__(self,
sql: str,
op_args: Optional[list] = None,
op_kwargs: Optional[list] = None,
*args, **kwargs) -> None:
super(SQLTemplatedPythonOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.op_args = op_args or []
self.op_kwargs = op_kwargs or {}
self.templates_dict = {'sql': sql}
if __name__ == '__main__':
pass
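A hedged sketch of how this operator might be wired into an Airflow 1.x-style DAG; the DAG id, the `query.sql` file, and the import path are assumptions. Note that the rendered query is stored under the `sql` key of `templates_dict`, as set in `__init__` above.

```python
# Illustrative DAG wiring (dag id, schedule and query.sql are assumptions).
from datetime import datetime
from airflow import DAG
# Assumes the repository layout above is importable as a package.
from SQLTemplatedPythonOperator.operator import SQLTemplatedPythonOperator

def run_query(**kwargs):
    # The operator stores the rendered SQL under templates_dict['sql'] (see __init__ above).
    rendered_sql = kwargs["templates_dict"]["sql"]
    print(rendered_sql)

with DAG("example_sql_dag", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    run_templated = SQLTemplatedPythonOperator(
        task_id="templated_query",
        sql="query.sql",              # hypothetical .sql file containing jinja2 variables
        python_callable=run_query,
        provide_context=True,         # Airflow 1.x: pass context (incl. templates_dict) as kwargs
    )
```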
| 36.583333 | 133 | 0.668185 |
c3ed851f9b62f8d6857f1f66f3f017ab320666f3 | 3,394 | py | Python | src/oci/_vendor/requests/structures.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | src/oci/_vendor/requests/structures.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | src/oci/_vendor/requests/structures.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null |
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
from collections import OrderedDict
from .compat import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = OrderedDict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
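A brief usage sketch of the case-insensitive behaviour described in the docstring above; the vendored import path is taken from this row's file path.

```python
# Demonstrates case-insensitive lookup with preservation of the last-set key casing.
from oci._vendor.requests.structures import CaseInsensitiveDict

cid = CaseInsensitiveDict()
cid["Accept"] = "application/json"

assert cid["aCCEPT"] == "application/json"    # lookups ignore case
assert list(cid) == ["Accept"]                # iteration keeps the original casing
assert cid == {"accept": "application/json"}  # equality comparison is case-insensitive
```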
| 30.576577 | 245 | 0.635533 |
beb967d90cab0c379f3c043c43d888005a62a442 | 6,271 | py | Python | dacbench/wrappers/performance_tracking_wrapper.py | ndangtt/LeadingOnesDAC | 953747d8702f179851d7973c65779a1f830e03a1 | ["Apache-2.0"] | 11 | 2020-11-09T10:50:31.000Z | 2022-02-19T09:23:44.000Z | dacbench/wrappers/performance_tracking_wrapper.py | ndangtt/LeadingOnesDAC | 953747d8702f179851d7973c65779a1f830e03a1 | ["Apache-2.0"] | 95 | 2020-11-18T09:37:30.000Z | 2022-02-17T10:05:33.000Z | dacbench/wrappers/performance_tracking_wrapper.py | ndangtt/LeadingOnesDAC | 953747d8702f179851d7973c65779a1f830e03a1 | ["Apache-2.0"] | 11 | 2020-11-15T15:24:27.000Z | 2022-03-14T14:51:43.000Z |
from collections import defaultdict
from gym import Wrapper
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
sb.set_style("darkgrid")
current_palette = list(sb.color_palette())
class PerformanceTrackingWrapper(Wrapper):
"""
Wrapper to track episode performance.
Includes interval mode that returns performance in lists of len(interval) instead of one long list.
"""
def __init__(
self,
env,
performance_interval=None,
track_instance_performance=True,
logger=None,
):
"""
Initialize wrapper
Parameters
-------
env : gym.Env
Environment to wrap
performance_interval : int
If not none, mean in given intervals is tracked, too
track_instance_performance : bool
Indicates whether to track per-instance performance
logger : dacbench.logger.ModuleLogger
"""
super(PerformanceTrackingWrapper, self).__init__(env)
self.performance_interval = performance_interval
self.overall_performance = []
self.episode_performance = 0
if self.performance_interval:
self.performance_intervals = []
self.current_performance = []
self.track_instances = track_instance_performance
if self.track_instances:
self.instance_performances = defaultdict(lambda: [])
self.logger = logger
def __setattr__(self, name, value):
"""
Set attribute in wrapper if available and in env if not
Parameters
----------
name : str
Attribute to set
value
Value to set attribute to
"""
if name in [
"performance_interval",
"track_instances",
"overall_performance",
"performance_intervals",
"current_performance",
"env",
"get_performance",
"step",
"instance_performances",
"episode_performance",
"render_performance",
"render_instance_performance",
"logger",
]:
object.__setattr__(self, name, value)
else:
setattr(self.env, name, value)
def __getattribute__(self, name):
"""
Get attribute value of wrapper if available and of env if not
Parameters
----------
name : str
Attribute to get
Returns
-------
value
Value of given name
"""
if name in [
"performance_interval",
"track_instances",
"overall_performance",
"performance_intervals",
"current_performance",
"env",
"get_performance",
"step",
"instance_performances",
"episode_performance",
"render_performance",
"render_instance_performance",
"logger",
]:
return object.__getattribute__(self, name)
else:
return getattr(self.env, name)
def step(self, action):
"""
Execute environment step and record performance
Parameters
----------
action : int
action to execute
Returns
-------
np.array, float, bool, dict
state, reward, done, metainfo
"""
state, reward, done, info = self.env.step(action)
self.episode_performance += reward
if done:
self.overall_performance.append(self.episode_performance)
if self.logger is not None:
self.logger.log(
"overall_performance",
self.episode_performance,
)
if self.performance_interval:
if len(self.current_performance) < self.performance_interval:
self.current_performance.append(self.episode_performance)
else:
self.performance_intervals.append(self.current_performance)
self.current_performance = [self.episode_performance]
if self.track_instances:
key = "".join(str(e) for e in self.env.instance)
self.instance_performances[key].append(self.episode_performance)
self.episode_performance = 0
return state, reward, done, info
def get_performance(self):
"""
Get state performance
Returns
-------
np.array or np.array, np.array or np.array, dict or np.array, np.arry, dict
all states or all states and interval sorted states
"""
if self.performance_interval and self.track_instances:
complete_intervals = self.performance_intervals + [self.current_performance]
return (
self.overall_performance,
complete_intervals,
self.instance_performances,
)
elif self.performance_interval:
complete_intervals = self.performance_intervals + [self.current_performance]
return self.overall_performance, complete_intervals
elif self.track_instances:
return self.overall_performance, self.instance_performances
else:
return self.overall_performance
def render_performance(self):
""" Plot performance """
plt.figure(figsize=(12, 6))
plt.plot(
np.arange(len(self.overall_performance) // 2),
self.overall_performance[1::2],
)
plt.title("Mean Performance per episode")
plt.xlabel("Episode")
plt.ylabel("Reward")
plt.show()
def render_instance_performance(self):
""" Plot mean performance for each instance """
plt.figure(figsize=(12, 6))
plt.title("Mean Performance per Instance")
plt.ylabel("Mean reward")
plt.xlabel("Instance")
ax = plt.subplot(111)
for k, i in zip(
self.instance_performances.keys(),
np.arange(len(self.instance_performances.keys())),
):
ax.bar(str(i), np.mean(self.instance_performances[k]))
plt.show()
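A usage sketch, assuming a dacbench benchmark is available (LubyBenchmark is just one illustrative choice); it wraps the environment, runs a single episode with random actions, and reads back the tracked performance.

```python
# Usage sketch: wrap a dacbench environment and inspect the tracked performance.
# LubyBenchmark is an illustrative benchmark choice, not prescribed by the wrapper.
from dacbench.benchmarks import LubyBenchmark
from dacbench.wrappers import PerformanceTrackingWrapper

env = PerformanceTrackingWrapper(LubyBenchmark().get_environment(),
                                 performance_interval=10)
state = env.reset()
done = False
while not done:
    state, reward, done, info = env.step(env.action_space.sample())

# With performance_interval set and instance tracking on (the default),
# get_performance() returns (overall, interval-sorted, per-instance) performances.
overall, intervals, per_instance = env.get_performance()
print(overall)
```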
| 30.590244 | 103 | 0.571998 |
dee4bce4981f00960e3a1827b5554406c281c830 | 435 | py | Python | setup.py | cpcloud/miniast | 8677752f70a1bbfc9dee3c3a742ea99790b9659e | ["Apache-2.0"] | 16 | 2018-02-24T00:07:44.000Z | 2018-09-13T21:39:25.000Z | setup.py | cpcloud/miniast | 8677752f70a1bbfc9dee3c3a742ea99790b9659e | ["Apache-2.0"] | null | null | null | setup.py | cpcloud/miniast | 8677752f70a1bbfc9dee3c3a742ea99790b9659e | ["Apache-2.0"] | null | null | null |
import versioneer
from setuptools import setup, find_packages
setup(
name='miniast',
url='https://github.com/cpcloud/miniast',
packages=find_packages(),
python_requires='>=3.5',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Lightweight macros for Python',
license='Apache License, Version 2.0',
author='Phillip Cloud',
author_email='cpcloud@gmail.com',
)
| 24.166667 | 48 | 0.708046 |
36efe6bfc83fb39fa8a5471a546e84283049b287 | 4,670 | py | Python | src/local/butler/package.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | ["Apache-2.0"] | 1 | 2021-12-20T14:48:42.000Z | 2021-12-20T14:48:42.000Z | src/local/butler/package.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | ["Apache-2.0"] | 2 | 2021-09-28T05:36:03.000Z | 2021-12-13T20:48:34.000Z | src/local/butler/package.py | henryzz0/clusterfuzz | 0427ed8328d6bd6e18540087793a41531bbaafea | ["Apache-2.0"] | 1 | 2021-11-06T06:22:00.000Z | 2021-11-06T06:22:00.000Z |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""package.py handles the package command"""
import os
import re
import sys
import zipfile
from local.butler import appengine
from local.butler import common
from local.butler import constants
MIN_SUPPORTED_NODEJS_VERSION = 4
def _clear_zip(target_zip_path):
"""Remove zip and manifest file."""
if os.path.exists(constants.PACKAGE_TARGET_MANIFEST_PATH):
os.remove(constants.PACKAGE_TARGET_MANIFEST_PATH)
if os.path.exists(target_zip_path):
os.remove(target_zip_path)
def _add_to_zip(output_file, src_file_path, dest_file_path=None):
"""Add the src_file_path to the output_file with the right target path."""
if dest_file_path is None:
dest_file_path = src_file_path
output_file.write(src_file_path, os.path.join('clusterfuzz', dest_file_path))
def _is_nodejs_up_to_date():
"""Check if node is of version MINIMUM_NODEJS_VERSION."""
return_code, output = common.execute('node -v')
if return_code != 0:
return False
m = re.match(br'v([0-9]+)\..+', output.strip())
if not m:
return False
major_version = int(m.group(1))
return major_version >= MIN_SUPPORTED_NODEJS_VERSION
def _get_files(path):
"""Iterate through files in path."""
for root, _, filenames in os.walk(path):
for filename in filenames:
if filename.endswith('.pyc') or (os.sep + '.git') in root:
continue
yield os.path.join(root, filename)
def package(revision,
target_zip_dir=constants.PACKAGE_TARGET_ZIP_DIRECTORY,
target_manifest_path=constants.PACKAGE_TARGET_MANIFEST_PATH,
platform_name=None,
python3=False):
"""Prepare clusterfuzz-source.zip."""
is_ci = os.getenv('TEST_BOT_ENVIRONMENT')
if not is_ci and common.is_git_dirty():
print('Your branch is dirty. Please fix before packaging.')
sys.exit(1)
if not _is_nodejs_up_to_date():
print('You do not have nodejs, or your nodejs is not at least version 4.')
sys.exit(1)
common.install_dependencies(platform_name=platform_name)
# This needs to be done before packaging step to let src/appengine/config be
# archived for bot.
appengine.symlink_dirs()
_, ls_files_output = common.execute('git -C . ls-files', print_output=False)
file_paths = [path.decode('utf-8') for path in ls_files_output.splitlines()]
if not os.path.exists(target_zip_dir):
os.makedirs(target_zip_dir)
target_zip_name = constants.LEGACY_ZIP_NAME
if platform_name:
if python3:
target_zip_name = platform_name + '-3.zip'
else:
target_zip_name = platform_name + '.zip'
target_zip_path = os.path.join(target_zip_dir, target_zip_name)
_clear_zip(target_zip_path)
output_file = zipfile.ZipFile(target_zip_path, 'w', zipfile.ZIP_DEFLATED)
# Add files from git.
for file_path in file_paths:
if (file_path.startswith('config') or file_path.startswith('local') or
file_path.startswith(os.path.join('src', 'appengine')) or
file_path.startswith(os.path.join('src', 'local')) or
file_path.startswith(os.path.join('src', 'python', 'tests'))):
continue
_add_to_zip(output_file, file_path)
# These are project configuration yamls.
for path in _get_files(os.path.join('src', 'appengine', 'config')):
_add_to_zip(output_file, path)
# These are third party dependencies.
for path in _get_files(os.path.join('src', 'third_party')):
_add_to_zip(output_file, path)
output_file.close()
with open(target_manifest_path, 'w') as f:
f.write('%s\n' % revision)
with zipfile.ZipFile(target_zip_path, 'a', zipfile.ZIP_DEFLATED) as f:
_add_to_zip(f, target_manifest_path, constants.PACKAGE_TARGET_MANIFEST_PATH)
print('Revision: %s' % revision)
print()
print('%s is ready.' % target_zip_path)
return target_zip_path
def execute(args):
if args.platform == 'all':
for platform_name in list(constants.PLATFORMS.keys()):
package(
revision=common.compute_staging_revision(),
platform_name=platform_name)
else:
package(
revision=common.compute_staging_revision(), platform_name=args.platform)
| 31.133333 | 80 | 0.723555 |
3601aabec8f5b1522e104db897d7701cfad7c5e1 | 12,102 | py | Python | src/tests/plugins/paypal/test_webhook.py | alainrk/pretix | 867a8132aa1ed73dd9513efae5b3c46b5bbae140 | ["ECL-2.0", "Apache-2.0"] | 1 | 2021-08-31T13:16:55.000Z | 2021-08-31T13:16:55.000Z | src/tests/plugins/paypal/test_webhook.py | alainrk/pretix | 867a8132aa1ed73dd9513efae5b3c46b5bbae140 | ["ECL-2.0", "Apache-2.0"] | null | null | null | src/tests/plugins/paypal/test_webhook.py | alainrk/pretix | 867a8132aa1ed73dd9513efae5b3c46b5bbae140 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
import json
from datetime import timedelta
from decimal import Decimal
import pytest
from django.utils.timezone import now
from pretix.base.models import (
Event, EventPermission, Order, Organizer, RequiredAction, User,
)
@pytest.fixture
def env():
user = User.objects.create_user('dummy@dummy.dummy', 'dummy')
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), live=True
)
EventPermission.objects.create(event=event, user=user)
o1 = Order.objects.create(
code='FOOBAR', event=event, email='dummy@dummy.test',
status=Order.STATUS_PAID,
datetime=now(), expires=now() + timedelta(days=10),
total=Decimal('13.37'), payment_provider='paypal',
payment_info=json.dumps({
"id": "PAY-5YK922393D847794YKER7MUI",
"create_time": "2013-02-19T22:01:53Z",
"update_time": "2013-02-19T22:01:55Z",
"state": "approved",
"intent": "sale",
"payer": {
"payment_method": "credit_card",
"funding_instruments": [
{
"credit_card": {
"type": "mastercard",
"number": "xxxxxxxxxxxx5559",
"expire_month": 2,
"expire_year": 2018,
"first_name": "Betsy",
"last_name": "Buyer"
}
}
]
},
"transactions": [
{
"amount": {
"total": "7.47",
"currency": "USD",
"details": {
"subtotal": "7.47"
}
},
"description": "This is the payment transaction description.",
"note_to_payer": "Contact us for any questions on your order.",
"related_resources": [
{
"sale": {
"id": "36C38912MN9658832",
"create_time": "2013-02-19T22:01:53Z",
"update_time": "2013-02-19T22:01:55Z",
"state": "completed",
"amount": {
"total": "7.47",
"currency": "USD"
},
"protection_eligibility": "ELIGIBLE",
"protection_eligibility_type": "ITEM_NOT_RECEIVED_ELIGIBLE",
"transaction_fee": {
"value": "1.75",
"currency": "USD"
},
"parent_payment": "PAY-5YK922393D847794YKER7MUI",
"links": [
{
"href": "https://api.paypal.com/v1/payments/sale/36C38912MN9658832",
"rel": "self",
"method": "GET"
},
{
"href": "https://api.paypal.com/v1/payments/sale/36C38912MN9658832/refund",
"rel": "refund",
"method": "POST"
},
{
"href":
"https://api.paypal.com/v1/payments/payment/PAY-5YK922393D847794YKER7MUI",
"rel": "parent_payment",
"method": "GET"
}
]
}
}
]
}
],
"links": [
{
"href": "https://api.paypal.com/v1/payments/payment/PAY-5YK922393D847794YKER7MUI",
"rel": "self",
"method": "GET"
}
]
})
)
return event, o1
def get_test_charge(order: Order):
return {
"id": "36C38912MN9658832",
"create_time": "2013-02-19T22:01:53Z",
"update_time": "2013-02-19T22:01:55Z",
"state": "completed",
"amount": {
"total": "7.47",
"currency": "USD"
},
"protection_eligibility": "ELIGIBLE",
"protection_eligibility_type": "ITEM_NOT_RECEIVED_ELIGIBLE,UNAUTHORIZED_PAYMENT_ELIGIBLE",
"transaction_fee": {
"value": "1.75",
"currency": "USD"
},
"parent_payment": "PAY-5YK922393D847794YKER7MUI",
"links": [
{
"href": "https://api.paypal.com/v1/payments/sale/36C38912MN9658832",
"rel": "self",
"method": "GET"
},
{
"href": "https://api.paypal.com/v1/payments/sale/36C38912MN9658832/refund",
"rel": "refund",
"method": "POST"
},
{
"href": "https://api.paypal.com/v1/payments/payment/PAY-5YK922393D847794YKER7MUI",
"rel": "parent_payment",
"method": "GET"
}
]
}
@pytest.mark.django_db
def test_webhook_all_good(env, client, monkeypatch):
charge = get_test_charge(env[1])
monkeypatch.setattr("paypalrestsdk.Sale.find", lambda *args: charge)
monkeypatch.setattr("pretix.plugins.paypal.payment.Paypal.init_api", lambda *args: None)
client.post('/dummy/dummy/paypal/webhook/', json.dumps(
{
"id": "WH-2WR32451HC0233532-67976317FL4543714",
"create_time": "2014-10-23T17:23:52Z",
"resource_type": "sale",
"event_type": "PAYMENT.SALE.COMPLETED",
"summary": "A successful sale payment was made for $ 0.48 USD",
"resource": {
"amount": {
"total": "-0.01",
"currency": "USD"
},
"id": "36C38912MN9658832",
"parent_payment": "PAY-5YK922393D847794YKER7MUI",
"update_time": "2014-10-31T15:41:51Z",
"state": "completed",
"create_time": "2014-10-31T15:41:51Z",
"links": [],
"sale_id": "9T0916710M1105906"
},
"links": [],
"event_version": "1.0"
}
), content_type='application_json')
order = env[1]
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
@pytest.mark.django_db
def test_webhook_mark_paid(env, client, monkeypatch):
order = env[1]
order.status = Order.STATUS_PENDING
order.save()
charge = get_test_charge(env[1])
monkeypatch.setattr("paypalrestsdk.Sale.find", lambda *args: charge)
monkeypatch.setattr("pretix.plugins.paypal.payment.Paypal.init_api", lambda *args: None)
client.post('/dummy/dummy/paypal/webhook/', json.dumps(
{
"id": "WH-2WR32451HC0233532-67976317FL4543714",
"create_time": "2014-10-23T17:23:52Z",
"resource_type": "sale",
"event_type": "PAYMENT.SALE.COMPLETED",
"summary": "A successful sale payment was made for $ 0.48 USD",
"resource": {
"amount": {
"total": "-0.01",
"currency": "USD"
},
"id": "36C38912MN9658832",
"parent_payment": "PAY-5YK922393D847794YKER7MUI",
"update_time": "2014-10-31T15:41:51Z",
"state": "completed",
"create_time": "2014-10-31T15:41:51Z",
"links": [],
"sale_id": "9T0916710M1105906"
},
"links": [],
"event_version": "1.0"
}
), content_type='application_json')
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
@pytest.mark.django_db
def test_webhook_refund1(env, client, monkeypatch):
charge = get_test_charge(env[1])
charge['state'] = 'refunded'
monkeypatch.setattr("paypalrestsdk.Sale.find", lambda *args: charge)
monkeypatch.setattr("pretix.plugins.paypal.payment.Paypal.init_api", lambda *args: None)
client.post('/dummy/dummy/paypal/webhook/', json.dumps(
{
# Sample obtained in a sandbox webhook
"id": "WH-9K829080KA1622327-31011919VC6498738",
"create_time": "2017-01-15T20:15:36Z",
"resource_type": "refund",
"event_type": "PAYMENT.SALE.REFUNDED",
"summary": "A EUR 255.41 EUR sale payment was refunded",
"resource": {
"amount": {
"total": "255.41",
"currency": "EUR"
},
"id": "75S46770PP192124D",
"parent_payment": "PAY-5YK922393D847794YKER7MUI",
"update_time": "2017-01-15T20:15:06Z",
"create_time": "2017-01-15T20:14:29Z",
"state": "completed",
"links": [],
"refund_to_payer": {
"value": "255.41",
"currency": "EUR"
},
"invoice_number": "",
"refund_reason_code": "REFUND",
"sale_id": "9T0916710M1105906"
},
"links": [],
"event_version": "1.0"
}
), content_type='application_json')
order = env[1]
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
ra = RequiredAction.objects.get(action_type="pretix.plugins.paypal.refund")
client.login(username='dummy@dummy.dummy', password='dummy')
client.post('/control/event/dummy/dummy/paypal/refund/{}/'.format(ra.pk))
order = env[1]
order.refresh_from_db()
assert order.status == Order.STATUS_REFUNDED
@pytest.mark.django_db
def test_webhook_refund2(env, client, monkeypatch):
charge = get_test_charge(env[1])
charge['state'] = 'refunded'
monkeypatch.setattr("paypalrestsdk.Sale.find", lambda *args: charge)
monkeypatch.setattr("pretix.plugins.paypal.payment.Paypal.init_api", lambda *args: None)
client.post('/dummy/dummy/paypal/webhook/', json.dumps(
{
# Sample obtained in the webhook simulator
"id": "WH-2N242548W9943490U-1JU23391CS4765624",
"create_time": "2014-10-31T15:42:24Z",
"resource_type": "sale",
"event_type": "PAYMENT.SALE.REFUNDED",
"summary": "A 0.01 USD sale payment was refunded",
"resource": {
"amount": {
"total": "-0.01",
"currency": "USD"
},
"id": "36C38912MN9658832",
"parent_payment": "PAY-5YK922393D847794YKER7MUI",
"update_time": "2014-10-31T15:41:51Z",
"state": "completed",
"create_time": "2014-10-31T15:41:51Z",
"links": [],
"sale_id": "9T0916710M1105906"
},
"links": [],
"event_version": "1.0"
}
), content_type='application_json')
order = env[1]
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
ra = RequiredAction.objects.get(action_type="pretix.plugins.paypal.refund")
client.login(username='dummy@dummy.dummy', password='dummy')
client.post('/control/event/dummy/dummy/paypal/refund/{}/'.format(ra.pk))
order = env[1]
order.refresh_from_db()
assert order.status == Order.STATUS_REFUNDED
| 37.467492 | 118 | 0.472401 |
c82b82212eb1a47e62fbba37222cb3cd2609ee06 | 3,759 | py | Python | tests/test_emu_cal/test_emu_passfunc.py | mosesyhc/surmise | 3b32640031ce9b0d653d3bcb594b7d0275aa8237 | ["MIT"] | 10 | 2021-02-06T02:04:05.000Z | 2022-02-20T05:48:55.000Z | tests/test_emu_cal/test_emu_passfunc.py | mosesyhc/surmise | 3b32640031ce9b0d653d3bcb594b7d0275aa8237 | ["MIT"] | 13 | 2021-02-06T18:26:55.000Z | 2021-12-21T14:28:38.000Z | tests/test_emu_cal/test_emu_passfunc.py | mosesyhc/surmise | 3b32640031ce9b0d653d3bcb594b7d0275aa8237 | ["MIT"] | 4 | 2021-02-12T22:37:41.000Z | 2022-03-16T19:32:11.000Z |
import numpy as np
import scipy.stats as sps
import pytest
from contextlib import contextmanager
from surmise.emulation import emulator
# example to illustrate the user inputted pass function
def borehole_failmodel(x, theta):
"""Given x and theta,
return matrix of [row x] times [row theta] of values."""
f = borehole_model(x, theta)
wheretoobig = np.where((f / borehole_true(x)) > 1.25)
f[wheretoobig[0], wheretoobig[1]] = np.inf
return f
def borehole_model(x, theta):
"""Given x and theta,
return matrix of [row x] times [row theta] of values."""
theta = tstd2theta(theta)
x = xstd2x(x)
p = x.shape[0]
n = theta.shape[0]
theta_stacked = np.repeat(theta, repeats=p, axis=0)
x_stacked = np.tile(x.astype(float), (n, 1))
f = borehole_vec(x_stacked, theta_stacked).reshape((n, p))
return f.T
def borehole_true(x):
"""Given x, return matrix of [row x] times 1 of values."""
# assume true theta is [0.5]^d
theta0 = np.atleast_2d(np.array([0.5] * 4))
f0 = borehole_model(x, theta0)
return f0
def borehole_vec(x, theta):
"""Given x and theta, return vector of values."""
(Hu, Ld_Kw, Treff, powparam) = np.split(theta, theta.shape[1], axis=1)
(rw, Hl) = np.split(x[:, :-1], 2, axis=1)
numer = 2 * np.pi * (Hu - Hl)
denom1 = 2 * Ld_Kw / rw ** 2
denom2 = Treff
f = ((numer / ((denom1 + denom2))) * np.exp(powparam * rw)).reshape(-1)
return f
def tstd2theta(tstd, hard=True):
"""Given standardized theta in [0, 1]^d, return non-standardized theta."""
if tstd.ndim < 1.5:
tstd = tstd[:, None].T
(Treffs, Hus, LdKw, powparams) = np.split(tstd, tstd.shape[1], axis=1)
Treff = (0.5-0.05) * Treffs + 0.05
Hu = Hus * (1110 - 990) + 990
if hard:
Ld_Kw = LdKw * (1680 / 1500 - 1120 / 15000) + 1120 / 15000
else:
Ld_Kw = LdKw * (1680 / 9855 - 1120 / 12045) + 1120 / 12045
powparam = powparams * (0.5 - (- 0.5)) + (-0.5)
theta = np.hstack((Hu, Ld_Kw, Treff, powparam))
return theta
def xstd2x(xstd):
"""Given standardized x in [0, 1]^2 x {0, 1}, return non-standardized x."""
if xstd.ndim < 1.5:
xstd = xstd[:, None].T
(rws, Hls, labels) = np.split(xstd, xstd.shape[1], axis=1)
rw = rws * (np.log(0.5) - np.log(0.05)) + np.log(0.05)
rw = np.exp(rw)
Hl = Hls * (820 - 700) + 700
x = np.hstack((rw, Hl, labels))
return x
class thetaprior:
""" This defines the class instance of priors provided to the methods. """
def lpdf(theta):
if theta.ndim > 1.5:
return np.squeeze(np.sum(sps.norm.logpdf(theta, 1, 0.5), 1))
else:
return np.squeeze(np.sum(sps.norm.logpdf(theta, 1, 0.5)))
def rnd(n):
return np.vstack((sps.norm.rvs(1, 0.5, size=(n, 4))))
x = sps.uniform.rvs(0, 1, [50, 3])
x[:, 2] = x[:, 2] > 0.5
yt = np.squeeze(borehole_true(x))
yvar = (10 ** (-2)) * np.ones(yt.shape)
thetatot = (thetaprior.rnd(15))
y = yt + sps.norm.rvs(0, np.sqrt(yvar))
@contextmanager
def does_not_raise():
yield
# test to check the emulator with a passed function
@pytest.mark.parametrize(
"expectation",
[
(does_not_raise()),
],
)
def test_passfunction(expectation):
with expectation:
assert emulator(passthroughfunc=borehole_model,
method='PCGP') is not None
# test to check the emulator predict with a passed function
@pytest.mark.parametrize(
"expectation",
[
(does_not_raise()),
],
)
def test_passfunction_predict(expectation):
with expectation:
emu = emulator(passthroughfunc=borehole_model,
method='PCGP')
assert emu.predict(x=x, theta=thetatot) is not None
| 28.263158 | 79 | 0.602554 |
3ab8d5d006b950260aef6f44f4a72e39120a0ef8 | 16,303 | py | Python | tests/integration/test_merge_tree_s3/test.py | uniquechao/ClickHouse | 1533f9b9aad3e8e6135179f11b4d8fdc99ce4be6 | ["Apache-2.0"] | 2 | 2020-06-11T11:48:42.000Z | 2020-10-10T12:08:13.000Z | tests/integration/test_merge_tree_s3/test.py | uniquechao/ClickHouse | 1533f9b9aad3e8e6135179f11b4d8fdc99ce4be6 | ["Apache-2.0"] | 13 | 2019-06-06T09:45:53.000Z | 2020-05-15T12:03:45.000Z | tests/integration/test_merge_tree_s3/test.py | uniquechao/ClickHouse | 1533f9b9aad3e8e6135179f11b4d8fdc99ce4be6 | ["Apache-2.0"] | 22 | 2019-06-14T10:31:51.000Z | 2020-10-12T14:57:44.000Z |
import logging
import random
import string
import time
import pytest
from helpers.cluster import ClickHouseCluster
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("node", main_configs=["configs/config.d/storage_conf.xml",
"configs/config.d/bg_processing_pool_conf.xml",
"configs/config.d/log_conf.xml"], user_configs=[], with_minio=True)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
yield cluster
finally:
cluster.shutdown()
FILES_OVERHEAD = 1
FILES_OVERHEAD_PER_COLUMN = 2 # Data and mark files
FILES_OVERHEAD_PER_PART_WIDE = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1
FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1
def random_string(length):
letters = string.ascii_letters
return ''.join(random.choice(letters) for i in range(length))
def generate_values(date_str, count, sign=1):
data = [[date_str, sign * (i + 1), random_string(10)] for i in range(count)]
data.sort(key=lambda tup: tup[1])
return ",".join(["('{}',{},'{}')".format(x, y, z) for x, y, z in data])
def create_table(cluster, table_name, additional_settings=None):
node = cluster.instances["node"]
create_table_statement = """
CREATE TABLE {} (
dt Date,
id Int64,
data String,
INDEX min_max (id) TYPE minmax GRANULARITY 3
) ENGINE=MergeTree()
PARTITION BY dt
ORDER BY (dt, id)
SETTINGS
storage_policy='s3',
old_parts_lifetime=0,
index_granularity=512
""".format(table_name)
if additional_settings:
create_table_statement += ","
create_table_statement += additional_settings
node.query(create_table_statement)
@pytest.fixture(autouse=True)
def drop_table(cluster):
yield
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
try:
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 0
finally:
# Remove extra objects to prevent tests cascade failing
for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
minio.remove_object(cluster.minio_bucket, obj.object_name)
@pytest.mark.parametrize(
"min_rows_for_wide_part,files_per_part",
[
(0, FILES_OVERHEAD_PER_PART_WIDE),
(8192, FILES_OVERHEAD_PER_PART_COMPACT)
]
)
def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part):
create_table(cluster, "s3_test", additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part))
node = cluster.instances["node"]
minio = cluster.minio_client
values1 = generate_values('2020-01-03', 4096)
node.query("INSERT INTO s3_test VALUES {}".format(values1))
assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values") == values1
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + files_per_part
values2 = generate_values('2020-01-04', 4096)
node.query("INSERT INTO s3_test VALUES {}".format(values2))
assert node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values") == values1 + "," + values2
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + files_per_part * 2
assert node.query("SELECT count(*) FROM s3_test where id = 1 FORMAT Values") == "(2)"
@pytest.mark.parametrize(
"merge_vertical", [False, True]
)
def test_insert_same_partition_and_merge(cluster, merge_vertical):
settings = None
if merge_vertical:
settings = """
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0
"""
create_table(cluster, "s3_test", additional_settings=settings)
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("SYSTEM STOP MERGES s3_test")
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 1024)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 2048)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 1024, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 2048, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE * 6 + FILES_OVERHEAD
node.query("SYSTEM START MERGES s3_test")
# Wait for merges and old parts deletion
for attempt in range(0, 10):
parts_count = node.query("SELECT COUNT(*) FROM system.parts WHERE table = 's3_test' FORMAT Values")
if parts_count == "(1)":
break
if attempt == 9:
assert parts_count == "(1)"
time.sleep(1)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD
def test_alter_table_columns(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
node.query("ALTER TABLE s3_test ADD COLUMN col1 UInt64 DEFAULT 1")
# To ensure parts have merged
node.query("OPTIMIZE TABLE s3_test")
# Wait for merges, mutations and old parts deletion
time.sleep(3)
assert node.query("SELECT sum(col1) FROM s3_test FORMAT Values") == "(8192)"
assert node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") == "(4096)"
assert len(list(minio.list_objects(cluster.minio_bucket,
'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN
node.query("ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2})
# Wait for old parts deletion
time.sleep(3)
assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')"
# and file with mutation
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == (
FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1)
node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2})
# Wait for old parts deletion
time.sleep(3)
# and 2 files with mutations
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2
def test_attach_detach_partition(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test ATTACH PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test DROP PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE
node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-04'")
node.query("ALTER TABLE s3_test DROP DETACHED PARTITION '2020-01-04'", settings={"allow_drop_detached": 1})
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
def test_move_partition_to_another_disk(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 's3'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
def test_table_manipulations(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("RENAME TABLE s3_test TO s3_renamed")
assert node.query("SELECT count(*) FROM s3_renamed FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("RENAME TABLE s3_renamed TO s3_test")
assert node.query("CHECK TABLE s3_test FORMAT Values") == "(1)"
node.query("DETACH TABLE s3_test")
node.query("ATTACH TABLE s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("TRUNCATE TABLE s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
def test_move_replace_partition_to_another_table(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 4096, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-06', 4096, -1)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
create_table(cluster, "s3_clone")
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-03' TO TABLE s3_clone")
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-05' TO TABLE s3_clone")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Number of objects in S3 should be unchanged.
assert len(list(
minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4
    # Add new partitions to the source table with different values, then replace them from the copied table.
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 4096)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert len(list(
minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6
node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-03' FROM s3_clone")
node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-05' FROM s3_clone")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Wait for outdated partitions deletion.
time.sleep(3)
assert len(list(
minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4
node.query("DROP TABLE s3_clone NO DELAY")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
# Data should remain in S3
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
node.query("ALTER TABLE s3_test FREEZE")
    # Number of S3 objects should be unchanged.
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
node.query("DROP TABLE s3_test NO DELAY")
# Backup data should remain in S3.
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE * 4
for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
minio.remove_object(cluster.minio_bucket, obj.object_name)
def test_freeze_unfreeze(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup1'")
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup2'")
node.query("TRUNCATE TABLE s3_test")
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
# Unfreeze single partition from backup1.
node.query("ALTER TABLE s3_test UNFREEZE PARTITION '2020-01-03' WITH NAME 'backup1'")
# Unfreeze all partitions from backup2.
node.query("ALTER TABLE s3_test UNFREEZE WITH NAME 'backup2'")
# Data should be removed from S3.
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
| 44.062162
| 126
| 0.691529
|
0c65afc9dacd136a8ff5a0e679dd3c0fe8ec2833
| 3,720
|
py
|
Python
|
RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_GeneralPurpose_V1_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_GeneralPurpose_V1_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_GeneralPurpose_V1_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry
from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import *
import FWCore.ParameterSet.Config as cms
#
# In this file we define the locations of the MVA weights, cuts on the MVA values
# for specific working points, and configure those cuts in VID
#
#
# The following MVA is tuned on Spring16 MC samples.
# See more documentation in this presentation (P.Pigard):
# https://indico.cern.ch/event/491544/contributions/2321565/attachments/1346333/2030225/20160929_EGM_v4.pdf
#
# The tag is an extra string attached to the names of the products
# such as ValueMaps that needs to distinguish cases when the same MVA estimator
# class is used with different tuning/weights
mvaTag = "Spring16GeneralPurposeV1"
# There are 3 categories in this MVA. They have to be configured in this strict order
# (cuts and weight files order):
# 0 EB1 (eta<0.8) pt 10-inf GeV
# 1 EB2 (eta>=0.8) pt 10-inf GeV
# 2 EE pt 10-inf GeV
mvaSpring16WeightFiles_V1 = cms.vstring(
"RecoEgamma/ElectronIdentification/data/Spring16_GeneralPurpose_V1/electronID_mva_Spring16_GeneralPurpose_V1_EB1_10.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Spring16_GeneralPurpose_V1/electronID_mva_Spring16_GeneralPurpose_V1_EB2_10.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Spring16_GeneralPurpose_V1/electronID_mva_Spring16_GeneralPurpose_V1_EE_10.weights.xml.gz"
)
### WPs chosen to give about 90% and 80% signal efficiency for electrons from Drell-Yan with pT > 25 GeV
### For turn-on and details see documentation linked above
MVA_WP90 = EleMVA_WP(
idName = "mvaEleID-Spring16-GeneralPurpose-V1-wp90", mvaTag = mvaTag,
cutCategory0 = "0.836695742607", # EB1
cutCategory1 = "0.715337944031", # EB2
cutCategory2 = "0.356799721718", # EE
)
MVA_WP80 = EleMVA_WP(
idName = "mvaEleID-Spring16-GeneralPurpose-V1-wp80", mvaTag = mvaTag,
cutCategory0 = "0.940962684155", # EB1
cutCategory1 = "0.899208843708", # EB2
cutCategory2 = "0.758484721184", # EE
)
workingPoints = dict(
wp80 = MVA_WP80,
wp90 = MVA_WP90
)
#
# Finally, set up VID configuration for all cuts
#
# Create the PSet that will be fed to the MVA value map producer
mvaEleID_Spring16_GeneralPurpose_V1_producer_config = cms.PSet(
mvaName = cms.string(mvaClassName),
mvaTag = cms.string(mvaTag),
# Category parameters
nCategories = cms.int32(3),
categoryCuts = cms.vstring(*EleMVA_3CategoriesCuts),
# Weight files and variable definitions
weightFileNames = mvaSpring16WeightFiles_V1,
variableDefinition = cms.string(mvaVariablesFile)
)
# Create the VPset's for VID cuts
mvaEleID_Spring16_GeneralPurpose_V1_wp90 = configureVIDMVAEleID( MVA_WP90 )
mvaEleID_Spring16_GeneralPurpose_V1_wp80 = configureVIDMVAEleID( MVA_WP80 )
# The MD5 sum numbers below reflect the exact set of cut variables
# and values above. If anything changes, one has to
# 1) comment out the lines below about the registry,
# 2) run "calculateMD5 <this file name> <one of the VID config names just above>
# 3) update the MD5 sum strings below and uncomment the lines again.
#
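# Example of step 2 above (names taken from this file; shown only as a sketch):
#   calculateMD5 mvaElectronID_Spring16_GeneralPurpose_V1_cff.py mvaEleID_Spring16_GeneralPurpose_V1_wp90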
central_id_registry.register(mvaEleID_Spring16_GeneralPurpose_V1_wp90.idName,
'14c153aaf3c207deb3ad4932586647a7')
central_id_registry.register(mvaEleID_Spring16_GeneralPurpose_V1_wp80.idName,
'b490bc0b0af2d5f3e9efea562370af2a')
mvaEleID_Spring16_GeneralPurpose_V1_wp90.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Spring16_GeneralPurpose_V1_wp80.isPOGApproved = cms.untracked.bool(True)
| 42.272727
| 136
| 0.756989
|
33b465b0688c0716fefeed6b857609f35dda1dbe
| 686
|
py
|
Python
|
scripts/prep-flooding_step-3_distances.py
|
avinashkalyani/flooding_brains
|
be4c6480efed906ded5d1219bcc905611f1ceaea
|
[
"MIT"
] | null | null | null |
scripts/prep-flooding_step-3_distances.py
|
avinashkalyani/flooding_brains
|
be4c6480efed906ded5d1219bcc905611f1ceaea
|
[
"MIT"
] | null | null | null |
scripts/prep-flooding_step-3_distances.py
|
avinashkalyani/flooding_brains
|
be4c6480efed906ded5d1219bcc905611f1ceaea
|
[
"MIT"
] | null | null | null |
"""Find borders of a selected tissue in a segmentation file."""
import os
import subprocess
# Path to LayNii (folder where it is installed in your system)
LAYNII_PATH = "/home/faruk/Git/LAYNII"
FILE1 = "path/to/okapi_cerebrum_RH_v06_borders.nii.gz"
FILE2 = "path/to/okapi_cerebrum_RH_v06_borders_points4.nii.gz"
# Number of points that will be generated on the borders
NR_POINTS = 4
# -----------------------------------------------------------------------------
# Run LayNii
command = os.path.join(LAYNII_PATH, "LN2_GEODISTANCE ")
command += "-domain {} ".format(FILE1)
command += "-init {} ".format(FILE2)
print(command)
subprocess.run(command, shell=True)
print('Finished.\n')
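# For the values above, the assembled command is:
#   /home/faruk/Git/LAYNII/LN2_GEODISTANCE -domain path/to/okapi_cerebrum_RH_v06_borders.nii.gz -init path/to/okapi_cerebrum_RH_v06_borders_points4.nii.gz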
| 28.583333
| 79
| 0.661808
|
a98a034a4be40900ef31364b5bea7a3c76a9a536
| 17,949
|
py
|
Python
|
src/the_tale/the_tale/game/tests/test_logic.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 85
|
2017-11-21T12:22:02.000Z
|
2022-03-27T23:07:17.000Z
|
src/the_tale/the_tale/game/tests/test_logic.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 545
|
2017-11-04T14:15:04.000Z
|
2022-03-27T14:19:27.000Z
|
src/the_tale/the_tale/game/tests/test_logic.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 45
|
2017-11-11T12:36:30.000Z
|
2022-02-25T06:10:44.000Z
|
import smart_imports
smart_imports.all()
class LogicTests(utils_testcase.TestCase):
def setUp(self):
super(LogicTests, self).setUp()
logic.create_test_map()
self.account = self.accounts_factory.create_account(is_fast=True)
def test_remove_game_data(self):
self.assertEqual(heroes_models.Hero.objects.count(), 1)
logic.remove_game_data(self.account)
self.assertEqual(heroes_models.Hero.objects.count(), 0)
class FormGameInfoTests(pvp_helpers.PvPTestsMixin, utils_testcase.TestCase):
def setUp(self):
super(FormGameInfoTests, self).setUp()
logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_2 = self.accounts_factory.create_account()
pvp_tt_services.matchmaker.cmd_debug_clear_service()
def test_no_account(self):
data = logic.form_game_info()
self.assertEqual(data['mode'], 'pve')
self.assertEqual(data['account'], None)
self.assertEqual(data['enemy'], None)
def test_account(self):
data = logic.form_game_info(self.account_1, is_own=True)
self.assertEqual(data['mode'], 'pve')
self.assertEqual(data['account']['id'], self.account_1.id)
self.assertEqual(data['enemy'], None)
def test_account__other(self):
data = logic.form_game_info(self.account_2, is_own=True)
self.assertEqual(data['mode'], 'pve')
self.assertEqual(data['account']['id'], self.account_2.id)
self.assertEqual(data['enemy'], None)
def test_pvp(self):
self.create_pvp_battle(account_1=self.account_1, account_2=self.account_2)
data = logic.form_game_info(self.account_1)
self.assertEqual(data['mode'], 'pvp')
self.assertTrue(data['account']['hero']['action']['data']['is_pvp'])
self.assertTrue(data['enemy']['hero']['action']['data']['is_pvp'])
self.assertEqual(data['account']['hero']['action']['data']['enemy_id'], self.account_2.id)
self.assertEqual(data['enemy']['hero']['action']['data']['enemy_id'], self.account_1.id)
self.assertEqual(data['account']['id'], self.account_1.id)
self.assertEqual(data['enemy']['id'], self.account_2.id)
def test_own_hero_get_cached_data(self):
hero = heroes_logic.load_hero(account_id=self.account_1.id)
with mock.patch('the_tale.game.heroes.objects.Hero.cached_ui_info_for_hero',
mock.Mock(return_value={'actual_on_turn': hero.saved_at_turn,
'pvp': 'actual',
'action': {},
'ui_caching_started_at': 0})) as cached_ui_info_for_hero:
with mock.patch('the_tale.game.heroes.objects.Hero.ui_info') as ui_info:
data = logic.form_game_info(self.account_1, is_own=True)
self.assertEqual(data['account']['hero']['pvp'], 'actual')
self.assertEqual(data['enemy'], None)
self.assertEqual(cached_ui_info_for_hero.call_count, 1)
self.assertEqual(cached_ui_info_for_hero.call_args, mock.call(account_id=self.account_1.id, recache_if_required=True, patch_turns=None, for_last_turn=False))
self.assertEqual(ui_info.call_count, 0)
def create_not_own_ui_info(self, hero, enemy_id=None):
pvp_data = None
if enemy_id is not None:
pvp_data = {'is_pvp': True,
'enemy_id': enemy_id,
'pvp__actual': 'actual',
'pvp__last_turn': 'last_turn'}
return {'actual_on_turn': hero.saved_at_turn,
'action': {'data': pvp_data},
'ui_caching_started_at': 0,
'changed_fields': []}
def test_not_own_hero_get_cached_data__not_cached(self):
hero = heroes_logic.load_hero(account_id=self.account_1.id)
with mock.patch('the_tale.game.heroes.objects.Hero.cached_ui_info_for_hero',
mock.Mock(return_value=self.create_not_own_ui_info(hero, enemy_id=self.account_2.id))) as cached_ui_info_for_hero:
with mock.patch('the_tale.game.heroes.objects.Hero.ui_info',
mock.Mock(return_value=self.create_not_own_ui_info(hero, enemy_id=self.account_2.id))) as ui_info:
logic.form_game_info(self.account_1, is_own=False)
self.assertEqual(cached_ui_info_for_hero.call_count, 2)
self.assertEqual(cached_ui_info_for_hero.call_args_list,
[mock.call(account_id=self.account_1.id, recache_if_required=False, patch_turns=None, for_last_turn=True),
mock.call(account_id=self.account_2.id, recache_if_required=False, patch_turns=None, for_last_turn=True)])
self.assertEqual(ui_info.call_count, 0)
@mock.patch.object(utils_cache, 'get', lambda *argv, **kwargs: None)
def test_not_own_hero_get_cached_data(self):
battle_info = self.create_pvp_battle(account_1=self.account_1, account_2=self.account_2)
with mock.patch('the_tale.game.heroes.objects.Hero.ui_info',
lambda *argv, **kwargs: self.create_not_own_ui_info(battle_info.hero_1,
enemy_id=battle_info.hero_2.id)):
data = logic.form_game_info(self.account_1, is_own=False)
self.assertEqual(data['account']['hero']['action']['data']['pvp'], 'last_turn')
self.assertEqual(data['enemy']['hero']['action']['data']['pvp'], 'last_turn')
self.assertFalse('pvp__actual' in data['account']['hero']['action']['data']['pvp'])
self.assertFalse('pvp__last_turn' in data['account']['hero']['action']['data']['pvp'])
self.assertFalse('pvp__actual' in data['enemy']['hero']['action']['data']['pvp'])
self.assertFalse('pvp__last_turn' in data['enemy']['hero']['action']['data']['pvp'])
def test_is_old(self):
self.assertFalse(logic.form_game_info(self.account_1, is_own=True)['account']['is_old'])
game_turn.set(666)
self.assertTrue(logic.form_game_info(self.account_1, is_own=True)['account']['is_old'])
heroes_logic.save_hero(heroes_logic.load_hero(account_id=self.account_1.id))
self.assertFalse(logic.form_game_info(self.account_1, is_own=True)['account']['is_old'])
def test_is_old__not_own_hero(self):
self.assertFalse(logic.form_game_info(self.account_1, is_own=False)['account']['is_old'])
game_turn.set(666)
self.assertTrue(logic.form_game_info(self.account_1, is_own=False)['account']['is_old'])
heroes_logic.save_hero(heroes_logic.load_hero(account_id=self.account_1.id))
self.assertFalse(logic.form_game_info(self.account_1, is_own=False)['account']['is_old'])
def test_is_old__pvp(self):
battle_info = self.create_pvp_battle(account_1=self.account_1, account_2=self.account_2)
self.assertFalse(logic.form_game_info(self.account_1)['account']['is_old'])
self.assertFalse(logic.form_game_info(self.account_1)['enemy']['is_old'])
game_turn.set(666)
self.assertTrue(logic.form_game_info(self.account_1)['account']['is_old'])
self.assertTrue(logic.form_game_info(self.account_1)['enemy']['is_old'])
battle_info.storage.save_changed_data()
self.assertFalse(logic.form_game_info(self.account_1)['account']['is_old'])
self.assertFalse(logic.form_game_info(self.account_1)['enemy']['is_old'])
def test_game_info_data_hidding(self):
'''
        the player's hero must always show actual data
        the enemy hero must always show data from the start of the turn
'''
battle_info = self.create_pvp_battle(account_1=self.account_1, account_2=self.account_2)
hero_1_pvp, hero_2_pvp = pvp_logic.get_arena_heroes_pvp(battle_info.hero_1)
hero_1_pvp.set_energy(1)
hero_2_pvp.set_energy(2)
battle_info.storage.save_all()
heroes_objects.Hero.reset_ui_cache(self.account_1.id)
heroes_objects.Hero.reset_ui_cache(self.account_2.id)
data = logic.form_game_info(battle_info.account_1, is_own=True)
self.assertEqual(data['account']['hero']['action']['data']['pvp']['energy'], 1)
self.assertEqual(data['enemy']['hero']['action']['data']['pvp']['energy'], 0)
hero_2_pvp.store_turn_data()
battle_info.storage.save_changed_data()
data = logic.form_game_info(battle_info.account_1, is_own=True)
self.assertEqual(data['enemy']['hero']['action']['data']['pvp']['energy'], 2)
@mock.patch.object(utils_cache, 'get', lambda *argv, **kwargs: None)
def test_game_info_caching(self):
battle_info = self.create_pvp_battle(account_1=self.account_1, account_2=self.account_2)
def get_ui_info(hero, **kwargs):
if hero.id == battle_info.hero_1.id:
return {'actual_on_turn': battle_info.hero_1.saved_at_turn,
'action': {'data': {'is_pvp': True,
'enemy_id': battle_info.hero_2.id,
'pvp__actual': 'actual',
'pvp__last_turn': 'last_turn'}},
'changed_fields': [],
'ui_caching_started_at': 0}
else:
return self.create_not_own_ui_info(battle_info.hero_2, enemy_id=self.account_1.id)
with mock.patch('the_tale.game.heroes.objects.Hero.ui_info', get_ui_info):
data = logic.form_game_info(self.account_1, is_own=True)
self.assertEqual(data['account']['hero']['action']['data']['pvp'], 'actual')
self.assertEqual(data['enemy']['hero']['action']['data']['pvp'], 'last_turn')
self.assertFalse('pvp__actual' in data['account']['hero']['action']['data']['pvp'])
self.assertFalse('pvp__last_turn' in data['account']['hero']['action']['data']['pvp'])
self.assertFalse('pvp__actual' in data['enemy']['hero']['action']['data']['pvp'])
self.assertFalse('pvp__last_turn' in data['enemy']['hero']['action']['data']['pvp'])
class HighlevelStepTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = logic.create_test_map()
def test_places_methods_called(self):
        # all of these methods are tested in the places package
set_power_economic = mock.Mock()
sync_size = mock.Mock()
sync_habits = mock.Mock()
refresh_attributes = mock.Mock()
mark_as_updated = mock.Mock()
set_area = mock.Mock()
sync_race = mock.Mock()
update_heroes_habits = mock.Mock()
with mock.patch('the_tale.game.places.attributes.Attributes.set_power_economic', set_power_economic), \
mock.patch('the_tale.game.places.attributes.Attributes.sync_size', sync_size), \
mock.patch('the_tale.game.places.attributes.Attributes.set_area', set_area), \
mock.patch('the_tale.game.places.objects.Place.sync_habits', sync_habits), \
mock.patch('the_tale.game.places.objects.Place.sync_race', sync_race), \
mock.patch('the_tale.game.places.objects.Place.refresh_attributes', refresh_attributes), \
mock.patch('the_tale.game.places.objects.Place.update_heroes_habits', update_heroes_habits), \
mock.patch('the_tale.game.places.objects.Place.mark_as_updated', mark_as_updated):
logic.highlevel_step(logger=mock.Mock())
places_number = len(places_storage.places.all())
for method in (set_power_economic,
sync_size,
sync_habits,
refresh_attributes,
mark_as_updated,
set_area,
sync_race,
update_heroes_habits):
self.assertEqual(method.call_count, places_number)
def give_power_to_person(self, person, power, fame):
impacts = persons_logic.tt_power_impacts(person_inner_circle=False,
place_inner_circle=False,
actor_type=tt_api_impacts.OBJECT_TYPE.HERO,
actor_id=666,
person=person,
amount=power,
fame=fame)
politic_power_logic.add_power_impacts(impacts)
def give_power_to_place(self, place, power, fame):
impacts = places_logic.tt_power_impacts(inner_circle=False,
actor_type=tt_api_impacts.OBJECT_TYPE.HERO,
actor_id=666,
place=place,
amount=power,
fame=fame)
politic_power_logic.add_power_impacts(impacts)
@mock.patch('tt_logic.politic_power.constants.POWER_REDUCE_FRACTION', 0.9)
@mock.patch('the_tale.game.persons.attributes.Attributes.places_help_amount', 1)
@mock.patch('the_tale.game.places.attributes.Attributes.freedom', 1)
@mock.patch('the_tale.game.places.objects.Place.refresh_attributes', mock.Mock())
def test_sync_data(self):
game_tt_services.debug_clear_service()
self.assertEqual(politic_power_storage.places.outer_power(self.places[0].id), 0)
self.assertEqual(politic_power_storage.places.inner_power(self.places[0].id), 0)
self.assertEqual(politic_power_storage.places.outer_power(self.places[1].id), 0)
self.assertEqual(politic_power_storage.places.inner_power(self.places[1].id), 0)
self.assertEqual(politic_power_storage.places.outer_power(self.places[2].id), 0)
self.assertEqual(politic_power_storage.places.inner_power(self.places[2].id), 0)
self.assertEqual(persons_models.Person.objects.filter(place_id=self.places[0].id).count(), 3)
self.assertEqual(persons_models.Person.objects.filter(place_id=self.places[1].id).count(), 3)
self.assertEqual(persons_models.Person.objects.filter(place_id=self.places[2].id).count(), 3)
self.assertEqual(len(persons_storage.persons.all()), 9)
popularity = places_logic.get_hero_popularity(666)
self.assertEqual(popularity.get_fame(self.places[0].id), 0)
self.assertEqual(popularity.get_fame(self.places[1].id), 0)
self.assertEqual(popularity.get_fame(self.places[2].id), 0)
person_1_1 = self.places[0].persons[0]
person_2_1, person_2_2 = self.places[1].persons[0:2]
person_3_1, person_3_2 = self.places[2].persons[0:2]
self.give_power_to_person(person=person_1_1, power=1, fame=2)
self.give_power_to_person(person=person_2_1, power=100, fame=200)
self.give_power_to_person(person=person_2_2, power=1000, fame=2000)
self.give_power_to_person(person=person_3_1, power=10000, fame=20000)
self.give_power_to_person(person=person_3_2, power=100000, fame=200000)
with self.check_changed(lambda: persons_storage.persons._version):
with self.check_changed(lambda: places_storage.places._version):
logic.highlevel_step(logger=mock.Mock())
self.assertTrue(self.places[0]._modifier.is_NONE)
game_turn.increment()
self.assertEqual(politic_power_storage.persons.outer_power(person_1_1.id), 0)
self.assertEqual(politic_power_storage.persons.outer_power(person_2_1.id), 90)
self.assertEqual(politic_power_storage.persons.outer_power(person_2_2.id), 900)
self.assertEqual(politic_power_storage.persons.outer_power(person_3_1.id), 9000)
self.assertEqual(politic_power_storage.persons.outer_power(person_3_2.id), 90000)
self.assertEqual(politic_power_storage.places.outer_power(self.places[0].id), 0)
self.assertEqual(politic_power_storage.places.outer_power(self.places[1].id), 990)
self.assertEqual(politic_power_storage.places.outer_power(self.places[2].id), 99000)
popularity = places_logic.get_hero_popularity(666)
self.assertEqual(popularity.get_fame(self.places[0].id), 1)
self.assertEqual(popularity.get_fame(self.places[1].id), 2189)
self.assertEqual(popularity.get_fame(self.places[2].id), 218997)
self.give_power_to_place(place=self.places[0], power=-10, fame=-20)
self.give_power_to_place(place=self.places[1], power=-1, fame=-2)
self.give_power_to_place(place=self.places[1], power=+10000000, fame=20000000)
self.give_power_to_place(place=self.places[2], power=-2, fame=-40)
self.give_power_to_place(place=self.places[2], power=+20, fame=40)
with self.check_changed(lambda: persons_storage.persons._version):
with self.check_changed(lambda: places_storage.places._version):
logic.highlevel_step(logger=mock.Mock())
self.places = [places_storage.places[self.places[0].id],
places_storage.places[self.places[1].id],
places_storage.places[self.places[2].id]]
self.assertEqual(politic_power_storage.places.outer_power(self.places[0].id), -9)
self.assertEqual(politic_power_storage.places.outer_power(self.places[1].id), 9000890)
self.assertEqual(politic_power_storage.places.outer_power(self.places[2].id), 89116)
popularity = places_logic.get_hero_popularity(666)
self.assertEqual(popularity.get_fame(self.places[0].id), 0)
self.assertEqual(popularity.get_fame(self.places[1].id), 19911015)
self.assertEqual(popularity.get_fame(self.places[2].id), 218038)
| 48.380054
| 165
| 0.646164
|
fc7f77f126c01e0e5a5ade5f8dbe3b4b3db5ef7f
| 657
|
py
|
Python
|
xu/compa/Parapluie/src/ActionWidget/PHolder.py
|
sonnts996/XuCompa-Request
|
f343e7bfd1b4263eb76438c96d347c549cc75ce3
|
[
"Apache-2.0"
] | null | null | null |
xu/compa/Parapluie/src/ActionWidget/PHolder.py
|
sonnts996/XuCompa-Request
|
f343e7bfd1b4263eb76438c96d347c549cc75ce3
|
[
"Apache-2.0"
] | null | null | null |
xu/compa/Parapluie/src/ActionWidget/PHolder.py
|
sonnts996/XuCompa-Request
|
f343e7bfd1b4263eb76438c96d347c549cc75ce3
|
[
"Apache-2.0"
] | null | null | null |
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QWidget, QStyleOption, QStyle
class PHolder(QWidget):
resized = pyqtSignal()
def __init__(self):
super().__init__()
def resizeEvent(self, a0: QtGui.QResizeEvent):
super(PHolder, self).resizeEvent(a0)
self.resized.emit()
def paintEvent(self, a0: QtGui.QPaintEvent) -> None:
opt = QStyleOption()
opt.initFrom(self)
painter = QPainter(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
def itemSize(self):
return self.sizeHint()
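# Usage sketch (added; assumes a QApplication is already running elsewhere):
#   holder = PHolder()
#   holder.resized.connect(lambda: print("holder resized to", holder.size()))
#   holder.setStyleSheet("background: #eee")  # paintEvent above lets stylesheets apply to this plain QWidget
#   holder.show()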
| 26.28
| 72
| 0.677321
|
a9aed7fa2b7e532d973931c86aee71496ad14464
| 813
|
py
|
Python
|
util.py
|
litosly/Autonomous_Drone_Racing_Mapping_and_Racing_in_2D
|
de6bbe7854cf9d7dd2009ba3e674b052e1489e6a
|
[
"MIT"
] | null | null | null |
util.py
|
litosly/Autonomous_Drone_Racing_Mapping_and_Racing_in_2D
|
de6bbe7854cf9d7dd2009ba3e674b052e1489e6a
|
[
"MIT"
] | null | null | null |
util.py
|
litosly/Autonomous_Drone_Racing_Mapping_and_Racing_in_2D
|
de6bbe7854cf9d7dd2009ba3e674b052e1489e6a
|
[
"MIT"
] | null | null | null |
import numpy as np
def sector_mask(shape,centre,radius,angle_range):
"""
Return a boolean mask for a circular sector. The start/stop angles in
"angle_range" should be given in clockwise order.
Reference: http://hk.uwenku.com/question/p-cocpxpri-oa.html
"""
x,y = np.ogrid[:shape[0],:shape[1]]
cx,cy = centre
tmin,tmax = np.deg2rad(angle_range)
# ensure stop angle > start angle
if tmax < tmin:
tmax += 2*np.pi
# convert cartesian --> polar coordinates
r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)
theta = np.arctan2(x-cx,y-cy) - tmin
# wrap angles between 0 and 2*pi
theta %= (2*np.pi)
# circular mask
circmask = r2 <= radius*radius
# angular mask
anglemask = theta <= (tmax-tmin)
return circmask*anglemask
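# Example usage (added for illustration; not part of the original module)
if __name__ == "__main__":
    # Boolean mask for the 0-90 degree sector of a radius-50 disc centred at
    # (100, 100) inside a 200x200 grid.
    demo_mask = sector_mask((200, 200), centre=(100, 100), radius=50, angle_range=(0, 90))
    print("pixels inside the sector:", int(demo_mask.sum()))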
| 26.225806
| 74
| 0.611316
|
d59edf2a2920c225d38429d11fef195607a21325
| 405
|
py
|
Python
|
petAppointment/wsgi.py
|
guillercp93/petsAppointment
|
3778312f878db138cf670fa6c5f105a515215164
|
[
"MIT"
] | null | null | null |
petAppointment/wsgi.py
|
guillercp93/petsAppointment
|
3778312f878db138cf670fa6c5f105a515215164
|
[
"MIT"
] | null | null | null |
petAppointment/wsgi.py
|
guillercp93/petsAppointment
|
3778312f878db138cf670fa6c5f105a515215164
|
[
"MIT"
] | null | null | null |
"""
WSGI config for petAppointment project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "petAppointment.settings")
application = get_wsgi_application()
| 23.823529
| 78
| 0.792593
|
9030e5479a10d0d686eb79ba7f14acf474dd6027
| 1,799
|
py
|
Python
|
main_test.py
|
isrealconsulting/codepy27
|
a95e665f7b057b04f066f693ef350bebed97e86b
|
[
"Apache-2.0"
] | 1
|
2016-01-11T16:41:42.000Z
|
2016-01-11T16:41:42.000Z
|
main_test.py
|
isrealconsulting/codepy27
|
a95e665f7b057b04f066f693ef350bebed97e86b
|
[
"Apache-2.0"
] | null | null | null |
main_test.py
|
isrealconsulting/codepy27
|
a95e665f7b057b04f066f693ef350bebed97e86b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import AppEngineTestbedCase, Http2Mock
import webtest
from . import main
class TestMailgunHandlers(AppEngineTestbedCase):
def setUp(self):
super(TestMailgunHandlers, self).setUp()
self.app = webtest.TestApp(main.app)
def test_get(self):
response = self.app.get('/')
self.assertEqual(response.status_int, 200)
def test_post(self):
http = Http2Mock(responses=[{}])
with http:
response = self.app.post('/', {
'recipient': 'jisreal@isrealconsulting.com',
'submit': 'Send simple email'})
self.assertEqual(response.status_int, 200)
http = Http2Mock(responses=[{}])
with http:
response = self.app.post('/', {
'recipient': 'jisreal@isrealconsulting.com',
'submit': 'Send complex email'})
self.assertEqual(response.status_int, 200)
http = Http2Mock(responses=[{'status': 500, 'body': 'Test error'}])
with http, self.assertRaises(Exception):
self.app.post('/', {
'recipient': 'jisreal@isrealconsulting.com',
'submit': 'Send simple email'})
| 32.709091
| 75
| 0.642023
|
d5f16014ce89ef4f1ad97066993c1d81da31212b
| 7,491
|
py
|
Python
|
esp32/tools/fw_updater/pypic.py
|
pauldeng/pycom-micropython-sigfox
|
c82364561f6ae25ac250e408aa86523def9fe78d
|
[
"MIT"
] | 198
|
2017-03-24T23:23:54.000Z
|
2022-01-07T07:14:00.000Z
|
esp32/tools/fw_updater/pypic.py
|
pauldeng/pycom-micropython-sigfox
|
c82364561f6ae25ac250e408aa86523def9fe78d
|
[
"MIT"
] | 509
|
2017-03-28T19:37:18.000Z
|
2022-03-31T20:31:43.000Z
|
esp32/tools/fw_updater/pypic.py
|
pauldeng/pycom-micropython-sigfox
|
c82364561f6ae25ac250e408aa86523def9fe78d
|
[
"MIT"
] | 187
|
2017-03-24T23:23:58.000Z
|
2022-02-25T01:48:45.000Z
|
#!/usr/bin/env python
# Copyright (c) 2016-2021, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
from __future__ import print_function
import argparse
import struct
import sys
import time
import serial
try:
from local_settings import DEBUG
except:
DEBUG = False
__version__ = '0.9.3'
CMD_PEEK = (0x0)
CMD_POKE = (0x01)
CMD_MAGIC = (0x02)
CMD_HW_VER = (0x10)
CMD_FW_VER = (0x11)
CMD_PROD_ID = (0x12)
CMD_SETUP_SLEEP = (0x20)
CMD_GO_SLEEP = (0x21)
CMD_CALIBRATE = (0x22)
CMD_BAUD_CHANGE = (0x30)
CMD_DFU = (0x31)
ANSELA_ADDR = (0x18C)
ANSELB_ADDR = (0x18D)
ANSELC_ADDR = (0x18E)
ADCON0_ADDR = (0x9D)
ADCON1_ADDR = (0x9E)
IOCAP_ADDR = (0x391)
IOCAN_ADDR = (0x392)
_ADCON0_CHS_POSN = (0x02)
_ADCON0_ADON_MASK = (0x01)
_ADCON1_ADCS_POSN = (0x04)
_ADCON0_GO_nDONE_MASK = (0x02)
ADRESL_ADDR = (0x09B)
ADRESH_ADDR = (0x09C)
TRISA_ADDR = (0x08C)
TRISC_ADDR = (0x08E)
PORTA_ADDR = (0x00C)
PORTC_ADDR = (0x00E)
WPUA_ADDR = (0x20C)
PCON_ADDR = (0x096)
STATUS_ADDR = (0x083)
# helper functions
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def log(*args):
print(' '.join(str(a) for a in args))
def error(msg):
eprint('error:', msg)
def exit_with_error(code, msg):
error(msg)
sys.exit(code)
def warn(msg):
eprint('warning:', msg)
def print_debug(msg):
if DEBUG:
eprint(msg)
class Pypic:
def __init__(self, port):
# we need bytesize to be 5 bits in order for the PIC to process the commands
try:
self.serial = serial.Serial(port, baudrate=115200, bytesize=serial.FIVEBITS, timeout=0.25, exclusive=True)
connected = True
except Exception as e:
connected = False
print_debug("Not connecting in exclusive mode because: %s" % str(e))
if not connected:
try:
self.serial = serial.Serial(port, baudrate=115200, bytesize=serial.FIVEBITS, timeout=0.25)
except Exception as e:
raise e
self.detected = False
try:
if self.read_fw_version() < 6:
raise ValueError('PIC firmware out of date')
else:
self.detected = True
except Exception:
pass
def _write(self, data, read=True, num_read_bytes=1):
self.serial.write(data)
if read:
r_data = self.serial.read(2)
if not r_data:
raise Exception('Timeout while waiting for Rx data')
if num_read_bytes == 2:
(b1, b2) = struct.unpack('BB', r_data)
return 256 * b2 + b1
else:
if sys.version_info[0] < 3:
return struct.unpack('B', r_data[0])[0]
else:
return r_data[0]
def _flush(self, num_read_bytes=10):
r_data = self.serial.read(num_read_bytes)
# print(r_data)
return
def _send_cmd(self, cmd, num_read_bytes=1):
return self._write(bytearray([cmd]), True, num_read_bytes)
def read_hw_version(self):
return self._send_cmd(CMD_HW_VER, 2)
def read_fw_version(self):
return self._send_cmd(CMD_FW_VER, 2)
def read_product_id(self):
return self._send_cmd(CMD_PROD_ID, 2)
def peek_memory(self, addr):
return self._write(bytearray([CMD_PEEK, addr & 0xFF, (addr >> 8) & 0xFF]))
def poke_memory(self, addr, value):
self._write(bytearray([CMD_POKE, addr & 0xFF, (addr >> 8) & 0xFF, value & 0xFF]), False)
def magic_write_read(self, addr, _and=0xFF, _or=0, _xor=0):
return self._write(bytearray([CMD_MAGIC, addr & 0xFF, (addr >> 8) & 0xFF, _and & 0xFF, _or & 0xFF, _xor & 0xFF]))
def magic_write(self, addr, _and=0xFF, _or=0, _xor=0):
self._write(bytearray([CMD_MAGIC, addr & 0xFF, (addr >> 8) & 0xFF, _and & 0xFF, _or & 0xFF, _xor & 0xFF]), False)
def toggle_bits_in_memory(self, addr, bits):
self.magic_write(addr, _xor=bits)
def mask_bits_in_memory(self, addr, mask):
self.magic_write(addr, _and=mask)
def set_bits_in_memory(self, addr, bits):
self.magic_write(addr, _or=bits)
def reset_pycom_module(self):
# make RC5 an output
self.mask_bits_in_memory(TRISC_ADDR, ~(1 << 5))
# drive RC5 low
self.mask_bits_in_memory(PORTC_ADDR, ~(1 << 5))
time.sleep(0.2)
# drive RC5 high
self.set_bits_in_memory(PORTC_ADDR, 1 << 5)
time.sleep(0.1)
# make RC5 an input
self.set_bits_in_memory(TRISC_ADDR, 1 << 5)
def enter_pycom_programming_mode(self, reset=True):
print_debug("Entering programming mode with reset={}".format(reset))
# make RA5 an output
self.mask_bits_in_memory(TRISA_ADDR, ~(1 << 5))
# drive RA5 low
self.mask_bits_in_memory(PORTA_ADDR, ~(1 << 5))
# make RC0 an output
self.mask_bits_in_memory(TRISC_ADDR, ~(1 << 0))
# set RC0 low
self.mask_bits_in_memory(PORTC_ADDR, ~(1 << 0))
# perform reset
if reset:
self.reset_pycom_module()
# We should keep RC0 low at this point in case someone
# presses the reset button before the firmware upgrade
# as this is mandatory for the regular expansion board
self._flush()
def exit_pycom_programming_mode(self, reset=True):
print_debug("Leaving programming mode with reset={}".format(reset))
# make RA5 an output
self.mask_bits_in_memory(TRISA_ADDR, ~(1 << 5))
# drive RA5 high
self.set_bits_in_memory(PORTA_ADDR, 1 << 5)
# make RC0 an input
# This will prevent issues with the RGB LED
self.set_bits_in_memory(TRISC_ADDR, 1 << 0)
if reset:
self.reset_pycom_module()
self._flush()
def isdetected(self):
return self.detected
def close(self):
self.serial.close()
def main(args):
parser = argparse.ArgumentParser(description='Sends internal commands to put the Pycom module in programming mode')
parser.add_argument('-p', '--port', metavar='PORT', help='the serial port used to communicate with the PIC')
parser.add_argument('--enter', action='store_true', help='enter programming mode')
parser.add_argument('--exit', action='store_true', help='exit programming mode')
parser.add_argument('--noreset', action='store_true', help='do not reset esp32')
args = parser.parse_args()
if not args.port:
exit_with_error(1, 'no serial port specified')
if (args.enter and args.exit) or (not args.enter and not args.exit):
exit_with_error(1, 'invalid action requested')
pic = Pypic(args.port)
if pic.isdetected():
if args.enter:
pic.enter_pycom_programming_mode(not args.noreset)
elif args.exit:
pic.exit_pycom_programming_mode(not args.noreset)
# print debug info about current PIC product
print_debug("read_product_id(): 0x%X" % pic.read_product_id())
print_debug("read_hw_version(): 0x%X" % pic.read_hw_version())
print_debug("read_fw_version(): 0x%X" % pic.read_fw_version())
pic.close()
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
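# Typical invocations (the port name is an example and depends on the host system):
#   python pypic.py --port /dev/ttyUSB0 --enter    # put the Pycom module in programming mode
#   python pypic.py --port /dev/ttyUSB0 --exit     # leave programming mode (resets the ESP32 unless --noreset is given)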
| 29.492126
| 121
| 0.632492
|
25c94fdec15f0afb3f752c1bb6fd3d548024a4d4
| 1,605
|
py
|
Python
|
LinkedList/DelDuplicatSingleLL.py
|
csagar131/Data_Strucures_And_Algo
|
ac7e01e7ad9893a22aca9456278a8956f3ef2e95
|
[
"MIT"
] | 1
|
2019-07-12T06:43:20.000Z
|
2019-07-12T06:43:20.000Z
|
LinkedList/DelDuplicatSingleLL.py
|
csagar131/Data_Strucures_And_Algo
|
ac7e01e7ad9893a22aca9456278a8956f3ef2e95
|
[
"MIT"
] | null | null | null |
LinkedList/DelDuplicatSingleLL.py
|
csagar131/Data_Strucures_And_Algo
|
ac7e01e7ad9893a22aca9456278a8956f3ef2e95
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self,value):
self.info = value
self.link = None
class SingleLinked:
def __init__(self):
self.start = None
list = SingleLinked()
def insertAtEnd(value): #insert element at end of linked list
if list.start is None:
list.start = Node(value)
else:
k = list.start
while k.link is not None:
k = k.link
k.link = Node(value)
def traverse(): # to traverse the linked list
k = list.start
if list.start is None:
print("list is empty")
else:
while k is not None:
print(k.info,end=" ")
k = k.link
def removeDuplicate():
k = list.start
while k is not None:
s = k.info #info of the current k node
t = k
while t is not None: # we check from the current value of k
            if t.link is not None and s == t.link.info:  # guard t.link against None before reading t.link.info
                t.link = t.link.link  # unlink the duplicate; without the guard, t.link.info on None would raise AttributeError
else:
t = t.link
k = k.link
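# Added note: removeDuplicate compares each node against every node after it, so it
# removes duplicates anywhere in the list (not only adjacent ones) in O(n^2) time
# and O(1) extra space.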
p = '''
press 1 to insert at end
press 2 to traverse the list
press 3 to remove all the duplicate elements
'''
print(p)
n = 0
while n != 111:
n = int(input("Press Number:-"))
if n == 1:
value = int(input('Enter Integer value:-'))
insertAtEnd(value)
        print('value ' + str(value) + ' inserted')
elif n == 2:
traverse()
elif n == 3:
removeDuplicate()
else:
break
| 23.602941
| 107
| 0.535826
|
8a58859277fa2b0300bb2f57cd8d36bc933ca39a
| 2,837
|
py
|
Python
|
cartopy_extra_tiles/cached_tiler.py
|
marceloandrioni/cartopy-extra-tiles
|
81f33bd708dbbe5f0ca0eb8c44b1a864e629e2f6
|
[
"MIT"
] | 1
|
2020-06-05T01:03:37.000Z
|
2020-06-05T01:03:37.000Z
|
cartopy_extra_tiles/cached_tiler.py
|
marceloandrioni/cartopy-extra-tiles
|
81f33bd708dbbe5f0ca0eb8c44b1a864e629e2f6
|
[
"MIT"
] | 1
|
2020-06-12T18:08:55.000Z
|
2020-06-13T22:00:09.000Z
|
cartopy_extra_tiles/cached_tiler.py
|
marceloandrioni/cartopy-extra-tiles
|
81f33bd708dbbe5f0ca0eb8c44b1a864e629e2f6
|
[
"MIT"
] | 1
|
2020-06-17T19:44:52.000Z
|
2020-06-17T19:44:52.000Z
|
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
#
# ======================================================================
"""
Adapted from CachedTiler posted by pelson on
https://github.com/SciTools/cartopy/issues/732
Additional features:
* Cache directory is specified via argument to the constructor.
* Filename extension for image is determined at runtime.
"""
import os
import types
import requests
import PIL
class CachedTiler(object):
"""Augments other tilers with ability to cache tiles localy to disk
in user-specified directory.
"""
def __init__(self, tiler, cache_dir):
"""Constructor.
:type tiler: GoogleTiles
:param tiler: Tiler to use.
:type cache_dir: str
:param cache_dir: Path to directory for caching tiles locally on disk.
"""
self.tiler = tiler
tileset_name = '{}-{}'.format(self.tiler.__class__.__name__.lower(), self.tiler.style)
self.cache_dir = os.path.expanduser(os.path.join(cache_dir, tileset_name))
return
def __getattr__(self, name):
"""Mimic the tiler interface.
For methods ensure that the "self" passed through continues to
be CachedTiler, not underlying tiler (self.tiler).
:type name: str
:param name: Name of attribute/method.
"""
attr = getattr(self.tiler, name, None)
if isinstance(attr, types.MethodType):
attr = types.MethodType(attr.__func__, self)
return attr
def get_image(self, tile):
"""Get image, using local cache if possible.
:type tile: tuple
:param tile: Tuple of tile x,y,z.
"""
# Create cache directory if necessary.
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
# Get extension for filename based on URL.
url = self._image_url(tile)
if url.lower().endswith("png"):
fname_extension = ".png"
elif url.lower().endswith("jpg"):
fname_extension = ".jpg"
elif url.endswith("s=G"):
fname_extension = ".png"
else:
msg = "Could not detect filename extension from url '{}'.".format(url)
raise ValueError(msg)
tile_fname = os.path.join(self.cache_dir, "_".join(str(v) for v in tile) + fname_extension)
if not os.path.exists(tile_fname):
response = requests.get(url, stream=True)
with open(tile_fname, "wb") as fh:
for chunk in response:
fh.write(chunk)
with open(tile_fname, 'rb') as fh:
img = PIL.Image.open(fh)
img = img.convert(self.desired_tile_form)
return img, self.tileextent(tile), 'lower'
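# Usage sketch (added; assumes cartopy and matplotlib are installed -- names below
# come from cartopy.io.img_tiles and may vary between cartopy versions):
#   from cartopy.io.img_tiles import GoogleTiles
#   tiler = CachedTiler(GoogleTiles(style="satellite"), cache_dir="~/cartopy_tile_cache")
#   ax = plt.axes(projection=tiler.crs)
#   ax.add_image(tiler, 8)  # zoom level 8; tiles are downloaded once, then read from disk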
# End of file
| 31.175824
| 99
| 0.5816
|
fe4587ec030eb516e154a0a67e64f595f63e1073
| 454
|
py
|
Python
|
stock/news_vars.py
|
foolcatcora/tushare
|
44b51564942386658cab6b111929495c85ad25cc
|
[
"BSD-3-Clause"
] | 2
|
2018-10-04T08:04:24.000Z
|
2021-01-21T06:58:30.000Z
|
stock/news_vars.py
|
foolcatcora/tushare
|
44b51564942386658cab6b111929495c85ad25cc
|
[
"BSD-3-Clause"
] | 1
|
2020-04-21T02:42:32.000Z
|
2020-04-21T02:42:32.000Z
|
stock/news_vars.py
|
foolcatcora/tushare
|
44b51564942386658cab6b111929495c85ad25cc
|
[
"BSD-3-Clause"
] | 1
|
2015-03-22T12:08:05.000Z
|
2015-03-22T12:08:05.000Z
|
# -*- coding:utf-8 -*-
LATEST_URL = '%sroll.news.%s/interface/%s?col=43&spec=&type=&ch=03&k=&offset_page=0&offset_num=0&num=%s&asc=&page=1&r=0.%s'
LATEST_COLS = ['classify','title','time','url']
LATEST_COLS_C = ['classify','title','time','url','content']
NOTICE_INFO_URL = '%s%s/corp/view/%s?stock_str=%s'
NOTICE_INFO_CLS = ['title', 'type', 'date', 'url']
GUBA_SINA_URL = '%sguba.%s'
GUBA_SINA_COLS = ['title', 'content', 'ptime', 'rcounts']
| 45.4
| 124
| 0.640969
|
82c580cde13bce779eacf34cef6c767fb7298bc6
| 1,659
|
py
|
Python
|
rotkehlchen/errors.py
|
georgerobescu/rotkehlchen
|
817c880b771b8daf5635b02642861dd9949689e2
|
[
"BSD-3-Clause"
] | null | null | null |
rotkehlchen/errors.py
|
georgerobescu/rotkehlchen
|
817c880b771b8daf5635b02642861dd9949689e2
|
[
"BSD-3-Clause"
] | null | null | null |
rotkehlchen/errors.py
|
georgerobescu/rotkehlchen
|
817c880b771b8daf5635b02642861dd9949689e2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
class PoloniexError(Exception):
def __init__(self, err):
self.err = err
def __str__(self):
return self.err
class RecoverableRequestError(Exception):
def __init__(self, exchange, err):
self.exchange = exchange
self.err = err
def __str__(self):
return 'While querying {} got error: "{}"'.format(self.exchange, self.err)
class InputError(Exception):
pass
class EthSyncError(Exception):
pass
class AuthenticationError(Exception):
pass
class IncorrectApiKeyFormat(Exception):
pass
class UnableToDecryptRemoteData(Exception):
pass
class RotkehlchenPermissionError(Exception):
pass
class RemoteError(Exception):
"""Thrown when a remote API can't be reached or throws unexpected error"""
pass
class PriceQueryUnknownFromAsset(Exception):
def __init__(self, from_asset):
super().__init__(
'Unable to query historical price for Unknown Asset: "{}"'.format(from_asset),
)
class UnprocessableTradePair(Exception):
def __init__(self, pair: str):
self.pair = pair
super().__init__(f'Unprocessable pair {pair} encountered.')
class UnknownAsset(Exception):
def __init__(self, asset_name: str):
self.asset_name = asset_name
super().__init__(f'Unknown asset {asset_name} provided.')
class UnsupportedAsset(Exception):
def __init__(self, asset_name: str):
self.asset_name = asset_name
super().__init__(f'Found asset {asset_name} which is not supported.')
class HistoryCacheInvalid(Exception):
pass
class DBUpgradeError(Exception):
pass
| 20.481481
| 90
| 0.687764
|
ac7cabf8ae2c5e76cea379462e9decec98f9eb91
| 2,759
|
py
|
Python
|
Diena_12_Faili/d12_a17_u1.py
|
MarisKuz/Python-RTU
|
12261d06dc81fa0d98190ca0eb5133d43d517070
|
[
"MIT"
] | 8
|
2020-08-31T16:10:54.000Z
|
2021-11-24T06:37:37.000Z
|
Diena_12_Faili/d12_a17_u1.py
|
MarisKuz/Python-RTU
|
12261d06dc81fa0d98190ca0eb5133d43d517070
|
[
"MIT"
] | 8
|
2021-06-08T22:30:29.000Z
|
2022-03-12T00:48:55.000Z
|
Diena_12_Faili/d12_a17_u1.py
|
MarisKuz/Python-RTU
|
12261d06dc81fa0d98190ca0eb5133d43d517070
|
[
"MIT"
] | 12
|
2020-09-28T17:06:52.000Z
|
2022-02-17T12:12:46.000Z
|
from pathlib import Path
import string
# 1a
def file_line_len(fpath):
line_len = 0
filepath = Path(fpath)
if filepath.is_file():
with open(filepath, encoding="utf-8") as f:
            # line_len = len(f.readlines())  # downside: loads the whole file into memory at once, so not good for huge files
            line_len = sum(1 for line in f)  # streams the file one line at a time, so memory use stays low
else:
print("No such file", filepath, filepath.name, filepath.stem)
line_len = -1
return line_len
print(file_line_len("veidenbaums.txt"))
print(file_line_len("../LICENSE"))
print(file_line_len("../dirty_laundry.txt"))
"""
1b -> uzrakstam funkciju get_poem_lines(fpath), kas atgriež list ar tikai tām rindiņām kurās ir dzeja.
Tātad mums nederēs rindiņas bez teksta un nederēs dzejoļu virksraksti.
PS vēlams izmantot encoding = "utf-8"a
"""
def get_poem_lines(fpath):
filepath = Path(fpath)
verses_only = []
if filepath.is_file():
with open(filepath, encoding="utf-8") as f:
for line in f:
# if 0<len(line.rstrip("\n")) and '***' not in line:
if 0<len(line.strip()) and '***' not in line: # clean all whitespace and newlines
verses_only.append(line)
return verses_only
else:
print('Invalid file')
return []
print(len(get_poem_lines("veidenbaums.txt")))
def save_lines(destpath, lines, end="", encoding="utf-8"):
filepath = Path(destpath)
with open(filepath, mode="w", encoding=encoding) as f:
f.writelines([line+end for line in lines])
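# Added note: get_poem_lines keeps each verse's trailing newline, so the call below can
# leave end="" and still write one verse per line; pass end="\n" when saving a list of
# already-stripped strings.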
save_lines("veid_clean_s21.txt", get_poem_lines("veidenbaums.txt"))
def clean_punkts(srcpath,destpath, badchars=string.punctuation, encoding="utf-8",):
    # suitable for large files; for huge files you would want to read and write in chunks
with open(srcpath, encoding=encoding) as fin, open(destpath, mode="w", encoding=encoding) as fout:
for line in fin:
for c in badchars: # clean all bad chars
line = line.replace(c,"")
fout.write(line)
badchars = string.punctuation+"…"
print("Cleaning", badchars)
clean_punkts("veid_clean_s21.txt", "veid_clean_s21_no_punct.txt", badchars=badchars)
from collections import Counter
def get_word_usage(srcpath, destpath=""):
with open(srcpath,encoding="utf-8") as f:
txt = f.read()
txt = txt.replace("\n", " ")
tokens = txt.split(" ") # words basically
count = Counter(tokens)
print(count.most_common(50))
with open(destpath, mode="w", encoding="utf-8") as f:
f.write(str(count.most_common()))
return count
get_word_usage("veid_clean_s21_no_punct.txt", "veid_counter_s21.txt")
# print(my_count.most_common(50))
avg_line_length: 37.794521 | max_line_length: 133 | alphanum_fraction: 0.663646

hexsha: d2ce6b29c10aeaa07f13bcb44bb4d10534b19b38 | size: 9,100 | ext: py | lang: Python
max_stars_repo: samples/client/petstore/python-experimental/petstore_api/models/cat.py @ ximyro/openapi-generator (30ee3f343eb681514ce5b33438de3b8f4fb1f50b) | licenses: ["Apache-2.0"] | max_stars_count: 1 | stars events: 2020-07-11T11:20:56.000Z to 2020-07-11T11:20:56.000Z
max_issues_repo: samples/client/petstore/python-experimental/petstore_api/models/cat.py @ ximyro/openapi-generator (30ee3f343eb681514ce5b33438de3b8f4fb1f50b) | licenses: ["Apache-2.0"] | max_issues_count: 3 | issues events: 2021-04-30T14:46:08.000Z to 2022-02-27T20:57:15.000Z
max_forks_repo: samples/client/petstore/python-experimental/petstore_api/models/cat.py @ ximyro/openapi-generator (30ee3f343eb681514ce5b33438de3b8f4fb1f50b) | licenses: ["Apache-2.0"] | max_forks_count: 1 | forks events: 2021-09-03T09:04:09.000Z to 2021-09-03T09:04:09.000Z
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from petstore_api.models import animal
except ImportError:
animal = sys.modules[
'petstore_api.models.animal']
try:
from petstore_api.models import cat_all_of
except ImportError:
cat_all_of = sys.modules[
'petstore_api.models.cat_all_of']
class Cat(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
        allowed_values (dict): The key is the tuple path to the attribute
            (for a top-level attribute this is (var_name,)). The value is a
            dict with a capitalized key describing the allowed value and an
            allowed value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
        validations (dict): The key is the tuple path to the attribute
            (for a top-level attribute this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'class_name': (str,), # noqa: E501
'declawed': (bool,), # noqa: E501
'color': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_name': val}
attribute_map = {
'class_name': 'className', # noqa: E501
'declawed': 'declawed', # noqa: E501
'color': 'color', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_from_server',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, class_name, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, _visited_composed_classes=(), **kwargs): # noqa: E501
"""cat.Cat - a model defined in OpenAPI
Args:
class_name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
declawed (bool): [optional] # noqa: E501
color (str): [optional] if omitted the server will use the default value of 'red' # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_from_server': _from_server,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_name': class_name,
}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
'anyOf': [
],
'allOf': [
animal.Animal,
cat_all_of.CatAllOf,
],
'oneOf': [
],
}
@classmethod
def get_discriminator_class(cls, data):
"""Returns the child class specified by the discriminator"""
discriminator = cls.discriminator
discr_propertyname_py = list(discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in data:
class_name = data[discr_propertyname_js]
else:
class_name = data[discr_propertyname_py]
class_name_to_discr_class = discriminator[discr_propertyname_py]
return class_name_to_discr_class.get(class_name)
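
if __name__ == "__main__":
    # Usage sketch (not part of the generated file): the constructor documented above
    # takes the required class_name plus optional declawed/color keyword arguments.
    # Attribute values are illustrative; the generated petstore_api package must be importable.
    example_cat = Cat(class_name="Cat", declawed=True, color="black")
    print(example_cat)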
avg_line_length: 38.559322 | max_line_length: 174 | alphanum_fraction: 0.604835

hexsha: 21d0e7dd1fb96c0407c439a8531a4c92833ef064 | size: 3,954 | ext: py | lang: Python
max_stars_repo: nlptasks/task_interpretability_attentionpooling.py @ allenwind/tf2bert (9820223559543529d4dcc703e2742ab8fd14d58e) | licenses: ["Apache-2.0"] | max_stars_count: 4 | stars events: 2021-06-16T02:26:18.000Z to 2021-09-24T11:06:51.000Z
max_issues_repo: nlptasks/task_interpretability_attentionpooling.py @ allenwind/tf2bert (9820223559543529d4dcc703e2742ab8fd14d58e) | licenses: ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: nlptasks/task_interpretability_attentionpooling.py @ allenwind/tf2bert (9820223559543529d4dcc703e2742ab8fd14d58e) | licenses: ["Apache-2.0"] | max_forks_count: null | forks events: null
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import *
from tensorflow.keras.preprocessing import sequence
from tf2bert.layers import AttentionPooling1D
from tf2bert.text.tokenizers import CharTokenizer
from tf2bert.text.rendering import print_color_text
import dataset
# Visualization-based interpretation of AttentionPooling1D
def gelu(x):
return 0.5 * x * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
def batch_pad(X, maxlen=None, dtype="int32"):
if maxlen is None:
maxlen = max([len(i) for i in X])
X = sequence.pad_sequences(
X,
maxlen=maxlen,
dtype=dtype,
padding="post",
truncating="post",
value=0
)
return X
class DataGenerator(tf.keras.utils.Sequence):
def __init__(self, X, y, num_classes, batch_size):
self.X = X
self.y = y
self.num_classes = num_classes
self.batch_size = batch_size
def __len__(self):
return len(self.X) // self.batch_size
def __getitem__(self, index):
i = index * self.batch_size
j = i + self.batch_size
X = self.X[i:j]
y = self.y[i:j]
y = tf.keras.utils.to_categorical(y, num_classes)
return batch_pad(X, maxlen=None), np.array(y)
def on_epoch_end(self):
np.random.RandomState(773).shuffle(self.X)
np.random.RandomState(773).shuffle(self.y)
def split_kfolds(X, y, n_splits=8):
X_train = [j for i, j in enumerate(X) if i % n_splits != 1]
y_train = [j for i, j in enumerate(y) if i % n_splits != 1]
X_test = [j for i, j in enumerate(X) if i % n_splits == 1]
y_test = [j for i, j in enumerate(y) if i % n_splits == 1]
return (X_train, y_train), (X_test, y_test)
def show_visualization(model, pool_model, text, labelid, id2label, tokenizer):
X = [text]
X = tokenizer.transform(X)
X = batch_pad(X)
labelid_pred = np.argmax(model.predict(X)[0])
weights = pool_model.predict(X)[0].flatten()
print_color_text(text, weights)
print("=>y_true:", id2label[labelid])
print("=>y_pred:", id2label[labelid_pred])
X, y, classes = dataset.load_hotel_comment()
num_classes = len(classes)
tokenizer = CharTokenizer(mintf=10)
tokenizer.fit(X)
num_words = len(tokenizer)
maxlen = None
embedding_dim = 128
hdim = 128
embedding = Embedding(
input_dim=num_words,
output_dim=embedding_dim,
mask_zero=False,
name="embedding"
)
conv1 = Conv1D(filters=hdim, kernel_size=2, padding="same", activation=gelu)
conv2 = Conv1D(filters=hdim, kernel_size=2, padding="same", activation=gelu)
conv3 = Conv1D(filters=hdim, kernel_size=3, padding="same", activation=gelu)
pool = AttentionPooling1D(hdim, return_scores=True)
inputs = Input(shape=(maxlen,))
mask = Lambda(lambda x: tf.not_equal(x, 0))(inputs)
x = embedding(inputs)
x = LayerNormalization()(x)
x = Dropout(0.2)(x)
x = conv1(x)
x = conv2(x)
x = conv3(x)
x, w = pool(x, mask=mask)
x = Dense(128)(x)
x = Dropout(0.2)(x)
outputs = Dense(num_classes, activation="softmax")(x)
model = Model(inputs, outputs)
model.compile(
loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"]
)
model.summary()
pool_model = Model(inputs, w)
if __name__ == "__main__":
print(__file__)
batch_size = 64
epochs = 10
Xv = X[:100].copy()
yv = y[:100].copy()
X = tokenizer.transform(X)
(X_train, y_train), (X_test, y_test) = split_kfolds(X, y, 5)
dataset_train = DataGenerator(X_train, y_train, num_classes, batch_size)
dataset_val = DataGenerator(X_test, y_test, num_classes, batch_size)
model.fit(
dataset_train,
batch_size=batch_size,
epochs=epochs,
validation_data=dataset_val,
validation_batch_size=batch_size
)
id2label = {j:i for i,j in classes.items()}
for text, labelid in zip(Xv, yv):
show_visualization(model, pool_model, text, labelid, id2label, tokenizer)
input()
avg_line_length: 28.861314 | max_line_length: 81 | alphanum_fraction: 0.666667

hexsha: 9026ccce09852942df2d24ec1a6a1e0e55ab58ae | size: 599 | ext: py | lang: Python
max_stars_repo: 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0022.py @ moacirsouza/nadas (ad98d73b4281d1581fd2b2a9d29001acb426ee56) | licenses: ["MIT"] | max_stars_count: 1 | stars events: 2020-07-03T13:54:18.000Z to 2020-07-03T13:54:18.000Z
max_issues_repo: 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0022.py @ moacirsouza/nadas (ad98d73b4281d1581fd2b2a9d29001acb426ee56) | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0022.py @ moacirsouza/nadas (ad98d73b4281d1581fd2b2a9d29001acb426ee56) | licenses: ["MIT"] | max_forks_count: null | forks events: null
print('[-- Create a program that reads a person\'s full name and shows: a) The name in all uppercase letters; b) The name in all lowercase letters; c) How many letters in total (not counting spaces); d) How many letters the first name has --]\n')
nome = input('Enter the full name: ')
print('The name in all uppercase letters is: {}' .format(nome.upper()))
print('The name in all lowercase letters is: {}'.format(nome.lower()))
print('How many letters in total? {}' .format(len(nome.replace(' ', ''))))
print('How many letters in the first name? {}'.format(len(nome.split()[0])))
avg_line_length: 85.571429 | max_line_length: 258 | alphanum_fraction: 0.702838

hexsha: 84a19b06b3eb8c23e5e316c7a54bdf2fb09424a1 | size: 2,101 | ext: py | lang: Python
max_stars_repo: script/testing/oltpbench/run_oltpbench.py @ shaqsnake/terrier (d1b5c318c86e89bccc684f02565bee5c818ff8cd) | licenses: ["MIT"] | max_stars_count: 1 | stars events: 2021-06-02T02:52:26.000Z to 2021-06-02T02:52:26.000Z
max_issues_repo: script/testing/oltpbench/run_oltpbench.py @ shaqsnake/terrier (d1b5c318c86e89bccc684f02565bee5c818ff8cd) | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: script/testing/oltpbench/run_oltpbench.py @ shaqsnake/terrier (d1b5c318c86e89bccc684f02565bee5c818ff8cd) | licenses: ["MIT"] | max_forks_count: 3 | forks events: 2020-11-10T11:06:20.000Z to 2022-03-26T15:30:55.000Z
#!/usr/bin/python3
import os
import sys
import argparse
import traceback
base_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, base_path)
from oltpbench.test_oltpbench import TestOLTPBench
if __name__ == "__main__":
aparser = argparse.ArgumentParser(description="Timeseries")
aparser.add_argument("benchmark", help="Benchmark Type")
aparser.add_argument("weights", help="Benchmark weights")
aparser.add_argument("--db-host", help="DB Hostname")
aparser.add_argument("--db-port", type=int, help="DB Port")
aparser.add_argument("--db-output-file", help="DB output log file")
aparser.add_argument("--scale-factor", type=float, metavar="S", \
help="The scale factor. (default: 1)")
aparser.add_argument("--transaction-isolation", metavar="I", \
help="The transaction isolation level (default: TRANSACTION_SERIALIZABLE")
aparser.add_argument("--client-time", type=int, metavar="C", \
help="How long to execute each benchmark trial (default: 20)")
aparser.add_argument("--terminals", type=int, metavar="T", \
help="Number of terminals in each benchmark trial (default: 1)")
aparser.add_argument("--loader-threads", type=int, metavar="L", \
help="Number of loader threads to use (default: 1)")
aparser.add_argument("--build-type",
default="debug",
choices=["debug", "release", "relwithdebinfo"],
help="Build type (default: %(default)s")
aparser.add_argument("--query-mode",
default="simple",
choices=["simple", "extended"],
help="Query protocol mode")
args = vars(aparser.parse_args())
try:
oltpbench = TestOLTPBench(args)
exit_code = oltpbench.run()
except:
print("Exception trying to run OLTP Bench tests")
traceback.print_exc(file=sys.stdout)
exit_code = 1
sys.exit(exit_code)
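    # Example invocation (values are illustrative only; the flags are the ones defined above):
    #   python3 run_oltpbench.py tpcc "45,43,4,4,4" --db-host localhost --db-port 15721 --scale-factor 1 --client-time 60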
avg_line_length: 42.02 | max_line_length: 99 | alphanum_fraction: 0.612565

hexsha: bddfdbf56310230be8fd31db5cad0e42e2526a97 | size: 7,532 | ext: py | lang: Python
max_stars_repo: wu-python-project-5/AdvRoom.py @ roboticforest/school-work-graveyard (083f91ee6436dc1041ea449b3c3718a60dd3c18f) | licenses: ["BSD-Source-Code", "FSFAP"] | max_stars_count: null | stars events: null
max_issues_repo: wu-python-project-5/AdvRoom.py @ roboticforest/school-work-graveyard (083f91ee6436dc1041ea449b3c3718a60dd3c18f) | licenses: ["BSD-Source-Code", "FSFAP"] | max_issues_count: null | issues events: null
max_forks_repo: wu-python-project-5/AdvRoom.py @ roboticforest/school-work-graveyard (083f91ee6436dc1041ea449b3c3718a60dd3c18f) | licenses: ["BSD-Source-Code", "FSFAP"] | max_forks_count: null | forks events: null
# File: AdvRoom.py
# REFERENCES:
# I googled "python file type hint" and got:
# https://stackoverflow.com/questions/38569401/type-hint-for-a-file-or-file-like-object
# which linked to:
# https://docs.python.org/3/library/typing.html#typing.IO
"""
This module is responsible for modeling a single room in Adventure.
"""
import typing
from text_utils import ordinal_str
###########################################################################
# Your job in this milestone is to fill in the definitions of the #
# methods listed in this file, along with any helper methods you need. #
# The public methods shown in this file are the ones you need for #
# Milestone #1. You will need to add other public methods for later #
# milestones, as described in the handout. For Milestone #7, you will #
# need to move the get_next_room method into AdvGame and replace it here #
# with a get_passages method that returns a dictionary of passages. #
###########################################################################
# Constants
# Separates the room description and room exit list. Complete sets of room information are separated by a blank line.
MARKER = "-----"
class AdvRoom:
"""Represents an individual room within an Adventure style text game."""
def __init__(self, name: str, short_desc: str, long_desc: list, room_exits: list):
"""
Initializes an Adventure Room object with the specified attributes.
:param name: The unique ID of the room. This should be a human readable plain text string, like "EndOfRoad".
:param short_desc: A single line description of the room to display to the player.
:param long_desc: A list of lines of text to display to the user which describes the appearance of the room they
are standing within.
        :param room_exits: A list of room exits. Each entry is a tuple of the form (exit_command, destination_room,
            requirement), where exit_command is the directional command players will type, destination_room is the ID
            of the room to send the player to, and requirement is the name of a required item, or None.
"""
self._name = name
self._short_description = short_desc
self._long_description = long_desc
self._exits = room_exits
self._visited = False
self._objects = set()
def add_object(self, obj_name: str):
"""
Places the named game object within the room.
:param obj_name: The name of the object to add into the room.
:return: Nothing.
"""
self._objects.add(obj_name)
def remove_object(self, name: str):
"""
Takes the named game object out of the room.
:param name: The name of the object to remove.
:return: Nothing.
"""
self._objects.remove(name)
def contains_object(self, name: str) -> bool:
"""
Checks to see if the named game object is contained within the room.
:param name: The name of the object to search for.
:return: True if the given name matches the name of an object within the room, False otherwise.
"""
return name in self._objects
def get_contents(self) -> set:
"""
Gets the names of all game objects contained within the room.
:return: A set listing the names of all objects contained within the room.
"""
return self._objects.copy()
def get_name(self) -> str:
"""Returns the name of this room."""
return self._name
def get_short_description(self) -> str:
"""Returns a one-line short description of this room."""
return self._short_description
def get_long_description(self) -> list:
"""Returns the list of lines describing this room."""
return self._long_description
def get_connected_rooms(self, exit_command: str) -> list:
"""
Attempts to retrieve all room connections associated with the given room exiting travel command word (such as
"NORTH", or "IN").
:param exit_command: A string containing, typically, a travel direction such as "SOUTH" or "OUT".
:return: A list of all neighboring connected rooms (if there are any) connected via the given command word. Each
connected room is described as the following tuple: (exit_command, destination_room, requirement)
"""
return [room for room in self._exits if exit_command in room]
def set_visited(self, is_visited: bool):
"""Marks this room as having been visited by the player."""
self._visited = is_visited
def has_been_visited(self) -> bool:
"""Checks to see if this room has been visited by the player."""
return self._visited
@staticmethod
def read_room(room_file: typing.TextIO):
"""
Reads a single room from the given file if possible.
Room files are plain text, formatted as follows:
UniqueNameOfRoom
Short description line.
Multiline long descriptions that
can go on and on
until the marker is read.
-----
VERB: RoomName
VERB: AnotherRoom/ITEM
:param room_file: An open file object connected to a text file containing room data.
:return: None if a room could not be properly read (or if EOF is reached), otherwise an AdvRoom object.
"""
        # Set up a small helper to minimize typos and ensure consistent line counting.
lines_read = 0 # Count lines for better error messages.
def get_next_line_from_file():
"""A tiny helper to ensure line counting and whitespace stripping."""
nonlocal room_file
nonlocal lines_read
lines_read += 1
return room_file.readline().strip()
# Read in, bare minimum, the room name and short description.
name = get_next_line_from_file()
short_desc = get_next_line_from_file()
if name == "" or short_desc == "":
return None
# Read in the long description.
# The description ends at the MARKER constant.
done = False
long_desc = []
while not done:
line = get_next_line_from_file()
if line == MARKER:
done = True
else:
long_desc.append(line)
# Read in the list of available room changing verbs and destinations.
# The list end is marked by a blank line.
done = False
room_exits = []
while not done:
line = room_file.readline().strip()
if line == "":
done = True
else:
split_pos = line.find(":")
if split_pos == -1:
msg = "Missing colon separator on the {0} line of room {1}. The line reads: \"{2}\"".format(
ordinal_str(lines_read), name, line)
raise ValueError(msg)
exit_command = line[:split_pos].upper()
destination_room = line[split_pos + 1:].strip()
requirement = None
# Handle optional room requirement.
split_pos = destination_room.find("/")
if split_pos != -1:
requirement = destination_room[split_pos + 1:]
destination_room = destination_room[:split_pos]
room_exits.append((exit_command, destination_room, requirement))
return AdvRoom(name, short_desc, long_desc, room_exits)
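
if __name__ == "__main__":
    # Usage sketch (not part of the original assignment file): load every room from a
    # data file by calling read_room until it returns None. "Rooms.txt" is a
    # hypothetical file name.
    rooms = {}
    with open("Rooms.txt") as room_data:
        room = AdvRoom.read_room(room_data)
        while room is not None:
            rooms[room.get_name()] = room
            room = AdvRoom.read_room(room_data)
    print("Loaded", len(rooms), "rooms.")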
avg_line_length: 39.025907 | max_line_length: 123 | alphanum_fraction: 0.615242

hexsha: 3ef0bec1fb0a3a39f92d8f1885bea9aab1572a9f | size: 16,605 | ext: py | lang: Python
max_stars_repo: DEEP LEARNING/Object detection/YOLO Object Localization Keras/yad2k/models/keras_yolo.py @ Diyago/ML-DL-scripts (40718a9d4318d6d6531bcea5998c0a18afcd9cb3) | licenses: ["Apache-2.0"] | max_stars_count: 142 | stars events: 2018-09-02T08:59:45.000Z to 2022-03-30T17:08:24.000Z
max_issues_repo: DEEP LEARNING/Object detection/YOLO Object Localization Keras/yad2k/models/keras_yolo.py @ jerinka/ML-DL-scripts (eeb5c3c7c5841eb4cdb272690e14d6718f3685b2) | licenses: ["Apache-2.0"] | max_issues_count: 4 | issues events: 2019-09-08T07:27:11.000Z to 2021-10-19T05:50:24.000Z
max_forks_repo: DEEP LEARNING/Object detection/YOLO Object Localization Keras/yad2k/models/keras_yolo.py @ jerinka/ML-DL-scripts (eeb5c3c7c5841eb4cdb272690e14d6718f3685b2) | licenses: ["Apache-2.0"] | max_forks_count: 75 | forks events: 2018-10-04T17:08:40.000Z to 2022-03-08T18:50:52.000Z
"""YOLO_v2 Model Defined in Keras."""
import sys
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Lambda
from keras.layers.merge import concatenate
from keras.models import Model
from ..utils import compose
from .keras_darknet19 import DarknetConv2D, DarknetConv2D_BN_Leaky, darknet_body
sys.path.append("..")
voc_anchors = np.array(
[[1.08, 1.19], [3.42, 4.41], [6.63, 11.38], [9.42, 5.11], [16.62, 10.52]]
)
voc_classes = [
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
def space_to_depth_x2(x):
"""Thin wrapper for Tensorflow space_to_depth with block_size=2."""
# Import currently required to make Lambda work.
# See: https://github.com/fchollet/keras/issues/5088#issuecomment-273851273
import tensorflow as tf
return tf.space_to_depth(x, block_size=2)
def space_to_depth_x2_output_shape(input_shape):
"""Determine space_to_depth output shape for block_size=2.
Note: For Lambda with TensorFlow backend, output shape may not be needed.
"""
return (
(input_shape[0], input_shape[1] // 2, input_shape[2] // 2, 4 * input_shape[3])
if input_shape[1]
else (input_shape[0], None, None, 4 * input_shape[3])
)
def yolo_body(inputs, num_anchors, num_classes):
"""Create YOLO_V2 model CNN body in Keras."""
darknet = Model(inputs, darknet_body()(inputs))
conv20 = compose(
DarknetConv2D_BN_Leaky(1024, (3, 3)), DarknetConv2D_BN_Leaky(1024, (3, 3))
)(darknet.output)
conv13 = darknet.layers[43].output
conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
# TODO: Allow Keras Lambda to use func arguments for output_shape?
conv21_reshaped = Lambda(
space_to_depth_x2,
output_shape=space_to_depth_x2_output_shape,
name="space_to_depth",
)(conv21)
x = concatenate([conv21_reshaped, conv20])
x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
return Model(inputs, x)
def yolo_head(feats, anchors, num_classes):
"""Convert final layer features to bounding box parameters.
Parameters
----------
feats : tensor
Final convolutional layer features.
anchors : array-like
Anchor box widths and heights.
num_classes : int
Number of target classes.
Returns
-------
    box_confidence : tensor
        Probability estimate for whether each box contains any object.
    box_xy : tensor
        x, y box predictions adjusted by spatial location in conv layer.
    box_wh : tensor
        w, h box predictions adjusted by anchors and conv spatial resolution.
    box_class_probs : tensor
        Probability distribution estimate for each box over class labels.
"""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])
# Static implementation for fixed models.
# TODO: Remove or add option for static implementation.
# _, conv_height, conv_width, _ = K.int_shape(feats)
# conv_dims = K.variable([conv_width, conv_height])
# Dynamic implementation of conv dims for fully convolutional model.
conv_dims = K.shape(feats)[1:3] # assuming channels last
# In YOLO the height index is the inner most iteration.
conv_height_index = K.arange(0, stop=conv_dims[0])
conv_width_index = K.arange(0, stop=conv_dims[1])
conv_height_index = K.tile(conv_height_index, [conv_dims[1]])
# TODO: Repeat_elements and tf.split doesn't support dynamic splits.
# conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)
conv_width_index = K.tile(K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
conv_width_index = K.flatten(K.transpose(conv_width_index))
conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
conv_index = K.cast(conv_index, K.dtype(feats))
feats = K.reshape(
feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5]
)
conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))
# Static generation of conv_index:
# conv_index = np.array([_ for _ in np.ndindex(conv_width, conv_height)])
# conv_index = conv_index[:, [1, 0]] # swap columns for YOLO ordering.
# conv_index = K.variable(
# conv_index.reshape(1, conv_height, conv_width, 1, 2))
# feats = Reshape(
# (conv_dims[0], conv_dims[1], num_anchors, num_classes + 5))(feats)
box_confidence = K.sigmoid(feats[..., 4:5])
box_xy = K.sigmoid(feats[..., :2])
box_wh = K.exp(feats[..., 2:4])
box_class_probs = K.softmax(feats[..., 5:])
    # Adjust predictions to each spatial grid point and anchor size.
# Note: YOLO iterates over height index before width index.
box_xy = (box_xy + conv_index) / conv_dims
box_wh = box_wh * anchors_tensor / conv_dims
return box_confidence, box_xy, box_wh, box_class_probs
def yolo_boxes_to_corners(box_xy, box_wh):
"""Convert YOLO box predictions to bounding box corners."""
box_mins = box_xy - (box_wh / 2.0)
box_maxes = box_xy + (box_wh / 2.0)
return K.concatenate(
[
box_mins[..., 1:2], # y_min
box_mins[..., 0:1], # x_min
box_maxes[..., 1:2], # y_max
box_maxes[..., 0:1], # x_max
]
)
def yolo_loss(args, anchors, num_classes, rescore_confidence=False, print_loss=False):
"""YOLO localization loss function.
Parameters
----------
yolo_output : tensor
Final convolutional layer features.
true_boxes : tensor
Ground truth boxes tensor with shape [batch, num_true_boxes, 5]
containing box x_center, y_center, width, height, and class.
detectors_mask : array
0/1 mask for detector positions where there is a matching ground truth.
matching_true_boxes : array
Corresponding ground truth boxes for positive detector positions.
Already adjusted for conv height and width.
anchors : tensor
Anchor boxes for model.
num_classes : int
Number of object classes.
rescore_confidence : bool, default=False
If true then set confidence target to IOU of best predicted box with
the closest matching ground truth box.
print_loss : bool, default=False
If True then use a tf.Print() to print the loss components.
Returns
-------
mean_loss : float
mean localization loss across minibatch
"""
(yolo_output, true_boxes, detectors_mask, matching_true_boxes) = args
num_anchors = len(anchors)
object_scale = 5
no_object_scale = 1
class_scale = 1
coordinates_scale = 1
    # Note: yolo_head returns (confidence, xy, wh, class_probs), so unpack in that order.
    pred_confidence, pred_xy, pred_wh, pred_class_prob = yolo_head(
        yolo_output, anchors, num_classes
    )
# Unadjusted box predictions for loss.
# TODO: Remove extra computation shared with yolo_head.
yolo_output_shape = K.shape(yolo_output)
feats = K.reshape(
yolo_output,
[-1, yolo_output_shape[1], yolo_output_shape[2], num_anchors, num_classes + 5],
)
pred_boxes = K.concatenate((K.sigmoid(feats[..., 0:2]), feats[..., 2:4]), axis=-1)
# TODO: Adjust predictions by image width/height for non-square images?
# IOUs may be off due to different aspect ratio.
# Expand pred x,y,w,h to allow comparison with ground truth.
# batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
pred_xy = K.expand_dims(pred_xy, 4)
pred_wh = K.expand_dims(pred_wh, 4)
pred_wh_half = pred_wh / 2.0
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
true_boxes_shape = K.shape(true_boxes)
# batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
true_boxes = K.reshape(
true_boxes,
[true_boxes_shape[0], 1, 1, 1, true_boxes_shape[1], true_boxes_shape[2]],
)
true_xy = true_boxes[..., 0:2]
true_wh = true_boxes[..., 2:4]
# Find IOU of each predicted box with each ground truth box.
true_wh_half = true_wh / 2.0
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
intersect_mins = K.maximum(pred_mins, true_mins)
intersect_maxes = K.minimum(pred_maxes, true_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.0)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = intersect_areas / union_areas
# Best IOUs for each location.
best_ious = K.max(iou_scores, axis=4) # Best IOU scores.
best_ious = K.expand_dims(best_ious)
# A detector has found an object if IOU > thresh for some true box.
object_detections = K.cast(best_ious > 0.6, K.dtype(best_ious))
# TODO: Darknet region training includes extra coordinate loss for early
# training steps to encourage predictions to match anchor priors.
# Determine confidence weights from object and no_object weights.
# NOTE: YOLO does not use binary cross-entropy here.
no_object_weights = no_object_scale * (1 - object_detections) * (1 - detectors_mask)
no_objects_loss = no_object_weights * K.square(-pred_confidence)
if rescore_confidence:
objects_loss = (
object_scale * detectors_mask * K.square(best_ious - pred_confidence)
)
else:
objects_loss = object_scale * detectors_mask * K.square(1 - pred_confidence)
confidence_loss = objects_loss + no_objects_loss
# Classification loss for matching detections.
# NOTE: YOLO does not use categorical cross-entropy loss here.
matching_classes = K.cast(matching_true_boxes[..., 4], "int32")
matching_classes = K.one_hot(matching_classes, num_classes)
classification_loss = (
class_scale * detectors_mask * K.square(matching_classes - pred_class_prob)
)
# Coordinate loss for matching detection boxes.
matching_boxes = matching_true_boxes[..., 0:4]
coordinates_loss = (
coordinates_scale * detectors_mask * K.square(matching_boxes - pred_boxes)
)
confidence_loss_sum = K.sum(confidence_loss)
classification_loss_sum = K.sum(classification_loss)
coordinates_loss_sum = K.sum(coordinates_loss)
total_loss = 0.5 * (
confidence_loss_sum + classification_loss_sum + coordinates_loss_sum
)
if print_loss:
total_loss = tf.Print(
total_loss,
[
total_loss,
confidence_loss_sum,
classification_loss_sum,
coordinates_loss_sum,
],
message="yolo_loss, conf_loss, class_loss, box_coord_loss:",
)
return total_loss
def yolo(inputs, anchors, num_classes):
"""Generate a complete YOLO_v2 localization model."""
num_anchors = len(anchors)
body = yolo_body(inputs, num_anchors, num_classes)
outputs = yolo_head(body.output, anchors, num_classes)
return outputs
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=0.6):
"""Filter YOLO boxes based on object and class confidence."""
box_scores = box_confidence * box_class_probs
box_classes = K.argmax(box_scores, axis=-1)
box_class_scores = K.max(box_scores, axis=-1)
prediction_mask = box_class_scores >= threshold
# TODO: Expose tf.boolean_mask to Keras backend?
boxes = tf.boolean_mask(boxes, prediction_mask)
scores = tf.boolean_mask(box_class_scores, prediction_mask)
classes = tf.boolean_mask(box_classes, prediction_mask)
return boxes, scores, classes
def yolo_eval(
yolo_outputs, image_shape, max_boxes=10, score_threshold=0.6, iou_threshold=0.5
):
"""Evaluate YOLO model on given input batch and return filtered boxes."""
box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
boxes = yolo_boxes_to_corners(box_xy, box_wh)
boxes, scores, classes = yolo_filter_boxes(
box_confidence, boxes, box_class_probs, threshold=score_threshold
)
# Scale boxes back to original image shape.
height = image_shape[0]
width = image_shape[1]
image_dims = K.stack([height, width, height, width])
image_dims = K.reshape(image_dims, [1, 4])
boxes = boxes * image_dims
# TODO: Something must be done about this ugly hack!
max_boxes_tensor = K.variable(max_boxes, dtype="int32")
K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
nms_index = tf.image.non_max_suppression(
boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold
)
boxes = K.gather(boxes, nms_index)
scores = K.gather(scores, nms_index)
classes = K.gather(classes, nms_index)
return boxes, scores, classes
def preprocess_true_boxes(true_boxes, anchors, image_size):
"""Find detector in YOLO where ground truth box should appear.
Parameters
----------
true_boxes : array
List of ground truth boxes in form of relative x, y, w, h, class.
Relative coordinates are in the range [0, 1] indicating a percentage
of the original image dimensions.
anchors : array
List of anchors in form of w, h.
Anchors are assumed to be in the range [0, conv_size] where conv_size
is the spatial dimension of the final convolutional features.
image_size : array-like
List of image dimensions in form of h, w in pixels.
Returns
-------
detectors_mask : array
0/1 mask for detectors in [conv_height, conv_width, num_anchors, 1]
that should be compared with a matching ground truth box.
matching_true_boxes: array
Same shape as detectors_mask with the corresponding ground truth box
adjusted for comparison with predicted parameters at training time.
"""
height, width = image_size
num_anchors = len(anchors)
# Downsampling factor of 5x 2-stride max_pools == 32.
# TODO: Remove hardcoding of downscaling calculations.
assert height % 32 == 0, "Image sizes in YOLO_v2 must be multiples of 32."
assert width % 32 == 0, "Image sizes in YOLO_v2 must be multiples of 32."
conv_height = height // 32
conv_width = width // 32
num_box_params = true_boxes.shape[1]
detectors_mask = np.zeros(
(conv_height, conv_width, num_anchors, 1), dtype=np.float32
)
matching_true_boxes = np.zeros(
(conv_height, conv_width, num_anchors, num_box_params), dtype=np.float32
)
for box in true_boxes:
# scale box to convolutional feature spatial dimensions
box_class = box[4:5]
box = box[0:4] * np.array([conv_width, conv_height, conv_width, conv_height])
i = np.floor(box[1]).astype("int")
        j = min(np.floor(box[0]).astype("int"), conv_width - 1)
best_iou = 0
best_anchor = 0
for k, anchor in enumerate(anchors):
# Find IOU between box shifted to origin and anchor box.
box_maxes = box[2:4] / 2.0
box_mins = -box_maxes
anchor_maxes = anchor / 2.0
anchor_mins = -anchor_maxes
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.0)
intersect_area = intersect_wh[0] * intersect_wh[1]
box_area = box[2] * box[3]
anchor_area = anchor[0] * anchor[1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
if iou > best_iou:
best_iou = iou
best_anchor = k
if best_iou > 0:
detectors_mask[i, j, best_anchor] = 1
adjusted_box = np.array(
[
box[0] - j,
box[1] - i,
np.log(box[2] / anchors[best_anchor][0]),
np.log(box[3] / anchors[best_anchor][1]),
box_class,
],
dtype=np.float32,
)
matching_true_boxes[i, j, best_anchor] = adjusted_box
return detectors_mask, matching_true_boxes
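
if __name__ == "__main__":
    # Minimal sketch (not part of the original module): where a relative ground-truth
    # box lands on the 13x13 grid of a 416x416 input, mirroring the cell-assignment
    # step inside preprocess_true_boxes above. Values are illustrative only.
    image_size = (416, 416)
    conv_height, conv_width = image_size[0] // 32, image_size[1] // 32
    box = np.array([0.55, 0.40, 0.20, 0.30])  # relative x, y, w, h
    scaled = box * np.array([conv_width, conv_height, conv_width, conv_height])
    cell_row = int(np.floor(scaled[1]))
    cell_col = int(np.floor(scaled[0]))
    print("grid:", conv_height, "x", conv_width, "-> cell (row, col):", (cell_row, cell_col))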
avg_line_length: 35.941558 | max_line_length: 88 | alphanum_fraction: 0.663957

hexsha: 14667506894ff77eb54ceba7c87f97e241be9abd | size: 36,023 | ext: py | lang: Python
max_stars_repo: fairseq/models/transformer.py @ yingwaner/C-MNMT (3a87f75302efc859139af72483f480a1cac86f25) | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: fairseq/models/transformer.py @ yingwaner/C-MNMT (3a87f75302efc859139af72483f480a1cac86f25) | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: fairseq/models/transformer.py @ yingwaner/C-MNMT (3a87f75302efc859139af72483f480a1cac86f25) | licenses: ["MIT"] | max_forks_count: null | forks events: null
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model('transformer')
class TransformerModel(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
@classmethod
def hub_models(cls):
# fmt: off
return {
'transformer.wmt14.en-fr': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2',
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz',
'transformer.wmt19.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz',
'transformer.wmt19.en-ru': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz',
'transformer.wmt19.de-en': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz',
'transformer.wmt19.ru-en': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz',
'transformer.wmt19.en-de.single_model': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz',
'transformer.wmt19.en-ru.single_model': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz',
'transformer.wmt19.de-en.single_model': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz',
'transformer.wmt19.ru-en.single_model': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz',
}
# fmt: on
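    # Usage sketch (assumption, not part of this file): the named checkpoints above are
    # typically consumed through torch.hub, e.g.
    #   en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de',
    #                          tokenizer='moses', bpe='subword_nmt')
    #   en2de.translate('Hello world!')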
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.supports_align_args = True
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
parser.add_argument('--layer-wise-attention', default=False, action='store_true',
help='perform layer-wise attention (cross-attention or cross+self-attention)')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, 'no_cross_attention', False),
)
@register_model('transformer_align')
class TransformerAlignModel(TransformerModel):
"""
See "Jointly Learning to Align and Translate with Transformer
Models" (Garg et al., EMNLP 2019).
"""
def __init__(self, encoder, decoder, args):
super().__init__(encoder, decoder)
self.alignment_heads = args.alignment_heads
self.alignment_layer = args.alignment_layer
self.full_context_alignment = args.full_context_alignment
@staticmethod
def add_args(parser):
# fmt: off
super(TransformerAlignModel, TransformerAlignModel).add_args(parser)
parser.add_argument('--alignment-heads', type=int, metavar='D',
                            help='Number of cross attention heads per layer to be supervised with alignments')
parser.add_argument('--alignment-layer', type=int, metavar='D',
help='Layer number which has to be supervised. 0 corresponding to the bottommost layer.')
parser.add_argument('--full-context-alignment', type=bool, metavar='D',
help='Whether or not alignment is supervised conditioned on the full target context.')
# fmt: on
@classmethod
def build_model(cls, args, task):
# set any default arguments
transformer_align(args)
transformer_model = TransformerModel.build_model(args, task)
return TransformerAlignModel(transformer_model.encoder, transformer_model.decoder, args)
def forward(self, src_tokens, src_lengths, prev_output_tokens):
encoder_out = self.encoder(src_tokens, src_lengths)
return self.forward_decoder(prev_output_tokens, encoder_out)
def forward_decoder(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
features_only=False,
**extra_args,
):
attn_args = {'alignment_layer': self.alignment_layer, 'alignment_heads': self.alignment_heads}
decoder_out = self.decoder(
prev_output_tokens,
encoder_out,
**attn_args,
**extra_args,
)
if self.full_context_alignment:
attn_args['full_context_alignment'] = self.full_context_alignment
_, alignment_out = self.decoder(
prev_output_tokens, encoder_out, features_only=True, **attn_args, **extra_args,
)
decoder_out[1]['attn'] = alignment_out['attn']
return decoder_out
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens, num_languages: int = 5):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
        # Language embedding for the different languages (currently disabled; see the commented-out nn.Embedding below).
self.num_languages = num_languages
#self.language_embeddings = (
#nn.Embedding(self.num_languages, embed_dim, self.padding_idx)
#)
def forward_embedding(self, src_tokens, lang):
# embed tokens and positions
embed = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
#if self.language_embeddings is not None and lang is not None:
# x = x + self.language_embeddings(lang.long().cuda())
x = F.dropout(x, p=self.dropout, training=self.training)
return x, embed
def forward(self, lang_pair, src_tokens, src_lengths, cls_input=None, return_all_hiddens=False):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
if self.layer_wise_attention:
return_all_hiddens = True
lang = torch.Tensor([lang_pair])
x, encoder_embedding = self.forward_embedding(src_tokens, lang)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
encoder_states = [] if return_all_hiddens else None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
if return_all_hiddens:
encoder_states[-1] = x
return {
'encoder_out': x, # T x B x C
'encoder_padding_mask': encoder_padding_mask, # B x T
'encoder_embedding': encoder_embedding, # B x T x C
'encoder_states': encoder_states, # List[T x B x C]
}
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = \
encoder_out['encoder_out'].index_select(1, new_order)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
if encoder_out.get('encoder_states', None) is not None:
for idx, state in enumerate(encoder_out['encoder_states']):
encoder_out['encoder_states'][idx] = state.index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
self.embed_positions = PositionalEmbedding(
args.max_target_positions, embed_dim, self.padding_idx,
learned=args.decoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.cross_self_attention = getattr(args, 'cross_self_attention', False)
self.layer_wise_attention = getattr(args, 'layer_wise_attention', False)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
])
self.adaptive_softmax = None
self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
features_only=False,
**extra_args,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state, **extra_args,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
full_context_alignment=False,
alignment_layer=None,
alignment_heads=None,
**unused,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = len(self.layers) - 1
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
if not self_attn_padding_mask.any() and not self.cross_self_attention:
self_attn_padding_mask = None
# decoder layers
attn = None
inner_states = [x]
for idx, layer in enumerate(self.layers):
encoder_state = None
if encoder_out is not None:
if self.layer_wise_attention:
encoder_state = encoder_out['encoder_states'][idx]
else:
encoder_state = encoder_out['encoder_out']
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn = layer(
x,
encoder_state,
encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=(idx == alignment_layer),
need_head_weights=(idx == alignment_layer),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float()
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {'attn': attn, 'inner_states': inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, '_future_mask')
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
layer_norm_map = {
'0': 'self_attn_layer_norm',
'1': 'encoder_attn_layer_norm',
'2': 'final_layer_norm'
}
for old, new in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
if k in state_dict:
state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
del state_dict[k]
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
@register_model_architecture('transformer', 'transformer')
def base_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.activation_dropout = getattr(args, 'activation_dropout', 0.)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.dropout = getattr(args, 'dropout', 0.1)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.no_cross_attention = getattr(args, 'no_cross_attention', False)
args.cross_self_attention = getattr(args, 'cross_self_attention', False)
args.layer_wise_attention = getattr(args, 'layer_wise_attention', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformer', 'transformer_iwslt_de_en')
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_architecture(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de')
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.dropout = getattr(args, 'dropout', 0.3)
base_architecture(args)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, 'dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer_align', 'transformer_align')
def transformer_align(args):
args.alignment_heads = getattr(args, 'alignment_heads', 1)
args.alignment_layer = getattr(args, 'alignment_layer', 4)
args.full_context_alignment = getattr(args, 'full_context_alignment', False)
base_architecture(args)
@register_model_architecture('transformer_align', 'transformer_wmt_en_de_big_align')
def transformer_wmt_en_de_big_align(args):
args.alignment_heads = getattr(args, 'alignment_heads', 1)
args.alignment_layer = getattr(args, 'alignment_layer', 4)
transformer_wmt_en_de_big(args)
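# Illustrative sketch: the @register_model_architecture hook above is how new
# named variants are defined, overriding only selected defaults and delegating
# the rest to base_architecture. The variant name and sizes below are
# hypothetical, chosen only to show the mechanism.
@register_model_architecture('transformer', 'transformer_tiny_example')
def transformer_tiny_example(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 512)
    args.encoder_layers = getattr(args, 'encoder_layers', 2)
    args.decoder_layers = getattr(args, 'decoder_layers', 2)
    base_architecture(args)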
| 44.145833
| 144
| 0.652111
|
8e2bfedb89388e1455a2efac691312159a6e6fcc
| 4,659
|
py
|
Python
|
cogs/text.py
|
best4281/cherng-bot
|
6f3daace95e905f48b4395dbfe55b62d04cc4556
|
[
"BSD-2-Clause"
] | null | null | null |
cogs/text.py
|
best4281/cherng-bot
|
6f3daace95e905f48b4395dbfe55b62d04cc4556
|
[
"BSD-2-Clause"
] | null | null | null |
cogs/text.py
|
best4281/cherng-bot
|
6f3daace95e905f48b4395dbfe55b62d04cc4556
|
[
"BSD-2-Clause"
] | null | null | null |
import asyncio.exceptions
from datetime import datetime
import discord
from discord.ext import commands
from configs import *
async def confirm_clear(bot, ctx, msgCount:int, msgList:list, check_func=None):
confirm = await ctx.send(f":bangbang: {ctx.author.mention} You are about to delete {msgCount} messages in {ctx.channel.mention}.\nSend **yes** to confirm. Send **no** to cancel.", delete_after=30.0)
del_check = lambda message: message.author == ctx.author and message.content.lower() in ["yes", "no"]
try:
user_confirm = await bot.wait_for('message', timeout=20.0, check=del_check)
if user_confirm.content.lower() == "no":
await confirm.edit(content="Message deletion was aborted.")
await user_confirm.delete()
return
await ctx.channel.delete_messages(msgList)
await confirm.edit(content=f"**{msgCount}** messages were removed from {ctx.channel.mention}")
await user_confirm.delete()
return
except asyncio.exceptions.TimeoutError:
await confirm.edit(content="You did not gave me any confirmation in 20 seconds.")
return
except discord.errors.ClientException:
def check(msg):
if check_func(msg) and msg in msgList:
return True
return False
deleted = await ctx.channel.purge(limit=10000, check=check)
await confirm.edit(content=f"**{len(deleted)-1}** messages were removed from {ctx.channel.mention}")
await user_confirm.delete()
except Exception as e:
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"{now}: cogs.text.confirm_clear() {e}")
class TextCog(commands.Cog, name = "Text", description = "Commands for managing text channel."):
def __init__(self, bot):
self.bot = bot
self.too_many_deletion = 50
@commands.command(
name = "clear",
aliases = ["c", "cl", "clearchat"],
help = "Remove messages in the text channel.",
usage = "<number/all> [@user(s)] [-i]",
description = (
"`number:` number of the messages to be deleted, can be replaced with `all`\n"
"`all:` is for removing maximum of 1000 messages at a time\n"
"`@user(s):` mention a user to remove messages from this specific user\n"
"`-i` or `--ignore` also delete the pinned messages\n\n"
"**Require __manage message__ permission.**\n⠀"
)
)
@commands.has_guild_permissions(manage_messages=True)
async def clear(self, ctx, num = None, *args):
        # group the flag checks so a bare '--ignore' does not override user mentions
        if not ctx.message.mentions and ('-i' in args or '--ignore' in args):
            check_func = lambda msg: True
        elif ctx.message.mentions and ctx.guild and ('-i' in args or '--ignore' in args):
            check_func = lambda msg: msg.author in ctx.message.mentions
elif ctx.message.mentions and ctx.guild:
check_func = lambda msg: msg.author in ctx.message.mentions and not msg.pinned
else:
check_func = lambda msg: not msg.pinned
if num == None:
await ctx.invoke(self.bot.get_command('help'), "clear")
return
if num == "all":
msgCount = 0
msgList = []
async with ctx.typing():
async for msg in ctx.channel.history(limit=1000):
if check_func(msg):
msgCount += 1
msgList.append(msg)
await confirm_clear(self.bot, ctx, msgCount, msgList, check_func)
else:
try:
num = int(num)
except:
await ctx.invoke(self.bot.get_command('help'), "clear")
await ctx.send(f"If you tried to use `clear` command without a number, it will __not__ work. Please always specify the number of messages to clear or use `{get_prefix(ctx)}clear all`.")
return
msgCount = -1
msgList = []
async with ctx.typing():
async for msg in ctx.channel.history(limit=None):
if check_func(msg):
msgCount += 1
msgList.append(msg)
if msgCount == num:
break
if msgCount >= self.too_many_deletion:
await confirm_clear(self.bot, ctx, msgCount, msgList, check_func)
else:
await ctx.channel.delete_messages(msgList)
await ctx.send(f"**{msgCount}** messages were removed from {ctx.channel.mention}", delete_after=10.0)
def setup(bot):
bot.add_cog(TextCog(bot))
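# Illustrative usage of the clear command defined above, assuming the bot's
# command prefix is "!" (the prefix and message counts are hypothetical):
#   !clear 20              deletes the most recent 20 non-pinned messages
#   !clear 20 @someone     deletes only that user's messages among the matches
#   !clear all -i          deletes up to 1000 messages, including pinned ones
# Deletions of 50 or more messages (self.too_many_deletion), and every `all`
# run, go through confirm_clear(), which waits for a "yes"/"no" reply first.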
| 43.95283
| 202
| 0.59326
|
c2366b4f6a59de2c5219fcc4a2e456e44d649367
| 13,761
|
py
|
Python
|
vega/algorithms/compression/quant_ea/utils/tensorflow/quant_conv.py
|
Lzc06/vega
|
852d2f57e21caed11473ddc96397124561eacf8a
|
[
"MIT"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
vega/algorithms/compression/quant_ea/utils/tensorflow/quant_conv.py
|
Lzc06/vega
|
852d2f57e21caed11473ddc96397124561eacf8a
|
[
"MIT"
] | 3
|
2021-03-31T20:15:40.000Z
|
2022-02-09T23:50:46.000Z
|
built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/algorithms/compression/quant_ea/utils/tensorflow/quant_conv.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Quantized Convlution."""
import math
import tensorflow as tf
@tf.custom_gradient
def sign(input):
"""Apply Sign class in dorefa-net for weights.
:param input: batch of input
:type input: Tensor
:return: quantized output
:rtype: Tensor
"""
input = tf.math.sign(input)
def grads(grad_output):
return grad_output
return input, grads
@tf.custom_gradient
def xnor(input):
"""Apply Sign class in dorefa-net for weights.
:param input: batch of input
:type input: Tensor
:return: quantized output
:rtype: Tensor
"""
input = tf.math.sign(input) * tf.reduce_mean(tf.math.abs(input), axis=[1, 2, 3], keepdims=True)
def grads(grad_output):
return grad_output
return input, grads
@tf.custom_gradient
def scale_sign(input):
"""Apply Sign class in dorefa-net for weights.
:param input: batch of input
:type input: Tensor
:return: quantized output
:rtype: Tensor
"""
input = tf.math.sign(input) * tf.reduce_mean(tf.math.abs(input))
def grads(grad_output):
return grad_output
return input, grads
def dorefa_w(w, nbit_w, *args, **kwargs):
"""Dorefa quantization for weights.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param alpha: scale factor
:type alpha: float or Tensor
:param offset: offset factor
:type offset: float or Tensor
:return: quantized output
:rtype: Tensor
"""
if nbit_w == 1:
w = scale_sign(w)
else:
w = tf.math.tanh(w)
w = w / (2 * tf.reduce_max(tf.math.abs(w))) + 0.5
w = 2 * quantize_w(w, nbit_w) - 1
return w
@tf.custom_gradient
def quantize_w(input, nbit):
"""Quantization function for weights.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:return: quantized output and grad function
:rtype: Tensor, fn
"""
scale = tf.cast((2 ** nbit - 1), input.dtype)
output = tf.math.round(input * scale) / scale
def grads(grad_output):
if grad_output is None:
return tf.zeros_like(input)
return grad_output
return output, grads
def wrpn_w(w, nbit_w, *args, **kwargs):
"""Wrpn quantization for weights.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param alpha: scale factor
:type alpha: float or Tensor
:param offset: offset factor
:type offset: float or Tensor
:return: quantized output
:rtype: Tensor
"""
if nbit_w == 1:
w = scale_sign(w)
else:
w = quantize_w(tf.clip_by_value(w, -1, 1), nbit_w - 1)
return w
def xnor_w(w, nbit_w=1, *args, **kwargs):
"""Xnor quantization for weights.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param alpha: scale factor
:type alpha: float or Tensor
:param offset: offset factor
:type offset: float or Tensor
:return: quantized output
:rtype: Tensor
"""
if nbit_w != 1:
raise ValueError('nbit_w must be 1 in XNOR-Net.')
return xnor(w)
def bireal_w(w, nbit_w=1, *args, **kwargs):
"""Bireal quantization for weights.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param alpha: scale factor
:type alpha: float or Tensor
:param offset: offset factor
:type offset: float or Tensor
:return: quantized output
:rtype: Tensor
"""
if nbit_w != 1:
raise ValueError('nbit_w must be 1 in Bi-Real-Net.')
return sign(w) * tf.reduce_mean(tf.math.abs(tf.Variable(w)))
def dorefa_a(input, nbit_a, alpha=None, offset=None):
"""Dorefa quantization for activations.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param alpha: scale factor
:type alpha: float or Tensor
:param offset: offset factor
:type offset: float or Tensor
:return: quantized output
:rtype: Tensor
"""
scale = tf.cast((2 ** nbit_a - 1) if alpha is None else (2 ** nbit_a - 1) / alpha, input.dtype)
return quantize_a(tf.clip_by_value(input, 0, 1.0), nbit_a, scale)
@tf.custom_gradient
def quantize_a(input, nbit, scale):
"""Quantization function for activations.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param scale: calculated scale
:type scale: float or Tensor
:return: quantized output and grad function
:rtype: Tensor, fn
"""
output = tf.math.round(input * scale) / scale
def grads(grad_output):
if grad_output is None:
return tf.zeros_like(input)
return grad_output
return output, grads
def pact_a(input, nbit_a, alpha, *args, **kwargs):
"""PACT quantization for activations.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param alpha: scale factor
:type alpha: float or Tensor
:param offset: offset factor
:type offset: float or Tensor
:return: quantized output
:rtype: Tensor
"""
x = 0.5 * (tf.math.abs(input) - tf.math.abs(input - alpha) + alpha)
scale = tf.cast((2 ** nbit_a - 1) if alpha is None else (2 ** nbit_a - 1) / alpha, input.dtype)
return quantize_a(x, nbit_a, scale)
@tf.custom_gradient
def bireal_a_calc(input):
"""Forward and backward for bireal_a.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param alpha: scale factor
:type alpha: float or Tensor
:param offset: offset factor
:type offset: float or Tensor
:return: quantized output
:rtype: Tensor
"""
input = tf.math.sign(tf.clip_by_value(input, -1, 1))
def grads(grad_output):
grad_input = (2 + 2 * input) * tf.cast(tf.math.less(input, 0), dtype=input.dtype) + \
(2 - 2 * input) * tf.cast(tf.math.greater_equal(input, 0), dtype=input.dtype)
grad_input = tf.minimum(grad_input, 0)
grad_input *= grad_output
return grad_input
return input, grads
def bireal_a(input, nbit_a=1, *args, **kwargs):
"""Adaptor for bireal_a.
:param input: batch of input
:type input: Tensor
:param nbit: bit width
:type nbit: int
:param alpha: scale factor
:type alpha: float or Tensor
:param offset: offset factor
:type offset: float or Tensor
:return: quantized output
:rtype: Tensor
"""
return bireal_a_calc(input)
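# Illustrative worked example of the DoReFa-style quantizers above. With
# nbit_a = 2 the activation scale is 2**2 - 1 = 3, so inputs clipped to [0, 1]
# snap onto {0, 1/3, 2/3, 1}: 0.4 -> round(0.4 * 3) / 3 = 1/3, 0.9 -> 1.0.
# For weights, dorefa_w with nbit_w = 2 squashes with tanh, rescales into
# [0, 1], quantizes onto the same grid, then maps back to {-1, -1/3, 1/3, 1}.
# Hypothetical call (assumes an eager-execution environment; values rounded):
#   dorefa_a(tf.constant([0.05, 0.4, 0.9, 1.7]), nbit_a=2)
#   # -> [0.0, 0.3333, 1.0, 1.0]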
class QuantConv(tf.layers.Conv2D):
"""General QuantConv class for quantized convolution.
The params are the same as nn.Conv2d
"""
def __init__(self, out_channels, kernel_size, name, strides=1, padding='same', dilation=1,
groups=1, use_bias=True, data_format='channels_first'):
super(QuantConv, self).__init__(out_channels, kernel_size, strides, padding,
data_format, dilation, use_bias=use_bias)
self.out_channels = out_channels
self.data_format = 'NCHW' if self.data_format == 'channels_first' else 'NHWC'
self.group = groups
if self.use_bias:
self.bias = tf.get_variable(name + '/bias', initializer=tf.zeros((out_channels)))
else:
self.bias = None
def quant_config(self, quan_name_w='dorefa', quan_name_a='dorefa', has_offset=False, quant_info=None, name=''):
"""Config the quantization settings.
:param quan_name_w: type of weight quantization
:type quan_name_w: string
:param quan_name_a: type of activation quantization
:type quan_name_a: string
:param nbit_w: bit width of weight quantization
:type nbit_w: int
:param nbit_a: bit width of activation quantization
:type nbit_a: int
:param has_offset: whether use offset
:type has_offset: bool
"""
if quant_info is None:
self.nbit_w = 1
self.nbit_a = 1
else:
if isinstance(quant_info['nbit_w_list'], list) and isinstance(quant_info['nbit_a_list'], list):
self.nbit_w, self.nbit_a = quant_info['nbit_w_list'].pop(0), quant_info['nbit_a_list'].pop(0)
else:
self.nbit_w, self.nbit_a = quant_info['nbit_w_list'], quant_info['nbit_a_list']
self.quant_info = quant_info
name_w_dict = {'dorefa': dorefa_w, 'pact': dorefa_w, 'wrpn': wrpn_w, 'xnor': xnor_w, 'bireal': bireal_w}
name_a_dict = {'dorefa': dorefa_a, 'pact': pact_a, 'wrpn': dorefa_a, 'xnor': dorefa_a, 'bireal': bireal_a}
self.quan_w = name_w_dict[quan_name_w]
self.quan_a = name_a_dict[quan_name_a]
if quan_name_a == 'pact':
self.alpha_a = tf.get_variable(name + '/alpha_a', initializer=10. * tf.ones((1.), dtype=tf.float32))
else:
self.alpha_a = None
if quan_name_w == 'pact':
self.alpha_w = tf.get_variable(name + '/alpha_w', initializer=10. * tf.ones((1.), dtype=tf.float32))
else:
self.alpha_w = None
if has_offset:
            self.offset = tf.get_variable(name + '/offset', initializer=tf.zeros((1.), dtype=tf.float32))
else:
self.offset = None
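    # Illustrative shape of the quant_info dict consumed above (hypothetical
    # values): per-layer bit-width lists popped in order by quant_config, plus
    # running counters that calc_flops_params updates in place.
    #   quant_info = {'nbit_w_list': [8, 4, 2], 'nbit_a_list': [8, 4, 2],
    #                 'extra_params': 0, 'extra_flops': 0}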
def calc_flops_params(self, in_channels, out_channels, kernel_size, xW, xH):
"""Calculate extra flops and params, append in quant_info.
:param in_channels: in_channels of input
:type in_channels: int
:param out_channels: out_channels of input
:type out_channels: int
:param kernel_size: kernel_size of input
:type in_channels: int
:param xW: width of input
:type xW: int
:param xH: height of input
:type xH: int
"""
if isinstance(kernel_size, list) or isinstance(kernel_size, tuple):
ks1, ks2 = kernel_size[0], kernel_size[1]
else:
ks1, ks2 = kernel_size, kernel_size
multiplier = self.nbit_w - 1
self.quant_info['extra_params'] += multiplier * (ks1 * ks2 * in_channels * 16)
multiplier += self.nbit_a
self.quant_info['extra_flops'] += multiplier * (16 * xW * xH * (in_channels * ks1 * ks2))
def __call__(self, input):
"""Forward function of quantized convolution.
:param input: batch of input
:type input: Tensor
:return: output
:rtype: Tensor
"""
channel_axis = 1 if (self.data_format == 'NCHW' or self.data_format == 'channels_first') else 3
self.in_channels = int(input.get_shape()[channel_axis])
input_size = list(input.get_shape())[1:]
input_size.pop(channel_axis - 1)
if self.quant_info:
self.calc_flops_params(self.in_channels, self.out_channels, self.kernel_size,
int(input_size[0]), int(input_size[1]))
# 0-bit: identity mapping
if self.nbit_w == 0 or self.nbit_a == 0:
diff_channels = self.out_channels - self.in_channels
if self.strides == 2 or self.strides == (2, 2):
if channel_axis == 1:
x = tf.pad(
input[:, :, ::2, ::2],
tf.constant([[0, 0], [0, 0], [diff_channels // 2, diff_channels - diff_channels // 2]]),
"CONSTANT",
0
)
else:
x = tf.pad(
input[:, ::2, ::2, :],
tf.constant([[0, 0], [0, 0], [diff_channels // 2, diff_channels - diff_channels // 2]]),
"CONSTANT",
0
)
return x
else:
x = tf.pad(
input,
tf.constant([[0, 0], [0, 0], [diff_channels // 2, diff_channels - diff_channels // 2]]),
"CONSTANT",
0
)
return x
# w quan
self.weight = tf.get_variable(self.name + '/kernel',
initializer=tf.random.normal(self.kernel_size + (self.in_channels,
self.out_channels,)))
        if self.nbit_w < 32:
            w = self.quan_w(self.weight, self.nbit_w, self.alpha_w, self.offset)
else:
w = self.weight
# a quan
if self.nbit_a < 32:
x = self.quan_a(input, self.nbit_a, self.alpha_a)
else:
x = tf.nn.relu(input)
if self.group == 1:
x = tf.nn.conv2d(x, w, strides=self.strides, padding=self.padding.upper(),
dilations=self.dilation_rate, name=self.name,
data_format=self.data_format)
else:
x = tf.nn.depthwise_conv2d(x, w, strides=self.strides, padding=self.padding.upper(),
dilations=self.dilation_rate, name=self.name,
data_format=self.data_format)
return x
| 33
| 115
| 0.59407
|
dfb75cf54e6dd8d9fe36f6916f39a8cc5e940fac
| 4,615
|
py
|
Python
|
tests/test_documentmanifest.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_documentmanifest.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_documentmanifest.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/DocumentManifest
Release: R5
Version: 4.5.0
Build ID: 0d95498
Last updated: 2021-04-03T00:34:11.075+00:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from fhir.resources import fhirtypes # noqa: F401
from fhir.resources import documentmanifest
def impl_documentmanifest_1(inst):
assert inst.contained[0].id == "org-1"
assert inst.contained[1].id == "a1"
assert inst.contained[2].id == "a2"
assert inst.content[0].reference == "#a1"
assert inst.content[1].reference == "#a2"
assert inst.content[2].reference == "DocumentReference/example"
assert inst.content[3].reference == "DiagnosticReport/f001"
assert inst.created == fhirtypes.DateTime.validate("2014-09-21T11:50:23-05:00")
assert inst.id == "654789"
assert inst.identifier[0].system == "http://happyvalley.com/supportingdocumentation"
assert inst.identifier[0].value == "52345"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.recipient[0].reference == "#org-1"
assert inst.related[0].identifier.system == "http://happyvalley.com/claim"
assert inst.related[0].identifier.value == "12345"
assert (
inst.related[1].identifier.system
== "http://www.BenefitsInc.com/fhir/remittance"
)
assert inst.related[1].identifier.value == "R3500"
assert inst.status == "current"
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A Financial '
"Management Attachment example</div>"
)
assert inst.text.status == "generated"
def test_documentmanifest_1(base_settings):
"""No. 1 tests collection for DocumentManifest.
Test File: documentmanifest-fm-attachment.json
"""
filename = (
base_settings["unittest_data_dir"] / "documentmanifest-fm-attachment.json"
)
inst = documentmanifest.DocumentManifest.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "DocumentManifest" == inst.resource_type
impl_documentmanifest_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "DocumentManifest" == data["resourceType"]
inst2 = documentmanifest.DocumentManifest(**data)
impl_documentmanifest_1(inst2)
def impl_documentmanifest_2(inst):
assert inst.author[0].reference == "#a1"
assert inst.contained[0].id == "a1"
assert inst.content[0].reference == "DocumentReference/example"
assert inst.created == fhirtypes.DateTime.validate("2004-12-25T23:50:50-05:00")
assert inst.description == "Physical"
assert inst.id == "example"
assert inst.identifier[0].system == "http://example.org/documents"
assert inst.identifier[0].value == "23425234234-2347"
assert inst.masterIdentifier.system == "http://example.org/documents"
assert inst.masterIdentifier.value == "23425234234-2346"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.recipient[0].reference == "Practitioner/xcda1"
assert inst.related[0].identifier.system == "http://example.org/documents"
assert inst.related[0].identifier.value == "23425234234-9999"
assert inst.related[0].ref.reference == "DocumentReference/example"
assert inst.source == "urn:oid:1.3.6.1.4.1.21367.2009.1.2.1"
assert inst.status == "current"
assert inst.subject.reference == "Patient/xcda"
assert inst.text.div == '<div xmlns="http://www.w3.org/1999/xhtml">Text</div>'
assert inst.text.status == "generated"
assert inst.type.text == "History and Physical"
def test_documentmanifest_2(base_settings):
"""No. 2 tests collection for DocumentManifest.
Test File: documentmanifest-example.json
"""
filename = base_settings["unittest_data_dir"] / "documentmanifest-example.json"
inst = documentmanifest.DocumentManifest.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "DocumentManifest" == inst.resource_type
impl_documentmanifest_2(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "DocumentManifest" == data["resourceType"]
inst2 = documentmanifest.DocumentManifest(**data)
impl_documentmanifest_2(inst2)
| 40.130435
| 88
| 0.697725
|
869a89ebadb7de0c661befd8e7a90c198eb43c73
| 7,834
|
py
|
Python
|
pong.py
|
timolegros/Pong
|
1804d1fcd869592b6501ccd5d5e79c65000db9e1
|
[
"MIT"
] | null | null | null |
pong.py
|
timolegros/Pong
|
1804d1fcd869592b6501ccd5d5e79c65000db9e1
|
[
"MIT"
] | null | null | null |
pong.py
|
timolegros/Pong
|
1804d1fcd869592b6501ccd5d5e79c65000db9e1
|
[
"MIT"
] | null | null | null |
# Pong V2 has all attributes of the full version; the paddles are moved with the q/a (left) and p/l (right) keys.
import pygame
def main():
# initialize all pygame modules (some need initialization)
pygame.init()
# create a pygame display window
pygame.display.set_mode((500, 400))
# set the title of the display window
pygame.display.set_caption('Pong')
# get the display surface
w_surface = pygame.display.get_surface()
# create a game object
game = Game(w_surface)
# start the main game loop by calling the play method on the game object
game.play()
# quit pygame and clean up the pygame window
pygame.quit()
class Game:
# An object in this class represents a complete game.
def __init__(self, surface):
# Initialize a Game.
# - self is the Game to initialize
# - surface is the display window surface object
self.surface = surface
self.bg_color = pygame.Color('black')
self.FPS = 60
self.game_Clock = pygame.time.Clock()
self.close_clicked = False
self.continue_game = True
# === game specific objects
self.dot_center = [250, 200]
self.dot_velocity = [5, 3]
self.dot_radius = 10
self.dot = Dot('white', self.dot_radius, self.dot_center, self.dot_velocity, self.surface)
# rectangle values
self.left_rectangle = pygame.Rect((50, 165), (10, 70))
self.right_rectangle = pygame.Rect((440, 165), (10, 70))
# initial score values
self.left_score_num = 0
self.right_score_num = 0
self.continue_top_right = True
self.continue_top_left = True
self.continue_bottom_right = True
self.continue_bottom_left = True
pygame.key.set_repeat(20,20)
def play(self):
# Play the game until the player presses the close box.
# - self is the Game that should be continued or not.
while not self.close_clicked: # until player clicks close box
# play frame
self.handle_events()
self.draw()
self.collision()
self.update_score()
if self.continue_game:
self.update()
self.decide_continue()
self.game_Clock.tick(self.FPS) # run at most with FPS Frames Per Second
def handle_events(self):
# Handle each user event by changing the game state appropriately.
# - self is the Game whose events will be handled
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
self.close_clicked = True
if event.type == pygame.KEYDOWN and self.continue_game:
self.handle_key_down(event)
# if event.type == pygame.KEYUP and self.continue_game:
# self.handle_key_up(event)
def handle_key_down(self, event):
size = self.surface.get_size()
if event.key == pygame.K_q:
self.left_rectangle.top = self.left_rectangle.top - 10
if self.left_rectangle.top < 0:
self.left_rectangle.top = 0
if event.key == pygame.K_p:
self.right_rectangle.top = self.right_rectangle.top - 10
if self.right_rectangle.top < 0:
self.right_rectangle.top = 0
if event.key == pygame.K_a:
self.left_rectangle.bottom = self.left_rectangle.bottom + 10
if self.left_rectangle.bottom > size[1]:
self.left_rectangle.bottom = size[1]
if event.key == pygame.K_l:
self.right_rectangle.bottom = self.right_rectangle.bottom + 10
            if self.right_rectangle.bottom > size[1]:
self.right_rectangle.bottom = size[1]
def draw(self):
# Draw all game objects.
# - self is the Game to draw
self.surface.fill(self.bg_color) # clear the display surface first
self.dot.draw()
self.left_score()
self.right_score()
pygame.draw.rect(self.surface, pygame.Color('White'), self.left_rectangle)
pygame.draw.rect(self.surface, pygame.Color('White'), self.right_rectangle)
pygame.display.update() # make the updated surface appear on the display
def collision(self):
# Detects any collision between dot and paddle and reverses x-velocity if one occurs
# - self is the Game which detects collisions
if self.dot_velocity[0] > 0:
if self.right_rectangle.collidepoint(self.dot_center[0] + self.dot_radius, self.dot_center[1]):
self.dot_velocity[0] = - self.dot_velocity[0]
if self.dot_velocity[0] < 0:
if self.left_rectangle.collidepoint(self.dot_center[0] - self.dot_radius, self.dot_center[1]):
self.dot_velocity[0] = - self.dot_velocity[0]
def left_score(self):
# creates the left score
# - self is the Game onto which the score is drawn
fg_color = pygame.Color('white')
font = pygame.font.SysFont('', 70)
text_string = str(self.left_score_num)
text_box = font.render(text_string, True, fg_color, self.bg_color)
location = (0, 0)
self.surface.blit(text_box, location)
def right_score(self):
# creates the right score
# - self is the Game onto which the score is drawn
fg_color = pygame.Color('White')
font = pygame.font.SysFont('', 70)
text_string = str(self.right_score_num)
text_box = font.render(text_string, True, fg_color, self.bg_color)
location = (self.surface.get_width()-text_box.get_width(), 0)
self.surface.blit(text_box, location)
def update_score(self):
# updates the scores on the left and right
# - self is the Game to update
if self.dot_center[0] - self.dot_radius <= 0: # left edge
self.right_score_num += 1
if self.dot_center[0] + self.dot_radius >= self.surface.get_width(): # right edge
self.left_score_num += 1
def update(self):
# Update the game objects for the next frame.
# - self is the Game to update
self.dot.move()
def decide_continue(self):
# Check and remember if the game should continue
# - self is the Game to check
if self.left_score_num == 11 or self.right_score_num == 11:
self.continue_game = False
class Dot:
# An object in this class represents a Dot that moves
def __init__(self, dot_color, dot_radius, dot_center, dot_velocity, surface):
# Initialize a Dot.
# - self is the Dot to initialize
# - color is the pygame.Color of the dot
# - center is a list containing the x and y int
# coords of the center of the dot
# - radius is the int pixel radius of the dot
# - velocity is a list containing the x and y components
# - surface is the window's pygame.Surface object
self.color = pygame.Color(dot_color)
self.radius = dot_radius
self.center = dot_center
self.velocity = dot_velocity
self.surface = surface
def move(self):
        # dictates the movement of the dot, including how it bounces off window edges
# - self is the Dot
size = self.surface.get_size()
for i in range(0, 2):
self.center[i] = (self.center[i] + self.velocity[i])
if self.center[i] <= self.radius:
self.velocity[i] = - self.velocity[i]
if self.center[i] + self.radius >= size[i]:
self.velocity[i] = - self.velocity[i]
def draw(self):
# Draw the dot on the surface
# - self is the Dot
pygame.draw.circle(self.surface, self.color, self.center, self.radius)
main()
| 36.437209
| 107
| 0.61399
|
196fe5659936586cac6c1379d7f238d21d84340b
| 4,240
|
py
|
Python
|
pybamm/models/submodels/oxygen_diffusion/full_oxygen_diffusion.py
|
gyouhoc/PyBaMM
|
6852e0e518157e6802ce83a2549562e7d0ed4b9f
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/submodels/oxygen_diffusion/full_oxygen_diffusion.py
|
gyouhoc/PyBaMM
|
6852e0e518157e6802ce83a2549562e7d0ed4b9f
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/submodels/oxygen_diffusion/full_oxygen_diffusion.py
|
gyouhoc/PyBaMM
|
6852e0e518157e6802ce83a2549562e7d0ed4b9f
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Class for oxygen diffusion
#
import pybamm
from .base_oxygen_diffusion import BaseModel
def separator_and_positive_only(variable):
"""Return only the separator and positive electrode children
Parameters
----------
variable : :class:`pybamm.Concatenation`
Concatenation of variables in negative, separator, positive
Returns
-------
:class:`pybamm.Concatenation`
Concatenation of variables in separator and positive only
"""
_, var_s, var_p = variable.orphans
return pybamm.Concatenation(var_s, var_p)
class Full(BaseModel):
"""Class for conservation of mass of oxygen. (Full refers to unreduced by
asymptotic methods)
In this model, extremely fast oxygen kinetics in the negative electrode imposes
zero oxygen concentration there, and so the oxygen variable only lives in the
separator and positive electrode. The boundary condition at the negative electrode/
separator interface is homogeneous Dirichlet.
Parameters
----------
param : parameter class
The parameters to use for this submodel
reactions : dict
Dictionary of reaction terms
**Extends:** :class:`pybamm.oxygen_diffusion.BaseModel`
"""
def __init__(self, param, reactions):
super().__init__(param, reactions)
def get_fundamental_variables(self):
# Oxygen concentration (oxygen concentration is zero in the negative electrode)
c_ox_n = pybamm.FullBroadcast(0, "negative electrode", "current collector")
c_ox_s = pybamm.Variable(
"Separator oxygen concentration",
domain="separator",
auxiliary_domains={"secondary": "current collector"},
)
c_ox_p = pybamm.Variable(
"Positive oxygen concentration",
domain="positive electrode",
auxiliary_domains={"secondary": "current collector"},
)
c_ox_s_p = pybamm.Concatenation(c_ox_s, c_ox_p)
variables = {"Separator and positive electrode oxygen concentration": c_ox_s_p}
c_ox = pybamm.Concatenation(c_ox_n, c_ox_s, c_ox_p)
variables.update(self._get_standard_concentration_variables(c_ox))
return variables
def get_coupled_variables(self, variables):
tor = separator_and_positive_only(variables["Electrolyte tortuosity"])
c_ox = variables["Separator and positive electrode oxygen concentration"]
# TODO: allow charge and convection?
v_box = pybamm.Scalar(0)
param = self.param
N_ox_diffusion = -tor * param.curlyD_ox * pybamm.grad(c_ox)
N_ox = N_ox_diffusion + param.C_e * c_ox * v_box
# Flux in the negative electrode is zero
N_ox = pybamm.Concatenation(
pybamm.FullBroadcast(0, "negative electrode", "current collector"), N_ox
)
variables.update(self._get_standard_flux_variables(N_ox))
return variables
def set_rhs(self, variables):
param = self.param
eps = separator_and_positive_only(variables["Porosity"])
deps_dt = separator_and_positive_only(variables["Porosity change"])
c_ox = variables["Separator and positive electrode oxygen concentration"]
N_ox = variables["Oxygen flux"].orphans[1]
source_terms = sum(
pybamm.Concatenation(
pybamm.FullBroadcast(0, "separator", "current collector"),
reaction["Positive"]["s_ox"] * variables[reaction["Positive"]["aj"]],
)
for reaction in self.reactions.values()
)
self.rhs = {
c_ox: (1 / eps)
* (-pybamm.div(N_ox) / param.C_e + source_terms - c_ox * deps_dt)
}
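        # In the model's scaled variables the update above reads
        #   d(c_ox)/dt = (1 / eps) * (-div(N_ox) / C_e + S - c_ox * d(eps)/dt),
        # where S collects the positive-electrode oxygen source terms and is
        # zero in the separator.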
def set_boundary_conditions(self, variables):
c_ox = variables["Separator and positive electrode oxygen concentration"]
self.boundary_conditions = {
c_ox: {
"left": (pybamm.Scalar(0), "Dirichlet"),
"right": (pybamm.Scalar(0), "Neumann"),
}
}
def set_initial_conditions(self, variables):
c_ox = variables["Separator and positive electrode oxygen concentration"]
self.initial_conditions = {c_ox: self.param.c_ox_init}
| 33.385827
| 87
| 0.653774
|
a369e492cd5e10af3861594198323c0539332e06
| 1,793
|
py
|
Python
|
browser/processrendering.py
|
gocept/alphaflow
|
4b797cb12fb52254b1884159fd9a8b899c739f7c
|
[
"ZPL-2.1",
"ZPL-2.0"
] | null | null | null |
browser/processrendering.py
|
gocept/alphaflow
|
4b797cb12fb52254b1884159fd9a8b899c739f7c
|
[
"ZPL-2.1",
"ZPL-2.0"
] | null | null | null |
browser/processrendering.py
|
gocept/alphaflow
|
4b797cb12fb52254b1884159fd9a8b899c739f7c
|
[
"ZPL-2.1",
"ZPL-2.0"
] | 1
|
2021-11-01T07:58:18.000Z
|
2021-11-01T07:58:18.000Z
|
# Copyright (c) 2005-2006 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
import Products.Five
import Products.AlphaFlow.interfaces
class ProcessGraph(Products.Five.BrowserView):
@property
def graphing(self):
g = Products.AlphaFlow.interfaces.IWorkflowGraph(self.context)
g.zoom = self.request.get('zoom') or g.zoom
g.highlight = self.request.get('highlight')
session = getattr(self.request, 'SESSION', {})
g.expand_groups = session.get('expandgroup', [])
return g
def expandGroup(self, groupname):
expanded = self.request.SESSION.get('expandgroup', ())
if groupname not in expanded:
expanded += (groupname,)
self.request.SESSION.set('expandgroup', expanded)
def closeGroup(self, groupname):
expanded = self.request.SESSION.get('expandgroup', ())
if groupname in expanded:
expanded = list(expanded)
expanded.remove(groupname)
expanded = tuple(expanded)
self.request.SESSION.set('expandgroup', expanded)
def closeAllGroups(self):
if 'expandgroup' in self.request.SESSION.keys():
del self.request.SESSION['expandgroup']
def getGraph(self):
self.request.RESPONSE.setHeader('Content-Type', 'image/png')
return self.graphing.render(format='png')
def getGraphSVG(self):
self.request.RESPONSE.setHeader('Content-Type', 'image/svg+xml')
self.request.RESPONSE.setHeader('Content-Disposition',
'attachment; filename="workflow.svg"')
return self.graphing.render(format='svg')
def getMap(self):
self.request.RESPONSE.setHeader('Content-Type', 'text/xml')
return self.graphing.render(format='cmapx')
| 34.480769
| 78
| 0.640825
|
3929dffcf5900964524fca7584b169cfe353700a
| 10,342
|
py
|
Python
|
Python/gan_keras/dcgan.py
|
fyumoto/GAN_Keras
|
0cc7f3cc8726300969959420d506726ea6b1adc2
|
[
"Apache-2.0"
] | 1
|
2019-01-19T08:17:04.000Z
|
2019-01-19T08:17:04.000Z
|
Python/gan_keras/dcgan.py
|
fyumoto/GAN_Keras
|
0cc7f3cc8726300969959420d506726ea6b1adc2
|
[
"Apache-2.0"
] | null | null | null |
Python/gan_keras/dcgan.py
|
fyumoto/GAN_Keras
|
0cc7f3cc8726300969959420d506726ea6b1adc2
|
[
"Apache-2.0"
] | null | null | null |
# License
# Copyright 2018 Hamaad Musharaf Shah
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import math
import inspect
import tensorflow
from six.moves import range
import os
import math
import sys
import importlib
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, LabelBinarizer
from sklearn.metrics import roc_auc_score
from scipy.stats import norm
import keras
from keras import backend as bkend
from keras import layers
from keras.layers import Input, Dense, BatchNormalization, Dropout, Flatten, convolutional, pooling, Reshape, Embedding
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras import metrics
from keras.models import Model
from keras.optimizers import Adam, RMSprop
from keras.utils.generic_utils import Progbar
from keras.preprocessing import image
import tensorflow as tf
from tensorflow.python.client import device_lib
from gan_keras.loss_history import LossHistory
class DeepConvGenAdvNet(BaseEstimator,
TransformerMixin):
def __init__(self,
z_size=None,
iterations=None,
batch_size=None):
args, _, _, values = inspect.getargvalues(inspect.currentframe())
values.pop("self")
for arg, val in values.items():
setattr(self, arg, val)
# Build the discriminator.
self.discriminator = self.build_discriminator()
self.discriminator.compile(optimizer=RMSprop(lr=0.0002,
clipvalue=1.0,
decay=1e-8),
loss="binary_crossentropy")
# Build the generator to fool the discriminator.
# Freeze the discriminator here.
self.discriminator.trainable = False
self.generator = self.build_generator()
GAN_input = Input(shape=(self.z_size,))
GAN_fake = self.generator(GAN_input)
GAN_output = self.discriminator(GAN_fake)
# Build the GAN.
self.GAN = Model(GAN_input, GAN_output)
self.GAN.compile(optimizer=RMSprop(lr=0.0004,
clipvalue=1.0,
decay=1e-8),
loss="binary_crossentropy")
def fit(self,
X,
y=None):
num_train = X.shape[0]
start = 0
for step in range(self.iterations):
# Generate a new batch of noise...
noise = np.random.uniform(low=-1.0, high=1.0, size=(self.batch_size, self.z_size))
# ...and generate a batch of fake images.
generated_images = self.generator.predict(noise)
stop = start + self.batch_size
# Get a batch of real images.
image_batch = X[start:stop]
# [real, fake].
x = np.concatenate((image_batch, generated_images))
# [real, fake].
y = np.concatenate([np.ones(shape=(self.batch_size, 1)), np.zeros(shape=(self.batch_size, 1))])
y += 0.05 * np.random.random(size=y.shape)
# See if the discriminator can figure itself out.
self.d_loss = self.discriminator.train_on_batch(x, y)
# Make new noise.
noise = np.random.uniform(low=-1.0, high=1.0, size=(self.batch_size, self.z_size))
# We want to train the generator to trick the discriminator.
# For the generator, we want all the [real, fake] labels to say real.
trick = np.ones(shape=(self.batch_size, 1))
self.gan_loss = self.GAN.train_on_batch(noise, trick)
start += self.batch_size
if start > num_train - self.batch_size:
start = 0
if step % 100 == 0:
print("Step:", step)
print("Discriminator loss:", self.d_loss)
print("GAN loss:", self.gan_loss)
img = image.array_to_img(generated_images[0] * 255.0, scale=False)
img.save("outputs/generated_image" + str(step) + ".png")
img = image.array_to_img(image_batch[0] * 255.0, scale=False)
img.save("outputs/real_image" + str(step) + ".png")
return self
def transform(self,
X):
return self.feature_extractor.predict(X)
def build_generator(self):
# We will map z, a latent vector, to image space (..., 28, 28, 1).
latent = Input(shape=(self.z_size,))
# This produces a (..., 7, 7, 128) shaped tensor.
cnn = Dense(units=1024, activation="tanh")(latent)
cnn = Dense(units=128 * 7 * 7, activation="tanh")(cnn)
cnn = BatchNormalization()(cnn)
cnn = Reshape((7, 7, 128))(cnn)
# Upsample to (..., 14, 14, 64).
cnn = layers.Conv2DTranspose(filters=64, kernel_size=(5, 5), strides=(2, 2), padding="same", activation="tanh")(cnn)
cnn = layers.Conv2D(filters=64, kernel_size=(5, 5), strides=(1, 1), padding="same", activation="tanh")(cnn)
# Upsample to (..., 28, 28, 64).
cnn = layers.Conv2DTranspose(filters=64, kernel_size=(5, 5), strides=(2, 2), padding="same", activation="tanh")(cnn)
# Take a channel axis reduction to (..., 28, 28, 1).
fake_img = Conv2D(filters=1, kernel_size=(5, 5), strides=(1, 1), padding="same", activation="sigmoid", kernel_initializer="glorot_normal", name="generator")(cnn)
return Model(latent, fake_img)
def build_discriminator(self):
image = Input(shape=(28, 28, 1))
cnn = Conv2D(filters=64, kernel_size=(5, 5), padding="same", strides=(2, 2), activation="tanh")(image)
cnn = layers.MaxPooling2D(pool_size=(2, 2))(cnn)
cnn = Conv2D(filters=128, kernel_size=(5, 5), padding="same", strides=(2, 2), activation="tanh")(cnn)
cnn = layers.MaxPooling2D(pool_size=(2, 2))(cnn)
cnn = Flatten()(cnn)
cnn = Dense(units=1024, activation="tanh")(cnn)
self.feature_extractor = Model(image, cnn)
is_real_img = Dense(units=1, activation="sigmoid", name="discriminator")(cnn)
return Model(image, is_real_img)
def evaluate(self,
X):
num_test = X.shape[0]
# Generate a new batch of noise.
noise = np.random.uniform(low=-1.0, high=1.0, size=(num_test, self.z_size))
generated_images = self.generator.predict(noise)
# [real, fake].
x = np.concatenate((X, generated_images))
# [real, fake].
y = np.concatenate([np.ones(shape=(num_test, 1)), np.zeros(shape=(num_test, 1))])
y += 0.05 * np.random.random(size=y.shape)
self.d_test_loss = self.discriminator.evaluate(x, y)
# Make new noise.
noise = np.random.uniform(low=-1.0, high=1.0, size=(num_test, self.z_size))
trick = np.ones(shape=(num_test, 1))
self.gan_test_loss = self.GAN.evaluate(noise, trick)
return [self.d_test_loss, self.gan_test_loss]
class DeeperConvGenAdvNet(DeepConvGenAdvNet):
def __init__(self,
z_size=None,
iterations=None,
batch_size=None):
super(DeeperConvGenAdvNet, self).__init__(z_size=z_size,
iterations=iterations,
batch_size=batch_size)
def build_discriminator(self):
image = Input(shape=(28, 28, 1))
cnn = Conv2D(filters=100, kernel_size=(8, 8), padding="same", strides=(1, 1), activation="elu")(image)
cnn = Dropout(rate=0.5)(cnn)
cnn = Conv2D(filters=100, kernel_size=(8, 8), padding="same", strides=(1, 1), activation="elu")(cnn)
cnn = Dropout(rate=0.5)(cnn)
cnn = Conv2D(filters=100, kernel_size=(8, 8), padding="same", strides=(1, 1), activation="elu")(cnn)
cnn = layers.MaxPooling2D(pool_size=(4, 4))(cnn)
cnn = Flatten()(cnn)
self.feature_extractor = Model(image, cnn)
is_real_img = Dense(units=1, activation="sigmoid", name="discriminator")(cnn)
return Model(image, is_real_img)
class DeepConvGenAdvNetInsurance(DeepConvGenAdvNet):
def __init__(self,
z_size=None,
iterations=None,
batch_size=None):
super(DeepConvGenAdvNetInsurance, self).__init__(z_size=z_size,
iterations=iterations,
batch_size=batch_size)
def build_generator(self):
# We will map z, a latent vector, to image space (..., 4, 3, 1).
latent = Input(shape=(self.z_size,))
# This produces a (..., 4, 3, 1) shaped tensor.
cnn = Dense(units=100, activation="tanh")(latent)
cnn = Dense(units=100, activation="tanh")(cnn)
cnn = Dense(units=100, activation="tanh")(cnn)
cnn = Dense(units=1 * 3 * 4, activation="sigmoid")(cnn)
fake_input_ = Reshape((4, 3, 1))(cnn)
return Model(latent, fake_input_)
def build_discriminator(self):
input_ = Input(shape=(4, 3, 1))
cnn = Flatten()(input_)
cnn = Dense(units=100, activation="elu")(cnn)
cnn = Dropout(rate=0.5)(cnn)
self.feature_extractor = Model(input_, cnn)
is_real_input_ = Dense(units=1, activation="sigmoid", name="discriminator")(cnn)
return Model(input_, is_real_input_)
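# Illustrative usage sketch with hypothetical hyperparameters and variable
# names; inputs are (N, 28, 28, 1) arrays scaled to [0, 1], and fit() writes
# sample images into an existing "outputs/" directory:
#   gan = DeepConvGenAdvNet(z_size=100, iterations=10000, batch_size=32)
#   gan.fit(x_train)
#   features = gan.transform(x_test)          # discriminator features
#   d_loss, gan_loss = gan.evaluate(x_test)   # held-out losses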
| 40.556863
| 307
| 0.593696
|
a009dac46d593971361c1dce912b4d85ae2b5603
| 3,616
|
py
|
Python
|
memory.py
|
AliengirlLiv/dreamer-pytorch-1
|
33979d7c61d0406d27ea46b9dcbbd823f765a518
|
[
"MIT"
] | null | null | null |
memory.py
|
AliengirlLiv/dreamer-pytorch-1
|
33979d7c61d0406d27ea46b9dcbbd823f765a518
|
[
"MIT"
] | null | null | null |
memory.py
|
AliengirlLiv/dreamer-pytorch-1
|
33979d7c61d0406d27ea46b9dcbbd823f765a518
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from env import postprocess_observation, preprocess_observation_
class ExperienceReplay():
def __init__(self, size, symbolic_env, observation_size, state_size, action_size, bit_depth, device):
self.device = device
self.symbolic_env = symbolic_env
self.size = size
self.observations = np.empty((size, observation_size) if symbolic_env else (size, 3, 64, 64), dtype=np.float32 if symbolic_env else np.uint8)
self.states = np.empty((size, state_size), dtype=np.float32)
self.has_state = state_size > 0
self.actions = np.empty((size, action_size), dtype=np.float32)
self.rewards = np.empty((size, ), dtype=np.float32)
self.nonterminals = np.empty((size, 1), dtype=np.float32)
self.idx = 0
self.full = False # Tracks if memory has been filled/all slots are valid
self.steps, self.episodes = 0, 0 # Tracks how much experience has been used in total
self.bit_depth = bit_depth
def append(self, observation, action, reward, done):
if type(observation) is tuple:
observation, state = observation
self.states[self.idx] = state.cpu().numpy()
if self.symbolic_env:
self.observations[self.idx] = observation.cpu().numpy()
else:
self.observations[self.idx] = postprocess_observation(observation.cpu().numpy(), self.bit_depth) # Decentre and discretise visual observations (to save memory)
self.actions[self.idx] = action.numpy()
self.rewards[self.idx] = reward
self.nonterminals[self.idx] = not done
self.idx = (self.idx + 1) % self.size
self.full = self.full or self.idx == 0
self.steps, self.episodes = self.steps + 1, self.episodes + (1 if done else 0)
# Returns an index for a valid single sequence chunk uniformly sampled from the memory
def _sample_idx(self, L):
valid_idx = False
while not valid_idx:
idx = np.random.randint(0, self.size if self.full else self.idx - L)
idxs = np.arange(idx, idx + L) % self.size
valid_idx = not self.idx in idxs[1:] # Make sure data does not cross the memory index
return idxs
def _retrieve_batch(self, idxs, n, L):
vec_idxs = idxs.transpose().reshape(-1) # Unroll indices
observations = torch.as_tensor(self.observations[vec_idxs].astype(np.float32))
if not self.symbolic_env:
preprocess_observation_(observations, self.bit_depth) # Undo discretisation for visual observations
observations = observations.reshape(L, n, *observations.shape[1:])
if self.has_state:
observations = (observations, self.states[vec_idxs].reshape(L, n, -1))
return observations, self.actions[vec_idxs].reshape(L, n, -1), self.rewards[vec_idxs].reshape(L, n), self.nonterminals[vec_idxs].reshape(L, n, 1)
# Returns a batch of sequence chunks uniformly sampled from the memory
def sample(self, n, L):
batch = self._retrieve_batch(np.asarray([self._sample_idx(L) for _ in range(n)]), n, L)
return [torch.as_tensor(item).to(device=self.device) if type(item) is not tuple else tuple([torch.as_tensor(item_i).to(device=self.device) for item_i in item])
for item in batch]
| 52.405797
| 201
| 0.6651
|
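The ExperienceReplay class above stores transitions in flat ring-buffer arrays and samples fixed-length sequence chunks that never cross the current write position. The sketch below is a minimal, self-contained NumPy illustration of that same ring-buffer-plus-chunk-sampling idea; the buffer size, observation width, chunk length, and random data are arbitrary assumptions, and it is not the dreamer-pytorch API.

import numpy as np

class TinySequenceReplay:
    """Minimal ring buffer that samples contiguous chunks, mirroring the idea in ExperienceReplay."""
    def __init__(self, size, obs_dim):
        self.size = size
        self.obs = np.empty((size, obs_dim), dtype=np.float32)
        self.rew = np.empty((size,), dtype=np.float32)
        self.idx, self.full = 0, False

    def append(self, obs, rew):
        self.obs[self.idx] = obs
        self.rew[self.idx] = rew
        self.idx = (self.idx + 1) % self.size   # wrap around once the buffer fills up
        self.full = self.full or self.idx == 0

    def sample_chunk(self, L):
        # Draw a start index whose L-step chunk does not cross the current write position.
        while True:
            start = np.random.randint(0, self.size if self.full else self.idx - L)
            idxs = np.arange(start, start + L) % self.size
            if self.idx not in idxs[1:]:
                return self.obs[idxs], self.rew[idxs]

buf = TinySequenceReplay(size=1000, obs_dim=4)
for t in range(200):
    buf.append(np.random.randn(4), float(t))
obs_chunk, rew_chunk = buf.sample_chunk(L=16)
print(obs_chunk.shape, rew_chunk.shape)   # (16, 4) (16,)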
000d13a013a8a71f5b436d8c03939f20e8c24de3
| 5,048
|
py
|
Python
|
wat_bridge/signals.py
|
fabi321/wat-bridge
|
c376d0ffa46841efcf7ddf66517a412c373a2f94
|
[
"MIT"
] | 2
|
2020-11-23T17:38:34.000Z
|
2021-01-14T13:23:25.000Z
|
wat_bridge/signals.py
|
fabi321/WhatsApp-Telegram-Bridge
|
c376d0ffa46841efcf7ddf66517a412c373a2f94
|
[
"MIT"
] | null | null | null |
wat_bridge/signals.py
|
fabi321/WhatsApp-Telegram-Bridge
|
c376d0ffa46841efcf7ddf66517a412c373a2f94
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# wat-bridge
# https://github.com/rmed/wat-bridge
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Rafael Medina García <rafamedgar@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Signal handlers."""
import os
import magic
import sys
from wat_bridge.helper import get_contact, get_phone, db_get_group
from wat_bridge.static import SETTINGS, get_logger
from wat_bridge.tg import updater as tgbot
from wat_bridge.wa import wabot
from wat_bridge.helper import DataMedia
from wat_bridge.helper import replace_phone_with_name
logger = get_logger('signals')
def split_string(text, chars_per_string):
"""
    Splits one string into multiple strings, with a maximum of `chars_per_string` characters per string.
    This is very useful for splitting one giant message into multiple smaller ones.
    :param text: The text to split
    :param chars_per_string: The maximum number of characters per resulting string.
    :return: The split text as a list of strings.
"""
return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]
def sigint_handler(signal, frame):
"""Function used as handler for SIGINT to terminate program."""
sys.exit(0)
def to_tg_handler(sender, **kwargs):
"""Handle signals sent to Telegram.
This will involve sending messages through the Telegram bot.
Args:
phone (str): Phone number that sent the message.
message (str): The message received
        media (DataMedia): Media attachment to forward, or None for a text message.
"""
phone = kwargs.get('phone')
message = kwargs.get('message')
media: DataMedia = kwargs.get('media')
# Check if known contact
contact = get_contact(phone)
chat_id = db_get_group(contact)
if not chat_id:
chat_id = SETTINGS['owner']
if media:
# Media Messages
type: str = media.get_type()
path: str = media.get_args()[0]
caption: str = media.get_kwargs()['caption']
caption = replace_phone_with_name(caption)
if type == "image":
tgbot.bot.send_photo(chat_id, open(path, 'rb'), caption=caption)
elif type == "video":
tgbot.bot.send_video(chat_id, open(path, "rb"), caption=caption, supports_streaming=True)
else:
tgbot.bot.send_document(chat_id, open(path, 'rb'), caption=caption)
else:
message = replace_phone_with_name(message)
# Text Messages
if not contact:
# Unknown sender
output = 'Message from #unknown\n'
output += 'Phone number: %s\n' % phone
output += '---------\n'
output += message
logger.info('received message from unknown number: %s' % phone)
else:
group = db_get_group(contact)
if not group:
# Known sender
output = 'Message from #%s\n' % contact
output += '---------\n'
output += message
else:
# Contact is bound to group
chat_id = group
output = message
logger.info('received message from %s' % contact)
# Deliver message through Telegram
for chunk in split_string(output, 3000):
tgbot.bot.send_message(chat_id, chunk)
def to_wa_handler(sender, **kwargs):
"""Handle signals sent to Whatsapp.
This will involve sending messages through the Whatsapp bot.
Args:
contact (str): Name of the contact to send the message to.
message (str): The message to send
"""
contact = kwargs.get('contact')
message = kwargs.get('message')
media = kwargs.get('media')
# Check if known contact
phone = get_phone(contact)
if not phone:
# Abort
tgbot.bot.send_message(
SETTINGS['owner'],
'Unknown contact: "%s"' % contact
)
return
logger.info('sending message to %s (%s)' % (contact, phone))
wabot.send_msg(phone=phone, message=message, media=media)
| 34.108108
| 111
| 0.658082
|
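Telegram rejects over-long messages, so to_tg_handler above chops its output into 3000-character chunks with split_string before sending. The sketch below reuses that helper verbatim on a dummy message to show the chunking; the 6500-character string is an arbitrary assumption and the actual send call is only referenced in a comment.

def split_string(text, chars_per_string):
    """Split text into pieces of at most chars_per_string characters (same logic as signals.py)."""
    return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]

long_message = "x" * 6500                 # stand-in for a long forwarded WhatsApp message
chunks = split_string(long_message, 3000)
print([len(c) for c in chunks])           # [3000, 3000, 500]
for chunk in chunks:
    pass  # in the real handler each chunk goes to tgbot.bot.send_message(chat_id, chunk)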
159b5699028189a92b5c7799fe50732180ae4238
| 16,249
|
py
|
Python
|
lace/test_topology.py
|
bodylabs/lace
|
a6ae80787c8c6ba197bd9bad9254b503f4e05c73
|
[
"BSD-2-Clause"
] | 2
|
2020-05-30T10:28:34.000Z
|
2021-02-17T13:47:23.000Z
|
lace/test_topology.py
|
lace/lace
|
a6ae80787c8c6ba197bd9bad9254b503f4e05c73
|
[
"BSD-2-Clause"
] | 11
|
2019-08-29T16:53:29.000Z
|
2021-07-01T06:24:37.000Z
|
lace/test_topology.py
|
bodylabs/lace
|
a6ae80787c8c6ba197bd9bad9254b503f4e05c73
|
[
"BSD-2-Clause"
] | 5
|
2017-05-09T16:18:16.000Z
|
2018-05-08T16:16:09.000Z
|
# pylint: disable=len-as-condition
import unittest
from unittest import mock
import numpy as np
import scipy.sparse as sp
from bltest import attr
from lace.mesh import Mesh
from lace.serialization import obj
from lace.cache import vc
class TestTopologyMixin(unittest.TestCase):
def test_quads_to_tris(self):
from lace.topology import quads_to_tris
        quads = np.array([
            [3, 2, 1, 0],
            [4, 5, 6, 7],
            [0, 1, 5, 4],
            [1, 2, 6, 5],
            [2, 3, 7, 6],
            [0, 4, 7, 3],
        ])
        expected_tris = np.array([
            [3, 2, 1],
            [3, 1, 0],
            [4, 5, 6],
            [4, 6, 7],
            [0, 1, 5],
            [0, 5, 4],
            [1, 2, 6],
            [1, 6, 5],
            [2, 3, 7],
            [2, 7, 6],
            [0, 4, 7],
            [0, 7, 3],
        ])
        expected_f_old_to_new = np.array([
            [0, 1],
            [2, 3],
            [4, 5],
            [6, 7],
            [8, 9],
            [10, 11],
        ])
        np.testing.assert_array_equal(quads_to_tris(quads), expected_tris)
        tris, f_old_to_new = quads_to_tris(quads, ret_mapping=True)
        np.testing.assert_array_equal(expected_tris, tris)
        np.testing.assert_array_equal(f_old_to_new, expected_f_old_to_new)
def indicies_for_testing_keep_vertices(self, mesh):
'''
        These tests have failed in the past due to hard-coded vertices, so let's
        generate what we need programmatically.
        keep_vertices will update both .v and .f. The behavior on .v is simple: it
        will remove all vertices except those whose indices it's given. Here we're
producing indices_to_keep to be the input to keep_vertices and expected_verts,
which should be the value of .v after the call.
The behavior on .f is more complex. .f is rewritten, to include only those
triangles from the original mesh where all three of the vertices are kept. In
order to test this we're building indices_to_keep such that it includes all
the vertices of several known triangles and a bunch of vertices that are chosen
such that they do not span any triangles. This way we can be sure that the
function is discarding and keeping faces appropriately. To test this, we look
at expected_face_vertices, which is the subset of expected_verts that are
attached to faces. If this were to include all of expected_verts then we would
know that incomplete faces were being improperly included.
'''
expected_faces = [300, 900] # These are arbitrary.
indices_to_keep = list(mesh.f[expected_faces].flatten())
faces_seen = set(expected_faces)
v = 0
num_inds_to_keep = len(indices_to_keep) + 6 # Doesn't really matter how many, as long as there's a few
while len(indices_to_keep) < num_inds_to_keep and v < mesh.v.shape[0]:
# Adding a bunch of vertices that are chosen such that they do not span any triangles:
faces_containing_v = set(np.nonzero(np.any(mesh.f == v, axis=1))[0])
if len(faces_containing_v.intersection(faces_seen)) == 0:
indices_to_keep.append(v)
faces_seen.update(faces_containing_v)
v += 1
expected_verts = mesh.v[np.array(indices_to_keep, dtype=np.uint32)]
expected_face_vertices = mesh.v[mesh.f[expected_faces].flatten()]
return indices_to_keep, expected_verts, expected_face_vertices
@attr('missing_assets')
def test_keep_vertices(self):
mesh = obj.load(vc('/templates/cached_model_templates/sm_2013_f_0005.obj'))
# set vc and vc for completeness
mesh.set_vertex_colors("blue")
mesh.reset_normals()
indices_to_keep, expected_verts, expected_face_vertices = self.indicies_for_testing_keep_vertices(mesh)
mesh.keep_vertices(indices_to_keep)
np.testing.assert_array_equal(mesh.v, expected_verts)
np.testing.assert_array_equal(mesh.v[mesh.f.flatten()], expected_face_vertices)
max_v_index = np.max(mesh.f.flatten())
self.assertLessEqual(max_v_index, mesh.v.shape[0] - 1)
@attr('missing_assets')
def test_keep_vertices_without_segm(self):
mesh = obj.load(vc('/templates/cached_model_templates/sm_2013_f_0005.obj'))
mesh.segm = None
indices_to_keep, expected_verts, expected_face_vertices = self.indicies_for_testing_keep_vertices(mesh)
mesh.keep_vertices(indices_to_keep)
np.testing.assert_array_equal(mesh.v, expected_verts)
np.testing.assert_array_equal(mesh.v[mesh.f.flatten()], expected_face_vertices)
max_v_index = np.max(mesh.f.flatten())
self.assertLessEqual(max_v_index, mesh.v.shape[0] - 1)
@attr('missing_assets')
def test_keep_vertices_without_f(self):
mesh = obj.load(vc('/templates/cached_model_templates/sm_2013_f_0005.obj'))
mesh.segm = None
mesh.f = None
indices_to_keep = [1, 2, 3, 5, 8, 273, 302, 11808, 11847, 12031, 12045]
expected_verts = mesh.v[indices_to_keep]
mesh.keep_vertices(indices_to_keep)
np.testing.assert_array_equal(mesh.v, expected_verts)
self.assertIs(mesh.f, None)
def test_keep_vertices_with_no_verts_does_not_raise(self):
mesh = Mesh()
mesh.keep_vertices([])
@attr('missing_assets')
@mock.patch('warnings.warn')
def test_keep_vertices_with_empty_list_does_not_warn(self, warn):
mesh = obj.load(vc('/templates/cached_model_templates/sm_2013_f_0005.obj'))
mesh.keep_vertices([])
self.assertFalse(warn.called)
def test_vertex_indices_in_segments(self):
from lace.shapes import create_cube
cube = create_cube(np.zeros(3), 1.)
cube.segm = {
# All quads.
'all': np.arange(12),
# Quads 2 and 3.
'two_adjacent_sides': [4, 5, 6, 7],
# Quad 0.
'lower_base': [0, 1],
}
np.testing.assert_array_equal(
cube.vertex_indices_in_segments(['all']),
np.arange(8)
)
np.testing.assert_array_equal(
len(cube.vertex_indices_in_segments(['lower_base'])),
4
)
np.testing.assert_array_equal(
len(cube.vertex_indices_in_segments(['two_adjacent_sides'])),
6
)
np.testing.assert_array_equal(
len(cube.vertex_indices_in_segments(['lower_base', 'two_adjacent_sides'])),
7
)
with self.assertRaises(ValueError):
cube.vertex_indices_in_segments(['random_segm'])
@attr('missing_assets')
def test_keep_segments(self):
mesh = obj.load(vc('/templates/cached_model_templates/sm_2013_f_0005.obj'))
expected_parts = ['rightCalf', 'head', 'rightHand', 'leftTorso', 'midsection', 'leftFoot', 'rightTorso', 'rightThigh', 'leftCalf', 'rightShoulder', 'leftShoulder', 'leftThigh', 'pelvis', 'leftForearm', 'rightFoot', 'leftHand', 'rightUpperArm', 'rightForearm', 'leftUpperArm']
self.assertEqual(set(mesh.segm.keys()), set(expected_parts))
self.assertEqual(len(mesh.segm['rightFoot']), 3336)
self.assertEqual(len(mesh.segm['leftFoot']), 3336)
segments_to_keep = ['leftFoot', 'rightFoot']
mesh.keep_segments(segments_to_keep)
self.assertEqual(len(mesh.f), 6672)
self.assertEqual(len(mesh.segm['rightFoot']), 3336)
self.assertEqual(len(mesh.segm['leftFoot']), 3336)
self.assertEqual(set(mesh.segm.keys()), set(segments_to_keep))
        max_f_index = np.max(np.concatenate([np.asarray(faces) for faces in mesh.segm.values()]))
self.assertEqual(max_f_index, mesh.f.shape[0] - 1)
def test_clean_segments(self):
from lace.shapes import create_cube
cube = create_cube(np.zeros(3), 1.)
cube.segm = {
'all': np.arange(12)
}
self.assertEqual(cube.clean_segments(['random_segm', 'all']), ['all'])
def test_flip_faces(self):
from lace.shapes import create_rectangular_prism
box = create_rectangular_prism(np.array([1.0, 1.0, 1.0]), np.array([4.0, 2.0, 1.0]))
box.reset_normals()
original_vn = box.vn.copy()
original_f = box.f.copy()
box.flip_faces()
box.reset_normals()
self.assertEqual(box.f.shape, original_f.shape)
for face, orig_face in zip(box.f, original_f):
self.assertNotEqual(list(face), list(orig_face))
self.assertEqual(set(face), set(orig_face))
np.testing.assert_array_almost_equal(box.vn, np.negative(original_vn))
def test_vert_connectivity(self):
from lace.shapes import create_cube
cube = create_cube(np.zeros(3), 1.)
connectivity = cube.vert_connectivity
self.assertTrue(sp.issparse(connectivity))
self.assertEqual(connectivity.shape, (cube.v.shape[0], cube.v.shape[0]))
# Assert that neighbors are marked:
for face in cube.f:
face = np.asarray(face, dtype=np.uint32)
self.assertNotEqual(connectivity[face[0], face[1]], 0)
self.assertNotEqual(connectivity[face[1], face[2]], 0)
self.assertNotEqual(connectivity[face[2], face[0]], 0)
# Assert that non-neighbors are not marked:
for v_index in set(cube.f.flatten()):
faces_with_this_v = set(cube.f[np.any(cube.f == v_index, axis=1)].flatten())
not_neighbors_of_this_v = set(cube.f.flatten()) - faces_with_this_v
for vert in not_neighbors_of_this_v:
self.assertEqual(connectivity[int(vert), int(v_index)], 0)
self.assertEqual(connectivity[int(v_index), int(vert)], 0)
def test_vert_opposites_per_edge(self):
from lace.shapes import create_cube
cube = create_cube(np.zeros(3), 1.)
opposites = cube.vert_opposites_per_edge
self.assertIsInstance(opposites, dict)
for e, op in opposites.items():
self.assertIsInstance(e, tuple)
self.assertEqual(len(e), 2)
faces_with_e0 = set(cube.f[np.any(cube.f == e[0], axis=1)].flatten())
faces_with_e1 = set(cube.f[np.any(cube.f == e[1], axis=1)].flatten())
self.assertEqual(faces_with_e0.intersection(faces_with_e1) - set(e), set(op))
def test_vertices_in_common(self):
import timeit
from lace.topology import vertices_in_common
self.assertEqual(vertices_in_common([0, 1, 2], [0, 1, 3]), [0, 1])
self.assertEqual(vertices_in_common([0, 1, 2], [3, 0, 1]), [0, 1])
self.assertEqual(vertices_in_common([0, 1, 2], [3, 4, 5]), [])
self.assertEqual(vertices_in_common([0, 1, 2], [0, 3, 4]), [0])
self.assertEqual(vertices_in_common([0, 1, 2], [0, 1, 2]), [0, 1, 2])
self.assertEqual(vertices_in_common([0, 1], [0, 1, 2]), [0, 1])
self.assertEqual(vertices_in_common([0, 1, 2], [0, 1]), [0, 1])
self.assertEqual(vertices_in_common([0, 1, 2], [0, 1, 2, 3]), [0, 1, 2])
self.assertLess(timeit.timeit('vertices_in_common([0, 1, 2], [0, 1, 3])', setup='from lace.topology import vertices_in_common', number=10000), 0.015)
def edges_the_hard_way(self, faces):
from collections import Counter
e0 = np.vstack((faces[:, 0], faces[:, 1]))
e1 = np.vstack((faces[:, 1], faces[:, 2]))
e2 = np.vstack((faces[:, 2], faces[:, 0]))
e = np.hstack((e0, e1, e2)).T
edge_count = Counter((min(a, b), max(a, b)) for a, b in e)
return [x for x, count in edge_count.items() if count == 2]
def test_faces_per_edge(self):
import timeit
from lace.shapes import create_cube
cube = create_cube(np.zeros(3), 1.)
self.assertEqual(len(cube.faces_per_edge), len(self.edges_the_hard_way(cube.f)))
for e in cube.faces_per_edge:
# Check that each of these edges points to a pair of faces that
# share two vertices -- that is, faces that share an edge.
self.assertEqual(len(set(cube.f[e[0]]).intersection(set(cube.f[e[1]]))), 2)
# Now check that changing the faces clears the cache
cube.f = cube.f[[1, 2, 3, 4, 6, 7, 8, 9, 10, 11]] # remove [0 1 2] & [4 1 0] so edge [0, 1] is gone
self.assertEqual(len(cube.faces_per_edge), len(self.edges_the_hard_way(cube.f)))
for e in cube.faces_per_edge:
self.assertEqual(len(set(cube.f[e[0]]).intersection(set(cube.f[e[1]]))), 2)
# And test that caching happens -- without caching, this takes about 5 seconds:
self.assertLess(timeit.timeit('cube.faces_per_edge', setup='from lace.shapes import create_cube; cube = create_cube([0., 0., 0.], 1.)', number=10000), 0.01)
def test_vertices_per_edge(self):
import timeit
from lace.shapes import create_cube
cube = create_cube(np.zeros(3), 1.)
self.assertEqual(len(cube.vertices_per_edge), len(self.edges_the_hard_way(cube.f)))
self.assertEqual(set([(min(a, b), max(a, b)) for a, b in cube.vertices_per_edge]), set(self.edges_the_hard_way(cube.f)))
# Now check that changing the faces clears the cache
cube.f = cube.f[[1, 2, 3, 4, 6, 7, 8, 9, 10, 11]] # remove [0 1 2] & [4 1 0] so edge [0, 1] is gone
self.assertEqual(len(cube.vertices_per_edge), len(self.edges_the_hard_way(cube.f)))
self.assertEqual(set([(min(a, b), max(a, b)) for a, b in cube.vertices_per_edge]), set(self.edges_the_hard_way(cube.f)))
# And test that caching happens -- without caching, this takes about 5 seconds:
self.assertLess(timeit.timeit('cube.vertices_per_edge', setup='from lace.shapes import create_cube; cube = create_cube([0., 0., 0.], 1.)', number=10000), 0.01)
def test_vertices_to_edges_matrix(self):
import timeit
from lace.shapes import create_cube
cube = create_cube(np.zeros(3), 1.)
calculated_edges = cube.vertices_to_edges_matrix.dot(cube.v.ravel()).reshape((-1, 3))
self.assertEqual(len(calculated_edges), len(cube.vertices_per_edge))
for e, e_ind in zip(calculated_edges, cube.vertices_per_edge):
np.testing.assert_array_equal(e, cube.v[e_ind[0]] - cube.v[e_ind[1]])
# And test that caching happens -- without caching, this takes about 5 seconds:
self.assertLess(timeit.timeit('cube.vertices_to_edges_matrix', setup='from lace.shapes import create_cube; cube = create_cube([0., 0., 0.], 1.)', number=10000), 0.01)
    def test_vertices_to_edges_matrix_single_axis(self):
        from lace.shapes import create_cube
        cube = create_cube(np.zeros(3), 1.)
        # Assert that it produces the same results as vertices_to_edges_matrix:
        single_axis_edges = np.vstack([cube.vertices_to_edges_matrix_single_axis.dot(cube.v[:, ii]) for ii in range(3)]).T
        np.testing.assert_array_almost_equal(single_axis_edges,
                                             cube.vertices_to_edges_matrix.dot(cube.v.ravel()).reshape((-1, 3)))
def test_remove_redundant_verts(self):
eps = 1e-15
from lace.shapes import create_cube
cube = create_cube(np.zeros(3), 1.)
orig_v = cube.v.copy()
orig_f = cube.f.copy()
cube.f[1:4] = cube.f[1:4] + len(cube.v)
cube.v = np.vstack((cube.v, cube.v + eps))
cube.remove_redundant_verts()
np.testing.assert_array_equal(cube.v, orig_v)
np.testing.assert_array_equal(cube.f, orig_f)
def test_has_same_topology(self):
from lace.shapes import create_cube
cube_1 = create_cube(np.zeros(3), 1.)
cube_2 = create_cube(np.zeros(3), 1.)
self.assertTrue(cube_1.has_same_topology(cube_2))
cube_1 = create_cube(np.zeros(3), 1.)
cube_2 = create_cube(np.ones(3), 1.)
self.assertTrue(cube_1.has_same_topology(cube_2))
cube_1 = create_cube(np.zeros(3), 1.)
cube_2 = create_cube(np.zeros(3), 1.)
cube_2.f = np.roll(cube_2.f, 1, axis=1)
self.assertFalse(cube_1.has_same_topology(cube_2))
cube_1 = create_cube(np.zeros(3), 1.)
cube_2 = create_cube(np.zeros(3), 1.)
del cube_2.f
self.assertFalse(cube_1.has_same_topology(cube_2))
| 44.517808
| 283
| 0.631793
|
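The first test above pins down the quad-to-triangle fan mapping that quads_to_tris is expected to perform: each quad [a, b, c, d] becomes the two triangles [a, b, c] and [a, c, d]. The sketch below is not the lace implementation, only a minimal NumPy illustration of exactly the mapping the expected arrays in the test assert.

import numpy as np

def quads_to_tris_sketch(quads):
    """Split each quad [a, b, c, d] into triangles [a, b, c] and [a, c, d]."""
    quads = np.asarray(quads)
    tris = np.empty((2 * len(quads), 3), dtype=quads.dtype)
    tris[0::2] = quads[:, [0, 1, 2]]   # first triangle of each quad
    tris[1::2] = quads[:, [0, 2, 3]]   # second triangle of each quad
    return tris

print(quads_to_tris_sketch([[3, 2, 1, 0]]))   # [[3 2 1], [3 1 0]], matching the test's expected output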
6e78764ed1bb892bf0f0f7c3afa1afa238dd5c40
| 979
|
py
|
Python
|
School/urls.py
|
anish1997bendarkar/pytest
|
de7bf758e75b7c4f921e89b2695aca14bf48ee05
|
[
"MIT"
] | 16
|
2020-02-26T09:52:43.000Z
|
2021-12-21T07:03:10.000Z
|
School/urls.py
|
anish1997bendarkar/pytest
|
de7bf758e75b7c4f921e89b2695aca14bf48ee05
|
[
"MIT"
] | 4
|
2021-03-18T23:31:32.000Z
|
2021-06-10T18:25:30.000Z
|
School/urls.py
|
anish1997bendarkar/pytest
|
de7bf758e75b7c4f921e89b2695aca14bf48ee05
|
[
"MIT"
] | 11
|
2020-06-05T00:06:11.000Z
|
2022-02-14T07:50:31.000Z
|
"""School URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from rest_framework.authtoken import views
urlpatterns = [
path("admin/", admin.site.urls),
path("api/", include("classroom.api.urls")),
re_path(r"^api-auth/", include("rest_framework.urls")),
re_path(r"^api-token-auth/", views.obtain_auth_token),
]
| 34.964286
| 77
| 0.708887
|
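The docstring above spells out the recipe for wiring up function views: import the view, then add a path() entry. The sketch below applies that recipe to a hypothetical home view; the classroom app name comes from the file itself, but classroom.views.home is an assumed view that does not exist in the snippet above, so this is only an illustration of the pattern.

# Hypothetical extension following the docstring's "Function views" recipe.
from django.contrib import admin
from django.urls import path, include

from classroom import views  # assumed module/view, for illustration only

urlpatterns = [
    path("admin/", admin.site.urls),
    path("", views.home, name="home"),            # 1. import the view, 2. add a path()
    path("api/", include("classroom.api.urls")),
]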
2add75c4ca1ea49f2d5ea8f827bec3b3e75c0933
| 724
|
py
|
Python
|
main.py
|
davihonorato/Jogo_da_Velha
|
acf8d1ce9b906d2e267732426f362aa6a818436b
|
[
"MIT"
] | null | null | null |
main.py
|
davihonorato/Jogo_da_Velha
|
acf8d1ce9b906d2e267732426f362aa6a818436b
|
[
"MIT"
] | null | null | null |
main.py
|
davihonorato/Jogo_da_Velha
|
acf8d1ce9b906d2e267732426f362aa6a818436b
|
[
"MIT"
] | null | null | null |
from functions import *
inicio()
while True:
win = False
count = 0
tabela = ['1', '2', '3',
'4', '5', '6',
'7', '8', '9']
interface(tabela)
while win is False:
while True:
if count % 2 == 0:
user = 'X'
                resp = leiaNum('PLAYER 1: ')
            else:
                user = 'O'
                resp = leiaNum('PLAYER 2: ')
            if tabela[resp-1].isalpha():
                print('THAT MOVE IS NOT POSSIBLE.')
else:
tabela[resp - 1] = user
print('-' * 30)
break
count += 1
interface(tabela)
win = verificar(tabela, user)
break
| 24.133333
| 52
| 0.399171
|
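The game loop above leans on helpers from a functions module that is not shown here (inicio, interface, leiaNum, verificar). The sketch below is a purely hypothetical win check for the flat 9-element board layout used in tabela, showing what a verificar-style helper could look like; it is an assumption, not the author's implementation.

# Hypothetical win check for a 3x3 board stored as a flat 9-element list,
# matching the `tabela` layout above. The real `verificar` in functions.py is not shown.
WIN_LINES = [
    (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
    (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
    (0, 4, 8), (2, 4, 6),              # diagonals
]

def verificar_sketch(board, player):
    """Return True if `player` ('X' or 'O') occupies any complete line."""
    return any(all(board[i] == player for i in line) for line in WIN_LINES)

print(verificar_sketch(['X', 'X', 'X', '4', '5', '6', '7', '8', '9'], 'X'))  # True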
8497a5120d043788aacc46633c04541748dc5ad2
| 177
|
py
|
Python
|
Quiz6.py
|
JackieG19/pythonPractice
|
6296e785a5a4698c38bcf414fdb9dbbccc73400f
|
[
"MIT"
] | null | null | null |
Quiz6.py
|
JackieG19/pythonPractice
|
6296e785a5a4698c38bcf414fdb9dbbccc73400f
|
[
"MIT"
] | null | null | null |
Quiz6.py
|
JackieG19/pythonPractice
|
6296e785a5a4698c38bcf414fdb9dbbccc73400f
|
[
"MIT"
] | null | null | null |
# Write Python code that prints out Udacity (with a capital U), given the definition of s below.
s = 'audacity'
print "U" + s[2] + s[3] + s[4] + s[5] + s[6] + s[7]
# Udacity
| 22.125
| 96
| 0.610169
|
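The quiz answer above builds "Udacity" by concatenating individual characters. Plain string slicing expresses the same thing more compactly; the example below is standard Python semantics, not part of the original exercise.

s = 'audacity'
# Equivalent to concatenating s[2] through s[7] one character at a time:
print("U" + s[2:])     # Udacity
print("U" + s[2:8])    # Udacity (explicit end index)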
a77e83133be6f13fbf499e5299f37e641a2920d2
| 8,873
|
py
|
Python
|
msticpy/sectools/tiproviders/open_page_rank.py
|
kubajir/msticpy
|
7b319b71b191b5f75dcf9afd87492523a74b5ad7
|
[
"MIT"
] | 820
|
2019-05-16T07:24:34.000Z
|
2022-03-31T09:18:10.000Z
|
msticpy/sectools/tiproviders/open_page_rank.py
|
kubajir/msticpy
|
7b319b71b191b5f75dcf9afd87492523a74b5ad7
|
[
"MIT"
] | 205
|
2019-06-24T19:24:19.000Z
|
2022-03-30T23:13:46.000Z
|
msticpy/sectools/tiproviders/open_page_rank.py
|
kubajir/msticpy
|
7b319b71b191b5f75dcf9afd87492523a74b5ad7
|
[
"MIT"
] | 171
|
2019-06-23T13:53:12.000Z
|
2022-03-29T18:22:46.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Open Page Rank Provider.
Input can be a single IoC observable or a pandas DataFrame containing
multiple observables. Processing may require an API key, and
processing performance may be limited to a specific number of
requests per minute for the account type that you have.
"""
from json import JSONDecodeError
from typing import Any, Tuple, Union, Iterable, Dict, List
import attr
import pandas as pd
from .ti_provider_base import LookupResult, TISeverity, generate_items, TILookupStatus
from .http_base import HttpProvider, IoCLookupParams
from ...common.utility import export
from ..._version import VERSION
__version__ = VERSION
__author__ = "Ian Hellen"
@export
class OPR(HttpProvider):
"""Open PageRank Lookup."""
_BASE_URL = "https://openpagerank.com"
_IOC_QUERIES = {
"dns": IoCLookupParams(
path="/api/v1.0/getPageRank",
params={"domains[0]": "{observable}"},
headers={"API-OPR": "{API_KEY}"},
)
}
_REQUIRED_PARAMS = ["API_KEY"]
def __init__(self, **kwargs):
"""Initialize a new instance of the class."""
super().__init__(**kwargs)
self._provider_name = self.__class__.__name__
print(
"Using Open PageRank.",
"See https://www.domcop.com/openpagerank/what-is-openpagerank",
)
# pylint: disable=duplicate-code
def lookup_iocs(
self,
data: Union[pd.DataFrame, Dict[str, str], Iterable[str]],
obs_col: str = None,
ioc_type_col: str = None,
query_type: str = None,
**kwargs,
) -> pd.DataFrame:
"""
Lookup collection of IoC observables.
Parameters
----------
data : Union[pd.DataFrame, Dict[str, str], Iterable[str]]
Data input in one of three formats:
1. Pandas dataframe (you must supply the column name in
`obs_col` parameter)
2. Dict of observable, IoCType
3. Iterable of observables - IoCTypes will be inferred
obs_col : str, optional
DataFrame column to use for observables, by default None
ioc_type_col : str, optional
DataFrame column to use for IoCTypes, by default None
query_type : str, optional
Specify the data subtype to be queried, by default None.
If not specified the default record type for the IoC type
will be returned.
Returns
-------
pd.DataFrame
DataFrame of results.
"""
kwargs.get("provider_name", self.__class__.__name__)
domain_list = set()
bad_requests: List[pd.Series] = []
for ioc, ioc_type in generate_items(data, obs_col, ioc_type_col):
if not ioc:
continue
result = self._check_ioc_type(
ioc=ioc, ioc_type=ioc_type, query_subtype=query_type
)
if result.status == TILookupStatus.ok.value:
domain_list.add(result.ioc)
else:
bad_requests.append(pd.Series(attr.asdict(result)))
results: List[pd.Series] = []
if not domain_list:
return pd.DataFrame(columns=LookupResult.column_map())
for item_result in self._lookup_bulk_request(domain_list): # type: ignore
results.append(pd.Series(attr.asdict(item_result)))
all_results = results + bad_requests
return pd.DataFrame(data=all_results).rename(columns=LookupResult.column_map())
def parse_results(self, response: LookupResult) -> Tuple[bool, TISeverity, Any]:
"""
Return the details of the response.
Parameters
----------
response : LookupResult
The returned data response
Returns
-------
Tuple[bool, TISeverity, Any]
bool = positive or negative hit
TISeverity = enumeration of severity
Object with match details
"""
if self._failed_response(response) or not isinstance(response.raw_result, dict):
return False, TISeverity.information, "Not found."
severity = TISeverity.information
if "response" in response.raw_result:
dom_records = response.raw_result["response"]
dom_record = dom_records[0]
return self._parse_one_record(dom_record)
return True, severity, {}
def _parse_multi_results(self, response: LookupResult) -> Iterable[LookupResult]:
"""Parse details of batch response."""
if not isinstance(response.raw_result, dict):
new_result = LookupResult(**attr.asdict(response))
new_result.result = False
new_result.set_severity(TISeverity.information)
new_result.details = "Not found."
yield new_result
elif "response" in response.raw_result:
dom_records = response.raw_result["response"]
for dom_record in dom_records:
result, sev, details = self._parse_one_record(dom_record)
domain_name = dom_record["domain"]
new_result = LookupResult(ioc=domain_name, ioc_type="dns")
new_result.ioc = domain_name
new_result.provider = self._provider_name
new_result.result = result
new_result.set_severity(sev)
new_result.details = details
new_result.raw_result = dom_record
new_result.reference = f"{response.reference}?domains[0]={domain_name}"
yield new_result
@staticmethod
def _parse_one_record(dom_record: dict):
record_status = dom_record.get("status_code", 404)
severity = TISeverity.information
if record_status == 200:
return (
True,
severity,
{
"rank": dom_record.get("rank", "0"),
"page_rank": dom_record.get("page_rank_decimal", 0),
"error": dom_record.get("error", ""),
},
)
if record_status == 404:
return (
True,
TISeverity.warning,
{
"rank": dom_record.get("rank", "0"),
"error": dom_record.get("error", ""),
},
)
return False, TISeverity.information, {}
def _lookup_bulk_request(self, ioc_list: Iterable[str]) -> Iterable[LookupResult]:
ioc_list = list(ioc_list)
batch_size = 100
l_len = len(ioc_list)
for step in range(0, l_len, batch_size):
batch_list = ioc_list[step : (step + batch_size)] # noqa: E203
for result in self._lookup_batch(batch_list):
yield result
# pylint: disable=duplicate-code
def _lookup_batch(self, ioc_list: list) -> Iterable[LookupResult]:
# build the query string manually - of the form domains[N]=domN&domains[N+1]...
qry_elements = []
for idx, dom in zip(range(0, len(ioc_list)), ioc_list):
qry_elements.append(f"domains[{idx}]={dom}")
qry_str = "&".join(qry_elements)
path = self._IOC_QUERIES["dns"].path
req_url = f"{self._BASE_URL}{path}?{qry_str}"
        # Create the result object up front so the except clause below can always reference it.
        result = LookupResult(ioc=",".join(ioc_list), ioc_type="dns")
        try:
            _, req_params = self._substitute_parms("dummy", "dns", None)
            response = self._requests_session.get(
                url=req_url, headers=req_params["headers"]
            )
if response.status_code == 200:
result.status = TILookupStatus.ok.value
result.reference = self._BASE_URL + path
result.raw_result = response.json()
for single_result in self._parse_multi_results(result):
yield single_result
else:
result.raw_result = str(response)
result.result = False
result.reference = req_url
result.status = response.status_code
result.details = "No response from provider."
yield result
except (
LookupError,
JSONDecodeError,
NotImplementedError,
ConnectionError,
) as err:
self._err_to_results(result, err)
if not isinstance(err, LookupError):
result.reference = req_url
yield result
# pylint: enable=duplicate-code
| 36.665289
| 88
| 0.580412
|
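_lookup_batch above builds the Open PageRank bulk query string by hand, of the form domains[0]=a&domains[1]=b, in batches of up to 100 domains. The sketch below isolates just that string-building step; the domain list and the batch size of 3 are arbitrary assumptions, and no request is made, so this is not the msticpy provider API itself.

BASE_URL = "https://openpagerank.com"
PATH = "/api/v1.0/getPageRank"

def build_batch_urls(domains, batch_size=3):
    """Yield request URLs of the form .../getPageRank?domains[0]=a&domains[1]=b ..."""
    domains = list(domains)
    for start in range(0, len(domains), batch_size):
        batch = domains[start:start + batch_size]
        query = "&".join("domains[{}]={}".format(idx, dom) for idx, dom in enumerate(batch))
        yield "{}{}?{}".format(BASE_URL, PATH, query)

for url in build_batch_urls(["example.com", "python.org", "github.com", "arxiv.org"]):
    print(url)
# .../getPageRank?domains[0]=example.com&domains[1]=python.org&domains[2]=github.com
# .../getPageRank?domains[0]=arxiv.org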
52a9c2048ff779b13955e249da17f62b7bb9c46b
| 96
|
py
|
Python
|
apis_core/apis_entities/apps.py
|
sviatoplok/apis-core
|
c23718af2a51598e32684b9b954b594ceef1f0f7
|
[
"MIT"
] | 1
|
2019-09-02T09:14:06.000Z
|
2019-09-02T09:14:06.000Z
|
apis_core/apis_entities/apps.py
|
sviatoplok/apis-core
|
c23718af2a51598e32684b9b954b594ceef1f0f7
|
[
"MIT"
] | null | null | null |
apis_core/apis_entities/apps.py
|
sviatoplok/apis-core
|
c23718af2a51598e32684b9b954b594ceef1f0f7
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class EntitiesConfig(AppConfig):
name = 'apis_entities'
| 16
| 33
| 0.770833
|
fc6615cdf6f62a7ff0c7af14acd8f67bab782dc6
| 520
|
py
|
Python
|
Online-Judges/DimikOJ/Python/10-run-rate-1.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3
|
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/DimikOJ/Python/10-run-rate-1.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/DimikOJ/Python/10-run-rate-1.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
for i in range(int(input())):
r1, r2, b = map(int, input().split())
#print(r1 ,r2, b)
if b != 0:
crr = r2 / ((300-b) / 6)
print(format(crr, ".2f"), end = " ")
if r1 < r2:
rrr = 0.0
else:
rrr = ((r1 - r2) + 1) / (b / 6)
print(format(rrr, ".2f"))
else:
crr = r2/50
print(format(crr, ".2f"), end = " ")
if r1 < r2:
rrr = 0
else:
rrr = (r1 + 1) - r2
print(format(rrr,".2f"))
| 24.761905
| 44
| 0.369231
|
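The arithmetic above reads most naturally if r1 is the opposing total, r2 the chasing side's current score, and b the balls remaining out of 300, so overs bowled are (300 - b) / 6; that reading is inferred from the formulas, not stated in the snippet. A small worked example with assumed values:

# Assumed values: target side made r1 = 250; chasers have r2 = 100 with b = 120 balls left.
r1, r2, b = 250, 100, 120
overs_bowled = (300 - b) / 6          # 30.0 overs faced so far
crr = r2 / overs_bowled               # current run rate: 100 / 30 ≈ 3.33
rrr = ((r1 - r2) + 1) / (b / 6)       # required run rate: 151 runs in 20 overs = 7.55
print(format(crr, ".2f"), format(rrr, ".2f"))   # 3.33 7.55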
1c07ef9250b321c31f06b5098b6889bc6eb4b834
| 2,456
|
py
|
Python
|
converters/download_rgsummary.py
|
bbockelm/topology
|
16404a6751b81597ab2117f0e983ac063085ff34
|
[
"Apache-2.0"
] | null | null | null |
converters/download_rgsummary.py
|
bbockelm/topology
|
16404a6751b81597ab2117f0e983ac063085ff34
|
[
"Apache-2.0"
] | null | null | null |
converters/download_rgsummary.py
|
bbockelm/topology
|
16404a6751b81597ab2117f0e983ac063085ff34
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from subprocess import Popen, PIPE
from argparse import ArgumentParser
import os
import sys
import urllib.parse
import urllib.request
YES, NO, ONLY = "yes", "no", "only"
params = {
"all_resources": "on",
"summary_attrs_showservice": "1",
# "summary_attrs_showrsvstatus": "1", # <- should not be updated manually
# "summary_attrs_showgipstatus": "1", # <- gip is dead
# "summary_attrs_showvomembership": "1", # <- shows "SupportedVOs" field, usually blank & superseded by CE collector
"summary_attrs_showvoownership": "1",
"summary_attrs_showwlcg": "1",
# "summary_attrs_showenv": "1", # <- this one is never filled out
"summary_attrs_showcontact": "1",
"summary_attrs_showfqdn": "1",
"summary_attrs_showhierarchy": "1",
# "summary_attrs_showticket": "1", # <- shows open GOC tickets
}
parser = ArgumentParser()
parser.add_argument("--show-inactive-resources", choices=[YES, NO, ONLY], default=YES) # original GRACC URL used NO
parser.add_argument("--show-itb", choices=[YES, NO, ONLY], default=YES) # original GRACC URL used NO
parser.add_argument("--show-disabled-resources", choices=[YES, NO, ONLY], default=YES)
args = parser.parse_args()
if args.show_inactive_resources == ONLY:
params["active"] = "on"
params["active_value"] = "0"
elif args.show_inactive_resources == NO:
params["active"] = "on"
params["active_value"] = "1"
elif args.show_inactive_resources == YES:
params.pop("active", None)
else: assert False
if args.show_itb == ONLY:
params["gridtype"] = "on"
params["gridtype_2"] = "on"
elif args.show_itb == NO:
params["gridtype"] = "on"
params["gridtype_1"] = "on"
elif args.show_itb == YES:
params.pop("gridtype", None)
else: assert False
if args.show_disabled_resources == ONLY:
params["disable"] = "on"
params["disable_value"] = "1"
elif args.show_disabled_resources == NO:
params["disable"] = "on"
params["disable_value"] = "0"
elif args.show_disabled_resources == YES:
params.pop("disable", None)
else: assert False
query = urllib.parse.urlencode(params)
url = "https://myosg.grid.iu.edu/rgsummary/xml?%s" % query
with urllib.request.urlopen(url) as req:
data = req.read().decode("utf-8")
newenv = os.environ.copy()
newenv["XMLLINT_INDENT"] = "\t"
proc = Popen("xmllint --format -", stdin=PIPE, stdout=sys.stdout, shell=True, encoding="utf-8", env=newenv)
proc.communicate(data)
| 30.7
| 121
| 0.684446
|
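The script above turns its command-line choices into MyOSG query parameters and URL-encodes them before fetching the XML. The sketch below shows only that encoding step with a trimmed-down parameter dict (a subset of the real one, with the active filter set as it would be for --show-inactive-resources=no); no network request is made.

import urllib.parse

params = {
    "all_resources": "on",
    "summary_attrs_showservice": "1",
    "active": "on",          # set when --show-inactive-resources=no
    "active_value": "1",
}
query = urllib.parse.urlencode(params)
print("https://myosg.grid.iu.edu/rgsummary/xml?%s" % query)
# https://myosg.grid.iu.edu/rgsummary/xml?all_resources=on&summary_attrs_showservice=1&active=on&active_value=1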
057d89b9b90fbb4dbd5909b25da6b5493c22b1be
| 4,870
|
py
|
Python
|
tests/test_protocol.py
|
sapid/deepstreampy_twisted
|
1c6af4ea5e3751b026745f03bc2fc91340f1417d
|
[
"MIT"
] | 2
|
2018-02-03T02:37:04.000Z
|
2019-05-29T13:27:09.000Z
|
tests/test_protocol.py
|
sapid/deepstreampy_twisted
|
1c6af4ea5e3751b026745f03bc2fc91340f1417d
|
[
"MIT"
] | 8
|
2018-02-12T21:32:45.000Z
|
2020-03-18T08:15:41.000Z
|
tests/test_protocol.py
|
sapid/deepstreampy_twisted
|
1c6af4ea5e3751b026745f03bc2fc91340f1417d
|
[
"MIT"
] | 2
|
2017-12-15T09:15:14.000Z
|
2018-02-06T09:15:07.000Z
|
from deepstreampy_twisted import protocol
from deepstreampy import constants
from twisted.trial import unittest
from twisted.test import proto_helpers
import sys
from twisted.internet import task
if sys.version_info[0] < 3:
import mock
else:
from unittest import mock
import twisted
twisted.internet.base.DelayedCall.debug = True
class ProtocolTests(unittest.TestCase):
url = 'ws://localhost:0/deepstream'
def setUp(self):
self.client = mock.Mock()
self.reactor = task.Clock()
# debug = 'verbose'
debug = False
self.factory = protocol.DeepstreamFactory(
ProtocolTests.url,
client=self.client,
authParams={},
debug=debug,
reactor=self.reactor)
self.clock = self.reactor
self.proto = self.factory.buildProtocol(('localhost', 0))
self.factory._protocol_instance = self.proto
self.proto.callLater = self.clock.callLater
self.tr = proto_helpers.StringTransport()
self.tr.protocol = self.proto
self.proto.transport = self.tr
def tearDown(self):
self.tr.loseConnection()
for call in self.clock.getDelayedCalls():
call.cancel()
self.clock.advance(1)
    def _decode(self, message):
        # Normalise raw wire bytes to text, then make the control characters readable.
        if isinstance(message, bytes):
            message = message.decode('utf-8')
        message = message.replace(chr(31), '|').replace(chr(30), '+')
        return message
    def _encode(self, message):
        # Replace the readable separators with the deepstream control characters.
        message = message.replace('|', chr(31)).replace('+', chr(30))
        return message.encode('utf-8')
def _test(self, dataReceived, expected):
self._server_emit(dataReceived)
sent = self._decode(self.tr.value())
self.assertEqual(sent, expected)
def _server_emit(self, data):
encoded_data = self._encode(data)
self.proto.onMessage(encoded_data, False)
def _get_connection_state_changes(self):
count = 0
for call_args in self.client.emit.call_args_list:
if call_args[0][0] == constants.event.CONNECTION_STATE_CHANGED:
count += 1
return count
def test_connects(self):
# Start in state CLOSED
self.assertEqual(self.factory._state, constants.connection_state.CLOSED)
self.assertEqual(self._get_connection_state_changes(), 0)
# Create the connection; move state to AWAITING_CONNECTION
self.proto.makeConnection(self.tr)
self.assertEqual(self.factory._state, constants.connection_state.AWAITING_CONNECTION)
self.assertEqual(self._get_connection_state_changes(), 1)
# Test receiving a message; move state to CHALLENGING
self._test("C|CH+", "C|CHR|%s+" % ProtocolTests.url)
self.assertEqual(self.factory._state, constants.connection_state.CHALLENGING)
# Test receiving a message; move state to AWAITING_AUTHENTICATION, then AUTHENTICATING
self._server_emit('C|A+')
# We'll miss AWAITING_AUTHENTICATION, so we'll count state changes and check the 'at rest' state
self.assertEqual(self._get_connection_state_changes(), 4)
self.assertEqual(self.factory._state, constants.connection_state.AUTHENTICATING) # Potential timing issue
def test_anon_auth(self):
self.proto.makeConnection(self.tr)
self.factory._set_state(constants.connection_state.AWAITING_AUTHENTICATION)
d = self.factory.authenticate({})
d.addCallback(self.assertEqual, {'message': None,
'success': True,
'error': None})
self.clock.advance(1)
self.assertEqual(self._decode(self.tr.value()), 'A|REQ|{}+')
self._server_emit('A|A+')
self.clock.advance(1)
self.assertEqual(self.factory._state, constants.connection_state.OPEN)
self.clock.advance(1)
def test_too_many_auths(self):
self.proto.makeConnection(self.tr)
self.factory._state = constants.connection_state.CHALLENGING
self._test('C|A+', 'A|REQ|{}+')
self._server_emit('A|E+')
self.assertEqual(self.factory._state, constants.connection_state.AWAITING_AUTHENTICATION)
self.proto._send_auth_params()
self._server_emit('A|E+')
self.assertEqual(self.factory._state, constants.connection_state.AWAITING_AUTHENTICATION)
self.proto._send_auth_params()
self._server_emit('A|E|%s+' % constants.event.TOO_MANY_AUTH_ATTEMPTS)
self.assertTrue(self.factory._too_many_auth_attempts)
# def test_redirect(self):
# raise NotImplementedError
# # Still need to write this test.
| 37.751938
| 113
| 0.659959
|
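The tests above translate between readable messages such as "C|CH+" and the deepstream wire format, where '|' stands for the unit separator chr(31) and '+' for the record separator chr(30). The sketch below is a standalone round trip of just that translation, using the server URL from the test class as sample payload; it does not involve the protocol class or Twisted.

US, RS = chr(31), chr(30)   # deepstream field and message separators

def encode(msg):
    """Readable form -> wire form, mirroring the tests' _encode helper."""
    return msg.replace('|', US).replace('+', RS).encode('utf-8')

def decode(raw):
    """Wire form -> readable form, mirroring the tests' _decode helper."""
    return raw.decode('utf-8').replace(US, '|').replace(RS, '+')

wire = encode("C|CHR|ws://localhost:0/deepstream+")
print(wire)                 # b'C\x1fCHR\x1fws://localhost:0/deepstream\x1e'
print(decode(wire))         # C|CHR|ws://localhost:0/deepstream+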
ec2afdb4005acaf3ca675355c1e24075b8906a2c
| 22,635
|
py
|
Python
|
arviz/tests/helpers.py
|
StanczakDominik/arviz
|
ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287
|
[
"Apache-2.0"
] | 1
|
2020-08-09T00:16:00.000Z
|
2020-08-09T00:16:00.000Z
|
arviz/tests/helpers.py
|
StanczakDominik/arviz
|
ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287
|
[
"Apache-2.0"
] | null | null | null |
arviz/tests/helpers.py
|
StanczakDominik/arviz
|
ec33b4cc7d4a6d5ba95a87a43ef226a49c2cb287
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=redefined-outer-name, comparison-with-callable
"""Test helper functions."""
import gzip
import importlib
import logging
import os
import pickle
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pytest
from _pytest.outcomes import Skipped
from packaging.version import Version
from ..data import InferenceData, from_dict
_log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def eight_schools_params():
"""Share setup for eight schools."""
return {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
@pytest.fixture(scope="module")
def draws():
"""Share default draw count."""
return 500
@pytest.fixture(scope="module")
def chains():
"""Share default chain count."""
return 2
def create_model(seed=10):
"""Create model with fake data."""
np.random.seed(seed)
nchains = 4
ndraws = 500
data = {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
posterior = {
"mu": np.random.randn(nchains, ndraws),
"tau": abs(np.random.randn(nchains, ndraws)),
"eta": np.random.randn(nchains, ndraws, data["J"]),
"theta": np.random.randn(nchains, ndraws, data["J"]),
}
posterior_predictive = {"y": np.random.randn(nchains, ndraws, len(data["y"]))}
sample_stats = {
"energy": np.random.randn(nchains, ndraws),
"diverging": np.random.randn(nchains, ndraws) > 0.90,
"max_depth": np.random.randn(nchains, ndraws) > 0.90,
}
log_likelihood = {
"y": np.random.randn(nchains, ndraws, data["J"]),
}
prior = {
"mu": np.random.randn(nchains, ndraws) / 2,
"tau": abs(np.random.randn(nchains, ndraws)) / 2,
"eta": np.random.randn(nchains, ndraws, data["J"]) / 2,
"theta": np.random.randn(nchains, ndraws, data["J"]) / 2,
}
prior_predictive = {"y": np.random.randn(nchains, ndraws, len(data["y"])) / 2}
sample_stats_prior = {
"energy": np.random.randn(nchains, ndraws),
"diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
}
model = from_dict(
posterior=posterior,
posterior_predictive=posterior_predictive,
sample_stats=sample_stats,
log_likelihood=log_likelihood,
prior=prior,
prior_predictive=prior_predictive,
sample_stats_prior=sample_stats_prior,
observed_data={"y": data["y"]},
dims={"y": ["obs_dim"], "log_likelihood": ["obs_dim"]},
coords={"obs_dim": range(data["J"])},
)
return model
def create_multidimensional_model(seed=10):
"""Create model with fake data."""
np.random.seed(seed)
nchains = 4
ndraws = 500
ndim1 = 5
ndim2 = 7
data = {
"y": np.random.normal(size=(ndim1, ndim2)),
"sigma": np.random.normal(size=(ndim1, ndim2)),
}
posterior = {
"mu": np.random.randn(nchains, ndraws),
"tau": abs(np.random.randn(nchains, ndraws)),
"eta": np.random.randn(nchains, ndraws, ndim1, ndim2),
"theta": np.random.randn(nchains, ndraws, ndim1, ndim2),
}
posterior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2)}
sample_stats = {
"energy": np.random.randn(nchains, ndraws),
"diverging": np.random.randn(nchains, ndraws) > 0.90,
}
log_likelihood = {
"y": np.random.randn(nchains, ndraws, ndim1, ndim2),
}
prior = {
"mu": np.random.randn(nchains, ndraws) / 2,
"tau": abs(np.random.randn(nchains, ndraws)) / 2,
"eta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
"theta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
}
prior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2}
sample_stats_prior = {
"energy": np.random.randn(nchains, ndraws),
"diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
}
model = from_dict(
posterior=posterior,
posterior_predictive=posterior_predictive,
sample_stats=sample_stats,
log_likelihood=log_likelihood,
prior=prior,
prior_predictive=prior_predictive,
sample_stats_prior=sample_stats_prior,
observed_data={"y": data["y"]},
dims={"y": ["dim1", "dim2"], "log_likelihood": ["dim1", "dim2"]},
coords={"dim1": range(ndim1), "dim2": range(ndim2)},
)
return model
@pytest.fixture(scope="module")
def models():
"""Fixture containing 2 mock inference data instances for testing."""
# blank line to keep black and pydocstyle happy
class Models:
model_1 = create_model(seed=10)
model_2 = create_model(seed=11)
return Models()
@pytest.fixture(scope="module")
def multidim_models():
"""Fixture containing 2 mock inference data instances with multidimensional data for testing."""
# blank line to keep black and pydocstyle happy
class Models:
model_1 = create_multidimensional_model(seed=10)
model_2 = create_multidimensional_model(seed=11)
return Models()
def check_multiple_attrs(
test_dict: Dict[str, List[str]], parent: InferenceData
) -> List[Union[str, Tuple[str, str]]]:
"""Perform multiple hasattr checks on InferenceData objects.
    It first checks whether the parent object contains a given dataset,
    and then (if present) checks the attributes of that dataset.
    Given the output of the function, all mismatches between expectation and reality can
    be retrieved: a single string indicates a group mismatch and a tuple of strings
    ``(group, var)`` indicates a mismatch in the variable ``var`` of ``group``.
Parameters
----------
test_dict: dict of {str : list of str}
Its structure should be `{dataset1_name: [var1, var2], dataset2_name: [var]}`.
    A ``~`` at the beginning of a dataset or variable name indicates that the name NOT
    being present must be asserted.
parent: InferenceData
InferenceData object on which to check the attributes.
Returns
-------
list
List containing the failed checks. It will contain either the dataset_name or a
tuple (dataset_name, var) for all non present attributes.
Examples
--------
    The output below indicates that the ``posterior`` group was expected but not found, and
    that variables ``a`` and ``b`` were expected but not found in the ``prior`` group:
["posterior", ("prior", "a"), ("prior", "b")]
Another example could be the following:
[("posterior", "a"), "~observed_data", ("sample_stats", "~log_likelihood")]
In this case, the output indicates that variable ``a`` was not found in ``posterior``
as it was expected, however, in the other two cases, the preceding ``~`` (kept from the
input negation notation) indicates that ``observed_data`` group should not be present
but was found in the InferenceData and that ``log_likelihood`` variable was found
in ``sample_stats``, also against what was expected.
"""
failed_attrs = []
for dataset_name, attributes in test_dict.items():
if dataset_name.startswith("~"):
if hasattr(parent, dataset_name[1:]):
failed_attrs.append(dataset_name)
elif hasattr(parent, dataset_name):
dataset = getattr(parent, dataset_name)
for attribute in attributes:
if attribute.startswith("~"):
if hasattr(dataset, attribute[1:]):
failed_attrs.append((dataset_name, attribute))
elif not hasattr(dataset, attribute):
failed_attrs.append((dataset_name, attribute))
else:
failed_attrs.append(dataset_name)
return failed_attrs
def emcee_version():
"""Check emcee version.
Returns
-------
int
Major version number
"""
import emcee
return int(emcee.__version__[0])
def needs_emcee3_func():
"""Check if emcee3 is required."""
# pylint: disable=invalid-name
needs_emcee3 = pytest.mark.skipif(emcee_version() < 3, reason="emcee3 required")
return needs_emcee3
def _emcee_lnprior(theta):
"""Proper function to allow pickling."""
mu, tau, eta = theta[0], theta[1], theta[2:]
# Half-cauchy prior, hwhm=25
if tau < 0:
return -np.inf
prior_tau = -np.log(tau ** 2 + 25 ** 2)
prior_mu = -((mu / 10) ** 2) # normal prior, loc=0, scale=10
prior_eta = -np.sum(eta ** 2) # normal prior, loc=0, scale=1
return prior_mu + prior_tau + prior_eta
def _emcee_lnprob(theta, y, sigma):
"""Proper function to allow pickling."""
mu, tau, eta = theta[0], theta[1], theta[2:]
prior = _emcee_lnprior(theta)
like_vect = -(((mu + tau * eta - y) / sigma) ** 2)
like = np.sum(like_vect)
return like + prior, (like_vect, np.random.normal((mu + tau * eta), sigma))
def emcee_schools_model(data, draws, chains):
"""Schools model in emcee."""
import emcee
chains = 10 * chains # emcee is sad with too few walkers
y = data["y"]
sigma = data["sigma"]
J = data["J"] # pylint: disable=invalid-name
ndim = J + 2
pos = np.random.normal(size=(chains, ndim))
pos[:, 1] = np.absolute(pos[:, 1]) # pylint: disable=unsupported-assignment-operation
if emcee_version() < 3:
sampler = emcee.EnsembleSampler(chains, ndim, _emcee_lnprob, args=(y, sigma))
# pylint: enable=unexpected-keyword-arg
sampler.run_mcmc(pos, draws)
else:
here = os.path.dirname(os.path.abspath(__file__))
data_directory = os.path.join(here, "saved_models")
filepath = os.path.join(data_directory, "reader_testfile.h5")
backend = emcee.backends.HDFBackend(filepath) # pylint: disable=no-member
backend.reset(chains, ndim)
# pylint: disable=unexpected-keyword-arg
sampler = emcee.EnsembleSampler(
chains, ndim, _emcee_lnprob, args=(y, sigma), backend=backend
)
# pylint: enable=unexpected-keyword-arg
sampler.run_mcmc(pos, draws, store=True)
return sampler
# pylint:disable=no-member,no-value-for-parameter,invalid-name
def _pyro_noncentered_model(J, sigma, y=None):
import pyro
import pyro.distributions as dist
mu = pyro.sample("mu", dist.Normal(0, 5))
tau = pyro.sample("tau", dist.HalfCauchy(5))
with pyro.plate("J", J):
eta = pyro.sample("eta", dist.Normal(0, 1))
theta = mu + tau * eta
return pyro.sample("obs", dist.Normal(theta, sigma), obs=y)
def pyro_noncentered_schools(data, draws, chains):
"""Non-centered eight schools implementation in Pyro."""
import torch
from pyro.infer import MCMC, NUTS
y = torch.from_numpy(data["y"]).float()
sigma = torch.from_numpy(data["sigma"]).float()
nuts_kernel = NUTS(_pyro_noncentered_model, jit_compile=True, ignore_jit_warnings=True)
posterior = MCMC(nuts_kernel, num_samples=draws, warmup_steps=draws, num_chains=chains)
posterior.run(data["J"], sigma, y)
# This block lets the posterior be pickled
posterior.sampler = None
posterior.kernel.potential_fn = None
return posterior
# pylint:disable=no-member,no-value-for-parameter,invalid-name
def _numpyro_noncentered_model(J, sigma, y=None):
import numpyro
import numpyro.distributions as dist
mu = numpyro.sample("mu", dist.Normal(0, 5))
tau = numpyro.sample("tau", dist.HalfCauchy(5))
with numpyro.plate("J", J):
eta = numpyro.sample("eta", dist.Normal(0, 1))
theta = mu + tau * eta
return numpyro.sample("obs", dist.Normal(theta, sigma), obs=y)
def numpyro_schools_model(data, draws, chains):
"""Centered eight schools implementation in NumPyro."""
from jax.random import PRNGKey
from numpyro.infer import MCMC, NUTS
mcmc = MCMC(
NUTS(_numpyro_noncentered_model),
num_warmup=draws,
num_samples=draws,
num_chains=chains,
chain_method="sequential",
)
mcmc.run(PRNGKey(0), extra_fields=("num_steps", "energy"), **data)
# This block lets the posterior be pickled
mcmc.sampler._sample_fn = None # pylint: disable=protected-access
mcmc.sampler._init_fn = None # pylint: disable=protected-access
mcmc.sampler._postprocess_fn = None # pylint: disable=protected-access
mcmc.sampler._potential_fn = None # pylint: disable=protected-access
mcmc._cache = {} # pylint: disable=protected-access
return mcmc
def tfp_schools_model(num_schools, treatment_stddevs):
"""Non-centered eight schools model for tfp."""
import tensorflow_probability.python.edward2 as ed
import tensorflow as tf
if int(tf.__version__[0]) > 1:
import tensorflow.compat.v1 as tf # pylint: disable=import-error
tf.disable_v2_behavior()
avg_effect = ed.Normal(loc=0.0, scale=10.0, name="avg_effect") # `mu`
avg_stddev = ed.Normal(loc=5.0, scale=1.0, name="avg_stddev") # `log(tau)`
school_effects_standard = ed.Normal(
loc=tf.zeros(num_schools), scale=tf.ones(num_schools), name="school_effects_standard"
) # `eta`
school_effects = avg_effect + tf.exp(avg_stddev) * school_effects_standard # `theta`
treatment_effects = ed.Normal(
loc=school_effects, scale=treatment_stddevs, name="treatment_effects"
) # `y`
return treatment_effects
def tfp_noncentered_schools(data, draws, chains):
"""Non-centered eight schools implementation for tfp."""
import tensorflow_probability as tfp
import tensorflow_probability.python.edward2 as ed
import tensorflow as tf
if int(tf.__version__[0]) > 1:
import tensorflow.compat.v1 as tf # pylint: disable=import-error
tf.disable_v2_behavior()
del chains
log_joint = ed.make_log_joint_fn(tfp_schools_model)
def target_log_prob_fn(avg_effect, avg_stddev, school_effects_standard):
"""Unnormalized target density as a function of states."""
return log_joint(
num_schools=data["J"],
treatment_stddevs=data["sigma"].astype(np.float32),
avg_effect=avg_effect,
avg_stddev=avg_stddev,
school_effects_standard=school_effects_standard,
treatment_effects=data["y"].astype(np.float32),
)
states, kernel_results = tfp.mcmc.sample_chain(
num_results=draws,
num_burnin_steps=500,
current_state=[
tf.zeros([], name="init_avg_effect"),
tf.zeros([], name="init_avg_stddev"),
tf.ones([data["J"]], name="init_school_effects_standard"),
],
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn, step_size=0.4, num_leapfrog_steps=3
),
)
with tf.Session() as sess:
[states_, _] = sess.run([states, kernel_results])
return tfp_schools_model, states_
def pystan_noncentered_schools(data, draws, chains):
"""Non-centered eight schools implementation for pystan."""
schools_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real eta[J];
}
transformed parameters {
real theta[J];
for (j in 1:J)
theta[j] = mu + tau * eta[j];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
eta ~ normal(0, 1);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
"""
if pystan_version() == 2:
import pystan # pylint: disable=import-error
stan_model = pystan.StanModel(model_code=schools_code)
fit = stan_model.sampling(
data=data,
iter=draws + 500,
warmup=500,
chains=chains,
check_hmc_diagnostics=False,
control=dict(adapt_engaged=False),
)
else:
import stan # pylint: disable=import-error
stan_model = stan.build(schools_code, data=data)
fit = stan_model.sample(
num_chains=chains, num_samples=draws, num_warmup=500, save_warmup=False
)
return stan_model, fit
def pymc3_noncentered_schools(data, draws, chains):
"""Non-centered eight schools implementation for pymc3."""
import pymc3 as pm
with pm.Model() as model:
mu = pm.Normal("mu", mu=0, sd=5)
tau = pm.HalfCauchy("tau", beta=5)
eta = pm.Normal("eta", mu=0, sd=1, shape=data["J"])
theta = pm.Deterministic("theta", mu + tau * eta)
pm.Normal("obs", mu=theta, sd=data["sigma"], observed=data["y"])
trace = pm.sample(draws, chains=chains)
return model, trace
def library_handle(library):
"""Import a library and return the handle."""
if library == "pystan":
try:
module = importlib.import_module("pystan")
except ImportError:
module = importlib.import_module("stan")
else:
module = importlib.import_module(library)
return module
def load_cached_models(eight_schools_data, draws, chains, libs=None):
"""Load pymc3, pystan, emcee, and pyro models from pickle."""
here = os.path.dirname(os.path.abspath(__file__))
supported = (
("tensorflow_probability", tfp_noncentered_schools),
("pystan", pystan_noncentered_schools),
("pymc3", pymc3_noncentered_schools),
("emcee", emcee_schools_model),
("pyro", pyro_noncentered_schools),
("numpyro", numpyro_schools_model),
)
data_directory = os.path.join(here, "saved_models")
models = {}
if isinstance(libs, str):
libs = [libs]
for library_name, func in supported:
if libs is not None and library_name not in libs:
continue
library = library_handle(library_name)
if library.__name__ == "stan":
# PyStan3 does not support pickling
# httpstan caches models automatically
_log.info("Generating and loading stan model")
models["pystan"] = func(eight_schools_data, draws, chains)
continue
py_version = sys.version_info
fname = "{0.major}.{0.minor}_{1.__name__}_{1.__version__}_{2}_{3}_{4}.pkl.gzip".format(
py_version, library, sys.platform, draws, chains
)
path = os.path.join(data_directory, fname)
if not os.path.exists(path):
with gzip.open(path, "wb") as buff:
_log.info("Generating and caching %s", fname)
pickle.dump(func(eight_schools_data, draws, chains), buff)
with gzip.open(path, "rb") as buff:
_log.info("Loading %s from cache", fname)
models[library.__name__] = pickle.load(buff)
return models
def pystan_version():
"""Check PyStan version.
Returns
-------
int
Major version number
"""
try:
import pystan # pylint: disable=import-error
version = int(pystan.__version__[0])
except ImportError:
try:
import stan as pystan # pylint: disable=import-error
version = int(pystan.__version__[0])
except ImportError:
version = None
return version
def test_precompile_models(eight_schools_params, draws, chains):
"""Precompile model files."""
load_cached_models(eight_schools_params, draws, chains)
def running_on_ci() -> bool:
"""Return True if running on CI machine."""
return os.environ.get("ARVIZ_CI_MACHINE") is not None
def importorskip(
modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
) -> Any:
"""Import and return the requested module ``modname``.
Doesn't allow skips on CI machine.
Borrowed and modified from ``pytest.importorskip``.
:param str modname: the name of the module to import
:param str minversion: if given, the imported module's ``__version__``
attribute must be at least this minimal version, otherwise the test is
still skipped.
:param str reason: if given, this reason is shown as the message when the
module cannot be imported.
:returns: The imported module. This should be assigned to its canonical
name.
Example::
docutils = pytest.importorskip("docutils")
"""
    # ARVIZ_CI_MACHINE is True when tests run on CI, i.e. the ARVIZ_CI_MACHINE env variable is set
ARVIZ_CI_MACHINE = running_on_ci()
if ARVIZ_CI_MACHINE:
import warnings
        compile(modname, "", "eval")  # to catch syntax errors
with warnings.catch_warnings():
# make sure to ignore ImportWarnings that might happen because
# of existing directories with the same name we're trying to
# import but without a __init__.py file
warnings.simplefilter("ignore")
__import__(modname)
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, "__version__", None)
if minversion is not None:
if verattr is None or Version(verattr) < Version(minversion):
raise Skipped(
"module %r has __version__ %r, required is: %r"
% (modname, verattr, minversion),
allow_module_level=True,
)
return mod
else:
return pytest.importorskip(modname=modname, minversion=minversion, reason=reason)
| 34.876733
| 101
| 0.609013
|
24b4406153e161fe768baf63487c0df2ae810f35
| 1,527
|
py
|
Python
|
vigenre.py
|
rawalshree/classical-ciphers
|
fce82af1e01901447c4d5f4b54b047c69b681f8e
|
[
"MIT"
] | null | null | null |
vigenre.py
|
rawalshree/classical-ciphers
|
fce82af1e01901447c4d5f4b54b047c69b681f8e
|
[
"MIT"
] | null | null | null |
vigenre.py
|
rawalshree/classical-ciphers
|
fce82af1e01901447c4d5f4b54b047c69b681f8e
|
[
"MIT"
] | null | null | null |
'''
Owner - Rawal Shree
Email - rawalshreepal000@gmail.com
Github - https://github.com/rawalshree
'''
global plain
global cipher
global Success
Success = False
plain = ""
cipher = ""
class Vigenre:
    # ASCII ranges: A - Z ==> 65 - 90, a - z ==> 97 - 122
    # (this implementation only handles lowercase a - z input)
def setKey(self, key):
global Success
self.key = key
if self.key.isalpha():
Success = True
def encryption(self, plainText):
global cipher
self.plainText = plainText
if Success:
for x in range(len(self.plainText)):
if self.plainText[x].isalpha():
char = chr(((ord(self.plainText[x]) + ord(self.key[x % len(self.key)]))) - 97)
if ord(char) > ord('z'):
char = chr(ord(char) - 26)
cipher += char
return cipher
else:
print("Invalid Key")
return self.plainText
def decryption(self, cipherText):
global plain
self.cipherText = cipherText
if Success:
for x in range(len(self.cipherText)):
if self.cipherText[x].isalpha():
char = chr(((ord(self.cipherText[x]) - ord(self.key[x % len(self.key)]))) + 97)
if ord(char) < ord('a'):
char = chr(ord(char) + 26)
plain += char
return plain
else:
print("Invalid Key")
return self.cipherText
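# Illustrative usage sketch (not part of the original file). Note that the
# implementation above expects lowercase letters and an alphabetic key, and the
# module-level `cipher`/`plain` strings accumulate across calls, so a fresh run
# is assumed for each encrypt/decrypt round trip.
if __name__ == '__main__':
    vigenre = Vigenre()
    vigenre.setKey('lemon')
    encrypted = vigenre.encryption('attackatdawn')
    print(encrypted)                      # lxfopvefrnhr
    print(vigenre.decryption(encrypted))  # attackatdawn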
| 25.032787
| 99
| 0.483301
|
c269873f5b90b54afec8267feca0fdda3e1fc1dc
| 3,726
|
py
|
Python
|
Python/Python_Requests_Beautifulsoup/sdk_example.py
|
scraperapi/scraperapi-code-examples
|
3f7b5d3945fb8a97579fd62a8c0062c15e658193
|
[
"MIT"
] | 4
|
2021-06-11T15:40:31.000Z
|
2022-01-09T19:07:10.000Z
|
Python/Python_Requests_Beautifulsoup/sdk_example.py
|
scraperapi/scraperapi-code-examples
|
3f7b5d3945fb8a97579fd62a8c0062c15e658193
|
[
"MIT"
] | 1
|
2021-06-02T22:09:17.000Z
|
2021-06-02T22:09:17.000Z
|
Python/Python_Requests_Beautifulsoup/sdk_example.py
|
scraperapi/scraperapi-code-examples
|
3f7b5d3945fb8a97579fd62a8c0062c15e658193
|
[
"MIT"
] | 8
|
2021-08-19T22:56:36.000Z
|
2022-03-23T10:35:57.000Z
|
from bs4 import BeautifulSoup
import concurrent.futures
import csv
import urllib.parse
from scraper_api import ScraperAPIClient
"""
SCRAPER SETTINGS
You need to define the following values below:
- API_KEY --> Find this on your dashboard, or signup here to create a
free account here https://dashboard.scraperapi.com/signup
- NUM_RETRIES --> We recommend setting this to 5 retries. For most sites
95% of your requests will be successful on the first try,
and 99% after 3 retries.
- NUM_THREADS --> Set this equal to the number of concurrent threads available
in your plan. For reference: Free Plan (5 threads), Hobby Plan (10 threads),
Startup Plan (25 threads), Business Plan (50 threads),
Enterprise Plan (up to 5,000 threads).
"""
API_KEY = 'INSERT_API_KEY_HERE'
NUM_RETRIES = 3
NUM_THREADS = 5
client = ScraperAPIClient(API_KEY)
## Example list of urls to scrape
list_of_urls = [
'http://quotes.toscrape.com/page/1/',
'http://quotes.toscrape.com/page/2/',
]
## we will store the scraped data in this list
scraped_quotes = []
def scrape_url(url):
"""
SEND REQUESTS TO SCRAPER API AND PARSE DATA FROM THE HTML RESPONSE
INPUT/OUTPUT: Takes a single url as input, and appends the scraped data to the "scraped_quotes" list.
METHOD: Takes the input url, requests it via scraperapi and keeps retrying the request until it gets a
successful response (200 or 404 status code) or up to the number of retries you define in NUM_RETRIES.
If it did yield a successful response then it parses the data from the HTML response and adds it to the
"scraped_quotes" list. You can easily reconfigure this to store the scraped data in a database.
"""
response = client.get(url=url, retry=NUM_RETRIES)
## parse data if 200 status code (successful response)
if response.status_code == 200:
"""
Insert the parsing code for your use case here...
"""
## Example: parse data with beautifulsoup
html_response = response.text
soup = BeautifulSoup(html_response, "html.parser")
quotes_sections = soup.find_all('div', class_="quote")
## loop through each quotes section and extract the quote and author
for quote_block in quotes_sections:
quote = quote_block.find('span', class_='text').text
author = quote_block.find('small', class_='author').text
## add scraped data to "scraped_quotes" list
scraped_quotes.append({
'quote': quote,
'author': author
})
"""
CONFIGURE CONCURRENT THREADS
Create thread pools up to the NUM_THREADS you define above and splits the urls you
want to scrape amongst these threads until complete. Takes as input:
- max_workers --> the maximum number of threads it will create. Here we set it to the
value we defined in NUM_THREADS.
- function to execute --> the first input to the executor.map() function is the function
                          we want to execute in each thread. Here we input the "scrape_url(url)"
function which accepts a single url as input.
- input list --> the second input to the executor.map() function is the data we want to
be split amongst the threads created. Here we input the "list_of_urls" we
want to scrape.
"""
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
executor.map(scrape_url, list_of_urls)
print(scraped_quotes)
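## Optional follow-up (illustrative sketch, not part of the original example):
## persist the results with the already-imported csv module. The output file
## name "scraped_quotes.csv" is an arbitrary choice.
with open('scraped_quotes.csv', 'w', newline='', encoding='utf-8') as csv_file:
    writer = csv.DictWriter(csv_file, fieldnames=['quote', 'author'])
    writer.writeheader()
    writer.writerows(scraped_quotes)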
| 36.891089
| 107
| 0.655931
|
35a810a316549f8e3003e7028962700112d57427
| 10,289
|
py
|
Python
|
app/tornado_handlers/browse.py
|
didouard/flight_review
|
3cc9bad113bd93254c710aa800f8ebe6e233bf99
|
[
"BSD-3-Clause"
] | null | null | null |
app/tornado_handlers/browse.py
|
didouard/flight_review
|
3cc9bad113bd93254c710aa800f8ebe6e233bf99
|
[
"BSD-3-Clause"
] | null | null | null |
app/tornado_handlers/browse.py
|
didouard/flight_review
|
3cc9bad113bd93254c710aa800f8ebe6e233bf99
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tornado handler for the browse page
"""
from __future__ import print_function
import collections
import sys
import os
from datetime import datetime
import json
import sqlite3
import tornado.web
from plot_app.db_entry import DBVehicleData
# this is needed for the following imports
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "../plot_app")
)
from config import get_db_filename, get_overview_img_filepath
from db_entry import DBData, DBDataGenerated
from helper import flight_modes_table, get_airframe_data, html_long_word_force_break
# pylint: disable=relative-beyond-top-level,too-many-statements
from .common import get_jinja_env, get_generated_db_data_from_log
BROWSE_TEMPLATE = "browse.html"
# pylint: disable=abstract-method
class BrowseDataRetrievalHandler(tornado.web.RequestHandler):
"""Ajax data retrieval handler"""
def get(self, *args, **kwargs):
"""GET request"""
search_str = self.get_argument("search[value]", "").lower()
order_ind = int(self.get_argument("order[0][column]"))
order_dir = self.get_argument("order[0][dir]", "").lower()
data_start = int(self.get_argument("start"))
data_length = int(self.get_argument("length"))
draw_counter = int(self.get_argument("draw"))
show_id = self.get_argument("show_id", default="")
json_output = dict()
json_output["draw"] = draw_counter
# get the logs (but only the public ones)
con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
cur = con.cursor()
sql_order = " ORDER BY Date DESC"
ordering_col = [
"", # table row number
"LogsGenerated.StartTime",
"Vehicle.Name",
"LogsGenerated.Duration",
"LogsGenerated.NumLoggedErrors",
"LogsGenerated.VibrationState",
"LogsGenerated.GpsType",
"LogsGenerated.QuickDischarge",
]
if ordering_col[order_ind] != "":
sql_order = " ORDER BY " + ordering_col[order_ind]
if order_dir == "desc":
sql_order += " DESC"
sql_show_id = ""
if show_id:
sql_show_id = "AND show_id = " + show_id + " "
cur.execute(
"SELECT Logs.Id, Logs.Date, "
" Logs.Description, Logs.WindSpeed, "
" Logs.Rating, Logs.VideoUrl, "
" LogsGenerated.*, "
" Vehicle.Name "
"FROM Logs "
" LEFT JOIN LogsGenerated on Logs.Id=LogsGenerated.Id "
" LEFT JOIN Vehicle on Logs.uuid=Vehicle.UUID "
'WHERE Logs.Public = 1 AND NOT Logs.Source = "CI" '
+ sql_show_id
+ sql_order
)
# pylint: disable=invalid-name
Columns = collections.namedtuple("Columns", "columns search_only_columns")
def get_columns_from_tuple(db_tuple, counter):
"""load the columns (list of strings) from a db_tuple"""
db_data = DBDataJoin()
log_id = db_tuple[0]
log_date = db_tuple[1].strftime("%Y-%m-%d")
db_data.description = db_tuple[2]
db_data.feedback = ""
db_data.type = ""
db_data.wind_speed = db_tuple[3]
db_data.rating = db_tuple[4]
db_data.video_url = db_tuple[5]
generateddata_log_id = db_tuple[6]
if log_id != generateddata_log_id:
print("Join failed, loading and updating data")
db_data_gen = get_generated_db_data_from_log(log_id, con, cur)
if db_data_gen is None:
return None
db_data.add_generated_db_data_from_log(db_data_gen)
else:
db_data.duration_s = db_tuple[7]
db_data.mav_type = db_tuple[8]
db_data.estimator = db_tuple[9]
db_data.sys_autostart_id = db_tuple[10]
db_data.sys_hw = db_tuple[11]
db_data.ver_sw = db_tuple[12]
db_data.num_logged_errors = db_tuple[13]
db_data.num_logged_warnings = db_tuple[14]
db_data.flight_modes = {
int(x) for x in db_tuple[15].split(",") if len(x) > 0
}
db_data.ver_sw_release = db_tuple[16]
db_data.vehicle_uuid = db_tuple[17]
db_data.flight_mode_durations = [
tuple(map(int, x.split(":")))
for x in db_tuple[18].split(",")
if len(x) > 0
]
db_data.start_time_utc = db_tuple[19]
db_data.vibration_state = db_tuple[20]
db_data.gps_type = db_tuple[21]
db_data.quick_discharge = db_tuple[22]
db_data.name = db_tuple[23]
# bring it into displayable form
ver_sw = db_data.ver_sw
if len(ver_sw) > 10:
ver_sw = ver_sw[:6]
if len(db_data.ver_sw_release) > 0:
try:
release_split = db_data.ver_sw_release.split()
release_type = int(release_split[1])
if release_type == 255: # it's a release
ver_sw = release_split[0]
except:
pass
airframe_data = get_airframe_data(db_data.sys_autostart_id)
if airframe_data is None:
airframe = db_data.sys_autostart_id
else:
airframe = airframe_data["name"]
flight_modes = ", ".join(
[
flight_modes_table[x][0]
for x in db_data.flight_modes
if x in flight_modes_table
]
)
m, s = divmod(db_data.duration_s, 60)
h, m = divmod(m, 60)
duration_str = "{:d}:{:02d}:{:02d}".format(h, m, s)
start_time_str = "N/A"
if db_data.start_time_utc != 0:
try:
start_datetime = datetime.fromtimestamp(db_data.start_time_utc)
start_time_str = start_datetime.strftime("%Y-%m-%d %H:%M")
except ValueError as value_error:
# bogus date
print(value_error)
# make sure to break long descriptions w/o spaces (otherwise they
# mess up the layout)
description = html_long_word_force_break(db_data.description)
search_only_columns = []
if db_data.ver_sw is not None:
search_only_columns.append(db_data.ver_sw)
if db_data.ver_sw_release is not None:
search_only_columns.append(db_data.ver_sw_release)
if db_data.vehicle_uuid is not None:
search_only_columns.append(db_data.vehicle_uuid)
# image_col = '<div class="no_map_overview"> Not rendered / No GPS </div>'
# image_filename = os.path.join(get_overview_img_filepath(), log_id + ".png")
# if os.path.exists(image_filename):
# image_col = '<img class="map_overview" src="/overview_img/'
# image_col += (
# log_id + '.png" alt="Overview Image Load Failed" height=50/>'
# )
return Columns(
[
counter,
'<a href="plot_app?log=' + log_id + '">' + start_time_str + "</a>",
db_data.name,
duration_str,
db_data.num_logged_errors,
db_data.vibration_state,
db_data.gps_type,
db_data.quick_discharge,
],
search_only_columns,
)
# need to fetch all here, because we will do more SQL calls while
# iterating (having multiple cursor's does not seem to work)
db_tuples = cur.fetchall()
json_output["recordsTotal"] = len(db_tuples)
json_output["data"] = []
if data_length == -1:
data_length = len(db_tuples)
filtered_counter = 0
if search_str == "":
# speed-up the request by iterating only over the requested items
counter = data_start
for i in range(data_start, min(data_start + data_length, len(db_tuples))):
counter += 1
columns = get_columns_from_tuple(db_tuples[i], counter)
if columns is None:
continue
json_output["data"].append(columns.columns)
filtered_counter = len(db_tuples)
else:
counter = 1
for db_tuple in db_tuples:
counter += 1
columns = get_columns_from_tuple(db_tuple, counter)
if columns is None:
continue
if any(
search_str in str(column).lower()
for column in (columns.columns, columns.search_only_columns)
):
if data_start <= filtered_counter < data_start + data_length:
json_output["data"].append(columns.columns)
filtered_counter += 1
cur.close()
con.close()
json_output["recordsFiltered"] = filtered_counter
self.set_header("Content-Type", "application/json")
self.write(json.dumps(json_output))
class DBDataJoin(DBData, DBDataGenerated, DBVehicleData):
"""Class for joined Data"""
def add_generated_db_data_from_log(self, source):
"""Update joined data by parent data"""
self.__dict__.update(source.__dict__)
class BrowseHandler(tornado.web.RequestHandler):
"""Browse public log file Tornado request handler"""
def get(self, *args, **kwargs):
"""GET request"""
template = get_jinja_env().get_template(BROWSE_TEMPLATE)
template_args = {}
search_str = self.get_argument("search", "").lower()
if len(search_str) > 0:
template_args["initial_search"] = json.dumps(search_str)
self.write(template.render(template_args))
| 37.010791
| 89
| 0.558558
|
55c3278fdf9127deb0a1f5cd725832d70a7bd411
| 282
|
py
|
Python
|
rnbgrader/tests/test_answers.py
|
matthew-brett/rnbgrader
|
f07494f59dd0d1cb97c094ac2ea9e9d1243f0f70
|
[
"BSD-2-Clause"
] | null | null | null |
rnbgrader/tests/test_answers.py
|
matthew-brett/rnbgrader
|
f07494f59dd0d1cb97c094ac2ea9e9d1243f0f70
|
[
"BSD-2-Clause"
] | null | null | null |
rnbgrader/tests/test_answers.py
|
matthew-brett/rnbgrader
|
f07494f59dd0d1cb97c094ac2ea9e9d1243f0f70
|
[
"BSD-2-Clause"
] | null | null | null |
""" Test answers module
"""
import re
from rnbgrader.answers import raw2regex
def test_raw2regex():
raw = r""" Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
4.00 23.00 40.00 41.73 48.00 190.00 960 """
assert re.search(raw2regex(raw), raw)
| 18.8
| 68
| 0.588652
|
9daf7b39f127b5558a92d9ade503c8cd00b26c14
| 1,197
|
py
|
Python
|
src/python/qepsi4/_psi4_test.py
|
wugaxp/qe-psi4
|
1e73186efeb731cce3f0220a31fc4356e2b148d5
|
[
"Apache-2.0"
] | null | null | null |
src/python/qepsi4/_psi4_test.py
|
wugaxp/qe-psi4
|
1e73186efeb731cce3f0220a31fc4356e2b148d5
|
[
"Apache-2.0"
] | null | null | null |
src/python/qepsi4/_psi4_test.py
|
wugaxp/qe-psi4
|
1e73186efeb731cce3f0220a31fc4356e2b148d5
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from qepsi4 import run_psi4
from openfermion import (jordan_wigner, jw_get_ground_state_at_particle_number,
qubit_operator_sparse)
class TestChem(unittest.TestCase):
def test_run_psi4(self):
geometry = {"sites": [
{'species': 'H', 'x': 0, 'y': 0, 'z': 0},
{'species': 'H', 'x': 0, 'y': 0, 'z': 1.7}
]}
results, hamiltonian = run_psi4(geometry, save_hamiltonian=True)
self.assertAlmostEqual(results['energy'], -0.8544322638069642)
self.assertEqual(results['n_alpha'], 1)
self.assertEqual(results['n_beta'], 1)
self.assertEqual(results['n_mo'], 2)
self.assertEqual(results['n_frozen_core'], 0)
self.assertEqual(results['n_frozen_valence'], 0)
self.assertEqual(hamiltonian.n_qubits, 4)
qubit_operator = qubit_operator_sparse(jordan_wigner(hamiltonian))
energy, state = jw_get_ground_state_at_particle_number(qubit_operator, 2)
        results_ccsd, hamiltonian = run_psi4(geometry, method='ccsd')
        # For this system, the CCSD energy should be exact.
        self.assertAlmostEqual(energy, results_ccsd['energy'])
| 38.612903
| 81
| 0.650794
|
dbfdbb557c713d06baac2e90e48ab8402e4c244b
| 3,027
|
py
|
Python
|
app/patch/validator.py
|
YYJeffrey/flask-tea
|
a1adc75c862bc0b1b3b1614d0c10484ee4dc3fd4
|
[
"MIT"
] | null | null | null |
app/patch/validator.py
|
YYJeffrey/flask-tea
|
a1adc75c862bc0b1b3b1614d0c10484ee4dc3fd4
|
[
"MIT"
] | null | null | null |
app/patch/validator.py
|
YYJeffrey/flask-tea
|
a1adc75c862bc0b1b3b1614d0c10484ee4dc3fd4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2021 by Jeffrey.
:license: MIT, see LICENSE for more details.
"""
from collections import namedtuple
from flask import request
from wtforms import Form as _Form, SelectField as _SelectField
from wtforms.compat import string_types
from wtforms.validators import StopValidation, DataRequired as _DataRequired, Optional as _Optional
from app.lib.exception import ParameterError
class Form(_Form):
def __init__(self):
data = request.get_json(silent=True)
args = request.args.to_dict()
super(Form, self).__init__(data=data, **args)
def validate_for_api(self):
"""
处理异常 拼接异常信息
"""
valid = super(Form, self).validate()
if not valid:
msg = ''
for index, item in enumerate(self.errors.values()):
msg += ';'.join(item)
if index != len(self.errors.values()) - 1:
msg += ';'
raise ParameterError(msg=msg)
return self
def get_data(self, *args):
data_list = []
for arg in args:
data_list.append(getattr(self._data, arg, None))
return data_list[0] if len(data_list) == 1 else tuple(data_list)
@property
def _data(self):
self.validate_for_api()
key_list, value_list = [], []
for key, value in self._fields.items():
if value.data is not None:
key_list.append(key)
value_list.append(value.data)
NamedTuple = namedtuple('NamedTuple', [key for key in key_list])
return NamedTuple(*value_list)
@property
def dt_data(self):
"""
返回dict类型
"""
return self._data._asdict()
@property
def nt_data(self):
"""
返回namedtuple类型
"""
return self._data
class DataRequired(_DataRequired):
def __call__(self, form, field):
if field.type == 'IntegerField' and field.data == 0:
return
if field.type == 'BooleanField' and field.data is False:
return
if not field.data or isinstance(field.data, string_types) and not field.data.strip():
if self.message is None:
message = field.gettext('This field is required.')
else:
message = self.message
field.errors[:] = []
raise StopValidation(message)
class Optional(_Optional):
def __call__(self, form, field):
if field.data:
return
if not field.raw_data or isinstance(field.raw_data[0], string_types) and not self.string_check(
field.raw_data[0]):
field.errors[:] = []
raise StopValidation()
class SelectField(_SelectField):
def pre_validate(self, form):
if self.validate_choice:
for _, _, match in self.iter_choices():
if match:
break
else:
                raise ValueError(self.gettext('Invalid enum choice'))
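# Illustrative usage sketch (hypothetical, not part of the original module):
# a form combining the patched Form, DataRequired and SelectField above. It
# must be instantiated inside a Flask request context, because Form.__init__
# reads the JSON body and query string from `request`.
#
#     from wtforms import IntegerField, StringField
#
#     class CreateOrderForm(Form):
#         name = StringField(validators=[DataRequired(message='name is required')])
#         count = IntegerField(validators=[Optional()])
#
#     @app.route('/order', methods=['POST'])
#     def create_order():
#         form = CreateOrderForm().validate_for_api()
#         name, count = form.get_data('name', 'count')
#         ...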
| 28.828571
| 103
| 0.577469
|
72e6fbd5472e200b0c37d752daac101c1da83eaf
| 191
|
py
|
Python
|
fuzz.py
|
ess-dmsc/fuzzing_python_example
|
47bb661dc2126152addf97b8b58d3a39602704e6
|
[
"BSD-2-Clause"
] | null | null | null |
fuzz.py
|
ess-dmsc/fuzzing_python_example
|
47bb661dc2126152addf97b8b58d3a39602704e6
|
[
"BSD-2-Clause"
] | null | null | null |
fuzz.py
|
ess-dmsc/fuzzing_python_example
|
47bb661dc2126152addf97b8b58d3a39602704e6
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import sys
from example import csv_addition
# Recommended to import AFL last
import afl
afl.init()
csv_addition(sys.stdin.read())
# Recommended as speeds up exiting
os._exit(0)
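# Typical invocation (illustrative; assumes the python-afl package, which
# provides the py-afl-fuzz wrapper around AFL):
#
#   py-afl-fuzz -i input_corpus/ -o findings/ -- python fuzz.py
#
# AFL then feeds mutated CSV inputs to csv_addition() through stdin.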
| 13.642857
| 34
| 0.774869
|
ef66ec33da702a48e6c75904737028b946439b0c
| 895
|
py
|
Python
|
settings_manager.py
|
oleg79/android_emulator_helper
|
f9edf1bdae10d2ad7ed29dfe3276369f50dcfc17
|
[
"MIT"
] | 1
|
2022-03-02T15:43:22.000Z
|
2022-03-02T15:43:22.000Z
|
settings_manager.py
|
oleg79/android_emulator_helper
|
f9edf1bdae10d2ad7ed29dfe3276369f50dcfc17
|
[
"MIT"
] | null | null | null |
settings_manager.py
|
oleg79/android_emulator_helper
|
f9edf1bdae10d2ad7ed29dfe3276369f50dcfc17
|
[
"MIT"
] | null | null | null |
import os
import json
def set_device_status(device_name, key):
"""
Saves device status to data.json.
"""
with open('data.json', 'r') as datafile:
data = json.load(datafile)
with open('data.json', 'w+') as datafile:
data[key] = device_name
json.dump(data, datafile)
def get_device_by_status(status):
"""
Returns device name by status.
"""
with open('data.json', 'r') as datafile:
data = json.load(datafile)
return data.get(status, None)
def map_device_status(option):
"""
    Maps an option with its statuses, if any exist, for display in the options list.
"""
stats = []
if option == get_device_by_status('DefaultDevice'):
stats.append('default')
if option == get_device_by_status('LatestStartedDevice'):
stats.append('latest started')
return "{} {}".format(option, ", ".join(stats))
| 24.861111
| 76
| 0.62905
|
1b272c61ff9f8a5a4a450a42e13aefb5f8da36ef
| 2,629
|
py
|
Python
|
Help.py
|
404-Program-not-found/Discord-AntiScamBot
|
9df2b8c7a1d59109c4e9c9b18512cffa8e763a42
|
[
"Unlicense"
] | 1
|
2021-12-11T15:41:38.000Z
|
2021-12-11T15:41:38.000Z
|
Help.py
|
404-Program-not-found/Discord-AntiScamBot
|
9df2b8c7a1d59109c4e9c9b18512cffa8e763a42
|
[
"Unlicense"
] | null | null | null |
Help.py
|
404-Program-not-found/Discord-AntiScamBot
|
9df2b8c7a1d59109c4e9c9b18512cffa8e763a42
|
[
"Unlicense"
] | null | null | null |
import discord
from discord.ext import commands
class help(commands.Cog):
def __init__(self, bot):
bot.help_command = HelpClass()
bot.help_command.cog = self
class HelpClass(commands.HelpCommand):
def __init__(self):
attrs = {
"aliases": ["helpme", "halp"],
"hidden": True
}
super().__init__(command_attrs=attrs, verify_checks=False)
async def send_command_help(self, command):
embed = discord.Embed(title=command.qualified_name, color=0x0084ff)
if command.help:
embed.add_field(name="Description", value=command.help)
embed.add_field(name="Usage", value=f"`{self.get_command_signature(command)}`", inline=False)
alias = command.aliases
if alias:
embed.add_field(name="Aliases", value=", ".join(alias), inline=False)
channel = self.get_destination()
await channel.send(embed=embed)
def get_command_signature(self, command):
return '%s%s %s' % (self.context.clean_prefix, command.qualified_name, command.signature)
async def send_bot_help(self, mapping):
        embed = discord.Embed(title="Command List", color=0x0084ff)
for cog, commands in mapping.items():
filtered = await self.filter_commands(commands, sort=True)
command_signatures = [self.get_command_signature(c) for c in filtered]
if command_signatures:
cog_name = getattr(cog, "qualified_name", "No Category")
embed.add_field(
name=cog_name.title(),
value=f"`{self.context.clean_prefix}help {cog_name}`", inline=True)
channel = self.get_destination()
await channel.send(embed=embed)
async def send_cog_help(self, cog):
embed = discord.Embed(
title=cog.qualified_name.title(),
color=0x0084ff)
filtered = await self.filter_commands(cog.walk_commands(), sort=True)
if filtered:
for command in filtered:
embed.add_field(name=command, value=f"`{self.context.clean_prefix}help {command.qualified_name}`",
inline=True)
channel = self.get_destination()
await channel.send(embed=embed)
else:
await self.send_error_message("Cog not found or is empty")
async def send_error_message(self, error):
embed = discord.Embed(title="Error", description=error, colour=0xff0033)
channel = self.get_destination()
await channel.send(embed=embed)
def setup(bot):
bot.add_cog(help(bot))
| 37.028169
| 114
| 0.628376
|
e2a919e40b10d1902ac4c16d99266f4a07935611
| 606
|
py
|
Python
|
Crash Course on Python/Week 3/programming_excercises/loop_factorial.py
|
garynth41/Google-IT-Automation-with-Python-Professional-Certificate
|
6a800b5b995c05f74c824545260207d19877baf7
|
[
"MIT"
] | 2
|
2020-01-18T16:01:24.000Z
|
2020-02-29T19:27:17.000Z
|
Crash Course on Python/Week 3/programming_excercises/loop_factorial.py
|
garynth41/Google-IT-Automation-with-Python-Professional-Certificate
|
6a800b5b995c05f74c824545260207d19877baf7
|
[
"MIT"
] | null | null | null |
Crash Course on Python/Week 3/programming_excercises/loop_factorial.py
|
garynth41/Google-IT-Automation-with-Python-Professional-Certificate
|
6a800b5b995c05f74c824545260207d19877baf7
|
[
"MIT"
] | 4
|
2020-08-17T16:49:06.000Z
|
2022-02-14T06:45:29.000Z
|
'''
In math, the factorial of a number is defined as the product of an integer and all the integers below it. For example, the factorial of four
(4!) is equal to 1*2*3*4=24. Fill in the blanks to make the factorial function return the right number.
'''
def factorial(n):
result = 1
for i in range(1, 1+n):
result = result*i
return result
print(factorial(4)) # should return 24
print(factorial(5)) # should return 120
'''
Here is your output:
24
120
Well done, you! The pieces of code you're tackling keep getting more complex, but you're doing a great job!
'''
| 28.857143
| 141
| 0.676568
|
2b5adc69dcd99e939a70c3ada31e33b82079b34b
| 2,879
|
py
|
Python
|
enaml/qt/qt_label.py
|
jwiggins/enaml
|
1c8793ba5390c52e119423684753fc3b1b893ae2
|
[
"BSD-3-Clause-Clear"
] | 26
|
2016-04-01T18:49:31.000Z
|
2020-07-21T22:19:46.000Z
|
enaml/qt/qt_label.py
|
jwiggins/enaml
|
1c8793ba5390c52e119423684753fc3b1b893ae2
|
[
"BSD-3-Clause-Clear"
] | 29
|
2016-02-22T17:40:55.000Z
|
2018-08-21T18:18:36.000Z
|
enaml/qt/qt_label.py
|
jwiggins/enaml
|
1c8793ba5390c52e119423684753fc3b1b893ae2
|
[
"BSD-3-Clause-Clear"
] | 4
|
2016-08-29T13:07:19.000Z
|
2018-11-04T01:31:46.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed
from enaml.widgets.label import ProxyLabel
from .QtCore import Qt
from .QtGui import QLabel
from .qt_control import QtControl
ALIGN_MAP = {
'left': Qt.AlignLeft,
'right': Qt.AlignRight,
'center': Qt.AlignHCenter,
'justify': Qt.AlignJustify,
}
VERTICAL_ALIGN_MAP = {
'top': Qt.AlignTop,
'bottom': Qt.AlignBottom,
'center': Qt.AlignVCenter,
}
class QtLabel(QtControl, ProxyLabel):
""" A Qt implementation of an Enaml ProxyLabel.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QLabel)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying label widget.
"""
self.widget = QLabel(self.parent_widget())
def init_widget(self):
""" Initialize the underlying widget.
"""
super(QtLabel, self).init_widget()
d = self.declaration
self.set_text(d.text)
self.set_align(d.align)
self.set_vertical_align(d.vertical_align)
self.widget.linkActivated.connect(self.on_link_activated)
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
def on_link_activated(self, link):
""" Handle the link activated signal.
"""
self.declaration.link_activated(link)
#--------------------------------------------------------------------------
# ProxyLabel API
#--------------------------------------------------------------------------
def set_text(self, text):
""" Set the text in the widget.
"""
with self.geometry_guard():
self.widget.setText(text)
def set_align(self, align):
""" Set the alignment of the text in the widget.
"""
widget = self.widget
alignment = widget.alignment()
alignment &= ~Qt.AlignHorizontal_Mask
alignment |= ALIGN_MAP[align]
widget.setAlignment(alignment)
def set_vertical_align(self, align):
""" Set the vertical alignment of the text in the widget.
"""
widget = self.widget
alignment = widget.alignment()
alignment &= ~Qt.AlignVertical_Mask
alignment |= VERTICAL_ALIGN_MAP[align]
widget.setAlignment(alignment)
| 29.377551
| 79
| 0.499132
|
9e4ce30e2b80d49da6894c6010b25c816bb05c5c
| 142
|
py
|
Python
|
Pythonexercicios/ex.021.py
|
FabioLeonel/Python
|
c71bd2451f15cc33153044a8c5a79d2d4cea48ce
|
[
"MIT"
] | null | null | null |
Pythonexercicios/ex.021.py
|
FabioLeonel/Python
|
c71bd2451f15cc33153044a8c5a79d2d4cea48ce
|
[
"MIT"
] | 1
|
2020-09-16T16:20:09.000Z
|
2020-09-16T16:20:09.000Z
|
Pythonexercicios/ex.021.py
|
FabioLeonel/Python
|
c71bd2451f15cc33153044a8c5a79d2d4cea48ce
|
[
"MIT"
] | null | null | null |
# 21
import pygame
pygame.mixer.init()
pygame.init()
pygame.mixer.music.load('musicateste.mp3')
pygame.mixer.music.play()
pygame.event.wait()
| 17.75
| 42
| 0.760563
|
46721e903b924f9ea0d465acc84524807d0a062c
| 2,906
|
py
|
Python
|
labs/Python/lab1.py
|
SzymonZos/Python-And-R-Introduction
|
ac9be6489fb7970f0a57cebb39bf69dacae8b7fb
|
[
"MIT"
] | 1
|
2020-06-11T19:47:43.000Z
|
2020-06-11T19:47:43.000Z
|
labs/Python/lab1.py
|
SzymonZos/Python-And-R-Introduction
|
ac9be6489fb7970f0a57cebb39bf69dacae8b7fb
|
[
"MIT"
] | 1
|
2020-07-30T08:39:30.000Z
|
2020-07-30T08:39:30.000Z
|
labs/Python/lab1.py
|
SzymonZos/Python-And-R-Introduction
|
ac9be6489fb7970f0a57cebb39bf69dacae8b7fb
|
[
"MIT"
] | null | null | null |
# Task 1
def find_position(string: str, key: str) -> list:
return [position for position, char in enumerate(string) if char == key]
def find_positions(string: str, *keys) -> dict:
positions = {}
for key in keys:
position = find_position(string, key)
if position:
positions[key] = position
return positions
# Task 2
def positions_to_tuple_list(positions: dict) -> list:
tuple_list = []
for char in positions:
for position in positions[char]:
tuple_list.append((position, char))
return sorted(tuple_list)
def positions_to_tuple_list_comprehension(positions: dict) -> list:
return sorted([(pos, key) for key in positions for pos in positions[key]])
# Task 3
def dict_sorted_generator(input_dict: dict):
return ((key, value) for key, value in sorted(input_dict.items()))
# Task 4
def draw_histo(input_dict: dict) -> None:
new_dict = {key: len(value) for key, value in input_dict.items()}
for key, value in dict_sorted_generator(new_dict):
print(key, "*" * value)
# Task 5
def convert_to_unique(input_list: list) -> list:
return [*set(input_list)]
# Task 6
def jaccard_index(set_1: set, set_2: set) -> float:
return len(set_1 & set_2) / len(set_1 | set_2)
# Task 7
def evaluate_sets(set_1: set, set_2: set) -> tuple:
return (set_1, set_2) if len(set_1) >= len(set_2) else (set_2, set_1)
def remove_from_bigger_set(bigger: set, smaller: set) -> None:
bigger.remove((bigger - smaller).pop())
def optimize_jaccard_index(set_1: set, set_2: set, limit=0.9) -> float:
index = jaccard_index(set_1, set_2)
while set_1 and set_2 and index < limit:
try:
remove_from_bigger_set(*evaluate_sets(set_1, set_2))
except KeyError:
break
index = jaccard_index(set_1, set_2)
return index
def main():
# Task 1
positions = find_positions("test" * 2 + "xD", "a", "e", "s", "x")
print(positions)
# Task 2
print(positions_to_tuple_list(positions))
print(positions_to_tuple_list_comprehension(positions))
# Task 3
dict_gen = dict_sorted_generator({5: 1, 1: 5})
for a, b in dict_sorted_generator({5: 1, 1: 5}):
print(f"k={a}, v={b}")
for a, b in dict_gen:
print(f"k={a}, v={b}")
for a, b in dict_gen:
print(f"k={a}, v={b}")
# Task 4
draw_histo(positions)
# Task 5
print(convert_to_unique([1, 2, 1, 2, 6, 7, 6, 9, 9, 9, 10]))
# Task 6
s1 = {0, 10, 20}
s2 = {20, 5, 10}
print(jaccard_index(s1, s2))
# Task 7
set_1 = {0, 5, 4}
set_2 = {5, 2, 4}
optimize_jaccard_index(set_1, set_2, 0.55)
print(set_1, set_2, jaccard_index(set_1, set_2))
set_1 = {0, 5, 4}
set_2 = {5, 2, 4}
optimize_jaccard_index(set_1, set_2)
print(set_1, set_2, jaccard_index(set_1, set_2))
if __name__ == "__main__":
main()
| 25.716814
| 78
| 0.628699
|
8246ed3bb0d0e6f457c5b92b483f2b421874fe2e
| 9,969
|
py
|
Python
|
hplip-3.20.3/base/services.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | null | null | null |
hplip-3.20.3/base/services.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | 1
|
2021-11-20T16:33:39.000Z
|
2021-11-20T16:33:39.000Z
|
hplip-3.20.3/base/services.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# (c) Copyright 2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Goutam Kodu, Amarnath Chitumalla
#
#
#
# Std Lib
import sys
import os
from subprocess import Popen, PIPE
import grp
import fnmatch
import tempfile
import socket
import struct
import select
import time
import fcntl
import errno
import stat
import string
import glob
import subprocess  # replaces the deprecated 'commands' module (removed in Python 3)
import io
import re
import getpass
import locale
from .sixext.moves import html_entities
# Local
from .g import *
from .codes import *
from . import utils, tui
from . import logger
# System wide logger
log = logger.Logger('', logger.Logger.LOG_LEVEL_INFO, logger.Logger.LOG_TO_CONSOLE)
log.set_level('info')
def running_as_root():
return os.geteuid() == 0
def restart_cups():
if os.path.exists('/etc/init.d/cups'):
return '/etc/init.d/cups restart'
elif os.path.exists('/etc/init.d/cupsys'):
return '/etc/init.d/cupsys restart'
else:
return 'killall -HUP cupsd'
def restart(passwordObj):
ok = False
shutdown = utils.which('shutdown')
if shutdown and passwordObj:
cmd = "%s -r now" % (os.path.join(shutdown, "shutdown"))
cmd = passwordObj.getAuthCmd() % cmd
status, output = utils.run(cmd, passwordObj, "Need authentication to restart system")
ok = (status == 0)
return ok
def run_open_mdns_port(core, passwordObj, callback=None):
open_mdns_port_cmd = core.get_distro_ver_data('open_mdns_port')
log.debug(open_mdns_port_cmd)
if open_mdns_port_cmd and passwordObj:
x = 1
for cmd in open_mdns_port_cmd:
cmd = passwordObj.getAuthCmd() % cmd
status, output = utils.run(cmd, passwordObj, "Need authentication to open mdns port [%s]"%cmd)
if status != 0:
log.warn("An error occurred running '%s'" % cmd)
log.warn(output)
if callback is not None:
callback(cmd, "Open mDNS/Bonjour step %d" % x)
x += 1
def run_hp_tools(cmd):
if cmd is not None:
hpCommand = utils.which(cmd, True)
if not hpCommand:
hpCommand = cmd
log.debug(hpCommand)
status, output = utils.run(hpCommand)
return status == 0
else:
log.error("Command not found")
return False
def run_hp_tools_with_auth(cmd, passwordObj):
if cmd is not None and passwordObj is not None :
hpCommand = utils.which(cmd,True)
        if not hpCommand:  # if it is a local command, e.g. ./setup.py
hpCommand = cmd
hpCommand = passwordObj.getAuthCmd() % hpCommand
log.debug(hpCommand)
status, output = utils.run(hpCommand, passwordObj, "Need authentication to run %s command"%cmd)
return status == 0
else:
log.error("Command not found or password object is not valid")
return False
# start_service() starts the services
# Input:
# service_name (string) --> service name to be started.
# passwordObj --> root required services, needs to pass base/password object
# Output:
# ret_val (bool) --> returns True, if service is started or already running also.
# --> returns False, if failed to start service.
def start_service( service_name, passwordObj):
ret_Val = False
if not service_name or not passwordObj:
return ret_Val
if utils.which('systemctl'):
cmd_status = passwordObj.getAuthCmd()%("systemctl status %s.service"%service_name)
log.debug(cmd_status)
sts,out = utils.run(cmd_status, passwordObj, "Need authentication to get %s service status"%service_name)
if sts ==0:
if 'stop' in out or 'inactive' in out:
cmd_start = passwordObj.getAuthCmd()%("systemctl start %s.service"%service_name)
log.debug("cmd_start=%s"%cmd_start)
sts,out = utils.run(cmd_start, passwordObj, "Need authentication to start/restart %s service"%service_name)
if sts ==0:
ret_Val = True
else:
ret_Val = True
else:
log.error("Fail to start %s service, please start %s service manually."%(service_name,service_name))
elif utils.which('service'):
cmd_status = passwordObj.getAuthCmd()%("service %s status"%service_name)
log.debug(cmd_status)
sts,out = utils.run(cmd_status, passwordObj, "Need authentication to get %s service status"%service_name)
if sts ==0:
if 'stop' in out or 'inactive' in out:
cmd_start = passwordObj.getAuthCmd()%("service %s start"%service_name)
log.debug("cmd_start=%s"%cmd_start)
sts,out = utils.run(cmd_start, passwordObj, "Need authentication to start/restart %s service"%service_name)
if sts ==0:
ret_Val = True
elif 'unrecognized service' in out:
log.error("Failed to Start since %s is unrecognized service"%service_name)
else:
ret_Val = True
else:
log.error("Fail to start %s service, please start %s service manually."%(service_name,service_name))
elif os.path.exists('/etc/init.d/%s'%service_name):
cmd_status = passwordObj.getAuthCmd()%('/etc/init.d/%s status'%service_name)
log.debug(cmd_status)
sts,out = utils.run(cmd_status, passwordObj, "Need authentication to get %s service status"%service_name)
if sts ==0:
if 'stop' in out or 'inactive' in out:
cmd_start = passwordObj.getAuthCmd()%('/etc/init.d/%s start'%service_name)
log.debug("cmd_start=%s"%cmd_start)
sts,out = utils.run(cmd_start, passwordObj, "Need authentication to start/restart %s service"%service_name)
if sts ==0:
ret_Val = True
else:
ret_Val = True
else:
log.error("Fail to start %s service, please start %s service manually."%(service_name,service_name))
else:
if service_name == 'cups':
cmd = 'lpstat -r'
sts,out = utils.run(cmd, passwordObj, "Need authentication to get %s service status"%service_name)
if sts ==0 and 'is running' in out:
ret_Val = True
else:
log.error("service command not found, please start cups service manually.")
else:
log.error("Fail to start %s service, please start %s service manually."%(service_name,service_name))
return ret_Val
def run_systray():
path = utils.which('hp-systray')
if path:
path = os.path.join(path, 'hp-systray')
else:
path = os.path.join(prop.home_dir, 'systray.py')
if not os.path.exists(path):
log.warn("Unable to start hp-systray")
log.debug("Running hp-systray: %s --force-startup" % path)
os.spawnlp(os.P_NOWAIT, path, 'hp-systray', '--force-startup', "--ignore-update-firsttime")
log.debug("Waiting for hp-systray to start...")
time.sleep(1)
def disable_SmartInstall():
path = utils.which('hp-SIDisable',True)
if path:
param = '-'
sicmd = "%s %s" % (path,param)
if run_hp_tools(sicmd):
log.debug("Smart Install is disabled\n")
else:
log.error("Smart Install could not be disabled\n")
else:
try:
from . import pkit
plugin = PLUGIN_REQUIRED
plugin_reason = PLUGIN_REASON_NONE
ok, sudo_ok = pkit.run_plugin_command(plugin == PLUGIN_REQUIRED, plugin_reason)
if not ok or not sudo_ok:
log.error("Failed to install plug-in.")
except ImportError:
log.warn("Import error\n")
def close_running_hp_processes():
# check systray is running?
status,output = utils.Is_Process_Running('hp-systray')
if status is True:
ok,choice = tui.enter_choice("\nSome HPLIP applications are running. Press 'y' to close applications or press 'n' to quit upgrade(y=yes*, n=no):",['y','n'],'y')
if not ok or choice =='n':
log.info("Manually close HPLIP applications and run hp-upgrade again.")
return False
try:
# dBus
from dbus import SystemBus, lowlevel
except ImportError:
log.error("Unable to load DBus.")
pass
else:
try:
args = ['', '', EVENT_SYSTEMTRAY_EXIT, prop.username, 0, '', '']
msg = lowlevel.SignalMessage('/', 'com.hplip.StatusService', 'Event')
msg.append(signature='ssisiss', *args)
log.debug("Sending close message to hp-systray ...")
SystemBus().send_message(msg)
time.sleep(0.5)
except:
log.error("Failed to send DBus message to hp-systray/hp-toolbox.")
pass
toolbox_status,output = utils.Is_Process_Running('hp-toolbox')
if toolbox_status is True:
log.error("Failed to close either HP-Toolbox/HP-Systray. Manually close and run hp-upgrade again.")
return False
return True
| 35.476868
| 168
| 0.624135
|
8d0326de7c4b4c0be83304f13fa7f5be560875b3
| 4,006
|
py
|
Python
|
sdk/python/pulumi_azure/config/vars.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/config/vars.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/config/vars.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'auxiliary_tenant_ids',
'client_certificate_password',
'client_certificate_path',
'client_id',
'client_secret',
'disable_correlation_request_id',
'disable_terraform_partner_id',
'environment',
'features',
'location',
'metadata_host',
'metadata_url',
'msi_endpoint',
'partner_id',
'skip_credentials_validation',
'skip_provider_registration',
'storage_use_azuread',
'subscription_id',
'tenant_id',
'use_msi',
]
__config__ = pulumi.Config('azure')
auxiliary_tenant_ids = __config__.get('auxiliaryTenantIds')
client_certificate_password = __config__.get('clientCertificatePassword')
"""
The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client
Certificate
"""
client_certificate_path = __config__.get('clientCertificatePath')
"""
The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service
Principal using a Client Certificate.
"""
client_id = __config__.get('clientId')
"""
The Client ID which should be used.
"""
client_secret = __config__.get('clientSecret')
"""
The Client Secret which should be used. For use when authenticating as a Service Principal using a Client Secret.
"""
disable_correlation_request_id = __config__.get('disableCorrelationRequestId')
"""
This will disable the x-ms-correlation-request-id header.
"""
disable_terraform_partner_id = __config__.get('disableTerraformPartnerId')
"""
This will disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.
"""
environment = __config__.get('environment') or (_utilities.get_env('AZURE_ENVIRONMENT', 'ARM_ENVIRONMENT') or 'public')
"""
The Cloud Environment which should be used. Possible values are public, usgovernment, german, and china. Defaults to
public.
"""
features = __config__.get('features')
location = __config__.get('location') or _utilities.get_env('ARM_LOCATION')
metadata_host = __config__.get('metadataHost') or (_utilities.get_env('ARM_METADATA_HOSTNAME') or '')
"""
The Hostname which should be used for the Azure Metadata Service.
"""
metadata_url = __config__.get('metadataUrl') or (_utilities.get_env('ARM_METADATA_URL') or '')
"""
Deprecated - replaced by `metadata_host`.
"""
msi_endpoint = __config__.get('msiEndpoint')
"""
The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected
automatically.
"""
partner_id = __config__.get('partnerId')
"""
A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
"""
skip_credentials_validation = __config__.get('skipCredentialsValidation')
"""
This will cause the AzureRM Provider to skip verifying the credentials being used are valid.
"""
skip_provider_registration = __config__.get('skipProviderRegistration') or (_utilities.get_env_bool('ARM_SKIP_PROVIDER_REGISTRATION') or False)
"""
Should the AzureRM Provider skip registering all of the Resource Providers that it supports, if they're not already
registered?
"""
storage_use_azuread = __config__.get('storageUseAzuread') or (_utilities.get_env_bool('ARM_STORAGE_USE_AZUREAD') or False)
"""
Should the AzureRM Provider use AzureAD to access the Storage Data Plane APIs?
"""
subscription_id = __config__.get('subscriptionId') or (_utilities.get_env('ARM_SUBSCRIPTION_ID') or '')
"""
The Subscription ID which should be used.
"""
tenant_id = __config__.get('tenantId')
"""
The Tenant ID which should be used.
"""
use_msi = __config__.get('useMsi')
"""
Allow Managed Service Identity to be used for Authentication.
"""
| 30.120301
| 143
| 0.762107
|
dc9cf9e53808d85a338e57bf175986ce5368f5d7
| 1,881
|
py
|
Python
|
apps/hie/decorators.py
|
thebureaugroup/sharemyhealth
|
de93793d7065d50af53f260e3347b8ed30d8c864
|
[
"Apache-2.0"
] | null | null | null |
apps/hie/decorators.py
|
thebureaugroup/sharemyhealth
|
de93793d7065d50af53f260e3347b8ed30d8c864
|
[
"Apache-2.0"
] | 4
|
2021-06-05T00:09:17.000Z
|
2021-12-13T20:52:41.000Z
|
apps/hie/decorators.py
|
thebureaugroup/sharemyhealth
|
de93793d7065d50af53f260e3347b8ed30d8c864
|
[
"Apache-2.0"
] | null | null | null |
from functools import update_wrapper
from django.http import HttpResponseForbidden
import logging
from django.utils.translation import ugettext_lazy as _
from jwkest.jwt import JWT
logger = logging.getLogger('sharemyhealth_.%s' % __name__)
_author_ = "Alan Viars"
def check_ial_before_allowing_authorize(func):
def wrapper(request, *args, **kwargs):
if request.user.is_authenticated:
try:
vmi = request.user.social_auth.filter(
provider='verifymyidentity-openidconnect')[0]
extra_data = vmi.extra_data
if 'id_token' in vmi.extra_data.keys():
id_token = extra_data.get('id_token')
parsed_id_token = JWT().unpack(id_token)
parsed_id_token = parsed_id_token.payload()
except Exception:
id_token = "No ID token."
parsed_id_token = {'sub': '', 'ial': '1',
"note": "No ID token for this user"}
if parsed_id_token['ial'] not in ('2', '3'):
msg = _(
"%s %s was defined access due to insufficient identity assurance level (IAL). Subject=%s"
"" %
(request.user.first_name, request.user.last_name, parsed_id_token['sub']))
logger.info(msg)
response_string = _(
"""Your identity assurance level (IAL) of 1 is insufficient for this action.""")
return HttpResponseForbidden(response_string)
return func(request, *args, **kwargs)
return update_wrapper(wrapper, func)
def bind_to_patient(func):
def wrapper(request, *args, **kwargs):
if request.user.is_authenticated:
pass
return func(request, *args, **kwargs)
return update_wrapper(wrapper, func)
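# Illustrative usage sketch (hypothetical, not part of the original module):
# the decorators above wrap ordinary Django view functions.
#
#     from django.http import HttpResponse
#
#     @check_ial_before_allowing_authorize
#     def authorize(request):
#         # Only reached when the user's ID token carries IAL 2 or 3.
#         return HttpResponse("authorized")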
| 35.490566
| 110
| 0.58639
|
56b1ab541f72d2bb2c630a2622afb80086bee50e
| 5,159
|
py
|
Python
|
haystack/tracer.py
|
ExpediaDotCom/haystack-client-python
|
e32141f9759282a1a25fc03231a64e47b81e7f3a
|
[
"Apache-2.0"
] | 5
|
2018-09-24T09:05:41.000Z
|
2019-04-04T10:31:09.000Z
|
haystack/tracer.py
|
ExpediaDotCom/haystack-client-python
|
e32141f9759282a1a25fc03231a64e47b81e7f3a
|
[
"Apache-2.0"
] | 6
|
2019-04-17T19:04:42.000Z
|
2021-01-26T04:23:19.000Z
|
haystack/tracer.py
|
ExpediaDotCom/haystack-client-python
|
e32141f9759282a1a25fc03231a64e47b81e7f3a
|
[
"Apache-2.0"
] | 2
|
2018-09-18T14:31:01.000Z
|
2019-02-25T14:33:43.000Z
|
import time
import uuid
from opentracing import Format, Tracer, UnsupportedFormatException
from opentracing.scope_managers import ThreadLocalScopeManager
from .text_propagator import TextPropagator
from .span import Span, SpanContext
class HaystackTracer(Tracer):
def __init__(self,
service_name,
recorder,
scope_manager=None,
common_tags=None,
use_shared_spans=False):
"""
Initialize a Haystack Tracer instance.
:param service_name: The service name to which all spans will belong.
:param recorder: The recorder (dispatcher) implementation which handles
finished spans.
:param scope_manager: An optional parameter to override the default
ThreadLocal scope manager.
:param common_tags: An optional dictionary of tags which should be
applied to all created spans for this service
:param use_shared_spans: A boolean indicating whether or not to use
shared spans. This is when client/server spans share the same span id.
Default is to use unique span ids.
"""
scope_manager = ThreadLocalScopeManager() if scope_manager is None \
else scope_manager
super().__init__(scope_manager)
self._propagators = {}
self._common_tags = {} if common_tags is None else common_tags
self.service_name = service_name
self.recorder = recorder
self.use_shared_spans = use_shared_spans
self.register_propagator(Format.TEXT_MAP, TextPropagator())
self.register_propagator(Format.HTTP_HEADERS, TextPropagator())
def register_propagator(self, format, propagator):
"""Register a propagator with this Tracer.
:param string format: a Format identifier like Format.TEXT_MAP
:param Propagator propagator: a Propagator instance to handle
inject/extract calls
"""
self._propagators[format] = propagator
def start_active_span(self,
operation_name,
child_of=None,
references=None,
tags=None,
start_time=None,
ignore_active_span=False,
finish_on_close=True):
span = self.start_span(operation_name=operation_name,
child_of=child_of,
references=references,
tags=tags,
start_time=start_time,
ignore_active_span=ignore_active_span)
return self.scope_manager.activate(span, finish_on_close)
def start_span(self,
operation_name=None,
child_of=None,
references=None,
tags=None,
start_time=None,
ignore_active_span=False):
start_time = time.time() if start_time is None else start_time
# Check for an existing ctx in `references`
parent_ctx = None
if child_of is not None:
parent_ctx = (child_of if isinstance(child_of, SpanContext)
else child_of.context)
elif references is not None and len(references) > 0:
parent_ctx = references[0].referenced_context
# Check for an active span in scope manager
if not ignore_active_span and parent_ctx is None:
scope = self.scope_manager.active
if scope is not None:
parent_ctx = scope.span.context
new_ctx = SpanContext(span_id=format(uuid.uuid4()))
if parent_ctx is not None:
new_ctx.trace_id = parent_ctx.trace_id
if parent_ctx.baggage is not None:
new_ctx._baggage = parent_ctx.baggage.copy()
if self.use_shared_spans:
new_ctx.span_id = parent_ctx.span_id
new_ctx.parent_id = parent_ctx.parent_id
else:
new_ctx.parent_id = parent_ctx.span_id
else:
new_ctx.trace_id = format(uuid.uuid4())
# Set common tags
if self._common_tags:
tags = {**self._common_tags, **tags} if tags else \
self._common_tags.copy()
return Span(self,
operation_name=operation_name,
context=new_ctx,
tags=tags,
start_time=start_time)
def inject(self, span_context, format, carrier):
if format in self._propagators:
self._propagators[format].inject(span_context, carrier)
else:
raise UnsupportedFormatException()
def extract(self, format, carrier):
if format in self._propagators:
return self._propagators[format].extract(carrier)
else:
raise UnsupportedFormatException()
def record(self, span):
self.recorder.record_span(span)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
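# Illustrative usage sketch (not part of the original module). The recorder
# below is a minimal stand-in exposing the single record_span() hook this
# tracer calls; real deployments would use one of the package's dispatchers.
if __name__ == "__main__":
    class _PrintRecorder:
        def record_span(self, span):
            print("finished span:", span)

    with HaystackTracer("example-service", _PrintRecorder(),
                        common_tags={"app": "demo"}) as tracer:
        # finish_on_close defaults to True, so the span is recorded when
        # the scope exits.
        with tracer.start_active_span("say-hello"):
            pass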
| 37.656934
| 79
| 0.598566
|
d216f003b200228d651288e0716e93e92bab7c0b
| 378
|
py
|
Python
|
fec/data/jinja2.py
|
cnlucas/fec-cms
|
aa67a0d4c19a350420d2f8c4b4e6f93acb808639
|
[
"CC0-1.0"
] | 39
|
2018-03-09T21:56:17.000Z
|
2022-01-20T02:31:38.000Z
|
fec/data/jinja2.py
|
rbtrsv/fec-cms
|
3136d1cf300ce1505d7035de38038e1c045937e6
|
[
"CC0-1.0"
] | 3,183
|
2018-03-09T20:30:55.000Z
|
2022-03-30T21:27:49.000Z
|
fec/data/jinja2.py
|
rbtrsv/fec-cms
|
3136d1cf300ce1505d7035de38038e1c045937e6
|
[
"CC0-1.0"
] | 19
|
2018-03-09T20:47:31.000Z
|
2022-03-10T02:54:33.000Z
|
import jinja2
from compressor.contrib.jinja2ext import CompressorExtension
def environment(**options):
"""Create a jinja2 environment with the CompressorExtension added in"""
options['extensions'] += [CompressorExtension]
options['autoescape'] = True # This was already True but we want to set it explicitly
env = jinja2.Environment(**options)
return env
| 34.363636
| 90
| 0.746032
|
11c38efa994a130d9671826e38658c753e88df4e
| 11,798
|
py
|
Python
|
Bio/PDB/PDBIO.py
|
ajmaurais/biopython
|
f110e06e740d81ff0d762a5da34099bfdca269c3
|
[
"BSD-3-Clause"
] | 1
|
2020-01-21T02:04:08.000Z
|
2020-01-21T02:04:08.000Z
|
Bio/PDB/PDBIO.py
|
ajmaurais/biopython
|
f110e06e740d81ff0d762a5da34099bfdca269c3
|
[
"BSD-3-Clause"
] | null | null | null |
Bio/PDB/PDBIO.py
|
ajmaurais/biopython
|
f110e06e740d81ff0d762a5da34099bfdca269c3
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Output of PDB files."""
# To allow saving of chains, residues, etc..
from Bio.PDB.StructureBuilder import StructureBuilder
# Allowed Elements
from Bio.Data.IUPACData import atom_weights
_ATOM_FORMAT_STRING = (
"%s%5i %-4s%c%3s %c%4i%c %8.3f%8.3f%8.3f%s%6.2f %4s%2s%2s\n"
)
_PQR_ATOM_FORMAT_STRING = (
"%s%5i %-4s%c%3s %c%4i%c %8.3f%8.3f%8.3f %7s %6s %2s\n"
)
class Select:
"""Select everything for PDB output (for use as a base class).
Default selection (everything) during writing - can be used as base class
to implement selective output. This selects which entities will be written out.
"""
def __repr__(self):
"""Represent the output as a string for debugging."""
return "<Select all>"
def accept_model(self, model):
"""Overload this to reject models for output."""
return 1
def accept_chain(self, chain):
"""Overload this to reject chains for output."""
return 1
def accept_residue(self, residue):
"""Overload this to reject residues for output."""
return 1
def accept_atom(self, atom):
"""Overload this to reject atoms for output."""
return 1
_select = Select()
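# Illustrative example (not part of the original module): selective output is
# implemented by subclassing Select, e.g. to write only C-alpha atoms with
# io.save("ca_only.pdb", CAlphaSelect()).
#
#     class CAlphaSelect(Select):
#         def accept_atom(self, atom):
#             return 1 if atom.get_name() == "CA" else 0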
class StructureIO:
"""Base class to derive structure file format writers from."""
def __init__(self):
"""Initialise."""
pass
def set_structure(self, pdb_object):
"""Check what the user is providing and build a structure."""
if pdb_object.level == "S":
structure = pdb_object
else:
sb = StructureBuilder()
sb.init_structure("pdb")
sb.init_seg(" ")
# Build parts as necessary
if pdb_object.level == "M":
sb.structure.add(pdb_object.copy())
self.structure = sb.structure
else:
sb.init_model(0)
if pdb_object.level == "C":
sb.structure[0].add(pdb_object.copy())
else:
sb.init_chain("A")
if pdb_object.level == "R":
try:
parent_id = pdb_object.parent.id
sb.structure[0]["A"].id = parent_id
except Exception:
pass
sb.structure[0]["A"].add(pdb_object.copy())
else:
# Atom
sb.init_residue("DUM", " ", 1, " ")
try:
parent_id = pdb_object.parent.parent.id
sb.structure[0]["A"].id = parent_id
except Exception:
pass
sb.structure[0]["A"].child_list[0].add(pdb_object.copy())
# Return structure
structure = sb.structure
self.structure = structure
class PDBIO(StructureIO):
"""Write a Structure object (or a subset of a Structure object) as a PDB or PQR file.
Examples
--------
>>> from Bio.PDB import PDBParser
>>> from Bio.PDB.PDBIO import PDBIO
>>> parser = PDBParser()
>>> structure = parser.get_structure("1a8o", "PDB/1A8O.pdb")
>>> io=PDBIO()
>>> io.set_structure(structure)
>>> io.save("bio-pdb-pdbio-out.pdb")
>>> import os
>>> os.remove("bio-pdb-pdbio-out.pdb") # tidy up
"""
def __init__(self, use_model_flag=0, is_pqr=False):
"""Create the PDBIO object.
:param use_model_flag: if 1, force use of the MODEL record in output.
:type use_model_flag: int
:param is_pqr: if True, build PQR file. Otherwise build PDB file.
:type is_pqr: Boolean
"""
self.use_model_flag = use_model_flag
self.is_pqr = is_pqr
# private methods
def _get_atom_line(
self,
atom,
hetfield,
segid,
atom_number,
resname,
resseq,
icode,
chain_id,
charge=" ",
):
"""Return an ATOM PDB string (PRIVATE)."""
if hetfield != " ":
record_type = "HETATM"
else:
record_type = "ATOM "
if atom.element:
element = atom.element.strip().upper()
if element.capitalize() not in atom_weights:
raise ValueError("Unrecognised element %r" % atom.element)
element = element.rjust(2)
else:
element = " "
name = atom.get_fullname().strip()
# Pad atom name if:
# - smaller than 4 characters
# AND - is not C, N, O, S, H, F, P, ..., one letter elements
# AND - first character is NOT numeric (funky hydrogen naming rules)
if len(name) < 4 and name[:1].isalpha() and len(element.strip()) < 2:
name = " " + name
altloc = atom.get_altloc()
x, y, z = atom.get_coord()
# PDB Arguments
if not self.is_pqr:
bfactor = atom.get_bfactor()
occupancy = atom.get_occupancy()
# PQR Arguments
else:
radius = atom.get_radius()
pqr_charge = atom.get_charge()
if not self.is_pqr:
try:
occupancy_str = "%6.2f" % occupancy
except TypeError:
if occupancy is None:
occupancy_str = " " * 6
import warnings
from Bio import BiopythonWarning
warnings.warn(
"Missing occupancy in atom %s written as blank"
% repr(atom.get_full_id()),
BiopythonWarning,
)
else:
raise TypeError(
"Invalid occupancy %r in atom %r" % (occupancy, atom.get_full_id())
)
args = (
record_type,
atom_number,
name,
altloc,
resname,
chain_id,
resseq,
icode,
x,
y,
z,
occupancy_str,
bfactor,
segid,
element,
charge,
)
return _ATOM_FORMAT_STRING % args
else:
# PQR case
try:
pqr_charge = "%7.4f" % pqr_charge
except TypeError:
if pqr_charge is None:
pqr_charge = " " * 7
import warnings
from Bio import BiopythonWarning
warnings.warn("Missing charge in atom %s written as blank" %
repr(atom.get_full_id()), BiopythonWarning)
else:
raise TypeError("Invalid charge %r in atom %r"
% (pqr_charge, atom.get_full_id()))
try:
radius = "%6.4f" % radius
except TypeError:
if radius is None:
radius = " " * 6
import warnings
from Bio import BiopythonWarning
warnings.warn("Missing radius in atom %s written as blank" %
repr(atom.get_full_id()), BiopythonWarning)
else:
raise TypeError("Invalid radius %r in atom %r"
% (radius, atom.get_full_id()))
args = (
record_type,
atom_number,
name,
altloc,
resname,
chain_id,
resseq,
icode,
x,
y,
z,
pqr_charge,
radius,
element,
)
return _PQR_ATOM_FORMAT_STRING % args
# Public methods
def save(self, file, select=_select, write_end=True, preserve_atom_numbering=False):
"""Save structure to a file.
:param file: output file
:type file: string or filehandle
:param select: selects which entities will be written.
:type select: object
Typically select is a subclass of L{Select}, it should
have the following methods:
- accept_model(model)
- accept_chain(chain)
- accept_residue(residue)
- accept_atom(atom)
These methods should return 1 if the entity is to be
written out, 0 otherwise.
Typically select is a subclass of L{Select}.
"""
get_atom_line = self._get_atom_line
if isinstance(file, str):
fp = open(file, "w")
close_file = 1
else:
# filehandle, I hope :-)
fp = file
close_file = 0
# multiple models?
if len(self.structure) > 1 or self.use_model_flag:
model_flag = 1
else:
model_flag = 0
for model in self.structure.get_list():
if not select.accept_model(model):
continue
# necessary for ENDMDL
# do not write ENDMDL if no residues were written
# for this model
model_residues_written = 0
if not preserve_atom_numbering:
atom_number = 1
if model_flag:
fp.write("MODEL %s\n" % model.serial_num)
for chain in model.get_list():
if not select.accept_chain(chain):
continue
chain_id = chain.get_id()
# necessary for TER
# do not write TER if no residues were written
# for this chain
chain_residues_written = 0
for residue in chain.get_unpacked_list():
if not select.accept_residue(residue):
continue
hetfield, resseq, icode = residue.get_id()
resname = residue.get_resname()
segid = residue.get_segid()
for atom in residue.get_unpacked_list():
if select.accept_atom(atom):
chain_residues_written = 1
model_residues_written = 1
if preserve_atom_numbering:
atom_number = atom.get_serial_number()
s = get_atom_line(
atom,
hetfield,
segid,
atom_number,
resname,
resseq,
icode,
chain_id,
)
fp.write(s)
if not preserve_atom_numbering:
atom_number += 1
if chain_residues_written:
fp.write(
"TER %5i %3s %c%4i%c \n"
% (atom_number, resname, chain_id, resseq, icode)
)
if model_flag and model_residues_written:
fp.write("ENDMDL\n")
if write_end:
fp.write("END\n")
if close_file:
fp.close()
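# --- Illustrative sketch (not part of the original module) ---
# Minimal end-to-end usage combining PDBIO with the example selector sketched
# near the Select class above; the "PDB/1A8O.pdb" path mirrors the class
# docstring and is an assumption here. The helper is not invoked anywhere.
def _example_save_chain_a_calphas():
    from Bio.PDB import PDBParser
    parser = PDBParser(QUIET=True)
    structure = parser.get_structure("1a8o", "PDB/1A8O.pdb")
    io = PDBIO()
    io.set_structure(structure)
    # Only chain A C-alpha atoms pass the selector and end up in the output.
    io.save("1a8o_chainA_ca.pdb", select=_ChainACAlphaSelect())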
| 33.047619
| 108
| 0.476013
|
e618a1e8b9eea370c49f25befbc89624bfe488c8
| 6,386
|
py
|
Python
|
callAdaptiveSampler.py
|
CSMMLab/neuralEntropyClosures
|
5efc5961f2fac36921a749d35f3636c61d1cc873
|
[
"MIT"
] | 4
|
2021-09-23T07:21:23.000Z
|
2021-12-24T11:35:39.000Z
|
callAdaptiveSampler.py
|
ScSteffen/neuralEntropyClosures
|
5efc5961f2fac36921a749d35f3636c61d1cc873
|
[
"MIT"
] | null | null | null |
callAdaptiveSampler.py
|
ScSteffen/neuralEntropyClosures
|
5efc5961f2fac36921a749d35f3636c61d1cc873
|
[
"MIT"
] | null | null | null |
"""
brief: driver for adaptive sampling
author: Steffen Schotthöfer
"""
import os
import numpy as np
from multiprocessing import Pool
from functools import partial
import csv
import tensorflow as tf
import matplotlib.pyplot as plt
from src.utils import load_data, scatter_plot_2d
from src.sampler.adaptiveSampler import AdaptiveSampler
from src.math import EntropyTools
def test_func(x):
# quadratic function
return 0.5 * (x[:, 0] ** 2 + x[:, 1] ** 2)
def grad_func(x):
    # gradient of test_func
return x
def compute_diams(pois, points, grads, knn_param):
adap_sampler = AdaptiveSampler(points, grads, knn_param)
# print(len(pois))
diam_list: list = []
count = 0
count_good = 0
count_bad = 0
for poi in pois:
# for poi in pois:
vertices, success = adap_sampler.compute_a_wrapper(poi)
# --- Preprocess Vertices ---
if success:
diam_list.append(adap_sampler.compute_diam_a(vertices))
count_good += 1
else:
count_bad += 1
diam_list.append(np.nan)
count += 1
# if count % 100 == 0:
print("Poi count: " + str(count) + "/" + str(len(pois)) + ". Diam: " + str(diam_list[count - 1]))
print("count Bad: " + str(count_bad))
print("count Good: " + str(count_good))
diams: np.ndarray = np.asarray(diam_list)
# print(diams)
return diams
def compute_diams_relative(pois_n_pois_grads, points, grads, knn_param):
pois = pois_n_pois_grads[0]
pois_grads = pois_n_pois_grads[1]
adap_sampler = AdaptiveSampler(points, grads, knn_param)
# print(len(pois))
diam_list: list = []
count = 0
count_good = 0
count_bad = 0
for poi_idx in range(len(pois)):
# for poi in pois:
vertices, success = adap_sampler.compute_a_wrapper(pois[poi_idx])
# --- Preprocess Vertices ---
if success:
diam_list.append(adap_sampler.compute_diam_a(vertices) / (np.linalg.norm(pois_grads[poi_idx]) + 0.00001))
count_good += 1
else:
count_bad += 1
diam_list.append(np.nan)
count += 1
if count % 100 == 0:
print("Poi count: " + str(count) + "/" + str(len(pois)) + ". Diam: " + str(diam_list[count - 1]))
print("count Bad: " + str(count_bad))
print("count Good: " + str(count_good))
diams: np.ndarray = np.asarray(diam_list)
# print(diams)
return diams
def main2():
et = EntropyTools(2)
alpha = [[1.0, 1.1], [1.0, 1.0], [1.1, 1.1]]
alpha_c = et.reconstruct_alpha(tf.constant(alpha))
u = et.reconstruct_u(alpha_c)
u_poi = (u[0] + u[1] + u[2]) / 3
u_poi = u_poi[1:].numpy()
alpha_normal = np.asarray(alpha)
u_normal = u[:, 1:].numpy()
ada_sampler = AdaptiveSampler(points=u_normal, grads=alpha_normal, knn_param=3)
vertices, success = ada_sampler.compute_a_wrapper(u_poi)
plt.plot(vertices[:, 0], vertices[:, 1], '--')
plt.plot(alpha_normal[:, 0], alpha_normal[:, 1], '+')
plt.show()
return 0
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
[u, alpha, h] = load_data("data/1D/Monomial_M2_1D_normal_alpha.csv", 3, [True, True, True])
lim_x_in = (-1, 1) # (-0.5, -0.35)
lim_y_in = (0, 1) # (0.2, 0.3)
def preprocess(u_p, a_p, h_p, lim_x, lim_y):
keep_idx = []
for idx in range(len(u_p)):
if lim_x[0] <= u_p[idx, 1] <= lim_x[1] and lim_y[0] <= u_p[idx, 2] <= lim_y[1]:
keep_idx.append(idx)
return u_p[keep_idx, :], a_p[keep_idx, :], h_p[keep_idx, :]
u, alpha, h = preprocess(u, alpha, h, lim_x=lim_x_in, lim_y=lim_y_in)
t = u[:, 1:]
scatter_plot_2d(x_in=t, z_in=h, lim_x=lim_x_in, lim_y=lim_y_in, lim_z=(-1, 5),
title=r"diam($h$) over ${\mathcal{R}^r}$",
folder_name="delete", name="test", show_fig=False, log=False)
[u_pois, alpha_pois, h_pois] = load_data("data/1D/pois_M2.csv", 3, [True, True, True])
u_pois, alpha_pois, h_pois = preprocess(u_pois, alpha_pois, h_pois, lim_x=lim_x_in, lim_y=lim_y_in)
# u_pois = u_pois[:1000, :]
u_normal = u[:, 1:]
alpha_normal = alpha[:, 1:]
pois = u_pois[:, 1:]
pois_grads = alpha_pois[:, 1:]
# ada_sampler = AdaptiveSampler(points=u_normal, grads=alpha_normal, knn_param=20)
# pois = ada_sampler.compute_pois()
process_count = 24
# split pois across processes
chunk: int = int(len(pois) / process_count)
pois_chunk = []
pois_n_grads_chunk = []
for i in range(process_count - 1):
pois_chunk.append(pois[i * chunk:(i + 1) * chunk])
pois_n_grads_chunk.append([pois[i * chunk:(i + 1) * chunk], pois_grads[i * chunk:(i + 1) * chunk]])
pois_chunk.append(pois[(process_count - 1) * chunk:])
pois_n_grads_chunk.append([pois[(process_count - 1) * chunk:], pois_grads[(process_count - 1) * chunk:]])
with Pool(process_count) as p:
# diams_chunk = p.map(partial(compute_diams, points=u_normal, grads=alpha_normal, knn_param=20),
# pois_chunk)
diams_chunk = p.map(partial(compute_diams_relative, points=u_normal, grads=alpha_normal, knn_param=20),
pois_n_grads_chunk)
# merge the computed chunks
diams: np.ndarray = np.zeros(len(pois))
# print(diams.shape)
count = 0
for diam in diams_chunk:
for d in diam:
diams[count] = d
count += 1
print(len(diams))
# print(diams)
with open('diameters.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(pois[:, 0])  # first coordinate of the points of interest
        writer.writerow(pois[:, 1])  # second coordinate of the points of interest
writer.writerow(diams)
# (x_in: np.ndarray, z_in: np.ndarray, lim_x: tuple = (-1, 1), lim_y: tuple = (0, 1),
#
# name: str = 'defaultName', log: bool = True, folder_name: str = "figures",
# show_fig: bool = False, ):
scatter_plot_2d(x_in=pois, z_in=diams, lim_x=lim_x_in, lim_y=lim_y_in, lim_z=(0.01, 10),
title=r"diam($A_{x^*}$)$/|\alpha|$ over ${\mathcal{R}^r}$",
folder_name="delete", name="diam_A_relative_alpha", show_fig=False, log=True)
return 0
if __name__ == '__main__':
# main2()
main()
| 34.706522
| 117
| 0.599123
|
dff17e5dd1dec23bf1965152bd0bfd8274cdb1bf
| 1,522
|
py
|
Python
|
sns_imagenet.py
|
ByungKwanLee/Causal-Adversarial-Instruments
|
68993508a6beeaae2936aaf0c44da360bb82a597
|
[
"MIT"
] | 18
|
2022-02-16T05:57:45.000Z
|
2022-03-13T07:31:30.000Z
|
sns_imagenet.py
|
ByungKwanLee/Causal-Adversarial-Instruments
|
68993508a6beeaae2936aaf0c44da360bb82a597
|
[
"MIT"
] | null | null | null |
sns_imagenet.py
|
ByungKwanLee/Causal-Adversarial-Instruments
|
68993508a6beeaae2936aaf0c44da360bb82a597
|
[
"MIT"
] | 1
|
2022-02-16T10:22:58.000Z
|
2022-02-16T10:22:58.000Z
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
content = np.array(['FGSM', 'PGD', 'CW '])
imgn_adv = np.array([[0.3894, 0.378, 0.4251]])
imgn_inst = np.array([[0.3603, 0.3517, 0.1487]])
imgn_causal = np.array([[0.5363, 0.5445, 0.5338]])
imgn_treat = np.array([[0.5688, 0.5705, 0.5521]])
# imgn_adv = np.array([[0.3504, 0.3384, 0.3869]])
# imgn_inst = np.array([[0.2466, 0.2368, 0.1664]])
# imgn_causal = np.array([[0.5216, 0.519, 0.5071]])
# imgn_treat = np.array([[0.5777, 0.5805, 0.5235]])
imgn = np.concatenate([imgn_adv, imgn_inst, imgn_causal, imgn_treat])
imgn_df = pd.DataFrame(columns=content, data=imgn)
imgn_ = imgn_df.set_index([["Adv", "CF", "CC", "AC"]])
imgn_df_ = imgn_.stack().reset_index()
imgn_df_.columns = ['', 'Method', 'Accuracy']
matplotlib.rc_file_defaults()
sns.set_style("darkgrid", {'font.family':'serif', 'font.serif':['Times New Roman']})
fig = plt.figure(figsize=(5, 3.7), dpi=600)
b = sns.barplot(x='Accuracy', y='Method', hue='', data=imgn_df_, alpha=0.9, palette="Reds_d", orient='h')
b.legend(loc='upper right', title='', frameon=True, fontsize=7)
b.set(xlim=(0, 0.75))
b.tick_params(labelsize=13)
b.set_yticklabels(labels=b.get_yticklabels(), rotation=90, va='center')
#b.axes.set_title("VGG-16 Network",fontsize=20)
b.set(ylabel=None)
b.set_xlabel("Acc", fontsize=10, loc='right')
plt.setp(b.get_legend().get_texts(), fontsize=13)
plt.tight_layout()
plt.savefig("./causal_imgn_vgg.png")
| 31.708333
| 105
| 0.684625
|
0d0b1c6e6e594bbf421cb198978e2c566c549f4c
| 1,204
|
py
|
Python
|
ESRGAN/tflite_inference.py
|
shareeff/ESRGAN_TF2
|
27a8d9a1ec8094f5bc9d024cead0b8c8f4f2d149
|
[
"MIT"
] | 2
|
2021-09-14T07:24:16.000Z
|
2021-12-31T05:44:32.000Z
|
ESRGAN/tflite_inference.py
|
shareeff/ESRGAN_TF2
|
27a8d9a1ec8094f5bc9d024cead0b8c8f4f2d149
|
[
"MIT"
] | null | null | null |
ESRGAN/tflite_inference.py
|
shareeff/ESRGAN_TF2
|
27a8d9a1ec8094f5bc9d024cead0b8c8f4f2d149
|
[
"MIT"
] | 3
|
2021-09-14T07:13:22.000Z
|
2022-01-25T18:50:14.000Z
|
from PIL import Image
import numpy as np
import tensorflow as tf
from modules.utils import read_image, scale_image_0_1_range
INPUT_SHAPE= [512, 512]
TFLITE_MODEL_PATH = './saved/models/esrgan.tflite'
IMG_PATH = "./images/input_hr/0855.png"
SAVE_IMG_PATH = "./images/results/tflite_0855.png"
def main():
interpreter = tf.lite.Interpreter(model_path=TFLITE_MODEL_PATH)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_index = input_details[0]["index"]
output_index = output_details[0]["index"]
image = read_image(IMG_PATH)
input_image = tf.image.resize(image, INPUT_SHAPE, method=tf.image.ResizeMethod.BICUBIC)
input_image = scale_image_0_1_range(input_image)
input_image = tf.expand_dims(input_image, axis=0)
interpreter.set_tensor(input_index, input_image)
interpreter.invoke()
output = interpreter.get_tensor(output_index)
# Convert output array to image
output_image = (np.squeeze(output, axis=0).clip(0, 1) * 255).astype(np.uint8)
img = Image.fromarray(output_image)
img.save(SAVE_IMG_PATH)
if __name__ == '__main__':
main()
| 25.617021
| 91
| 0.742525
|
196b8bc01dd1acd9c970040807edd0ab089da619
| 32,607
|
py
|
Python
|
trax/rl/ppo_trainer.py
|
Tenoke/trax
|
bbabf6cc8a0682218927080bce33a4f90591aa0b
|
[
"Apache-2.0"
] | 2
|
2020-03-27T17:26:58.000Z
|
2020-03-27T18:45:47.000Z
|
trax/rl/ppo_trainer.py
|
Tenoke/trax
|
bbabf6cc8a0682218927080bce33a4f90591aa0b
|
[
"Apache-2.0"
] | null | null | null |
trax/rl/ppo_trainer.py
|
Tenoke/trax
|
bbabf6cc8a0682218927080bce33a4f90591aa0b
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import time
from absl import logging
import jax
from jax import jit
from jax import numpy as np
from jax import random as jax_random
import numpy as onp
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.envs import trajectory
from trax import jaxboard
from trax import models as trax_models
from trax import optimizers as trax_opt
from trax.rl import base_trainer
from trax.rl import ppo
from trax.rl import serialization_utils
from trax.shapes import ShapeDtype
from trax.supervised import trainer_lib
DEBUG_LOGGING = False
GAMMA = 0.99
LAMBDA = 0.95
EPSILON = 0.1
EPOCHS = 50 # 100
N_OPTIMIZER_STEPS = 100
PRINT_EVERY_OPTIMIZER_STEP = 20
BATCH_TRAJECTORIES = 32
class PPO(base_trainer.BaseTrainer):
"""PPO trainer."""
def __init__(self,
train_env,
eval_env,
output_dir=None,
policy_and_value_model=trax_models.FrameStackMLP,
policy_and_value_optimizer=functools.partial(
trax_opt.Adam, learning_rate=1e-3),
policy_and_value_two_towers=False,
policy_and_value_vocab_size=None,
n_optimizer_steps=N_OPTIMIZER_STEPS,
optimizer_batch_size=64,
print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,
target_kl=0.01,
boundary=20,
max_timestep=100,
max_timestep_eval=20000,
random_seed=None,
gamma=GAMMA,
lambda_=LAMBDA,
value_weight=1.0,
entropy_weight=0.01,
epsilon=0.1,
eval_every_n=1000,
save_every_n=1000,
done_frac_for_policy_save=0.5,
n_evals=1,
len_history_for_policy=4,
eval_temperatures=(1.0, 0.5),
separate_eval=True,
init_policy_from_world_model_output_dir=None,
controller=None,
should_save_checkpoints=True,
should_write_summaries=True,
**kwargs):
"""Creates the PPO trainer.
Args:
train_env: gym.Env to use for training.
eval_env: gym.Env to use for evaluation.
output_dir: Output dir.
policy_and_value_model: Function defining the policy and value network,
without the policy and value heads.
policy_and_value_optimizer: Function defining the optimizer.
policy_and_value_two_towers: Whether to use two separate models as the
policy and value networks. If False, share their parameters.
policy_and_value_vocab_size: Vocabulary size of a policy and value network
operating on serialized representation. If None, use raw continuous
representation.
n_optimizer_steps: Number of optimizer steps.
optimizer_batch_size: Batch size of an optimizer step.
print_every_optimizer_steps: How often to log during the policy
optimization process.
target_kl: Policy iteration early stopping. Set to infinity to disable
early stopping.
boundary: We pad trajectories at integer multiples of this number.
max_timestep: If set to an integer, maximum number of time-steps in a
trajectory. Used in the collect procedure.
max_timestep_eval: If set to an integer, maximum number of time-steps in
an evaluation trajectory. Used in the collect procedure.
random_seed: Random seed.
gamma: Reward discount factor.
lambda_: N-step TD-error discount factor in GAE.
value_weight: Value loss coefficient.
entropy_weight: Entropy loss coefficient.
epsilon: Clipping coefficient.
eval_every_n: How frequently to eval the policy.
save_every_n: How frequently to save the policy.
done_frac_for_policy_save: Fraction of the trajectories that should be
done to checkpoint the policy.
n_evals: Number of times to evaluate.
len_history_for_policy: How much of history to give to the policy.
eval_temperatures: Sequence of temperatures to try for categorical
sampling during evaluation.
separate_eval: Whether to run separate evaluation using a set of
temperatures. If False, the training reward is reported as evaluation
reward with temperature 1.0.
init_policy_from_world_model_output_dir: Model output dir for initializing
the policy. If None, initialize randomly.
controller: Function history -> (step -> {'name': value}) controlling
nontrainable parameters.
should_save_checkpoints: Whether to save policy checkpoints.
should_write_summaries: Whether to save summaries.
**kwargs: Additional keyword arguments passed to the base class.
"""
# Set in base class constructor.
self._train_env = None
self._should_reset = None
self._n_optimizer_steps = n_optimizer_steps
self._optimizer_batch_size = optimizer_batch_size
self._print_every_optimizer_steps = print_every_optimizer_steps
self._target_kl = target_kl
self._boundary = boundary
self._max_timestep = max_timestep
self._max_timestep_eval = max_timestep_eval
self._nontrainable_params = {
'gamma': np.array(gamma),
'lambda': np.array(lambda_),
'value_weight': np.array(value_weight),
'entropy_weight': np.array(entropy_weight),
'epsilon': np.array(epsilon),
}
self._eval_every_n = eval_every_n
self._save_every_n = save_every_n
self._done_frac_for_policy_save = done_frac_for_policy_save
self._n_evals = n_evals
self._len_history_for_policy = len_history_for_policy
self._eval_temperatures = eval_temperatures
self._separate_eval = separate_eval
self._controller = controller
self._should_save_checkpoints = should_save_checkpoints
self._should_write_summaries = should_write_summaries
self._history = None
(n_controls, n_actions) = ppo.analyze_action_space(train_env.action_space)
self._rng = trainer_lib.init_random_number_generators(random_seed)
self._policy_and_value_vocab_size = policy_and_value_vocab_size
if self._policy_and_value_vocab_size is not None:
self._serialization_kwargs = ppo.init_serialization(
vocab_size=self._policy_and_value_vocab_size,
observation_space=train_env.observation_space,
action_space=train_env.action_space,
n_timesteps=(self._max_timestep + 1),
)
else:
self._serialization_kwargs = {}
self.init_policy_from_world_model_output_dir = (
init_policy_from_world_model_output_dir
)
self._rewards_to_actions = ppo.init_rewards_to_actions(
self._policy_and_value_vocab_size,
train_env.observation_space,
train_env.action_space,
n_timesteps=(self._max_timestep + 1),
)
self._policy_and_value_net_fn = functools.partial(
ppo.policy_and_value_net,
n_actions=n_actions,
n_controls=n_controls,
vocab_size=self._policy_and_value_vocab_size,
bottom_layers_fn=policy_and_value_model,
two_towers=policy_and_value_two_towers,
)
self._policy_and_value_net_apply = jit(self._policy_and_value_net_fn())
self._policy_and_value_optimizer = policy_and_value_optimizer()
# Super ctor calls reset(), which uses fields initialized above.
super(PPO, self).__init__(train_env, eval_env, output_dir, **kwargs)
def reset(self, output_dir):
super(PPO, self).reset(output_dir)
# Initialize the policy and value network.
# Create the network again to avoid caching of parameters.
policy_and_value_net = self._policy_and_value_net_fn()
(batch_obs_shape, obs_dtype) = self._batch_obs_shape_and_dtype(
self.train_env.observation_space
)
self._rng, _ = jax_random.split(self._rng)
input_signature = ShapeDtype(batch_obs_shape, obs_dtype)
policy_and_value_net_params, self._model_state = (
policy_and_value_net.init(input_signature))
if self.init_policy_from_world_model_output_dir is not None:
policy_and_value_net_params = ppo.init_policy_from_world_model_checkpoint(
policy_and_value_net_params,
self.init_policy_from_world_model_output_dir,
)
# Initialize the optimizer.
(init_slots, init_opt_params) = (
self._policy_and_value_optimizer.tree_init(policy_and_value_net_params)
)
self._policy_and_value_opt_state = (
policy_and_value_net_params, init_slots, init_opt_params
)
# Restore the optimizer state.
self._epoch = 0
self._total_opt_step = 0
self.update_optimization_state(output_dir)
# Create summary writers and history.
if self._should_write_summaries:
self._train_sw = jaxboard.SummaryWriter(
os.path.join(self._output_dir, 'train'))
self._timing_sw = jaxboard.SummaryWriter(
os.path.join(self._output_dir, 'timing'))
self._eval_sw = jaxboard.SummaryWriter(
os.path.join(self._output_dir, 'eval'))
self._n_trajectories_done = 0
self._last_saved_at = 0
if self._async_mode:
logging.info('Saving model on startup to have a model policy file.')
self.save()
def _batch_obs_shape_and_dtype(self, obs_space):
if self._policy_and_value_vocab_size is None:
# Batch Observations Shape = [1, 1] + OBS, because we will eventually call
      # policy and value networks on shape [B, T] + OBS
shape = (1, 1) + obs_space.shape
dtype = obs_space.dtype
else:
shape = (1, 1)
dtype = np.int32
return (shape, dtype)
def _policy_and_value_opt_update(self, step, grads, opt_state):
(params, slots, opt_params) = opt_state
(params, slots) = self._policy_and_value_optimizer.tree_update(
step, grads, params, slots, opt_params
)
return (params, slots, opt_params)
# Maybe restore the optimization state. If there is nothing to restore, then
# epoch = 0 and policy_and_value_opt_state is returned as is.
def update_optimization_state(self, output_dir):
(self._policy_and_value_opt_state, self._model_state, self._epoch,
self._total_opt_step, self._history) = ppo.maybe_restore_opt_state(
output_dir, self._policy_and_value_opt_state, self._model_state)
if self._epoch > 0:
logging.info('Restored parameters from epoch [%d]', self._epoch)
@property
def train_env(self):
return self._train_env
@train_env.setter
def train_env(self, new_train_env):
if self._train_env is not None:
def assert_same_space(space1, space2):
assert space1.shape == space2.shape
assert space1.dtype == space2.dtype
assert_same_space(new_train_env.observation_space,
self._train_env.observation_space)
assert_same_space(new_train_env.action_space,
self._train_env.action_space)
# We don't check the reward range, because PPO will work either way.
self._train_env = new_train_env
self._should_reset = True
@property
def epoch(self):
return self._epoch
@property
def history(self):
return self._history
def collect_trajectories_async(self,
env,
train=True,
n_trajectories=1,
temperature=1.0):
"""Collects trajectories in an async manner."""
assert self._async_mode
# trajectories/train and trajectories/eval are the two subdirectories.
trajectory_dir = os.path.join(self._output_dir, 'trajectories',
'train' if train else 'eval')
epoch = self.epoch
logging.info(
'Loading [%s] trajectories from dir [%s] for epoch [%s] and temperature'
' [%s]', n_trajectories, trajectory_dir, epoch, temperature)
bt = trajectory.BatchTrajectory.load_from_directory(
trajectory_dir,
epoch=epoch,
temperature=temperature,
wait_forever=True,
n_trajectories=n_trajectories)
if bt is None:
logging.error(
'Couldn\'t load [%s] trajectories from dir [%s] for epoch [%s] and '
'temperature [%s]', n_trajectories, trajectory_dir, epoch,
temperature)
assert bt
# Doing this is important, since we want to modify `env` so that it looks
# like `env` was actually played and the trajectories came from it.
env.trajectories = bt
trajs = env_problem_utils.get_completed_trajectories_from_env(
env, n_trajectories)
n_done = len(trajs)
timing_info = {}
return trajs, n_done, timing_info, self._model_state
def collect_trajectories(self,
train=True,
temperature=1.0,
abort_fn=None,
raw_trajectory=False):
self._rng, key = jax_random.split(self._rng)
env = self.train_env
max_timestep = self._max_timestep
should_reset = self._should_reset
if not train: # eval
env = self.eval_env
max_timestep = self._max_timestep_eval
should_reset = True
n_trajectories = env.batch_size
# If async, read the required trajectories for the epoch.
if self._async_mode:
trajs, n_done, timing_info, self._model_state = self.collect_trajectories_async(
env,
train=train,
n_trajectories=n_trajectories,
temperature=temperature)
else:
trajs, n_done, timing_info, self._model_state = ppo.collect_trajectories(
env,
policy_fn=self._policy_fun,
n_trajectories=n_trajectories,
max_timestep=max_timestep,
state=self._model_state,
rng=key,
len_history_for_policy=self._len_history_for_policy,
boundary=self._boundary,
reset=should_reset,
temperature=temperature,
abort_fn=abort_fn,
raw_trajectory=raw_trajectory,
)
if train:
self._n_trajectories_done += n_done
return trajs, n_done, timing_info, self._model_state
def train_epoch(self, evaluate=True):
"""Train one PPO epoch."""
epoch_start_time = time.time()
# Evaluate the policy.
policy_eval_start_time = time.time()
if evaluate and (self._epoch + 1) % self._eval_every_n == 0:
self._rng, key = jax_random.split(self._rng, num=2)
self.evaluate()
policy_eval_time = ppo.get_time(policy_eval_start_time)
trajectory_collection_start_time = time.time()
logging.vlog(1, 'PPO epoch [% 6d]: collecting trajectories.', self._epoch)
self._rng, key = jax_random.split(self._rng)
trajs, _, timing_info, self._model_state = self.collect_trajectories(
train=True, temperature=1.0)
trajs = [(t[0], t[1], t[2], t[4]) for t in trajs]
self._should_reset = False
trajectory_collection_time = ppo.get_time(trajectory_collection_start_time)
logging.vlog(1, 'Collecting trajectories took %0.2f msec.',
trajectory_collection_time)
rewards = np.array([np.sum(traj[2]) for traj in trajs])
avg_reward = np.mean(rewards)
std_reward = np.std(rewards)
max_reward = np.max(rewards)
min_reward = np.min(rewards)
self._log('train', 'train/reward_mean_truncated', avg_reward)
if evaluate and not self._separate_eval:
metrics = {'raw': {1.0: {'mean': avg_reward, 'std': std_reward}}}
ppo.write_eval_reward_summaries(metrics, self._log, self._epoch)
logging.vlog(1, 'Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s',
avg_reward, max_reward, min_reward,
[float(np.sum(traj[2])) for traj in trajs])
logging.vlog(1,
'Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]',
float(sum(len(traj[0]) for traj in trajs)) / len(trajs),
max(len(traj[0]) for traj in trajs),
min(len(traj[0]) for traj in trajs))
logging.vlog(2, 'Trajectory Lengths: %s', [len(traj[0]) for traj in trajs])
preprocessing_start_time = time.time()
(padded_observations, padded_actions, padded_rewards, reward_mask,
padded_infos) = self._preprocess_trajectories(trajs)
preprocessing_time = ppo.get_time(preprocessing_start_time)
logging.vlog(1, 'Preprocessing trajectories took %0.2f msec.',
ppo.get_time(preprocessing_start_time))
logging.vlog(1, 'Padded Observations\' shape [%s]',
str(padded_observations.shape))
logging.vlog(1, 'Padded Actions\' shape [%s]', str(padded_actions.shape))
logging.vlog(1, 'Padded Rewards\' shape [%s]', str(padded_rewards.shape))
# Some assertions.
B, RT = padded_rewards.shape # pylint: disable=invalid-name
B, AT = padded_actions.shape # pylint: disable=invalid-name
assert (B, RT) == reward_mask.shape
assert B == padded_observations.shape[0]
log_prob_recompute_start_time = time.time()
# TODO(pkozakowski): The following commented out code collects the network
# predictions made while stepping the environment and uses them in PPO
# training, so that we can use non-deterministic networks (e.g. with
# dropout). This does not work well with serialization, so instead we
# recompute all network predictions. Let's figure out a solution that will
# work with both serialized sequences and non-deterministic networks.
# assert ('log_prob_actions' in padded_infos and
# 'value_predictions' in padded_infos)
# These are the actual log-probabs and value predictions seen while picking
# the actions.
# actual_log_probabs_traj = padded_infos['log_prob_actions']
# actual_value_predictions_traj = padded_infos['value_predictions']
# assert (B, T, C) == actual_log_probabs_traj.shape[:3]
# A = actual_log_probabs_traj.shape[3] # pylint: disable=invalid-name
# assert (B, T, 1) == actual_value_predictions_traj.shape
del padded_infos
# TODO(afrozm): log-probabs doesn't need to be (B, T+1, C, A) it can do with
# (B, T, C, A), so make that change throughout.
# NOTE: We don't have the log-probabs and value-predictions for the last
# observation, so we re-calculate for everything, but use the original ones
# for all but the last time-step.
self._rng, key = jax_random.split(self._rng)
(log_probabs_traj, value_predictions_traj) = (
self._policy_and_value_net_apply(
padded_observations,
weights=self._policy_and_value_net_weights,
state=self._model_state,
rng=key,
)
)
assert (B, AT) == log_probabs_traj.shape[:2]
assert (B, AT) == value_predictions_traj.shape
# TODO(pkozakowski): Commented out for the same reason as before.
# Concatenate the last time-step's log-probabs and value predictions to the
# actual log-probabs and value predictions and use those going forward.
# log_probabs_traj = np.concatenate(
# (actual_log_probabs_traj, log_probabs_traj[:, -1:, :]), axis=1)
# value_predictions_traj = np.concatenate(
# (actual_value_predictions_traj, value_predictions_traj[:, -1:, :]),
# axis=1)
log_prob_recompute_time = ppo.get_time(log_prob_recompute_start_time)
# Compute value and ppo losses.
self._rng, key1 = jax_random.split(self._rng, num=2)
logging.vlog(2, 'Starting to compute P&V loss.')
loss_compute_start_time = time.time()
(cur_combined_loss, component_losses, summaries, self._model_state) = (
ppo.combined_loss(
self._policy_and_value_net_weights,
log_probabs_traj,
value_predictions_traj,
self._policy_and_value_net_apply,
padded_observations,
padded_actions,
self._rewards_to_actions,
padded_rewards,
reward_mask,
nontrainable_params=self._nontrainable_params,
state=self._model_state,
rng=key1))
loss_compute_time = ppo.get_time(loss_compute_start_time)
(cur_ppo_loss, cur_value_loss, cur_entropy_bonus) = component_losses
logging.vlog(
1,
'Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.',
cur_combined_loss, cur_ppo_loss, cur_value_loss, cur_entropy_bonus,
ppo.get_time(loss_compute_start_time))
self._rng, key1 = jax_random.split(self._rng, num=2)
logging.vlog(1, 'Policy and Value Optimization')
optimization_start_time = time.time()
keys = jax_random.split(key1, num=self._n_optimizer_steps)
opt_step = 0
opt_batch_size = min(self._optimizer_batch_size, B)
index_batches = ppo.shuffled_index_batches(
dataset_size=B, batch_size=opt_batch_size
)
for (index_batch, key) in zip(index_batches, keys):
k1, k2, k3 = jax_random.split(key, num=3)
t = time.time()
# Update the optimizer state on the sampled minibatch.
self._policy_and_value_opt_state, self._model_state = (
ppo.policy_and_value_opt_step(
# We pass the optimizer slots between PPO epochs, so we need to
# pass the optimization step as well, so for example the
# bias-correction in Adam is calculated properly. Alternatively we
# could reset the slots and the step in every PPO epoch, but then
# the moment estimates in adaptive optimizers would never have
# enough time to warm up. So it makes sense to reuse the slots,
# even though we're optimizing a different loss in every new
# epoch.
self._total_opt_step,
self._policy_and_value_opt_state,
self._policy_and_value_opt_update,
self._policy_and_value_get_params,
self._policy_and_value_net_apply,
log_probabs_traj[index_batch],
value_predictions_traj[index_batch],
padded_observations[index_batch],
padded_actions[index_batch],
self._rewards_to_actions,
padded_rewards[index_batch],
reward_mask[index_batch],
nontrainable_params=self._nontrainable_params,
state=self._model_state,
rng=k1))
opt_step += 1
self._total_opt_step += 1
# Compute the approx KL for early stopping. Use the whole dataset - as we
# only do inference, it should fit in the memory.
(log_probab_actions_new, _) = (
self._policy_and_value_net_apply(
padded_observations,
weights=self._policy_and_value_net_weights,
state=self._model_state,
rng=k2))
action_mask = np.dot(
np.pad(reward_mask, ((0, 0), (0, 1))), self._rewards_to_actions
)
approx_kl = ppo.approximate_kl(log_probab_actions_new, log_probabs_traj,
action_mask)
early_stopping = approx_kl > 1.5 * self._target_kl
if early_stopping:
logging.vlog(
1, 'Early stopping policy and value optimization after %d steps, '
'with approx_kl: %0.2f', opt_step, approx_kl)
# We don't return right-away, we want the below to execute on the last
# iteration.
t2 = time.time()
if (opt_step % self._print_every_optimizer_steps == 0 or
opt_step == self._n_optimizer_steps or early_stopping):
# Compute and log the loss.
(combined_loss, component_losses, _, self._model_state) = (
ppo.combined_loss(
self._policy_and_value_net_weights,
log_probabs_traj,
value_predictions_traj,
self._policy_and_value_net_apply,
padded_observations,
padded_actions,
self._rewards_to_actions,
padded_rewards,
reward_mask,
nontrainable_params=self._nontrainable_params,
state=self._model_state,
rng=k3))
logging.vlog(1, 'One Policy and Value grad desc took: %0.2f msec',
ppo.get_time(t, t2))
(ppo_loss, value_loss, entropy_bonus) = component_losses
logging.vlog(
1, 'Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->'
' [%10.2f(%10.2f,%10.2f,%10.2f)]', cur_combined_loss, combined_loss,
ppo_loss, value_loss, entropy_bonus)
if early_stopping:
break
optimization_time = ppo.get_time(optimization_start_time)
logging.vlog(
1, 'Total Combined Loss reduction [%0.2f]%%',
(100 * (cur_combined_loss - combined_loss) / np.abs(cur_combined_loss)))
summaries.update({
'n_optimizer_steps': opt_step,
'approx_kl': approx_kl,
})
for (name, value) in summaries.items():
self._log('train', 'train/{}'.format(name), value)
logging.info(
'PPO epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined'
' Loss(ppo, value, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]', self._epoch,
min_reward, max_reward, avg_reward, combined_loss, ppo_loss, value_loss,
entropy_bonus)
# Bump the epoch counter before saving a checkpoint, so that a call to
# save() after the training loop is a no-op if a checkpoint was saved last
# epoch - otherwise it would bump the epoch counter on the checkpoint.
last_epoch = self._epoch
self._epoch += 1
# Save parameters every time we see the end of at least a fraction of batch
# number of trajectories that are done (not completed -- completed includes
# truncated and done).
# Also don't save too frequently, enforce a minimum gap.
policy_save_start_time = time.time()
# TODO(afrozm): Refactor to trax.save_trainer_state.
if (self._n_trajectories_done >=
self._done_frac_for_policy_save * self.train_env.batch_size and
self._epoch % self._save_every_n == 0) or self._async_mode:
self.save()
policy_save_time = ppo.get_time(policy_save_start_time)
epoch_time = ppo.get_time(epoch_start_time)
timing_dict = {
'epoch': epoch_time,
'policy_eval': policy_eval_time,
'trajectory_collection': trajectory_collection_time,
'preprocessing': preprocessing_time,
'log_prob_recompute': log_prob_recompute_time,
'loss_compute': loss_compute_time,
'optimization': optimization_time,
'policy_save': policy_save_time,
}
timing_dict.update(timing_info)
if self._should_write_summaries:
for k, v in timing_dict.items():
self._timing_sw.scalar('timing/%s' % k, v, step=last_epoch)
max_key_len = max(len(k) for k in timing_dict)
timing_info_list = [
'%s : % 10.2f' % (k.rjust(max_key_len + 1), v)
for k, v in sorted(timing_dict.items())
]
logging.info('PPO epoch [% 6d], Timings: \n%s', last_epoch,
'\n'.join(timing_info_list))
# Flush summary writers once in a while.
if self._epoch % 1000 == 0:
self.flush_summaries()
def evaluate(self):
"""Evaluate the agent."""
if not self._separate_eval:
return
logging.vlog(1, 'PPO epoch [% 6d]: evaluating policy.', self._epoch)
if self._controller is not None:
ntp_updates = self._controller(self._history)(self._epoch)
self._nontrainable_params.update(ntp_updates)
(_, _, opt_params) = self._policy_and_value_opt_state
opt_params.update(ntp_updates)
for (name, value) in self._nontrainable_params.items():
self._log('train', 'training/{}'.format(name), value)
processed_reward_sums = collections.defaultdict(list)
raw_reward_sums = collections.defaultdict(list)
for _ in range(self._n_evals):
for temperature in self._eval_temperatures:
trajs, _, _, self._model_state = self.collect_trajectories(
train=False, temperature=temperature)
processed_reward_sums[temperature].extend(
sum(traj[2]) for traj in trajs)
raw_reward_sums[temperature].extend(sum(traj[3]) for traj in trajs)
# Return the mean and standard deviation for each temperature.
def compute_stats(reward_dict):
return {
temperature: { # pylint: disable=g-complex-comprehension
'mean': onp.mean(rewards),
'std': onp.std(rewards)
} for (temperature, rewards) in reward_dict.items()
}
reward_stats = {
'processed': compute_stats(processed_reward_sums),
'raw': compute_stats(raw_reward_sums),
}
ppo.write_eval_reward_summaries(reward_stats, self._log, epoch=self._epoch)
def save(self):
"""Save the agent parameters."""
if not self._should_save_checkpoints:
return
logging.vlog(1, 'PPO epoch [% 6d]: saving model.', self._epoch)
ppo.save_opt_state(
self._output_dir,
self._policy_and_value_opt_state,
self._model_state,
self._epoch,
self._total_opt_step,
self._history,
)
# Reset this number.
self._n_trajectories_done = 0
self._last_saved_at = self._epoch
def flush_summaries(self):
if self._should_write_summaries:
self._train_sw.flush()
self._timing_sw.flush()
self._eval_sw.flush()
def _log(self, mode, metric, value):
if self._should_write_summaries:
summary_writer = {
'train': self._train_sw,
'eval': self._eval_sw,
}[mode]
summary_writer.scalar(metric, value, step=self._epoch)
self._history.append(mode, metric, self._epoch, value)
def _policy_and_value_get_params(self, opt_state):
# (params, slots, opt_params)
(params, _, _) = opt_state
return params
@property
def _policy_and_value_net_weights(self):
return self._policy_and_value_get_params(self._policy_and_value_opt_state)
# Prepares the trajectories for policy training.
def _preprocess_trajectories(self, trajectories):
(_, reward_mask, observations, actions, rewards, infos) = (
ppo.pad_trajectories(trajectories, boundary=self._max_timestep)
)
(low, high) = self.train_env.reward_range
outside = np.logical_or(rewards < low, rewards > high)
rewards = jax.ops.index_update(rewards, jax.ops.index[outside], 0)
assert self.train_env.observation_space.shape == observations.shape[2:]
if self._policy_and_value_vocab_size is None:
# Add one timestep at the end, so it's compatible with
# self._rewards_to_actions.
pad_width = ((0, 0), (0, 1)) + ((0, 0),) * (actions.ndim - 2)
actions = np.pad(actions, pad_width)
actions = np.reshape(actions, (actions.shape[0], -1))
else:
(observations, actions) = self._serialize_trajectories(
observations, actions, reward_mask
)
return (observations, actions, rewards, reward_mask, infos)
def _serialize_trajectories(self, observations, actions, reward_mask):
(reprs, _) = serialization_utils.serialize_observations_and_actions(
observations=observations,
actions=actions,
mask=reward_mask,
**self._serialization_kwargs
)
# Mask out actions in the representation - otherwise we sample an action
# based on itself.
observations = reprs * serialization_utils.observation_mask(
**self._serialization_kwargs
)
actions = reprs
return (observations, actions)
def _policy_fun(self, observations, lengths, state, rng):
return ppo.run_policy(
self._policy_and_value_net_apply,
observations,
lengths,
self._policy_and_value_net_weights,
state,
rng,
self._policy_and_value_vocab_size,
self.train_env.observation_space,
self.train_env.action_space,
self._rewards_to_actions,
)
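# --- Illustrative sketch (not part of the original module) ---
# Rough driver loop using only methods defined in this class. `make_env()` is a
# hypothetical factory returning a batched EnvProblem-style environment, and the
# output directory is likewise an assumption; kept as comments because running
# it needs real environments.
#
# trainer = PPO(
#     train_env=make_env(batch_size=16),
#     eval_env=make_env(batch_size=4),
#     output_dir='/tmp/ppo_example',
# )
# for _ in range(10):
#     trainer.train_epoch(evaluate=True)
# trainer.save()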
| 38.864124
| 86
| 0.673659
|
30adabcebb9a2be7be7bc89be8aaebbd9903928e
| 2,256
|
py
|
Python
|
python_modules/libraries/dagster-aws/dagster_aws/emr/solids.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | 3
|
2020-09-09T04:10:23.000Z
|
2021-11-08T02:10:42.000Z
|
python_modules/libraries/dagster-aws/dagster_aws/emr/solids.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | 2
|
2021-05-11T13:36:27.000Z
|
2021-09-03T01:53:11.000Z
|
python_modules/libraries/dagster-aws/dagster_aws/emr/solids.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | 1
|
2021-02-21T12:16:47.000Z
|
2021-02-21T12:16:47.000Z
|
import math
import time
from dagster import (
Field,
InputDefinition,
Nothing,
Output,
OutputDefinition,
SolidDefinition,
String,
check,
)
from .configs import define_emr_run_job_flow_config
from .emr import EmrJobRunner
from .types import EMR_CLUSTER_DONE_STATES, EmrClusterState
_START = 'start'
# wait at most 24 hours by default for cluster job completion
_DEFAULT_CLUSTER_MAX_WAIT_TIME_SEC = 24 * 60 * 60
class EmrRunJobFlowSolidDefinition(SolidDefinition):
def __init__(
self,
name,
description=None,
max_wait_time_sec=_DEFAULT_CLUSTER_MAX_WAIT_TIME_SEC,
poll_interval_sec=5,
):
name = check.str_param(name, 'name')
description = check.opt_str_param(description, 'description', 'EMR create job flow solid.')
def _compute_fn(context, _):
job_runner = EmrJobRunner(region=context.solid_config.get('aws_region'))
cluster_id = job_runner.run_job_flow(context, context.solid_config['job_config'])
context.log.info('waiting for EMR cluster job flow completion...')
max_iter = int(math.ceil(max_wait_time_sec / float(poll_interval_sec)))
done = False
curr_iter = 0
while not done and curr_iter < max_iter:
# This will take a while... cluster creation usually > 5 minutes
time.sleep(poll_interval_sec)
cluster = job_runner.describe_cluster(cluster_id)['Cluster']
context.log.info(
'EMR cluster %s state: %s' % (cluster_id, cluster['Status']['State'])
)
done = EmrClusterState(cluster['Status']['State']) in EMR_CLUSTER_DONE_STATES
curr_iter += 1
yield Output(cluster_id)
super(EmrRunJobFlowSolidDefinition, self).__init__(
name=name,
description=description,
input_defs=[InputDefinition(_START, Nothing)],
output_defs=[OutputDefinition(String)],
compute_fn=_compute_fn,
config={
'aws_region': Field(String, is_required=False),
'job_config': define_emr_run_job_flow_config(),
},
)
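# --- Illustrative sketch (not part of the original module) ---
# Constructing the solid; the name and polling interval below are arbitrary
# example values. Wiring it into a pipeline and supplying the `aws_region` /
# `job_config` run config is left to the caller.
#
# emr_solid = EmrRunJobFlowSolidDefinition(
#     name='run_emr_job_flow',
#     poll_interval_sec=30,
# )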
| 31.774648
| 99
| 0.630319
|
7144ab9f7a0fe68a3f517a8f1b7cb8a7a06c4ec4
| 18,067
|
py
|
Python
|
baseline_gslstm_modelf.py
|
aurtg/nary-relation-extraction-decomposed
|
0217238af3e36af0884767c0e0ed4beda4ed2368
|
[
"MIT"
] | 6
|
2019-12-17T02:49:05.000Z
|
2021-11-27T04:30:35.000Z
|
baseline_gslstm_modelf.py
|
aurtg/nary-relation-extraction-decomposed
|
0217238af3e36af0884767c0e0ed4beda4ed2368
|
[
"MIT"
] | null | null | null |
baseline_gslstm_modelf.py
|
aurtg/nary-relation-extraction-decomposed
|
0217238af3e36af0884767c0e0ed4beda4ed2368
|
[
"MIT"
] | 2
|
2020-06-05T02:26:44.000Z
|
2020-07-25T10:19:38.000Z
|
import sys
from argparse import Namespace, ArgumentParser
from collections import defaultdict
from logging import getLogger, FileHandler, INFO
import datetime
from functools import reduce
from copy import deepcopy
import _pickle as pic
import time
import numpy as np
from sklearn.utils import shuffle
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
from torch_scatter import scatter_mean, scatter_max, scatter_add
from util import *
from model import *
import warnings
warnings.simplefilter('error')
class GS_LSTM(torch.nn.Module):
u"""
Graph State LSTM.
"""
def __init__(self, dim_link_emb, dim_token_emb, dim_x, dim_h, aggr="add"):
super(GS_LSTM, self).__init__()
self.link_linear = nn.Sequential(
nn.Linear(dim_link_emb + dim_token_emb, dim_x),
nn.Tanh()
)
self.gate_i = nn.Sequential(
nn.Linear(dim_x*2 + dim_h*2, dim_h),
nn.Sigmoid()
)
self.gate_o = nn.Sequential(
nn.Linear(dim_x*2 + dim_h*2, dim_h),
nn.Sigmoid()
)
self.gate_f = nn.Sequential(
nn.Linear(dim_x*2 + dim_h*2, dim_h),
nn.Sigmoid()
)
self.gate_u = nn.Sequential(
nn.Linear(dim_x*2 + dim_h*2, dim_h),
nn.Tanh()
)
self.aggr = aggr
def forward(self, h_node, c_node, e_link, e_token, i_from, i_to):
u"""
Args:
h_node (FloatTensor) : Input hidden state of nodes.
e_link (FloatTensor) : Embedding of each link. (n_link x dim_link)
e_token (FloatTensor) : Embedding of each token. (n_token,)
i_from (LongTensor) : Indices of source nodes of links. (n_link,)
i_to (LongTensor) : Indices of target nodes of links. (n_link,)
Returns:
x, h
x (FloatTensor) : Input for LSTM cell.
h (FloatTensor) : Hidden state for LSTM cell.
"""
link_x = self.link_linear(torch.cat([e_link, e_token[i_from]], dim=1))
x_in = scatter_add(link_x, i_to, dim=0)
x_out = scatter_add(link_x, i_from, dim=0)
h_in = scatter_add(h_node[i_from], i_to, dim=0)
h_out = scatter_add(h_node[i_to], i_from, dim=0)
inp = torch.cat([x_in, x_out, h_in, h_out], dim=1)
i = self.gate_i(inp)
o = self.gate_o(inp)
f = self.gate_f(inp)
u = self.gate_u(inp)
_c_node = f * c_node + i * u
_h_node = o * torch.tanh(_c_node)
return _h_node, _c_node
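# --- Illustrative sketch (not part of the original module) ---
# Tiny shape check for one GS-LSTM step on a 3-node toy graph; all sizes below
# are arbitrary example values. Edges are listed in both directions so the
# scatter outputs for sources and targets cover the same number of nodes, as
# in a typical document graph. The helper is not invoked anywhere.
def _gslstm_shape_demo():
    dim_link, dim_tok, dim_x, dim_h = 3, 8, 16, 16
    cell = GS_LSTM(dim_link, dim_tok, dim_x, dim_h)
    h = torch.zeros(3, dim_h)
    c = torch.zeros(3, dim_h)
    e_link = torch.randn(4, dim_link)
    e_token = torch.randn(3, dim_tok)
    i_from = torch.LongTensor([0, 1, 1, 2])
    i_to = torch.LongTensor([1, 0, 2, 1])
    h, c = cell(h, c, e_link, e_token, i_from, i_to)
    # One step keeps the per-node state shapes unchanged.
    assert h.shape == (3, dim_h) and c.shape == (3, dim_h)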
class DocumentGraphEncoder(nn.Module):
def __init__(self, n_token, n_link_label, dim_embs, node_dropout, n_layers):
super(DocumentGraphEncoder, self).__init__()
self.dim_embs = dim_embs
self.n_layers = n_layers
# Embeddings
self.emb_token = nn.Embedding(n_token, dim_embs["word"])
nn.init.normal_(self.emb_token.weight, std=1/dim_embs["word"]**0.5)
self.emb_link_label = nn.Embedding(n_link_label, dim_embs["link_label"])
nn.init.normal_(self.emb_link_label.weight, std=1/dim_embs["link_label"])
# Compress word vectors.
self.compress = nn.Sequential(
nn.Linear(dim_embs["word"], dim_embs["node"]),
nn.Tanh()
)
self.dropout = nn.Dropout(p=node_dropout)
# GS-LSTM module
self.gslstm = GS_LSTM(
dim_link_emb = dim_embs["link_label"],
dim_token_emb = dim_embs["node"],
dim_x = dim_embs["state"],
dim_h = dim_embs["state"]
)
def forward(self, i_token, i_link, i_from, i_to):
u"""
Args:
i_token (LongTensor) : Token indices of each node in the document graph.
i_link (LongTensor) : Edge label indices of each edge in the document graph.
i_from (LongTensor) : Start point indices of each edge.
i_to (LongTensor) : End point indices of each edge.
Return:
h_node (FloatTensor) : Hidden representations of each node in given document graph.
"""
## Node embedding.
word_emb = self.emb_token(i_token)
node_emb = self.compress(word_emb)
node_emb = self.dropout(node_emb)
## Edge embedding.
edge_emb = self.emb_link_label(i_link)
## GS-LSTM
# initial states (n_node x dim_state)
h_node = node_emb.new_zeros((i_token.size(0), self.dim_embs["state"]))
c_node = node_emb.new_zeros((i_token.size(0), self.dim_embs["state"]))
for i_layer in range(self.n_layers):
h_node, c_node = self.gslstm(h_node, c_node, edge_emb, node_emb, i_from, i_to)
# h_node = word_emb.new_ones((i_token.size(0), self.dim_embs["state"]))
# mean_word_emb = torch.mean(word_emb, dim=0)[:self.dim_embs["state"]]
# h_node = h_node * mean_word_emb.unsqueeze(0)
return h_node
# NOTE: dim_embs["rel"] should be arity * dim_embs["state"]
class Model(nn.Module):
def __init__(self, n_rel, n_tup, n_token, n_link_label, dim_embs, node_dropout, n_layers, score_dropout):
super(Model, self).__init__()
self.dim_embs = dim_embs
## Encoders
# for cannonicalized KB relations.
self.rel_encoder = nn.Embedding(n_rel, dim_embs["rel"])
nn.init.normal_(self.rel_encoder.weight, std=1/dim_embs["rel"]**0.5)
# for surface pattern (document graph).
self.dg_encoder = DocumentGraphEncoder(
n_token, n_link_label, dim_embs, node_dropout, n_layers
)
# for entity tuples.
self.tup_encoder = nn.Embedding(n_tup, dim_embs["rel"])
nn.init.normal_(self.tup_encoder.weight, std=1/dim_embs["rel"]**0.5)
## Dropout
self.score_dropout = nn.Dropout(p=score_dropout)
def normalize(self):
with torch.no_grad():
self.tup_encoder.weight.div_(torch.norm(self.tup_encoder.weight, dim=1, keepdim=True))
def apply_word_vectors(self, word_vectors, i2t):
n_exist = 0
with torch.no_grad():
            for i in range(len(i2t)):
token = i2t[i]
if token in word_vectors:
n_exist += 1
self.dg_encoder.emb_token.weight[i] = torch.FloatTensor(word_vectors[token])
print("{} out of {} tokens are initialized with word vectors".format(n_exist, len(i2t)))
def encode_relations(self, relations):
device = next(self.parameters()).device
orig_ind_symb = []
symb_rels = []
orig_ind_surf = []
surf_rels = []
for i_rel, rel in enumerate(relations):
if isinstance(rel, tuple):
surf_rels.append(rel)
orig_ind_surf.append(i_rel)
elif isinstance(rel, int):
symb_rels.append(rel)
orig_ind_symb.append(i_rel)
# Encode cannonicalized KB relations.
if len(symb_rels) > 0:
emb_symb_rels = self.encode_symbolic_relations(symb_rels)
# Encode surface patterns.
if len(surf_rels) > 0:
emb_surf_rels = self.encode_document_graphs(surf_rels)
# Merge results.
out = torch.zeros(len(relations), self.dim_embs["rel"]).to(device)
if len(symb_rels) > 0:
orig_ind_symb = torch.LongTensor(orig_ind_symb).to(device)
out[orig_ind_symb] = emb_symb_rels
if len(surf_rels) > 0:
orig_ind_surf = torch.LongTensor(orig_ind_surf).to(device)
out[orig_ind_surf] = emb_surf_rels
return out
def encode_entity_tuples(self, tups):
device = next(self.parameters()).device
tups = torch.LongTensor(tups).to(device)
out = self.tup_encoder(tups)
return out
def encode_symbolic_relations(self, rels):
device = next(self.parameters()).device
rels = torch.LongTensor(rels).to(device)
return self.rel_encoder(rels)
def encode_document_graphs(self, doc_graphs):
device = next(self.parameters()).device
arity = len(doc_graphs[0][-1])
# Merge all document graphs into a single big graph.
global_nodes = []
global_edges = []
global_i_from = []
global_i_to = []
entity_indices = []
belonging_entities = []
i_entity = 0
for nodes, edges, i_from, i_to, pos in doc_graphs:
node_ind_offset = len(global_nodes)
global_nodes += list(nodes)
global_edges += list(edges)
global_i_from += map(lambda ind: ind+node_ind_offset, i_from)
global_i_to += map(lambda ind: ind+node_ind_offset, i_to)
assert len(pos) == arity, "Illegal number of entities: {}. It should be {}.".format(len(pos), arity)
for i_ent, inds in enumerate(pos):
entity_indices += map(lambda ind: ind+node_ind_offset, inds)
belonging_entities += [i_entity] * len(inds)
i_entity += 1
# Encode merged document graph.
global_nodes = torch.LongTensor(global_nodes).to(device)
global_edges = torch.LongTensor(global_edges).to(device)
global_i_from = torch.LongTensor(global_i_from).to(device)
global_i_to = torch.LongTensor(global_i_to).to(device)
h_node = self.dg_encoder(global_nodes, global_edges, global_i_from, global_i_to)
# Calculate entity representations.
entity_indices = torch.LongTensor(entity_indices).to(device)
belonging_entities = torch.LongTensor(belonging_entities).to(device)
ent_h_node = h_node[entity_indices]
ent_reps = scatter_mean(ent_h_node, belonging_entities, dim=0, dim_size=arity*len(doc_graphs))
ent_reps = ent_reps.view(len(doc_graphs), -1)
return ent_reps
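# --- Illustrative sketch (not part of the original module) ---
# Scoring in this model is a plain dot product between an entity-tuple
# embedding and a relation embedding (symbolic relation index or document
# graph), as used in train() and eval_MAP() below. The tuple/relation indices
# here are arbitrary example values; kept as comments.
#
#   emb_tups = model.encode_entity_tuples([0, 1])    # (2, dim_rel)
#   emb_rels = model.encode_relations([0, 1])        # (2, dim_rel)
#   scores = torch.sum(emb_tups * emb_rels, dim=1)   # (2,)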
def train(model, optimizer, train_tuples, train_lookup, indmap, args):
    def search_negative(fact):
        n_pairs = len(indmap.i2p)
        while True:
            _, relation = fact
new_pair = np.random.randint(n_pairs)
if (new_pair, relation) in train_lookup:
continue
else:
return new_pair
model.train()
s_batch = args.bs
n_batch = len(train_tuples) // s_batch
train_tuples = shuffle(train_tuples)
print("batch size: {}\tbatch num: {}".format(s_batch, n_batch))
logsoftmax = nn.LogSoftmax(dim=1)
for i_batch in range(n_batch):
sys.stdout.write("Processing Batch {}/{}\r".format(i_batch, n_batch))
sys.stdout.flush()
start = i_batch * s_batch
end = (i_batch + 1) * s_batch
optimizer.zero_grad()
batch_p_facts = train_tuples[start:end]
tups = []
n_fact = len(batch_p_facts)
for i_fact in range(n_fact):
fact = batch_p_facts[i_fact]
tups.append(fact[0])
for _ in range(args.K):
tups.append(search_negative(fact))
relations = [fact[1] for fact in batch_p_facts]
emb_tups = model.encode_entity_tuples(tups)
emb_tups = emb_tups.view(n_fact, args.K+1, emb_tups.size(1))
# (n_fact x K+1 x dim_emb)
emb_relations = model.encode_relations(relations) # (n_fact x dim_emb)
scores = torch.sum(model.score_dropout(emb_tups) * emb_relations.unsqueeze(1), dim=2)
# (n_fact x K+1)
loss = - logsoftmax(scores)[:,0]
loss = torch.mean(loss)
loss.backward()
optimizer.step()
if args.normalize:
model.normalize()
def eval_MAP(model, items, indmap, arities, args):
u"""Calculate MAP of each relation types."""
logger = getLogger("main")
n_predicate = len(arities)
model.eval()
device = next(model.parameters()).device
emb_relations = model.encode_relations(range(len(arities)))
keys = [p for p in items.keys()]
ent_tups = [indmap.p2i[p] for p in keys]
y_vec = [indmap.r2i[items[p]["relation"]] if items[p]["relation"] in indmap.r2i else -1 for p in keys]
s_batch = args.bs
n_batch = int(np.ceil(len(ent_tups) / s_batch))
scores = []
for i_batch in range(n_batch):
sys.stdout.write("{}/{}\r".format(i_batch, n_batch))
sys.stdout.flush()
start = i_batch * s_batch
end = (i_batch + 1) * s_batch
emb_ent_tups = model.encode_entity_tuples(ent_tups[start:end])
score = torch.sum(
model.score_dropout(emb_ent_tups).unsqueeze(1) * emb_relations.unsqueeze(0), dim=2
) # (n_ent_tups x n_relations)
scores += score.tolist()
scores = np.array(scores)
all_precisions = []
MAPs = []
for i_r in range(n_predicate):
score_y = sorted(
list(zip(scores[:,i_r], np.random.uniform(size=scores.shape[0]), y_vec, keys)),
reverse=True
)
n_all = 0
n_pos = 0
all_pos = []
logs = []
for score, _, y, key in score_y:
n_all += 1
logs.append((score, y, key))
if y==i_r:
n_pos += 1
all_pos.append((n_all, n_pos))
recalls = [_pos/n_pos for _all, _pos in all_pos]
precisions = [_pos/_all for _all,_pos in all_pos]
all_precisions += precisions
print("MAP for predicate {}: {}".format(i_r, np.mean(precisions)))
logger.info("MAP for predicate {}: {}".format(i_r, np.mean(precisions)))
MAPs.append(np.mean(precisions))
return np.mean(all_precisions), MAPs
if __name__=="__main__":
parser = ArgumentParser()
parser.add_argument("--epoch", type=int, default=50)
parser.add_argument("--decay", type=float, default=1e-8)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--bs", type=int, default=50)
parser.add_argument("--K", type=int, default=5)
parser.add_argument("--dropout", type=float, default=0.1)
parser.add_argument("--dim_state", type=int, default=100)
parser.add_argument("--dim_node", type=int, default=100)
parser.add_argument("--dim_link", type=int, default=3)
parser.add_argument("--dim_word", type=int, default=300)
parser.add_argument("--init_wordvec", action="store_true")
parser.add_argument("--node_dropout", type=float, default=0.0)
parser.add_argument("--n_layers", type=int, default=5)
parser.add_argument("--normalize", action="store_true")
parser.add_argument("--label_ratio", type=float, default=1.0)
parser.add_argument("--sparse_ratio", type=float, default=1.0)
parser.add_argument("--suffix", type=str, default="tmp")
parser.add_argument("--exp_number", type=int, default=0)
parser.add_argument("--gpu", type=int, default=-1)
parser.add_argument("--data", type=str, default="wiki.data.json")
args = parser.parse_args()
logger = getLogger("main")
logger.setLevel(INFO)
handler = FileHandler("logs/ExpLog_{}_{}.log".format(args.suffix, args.exp_number))
handler.setLevel(INFO)
logger.addHandler(handler)
# Load Data.
items, indmap, observed_tuples, arities = load_data(args.data)
if args.init_wordvec:
word_vectors = load_word_vector(args.dim_word)
# reduce label
observed_tuples = filter_data(observed_tuples, len(arities), args.label_ratio,
args.sparse_ratio)
train_lookup = set(observed_tuples)
#
device = "cpu" if args.gpu < 0 else "cuda:{}".format(args.gpu)
dim_embs = {
"word": args.dim_word,
"link_label": args.dim_link,
"node": args.dim_node,
"state": args.dim_state,
"rel": args.dim_state * arities[0]
}
model = Model(
n_rel = len(arities),
n_tup = len(indmap.i2p),
n_token = len(indmap.i2t),
n_link_label = len(indmap.i2e),
dim_embs = dim_embs,
node_dropout = args.node_dropout,
n_layers = args.n_layers,
score_dropout = args.dropout
)
if args.init_wordvec:
model.apply_word_vectors(word_vectors, indmap.i2t)
model.to(device)
optimizer = optim.Adam(model.parameters(), lr = args.lr, weight_decay = args.decay)
best_MAP_dev = -1.0
for i_epoch in range(args.epoch):
print("EPOCH: {}".format(i_epoch))
logger.info("EPOCH: {}".format(i_epoch))
print("training...")
train(model, optimizer, observed_tuples, train_lookup, indmap, args)
print("evaluating...")
with torch.no_grad():
print("train")
eval_MAP(model, items["train"], indmap, arities, args)
print("dev")
logger.info("dev")
MAP_dev, MAPs_dev = eval_MAP(model, items["dev"], indmap, arities, args)
if MAP_dev > best_MAP_dev:
print("new best model")
logger.info("new best model: {} -> {}".format(best_MAP_dev, MAP_dev))
best_MAP_dev = MAP_dev
with torch.no_grad():
print("test")
logger.info("test")
MAP_test, MAPs_test = eval_MAP(model, items["test"], indmap, arities, args)
else:
MAP_test = -1.0
MAPs_test = [-1.0] * len(arities)
print("(MAP)\tdev:{}\ttest:{}".format(MAP_dev, MAP_test))
logger.info("(MAP)\tdev:{}\ttest:{}".format(MAP_dev, MAP_test))
logger.info("(MAPs)\t{}\t{}".format(MAPs_dev, MAPs_test))
logger.info("best model dev: {}".format(best_MAP_dev))
| 34.347909
| 113
| 0.593845
|
e2da04ac2a1448ba1572ac3cd8a2dc42a2556711
| 1,076
|
py
|
Python
|
src/python/T0/WMBS/Oracle/SMNotification/InsertOfflineFileStatus.py
|
silviodonato/T0
|
a093729d08b31175ed35cd20e889bd7094ce152a
|
[
"Apache-2.0"
] | 6
|
2016-03-09T14:36:19.000Z
|
2021-07-27T01:28:00.000Z
|
src/python/T0/WMBS/Oracle/SMNotification/InsertOfflineFileStatus.py
|
silviodonato/T0
|
a093729d08b31175ed35cd20e889bd7094ce152a
|
[
"Apache-2.0"
] | 193
|
2015-01-07T21:03:43.000Z
|
2022-03-31T12:22:18.000Z
|
src/python/T0/WMBS/Oracle/SMNotification/InsertOfflineFileStatus.py
|
silviodonato/T0
|
a093729d08b31175ed35cd20e889bd7094ce152a
|
[
"Apache-2.0"
] | 36
|
2015-01-28T19:01:54.000Z
|
2021-12-15T17:18:20.000Z
|
"""
_InsertOfflineFileStatus_
Oracle implementation of InsertOfflineFileStatus
"""
from WMCore.Database.DBFormatter import DBFormatter
class InsertOfflineFileStatus(DBFormatter):
def execute(self, binds, conn = None, transaction = False):
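        # Insert a row for this P5 file id only if none exists yet; DUP_VAL_ON_INDEX
        # is swallowed so concurrent inserts of the same file do not raise.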
sql = """DECLARE
cnt NUMBER(1);
BEGIN
SELECT COUNT(*)
INTO cnt
FROM file_transfer_status_offline
WHERE p5_fileid = :P5_ID
;
IF (cnt = 0)
THEN
INSERT INTO file_transfer_status_offline
(P5_FILEID, FILENAME, T0_CHECKED_TIME, CHECKED_RETRIEVE)
VALUES(:P5_ID, :FILENAME, CURRENT_TIMESTAMP, 1)
;
END IF;
EXCEPTION
WHEN DUP_VAL_ON_INDEX THEN NULL;
END;
"""
self.dbi.processData(sql, binds, conn = conn,
transaction = transaction)
return
| 28.315789
| 77
| 0.494424
|
544898a7453d38bc1d42eb97bab569c519b90790
| 88
|
py
|
Python
|
cars_ke/apps.py
|
ahmed14-cell/cars
|
d9ec9784b416f945eb137d4dbf2875eee9db294d
|
[
"MIT"
] | null | null | null |
cars_ke/apps.py
|
ahmed14-cell/cars
|
d9ec9784b416f945eb137d4dbf2875eee9db294d
|
[
"MIT"
] | 5
|
2021-03-20T04:21:13.000Z
|
2021-03-20T04:22:01.000Z
|
cars_ke/apps.py
|
ahmed-gaal/cars
|
d9ec9784b416f945eb137d4dbf2875eee9db294d
|
[
"MIT"
] | 1
|
2020-09-01T12:33:41.000Z
|
2020-09-01T12:33:41.000Z
|
from django.apps import AppConfig
class CarsKeConfig(AppConfig):
name = 'cars_ke'
| 14.666667
| 33
| 0.75
|
95e690ad17aa75b004b178cba82d2ab0ad70ecc9
| 1,888
|
py
|
Python
|
SharedCode/blob_helper.py
|
office-for-students/beta-data-pipelines
|
36b90c7720e0be0f807d93a31cf7346522b6e1f0
|
[
"MIT"
] | 2
|
2019-06-04T14:15:16.000Z
|
2019-08-04T15:26:16.000Z
|
SharedCode/blob_helper.py
|
office-for-students/beta-data-pipelines
|
36b90c7720e0be0f807d93a31cf7346522b6e1f0
|
[
"MIT"
] | 3
|
2019-06-24T12:21:10.000Z
|
2019-07-22T11:15:27.000Z
|
SharedCode/blob_helper.py
|
office-for-students/beta-data-pipelines
|
36b90c7720e0be0f807d93a31cf7346522b6e1f0
|
[
"MIT"
] | 1
|
2019-09-26T19:29:15.000Z
|
2019-09-26T19:29:15.000Z
|
import gzip
import io
import os
from datetime import datetime
from azure.storage.blob import BlockBlobService
class BlobHelper:
def __init__(self, blob=None):
account_name = os.environ["AzureStorageAccountName"]
account_key = os.environ["AzureStorageAccountKey"]
self.blob_service = BlockBlobService(
account_name=account_name, account_key=account_key
)
self.blob = blob
def create_output_blob(self, destination_container_name):
source_url = os.environ["StorageUrl"] + self.blob.name
destination_blob_name = self.get_destination_blob_name()
self.blob_service.copy_blob(
container_name=destination_container_name,
blob_name=destination_blob_name,
copy_source=source_url,
)
def get_destination_blob_name(self):
blob_filename = self.blob.name.split("/")[1]
datetime_str = datetime.today().strftime("%Y%m%d-%H%M%S")
return f"{datetime_str}-{blob_filename}"
def get_str_file(self, storage_container_name, storage_blob_name):
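        # Download the blob into an in-memory buffer, gunzip it and decode with
        # utf-8-sig so any BOM is stripped.
        # Example usage (hypothetical container/blob names):
        #   helper = BlobHelper(blob)
        #   text = helper.get_str_file("raw-ingest", "latest.csv.gz")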
compressed_file = io.BytesIO()
self.blob_service.get_blob_to_stream(
storage_container_name,
storage_blob_name,
compressed_file,
max_connections=1
)
compressed_file.seek(0)
compressed_gzip = gzip.GzipFile(fileobj=compressed_file)
decompressed_file = compressed_gzip.read()
compressed_file.close()
compressed_gzip.close()
file_string = decompressed_file.decode("utf-8-sig")
return file_string
def write_stream_file(self, storage_container_name, storage_blob_name, encoded_file):
self.blob_service.create_blob_from_bytes(
storage_container_name,
storage_blob_name,
encoded_file,
max_connections=1
)
| 29.968254
| 89
| 0.67214
|
d70108d4fd05d5dde7fa005f7450167095063a9b
| 1,974
|
py
|
Python
|
tests/trb_italian.py
|
friendly-traceback/friendly-traceback
|
4f6785f14c271a4d6412ef19c140f9d380cdbcbf
|
[
"MIT"
] | 45
|
2021-07-06T03:30:20.000Z
|
2022-03-16T17:30:58.000Z
|
tests/trb_italian.py
|
friendly-traceback/friendly-traceback
|
4f6785f14c271a4d6412ef19c140f9d380cdbcbf
|
[
"MIT"
] | 110
|
2021-06-28T11:48:46.000Z
|
2022-03-25T20:41:25.000Z
|
tests/trb_italian.py
|
friendly-traceback/friendly-traceback
|
4f6785f14c271a4d6412ef19c140f9d380cdbcbf
|
[
"MIT"
] | 4
|
2021-07-05T20:56:39.000Z
|
2021-11-11T20:24:34.000Z
|
"""Creates a version of traceback_es.rst to insert in the documentation.
"""
# When creating a new translation, you need to:
# 1. Make a copy of this file
# 2. Change the value of LANG as well as 'intro_text' so that they reflect the
# appropriate language
# 3. Change the first line of this file so that the name of the rst file
# is correct!
import os
import sys
import platform
this_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(this_dir, ".."))
import friendly_traceback
# Make it possible to find docs and tests source
docs_root_dir = os.path.abspath(
os.path.join(this_dir, "..", "..", "docs")
)
assert os.path.isdir(docs_root_dir), "Separate docs repo need to exist"
sys.path.append(os.path.join(this_dir, ".."))
LANG = "it"
friendly_traceback.install()
friendly_traceback.set_lang(LANG)
friendly_traceback.set_formatter("docs")
sys.path.insert(0, this_dir)
py_version = f"{sys.version_info.major}.{sys.version_info.minor}"
import trb_common
target = os.path.normpath(
os.path.join(docs_root_dir, f"source/tracebacks_{LANG}.rst")
)
intro_text = """
Friendly tracebacks - in italiano
======================================
Friendly aims to provide friendlier feedback when an exception
is raised than what is done by Python.
Below, we can find some examples. SyntaxError cases, as well as TabError and
IndentationError cases, are shown in a separate page.
Not all cases handled by friendly are included here.
.. note::
The content of this page is generated by running
`{name}` located in the ``tests/`` directory.
This needs to be done explicitly, independently of updating the
documentation using Sphinx.
Friendly-traceback version: {friendly}
Python version: {python}
""".format(
friendly=friendly_traceback.__version__,
python=platform.python_version(),
name=sys.argv[0],
)
print(f"Python version: {platform.python_version()}; Italian")
trb_common.create_tracebacks(target, intro_text)
| 27.802817
| 78
| 0.731003
|
b5633ce8738b8357e977c4cdd145125c8973c18c
| 273
|
py
|
Python
|
hydra/markets/viabtc_bch_cny.py
|
louis-cai/yourquant
|
6aeb0cbc825fb2b89c8c5d0c81c319b32701edd0
|
[
"Unlicense"
] | null | null | null |
hydra/markets/viabtc_bch_cny.py
|
louis-cai/yourquant
|
6aeb0cbc825fb2b89c8c5d0c81c319b32701edd0
|
[
"Unlicense"
] | null | null | null |
hydra/markets/viabtc_bch_cny.py
|
louis-cai/yourquant
|
6aeb0cbc825fb2b89c8c5d0c81c319b32701edd0
|
[
"Unlicense"
] | null | null | null |
# Copyright (C) 2017, Philsong <songbohr@gmail.com>
from ._viabtc import Viabtc
class Viabtc_BCH_CNY(Viabtc):
def __init__(self):
super().__init__("CNY", "BCH", "bcccny")
if __name__ == "__main__":
market = Viabtc_BCH_CNY()
print(market.get_ticker())
| 24.818182
| 51
| 0.677656
|
493a058b071e10afea610c82bdc7052880e29d3e
| 489
|
py
|
Python
|
coggers/register.py
|
restinya/Barkeep
|
9d6a7f47bc8e2bc3cda1ba2992a02a85d06efa7e
|
[
"MIT"
] | null | null | null |
coggers/register.py
|
restinya/Barkeep
|
9d6a7f47bc8e2bc3cda1ba2992a02a85d06efa7e
|
[
"MIT"
] | null | null | null |
coggers/register.py
|
restinya/Barkeep
|
9d6a7f47bc8e2bc3cda1ba2992a02a85d06efa7e
|
[
"MIT"
] | null | null | null |
import discord
import asyncio
import requests
import re
from discord.utils import get
from discord.ext import commands
from math import floor
from configs.settings import command_prefix
from utils import accessDB, point_buy, alpha_emojis, db, VerboseMDStringifier, traceBack, checkForChar
class Register(commands.Cog):
def __init__ (self, bot):
self.bot = bot
@commands.group(aliases=['r'], case_insensitive=True)
async def reward(self, ctx):
pass
| 24.45
| 102
| 0.744376
|
f88582fadcdc00fe8eb804c3fd0431a3c0a46a68
| 410
|
py
|
Python
|
exercicios/exercicio022.py
|
Helton-Rubens/Python-3
|
eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7
|
[
"MIT"
] | null | null | null |
exercicios/exercicio022.py
|
Helton-Rubens/Python-3
|
eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7
|
[
"MIT"
] | null | null | null |
exercicios/exercicio022.py
|
Helton-Rubens/Python-3
|
eb6d5ee71bcb2a2a80de4eaea942bd0c41d846b7
|
[
"MIT"
] | null | null | null |
nome = str(input('Seu nome: ')).strip()
print('Analisando seu nome...')
print('Seu nome com letras maiúsculas: {}'.format(nome.upper()))
print('Seu nome com letras minúsculas: {}'.format(nome.lower()))
print('{} letras ao todo no seu nome.'.format(len(nome) - nome.count(' ')))
# the line below could also be done with ".format(nome.find(' '))"
print('O seu primeiro nome tem {} letras.'.format(len(nome.split()[0])))
| 58.571429
| 75
| 0.670732
|
0c09c5f6837e64665a4ad88cc6e78b09e4242987
| 1,127
|
py
|
Python
|
jasmin/routing/content.py
|
balsagoth/jasmin
|
53d55f6af8c0d5faca51849e5953452a0dd93452
|
[
"Apache-2.0"
] | null | null | null |
jasmin/routing/content.py
|
balsagoth/jasmin
|
53d55f6af8c0d5faca51849e5953452a0dd93452
|
[
"Apache-2.0"
] | null | null | null |
jasmin/routing/content.py
|
balsagoth/jasmin
|
53d55f6af8c0d5faca51849e5953452a0dd93452
|
[
"Apache-2.0"
] | null | null | null |
import cPickle as pickle
from txamqp.content import Content
class PDU(Content):
pickleProtocol = pickle.HIGHEST_PROTOCOL
def pickle(self, data):
return pickle.dumps(data, self.pickleProtocol)
def __init__(self, body="", children=None, properties=None, pickleProtocol=2):
self.pickleProtocol = pickleProtocol
body = self.pickle(body)
Content.__init__(self, body, children, properties)
class RoutedDeliverSmContent(PDU):
def __init__(self, deliver_sm, msgid, scid, dcs, route_type='simple', trycount=0, pickleProtocol=2):
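        # The deliver_sm PDU is pickled into the AMQP message body by PDU.__init__;
        # the destination connectors are pickled into a header so the routing data
        # survives the trip through the broker.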
props = {}
if type(dcs) != list:
# Since #467 this class must accept a list of destination connector(s)
# this test/conversion is done to preserve backward compatibility
dcs = [dcs]
props['message-id'] = msgid
props['headers'] = {
'route-type': route_type,
'src-connector-id': scid,
'dst-connectors': self.pickle(dcs),
'try-count': trycount}
PDU.__init__(self, deliver_sm, properties=props, pickleProtocol=pickleProtocol)
| 31.305556
| 104
| 0.645963
|
3cce0b538ae2d5377b9554b8a8ad16de300b3f20
| 1,310
|
py
|
Python
|
write_embeddings_summary.py
|
sungjae-cho/arithmetic-jordan-net
|
3eeb9cfa0fdcae9d8655aaa5d6112a91e201ec5f
|
[
"MIT"
] | 1
|
2019-03-05T14:07:03.000Z
|
2019-03-05T14:07:03.000Z
|
write_embeddings_summary.py
|
sungjae-cho/arithmetic-jordan-net
|
3eeb9cfa0fdcae9d8655aaa5d6112a91e201ec5f
|
[
"MIT"
] | null | null | null |
write_embeddings_summary.py
|
sungjae-cho/arithmetic-jordan-net
|
3eeb9cfa0fdcae9d8655aaa5d6112a91e201ec5f
|
[
"MIT"
] | 1
|
2019-09-07T16:35:41.000Z
|
2019-09-07T16:35:41.000Z
|
'''
This file archives deprecated code.
'''
import os

import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

# NOTE: the project-local names used below (config, data_utils, experiment_name,
# operand_bits, operator, inputs, condition_tlu) are assumed to be defined
# elsewhere in the original code base; they are not importable from here.
def write_embeddings_summary(sess, h1):
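    # Sketch of the TensorBoard projector flow: collect the carry-dataset inputs,
    # write their labels to metadata.tsv, run the network to get the hidden
    # activations h1, checkpoint them as a tf.Variable and point the projector
    # config at that checkpoint.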
# Reference: https://stackoverflow.com/questions/40849116/how-to-use-tensorboard-embedding-projector
dir_logs = os.path.join(config.dir_saved_models(), experiment_name)
metadata = os.path.join(dir_logs, 'metadata.tsv')
carry_datasets = data_utils.import_carry_datasets(operand_bits, operator)
input_arrays = list()
with open(metadata, 'w') as f:
for carries in carry_datasets.keys():
input_arrays.append(carry_datasets[carries]['input'])
f.write('{}\n'.format(carries))
carry_inputs = np.concatenate(input_arrays, axis=0)
[h1_val] = sess.run([h1],
feed_dict={inputs:carry_inputs,
condition_tlu:False})
h1_var = tf.Variable(h1_val, name='h1_var')
saver = tf.train.Saver([h1_var])
sess.run(h1_var.initializer)
saver.save(sess, os.path.join(dir_logs, 'h1_var.ckpt'))
pconfig = projector.ProjectorConfig()
pconfig.model_checkpoint_path = os.path.join(dir_logs, 'h1_var.ckpt')
embedding = pconfig.embeddings.add()
embedding.tensor_name = h1_var.name
embedding.metadata_path = metadata
projector.visualize_embeddings(tf.summary.FileWriter(dir_logs), pconfig)
| 39.69697
| 105
| 0.685496
|
a5eb23ac820077da6b9a687d04fe74c62ef25613
| 244
|
py
|
Python
|
networkapi/api_task/urls.py
|
vinicius-marinho/GloboNetworkAPI
|
94651d3b4dd180769bc40ec966814f3427ccfb5b
|
[
"Apache-2.0"
] | 73
|
2015-04-13T17:56:11.000Z
|
2022-03-24T06:13:07.000Z
|
networkapi/api_task/urls.py
|
leopoldomauricio/GloboNetworkAPI
|
3b5b2e336d9eb53b2c113977bfe466b23a50aa29
|
[
"Apache-2.0"
] | 99
|
2015-04-03T01:04:46.000Z
|
2021-10-03T23:24:48.000Z
|
networkapi/api_task/urls.py
|
shildenbrand/GloboNetworkAPI
|
515d5e961456cee657c08c275faa1b69b7452719
|
[
"Apache-2.0"
] | 64
|
2015-08-05T21:26:29.000Z
|
2022-03-22T01:06:28.000Z
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns
from django.conf.urls import url
from networkapi.api_task import views
urlpatterns = patterns(
'',
url(r'^v3/task/(?P<task_id>[\w\d\-\.]+)/$', views.TaskView.as_view()),
)
| 22.181818
| 74
| 0.659836
|
9a4d61b4c436761ff6069be2e39ac836e18b0130
| 1,540
|
py
|
Python
|
tests/regressions/python/942_lazy_fmap.py
|
NanmiaoWu/phylanx
|
295b5f82cc39925a0d53e77ba3b6d02a65204535
|
[
"BSL-1.0"
] | 83
|
2017-08-27T15:09:13.000Z
|
2022-01-18T17:03:41.000Z
|
tests/regressions/python/942_lazy_fmap.py
|
NanmiaoWu/phylanx
|
295b5f82cc39925a0d53e77ba3b6d02a65204535
|
[
"BSL-1.0"
] | 808
|
2017-08-27T15:35:01.000Z
|
2021-12-14T17:30:50.000Z
|
tests/regressions/python/942_lazy_fmap.py
|
NanmiaoWu/phylanx
|
295b5f82cc39925a0d53e77ba3b6d02a65204535
|
[
"BSL-1.0"
] | 55
|
2017-08-27T15:09:22.000Z
|
2022-03-25T12:07:34.000Z
|
# Copyright (c) 2019 Bita Hasheminezhad
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# #942: `fold_left`, `fold_right` and `fmap` do not work with a lazy function
import numpy as np
from phylanx import Phylanx, PhylanxSession, execution_tree
PhylanxSession.init(1)
def variable(value, dtype=None, name=None, constraint=None):
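    # Return a phylanx execution_tree.variable for `value`; if `value` is already
    # one, its dtype/name are updated and the same object is reused.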
if dtype is None:
dtype = "float32"
if constraint is not None:
raise TypeError("Constraint is the projection function to be "
"applied to the variable after an optimizer update")
if isinstance(value, execution_tree.variable):
if dtype is not None:
value.dtype = dtype
if name is not None:
value.name = name
return value
return execution_tree.variable(value, dtype=dtype, name=name)
def eval(func):
return func.eval()
def fmap(fn, elems):
pass # make flake happy
@Phylanx
def map_fn_eager(fn, elems, dtype=None):
return fmap(fn, elems)
def map_fn(fn, elems, dtype=None):
return map_fn_eager.lazy(fn, elems, dtype)
@Phylanx
def sum_eager(x, axis=None, keepdims=False):
return np.sum(x, axis, keepdims)
sum = Phylanx.lazy(sum_eager)
def test_map(x):
return eval(map_fn(sum, variable(x)))
result = test_map(np.array([[1, 2, 3]]))
assert(np.all(result == [6])), result
result = test_map(np.array([1, 2, 3]))
assert(np.all(result == [1, 2, 3])), result
| 24.0625
| 79
| 0.670779
|
de89865ffb2bbf14bedf2bdafbd7d598037f497d
| 49,966
|
py
|
Python
|
tests/fixtures.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
tests/fixtures.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
tests/fixtures.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
from django.conf import settings
# pylint:disable=line-too-long
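# Captured Kubernetes Pod/Service specs for the master/worker/ps tasks of an
# experiment, used below as test fixture data.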
start_experiment_value = {
'master':
{'pod': {'api_version': 'v1', 'kind': 'Pod',
'metadata': {'annotations': None, 'cluster_name': None,
'deletion_grace_period_seconds': None,
'deletion_timestamp': None, 'finalizers': None,
'generate_name': None, 'generation': None,
'initializers': None,
'labels': {'experiment': 'piko/project1/1',
'project_name': 'piko/project1',
'role': 'polyaxon-workers',
'experiment_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'task_idx': '0',
'task_type': 'master',
'type': 'polyaxon-experiment'},
'name': 'project1-id1-spec1-xp1-master0',
'namespace': 'polyaxon',
'owner_references': None,
'resource_version': '204744',
'self_link': '/api/v1/namespaces/polyaxon/pods/project1-id1-spec1-xp1-master0',
'uid': '09c436d4-d87b-11e7-8ab8-1273d6911587'},
'spec': {'active_deadline_seconds': None, 'affinity': None,
'automount_service_account_token': None,
'containers': [{'args': [
'from polyaxon.polyaxonfile.local_runner import start_experiment_run; start_experiment_run(\'{"version": 1, "project": {"name": "project1"}, "declarations": {"lr": 1.023292992280754}, "environment": {"n_workers": 1, "n_ps": 1, "delay_workers_by_global_step": true, "run_config": {"save_summary_steps": 100, "save_checkpoints_steps": 100}}, "settings": {"logging": {"level": "INFO", "path": "/tmp/plx/logs/project1"}}, "model": {"classifier": {"loss": {"SigmoidCrossEntropy": null}, "optimizer": {"Adam": {"learning_rate": 1.02329299228}}, "metrics": ["Accuracy", "Precision"], "one_hot_encode": true, "n_classes": 10, "graph": {"input_layers": ["image"], "layers": [{"Conv2D": {"filters": 32, "kernel_size": 3, "strides": 1, "activation": "elu", "regularizer": {"L2": {"l": 0.02}}, "name": "Conv2D_1", "inbound_nodes": ["image"]}}, {"MaxPooling2D": {"pool_size": 2, "name": "MaxPooling2D_1", "inbound_nodes": ["Conv2D_1"]}}, {"Conv2D": {"filters": 64, "kernel_size": 3, "activation": "relu", "regularizer": {"L2": {"l": 0.02}}, "name": "Conv2D_2", "inbound_nodes": ["MaxPooling2D_1"]}}, {"MaxPooling2D": {"pool_size": 2, "name": "MaxPooling2D_2", "inbound_nodes": ["Conv2D_2"]}}, {"Flatten": {"name": "Flatten_1", "inbound_nodes": ["MaxPooling2D_2"]}}, {"Dense": {"units": 128, "activation": "tanh", "name": "Dense_1", "inbound_nodes": ["Flatten_1"]}}, {"Dropout": {"rate": 0.8, "name": "Dropout_1", "inbound_nodes": ["Dense_1"]}}, {"Dense": {"units": 256, "activation": "tanh", "name": "Dense_2", "inbound_nodes": ["Dropout_1"]}}, {"Dropout": {"rate": 0.8, "name": "Dropout_2", "inbound_nodes": ["Dense_2"]}}, {"Dense": {"units": 10, "name": "Dense_3", "inbound_nodes": ["Dropout_2"]}}], "output_layers": ["Dense_3"]}}}, "train": {"steps": 1000, "data_pipeline": {"TFRecordImagePipeline": {"batch_size": 64, "num_epochs": 5, "shuffle": true, "data_files": ["/plx/data/mnist/mnist_train.tfrecord"], "meta_data_file": "/plx/data/mnist/meta_data.json", "feature_processors": {"image": {"input_layers": ["image"], "layers": [{"Cast": {"dtype": "float32", "name": "Cast_1", "inbound_nodes": ["image"]}}], "output_layers": ["Cast_1"]}}}}}, "eval": {"data_pipeline": {"TFRecordImagePipeline": {"batch_size": 32, "num_epochs": 1, "shuffle": false, "data_files": ["/plx/data/mnist/mnist_eval.tfrecord"], "meta_data_file": "/plx/data/mnist/meta_data.json", "feature_processors": {"image": {"input_layers": ["image"], "layers": [{"Cast": {"dtype": "float32", "name": "Cast_1", "inbound_nodes": ["image"]}}], "output_layers": ["Cast_1"]}}}}}}\', \'0\', \'master\', 0, \'train_and_evaluate\')'],
'command': ['python3', '-c'],
'env': [{
'name': 'CM_project1_id1_spec1_xp1_cluster_master',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'master',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}},
{
'name': 'CM_project1_id1_spec1_xp1_cluster_worker',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'worker',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}},
{
'name': 'CM_project1_id1_spec1_xp1_cluster_ps',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'ps',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}}],
'env_from': None,
'image': 'polyaxon/polyaxon-lib:0.0.1',
'image_pull_policy': 'IfNotPresent',
'lifecycle': None,
'liveness_probe': None,
'name': 'polyaxon-experiment-job', 'ports': [
{'container_port': 2222, 'host_ip': None,
'host_port': None, 'name': None,
'protocol': 'TCP'}],
'readiness_probe': None,
'resources': {'limits': None,
'requests': None},
'security_context': None,
'stdin': None, 'stdin_once': None,
'termination_message_path': '/dev/termination-log',
'termination_message_policy': 'File',
'tty': None, 'volume_mounts': [{
'mount_path': '/var/run/secrets/kubernetes.io/serviceaccount',
'name': 'default-token-28d2n',
'read_only': True,
'sub_path': None}],
'working_dir': None}],
'dns_policy': 'ClusterFirst', 'host_aliases': None,
'host_ipc': None, 'host_network': None,
'host_pid': None, 'hostname': None,
'image_pull_secrets': None, 'init_containers': None,
'node_name': None, 'node_selector': None,
'restart_policy': 'Never',
'scheduler_name': 'default-scheduler',
'security_context': {'fs_group': None,
'run_as_non_root': None,
'run_as_user': None,
'se_linux_options': None,
'supplemental_groups': None},
'service_account': 'default',
'service_account_name': 'default',
'subdomain': None,
'termination_grace_period_seconds': 30,
'tolerations': None, 'volumes': [
{'aws_elastic_block_store': None, 'azure_disk': None,
'azure_file': None, 'cephfs': None, 'cinder': None,
'config_map': None, 'downward_api': None,
'empty_dir': None, 'fc': None, 'flex_volume': None,
'flocker': None, 'gce_persistent_disk': None,
'git_repo': None, 'glusterfs': None,
'host_path': None, 'iscsi': None,
'name': 'default-token-28d2n', 'nfs': None,
'persistent_volume_claim': None,
'photon_persistent_disk': None,
'portworx_volume': None, 'projected': None,
'quobyte': None, 'rbd': None, 'scale_io': None,
'secret': {'default_mode': 420, 'items': None,
'optional': None,
'secret_name': 'default-token-28d2n'},
'storageos': None, 'vsphere_volume': None}]},
'status': {'conditions': None, 'container_statuses': None,
'host_ip': None, 'init_container_statuses': None,
'message': None, 'phase': 'Pending',
'pod_ip': None, 'qos_class': 'BestEffort',
'reason': None, 'start_time': None}},
'service': {'api_version': 'v1', 'kind': 'Service',
'metadata': {'annotations': None, 'cluster_name': None,
'deletion_grace_period_seconds': None,
'deletion_timestamp': None,
'finalizers': None, 'generate_name': None,
'generation': None, 'initializers': None,
'labels': {'experiment': '1',
'project': 'project1-id1-spec1',
'role': 'polyaxon-workers',
'task': 'project1-id1-spec1-xp1-master0',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'task_idx': '0',
'task_type': 'master',
'type': 'polyaxon-experiment'},
'name': 'project1-id1-spec1-xp1-master0',
'namespace': 'polyaxon',
'owner_references': None,
'resource_version': '204749',
'self_link': '/api/v1/namespaces/polyaxon/services/project1-id1-spec1-xp1-master0',
'uid': '09d6006c-d87b-11e7-8ab8-1273d6911587'},
'spec': {'cluster_ip': '10.0.0.113',
'external_i_ps': None, 'external_name': None,
'external_traffic_policy': None,
'health_check_node_port': None,
'load_balancer_ip': None,
'load_balancer_source_ranges': None, 'ports': [
{'name': None, 'node_port': None, 'port': 2222,
'protocol': 'TCP', 'target_port': '2222'}],
'selector': {'experiment': '1',
'project': 'project1-id1-spec1',
'role': 'polyaxon-workers',
'task': 'project1-id1-spec1-xp1-master0',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'task_idx': '0',
'task_type': 'master',
'type': 'polyaxon-experiment'},
'session_affinity': 'None',
'type': 'ClusterIP'},
'status': {'load_balancer': {'ingress': None}}}},
'worker': [{'pod': {'api_version': 'v1', 'kind': 'Pod',
'metadata': {'labels': {'experiment': '1',
'project': 'project1-id1-spec1',
'role': 'polyaxon-workers',
'task': 'project1-id1-spec1-xp1-worker0',
'job_uuid': '3a9c9b0bd56b5e9fbdbd1a3d43d57960',
'task_idx': '0',
'task_type': 'worker',
'type': 'polyaxon-experiment'},
'name': 'project1-id1-spec1-xp1-worker0',
'namespace': 'polyaxon',
'owner_references': None,
'resource_version': '204751',
'self_link': '/api/v1/namespaces/polyaxon/pods/project1-id1-spec1-xp1-worker0',
'uid': '09dcf9eb-d87b-11e7-8ab8-1273d6911587'},
'spec': {'active_deadline_seconds': None, 'affinity': None,
'automount_service_account_token': None,
'containers': [{'args': [
'from polyaxon.polyaxonfile.local_runner import start_experiment_run; start_experiment_run(\'{"version": 1, "project": {"name": "project1"}, "declarations": {"lr": 1.023292992280754}, "environment": {"n_workers": 1, "n_ps": 1, "delay_workers_by_global_step": true, "run_config": {"save_summary_steps": 100, "save_checkpoints_steps": 100}}, "settings": {"logging": {"level": "INFO", "path": "/tmp/plx/logs/project1"}}, "model": {"classifier": {"loss": {"SigmoidCrossEntropy": null}, "optimizer": {"Adam": {"learning_rate": 1.02329299228}}, "metrics": ["Accuracy", "Precision"], "one_hot_encode": true, "n_classes": 10, "graph": {"input_layers": ["image"], "layers": [{"Conv2D": {"filters": 32, "kernel_size": 3, "strides": 1, "activation": "elu", "regularizer": {"L2": {"l": 0.02}}, "name": "Conv2D_1", "inbound_nodes": ["image"]}}, {"MaxPooling2D": {"pool_size": 2, "name": "MaxPooling2D_1", "inbound_nodes": ["Conv2D_1"]}}, {"Conv2D": {"filters": 64, "kernel_size": 3, "activation": "relu", "regularizer": {"L2": {"l": 0.02}}, "name": "Conv2D_2", "inbound_nodes": ["MaxPooling2D_1"]}}, {"MaxPooling2D": {"pool_size": 2, "name": "MaxPooling2D_2", "inbound_nodes": ["Conv2D_2"]}}, {"Flatten": {"name": "Flatten_1", "inbound_nodes": ["MaxPooling2D_2"]}}, {"Dense": {"units": 128, "activation": "tanh", "name": "Dense_1", "inbound_nodes": ["Flatten_1"]}}, {"Dropout": {"rate": 0.8, "name": "Dropout_1", "inbound_nodes": ["Dense_1"]}}, {"Dense": {"units": 256, "activation": "tanh", "name": "Dense_2", "inbound_nodes": ["Dropout_1"]}}, {"Dropout": {"rate": 0.8, "name": "Dropout_2", "inbound_nodes": ["Dense_2"]}}, {"Dense": {"units": 10, "name": "Dense_3", "inbound_nodes": ["Dropout_2"]}}], "output_layers": ["Dense_3"]}}}, "train": {"steps": 1000, "data_pipeline": {"TFRecordImagePipeline": {"batch_size": 64, "num_epochs": 5, "shuffle": true, "data_files": ["/plx/data/mnist/mnist_train.tfrecord"], "meta_data_file": "/plx/data/mnist/meta_data.json", "feature_processors": {"image": {"input_layers": ["image"], "layers": [{"Cast": {"dtype": "float32", "name": "Cast_1", "inbound_nodes": ["image"]}}], "output_layers": ["Cast_1"]}}}}}, "eval": {"data_pipeline": {"TFRecordImagePipeline": {"batch_size": 32, "num_epochs": 1, "shuffle": false, "data_files": ["/plx/data/mnist/mnist_eval.tfrecord"], "meta_data_file": "/plx/data/mnist/meta_data.json", "feature_processors": {"image": {"input_layers": ["image"], "layers": [{"Cast": {"dtype": "float32", "name": "Cast_1", "inbound_nodes": ["image"]}}], "output_layers": ["Cast_1"]}}}}}}\', \'0\', \'worker\', 0, \'train\')'],
'command': ['python3', '-c'],
'env': [{
'name': 'CM_project1_id1_spec1_xp1_cluster_master',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'master',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}},
{
'name': 'CM_project1_id1_spec1_xp1_cluster_worker',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'worker',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}},
{
'name': 'CM_project1_id1_spec1_xp1_cluster_ps',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'ps',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}}],
'env_from': None,
'image': 'polyaxon/polyaxon-lib:0.0.1',
'image_pull_policy': 'IfNotPresent',
'lifecycle': None,
'liveness_probe': None,
'name': 'polyaxon-experiment-job',
'ports': [{'container_port': 2222,
'host_ip': None,
'host_port': None,
'name': None,
'protocol': 'TCP'}],
'readiness_probe': None,
'resources': {'limits': None,
'requests': None},
'security_context': None,
'stdin': None, 'stdin_once': None,
'termination_message_path': '/dev/termination-log',
'termination_message_policy': 'File',
'tty': None, 'volume_mounts': [{
'mount_path': '/var/run/secrets/kubernetes.io/serviceaccount',
'name': 'default-token-28d2n',
'read_only': True,
'sub_path': None}],
'working_dir': None}],
'dns_policy': 'ClusterFirst', 'host_aliases': None,
'host_ipc': None, 'host_network': None,
'host_pid': None, 'hostname': None,
'image_pull_secrets': None,
'init_containers': None, 'node_name': None,
'node_selector': None, 'restart_policy': 'Never',
'scheduler_name': 'default-scheduler',
'security_context': {'fs_group': None,
'run_as_non_root': None,
'run_as_user': None,
'se_linux_options': None,
'supplemental_groups': None},
'service_account': 'default',
'service_account_name': 'default',
'subdomain': None,
'termination_grace_period_seconds': 30,
'tolerations': None, 'volumes': [
{'aws_elastic_block_store': None,
'azure_disk': None, 'azure_file': None,
'cephfs': None, 'cinder': None, 'config_map': None,
'downward_api': None, 'empty_dir': None,
'fc': None, 'flex_volume': None, 'flocker': None,
'gce_persistent_disk': None, 'git_repo': None,
'glusterfs': None, 'host_path': None,
'iscsi': None, 'name': 'default-token-28d2n',
'nfs': None, 'persistent_volume_claim': None,
'photon_persistent_disk': None,
'portworx_volume': None, 'projected': None,
'quobyte': None, 'rbd': None, 'scale_io': None,
'secret': {'default_mode': 420, 'items': None,
'optional': None,
'secret_name': 'default-token-28d2n'},
'storageos': None, 'vsphere_volume': None}]},
'status': {'conditions': None, 'container_statuses': None,
'host_ip': None, 'init_container_statuses': None,
'message': None, 'phase': 'Pending',
'pod_ip': None, 'qos_class': 'BestEffort',
'reason': None, 'start_time': None}},
'service': {'api_version': 'v1', 'kind': 'Service',
'metadata': {'annotations': None, 'cluster_name': None,
'deletion_grace_period_seconds': None,
'deletion_timestamp': None,
'finalizers': None, 'generate_name': None,
'generation': None, 'initializers': None,
'labels': {'experiment': '1',
'project': 'project1-id1-spec1',
'role': 'polyaxon-workers',
'task': 'project1-id1-spec1-xp1-worker0',
'job_uuid': '3a9c9b0bd56b5e9fbdbd1a3d43d57960',
'task_idx': '0',
'task_type': 'worker',
'type': 'polyaxon-experiment'},
'name': 'project1-id1-spec1-xp1-worker0',
'namespace': 'polyaxon',
'owner_references': None,
'resource_version': '204754',
'self_link': '/api/v1/namespaces/polyaxon/services/project1-id1-spec1-xp1-worker0',
'uid': '09ebb28e-d87b-11e7-8ab8-1273d6911587'},
'spec': {'cluster_ip': '10.0.0.214',
'external_i_ps': None, 'external_name': None,
'external_traffic_policy': None,
'health_check_node_port': None,
'load_balancer_ip': None,
'load_balancer_source_ranges': None, 'ports': [
{'name': None, 'node_port': None, 'port': 2222,
'protocol': 'TCP', 'target_port': '2222'}],
'selector': {'experiment': '1',
'project': 'project1-id1-spec1',
'role': 'polyaxon-workers',
'task': 'project1-id1-spec1-xp1-worker0',
'job_uuid': '3a9c9b0bd56b5e9fbdbd1a3d43d57960',
'task_idx': '0',
'task_type': 'worker',
'type': 'polyaxon-experiment'},
'session_affinity': 'None',
'type': 'ClusterIP'},
'status': {'load_balancer': {'ingress': None}}}}],
'ps': [{'pod': {'api_version': 'v1', 'kind': 'Pod',
'metadata': {'annotations': None, 'cluster_name': None,
'deletion_grace_period_seconds': None,
'deletion_timestamp': None, 'finalizers': None,
'generate_name': None, 'generation': None,
'initializers': None, 'labels': {'experiment': '1',
'project': 'project1-id1-spec1',
'role': 'polyaxon-workers',
'task': 'project1-id1-spec1-xp1-ps0',
'job_uuid': '59e3601232b85a3d8be2511f23a62945',
'task_idx': '0',
'task_type': 'ps',
'type': 'polyaxon-experiment'},
'name': 'project1-id1-spec1-xp1-ps0',
'namespace': 'polyaxon', 'owner_references': None,
'resource_version': '204758',
'self_link': '/api/v1/namespaces/polyaxon/pods/project1-id1-spec1-xp1-ps0',
'uid': '09eea5c3-d87b-11e7-8ab8-1273d6911587'},
'spec': {'active_deadline_seconds': None, 'affinity': None,
'automount_service_account_token': None,
'containers': [{'args': [
'from polyaxon.polyaxonfile.local_runner import start_experiment_run; start_experiment_run(\'{"version": 1, "project": {"name": "project1"}, "declarations": {"lr": 1.023292992280754}, "environment": {"n_workers": 1, "n_ps": 1, "delay_workers_by_global_step": true, "run_config": {"save_summary_steps": 100, "save_checkpoints_steps": 100}}, "settings": {"logging": {"level": "INFO", "path": "/tmp/plx/logs/project1"}}, "model": {"classifier": {"loss": {"SigmoidCrossEntropy": null}, "optimizer": {"Adam": {"learning_rate": 1.02329299228}}, "metrics": ["Accuracy", "Precision"], "one_hot_encode": true, "n_classes": 10, "graph": {"input_layers": ["image"], "layers": [{"Conv2D": {"filters": 32, "kernel_size": 3, "strides": 1, "activation": "elu", "regularizer": {"L2": {"l": 0.02}}, "name": "Conv2D_1", "inbound_nodes": ["image"]}}, {"MaxPooling2D": {"pool_size": 2, "name": "MaxPooling2D_1", "inbound_nodes": ["Conv2D_1"]}}, {"Conv2D": {"filters": 64, "kernel_size": 3, "activation": "relu", "regularizer": {"L2": {"l": 0.02}}, "name": "Conv2D_2", "inbound_nodes": ["MaxPooling2D_1"]}}, {"MaxPooling2D": {"pool_size": 2, "name": "MaxPooling2D_2", "inbound_nodes": ["Conv2D_2"]}}, {"Flatten": {"name": "Flatten_1", "inbound_nodes": ["MaxPooling2D_2"]}}, {"Dense": {"units": 128, "activation": "tanh", "name": "Dense_1", "inbound_nodes": ["Flatten_1"]}}, {"Dropout": {"rate": 0.8, "name": "Dropout_1", "inbound_nodes": ["Dense_1"]}}, {"Dense": {"units": 256, "activation": "tanh", "name": "Dense_2", "inbound_nodes": ["Dropout_1"]}}, {"Dropout": {"rate": 0.8, "name": "Dropout_2", "inbound_nodes": ["Dense_2"]}}, {"Dense": {"units": 10, "name": "Dense_3", "inbound_nodes": ["Dropout_2"]}}], "output_layers": ["Dense_3"]}}}, "train": {"steps": 1000, "data_pipeline": {"TFRecordImagePipeline": {"batch_size": 64, "num_epochs": 5, "shuffle": true, "data_files": ["/plx/data/mnist/mnist_train.tfrecord"], "meta_data_file": "/plx/data/mnist/meta_data.json", "feature_processors": {"image": {"input_layers": ["image"], "layers": [{"Cast": {"dtype": "float32", "name": "Cast_1", "inbound_nodes": ["image"]}}], "output_layers": ["Cast_1"]}}}}}, "eval": {"data_pipeline": {"TFRecordImagePipeline": {"batch_size": 32, "num_epochs": 1, "shuffle": false, "data_files": ["/plx/data/mnist/mnist_eval.tfrecord"], "meta_data_file": "/plx/data/mnist/meta_data.json", "feature_processors": {"image": {"input_layers": ["image"], "layers": [{"Cast": {"dtype": "float32", "name": "Cast_1", "inbound_nodes": ["image"]}}], "output_layers": ["Cast_1"]}}}}}}\', \'0\', \'ps\', 0, \'run_std_server\')'],
'command': ['python3', '-c'], 'env': [{
'name': 'CM_project1_id1_spec1_xp1_cluster_master',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'master',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}},
{
'name': 'CM_project1_id1_spec1_xp1_cluster_worker',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'worker',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}},
{
'name': 'CM_project1_id1_spec1_xp1_cluster_ps',
'value': None,
'value_from': {
'config_map_key_ref': {
'key': 'ps',
'name': 'project1-id1-spec1-xp1-cluster',
'optional': None},
'field_ref': None,
'resource_field_ref': None,
'secret_key_ref': None}}],
'env_from': None,
'image': 'polyaxon/polyaxon-lib:0.0.1',
'image_pull_policy': 'IfNotPresent',
'lifecycle': None,
'liveness_probe': None,
'name': 'polyaxon-experiment-job', 'ports': [
{'container_port': 2222, 'host_ip': None,
'host_port': None, 'name': None,
'protocol': 'TCP'}], 'readiness_probe': None,
'resources': {'limits': None,
'requests': None},
'security_context': None,
'stdin': None, 'stdin_once': None,
'termination_message_path': '/dev/termination-log',
'termination_message_policy': 'File',
'tty': None, 'volume_mounts': [{
'mount_path': '/var/run/secrets/kubernetes.io/serviceaccount',
'name': 'default-token-28d2n',
'read_only': True,
'sub_path': None}],
'working_dir': None}],
'dns_policy': 'ClusterFirst', 'host_aliases': None,
'host_ipc': None, 'host_network': None,
'host_pid': None, 'hostname': None,
'image_pull_secrets': None, 'init_containers': None,
'node_name': None, 'node_selector': None,
'restart_policy': 'Never',
'scheduler_name': 'default-scheduler',
'security_context': {'fs_group': None,
'run_as_non_root': None,
'run_as_user': None,
'se_linux_options': None,
'supplemental_groups': None},
'service_account': 'default',
'service_account_name': 'default', 'subdomain': None,
'termination_grace_period_seconds': 30,
'tolerations': None, 'volumes': [
{'aws_elastic_block_store': None, 'azure_disk': None,
'azure_file': None, 'cephfs': None, 'cinder': None,
'config_map': None, 'downward_api': None,
'empty_dir': None, 'fc': None, 'flex_volume': None,
'flocker': None, 'gce_persistent_disk': None,
'git_repo': None, 'glusterfs': None, 'host_path': None,
'iscsi': None, 'name': 'default-token-28d2n',
'nfs': None, 'persistent_volume_claim': None,
'photon_persistent_disk': None,
'portworx_volume': None, 'projected': None,
'quobyte': None, 'rbd': None, 'scale_io': None,
'secret': {'default_mode': 420, 'items': None,
'optional': None,
'secret_name': 'default-token-28d2n'},
'storageos': None, 'vsphere_volume': None}]},
'status': {'conditions': None, 'container_statuses': None,
'host_ip': None, 'init_container_statuses': None,
'message': None, 'phase': 'Pending', 'pod_ip': None,
'qos_class': 'BestEffort', 'reason': None,
'start_time': None}},
'service': {'api_version': 'v1', 'kind': 'Service',
'metadata': {'annotations': None, 'cluster_name': None,
'deletion_grace_period_seconds': None,
'deletion_timestamp': None, 'finalizers': None,
'generate_name': None, 'generation': None,
'initializers': None,
'labels': {'experiment': '1',
'project': 'project1-id1-spec1',
'role': 'polyaxon-workers',
'task': 'project1-id1-spec1-xp1-ps0',
'job_uuid': '59e3601232b85a3d8be2511f23a62945',
'task_idx': '0', 'task_type': 'ps',
'type': 'polyaxon-experiment'},
'name': 'project1-id1-spec1-xp1-ps0',
'namespace': 'polyaxon',
'owner_references': None,
'resource_version': '204760',
'self_link': '/api/v1/namespaces/polyaxon/services/project1-id1-spec1-xp1-ps0',
'uid': '0a03b8bd-d87b-11e7-8ab8-1273d6911587'},
'spec': {'cluster_ip': '10.0.0.79', 'external_i_ps': None,
'external_name': None,
'external_traffic_policy': None,
'health_check_node_port': None,
'load_balancer_ip': None,
'load_balancer_source_ranges': None, 'ports': [
{'name': None, 'node_port': None, 'port': 2222,
'protocol': 'TCP', 'target_port': '2222'}],
'selector': {'experiment': '1',
'project': 'project1-id1-spec1',
'role': 'polyaxon-workers',
'task': 'project1-id1-spec1-xp1-ps0',
'job_uuid': '59e3601232b85a3d8be2511f23a62945',
'task_idx': '0', 'task_type': 'ps',
'type': 'polyaxon-experiment'},
'session_affinity': 'None', 'type': 'ClusterIP'},
'status': {'load_balancer': {'ingress': None}}}}]}
def get_status_event(name, container_name, labels):
event = {
'type': 'ADDED',
'object': {
'api_version': 'v1',
'kind': 'Pod',
'metadata': {
'deletion_timestamp': None,
'name': name,
'namespace': 'polyaxon',
'owner_references': None,
'resource_version': '277329',
'self_link': '/api/v1/namespaces/polyaxon/pods/project1-id1-spec1-xp1-master0',
'uid': '05062d42-d915-11e7-8ab8-1273d6911587',
'labels': labels
},
'spec': {
'containers':
[{
'command': ['python3', '-c'],
'env': [],
'image': 'busybox/busybox',
'image_pull_policy': 'IfNotPresent',
'lifecycle': None,
'liveness_probe': None,
'name': container_name,
'ports': [{
'container_port': 2222,
'host_ip': None,
'host_port': None,
'name': None,
'protocol': 'TCP'}],
'readiness_probe': None,
}],
'volumes': [],
'node_name': None
},
'status': {
'conditions': None,
'container_statuses': None,
'host_ip': None,
'init_container_statuses': None,
'message': None,
'phase': 'Pending',
'pod_ip': None,
'qos_class': 'BestEffort',
'reason': None,
'start_time': None}
}
}
return event
def get_status_event_with_conditions(name, container_name, labels):
event = get_status_event(name, container_name, labels)
event['object']['status'] = {
'conditions':
[{'last_probe_time': None,
'message': None, 'reason': None, 'status': 'True', 'type': 'Initialized'},
{'last_probe_time': None,
'message': 'containers with unready status: [{}]'.format(container_name),
'reason': 'ContainersNotReady', 'status': 'False', 'type': 'Ready'},
{'last_probe_time': None,
'message': None, 'reason': None, 'status': 'True', 'type': 'PodScheduled'}],
'container_statuses': [{
'container_id': 'docker://539e6a6f4209997094802b0657f90576fe129b7f81697120172836073d9bbd75',
'image': 'busybox/busybox',
'image_id': 'docker://sha256:c66a51ffd71e2ec0cb0699dba06283bce9d254e2833a84ce7378298b04297ba3',
'last_state': {
'running': None,
'terminated': None,
'waiting': None
},
'name': container_name,
'ready': False,
'restart_count': 0,
'state': {
'running': 1, # This is changed to get the test to check container monitoring
'terminated': {
'container_id': 'docker://539e6a6f4209997094802b0657f90576fe129b7f81697120172836073d9bbd75',
'exit_code': 1,
'message': None,
'reason': 'Error',
'signal': None,
},
'waiting': None
}
}],
'host_ip': '192.168.64.4',
'init_container_statuses': None,
'message': None,
'phase': 'Failed',
'pod_ip': '172.17.0.2',
'qos_class': 'BestEffort',
'reason': None
}
return event
status_experiment_job_event = get_status_event(
name='project1-id1-spec1-xp1-master0',
container_name=settings.CONTAINER_NAME_EXPERIMENT_JOB,
labels={
'app': settings.APP_LABELS_EXPERIMENT,
'project_name': 'mike/project1',
'experiment_name': 'mike/project1/1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'experiment_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'task_idx': '0',
'task_type': 'master',
'role': settings.ROLE_LABELS_WORKER,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_experiment_job_event_with_conditions = get_status_event_with_conditions(
name='project1-id1-spec1-xp1-master0',
container_name=settings.CONTAINER_NAME_EXPERIMENT_JOB,
labels={
'app': settings.APP_LABELS_EXPERIMENT,
'project_name': 'mike/project1',
'experiment_name': 'mike/project1/1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'experiment_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'task_idx': '0',
'task_type': 'master',
'role': settings.ROLE_LABELS_WORKER,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_job_event = get_status_event(
name='plxproject-project_uuid-notebook',
container_name=settings.CONTAINER_NAME_JOB,
labels={
'app': settings.APP_LABELS_JOB,
'project_name': 'mike/project1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_name': 'mike/project1/jobs/1',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'role': settings.ROLE_LABELS_WORKER,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_notebook_job_event = get_status_event(
name='plxproject-project_uuid-notebook',
container_name=settings.CONTAINER_NAME_PLUGIN_JOB,
labels={
'app': settings.APP_LABELS_NOTEBOOK,
'project_name': 'mike/project1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_name': 'mike/project1/notebook/1',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'role': settings.ROLE_LABELS_DASHBOARD,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_notebook_job_event_with_conditions = get_status_event_with_conditions(
name='plxproject-project_uuid-notebook',
container_name=settings.CONTAINER_NAME_PLUGIN_JOB,
labels={
'app': settings.APP_LABELS_NOTEBOOK,
'project_name': 'mike/project1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_name': 'mike/project1/notebook/1',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'role': settings.ROLE_LABELS_DASHBOARD,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_tensorboard_job_event = get_status_event(
name='plxproject-project_uuid-tensroboard',
container_name=settings.CONTAINER_NAME_PLUGIN_JOB,
labels={
'app': settings.APP_LABELS_TENSORBOARD,
'project_name': 'mike/project1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_name': 'mike/project1/tensorboards/1',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'role': settings.ROLE_LABELS_DASHBOARD,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_job_event_with_conditions = get_status_event_with_conditions(
name='plxproject-project_uuid-job',
container_name=settings.CONTAINER_NAME_JOB,
labels={
'app': settings.APP_LABELS_JOB,
'project_name': 'mike/project1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_name': 'mike/project1/jobs/1',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'role': settings.ROLE_LABELS_WORKER,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_tensorboard_job_event_with_conditions = get_status_event_with_conditions(
name='plxproject-project_uuid-tensroboard',
container_name=settings.CONTAINER_NAME_PLUGIN_JOB,
labels={
'app': settings.APP_LABELS_TENSORBOARD,
'project_name': 'mike/project1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_name': 'mike/project1/tensorboards/1',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'role': settings.ROLE_LABELS_DASHBOARD,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_build_job_event = get_status_event(
name='plxproject-project_uuid-build',
container_name=settings.CONTAINER_NAME_DOCKERIZER_JOB,
labels={
'app': settings.APP_LABELS_DOCKERIZER,
'project_name': 'mike/project1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_name': 'mike/project1/builds/1',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'role': settings.ROLE_LABELS_DASHBOARD,
'type': settings.TYPE_LABELS_RUNNER
}
)
status_build_job_event_with_conditions = get_status_event_with_conditions(
name='plxproject-project_uuid-build',
container_name=settings.CONTAINER_NAME_DOCKERIZER_JOB,
labels={
'app': settings.APP_LABELS_DOCKERIZER,
'project_name': 'mike/project1',
'project_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'job_name': 'mike/project1/builds/1',
'job_uuid': 'fa6203c189a855dd977019854a7ffcc3',
'role': settings.ROLE_LABELS_DASHBOARD,
'type': settings.TYPE_LABELS_RUNNER
}
)
| 69.882517
| 2,613
| 0.433895
|
5b692fbd72261856cf8ea1fa34eba7eb5287fc7d
| 1,132
|
py
|
Python
|
discordbot.py
|
tsu164/emptybot
|
edbfcd329a2ad0b15f50efdbcaf654bef6cb27c7
|
[
"MIT"
] | null | null | null |
discordbot.py
|
tsu164/emptybot
|
edbfcd329a2ad0b15f50efdbcaf654bef6cb27c7
|
[
"MIT"
] | null | null | null |
discordbot.py
|
tsu164/emptybot
|
edbfcd329a2ad0b15f50efdbcaf654bef6cb27c7
|
[
"MIT"
] | 1
|
2020-10-13T04:35:33.000Z
|
2020-10-13T04:35:33.000Z
|
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='$')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.event
async def on_message(message):
# bot相手なら無視
if message.author == bot.user:
return
if message.content == 'ローソン':
await message.channel.send('行け')
if message.content == 'からあげクン':
await message.channel.send('食え')
poops = ['うんこ', 'うんち', 'ウンコ', 'ウンチ', '糞']
for poop in poops:
if poop in message.content:
await message.add_reaction('💩')
await bot.process_commands(message)
@bot.event
async def on_member_join(member):
await member.edit(nick='はみるとん')
@bot.event
async def on_guild_remove(member):
text_channel = member.guild.text_channels[0]
await text_channel.send(f"{member.name}さんが消えていきました")
bot.load_extension("cogs.gamble")
bot.load_extension("cogs.gacha")
bot.run(token)
| 26.952381
| 89
| 0.696113
|
b4e48b22d35e13a54d2e59bdbe3ec37a9d38417a
| 7,309
|
py
|
Python
|
src/openai_ros/src/openai_ros/robot_envs/wamv_env.py
|
a4aleem/GymROS-drones
|
043eab8212d2670e543735294508462a1636256e
|
[
"MIT"
] | 2
|
2019-09-03T10:16:01.000Z
|
2020-01-16T06:01:16.000Z
|
src/openai_ros/src/openai_ros/robot_envs/wamv_env.py
|
a4aleem/GymROS-drones
|
043eab8212d2670e543735294508462a1636256e
|
[
"MIT"
] | null | null | null |
src/openai_ros/src/openai_ros/robot_envs/wamv_env.py
|
a4aleem/GymROS-drones
|
043eab8212d2670e543735294508462a1636256e
|
[
"MIT"
] | null | null | null |
import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from nav_msgs.msg import Odometry
from robotx_gazebo.msg import UsvDrive
from openai_ros.openai_ros_common import ROSLauncher
class WamvEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all WamvEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new WamvEnv environment.
        To check any topic we need the simulation running, so we need to do two things:
        1) Unpause the simulation: without that the stream of data does not flow. This matters
        for simulations that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This is because some plugins that rely on tf do not understand a simulation reset
        and need to be reset themselves to work properly.
        The Sensors: the sensors exposed here are the ones considered useful for AI learning.
Sensor Topic List:
* /wamv/odom: Odometry of the Base of Wamv
Actuators Topic List:
* /cmd_drive: You publish the speed of the left and right propellers.
Args:
"""
rospy.logdebug("Start WamvEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="robotx_gazebo",
launch_file_name="put_wamv_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
        # Internal Vars
        # Doesn't have any accessible ones
        self.controllers_list = []
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(WamvEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("WamvEnv unpause1...")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_systems_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/wamv/odom", Odometry, self._odom_callback)
self.publishers_array = []
self._cmd_drive_pub = rospy.Publisher('/cmd_drive', UsvDrive, queue_size=1)
self.publishers_array.append(self._cmd_drive_pub)
self._check_all_publishers_ready()
self.gazebo.pauseSim()
rospy.logdebug("Finished WamvEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("WamvEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END WamvEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /wamv/odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/wamv/odom", Odometry, timeout=1.0)
rospy.logdebug("Current /wamv/odom READY=>")
except:
rospy.logerr("Current /wamv/odom not ready yet, retrying for getting odom")
return self.odom
def _odom_callback(self, data):
self.odom = data
def _check_all_publishers_ready(self):
"""
Checks that all the publishers are working
:return:
"""
rospy.logdebug("START ALL SENSORS READY")
for publisher_object in self.publishers_array:
self._check_pub_connection(publisher_object)
rospy.logdebug("ALL SENSORS READY")
def _check_pub_connection(self, publisher_object):
rate = rospy.Rate(10) # 10hz
while publisher_object.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to publisher_object yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("publisher_object Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def set_propellers_speed(self, right_propeller_speed, left_propeller_speed, time_sleep=1.0):
"""
        It will set the speed of each of the two propellers of the WAM-V.
"""
i = 0
for publisher_object in self.publishers_array:
usv_drive_obj = UsvDrive()
usv_drive_obj.right = right_propeller_speed
usv_drive_obj.left = left_propeller_speed
rospy.logdebug("usv_drive_obj>>"+str(usv_drive_obj))
publisher_object.publish(usv_drive_obj)
i += 1
self.wait_time_for_execute_movement(time_sleep)
def wait_time_for_execute_movement(self, time_sleep):
"""
        Because the WAM-V position is global, we don't really have
        a way to know whether it is moving in the desired direction; that would require
        evaluating the difference in position and speed in the local reference frame.
"""
time.sleep(time_sleep)
def get_odom(self):
return self.odom
| 34.804762
| 107
| 0.621289
|
102cdf7295ed4b5346110771731a58b1c5c465cc
| 436
|
py
|
Python
|
deps/PicklingTools170Release/Xm/ptools170/host/tabinpytester.py
|
dcanelhas/sdf_tracker-LS
|
2685ce41fc1c8ae12d270c5e2b88afc987af9f45
|
[
"BSD-3-Clause"
] | 13
|
2017-07-06T12:38:39.000Z
|
2021-08-10T08:06:18.000Z
|
deps/PicklingTools170Release/Xm/ptools170/host/tabinpytester.py
|
dcanelhas/sdf_tracker-LS
|
2685ce41fc1c8ae12d270c5e2b88afc987af9f45
|
[
"BSD-3-Clause"
] | 2
|
2016-01-29T22:57:15.000Z
|
2021-08-29T19:07:09.000Z
|
Xm/ptools161/host/tabinpytester.py
|
RichIsMyName/PicklingToolsRepo
|
a53f64263bc82cef2f50fa02db90fb643c7e0fe0
|
[
"BSD-3-Clause"
] | 4
|
2017-07-24T01:50:42.000Z
|
2021-08-10T08:06:20.000Z
|
#! /usr/bin/xmpy
""" Simple example showing how a Python X-Midas primitive can read
T4000 files into dictionaries easily. """
from XMinter import *
from primitive import *
from t4val import recvT4Tab
hin = m_open(m_pick(1), 'r')
m_sync()
while not mcbreak():
dictionary = recvT4Tab(hin)
# DON'T HAVE TO DO Grab! The recvT4Tab does the grab for you
#data = m_grabx(hin, 1, False)
print dictionary
m_close(hin)
| 21.8
| 66
| 0.697248
|
075d26e45fc5ce116fe12cdc7b958296d24e0f17
| 8,020
|
py
|
Python
|
PaddleNLP/unarchived/machine_reading_comprehension/utils/preprocess.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 3
|
2019-04-29T09:12:21.000Z
|
2021-04-30T02:39:02.000Z
|
PaddleNLP/unarchived/machine_reading_comprehension/utils/preprocess.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 2
|
2019-06-26T03:21:49.000Z
|
2019-09-19T09:43:42.000Z
|
PaddleNLP/unarchived/machine_reading_comprehension/utils/preprocess.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 2
|
2018-06-14T13:59:36.000Z
|
2018-11-14T12:34:47.000Z
|
###############################################################################
# ==============================================================================
# Copyright 2017 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module finds the most related paragraph of each document according to recall.
"""
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding("utf-8")
import json
from collections import Counter
def precision_recall_f1(prediction, ground_truth):
"""
This function calculates and returns the precision, recall and f1-score
Args:
prediction: prediction string or list to be matched
ground_truth: golden string or list reference
Returns:
floats of (p, r, f1)
Raises:
None
"""
if not isinstance(prediction, list):
prediction_tokens = prediction.split()
else:
prediction_tokens = prediction
if not isinstance(ground_truth, list):
ground_truth_tokens = ground_truth.split()
else:
ground_truth_tokens = ground_truth
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
p = 1.0 * num_same / len(prediction_tokens)
r = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * p * r) / (p + r)
return p, r, f1
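# Worked example (illustration only, not part of the original module):
#   precision_recall_f1(["the", "cat", "sat"], ["the", "cat", "ran"])
#   -> num_same = 2, p = 2/3, r = 2/3, f1 = 2 * (2/3) * (2/3) / ((2/3) + (2/3)) = 2/3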
def recall(prediction, ground_truth):
"""
This function calculates and returns the recall
Args:
prediction: prediction string or list to be matched
ground_truth: golden string or list reference
Returns:
floats of recall
Raises:
None
"""
return precision_recall_f1(prediction, ground_truth)[1]
def f1_score(prediction, ground_truth):
"""
This function calculates and returns the f1-score
Args:
prediction: prediction string or list to be matched
ground_truth: golden string or list reference
Returns:
floats of f1
Raises:
None
"""
return precision_recall_f1(prediction, ground_truth)[2]
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
"""
This function applies metric_fn to the prediction against each ground truth
and returns the maximum score.
Args:
metric_fn: metric function which calculates a score for a (prediction, ground_truth) pair.
prediction: prediction string or list to be matched
ground_truths: list of golden string or list references
Returns:
float of the maximum metric value over all ground truths
Raises:
None
"""
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
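# Worked example (illustration only, not part of the original module):
#   metric_max_over_ground_truths(recall, ["a", "b"], [["a"], ["c", "d"]])
#   -> recall against ["a"] is 1.0, against ["c", "d"] is 0.0, so the result is 1.0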
def find_best_question_match(doc, question, with_score=False):
"""
For each document, find the paragraph that best matches the question.
Args:
doc: The document object.
question: The question tokens.
with_score: If True then the match score will be returned,
otherwise False.
Returns:
The index of the best match paragraph, if with_score=False,
otherwise returns a tuple of the index of the best match paragraph
and the match score of that paragraph.
"""
most_related_para = -1
max_related_score = 0
most_related_para_len = 0
for p_idx, para_tokens in enumerate(doc['segmented_paragraphs']):
if len(question) > 0:
related_score = metric_max_over_ground_truths(recall, para_tokens,
question)
else:
related_score = 0
if related_score > max_related_score \
or (related_score == max_related_score \
and len(para_tokens) < most_related_para_len):
most_related_para = p_idx
max_related_score = related_score
most_related_para_len = len(para_tokens)
if most_related_para == -1:
most_related_para = 0
if with_score:
return most_related_para, max_related_score
return most_related_para
def find_fake_answer(sample):
"""
For each document, finds the most related paragraph based on recall,
then finds a span that maximizes the f1_score compared with the gold answers
and uses this span as a fake answer span
Args:
sample: a sample in the dataset
Returns:
None
Raises:
None
"""
for doc in sample['documents']:
most_related_para = -1
most_related_para_len = 999999
max_related_score = 0
for p_idx, para_tokens in enumerate(doc['segmented_paragraphs']):
if len(sample['segmented_answers']) > 0:
related_score = metric_max_over_ground_truths(
recall, para_tokens, sample['segmented_answers'])
else:
continue
if related_score > max_related_score \
or (related_score == max_related_score
and len(para_tokens) < most_related_para_len):
most_related_para = p_idx
most_related_para_len = len(para_tokens)
max_related_score = related_score
doc['most_related_para'] = most_related_para
sample['answer_docs'] = []
sample['answer_spans'] = []
sample['fake_answers'] = []
sample['match_scores'] = []
best_match_score = 0
best_match_d_idx, best_match_span = -1, [-1, -1]
best_fake_answer = None
answer_tokens = set()
for segmented_answer in sample['segmented_answers']:
answer_tokens = answer_tokens | set(
[token for token in segmented_answer])
for d_idx, doc in enumerate(sample['documents']):
if not doc['is_selected']:
continue
if doc['most_related_para'] == -1:
doc['most_related_para'] = 0
most_related_para_tokens = doc['segmented_paragraphs'][doc[
'most_related_para']][:1000]
for start_tidx in range(len(most_related_para_tokens)):
if most_related_para_tokens[start_tidx] not in answer_tokens:
continue
for end_tidx in range(
len(most_related_para_tokens) - 1, start_tidx - 1, -1):
span_tokens = most_related_para_tokens[start_tidx:end_tidx + 1]
if len(sample['segmented_answers']) > 0:
match_score = metric_max_over_ground_truths(
f1_score, span_tokens, sample['segmented_answers'])
else:
match_score = 0
if match_score == 0:
break
if match_score > best_match_score:
best_match_d_idx = d_idx
best_match_span = [start_tidx, end_tidx]
best_match_score = match_score
best_fake_answer = ''.join(span_tokens)
if best_match_score > 0:
sample['answer_docs'].append(best_match_d_idx)
sample['answer_spans'].append(best_match_span)
sample['fake_answers'].append(best_fake_answer)
sample['match_scores'].append(best_match_score)
if __name__ == '__main__':
for line in sys.stdin:
sample = json.loads(line)
find_fake_answer(sample)
print(json.dumps(sample, encoding='utf8', ensure_ascii=False))
| 36.454545
| 100
| 0.623815
|
003519994ae88149d6bf772e3bf3f44f979594ab
| 14,891
|
py
|
Python
|
software/contrib/consequencer.py
|
Bridgee/EuroPi
|
8c755fa6cf26e0e4715421324898a6b3c8eeb412
|
[
"CC0-1.0"
] | null | null | null |
software/contrib/consequencer.py
|
Bridgee/EuroPi
|
8c755fa6cf26e0e4715421324898a6b3c8eeb412
|
[
"CC0-1.0"
] | null | null | null |
software/contrib/consequencer.py
|
Bridgee/EuroPi
|
8c755fa6cf26e0e4715421324898a6b3c8eeb412
|
[
"CC0-1.0"
] | null | null | null |
from europi import *
import machine
from time import ticks_diff, ticks_ms
from random import randint, uniform
from europi_script import EuroPiScript
'''
Consequencer
author: Nik Ansell (github.com/gamecat69)
date: 2022-02-05
labels: sequencer, triggers, drums, randomness
A gate and CV sequencer inspired by Grids from Mutable Instruments that contains pre-loaded drum patterns that can be smoothly morphed from one to another. Triggers are sent from outputs 1 - 3, randomized stepped CV patterns are sent from outputs 4 - 6.
Send a clock to the digital input to start the sequence.
Demo video: https://youtu.be/UwjajP6uiQU
digital_in: clock in
analog_in: randomness CV
knob_1: randomness
knob_2: select pre-loaded drum pattern
button_1: Short Press: toggle randomized hi-hats on / off. Long Press: Play previous CV Pattern
button_2:
- Short Press (<300ms) : Generate a new random cv pattern for outputs 4 - 6.
- Medium Press (>300ms) : Cycle through analogue input modes
- Long Press (>3000ms) : Toggle option to send clocks from output 4 on / off
output_1: trigger 1 / Bass Drum
output_2: trigger 2 / Snare Drum
output_3: trigger 3 / Hi-Hat
output_4: randomly generated CV (cycled by pushing button 2)
output_5: randomly generated CV (cycled by pushing button 2)
output_6: randomly generated CV (cycled by pushing button 2)
'''
class Consequencer(EuroPiScript):
def __init__(self):
# Overclock the Pico for improved performance.
machine.freq(250_000_000)
# Initialize sequencer pattern arrays
p = pattern()
self.BD=p.BD
self.SN=p.SN
self.HH=p.HH
# Initialize variables
self.step = 0
self.trigger_duration_ms = 50
self.clock_step = 0
self.pattern = 0
self.random_HH = False
self.minAnalogInputVoltage = 0.9
self.randomness = 0
self.analogInputMode = 1 # 1: Randomness, 2: Pattern, 3: CV Pattern
self.CvPattern = 0
self.reset_timeout = 500
# option to always output a clock on output 4
# this helps to sync Consequencer with other modules
self.output4isClock = False
# Calculate the longest pattern length to be used when generating random sequences
self.maxStepLength = len(max(self.BD, key=len))
# Generate random CV for cv4-6
self.random4 = []
self.random5 = []
self.random6 = []
self.generateNewRandomCVPattern()
# Triggered when button 2 is released.
# Short press: move to the next CV pattern (generating a new random one if needed)
# Longer press (>300ms): cycle through the analogue input modes
@b2.handler_falling
def b2Pressed():
if ticks_diff(ticks_ms(), b2.last_pressed()) > 300:
if self.analogInputMode < 3:
self.analogInputMode += 1
else:
self.analogInputMode = 1
else:
# Move to next cv pattern if one already exists, otherwise create a new one
self.CvPattern += 1
if self.CvPattern == len(self.random4):
self.generateNewRandomCVPattern()
# Triggered when button 1 is released
# Short press: play the previous CV pattern for cv4-6
# Medium press (>300ms): toggle randomized hi-hat mode
# Long press (>2000ms): toggle sending a clock from output 4
@b1.handler_falling
def b1Pressed():
if ticks_diff(ticks_ms(), b1.last_pressed()) > 2000:
self.output4isClock = not self.output4isClock
elif ticks_diff(ticks_ms(), b1.last_pressed()) > 300:
self.random_HH = not self.random_HH
else:
# Play previous CV Pattern, unless we are at the first pattern
if self.CvPattern != 0:
self.CvPattern -= 1
# Triggered on each clock into digital input. Output triggers.
@din.handler
def clockTrigger():
# function timing code. Leave in and activate as needed
#t = time.ticks_us()
self.step_length = len(self.BD[self.pattern])
# A pattern was selected which is shorter than the current step. Set to zero to avoid an error
if self.step >= self.step_length:
self.step = 0
cv5.voltage(self.random5[self.CvPattern][self.step])
cv6.voltage(self.random6[self.CvPattern][self.step])
# How much randomness to add to cv1-3.
# The higher the randomness value, the more likely the random branch below is
# taken instead of the programmed pattern.
if randint(0,99) < self.randomness:
cv1.value(randint(0, 1))
cv2.value(randint(0, 1))
cv3.value(randint(0, 1))
else:
cv1.value(int(self.BD[self.pattern][self.step]))
cv2.value(int(self.SN[self.pattern][self.step]))
# If randomize HH is ON:
if self.random_HH:
cv3.value(randint(0, 1))
else:
cv3.value(int(self.HH[self.pattern][self.step]))
# Set cv4-6 voltage outputs based on previously generated random pattern
if self.output4isClock:
cv4.value(1)
else:
cv4.voltage(self.random4[self.CvPattern][self.step])
# Increment the clock step
self.clock_step +=1
self.step += 1
# function timing code. Leave in and activate as needed
#delta = time.ticks_diff(time.ticks_us(), t)
#print('Function {} Time = {:6.3f}ms'.format('clockTrigger', delta/1000))
@din.handler_falling
def clockTriggerEnd():
cv1.off()
cv2.off()
cv3.off()
if self.output4isClock:
cv4.off()
def generateNewRandomCVPattern(self):
self.random4.append(self.generateRandomPattern(self.maxStepLength, 0, 9))
self.random5.append(self.generateRandomPattern(self.maxStepLength, 0, 9))
self.random6.append(self.generateRandomPattern(self.maxStepLength, 0, 9))
def getPattern(self):
# If mode 2 and there is CV on the analogue input use it, if not use the knob position
val = 100 * ain.percent()
if self.analogInputMode == 2 and val > self.minAnalogInputVoltage:
self.pattern = int((len(self.BD) / 100) * val)
else:
self.pattern = k2.read_position(len(self.BD))
self.step_length = len(self.BD[self.pattern])
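# Worked example of the mapping above (illustrative, assuming e.g. 45 stored patterns):
#   an analogue input reading of 50% selects pattern int((45 / 100) * 50) = 22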
def getCvPattern(self):
# If analogue input mode 3, get the CV pattern from CV input
if self.analogInputMode != 3:
return
else:
# Get the analogue input voltage as a percentage
CvpVal = 100 * ain.percent()
# Is there a voltage on the analogue input and are we configured to use it?
if CvpVal > 0.4:
# Convert percentage value to a representative index of the pattern array
self.CvPattern = int((len(self.random4) / 100) * CvpVal)
def generateRandomPattern(self, length, min, max):
self.t=[]
for i in range(0, length):
self.t.append(uniform(min, max))
return self.t
def getRandomness(self):
# If mode 1 and there is CV on the analogue input use it, if not use the knob position
val = 100 * ain.percent()
if self.analogInputMode == 1 and val > self.minAnalogInputVoltage:
self.randomness = val
else:
self.randomness = k1.read_position()
def main(self):
while True:
self.getPattern()
self.getRandomness()
self.getCvPattern()
self.updateScreen()
# If the sequencer has been running and then stops for longer than reset_timeout, reset step and clock_step to 0
if self.clock_step != 0 and ticks_diff(ticks_ms(), din.last_triggered()) > self.reset_timeout:
self.step = 0
self.clock_step = 0
def visualizePattern(self, pattern):
self.t = pattern
self.t = self.t.replace('1','^')
self.t = self.t.replace('0',' ')
return self.t
def updateScreen(self):
#oled.clear() - don't use this, it causes the screen to flicker!
oled.fill(0)
# Show selected pattern visually
oled.text(self.visualizePattern(self.BD[self.pattern]),0,0,1)
oled.text(self.visualizePattern(self.SN[self.pattern]),0,10,1)
oled.text(self.visualizePattern(self.HH[self.pattern]),0,20,1)
# If the random toggle is on, show a rectangle
if self.random_HH:
oled.fill_rect(0,29,10,3,1)
# Show self.output4isClock value
if self.output4isClock:
oled.rect(12,29,10,3,1)
# Show the analogInputMode
oled.text('M' + str(self.analogInputMode), 112, 25, 1)
# Show randomness
oled.text('R' + str(int(self.randomness)), 40, 25, 1)
# Show CV pattern
oled.text('C' + str(self.CvPattern), 76, 25, 1)
oled.show()
class pattern:
# Initialize pattern lists
BD=[]
SN=[]
HH=[]
# African Patterns
BD.append("10110000001100001011000000110000")
SN.append("10001000100010001010100001001010")
HH.append("00001011000010110000101100001011")
BD.append("10101010101010101010101010101010")
SN.append("00001000000010000000100000001001")
HH.append("10100010101000101010001010100000")
BD.append("11000000101000001100000010100000")
SN.append("00001000000010000000100000001010")
HH.append("10111001101110011011100110111001")
BD.append("10001000100010001000100010001010")
SN.append("00100100101100000010010010110010")
HH.append("10101010101010101010101010101011")
BD.append("00101011101000111010001110100010")
SN.append("00101011101000111010001110100010")
HH.append("00001000000010000000100000001000")
BD.append("10101111101000111010001110101000")
SN.append("10101111101000111010001110101000")
HH.append("00000000101000001010000010100010")
BD.append("10110110000011111011011000001111")
SN.append("10110110000011111011011000001111")
HH.append("11111010001011111010001110101100")
BD.append("10010100100101001001010010010100")
SN.append("00100010001000100010001000100010")
HH.append("01010101010101010101010101010101")
# 0,1,1,2,3,5,8,12
BD.append("0101011011101111")
SN.append("1010100100010000")
HH.append("1110100100010000")
# Add patterns
BD.append("1000100010001000")
SN.append("0000000000000000")
HH.append("0000000000000000")
BD.append("1000100010001000")
SN.append("0000000000000000")
HH.append("0010010010010010")
BD.append("1000100010001000")
SN.append("0000100000000000")
HH.append("0010010010010010")
BD.append("1000100010001000")
SN.append("0000100000001000")
HH.append("0010010010010010")
BD.append("1000100010001000")
SN.append("0000100000000000")
HH.append("0000000000000000")
BD.append("1000100010001000")
SN.append("0000100000001000")
HH.append("0000000000000000")
BD.append("1000100010001000")
SN.append("0000100000001000")
HH.append("0000100010001001")
BD.append("1000100010001000")
SN.append("0000100000001000")
HH.append("0101010101010101")
BD.append("1000100010001000")
SN.append("0000000000000000")
HH.append("1111111111111111")
BD.append("1000100010001000")
SN.append("0000100000001000")
HH.append("1111111111111111")
BD.append("1000100010001000")
SN.append("0000100000000000")
HH.append("0001000000000000")
BD.append("1000100010001000")
SN.append("0000100000000000")
HH.append("0001001000000000")
# Source: https://docs.google.com/spreadsheets/d/19_3BxUMy3uy1Gb0V8Wc-TcG7q16Amfn6e8QVw4-HuD0/edit#gid=0
BD.append("1000000010000000")
SN.append("0000100000001000")
HH.append("1010101010101010")
BD.append("1010001000100100")
SN.append("0000100101011001")
HH.append("0000000100000100")
BD.append("1000000110000010")
SN.append("0000100000001000")
HH.append("1010101110001010")
BD.append("1100000100110000")
SN.append("0000100000001000")
HH.append("1010101010101010")
BD.append("1000000110100000")
SN.append("0000100000001000")
HH.append("0010101010101010")
BD.append("1010000000110001")
SN.append("0000100000001000")
HH.append("1010101010101010")
BD.append("1000000110100001")
SN.append("0000100000001000")
HH.append("0000100010101011")
BD.append("1001001010000000")
SN.append("0000100000001000")
HH.append("0000100000001000")
BD.append("1010001001100000")
SN.append("0000100000001000")
HH.append("1010101010001010")
BD.append("1010000101110001")
SN.append("0000100000001000")
HH.append("1010101010001010")
# End external patterns
BD.append("1000100010001000")
SN.append("0000101001001000")
HH.append("0101010101010101")
BD.append("1100000001010000")
SN.append("0000101000001000")
HH.append("0101010101010101")
BD.append("1100000001010000")
SN.append("0000101000001000")
HH.append("1111111111111111")
BD.append("1001001001000100")
SN.append("0001000000010000")
HH.append("0101110010011110")
BD.append("1001001001000100")
SN.append("0001000000010000")
HH.append("1111111111111111")
# Be warned patterns < 16 steps can sound disjointed when using CV to select the pattern!
BD.append("10010000010010")
SN.append("00010010000010")
HH.append("11100110111011")
BD.append("1001000001001")
SN.append("0001001000001")
HH.append("1110011011101")
BD.append("100100000100")
SN.append("000100100000")
HH.append("111001101110")
BD.append("10010000010")
SN.append("00010010000")
HH.append("11100110111")
BD.append("10010000010")
SN.append("00010010000")
HH.append("11111010011")
BD.append("1001000010")
SN.append("0001000000")
HH.append("1111101101")
BD.append("100100010")
SN.append("000100000")
HH.append("111110111")
BD.append("10010010")
SN.append("00010000")
HH.append("11111111")
BD.append("1001001")
SN.append("0001000")
HH.append("1111111")
BD.append("100100")
SN.append("000100")
HH.append("111111")
BD.append("10000")
SN.append("00001")
HH.append("11110")
BD.append("1000")
SN.append("0000")
HH.append("1111")
BD.append("100")
SN.append("000")
HH.append("111")
if __name__ == '__main__':
# Reset module display state.
[cv.off() for cv in cvs]
dm = Consequencer()
dm.main()
| 31.88651
| 253
| 0.636358
|
2aee1e57202b837a0e91e1fbffb628e84b589ee9
| 3,516
|
py
|
Python
|
unittests/test_app.py
|
martinmartinchan/cookbook
|
361d2470971d3700118542de28726117ed5b175e
|
[
"MIT"
] | null | null | null |
unittests/test_app.py
|
martinmartinchan/cookbook
|
361d2470971d3700118542de28726117ed5b175e
|
[
"MIT"
] | null | null | null |
unittests/test_app.py
|
martinmartinchan/cookbook
|
361d2470971d3700118542de28726117ed5b175e
|
[
"MIT"
] | null | null | null |
def test_get_recipes_with_empty_database(client):
result = client.get("/recipes")
assert not result.json['success']
assert result.json['code'] == 404
def test_add_correct_recipe(client):
result = client.post('/addrecipe', json={
'password': 'Troglodon5986',
'name': 'testrecipe',
'servings': 2,
'description': 'This is a test recipe',
'ingredients': [
{'name': 'testIngredient',
'amount': 1,
'unit': 'dl'}
],
'instructions': [
{'step': 1,
'instruction': 'This is just a test, nothing to really do'}
]
})
assert result.json['success']
assert result.json['code'] == 201
def test_get_all_recipes_route1(client):
result = client.get("/")
assert result.json['success']
assert result.json['code'] == 200
assert len(result.json['result']) > 0
def test_get_all_recipes_route2(client):
result = client.get("/recipes")
assert result.json['success']
assert result.json['code'] == 200
assert len(result.json['result']) > 0
def test_get_single_recipe(client):
result = client.get("/recipe?name=testrecipe")
assert result.json['success']
assert result.json['code'] == 200
def test_get_all_recipes(client):
result = client.get("/recipes")
assert result.json['success']
assert result.json['code'] == 200
assert len(result.json['result'])
def test_add_recipe_without_password(client):
result = client.post('/addrecipe', json={
'name': 'testrecipe2',
'servings': 2,
'description': 'This is another test recipe',
'ingredients': [
{'name': 'testIngredient',
'amount': 1,
'unit': 'dl'}
],
'instructions': [
{'step': 1,
'instruction': 'This is again just a test, nothing to really do'}
]
})
assert not result.json['success']
assert result.json['code'] == 401
def test_edit_recipe(client):
result = client.put('/editrecipe', json={
'password': 'Troglodon5986',
'name': 'testrecipe',
'recipe': {
'name': 'testrecipe',
'servings': 2,
'description': 'This is the edited test recipe',
'ingredients': [
{'name': 'testIngredient',
'amount': 1,
'unit': 'dl'}
],
'instructions': [
{'step': 1,
'instruction': 'This is still just a test, nothing to really do'}
]
}
})
assert result.json['success']
assert result.json['code'] == 200
def test_edit_recipe_to_existing_recipe_name(client):
result1 = client.post('/addrecipe', json={
'password': 'Troglodon5986',
'name': 'testrecipe2',
'servings': 2,
'description': 'This is a test recipe',
'ingredients': [
{'name': 'testIngredient',
'amount': 1,
'unit': 'dl'}
],
'instructions': [
{'step': 1,
'instruction': 'This is just a test, nothing to really do'}
]
})
assert result1.json['success']
assert result1.json['code'] == 201
result2 = client.put('/editrecipe', json={
'password': 'Troglodon5986',
'name': 'testrecipe2',
'recipe': {
'name': 'testrecipe',
'servings': 2,
'description': 'This is the edited test recipe',
'ingredients': [
{'name': 'testIngredient',
'amount': 1,
'unit': 'dl'}
],
'instructions': [
{'step': 1,
'instruction': 'This is still just a test, nothing to really do'}
]
}
})
assert not result2.json['success']
assert result2.json['code'] == 400
result3 = client.delete('/deleterecipe', json={
'password': 'Troglodon5986',
'name': 'testrecipe2'
})
assert result3.json['success']
def test_delete_recipe(client):
result = client.delete('/deleterecipe', json={
'password': 'Troglodon5986',
'name': 'testrecipe'
})
assert result.json['success']
| 25.114286
| 68
| 0.648464
|
cc629b9acfaa20c7bbd81de7e6bd1f2efdf3cf59
| 2,497
|
py
|
Python
|
src/spaceone/inventory/model/network_policy_model.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/model/network_policy_model.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/model/network_policy_model.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
from mongoengine import *
from spaceone.core.model.mongo_model import MongoModel
from spaceone.inventory.model.zone_model import Zone
from spaceone.inventory.model.region_model import Region
from spaceone.inventory.model.collection_info_model import CollectionInfo
from spaceone.inventory.model.reference_resource_model import ReferenceResource
class RoutingTable(EmbeddedDocument):
cidr = StringField(max_length=40)
destination = StringField(max_length=40)
interface = StringField(max_length=40, null=True, default=None)
class NetworkPolicy(MongoModel):
network_policy_id = StringField(max_length=40, generate_id='npolicy', unique=True)
name = StringField(max_length=255)
routing_tables = ListField(EmbeddedDocumentField(RoutingTable))
dns = ListField(StringField(max_length=40))
data = DictField()
metadata = DictField()
reference = EmbeddedDocumentField(ReferenceResource, default=ReferenceResource)
tags = DictField()
zone = ReferenceField('Zone', reverse_delete_rule=DENY)
region = ReferenceField('Region', reverse_delete_rule=DENY)
domain_id = StringField(max_length=255)
collection_info = EmbeddedDocumentField(CollectionInfo, default=CollectionInfo)
created_at = DateTimeField(auto_now_add=True)
meta = {
'updatable_fields': [
'name',
'routing_tables',
'dns',
'data',
'metadata',
'reference',
'tags',
'collection_info'
],
'exact_fields': [
'network_policy_id',
'collection_info.state'
],
'minimal_fields': [
'network_policy_id',
'name',
'reference',
'collection_info.state'
],
'change_query_keys': {
'zone_id': 'zone.zone_id',
'region_id': 'region.region_id'
},
'reference_query_keys': {
'zone': Zone,
'region': Region
},
'ordering': [
'name'
],
'indexes': [
'network_policy_id',
'zone',
'region',
'domain_id',
'reference.resource_id',
'collection_info.state'
],
'aggregate': {
'lookup': {
'region': {
'from': 'region'
},
'zone': {
'from': 'zone'
}
}
}
}
| 30.45122
| 86
| 0.579495
|
379b77aac81f70a1608ca74430ecd78d172d1a55
| 5,155
|
py
|
Python
|
huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/show_instance_users_entity.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/show_instance_users_entity.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/show_instance_users_entity.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowInstanceUsersEntity:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'user_name': 'str',
'role': 'str',
'default_app': 'bool',
'created_time': 'int'
}
attribute_map = {
'user_name': 'user_name',
'role': 'role',
'default_app': 'default_app',
'created_time': 'created_time'
}
def __init__(self, user_name=None, role=None, default_app=None, created_time=None):
"""ShowInstanceUsersEntity - a model defined in huaweicloud sdk"""
self._user_name = None
self._role = None
self._default_app = None
self._created_time = None
self.discriminator = None
if user_name is not None:
self.user_name = user_name
if role is not None:
self.role = role
if default_app is not None:
self.default_app = default_app
if created_time is not None:
self.created_time = created_time
@property
def user_name(self):
"""Gets the user_name of this ShowInstanceUsersEntity.
User name.
:return: The user_name of this ShowInstanceUsersEntity.
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""Sets the user_name of this ShowInstanceUsersEntity.
User name.
:param user_name: The user_name of this ShowInstanceUsersEntity.
:type: str
"""
self._user_name = user_name
@property
def role(self):
"""Gets the role of this ShowInstanceUsersEntity.
User role.
:return: The role of this ShowInstanceUsersEntity.
:rtype: str
"""
return self._role
@role.setter
def role(self, role):
"""Sets the role of this ShowInstanceUsersEntity.
User role.
:param role: The role of this ShowInstanceUsersEntity.
:type: str
"""
self._role = role
@property
def default_app(self):
"""Gets the default_app of this ShowInstanceUsersEntity.
Whether this is the default application.
:return: The default_app of this ShowInstanceUsersEntity.
:rtype: bool
"""
return self._default_app
@default_app.setter
def default_app(self, default_app):
"""Sets the default_app of this ShowInstanceUsersEntity.
Whether this is the default application.
:param default_app: The default_app of this ShowInstanceUsersEntity.
:type: bool
"""
self._default_app = default_app
@property
def created_time(self):
"""Gets the created_time of this ShowInstanceUsersEntity.
Creation time.
:return: The created_time of this ShowInstanceUsersEntity.
:rtype: int
"""
return self._created_time
@created_time.setter
def created_time(self, created_time):
"""Sets the created_time of this ShowInstanceUsersEntity.
Creation time.
:param created_time: The created_time of this ShowInstanceUsersEntity.
:type: int
"""
self._created_time = created_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowInstanceUsersEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
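# Hedged usage sketch (illustrative only; the field values below are made up):
#   entity = ShowInstanceUsersEntity(user_name="app-user", role="guest",
#                                    default_app=False, created_time=1577808000000)
#   entity.to_dict()
#   # -> {'user_name': 'app-user', 'role': 'guest', 'default_app': False,
#   #     'created_time': 1577808000000}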
| 26.30102
| 87
| 0.572454
|
bacce1ae8e0aeded30095ab6e872f5d2c7bdecd9
| 6,353
|
py
|
Python
|
frameworks/kafka/tests/test_tls.py
|
vibhujain/dcos-commons
|
dad8ebdf5c9a21988dad054a77e775e55e499bd1
|
[
"Apache-2.0"
] | null | null | null |
frameworks/kafka/tests/test_tls.py
|
vibhujain/dcos-commons
|
dad8ebdf5c9a21988dad054a77e775e55e499bd1
|
[
"Apache-2.0"
] | null | null | null |
frameworks/kafka/tests/test_tls.py
|
vibhujain/dcos-commons
|
dad8ebdf5c9a21988dad054a77e775e55e499bd1
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import pytest
import sdk_cmd
import sdk_install
import sdk_networks
import sdk_recovery
import sdk_security
import sdk_utils
from security import transport_encryption, cipher_suites
from tests import config
pytestmark = [
sdk_utils.dcos_ee_only,
pytest.mark.skipif(
sdk_utils.dcos_version_less_than("1.10"), reason="TLS tests require DC/OS 1.10+"
),
]
log = logging.getLogger(__name__)
# Name of the broker TLS vip
BROKER_TLS_ENDPOINT = "broker-tls"
@pytest.fixture(scope="module")
def service_account(configure_security):
"""
Sets up a service account for use with TLS.
"""
name = config.SERVICE_NAME
service_account_info = None
try:
service_account_info = transport_encryption.setup_service_account(name)
yield service_account_info
finally:
if service_account_info is not None:
transport_encryption.cleanup_service_account(name, service_account_info)
@pytest.fixture(scope="module")
def kafka_service(service_account):
service_options = {
"service": {
"name": config.SERVICE_NAME,
"service_account": service_account["name"],
"service_account_secret": service_account["secret"],
"security": {"transport_encryption": {"enabled": True}},
}
}
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
try:
sdk_install.install(
config.PACKAGE_NAME,
service_name=config.SERVICE_NAME,
expected_running_tasks=config.DEFAULT_TASK_COUNT,
additional_options=service_options,
timeout_seconds=30 * 60,
)
yield {**service_options, **{"package_name": config.PACKAGE_NAME}}
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.mark.tls
@pytest.mark.smoke
@pytest.mark.sanity
def test_tls_endpoints(kafka_service):
endpoint_names = sdk_networks.get_endpoint_names(config.PACKAGE_NAME, config.SERVICE_NAME)
assert len(endpoint_names) == 2
assert BROKER_TLS_ENDPOINT in endpoint_names
# Test that broker-tls endpoint is available
endpoint_tls = sdk_networks.get_endpoint(
config.PACKAGE_NAME, config.SERVICE_NAME, BROKER_TLS_ENDPOINT
)
assert len(endpoint_tls["dns"]) == config.DEFAULT_BROKER_COUNT
@pytest.mark.tls
@pytest.mark.smoke
@pytest.mark.sanity
def test_producer_over_tls(kafka_service):
sdk_cmd.svc_cli(
config.PACKAGE_NAME,
config.SERVICE_NAME,
"topic create {}".format(config.DEFAULT_TOPIC_NAME),
)
rc, stdout, _ = sdk_cmd.svc_cli(
config.PACKAGE_NAME,
config.SERVICE_NAME,
"topic describe {}".format(config.DEFAULT_TOPIC_NAME),
)
assert rc == 0, "Topic describe failed"
assert len(json.loads(stdout)["partitions"]) == config.DEFAULT_PARTITION_COUNT
# Write twice: Warm up TLS connections
num_messages = 10
sdk_cmd.svc_cli(
config.PACKAGE_NAME,
config.SERVICE_NAME,
"topic producer_test_tls {} {}".format(config.DEFAULT_TOPIC_NAME, num_messages),
)
rc, stdout, _ = sdk_cmd.svc_cli(
config.PACKAGE_NAME,
config.SERVICE_NAME,
"topic producer_test_tls {} {}".format(config.DEFAULT_TOPIC_NAME, num_messages),
)
assert rc == 0, "producer_test_tls failed"
write_info = json.loads(stdout)
assert len(write_info) == 1
assert write_info["message"].startswith("Output: {} records sent".format(num_messages))
@pytest.mark.tls
@pytest.mark.smoke
@pytest.mark.sanity
def test_tls_ciphers(kafka_service):
task_name = "kafka-0-broker"
endpoint = sdk_networks.get_endpoint(
config.PACKAGE_NAME, config.SERVICE_NAME, BROKER_TLS_ENDPOINT
)["dns"][0]
ciphers_config_path = ["service", "security", "transport_encryption", "ciphers"]
rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "describe")
assert rc == 0, "Describe command failed"
expected_ciphers = set(
sdk_utils.get_in(
ciphers_config_path,
json.loads(stdout),
"",
)
.rstrip()
.split(",")
)
openssl_ciphers = sdk_security.openssl_ciphers()
missing_openssl_ciphers = cipher_suites.missing_openssl_ciphers(openssl_ciphers)
possible_openssl_ciphers = openssl_ciphers - missing_openssl_ciphers
enabled_ciphers = set()
assert openssl_ciphers, "OpenSSL ciphers should be non-empty"
assert expected_ciphers, "Expected ciphers should be non-empty"
assert possible_openssl_ciphers, "Possible OpenSSL ciphers should be non-empty"
# Output OpenSSL version.
sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name, "openssl version")
log.warning(
"\n%s OpenSSL ciphers missing from the cipher_suites module:", len(missing_openssl_ciphers)
)
log.warning("\n".join(to_sorted(list(missing_openssl_ciphers))))
log.info("\n%s expected ciphers:", len(expected_ciphers))
log.info("\n".join(to_sorted(list(expected_ciphers))))
log.info("\n%s ciphers will be checked:", len(possible_openssl_ciphers))
for openssl_cipher in to_sorted(list(possible_openssl_ciphers)):
log.info("%s (%s)", cipher_suites.rfc_name(openssl_cipher), openssl_cipher)
for openssl_cipher in possible_openssl_ciphers:
if sdk_security.is_cipher_enabled(config.SERVICE_NAME, task_name, openssl_cipher, endpoint):
enabled_ciphers.add(cipher_suites.rfc_name(openssl_cipher))
log.info("%s ciphers enabled out of %s:", len(enabled_ciphers), len(possible_openssl_ciphers))
log.info("\n".join(to_sorted(list(enabled_ciphers))))
assert expected_ciphers == enabled_ciphers, "Enabled ciphers should match expected ciphers"
def to_sorted(coll):
""" Sorts a collection and returns it. """
coll.sort()
return coll
@pytest.mark.tls
@pytest.mark.sanity
@pytest.mark.recovery
def test_tls_recovery(kafka_service, service_account):
rc, stdout, _ = sdk_cmd.svc_cli(
kafka_service["package_name"], kafka_service["service"]["name"], "pod list"
)
assert rc == 0, "Pod list failed"
for pod in json.loads(stdout):
sdk_recovery.check_permanent_recovery(
kafka_service["package_name"],
kafka_service["service"]["name"],
pod,
recovery_timeout_s=25 * 60,
)
| 32.579487
| 100
| 0.701401
|
a602e1089f9a59400ab88cd97ca60d57693c4001
| 1,506
|
py
|
Python
|
model/quantization_head.py
|
chrisbyd/ContrastiveVehicleQuant
|
bf471988868cf0cb9713002dd1d6726272ecce7f
|
[
"MIT"
] | null | null | null |
model/quantization_head.py
|
chrisbyd/ContrastiveVehicleQuant
|
bf471988868cf0cb9713002dd1d6726272ecce7f
|
[
"MIT"
] | 1
|
2022-03-07T09:06:06.000Z
|
2022-03-07T09:06:06.000Z
|
model/quantization_head.py
|
chrisbyd/ContrastiveVehicleQuant
|
bf471988868cf0cb9713002dd1d6726272ecce7f
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
class GPQSoftMaxNet(nn.Module):
def __init__(self, training, intn_word=16, len_code=12, n_book=12):
super(GPQSoftMaxNet, self).__init__()
self.word = intn_word
self.len_code = len_code
self.n_book = n_book
self.training = training
print("len_code is {}, and n book is {}".format(len_code,n_book))
self.Z = torch.rand((intn_word, len_code*n_book), requires_grad=True, device = 'cuda')
torch.nn.init.xavier_uniform_(self.Z)
# self.Prototypes = torch.rand((n_classes, len_code*n_book), requires_grad=True, device = 'cuda')
# self.fake_linear = torch.nn.Linear(len_code*n_book, n_classes).cuda()
# def fake_C(self, features):
# return self.fake_linear(features)
def C(self, features, Prototypes):
x = torch.split(features, self.n_book, 1)
y = torch.split(Prototypes, self.n_book, 0)
for i in range(self.n_book):
sub_res = torch.unsqueeze(torch.matmul(x[i], y[i]), 2)
if i == 0:
res = sub_res
else:
res = torch.cat((res, sub_res), 2)
logits = torch.mean(res, 2)
return logits
def save(self, save_path):
torch.save(self.Z, save_path)
# torch.save(self.Prototypes, os.path.join('p.pt'))
@staticmethod
def load(pfn):
Z = torch.load(pfn)
# self.Prototypes = torch.load(os.path.join(pfn, 'p.pt'))
return Z
| 35.023256
| 105
| 0.600266
|
cc6e9bd519fd837138ba427f91cd59b73fcb37b7
| 24,251
|
py
|
Python
|
pytorchrl/agent/algorithms/sac.py
|
PyTorchRL/pytorchrl
|
055843ab58a06ba1f77da73082be6f23cf453ddd
|
[
"MIT"
] | 20
|
2021-01-12T16:31:34.000Z
|
2022-03-18T00:31:29.000Z
|
pytorchrl/agent/algorithms/sac.py
|
PyTorchRL/pytorchrl
|
055843ab58a06ba1f77da73082be6f23cf453ddd
|
[
"MIT"
] | 4
|
2021-01-19T09:29:58.000Z
|
2021-09-29T12:21:08.000Z
|
pytorchrl/agent/algorithms/sac.py
|
PyTorchRL/pytorchrl
|
055843ab58a06ba1f77da73082be6f23cf453ddd
|
[
"MIT"
] | 2
|
2021-01-12T16:07:37.000Z
|
2021-02-01T21:09:14.000Z
|
import itertools
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
import torch.optim as optim
import pytorchrl as prl
from pytorchrl.agent.algorithms.base import Algorithm
from pytorchrl.agent.algorithms.utils import get_gradients, set_gradients
from pytorchrl.agent.algorithms.policy_loss_addons import PolicyLossAddOn
class SAC(Algorithm):
"""
Soft Actor Critic algorithm class.
Algorithm class to execute SAC, from Haarnoja et al.
(https://arxiv.org/abs/1812.05905). Algorithms are modules generally
required by multiple workers, so SAC.algo_factory(...) returns a function
that can be passed on to workers to instantiate their own SAC module.
Parameters
----------
device : torch.device
CPU or specific GPU where class computations will take place.
actor : Actor
Actor_critic class instance.
lr_pi : float
Policy optimizer learning rate.
lr_q : float
Q-nets optimizer learning rate.
lr_alpha : float
Alpha optimizer learning rate.
gamma : float
Discount factor parameter.
initial_alpha : float
Initial entropy coefficient value (temperature).
polyak : float
SAC polyak averaging parameter.
num_updates : int
Num consecutive actor updates before data collection continues.
update_every : int
Regularity of actor updates in number environment steps.
start_steps : int
Num of initial random environment steps before learning starts.
mini_batch_size : int
Size of actor update batches.
target_update_interval : float
Regularity of target network updates with respect to actor Adam updates.
num_test_episodes : int
Number of episodes to complete in each test phase.
test_every : int
Regularity of test evaluations in actor updates.
max_grad_norm : float
Gradient clipping parameter.
policy_loss_addons : list
List of PolicyLossAddOn components adding loss terms to the algorithm policy loss.
Examples
--------
>>> create_algo = SAC.create_factory(
lr_q=1e-4, lr_pi=1e-4, lr_alpha=1e-4, gamma=0.99, polyak=0.995,
num_updates=50, update_every=50, test_every=5000, start_steps=20000,
mini_batch_size=64, alpha=1.0, num_test_episodes=0, target_update_interval=1)
"""
def __init__(self,
device,
actor,
lr_q=1e-4,
lr_pi=1e-4,
lr_alpha=1e-4,
gamma=0.99,
polyak=0.995,
num_updates=1,
update_every=50,
test_every=1000,
max_grad_norm=0.5,
initial_alpha=1.0,
start_steps=20000,
mini_batch_size=64,
num_test_episodes=5,
target_update_interval=1,
policy_loss_addons=[]):
# ---- General algo attributes ----------------------------------------
# Discount factor
self._gamma = gamma
# Number of steps collected with initial random policy
self._start_steps = int(start_steps)
# Times data in the buffer is re-used before data collection proceeds
self._num_epochs = int(1) # Default to 1 for off-policy algorithms
# Number of data samples collected between network update stages
self._update_every = int(update_every)
# Number mini batches per epoch
self._num_mini_batch = int(num_updates)
# Size of update mini batches
self._mini_batch_size = int(mini_batch_size)
# Number of network updates between test evaluations
self._test_every = int(test_every)
# Number of episodes to complete when testing
self._num_test_episodes = int(num_test_episodes)
# ---- SAC-specific attributes ----------------------------------------
self.iter = 0
self.polyak = polyak
self.device = device
self.actor = actor
self.max_grad_norm = max_grad_norm
self.target_update_interval = target_update_interval
assert hasattr(self.actor, "q1"), "SAC requires double q critic (num_critics=2)"
assert hasattr(self.actor, "q2"), "SAC requires double q critic (num_critics=2)"
self.log_alpha = torch.tensor(
data=[np.log(initial_alpha)], dtype=torch.float32,
requires_grad=True, device=device)
self.alpha = self.log_alpha.detach().exp()
# Compute target entropy
target_entropy = self.calculate_target_entropy()
self.target_entropy = torch.tensor(
data=target_entropy, dtype=torch.float32,
requires_grad=False, device=device)
# Create target networks
self.actor_targ = deepcopy(actor)
# Freeze target networks with respect to optimizers
for p in self.actor_targ.parameters():
p.requires_grad = False
# List of parameters for both Q-networks
q_params = itertools.chain(self.actor.q1.parameters(), self.actor.q2.parameters())
# List of parameters for policy network
p_params = itertools.chain(self.actor.policy_net.parameters())
# ----- Optimizers ----------------------------------------------------
self.pi_optimizer = optim.Adam(p_params, lr=lr_pi)
self.q_optimizer = optim.Adam(q_params, lr=lr_q)
self.alpha_optimizer = optim.Adam([self.log_alpha], lr=lr_alpha)
# ----- Policy Loss Addons --------------------------------------------
# Sanity check, policy_loss_addons is a PolicyLossAddOn instance
# or a list of PolicyLossAddOn instances
assert isinstance(policy_loss_addons, (PolicyLossAddOn, list)),\
"SAC policy_loss_addons parameter should be a PolicyLossAddOn instance " \
"or a list of PolicyLossAddOn instances"
if isinstance(policy_loss_addons, list):
for addon in policy_loss_addons:
assert isinstance(addon, PolicyLossAddOn), \
"SAC policy_loss_addons parameter should be a PolicyLossAddOn " \
"instance or a list of PolicyLossAddOn instances"
else:
policy_loss_addons = [policy_loss_addons]
self.policy_loss_addons = policy_loss_addons
for addon in self.policy_loss_addons:
addon.setup(self.device)
@classmethod
def create_factory(cls,
lr_q=1e-4,
lr_pi=1e-4,
lr_alpha=1e-4,
gamma=0.99,
polyak=0.995,
num_updates=50,
test_every=5000,
update_every=50,
start_steps=1000,
max_grad_norm=0.5,
initial_alpha=1.0,
mini_batch_size=64,
num_test_episodes=5,
target_update_interval=1.0,
policy_loss_addons=[]):
"""
Returns a function to create new SAC instances.
Parameters
----------
lr_pi : float
Policy optimizer learning rate.
lr_q : float
Q-nets optimizer learning rate.
lr_alpha : float
Alpha optimizer learning rate.
gamma : float
Discount factor parameter.
initial_alpha : float
Initial entropy coefficient value.
polyak : float
SAC polyak averaging parameter.
num_updates : int
Num consecutive actor updates before data collection continues.
update_every : int
Regularity of actor updates in number environment steps.
start_steps : int
Num of initial random environment steps before learning starts.
mini_batch_size : int
Size of actor update batches.
target_update_interval : float
Regularity of target network updates with respect to actor Adam updates.
num_test_episodes : int
Number of episodes to complete in each test phase.
test_every : int
Regularity of test evaluations in actor updates.
max_grad_norm : float
Gradient clipping parameter.
policy_loss_addons : list
List of PolicyLossAddOn components adding loss terms to the algorithm policy loss.
Returns
-------
create_algo_instance : func
Function that creates a new SAC class instance.
algo_name : str
Name of the algorithm.
"""
def create_algo_instance(device, actor):
return cls(lr_q=lr_q,
lr_pi=lr_pi,
lr_alpha=lr_alpha,
gamma=gamma,
device=device,
polyak=polyak,
actor=actor,
test_every=test_every,
start_steps=start_steps,
num_updates=num_updates,
update_every=update_every,
initial_alpha=initial_alpha,
max_grad_norm=max_grad_norm,
mini_batch_size=mini_batch_size,
num_test_episodes=num_test_episodes,
target_update_interval=target_update_interval,
policy_loss_addons=policy_loss_addons)
return create_algo_instance, prl.SAC
@property
def gamma(self):
"""Returns discount factor gamma."""
return self._gamma
@property
def start_steps(self):
"""Returns the number of steps to collect with initial random policy."""
return self._start_steps
@property
def num_epochs(self):
"""
Returns the number of times the whole buffer is re-used before data
collection proceeds.
"""
return self._num_epochs
@property
def update_every(self):
"""
Returns the number of data samples collected between
network update stages.
"""
return self._update_every
@property
def num_mini_batch(self):
"""
Returns the number of times the whole buffer is re-used before data
collection proceeds.
"""
return self._num_mini_batch
@property
def mini_batch_size(self):
"""
Returns the number of mini batches per epoch.
"""
return self._mini_batch_size
@property
def test_every(self):
"""Number of network updates between test evaluations."""
return self._test_every
@property
def num_test_episodes(self):
"""
Returns the number of episodes to complete when testing.
"""
return self._num_test_episodes
@property
def discrete_version(self):
"""Returns True if action_space is discrete."""
return self.actor.action_space.__class__.__name__ == "Discrete"
def acting_step(self, obs, rhs, done, deterministic=False):
"""
SAC acting function.
Parameters
----------
obs : torch.tensor
Current world observation
rhs : torch.tensor
RNN recurrent hidden state (if policy is not a RNN, rhs will contain zeroes).
done : torch.tensor
1.0 if current obs is the last one in the episode, else 0.0.
deterministic : bool
Whether to randomly sample action from predicted distribution or taking the mode.
Returns
-------
action : torch.tensor
Predicted next action.
clipped_action : torch.tensor
Predicted next action (clipped to be within action space).
rhs : torch.tensor
Policy recurrent hidden state (if policy is not a RNN, rhs will contain zeroes).
other : dict
Additional SAC predictions, which are not used in other algorithms.
"""
with torch.no_grad():
(action, clipped_action, logp_action, rhs,
entropy_dist, dist) = self.actor.get_action(
obs, rhs, done, deterministic=deterministic)
return action, clipped_action, rhs, {}
def compute_loss_q(self, batch, n_step=1, per_weights=1):
"""
Calculate SAC Q-nets loss
Parameters
----------
batch : dict
Data batch dict containing all required tensors to compute SAC losses.
n_step : int or float
Number of future steps used to computed the truncated n-step return value.
per_weights :
Prioritized Experience Replay (PER) importance sampling weights or 1.0.
Returns
-------
loss_q1 : torch.tensor
Q1-net loss.
loss_q2 : torch.tensor
Q2-net loss.
loss_q : torch.tensor
Weighted average of loss_q1 and loss_q2.
errors : torch.tensor
TD errors.
"""
o, rhs, d = batch[prl.OBS], batch[prl.RHS], batch[prl.DONE]
a, r = batch[prl.ACT], batch[prl.REW]
o2, rhs2, d2 = batch[prl.OBS2], batch[prl.RHS2], batch[prl.DONE2]
if self.discrete_version:
# Q-values for all actions
q_scores = self.actor.get_q_scores(o, rhs, d)
q1 = q_scores.get("q1").gather(1, a.long())
q2 = q_scores.get("q2").gather(1, a.long())
# Bellman backup for Q functions
with torch.no_grad():
# Target actions come from *current* policy
a2, _, _, _, _, dist = self.actor.get_action(o2, rhs2, d2)
bs, n = o.shape[0], dist.probs.shape[-1]
actions = torch.arange(n)[..., None].expand(-1, bs).to(self.device)
p_a2 = dist.expand((n, bs)).log_prob(actions).exp().transpose(0, 1)
# p_a2 = dist.probs
z = (p_a2 == 0.0).float() * 1e-8
logp_a2 = torch.log(p_a2 + z)
# Target Q-values
q_scores_targ = self.actor_targ.get_q_scores(o2, rhs2, d2)
q1_targ = q_scores_targ.get("q1")
q2_targ = q_scores_targ.get("q2")
q_targ = (p_a2 * (torch.min(q1_targ, q2_targ) - self.alpha * logp_a2)).sum(dim=1, keepdim=True)
assert r.shape == q_targ.shape
backup = r + (self.gamma ** n_step) * (1 - d2) * q_targ
else:
# Q-values for all actions
q_scores = self.actor.get_q_scores(o, rhs, d, a)
q1 = q_scores.get("q1")
q2 = q_scores.get("q2")
# Bellman backup for Q functions
with torch.no_grad():
# Target actions come from *current* policy
a2, _, logp_a2, _, _, dist = self.actor.get_action(o2, rhs2, d2)
# Target Q-values
q_scores_targ = self.actor_targ.get_q_scores(o2, rhs2, d2, a2)
q1_targ = q_scores_targ.get("q1")
q2_targ = q_scores_targ.get("q2")
q_pi_targ = torch.min(q1_targ, q2_targ)
backup = r + (self.gamma ** n_step) * (1 - d2) * (q_pi_targ - self.alpha * logp_a2)
# MSE loss against Bellman backup
loss_q1 = (((q1 - backup) ** 2) * per_weights).mean()
loss_q2 = (((q2 - backup) ** 2) * per_weights).mean()
loss_q = 0.5 * loss_q1 + 0.5 * loss_q2
# errors = (torch.min(q1, q2) - backup).abs().detach().cpu()
# errors = torch.max((q1 - backup).abs(), (q2 - backup).abs()).detach().cpu()
errors = (0.5 * (q1 - backup).abs() + 0.5 * (q2 - backup).abs()).detach().cpu()
return loss_q1, loss_q2, loss_q, errors
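# The target computed above is the (n-step) soft Bellman backup from Haarnoja et al.:
#   y = r + gamma**n_step * (1 - done) * (min(Q1_targ, Q2_targ)(s', a') - alpha * log pi(a'|s'))
# with a' sampled from the current policy; in the discrete case the minimum of the
# target critics is instead averaged over the action probabilities.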
def compute_loss_pi(self, batch, per_weights=1):
"""
Calculate SAC policy loss.
Parameters
----------
batch : dict
Data batch dict containing all required tensors to compute SAC losses.
per_weights :
Prioritized Experience Replay (PER) importance sampling weights or 1.0.
Returns
-------
loss_pi : torch.tensor
SAC policy loss.
logp_pi : torch.tensor
Log probability of predicted next action.
"""
o, rhs, d = batch[prl.OBS], batch[prl.RHS], batch[prl.DONE]
if self.discrete_version:
pi, _, _, _, _, dist = self.actor.get_action(o, rhs, d)
# Get action log probs
bs, n = o.shape[0], dist.probs.shape[-1]
actions = torch.arange(n)[..., None].expand(-1, bs).to(self.device)
p_pi = dist.expand((n, bs)).log_prob(actions).exp().transpose(0, 1)
# p_pi = dist.probs
z = (p_pi == 0.0).float() * 1e-8
logp_pi = torch.log(p_pi + z)
logp_pi = torch.sum(p_pi * logp_pi, dim=1, keepdim=True)
q_scores = self.actor.get_q_scores(o, rhs, d)
q1 = q_scores.get("q1")
q2 = q_scores.get("q2")
q_pi = torch.sum(torch.min(q1, q2) * p_pi, dim=1, keepdim=True)
else:
pi, _, logp_pi, _, _, dist = self.actor.get_action(o, rhs, d)
q_scores = self.actor.get_q_scores(o, rhs, d, pi)
q1 = q_scores.get("q1")
q2 = q_scores.get("q2")
q_pi = torch.min(q1, q2)
loss_pi = ((self.alpha * logp_pi - q_pi) * per_weights).mean()
# Extend policy loss with addons
for addon in self.policy_loss_addons:
loss_pi += addon.compute_loss_term(self.actor, dist, batch)
return loss_pi, logp_pi
def compute_loss_alpha(self, log_probs, per_weights=1):
"""
Calculate SAC entropy loss.
Parameters
----------
log_probs : torch.tensor
Log probability of predicted next action.
per_weights :
Prioritized Experience Replay (PER) importance sampling weights or 1.0.
Returns
-------
alpha_loss : torch.tensor
SAC entropy loss.
"""
alpha_loss = - ((self.log_alpha * (log_probs + self.target_entropy).detach()) * per_weights).mean()
return alpha_loss
def calculate_target_entropy(self):
"""Calculate SAC target entropy"""
if self.discrete_version:
target = - np.log(1.0 / self.actor.action_space.n) * 0.98
else:
target_old = - self.actor.action_space.shape[0]
target = - np.prod(self.actor.action_space.shape)
assert target_old == target
return target
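# Illustrative values for the heuristic above (assumed example spaces, not from the
# original source):
#   Discrete(4)     -> target = -log(1/4) * 0.98 ≈ 1.36 (close to log 4 ≈ 1.386,
#                      the entropy of a uniform policy over 4 actions)
#   Box(shape=(6,)) -> target = -6.0 (the standard SAC heuristic -|A|)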
def compute_gradients(self, batch, grads_to_cpu=True):
"""
Compute loss and compute gradients but don't do optimization step,
return gradients instead.
Parameters
----------
batch : dict
data batch containing all required tensors to compute SAC losses.
grads_to_cpu : bool
If gradient tensor will be sent to another node, need to be in CPU.
Returns
-------
grads : list of tensors
List of actor gradients.
info : dict
Dict containing current SAC iteration information.
"""
# Recurrent burn-in
if self.actor.is_recurrent:
batch = self.actor.burn_in_recurrent_states(batch)
# N-step returns
n_step = batch["n_step"] if "n_step" in batch else 1.0
# PER
per_weights = batch["per_weights"] if "per_weights" in batch else 1.0
# Run one gradient descent step for Q1 and Q2
loss_q1, loss_q2, loss_q, errors = self.compute_loss_q(batch, n_step, per_weights)
self.q_optimizer.zero_grad()
loss_q.backward(retain_graph=True)
nn.utils.clip_grad_norm_(itertools.chain(
self.actor.q1.parameters(), self.actor.q2.parameters()), self.max_grad_norm)
q_grads = get_gradients(self.actor.q1, self.actor.q2, grads_to_cpu=grads_to_cpu)
# Freeze Q-networks so you don't waste computational effort
# computing gradients for them during the policy learning step.
for p in itertools.chain(self.actor.q1.parameters(), self.actor.q2.parameters()):
p.requires_grad = False
# Run one gradient descent step for pi.
loss_pi, logp_pi = self.compute_loss_pi(batch, per_weights)
self.pi_optimizer.zero_grad()
loss_pi.backward()
nn.utils.clip_grad_norm_(self.actor.policy_net.parameters(), self.max_grad_norm)
pi_grads = get_gradients(self.actor.policy_net, grads_to_cpu=grads_to_cpu)
for p in itertools.chain(self.actor.q1.parameters(), self.actor.q2.parameters()):
p.requires_grad = True
# Run one gradient descent step for alpha.
self.alpha_optimizer.zero_grad()
loss_alpha = self.compute_loss_alpha(logp_pi, per_weights)
loss_alpha.backward()
info = {
"loss_q1": loss_q1.detach().item(),
"loss_q2": loss_q2.detach().item(),
"loss_pi": loss_pi.detach().item(),
"loss_alpha": loss_alpha.detach().item(),
"alpha": self.alpha.detach().item(),
}
if "per_weights" in batch:
info.update({"errors": errors})
grads = {"q_grads": q_grads, "pi_grads": pi_grads}
return grads, info
def update_target_networks(self):
"""Update actor critic target networks with polyak averaging"""
if self.iter % self.target_update_interval == 0:
with torch.no_grad():
for p, p_targ in zip(self.actor.parameters(), self.actor_targ.parameters()):
p_targ.data.mul_(self.polyak)
p_targ.data.add_((1 - self.polyak) * p.data)
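# The in-place update above implements Polyak (exponential moving average) smoothing:
#   theta_target <- polyak * theta_target + (1 - polyak) * theta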
def apply_gradients(self, gradients=None):
"""
Take an optimization step, previously setting new gradients if provided.
Update also target networks.
Parameters
----------
gradients : list of tensors
List of actor gradients.
"""
if gradients is not None:
set_gradients(
self.actor.policy_net,
gradients=gradients["pi_grads"], device=self.device)
set_gradients(
self.actor.q1, self.actor.q2,
gradients=gradients["q_grads"], device=self.device)
self.q_optimizer.step()
self.pi_optimizer.step()
self.alpha_optimizer.step()
self.alpha = self.log_alpha.detach().exp()
# Update target networks by polyak averaging.
self.iter += 1
self.update_target_networks()
def set_weights(self, actor_weights):
"""
Update actor with the given weights. Update also target networks.
Parameters
----------
actor_weights : dict of tensors
Dict containing actor weights to be set.
"""
self.actor.load_state_dict(actor_weights)
self.alpha_optimizer.step()
self.alpha = self.log_alpha.detach().exp()
# Update target networks by polyak averaging.
self.iter += 1
self.update_target_networks()
def update_algorithm_parameter(self, parameter_name, new_parameter_value):
"""
If `parameter_name` is an attribute of the algorithm, change its value
to `new_parameter_value value`.
Parameters
----------
parameter_name : str
Worker.algo attribute name
new_parameter_value : int or float
New value for `parameter_name`.
"""
if hasattr(self, parameter_name):
setattr(self, parameter_name, new_parameter_value)
if parameter_name == "lr":
for param_group in self.pi_optimizer.param_groups:
param_group['lr'] = new_parameter_value
for param_group in self.q_optimizer.param_groups:
param_group['lr'] = new_parameter_value
for param_group in self.alpha_optimizer.param_groups:
param_group['lr'] = new_parameter_value
| 36.087798
| 111
| 0.584718
|