Dataset column schema (⌀ marks a nullable column; ranges give the minimum and maximum observed lengths or values):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4-209 |
| max_stars_repo_name | string | lengths 5-121 |
| max_stars_repo_head_hexsha | string | lengths 40-40 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_path | string | lengths 4-209 |
| max_issues_repo_name | string | lengths 5-121 |
| max_issues_repo_head_hexsha | string | lengths 40-40 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_path | string | lengths 4-209 |
| max_forks_repo_name | string | lengths 5-121 |
| max_forks_repo_head_hexsha | string | lengths 40-40 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 ⌀ |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
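Each row below describes one source file, with the full file text in the `content` column. As a minimal, hypothetical sketch of how a dump with this schema could be inspected (the Parquet file name and the pandas-based approach are assumptions, not part of the source), one might do:

# Illustrative only: assumes the table has been exported to a local Parquet
# file; "code_files.parquet" is a placeholder name.
import pandas as pd

df = pd.read_parquet("code_files.parquet")

# Keep permissively licensed Python files with at least 5 stars.
subset = df[
    (df["lang"] == "Python")
    & (df["max_stars_count"].fillna(0) >= 5)
    & (df["max_stars_repo_licenses"].apply(lambda licenses: "MIT" in licenses))
]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size", "alphanum_fraction"]].head())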
hexsha: ccea48827abba3f8528cfac40c37658cee303eb7 | size: 3,422 | ext: py | lang: Python
max_stars/issues/forks_repo_path: tests/web.adblockplus.org/tests/test_CookiesSettings.py
max_stars/issues/forks_repo_name: adblockplus/web.adblockplus.org
max_stars/issues/forks_repo_head_hexsha: c2c570ce4f4296afc3577afe233c6b23b128f206
max_stars/issues/forks_repo_licenses: ["MIT"]
max_stars_count: 9 | stars events: 2016-01-29T18:05:29.000Z to 2021-10-06T04:21:55.000Z
max_issues_count: 9 | issues events: 2015-04-06T19:03:32.000Z to 2019-05-28T13:34:55.000Z
max_forks_count: 18 | forks events: 2015-04-06T17:42:31.000Z to 2021-10-06T04:26:29.000Z
content:
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from pages.landingPage import LandingPage
from chunks.topMenu import TopMenu
from chunks.cookiesPrompt import CookiesPrompt
import utils.global_functions as gf
@pytest.fixture(scope="function")
def driver():
options = Options()
gf.setup(options)
driver = webdriver.Chrome(options=options)
yield driver
driver.close()
def test_verify_prompt_displayed_when_not_agreeing(driver):
landing_page = LandingPage(driver)
landing_page.go_home()
top_menu = TopMenu(driver)
top_menu.click_about_menu_item()
cookies_prompt = CookiesPrompt(driver)
assert cookies_prompt.is_prompt_visible()
def test_verify_prompt_not_displayed_when_agreeing(driver):
landing_page = LandingPage(driver)
landing_page.go_home()
cookies_prompt = CookiesPrompt(driver)
cookies_prompt.click_ok_got_it_button()
top_menu = TopMenu(driver)
top_menu.click_about_menu_item()
assert not cookies_prompt.is_prompt_visible()
def test_verify_all_options_selected_by_default(driver):
landing_page = LandingPage(driver)
landing_page.go_home()
cookies_prompt = CookiesPrompt(driver)
cookie_settings_form = cookies_prompt.click_edit_cookie_settings_button()
assert cookie_settings_form.is_necessary_cookies_toggle_enabled()
assert cookie_settings_form.is_tracking_cookies_toggle_enabled()
assert cookie_settings_form.is_ab_testing_cookies_toggle_enabled()
def test_disable_tracking_save(driver):
landing_page = LandingPage(driver)
landing_page.go_home()
cookies_prompt = CookiesPrompt(driver)
cookie_settings_form = cookies_prompt.click_edit_cookie_settings_button()
cookie_settings_form.click_tracking_cookies_toggle()
cookie_settings_form.click_save_preferences_button()
assert not cookies_prompt.is_prompt_visible()
def test_disable_ab_testing_save(driver):
landing_page = LandingPage(driver)
landing_page.go_home()
cookies_prompt = CookiesPrompt(driver)
cookie_settings_form = cookies_prompt.click_edit_cookie_settings_button()
cookie_settings_form.click_ab_testing_cookies_toggle()
cookie_settings_form.click_save_preferences_button()
assert not cookies_prompt.is_prompt_visible()
def test_verify_tracking_toggle_affects_ab_when_ab_enabled(driver):
landing_page = LandingPage(driver)
landing_page.go_home()
cookies_prompt = CookiesPrompt(driver)
cookie_settings_form = cookies_prompt.click_edit_cookie_settings_button()
cookie_settings_form.click_tracking_cookies_toggle()
assert not cookie_settings_form.is_ab_testing_cookies_toggle_enabled()
cookie_settings_form.click_tracking_cookies_toggle()
assert cookie_settings_form.is_ab_testing_cookies_toggle_enabled()
def test_verify_tracking_toggle_does_not_affect_ab_when_ab_disabled(driver):
landing_page = LandingPage(driver)
landing_page.go_home()
cookies_prompt = CookiesPrompt(driver)
cookie_settings_form = cookies_prompt.click_edit_cookie_settings_button()
cookie_settings_form.click_ab_testing_cookies_toggle()
cookie_settings_form.click_tracking_cookies_toggle()
assert not cookie_settings_form.is_ab_testing_cookies_toggle_enabled()
cookie_settings_form.click_tracking_cookies_toggle()
assert not cookie_settings_form.is_ab_testing_cookies_toggle_enabled()
avg_line_length: 33.223301 | max_line_length: 77 | alphanum_fraction: 0.816774
hexsha: fa9d45779528f1a76524617b3948ba2e1195dfe7 | size: 13,616 | ext: py | lang: Python
max_stars/issues/forks_repo_path: tests/unit/test_configs.py
max_stars/issues/forks_repo_name: nickmelnikov82/dash
max_stars/issues/forks_repo_head_hexsha: e774908da770bee83f3213e0307c27ed8a40500e
max_stars/issues/forks_repo_licenses: ["MIT"]
max_stars_count: 4 | stars events: 2015-04-10T02:52:35.000Z to 2015-04-12T19:33:48.000Z
max_issues_count: null | issues events: null
max_forks_count: null | forks events: null
content:
import os
import logging
import pytest
from flask import Flask
from dash import Dash, exceptions as _exc
# noinspection PyProtectedMember
from dash._configs import (
pathname_configs,
DASH_ENV_VARS,
get_combined_config,
load_dash_env_vars,
)
from dash._utils import AttributeDict
from dash._get_paths import (
app_get_asset_url,
app_get_relative_path,
app_strip_relative_path,
get_asset_url,
get_relative_path,
strip_relative_path,
)
@pytest.fixture
def empty_environ():
for k in DASH_ENV_VARS.keys():
if k in os.environ:
os.environ.pop(k)
def test_dash_env_vars(empty_environ):
assert {None} == {
val for _, val in DASH_ENV_VARS.items()
}, "initial var values are None without extra OS environ setting"
@pytest.mark.parametrize(
"route_prefix, req_prefix, expected_route, expected_req",
[
(None, None, "/", "/"),
("/dash/", None, None, "/dash/"),
(None, "/my-dash-app/", "/", "/my-dash-app/"),
("/dash/", "/my-dash-app/dash/", "/dash/", "/my-dash-app/dash/"),
],
)
def test_valid_pathname_prefix_init(
empty_environ, route_prefix, req_prefix, expected_route, expected_req
):
_, routes, req = pathname_configs(
routes_pathname_prefix=route_prefix, requests_pathname_prefix=req_prefix
)
if expected_route is not None:
assert routes == expected_route
assert req == expected_req
def test_invalid_pathname_prefix(empty_environ):
with pytest.raises(_exc.InvalidConfig, match="url_base_pathname"):
_, _, _ = pathname_configs("/my-path", "/another-path")
with pytest.raises(_exc.InvalidConfig) as excinfo:
_, _, _ = pathname_configs(
url_base_pathname="/invalid", routes_pathname_prefix="/invalid"
)
assert str(excinfo.value).split(".")[0].endswith("`routes_pathname_prefix`")
with pytest.raises(_exc.InvalidConfig) as excinfo:
_, _, _ = pathname_configs(
url_base_pathname="/my-path", requests_pathname_prefix="/another-path"
)
assert str(excinfo.value).split(".")[0].endswith("`requests_pathname_prefix`")
with pytest.raises(_exc.InvalidConfig, match="start with `/`"):
_, _, _ = pathname_configs("my-path")
with pytest.raises(_exc.InvalidConfig, match="end with `/`"):
_, _, _ = pathname_configs("/my-path")
def test_pathname_prefix_from_environ_app_name(empty_environ):
os.environ["DASH_APP_NAME"] = "my-dash-app"
_, routes, req = pathname_configs()
assert req == "/my-dash-app/"
assert routes == "/"
def test_pathname_prefix_environ_routes(empty_environ):
os.environ["DASH_ROUTES_PATHNAME_PREFIX"] = "/routes/"
_, routes, _ = pathname_configs()
assert routes == "/routes/"
def test_pathname_prefix_environ_requests(empty_environ):
os.environ["DASH_REQUESTS_PATHNAME_PREFIX"] = "/requests/"
_, _, req = pathname_configs()
assert req == "/requests/"
@pytest.mark.parametrize(
"req, expected",
[
("/", "/assets/reset.css"),
("/requests/", "/requests/assets/reset.css"),
("/requests/routes/", "/requests/routes/assets/reset.css"),
],
)
def test_pathname_prefix_assets(empty_environ, req, expected):
config = AttributeDict(assets_external_path=req, assets_url_path="assets")
path = app_get_asset_url(config, "reset.css")
assert path == expected
@pytest.mark.parametrize(
"requests_pathname_prefix, assets_external_path, assets_url_path, expected",
[
(None, None, "assets", "/assets/reset.css"),
("/app/", None, "assets", "/app/assets/reset.css"),
(None, None, "css", "/css/reset.css"),
("/app/", None, "css", "/app/css/reset.css"),
(
None,
"http://external.com/",
"assets",
"http://external.com/assets/reset.css",
),
("/app/", "http://external.com/", "css", "http://external.com/css/reset.css"),
],
)
def test_asset_url(
empty_environ,
requests_pathname_prefix,
assets_external_path,
assets_url_path,
expected,
):
app = Dash(
"Dash",
requests_pathname_prefix=requests_pathname_prefix,
assets_external_path=assets_external_path,
assets_url_path=assets_url_path,
)
app_path = app.get_asset_url("reset.css")
dash_path = get_asset_url("reset.css")
assert app_path == dash_path == expected
@pytest.mark.parametrize(
"requests_pathname_prefix, expected",
[
(None, "/page2"),
("/app/", "/app/page2"),
],
)
def test_get_relative_path(
empty_environ,
requests_pathname_prefix,
expected,
):
app = Dash(
"Dash",
requests_pathname_prefix=requests_pathname_prefix,
)
app_path = app.get_relative_path("/page2")
dash_path = get_relative_path("/page2")
assert app_path == dash_path == expected
@pytest.mark.parametrize(
"requests_pathname_prefix, expected",
[
(None, "/app/page2"),
("/app/", "/page2"),
],
)
def test_strip_relative_path(
empty_environ,
requests_pathname_prefix,
expected,
):
app = Dash(
"Dash",
requests_pathname_prefix=requests_pathname_prefix,
)
app_path = app.strip_relative_path("/app/page2")
dash_path = strip_relative_path("/app/page2")
assert app_path == dash_path == expected
def test_get_combined_config_dev_tools_ui(empty_environ):
val1 = get_combined_config("ui", None, default=False)
assert (
not val1
), "should return the default value if None is provided for init and environment"
os.environ["DASH_UI"] = "true"
val2 = get_combined_config("ui", None, default=False)
assert val2, "should return the set environment value as True"
val3 = get_combined_config("ui", False, default=True)
assert not val3, "init value overrides the environment value"
def test_get_combined_config_props_check(empty_environ):
val1 = get_combined_config("props_check", None, default=False)
assert (
not val1
), "should return the default value if None is provided for init and environment"
os.environ["DASH_PROPS_CHECK"] = "true"
val2 = get_combined_config("props_check", None, default=False)
assert val2, "should return the set environment value as True"
val3 = get_combined_config("props_check", False, default=True)
assert not val3, "init value overrides the environment value"
def test_load_dash_env_vars_reflects_to_os_environ(empty_environ):
for var in DASH_ENV_VARS.keys():
os.environ[var] = "true"
vars = load_dash_env_vars()
assert vars[var] == "true"
os.environ[var] = "false"
vars = load_dash_env_vars()
assert vars[var] == "false"
@pytest.mark.parametrize(
"name, server, expected",
[
(None, True, "__main__"),
("test", True, "test"),
("test", False, "test"),
(None, Flask("test"), "test"),
("test", Flask("other"), "test"),
],
)
def test_app_name_server(empty_environ, name, server, expected):
app = Dash(name=name, server=server)
assert app.config.name == expected
@pytest.mark.parametrize(
"prefix, partial_path, expected",
[
("/", "", "/"),
("/my-dash-app/", "", "/my-dash-app/"),
("/", "/", "/"),
("/my-dash-app/", "/", "/my-dash-app/"),
("/", "/page-1", "/page-1"),
("/my-dash-app/", "/page-1", "/my-dash-app/page-1"),
("/", "/page-1/", "/page-1/"),
("/my-dash-app/", "/page-1/", "/my-dash-app/page-1/"),
("/", "/page-1/sub-page-1", "/page-1/sub-page-1"),
("/my-dash-app/", "/page-1/sub-page-1", "/my-dash-app/page-1/sub-page-1"),
],
)
def test_pathname_prefix_relative_url(prefix, partial_path, expected):
path = app_get_relative_path(prefix, partial_path)
assert path == expected
@pytest.mark.parametrize(
"prefix, partial_path",
[("/", "relative-page-1"), ("/my-dash-app/", "relative-page-1")],
)
def test_invalid_get_relative_path(prefix, partial_path):
with pytest.raises(_exc.UnsupportedRelativePath):
app_get_relative_path(prefix, partial_path)
@pytest.mark.parametrize(
"prefix, partial_path, expected",
[
("/", None, None),
("/my-dash-app/", None, None),
("/", "/", ""),
("/my-dash-app/", "/my-dash-app", ""),
("/my-dash-app/", "/my-dash-app/", ""),
("/", "/page-1", "page-1"),
("/my-dash-app/", "/my-dash-app/page-1", "page-1"),
("/", "/page-1/", "page-1"),
("/my-dash-app/", "/my-dash-app/page-1/", "page-1"),
("/", "/page-1/sub-page-1", "page-1/sub-page-1"),
("/my-dash-app/", "/my-dash-app/page-1/sub-page-1", "page-1/sub-page-1"),
("/", "/page-1/sub-page-1/", "page-1/sub-page-1"),
("/my-dash-app/", "/my-dash-app/page-1/sub-page-1/", "page-1/sub-page-1"),
("/my-dash-app/", "/my-dash-app/my-dash-app/", "my-dash-app"),
(
"/my-dash-app/",
"/my-dash-app/something-else/my-dash-app/",
"something-else/my-dash-app",
),
],
)
def test_strip_relative_path(prefix, partial_path, expected):
path = app_strip_relative_path(prefix, partial_path)
assert path == expected
@pytest.mark.parametrize(
"prefix, partial_path",
[
("/", "relative-page-1"),
("/my-dash-app", "relative-page-1"),
("/my-dash-app", "/some-other-path"),
],
)
def test_invalid_strip_relative_path(prefix, partial_path):
with pytest.raises(_exc.UnsupportedRelativePath):
app_strip_relative_path(prefix, partial_path)
def test_port_env_fail_str(empty_environ):
app = Dash()
with pytest.raises(Exception) as excinfo:
app.run(port="garbage")
assert (
excinfo.exconly()
== "ValueError: Expecting an integer from 1 to 65535, found port='garbage'"
)
def test_port_env_fail_range(empty_environ):
app = Dash()
with pytest.raises(Exception) as excinfo:
app.run(port="0")
assert (
excinfo.exconly()
== "AssertionError: Expecting an integer from 1 to 65535, found port=0"
)
with pytest.raises(Exception) as excinfo:
app.run(port="65536")
assert (
excinfo.exconly()
== "AssertionError: Expecting an integer from 1 to 65535, found port=65536"
)
@pytest.mark.parametrize(
"setlevel_warning",
[False, True],
)
def test_no_proxy_success(mocker, caplog, empty_environ, setlevel_warning):
app = Dash()
if setlevel_warning:
app.logger.setLevel(logging.WARNING)
# mock out the run method so we don't actually start listening forever
mocker.patch.object(app.server, "run")
app.run(port=8787)
STARTUP_MESSAGE = "Dash is running on http://127.0.0.1:8787/\n"
if setlevel_warning:
assert caplog.text is None or STARTUP_MESSAGE not in caplog.text
else:
assert STARTUP_MESSAGE in caplog.text
@pytest.mark.parametrize(
"proxy, host, port, path",
[
("https://daash.plot.ly", "127.0.0.1", 8050, "/"),
("https://daaash.plot.ly", "0.0.0.0", 8050, "/a/b/c/"),
("https://daaaash.plot.ly", "127.0.0.1", 1234, "/"),
("http://go.away", "127.0.0.1", 8050, "/now/"),
("http://my.server.tv:8765", "0.0.0.0", 80, "/"),
],
)
def test_proxy_success(mocker, caplog, empty_environ, proxy, host, port, path):
proxystr = "http://{}:{}::{}".format(host, port, proxy)
app = Dash(url_base_pathname=path)
mocker.patch.object(app.server, "run")
app.run(proxy=proxystr, host=host, port=port)
assert "Dash is running on {}{}\n".format(proxy, path) in caplog.text
def test_proxy_failure(mocker, empty_environ):
app = Dash()
# if the tests work we'll never get to server.run, but keep the mock
# in case something is amiss and we don't get an exception.
mocker.patch.object(app.server, "run")
with pytest.raises(_exc.ProxyError) as excinfo:
app.run(
proxy="https://127.0.0.1:8055::http://plot.ly", host="127.0.0.1", port=8055
)
assert "protocol: http is incompatible with the proxy" in excinfo.exconly()
assert "you must use protocol: https" in excinfo.exconly()
with pytest.raises(_exc.ProxyError) as excinfo:
app.run(
proxy="http://0.0.0.0:8055::http://plot.ly", host="127.0.0.1", port=8055
)
assert "host: 127.0.0.1 is incompatible with the proxy" in excinfo.exconly()
assert "you must use host: 0.0.0.0" in excinfo.exconly()
with pytest.raises(_exc.ProxyError) as excinfo:
app.run(proxy="http://0.0.0.0:8155::http://plot.ly", host="0.0.0.0", port=8055)
assert "port: 8055 is incompatible with the proxy" in excinfo.exconly()
assert "you must use port: 8155" in excinfo.exconly()
def test_title():
app = Dash()
assert "<title>Dash</title>" in app.index()
app = Dash()
app.title = "Hello World"
assert "<title>Hello World</title>" in app.index()
app = Dash(title="Custom Title")
assert "<title>Custom Title</title>" in app.index()
def test_app_delayed_config():
app = Dash(server=False)
app.init_app(app=Flask("test"), requests_pathname_prefix="/dash/")
assert app.config.requests_pathname_prefix == "/dash/"
with pytest.raises(AttributeError):
app.config.name = "cannot update me"
def test_app_invalid_delayed_config():
app = Dash(server=False)
with pytest.raises(AttributeError):
app.init_app(app=Flask("test"), name="too late 2 update")
avg_line_length: 30.945455 | max_line_length: 87 | alphanum_fraction: 0.630435
hexsha: 7f172f17224685d423a8a5b019e874add16aeccc | size: 22,927 | ext: py | lang: Python
max_stars/issues/forks_repo_path: sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_application_gateway_private_endpoint_connections_operations.py
max_stars/issues/forks_repo_name: rsdoherty/azure-sdk-for-python
max_stars/issues/forks_repo_head_hexsha: 6bba5326677468e6660845a703686327178bb7b1
max_stars/issues/forks_repo_licenses: ["MIT"]
max_stars_count: 3 | stars events: 2020-06-23T02:25:27.000Z to 2021-09-07T18:48:11.000Z
max_issues_count: 510 | issues events: 2019-07-17T16:11:19.000Z to 2021-08-02T08:38:32.000Z
max_forks_count: 5 | forks events: 2019-09-04T12:51:37.000Z to 2020-09-16T07:28:40.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewayPrivateEndpointConnectionsOperations:
"""ApplicationGatewayPrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified private endpoint connection on application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param connection_name: The name of the application gateway private endpoint connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
**kwargs
) -> Optional["_models.ApplicationGatewayPrivateEndpointConnection"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationGatewayPrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationGatewayPrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
**kwargs
) -> AsyncLROPoller["_models.ApplicationGatewayPrivateEndpointConnection"]:
"""Updates the specified private endpoint connection on application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param connection_name: The name of the application gateway private endpoint connection.
:type connection_name: str
:param parameters: Parameters supplied to update application gateway private endpoint
connection operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.ApplicationGatewayPrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ApplicationGatewayPrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.ApplicationGatewayPrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
connection_name=connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs
) -> "_models.ApplicationGatewayPrivateEndpointConnection":
"""Gets the specified private endpoint connection on application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param connection_name: The name of the application gateway private endpoint connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewayPrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.ApplicationGatewayPrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs
) -> AsyncIterable["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]:
"""Lists all private endpoint connections on an application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayPrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ApplicationGatewayPrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections'} # type: ignore
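The operations class above is not meant to be constructed directly; its docstring notes that a client instance attaches it as an attribute. A hypothetical usage sketch follows (the client and credential classes come from the wider azure-mgmt-network and azure-identity packages, and every resource name below is a placeholder); it is not part of the generated file:

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient


async def main():
    credential = DefaultAzureCredential()
    client = NetworkManagementClient(credential, "<subscription-id>")
    # The generated operations class is exposed as an attribute of the client.
    ops = client.application_gateway_private_endpoint_connections

    # list() returns an AsyncItemPaged that can be iterated with `async for`.
    async for connection in ops.list("<resource-group>", "<application-gateway>"):
        print(connection.name)

    # begin_delete() returns an AsyncLROPoller; awaiting result() waits for the LRO.
    poller = await ops.begin_delete(
        "<resource-group>", "<application-gateway>", "<connection-name>"
    )
    await poller.result()

    await client.close()
    await credential.close()


asyncio.run(main())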
avg_line_length: 53.318605 | max_line_length: 241 | alphanum_fraction: 0.690409
hexsha: 69fd8b4cdd24fc52e973eea89fb0c9cdf48f3818 | size: 502 | ext: py | lang: Python
max_stars/issues/forks_repo_path: modules/test/multiply.py
max_stars/issues/forks_repo_name: JimScope/PowerM
max_stars/issues/forks_repo_head_hexsha: b1c7b8926b728c05c36d518a3a1a9904e5b98418
max_stars/issues/forks_repo_licenses: ["MIT"]
max_stars_count: 2 | stars events: 2020-01-09T22:56:32.000Z to 2021-04-24T05:13:52.000Z
max_issues_count: null | issues events: null
max_forks_count: 1 | forks events: 2019-03-27T19:10:07.000Z to 2019-03-27T19:10:07.000Z
content:
import sys
from threading import Thread
sys.path.append('..')
import emailer
class MyThread(Thread):
def __init__(self,user,z):
Thread.__init__(self)
self.z = int(z)
self.user = user
self.tab_list = []
def run(self):
for i in range(1,11):
self.tab_list.append((self.z,"x",i,"=",self.z*int(i)))
x = self.tab_list
emailer.send(self.user,
"Tabla del " + str(self.z),
str(x))
__version__ = '0.1'
avg_line_length: 17.310345 | max_line_length: 66 | alphanum_fraction: 0.543825
hexsha: 03f1ce50fc4486c90ee439e0cd53753ff6157874 | size: 6,346 | ext: py | lang: Python
max_stars/issues/forks_repo_path: regym/tests/rl_algorithms/agent_hook_test.py
max_stars/issues/forks_repo_name: KnwSondess/Regym
max_stars/issues/forks_repo_head_hexsha: 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a
max_stars/issues/forks_repo_licenses: ["MIT"]
max_stars_count: 6 | stars events: 2019-12-03T21:07:12.000Z to 2021-03-25T13:09:39.000Z
max_issues_count: 1 | issues events: 2019-01-29T18:43:32.000Z to 2019-01-31T17:31:39.000Z
max_forks_count: 4 | forks events: 2019-08-01T10:29:41.000Z to 2021-12-06T21:44:30.000Z
content:
import os
import numpy as np
from regym.rl_algorithms.agents import build_PPO_Agent
from regym.rl_algorithms.agents import build_DQN_Agent
from regym.rl_algorithms.agents import build_TabularQ_Agent
from regym.rl_algorithms.agent_hook import AgentHook, AgentType
from test_fixtures import RPSTask
from test_fixtures import ppo_config_dict, dqn_config_dict, tabular_q_learning_config_dict
def test_can_hook_tql_agent(RPSTask, tabular_q_learning_config_dict):
agent = build_TabularQ_Agent(RPSTask, tabular_q_learning_config_dict, 'TQL')
hook = AgentHook(agent)
compare_against_expected_agenthook(agent, hook, AgentType.TQL, [])
def test_can_hook_dqn_agent_using_cuda(RPSTask, dqn_config_dict):
dqn_config_dict['use_cuda'] = True
agent = build_DQN_Agent(RPSTask, dqn_config_dict, 'DQN')
assert all(map(lambda param: param.is_cuda, agent.algorithm.model.parameters()))
assert all(map(lambda param: param.is_cuda, agent.algorithm.target_model.parameters()))
hook = AgentHook(agent)
compare_against_expected_agenthook(agent, hook, AgentType.DQN, [hook.agent.algorithm.model, hook.agent.algorithm.target_model])
def test_can_hook_ppo_agent_using_cuda(RPSTask, ppo_config_dict):
ppo_config_dict['use_cuda'] = True
agent = build_PPO_Agent(RPSTask, ppo_config_dict, 'PPO')
assert all(map(lambda param: param.is_cuda, agent.algorithm.model.parameters()))
hook = AgentHook(agent)
compare_against_expected_agenthook(agent, hook, AgentType.PPO, [hook.agent.algorithm.model])
def compare_against_expected_agenthook(agent, hooked_agent, expected_hook_type, model_list):
assert hooked_agent.type == expected_hook_type
assert hooked_agent.agent == agent
for model in model_list: assert all(map(lambda param: not param.is_cuda, model.parameters()))
def test_can_unhook_tql_agenthook(RPSTask, tabular_q_learning_config_dict):
agent = build_TabularQ_Agent(RPSTask, tabular_q_learning_config_dict, 'TQL')
hook = AgentHook(agent)
retrieved_agent = AgentHook.unhook(hook)
compare_against_expected_retrieved_agent(agent, retrieved_agent, [])
def test_can_unhook_dqn_agenthook_cuda(RPSTask, dqn_config_dict):
dqn_config_dict['use_cuda'] = True
agent = build_DQN_Agent(RPSTask, dqn_config_dict, 'DQN')
assert all(map(lambda param: param.is_cuda, agent.algorithm.model.parameters()))
assert all(map(lambda param: param.is_cuda, agent.algorithm.target_model.parameters()))
hook = AgentHook(agent)
retrieved_agent = AgentHook.unhook(hook)
compare_against_expected_retrieved_agent(agent, retrieved_agent, [retrieved_agent.algorithm.model, retrieved_agent.algorithm.target_model])
def test_can_unhook_ppo_agenthook_with_cuda(RPSTask, ppo_config_dict):
ppo_config_dict['use_cuda'] = True
agent = build_PPO_Agent(RPSTask, ppo_config_dict, 'PPO')
assert all(map(lambda param: param.is_cuda, agent.algorithm.model.parameters()))
hook = AgentHook(agent)
retrieved_agent = AgentHook.unhook(hook)
compare_against_expected_retrieved_agent(agent, retrieved_agent, [retrieved_agent.algorithm.model])
def compare_against_expected_retrieved_agent(agent, retrieved_agent, model_list):
assert agent == retrieved_agent
assert_model_parameters_are_cuda_tensors(model_list)
def test_can_save_tql_to_memory(RPSTask, tabular_q_learning_config_dict):
agent = build_TabularQ_Agent(RPSTask, tabular_q_learning_config_dict, 'TQL')
save_path = '/tmp/test_save.agent'
hook = AgentHook(agent, save_path)
assess_file_has_been_saved_on_disk_and_not_on_ram(hook, save_path)
os.remove(save_path)
def test_can_save_dqn_to_memory(RPSTask, dqn_config_dict):
agent = build_DQN_Agent(RPSTask, dqn_config_dict, 'DQN')
save_path = '/tmp/test_save.agent'
hook = AgentHook(agent, save_path)
assess_file_has_been_saved_on_disk_and_not_on_ram(hook, save_path)
os.remove(save_path)
def test_can_save_ppo_to_memory(RPSTask, ppo_config_dict):
agent = build_PPO_Agent(RPSTask, ppo_config_dict, 'PPO')
save_path = '/tmp/test_save.agent'
hook = AgentHook(agent, save_path=save_path)
assess_file_has_been_saved_on_disk_and_not_on_ram(hook, save_path)
os.remove(save_path)
def test_can_load_tql_from_agenthook(RPSTask, tabular_q_learning_config_dict):
agent = build_TabularQ_Agent(RPSTask, tabular_q_learning_config_dict, 'TQL')
save_path = '/tmp/test_save.agent'
hook = AgentHook(agent, save_path=save_path)
retrieved_agent = AgentHook.unhook(hook)
assert np.array_equal(agent.algorithm.Q_table, retrieved_agent.algorithm.Q_table)
def test_can_load_dqn_from_agenthook_with_cuda(RPSTask, dqn_config_dict):
dqn_config_dict['use_cuda'] = True
agent = build_DQN_Agent(RPSTask, dqn_config_dict, 'DQN')
save_path = '/tmp/test_save.agent'
hook = AgentHook(agent, save_path=save_path)
retrieved_agent = AgentHook.unhook(hook)
model_list = [retrieved_agent.algorithm.model, retrieved_agent.algorithm.target_model]
assert_model_parameters_are_cuda_tensors(model_list)
def test_can_load_ppo_from_agenthook_with_cuda(RPSTask, ppo_config_dict):
ppo_config_dict['use_cuda'] = True
agent = build_PPO_Agent(RPSTask, ppo_config_dict, 'PPO')
save_path = '/tmp/test_save.agent'
hook = AgentHook(agent, save_path=save_path)
assert not hasattr(hook, 'agent')
retrieved_agent = AgentHook.unhook(hook)
model_list = [retrieved_agent.algorithm.model]
assert_model_parameters_are_cuda_tensors(model_list)
def test_can_load_ppo_from_agenthook_disabling_cuda(RPSTask, ppo_config_dict):
ppo_config_dict['use_cuda'] = True
agent = build_PPO_Agent(RPSTask, ppo_config_dict, 'PPO')
save_path = '/tmp/test_save.agent'
hook = AgentHook(agent, save_path=save_path)
retrieved_agent = AgentHook.unhook(hook, use_cuda=False)
model = retrieved_agent.algorithm.model
assert all(map(lambda param: not param.is_cuda, model.parameters()))
def assert_model_parameters_are_cuda_tensors(model_list):
for model in model_list: assert all(map(lambda param: param.is_cuda, model.parameters()))
def assess_file_has_been_saved_on_disk_and_not_on_ram(hook, save_path):
assert not hasattr(hook, 'agent')
assert hook.save_path is save_path
assert os.path.exists(save_path)
avg_line_length: 39.91195 | max_line_length: 143 | alphanum_fraction: 0.789474
hexsha: 5c0cd66c09b9f7f1958aad4f7490212c2d4070e7 | size: 1,704 | ext: py | lang: Python
max_stars/issues/forks_repo_path: scaffoldgraph/utils/subset.py
max_stars/issues/forks_repo_name: UCLCheminformatics/ScaffoldGraph
max_stars/issues/forks_repo_head_hexsha: 0443ce118110290a99601d65b2d000ac8bc7a1e9
max_stars/issues/forks_repo_licenses: ["MIT"]
max_stars_count: 121 | stars events: 2019-12-12T15:30:16.000Z to 2022-02-28T02:00:54.000Z
max_issues_count: 8 | issues events: 2020-04-04T15:37:26.000Z to 2021-11-17T07:30:31.000Z
max_forks_count: 28 | forks events: 2019-12-16T11:58:53.000Z to 2021-11-19T09:57:46.000Z
content:
"""
scaffoldgraph.utils.subset
"""
from networkx.algorithms.traversal import bfs_tree
from collections import defaultdict
def split_graph_by_molecule_attribute(graph, key, default=None):
"""Split a scaffold graph into subgraphs based on unique molecule attributes.
This function first groups molecule nodes sharing a unique attribute
value, and then proceeds to build subgraphs from each node subset using
a breadth-first search.
The returned subgraphs are graph views, and thus changes to the graph structure
are ruled out by the view, but changes to node attributes
are reflected in the original graph. To prevent this behaviour use:
subgraph.copy()
Parameters
----------
graph : sg.core.ScaffoldGraph
A scaffold graph to split.
key : str
The key for the molecule node attribute used to split the graph
into subgraphs.
default : value, bool, optional
Value used for nodes that don't have the requested attribute.
Returns
-------
splits : dict
A dictionary with keys representing unique node attributes and
values representing the constructed subgraphs.
"""
if isinstance(key, bool):
raise ValueError('Attribute key cannot be a boolean type')
splits = defaultdict(list)
for node, attr in graph.get_molecule_nodes(key, default):
splits[attr].append(node)
splits.default_factory = None # Not really required
for attr, nodes in splits.items():
bfs_subset = set()
for node in nodes:
bfs = bfs_tree(graph, node, reverse=True)
bfs_subset.update(bfs)
splits[attr] = graph.subgraph(bfs_subset)
return splits
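A hypothetical usage sketch of the function above, not part of scaffoldgraph.utils.subset: it assumes the graph was built with scaffoldgraph's ScaffoldNetwork.from_sdf constructor, that "molecules.sdf" is a placeholder input file, and that each molecule node carries an illustrative 'assay' attribute.

import scaffoldgraph as sg

# Placeholder input; each molecule is assumed to carry an 'assay' attribute.
network = sg.ScaffoldNetwork.from_sdf("molecules.sdf")

splits = split_graph_by_molecule_attribute(network, key="assay", default="unknown")
for assay, subgraph in splits.items():
    # Each value is a graph view; call .copy() before mutating it.
    print(assay, subgraph.number_of_nodes(), "nodes")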
avg_line_length: 33.411765 | max_line_length: 81 | alphanum_fraction: 0.693075
hexsha: 5fff15ef42c52b35865883411a333adbabee035c | size: 4,777 | ext: py | lang: Python
max_stars/issues/forks_repo_path: packages/open_aea/protocols/signing/dialogues.py
max_stars/issues/forks_repo_name: valory-xyz/agents-aea
max_stars/issues/forks_repo_head_hexsha: 8f38efa96041b0156ed1ae328178e395dbabf2fc
max_stars/issues/forks_repo_licenses: ["Apache-2.0"]
max_stars_count: 28 | stars events: 2021-10-31T18:54:14.000Z to 2022-03-17T13:10:43.000Z
max_issues_count: 66 | issues events: 2021-10-31T11:55:48.000Z to 2022-03-31T06:26:23.000Z
max_forks_count: null | forks events: null
content:
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 open_aea
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""
This module contains the classes required for signing dialogue management.
- SigningDialogue: The dialogue class maintains state of a dialogue and manages it.
- SigningDialogues: The dialogues class keeps track of all dialogues.
"""
from abc import ABC
from typing import Callable, Dict, FrozenSet, Type, cast
from aea.common import Address
from aea.protocols.base import Message
from aea.protocols.dialogue.base import Dialogue, DialogueLabel, Dialogues
from packages.open_aea.protocols.signing.message import SigningMessage
class SigningDialogue(Dialogue):
"""The signing dialogue class maintains state of a dialogue and manages it."""
INITIAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset(
{
SigningMessage.Performative.SIGN_TRANSACTION,
SigningMessage.Performative.SIGN_MESSAGE,
}
)
TERMINAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset(
{
SigningMessage.Performative.SIGNED_TRANSACTION,
SigningMessage.Performative.SIGNED_MESSAGE,
SigningMessage.Performative.ERROR,
}
)
VALID_REPLIES: Dict[Message.Performative, FrozenSet[Message.Performative]] = {
SigningMessage.Performative.ERROR: frozenset(),
SigningMessage.Performative.SIGN_MESSAGE: frozenset(
{
SigningMessage.Performative.SIGNED_MESSAGE,
SigningMessage.Performative.ERROR,
}
),
SigningMessage.Performative.SIGN_TRANSACTION: frozenset(
{
SigningMessage.Performative.SIGNED_TRANSACTION,
SigningMessage.Performative.ERROR,
}
),
SigningMessage.Performative.SIGNED_MESSAGE: frozenset(),
SigningMessage.Performative.SIGNED_TRANSACTION: frozenset(),
}
class Role(Dialogue.Role):
"""This class defines the agent's role in a signing dialogue."""
DECISION_MAKER = "decision_maker"
SKILL = "skill"
class EndState(Dialogue.EndState):
"""This class defines the end states of a signing dialogue."""
SUCCESSFUL = 0
FAILED = 1
def __init__(
self,
dialogue_label: DialogueLabel,
self_address: Address,
role: Dialogue.Role,
message_class: Type[SigningMessage] = SigningMessage,
) -> None:
"""
Initialize a dialogue.
:param dialogue_label: the identifier of the dialogue
:param self_address: the address of the entity for whom this dialogue is maintained
:param role: the role of the agent this dialogue is maintained for
:param message_class: the message class used
"""
Dialogue.__init__(
self,
dialogue_label=dialogue_label,
message_class=message_class,
self_address=self_address,
role=role,
)
class SigningDialogues(Dialogues, ABC):
"""This class keeps track of all signing dialogues."""
END_STATES = frozenset(
{SigningDialogue.EndState.SUCCESSFUL, SigningDialogue.EndState.FAILED}
)
_keep_terminal_state_dialogues = False
def __init__(
self,
self_address: Address,
role_from_first_message: Callable[[Message, Address], Dialogue.Role],
dialogue_class: Type[SigningDialogue] = SigningDialogue,
) -> None:
"""
Initialize dialogues.
:param self_address: the address of the entity for whom dialogues are maintained
:param dialogue_class: the dialogue class used
:param role_from_first_message: the callable determining role from first message
"""
Dialogues.__init__(
self,
self_address=self_address,
end_states=cast(FrozenSet[Dialogue.EndState], self.END_STATES),
message_class=SigningMessage,
dialogue_class=dialogue_class,
role_from_first_message=role_from_first_message,
)
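Because SigningDialogues is abstract and requires a role_from_first_message callable, a skill typically defines a small concrete subclass. The sketch below is hypothetical and uses only names already defined or imported in the module above; the class name and the fixed SKILL role are illustrative choices, not part of the packaged protocol.

class MySigningDialogues(SigningDialogues):
    """Illustrative concrete container for signing dialogues (not part of this package)."""

    def __init__(self, self_address: Address) -> None:
        """Initialize dialogues, always taking the SKILL role."""

        def role_from_first_message(  # pylint: disable=unused-argument
            message: Message, receiver_address: Address
        ) -> Dialogue.Role:
            # This party always acts as the skill; the decision maker is the counterparty.
            return SigningDialogue.Role.SKILL

        SigningDialogues.__init__(
            self,
            self_address=self_address,
            role_from_first_message=role_from_first_message,
        )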
avg_line_length: 34.868613 | max_line_length: 91 | alphanum_fraction: 0.651664
hexsha: 85916d09a8b17d9f7cc7d310dec0b8c076020cdd | size: 11,785 | ext: py | lang: Python
max_stars/issues_repo_path: cookbooks/aws-parallelcluster-config/files/default/head_node_slurm/slurm/pcluster_slurm_config_generator.py
max_stars/issues_repo_name: pinak-p/aws-parallelcluster-cookbook
max_stars/issues_repo_head_hexsha: 6a7cdbee9a9e79e00ffa96e7ab2f0781f5542883
max_stars/issues_repo_licenses: ["Apache-2.0"]
max_forks_repo_path: cookbooks/aws-parallelcluster-config/files/default/head_node_slurm/slurm/pcluster_slurm_config_generator.py
max_forks_repo_name: lukeseawalker/aws-parallelcluster-cookbook
max_forks_repo_head_hexsha: f881254a065ef77b6abd11ee0ac90bbbc39e5687
max_forks_repo_licenses: ["Apache-2.0"]
max_stars_count: 1 | stars events: 2021-12-15T08:01:13.000Z to 2021-12-15T08:01:13.000Z
max_issues_count: 30 | issues events: 2021-05-05T08:05:22.000Z to 2022-03-23T00:17:18.000Z
max_forks_count: null | forks events: null
content:
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import argparse
import json
import logging
import re
from os import makedirs, path
from socket import gethostname
import requests
import yaml
from jinja2 import Environment, FileSystemLoader
log = logging.getLogger()
instance_types_data = {}
class CriticalError(Exception):
"""Critical error for the daemon."""
pass
def generate_slurm_config_files(
output_directory, template_directory, input_file, instance_types_data_path, dryrun, no_gpu
):
"""
Generate Slurm configuration files.
For each queue, generate slurm_parallelcluster_{QueueName}_partitions.conf
and slurm_parallelcluster_{QueueName}_gres.conf, which contain node info.
Generate slurm_parallelcluster.conf and slurm_parallelcluster_gres.conf,
which include the queue-specific configuration files.
slurm_parallelcluster.conf is included in main slurm.conf
and slurm_parallelcluster_gres.conf is included in gres.conf.
"""
# Make output directories
output_directory = path.abspath(output_directory)
pcluster_subdirectory = path.join(output_directory, "pcluster")
makedirs(pcluster_subdirectory, exist_ok=True)
env = _get_jinja_env(template_directory)
cluster_config = _load_cluster_config(input_file)
head_node_config = _get_head_node_config()
queues = cluster_config["Scheduling"]["SlurmQueues"]
global instance_types_data
with open(instance_types_data_path) as input_file:
instance_types_data = json.load(input_file)
# Generate slurm_parallelcluster_{QueueName}_partitions.conf and slurm_parallelcluster_{QueueName}_gres.conf
is_default_queue = True # The first queue in the queues list is the default queue
for queue in queues:
for file_type in ["partition", "gres"]:
_generate_queue_config(
queue["Name"], queue, is_default_queue, file_type, env, pcluster_subdirectory, dryrun, no_gpu=no_gpu
)
is_default_queue = False
# Generate slurm_parallelcluster.conf and slurm_parallelcluster_gres.conf
for template_name in ["slurm_parallelcluster.conf", "slurm_parallelcluster_gres.conf"]:
_generate_slurm_parallelcluster_configs(
queues,
head_node_config,
cluster_config["Scheduling"]["SlurmSettings"],
template_name,
env,
output_directory,
dryrun,
)
generate_instance_type_mapping_file(pcluster_subdirectory, queues)
log.info("Finished.")
def _load_cluster_config(input_file_path):
"""
Load queues_info and add information used to render templates.
:return: queues_info containing id for first queue, head_node_hostname and queue_name
"""
with open(input_file_path) as input_file:
return yaml.load(input_file, Loader=yaml.SafeLoader)
def _get_head_node_config():
return {
"head_node_hostname": gethostname(),
"head_node_ip": _get_head_node_private_ip(),
}
def _get_head_node_private_ip():
"""Get head node private ip from EC2 metadata."""
return _get_metadata("local-ipv4")
def _generate_queue_config(
queue_name, queue_config, is_default_queue, file_type, jinja_env, output_dir, dryrun, no_gpu=False
):
log.info("Generating slurm_parallelcluster_%s_%s.conf", queue_name, file_type)
rendered_template = jinja_env.get_template(f"slurm_parallelcluster_queue_{file_type}.conf").render(
queue_name=queue_name, queue_config=queue_config, is_default_queue=is_default_queue, no_gpu=no_gpu
)
if not dryrun:
filename = path.join(output_dir, f"slurm_parallelcluster_{queue_name}_{file_type}.conf")
if file_type == "gres" and no_gpu:
_write_rendered_template_to_file(
"# This file is automatically generated by pcluster\n"
"# Skipping GPUs configuration because Nvidia driver is not installed",
filename,
)
else:
_write_rendered_template_to_file(rendered_template, filename)
def _generate_slurm_parallelcluster_configs(
queues, head_node_config, scaling_config, template_name, jinja_env, output_dir, dryrun
):
log.info("Generating %s", template_name)
rendered_template = jinja_env.get_template(f"{template_name}").render(
queues=queues,
head_node_config=head_node_config,
scaling_config=scaling_config,
output_dir=output_dir,
)
if not dryrun:
filename = f"{output_dir}/{template_name}"
_write_rendered_template_to_file(rendered_template, filename)
def _get_jinja_env(template_directory):
"""Return jinja environment with trim_blocks/lstrip_blocks set to True."""
file_loader = FileSystemLoader(template_directory)
# A nosec comment is appended to the following line in order to disable the B701 check.
# The contents of the default templates are known and the input configuration data is
# validated by the CLI.
env = Environment(loader=file_loader, trim_blocks=True, lstrip_blocks=True) # nosec nosemgrep
env.filters["sanify_name"] = lambda value: re.sub(r"[^A-Za-z0-9]", "", value)
env.filters["gpus"] = _gpu_count
env.filters["gpu_type"] = _gpu_type
env.filters["vcpus"] = _vcpus
return env
def _gpu_count(instance_type):
"""Return the number of GPUs for the instance."""
gpu_info = instance_types_data[instance_type].get("GpuInfo", None)
gpu_count = 0
if gpu_info:
for gpus in gpu_info.get("Gpus", []):
gpu_manufacturer = gpus.get("Manufacturer", "")
if gpu_manufacturer.upper() == "NVIDIA":
gpu_count += gpus.get("Count", 0)
else:
log.info(
f"ParallelCluster currently does not offer native support for '{gpu_manufacturer}' GPUs. "
"Please make sure to use a custom AMI with the appropriate drivers in order to leverage "
"GPUs functionalities"
)
return gpu_count
def _gpu_type(instance_type):
"""Return name or type of the GPU for the instance."""
gpu_info = instance_types_data[instance_type].get("GpuInfo", None)
    # Remove spaces and lowercase the GPU name
return "no_gpu_type" if not gpu_info else gpu_info.get("Gpus")[0].get("Name").replace(" ", "").lower()
def _vcpus(compute_resource) -> int:
"""Get the number of vcpus for the instance according to disable_hyperthreading and instance features."""
instance_type = compute_resource["InstanceType"]
disable_simultaneous_multithreading = compute_resource["DisableSimultaneousMultithreading"]
instance_type_info = instance_types_data[instance_type]
vcpus_info = instance_type_info.get("VCpuInfo", {})
vcpus_count = vcpus_info.get("DefaultVCpus")
threads_per_core = vcpus_info.get("DefaultThreadsPerCore")
if threads_per_core is None:
supported_architectures = instance_type_info.get("ProcessorInfo", {}).get("SupportedArchitectures", [])
threads_per_core = 2 if "x86_64" in supported_architectures else 1
return vcpus_count if not disable_simultaneous_multithreading else (vcpus_count // threads_per_core)
def _write_rendered_template_to_file(rendered_template, filename):
log.info("Writing contents of %s", filename)
with open(filename, "w") as output_file:
output_file.write(rendered_template)
def _setup_logger():
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - [%(name)s:%(funcName)s] - %(levelname)s - %(message)s"
)
def generate_instance_type_mapping_file(output_dir, queues):
"""Generate a mapping file to retrieve the Instance Type related to the instance key used in the slurm nodename."""
instance_name_type_mapping = {}
for queue in queues:
instance_name_type_mapping[queue["Name"]] = {}
compute_resources = queue["ComputeResources"]
hostname_regex = re.compile("[^A-Za-z0-9]")
for compute_resource in compute_resources:
instance_type = compute_resource.get("InstanceType")
            # Remove all characters except letters and numbers
sanitized_compute_name = re.sub(hostname_regex, "", compute_resource.get("Name"))
instance_name_type_mapping[queue["Name"]][sanitized_compute_name] = instance_type
filename = f"{output_dir}/instance_name_type_mappings.json"
log.info("Generating %s", filename)
with open(filename, "w") as output_file:
output_file.write(json.dumps(instance_name_type_mapping, indent=4))
def _get_metadata(metadata_path):
"""
Get EC2 instance metadata.
:param metadata_path: the metadata relative path
    :return: the metadata value.
"""
try:
token = requests.put(
"http://169.254.169.254/latest/api/token", headers={"X-aws-ec2-metadata-token-ttl-seconds": "300"}
)
headers = {}
if token.status_code == requests.codes.ok:
headers["X-aws-ec2-metadata-token"] = token.content
metadata_url = "http://169.254.169.254/latest/meta-data/{0}".format(metadata_path)
metadata_value = requests.get(metadata_url, headers=headers).text
except Exception as e:
error_msg = "Unable to get {0} metadata. Failed with exception: {1}".format(metadata_path, e)
log.critical(error_msg)
raise CriticalError(error_msg)
log.debug("%s=%s", metadata_path, metadata_value)
return metadata_value
def main():
try:
_setup_logger()
log.info("Running ParallelCluster Slurm Config Generator")
parser = argparse.ArgumentParser(description="Take in slurm configuration generator related parameters")
parser.add_argument(
"--output-directory", type=str, help="The output directory for generated slurm configs", required=True
)
parser.add_argument(
"--template-directory", type=str, help="The directory storing slurm config templates", required=True
)
parser.add_argument(
"--input-file",
type=str,
# Todo: is the default necessary?
default="/opt/parallelcluster/slurm_config.json",
help="Yaml file containing pcluster configuration file",
)
parser.add_argument(
"--instance-types-data",
type=str,
help="JSON file containing info about instance types",
)
parser.add_argument(
"--dryrun",
action="store_true",
help="dryrun",
required=False,
default=False,
)
parser.add_argument(
"--no-gpu",
action="store_true",
help="no gpu configuration",
required=False,
default=False,
)
args = parser.parse_args()
generate_slurm_config_files(
args.output_directory,
args.template_directory,
args.input_file,
args.instance_types_data,
args.dryrun,
args.no_gpu,
)
except Exception as e:
log.exception("Failed to generate slurm configurations, exception: %s", e)
raise
if __name__ == "__main__":
main()
| 38.139159
| 119
| 0.689351
|
b8c10fa82b2bcd9880c0dfe1e9cc8477b5c4029f
| 204
|
py
|
Python
|
api/serilizers.py
|
soltanoff/drf_vue_template
|
2269f045fc5557bbca168a806d7ca37a7298837a
|
[
"MIT"
] | null | null | null |
api/serilizers.py
|
soltanoff/drf_vue_template
|
2269f045fc5557bbca168a806d7ca37a7298837a
|
[
"MIT"
] | 1
|
2021-10-04T05:38:08.000Z
|
2021-10-05T07:20:59.000Z
|
api/serilizers.py
|
soltanoff/drf_vue_template
|
2269f045fc5557bbca168a806d7ca37a7298837a
|
[
"MIT"
] | 2
|
2019-09-25T10:22:26.000Z
|
2020-07-29T16:34:20.000Z
|
from rest_framework import serializers
from api.models import ArticleModel
class ArticleSerializer(serializers.ModelSerializer):
class Meta:
model = ArticleModel
fields = '__all__'
| 20.4
| 53
| 0.75
|
4ddde28f02d21a187208057945ae8da88f977b36
| 81,111
|
py
|
Python
|
mesonbuild/modules/gnome.py
|
hwti/meson
|
9e5c881b06bfb79ee9ee40cdd8dca3a78f268a40
|
[
"Apache-2.0"
] | 1
|
2021-09-14T00:19:25.000Z
|
2021-09-14T00:19:25.000Z
|
mesonbuild/modules/gnome.py
|
hwti/meson
|
9e5c881b06bfb79ee9ee40cdd8dca3a78f268a40
|
[
"Apache-2.0"
] | null | null | null |
mesonbuild/modules/gnome.py
|
hwti/meson
|
9e5c881b06bfb79ee9ee40cdd8dca3a78f268a40
|
[
"Apache-2.0"
] | 1
|
2021-06-12T19:07:19.000Z
|
2021-06-12T19:07:19.000Z
|
# Copyright 2015-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides helper functions for Gnome/GLib related
functionality such as gobject-introspection, gresources and gtk-doc'''
import os
import copy
import subprocess
import functools
from .. import build
from .. import mlog
from .. import mesonlib
from .. import interpreter
from . import GResourceTarget, GResourceHeaderTarget, GirTarget, TypelibTarget, VapiTarget
from . import get_include_args
from . import ExtensionModule
from . import ModuleReturnValue
from ..mesonlib import (
MachineChoice, MesonException, OrderedSet, Popen_safe, extract_as_list,
join_args, unholder,
)
from ..dependencies import Dependency, PkgConfigDependency, InternalDependency
from ..interpreterbase import noKwargs, permittedKwargs, FeatureNew, FeatureNewKwargs
# gresource compilation is broken due to the way
# the resource compiler and Ninja clash about it
#
# https://github.com/ninja-build/ninja/issues/1184
# https://bugzilla.gnome.org/show_bug.cgi?id=774368
gresource_dep_needed_version = '>= 2.51.1'
native_glib_version = None
@functools.lru_cache(maxsize=None)
def gir_has_option(intr_obj, option):
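    # Note: the lru_cache above memoizes the result, so 'g-ir-scanner --help' is
    # spawned at most once per (interpreter, option) pair.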
try:
g_ir_scanner = intr_obj.find_program_impl('g-ir-scanner')
# Handle overridden g-ir-scanner
if isinstance(getattr(g_ir_scanner, "held_object", g_ir_scanner), interpreter.OverrideProgram):
assert option in ['--extra-library', '--sources-top-dirs']
return True
opts = Popen_safe(g_ir_scanner.get_command() + ['--help'], stderr=subprocess.STDOUT)[1]
return option in opts
except (MesonException, FileNotFoundError, subprocess.CalledProcessError):
return False
class GnomeModule(ExtensionModule):
gir_dep = None
@staticmethod
def _get_native_glib_version(state):
global native_glib_version
if native_glib_version is None:
glib_dep = PkgConfigDependency('glib-2.0', state.environment,
{'native': True, 'required': False})
if glib_dep.found():
native_glib_version = glib_dep.get_version()
else:
mlog.warning('Could not detect glib version, assuming 2.54. '
'You may get build errors if your glib is older.')
native_glib_version = '2.54'
return native_glib_version
@mesonlib.run_once
def __print_gresources_warning(self, state):
if not mesonlib.version_compare(self._get_native_glib_version(state),
gresource_dep_needed_version):
mlog.warning('GLib compiled dependencies do not work reliably with \n'
'the current version of GLib. See the following upstream issue:',
mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=774368'))
@staticmethod
def _print_gdbus_warning():
mlog.warning('Code generated with gdbus_codegen() requires the root directory be added to\n'
' include_directories of targets with GLib < 2.51.3:',
mlog.bold('https://github.com/mesonbuild/meson/issues/1387'),
once=True)
@FeatureNewKwargs('gnome.compile_resources', '0.37.0', ['gresource_bundle', 'export', 'install_header'])
@permittedKwargs({'source_dir', 'c_name', 'dependencies', 'export', 'gresource_bundle', 'install_header',
'install', 'install_dir', 'extra_args', 'build_by_default'})
def compile_resources(self, state, args, kwargs):
self.__print_gresources_warning(state)
glib_version = self._get_native_glib_version(state)
cmd = ['glib-compile-resources', '@INPUT@']
source_dirs, dependencies = [mesonlib.extract_as_list(kwargs, c, pop=True) for c in ['source_dir', 'dependencies']]
if len(args) < 2:
raise MesonException('Not enough arguments; the name of the resource '
'and the path to the XML file are required')
# Validate dependencies
for (ii, dep) in enumerate(dependencies):
if hasattr(dep, 'held_object'):
dependencies[ii] = dep = dep.held_object
if not isinstance(dep, (mesonlib.File, build.CustomTarget, build.CustomTargetIndex)):
m = 'Unexpected dependency type {!r} for gnome.compile_resources() ' \
'"dependencies" argument.\nPlease pass the return value of ' \
'custom_target() or configure_file()'
raise MesonException(m.format(dep))
if isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)):
if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
m = 'The "dependencies" argument of gnome.compile_resources() can not\n' \
'be used with the current version of glib-compile-resources due to\n' \
'<https://bugzilla.gnome.org/show_bug.cgi?id=774368>'
raise MesonException(m)
ifile = args[1]
if isinstance(ifile, mesonlib.File):
# glib-compile-resources will be run inside the source dir,
# so we need either 'src_to_build' or the absolute path.
# Absolute path is the easiest choice.
if ifile.is_built:
ifile = os.path.join(state.environment.get_build_dir(), ifile.subdir, ifile.fname)
else:
ifile = os.path.join(ifile.subdir, ifile.fname)
elif isinstance(ifile, str):
ifile = os.path.join(state.subdir, ifile)
elif isinstance(ifile, (interpreter.CustomTargetHolder,
interpreter.CustomTargetIndexHolder,
interpreter.GeneratedObjectsHolder)):
m = 'Resource xml files generated at build-time cannot be used ' \
'with gnome.compile_resources() because we need to scan ' \
'the xml for dependencies. Use configure_file() instead ' \
'to generate it at configure-time.'
raise MesonException(m)
else:
raise MesonException('Invalid file argument: {!r}'.format(ifile))
depend_files, depends, subdirs = self._get_gresource_dependencies(
state, ifile, source_dirs, dependencies)
# Make source dirs relative to build dir now
source_dirs = [os.path.join(state.build_to_src, state.subdir, d) for d in source_dirs]
# Ensure build directories of generated deps are included
source_dirs += subdirs
# Always include current directory, but after paths set by user
source_dirs.append(os.path.join(state.build_to_src, state.subdir))
for source_dir in OrderedSet(source_dirs):
cmd += ['--sourcedir', source_dir]
if 'c_name' in kwargs:
cmd += ['--c-name', kwargs.pop('c_name')]
export = kwargs.pop('export', False)
if not export:
cmd += ['--internal']
cmd += ['--generate', '--target', '@OUTPUT@']
cmd += mesonlib.stringlistify(kwargs.pop('extra_args', []))
gresource = kwargs.pop('gresource_bundle', False)
if gresource:
output = args[0] + '.gresource'
name = args[0] + '_gresource'
else:
output = args[0] + '.c'
name = args[0] + '_c'
if kwargs.get('install', False) and not gresource:
raise MesonException('The install kwarg only applies to gresource bundles, see install_header')
install_header = kwargs.pop('install_header', False)
if install_header and gresource:
raise MesonException('The install_header kwarg does not apply to gresource bundles')
if install_header and not export:
raise MesonException('GResource header is installed yet export is not enabled')
kwargs['input'] = args[1]
kwargs['output'] = output
kwargs['depends'] = depends
if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
# This will eventually go out of sync if dependencies are added
kwargs['depend_files'] = depend_files
kwargs['command'] = cmd
else:
depfile = kwargs['output'] + '.d'
kwargs['depfile'] = depfile
kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@']
target_c = GResourceTarget(name, state.subdir, state.subproject, kwargs)
if gresource: # Only one target for .gresource files
return ModuleReturnValue(target_c, [target_c])
h_kwargs = {
'command': cmd,
'input': args[1],
'output': args[0] + '.h',
            # The header doesn't actually care about the files, yet it errors if they are missing
'depends': depends
}
if 'build_by_default' in kwargs:
h_kwargs['build_by_default'] = kwargs['build_by_default']
if install_header:
h_kwargs['install'] = install_header
h_kwargs['install_dir'] = kwargs.get('install_dir',
state.environment.coredata.get_builtin_option('includedir'))
target_h = GResourceHeaderTarget(args[0] + '_h', state.subdir, state.subproject, h_kwargs)
rv = [target_c, target_h]
return ModuleReturnValue(rv, rv)
def _get_gresource_dependencies(self, state, input_file, source_dirs, dependencies):
cmd = ['glib-compile-resources',
input_file,
'--generate-dependencies']
# Prefer generated files over source files
cmd += ['--sourcedir', state.subdir] # Current build dir
for source_dir in source_dirs:
cmd += ['--sourcedir', os.path.join(state.subdir, source_dir)]
pc, stdout, stderr = Popen_safe(cmd, cwd=state.environment.get_source_dir())
if pc.returncode != 0:
m = 'glib-compile-resources failed to get dependencies for {}:\n{}'
mlog.warning(m.format(cmd[1], stderr))
raise subprocess.CalledProcessError(pc.returncode, cmd)
dep_files = stdout.split('\n')[:-1]
depends = []
subdirs = []
for resfile in dep_files[:]:
resbasename = os.path.basename(resfile)
for dep in unholder(dependencies):
if isinstance(dep, mesonlib.File):
if dep.fname != resbasename:
continue
dep_files.remove(resfile)
dep_files.append(dep)
subdirs.append(dep.subdir)
break
elif isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)):
fname = None
outputs = {(o, os.path.basename(o)) for o in dep.get_outputs()}
for o, baseo in outputs:
if baseo == resbasename:
fname = o
break
if fname is not None:
dep_files.remove(resfile)
depends.append(dep)
subdirs.append(dep.get_subdir())
break
else:
# In generate-dependencies mode, glib-compile-resources doesn't raise
# an error for missing resources but instead prints whatever filename
# was listed in the input file. That's good because it means we can
# handle resource files that get generated as part of the build, as
# follows.
#
# If there are multiple generated resource files with the same basename
# then this code will get confused.
try:
f = mesonlib.File.from_source_file(state.environment.get_source_dir(),
".", resfile)
except MesonException:
raise MesonException(
'Resource "%s" listed in "%s" was not found. If this is a '
'generated file, pass the target that generates it to '
'gnome.compile_resources() using the "dependencies" '
'keyword argument.' % (resfile, input_file))
dep_files.remove(resfile)
dep_files.append(f)
return dep_files, depends, subdirs
def _get_link_args(self, state, lib, depends, include_rpath=False,
use_gir_args=False):
link_command = []
# Construct link args
if isinstance(lib, build.SharedLibrary):
libdir = os.path.join(state.environment.get_build_dir(), state.backend.get_target_dir(lib))
link_command.append('-L' + libdir)
if include_rpath:
link_command.append('-Wl,-rpath,' + libdir)
depends.append(lib)
# Needed for the following binutils bug:
# https://github.com/mesonbuild/meson/issues/1911
# However, g-ir-scanner does not understand -Wl,-rpath
# so we need to use -L instead
for d in state.backend.determine_rpath_dirs(lib):
d = os.path.join(state.environment.get_build_dir(), d)
link_command.append('-L' + d)
if include_rpath:
link_command.append('-Wl,-rpath,' + d)
if gir_has_option(self.interpreter, '--extra-library') and use_gir_args:
link_command.append('--extra-library=' + lib.name)
else:
link_command.append('-l' + lib.name)
return link_command
def _get_dependencies_flags(self, deps, state, depends, include_rpath=False,
use_gir_args=False, separate_nodedup=False):
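        # For reference: this returns (cflags, internal_ldflags, external_ldflags,
        # gi_includes), or the 5-tuple (cflags, internal_ldflags, external_ldflags,
        # external_ldflags_nodedup, gi_includes) when separate_nodedup=True.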
cflags = OrderedSet()
internal_ldflags = OrderedSet()
external_ldflags = OrderedSet()
# External linker flags that can't be de-duped reliably because they
# require two args in order, such as -framework AVFoundation
external_ldflags_nodedup = []
gi_includes = OrderedSet()
deps = mesonlib.unholder(mesonlib.listify(deps))
for dep in deps:
if isinstance(dep, InternalDependency):
cflags.update(dep.get_compile_args())
cflags.update(get_include_args(dep.include_directories))
for lib in unholder(dep.libraries):
if isinstance(lib, build.SharedLibrary):
internal_ldflags.update(self._get_link_args(state, lib, depends, include_rpath))
libdepflags = self._get_dependencies_flags(lib.get_external_deps(), state, depends, include_rpath,
use_gir_args, True)
cflags.update(libdepflags[0])
internal_ldflags.update(libdepflags[1])
external_ldflags.update(libdepflags[2])
external_ldflags_nodedup += libdepflags[3]
gi_includes.update(libdepflags[4])
extdepflags = self._get_dependencies_flags(dep.ext_deps, state, depends, include_rpath,
use_gir_args, True)
cflags.update(extdepflags[0])
internal_ldflags.update(extdepflags[1])
external_ldflags.update(extdepflags[2])
external_ldflags_nodedup += extdepflags[3]
gi_includes.update(extdepflags[4])
for source in unholder(dep.sources):
if isinstance(source, GirTarget):
gi_includes.update([os.path.join(state.environment.get_build_dir(),
source.get_subdir())])
# This should be any dependency other than an internal one.
elif isinstance(dep, Dependency):
cflags.update(dep.get_compile_args())
ldflags = iter(dep.get_link_args(raw=True))
for lib in ldflags:
if (os.path.isabs(lib) and
# For PkgConfigDependency only:
getattr(dep, 'is_libtool', False)):
lib_dir = os.path.dirname(lib)
external_ldflags.update(["-L%s" % lib_dir])
if include_rpath:
external_ldflags.update(['-Wl,-rpath {}'.format(lib_dir)])
libname = os.path.basename(lib)
if libname.startswith("lib"):
libname = libname[3:]
libname = libname.split(".so")[0]
lib = "-l%s" % libname
# FIXME: Hack to avoid passing some compiler options in
if lib.startswith("-W"):
continue
# If it's a framework arg, slurp the framework name too
# to preserve the order of arguments
if lib == '-framework':
external_ldflags_nodedup += [lib, next(ldflags)]
else:
external_ldflags.update([lib])
if isinstance(dep, PkgConfigDependency):
girdir = dep.get_pkgconfig_variable("girdir", {'default': ''})
if girdir:
gi_includes.update([girdir])
elif isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
cflags.update(get_include_args(dep.get_include_dirs()))
depends.append(dep)
else:
mlog.log('dependency {!r} not handled to build gir files'.format(dep))
continue
if gir_has_option(self.interpreter, '--extra-library') and use_gir_args:
def fix_ldflags(ldflags):
fixed_ldflags = OrderedSet()
for ldflag in ldflags:
if ldflag.startswith("-l"):
ldflag = ldflag.replace('-l', '--extra-library=', 1)
fixed_ldflags.add(ldflag)
return fixed_ldflags
internal_ldflags = fix_ldflags(internal_ldflags)
external_ldflags = fix_ldflags(external_ldflags)
if not separate_nodedup:
external_ldflags.update(external_ldflags_nodedup)
return cflags, internal_ldflags, external_ldflags, gi_includes
else:
return cflags, internal_ldflags, external_ldflags, external_ldflags_nodedup, gi_includes
def _unwrap_gir_target(self, girtarget, state):
while hasattr(girtarget, 'held_object'):
girtarget = girtarget.held_object
if not isinstance(girtarget, (build.Executable, build.SharedLibrary,
build.StaticLibrary)):
raise MesonException('Gir target must be an executable or library')
STATIC_BUILD_REQUIRED_VERSION = ">=1.58.1"
if isinstance(girtarget, (build.StaticLibrary)) and \
not mesonlib.version_compare(
self._get_gir_dep(state)[0].get_version(),
STATIC_BUILD_REQUIRED_VERSION):
raise MesonException('Static libraries can only be introspected with GObject-Introspection ' + STATIC_BUILD_REQUIRED_VERSION)
return girtarget
def _get_gir_dep(self, state):
try:
gir_dep = self.gir_dep or PkgConfigDependency('gobject-introspection-1.0',
state.environment,
{'native': True})
pkgargs = gir_dep.get_compile_args()
except Exception:
raise MesonException('gobject-introspection dependency was not found, gir cannot be generated.')
return gir_dep, pkgargs
def _scan_header(self, kwargs):
ret = []
header = kwargs.pop('header', None)
if header:
if not isinstance(header, str):
raise MesonException('header must be a string')
ret = ['--c-include=' + header]
return ret
def _scan_extra_args(self, kwargs):
return mesonlib.stringlistify(kwargs.pop('extra_args', []))
def _scan_link_withs(self, state, depends, kwargs):
ret = []
if 'link_with' in kwargs:
link_with = mesonlib.extract_as_list(kwargs, 'link_with', pop = True)
for link in link_with:
ret += self._get_link_args(state, link.held_object, depends,
use_gir_args=True)
return ret
# May mutate depends and gir_inc_dirs
def _scan_include(self, state, depends, gir_inc_dirs, kwargs):
ret = []
if 'includes' in kwargs:
includes = mesonlib.extract_as_list(kwargs, 'includes', pop = True)
for inc in unholder(includes):
if isinstance(inc, str):
ret += ['--include=%s' % (inc, )]
elif isinstance(inc, GirTarget):
gir_inc_dirs += [
os.path.join(state.environment.get_build_dir(),
inc.get_subdir()),
]
ret += [
"--include-uninstalled=%s" % (os.path.join(inc.get_subdir(), inc.get_basename()), )
]
depends += [inc]
else:
raise MesonException(
'Gir includes must be str, GirTarget, or list of them')
return ret
def _scan_symbol_prefix(self, kwargs):
ret = []
if 'symbol_prefix' in kwargs:
sym_prefixes = mesonlib.stringlistify(kwargs.pop('symbol_prefix', []))
ret += ['--symbol-prefix=%s' % sym_prefix for sym_prefix in sym_prefixes]
return ret
def _scan_identifier_prefix(self, kwargs):
ret = []
if 'identifier_prefix' in kwargs:
identifier_prefix = kwargs.pop('identifier_prefix')
if not isinstance(identifier_prefix, str):
raise MesonException('Gir identifier prefix must be str')
ret += ['--identifier-prefix=%s' % identifier_prefix]
return ret
def _scan_export_packages(self, kwargs):
ret = []
if 'export_packages' in kwargs:
pkgs = kwargs.pop('export_packages')
if isinstance(pkgs, str):
ret += ['--pkg-export=%s' % pkgs]
elif isinstance(pkgs, list):
ret += ['--pkg-export=%s' % pkg for pkg in pkgs]
else:
raise MesonException('Gir export packages must be str or list')
return ret
def _scan_inc_dirs(self, kwargs):
ret = mesonlib.extract_as_list(kwargs, 'include_directories', pop = True)
for incd in ret:
if not isinstance(incd.held_object, (str, build.IncludeDirs)):
raise MesonException(
'Gir include dirs should be include_directories().')
return ret
def _scan_langs(self, state, langs):
ret = []
for lang in langs:
link_args = state.environment.coredata.get_external_link_args(MachineChoice.HOST, lang)
for link_arg in link_args:
if link_arg.startswith('-L'):
ret.append(link_arg)
return ret
def _scan_gir_targets(self, state, girtargets):
ret = []
for girtarget in girtargets:
if isinstance(girtarget, build.Executable):
ret += ['--program', girtarget]
else:
# Because of https://gitlab.gnome.org/GNOME/gobject-introspection/merge_requests/72
# we can't use the full path until this is merged.
if isinstance(girtarget, build.SharedLibrary):
libname = girtarget.get_basename()
else:
libname = os.path.join("@PRIVATE_OUTDIR_ABS_%s@" % girtarget.get_id(), girtarget.get_filename())
ret += ['--library', libname]
# need to put our output directory first as we need to use the
# generated libraries instead of any possibly installed system/prefix
# ones.
ret += ["-L@PRIVATE_OUTDIR_ABS_%s@" % girtarget.get_id()]
# Needed for the following binutils bug:
# https://github.com/mesonbuild/meson/issues/1911
# However, g-ir-scanner does not understand -Wl,-rpath
# so we need to use -L instead
for d in state.backend.determine_rpath_dirs(girtarget):
d = os.path.join(state.environment.get_build_dir(), d)
ret.append('-L' + d)
return ret
def _get_girtargets_langs_compilers(self, girtargets):
ret = []
for girtarget in girtargets:
for lang, compiler in girtarget.compilers.items():
# XXX: Can you use g-i with any other language?
if lang in ('c', 'cpp', 'objc', 'objcpp', 'd'):
ret.append((lang, compiler))
break
return ret
def _get_gir_targets_deps(self, girtargets):
ret = []
for girtarget in girtargets:
ret += girtarget.get_all_link_deps()
ret += girtarget.get_external_deps()
return ret
def _get_gir_targets_inc_dirs(self, girtargets):
ret = []
for girtarget in girtargets:
ret += girtarget.get_include_dirs()
return ret
def _get_langs_compilers_flags(self, state, langs_compilers):
cflags = []
internal_ldflags = []
external_ldflags = []
for lang, compiler in langs_compilers:
if state.global_args.get(lang):
cflags += state.global_args[lang]
if state.project_args.get(lang):
cflags += state.project_args[lang]
if 'b_sanitize' in compiler.base_options:
sanitize = state.environment.coredata.base_options['b_sanitize'].value
cflags += compiler.sanitizer_compile_args(sanitize)
sanitize = sanitize.split(',')
# These must be first in ldflags
if 'address' in sanitize:
internal_ldflags += ['-lasan']
if 'thread' in sanitize:
internal_ldflags += ['-ltsan']
if 'undefined' in sanitize:
internal_ldflags += ['-lubsan']
# FIXME: Linking directly to lib*san is not recommended but g-ir-scanner
# does not understand -f LDFLAGS. https://bugzilla.gnome.org/show_bug.cgi?id=783892
# ldflags += compiler.sanitizer_link_args(sanitize)
return cflags, internal_ldflags, external_ldflags
def _make_gir_filelist(self, state, srcdir, ns, nsversion, girtargets, libsources):
gir_filelist_dir = state.backend.get_target_private_dir_abs(girtargets[0])
if not os.path.isdir(gir_filelist_dir):
os.mkdir(gir_filelist_dir)
gir_filelist_filename = os.path.join(gir_filelist_dir, '%s_%s_gir_filelist' % (ns, nsversion))
with open(gir_filelist_filename, 'w', encoding='utf-8') as gir_filelist:
for s in unholder(libsources):
if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
for custom_output in s.get_outputs():
gir_filelist.write(os.path.join(state.environment.get_build_dir(),
state.backend.get_target_dir(s),
custom_output) + '\n')
elif isinstance(s, mesonlib.File):
gir_filelist.write(s.rel_to_builddir(state.build_to_src) + '\n')
elif isinstance(s, build.GeneratedList):
for gen_src in s.get_outputs():
gir_filelist.write(os.path.join(srcdir, gen_src) + '\n')
else:
gir_filelist.write(os.path.join(srcdir, s) + '\n')
return gir_filelist_filename
def _make_gir_target(self, state, girfile, scan_command, depends, kwargs):
scankwargs = {'output': girfile,
'command': scan_command,
'depends': depends}
if 'install' in kwargs:
scankwargs['install'] = kwargs['install']
scankwargs['install_dir'] = kwargs.get('install_dir_gir',
os.path.join(state.environment.get_datadir(), 'gir-1.0'))
if 'build_by_default' in kwargs:
scankwargs['build_by_default'] = kwargs['build_by_default']
return GirTarget(girfile, state.subdir, state.subproject, scankwargs)
def _make_typelib_target(self, state, typelib_output, typelib_cmd, kwargs):
typelib_kwargs = {
'output': typelib_output,
'command': typelib_cmd,
}
if 'install' in kwargs:
typelib_kwargs['install'] = kwargs['install']
typelib_kwargs['install_dir'] = kwargs.get('install_dir_typelib',
os.path.join(state.environment.get_libdir(), 'girepository-1.0'))
if 'build_by_default' in kwargs:
typelib_kwargs['build_by_default'] = kwargs['build_by_default']
return TypelibTarget(typelib_output, state.subdir, state.subproject, typelib_kwargs)
# May mutate depends
def _gather_typelib_includes_and_update_depends(self, state, deps, depends):
# Need to recursively add deps on GirTarget sources from our
# dependencies and also find the include directories needed for the
# typelib generation custom target below.
typelib_includes = []
for dep in unholder(deps):
# Add a dependency on each GirTarget listed in dependencies and add
# the directory where it will be generated to the typelib includes
if isinstance(dep, InternalDependency):
for source in unholder(dep.sources):
if isinstance(source, GirTarget) and source not in depends:
depends.append(source)
subdir = os.path.join(state.environment.get_build_dir(),
source.get_subdir())
if subdir not in typelib_includes:
typelib_includes.append(subdir)
# Do the same, but for dependencies of dependencies. These are
# stored in the list of generated sources for each link dep (from
# girtarget.get_all_link_deps() above).
# FIXME: Store this in the original form from declare_dependency()
# so it can be used here directly.
elif isinstance(dep, build.SharedLibrary):
for source in dep.generated:
if isinstance(source, GirTarget):
subdir = os.path.join(state.environment.get_build_dir(),
source.get_subdir())
if subdir not in typelib_includes:
typelib_includes.append(subdir)
elif isinstance(dep, PkgConfigDependency):
girdir = dep.get_pkgconfig_variable("girdir", {'default': ''})
if girdir and girdir not in typelib_includes:
typelib_includes.append(girdir)
return typelib_includes
def _get_external_args_for_langs(self, state, langs):
ret = []
for lang in langs:
ret += state.environment.coredata.get_external_args(MachineChoice.HOST, lang)
return ret
@staticmethod
def _get_scanner_cflags(cflags):
'g-ir-scanner only accepts -I/-D/-U; must ignore all other flags'
for f in cflags:
if f.startswith(('-D', '-U', '-I')):
yield f
@staticmethod
def _get_scanner_ldflags(ldflags):
'g-ir-scanner only accepts -L/-l; must ignore -F and other linker flags'
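        # Illustrative example (flags assumed): ['-L/opt/lib', '-lfoo', '-framework',
        # 'AVFoundation', '-Wl,-rpath,/opt/lib'] is filtered down to
        # ['-L/opt/lib', '-lfoo'].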
for f in ldflags:
if f.startswith(('-L', '-l', '--extra-library')):
yield f
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@permittedKwargs({'sources', 'nsversion', 'namespace', 'symbol_prefix', 'identifier_prefix',
'export_packages', 'includes', 'dependencies', 'link_with', 'include_directories',
'install', 'install_dir_gir', 'install_dir_typelib', 'extra_args',
'packages', 'header', 'build_by_default'})
def generate_gir(self, state, args, kwargs):
if not args:
raise MesonException('generate_gir takes at least one argument')
if kwargs.get('install_dir'):
raise MesonException('install_dir is not supported with generate_gir(), see "install_dir_gir" and "install_dir_typelib"')
girtargets = [self._unwrap_gir_target(arg, state) for arg in args]
if len(girtargets) > 1 and any([isinstance(el, build.Executable) for el in girtargets]):
raise MesonException('generate_gir only accepts a single argument when one of the arguments is an executable')
self.gir_dep, pkgargs = self._get_gir_dep(state)
        # find_program is needed in case g-i is built as a subproject.
# In that case it uses override_find_program so the gobject utilities
# can be used from the build dir instead of from the system.
# However, GObject-introspection provides the appropriate paths to
# these utilities via pkg-config, so it would be best to use the
# results from pkg-config when possible.
gi_util_dirs_check = [state.environment.get_build_dir(), state.environment.get_source_dir()]
giscanner = self.interpreter.find_program_impl('g-ir-scanner')
if giscanner.found():
giscanner_path = giscanner.get_command()[0]
if not any(x in giscanner_path for x in gi_util_dirs_check):
giscanner = self.gir_dep.get_pkgconfig_variable('g_ir_scanner', {})
else:
giscanner = self.gir_dep.get_pkgconfig_variable('g_ir_scanner', {})
gicompiler = self.interpreter.find_program_impl('g-ir-compiler')
if gicompiler.found():
gicompiler_path = gicompiler.get_command()[0]
if not any(x in gicompiler_path for x in gi_util_dirs_check):
gicompiler = self.gir_dep.get_pkgconfig_variable('g_ir_compiler', {})
else:
gicompiler = self.gir_dep.get_pkgconfig_variable('g_ir_compiler', {})
ns = kwargs.pop('namespace')
nsversion = kwargs.pop('nsversion')
libsources = mesonlib.extract_as_list(kwargs, 'sources', pop=True)
girfile = '%s-%s.gir' % (ns, nsversion)
srcdir = os.path.join(state.environment.get_source_dir(), state.subdir)
builddir = os.path.join(state.environment.get_build_dir(), state.subdir)
depends = [] + girtargets
gir_inc_dirs = []
langs_compilers = self._get_girtargets_langs_compilers(girtargets)
cflags, internal_ldflags, external_ldflags = self._get_langs_compilers_flags(state, langs_compilers)
deps = self._get_gir_targets_deps(girtargets)
deps += mesonlib.unholder(extract_as_list(kwargs, 'dependencies', pop=True))
typelib_includes = self._gather_typelib_includes_and_update_depends(state, deps, depends)
# ldflags will be misinterpreted by gir scanner (showing
# spurious dependencies) but building GStreamer fails if they
# are not used here.
dep_cflags, dep_internal_ldflags, dep_external_ldflags, gi_includes = \
self._get_dependencies_flags(deps, state, depends, use_gir_args=True)
cflags += list(self._get_scanner_cflags(dep_cflags))
cflags += list(self._get_scanner_cflags(self._get_external_args_for_langs(state, [lc[0] for lc in langs_compilers])))
internal_ldflags += list(self._get_scanner_ldflags(dep_internal_ldflags))
external_ldflags += list(self._get_scanner_ldflags(dep_external_ldflags))
girtargets_inc_dirs = self._get_gir_targets_inc_dirs(girtargets)
inc_dirs = self._scan_inc_dirs(kwargs)
scan_command = [giscanner]
scan_command += pkgargs
scan_command += ['--no-libtool']
scan_command += ['--namespace=' + ns, '--nsversion=' + nsversion]
scan_command += ['--warn-all']
scan_command += ['--output', '@OUTPUT@']
scan_command += self._scan_header(kwargs)
scan_command += self._scan_extra_args(kwargs)
scan_command += ['-I' + srcdir, '-I' + builddir]
scan_command += get_include_args(girtargets_inc_dirs)
scan_command += ['--filelist=' + self._make_gir_filelist(state, srcdir, ns, nsversion, girtargets, libsources)]
scan_command += self._scan_link_withs(state, depends, kwargs)
scan_command += self._scan_include(state, depends, gir_inc_dirs, kwargs)
scan_command += self._scan_symbol_prefix(kwargs)
scan_command += self._scan_identifier_prefix(kwargs)
scan_command += self._scan_export_packages(kwargs)
scan_command += ['--cflags-begin']
scan_command += cflags
scan_command += ['--cflags-end']
scan_command += get_include_args(inc_dirs)
scan_command += get_include_args(list(gi_includes) + gir_inc_dirs + inc_dirs, prefix='--add-include-path=')
scan_command += list(internal_ldflags)
scan_command += self._scan_gir_targets(state, girtargets)
scan_command += self._scan_langs(state, [lc[0] for lc in langs_compilers])
scan_command += list(external_ldflags)
if gir_has_option(self.interpreter, '--sources-top-dirs'):
scan_command += ['--sources-top-dirs', os.path.join(state.environment.get_source_dir(), self.interpreter.subproject_dir, state.subproject)]
scan_command += ['--sources-top-dirs', os.path.join(state.environment.get_build_dir(), self.interpreter.subproject_dir, state.subproject)]
scan_target = self._make_gir_target(state, girfile, scan_command, depends, kwargs)
typelib_output = '%s-%s.typelib' % (ns, nsversion)
typelib_cmd = [gicompiler, scan_target, '--output', '@OUTPUT@']
typelib_cmd += get_include_args(gir_inc_dirs, prefix='--includedir=')
for incdir in typelib_includes:
typelib_cmd += ["--includedir=" + incdir]
typelib_target = self._make_typelib_target(state, typelib_output, typelib_cmd, kwargs)
rv = [scan_target, typelib_target]
return ModuleReturnValue(rv, rv)
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@permittedKwargs({'build_by_default', 'depend_files'})
def compile_schemas(self, state, args, kwargs):
if args:
raise MesonException('Compile_schemas does not take positional arguments.')
srcdir = os.path.join(state.build_to_src, state.subdir)
outdir = state.subdir
cmd = [self.interpreter.find_program_impl('glib-compile-schemas')]
cmd += ['--targetdir', outdir, srcdir]
kwargs['command'] = cmd
kwargs['input'] = []
kwargs['output'] = 'gschemas.compiled'
if state.subdir == '':
targetname = 'gsettings-compile'
else:
targetname = 'gsettings-compile-' + state.subdir.replace('/', '_')
target_g = build.CustomTarget(targetname, state.subdir, state.subproject, kwargs)
return ModuleReturnValue(target_g, [target_g])
@permittedKwargs({'sources', 'media', 'symlink_media', 'languages'})
def yelp(self, state, args, kwargs):
if len(args) < 1:
raise MesonException('Yelp requires a project id')
project_id = args[0]
sources = mesonlib.stringlistify(kwargs.pop('sources', []))
if not sources:
if len(args) > 1:
sources = mesonlib.stringlistify(args[1:])
if not sources:
raise MesonException('Yelp requires a list of sources')
source_str = '@@'.join(sources)
langs = mesonlib.stringlistify(kwargs.pop('languages', []))
if langs:
mlog.deprecation('''The "languages" argument of gnome.yelp() is deprecated.
Use a LINGUAS file in the sources directory instead.
This will become a hard error in the future.''')
media = mesonlib.stringlistify(kwargs.pop('media', []))
symlinks = kwargs.pop('symlink_media', True)
if not isinstance(symlinks, bool):
raise MesonException('symlink_media must be a boolean')
if kwargs:
raise MesonException('Unknown arguments passed: {}'.format(', '.join(kwargs.keys())))
script = state.environment.get_build_command()
args = ['--internal',
'yelphelper',
'install',
'--subdir=' + state.subdir,
'--id=' + project_id,
'--installdir=' + os.path.join(state.environment.get_datadir(), 'help'),
'--sources=' + source_str]
if symlinks:
args.append('--symlinks=true')
if media:
args.append('--media=' + '@@'.join(media))
if langs:
args.append('--langs=' + '@@'.join(langs))
inscript = build.RunScript(script, args)
potargs = state.environment.get_build_command() + [
'--internal', 'yelphelper', 'pot',
'--subdir=' + state.subdir,
'--id=' + project_id,
'--sources=' + source_str,
]
pottarget = build.RunTarget('help-' + project_id + '-pot', potargs[0],
potargs[1:], [], state.subdir, state.subproject)
poargs = state.environment.get_build_command() + [
'--internal', 'yelphelper', 'update-po',
'--subdir=' + state.subdir,
'--id=' + project_id,
'--sources=' + source_str,
'--langs=' + '@@'.join(langs),
]
potarget = build.RunTarget('help-' + project_id + '-update-po', poargs[0],
poargs[1:], [], state.subdir, state.subproject)
rv = [inscript, pottarget, potarget]
return ModuleReturnValue(None, rv)
@FeatureNewKwargs('gnome.gtkdoc', '0.52.0', ['check'])
@FeatureNewKwargs('gnome.gtkdoc', '0.48.0', ['c_args'])
@FeatureNewKwargs('gnome.gtkdoc', '0.48.0', ['module_version'])
@FeatureNewKwargs('gnome.gtkdoc', '0.37.0', ['namespace', 'mode'])
@permittedKwargs({'main_xml', 'main_sgml', 'src_dir', 'dependencies', 'install',
'install_dir', 'scan_args', 'scanobjs_args', 'gobject_typesfile',
'fixxref_args', 'html_args', 'html_assets', 'content_files',
'mkdb_args', 'ignore_headers', 'include_directories',
'namespace', 'mode', 'expand_content_files', 'module_version',
'c_args'})
def gtkdoc(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Gtkdoc must have one positional argument.')
modulename = args[0]
if not isinstance(modulename, str):
raise MesonException('Gtkdoc arg must be string.')
if 'src_dir' not in kwargs:
raise MesonException('Keyword argument src_dir missing.')
main_file = kwargs.get('main_sgml', '')
if not isinstance(main_file, str):
raise MesonException('Main sgml keyword argument must be a string.')
main_xml = kwargs.get('main_xml', '')
if not isinstance(main_xml, str):
raise MesonException('Main xml keyword argument must be a string.')
moduleversion = kwargs.get('module_version', '')
if not isinstance(moduleversion, str):
raise MesonException('Module version keyword argument must be a string.')
if main_xml != '':
if main_file != '':
raise MesonException('You can only specify main_xml or main_sgml, not both.')
main_file = main_xml
targetname = modulename + ('-' + moduleversion if moduleversion else '') + '-doc'
command = state.environment.get_build_command()
namespace = kwargs.get('namespace', '')
mode = kwargs.get('mode', 'auto')
VALID_MODES = ('xml', 'sgml', 'none', 'auto')
if mode not in VALID_MODES:
raise MesonException('gtkdoc: Mode {} is not a valid mode: {}'.format(mode, VALID_MODES))
src_dirs = mesonlib.extract_as_list(kwargs, 'src_dir')
header_dirs = []
for src_dir in src_dirs:
if hasattr(src_dir, 'held_object'):
src_dir = src_dir.held_object
if not isinstance(src_dir, build.IncludeDirs):
raise MesonException('Invalid keyword argument for src_dir.')
for inc_dir in src_dir.get_incdirs():
header_dirs.append(os.path.join(state.environment.get_source_dir(),
src_dir.get_curdir(), inc_dir))
header_dirs.append(os.path.join(state.environment.get_build_dir(),
src_dir.get_curdir(), inc_dir))
else:
header_dirs.append(src_dir)
args = ['--internal', 'gtkdoc',
'--sourcedir=' + state.environment.get_source_dir(),
'--builddir=' + state.environment.get_build_dir(),
'--subdir=' + state.subdir,
'--headerdirs=' + '@@'.join(header_dirs),
'--mainfile=' + main_file,
'--modulename=' + modulename,
'--moduleversion=' + moduleversion,
'--mode=' + mode]
for tool in ['scan', 'scangobj', 'mkdb', 'mkhtml', 'fixxref']:
program_name = 'gtkdoc-' + tool
program = self.interpreter.find_program_impl(program_name)
path = program.held_object.get_path()
args.append('--{}={}'.format(program_name, path))
if namespace:
args.append('--namespace=' + namespace)
args += self._unpack_args('--htmlargs=', 'html_args', kwargs)
args += self._unpack_args('--scanargs=', 'scan_args', kwargs)
args += self._unpack_args('--scanobjsargs=', 'scanobjs_args', kwargs)
args += self._unpack_args('--gobjects-types-file=', 'gobject_typesfile', kwargs, state)
args += self._unpack_args('--fixxrefargs=', 'fixxref_args', kwargs)
args += self._unpack_args('--mkdbargs=', 'mkdb_args', kwargs)
args += self._unpack_args('--html-assets=', 'html_assets', kwargs, state)
depends = []
content_files = []
for s in unholder(mesonlib.extract_as_list(kwargs, 'content_files')):
if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
depends.append(s)
for o in s.get_outputs():
content_files.append(os.path.join(state.environment.get_build_dir(),
state.backend.get_target_dir(s),
o))
elif isinstance(s, mesonlib.File):
content_files.append(s.absolute_path(state.environment.get_source_dir(),
state.environment.get_build_dir()))
elif isinstance(s, build.GeneratedList):
depends.append(s)
for gen_src in s.get_outputs():
content_files.append(os.path.join(state.environment.get_source_dir(),
state.subdir,
gen_src))
elif isinstance(s, str):
content_files.append(os.path.join(state.environment.get_source_dir(),
state.subdir,
s))
else:
raise MesonException(
'Invalid object type: {!r}'.format(s.__class__.__name__))
args += ['--content-files=' + '@@'.join(content_files)]
args += self._unpack_args('--expand-content-files=', 'expand_content_files', kwargs, state)
args += self._unpack_args('--ignore-headers=', 'ignore_headers', kwargs)
args += self._unpack_args('--installdir=', 'install_dir', kwargs)
args += self._get_build_args(kwargs, state, depends)
custom_kwargs = {'output': modulename + '-decl.txt',
'command': command + args,
'depends': depends,
'build_always_stale': True,
}
custom_target = build.CustomTarget(targetname, state.subdir, state.subproject, custom_kwargs)
alias_target = build.AliasTarget(targetname, [custom_target], state.subdir, state.subproject)
if kwargs.get('check', False):
check_cmd = self.interpreter.find_program_impl('gtkdoc-check')
check_env = ['DOC_MODULE=' + modulename,
'DOC_MAIN_SGML_FILE=' + main_file]
check_args = [targetname + '-check', check_cmd]
check_kwargs = {'env': check_env,
'workdir': os.path.join(state.environment.get_build_dir(), state.subdir),
'depends': custom_target}
self.interpreter.add_test(state.current_node, check_args, check_kwargs, True)
res = [custom_target, alias_target]
if kwargs.get('install', True):
res.append(build.RunScript(command, args))
return ModuleReturnValue(custom_target, res)
def _get_build_args(self, kwargs, state, depends):
args = []
deps = mesonlib.unholder(extract_as_list(kwargs, 'dependencies'))
cflags = []
cflags.extend(mesonlib.stringlistify(kwargs.pop('c_args', [])))
deps_cflags, internal_ldflags, external_ldflags, gi_includes = \
self._get_dependencies_flags(deps, state, depends, include_rpath=True)
inc_dirs = mesonlib.extract_as_list(kwargs, 'include_directories')
for incd in inc_dirs:
if not isinstance(incd.held_object, (str, build.IncludeDirs)):
raise MesonException(
'Gir include dirs should be include_directories().')
cflags.extend(deps_cflags)
cflags.extend(get_include_args(inc_dirs))
ldflags = []
ldflags.extend(internal_ldflags)
ldflags.extend(external_ldflags)
cflags.extend(state.environment.coredata.get_external_args(MachineChoice.HOST, 'c'))
ldflags.extend(state.environment.coredata.get_external_link_args(MachineChoice.HOST, 'c'))
compiler = state.environment.coredata.compilers[MachineChoice.HOST]['c']
compiler_flags = self._get_langs_compilers_flags(state, [('c', compiler)])
cflags.extend(compiler_flags[0])
ldflags.extend(compiler_flags[1])
ldflags.extend(compiler_flags[2])
if compiler:
args += ['--cc=%s' % join_args(compiler.get_exelist())]
args += ['--ld=%s' % join_args(compiler.get_linker_exelist())]
if cflags:
args += ['--cflags=%s' % join_args(cflags)]
if ldflags:
args += ['--ldflags=%s' % join_args(ldflags)]
return args
@noKwargs
def gtkdoc_html_dir(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Must have exactly one argument.')
modulename = args[0]
if not isinstance(modulename, str):
raise MesonException('Argument must be a string')
return ModuleReturnValue(os.path.join('share/gtk-doc/html', modulename), [])
@staticmethod
def _unpack_args(arg, kwarg_name, kwargs, expend_file_state=None):
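        # Illustrative example (values assumed): _unpack_args('--htmlargs=', 'html_args',
        # {'html_args': ['-a', '-b']}) returns ['--htmlargs=-a@@-b'].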
if kwarg_name not in kwargs:
return []
new_args = mesonlib.extract_as_list(kwargs, kwarg_name)
args = []
for i in new_args:
if expend_file_state and isinstance(i, mesonlib.File):
i = i.absolute_path(expend_file_state.environment.get_source_dir(), expend_file_state.environment.get_build_dir())
elif expend_file_state and isinstance(i, str):
i = os.path.join(expend_file_state.environment.get_source_dir(), expend_file_state.subdir, i)
elif not isinstance(i, str):
raise MesonException(kwarg_name + ' values must be strings.')
args.append(i)
if args:
return [arg + '@@'.join(args)]
return []
def _get_autocleanup_args(self, kwargs, glib_version):
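        # Illustrative example (values assumed): with glib 2.48 and 'autocleanup' in
        # kwargs, a warning is printed and [] is returned; with glib 2.56 and the
        # default autocleanup='all', ['--c-generate-autocleanup', 'all'] is returned.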
if not mesonlib.version_compare(glib_version, '>= 2.49.1'):
# Warn if requested, silently disable if not
if 'autocleanup' in kwargs:
mlog.warning('Glib version ({}) is too old to support the \'autocleanup\' '
'kwarg, need 2.49.1 or newer'.format(glib_version))
return []
autocleanup = kwargs.pop('autocleanup', 'all')
values = ('none', 'objects', 'all')
if autocleanup not in values:
raise MesonException('gdbus_codegen does not support {!r} as an autocleanup value, '
'must be one of: {!r}'.format(autocleanup, ', '.join(values)))
return ['--c-generate-autocleanup', autocleanup]
@FeatureNewKwargs('build target', '0.46.0', ['install_header', 'install_dir', 'sources'])
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@FeatureNewKwargs('build target', '0.47.0', ['extra_args', 'autocleanup'])
@permittedKwargs({'interface_prefix', 'namespace', 'extra_args', 'autocleanup', 'object_manager', 'build_by_default',
'annotations', 'docbook', 'install_header', 'install_dir', 'sources'})
def gdbus_codegen(self, state, args, kwargs):
if len(args) not in (1, 2):
raise MesonException('gdbus_codegen takes at most two arguments, name and xml file.')
namebase = args[0]
xml_files = args[1:]
cmd = [self.interpreter.find_program_impl('gdbus-codegen')]
extra_args = mesonlib.stringlistify(kwargs.pop('extra_args', []))
cmd += extra_args
# Autocleanup supported?
glib_version = self._get_native_glib_version(state)
cmd += self._get_autocleanup_args(kwargs, glib_version)
if 'interface_prefix' in kwargs:
cmd += ['--interface-prefix', kwargs.pop('interface_prefix')]
if 'namespace' in kwargs:
cmd += ['--c-namespace', kwargs.pop('namespace')]
if kwargs.get('object_manager', False):
cmd += ['--c-generate-object-manager']
if 'sources' in kwargs:
xml_files += mesonlib.listify(kwargs.pop('sources'))
build_by_default = kwargs.get('build_by_default', False)
# Annotations are a bit ugly in that they are a list of lists of strings...
annotations = kwargs.pop('annotations', [])
if not isinstance(annotations, list):
raise MesonException('annotations takes a list')
if annotations and isinstance(annotations, list) and not isinstance(annotations[0], list):
annotations = [annotations]
for annotation in annotations:
if len(annotation) != 3 or not all(isinstance(i, str) for i in annotation):
raise MesonException('Annotations must be made up of 3 strings for ELEMENT, KEY, and VALUE')
cmd += ['--annotate'] + annotation
targets = []
install_header = kwargs.get('install_header', False)
install_dir = kwargs.get('install_dir', state.environment.coredata.get_builtin_option('includedir'))
output = namebase + '.c'
# Added in https://gitlab.gnome.org/GNOME/glib/commit/e4d68c7b3e8b01ab1a4231bf6da21d045cb5a816 (2.55.2)
# Fixed in https://gitlab.gnome.org/GNOME/glib/commit/cd1f82d8fc741a2203582c12cc21b4dacf7e1872 (2.56.2)
if mesonlib.version_compare(glib_version, '>= 2.56.2'):
custom_kwargs = {'input': xml_files,
'output': output,
'command': cmd + ['--body', '--output', '@OUTPUT@', '@INPUT@'],
'build_by_default': build_by_default
}
else:
if 'docbook' in kwargs:
docbook = kwargs['docbook']
if not isinstance(docbook, str):
raise MesonException('docbook value must be a string.')
cmd += ['--generate-docbook', docbook]
# https://git.gnome.org/browse/glib/commit/?id=ee09bb704fe9ccb24d92dd86696a0e6bb8f0dc1a
if mesonlib.version_compare(glib_version, '>= 2.51.3'):
cmd += ['--output-directory', '@OUTDIR@', '--generate-c-code', namebase, '@INPUT@']
else:
self._print_gdbus_warning()
cmd += ['--generate-c-code', '@OUTDIR@/' + namebase, '@INPUT@']
custom_kwargs = {'input': xml_files,
'output': output,
'command': cmd,
'build_by_default': build_by_default
}
cfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
targets.append(cfile_custom_target)
output = namebase + '.h'
if mesonlib.version_compare(glib_version, '>= 2.56.2'):
custom_kwargs = {'input': xml_files,
'output': output,
'command': cmd + ['--header', '--output', '@OUTPUT@', '@INPUT@'],
'build_by_default': build_by_default,
'install': install_header,
'install_dir': install_dir
}
else:
custom_kwargs = {'input': xml_files,
'output': output,
'command': cmd,
'build_by_default': build_by_default,
'install': install_header,
'install_dir': install_dir,
'depends': cfile_custom_target
}
hfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
targets.append(hfile_custom_target)
if 'docbook' in kwargs:
docbook = kwargs['docbook']
if not isinstance(docbook, str):
raise MesonException('docbook value must be a string.')
docbook_cmd = cmd + ['--output-directory', '@OUTDIR@', '--generate-docbook', docbook, '@INPUT@']
# The docbook output is always ${docbook}-${name_of_xml_file}
output = namebase + '-docbook'
outputs = []
for f in xml_files:
outputs.append('{}-{}'.format(docbook, os.path.basename(str(f))))
if mesonlib.version_compare(glib_version, '>= 2.56.2'):
custom_kwargs = {'input': xml_files,
'output': outputs,
'command': docbook_cmd,
'build_by_default': build_by_default
}
else:
custom_kwargs = {'input': xml_files,
'output': outputs,
'command': cmd,
'build_by_default': build_by_default,
'depends': cfile_custom_target
}
docbook_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
targets.append(docbook_custom_target)
return ModuleReturnValue(targets, targets)
@permittedKwargs({'sources', 'c_template', 'h_template', 'install_header', 'install_dir',
'comments', 'identifier_prefix', 'symbol_prefix', 'eprod', 'vprod',
'fhead', 'fprod', 'ftail', 'vhead', 'vtail', 'depends'})
def mkenums(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Mkenums requires one positional argument.')
basename = args[0]
if 'sources' not in kwargs:
raise MesonException('Missing keyword argument "sources".')
sources = kwargs.pop('sources')
if isinstance(sources, str):
sources = [sources]
elif not isinstance(sources, list):
raise MesonException(
'Sources keyword argument must be a string or array.')
cmd = []
known_kwargs = ['comments', 'eprod', 'fhead', 'fprod', 'ftail',
'identifier_prefix', 'symbol_prefix', 'template',
'vhead', 'vprod', 'vtail']
known_custom_target_kwargs = ['install_dir', 'build_always',
'depends', 'depend_files']
c_template = h_template = None
install_header = False
for arg, value in kwargs.items():
if arg == 'sources':
raise AssertionError("sources should've already been handled")
elif arg == 'c_template':
c_template = value
if isinstance(c_template, mesonlib.File):
c_template = c_template.absolute_path(state.environment.source_dir, state.environment.build_dir)
if 'template' in kwargs:
raise MesonException('Mkenums does not accept both '
'c_template and template keyword '
'arguments at the same time.')
elif arg == 'h_template':
h_template = value
if isinstance(h_template, mesonlib.File):
h_template = h_template.absolute_path(state.environment.source_dir, state.environment.build_dir)
if 'template' in kwargs:
raise MesonException('Mkenums does not accept both '
'h_template and template keyword '
'arguments at the same time.')
elif arg == 'install_header':
install_header = value
elif arg in known_kwargs:
cmd += ['--' + arg.replace('_', '-'), value]
elif arg not in known_custom_target_kwargs:
raise MesonException(
'Mkenums does not take a %s keyword argument.' % (arg, ))
cmd = [self.interpreter.find_program_impl(['glib-mkenums', 'mkenums'])] + cmd
custom_kwargs = {}
for arg in known_custom_target_kwargs:
if arg in kwargs:
custom_kwargs[arg] = kwargs[arg]
targets = []
if h_template is not None:
h_output = os.path.basename(os.path.splitext(h_template)[0])
# We always set template as the first element in the source array
# so --template consumes it.
h_cmd = cmd + ['--template', '@INPUT@']
h_sources = [h_template] + sources
custom_kwargs['install'] = install_header
if 'install_dir' not in custom_kwargs:
custom_kwargs['install_dir'] = \
state.environment.coredata.get_builtin_option('includedir')
h_target = self._make_mkenum_custom_target(state, h_sources,
h_output, h_cmd,
custom_kwargs)
targets.append(h_target)
if c_template is not None:
c_output = os.path.basename(os.path.splitext(c_template)[0])
# We always set template as the first element in the source array
# so --template consumes it.
c_cmd = cmd + ['--template', '@INPUT@']
c_sources = [c_template] + sources
# Never install the C file. Complain on bug tracker if you need it.
custom_kwargs['install'] = False
if h_template is not None:
if 'depends' in custom_kwargs:
custom_kwargs['depends'] += [h_target]
else:
custom_kwargs['depends'] = h_target
c_target = self._make_mkenum_custom_target(state, c_sources,
c_output, c_cmd,
custom_kwargs)
targets.insert(0, c_target)
if c_template is None and h_template is None:
generic_cmd = cmd + ['@INPUT@']
custom_kwargs['install'] = install_header
if 'install_dir' not in custom_kwargs:
custom_kwargs['install_dir'] = \
state.environment.coredata.get_builtin_option('includedir')
target = self._make_mkenum_custom_target(state, sources, basename,
generic_cmd, custom_kwargs)
return ModuleReturnValue(target, [target])
elif len(targets) == 1:
return ModuleReturnValue(targets[0], [targets[0]])
else:
return ModuleReturnValue(targets, targets)
@FeatureNew('gnome.mkenums_simple', '0.42.0')
def mkenums_simple(self, state, args, kwargs):
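# Convenience wrapper that builds the usual GLib enum boilerplate itself
# (the fhead/fprod/vhead/vprod/vtail snippets below) and then calls mkenums()
# twice: once for the generated .c body and once for the .h header.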
hdr_filename = args[0] + '.h'
body_filename = args[0] + '.c'
# not really needed, just for sanity checking
forbidden_kwargs = ['c_template', 'h_template', 'eprod', 'fhead',
'fprod', 'ftail', 'vhead', 'vtail', 'comments']
for arg in forbidden_kwargs:
if arg in kwargs:
raise MesonException('mkenums_simple() does not take a %s keyword argument' % (arg, ))
# kwargs to pass as-is from mkenums_simple() to mkenums()
shared_kwargs = ['sources', 'install_header', 'install_dir',
'identifier_prefix', 'symbol_prefix']
mkenums_kwargs = {}
for arg in shared_kwargs:
if arg in kwargs:
mkenums_kwargs[arg] = kwargs[arg]
# .c file generation
c_file_kwargs = copy.deepcopy(mkenums_kwargs)
if 'sources' not in kwargs:
raise MesonException('Missing keyword argument "sources".')
sources = kwargs['sources']
if isinstance(sources, str):
sources = [sources]
elif not isinstance(sources, list):
raise MesonException(
'Sources keyword argument must be a string or array.')
# The `install_header` argument will be used by mkenums() when
# not using template files, so we need to forcibly unset it
# when generating the C source file, otherwise we will end up
# installing it
c_file_kwargs['install_header'] = False
header_prefix = kwargs.get('header_prefix', '')
decl_decorator = kwargs.get('decorator', '')
func_prefix = kwargs.get('function_prefix', '')
body_prefix = kwargs.get('body_prefix', '')
# Maybe we should write our own template files into the build dir
# instead, but that seems like much more work, nice as it would be.
fhead = ''
if body_prefix != '':
fhead += '%s\n' % body_prefix
fhead += '#include "%s"\n' % hdr_filename
for hdr in sources:
fhead += '#include "%s"\n' % os.path.basename(str(hdr))
fhead += '''
#define C_ENUM(v) ((gint) v)
#define C_FLAGS(v) ((guint) v)
'''
c_file_kwargs['fhead'] = fhead
c_file_kwargs['fprod'] = '''
/* enumerations from "@basename@" */
'''
c_file_kwargs['vhead'] = '''
GType
%s@enum_name@_get_type (void)
{
static volatile gsize gtype_id = 0;
static const G@Type@Value values[] = {''' % func_prefix
c_file_kwargs['vprod'] = ' { C_@TYPE@(@VALUENAME@), "@VALUENAME@", "@valuenick@" },'
c_file_kwargs['vtail'] = ''' { 0, NULL, NULL }
};
if (g_once_init_enter (&gtype_id)) {
GType new_type = g_@type@_register_static (g_intern_static_string ("@EnumName@"), values);
g_once_init_leave (&gtype_id, new_type);
}
return (GType) gtype_id;
}'''
rv = self.mkenums(state, [body_filename], c_file_kwargs)
c_file = rv.return_value
# .h file generation
h_file_kwargs = copy.deepcopy(mkenums_kwargs)
h_file_kwargs['fhead'] = '''#pragma once
#include <glib-object.h>
{}
G_BEGIN_DECLS
'''.format(header_prefix)
h_file_kwargs['fprod'] = '''
/* enumerations from "@basename@" */
'''
h_file_kwargs['vhead'] = '''
{}
GType {}@enum_name@_get_type (void);
#define @ENUMPREFIX@_TYPE_@ENUMSHORT@ ({}@enum_name@_get_type())'''.format(decl_decorator, func_prefix, func_prefix)
h_file_kwargs['ftail'] = '''
G_END_DECLS'''
rv = self.mkenums(state, [hdr_filename], h_file_kwargs)
h_file = rv.return_value
return ModuleReturnValue([c_file, h_file], [c_file, h_file])
@staticmethod
def _make_mkenum_custom_target(state, sources, output, cmd, kwargs):
custom_kwargs = {
'input': sources,
'output': output,
'capture': True,
'command': cmd
}
custom_kwargs.update(kwargs)
return build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs,
# https://github.com/mesonbuild/meson/issues/973
absolute_paths=True)
@permittedKwargs({'sources', 'prefix', 'install_header', 'install_dir', 'stdinc',
'nostdinc', 'internal', 'skip_source', 'valist_marshallers',
'extra_args'})
def genmarshal(self, state, args, kwargs):
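# Wraps glib-genmarshal: produces two CustomTargets from the same marshaller
# list, one for the generated .c body and one for the .h header, adapting the
# command line to the native GLib version detected at configure time.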
if len(args) != 1:
raise MesonException(
'Genmarshal requires one positional argument.')
output = args[0]
if 'sources' not in kwargs:
raise MesonException('Missing keyword argument "sources".')
sources = kwargs.pop('sources')
if isinstance(sources, str):
sources = [sources]
elif not isinstance(sources, list):
raise MesonException(
'Sources keyword argument must be a string or array.')
new_genmarshal = mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.3')
cmd = [self.interpreter.find_program_impl('glib-genmarshal')]
known_kwargs = ['internal', 'nostdinc', 'skip_source', 'stdinc',
'valist_marshallers', 'extra_args']
known_custom_target_kwargs = ['build_always', 'depends',
'depend_files', 'install_dir',
'install_header']
for arg, value in kwargs.items():
if arg == 'prefix':
cmd += ['--prefix', value]
elif arg == 'extra_args':
if new_genmarshal:
cmd += mesonlib.stringlistify(value)
else:
mlog.warning('The current version of GLib does not support extra arguments \n'
'for glib-genmarshal. You need at least GLib 2.53.3. See ',
mlog.bold('https://github.com/mesonbuild/meson/pull/2049'))
elif arg in known_kwargs and value:
cmd += ['--' + arg.replace('_', '-')]
elif arg not in known_custom_target_kwargs:
raise MesonException(
'Genmarshal does not take a %s keyword argument.' % (
arg, ))
install_header = kwargs.pop('install_header', False)
install_dir = kwargs.pop('install_dir', None)
custom_kwargs = {
'input': sources,
}
# https://github.com/GNOME/glib/commit/0fbc98097fac4d3e647684f344e508abae109fdf
if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.51.0'):
cmd += ['--output', '@OUTPUT@']
else:
custom_kwargs['capture'] = True
for arg in known_custom_target_kwargs:
if arg in kwargs:
custom_kwargs[arg] = kwargs[arg]
header_file = output + '.h'
custom_kwargs['command'] = cmd + ['--body', '@INPUT@']
if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.4'):
# Silence any warnings about missing prototypes
custom_kwargs['command'] += ['--include-header', header_file]
custom_kwargs['output'] = output + '.c'
body = build.CustomTarget(output + '_c', state.subdir, state.subproject, custom_kwargs)
custom_kwargs['install'] = install_header
if install_dir is not None:
custom_kwargs['install_dir'] = install_dir
if new_genmarshal:
cmd += ['--pragma-once']
custom_kwargs['command'] = cmd + ['--header', '@INPUT@']
custom_kwargs['output'] = header_file
header = build.CustomTarget(output + '_h', state.subdir, state.subproject, custom_kwargs)
rv = [body, header]
return ModuleReturnValue(rv, rv)
@staticmethod
def _vapi_args_to_command(prefix, variable, kwargs, accept_vapi=False):
arg_list = mesonlib.extract_as_list(kwargs, variable)
ret = []
for arg in arg_list:
if not isinstance(arg, str):
types = 'strings' + (' or InternalDependencies' if accept_vapi else '')
raise MesonException('All {} must be {}'.format(variable, types))
ret.append(prefix + arg)
return ret
def _extract_vapi_packages(self, state, kwargs):
'''
Packages are special because we need to:
- Get a list of packages for the .deps file
- Get a list of depends for any VapiTargets
- Get package name from VapiTargets
- Add include dirs for any VapiTargets
'''
arg_list = kwargs.get('packages')
if not arg_list:
return [], [], [], []
arg_list = mesonlib.listify(arg_list)
vapi_depends = []
vapi_packages = []
vapi_includes = []
ret = []
remaining_args = []
for arg in unholder(arg_list):
if isinstance(arg, InternalDependency):
targets = [t for t in arg.sources if isinstance(t, VapiTarget)]
for target in targets:
srcdir = os.path.join(state.environment.get_source_dir(),
target.get_subdir())
outdir = os.path.join(state.environment.get_build_dir(),
target.get_subdir())
outfile = target.get_outputs()[0][:-5] # Strip .vapi
ret.append('--vapidir=' + outdir)
ret.append('--girdir=' + outdir)
ret.append('--pkg=' + outfile)
vapi_depends.append(target)
vapi_packages.append(outfile)
vapi_includes.append(srcdir)
else:
vapi_packages.append(arg)
remaining_args.append(arg)
kwargs['packages'] = remaining_args
vapi_args = ret + self._vapi_args_to_command('--pkg=', 'packages', kwargs, accept_vapi=True)
return vapi_args, vapi_depends, vapi_packages, vapi_includes
def _generate_deps(self, state, library, packages, install_dir):
outdir = state.environment.scratch_dir
fname = os.path.join(outdir, library + '.deps')
with open(fname, 'w') as ofile:
for package in packages:
ofile.write(package + '\n')
return build.Data(mesonlib.File(True, outdir, fname), install_dir)
def _get_vapi_link_with(self, target):
link_with = []
for dep in target.get_target_dependencies():
if isinstance(dep, build.SharedLibrary):
link_with.append(dep)
elif isinstance(dep, GirTarget):
link_with += self._get_vapi_link_with(dep)
return link_with
@permittedKwargs({'sources', 'packages', 'metadata_dirs', 'gir_dirs',
'vapi_dirs', 'install', 'install_dir'})
def generate_vapi(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('The library name is required')
if not isinstance(args[0], str):
raise MesonException('The first argument must be the name of the library')
created_values = []
library = args[0]
build_dir = os.path.join(state.environment.get_build_dir(), state.subdir)
source_dir = os.path.join(state.environment.get_source_dir(), state.subdir)
pkg_cmd, vapi_depends, vapi_packages, vapi_includes = self._extract_vapi_packages(state, kwargs)
if 'VAPIGEN' in os.environ:
cmd = [self.interpreter.find_program_impl(os.environ['VAPIGEN'])]
else:
cmd = [self.interpreter.find_program_impl('vapigen')]
cmd += ['--quiet', '--library=' + library, '--directory=' + build_dir]
cmd += self._vapi_args_to_command('--vapidir=', 'vapi_dirs', kwargs)
cmd += self._vapi_args_to_command('--metadatadir=', 'metadata_dirs', kwargs)
cmd += self._vapi_args_to_command('--girdir=', 'gir_dirs', kwargs)
cmd += pkg_cmd
cmd += ['--metadatadir=' + source_dir]
if 'sources' not in kwargs:
raise MesonException('sources are required to generate the vapi file')
inputs = mesonlib.extract_as_list(kwargs, 'sources')
link_with = []
for i in inputs:
if isinstance(i, str):
cmd.append(os.path.join(source_dir, i))
elif hasattr(i, 'held_object') and isinstance(i.held_object, GirTarget):
link_with += self._get_vapi_link_with(i.held_object)
subdir = os.path.join(state.environment.get_build_dir(),
i.held_object.get_subdir())
gir_file = os.path.join(subdir, i.held_object.get_outputs()[0])
cmd.append(gir_file)
else:
raise MesonException('Input must be a str or GirTarget')
vapi_output = library + '.vapi'
custom_kwargs = {
'command': cmd,
'input': inputs,
'output': vapi_output,
'depends': vapi_depends,
}
install_dir = kwargs.get('install_dir',
os.path.join(state.environment.coredata.get_builtin_option('datadir'),
'vala', 'vapi'))
if kwargs.get('install'):
custom_kwargs['install'] = kwargs['install']
custom_kwargs['install_dir'] = install_dir
# We shouldn't need this locally but we install it
deps_target = self._generate_deps(state, library, vapi_packages, install_dir)
created_values.append(deps_target)
vapi_target = VapiTarget(vapi_output, state.subdir, state.subproject, custom_kwargs)
# So to try our best to get this to just work we need:
# - link with the correct library
# - include the vapi and dependent vapi files in sources
# - add relevant directories to include dirs
incs = [build.IncludeDirs(state.subdir, ['.'] + vapi_includes, False)]
sources = [vapi_target] + vapi_depends
rv = InternalDependency(None, incs, [], [], link_with, [], sources, [], {})
created_values.append(rv)
return ModuleReturnValue(rv, created_values)
def initialize(*args, **kwargs):
return GnomeModule(*args, **kwargs)
| 47.628303 | 151 | 0.580094 |
| e25d7bdf7c046f395bb6c02351a322572b9dd82c | 5,103 | py | Python | bot/networth/networth.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | ["Apache-2.0"] | 1 | 2021-06-15T07:31:13.000Z | 2021-06-15T07:31:13.000Z | bot/networth/networth.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | ["Apache-2.0"] | 1 | 2021-06-01T10:14:32.000Z | 2021-06-02T10:54:12.000Z | bot/networth/networth.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | ["Apache-2.0"] | 2 | 2021-06-01T10:59:15.000Z | 2021-06-03T18:29:36.000Z |
import discord # type: ignore
from discord.ext import commands # type: ignore
from discord.commands import Option # type: ignore
from typing import Optional
import requests
from utils import error, PROFILE_NAMES, API_KEY, guild_ids
from menus import generate_static_preset_menu
from database_manager import insert_profile
from parse_profile import get_profile_data
from networth.generate_page import generate_page
from networth.constants import PAGES, EMOJI_LIST
class networth_cog(commands.Cog):
def __init__(self, bot) -> None:
self.client = bot
@commands.command(name="networth", aliases=["nw", "n", "net", "worth", "now", "new"])
async def networth_command(self, ctx: commands.Context, provided_username: Optional[str] = None, provided_profile_name: Optional[str] = None) -> None:
await self.get_networth(ctx, provided_username, provided_profile_name, is_response=False)
@commands.slash_command(name="networth", description="Gets networth data about someone", guild_ids=guild_ids)
async def networth_slash(self, ctx, username: Option(str, "username:", required=False),
profile: Option(str, "profile", choices=PROFILE_NAMES, required=False)):
if not (ctx.channel.permissions_for(ctx.guild.me)).send_messages:
return await ctx.respond("You're not allowed to do that here.", ephemeral=True)
await self.get_networth(ctx, username, profile, is_response=True)
@commands.slash_command(name="nw", description="Alias of /networth", guild_ids=guild_ids)
async def alias_networth_slash(self, ctx, username: Option(str, "username:", required=False),
profile: Option(str, "profile", choices=PROFILE_NAMES, required=False)):
if not (ctx.channel.permissions_for(ctx.guild.me)).send_messages:
return await ctx.respond("You're not allowed to do that here.", ephemeral=True)
await self.get_networth(ctx, username, profile, is_response=True)
@commands.user_command(name="Get networth", guild_ids=guild_ids)
async def networth_context_menu(self, ctx, member: discord.Member):
if not (ctx.channel.permissions_for(ctx.guild.me)).send_messages:
return await ctx.respond("You're not allowed to do that here.", ephemeral=True)
await self.get_networth(ctx, member.display_name, None, is_response=True)
#================================================================================================================================
async def get_networth(self, ctx, provided_username: Optional[str] = None, provided_profile_name: Optional[str] = None, is_response: bool = False) -> None:
# Convert username/linked_account/nick to profile and more
player_data = await get_profile_data(ctx, provided_username, provided_profile_name, return_profile_list=True, is_response=is_response)
if player_data is None:
return None
username, uuid, profile_data, profile_name = player_data["data"]
#=======================
# Make the API request
try:
request = requests.post(f"http://{self.client.ip_address}:8000/pages/{uuid}?profile_name={profile_name}", json=profile_data)
except Exception as e:
print(e)
return await error(ctx, "Error, the bot could not connect to the API", "This could be because the API is down for maintenance, because it's restarting, or because there are issues. Try again later.", is_response)
#=======================
# Deal with exceptions
if request.status_code == 500:
return await error(ctx, "Error, an exception has occurred", "This happened internally. If it continues, let the lead dev know (Skezza#1139)", is_response)
elif request.status_code == 401:
return await error(ctx, "Error, invalid profile given!", "Make sure it's one of their active profiles and try again.", is_response)
elif request.status_code == 423:
return await error(ctx, "Error, rate limit hit", "Your request has not been fulfilled; please slow down and try again later.", is_response)
elif request.status_code == 404:
return await error(ctx, "Error, that person could not be found", "Perhaps you input the incorrect name?", is_response)
#=======================
data = request.json()
# Generate all the pages and initiate the menu handler
list_of_embeds = [generate_page(ctx.author, data, username, page) for page in PAGES]
await generate_static_preset_menu(ctx=ctx, list_of_embeds=list_of_embeds, emoji_list=EMOJI_LIST, alternate_colours=True, is_response=is_response)
# Add the data to the database (for the leaderboard command)
data_totals: list[int] = [data[page]['total'] for page in ("purse", "banking", "inventory", "accessories", "ender_chest", "armor", "vault", "wardrobe", "storage", "pets")]
insert_profile(uuid, data["profile_data"]["profile_name"], data["profile_data"]["profile_type"], *data_totals)
| 64.594937 | 224 | 0.673721 |
| 3e164cd12e904954a2b7feabb24e61cb7e92911a | 2,777 | py | Python | parchment/tests/test_views.py | jbittel/django-parchment | bd7c40902c6fa66740ec76eeac15e274b7e7bc5a | ["BSD-3-Clause"] | 1 | 2015-10-03T20:19:34.000Z | 2015-10-03T20:19:34.000Z | parchment/tests/test_views.py | jbittel/django-parchment | bd7c40902c6fa66740ec76eeac15e274b7e7bc5a | ["BSD-3-Clause"] | null | null | null | parchment/tests/test_views.py | jbittel/django-parchment | bd7c40902c6fa66740ec76eeac15e274b7e7bc5a | ["BSD-3-Clause"] | null | null | null |
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from parchment.views import ParchmentView
class ParchmentViewTests(TestCase):
valid_login = reverse('parchment_login') + '?student_id=12345678987654321&customers_dob=01/12/1980&customers_firstname=Joe&customers_lastname=Alumni&customers_email_address=joealumni@school.edu'
invalid_login = reverse('parchment_login')
def setUp(self):
self.rf = RequestFactory()
def test_view_template(self):
"""
When a request is made to the login view, the correct template
should be utilized.
"""
request = self.rf.get(self.valid_login)
response = ParchmentView.as_view()(request)
self.assertTemplateUsed(response, 'parchment/login.html')
def test_bad_response(self):
"""
When an invalid request is made to the login view, it should return
a 400 Bad Request response.
"""
request = self.rf.get(self.invalid_login)
response = ParchmentView.as_view()(request)
self.assertEqual(response.status_code, 400)
def test_parch5_field(self):
"""
When a valid request is made to the login view, it should contain
a hidden ``parch5`` field.
"""
request = self.rf.get(self.valid_login)
response = ParchmentView.as_view()(request)
self.assertContains(response, '<input id="id_parch5" name="parch5" type="hidden" value="')
def test_parchiv_field(self):
"""
When a valid request is made to the login view, it should contain
a hidden ``parchiv`` field.
"""
request = self.rf.get(self.valid_login)
response = ParchmentView.as_view()(request)
self.assertContains(response, '<input id="id_parchiv" name="parchiv" type="hidden" value="')
@override_settings(PARCHMENT_DEBUG_MODE=True)
def test_debug_field(self):
"""
When a valid request is made to the login view and debug mode is
enabled, it should contain a hidden ``debug`` field.
"""
request = self.rf.get(self.valid_login)
response = ParchmentView.as_view()(request)
self.assertContains(response, '<input id="id_debug" name="debug" type="hidden" value="')
def test_action_url_s_id(self):
"""
When a valid request is made to the login view, the form action
URL should contain the configured school ID query parameter.
"""
request = self.rf.get(self.valid_login)
response = ParchmentView.as_view()(request)
self.assertContains(response, 's_id=1234567890abcdef')
| 38.569444 | 198 | 0.675189 |
| 02b1f0f536f63089099ab75904c0ce7752e3f020 | 2,152 | py | Python | repeats.py | Ginfung/FSSE | c54510b78dfceec76c74893e8514ed5177b504e5 | ["MIT"] | 3 | 2018-08-07T13:54:57.000Z | 2020-02-24T11:46:05.000Z | repeats.py | Ginfung/FSSE | c54510b78dfceec76c74893e8514ed5177b504e5 | ["MIT"] | 1 | 2019-01-15T23:22:19.000Z | 2019-01-15T23:22:19.000Z | repeats.py | Ginfung/FSSE | c54510b78dfceec76c74893e8514ed5177b504e5 | ["MIT"] | 1 | 2019-01-09T15:50:47.000Z | 2019-01-09T15:50:47.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016, Jianfeng Chen <jchen37@ncsu.edu>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
from os.path import isfile, join
import os
import pdb
def request_new_file(folder, model_name):
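# Scan `folder` for files named _<model_name>_<i>.txt and return a path using
# the next unused index (starting from 1 when no such file exists yet).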
if folder[-1] == '/':
folder = folder[:-1]
files = [f for f in os.listdir(folder) if isfile(join(folder, f))]
existed = [f for f in files if '_'+model_name+'_' in f]
if len(existed) == 0:
return folder+'/_'+model_name+'_1.txt'
else:
i = [int(e.split('_')[2].split('.')[0]) for e in existed]
i = max(i) + 1
return folder+'/_'+model_name+'_' + str(i) + '.txt'
def fetch_all_files(folder, model_name):
if folder[-1] == '/':
folder = folder[:-1]
files = [join(folder, f) for f in os.listdir(folder) if isfile(join(folder, f)) and '_'+model_name+'_' in f]
return files
if __name__ == '__main__':
print(request_new_file('./tse_rs/paper_material', 'osp'))
print(fetch_all_files('./tse_rs/paper_material', 'osp'))
| 38.428571 | 112 | 0.695167 |
| 819c4d7d73b4cad8b25468d7effa915671a30e09 | 3,601 | py | Python | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/AddCdnDomainRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | ["Apache-2.0"] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/AddCdnDomainRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | ["Apache-2.0"] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/AddCdnDomainRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class AddCdnDomainRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'AddCdnDomain')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Sources(self):
return self.get_query_params().get('Sources')
def set_Sources(self,Sources):
self.add_query_param('Sources',Sources)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_SourcePort(self):
return self.get_query_params().get('SourcePort')
def set_SourcePort(self,SourcePort):
self.add_query_param('SourcePort',SourcePort)
def get_Priorities(self):
return self.get_query_params().get('Priorities')
def set_Priorities(self,Priorities):
self.add_query_param('Priorities',Priorities)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_CdnType(self):
return self.get_query_params().get('CdnType')
def set_CdnType(self,CdnType):
self.add_query_param('CdnType',CdnType)
def get_Scope(self):
return self.get_query_params().get('Scope')
def set_Scope(self,Scope):
self.add_query_param('Scope',Scope)
def get_SourceType(self):
return self.get_query_params().get('SourceType')
def set_SourceType(self,SourceType):
self.add_query_param('SourceType',SourceType)
def get_TopLevelDomain(self):
return self.get_query_params().get('TopLevelDomain')
def set_TopLevelDomain(self,TopLevelDomain):
self.add_query_param('TopLevelDomain',TopLevelDomain)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_CheckUrl(self):
return self.get_query_params().get('CheckUrl')
def set_CheckUrl(self,CheckUrl):
self.add_query_param('CheckUrl',CheckUrl)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region)
| 31.313043 | 74 | 0.758956 |
| 6df8527b17140315f65ab330a8583ac873a4da19 | 5,500 | py | Python | baseline/baseline_config.py | Leon-Francis/DefenceForNamedEntityAttack | 59f95171453becc303801aa64788817edfeb6b5c | ["Apache-2.0"] | null | null | null | baseline/baseline_config.py | Leon-Francis/DefenceForNamedEntityAttack | 59f95171453becc303801aa64788817edfeb6b5c | ["Apache-2.0"] | null | null | null | baseline/baseline_config.py | Leon-Francis/DefenceForNamedEntityAttack | 59f95171453becc303801aa64788817edfeb6b5c | ["Apache-2.0"] | null | null | null |
import torch
config_path = './baseline/baseline_config.py'
bert_vocab_size = 30522
class Baseline_Config():
output_dir = 'output'
cuda_idx = 0
train_device = torch.device('cuda:' + str(cuda_idx))
batch_size = 64
dataset = 'AGNEWS'
baseline = 'TextCNN'
epoch = 100
save_acc_limit = 0.80
debug_mode = False
if_mask_NE = False
if_replace_NE = False
if_attach_NE = True
if_adversial_training = False
linear_layer_num = 1
dropout_rate = 0.5
is_fine_tuning = True
Bert_lr = 1e-5
lr = 3e-4
skip_loss = 0
class IMDBConfig():
train_data_path = r'./dataset/IMDB/aclImdb/train.std'
test_data_path = r'./dataset/IMDB/aclImdb/test.std'
labels_num = 2
tokenizer_type = 'Bert'
remove_stop_words = False
sen_len = 230
vocab_size = bert_vocab_size
adversarial_data_path = r'dataset/IMDB/aclImdb/adversial_instance.std'
class SST2Config():
train_data_path = r'./dataset/SST2/train.std'
test_data_path = r'./dataset/SST2/test.std'
labels_num = 2
tokenizer_type = 'Bert'
remove_stop_words = False
sen_len = 20
vocab_size = bert_vocab_size
class AGNEWSConfig():
train_data_path = r'./dataset/AGNEWS/train.std'
test_data_path = r'./dataset/AGNEWS/test.std'
labels_num = 4
tokenizer_type = 'Bert'
remove_stop_words = False
sen_len = 50
vocab_size = bert_vocab_size
dataset_config = {'IMDB': IMDBConfig,
'SST2': SST2Config, 'AGNEWS': AGNEWSConfig}
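# Usage sketch: the active dataset settings can be looked up through the
# baseline config, e.g. dataset_config[Baseline_Config.dataset].train_data_path
# resolves to './dataset/AGNEWS/train.std' with the defaults above.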
model_path = {'IMDB_Bert_MNE': 'output/train_baseline_model/2021-05-12_12:57:41/models/IMDB_Bert_0.91328_05-12-14-47.pt',
'IMDB_Bert_replace_NE': 'output/train_baseline_model/2021-05-13_21:20:48/models/IMDB_Bert_0.91096_05-13-22-37.pt',
'IMDB_Bert_attach_NE': 'output/train_baseline_model/2021-05-14_21:39:33/models/IMDB_Bert_0.91680_05-15-04-01.pt',
'IMDB_Bert_attach_NE_inhance': 'output/train_baseline_model/2021-05-18_19:47:01/models/IMDB_Bert_0.91008_05-19-05-14.pt',
'IMDB_Bert_attack_NE_weak': 'output/train_baseline_model/2021-05-19_18:03:20/models/IMDB_Bert_0.91120_05-19-23-35.pt',
'IMDB_Bert': 'output/train_baseline_model/2021-05-11_21:36:13/models/IMDB_Bert_0.91564_05-11-22-58.pt',
'IMDB_Bert_adversial_training': 'output/train_baseline_model/2021-06-16_22:56:38/models/IMDB_Bert_0.91304_06-17-02-34.pt',
'SST2_Bert_attach_NE': 'output/train_baseline_model/2021-05-25_11:01:06/models/SST2_Bert_0.85592_05-25-11-08.pt',
'SST2_Bert': 'output/train_baseline_model/2021-05-24_22:50:17/models/SST2_Bert_0.87078_05-24-22-59.pt',
'AGNEWS_Bert': 'output/train_baseline_model/2021-05-25_12:16:00/models/AGNEWS_Bert_0.94250_05-25-14-51.pt',
'AGNEWS_Bert_attach_NE': 'output/train_baseline_model/2021-05-25_17:30:41/models/AGNEWS_Bert_0.92803_05-26-05-35.pt',
'AGNEWS_TextCNN_limit_vocab': 'output/train_baseline_model/2021-06-23_16:53:25/models/AGNEWS_TextCNN_0.91671_06-23-16-57.pt',
'IMDB_LSTM': 'output/train_baseline_model/2021-06-09_21:19:25/models/IMDB_LSTM_0.85636_06-09-21-54.pt',
'IMDB_LSTM_MNE': 'output/train_baseline_model/2021-06-10_21:42:54/models/IMDB_LSTM_0.86404_06-10-22-31.pt',
'IMDB_LSTM_replace_NE': 'output/train_baseline_model/2021-06-16_18:37:22/models/IMDB_LSTM_0.87340_06-16-19-14.pt',
'IMDB_LSTM_attach_NE': 'output/train_baseline_model/2021-06-09_22:43:59/models/IMDB_LSTM_0.83464_06-10-04-24.pt',
'IMDB_LSTM_limit_vocab': 'output/train_baseline_model/2021-06-10_22:03:23/models/IMDB_LSTM_0.85852_06-10-23-12.pt',
'IMDB_LSTM_limit_vocab_MNE': 'output/train_baseline_model/2021-06-16_17:20:06/models/IMDB_LSTM_0.87360_06-16-18-17.pt',
'IMDB_LSTM_limit_vocab_replace_NE': 'output/train_baseline_model/2021-06-16_17:18:12/models/IMDB_LSTM_0.87532_06-16-18-14.pt',
'IMDB_LSTM_limit_vocab_attach_NE': 'output/train_baseline_model/2021-06-11_11:24:58/models/IMDB_LSTM_0.83784_06-11-17-53.pt',
'IMDB_LSTM_limit_vocab_adversial_training': 'output/train_baseline_model/2021-06-18_11:36:09/models/IMDB_LSTM_0.86500_06-18-11-54.pt',
'IMDB_TextCNN': 'output/train_baseline_model/2021-06-14_22:48:20/models/IMDB_TextCNN_0.86168_06-14-22-53.pt',
'IMDB_TextCNN_MNE': 'output/train_baseline_model/2021-06-16_17:25:26/models/IMDB_TextCNN_0.84924_06-16-18-03.pt',
'IMDB_TextCNN_replace_NE': 'output/train_baseline_model/2021-06-16_17:26:22/models/IMDB_TextCNN_0.86164_06-16-18-06.pt',
'IMDB_TextCNN_attach_NE': 'output/train_baseline_model/2021-06-16_17:27:02/models/IMDB_TextCNN_0.83480_06-16-18-24.pt',
'IMDB_TextCNN_limit_vocab': 'output/train_baseline_model/2021-06-17_17:18:02/models/IMDB_TextCNN_0.86648_06-17-17-26.pt',
'IMDB_TextCNN_limit_vocab_MNE': 'output/train_baseline_model/2021-06-17_17:19:27/models/IMDB_TextCNN_0.86388_06-17-17-56.pt',
'IMDB_TextCNN_limit_vocab_replace_NE': 'output/train_baseline_model/2021-06-17_17:19:45/models/IMDB_TextCNN_0.86400_06-17-17-52.pt',
'IMDB_TextCNN_limit_vocab_attach_NE': 'output/train_baseline_model/2021-06-17_17:20:09/models/IMDB_TextCNN_0.83636_06-17-18-54.pt',
'IMDB_TextCNN_limit_vocab_adversial_training': 'output/train_baseline_model/2021-06-18_11:37:55/models/IMDB_TextCNN_0.85640_06-18-11-49.pt'}
| 57.291667 | 154 | 0.720727 |
| 514a94b0bd895afdd11e4667f445fe6d68b83ace | 913 | py | Python | build/lib/bandwidth/http/auth/voice_basic_auth.py | Spaced-Out/python-bandwidth-sdk | 5332f29d1c093003444384f63a9d4a00843c954f | ["MIT"] | null | null | null | build/lib/bandwidth/http/auth/voice_basic_auth.py | Spaced-Out/python-bandwidth-sdk | 5332f29d1c093003444384f63a9d4a00843c954f | ["MIT"] | null | null | null | build/lib/bandwidth/http/auth/voice_basic_auth.py | Spaced-Out/python-bandwidth-sdk | 5332f29d1c093003444384f63a9d4a00843c954f | ["MIT"] | 1 | 2020-12-01T15:25:51.000Z | 2020-12-01T15:25:51.000Z |
# -*- coding: utf-8 -*-
"""
bandwidth
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import base64
class VoiceBasicAuth:
@staticmethod
def apply(config, http_request):
""" Add basic authentication to the request.
Args:
config (Configuration): The Configuration object which holds the
authentication information.
http_request (HttpRequest): The HttpRequest object to which
authentication will be added.
"""
username = config.voice_basic_auth_user_name
password = config.voice_basic_auth_password
joined = "{}:{}".format(username, password)
encoded = base64.b64encode(str.encode(joined)).decode('iso-8859-1')
header_value = "Basic {}".format(encoded)
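# For example, username "user" with password "pass" yields "Basic dXNlcjpwYXNz".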
http_request.headers["Authorization"] = header_value
| 29.451613 | 80 | 0.627601 |
| 7626f54ed9f9526acf20438d3ec49e5b1622ed74 | 17,719 | py | Python | coverage/html.py | janaknat/coveragepy | 0e41eb8b24adc2c72a555c370432ed1689aabcf7 | ["Apache-2.0"] | null | null | null | coverage/html.py | janaknat/coveragepy | 0e41eb8b24adc2c72a555c370432ed1689aabcf7 | ["Apache-2.0"] | null | null | null | coverage/html.py | janaknat/coveragepy | 0e41eb8b24adc2c72a555c370432ed1689aabcf7 | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""HTML reporting for coverage.py."""
import datetime
import json
import os
import re
import shutil
import types
import coverage
from coverage.data import add_data_to_hash
from coverage.exceptions import CoverageException
from coverage.files import flat_rootname
from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime
from coverage.report import get_analysis_to_report
from coverage.results import Numbers
from coverage.templite import Templite
os = isolate_module(os)
# Static files are looked for in a list of places.
STATIC_PATH = [
# The place Debian puts system Javascript libraries.
"/usr/share/javascript",
# Our htmlfiles directory.
os.path.join(os.path.dirname(__file__), "htmlfiles"),
]
def data_filename(fname, pkgdir=""):
"""Return the path to a data file of ours.
The file is searched for on `STATIC_PATH`, and the first place it's found
is returned.
Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
is provided, at that sub-directory.
"""
tried = []
for static_dir in STATIC_PATH:
static_filename = os.path.join(static_dir, fname)
if os.path.exists(static_filename):
return static_filename
else:
tried.append(static_filename)
if pkgdir:
static_filename = os.path.join(static_dir, pkgdir, fname)
if os.path.exists(static_filename):
return static_filename
else:
tried.append(static_filename)
raise CoverageException(
f"Couldn't find static file {fname!r} from {os.getcwd()!r}, tried: {tried!r}"
)
def read_data(fname):
"""Return the contents of a data file of ours."""
with open(data_filename(fname)) as data_file:
return data_file.read()
def write_html(fname, html):
"""Write `html` to `fname`, properly encoded."""
html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
with open(fname, "wb") as fout:
fout.write(html.encode('ascii', 'xmlcharrefreplace'))
class HtmlDataGeneration:
"""Generate structured data to be turned into HTML reports."""
EMPTY = "(empty)"
def __init__(self, cov):
self.coverage = cov
self.config = self.coverage.config
data = self.coverage.get_data()
self.has_arcs = data.has_arcs()
if self.config.show_contexts:
if data.measured_contexts() == {""}:
self.coverage._warn("No contexts were measured")
data.set_query_contexts(self.config.report_contexts)
def data_for_file(self, fr, analysis):
"""Produce the data needed for one file's report."""
if self.has_arcs:
missing_branch_arcs = analysis.missing_branch_arcs()
arcs_executed = analysis.arcs_executed()
if self.config.show_contexts:
contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename)
lines = []
for lineno, tokens in enumerate(fr.source_token_lines(), start=1):
# Figure out how to mark this line.
category = None
short_annotations = []
long_annotations = []
if lineno in analysis.excluded:
category = 'exc'
elif lineno in analysis.missing:
category = 'mis'
elif self.has_arcs and lineno in missing_branch_arcs:
category = 'par'
for b in missing_branch_arcs[lineno]:
if b < 0:
short_annotations.append("exit")
else:
short_annotations.append(b)
long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed))
elif lineno in analysis.statements:
category = 'run'
contexts = contexts_label = None
context_list = None
if category and self.config.show_contexts:
contexts = sorted(c or self.EMPTY for c in contexts_by_lineno[lineno])
if contexts == [self.EMPTY]:
contexts_label = self.EMPTY
else:
contexts_label = f"{len(contexts)} ctx"
context_list = contexts
lines.append(types.SimpleNamespace(
tokens=tokens,
number=lineno,
category=category,
statement=(lineno in analysis.statements),
contexts=contexts,
contexts_label=contexts_label,
context_list=context_list,
short_annotations=short_annotations,
long_annotations=long_annotations,
))
file_data = types.SimpleNamespace(
relative_filename=fr.relative_filename(),
nums=analysis.numbers,
lines=lines,
)
return file_data
class HtmlReporter:
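# report() walks the per-file analyses, html_file() renders one page per source
# file (skipping fully covered or empty files when configured, and files that
# are unchanged since the last run), and index_file() writes index.html.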
"""HTML reporting."""
# These files will be copied from the htmlfiles directory to the output
# directory.
STATIC_FILES = [
("style.css", ""),
("jquery.min.js", "jquery"),
("jquery.ba-throttle-debounce.min.js", "jquery-throttle-debounce"),
("jquery.hotkeys.js", "jquery-hotkeys"),
("jquery.isonscreen.js", "jquery-isonscreen"),
("jquery.tablesorter.min.js", "jquery-tablesorter"),
("coverage_html.js", ""),
("keybd_closed.png", ""),
("keybd_open.png", ""),
("favicon_32.png", ""),
]
def __init__(self, cov):
self.coverage = cov
self.config = self.coverage.config
self.directory = self.config.html_dir
self.skip_covered = self.config.html_skip_covered
if self.skip_covered is None:
self.skip_covered = self.config.skip_covered
self.skip_empty = self.config.html_skip_empty
if self.skip_empty is None:
self.skip_empty = self.config.skip_empty
title = self.config.html_title
if self.config.extra_css:
self.extra_css = os.path.basename(self.config.extra_css)
else:
self.extra_css = None
self.data = self.coverage.get_data()
self.has_arcs = self.data.has_arcs()
self.file_summaries = []
self.all_files_nums = []
self.incr = IncrementalChecker(self.directory)
self.datagen = HtmlDataGeneration(self.coverage)
self.totals = Numbers()
self.template_globals = {
# Functions available in the templates.
'escape': escape,
'pair': pair,
'len': len,
# Constants for this report.
'__url__': coverage.__url__,
'__version__': coverage.__version__,
'title': title,
'time_stamp': format_local_datetime(datetime.datetime.now()),
'extra_css': self.extra_css,
'has_arcs': self.has_arcs,
'show_contexts': self.config.show_contexts,
# Constants for all reports.
# These css classes determine which lines are highlighted by default.
'category': {
'exc': 'exc show_exc',
'mis': 'mis show_mis',
'par': 'par run show_par',
'run': 'run',
}
}
self.pyfile_html_source = read_data("pyfile.html")
self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
def report(self, morfs):
"""Generate an HTML report for `morfs`.
`morfs` is a list of modules or file names.
"""
# Read the status data and check that this run used the same
# global data as the last run.
self.incr.read()
self.incr.check_global_data(self.config, self.pyfile_html_source)
# Process all the files.
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
self.html_file(fr, analysis)
if not self.all_files_nums:
raise CoverageException("No data to report.")
self.totals = sum(self.all_files_nums)
# Write the index file.
self.index_file()
self.make_local_static_report_files()
return self.totals.n_statements and self.totals.pc_covered
def make_local_static_report_files(self):
"""Make local instances of static files for HTML report."""
# The files we provide must always be copied.
for static, pkgdir in self.STATIC_FILES:
shutil.copyfile(
data_filename(static, pkgdir),
os.path.join(self.directory, static)
)
# The user may have extra CSS they want copied.
if self.extra_css:
shutil.copyfile(
self.config.extra_css,
os.path.join(self.directory, self.extra_css)
)
def html_file(self, fr, analysis):
"""Generate an HTML file for one source file."""
rootname = flat_rootname(fr.relative_filename())
html_filename = rootname + ".html"
ensure_dir(self.directory)
html_path = os.path.join(self.directory, html_filename)
# Get the numbers for this file.
nums = analysis.numbers
self.all_files_nums.append(nums)
if self.skip_covered:
# Don't report on 100% files.
no_missing_lines = (nums.n_missing == 0)
no_missing_branches = (nums.n_partial_branches == 0)
if no_missing_lines and no_missing_branches:
# If there's an existing file, remove it.
file_be_gone(html_path)
return
if self.skip_empty:
# Don't report on empty files.
if nums.n_statements == 0:
file_be_gone(html_path)
return
# Find out if the file on disk is already correct.
if self.incr.can_skip_file(self.data, fr, rootname):
self.file_summaries.append(self.incr.index_info(rootname))
return
# Write the HTML page for this file.
file_data = self.datagen.data_for_file(fr, analysis)
for ldata in file_data.lines:
# Build the HTML for the line.
html = []
for tok_type, tok_text in ldata.tokens:
if tok_type == "ws":
html.append(escape(tok_text))
else:
tok_html = escape(tok_text) or '&nbsp;'
html.append(
f'<span class="{tok_type}">{tok_html}</span>'
)
ldata.html = ''.join(html)
if ldata.short_annotations:
# 202F is NARROW NO-BREAK SPACE.
# 219B is RIGHTWARDS ARROW WITH STROKE.
ldata.annotate = ", ".join(
f"{ldata.number} ↛ {d}"
for d in ldata.short_annotations
)
else:
ldata.annotate = None
if ldata.long_annotations:
longs = ldata.long_annotations
if len(longs) == 1:
ldata.annotate_long = longs[0]
else:
ldata.annotate_long = "{:d} missed branches: {}".format(
len(longs),
", ".join(
f"{num:d}) {ann_long}"
for num, ann_long in enumerate(longs, start=1)
),
)
else:
ldata.annotate_long = None
css_classes = []
if ldata.category:
css_classes.append(self.template_globals['category'][ldata.category])
ldata.css_class = ' '.join(css_classes) or "pln"
html = self.source_tmpl.render(file_data.__dict__)
write_html(html_path, html)
# Save this file's information for the index file.
index_info = {
'nums': nums,
'html_filename': html_filename,
'relative_filename': fr.relative_filename(),
}
self.file_summaries.append(index_info)
self.incr.set_index_info(rootname, index_info)
def index_file(self):
"""Write the index.html file for this report."""
index_tmpl = Templite(read_data("index.html"), self.template_globals)
html = index_tmpl.render({
'files': self.file_summaries,
'totals': self.totals,
})
write_html(os.path.join(self.directory, "index.html"), html)
# Write the latest hashes for next time.
self.incr.write()
class IncrementalChecker:
"""Logic and data to support incremental reporting."""
STATUS_FILE = "status.json"
STATUS_FORMAT = 2
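# A hash of the global settings plus a per-file hash of source and coverage
# data is stored in status.json; can_skip_file() compares those hashes so
# unchanged files are not re-rendered on the next run.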
# pylint: disable=wrong-spelling-in-comment,useless-suppression
# The data looks like:
#
# {
# "format": 2,
# "globals": "540ee119c15d52a68a53fe6f0897346d",
# "version": "4.0a1",
# "files": {
# "cogapp___init__": {
# "hash": "e45581a5b48f879f301c0f30bf77a50c",
# "index": {
# "html_filename": "cogapp___init__.html",
# "relative_filename": "cogapp/__init__",
# "nums": [ 1, 14, 0, 0, 0, 0, 0 ]
# }
# },
# ...
# "cogapp_whiteutils": {
# "hash": "8504bb427fc488c4176809ded0277d51",
# "index": {
# "html_filename": "cogapp_whiteutils.html",
# "relative_filename": "cogapp/whiteutils",
# "nums": [ 1, 59, 0, 1, 28, 2, 2 ]
# }
# }
# }
# }
def __init__(self, directory):
self.directory = directory
self.reset()
def reset(self):
"""Initialize to empty. Causes all files to be reported."""
self.globals = ''
self.files = {}
def read(self):
"""Read the information we stored last time."""
usable = False
try:
status_file = os.path.join(self.directory, self.STATUS_FILE)
with open(status_file) as fstatus:
status = json.load(fstatus)
except (OSError, ValueError):
usable = False
else:
usable = True
if status['format'] != self.STATUS_FORMAT:
usable = False
elif status['version'] != coverage.__version__:
usable = False
if usable:
self.files = {}
for filename, fileinfo in status['files'].items():
fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
self.files[filename] = fileinfo
self.globals = status['globals']
else:
self.reset()
def write(self):
"""Write the current status."""
status_file = os.path.join(self.directory, self.STATUS_FILE)
files = {}
for filename, fileinfo in self.files.items():
fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
files[filename] = fileinfo
status = {
'format': self.STATUS_FORMAT,
'version': coverage.__version__,
'globals': self.globals,
'files': files,
}
with open(status_file, "w") as fout:
json.dump(status, fout, separators=(',', ':'))
def check_global_data(self, *data):
"""Check the global data that can affect incremental reporting."""
m = Hasher()
for d in data:
m.update(d)
these_globals = m.hexdigest()
if self.globals != these_globals:
self.reset()
self.globals = these_globals
def can_skip_file(self, data, fr, rootname):
"""Can we skip reporting this file?
`data` is a CoverageData object, `fr` is a `FileReporter`, and
`rootname` is the name being used for the file.
"""
m = Hasher()
m.update(fr.source().encode('utf-8'))
add_data_to_hash(data, fr.filename, m)
this_hash = m.hexdigest()
that_hash = self.file_hash(rootname)
if this_hash == that_hash:
# Nothing has changed to require the file to be reported again.
return True
else:
self.set_file_hash(rootname, this_hash)
return False
def file_hash(self, fname):
"""Get the hash of `fname`'s contents."""
return self.files.get(fname, {}).get('hash', '')
def set_file_hash(self, fname, val):
"""Set the hash of `fname`'s contents."""
self.files.setdefault(fname, {})['hash'] = val
def index_info(self, fname):
"""Get the information for index.html for `fname`."""
return self.files.get(fname, {}).get('index', {})
def set_index_info(self, fname, info):
"""Set the information for index.html for `fname`."""
self.files.setdefault(fname, {})['index'] = info
# Helpers for templates and generating HTML
def escape(t):
"""HTML-escape the text in `t`.
This is only suitable for HTML text, not attributes.
"""
# Convert HTML special chars into HTML entities.
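# e.g. escape('a < b & c') -> 'a &lt; b &amp; c'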
return t.replace("&", "&amp;").replace("<", "&lt;")
def pair(ratio):
"""Format a pair of numbers so JavaScript can read them in an attribute."""
return "%s %s" % ratio
| 34.140655 | 97 | 0.56747 |
| 42921513bdb948f712182d49e4e865863c87cb07 | 13,645 | py | Python | metadata-ingestion/setup.py | pramodbiligiri/datahub | 892adbcf330a9c7c687a293dd3edeca9fa0e2fd8 | ["Apache-2.0"] | 2 | 2021-09-10T06:11:53.000Z | 2021-12-13T05:45:51.000Z | metadata-ingestion/setup.py | pramodbiligiri/datahub | 892adbcf330a9c7c687a293dd3edeca9fa0e2fd8 | ["Apache-2.0"] | 1 | 2021-12-19T16:16:48.000Z | 2021-12-19T16:16:48.000Z | metadata-ingestion/setup.py | pramodbiligiri/datahub | 892adbcf330a9c7c687a293dd3edeca9fa0e2fd8 | ["Apache-2.0"] | 2 | 2021-12-06T04:17:26.000Z | 2021-12-06T06:08:13.000Z |
import os
import sys
from typing import Dict, Set
import setuptools
is_py37_or_newer = sys.version_info >= (3, 7)
package_metadata: dict = {}
with open("./src/datahub/__init__.py") as fp:
exec(fp.read(), package_metadata)
def get_long_description():
root = os.path.dirname(__file__)
with open(os.path.join(root, "README.md")) as f:
description = f.read()
return description
base_requirements = {
# Compatibility.
"dataclasses>=0.6; python_version < '3.7'",
"typing_extensions>=3.10.0.2",
"mypy_extensions>=0.4.3",
# Actual dependencies.
"typing-inspect",
"pydantic>=1.5.1",
}
framework_common = {
"click>=6.0.0",
"click-default-group",
"PyYAML",
"toml>=0.10.0",
"entrypoints",
"docker",
"expandvars>=0.6.5",
"avro-gen3==0.7.1",
"avro>=1.10.2",
"python-dateutil>=2.8.0",
"stackprinter",
"tabulate",
"progressbar2",
}
kafka_common = {
# We currently require both Avro libraries. The codegen uses avro-python3 (above)
# schema parsers at runtime for generating and reading JSON into Python objects.
# At the same time, we use Kafka's AvroSerializer, which internally relies on
# fastavro for serialization. We do not use confluent_kafka[avro], since it
# is incompatible with its own dep on avro-python3.
"confluent_kafka>=1.5.0",
"fastavro>=1.2.0",
}
sql_common = {
# Required for all SQL sources.
"sqlalchemy==1.3.24",
# Required for SQL profiling.
"great-expectations>=0.13.40",
"greenlet",
}
aws_common = {
# AWS Python SDK
"boto3",
# Deal with a version incompatibility between botocore (used by boto3) and urllib3.
# See https://github.com/boto/botocore/pull/2563.
"botocore!=1.23.0",
}
looker_common = {
# Looker Python SDK
"looker-sdk==21.6.0"
}
bigquery_common = {
# Google cloud logging library
"google-cloud-logging"
}
# Note: for all of these, framework_common will be added.
plugins: Dict[str, Set[str]] = {
# Sink plugins.
"datahub-kafka": kafka_common,
"datahub-rest": {"requests"},
# Integrations.
"airflow": {
"apache-airflow >= 1.10.2",
},
# Source plugins
"athena": sql_common | {"PyAthena[SQLAlchemy]"},
"azure-ad": set(),
"bigquery": sql_common | bigquery_common | {"pybigquery >= 0.6.0"},
"bigquery-usage": bigquery_common | {"cachetools"},
"datahub-business-glossary": set(),
"dbt": {"requests"},
"druid": sql_common | {"pydruid>=0.6.2"},
"feast": {"docker"},
"glue": aws_common,
"hive": sql_common
| {
# Acryl Data maintains a fork of PyHive, which adds support for table comments
# and column comments, and also releases HTTP and HTTPS transport schemes.
"acryl-pyhive[hive]>=0.6.11"
},
"kafka": kafka_common,
"kafka-connect": sql_common | {"requests", "JPype1"},
"ldap": {"python-ldap>=2.4"},
"looker": looker_common,
"lookml": looker_common | {"lkml>=1.1.0", "sql-metadata==2.2.2"},
"metabase": {"requests"},
"mode": {"requests", "sqllineage"},
"mongodb": {"pymongo>=3.11"},
"mssql": sql_common | {"sqlalchemy-pytds>=0.3"},
"mssql-odbc": sql_common | {"pyodbc"},
"mysql": sql_common | {"pymysql>=1.0.2"},
# mariadb should have same dependency as mysql
"mariadb": sql_common | {"pymysql>=1.0.2"},
"okta": {"okta~=1.7.0"},
"oracle": sql_common | {"cx_Oracle"},
"postgres": sql_common | {"psycopg2-binary", "GeoAlchemy2"},
"redash": {"redash-toolbelt", "sql-metadata"},
"redshift": sql_common
| {"sqlalchemy-redshift", "psycopg2-binary", "GeoAlchemy2", "sqllineage"},
"redshift-usage": sql_common
| {"sqlalchemy-redshift", "psycopg2-binary", "GeoAlchemy2"},
"sagemaker": aws_common,
"snowflake": sql_common | {"snowflake-sqlalchemy<=1.2.4"},
"snowflake-usage": sql_common | {"snowflake-sqlalchemy<=1.2.4"},
"sqlalchemy": sql_common,
"superset": {"requests"},
"trino": sql_common
| {
# SQLAlchemy support is coming up in trino python client
# subject to PR merging - https://github.com/trinodb/trino-python-client/pull/81.
# PR is from same author as that of sqlalchemy-trino library below.
"sqlalchemy-trino"
},
"starburst-trino-usage": sql_common
| {
# SQLAlchemy support is coming up in trino python client
# subject to PR merging - https://github.com/trinodb/trino-python-client/pull/81.
# PR is from same author as that of sqlalchemy-trino library below.
"sqlalchemy-trino"
},
"nifi": {"requests"},
}
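# Each entry above becomes a pip "extra" (framework_common is unioned in via
# extras_require below); e.g. installing "<package-name>[postgres,datahub-rest]"
# pulls in the Postgres source and REST sink dependencies. The actual
# distribution name is whatever src/datahub/__init__.py declares.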
all_exclude_plugins: Set[str] = {
# SQL Server ODBC requires additional drivers, and so we don't want to keep
# it included in the default "all" installation.
"mssql-odbc",
}
mypy_stubs = {
"types-dataclasses",
"sqlalchemy-stubs",
"types-pkg_resources",
"types-six",
"types-python-dateutil",
"types-requests",
"types-toml",
"types-PyMySQL",
"types-PyYAML",
"types-freezegun",
"types-cachetools",
# versions 0.1.13 and 0.1.14 seem to have issues
"types-click==0.1.12",
"boto3-stubs[s3,glue,sagemaker]",
"types-tabulate",
}
base_dev_requirements = {
*base_requirements,
*framework_common,
*mypy_stubs,
"black>=19.10b0",
"coverage>=5.1",
"flake8>=3.8.3",
"flake8-tidy-imports>=4.3.0",
"isort>=5.7.0",
# Waiting for https://github.com/samuelcolvin/pydantic/pull/3175 before allowing mypy 0.920.
"mypy>=0.901,<0.920",
"pytest>=6.2.2",
"pytest-cov>=2.8.1",
"pytest-docker>=0.10.3",
"tox",
"deepdiff",
"requests-mock",
"freezegun",
"jsonpickle",
"build",
"twine",
"pydot",
*list(
dependency
for plugin in [
"bigquery",
"bigquery-usage",
"looker",
"glue",
"mariadb",
"okta",
"oracle",
"postgres",
"sagemaker",
"datahub-kafka",
"datahub-rest",
"redash",
"redshift",
"redshift-usage"
# airflow is added below
]
for dependency in plugins[plugin]
),
}
if is_py37_or_newer:
# The lookml plugin only works on Python 3.7 or newer.
# The trino plugin only works on Python 3.7 or newer.
# The trino plugin can be supported on Python 3.6 with minimal changes to opensource sqlalchemy-trino sourcecode.
base_dev_requirements = base_dev_requirements.union(
{
dependency
for plugin in ["lookml", "trino", "starburst-trino-usage"]
for dependency in plugins[plugin]
}
)
dev_requirements = {
*base_dev_requirements,
"apache-airflow[snowflake]>=2.0.2", # snowflake is used in example dags
"snowflake-sqlalchemy<=1.2.4", # make constraint consistent with extras
}
dev_requirements_airflow_1 = {
*base_dev_requirements,
"apache-airflow==1.10.15",
"apache-airflow-backport-providers-snowflake",
"snowflake-sqlalchemy<=1.2.4", # make constraint consistent with extras
"WTForms==2.3.3", # make constraint consistent with extras
}
full_test_dev_requirements = {
*list(
dependency
for plugin in [
"druid",
"feast",
"hive",
"ldap",
"mongodb",
"mssql",
"mysql",
"mariadb",
"snowflake",
"redash",
"kafka-connect",
]
for dependency in plugins[plugin]
),
}
entry_points = {
"console_scripts": ["datahub = datahub.entrypoints:main"],
"datahub.ingestion.source.plugins": [
"file = datahub.ingestion.source.file:GenericFileSource",
"sqlalchemy = datahub.ingestion.source.sql.sql_generic:SQLAlchemyGenericSource",
"athena = datahub.ingestion.source.sql.athena:AthenaSource",
"azure-ad = datahub.ingestion.source.identity.azure_ad:AzureADSource",
"bigquery = datahub.ingestion.source.sql.bigquery:BigQuerySource",
"bigquery-usage = datahub.ingestion.source.usage.bigquery_usage:BigQueryUsageSource",
"dbt = datahub.ingestion.source.dbt:DBTSource",
"druid = datahub.ingestion.source.sql.druid:DruidSource",
"feast = datahub.ingestion.source.feast:FeastSource",
"glue = datahub.ingestion.source.aws.glue:GlueSource",
"sagemaker = datahub.ingestion.source.aws.sagemaker:SagemakerSource",
"hive = datahub.ingestion.source.sql.hive:HiveSource",
"kafka = datahub.ingestion.source.kafka:KafkaSource",
"kafka-connect = datahub.ingestion.source.kafka_connect:KafkaConnectSource",
"ldap = datahub.ingestion.source.ldap:LDAPSource",
"looker = datahub.ingestion.source.looker:LookerDashboardSource",
"lookml = datahub.ingestion.source.lookml:LookMLSource",
"datahub-business-glossary = datahub.ingestion.source.metadata.business_glossary:BusinessGlossaryFileSource",
"mode = datahub.ingestion.source.mode:ModeSource",
"mongodb = datahub.ingestion.source.mongodb:MongoDBSource",
"mssql = datahub.ingestion.source.sql.mssql:SQLServerSource",
"mysql = datahub.ingestion.source.sql.mysql:MySQLSource",
"mariadb = datahub.ingestion.source.sql.mariadb.MariaDBSource",
"okta = datahub.ingestion.source.identity.okta:OktaSource",
"oracle = datahub.ingestion.source.sql.oracle:OracleSource",
"postgres = datahub.ingestion.source.sql.postgres:PostgresSource",
"redash = datahub.ingestion.source.redash:RedashSource",
"redshift = datahub.ingestion.source.sql.redshift:RedshiftSource",
"redshift-usage = datahub.ingestion.source.usage.redshift_usage:RedshiftUsageSource",
"snowflake = datahub.ingestion.source.sql.snowflake:SnowflakeSource",
"snowflake-usage = datahub.ingestion.source.usage.snowflake_usage:SnowflakeUsageSource",
"superset = datahub.ingestion.source.superset:SupersetSource",
"openapi = datahub.ingestion.source.openapi:OpenApiSource",
"metabase = datahub.ingestion.source.metabase:MetabaseSource",
"trino = datahub.ingestion.source.sql.trino:TrinoSource",
"starburst-trino-usage = datahub.ingestion.source.usage.starburst_trino_usage:TrinoUsageSource",
"nifi = datahub.ingestion.source.nifi:NifiSource",
],
"datahub.ingestion.sink.plugins": [
"file = datahub.ingestion.sink.file:FileSink",
"console = datahub.ingestion.sink.console:ConsoleSink",
"datahub-kafka = datahub.ingestion.sink.datahub_kafka:DatahubKafkaSink",
"datahub-rest = datahub.ingestion.sink.datahub_rest:DatahubRestSink",
],
"datahub.ingestion.state_provider.plugins": [
"datahub = datahub.ingestion.source.state_provider.datahub_ingestion_state_provider:DatahubIngestionStateProvider",
],
"apache_airflow_provider": ["provider_info=datahub_provider:get_provider_info"],
}
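# Each entry point above follows the setuptools "name = module:attribute" syntax; every
# plugin name maps to the class that implements that source, sink, or state provider.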
setuptools.setup(
# Package metadata.
name=package_metadata["__package_name__"],
version=package_metadata["__version__"],
url="https://datahubproject.io/",
project_urls={
"Documentation": "https://datahubproject.io/docs/",
"Source": "https://github.com/linkedin/datahub",
"Changelog": "https://github.com/linkedin/datahub/releases",
},
license="Apache License 2.0",
description="A CLI to work with DataHub metadata",
long_description=get_long_description(),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved",
"License :: OSI Approved :: Apache Software License",
"Operating System :: Unix",
"Operating System :: POSIX :: Linux",
"Environment :: Console",
"Environment :: MacOS X",
"Topic :: Software Development",
],
# Package info.
zip_safe=False,
python_requires=">=3.6",
package_dir={"": "src"},
packages=setuptools.find_namespace_packages(where="./src"),
package_data={
"datahub": ["py.typed"],
"datahub.metadata": ["schema.avsc"],
"datahub.metadata.schemas": ["*.avsc"],
"datahub.ingestion.source.feast_image": ["Dockerfile", "requirements.txt"],
},
entry_points=entry_points,
# Dependencies.
install_requires=list(base_requirements | framework_common),
extras_require={
"base": list(framework_common),
**{
plugin: list(framework_common | dependencies)
for (plugin, dependencies) in plugins.items()
},
"all": list(
framework_common.union(
*[
requirements
for plugin, requirements in plugins.items()
if plugin not in all_exclude_plugins
]
)
),
"dev": list(dev_requirements),
"dev-airflow1": list(dev_requirements_airflow_1),
"integration-tests": list(full_test_dev_requirements),
},
)
| 35.077121
| 123
| 0.633785
|
9489d89bf2658d4f65cf18c3e51acc73db2a3df7
| 3,103
|
py
|
Python
|
BoostYourOwnDepth-main/pix2pix/util/util.py
|
qihao-huang/AD-Depth-Estimation
|
4c93a50efc30fc5b44e2b43412d78bc5f98fa430
|
[
"MIT"
] | 9
|
2022-01-04T02:47:12.000Z
|
2022-03-10T10:46:04.000Z
|
BoostYourOwnDepth-main/pix2pix/util/util.py
|
qihao-huang/AD-Depth-Estimation
|
4c93a50efc30fc5b44e2b43412d78bc5f98fa430
|
[
"MIT"
] | null | null | null |
BoostYourOwnDepth-main/pix2pix/util/util.py
|
qihao-huang/AD-Depth-Estimation
|
4c93a50efc30fc5b44e2b43412d78bc5f98fa430
|
[
"MIT"
] | 10
|
2021-12-25T06:36:18.000Z
|
2022-03-15T11:25:00.000Z
|
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def tensor2im(input_image, imtype=np.uint16):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = torch.squeeze(image_tensor).cpu().numpy() # convert it into a numpy array
        image_numpy = (image_numpy + 1) / 2.0 * (2**16 - 1)  # rescale from [-1, 1] to the uint16 range [0, 65535]
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
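# e.g. tensor2im(torch.zeros(1, 1, 4, 4)) returns a (4, 4) uint16 array filled with 32767,
# since inputs are assumed to lie in [-1, 1] and are rescaled to [0, 65535].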
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
image_pil = image_pil.convert('I;16')
# image_pil = Image.fromarray(image_numpy)
# h, w, _ = image_numpy.shape
#
# if aspect_ratio > 1.0:
# image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
# if aspect_ratio < 1.0:
# image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
| 31.343434
| 96
| 0.615533
|
97768bcd1be7e70c44bd05eba3cccea864ba320e
| 39,755
|
py
|
Python
|
metadata-ingestion/src/datahub/ingestion/source/kafka_connect.py
|
pedro-iatzky/datahub
|
53d1379a85b237700fa817d4aa8c95abe7805f7c
|
[
"Apache-2.0"
] | 1
|
2022-03-04T00:42:50.000Z
|
2022-03-04T00:42:50.000Z
|
metadata-ingestion/src/datahub/ingestion/source/kafka_connect.py
|
pedro-iatzky/datahub
|
53d1379a85b237700fa817d4aa8c95abe7805f7c
|
[
"Apache-2.0"
] | null | null | null |
metadata-ingestion/src/datahub/ingestion/source/kafka_connect.py
|
pedro-iatzky/datahub
|
53d1379a85b237700fa817d4aa8c95abe7805f7c
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
from dataclasses import dataclass, field
from typing import Dict, Iterable, List, Optional, Tuple
import jpype
import jpype.imports
import requests
from sqlalchemy.engine.url import make_url
import datahub.emitter.mce_builder as builder
import datahub.metadata.schema_classes as models
from datahub.configuration.common import AllowDenyPattern, ConfigModel
from datahub.configuration.source_common import DatasetLineageProviderConfigBase
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
logger = logging.getLogger(__name__)
class ProvidedConfig(ConfigModel):
provider: str
path_key: str
value: str
class KafkaConnectSourceConfig(DatasetLineageProviderConfigBase):
# See the Connect REST Interface for details
# https://docs.confluent.io/platform/current/connect/references/restapi.html#
connect_uri: str = "http://localhost:8083/"
username: Optional[str] = None
password: Optional[str] = None
cluster_name: Optional[str] = "connect-cluster"
construct_lineage_workunits: bool = True
connector_patterns: AllowDenyPattern = AllowDenyPattern.allow_all()
provided_configs: Optional[List[ProvidedConfig]] = None
@dataclass
class KafkaConnectSourceReport(SourceReport):
connectors_scanned: int = 0
filtered: List[str] = field(default_factory=list)
def report_connector_scanned(self, connector: str) -> None:
self.connectors_scanned += 1
def report_dropped(self, connector: str) -> None:
self.filtered.append(connector)
@dataclass
class KafkaConnectLineage:
"""Class to store Kafka Connect lineage mapping, Each instance is potential DataJob"""
source_platform: str
target_dataset: str
target_platform: str
job_property_bag: Optional[Dict[str, str]] = None
source_dataset: Optional[str] = None
@dataclass
class ConnectorManifest:
"""Each instance is potential DataFlow"""
name: str
type: str
config: Dict
tasks: Dict
url: Optional[str] = None
flow_property_bag: Optional[Dict[str, str]] = None
lineages: List[KafkaConnectLineage] = field(default_factory=list)
topic_names: Iterable[str] = field(default_factory=list)
def remove_prefix(text: str, prefix: str) -> str:
if text.startswith(prefix):
index = len(prefix)
return text[index:]
return text
def unquote(string: str, leading_quote: str = '"', trailing_quote: Optional[str] = None) -> str:
"""
If string starts and ends with a quote, unquote it
"""
trailing_quote = trailing_quote if trailing_quote else leading_quote
if string.startswith(leading_quote) and string.endswith(trailing_quote):
string = string[1:-1]
return string
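# e.g. unquote('"orders"') -> 'orders', and unquote('[dbo]', '[', ']') -> 'dbo';
# strings without the surrounding quotes are returned unchanged.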
@dataclass
class ConfluentJDBCSourceConnector:
connector_manifest: ConnectorManifest
report: KafkaConnectSourceReport
def __init__(
self, connector_manifest: ConnectorManifest, report: KafkaConnectSourceReport
) -> None:
self.connector_manifest = connector_manifest
self.report = report
self._extract_lineages()
REGEXROUTER = "org.apache.kafka.connect.transforms.RegexRouter"
KNOWN_TOPICROUTING_TRANSFORMS = [REGEXROUTER]
# https://kafka.apache.org/documentation/#connect_included_transformation
KAFKA_NONTOPICROUTING_TRANSFORMS = [
"InsertField",
"InsertField$Key",
"InsertField$Value",
"ReplaceField",
"ReplaceField$Key",
"ReplaceField$Value",
"MaskField",
"MaskField$Key",
"MaskField$Value",
"ValueToKey",
"ValueToKey$Key",
"ValueToKey$Value",
"HoistField",
"HoistField$Key",
"HoistField$Value",
"ExtractField",
"ExtractField$Key",
"ExtractField$Value",
"SetSchemaMetadata",
"SetSchemaMetadata$Key",
"SetSchemaMetadata$Value",
"Flatten",
"Flatten$Key",
"Flatten$Value",
"Cast",
"Cast$Key",
"Cast$Value",
"HeadersFrom",
"HeadersFrom$Key",
"HeadersFrom$Value",
"TimestampConverter",
"Filter",
"InsertHeader",
"DropHeaders",
]
# https://docs.confluent.io/platform/current/connect/transforms/overview.html
CONFLUENT_NONTOPICROUTING_TRANSFORMS = [
"Drop",
"Drop$Key",
"Drop$Value",
"Filter",
"Filter$Key",
"Filter$Value",
"TombstoneHandler",
]
KNOWN_NONTOPICROUTING_TRANSFORMS = (
KAFKA_NONTOPICROUTING_TRANSFORMS
+ [
"org.apache.kafka.connect.transforms.{}".format(t)
for t in KAFKA_NONTOPICROUTING_TRANSFORMS
]
+ CONFLUENT_NONTOPICROUTING_TRANSFORMS
+ [
"io.confluent.connect.transforms.{}".format(t)
for t in CONFLUENT_NONTOPICROUTING_TRANSFORMS
]
)
@dataclass
class JdbcParser:
db_connection_url: str
source_platform: str
database_name: str
topic_prefix: str
query: str
transforms: list
def report_warning(self, key: str, reason: str) -> None:
logger.warning(f"{key}: {reason}")
self.report.report_warning(key, reason)
def get_parser(
self,
connector_manifest: ConnectorManifest,
) -> JdbcParser:
url = remove_prefix(
str(connector_manifest.config.get("connection.url")), "jdbc:"
)
url_instance = make_url(url)
source_platform = url_instance.drivername
database_name = url_instance.database
db_connection_url = f"{url_instance.drivername}://{url_instance.host}:{url_instance.port}/{url_instance.database}"
topic_prefix = self.connector_manifest.config.get("topic.prefix", None)
query = self.connector_manifest.config.get("query", None)
transform_names = (
self.connector_manifest.config.get("transforms", "").split(",")
if self.connector_manifest.config.get("transforms")
else []
)
transforms = []
for name in transform_names:
transform = {"name": name}
transforms.append(transform)
for key in self.connector_manifest.config.keys():
if key.startswith("transforms.{}.".format(name)):
transform[
key.replace("transforms.{}.".format(name), "")
] = self.connector_manifest.config[key]
return self.JdbcParser(
db_connection_url,
source_platform,
database_name,
topic_prefix,
query,
transforms,
)
def default_get_lineages(
self,
topic_prefix,
database_name,
source_platform,
topic_names=None,
include_source_dataset=True,
):
lineages: List[KafkaConnectLineage] = list()
if not topic_names:
topic_names = self.connector_manifest.topic_names
for topic in topic_names:
# All good for NO_TRANSFORM or (SINGLE_TRANSFORM and KNOWN_NONTOPICROUTING_TRANSFORM) or (not SINGLE_TRANSFORM and all(KNOWN_NONTOPICROUTING_TRANSFORM))
# default method - as per earlier implementation
if topic_prefix:
source_table = remove_prefix(topic, topic_prefix)
else:
source_table = topic
dataset_name = (
database_name + "." + source_table if database_name else source_table
)
lineage = KafkaConnectLineage(
source_dataset=dataset_name if include_source_dataset else None,
source_platform=source_platform,
target_dataset=topic,
target_platform="kafka",
)
lineages.append(lineage)
return lineages
def get_table_names(self):
if self.connector_manifest.config.get("table.whitelist"):
return self.connector_manifest.config.get("table.whitelist").split(",") # type: ignore
if self.connector_manifest.tasks:
sep = "."
leading_quote_char = trailing_quote_char = '"'
quote_method = self.connector_manifest.config.get(
"quote.sql.identifiers", "always"
)
tableIds = ",".join(
[task["config"].get("tables") for task in self.connector_manifest.tasks]
)
if quote_method == "always":
leading_quote_char = tableIds[0]
trailing_quote_char = tableIds[-1]
# This will only work for single character quotes
tables = [
unquote(tableId.split(sep)[-1], leading_quote_char, trailing_quote_char)
for tableId in tableIds.split(",")
]
return tables
return []
def _extract_lineages(self):
lineages: List[KafkaConnectLineage] = list()
parser = self.get_parser(self.connector_manifest)
source_platform = parser.source_platform
database_name = parser.database_name
query = parser.query
topic_prefix = parser.topic_prefix
transforms = parser.transforms
self.connector_manifest.flow_property_bag = self.connector_manifest.config
# Mask/Remove properties that may reveal credentials
self.connector_manifest.flow_property_bag[
"connection.url"
] = parser.db_connection_url
if "connection.password" in self.connector_manifest.flow_property_bag:
del self.connector_manifest.flow_property_bag["connection.password"]
if "connection.user" in self.connector_manifest.flow_property_bag:
del self.connector_manifest.flow_property_bag["connection.user"]
logging.debug(
f"Extracting source platform: {source_platform} and database name: {database_name} from connection url "
)
if not self.connector_manifest.topic_names:
self.connector_manifest.lineages = lineages
return
if query:
# Lineage source_table can be extracted by parsing query
# For now, we use source table as topic (expected to be same as topic prefix)
for topic in self.connector_manifest.topic_names:
# default method - as per earlier implementation
source_table = topic
dataset_name = (
database_name + "." + source_table
if database_name
else source_table
)
lineage = KafkaConnectLineage(
source_platform=source_platform,
target_dataset=topic,
target_platform="kafka",
)
lineages.append(lineage)
self.report_warning(
self.connector_manifest.name,
"could not find input dataset, the connector has query configuration set",
)
self.connector_manifest.lineages = lineages
return
SINGLE_TRANSFORM = len(transforms) == 1
NO_TRANSFORM = len(transforms) == 0
UNKNOWN_TRANSFORM = any(
[
transform["type"]
not in self.KNOWN_TOPICROUTING_TRANSFORMS
+ self.KNOWN_NONTOPICROUTING_TRANSFORMS
for transform in transforms
]
)
ALL_TRANSFORMS_NON_TOPICROUTING = all(
[
transform["type"] in self.KNOWN_NONTOPICROUTING_TRANSFORMS
for transform in transforms
]
)
if NO_TRANSFORM or ALL_TRANSFORMS_NON_TOPICROUTING:
self.connector_manifest.lineages = self.default_get_lineages(
database_name=database_name,
source_platform=source_platform,
topic_prefix=topic_prefix,
)
return
if SINGLE_TRANSFORM and transforms[0]["type"] == self.REGEXROUTER:
tables = self.get_table_names()
topic_names = list(self.connector_manifest.topic_names)
from java.util.regex import Pattern
for source_table in tables:
topic = topic_prefix + source_table if topic_prefix else source_table
transform_regex = Pattern.compile(transforms[0]["regex"])
transform_replacement = transforms[0]["replacement"]
matcher = transform_regex.matcher(topic)
if matcher.matches():
topic = matcher.replaceFirst(transform_replacement)
                    # Additional check to confirm that the topic is present
                    # in the connector topics
if topic in self.connector_manifest.topic_names:
dataset_name = (
database_name + "." + source_table
if database_name
else source_table
)
lineage = KafkaConnectLineage(
source_dataset=dataset_name,
source_platform=source_platform,
target_dataset=topic,
target_platform="kafka",
)
topic_names.remove(topic)
lineages.append(lineage)
if topic_names:
lineages.extend(
self.default_get_lineages(
database_name=database_name,
source_platform=source_platform,
topic_prefix=topic_prefix,
topic_names=topic_names,
include_source_dataset=False,
)
)
self.report_warning(
self.connector_manifest.name,
f"could not find input dataset, for connector topics {topic_names}",
)
self.connector_manifest.lineages = lineages
return
else:
include_source_dataset = True
if SINGLE_TRANSFORM and UNKNOWN_TRANSFORM:
self.report_warning(
self.connector_manifest.name,
f"could not find input dataset, connector has unknown transform - {transforms[0]['type']}",
)
include_source_dataset = False
if not SINGLE_TRANSFORM and UNKNOWN_TRANSFORM:
self.report_warning(
self.connector_manifest.name,
"could not find input dataset, connector has one or more unknown transforms",
)
include_source_dataset = False
lineages = self.default_get_lineages(
database_name=database_name,
source_platform=source_platform,
topic_prefix=topic_prefix,
include_source_dataset=include_source_dataset,
)
self.connector_manifest.lineages = lineages
return
@dataclass
class DebeziumSourceConnector:
connector_manifest: ConnectorManifest
def __init__(self, connector_manifest: ConnectorManifest) -> None:
self.connector_manifest = connector_manifest
self._extract_lineages()
@dataclass
class DebeziumParser:
source_platform: str
server_name: Optional[str]
database_name: Optional[str]
def get_parser(
self,
connector_manifest: ConnectorManifest,
) -> DebeziumParser:
connector_class = connector_manifest.config.get("connector.class", "")
if connector_class == "io.debezium.connector.mysql.MySqlConnector":
# https://debezium.io/documentation/reference/connectors/mysql.html#mysql-topic-names
parser = self.DebeziumParser(
source_platform="mysql",
server_name=connector_manifest.config.get("database.server.name"),
database_name=None,
)
elif connector_class == "MySqlConnector":
parser = self.DebeziumParser(
source_platform="mysql",
server_name=connector_manifest.config.get("database.server.name"),
database_name=None,
)
elif connector_class == "io.debezium.connector.mongodb.MongoDbConnector":
# https://debezium.io/documentation/reference/connectors/mongodb.html#mongodb-topic-names
parser = self.DebeziumParser(
source_platform="mongodb",
server_name=connector_manifest.config.get("database.server.name"),
database_name=None,
)
elif connector_class == "io.debezium.connector.postgresql.PostgresConnector":
# https://debezium.io/documentation/reference/connectors/postgresql.html#postgresql-topic-names
parser = self.DebeziumParser(
source_platform="postgres",
server_name=connector_manifest.config.get("database.server.name"),
database_name=connector_manifest.config.get("database.dbname"),
)
elif connector_class == "io.debezium.connector.oracle.OracleConnector":
# https://debezium.io/documentation/reference/connectors/oracle.html#oracle-topic-names
parser = self.DebeziumParser(
source_platform="oracle",
server_name=connector_manifest.config.get("database.server.name"),
database_name=connector_manifest.config.get("database.dbname"),
)
elif connector_class == "io.debezium.connector.sqlserver.SqlServerConnector":
# https://debezium.io/documentation/reference/connectors/sqlserver.html#sqlserver-topic-names
parser = self.DebeziumParser(
source_platform="mssql",
server_name=connector_manifest.config.get("database.server.name"),
database_name=connector_manifest.config.get("database.dbname"),
)
elif connector_class == "io.debezium.connector.db2.Db2Connector":
# https://debezium.io/documentation/reference/connectors/db2.html#db2-topic-names
parser = self.DebeziumParser(
source_platform="db2",
server_name=connector_manifest.config.get("database.server.name"),
database_name=connector_manifest.config.get("database.dbname"),
)
elif connector_class == "io.debezium.connector.vitess.VitessConnector":
# https://debezium.io/documentation/reference/connectors/vitess.html#vitess-topic-names
parser = self.DebeziumParser(
source_platform="vitess",
server_name=connector_manifest.config.get("database.server.name"),
database_name=connector_manifest.config.get("vitess.keyspace"),
)
else:
raise ValueError(f"Connector class '{connector_class}' is unknown.")
return parser
def _extract_lineages(self):
lineages: List[KafkaConnectLineage] = list()
parser = self.get_parser(self.connector_manifest)
source_platform = parser.source_platform
server_name = parser.server_name
database_name = parser.database_name
topic_naming_pattern = r"({0})\.(\w+\.\w+)".format(server_name)
if not self.connector_manifest.topic_names:
return lineages
for topic in self.connector_manifest.topic_names:
found = re.search(re.compile(topic_naming_pattern), topic)
if found:
table_name = (
database_name + "." + found.group(2)
if database_name
else found.group(2)
)
lineage = KafkaConnectLineage(
source_dataset=table_name,
source_platform=source_platform,
target_dataset=topic,
target_platform="kafka",
)
lineages.append(lineage)
self.connector_manifest.lineages = lineages
@dataclass
class BigQuerySinkConnector:
connector_manifest: ConnectorManifest
report: KafkaConnectSourceReport
def __init__(
self, connector_manifest: ConnectorManifest, report: KafkaConnectSourceReport
) -> None:
self.connector_manifest = connector_manifest
self.report = report
self._extract_lineages()
@dataclass
class BQParser:
project: str
target_platform: str
sanitizeTopics: str
topicsToTables: Optional[str] = None
datasets: Optional[str] = None
defaultDataset: Optional[str] = None
version: str = "v1"
def report_warning(self, key: str, reason: str) -> None:
logger.warning(f"{key}: {reason}")
self.report.report_warning(key, reason)
def get_parser(
self,
connector_manifest: ConnectorManifest,
) -> BQParser:
project = connector_manifest.config["project"]
sanitizeTopics = connector_manifest.config.get("sanitizeTopics", "false")
if "defaultDataset" in connector_manifest.config:
defaultDataset = connector_manifest.config["defaultDataset"]
return self.BQParser(
project=project,
defaultDataset=defaultDataset,
target_platform="bigquery",
sanitizeTopics=sanitizeTopics.lower() == "true",
version="v2",
)
else:
# version 1.6.x and similar configs supported
datasets = connector_manifest.config["datasets"]
topicsToTables = connector_manifest.config.get("topicsToTables")
return self.BQParser(
project=project,
topicsToTables=topicsToTables,
datasets=datasets,
target_platform="bigquery",
sanitizeTopics=sanitizeTopics.lower() == "true",
)
def get_list(self, property: str) -> Iterable[Tuple[str, str]]:
entries = property.split(",")
for entry in entries:
key, val = entry.rsplit("=")
yield (key.strip(), val.strip())
def get_dataset_for_topic_v1(self, topic: str, parser: BQParser) -> Optional[str]:
topicregex_dataset_map: Dict[str, str] = dict(self.get_list(parser.datasets)) # type: ignore
from java.util.regex import Pattern
for pattern, dataset in topicregex_dataset_map.items():
patternMatcher = Pattern.compile(pattern).matcher(topic)
if patternMatcher.matches():
return dataset
return None
def sanitize_table_name(self, table_name):
table_name = re.sub("[^a-zA-Z0-9_]", "_", table_name)
if re.match("^[^a-zA-Z_].*", table_name):
table_name = "_" + table_name
return table_name
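    # Topic-to-table resolution sketch: with the v2 connector a topic may be written as
    # "dataset:table"; otherwise the topic itself is the table name and the dataset comes
    # from defaultDataset (v2) or the datasets regex map (v1), as handled below.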
def get_dataset_table_for_topic(
self, topic: str, parser: BQParser
) -> Optional[str]:
if parser.version == "v2":
dataset = parser.defaultDataset
parts = topic.split(":")
if len(parts) == 2:
dataset = parts[0]
table = parts[1]
else:
table = parts[0]
else:
dataset = self.get_dataset_for_topic_v1(topic, parser)
if dataset is None:
return None
table = topic
if parser.topicsToTables:
topicregex_table_map: Dict[str, str] = dict(
self.get_list(parser.topicsToTables) # type: ignore
)
from java.util.regex import Pattern
for pattern, tbl in topicregex_table_map.items():
patternMatcher = Pattern.compile(pattern).matcher(topic)
if patternMatcher.matches():
table = tbl
break
if parser.sanitizeTopics:
table = self.sanitize_table_name(table)
return f"{dataset}.{table}"
def _extract_lineages(self):
lineages: List[KafkaConnectLineage] = list()
parser = self.get_parser(self.connector_manifest)
if not parser:
return lineages
target_platform = parser.target_platform
project = parser.project
self.connector_manifest.flow_property_bag = self.connector_manifest.config
# Mask/Remove properties that may reveal credentials
if "keyfile" in self.connector_manifest.flow_property_bag:
del self.connector_manifest.flow_property_bag["keyfile"]
for topic in self.connector_manifest.topic_names:
dataset_table = self.get_dataset_table_for_topic(topic, parser)
if dataset_table is None:
self.report_warning(
self.connector_manifest.name,
f"could not find target dataset for topic {topic}, please check your connector configuration",
)
continue
target_dataset = f"{project}.{dataset_table}"
lineages.append(
KafkaConnectLineage(
source_dataset=topic,
source_platform="kafka",
target_dataset=target_dataset,
target_platform=target_platform,
)
)
self.connector_manifest.lineages = lineages
return
def transform_connector_config(
connector_config: Dict, provided_configs: List[ProvidedConfig]
) -> None:
"""This method will update provided configs in connector config values, if any"""
lookupsByProvider = {}
for pconfig in provided_configs:
lookupsByProvider[f"${{{pconfig.provider}:{pconfig.path_key}}}"] = pconfig.value
for k, v in connector_config.items():
for key, value in lookupsByProvider.items():
if key in v:
connector_config[k] = v.replace(key, value)
class KafkaConnectSource(Source):
"""The class for Kafka Connect source.
Attributes:
config (KafkaConnectSourceConfig): Kafka Connect cluster REST API configurations.
report (KafkaConnectSourceReport): Kafka Connect source ingestion report.
"""
config: KafkaConnectSourceConfig
report: KafkaConnectSourceReport
def __init__(self, config: KafkaConnectSourceConfig, ctx: PipelineContext):
super().__init__(ctx)
self.config = config
self.report = KafkaConnectSourceReport()
self.session = requests.Session()
self.session.headers.update(
{
"Accept": "application/json",
"Content-Type": "application/json",
}
)
# Test the connection
test_response = self.session.get(f"{self.config.connect_uri}")
test_response.raise_for_status()
logger.info(f"Connection to {self.config.connect_uri} is ok")
if not jpype.isJVMStarted():
jpype.startJVM()
@classmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> Source:
config = KafkaConnectSourceConfig.parse_obj(config_dict)
return cls(config, ctx)
def get_connectors_manifest(self) -> List[ConnectorManifest]:
"""Get Kafka Connect connectors manifest using REST API.
Enrich with lineages metadata.
"""
connectors_manifest = list()
connector_response = self.session.get(
f"{self.config.connect_uri}/connectors",
)
payload = connector_response.json()
for c in payload:
connector_url = f"{self.config.connect_uri}/connectors/{c}"
connector_response = self.session.get(connector_url)
manifest = connector_response.json()
connector_manifest = ConnectorManifest(**manifest)
if self.config.provided_configs:
transform_connector_config(
connector_manifest.config, self.config.provided_configs
)
# Initialize connector lineages
connector_manifest.lineages = list()
connector_manifest.url = connector_url
topics = self.session.get(
f"{self.config.connect_uri}/connectors/{c}/topics",
).json()
connector_manifest.topic_names = topics[c]["topics"]
# Populate Source Connector metadata
if connector_manifest.type == "source":
tasks = self.session.get(
f"{self.config.connect_uri}/connectors/{c}/tasks",
).json()
connector_manifest.tasks = tasks
# JDBC source connector lineages
if connector_manifest.config.get("connector.class").__eq__(
"io.confluent.connect.jdbc.JdbcSourceConnector"
):
connector_manifest = ConfluentJDBCSourceConnector(
connector_manifest=connector_manifest, report=self.report
).connector_manifest
else:
# Debezium Source Connector lineages
try:
connector_manifest = DebeziumSourceConnector(
connector_manifest=connector_manifest
).connector_manifest
except ValueError as err:
logger.warning(
f"Skipping connector {connector_manifest.name} due to error: {err}"
)
self.report.report_failure(connector_manifest.name, str(err))
continue
if connector_manifest.type == "sink":
if connector_manifest.config.get("connector.class").__eq__(
"com.wepay.kafka.connect.bigquery.BigQuerySinkConnector"
):
connector_manifest = BigQuerySinkConnector(
connector_manifest=connector_manifest, report=self.report
).connector_manifest
else:
self.report.report_dropped(connector_manifest.name)
logger.warning(
f"Skipping connector {connector_manifest.name}. Lineage for Connector not yet implemented"
)
pass
connectors_manifest.append(connector_manifest)
return connectors_manifest
def construct_flow_workunit(
self, connector: ConnectorManifest
) -> Iterable[MetadataWorkUnit]:
connector_name = connector.name
connector_type = connector.type
connector_class = connector.config.get("connector.class")
flow_property_bag = connector.flow_property_bag
# connector_url = connector.url # NOTE: this will expose connector credential when used
flow_urn = builder.make_data_flow_urn(
"kafka-connect", connector_name, self.config.env
)
mcp = MetadataChangeProposalWrapper(
entityType="dataFlow",
entityUrn=flow_urn,
changeType=models.ChangeTypeClass.UPSERT,
aspectName="dataFlowInfo",
aspect=models.DataFlowInfoClass(
name=connector_name,
description=f"{connector_type.capitalize()} connector using `{connector_class}` plugin.",
customProperties=flow_property_bag,
# externalUrl=connector_url, # NOTE: this will expose connector credential when used
),
)
for proposal in [mcp]:
wu = MetadataWorkUnit(
id=f"kafka-connect.{connector_name}.{proposal.aspectName}", mcp=proposal
)
self.report.report_workunit(wu)
yield wu
def construct_job_workunits(
self, connector: ConnectorManifest
) -> Iterable[MetadataWorkUnit]:
connector_name = connector.name
flow_urn = builder.make_data_flow_urn(
"kafka-connect", connector_name, self.config.env
)
lineages = connector.lineages
if lineages:
for lineage in lineages:
source_dataset = lineage.source_dataset
source_platform = lineage.source_platform
source_platform_instance = (
self.config.platform_instance_map.get(source_platform)
if self.config.platform_instance_map
else None
)
target_dataset = lineage.target_dataset
target_platform = lineage.target_platform
target_platform_instance = (
self.config.platform_instance_map.get(target_platform)
if self.config.platform_instance_map
else None
)
job_property_bag = lineage.job_property_bag
job_id = (
source_dataset
if source_dataset
else f"unknown_source.{target_dataset}"
)
job_urn = builder.make_data_job_urn_with_flow(flow_urn, job_id)
inlets = (
[
builder.make_dataset_urn_with_platform_instance(
source_platform,
source_dataset,
platform_instance=source_platform_instance,
env=self.config.env,
)
]
if source_dataset
else []
)
outlets = [
builder.make_dataset_urn_with_platform_instance(
target_platform,
target_dataset,
platform_instance=target_platform_instance,
env=self.config.env,
)
]
mcp = MetadataChangeProposalWrapper(
entityType="dataJob",
entityUrn=job_urn,
changeType=models.ChangeTypeClass.UPSERT,
aspectName="dataJobInfo",
aspect=models.DataJobInfoClass(
name=f"{connector_name}:{job_id}",
type="COMMAND",
description=None,
customProperties=job_property_bag
# externalUrl=job_url,
),
)
wu = MetadataWorkUnit(
id=f"kafka-connect.{connector_name}.{job_id}.{mcp.aspectName}",
mcp=mcp,
)
self.report.report_workunit(wu)
yield wu
mcp = MetadataChangeProposalWrapper(
entityType="dataJob",
entityUrn=job_urn,
changeType=models.ChangeTypeClass.UPSERT,
aspectName="dataJobInputOutput",
aspect=models.DataJobInputOutputClass(
inputDatasets=inlets,
outputDatasets=outlets,
),
)
wu = MetadataWorkUnit(
id=f"kafka-connect.{connector_name}.{job_id}.{mcp.aspectName}",
mcp=mcp,
)
self.report.report_workunit(wu)
yield wu
def construct_lineage_workunits(
self, connector: ConnectorManifest
) -> Iterable[MetadataWorkUnit]:
lineages = connector.lineages
if lineages:
for lineage in lineages:
source_dataset = lineage.source_dataset
source_platform = lineage.source_platform
source_platform_instance = (
self.config.platform_instance_map.get(source_platform)
if self.config.platform_instance_map
else None
)
target_dataset = lineage.target_dataset
target_platform = lineage.target_platform
target_platform_instance = (
self.config.platform_instance_map.get(target_platform)
if self.config.platform_instance_map
else None
)
mcp = MetadataChangeProposalWrapper(
entityType="dataset",
entityUrn=builder.make_dataset_urn_with_platform_instance(
target_platform,
target_dataset,
platform_instance=target_platform_instance,
env=self.config.env,
),
changeType=models.ChangeTypeClass.UPSERT,
aspectName="dataPlatformInstance",
aspect=models.DataPlatformInstanceClass(
platform=builder.make_data_platform_urn(target_platform),
instance=builder.make_dataplatform_instance_urn(
target_platform, target_platform_instance
)
if target_platform_instance
else None,
),
)
wu = MetadataWorkUnit(id=target_dataset, mcp=mcp)
self.report.report_workunit(wu)
yield wu
if source_dataset:
mcp = MetadataChangeProposalWrapper(
entityType="dataset",
entityUrn=builder.make_dataset_urn_with_platform_instance(
source_platform,
source_dataset,
platform_instance=source_platform_instance,
env=self.config.env,
),
changeType=models.ChangeTypeClass.UPSERT,
aspectName="dataPlatformInstance",
aspect=models.DataPlatformInstanceClass(
platform=builder.make_data_platform_urn(source_platform),
instance=builder.make_dataplatform_instance_urn(
source_platform, source_platform_instance
)
if source_platform_instance
else None,
),
)
wu = MetadataWorkUnit(id=source_dataset, mcp=mcp)
self.report.report_workunit(wu)
yield wu
def get_workunits(self) -> Iterable[MetadataWorkUnit]:
connectors_manifest = self.get_connectors_manifest()
for connector in connectors_manifest:
name = connector.name
if self.config.connector_patterns.allowed(name):
yield from self.construct_flow_workunit(connector)
yield from self.construct_job_workunits(connector)
if self.config.construct_lineage_workunits:
yield from self.construct_lineage_workunits(connector)
self.report.report_connector_scanned(name)
else:
self.report.report_dropped(name)
def get_report(self) -> KafkaConnectSourceReport:
return self.report
| 38.410628
| 164
| 0.587071
|
3b599299e908f808a9856f274064fbe68b5f255c
| 2,689
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/apimanagement/v20200601preview/list_open_id_connect_provider_secrets.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/apimanagement/v20200601preview/list_open_id_connect_provider_secrets.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/apimanagement/v20200601preview/list_open_id_connect_provider_secrets.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListOpenIdConnectProviderSecretsResult',
'AwaitableListOpenIdConnectProviderSecretsResult',
'list_open_id_connect_provider_secrets',
]
@pulumi.output_type
class ListOpenIdConnectProviderSecretsResult:
"""
Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.
"""
def __init__(__self__, client_secret=None):
if client_secret and not isinstance(client_secret, str):
raise TypeError("Expected argument 'client_secret' to be a str")
pulumi.set(__self__, "client_secret", client_secret)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
"""
Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.
"""
return pulumi.get(self, "client_secret")
class AwaitableListOpenIdConnectProviderSecretsResult(ListOpenIdConnectProviderSecretsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListOpenIdConnectProviderSecretsResult(
client_secret=self.client_secret)
def list_open_id_connect_provider_secrets(opid: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListOpenIdConnectProviderSecretsResult:
"""
Client or app secret used in IdentityProviders, Aad, OpenID or OAuth.
:param str opid: Identifier of the OpenID Connect Provider.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['opid'] = opid
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/v20200601preview:listOpenIdConnectProviderSecrets', __args__, opts=opts, typ=ListOpenIdConnectProviderSecretsResult).value
return AwaitableListOpenIdConnectProviderSecretsResult(
client_secret=__ret__.client_secret)
| 38.971014
| 187
| 0.70026
|
4221e48cc76350d596890ca71b5e54ce2a12603f
| 729
|
py
|
Python
|
analytics/views.py
|
praekelt/nurseconnect
|
41d46d32d7f0f498903ea6c28eed02847af2a1d0
|
[
"BSD-2-Clause"
] | null | null | null |
analytics/views.py
|
praekelt/nurseconnect
|
41d46d32d7f0f498903ea6c28eed02847af2a1d0
|
[
"BSD-2-Clause"
] | 82
|
2016-08-19T13:33:21.000Z
|
2018-07-02T13:54:38.000Z
|
analytics/views.py
|
praekeltfoundation/nurseconnect
|
41d46d32d7f0f498903ea6c28eed02847af2a1d0
|
[
"BSD-2-Clause"
] | 2
|
2016-08-19T14:11:32.000Z
|
2016-09-21T12:18:34.000Z
|
from django.utils.http import is_safe_url
from django.views.generic import View
from django.http import (
HttpResponse,
HttpResponseRedirect,
HttpResponseBadRequest
)
class AnalyticsRedirectView(View):
def get(self, request, investigation_uuid, redirect_path, *args, **kwargs):
destination = request.build_absolute_uri('/{0}'.format(redirect_path))
allowed_hosts = [request.get_host()]
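        # is_safe_url rejects destinations outside allowed_hosts, preventing open redirects.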
if is_safe_url(destination, allowed_hosts=allowed_hosts):
response = HttpResponseRedirect(destination)
response.set_cookie('investigation_uuid', investigation_uuid)
else:
response = HttpResponseBadRequest('Redirect URL is unsafe')
return response
| 33.136364
| 79
| 0.718793
|
44c5e1fc378efca87c5a3b022dd0ad545d0d3244
| 578
|
py
|
Python
|
solutions/codeforces/189A.py
|
forxhunter/ComputingIntro
|
50fa2ac030748626c694ec5c884c5ac32f0b42a8
|
[
"Apache-2.0"
] | 1
|
2021-01-02T04:31:34.000Z
|
2021-01-02T04:31:34.000Z
|
solutions/codeforces/189A.py
|
forxhunter/ComputingIntro
|
50fa2ac030748626c694ec5c884c5ac32f0b42a8
|
[
"Apache-2.0"
] | null | null | null |
solutions/codeforces/189A.py
|
forxhunter/ComputingIntro
|
50fa2ac030748626c694ec5c884c5ac32f0b42a8
|
[
"Apache-2.0"
] | null | null | null |
'''
Cut Ribbon (Codeforces 189A)
After the cutting, each ribbon piece should have length a, b or c.
After the cutting, the number of ribbon pieces should be maximum.
Solve the optimization problem:
    maximize x + y + z  subject to  a*x + b*y + c*z = n
Equivalently, c*(x + y + z) = n + (c - a)*x + (c - b)*y.
Note: accepted when submitted with PyPy.
'''
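# Worked example: n=5, a=5, b=3, c=2 -> the best cut is 3+2, i.e. the answer is 2 pieces.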
n, a, b, c = list(map(int, input().split(' ')))
count = 0
xmax = n // a
ymax = n // b
for x in range(xmax+1):
for y in range(ymax+1):
z = (n-a*x-b*y)/c
if z < 0:
break
result = x + y + z
if result > count and result % 1 == 0:
count = result
print(int(count))
| 18.645161
| 65
| 0.565744
|
bbd42d5e0f3c2c4d4a6a11b424c831974ad1bda2
| 1,821
|
py
|
Python
|
setup.py
|
emaballarin/stockroom
|
e2e098c4731a4ad0d4952a99c972cb4816bb1abe
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
emaballarin/stockroom
|
e2e098c4731a4ad0d4952a99c972cb4816bb1abe
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
emaballarin/stockroom
|
e2e098c4731a4ad0d4952a99c972cb4816bb1abe
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read().split('<!--- marker-for-pypi-to-trim --->')[0]
requirements = ['Click>=7.0', 'click_didyoumean', 'hangar @ git+https://github.com/tensorwerk/hangar-py', 'rich']
setup(
author="Sherin Thomas",
author_email='sherin@tensorwerk.com',
python_requires='>=3.6',
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Database',
'Topic :: Scientific/Engineering',
'Topic :: Utilities',
],
description="Version control for software 2.0",
project_urls={
'Documentation': 'https://stockroom.readthedocs.io',
'Issue Tracker': 'https://github.com/tensorwerk/stockroom/issues'},
entry_points={'console_scripts': ['stock=stockroom.cli:stock']},
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + '\n\n',
long_description_content_type='text/markdown',
include_package_data=True,
keywords='stockroom',
name='stockroom',
packages=find_packages(include=['stockroom', 'stockroom.*']),
url='https://github.com/tensorwerk/stockroom',
version='0.3.0',
zip_safe=False,
)
| 37.9375
| 113
| 0.643602
|
2a6f797ee669ca3609d4c2285811f75c97f0fc80
| 660
|
py
|
Python
|
ExerciciosPython/ex020.py
|
MecaFlavio/Exercicios-Python-3-Curso-em-Video
|
b93272c15b19b04deff73f1b0a684a0b49313edf
|
[
"MIT"
] | null | null | null |
ExerciciosPython/ex020.py
|
MecaFlavio/Exercicios-Python-3-Curso-em-Video
|
b93272c15b19b04deff73f1b0a684a0b49313edf
|
[
"MIT"
] | null | null | null |
ExerciciosPython/ex020.py
|
MecaFlavio/Exercicios-Python-3-Curso-em-Video
|
b93272c15b19b04deff73f1b0a684a0b49313edf
|
[
"MIT"
] | null | null | null |
# The same teacher wants to draw, at random, the presentation order of the students' assignments.
# Write a program that reads the names of four students and shows the drawn order.
import random
n1 = str(input('Escreva nome 1: '))
n2 = str(input('Escreva nome 2: '))
n3 = str(input('Escreva nome 3: '))
n4 = str(input('Escreva nome 4: '))
nomes = n1, n2, n3, n4  # also not used for anything
print('A ordem será', random.sample([n1,n2,n3,n4], k=4))
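# random.sample with k equal to the list length returns a full random permutation,
# so each name appears exactly once in the printed order.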
## Teacher's solution
n1 = str(input('Escreva nome 1: '))
n2 = str(input('Escreva nome 2: '))
n3 = str(input('Escreva nome 3: '))
n4 = str(input('Escreva nome 4: '))
lista = [n1, n2, n3, n4]
random.shuffle(lista)
print(lista)
| 27.5
| 97
| 0.677273
|
fbb1370806b5bc653b4f26c3b4416181efd21daa
| 19,087
|
py
|
Python
|
spconv/pytorch/functional.py
|
djiajunustc/spconv
|
647927ce6b64dc51fbec4eb50c7194f8ca5007e5
|
[
"Apache-2.0"
] | null | null | null |
spconv/pytorch/functional.py
|
djiajunustc/spconv
|
647927ce6b64dc51fbec4eb50c7194f8ca5007e5
|
[
"Apache-2.0"
] | null | null | null |
spconv/pytorch/functional.py
|
djiajunustc/spconv
|
647927ce6b64dc51fbec4eb50c7194f8ca5007e5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Yan Yan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pickle
import torch
from torch import nn
from torch.autograd import Function
from typing import Optional, TypeVar
from spconv.tools import CUDAKernelTimer
from spconv.pytorch import ops, SparseConvTensor
from spconv.pytorch.constants import PYTORCH_VERSION
from spconv.debug_utils import spconv_save_debug_data
from torch.autograd.function import once_differentiable
import numpy as np
from pathlib import Path
from spconv.pytorch.hash import HashTable
from cumm.gemm.layout import to_stride
from typing import List
from functools import reduce
_MAX_INT32 = 2147483647
_T = TypeVar("_T")
def identity_decorator(func: _T) -> _T:
return func
if PYTORCH_VERSION >= [1, 6, 0]:
import torch.cuda.amp as amp
_TORCH_CUSTOM_FWD = amp.custom_fwd(cast_inputs=torch.float16)
_TORCH_CUSTOM_BWD = amp.custom_bwd
else:
_TORCH_CUSTOM_FWD = identity_decorator
_TORCH_CUSTOM_BWD = identity_decorator
class SparseConvFunction(Function):
@staticmethod
@_TORCH_CUSTOM_FWD
def forward(ctx,
features,
filters,
indice_pairs,
indice_pair_num,
num_activate_out,
algo,
timer: CUDAKernelTimer = CUDAKernelTimer(False)):
ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
ctx.algo = algo
ctx.timer = timer
try:
return ops.indice_conv(features,
filters,
indice_pairs,
indice_pair_num,
num_activate_out,
False,
algo=algo,
timer=timer)
except Exception as e:
msg = "[Exception|indice_conv]"
msg += f"feat={features.shape},w={filters.shape},pair={indice_pairs.shape},"
msg += f"pairnum={indice_pair_num},act={num_activate_out},algo={algo}"
print(msg, file=sys.stderr)
spconv_save_debug_data((indice_pairs, indice_pair_num))
raise e
@staticmethod
@once_differentiable
@_TORCH_CUSTOM_BWD
def backward(ctx, grad_output):
indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
timer = ctx.timer
try:
input_bp, filters_bp = ops.indice_conv_backward(features,
filters,
grad_output,
indice_pairs,
indice_pair_num,
False,
algo=ctx.algo,
timer=timer)
except Exception as e:
msg = "[Exception|indice_conv_backward]"
msg += f"feat={features.shape},w={filters.shape},pair={indice_pairs.shape},"
msg += f"pairnum={indice_pair_num},do={grad_output.shape}"
print(msg, file=sys.stderr)
spconv_save_debug_data((indice_pairs, indice_pair_num))
raise e
return input_bp, filters_bp, None, None, None, None, None
class SparseInverseConvFunction(Function):
@staticmethod
@_TORCH_CUSTOM_FWD
def forward(ctx,
features,
filters,
indice_pairs,
indice_pair_num,
num_activate_out,
algo,
timer: CUDAKernelTimer = CUDAKernelTimer(False)):
ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
ctx.algo = algo
ctx.timer = timer
try:
return ops.indice_conv(features,
filters,
indice_pairs,
indice_pair_num,
num_activate_out,
True,
False,
algo=algo,
timer=timer)
except Exception as e:
msg = "[Exception|indice_conv|inverse]"
msg += f"feat={features.shape},w={filters.shape},pair={indice_pairs.shape},"
msg += f"pairnum={indice_pair_num},act={num_activate_out},algo={algo}"
print(msg, file=sys.stderr)
spconv_save_debug_data((indice_pairs, indice_pair_num))
raise e
@staticmethod
@once_differentiable
@_TORCH_CUSTOM_BWD
def backward(ctx, grad_output):
indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
timer = ctx.timer
try:
input_bp, filters_bp = ops.indice_conv_backward(features,
filters,
grad_output,
indice_pairs,
indice_pair_num,
True,
False,
algo=ctx.algo,
timer=timer)
except Exception as e:
msg = "[Exception|indice_conv_backward|inverse]"
msg += f"feat={features.shape},w={filters.shape},pair={indice_pairs.shape},"
msg += f"pairnum={indice_pair_num},do={grad_output.shape}"
print(msg, file=sys.stderr)
spconv_save_debug_data((indice_pairs, indice_pair_num))
raise e
return input_bp, filters_bp, None, None, None, None, None
class SparseImplicitGemmFunction(Function):
@staticmethod
@_TORCH_CUSTOM_FWD
def forward(ctx,
features: torch.Tensor,
filters: torch.Tensor,
pair_fwd: torch.Tensor,
pair_bwd: torch.Tensor,
pair_mask_fwd_splits: List[torch.Tensor],
pair_mask_bwd_splits: List[torch.Tensor],
mask_argsort_fwd_splits: List[torch.Tensor],
mask_argsort_bwd_splits: List[torch.Tensor],
num_activate_out: int,
masks: List[np.ndarray],
is_train: bool,
is_subm: bool,
timer: CUDAKernelTimer = CUDAKernelTimer(False),
fp32_accum: Optional[bool] = None):
try:
out, mask_out, mask_width = ops.implicit_gemm(features, filters,
pair_fwd,
pair_mask_fwd_splits,
mask_argsort_fwd_splits,
num_activate_out, masks,
is_train, is_subm, timer,
fp32_accum)
except Exception as e:
msg = "[Exception|implicit_gemm]"
msg += f"feat={features.shape},w={filters.shape},pair={pair_fwd.shape},"
msg += f"act={num_activate_out},issubm={is_subm},istrain={is_train}"
print(msg, file=sys.stderr)
spconv_save_debug_data((pair_fwd, pair_bwd, pair_mask_fwd_splits,
pair_mask_bwd_splits, mask_argsort_fwd_splits, mask_argsort_bwd_splits,
masks))
raise e
ctx.save_for_backward(features, filters, pair_fwd, pair_bwd)
ctx.mask_width = mask_width
ctx.mask_out = mask_out
ctx.timer = timer
ctx.pair_mask_fwd_splits = pair_mask_fwd_splits
ctx.mask_argsort_fwd_splits = mask_argsort_fwd_splits
ctx.pair_mask_bwd_splits = pair_mask_bwd_splits
ctx.mask_argsort_bwd_splits = mask_argsort_bwd_splits
# ctx.num_activate_out = num_activate_out
ctx.masks = masks
ctx.is_subm = is_subm
ctx.fp32_accum = fp32_accum
return out
@staticmethod
@once_differentiable
@_TORCH_CUSTOM_BWD
def backward(ctx, grad_output):
features, filters, pair_fwd, pair_bwd = ctx.saved_tensors
mask_width = ctx.mask_width
mask_out = ctx.mask_out
pair_mask_fwd_splits = ctx.pair_mask_fwd_splits
mask_argsort_fwd_splits = ctx.mask_argsort_fwd_splits
pair_mask_bwd_splits = ctx.pair_mask_bwd_splits
mask_argsort_bwd_splits = ctx.mask_argsort_bwd_splits
# num_activate_out = ctx.num_activate_out
masks = ctx.masks
is_subm = ctx.is_subm
timer = ctx.timer
fp32_accum = ctx.fp32_accum
try:
input_bp, filters_bp = ops.implicit_gemm_backward(
features,
filters,
grad_output,
pair_fwd,
pair_bwd,
pair_mask_fwd_splits,
pair_mask_bwd_splits,
mask_argsort_fwd_splits,
mask_argsort_bwd_splits,
mask_output_fwd=mask_out,
masks=masks,
mask_width=mask_width,
is_subm=is_subm,
timer=timer,
fp32_accum=fp32_accum)
except Exception as e:
msg = "[Exception|implicit_gemm_backward]"
msg += f"feat={features.shape},w={filters.shape},pair={pair_fwd.shape},"
msg += f"issubm={is_subm},do={grad_output.shape}"
print(msg, file=sys.stderr)
spconv_save_debug_data((pair_fwd, pair_bwd, pair_mask_fwd_splits,
pair_mask_bwd_splits, mask_argsort_fwd_splits, mask_argsort_bwd_splits,
masks))
raise e
        None_9 = [None] * 12  # one None gradient per remaining (non-tensor) forward argument
return (input_bp, filters_bp, *None_9)
class SubMConvFunction(Function):
@staticmethod
@_TORCH_CUSTOM_FWD
def forward(ctx,
features,
filters,
indice_pairs,
indice_pair_num,
num_activate_out,
algo,
timer: CUDAKernelTimer = CUDAKernelTimer(False)):
ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
ctx.algo = algo
ctx.timer = timer
try:
return ops.indice_conv(features,
filters,
indice_pairs,
indice_pair_num,
num_activate_out,
False,
True,
algo=algo,
timer=timer)
except Exception as e:
msg = "[Exception|indice_conv|subm]"
msg += f"feat={features.shape},w={filters.shape},pair={indice_pairs.shape},"
msg += f"pairnum={indice_pair_num},act={num_activate_out},algo={algo}"
print(msg, file=sys.stderr)
spconv_save_debug_data((indice_pairs, indice_pair_num))
raise e
@staticmethod
@once_differentiable
@_TORCH_CUSTOM_BWD
def backward(ctx, grad_output):
indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
timer = ctx.timer
try:
input_bp, filters_bp = ops.indice_conv_backward(features,
filters,
grad_output,
indice_pairs,
indice_pair_num,
False,
True,
algo=ctx.algo,
timer=timer)
except Exception as e:
msg = "[Exception|indice_conv_backward|subm]"
msg += f"feat={features.shape},w={filters.shape},pair={indice_pairs.shape},"
msg += f"pairnum={indice_pair_num},do={grad_output.shape}"
print(msg, file=sys.stderr)
spconv_save_debug_data((indice_pairs, indice_pair_num))
raise e
return input_bp, filters_bp, None, None, None, None, None
class SparseMaxPoolFunction(Function):
@staticmethod
@_TORCH_CUSTOM_FWD
def forward(ctx, features, indice_pairs, indice_pair_num,
num_activate_out):
out = ops.indice_maxpool(features, indice_pairs, indice_pair_num,
num_activate_out)
ctx.save_for_backward(indice_pairs, indice_pair_num, features, out)
return out
@staticmethod
@once_differentiable
@_TORCH_CUSTOM_BWD
def backward(ctx, grad_output):
indice_pairs, indice_pair_num, features, out = ctx.saved_tensors
input_bp = ops.indice_maxpool_backward(features, out, grad_output,
indice_pairs, indice_pair_num)
return input_bp, None, None, None
class SparseMaxPoolImplicitGemmFunction(Function):
@staticmethod
@_TORCH_CUSTOM_FWD
def forward(ctx, features: torch.Tensor, indice_pairs_fwd: torch.Tensor,
indice_pairs_bwd: torch.Tensor, num_activate_out: int):
out = ops.indice_maxpool_implicit_gemm(features, indice_pairs_fwd,
num_activate_out)
ctx.save_for_backward(indice_pairs_bwd, features, out)
return out
@staticmethod
@once_differentiable
@_TORCH_CUSTOM_BWD
def backward(ctx, grad_output):
indice_pairs_bwd, features, out = ctx.saved_tensors
input_bp = ops.indice_maxpool_implicit_gemm_backward(
features, out, grad_output, indice_pairs_bwd)
return input_bp, None, None, None
indice_conv = SparseConvFunction.apply
implicit_gemm = SparseImplicitGemmFunction.apply
indice_inverse_conv = SparseInverseConvFunction.apply
indice_subm_conv = SubMConvFunction.apply
indice_maxpool = SparseMaxPoolFunction.apply
indice_maxpool_implicit_gemm = SparseMaxPoolImplicitGemmFunction.apply
def _indice_to_scalar(indices: torch.Tensor, shape: List[int]):
assert indices.shape[1] == len(shape)
stride = to_stride(np.array(shape, dtype=np.int64))
scalar_inds = indices[:, -1].clone()
for i in range(len(shape) - 1):
scalar_inds += stride[i] * indices[:, i]
return scalar_inds.contiguous()
def sparse_add_hash_based(*tens: SparseConvTensor):
""" sparse add with misaligned indices.
if you use sparse add, the indice_dict will be dropped and impossible
to use inverse.
There is only one situation that keep indices: there is one operand that
its indices is output indices.
"""
table_size = 0
max_num_indices = 0
max_num_indices_idx = 0
for i, ten in enumerate(tens):
assert ten.spatial_shape == tens[0].spatial_shape
assert ten.batch_size == tens[0].batch_size
assert ten.features.shape[1] == tens[0].features.shape[1]
table_size += ten.features.shape[0]
if max_num_indices < ten.features.shape[0]:
max_num_indices_idx = i
max_num_indices = ten.features.shape[0]
first = tens[0]
feat = first.features
shape = [first.batch_size, *first.spatial_shape]
whole_shape = int(np.prod(shape))
table_size *= 2
k_type = torch.int32
if whole_shape >= _MAX_INT32:
k_type = torch.int64
table = HashTable(first.features.device, k_type, torch.int32, table_size)
scalars: List[torch.Tensor] = []
for ten in tens:
indices = ten.indices
if whole_shape >= _MAX_INT32:
indices = indices.long()
scalar = _indice_to_scalar(indices, shape)
scalars.append(scalar)
table.insert(scalar)
# assign arange to values of hash table
count = table.assign_arange_()
count_val = count.item()
out_features = torch.zeros([int(count_val), feat.shape[1]], dtype=feat.dtype, device=feat.device)
out_indices = torch.zeros([int(count_val), first.indices.shape[1]], dtype=first.indices.dtype, device=first.indices.device)
for ten, scalar in zip(tens, scalars):
out_inds, _ = table.query(scalar)
out_inds = out_inds.long()
out_features[out_inds] += ten.features
out_indices[out_inds] = ten.indices
res = SparseConvTensor(out_features, out_indices, first.spatial_shape, first.batch_size,
benchmark=first.benchmark)
if count_val == max_num_indices:
res.indice_dict = tens[max_num_indices_idx].indice_dict
res.benchmark_record = first.benchmark_record
res._timer = first._timer
res.thrust_allocator = first.thrust_allocator
return res
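# Illustrative usage sketch, not part of the library: add two SparseConvTensor
# operands whose indices are misaligned. Device, dtypes and feature sizes are
# example assumptions; the constructor arguments follow the positional form
# used above, and the internal HashTable is typically CUDA-backed, so a CUDA
# device is assumed here.
def _demo_sparse_add_hash_based():
    device = torch.device("cuda")
    spatial_shape = [8, 8, 8]
    a = SparseConvTensor(
        torch.randn(3, 16, device=device),
        torch.tensor([[0, 0, 0, 0], [0, 1, 1, 1], [0, 2, 2, 2]],
                     dtype=torch.int32, device=device),
        spatial_shape, 1)
    b = SparseConvTensor(
        torch.randn(2, 16, device=device),
        torch.tensor([[0, 1, 1, 1], [0, 3, 3, 3]],
                     dtype=torch.int32, device=device),
        spatial_shape, 1)
    out = sparse_add_hash_based(a, b)
    # union of the two index sets -> 4 unique voxels; overlapping features sum
    assert out.features.shape[0] == 4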
def sparse_add(*tens: SparseConvTensor):
"""reuse torch.sparse. the internal is sort + unique
"""
max_num_indices = 0
max_num_indices_idx = 0
ten_ths: List[torch.Tensor] = []
first = tens[0]
res_shape = [first.batch_size, *first.spatial_shape, first.features.shape[1]]
for i, ten in enumerate(tens):
assert ten.spatial_shape == tens[0].spatial_shape
assert ten.batch_size == tens[0].batch_size
assert ten.features.shape[1] == tens[0].features.shape[1]
if max_num_indices < ten.features.shape[0]:
max_num_indices_idx = i
max_num_indices = ten.features.shape[0]
ten_ths.append(torch.sparse_coo_tensor(ten.indices.T, ten.features, res_shape, requires_grad=True))
c_th = reduce(lambda x, y: x + y, ten_ths).coalesce()
c_th_inds = c_th.indices().T.contiguous().int()
c_th_values = c_th.values()
assert c_th_values.is_contiguous()
res = SparseConvTensor(c_th_values, c_th_inds, first.spatial_shape, first.batch_size,
benchmark=first.benchmark)
if c_th_values.shape[0] == max_num_indices:
res.indice_dict = tens[max_num_indices_idx].indice_dict
res.benchmark_record = first.benchmark_record
res._timer = first._timer
res.thrust_allocator = first.thrust_allocator
return res
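# Minimal sketch of the mechanism sparse_add relies on: coalescing a COO tensor
# sums values that share the same index (a sort + unique under the hood).
def _demo_coalesce_sums_duplicates():
    idx = torch.tensor([[0, 1, 1],
                        [0, 2, 2]])  # the last two entries share index (1, 2)
    vals = torch.tensor([1.0, 2.0, 3.0])
    merged = torch.sparse_coo_tensor(idx, vals, (2, 4)).coalesce()
    assert merged.values().tolist() == [1.0, 5.0]  # 2.0 and 3.0 were merged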
| 40.87152
| 127
| 0.572903
|
5d6b92ce91275e790ea67190a3c78c88703213aa
| 4,830
|
py
|
Python
|
pypureclient/flashblade/FB_2_3/models/nfs.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flashblade/FB_2_3/models/nfs.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flashblade/FB_2_3/models/nfs.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class Nfs(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'v3_enabled': 'bool',
'v4_1_enabled': 'bool',
'rules': 'str',
'export_policy': 'Reference'
}
attribute_map = {
'v3_enabled': 'v3_enabled',
'v4_1_enabled': 'v4_1_enabled',
'rules': 'rules',
'export_policy': 'export_policy'
}
required_args = {
}
def __init__(
self,
v3_enabled=None, # type: bool
v4_1_enabled=None, # type: bool
rules=None, # type: str
export_policy=None, # type: models.Reference
):
"""
Keyword args:
v3_enabled (bool): If set to `true`, the NFSv3 protocol will be enabled.
v4_1_enabled (bool): If set to `true`, the NFSv4.1 protocol will be enabled.
rules (str): The NFS export rules for the system. Either the `export_policy` or `rules` will control the NFS export functionality for the file system. If this is set, then the `policy` field will be cleared. Both `export_policy` and `rules` can not be set in the same request. Rules can be applied to an individual client or a range of clients specified by IP address (`ip_address(options)`), netmask (`ip_address/length(options)`), or netgroup (`@groupname(options)`). Possible export options include `rw`, `ro`, `fileid_32bit`, `no_fileid_32bit`, `anonuid`, `anongid`, `root_squash`, `no_root_squash`, `all_squash`, `no_all_squash`, `secure`, `insecure`, `atime`, `noatime`, and `sec`. If not specified, defaults to `*(rw,no_root_squash)`.
export_policy (Reference): The NFS export policy for the system. Either the `export_policy` or `rules` will control the NFS export functionality for the file system. If this is set, then the `rules` field will be cleared. Both `export_policy` and `rules` can not be set in the same request.
"""
if v3_enabled is not None:
self.v3_enabled = v3_enabled
if v4_1_enabled is not None:
self.v4_1_enabled = v4_1_enabled
if rules is not None:
self.rules = rules
if export_policy is not None:
self.export_policy = export_policy
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Nfs`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Nfs, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Nfs):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
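# Illustrative usage sketch, not part of the generated client: build an Nfs
# model with example export rules and serialize it. The rule string is only a
# sample value in the documented `ip_address/length(options)` form.
def _demo_nfs_model():
    nfs = Nfs(v3_enabled=False, v4_1_enabled=True,
              rules='10.20.30.0/24(rw,no_root_squash) *(ro)')
    body = nfs.to_dict()
    assert body['v4_1_enabled'] is True
    # attributes that were never set are omitted from the serialized dict
    assert 'export_policy' not in body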
| 37.153846
| 755
| 0.586749
|
79bbec66e32c7742047b535b32eb513b27ff0c36
| 39,395
|
py
|
Python
|
tests/unit/config_test.py
|
styro/salt
|
d087d94dca02ca8bf53a6c21b94944bc7957522c
|
[
"Apache-2.0"
] | 1
|
2015-05-20T16:55:50.000Z
|
2015-05-20T16:55:50.000Z
|
tests/unit/config_test.py
|
styro/salt
|
d087d94dca02ca8bf53a6c21b94944bc7957522c
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/config_test.py
|
styro/salt
|
d087d94dca02ca8bf53a6c21b94944bc7957522c
|
[
"Apache-2.0"
] | 1
|
2021-12-02T15:30:00.000Z
|
2021-12-02T15:30:00.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.unit.config_test
~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
import logging
import os
import shutil
import tempfile
from contextlib import contextmanager
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.mock import MagicMock, patch
from salttesting.helpers import ensure_in_syspath, TestsLoggingHandler
from salt.exceptions import CommandExecutionError
ensure_in_syspath('../')
# Import salt libs
import salt.minion
import salt.utils
import salt.utils.network
import integration
from salt import config as sconfig
from salt.exceptions import SaltCloudConfigError
# Import Third-Party Libs
import yaml
log = logging.getLogger(__name__)
# mock hostname should be more complex than the systems FQDN
MOCK_HOSTNAME = 'very.long.complex.fqdn.that.is.crazy.extra.long.example.com'
MOCK_ETC_HOSTS = (
'##\n'
'# Host Database\n'
'#\n'
'# localhost is used to configure the loopback interface\n'
'# when the system is booting. Do not change this entry.\n'
'##\n'
'\n' # This empty line MUST STAY HERE, it factors into the tests
'127.0.0.1 localhost ' + MOCK_HOSTNAME + '\n'
'10.0.0.100 ' + MOCK_HOSTNAME + '\n'
'200.200.200.2 other.host.alias.com\n'
'::1 ip6-localhost ip6-loopback\n'
'fe00::0 ip6-localnet\n'
'ff00::0 ip6-mcastprefix\n'
)
MOCK_ETC_HOSTNAME = '{0}\n'.format(MOCK_HOSTNAME)
PATH = 'path/to/some/cloud/conf/file'
DEFAULT = {'default_include': PATH}
def _unhandled_mock_read(filename):
'''
Raise an error because we should not be calling salt.utils.fopen()
'''
raise CommandExecutionError('Unhandled mock read for {0}'.format(filename))
@contextmanager
def _fopen_side_effect_etc_hostname(filename):
'''
Mock reading from /etc/hostname
'''
log.debug('Mock-reading {0}'.format(filename))
if filename == '/etc/hostname':
mock_open = MagicMock()
mock_open.read.return_value = MOCK_ETC_HOSTNAME
yield mock_open
elif filename == '/etc/hosts':
raise IOError(2, "No such file or directory: '{0}'".format(filename))
else:
_unhandled_mock_read(filename)
@contextmanager
def _fopen_side_effect_etc_hosts(filename):
'''
Mock /etc/hostname not existing, and falling back to reading /etc/hosts
'''
log.debug('Mock-reading {0}'.format(filename))
if filename == '/etc/hostname':
raise IOError(2, "No such file or directory: '{0}'".format(filename))
elif filename == '/etc/hosts':
mock_open = MagicMock()
mock_open.__iter__.return_value = MOCK_ETC_HOSTS.splitlines()
yield mock_open
else:
_unhandled_mock_read(filename)
class ConfigTestCase(TestCase, integration.AdaptedConfigurationTestCaseMixIn):
def test_proper_path_joining(self):
fpath = tempfile.mktemp()
try:
salt.utils.fopen(fpath, 'w').write(
"root_dir: /\n"
"key_logfile: key\n"
)
config = sconfig.master_config(fpath)
# os.path.join behavior
self.assertEqual(config['key_logfile'], os.path.join('/', 'key'))
# os.sep.join behavior
self.assertNotEqual(config['key_logfile'], '//key')
finally:
if os.path.isfile(fpath):
os.unlink(fpath)
def test_common_prefix_stripping(self):
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
try:
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
config = sconfig.master_config(fpath)
self.assertEqual(config['log_file'], fpath)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def test_load_master_config_from_environ_var(self):
original_environ = os.environ.copy()
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_MASTER_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file.
config = sconfig.master_config('/etc/salt/master')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
# file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_MASTER_CONFIG'] = env_fpath
config = sconfig.master_config(fpath)
self.assertEqual(config['log_file'], fpath)
os.environ.clear()
os.environ.update(original_environ)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def test_load_minion_config_from_environ_var(self):
original_environ = os.environ.copy()
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_MINION_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = sconfig.minion_config('/etc/salt/minion')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
# file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_MINION_CONFIG'] = env_fpath
config = sconfig.minion_config(fpath)
self.assertEqual(config['log_file'], fpath)
os.environ.clear()
os.environ.update(original_environ)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def test_load_client_config_from_environ_var(self):
original_environ = os.environ.copy()
try:
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
# Let's populate a master configuration file which should not get
# picked up since the client configuration tries to load the master
# configuration settings using the provided client configuration
# file
master_config = os.path.join(env_root_dir, 'master')
salt.utils.fopen(master_config, 'w').write(
'blah: true\n'
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, master_config)
)
os.environ['SALT_MASTER_CONFIG'] = master_config
# Now the client configuration file
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_CLIENT_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = sconfig.client_config(os.path.expanduser('~/.salt'))
self.assertEqual(config['log_file'], env_fpath)
self.assertTrue('blah' not in config)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
# file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_MASTER_CONFIG'] = env_fpath
config = sconfig.master_config(fpath)
self.assertEqual(config['log_file'], fpath)
os.environ.clear()
os.environ.update(original_environ)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def test_issue_5970_minion_confd_inclusion(self):
try:
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
minion_config = os.path.join(tempdir, 'minion')
minion_confd = os.path.join(tempdir, 'minion.d')
os.makedirs(minion_confd)
# Let's populate a minion configuration file with some basic
# settings
salt.utils.fopen(minion_config, 'w').write(
'blah: false\n'
'root_dir: {0}\n'
'log_file: {1}\n'.format(tempdir, minion_config)
)
# Now, let's populate an extra configuration file under minion.d
            # Notice that above we've set blah to False and below to True.
            # Since the minion.d files are loaded after the main configuration
            # file, overrides can happen, so the final value of blah should be
            # True.
extra_config = os.path.join(minion_confd, 'extra.conf')
salt.utils.fopen(extra_config, 'w').write(
'blah: true\n'
)
# Let's load the configuration
config = sconfig.minion_config(minion_config)
self.assertEqual(config['log_file'], minion_config)
# As proven by the assertion below, blah is True
self.assertTrue(config['blah'])
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def test_master_confd_inclusion(self):
try:
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
master_config = os.path.join(tempdir, 'master')
master_confd = os.path.join(tempdir, 'master.d')
os.makedirs(master_confd)
# Let's populate a master configuration file with some basic
# settings
salt.utils.fopen(master_config, 'w').write(
'blah: false\n'
'root_dir: {0}\n'
'log_file: {1}\n'.format(tempdir, master_config)
)
# Now, let's populate an extra configuration file under master.d
            # Notice that above we've set blah to False and below to True.
            # Since the master.d files are loaded after the main configuration
            # file, overrides can happen, so the final value of blah should be
            # True.
extra_config = os.path.join(master_confd, 'extra.conf')
salt.utils.fopen(extra_config, 'w').write(
'blah: true\n'
)
# Let's load the configuration
config = sconfig.master_config(master_config)
self.assertEqual(config['log_file'], master_config)
# As proven by the assertion below, blah is True
self.assertTrue(config['blah'])
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def test_syndic_config(self):
syndic_conf_path = self.get_config_file_path('syndic')
minion_conf_path = self.get_config_file_path('minion')
syndic_opts = sconfig.syndic_config(
syndic_conf_path, minion_conf_path
)
syndic_opts.update(salt.minion.resolve_dns(syndic_opts))
root_dir = syndic_opts['root_dir']
# id & pki dir are shared & so configured on the minion side
self.assertEqual(syndic_opts['id'], 'minion')
self.assertEqual(syndic_opts['pki_dir'], os.path.join(root_dir, 'pki'))
# the rest is configured master side
self.assertEqual(syndic_opts['master_uri'], 'tcp://127.0.0.1:54506')
self.assertEqual(syndic_opts['master_port'], 54506)
self.assertEqual(syndic_opts['master_ip'], '127.0.0.1')
self.assertEqual(syndic_opts['master'], 'localhost')
self.assertEqual(syndic_opts['sock_dir'], os.path.join(root_dir, 'minion_sock'))
self.assertEqual(syndic_opts['cachedir'], os.path.join(root_dir, 'cachedir'))
self.assertEqual(syndic_opts['log_file'], os.path.join(root_dir, 'osyndic.log'))
self.assertEqual(syndic_opts['pidfile'], os.path.join(root_dir, 'osyndic.pid'))
# Show that the options of localclient that repub to local master
# are not merged with syndic ones
self.assertEqual(syndic_opts['_master_conf_file'], minion_conf_path)
self.assertEqual(syndic_opts['_minion_conf_file'], syndic_conf_path)
def test_issue_6714_parsing_errors_logged(self):
try:
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
test_config = os.path.join(tempdir, 'config')
# Let's populate a master configuration file with some basic
# settings
salt.utils.fopen(test_config, 'w').write(
'root_dir: {0}\n'
'log_file: {0}/foo.log\n'.format(tempdir) +
'\n\n\n'
'blah:false\n'
)
with TestsLoggingHandler() as handler:
# Let's load the configuration
config = sconfig.master_config(test_config)
for message in handler.messages:
if message.startswith('ERROR:Error parsing configuration'):
break
else:
raise AssertionError(
'No parsing error message was logged'
)
finally:
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
@patch('salt.utils.network.get_fqhostname', MagicMock(return_value='localhost'))
def test_get_id_etc_hostname(self):
'''
Test calling salt.config.get_id() and falling back to looking at
/etc/hostname.
'''
with patch('salt.utils.fopen', _fopen_side_effect_etc_hostname):
self.assertEqual(
sconfig.get_id({'root_dir': None, 'minion_id_caching': False}), (MOCK_HOSTNAME, False)
)
@patch('salt.utils.network.get_fqhostname', MagicMock(return_value='localhost'))
def test_get_id_etc_hosts(self):
'''
Test calling salt.config.get_id() and falling back all the way to
looking up data from /etc/hosts.
'''
with patch('salt.utils.fopen', _fopen_side_effect_etc_hosts):
self.assertEqual(
sconfig.get_id({'root_dir': None, 'minion_id_caching': False}), (MOCK_HOSTNAME, False)
)
# <---- Salt Cloud Configuration Tests ---------------------------------------------
# cloud_config tests
@patch('salt.config.load_config', MagicMock(return_value={}))
def test_cloud_config_double_master_path(self):
'''
Tests passing in master_config_path and master_config kwargs.
'''
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH,
master_config_path='foo', master_config='bar')
@patch('salt.config.load_config', MagicMock(return_value={}))
def test_cloud_config_double_providers_path(self):
'''
Tests passing in providers_config_path and providers_config kwargs.
'''
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH,
providers_config_path='foo', providers_config='bar')
@patch('salt.config.load_config', MagicMock(return_value={}))
def test_cloud_config_double_profiles_path(self):
'''
Tests passing in profiles_config_path and profiles_config kwargs.
'''
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH,
profiles_config_path='foo', profiles_config='bar')
@patch('salt.config.load_config', MagicMock(return_value={}))
@patch('salt.config.apply_cloud_config',
MagicMock(return_value={'providers': 'foo'}))
def test_cloud_config_providers_in_opts(self):
'''
Tests mixing old cloud providers with pre-configured providers configurations
using the providers_config kwarg
'''
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH,
providers_config='bar')
@patch('salt.config.load_config', MagicMock(return_value={}))
@patch('salt.config.apply_cloud_config',
MagicMock(return_value={'providers': 'foo'}))
@patch('os.path.isfile', MagicMock(return_value=True))
def test_cloud_config_providers_in_opts_path(self):
'''
Tests mixing old cloud providers with pre-configured providers configurations
using the providers_config_path kwarg
'''
self.assertRaises(SaltCloudConfigError, sconfig.cloud_config, PATH,
providers_config_path='bar')
# apply_cloud_config tests
def test_apply_cloud_config_no_provider_detail_list(self):
'''
Tests when the provider is not contained in a list of details
'''
overrides = {'providers': {'foo': [{'bar': 'baz'}]}}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_config,
overrides, defaults=DEFAULT)
def test_apply_cloud_config_no_provider_detail_dict(self):
'''
Tests when the provider is not contained in the details dictionary
'''
overrides = {'providers': {'foo': {'bar': 'baz'}}}
self.assertRaises(SaltCloudConfigError, sconfig.apply_cloud_config,
overrides, defaults=DEFAULT)
@patch('salt.config.old_to_new',
MagicMock(return_value={'default_include': 'path/to/some/cloud/conf/file',
'providers': {'foo': {'bar': {
'provider': 'foo:bar'}}}}))
def test_apply_cloud_config_success_list(self):
'''
Tests success when valid data is passed into the function as a list
'''
overrides = {'providers': {'foo': [{'provider': 'bar'}]}}
ret = {'default_include': 'path/to/some/cloud/conf/file',
'providers': {'foo': {'bar': {'provider': 'foo:bar'}}}}
self.assertEqual(sconfig.apply_cloud_config(overrides, defaults=DEFAULT), ret)
@patch('salt.config.old_to_new',
MagicMock(return_value={'default_include': 'path/to/some/cloud/conf/file',
'providers': {'foo': {'bar': {
'provider': 'foo:bar'}}}}))
def test_apply_cloud_config_success_dict(self):
'''
Tests success when valid data is passed into function as a dictionary
'''
overrides = {'providers': {'foo': {'provider': 'bar'}}}
ret = {'default_include': 'path/to/some/cloud/conf/file',
'providers': {'foo': {'bar': {'provider': 'foo:bar'}}}}
self.assertEqual(sconfig.apply_cloud_config(overrides, defaults=DEFAULT), ret)
# apply_vm_profiles_config tests
def test_apply_vm_profiles_config_bad_profile_format(self):
'''
Tests passing in a bad profile format in overrides
'''
overrides = {'foo': 'bar', 'conf_file': PATH}
self.assertRaises(SaltCloudConfigError, sconfig.apply_vm_profiles_config,
PATH, overrides, defaults=DEFAULT)
def test_apply_vm_profiles_config_success(self):
'''
Tests passing in valid provider and profile config files successfully
'''
providers = {'test-provider':
{'digital_ocean':
{'provider': 'digital_ocean', 'profiles': {}}}}
overrides = {'test-profile':
{'provider': 'test-provider',
'image': 'Ubuntu 12.10 x64',
'size': '512MB'},
'conf_file': PATH}
ret = {'test-profile':
{'profile': 'test-profile',
'provider': 'test-provider:digital_ocean',
'image': 'Ubuntu 12.10 x64',
'size': '512MB'}}
self.assertEqual(sconfig.apply_vm_profiles_config(providers,
overrides,
defaults=DEFAULT), ret)
def test_apply_vm_profiles_config_extend_success(self):
'''
Tests profile extends functionality with valid provider and profile configs
'''
providers = {'test-config': {'ec2': {'profiles': {}, 'provider': 'ec2'}}}
overrides = {'Amazon': {'image': 'test-image-1',
'extends': 'dev-instances'},
'Fedora': {'image': 'test-image-2',
'extends': 'dev-instances'},
'conf_file': PATH,
'dev-instances': {'ssh_username': 'test_user',
'provider': 'test-config'}}
ret = {'Amazon': {'profile': 'Amazon',
'ssh_username': 'test_user',
'image': 'test-image-1',
'provider': 'test-config:ec2'},
'Fedora': {'profile': 'Fedora',
'ssh_username': 'test_user',
'image': 'test-image-2',
'provider': 'test-config:ec2'},
'dev-instances': {'profile': 'dev-instances',
'ssh_username': 'test_user',
'provider': 'test-config:ec2'}}
self.assertEqual(sconfig.apply_vm_profiles_config(providers,
overrides,
defaults=DEFAULT), ret)
# apply_cloud_providers_config tests
def test_apply_cloud_providers_config_same_providers(self):
'''
Tests when two providers are given with the same provider name
'''
overrides = {'my-dev-envs':
[{'id': 'ABCDEFGHIJKLMNOP',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2'},
{'apikey': 'abcdefghijklmnopqrstuvwxyz',
'password': 'supersecret',
'provider': 'ec2'}],
'conf_file': PATH}
self.assertRaises(SaltCloudConfigError,
sconfig.apply_cloud_providers_config,
overrides,
DEFAULT)
def test_apply_cloud_providers_config_extend(self):
'''
Tests the successful extension of a cloud provider
'''
overrides = {'my-production-envs':
[{'extends': 'my-dev-envs:ec2',
'location': 'us-east-1',
'user': 'ec2-user@mycorp.com'
}],
'my-dev-envs':
[{'id': 'ABCDEFGHIJKLMNOP',
'user': 'user@mycorp.com',
'location': 'ap-southeast-1',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2'
},
{'apikey': 'abcdefghijklmnopqrstuvwxyz',
'password': 'supersecret',
'provider': 'linode'
}],
'conf_file': PATH}
ret = {'my-production-envs':
{'ec2':
{'profiles': {},
'location': 'us-east-1',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2',
'id': 'ABCDEFGHIJKLMNOP',
'user': 'ec2-user@mycorp.com'}},
'my-dev-envs':
{'linode':
{'apikey': 'abcdefghijklmnopqrstuvwxyz',
'password': 'supersecret',
'profiles': {},
'provider': 'linode'},
'ec2':
{'profiles': {},
'location': 'ap-southeast-1',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2',
'id': 'ABCDEFGHIJKLMNOP',
'user': 'user@mycorp.com'}}}
self.assertEqual(ret,
sconfig.apply_cloud_providers_config(
overrides,
defaults=DEFAULT))
def test_apply_cloud_providers_config_extend_multiple(self):
'''
Tests the successful extension of two cloud providers
'''
overrides = {'my-production-envs':
[{'extends': 'my-dev-envs:ec2',
'location': 'us-east-1',
'user': 'ec2-user@mycorp.com'},
{'password': 'new-password',
'extends': 'my-dev-envs:linode',
'location': 'Salt Lake City'
}],
'my-dev-envs':
[{'id': 'ABCDEFGHIJKLMNOP',
'user': 'user@mycorp.com',
'location': 'ap-southeast-1',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2'},
{'apikey': 'abcdefghijklmnopqrstuvwxyz',
'password': 'supersecret',
'provider': 'linode'}],
'conf_file': PATH}
ret = {'my-production-envs':
{'linode':
{'apikey': 'abcdefghijklmnopqrstuvwxyz',
'profiles': {},
'location': 'Salt Lake City',
'provider': 'linode',
'password': 'new-password'},
'ec2':
{'user': 'ec2-user@mycorp.com',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2',
'id': 'ABCDEFGHIJKLMNOP',
'profiles': {},
'location': 'us-east-1'}},
'my-dev-envs':
{'linode':
{'apikey': 'abcdefghijklmnopqrstuvwxyz',
'password': 'supersecret',
'profiles': {},
'provider': 'linode'},
'ec2':
{'profiles': {},
'user': 'user@mycorp.com',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2',
'id': 'ABCDEFGHIJKLMNOP',
'location': 'ap-southeast-1'}}}
self.assertEqual(ret, sconfig.apply_cloud_providers_config(
overrides,
defaults=DEFAULT))
def test_apply_cloud_providers_config_extends_bad_alias(self):
'''
Tests when the extension contains an alias not found in providers list
'''
overrides = {'my-production-envs':
[{'extends': 'test-alias:ec2',
'location': 'us-east-1',
'user': 'ec2-user@mycorp.com'}],
'my-dev-envs':
[{'id': 'ABCDEFGHIJKLMNOP',
'user': 'user@mycorp.com',
'location': 'ap-southeast-1',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2'}],
'conf_file': PATH}
self.assertRaises(SaltCloudConfigError,
sconfig.apply_cloud_providers_config,
overrides,
DEFAULT)
def test_apply_cloud_providers_config_extends_bad_provider(self):
'''
Tests when the extension contains a provider not found in providers list
'''
overrides = {'my-production-envs':
[{'extends': 'my-dev-envs:linode',
'location': 'us-east-1',
'user': 'ec2-user@mycorp.com'}],
'my-dev-envs':
[{'id': 'ABCDEFGHIJKLMNOP',
'user': 'user@mycorp.com',
'location': 'ap-southeast-1',
'key': 'supersecretkeysupersecretkey',
'provider': 'ec2'}],
'conf_file': PATH}
self.assertRaises(SaltCloudConfigError,
sconfig.apply_cloud_providers_config,
overrides,
DEFAULT)
def test_apply_cloud_providers_config_extends_no_provider(self):
'''
Tests when no provider is supplied in the extends statement
'''
overrides = {'my-production-envs':
[{'extends': 'my-dev-envs',
'location': 'us-east-1',
'user': 'ec2-user@mycorp.com'}],
'my-dev-envs':
[{'id': 'ABCDEFGHIJKLMNOP',
'user': 'user@mycorp.com',
'location': 'ap-southeast-1',
'key': 'supersecretkeysupersecretkey',
'provider': 'linode'}],
'conf_file': PATH}
self.assertRaises(SaltCloudConfigError,
sconfig.apply_cloud_providers_config,
overrides,
DEFAULT)
def test_apply_cloud_providers_extends_not_in_providers(self):
'''
Tests when extends is not in the list of providers
'''
overrides = {'my-production-envs':
[{'extends': 'my-dev-envs ec2',
'location': 'us-east-1',
'user': 'ec2-user@mycorp.com'}],
'my-dev-envs':
[{'id': 'ABCDEFGHIJKLMNOP',
'user': 'user@mycorp.com',
'location': 'ap-southeast-1',
'key': 'supersecretkeysupersecretkey',
'provider': 'linode'}],
'conf_file': PATH}
self.assertRaises(SaltCloudConfigError,
sconfig.apply_cloud_providers_config,
overrides,
DEFAULT)
# is_provider_configured tests
def test_is_provider_configured_no_alias(self):
'''
Tests when provider alias is not in opts
'''
opts = {'providers': 'test'}
provider = 'foo:bar'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
def test_is_provider_configured_no_driver(self):
'''
Tests when provider driver is not in opts
'''
opts = {'providers': {'foo': 'baz'}}
provider = 'foo:bar'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
def test_is_provider_configured_key_is_none(self):
'''
Tests when a required configuration key is not set
'''
opts = {'providers': {'foo': {'bar': {'api_key': None}}}}
provider = 'foo:bar'
self.assertFalse(
sconfig.is_provider_configured(opts,
provider,
required_keys=('api_key',)))
def test_is_provider_configured_success(self):
'''
Tests successful cloud provider configuration
'''
opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'foo:bar'
ret = {'api_key': 'baz'}
self.assertEqual(
sconfig.is_provider_configured(opts,
provider,
required_keys=('api_key',)), ret)
def test_is_provider_configured_multiple_driver_not_provider(self):
'''
        Tests when the driver is not the same as the provider when
searching through multiple providers
'''
opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'foo'
self.assertFalse(sconfig.is_provider_configured(opts, provider))
def test_is_provider_configured_multiple_key_is_none(self):
'''
Tests when a required configuration key is not set when
searching through multiple providers
'''
opts = {'providers': {'foo': {'bar': {'api_key': None}}}}
provider = 'bar'
self.assertFalse(
sconfig.is_provider_configured(opts,
provider,
required_keys=('api_key',)))
def test_is_provider_configured_multiple_success(self):
'''
Tests successful cloud provider configuration when searching
through multiple providers
'''
opts = {'providers': {'foo': {'bar': {'api_key': 'baz'}}}}
provider = 'bar'
ret = {'api_key': 'baz'}
self.assertEqual(
sconfig.is_provider_configured(opts,
provider,
required_keys=('api_key',)), ret)
# other cloud configuration tests
def test_load_cloud_config_from_environ_var(self):
original_environ = os.environ.copy()
tempdir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
try:
env_root_dir = os.path.join(tempdir, 'foo', 'env')
os.makedirs(env_root_dir)
env_fpath = os.path.join(env_root_dir, 'config-env')
salt.utils.fopen(env_fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(env_root_dir, env_fpath)
)
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
# Should load from env variable, not the default configuration file
config = sconfig.cloud_config('/etc/salt/cloud')
self.assertEqual(config['log_file'], env_fpath)
os.environ.clear()
os.environ.update(original_environ)
root_dir = os.path.join(tempdir, 'foo', 'bar')
os.makedirs(root_dir)
fpath = os.path.join(root_dir, 'config')
salt.utils.fopen(fpath, 'w').write(
'root_dir: {0}\n'
'log_file: {1}\n'.format(root_dir, fpath)
)
# Let's set the environment variable, yet, since the configuration
# file path is not the default one, i.e., the user has passed an
            # alternative configuration file from the CLI parser, the
# environment variable will be ignored.
os.environ['SALT_CLOUD_CONFIG'] = env_fpath
config = sconfig.cloud_config(fpath)
self.assertEqual(config['log_file'], fpath)
finally:
# Reset the environ
os.environ.clear()
os.environ.update(original_environ)
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def test_deploy_search_path_as_string(self):
temp_conf_dir = os.path.join(integration.TMP, 'issue-8863')
config_file_path = os.path.join(temp_conf_dir, 'cloud')
deploy_dir_path = os.path.join(temp_conf_dir, 'test-deploy.d')
try:
for directory in (temp_conf_dir, deploy_dir_path):
if not os.path.isdir(directory):
os.makedirs(directory)
default_config = sconfig.cloud_config(config_file_path)
default_config['deploy_scripts_search_path'] = deploy_dir_path
with salt.utils.fopen(config_file_path, 'w') as cfd:
cfd.write(yaml.dump(default_config))
default_config = sconfig.cloud_config(config_file_path)
# Our custom deploy scripts path was correctly added to the list
self.assertIn(
deploy_dir_path,
default_config['deploy_scripts_search_path']
)
            # And it's even the first occurrence, as it should be
self.assertEqual(
deploy_dir_path,
default_config['deploy_scripts_search_path'][0]
)
finally:
if os.path.isdir(temp_conf_dir):
shutil.rmtree(temp_conf_dir)
def test_includes_load(self):
'''
Tests that cloud.{providers,profiles}.d directories are loaded, even if not
directly passed in through path
'''
config = sconfig.cloud_config(self.get_config_file_path('cloud'))
self.assertIn('ec2-config', config['providers'])
self.assertIn('ec2-test', config['profiles'])
# <---- Salt Cloud Configuration Tests ---------------------------------------------
if __name__ == '__main__':
from integration import run_tests
run_tests(ConfigTestCase, needs_daemon=False)
| 42.224009
| 106
| 0.541211
|
69ab7aac19ef283e295473991d6939c156b87378
| 7,875
|
py
|
Python
|
dynamics/linkage.py
|
udayansarin/SPIKM
|
01a6363056c77cdd497dca1ce01f192722210ec6
|
[
"MIT"
] | null | null | null |
dynamics/linkage.py
|
udayansarin/SPIKM
|
01a6363056c77cdd497dca1ce01f192722210ec6
|
[
"MIT"
] | null | null | null |
dynamics/linkage.py
|
udayansarin/SPIKM
|
01a6363056c77cdd497dca1ce01f192722210ec6
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
from numpy.polynomial import Polynomial as Poly
from dynamics.spikm_trig import Toolkit as STrig
class CrankShaft:
def __init__(self, node, shaft, crank_length, crank_start_angle, link_length, crank_plane):
"""
initialize the crankshaft assembly between the motor and the corresponding connection on the platform
:param node: dict{'x', 'y', 'z'}, location of the platform connection in the global x, y, z coordinate system
:param shaft: dict{'x', 'y', 'z'}, location of the motor shaft in the global x, y, z coordinate system
:param crank_length: float or int length of crankshaft
        :param crank_start_angle: float or int, starting angle of the crankshaft in degrees (see _Crank: 0 is horizontal, valid range -90 to 90)
:param link_length: float or int length of linkage
:param crank_plane: angle that the plane of rotation of the motor shaft subtends to the global x axis
"""
self.init = False
self.incompatible = False
try:
for connection, coordinates in {'platform': node, 'motor': shaft}.items():
for coordinate, val in coordinates.items():
                    assert ((type(val) is int) or (type(val) is float) or isinstance(val, np.floating))
except AssertionError:
print(f"Error in coordinate for {connection}[{coordinate}]\nvalue:{val}, {type(val)}")
return
try:
assert(
                ((type(crank_plane) is int) or (type(crank_plane) is float) or isinstance(crank_plane, np.floating))
and -360 <= crank_plane <= 360
)
except AssertionError:
print(f"Error in initializing crank orientation: angle: {crank_plane}")
return
self._crank = self._Crank(length=crank_length, start_angle=crank_start_angle)
self._link = self._Linkage(length=link_length)
if not(self._crank.init and self._link.init):
print("Terminating linkage initialization due to setup error!")
return
self.init = True
self._crank_plane = crank_plane
self._node = node
self._shaft = shaft
self._connector = self._con_loc_global()
def _con_loc_global(self):
"""
calculate the coordinate of the crank-linkage connection in global coordinates
:return: dict, {'x', 'y', 'z'} crank-linkage connection in global coordinates
"""
_alpha = 0
_beta = 0
_gamma = self._crank_plane
# global coordinate of the motor shaft + the local crank vector rotated to the global coordinate system
_delta_coordinates = STrig.apply_rotation(_alpha, _beta, _gamma, self._crank.connector)
return {'x': self._shaft['x'] + _delta_coordinates[0],
'y': self._shaft['y'] + _delta_coordinates[1],
'z': self._shaft['z'] + _delta_coordinates[2]}
def _node_loc_local(self):
"""
calculate the local coordinate of the linkage-platform connection for computational simplicity
        :return: dict, {'x', 'y', 'z'} coordinates of the linkage-platform connection in the crank's local frame
"""
_alpha = 0
_beta = 0
_gamma = -self._crank_plane
_vector = np.array([self._node['x'], self._node['y'], self._node['z']]) - \
np.array([self._shaft['x'], self._shaft['y'], self._shaft['z']])
_loc_vector = STrig.apply_rotation(_alpha, _beta, _gamma, _vector)
return {'x': _loc_vector[0], 'y': _loc_vector[1], 'z': _loc_vector[2]}
def move(self, x_new, y_new, z_new):
"""
move the crankshaft into the commanded node orientation
:param x_new: float, new x coordinate of the crankshaft node
:param y_new: float, new y coordinate of the crankshaft node
:param z_new: float, new z coordinate of the crankshaft node
:return:
"""
self._node['x'] = x_new
self._node['y'] = y_new
self._node['z'] = z_new
node_local = self._node_loc_local() # local x, y and z for the platform
x = node_local['x']
y = node_local['y']
z = node_local['z']
k_sq = self._crank.length**2 - self._link.length**2 + x**2 + y**2 + z**2
a = 1 + (x/z)**2 # x^2 term
b = -(k_sq*x)/(z**2) # x term
c = (k_sq/(2*z))**2 - self._crank.length**2 # constant term
c_local_x = Poly([c, b, a]).roots()[1] # ax^2 + bx + c = 0
if np.iscomplex(c_local_x):
print("You cannot complete this move!")
self.incompatible = True
else:
self.incompatible = False
c_local_z = k_sq/(2*z) - (c_local_x*x/z)
self._crank.move({'x': c_local_x, 'z': c_local_z})
self._connector = self._con_loc_global()
return
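    # Geometry note (illustrative, added for clarity): the quadratic above comes
    # from intersecting the crank circle x_c**2 + z_c**2 = Lc**2 (crank tip in
    # its local x-z plane) with the sphere (x - x_c)**2 + y**2 + (z - z_c)**2 = Ll**2
    # centred on the platform node. Subtracting the two equations gives the
    # linear relation z_c = k/(2*z) - x_c*x/z with
    # k = Lc**2 - Ll**2 + x**2 + y**2 + z**2, and substituting it back into the
    # circle equation yields a*x_c**2 + b*x_c + c = 0 with exactly the
    # coefficients computed in move(). Complex roots mean the two surfaces do
    # not intersect, i.e. the commanded pose is unreachable.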
def get_linkage(self):
"""
develop critical parameters for the orientation of the link in space and its motor
:return: dict, {'feasible': bool, move feasible, 'x', 'y', 'z': list of link coordinates, 'angle': float, motor}
"""
x = []
y = []
z = []
if not self.incompatible:
x = [self._shaft['x'], self._connector['x'], self._node['x']]
y = [self._shaft['y'], self._connector['y'], self._node['y']]
z = [self._shaft['z'], self._connector['z'], self._node['z']]
return {
'feasible': (not self.incompatible),
'x': x,
'y': y,
'z': z,
'angle': math.degrees(self._crank.angle)
}
class _Crank:
"""
Instances of this class show behaviour of the crank portion of a crankshaft
"""
def __init__(self, length, start_angle):
"""
define a crank connected to a motor with the motor shaft at (0, 0, 0) in local coordinates
:param length: length of the crank shaft
:param start_angle: starting angle of the crank shaft, between (-90, 90) with 0 being horizontal
"""
self.init = False
try:
assert (
((type(length) is int) or (type(length) is float))
and length > 0
and ((type(start_angle) is float) or type(start_angle) is int)
and -90 <= start_angle <= 90
)
except AssertionError:
print("Error in initializing crank!")
return
self.init = True
self.length = length
self.angle = math.radians(start_angle)
_connector = STrig.get_xz(length=self.length, theta=self.angle)
self.connector = [_connector['x'], 0, _connector['z']]
def move(self, new_connector):
"""
update the crank angle based on the movement of the linkage
:param new_connector: new position of the crank as dict {'x': _, 'z': _}
:return:
"""
self.connector = [new_connector['x'], 0, new_connector['z']]
self.angle = STrig.get_theta(x=new_connector['x'], z=new_connector['z'])
return
@property
def position(self):
return self.angle
class _Linkage:
"""
Instances of this class show behaviour of the linkage part of a crankshaft
"""
def __init__(self, length):
"""
define the linkage connecting a crank to the corresponding stewart platform connection
"""
self.init = False
try:
assert (
((type(length) is int) or type(length) is float)
and length > 0
)
except AssertionError:
print("Error in initializing linkage")
return
self.init = True
self.length = length
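# Illustrative usage sketch with arbitrary example dimensions (not part of the
# module); it assumes the trig helper conventions, in particular that
# STrig.apply_rotation(0, 0, 0, v) is the identity. The assembly is built,
# commanded to a new node position, and the returned link coordinates are
# checked against the crank and link lengths.
def _demo_crankshaft_usage():
    assembly = CrankShaft(node={'x': 60.0, 'y': 0.0, 'z': 80.0},
                          shaft={'x': 0.0, 'y': 0.0, 'z': 0.0},
                          crank_length=30.0,
                          crank_start_angle=0.0,
                          link_length=90.0,
                          crank_plane=0.0)
    assembly.move(x_new=60.0, y_new=0.0, z_new=75.0)
    link = assembly.get_linkage()
    if link['feasible']:
        shaft_pt, conn_pt, node_pt = zip(link['x'], link['y'], link['z'])
        # shaft-to-connector distance is the crank length, connector-to-node
        # distance is the link length
        assert math.isclose(math.dist(shaft_pt, conn_pt), 30.0, rel_tol=1e-6)
        assert math.isclose(math.dist(conn_pt, node_pt), 90.0, rel_tol=1e-6)
        print(f"motor angle: {link['angle']:.1f} degrees")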
| 42.112299
| 120
| 0.569143
|
f94ff00734d4a3123230507d6ed1af45da983388
| 38,694
|
py
|
Python
|
pedal/tifa/tifa_visitor.py
|
acbart/python-analysis
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 14
|
2019-08-22T03:40:23.000Z
|
2022-03-13T00:30:53.000Z
|
pedal/tifa/tifa_visitor.py
|
pedal-edu/pedal
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 74
|
2019-09-12T04:35:56.000Z
|
2022-01-26T19:21:32.000Z
|
pedal/tifa/tifa_visitor.py
|
acbart/python-analysis
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 2
|
2018-09-16T22:39:15.000Z
|
2018-09-17T12:53:28.000Z
|
"""
Main TIFA visitor-based algorithm here.
TODO: JoinedStr
"""
import ast
# TODO: FileType, DayType, TimeType,
from pedal.core.commands import system_error
from pedal.tifa.tifa_core import TifaCore, TifaAnalysis
from pedal.types.definitions import (UnknownType,
FunctionType, ClassType, InstanceType,
NumType, NoneType, BoolType, TupleType,
ListType, StrType, GeneratorType,
DictType, ModuleType, SetType,
LiteralNum, LiteralBool,
LiteralNone, LiteralStr,
LiteralTuple, Type)
from pedal.types.normalize import (get_pedal_type_from_json,
get_pedal_literal_from_pedal_type,
get_pedal_type_from_annotation,
get_pedal_type_from_value)
from pedal.types.builtin import (get_builtin_module, get_builtin_function)
from pedal.types.operations import (are_types_equal,
VALID_UNARYOP_TYPES, VALID_BINOP_TYPES,
ORDERABLE_TYPES, INDEXABLE_TYPES)
from pedal.tifa.constants import TOOL_NAME
from pedal.tifa.contexts import NewPath, NewScope
from pedal.tifa.identifier import Identifier
from pedal.tifa.state import State
from pedal.tifa.feedbacks import (action_after_return, return_outside_function,
write_out_of_scope, unconnected_blocks,
iteration_problem, not_a_function,
initialization_problem, unused_variable,
overwritten_variable, iterating_over_non_list,
iterating_over_empty_list, incompatible_types,
parameter_type_mismatch, read_out_of_scope,
type_changes, unnecessary_second_branch,
recursive_call, multiple_return_types,
possible_initialization_problem,
incorrect_arity, else_on_loop_body,
module_not_found, nested_function_definition,
unused_returned_value, invalid_indexing)
from pedal.utilities.system import IS_PYTHON_39
class Tifa(TifaCore, ast.NodeVisitor):
"""
TIFA subclass for traversing an AST and finding common issues.
You can instantiate this class, manipulate settings, and then process
some code or AST.
"""
def process_code(self, code, filename=None, reset=True):
"""
Processes the AST of the given source code to generate a report.
Args:
code (str): The Python source code
filename (str): The filename of the source code (defaults to
                the submission's main file).
reset (bool): Whether or not to reset the results from the
previous analysis before running this one.
Returns:
            TifaAnalysis: The analysis result, whether it succeeded or failed.
"""
if reset or self.analysis is None:
self.analysis = TifaAnalysis()
filename = filename or self.report.submission.main_file
self.line_offset = self.report.submission.line_offsets.get(filename, 0)
# Attempt parsing - might fail!
try:
ast_tree = ast.parse(code, filename)
except Exception as error:
self.analysis.fail(error)
system_error(TOOL_NAME, "Could not parse code: " + str(error),
report=self.report)
return self.analysis
# Attempt processing code - might fail!
try:
self.process_ast(ast_tree)
except Exception as error:
self.analysis.fail(error)
system_error(TOOL_NAME, "Successfully parsed but could not "
"process AST: " + str(error),
report=self.report)
# Return whatever we got
return self.analysis
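    # Typical flow (a hedged sketch; the exact constructor arguments come from
    # TifaCore and are assumed here):
    #     tifa = Tifa(report=some_report)
    #     analysis = tifa.process_code(student_source)
    #     analysis.variables   # name map collected during the traversal
    #     analysis.node_types  # Type inferred for each visited AST node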
def process_ast(self, ast_tree):
"""
Given an AST, actually performs the type and flow analyses to return a
report.
Args:
ast_tree (AST): The AST object
"""
self.reset()
# Traverse every node
self.visit(ast_tree)
# Update analysis, finish out the current scope.
self.analysis.variables = self.name_map
self._finish_scope()
# Collect top level variables
self._collect_top_level_variables()
return self.analysis
def visit(self, node):
"""
Process this node by calling its appropriate visit_*
Args:
node (AST): The node to visit
Returns:
Type: The type calculated during the visit.
"""
# Start processing the node
self.node_chain.append(node)
self.ast_id += 1
# Actions after return?
if len(self.scope_chain) > 1:
return_state = self.find_variable_scope("*return")
if return_state.exists and return_state.in_scope:
if return_state.state.set == "yes":
self._issue(action_after_return(self.locate(),
report=self.report))
# No? All good, let's enter the node
self.final_node = node
result = ast.NodeVisitor.visit(self, node)
# If a node failed to return something, return the UNKNOWN TYPE
if result is None:
result = UnknownType()
self.analysis.node_types[node] = result
# Pop the node out of the chain
self.ast_id -= 1
self.node_chain.pop()
return result
def _visit_nodes(self, nodes):
"""
Visit all the nodes in the given list.
Args:
nodes (list): A list of values, of which any AST nodes will be
visited.
"""
for node in nodes:
if isinstance(node, ast.AST):
self.visit(node)
@staticmethod
def walk_targets(targets, target_type, walker):
"""
Iterate through the targets and call the given function on each one.
Args:
targets (list of Ast nodes): A list of potential targets to be
traversed.
target_type (Type): The given type to be unraveled and applied to the
targets.
walker (Ast Node, Type -> None): A function that will process
each target and unravel the type.
"""
for target in targets:
walker(target, target_type)
def _walk_target(self, target, target_type):
"""
Recursively apply the type to the target
Args:
target (Ast): The current AST node to process
target_type (Type): The type to apply to this node
"""
if isinstance(target, ast.Name):
self.store_iter_variable(target.id, target_type, self.locate(target))
return target.id
elif isinstance(target, (ast.Tuple, ast.List)):
result = None
for i, elt in enumerate(target.elts):
elt_type = target_type.iterate(LiteralNum(i))
potential_name = self._walk_target(elt, elt_type)
if potential_name is not None and result is None:
result = potential_name
return result
# TODO: Properly handle assignments with subscripts
def assign_target(self, target, target_type):
"""
Assign the type to the target, handling all kinds of assignment
statements, including Names, Tuples/Lists, Subscripts, and
Attributes.
Args:
target (ast.AST): The target AST Node.
target_type (Type): The TIFA type.
Returns:
"""
if isinstance(target, ast.Name):
self.store_variable(target.id, target_type)
elif isinstance(target, (ast.Tuple, ast.List)):
for i, elt in enumerate(target.elts):
elt_type = target_type.iterate(LiteralNum(i))
self.assign_target(elt, elt_type)
elif isinstance(target, ast.Subscript):
left_hand_type = self.visit(target.value)
if isinstance(left_hand_type, ListType):
# TODO: Handle updating value in list
pass
elif isinstance(left_hand_type, DictType):
# TODO: Update this for Python 3.9, now that Slice notation has changed
if not isinstance(target.slice, ast.Index):
# TODO: Can't subscript a dictionary assignment
return None
literal = self.get_literal(target.slice.value)
if not literal:
key_type = self.visit(target.slice.value)
left_hand_type.empty = False
left_hand_type.keys = [key_type.clone()]
left_hand_type.values = [target_type.clone()]
elif left_hand_type.literals:
original_type = left_hand_type.has_literal(literal)
if not original_type:
left_hand_type.update_key(literal, target_type.clone())
elif not are_types_equal(original_type, target_type):
# TODO: Fix "Dictionary" to be the name of the variable
self._issue(type_changes(self.locate(), 'Dictionary', original_type, target_type))
elif isinstance(target, ast.Attribute):
left_hand_type = self.visit(target.value)
if isinstance(left_hand_type, InstanceType):
left_hand_type.add_attr(target.attr, target_type)
# TODO: Otherwise we attempted to assign to a non-instance
# TODO: Handle minor type changes (e.g., appending to an inner list)
def _visit_collection_loop(self, node):
was_empty = False
# Handle the iteration list
iter_list = node.iter
iter_list_name = None
if isinstance(iter_list, ast.Name):
iter_list_name = iter_list.id
if iter_list_name == "___":
self._issue(unconnected_blocks(self.locate(iter_list), report=self.report))
state = self.iterate_variable(iter_list_name, self.locate(iter_list))
iter_type = state.type
else:
iter_type = self.visit(iter_list)
if iter_type.is_empty():
# TODO: It should check if its ONLY ever iterating over an empty list.
# For now, only reports if we are NOT in a function
was_empty = True
if len(self.scope_chain) == 1:
self._issue(iterating_over_empty_list(self.locate(iter_list), iter_list_name))
if not isinstance(iter_type, INDEXABLE_TYPES):
self._issue(iterating_over_non_list(self.locate(iter_list), iter_list_name, report=self.report))
iter_subtype = iter_type.iterate(LiteralNum(0))
# Handle the iteration variable
iter_variable_name = self._walk_target(node.target, iter_subtype)
# Check that the iteration list and variable are distinct
if iter_variable_name and iter_list_name:
if iter_variable_name == iter_list_name:
self._issue(iteration_problem(self.locate(node.target), iter_variable_name))
return was_empty
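    # Illustrative student-code patterns the checks above are meant to flag
    # (comments only, not executed):
    #     for item in 5: ...        -> iterating_over_non_list
    #     items = []
    #     for item in items: ...    -> iterating_over_empty_list (top level only)
    #     for items in items: ...   -> iteration_problem (target shadows the list)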
def visit_AnnAssign(self, node):
"""
TODO: Implement!
"""
# Name, Attribute, or SubScript
target = node.target
# Type
annotation = node.annotation
# Optional assigned value
value = node.value
# 0 or 1, with 1 indicating pure names (not expressions)
simple = node.simple
# If it's a class attribute, then build up the type!
if simple:
self.visit(annotation)
annotation = get_pedal_type_from_annotation(annotation, self)
current_scope = self.scope_chain[0]
if current_scope in self.class_scopes:
# TODO: Treat it as a different kind of ClassType? TypedDict?
self.class_scopes[current_scope].add_attr(target.id, annotation)
def visit_Expr(self, node):
"""
Any expression being used as a statement.
Args:
node (AST): An Expr node
Returns:
"""
value = self.visit(node.value)
if isinstance(node.value, ast.Call) and not isinstance(value, NoneType):
# TODO: Helper function to get name with title ("append method")
if isinstance(node.value.func, ast.Name):
call_type = 'function'
else:
call_type = 'method'
name = self.identify_caller(node.value)
self._issue(unused_returned_value(self.locate(), name,
call_type, value))
def visit_Assign(self, node):
"""
Simple assignment statement:
__targets__ = __value__
Args:
node (AST): An Assign node
Returns:
None
"""
# Handle value
value_type = self.visit(node.value)
# Handle targets
self._visit_nodes(node.targets)
self.walk_targets(node.targets, value_type, self.assign_target)
def visit_AugAssign(self, node):
"""
Args:
node:
Returns:
"""
# Handle value
right = self.visit(node.value)
# Handle target
left = self.visit(node.target)
# Target is always a Name, Subscript, or Attribute
name = self.identify_caller(node.target)
# Handle operation
self.load_variable(name)
if isinstance(left, UnknownType) or isinstance(right, UnknownType):
return UnknownType()
elif type(node.op) in VALID_BINOP_TYPES:
op_lookup = VALID_BINOP_TYPES[type(node.op)]
if type(left) in op_lookup:
op_lookup = op_lookup[type(left)]
if type(right) in op_lookup:
op_lookup = op_lookup[type(right)]
result_type = op_lookup(left, right)
self.assign_target(node.target, result_type)
return result_type
self._issue(incompatible_types(self.locate(), node.op, left, right, report=self.report))
def visit_Attribute(self, node):
"""
Args:
node:
Returns:
"""
# Handle value
value_type = self.visit(node.value)
self.check_common_bad_lookups(value_type, node.attr, node.value)
# Handle ctx
# TODO: Handling contexts
# Handle attr
result = value_type.load_attr(node.attr)
return result
def visit_BinOp(self, node):
"""
Args:
node:
Returns:
"""
# Handle left and right
left = self.visit(node.left)
right = self.visit(node.right)
# Handle operation
if isinstance(left, UnknownType) or isinstance(right, UnknownType):
return UnknownType()
elif type(node.op) in VALID_BINOP_TYPES:
op_lookup = VALID_BINOP_TYPES[type(node.op)]
if type(left) in op_lookup:
op_lookup = op_lookup[type(left)]
if type(right) in op_lookup:
op_lookup = op_lookup[type(right)]
return op_lookup(left, right)
self._issue(incompatible_types(self.locate(), node.op, left, right, report=self.report))
return UnknownType()
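    # Illustrative shape only (an assumption about pedal.types.operations, not
    # its actual contents): the nested lookups in visit_AugAssign/visit_BinOp
    # expect a table of operator -> left type -> right type -> result builder,
    # roughly:
    #     {ast.Add: {NumType: {NumType: lambda left, right: NumType()},
    #                StrType: {StrType: lambda left, right: StrType()}}}
    # so op_lookup[type(left)][type(right)](left, right) yields the result Type.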
def visit_Bool(self, node):
"""
Visit a constant boolean value.
Args:
node (ast.AST): The boolean value Node.
Returns:
Type: A Bool type.
"""
return BoolType()
def visit_BoolOp(self, node):
"""
Args:
node:
Returns:
"""
# Handle left and right
values = []
for value in node.values:
values.append(self.visit(value))
# TODO: Truthiness is not supported! Probably need a Union type
# TODO: Literals used as truthy value
# Handle operation
return BoolType()
def visit_Call(self, node):
"""
Args:
node:
Returns:
"""
# Handle func part (Name or Attribute)
function_type = self.visit(node.func)
# TODO: Need to grab the actual type in some situations
callee = self.identify_caller(node)
# Handle args
arguments = [self.visit(arg) for arg in node.args] if node.args else []
# Check special common mistakes
# TODO: Handle keywords
# TODO: Handle star args
# TODO: Handle kwargs
if isinstance(function_type, FunctionType):
# Test if we have called this definition before
if function_type.definition not in self.definition_chain:
self.definition_chain.append(function_type.definition)
# Function invocation
result = function_type.definition(self, function_type, callee,
arguments, self.locate())
self.definition_chain.pop()
return result
else:
self._issue(recursive_call(self.locate(), callee, report=self.report))
elif isinstance(function_type, ClassType):
constructor = function_type.get_constructor().definition
self.definition_chain.append(constructor)
result = constructor(self, constructor, callee, arguments, self.locate())
self.definition_chain.pop()
if '__init__' in function_type.fields:
initializer = function_type.fields['__init__']
if isinstance(initializer, FunctionType):
self.definition_chain.append(initializer)
initializer.definition(self, initializer, result, [result] + arguments, self.locate())
self.definition_chain.pop()
return result
elif isinstance(function_type, (NumType, StrType, BoolType, NoneType)):
self._issue(not_a_function(self.locate(), callee, function_type, report=self.report))
return UnknownType()
def visit_ClassDef(self, node):
"""
Args:
node:
"""
class_name = node.name
new_class_type = ClassType(class_name)
self.store_variable(class_name, new_class_type)
# TODO: Define a new scope definition that executes the body
# TODO: find __init__, execute that
# TODO: handle Record subclasses
definitions_scope = self.scope_chain[:]
class_scope = NewScope(self, definitions_scope, class_type=new_class_type)
with class_scope:
self.generic_visit(node)
def visit_Compare(self, node):
"""
Args:
node:
Returns:
"""
# Handle left and right
left = self.visit(node.left)
comparators = [self.visit(compare) for compare in node.comparators]
# Handle ops
for op, right in zip(node.ops, comparators):
if isinstance(op, (ast.Eq, ast.NotEq, ast.Is, ast.IsNot)):
continue
elif isinstance(op, (ast.Lt, ast.LtE, ast.GtE, ast.Gt)):
if are_types_equal(left, right):
if isinstance(left, ORDERABLE_TYPES):
continue
elif isinstance(op, (ast.In, ast.NotIn)):
if isinstance(right, INDEXABLE_TYPES):
continue
self._issue(incompatible_types(self.locate(), op, left, right, report=self.report))
return BoolType()
def visit_comprehension(self, node):
"""
Args:
node:
"""
self._visit_collection_loop(node)
# Handle ifs, unless they're blank (None in Skulpt :)
if node.ifs:
self.visit_statements(node.ifs)
def visit_Dict(self, node):
"""
Three types of dictionaries
- empty
- uniform type
- record
TODO: Handle records appropriately
"""
result_type = DictType()
if not node.keys:
result_type.empty = True
else:
result_type.empty = False
all_literals = True
keys, values, literals = [], [], []
for key, value in zip(node.keys, node.values):
literal = self.get_literal(key)
key, value = self.visit(key), self.visit(value)
values.append(value)
keys.append(key)
if literal is not None:
literals.append(literal)
else:
all_literals = False
if all_literals:
result_type.literals = literals
result_type.values = values
else:
# Store the visited key/value types (not the raw AST nodes) for non-literal dicts
result_type.keys = keys[0]
result_type.values = values[0]
return result_type
def visit_DictComp(self, node):
"""
Args:
node:
Returns:
"""
# TODO: Handle comprehension scope
for generator in node.generators:
self.visit(generator)
keys = self.visit(node.key)
values = self.visit(node.value)
return DictType(keys=keys, values=values)
def visit_For(self, node):
"""
Args:
node:
"""
was_empty = self._visit_collection_loop(node)
# Handle the bodies
#if not was_empty:
#this_path_id = self.path_chain[0]
#non_empty_path = NewPath(self, this_path_id, "f")
#with non_empty_path:
self.visit_statements(node.body)
self.visit_statements(node.orelse)
def visit_FunctionDef(self, node):
"""
Args:
node:
Returns:
"""
# Name
function_name = node.name
position = self.locate()
definitions_scope = self.scope_chain[:]
def definition(tifa, call_type, call_name, parameters, call_position):
"""
Args:
tifa:
call_type:
call_name:
parameters:
call_position:
Returns:
"""
function_scope = NewScope(self, definitions_scope)
with function_scope:
# Process arguments
args = node.args.args
if len(args) != len(parameters):
self._issue(incorrect_arity(self.locate(), function_name, report=self.report))
# TODO: Handle special types of parameters
for arg, parameter in zip(args, parameters):
name = arg.arg
if arg.annotation:
self.visit(arg.annotation)
annotation = get_pedal_type_from_annotation(arg.annotation, self)
# TODO: Use parameter information to "fill in" empty lists
if isinstance(parameter, ListType) and isinstance(annotation, ListType):
if isinstance(parameter.subtype, UnknownType):
parameter.subtype = annotation.subtype
# TODO: Check that arg.type and parameter type match!
if not are_types_equal(annotation, parameter, True):
self._issue(parameter_type_mismatch(self.locate(), name, annotation, parameter))
if parameter is not None:
parameter = parameter.clone_mutably()
self.create_variable(name, parameter, position)
# Too many arguments
if len(args) < len(parameters):
for undefined_parameter in parameters[len(args):]:
self.create_variable(name, UnknownType(), position)
# Not enough arguments
if len(args) > len(parameters):
for arg in args[len(parameters):]:
if arg.annotation:
self.visit(arg.annotation)
annotation = get_pedal_type_from_annotation(arg.annotation, self)
else:
annotation = UnknownType()
self.create_variable(arg.arg, annotation, position)
self.visit_statements(node.body)
return_state = self.find_variable_scope("*return")
return_value = NoneType()
# TODO: Figure out if we are not returning something when we should
# If the pseudo variable exists, we load it and get its type
if return_state.exists and return_state.in_scope:
return_state = self.load_variable("*return", call_position)
return_value = return_state.type
if node.returns:
# self.visit(node.returns)
returns = get_pedal_type_from_annotation(node.returns, self)
if not are_types_equal(return_value, returns, True):
self._issue(multiple_return_types(return_state.position,
returns.precise_description(),
return_value.precise_description(),
report=self.report))
return return_value
function = FunctionType(definition=definition, name=function_name)
self.store_variable(function_name, function)
if len(self.node_chain) > 2:
self._issue(nested_function_definition(self.locate(), function_name))
return function
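# Worked examples (hypothetical analyzed code, for illustration only) of the checks
# performed inside `definition` above:
#
#     def add(a: int, b: str):
#         return a
#
#     add(1, 2)   # parameter_type_mismatch: annotation str vs. inferred NumType for b
#     add(1)      # incorrect_arity: one argument supplied, two parameters declared
#
# A `def` nested inside another function additionally triggers
# nested_function_definition via the node_chain length check.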
def visit_GeneratorExp(self, node):
"""
Args:
node:
Returns:
"""
# TODO: Handle comprehension scope
for generator in node.generators:
self.visit(generator)
return GeneratorType(self.visit(node.elt))
def visit_If(self, node):
"""
Args:
node:
"""
# Visit the conditional
self.visit(node.test)
if len(node.orelse) == 1 and isinstance(node.orelse[0], ast.Pass):
self._issue(unnecessary_second_branch(self.locate()))
elif len(node.body) == 1 and isinstance(node.body[0], ast.Pass):
if node.orelse:
self._issue(unnecessary_second_branch(self.locate()))
# Visit the bodies
this_path_id = self.path_chain[0]
if_path = NewPath(self, this_path_id, "i")
with if_path:
for statement in node.body:
self.visit(statement)
else_path = NewPath(self, this_path_id, "e")
with else_path:
for statement in node.orelse:
self.visit(statement)
# TODO: Unconditional return
# Combine two paths into one
# Check for any names that are on the IF path
self.merge_paths(this_path_id, if_path.id, else_path.id)
def visit_IfExp(self, node):
"""
Args:
node:
Returns:
"""
# Visit the conditional
self.visit(node.test)
# Visit the body
body = self.visit(node.body)
# Visit the orelse
orelse = self.visit(node.orelse)
if are_types_equal(body, orelse):
return body
# TODO: Union type?
return UnknownType()
def visit_Import(self, node):
"""
Args:
node:
"""
# Handle names
for alias in node.names:
asname = alias.asname or alias.name
module_type = self.load_module(alias.name)
self.store_variable(asname, module_type)
def visit_ImportFrom(self, node):
"""
Args:
node:
"""
# Handle names
for alias in node.names:
if node.module is None:
asname = alias.asname or alias.name
module_type = self.load_module(alias.name)
else:
module_name = node.module
asname = alias.asname or alias.name
module_type = self.load_module(module_name)
name_type = module_type.load_attr(alias.name)
self.store_variable(asname, name_type)
def visit_Lambda(self, node):
"""
Args:
node:
Returns:
"""
# Name
position = self.locate()
definitions_scope = self.scope_chain[:]
def definition(tifa, call_type, call_name, parameters, call_position):
"""
Args:
tifa:
call_type:
call_name:
parameters:
call_position:
Returns:
"""
function_scope = NewScope(self, definitions_scope)
with function_scope:
# Process arguments
args = node.args.args
if len(args) != len(parameters):
self._issue(incorrect_arity(position, "lambda", report=self.report))
# TODO: Handle special types of parameters
for arg, parameter in zip(args, parameters):
name = arg.arg
if parameter is not None:
parameter = parameter.clone_mutably()
self.store_variable(name, parameter, position)
if len(args) < len(parameters):
for undefined_parameter in parameters[len(args):]:
self.store_variable(name, UnknownType(), position)
return_value = self.visit(node.body)
return return_value
return FunctionType(definition=definition)
def visit_List(self, node):
"""
Args:
node:
Returns:
"""
result_type = ListType()
if node.elts:
result_type.empty = False
# TODO: confirm homogenous subtype
for elt in node.elts:
result_type.subtype = self.visit(elt)
else:
result_type.empty = True
return result_type
def visit_ListComp(self, node):
"""
Args:
node:
Returns:
"""
# TODO: Handle comprehension scope
for generator in node.generators:
self.visit(generator)
return ListType(self.visit(node.elt))
def visit_NameConstant(self, node):
"""
Args:
node:
Returns:
"""
value = node.value
if isinstance(value, bool):
return BoolType()
else:
return NoneType()
def visit_Name(self, node):
"""
Args:
node:
Returns:
"""
name = node.id
if name == "___":
self._issue(unconnected_blocks(self.locate()))
if isinstance(node.ctx, ast.Load):
if name == "True" or name == "False":
return BoolType()
elif name == "None":
return NoneType()
else:
variable = self.find_variable_scope(name)
builtin = get_builtin_function(name)
if not variable.exists and builtin:
return builtin
else:
state = self.load_variable(name)
return state.type
else:
variable = self.find_variable_scope(name)
if variable.exists:
return variable.state.type
else:
return UnknownType()
def visit_Num(self, node):
"""
Args:
node:
Returns:
"""
return NumType()
def visit_Constant(self, node) -> Type:
""" Handle new 3.8's Constant node """
return get_pedal_type_from_value(node.value)
def visit_Return(self, node):
"""
Args:
node:
"""
if len(self.scope_chain) == 1:
self._issue(return_outside_function(self.locate(), report=self.report))
# TODO: Unconditional return inside loop
if node.value is not None:
self.return_variable(self.visit(node.value))
else:
self.return_variable(NoneType())
def visit_Set(self, node):
# Fun fact, it's impossible to make a literal empty set
return SetType(subtype=self.visit(node.elts[0]), empty=False)
def visit_SetComp(self, node):
"""
Args:
node:
Returns:
"""
# TODO: Handle comprehension scope
for generator in node.generators:
self.visit(generator)
return SetType(subtype=self.visit(node.elt))
def visit_statements(self, nodes):
"""
Args:
nodes:
Returns:
"""
# TODO: Check for pass in the middle of a series of statements
if any(isinstance(node, ast.Pass) for node in nodes):
pass
return [self.visit(statement) for statement in nodes]
def visit_Str(self, node):
"""
Args:
node:
Returns:
"""
if node.s == "":
return StrType(True)
else:
return StrType(False)
def visit_JoinedStr(self, node):
values = [self.visit(expr) for expr in node.values]
# The result will be all StrType
return StrType(empty=all(n.empty for n in values))
def visit_FormattedValue(self, node):
value = self.visit(node.value)
if isinstance(value, StrType):
return value
else:
return StrType(empty=False)
def visit_Subscript(self, node):
"""
Args:
node:
Returns:
"""
# Handle value
value_type = self.visit(node.value)
# Handle slice
if IS_PYTHON_39 and isinstance(node.slice, ast.Tuple):
# TODO: Do something about extslices (especially since students stumble into this accidentally)
pass
elif not IS_PYTHON_39 and isinstance(node.slice, ast.ExtSlice):
# TODO: Do something about extslices (especially since students stumble into this accidentally)
pass
elif isinstance(node.slice, ast.Slice):
self.visit_Slice(node.slice)
return value_type
else:
if IS_PYTHON_39:
slice = node.slice
else:
slice = node.slice.value
literal = self.get_literal(slice)
if literal is None:
literal = get_pedal_literal_from_pedal_type(self.visit(slice))
result = value_type.index(literal)
# TODO: Is this sufficient? Maybe we should be throwing?
if isinstance(result, UnknownType):
self._issue(invalid_indexing(self.locate(), value_type,
literal.type()))
else:
return result
def visit_Slice(self, node):
""" Handles a slice by visiting its components; cannot return a value
because the slice is always the same type as its value, which is
not available on the Slice node itself. """
if node.lower is not None:
self.visit(node.lower)
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.visit(node.step)
def visit_Tuple(self, node) -> TupleType:
""" Handle Tuple literal """
result_type = TupleType()
if not node.elts:
result_type.empty = True
result_type.subtypes = []
else:
result_type.empty = False
# TODO: confirm homogenous subtype
result_type.subtypes = [self.visit(elt) for elt in node.elts]
return result_type
def visit_UnaryOp(self, node):
"""
Args:
node:
Returns:
"""
# Handle operand
operand = self.visit(node.operand)
if isinstance(node.op, ast.Not):
return BoolType()
elif isinstance(operand, UnknownType):
return UnknownType()
elif type(node.op) in VALID_UNARYOP_TYPES:
op_lookup = VALID_UNARYOP_TYPES[type(node.op)]
if type(operand) in op_lookup:
return op_lookup[type(operand)]()
return UnknownType()
def visit_While(self, node):
"""
Args:
node:
"""
# Visit conditional
self.visit(node.test)
# Visit the bodies
this_path_id = self.path_id
# One path is that we never enter the body
empty_path = NewPath(self, this_path_id, "e")
with empty_path:
pass
# Another path is that we loop through the body and check the test again
body_path = NewPath(self, this_path_id, "w")
with body_path:
for statement in node.body:
self.visit(statement)
# Revisit conditional
self.visit(node.test)
# If there's else bodies (WEIRD) then we should check them afterwards
if node.orelse:
self._issue(else_on_loop_body(self.locate()))
for statement in node.orelse:
self.visit(statement)
# Combine two paths into one
# Check for any names that are on the IF path
self.merge_paths(this_path_id, body_path.id, empty_path.id)
def visit_With(self, node):
"""
Args:
node:
"""
for item in node.items:
type_value = self.visit(item.context_expr)
self.visit(item.optional_vars)
self._walk_target(item.optional_vars, type_value)
# Handle the bodies
self.visit_statements(node.body)
| 33.793886 | 108 | 0.552721 |
2af7c1c4a0e2650a3078f0433382b5b254b6777f | 7,580 | py | Python | sira/modelling/structural.py | GeoscienceAustralia/sira | 01c99acae497fedc971367e0fa1611a90f5c64ef | ["Apache-2.0"] | 1 | 2021-11-17T16:10:56.000Z | 2021-11-17T16:10:56.000Z | sira/modelling/structural.py | GeoscienceAustralia/sira | 01c99acae497fedc971367e0fa1611a90f5c64ef | ["Apache-2.0"] | 10 | 2020-07-28T02:19:22.000Z | 2022-03-04T00:34:40.000Z | sira/modelling/structural.py | GeoscienceAustralia/sira | 01c99acae497fedc971367e0fa1611a90f5c64ef | ["Apache-2.0"] | 1 | 2019-08-22T10:26:38.000Z | 2019-08-22T10:26:38.000Z |
import inspect
from sira.tools.utils import class_getter
# from future.builtins import str, object
# from future.utils import with_metaclass
class NoDefaultException(Exception):
"""
Thrown when a :py:class:`_Base` is created without providing values for one
or more :py:class:`Element`s which do not have default values.
Note that users should never be instantiating or subclassing
:py:class:`_Base` directly. One should extend a class returned by
:py:func:`generate_element_base`, which returns a class which extends
:py:class:`_Base`.
"""
class ValidationError(Exception):
"""
Thrown when validation of some item fails. Some examples of when this may
occur are:
- A value for an :py:class:`Element` is provided which is not
an instance of the type specified for the Element (which is specified
via argument *cls* to :py:meth:`Element.__init__`).
- One of the validators provided for an element (see the argument
*validators* to :py:meth:`Element.__init__`) fails or raises an
exception of this type.
"""
# class AlreadySavedException(Exception):
# """
# Raised if an attempt is made to save a 'Document' which has previously been
# saved.
# """
# pass
class DisallowedElementException(ValueError):
"""
Raised if an attempt is made to define an element with a disallowed
name. Disallowed names are specified by
:py:attr:StructuralMeta.DISALLOWED_FIELDS.
"""
class MultipleBasesOfTypeBaseError(ValueError):
"""
Raised if an attempt is made to define a class which inherits from
multiple classes (``c``) for which ``issubclass(type(c), StructuralMeta)``
is *True*.
The reason to disallow multiple inheritance of said classes is to conform
to the structure of XML, where an element can only have one parent. This may
not turn out to be an issue, as other interpretations of class hierarchies in
the context of XML may be sensible/feasible... but the semantics and
practicalities would need to be considered, so stop this for now and see how
we go.
"""
class Info(str):
"""
Strings that provide 'metadata' on classes. At present, this is only used to
identify immutable strings on a class when they are displayed.
"""
class Element(object):
"""
Represents an element of a model. If a model were represented in a relational
database, this would be analogous to a field in a table.
"""
@staticmethod
def NO_DEFAULT():
"""
A callable that can be used to signal that an Element has no default
value. Simply raises a :py:exc:`NoDefaultException`.
"""
raise NoDefaultException()
def __init__(self, cls, description, default=None, validators=None):
self.cls = cls
self.description = Info(description)
self._default = default
self.validators = validators
@property
def default(self):
if self._default is False:
raise NoDefaultException()
return self._default() if callable(self._default) else self._default
class StructuralMeta(type):
"""
Metaclass for structural
Names of :py:class:`Element`s that cannot be defined on any class ``c`` for
which ``issubclass(type(c), StructuralMeta)`` is *True*. These are names
of elements which are used internally and for the sake of the performance
of attribute lookup, are banned for other use.
"""
DISALLOWED_FIELDS = [
'class',
'predecessor', '_predecessor', '_id',
'_value',
'_attributes']
def __new__(mcs, name, bases, dct):
# check that only one base is instance of _Base
if len([base for base in bases if issubclass(type(base), StructuralMeta)]) > 1:
raise MultipleBasesOfTypeBaseError('Invalid bases in class {}'.format(name))
def extract_params_of_type(clazz):
# extract the parameters
params = {}
for k in list(dct.keys()):
if isinstance(dct[k], clazz):
params[k] = dct.pop(k)
# cannot have a parameter with name class, as this messes with
# serialisation
for field in StructuralMeta.DISALLOWED_FIELDS:
if field in params:
raise DisallowedElementException(
'class {} cannot have Element with name "{}"'.
format(name, field))
return params
dct['__params__'] = extract_params_of_type(Element)
# create a json description of the class
json_desc = {}
for k, v in list(dct['__params__'].items()):
# TODO: put validators in here # noqa: W0511
json_desc[k] = {'class': v.cls}
for k, v in list(extract_params_of_type(Info).items()):
json_desc[k] = {
'class': 'Info',
'value': str(v)}
dct['__json_desc__'] = json_desc
return super(StructuralMeta, mcs).__new__(mcs, name, bases, dct)
def __init__(cls, name, bases, dct):
# We do this here as I prefer to get the module from the class. Not sure
# if it matters in practice, but it feels better. cls_module contains
# the module in which this class is defined and we know that the types
# declared for the Elements of a class are accessible in that module.
cls_module = inspect.getmodule(cls).__name__
cls.__json_desc__['class'] = '.'.join([cls_module, name])
for param in list(cls.__params__.values()):
param.cls_module = cls_module
for k, v in list(cls.__json_desc__.items()):
if k == 'class':
continue
try:
ecls = class_getter([cls_module, v['class']])
if hasattr(ecls, '__json_desc__'):
cls.__json_desc__[k] = ecls.__json_desc__
else:
v['class'] = '.'.join([ecls.__module__, ecls.__name__])
if isinstance(ecls, Element):
try:
default = v.default
except NoDefaultException:
pass
else:
# default = jsonify(default)
if default:
cls.__json_desc__[k]['default'] = default
except Exception: # pylint: disable=broad-except
v['class'] = '.'.join(['__builtin__', v['class']])
super(StructuralMeta, cls).__init__(name, bases, dct)
class Base(metaclass=StructuralMeta):
"""
Base class for all 'model' classes. **This should never be used by clients**
and serves as a base class for dynamically generated classes returned by
:py:func:``, which are designed for use by clients.
"""
def __init__(self, **kwargs):
self._predecessor = kwargs.pop('predecessor', None)
if self._predecessor is None:
# then we provide default values for each element
for k, v in list(self.__params__.items()): # pylint: disable=no-member
if k not in kwargs:
try:
kwargs[k] = v.default
except NoDefaultException as error:
raise ValueError('Must provide value for {}'.format(k)) from error
for k, v in list(kwargs.items()):
setattr(self, k, v)
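# Hypothetical usage sketch (the Algorithm class below is illustrative, not part of sira):
#
#     class Algorithm(Base):
#         name = Element('str', 'Name of the algorithm', default='fast')
#         iterations = Element('int', 'Number of iterations', default=Element.NO_DEFAULT)
#
#     Algorithm(iterations=10)   # ok; `name` falls back to its default
#     Algorithm()                # ValueError: Must provide value for iterations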
| 36.095238 | 90 | 0.608311 |
e3d9df2108310e05b92f1268ca71bcca13e6c0ab | 6,383 | py | Python | os_pausebot_RECOMM.py | Cyb4rUf0/cyberufobvb02 | ebbc212256dc27e5b02f5815a5a82c0a4c1b0b72 | ["MIT"] | null | null | null | os_pausebot_RECOMM.py | Cyb4rUf0/cyberufobvb02 | ebbc212256dc27e5b02f5815a5a82c0a4c1b0b72 | ["MIT"] | null | null | null | os_pausebot_RECOMM.py | Cyb4rUf0/cyberufobvb02 | ebbc212256dc27e5b02f5815a5a82c0a4c1b0b72 | ["MIT"] | 2 | 2021-11-18T17:19:46.000Z | 2021-11-20T02:47:06.000Z |
# Based off Firewatch custsignalmod.py
from tradingview_ta import TA_Handler, Interval, Exchange
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used for directory handling
import glob
import threading
import time
# my helper utils
from helpers.os_utils import(rchop)
from vyacheslav_signalbuy_VolScan import txcolors
MY_EXCHANGE = 'BINANCE'
MY_SCREENER = 'CRYPTO'
SYMBOLS = ['BTCUSDT', 'ETHUSDT']
MY_FIRST_INTERVAL = Interval.INTERVAL_1_MINUTE
MY_SECOND_INTERVAL = Interval.INTERVAL_5_MINUTES
MY_THIRD_INTERVAL = Interval.INTERVAL_15_MINUTES
MY_FOUR_INTERVAL = Interval.INTERVAL_1_HOUR
MY_FIVE_INTERVAL = Interval.INTERVAL_4_HOURS
MY_SIX_INTERVAL = Interval.INTERVAL_1_DAY
TIME_TO_WAIT = 1 # Minutes to wait between analysis
FULL_LOG = True # List analysis result to console
SIGNAL_NAME = 'os_pausebot_RECOMM'
SIGNAL_FILE = 'signals/pausebot.pause'
def analyze():
taMax = 0
taMaxCoin = 'none'
signal_coins = {}
first_analysis = {}
second_analysis = {}
third_analysis = {}
four_analysis = {}
five_analysis = {}
six_analysis = {}
first_handler = {}
second_handler = {}
third_handler = {}
four_handler = {}
five_handler = {}
six_handler = {}
paused = 0
retPaused = False
for symbol in SYMBOLS:
first_handler[symbol] = TA_Handler(
symbol=symbol,
exchange=MY_EXCHANGE,
screener=MY_SCREENER,
interval=MY_FIRST_INTERVAL,
timeout= 10
)
second_handler[symbol] = TA_Handler(
symbol=symbol,
exchange=MY_EXCHANGE,
screener=MY_SCREENER,
interval=MY_SECOND_INTERVAL,
timeout= 10
)
third_handler[symbol] = TA_Handler(
symbol=symbol,
exchange=MY_EXCHANGE,
screener=MY_SCREENER,
interval=MY_THIRD_INTERVAL,
timeout= 10
)
four_handler[symbol] = TA_Handler(
symbol=symbol,
exchange=MY_EXCHANGE,
screener=MY_SCREENER,
interval=MY_FOUR_INTERVAL,
timeout= 10
)
five_handler[symbol] = TA_Handler(
symbol=symbol,
exchange=MY_EXCHANGE,
screener=MY_SCREENER,
interval=MY_FIVE_INTERVAL,
timeout= 10
)
six_handler[symbol] = TA_Handler(
symbol=symbol,
exchange=MY_EXCHANGE,
screener=MY_SCREENER,
interval=MY_SIX_INTERVAL,
timeout= 10
)
for symbol in SYMBOLS:
try:
first_analysis = first_handler[symbol].get_analysis()
second_analysis = second_handler[symbol].get_analysis()
third_analysis = third_handler[symbol].get_analysis()
four_analysis = four_handler[symbol].get_analysis()
five_analysis = five_handler[symbol].get_analysis()
six_analysis = six_handler[symbol].get_analysis()
except Exception as e:
print(f'{SIGNAL_NAME}')
print("Exception:")
print(e)
print(f'Coin: {symbol}')
print(f'First handler: {first_handler[symbol]}')
print(f'Second handler: {second_handler[symbol]}')
print(f'Third handler: {third_handler[symbol]}')
print(f'Four handler: {four_handler[symbol]}')
print(f'Five handler: {five_handler[symbol]}')
print(f'Six handler: {six_handler[symbol]}')
continue
first_recommendation = first_analysis.summary['RECOMMENDATION']
second_recommendation = second_analysis.summary['RECOMMENDATION']
third_recommendation = third_analysis.summary['RECOMMENDATION']
four_recommendation = four_analysis.summary['RECOMMENDATION']
five_recommendation = five_analysis.summary['RECOMMENDATION']
six_recommendation = six_analysis.summary['RECOMMENDATION']
#if FULL_LOG:
#print(f'|{SIGNAL_NAME}| <{symbol}> |First: {first_recommendation}| |Second: {second_recommendation}| Third: {third_recommendation}| Four: {four_recommendation}| Five: {five_recommendation}| Six: {six_recommendation}|')
#print(f'|{SIGNAL_NAME}| <{symbol}> |First: {first_recommendation}| |Second: {second_recommendation}| Third: {third_recommendation}|')
if (first_recommendation == "SELL" or first_recommendation == "STRONG_SELL") and \
(second_recommendation == "SELL" or second_recommendation == "STRONG_SELL"): #and \
#(third_recommendation == "SELL" or third_recommendation == "STRONG_SELL"): #and \
#(four_recommendation == "BUY" or four_recommendation == "STRONG_BUY") and \
#(five_recommendation == "BUY" or five_recommendation == "STRONG_BUY"): #and \
#(six_recommendation == "BUY" or five_recommendation == "STRONG_BUY"):
paused = paused + 1
if FULL_LOG:
#print(f'|{SIGNAL_NAME}| <{symbol}> |First: {first_recommendation}| |Second: {second_recommendation}| Third: {third_recommendation}| Four: {four_recommendation}| Five: {five_recommendation}| Six: {six_recommendation}|')
print(f'|{SIGNAL_NAME}| <{symbol}> |First: {first_recommendation}| |Second: {second_recommendation}|')
#print(f'{txcolors.YELLOW}|{SIGNAL_NAME}|: Buy Signal Detected On <{symbol}>{txcolors.DEFAULT}')
if paused > 0:
print(f'|{SIGNAL_NAME}| Market Alert: <PAUSED BUYING> [{TIME_TO_WAIT}] Minutes For Next Checkup')
retPaused = True
else:
print(f'|{SIGNAL_NAME}| Market OK: <WORKING> [{TIME_TO_WAIT}] Minutes For Next Checkup')
retPaused = False
return retPaused
def do_work():
while True:
try:
if not threading.main_thread().is_alive(): exit()
paused = analyze()
if paused:
with open(SIGNAL_FILE,'a') as f:
f.write('yes')
else:
if os.path.isfile(SIGNAL_FILE):
os.remove(SIGNAL_FILE)
time.sleep((TIME_TO_WAIT*60))
#except Exception as e:
# print(f'{SIGNAL_NAME}: Exception do_work() 1: {e}')
# continue
except KeyboardInterrupt as ki:
continue
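# Consumer-side sketch (assumption: the buying bot polls the same SIGNAL_FILE path):
#
#     if os.path.isfile('signals/pausebot.pause'):
#         # pausebot flagged SELL/STRONG_SELL on the 1m and 5m timeframes: skip new buys
#         return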
| 37.110465 | 231 | 0.626351 |
deb0b68902d12d5797ad6034f160e798cb0797c5 | 3,829 | py | Python | insta/settings.py | osman2491/Insta | f941f2b20f814e1c13fe0f9c02a1a60be3cd2c87 | ["MIT"] | null | null | null | insta/settings.py | osman2491/Insta | f941f2b20f814e1c13fe0f9c02a1a60be3cd2c87 | ["MIT"] | 2 | 2021-06-10T22:33:39.000Z | 2021-09-08T01:39:21.000Z | insta/settings.py | osman2491/Insta | f941f2b20f814e1c13fe0f9c02a1a60be3cd2c87 | ["MIT"] | null | null | null |
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# Application definition
INSTALLED_APPS = [
'instagram.apps.InstagramConfig',
'bootstrap4',
'pyuploadcare.dj',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'insta.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
UPLOADCARE = {
'pub_key': 'aeed84efde73eba99ff6',
'secret': '1110614ad7e895d38955',
}
WSGI_APPLICATION = 'insta.wsgi.application'
# Database
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
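# Illustrative .env the config() calls above expect (placeholder values, not real secrets):
#
#     MODE=dev
#     SECRET_KEY=replace-me
#     DEBUG=True
#     DB_NAME=insta
#     DB_USER=postgres
#     DB_PASSWORD=change-me
#     DB_HOST=127.0.0.1
#     ALLOWED_HOSTS=localhost,127.0.0.1
#     DATABASE_URL=postgres://user:pass@host:5432/insta   # used only when MODE != "dev"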
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
django_heroku.settings(locals())
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
| 26.406897 | 91 | 0.678245 |
dd85358f6ec6cbe0abeff6c5412ca9cbfae83e7a | 6,772 | py | Python | mmdet/apis/test.py | Andrew-Zhu/DyFPN | a74463b59c4ce28253c2449a07c0f6692a0147a1 | ["Apache-2.0"] | 32 | 2021-01-07T10:51:21.000Z | 2022-03-24T02:49:51.000Z | mmdet/apis/test.py | Andrew-Zhu/DyFPN | a74463b59c4ce28253c2449a07c0f6692a0147a1 | ["Apache-2.0"] | 6 | 2021-04-14T11:22:18.000Z | 2022-01-29T03:00:32.000Z | mmdet/apis/test.py | Andrew-Zhu/DyFPN | a74463b59c4ce28253c2449a07c0f6692a0147a1 | ["Apache-2.0"] | 9 | 2021-01-15T15:54:02.000Z | 2022-03-21T07:51:24.000Z |
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results, tensor2imgs
def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
show_score_thr=0.3):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
if show or out_dir:
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result,
show=show,
out_file=out_file,
score_thr=show_score_thr)
# encode mask results
if isinstance(result, tuple):
bbox_results, mask_results = result
encoded_mask_results = encode_mask_results(mask_results)
result = bbox_results, encoded_mask_results
results.append(result)
batch_size = len(data['img_metas'][0].data)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and uses gpu communication for results
collection. In cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result, tuple):
bbox_results, mask_results = result
encoded_mask_results = encode_mask_results(mask_results)
result = bbox_results, encoded_mask_results
results.append(result)
if rank == 0:
batch_size = (
len(data['img_meta'].data)
if 'img_meta' in data else len(data['img_metas'][0].data))
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
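# Usage sketch (assumes the model is already wrapped for distributed inference and
# torch.distributed has been initialised by the launcher):
#
#     results = multi_gpu_test(model, data_loader, tmpdir='./.eval_tmp', gpu_collect=False)
#     rank, _ = get_dist_info()
#     if rank == 0:
#         # only rank 0 receives the gathered list; downstream evaluation is hypothetical
#         dataset.evaluate(results)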
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| 35.455497 | 79 | 0.60632 |
b98fb0fe4a9cbbfce76b2615b56a48427305ab31 | 5,552 | py | Python | homeassistant/components/rflink/cover.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | ["Apache-2.0"] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | homeassistant/components/rflink/cover.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | ["Apache-2.0"] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/rflink/cover.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | ["Apache-2.0"] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z |
"""Support for Rflink Cover devices."""
import logging
import voluptuous as vol
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverEntity
from homeassistant.const import CONF_DEVICES, CONF_NAME, CONF_TYPE, STATE_OPEN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import (
CONF_ALIASES,
CONF_DEVICE_DEFAULTS,
CONF_FIRE_EVENT,
CONF_GROUP,
CONF_GROUP_ALIASES,
CONF_NOGROUP_ALIASES,
CONF_SIGNAL_REPETITIONS,
DEVICE_DEFAULTS_SCHEMA,
RflinkCommand,
)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
TYPE_STANDARD = "standard"
TYPE_INVERTED = "inverted"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_DEVICE_DEFAULTS, default=DEVICE_DEFAULTS_SCHEMA({})
): DEVICE_DEFAULTS_SCHEMA,
vol.Optional(CONF_DEVICES, default={}): vol.Schema(
{
cv.string: {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TYPE): vol.Any(TYPE_STANDARD, TYPE_INVERTED),
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_GROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_NOGROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS): vol.Coerce(int),
vol.Optional(CONF_GROUP, default=True): cv.boolean,
}
}
),
}
)
def entity_type_for_device_id(device_id):
"""Return entity class for protocol of a given device_id.
Async friendly.
"""
entity_type_mapping = {
# KlikAanKlikUit cover have the controls inverted
"newkaku": TYPE_INVERTED
}
protocol = device_id.split("_")[0]
return entity_type_mapping.get(protocol, TYPE_STANDARD)
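# For example (device ids below are hypothetical):
#     entity_type_for_device_id('newkaku_00abcd_1')  # -> TYPE_INVERTED ("newkaku" protocol)
#     entity_type_for_device_id('brel_001122_01')    # -> TYPE_STANDARD (unknown protocol)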
def entity_class_for_type(entity_type):
"""Translate entity type to entity class.
Async friendly.
"""
entity_device_mapping = {
# default cover implementation
TYPE_STANDARD: RflinkCover,
# cover with open/close commands inverted
# like KAKU/COCO ASUN-650
TYPE_INVERTED: InvertedRflinkCover,
}
return entity_device_mapping.get(entity_type, RflinkCover)
def devices_from_config(domain_config):
"""Parse configuration and add Rflink cover devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
# Determine what kind of entity to create, RflinkCover
# or InvertedRflinkCover
if CONF_TYPE in config:
# Remove type from config to not pass it as an argument
# to entity instantiation
entity_type = config.pop(CONF_TYPE)
else:
entity_type = entity_type_for_device_id(device_id)
entity_class = entity_class_for_type(entity_type)
device_config = dict(domain_config[CONF_DEVICE_DEFAULTS], **config)
device = entity_class(device_id, **device_config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rflink cover platform."""
async_add_entities(devices_from_config(config))
class RflinkCover(RflinkCommand, CoverEntity, RestoreEntity):
"""Rflink entity which can switch on/stop/off (eg: cover)."""
async def async_added_to_hass(self):
"""Restore RFLink cover state (OPEN/CLOSE)."""
await super().async_added_to_hass()
if (old_state := await self.async_get_last_state()) is not None:
self._state = old_state.state == STATE_OPEN
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event["command"]
if command in ["on", "allon", "up"]:
self._state = True
elif command in ["off", "alloff", "down"]:
self._state = False
@property
def should_poll(self):
"""No polling available in RFlink cover."""
return False
@property
def is_closed(self):
"""Return if the cover is closed."""
return not self._state
@property
def assumed_state(self):
"""Return True because covers can be stopped midway."""
return True
async def async_close_cover(self, **kwargs):
"""Turn the device close."""
await self._async_handle_command("close_cover")
async def async_open_cover(self, **kwargs):
"""Turn the device open."""
await self._async_handle_command("open_cover")
async def async_stop_cover(self, **kwargs):
"""Turn the device stop."""
await self._async_handle_command("stop_cover")
class InvertedRflinkCover(RflinkCover):
"""Rflink cover that has inverted open/close commands."""
async def _async_send_command(self, cmd, repetitions):
"""Will invert only the UP/DOWN commands."""
_LOGGER.debug("Getting command: %s for Rflink device: %s", cmd, self._device_id)
cmd_inv = {"UP": "DOWN", "DOWN": "UP"}
await super()._async_send_command(cmd_inv.get(cmd, cmd), repetitions)
| 32.852071 | 88 | 0.644813 |
0d03a943f1831d53e76b1bf0e5e3df680a1b1bce | 17,241 | py | Python | biobakery_workflows/files.py | tkuntz-hsph/biobakery_workflows | e861705d939354178362fd5b26e59dcc696489d2 | ["MIT"] | 47 | 2020-08-18T20:51:02.000Z | 2022-03-21T19:43:13.000Z | biobakery_workflows/files.py | tkuntz-hsph/biobakery_workflows | e861705d939354178362fd5b26e59dcc696489d2 | ["MIT"] | 18 | 2020-06-12T21:26:46.000Z | 2022-03-19T08:24:55.000Z | biobakery_workflows/files.py | tkuntz-hsph/biobakery_workflows | e861705d939354178362fd5b26e59dcc696489d2 | ["MIT"] | 15 | 2020-07-24T16:41:46.000Z | 2022-02-22T09:02:01.000Z |
"""
bioBakery Workflows: tasks.files module
A collection of file names used by tasks
Copyright (c) 2017 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import copy
import sys
from anadama2 import reporters
from .utilities import name_files
class FileInfo(object):
def __init__(self, name=None, subfolder=None, tag=None, extension=None, description=None):
# set a list of non-path keywords
self.non_path_keywords=["description"]
# concat multiple strings if present in description
if description and isinstance(description,tuple):
description="\n".join(description)
keywords={"names":name, "subfolder":subfolder, "tag":tag, "extension":extension, "description":description}
self.keywords = {key:value for key, value in keywords.items() if value}
def get_path_keywords(self):
info = copy.copy(self.keywords)
# remove the non-path keywords
for key in self.non_path_keywords:
try:
del info[key]
except KeyError:
pass
return info
def __getitem__(self, key):
""" Return the file info """
try:
value=self.keywords[key]
except KeyError:
value=""
return value
class Workflow(object):
file_info = {}
file_info["log"]=FileInfo(reporters.LOG_FILE_NAME,
description="The AnADAMA2 workflow log.")
@classmethod
def path(cls, name, main_folder="", none_if_not_found=None, error_if_not_found=None, search_for_file=False, **keywords):
merged_keywords = copy.copy(keywords)
merged_keywords.update(cls.file_info[name].get_path_keywords())
file_path=name_files(folder=main_folder, **merged_keywords)
# if the file is not found, then look in the input folder
if not os.path.isfile(file_path) and search_for_file:
file_name = cls.filename(name)
file_path=os.path.join(main_folder, file_name)
# if set, error if the file does not exist
if error_if_not_found and not os.path.isfile(file_path):
message="\nERROR: Unable to find file: "+file_path
desc=cls.description(name)
if desc:
message+="\n\nFile description:\n"+desc
sys.exit(message)
# if set, check if the file exists, if not return None
if none_if_not_found and not os.path.isfile(file_path):
file_path = None
return file_path
@classmethod
def description(cls,name):
try:
desc=cls.file_info[name].keywords["description"]
except (KeyError, AttributeError):
desc=""
return desc
@classmethod
def filename(cls, name):
try:
fname=cls.file_info[name].keywords["names"]
except (KeyError, AttributeError):
fname=""
return fname
@classmethod
def list_file_path_description(cls,folder,input_files):
""" List the file names and descriptions in a format to be used in an argument help description """
desc=""
for required in input_files:
desc+="\n\n".join(["* "+cls.path(name,folder)+ " ( " + required + " )\n-- "+cls.description(name) for name in input_files[required]])+"\n"
return desc
@classmethod
def list_file_description(cls,input_files):
""" List the file names and descriptions in a format to be used in an argument help description """
desc=""
for required in input_files:
desc+="\n".join(["* "+cls.filename(name)+ " ( " + required + " )\n-- "+cls.description(name)+"\n\n" for name in input_files[required]])+"\n"
return desc
class ShotGun(Workflow):
""" A collection of information of folders/files created by the shotgun tasks """
file_info=copy.copy(Workflow.file_info)
# set the folder names for wmgx_wmtx data workflows
wmgx_folder_name="whole_metagenome_shotgun"
wmtx_folder_name="whole_metatranscriptome_shotgun"
# set the kneaddata file name
file_info["kneaddata_read_counts"]=FileInfo("kneaddata_read_count_table.tsv",subfolder=os.path.join("kneaddata","merged"),
description=("A tab-delimited file with samples as rows and read counts as columns.",
"This file is generated by compiling information from the KneadData log files. ",
"The file contains read counts after each step of the Kneaddata workflow. ",
"Depending on the input to Kneaddata this file will contain counts for ",
"single or paired/orphan reads. The read counts from the filtering steps ",
"are for filtering applied in a serial manner with the reads that do not ",
"map to the first reference used as input in filtering with the next ",
"reference database continuing until all databases have been run."))
# set the taxonomy file names
file_info["taxonomic_profile"]=FileInfo("metaphlan_taxonomic_profiles.tsv",subfolder=os.path.join("metaphlan","merged"),
description=("A tab-delimited file with samples as columns and relative abundance as rows.",
"This file contains the merged taxonomic profiles computed by MetaPhlAn for all samples."))
file_info["species_counts"]=FileInfo("metaphlan_species_counts_table.tsv",subfolder=os.path.join("metaphlan","merged"),
description=("A tab-delimited file with samples as rows and counts as columns.",
"This file contains the counts of total species for each sample using the ",
"species identified by MetaPhlAn."))
# set the merged feature file names
file_info["genefamilies"]=FileInfo("genefamilies.tsv",subfolder=os.path.join("humann","merged"),
description=("A tab-delimited file with samples as columns and gene families ",
"as rows. This file is a merged set of gene families for all samples ",
"computed by HUMAnN. This file contains stratified counts as RPKs."))
file_info["ecs"]=FileInfo("ecs.tsv", subfolder=os.path.join("humann","merged"),
description=("A tab-delimited file with samples as columns and ecs as rows. ",
"This file is a merged set of ecs for all samples generated from the gene ",
"families computed by HUMAnN. This file contains stratified counts as RPKs."))
file_info["pathabundance"]=FileInfo("pathabundance.tsv", subfolder=os.path.join("humann","merged"),
description=("A tab-delimited file with samples as columns and pathways ",
"as rows. This file is a merged set of pathway abundances for all ",
"samples computed by HUMAnN. This file contains stratified counts ",
"of non-normalized abundances."))
# set the normed feature file names
file_info["genefamilies_relab"]=FileInfo("genefamilies_relab.tsv", subfolder=os.path.join("humann","merged"))
file_info["ecs_relab"]=FileInfo("ecs_relab.tsv", subfolder=os.path.join("humann","merged"),
description=("A tab-delimited file with samples as columns and ECs as rows.",
"This file is a merged set of EC abundances for all samples computed ",
"by HUMAnN. This file contains relative abundances."))
file_info["pathabundance_relab"]=FileInfo("pathabundance_relab.tsv", subfolder=os.path.join("humann","merged"),
description=("A tab-delimited file with samples as columns and pathways ",
"as rows. This file is a merged set of pathway abundances for all ",
"samples computed by HUMAnN. This file contains stratified counts ",
"of relative abundances."))
# set the feature count file names
file_info["genefamilies_relab_counts"]=FileInfo("humann_genefamilies_relab_counts.tsv", subfolder=os.path.join("humann","counts"))
file_info["ecs_relab_counts"]=FileInfo("humann_ecs_relab_counts.tsv", subfolder=os.path.join("humann","counts"))
file_info["pathabundance_relab_counts"]=FileInfo("humann_pathabundance_relab_counts.tsv", subfolder=os.path.join("humann","counts"))
# set the all feature counts file names
file_info["feature_counts"]=FileInfo("humann_feature_counts.tsv", subfolder=os.path.join("humann","counts"),
description=("A tab-delimited file with samples as rows and features ",
"as columns. This file includes the total feature counts (non-stratified)",
"for the features computed by HUMAnN (genes, ecs, and pathways)."))
file_info["humann_read_counts"]=FileInfo("humann_read_and_species_count_table.tsv", subfolder=os.path.join("humann","counts"),
description=("A tab-delimited file with samples as rows and counts as columns.",
"This file was created using the HUMAnN logs. It includes the total number ",
"of species used to generate the custom database, the total number of initial",
"reads, and the total reads aligning for both search steps."))
# set the names for the rna/dna normed files
file_info["genefamilies_norm_ratio"]=FileInfo("rna_dna_relative_expression_unstratified.tsv",subfolder=os.path.join("humann","rna_dna_norm","genes"),
description=("A tab-delimited file with samples as columns and genes as rows. ",
"This file includes the normalized RNA abundances as a ratio to DNA abundance. ",
"This file does not include stratified features."))
file_info["ecs_norm_ratio"]=FileInfo("rna_dna_relative_expression_unstratified.tsv",subfolder=os.path.join("humann","rna_dna_norm","ecs"),
description=("A tab-delimited file with samples as columns and ecs as rows. ",
"This file includes the normalized RNA abundances as a ratio to DNA abundance. ",
"This file does not include stratified features."))
file_info["paths_norm_ratio"]=FileInfo("rna_dna_relative_expression_unstratified.tsv",subfolder=os.path.join("humann","rna_dna_norm","paths"),
description=("A tab-delimited file with samples as columns and pathways as rows. ",
"This file includes the normalized RNA abundances as a ratio to DNA abundance. ",
"This file does not include stratified features."))
class ShotGunVis(Workflow):
""" A collection of information of folders/files created by the shotgun vis templates """
file_info=copy.copy(Workflow.file_info)
file_info["microbial_counts"]=FileInfo("microbial_counts_table.tsv",
description="A tab-delimited file with samples as rows and ratios as "+\
"columns. Includes the proportion of reads remaining after "+\
"trimming and filtering in the quality control workflow.")
file_info["rna_microbial_counts"]=FileInfo("rna_microbial_counts_table.tsv",
description="A tab-delimited file with RNA samples as rows and ratios as "+\
"columns. Includes the proportion of reads remaining after "+\
"trimming and filtering in the quality control workflow.")
file_info["qc_counts"]=FileInfo("qc_counts_table.tsv",
description="A tab-delimited file with samples as rows and read counts "+\
"as columns. Includes the read counts for trimming and filtering steps "+\
"in the quality control workflow. The reads are single end.")
file_info["qc_counts_paired"]=FileInfo("qc_counts_pairs_table.tsv",
description="A tab-delimited file with samples as rows and read counts "+\
"as columns. Includes the read counts for trimming and filtering steps "+\
"in the quality control workflow. The reads are paired end and these "+\
"counts are only for pairs.")
file_info["qc_counts_orphan"]=FileInfo("qc_counts_orphans_table.tsv",
description="A tab-delimited file with samples as rows and read counts "+\
"as columns. Includes the read counts for trimming and filtering steps "+\
"in the quality control workflow. The reads are paired end and these "+\
"counts are only for orphans.")
file_info["rna_qc_counts_paired"]=FileInfo("rna_qc_counts_pairs_table.tsv",
description="A tab-delimited file with RNA samples as rows and read counts "+\
"as columns. Includes the read counts for trimming and filtering steps "+\
"in the quality control workflow. The reads are paired end and these "+\
"counts are only for pairs.")
file_info["rna_qc_counts_orphan"]=FileInfo("rna_qc_counts_orphans_table.tsv",
description="A tab-delimited file with RNA samples as rows and read counts "+\
"as columns. Includes the read counts for trimming and filtering steps "+\
"in the quality control workflow. The reads are paired end and these "+\
"counts are only for orphans.")
file_info["taxa_counts"]=FileInfo("taxa_counts_table.tsv",
description="A tab-delimited file with samples as rows and counts as "+\
"columns. These are the total number of species/genera identified for each "+\
"sample before and after filtering.")
class SixteenS(Workflow):
""" A collection of information of folders/files created by the 16s tasks """
file_info=copy.copy(Workflow.file_info)
# set the names for the otu table and read count files
file_info["otu_table_closed_reference"]=FileInfo("all_samples_taxonomy_closed_reference.tsv",
description=("A tab-delimited file with samples/taxonomy as columns and taxonomy as rows. ",
"First column is the OTU id and the last column is the taxonomy. The remaining",
"columns are sample names. Values are counts."))
file_info["otu_table_open_reference"]=FileInfo("all_samples_taxonomy_open_reference.tsv",
description=("A tab-delimited file with samples/taxonomy as columns and taxonomy as rows. ",
"First column is the OTU id and the last column is the taxonomy. The remaining",
"columns are sample names. Values are counts. All OTUs without taxonomy are labeled Unclassified."))
file_info["read_count_table"]=FileInfo("all_samples_read_counts.tsv",
description=("A tab-delimited file with samples as rows and counts as columns. ",
"The counts included are the original read count, total number of reads ",
"mapping to an OTU with known taxonomy, and total reads mapping to an ",
"unclassified OTU."))
file_info["eestats2"]=FileInfo("all_samples_eestats2.txt",
description=("A file with maxee as columns and read lengths as rows."))
file_info["msa_nonchimera"]=FileInfo("all_samples_clustalo_aligned_nonchimera.fasta",
description=("A multiple sequence alignment file generated from the nonchimera sequences ",
"using Clustalo."))
file_info["msa_closed_reference"]=FileInfo("all_samples_clustalo_aligned_closed_reference.fasta",
description=("A multiple sequence alignment file generated from the closed reference sequences ",
"using Clustalo."))
# set the names for the otu tables and read count files
file_info["error_ratesF"]=FileInfo("Error_rates_per_sample_FWD.png",
description=("Plots of forward read error rates in dada2 workflow for each sample"))
file_info["error_ratesR"]=FileInfo("Error_rates_per_sample_REV.png",
description=("Plots of reverse read error rates in dada2 workflow for each sample"))
file_info["readF_qc"]=FileInfo("FWD_read_plot.png",
description=("Plots of quality of forward reads for each sample"))
file_info["readR_qc"]=FileInfo("REV_read_plot.png",
description=("Plots of quality of reverse reads for each sample"))
file_info["counts_each_step"]=FileInfo("Read_counts_at_each_step.tsv",
description=("A tab-delimited file with samples as rows and counts as columns. ",
"The counts included in each step of workflow process"))
file_info["filtN"]=FileInfo("filtN",
description=("Folder with N filtered files"))
| 56.343137 | 153 | 0.687779 |
ce72d95f6a97f261ee3d0db2eb7691bfd5f6a51b | 6,163 | py | Python | ansible/modules/hashivault/hashivault_pki_cert_issue.py | headwest/ansible-modules-hashivault | d5742daf0d931a1c7eb053c1fbfc22fd2aa5607b | ["MIT"] | 1 | 2020-09-08T05:49:24.000Z | 2020-09-08T05:49:24.000Z | ansible/modules/hashivault/hashivault_pki_cert_issue.py | headwest/ansible-modules-hashivault | d5742daf0d931a1c7eb053c1fbfc22fd2aa5607b | ["MIT"] | null | null | null | ansible/modules/hashivault/hashivault_pki_cert_issue.py | headwest/ansible-modules-hashivault | d5742daf0d931a1c7eb053c1fbfc22fd2aa5607b | ["MIT"] | 2 | 2020-09-09T03:42:29.000Z | 2020-09-09T03:43:26.000Z |
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import check_secrets_engines
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import check_pki_role
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = r'''
---
module: hashivault_pki_cert_issue
version_added: "4.5.0"
short_description: Hashicorp Vault PKI Generate Certificate
description:
- This module generates a new set of credentials (private key and certificate) based on the role named in the
module.
- The issuing CA certificate is returned as well, so that only the root CA need be in a client's trust store.
options:
role:
        required: true
description:
- Specifies the name of the role to create.
common_name:
        required: true
description:
- Specifies the requested CN for the certificate. If the CN is allowed by role policy, it will be issued.
mount_point:
default: pki
description:
            - Location where the secrets engine is mounted, also known as the path.
extra_params:
description:
- "Collection of properties from pki role U(https://www.vaultproject.io/api-docs/secret/pki#parameters-6)"
type: dict
suboptions:
alt_names:
type: string
description:
- Specifies requested Subject Alternative Names, in a comma-delimited list. These can be host names or
email addresses; they will be parsed into their respective fields. If any requested names do not match
role policy, the entire request will be denied.
ip_sans:
type: string
description:
- Specifies requested IP Subject Alternative Names, in a comma-delimited list. Only valid if the role
allows IP SANs (which is the default).
uri_sans:
type: string
description:
- Specifies the requested URI Subject Alternative Names, in a comma-delimited list.
other_sans:
type: string
description:
- Specifies custom OID/UTF8-string SANs. These must match values specified on the role in
allowed_other_sans (see role creation for allowed_other_sans globbing rules). The format is the same
as OpenSSL <oid>;<type>:<value> where the only current valid type is UTF8. This can be a
comma-delimited list or a JSON string slice.
ttl:
type: string
description:
- Specifies requested Time To Live. Cannot be greater than the role's max_ttl value. If not provided,
the role's ttl value will be used. Note that the role values default to system values if not
explicitly set.
format:
type: string
description:
- Specifies the format for returned data. Can be pem, der, or pem_bundle; defaults to pem. If der, the
output is base64 encoded. If pem_bundle, the certificate field will contain the private key and
certificate, concatenated; if the issuing CA is not a Vault-derived self-signed root, this will be
included as well.
private_key_format:
type: string
description:
- Specifies the format for marshaling the private key. Defaults to der which will return either
base64-encoded DER or PEM-encoded DER, depending on the value of format. The other option is pkcs8
which will return the key marshalled as PEM-encoded PKCS8.
exclude_cn_from_sans:
type: bool
description:
- If true, the given common_name will not be included in DNS or Email Subject Alternate Names
(as appropriate). Useful if the CN is not a hostname or email address, but is instead some
human-readable identifier.
extends_documentation_fragment:
- hashivault
'''
EXAMPLES = r'''
---
- hosts: localhost
tasks:
- hashivault_pki_cert_issue:
role: 'tester'
common_name: 'test.example.com'
register: cert
- debug: msg="{{ cert }}"
'''
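# A hedged, illustrative variant of the example above (not taken from the module
# documentation): it exercises a couple of the documented extra_params suboptions.
# The host names and TTL below are placeholders.
#
# - hashivault_pki_cert_issue:
#     role: 'tester'
#     common_name: 'test.example.com'
#     mount_point: 'pki'
#     extra_params:
#       alt_names: 'alt1.example.com,alt2.example.com'
#       ttl: '72h'
#   register: cert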
def main():
argspec = hashivault_argspec()
argspec['role'] = dict(required=True, type='str')
argspec['common_name'] = dict(required=True, type='str')
argspec['extra_params'] = dict(required=False, type='dict', default={})
argspec['mount_point'] = dict(required=False, type='str', default='pki')
module = hashivault_init(argspec)
result = hashivault_pki_cert_issue(module)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
@hashiwrapper
def hashivault_pki_cert_issue(module):
params = module.params
client = hashivault_auth_client(params)
role = params.get('role').strip('/')
common_name = params.get('common_name')
extra_params = params.get('extra_params')
mount_point = params.get('mount_point').strip('/')
# check if engine is enabled
_, err = check_secrets_engines(module, client)
if err:
return err
if not check_pki_role(name=role, mount_point=mount_point, client=client):
return {'failed': True, 'rc': 1, 'msg': 'role not found or permission denied'}
result = {"changed": False, "rc": 0}
try:
result['data'] = client.secrets.pki.generate_certificate(name=role, common_name=common_name,
extra_params=extra_params,
mount_point=mount_point).get('data')
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result
if __name__ == '__main__':
main()
| 41.362416
| 120
| 0.63719
|
3444cbdca63ea4bbc90f3a0d9f66bb71aac1d748
| 14,792
|
py
|
Python
|
astroquery/utils/commons.py
|
iskren-y-g/astroquery
|
8248ba8fd0aa3a4c8221db729a127db047e18f4e
|
[
"BSD-3-Clause"
] | 6
|
2018-10-29T22:00:17.000Z
|
2021-07-18T14:33:39.000Z
|
astroquery/utils/commons.py
|
iskren-y-g/astroquery
|
8248ba8fd0aa3a4c8221db729a127db047e18f4e
|
[
"BSD-3-Clause"
] | 3
|
2020-02-26T15:42:55.000Z
|
2020-03-30T13:56:06.000Z
|
astroquery/utils/commons.py
|
iskren-y-g/astroquery
|
8248ba8fd0aa3a4c8221db729a127db047e18f4e
|
[
"BSD-3-Clause"
] | 3
|
2019-07-08T21:30:17.000Z
|
2021-04-15T08:24:28.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Common functions and classes that are required by all query classes.
"""
import re
import warnings
import os
import shutil
import socket
import requests
from six.moves.urllib_error import URLError
import six
import astropy.units as u
from astropy import coordinates as coord
from collections import OrderedDict
from astropy.utils import minversion
import astropy.utils.data as aud
from astropy.io import fits, votable
from astropy.coordinates import BaseCoordinateFrame
from ..exceptions import TimeoutError, InputWarning
from .. import version
def ICRSCoordGenerator(*args, **kwargs):
return coord.SkyCoord(*args, frame='icrs', **kwargs)
def GalacticCoordGenerator(*args, **kwargs):
return coord.SkyCoord(*args, frame='galactic', **kwargs)
def FK5CoordGenerator(*args, **kwargs):
return coord.SkyCoord(*args, frame='fk5', **kwargs)
def FK4CoordGenerator(*args, **kwargs):
return coord.SkyCoord(*args, frame='fk4', **kwargs)
ICRSCoord = coord.SkyCoord
CoordClasses = (coord.SkyCoord, BaseCoordinateFrame)
__all__ = ['send_request',
'parse_coordinates',
'TableList',
'suppress_vo_warnings',
'validate_email',
'ASTROPY_LT_4_0',
'ASTROPY_LT_4_1']
ASTROPY_LT_4_0 = not minversion('astropy', '4.0')
ASTROPY_LT_4_1 = not minversion('astropy', '4.1')
def send_request(url, data, timeout, request_type='POST', headers={},
**kwargs):
"""
    A utility function that posts HTTP requests to a remote server
and returns the HTTP response.
Parameters
----------
url : str
The URL of the remote server
data : dict
A dictionary representing the payload to be posted via the HTTP request
timeout : int, quantity_like
Time limit for establishing successful connection with remote server
request_type : str
options are 'POST' (default) and 'GET'. Determines whether to perform
an HTTP POST or an HTTP GET request
headers : dict
POST or GET headers. user-agent will be set to
astropy:astroquery.version
Returns
-------
response : `requests.Response`
Response object returned by the remote server
"""
headers['User-Agent'] = ('astropy:astroquery.{vers}'
.format(vers=version.version))
if hasattr(timeout, "unit"):
warnings.warn("Converting timeout to seconds and truncating "
"to integer.", InputWarning)
timeout = int(timeout.to(u.s).value)
try:
if request_type == 'GET':
response = requests.get(url, params=data, timeout=timeout,
headers=headers, **kwargs)
elif request_type == 'POST':
response = requests.post(url, data=data, timeout=timeout,
headers=headers, **kwargs)
else:
raise ValueError("request_type must be either 'GET' or 'POST'.")
response.raise_for_status()
return response
except requests.exceptions.Timeout:
raise TimeoutError("Query timed out, time elapsed {time}s".
format(time=timeout))
except requests.exceptions.RequestException as ex:
raise Exception("Query failed: {0}\n".format(ex))
def radius_to_unit(radius, unit='degree'):
"""
    Helper function: Parse a radius, then return its value in the
    requested unit (degrees by default)
Parameters
----------
radius : str or `~astropy.units.Quantity`
The radius of a region
Returns
-------
    Floating point scalar value of the radius in the requested unit
"""
rad = coord.Angle(radius)
if isinstance(unit, six.string_types):
if hasattr(rad, unit):
return getattr(rad, unit)
elif hasattr(rad, f"{unit}s"):
return getattr(rad, f"{unit}s")
return rad.to(unit).value
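# Illustrative calls (assumptions, not astroquery doctests): both expressions
# below should evaluate to 0.5 because the default target unit is 'degree'.
def _example_radius_to_unit():
    from_string = radius_to_unit('0.5 deg')        # string parsed by coord.Angle
    from_quantity = radius_to_unit(30 * u.arcmin)  # Quantity converted to degrees
    return from_string, from_quantity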
def parse_coordinates(coordinates):
"""
Takes a string or astropy.coordinates object. Checks if the
string is parsable as an `astropy.coordinates`
object or is a name that is resolvable. Otherwise asserts
that the argument is an astropy.coordinates object.
Parameters
----------
coordinates : str or `astropy.coordinates` object
Astronomical coordinate
Returns
-------
coordinates : a subclass of `astropy.coordinates.BaseCoordinateFrame`
Raises
------
astropy.units.UnitsError
TypeError
"""
if isinstance(coordinates, six.string_types):
try:
c = ICRSCoordGenerator(coordinates)
warnings.warn("Coordinate string is being interpreted as an "
"ICRS coordinate.", InputWarning)
except u.UnitsError:
warnings.warn("Only ICRS coordinates can be entered as "
"strings.\n For other systems please use the "
"appropriate astropy.coordinates object.", InputWarning)
raise u.UnitsError
except ValueError as err:
if isinstance(err.args[1], u.UnitsError):
try:
c = ICRSCoordGenerator(coordinates, unit='deg')
warnings.warn("Coordinate string is being interpreted as an "
"ICRS coordinate provided in degrees.", InputWarning)
except ValueError:
c = ICRSCoord.from_name(coordinates)
else:
c = ICRSCoord.from_name(coordinates)
elif isinstance(coordinates, CoordClasses):
if hasattr(coordinates, 'frame'):
c = coordinates
else:
# Convert the "frame" object into a SkyCoord
c = coord.SkyCoord(coordinates)
else:
raise TypeError("Argument cannot be parsed as a coordinate")
return c
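# Hedged usage sketch (assumed behaviour, not an official doctest): a coordinate
# string with explicit units is parsed as an ICRS SkyCoord, while an existing
# SkyCoord instance is passed through unchanged.
def _example_parse_coordinates():
    from_string = parse_coordinates("00h42m44.3s +41d16m9s")
    from_object = parse_coordinates(coord.SkyCoord(10.68458, 41.26917, unit="deg"))
    return from_string, from_object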
def coord_to_radec(coordinate):
"""
Wrapper to turn any astropy coordinate into FK5 RA in Hours and FK5 Dec in
degrees
This is a hack / temporary wrapper to deal with the unstable astropy API
(it may be wise to remove this hack since it's not clear that the old
coordinate API can even do transforms)
"""
C = coordinate.transform_to('fk5')
if hasattr(C.ra, 'hour'):
ra = C.ra.hour
elif hasattr(C.ra, 'hourangle'):
ra = C.ra.hourangle
else:
raise ValueError("API Error: RA cannot be converted to hour "
"or hourangle.")
dec = C.dec.degree
return ra, dec
class TableList(list):
"""
    A class that inherits from `list` but includes some pretty-printing methods
for an OrderedDict of `astropy.table.Table` objects.
HINT: To access the tables by # instead of by table ID:
>>> t = TableList([('a',1),('b',2)])
>>> t[1]
2
>>> t['b']
2
"""
def __init__(self, inp):
if not isinstance(inp, OrderedDict):
try:
inp = OrderedDict(inp)
except (TypeError, ValueError):
raise ValueError("Input to TableList must be an OrderedDict "
"or list of (k,v) pairs")
self._dict = inp
super(TableList, self).__init__(inp.values())
def __getitem__(self, key):
if isinstance(key, int):
# get the value in the (key,value) pair
return super(TableList, self).__getitem__(key)
elif key in self._dict:
return self._dict[key]
else:
raise TypeError("TableLists can only be indexed with the "
"named keys and integers.")
def __setitem__(self, value):
raise TypeError("TableList is immutable.")
def __getslice__(self, slice):
return list(self.values())[slice]
def keys(self):
return list(self._dict.keys())
def values(self):
return list(self._dict.values())
def __repr__(self):
"""
Overrides the `OrderedDict.__repr__` method to return a simple summary
of the `TableList` object.
"""
return self.format_table_list()
def format_table_list(self):
"""
        Return a string listing the names of all `astropy.table.Table`
        objects, with their respective numbers of rows and columns,
        contained in the `TableList` instance.
"""
ntables = len(list(self.keys()))
if ntables == 0:
return "Empty TableList"
header_str = "TableList with {keylen} tables:".format(keylen=ntables)
body_str = "\n".join(["\t'{t_number}:{t_name}' with {ncol} column(s) "
"and {nrow} row(s) "
.format(t_number=t_number, t_name=t_name,
nrow=len(self[t_number]),
ncol=len(self[t_number].colnames))
for t_number, t_name in enumerate(self.keys())])
return "\n".join([header_str, body_str])
def print_table_list(self):
print(self.format_table_list())
def pprint(self, **kwargs):
""" Helper function to make API more similar to astropy.Tables """
if kwargs != {}:
warnings.warn("TableList is a container of astropy.Tables.", InputWarning)
self.print_table_list()
def _is_coordinate(coordinates):
"""
Returns `True` if coordinates can be parsed via `astropy.coordinates`
and `False` otherwise.
Parameters
----------
coordinates : str or `astropy.coordinates` object
The target around which to search. It may be specified as a
string in which case it is resolved using online services or as
the appropriate `astropy.coordinates` object. ICRS coordinates
may also be entered as strings as specified in the
`astropy.coordinates` module.
Returns
-------
bool
"""
if hasattr(coordinates, 'fk5'):
        # it's coordinate-like enough
return True
try:
ICRSCoordGenerator(coordinates)
return True
except ValueError:
return False
def suppress_vo_warnings():
"""
Suppresses all warnings of the class
`astropy.io.votable.exceptions.VOWarning`.
"""
warnings.filterwarnings("ignore", category=votable.exceptions.VOWarning)
def validate_email(email):
"""
E-mail address validation. Uses validate_email if available, else a simple
regex that will let through some invalid e-mails but will catch the most
common violators.
"""
try:
import validate_email
return validate_email.validate_email(email)
except ImportError:
return bool(re.compile(r'^\S+@\S+\.\S+$').match(email))
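# Illustrative check (an assumption about the fallback regex): a plain address
# such as "user@example.com" passes, while a string without an "@" does not.
def _example_validate_email():
    return validate_email("user@example.com"), validate_email("not-an-address")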
class FileContainer:
"""
A File Object container, meant to offer lazy access to downloaded FITS
files.
"""
def __init__(self, target, **kwargs):
kwargs.setdefault('cache', True)
self._target = target
self._timeout = kwargs.get('remote_timeout', aud.conf.remote_timeout)
if (os.path.splitext(target)[1] == '.fits' and not
('encoding' in kwargs and kwargs['encoding'] == 'binary')):
warnings.warn("FITS files must be read as binaries; error is "
"likely.", InputWarning)
self._readable_object = get_readable_fileobj(target, **kwargs)
def get_fits(self):
"""
Assuming the contained file is a FITS file, read it
and return the file parsed as FITS HDUList
"""
filedata = self.get_string()
if len(filedata) == 0:
raise TypeError("The file retrieved was empty.")
self._fits = fits.HDUList.fromstring(filedata)
return self._fits
def save_fits(self, savepath, link_cache='hard'):
"""
Save a FITS file to savepath
Parameters
----------
savepath : str
The full path to a FITS filename, e.g. "file.fits", or
"/path/to/file.fits".
link_cache : 'hard', 'sym', or False
Try to create a hard or symbolic link to the astropy cached file?
If the system is unable to create a hardlink, the file will be
copied to the target location.
"""
self.get_fits()
target_key = str(self._target)
# There has been some internal refactoring in astropy.utils.data
# so we do this check. Update when minimum required astropy changes.
if ASTROPY_LT_4_0:
if not aud.is_url_in_cache(target_key):
raise IOError("Cached file not found / does not exist.")
target = aud.download_file(target_key, cache=True)
else:
target = aud.download_file(target_key, cache=True, sources=[])
if link_cache == 'hard':
try:
os.link(target, savepath)
except (IOError, OSError, AttributeError):
shutil.copy(target, savepath)
elif link_cache == 'sym':
try:
os.symlink(target, savepath)
except AttributeError:
raise OSError('Creating symlinks is not possible on this OS.')
else:
shutil.copy(target, savepath)
def get_string(self):
"""
Download the file as a string
"""
if not hasattr(self, '_string'):
try:
with self._readable_object as f:
data = f.read()
self._string = data
except URLError as e:
if isinstance(e.reason, socket.timeout):
raise TimeoutError("Query timed out, time elapsed {t}s".
format(t=self._timeout))
else:
raise e
return self._string
def get_stringio(self):
"""
Return the file as an io.StringIO object
"""
s = self.get_string()
# TODO: replace with six.BytesIO
try:
return six.BytesIO(s)
except TypeError:
return six.StringIO(s)
def __repr__(self):
if hasattr(self, '_fits'):
return f"Downloaded FITS file: {self._fits!r}"
else:
return f"Downloaded object from URL {self._target} with ID {id(self._readable_object)}"
def get_readable_fileobj(*args, **kwargs):
"""
Overload astropy's get_readable_fileobj so that we can safely monkeypatch
it in astroquery without affecting astropy core functionality
"""
return aud.get_readable_fileobj(*args, **kwargs)
def parse_votable(content):
"""
Parse a votable in string format
"""
tables = votable.parse(six.BytesIO(content), pedantic=False)
return tables
| 31.539446
| 99
| 0.600595
|
9f9a89e579c15d240052e8c06372ca89f0627875
| 8,096
|
py
|
Python
|
tests/test_tracer.py
|
pelotoncycle/dd-trace-py
|
b5254016dc42185eebfadce8dc634003408439d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_tracer.py
|
pelotoncycle/dd-trace-py
|
b5254016dc42185eebfadce8dc634003408439d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_tracer.py
|
pelotoncycle/dd-trace-py
|
b5254016dc42185eebfadce8dc634003408439d7
|
[
"BSD-3-Clause"
] | 2
|
2017-05-27T05:58:36.000Z
|
2019-02-07T13:38:53.000Z
|
"""
Tests for Tracer and utilities.
"""
import time
from nose.tools import assert_raises, eq_
from unittest.case import SkipTest
from ddtrace.encoding import JSONEncoder, MsgpackEncoder
from ddtrace.tracer import Tracer
from ddtrace.writer import AgentWriter
def test_tracer_vars():
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
# explicit vars
s = tracer.trace("a", service="s", resource="r", span_type="t")
eq_(s.service, "s")
eq_(s.resource, "r")
eq_(s.span_type, "t")
s.finish()
# defaults
s = tracer.trace("a")
eq_(s.service, None)
eq_(s.resource, "a") # inherits
eq_(s.span_type, None)
def test_tracer():
# add some dummy tracing code.
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
sleep = 0.05
def _mix():
with tracer.trace("cake.mix"):
time.sleep(sleep)
def _bake():
with tracer.trace("cake.bake"):
time.sleep(sleep)
def _make_cake():
with tracer.trace("cake.make") as span:
span.service = "baker"
span.resource = "cake"
_mix()
_bake()
# let's run it and make sure all is well.
assert not writer.spans
_make_cake()
spans = writer.pop()
assert spans, "%s" % spans
eq_(len(spans), 3)
spans_by_name = {s.name:s for s in spans}
eq_(len(spans_by_name), 3)
make = spans_by_name["cake.make"]
assert make.span_id
assert make.parent_id is None
assert make.trace_id
for other in ["cake.mix", "cake.bake"]:
s = spans_by_name[other]
eq_(s.parent_id, make.span_id)
eq_(s.trace_id, make.trace_id)
eq_(s.service, make.service) # ensure it inherits the service
eq_(s.resource, s.name) # ensure when we don't set a resource, it's there.
# do it again and make sure it has new trace ids
_make_cake()
spans = writer.pop()
for s in spans:
assert s.trace_id != make.trace_id
def test_tracer_wrap():
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
@tracer.wrap('decorated_function', service='s', resource='r',
span_type='t')
def f(tag_name, tag_value):
# make sure we can still set tags
span = tracer.current_span()
span.set_tag(tag_name, tag_value)
f('a', 'b')
spans = writer.pop()
eq_(len(spans), 1)
s = spans[0]
eq_(s.name, 'decorated_function')
eq_(s.service, 's')
eq_(s.resource, 'r')
eq_(s.span_type, 't')
eq_(s.to_dict()['meta']['a'], 'b')
def test_tracer_wrap_default_name():
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
@tracer.wrap()
def f():
pass
f()
eq_(writer.spans[0].name, 'tests.test_tracer.f')
def test_tracer_wrap_exception():
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
@tracer.wrap()
def f():
raise Exception('bim')
assert_raises(Exception, f)
eq_(len(writer.spans), 1)
eq_(writer.spans[0].error, 1)
def test_tracer_wrap_multiple_calls():
# Make sure that we create a new span each time the function is called
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
@tracer.wrap()
def f():
pass
f()
f()
spans = writer.pop()
eq_(len(spans), 2)
assert spans[0].span_id != spans[1].span_id
def test_tracer_wrap_span_nesting():
# Make sure that nested spans have the correct parents
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
@tracer.wrap('inner')
def inner():
pass
@tracer.wrap('outer')
def outer():
with tracer.trace('mid'):
inner()
outer()
spans = writer.pop()
eq_(len(spans), 3)
# sift through the list so we're not dependent on span ordering within the
# writer
for span in spans:
if span.name == 'outer':
outer_span = span
elif span.name == 'mid':
mid_span = span
elif span.name == 'inner':
inner_span = span
else:
assert False, 'unknown span found' # should never get here
assert outer_span
assert mid_span
assert inner_span
eq_(outer_span.parent_id, None)
eq_(mid_span.parent_id, outer_span.span_id)
eq_(inner_span.parent_id, mid_span.span_id)
def test_tracer_wrap_class():
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
class Foo(object):
@staticmethod
@tracer.wrap()
def s():
return 1
@classmethod
@tracer.wrap()
def c(cls):
return 2
@tracer.wrap()
def i(cls):
return 3
f = Foo()
eq_(f.s(), 1)
eq_(f.c(), 2)
eq_(f.i(), 3)
spans = writer.pop()
eq_(len(spans), 3)
names = [s.name for s in spans]
# FIXME[matt] include the class name here.
eq_(sorted(names), sorted(["tests.test_tracer.%s" % n for n in ["s", "c", "i"]]))
def test_tracer_disabled():
# add some dummy tracing code.
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
tracer.enabled = True
with tracer.trace("foo") as s:
s.set_tag("a", "b")
assert writer.pop()
tracer.enabled = False
with tracer.trace("foo") as s:
s.set_tag("a", "b")
assert not writer.pop()
def test_unserializable_span_with_finish():
try:
import numpy as np
except ImportError:
raise SkipTest("numpy not installed")
# a weird case where manually calling finish with an unserializable
    # span was causing a loop of serialization.
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
with tracer.trace("parent") as span:
span.metrics['as'] = np.int64(1) # circumvent the data checks
span.finish()
def test_tracer_disabled_mem_leak():
# ensure that if the tracer is disabled, we still remove things from the
# span buffer upon finishing.
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
tracer.enabled = False
s1 = tracer.trace("foo")
s1.finish()
p1 = tracer.current_span()
s2 = tracer.trace("bar")
assert not s2._parent, s2._parent
s2.finish()
assert not p1, p1
def test_tracer_global_tags():
writer = DummyWriter()
tracer = Tracer()
tracer.writer = writer
s1 = tracer.trace('brie')
s1.finish()
assert not s1.meta
tracer.set_tags({'env': 'prod'})
s2 = tracer.trace('camembert')
s2.finish()
assert s2.meta == {'env': 'prod'}
tracer.set_tags({'env': 'staging', 'other': 'tag'})
s3 = tracer.trace('gruyere')
s3.finish()
assert s3.meta == {'env': 'staging', 'other': 'tag'}
class DummyWriter(AgentWriter):
""" DummyWriter is a small fake writer used for tests. not thread-safe. """
def __init__(self):
# original call
super(DummyWriter, self).__init__()
# dummy components
self.spans = []
self.services = {}
self.json_encoder = JSONEncoder()
self.msgpack_encoder = MsgpackEncoder()
def write(self, spans=None, services=None):
if spans:
# the traces encoding expect a list of traces so we
# put spans in a list like we do in the real execution path
# with both encoders
self.json_encoder.encode_traces([spans])
self.msgpack_encoder.encode_traces([spans])
self.spans += spans
if services:
self.json_encoder.encode_services(services)
self.msgpack_encoder.encode_services(services)
self.services.update(services)
def pop(self):
# dummy method
s = self.spans
self.spans = []
return s
def pop_services(self):
# dummy method
s = self.services
self.services = {}
return s
def get_dummy_tracer():
tracer = Tracer()
tracer.writer = DummyWriter()
return tracer
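# Hedged usage sketch (not part of the original test suite): trace a dummy
# operation with the helper above and pop the recorded spans from the fake writer.
def _example_dummy_tracer_usage():
    tracer = get_dummy_tracer()
    with tracer.trace("example.op") as span:
        span.set_tag("example", "value")
    return tracer.writer.pop()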
| 24.607903
| 87
| 0.600667
|
1324f094165cc567c9aa7c5ee9a1cf3b6272f5da
| 14,459
|
py
|
Python
|
contrib/aseprite_codec/aseprite.py
|
Lylitalo/Minesweeper
|
37b5ea775436c588aa340e32a303b39aeb953daa
|
[
"BSD-3-Clause"
] | null | null | null |
contrib/aseprite_codec/aseprite.py
|
Lylitalo/Minesweeper
|
37b5ea775436c588aa340e32a303b39aeb953daa
|
[
"BSD-3-Clause"
] | null | null | null |
contrib/aseprite_codec/aseprite.py
|
Lylitalo/Minesweeper
|
37b5ea775436c588aa340e32a303b39aeb953daa
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Decoder for Aseprite animation files in .ase or .aseprite format.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import struct
import zlib
from pyglet.image import ImageData, Animation, AnimationFrame
from pyglet.image.codecs import ImageDecoder, ImageDecodeException
from pyglet.compat import BytesIO
# Documentation for the Aseprite format can be found here:
# https://raw.githubusercontent.com/aseprite/aseprite/master/docs/ase-file-specs.md
BYTE = "B"
WORD = "H"
SIGNED_WORD = "h"
DWORD = "I"
BLEND_MODES = {0: 'Normal',
1: 'Multiply',
2: 'Screen',
3: 'Overlay',
4: 'Darken',
5: 'Lighten',
6: 'Color Dodge',
7: 'Color Burn',
8: 'Hard Light',
9: 'Soft Light',
10: 'Difference',
11: 'Exclusion',
12: 'Hue',
13: 'Saturation',
14: 'Color',
15: 'Luminosity'}
PALETTE_DICT = {}
PALETTE_INDEX = 0
def _unpack(fmt, file):
"""Unpack little endian bytes fram a file-like object. """
size = struct.calcsize(fmt)
data = file.read(size)
if len(data) < size:
raise ImageDecodeException('Unexpected EOF')
return struct.unpack("<" + fmt, data)[0]
def _chunked_iter(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
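# Illustrative example (an assumption, not from the original codec): chunking six
# bytes into groups of four yields b'\x01\x02\x03\x04' and then the shorter tail
# b'\x05\x06'.
def _example_chunked_iter():
    return list(_chunked_iter(b'\x01\x02\x03\x04\x05\x06', 4))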
#########################################
# Class for Aseprite compliant header
#########################################
class AsepriteHeader(object):
def __init__(self, file):
self.file_size = _unpack(DWORD, file)
self.magic_number = hex(_unpack(WORD, file))
self.num_frames = _unpack(WORD, file)
self.width = _unpack(WORD, file)
self.height = _unpack(WORD, file)
self.color_depth = _unpack(WORD, file)
self.flags = _unpack(DWORD, file)
self.speed = _unpack(WORD, file)
self._zero = _unpack(DWORD, file)
self._zero = _unpack(DWORD, file)
self.palette_index = _unpack(BYTE, file)
self._ignore = _unpack(BYTE * 3, file)
self.number_of_colors = _unpack(WORD, file)
self._zero = _unpack(BYTE * 94, file)
#########################################
# Class for Aseprite animation frames
#########################################
class Frame(object):
def __init__(self, num_chunks, duration, header, data):
self.num_chunks = num_chunks
self.duration = duration
self.color_depth = header.color_depth
self.width = header.width
self.height = header.height
self._data = data
self.chunks = self._parse_chunks()
self.cels = [c for c in self.chunks if type(c) == CelChunk]
self.layers = [c for c in self.chunks if type(c) == LayerChunk]
def _parse_chunks(self):
fileobj = BytesIO(self._data)
chunks = []
for chunk in range(self.num_chunks):
chunk_size = _unpack(DWORD, fileobj)
chunk_type = format(_unpack(WORD, fileobj), "#06x")
header_size = struct.calcsize(DWORD + WORD)
chunk_data = fileobj.read(chunk_size - header_size)
if chunk_type in ("0x0004", "0x0011", "0x2016"):
chunks.append(DeprecatedChunk(chunk_size, chunk_type, chunk_data))
elif chunk_type == "0x2004":
chunks.append(LayerChunk(chunk_size, chunk_type, chunk_data))
elif chunk_type == "0x2005":
chunks.append(CelChunk(chunk_size, chunk_type, chunk_data))
elif chunk_type == "0x2017":
chunks.append(PathChunk(chunk_size, chunk_type, chunk_data))
elif chunk_type == "0x2018":
chunks.append(FrameTagsChunk(chunk_size, chunk_type, chunk_data))
elif chunk_type == "0x2019":
palette_chunk = PaletteChunk(chunk_size, chunk_type, chunk_data)
chunks.append(palette_chunk)
global PALETTE_DICT
PALETTE_DICT = palette_chunk.palette_dict.copy()
elif chunk_type == "0x2020":
chunks.append(UserDataChunk(chunk_size, chunk_type, chunk_data))
return chunks
def _pad_pixels(self, cel):
"""For cels that dont fill the entire frame, pad with zeros."""
fileobj = BytesIO(cel.pixel_data)
padding = b'\x00\x00\x00\x00'
top_pad = bytes(padding) * (self.width * cel.y_pos)
left_pad = bytes(padding) * cel.x_pos
right_pad = bytes(padding) * (self.width - cel.x_pos - cel.width)
bottom_pad = bytes(padding) * (self.width * (self.height - cel.height - cel.y_pos))
line_size = cel.width * len(padding)
pixel_array = top_pad
for i in range(cel.height):
pixel_array += (left_pad + fileobj.read(line_size) + right_pad)
pixel_array += bottom_pad
return pixel_array
@staticmethod
def _blend_pixels(bottom, top, mode):
# Iterate over the arrays in chunks of 4 (RGBA):
bottom_iter = _chunked_iter(bottom, 4)
top_iter = _chunked_iter(top, 4)
if mode == 'Normal':
final_array = []
# If RGB values are > 0, use the top pixel.
for bottom_pixel, top_pixel in zip(bottom_iter, top_iter):
if sum(top_pixel[:3]) > 0:
final_array.extend(top_pixel)
else:
final_array.extend(bottom_pixel)
return bytes(final_array)
# TODO: implement additional blend modes
else:
raise ImageDecodeException('Unsupported blend mode.')
def _convert_to_rgba(self, cel):
if self.color_depth == 8:
global PALETTE_INDEX
pixel_array = []
for pixel in cel.pixel_data:
if pixel == PALETTE_INDEX:
pixel_array.extend([0, 0, 0, 0])
else:
pixel_array.extend(PALETTE_DICT[pixel])
cel.pixel_data = bytes(pixel_array)
return cel
elif self.color_depth == 16:
greyscale_iter = _chunked_iter(cel.pixel_data, 2)
pixel_array = []
for pixel in greyscale_iter:
rgba = (pixel[0] * 3) + pixel[1]
pixel_array.append(rgba)
cel.pixel_data = bytes(pixel_array)
return cel
else:
return cel
def get_pixel_array(self, layers):
# Start off with an empty RGBA base:
pixel_array = bytes(4) * self.width * self.height
# Blend each layer's cel data one-by-one:
for cel in self.cels:
cel = self._convert_to_rgba(cel)
padded_pixels = self._pad_pixels(cel)
blend_mode = BLEND_MODES[layers[cel.layer_index].blend_mode]
pixel_array = self._blend_pixels(pixel_array, padded_pixels, blend_mode)
return pixel_array
#########################################
# Aseprite Chunk type definitions
#########################################
class Chunk(object):
def __init__(self, size, chunk_type):
self.size = size
self.chunk_type = chunk_type
class LayerChunk(Chunk):
def __init__(self, size, chunk_type, data):
super(LayerChunk, self).__init__(size, chunk_type)
fileobj = BytesIO(data)
self.flags = _unpack(WORD, fileobj)
self.layer_type = _unpack(WORD, fileobj)
self.child_level = _unpack(WORD, fileobj)
_ignored_width = _unpack(WORD, fileobj)
_ignored_height = _unpack(WORD, fileobj)
self.blend_mode = _unpack(WORD, fileobj)
self.opacity = _unpack(BYTE, fileobj)
_zero_unused = _unpack(BYTE * 3, fileobj)
name_length = _unpack(WORD, fileobj)
self.name = fileobj.read(name_length)
if hasattr(self.name, "decode"):
self.name = self.name.decode('utf8')
class CelChunk(Chunk):
def __init__(self, size, chunk_type, data):
super(CelChunk, self).__init__(size, chunk_type)
fileobj = BytesIO(data)
self.layer_index = _unpack(WORD, fileobj)
self.x_pos = _unpack(SIGNED_WORD, fileobj)
self.y_pos = _unpack(SIGNED_WORD, fileobj)
self.opacity_level = _unpack(BYTE, fileobj)
self.cel_type = _unpack(WORD, fileobj)
_zero_unused = _unpack(BYTE * 7, fileobj)
if self.cel_type == 0:
self.width = _unpack(WORD, fileobj)
self.height = _unpack(WORD, fileobj)
self.pixel_data = fileobj.read()
elif self.cel_type == 1:
self.frame_position = _unpack(WORD, fileobj)
elif self.cel_type == 2:
self.width = _unpack(WORD, fileobj)
self.height = _unpack(WORD, fileobj)
self.pixel_data = zlib.decompress(fileobj.read())
class PathChunk(Chunk):
def __init__(self, size, chunk_type, data):
super(PathChunk, self).__init__(size, chunk_type)
class FrameTagsChunk(Chunk):
def __init__(self, size, chunk_type, data):
super(FrameTagsChunk, self).__init__(size, chunk_type)
# TODO: unpack this data.
class PaletteChunk(Chunk):
def __init__(self, size, chunk_type, data):
super(PaletteChunk, self).__init__(size, chunk_type)
fileobj = BytesIO(data)
self.palette_size = _unpack(DWORD, fileobj)
self.first_color_index = _unpack(DWORD, fileobj)
self.last_color_index = _unpack(DWORD, fileobj)
_zero = _unpack(BYTE * 8, fileobj)
self.palette_dict = {}
if _unpack(WORD, fileobj) == 1: # color has name
size = 7
else:
size = 6
for index in range(self.first_color_index, self.last_color_index+1):
rgba_data = fileobj.read(size)
# Ignore the palette names, as they aren't needed:
r, g, b, a = struct.unpack('<BBBB', rgba_data[:4])
self.palette_dict[index] = r, g, b, a
class UserDataChunk(Chunk):
def __init__(self, size, chunk_type, data):
super(UserDataChunk, self).__init__(size, chunk_type)
# TODO: unpack this data.
class DeprecatedChunk(Chunk):
def __init__(self, size, chunk_type, data):
super(DeprecatedChunk, self).__init__(size, chunk_type)
#########################################
# Image Decoder class definition
#########################################
class AsepriteImageDecoder(ImageDecoder):
def get_file_extensions(self):
return ['.ase', '.aseprite']
def get_animation_file_extensions(self):
return ['.ase', '.aseprite']
def decode(self, file, filename):
header, frames, layers, pitch = self._parse_file(file, filename)
pixel_data = frames[0].get_pixel_array(layers=layers)
return ImageData(header.width, header.height, 'RGBA', pixel_data, -pitch)
def decode_animation(self, file, filename):
header, frames, layers, pitch = self._parse_file(file, filename)
animation_frames = []
for frame in frames:
pixel_data = frame.get_pixel_array(layers=layers)
image = ImageData(header.width, header.height, 'RGBA', pixel_data, -pitch)
animation_frames.append(AnimationFrame(image, frame.duration/1000.0))
return Animation(animation_frames)
@staticmethod
def _parse_file(file, filename):
if not file:
file = open(filename, 'rb')
header = AsepriteHeader(file)
if header.magic_number != '0xa5e0':
raise ImageDecodeException("Does not appear to be a valid ASEprite file.")
if header.color_depth not in (8, 16, 32):
raise ImageDecodeException("Invalid color depth.")
global PALETTE_INDEX
PALETTE_INDEX = header.palette_index
frames = []
for _ in range(header.num_frames):
frame_size = _unpack(DWORD, file)
magic_number = hex(_unpack(WORD, file))
if magic_number != '0xf1fa':
raise ImageDecodeException("Malformed frame. File may be corrupted.")
num_chunks = _unpack(WORD, file)
duration = _unpack(WORD, file)
_zero = _unpack(BYTE * 6, file)
header_size = struct.calcsize(DWORD + WORD * 3 + BYTE * 6)
data = file.read(frame_size - header_size)
frames.append(Frame(num_chunks, duration, header, data))
# Layers chunk is in the first frame:
layers = frames[0].layers
pitch = len('RGBA') * header.width
file.close()
return header, frames, layers, pitch
def get_decoders():
return [AsepriteImageDecoder()]
def get_encoders():
return []
| 36.885204
| 91
| 0.606197
|
490553816be4cd6d31d978ffbf5040fc92cfc1ef
| 3,654
|
py
|
Python
|
testing/model_loading.py
|
l2tor/underworlds
|
2937b6f858a1725d4581a792e34e5c08587301d0
|
[
"BSD-3-Clause"
] | null | null | null |
testing/model_loading.py
|
l2tor/underworlds
|
2937b6f858a1725d4581a792e34e5c08587301d0
|
[
"BSD-3-Clause"
] | null | null | null |
testing/model_loading.py
|
l2tor/underworlds
|
2937b6f858a1725d4581a792e34e5c08587301d0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
import unittest
import underworlds
import underworlds.server
import logging; logger = logging.getLogger("underworlds.testing")
logging.basicConfig(level=logging.DEBUG)
import time
from underworlds.tools.loader import ModelLoader
from underworlds.types import MESH, CAMERA
import os.path as path
from underworlds.helpers.transformations import compose_matrix
PROPAGATION_TIME=0.01 # time to wait for node update notification propagation (in sec)
class TestModelLoading(unittest.TestCase):
"""Test for a bug where loading a second model reset the transformation of the first model
"""
def setUp(self):
self.server = underworlds.server.start()
        time.sleep(0.1) # leave some time for the server to start
self.ctx = underworlds.Context("unittest - root anchoring transformation issue")
def test_basic_loading(self):
world = self.ctx.worlds["test"]
nodes = ModelLoader().load(path.join("res","tree.blend"), world="test")
time.sleep(PROPAGATION_TIME)
self.assertEquals(len(nodes), 2) # <BlenderRoot> and <tree>
self.assertEquals(len(world.scene.nodes), 2)
trees = world.scene.nodebyname("tree")
self.assertEquals(len(trees), 1) # only one tree
self.assertEquals(trees[0].type, MESH)
def test_complex_loading(self):
world = self.ctx.worlds["test"]
nodes = ModelLoader().load(path.join("res","visibility.blend"), world="test")
time.sleep(PROPAGATION_TIME)
self.assertEquals(len(nodes), 8)
self.assertEquals(len(world.scene.nodes), 8)
self.assertEquals(len(world.scene.nodebyname("Camera1")), 1)
cam1 = world.scene.nodebyname("Camera1")[0]
self.assertEquals(cam1.type, CAMERA)
self.assertFalse(cam1.hires)
self.assertEquals(len(world.scene.nodebyname("Cube1")), 1)
cube1 = world.scene.nodebyname("Cube1")[0]
self.assertEquals(cube1.type, MESH)
self.assertTrue(cube1.hires)
def test_double_loading(self):
world = self.ctx.worlds["test"]
ModelLoader().load(path.join("res","tree.blend"), world="test")
ModelLoader().load(path.join("res","tree.blend"), world="test")
time.sleep(PROPAGATION_TIME)
self.assertEquals(len(world.scene.nodes), 3) # one root and 2 trees
trees = world.scene.nodebyname("tree")
self.assertEquals(len(trees), 2) # should have 2 trees
self.assertEquals(trees[0].hires, trees[1].hires)
self.assertNotEquals(trees[0].id, trees[1].id)
def test_anchoring(self):
world = self.ctx.worlds["test"]
nodes = ModelLoader().load(path.join("res","tree.blend"), world="test")
tree = world.scene.nodebyname("tree")[0]
self.assertEqual(tree.transformation[0,3], 0)
tree.transformation = compose_matrix(None, None, None, [2, 0, 0], None)
world.scene.nodes.update(tree)
time.sleep(PROPAGATION_TIME)
self.assertEqual(world.scene.nodes[tree.id].transformation[0,3], 2)
        # ...loading another model used to reset the transformation of our
        # original model
nodes = ModelLoader().load(path.join("res","cow.blend"), world="test")
time.sleep(PROPAGATION_TIME)
self.assertEqual(world.scene.nodes[tree.id].transformation[0,3], 2)
def tearDown(self):
self.ctx.close()
self.server.stop(0)
def test_suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestModelLoading)
return suite
if __name__ == '__main__':
unittest.main()
| 32.336283
| 94
| 0.660372
|
fa0e93a72aa6b8908730a267a8390376ce314a92
| 18,009
|
py
|
Python
|
homeassistant/components/zwave_js/services.py
|
RavensburgOP/core
|
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
|
[
"Apache-2.0"
] | 5
|
2019-02-24T11:46:18.000Z
|
2019-05-28T17:37:21.000Z
|
homeassistant/components/zwave_js/services.py
|
RavensburgOP/core
|
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
|
[
"Apache-2.0"
] | 77
|
2020-07-16T16:43:09.000Z
|
2022-03-31T06:14:37.000Z
|
homeassistant/components/zwave_js/services.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | 7
|
2021-03-20T12:34:01.000Z
|
2021-12-02T10:13:52.000Z
|
"""Methods and classes related to executing Z-Wave commands and publishing these to hass."""
from __future__ import annotations
import asyncio
import logging
from typing import Any
import voluptuous as vol
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import CommandStatus
from zwave_js_server.exceptions import SetValueFailed
from zwave_js_server.model.node import Node as ZwaveNode
from zwave_js_server.model.value import get_value_id
from zwave_js_server.util.multicast import async_multicast_set_value
from zwave_js_server.util.node import (
async_bulk_set_partial_config_parameters,
async_set_config_parameter,
)
from homeassistant.const import ATTR_DEVICE_ID, ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant, ServiceCall, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import DeviceRegistry
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_registry import EntityRegistry
from . import const
from .helpers import async_get_node_from_device_id, async_get_node_from_entity_id
_LOGGER = logging.getLogger(__name__)
def parameter_name_does_not_need_bitmask(
val: dict[str, int | str | list[str]]
) -> dict[str, int | str | list[str]]:
"""Validate that if a parameter name is provided, bitmask is not as well."""
if (
isinstance(val[const.ATTR_CONFIG_PARAMETER], str)
and const.ATTR_CONFIG_PARAMETER_BITMASK in val
):
raise vol.Invalid(
"Don't include a bitmask when a parameter name is specified",
path=[const.ATTR_CONFIG_PARAMETER, const.ATTR_CONFIG_PARAMETER_BITMASK],
)
return val
def broadcast_command(val: dict[str, Any]) -> dict[str, Any]:
"""Validate that the service call is for a broadcast command."""
if val.get(const.ATTR_BROADCAST):
return val
raise vol.Invalid(
"Either `broadcast` must be set to True or multiple devices/entities must be "
"specified"
)
# Validates that a bitmask is provided in hex form and converts it to decimal
# int equivalent since that's what the library uses
BITMASK_SCHEMA = vol.All(
cv.string,
vol.Lower,
vol.Match(
r"^(0x)?[0-9a-f]+$",
msg="Must provide an integer (e.g. 255) or a bitmask in hex form (e.g. 0xff)",
),
lambda value: int(value, 16),
)
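# Illustrative behaviour (an assumption, not part of the integration): the schema
# lower-cases and validates a hex string before converting it, so both "0xFF" and
# "ff" become the integer 255.
def _example_bitmask_schema():
    return BITMASK_SCHEMA("0xFF"), BITMASK_SCHEMA("ff")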
VALUE_SCHEMA = vol.Any(
bool,
vol.Coerce(int),
vol.Coerce(float),
BITMASK_SCHEMA,
cv.string,
)
class ZWaveServices:
"""Class that holds our services (Zwave Commands) that should be published to hass."""
def __init__(
self, hass: HomeAssistant, ent_reg: EntityRegistry, dev_reg: DeviceRegistry
) -> None:
"""Initialize with hass object."""
self._hass = hass
self._ent_reg = ent_reg
self._dev_reg = dev_reg
@callback
def async_register(self) -> None:
"""Register all our services."""
@callback
def get_nodes_from_service_data(val: dict[str, Any]) -> dict[str, Any]:
"""Get nodes set from service data."""
nodes: set[ZwaveNode] = set()
for entity_id in val.pop(ATTR_ENTITY_ID, []):
try:
nodes.add(
async_get_node_from_entity_id(
self._hass, entity_id, self._ent_reg, self._dev_reg
)
)
except ValueError as err:
const.LOGGER.warning(err.args[0])
for device_id in val.pop(ATTR_DEVICE_ID, []):
try:
nodes.add(
async_get_node_from_device_id(
self._hass, device_id, self._dev_reg
)
)
except ValueError as err:
const.LOGGER.warning(err.args[0])
val[const.ATTR_NODES] = nodes
return val
@callback
def validate_multicast_nodes(val: dict[str, Any]) -> dict[str, Any]:
"""Validate the input nodes for multicast."""
nodes: set[ZwaveNode] = val[const.ATTR_NODES]
broadcast: bool = val[const.ATTR_BROADCAST]
# User must specify a node if they are attempting a broadcast and have more
# than one zwave-js network.
if (
broadcast
and not nodes
and len(self._hass.config_entries.async_entries(const.DOMAIN)) > 1
):
raise vol.Invalid(
"You must include at least one entity or device in the service call"
)
first_node = next((node for node in nodes), None)
# If any nodes don't have matching home IDs, we can't run the command because
# we can't multicast across multiple networks
if first_node and any(
node.client.driver.controller.home_id
!= first_node.client.driver.controller.home_id
for node in nodes
):
raise vol.Invalid(
"Multicast commands only work on devices in the same network"
)
return val
@callback
def validate_entities(val: dict[str, Any]) -> dict[str, Any]:
"""Validate entities exist and are from the zwave_js platform."""
for entity_id in val[ATTR_ENTITY_ID]:
entry = self._ent_reg.async_get(entity_id)
if entry is None or entry.platform != const.DOMAIN:
raise vol.Invalid(
f"Entity {entity_id} is not a valid {const.DOMAIN} entity."
)
return val
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_SET_CONFIG_PARAMETER,
self.async_set_config_parameter,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Any(
vol.Coerce(int), cv.string
),
vol.Optional(const.ATTR_CONFIG_PARAMETER_BITMASK): vol.Any(
vol.Coerce(int), BITMASK_SCHEMA
),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(
vol.Coerce(int), BITMASK_SCHEMA, cv.string
),
},
cv.has_at_least_one_key(ATTR_DEVICE_ID, ATTR_ENTITY_ID),
parameter_name_does_not_need_bitmask,
get_nodes_from_service_data,
),
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_BULK_SET_PARTIAL_CONFIG_PARAMETERS,
self.async_bulk_set_partial_config_parameters,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(
vol.Coerce(int),
{
vol.Any(
vol.Coerce(int), BITMASK_SCHEMA, cv.string
): vol.Any(vol.Coerce(int), BITMASK_SCHEMA, cv.string)
},
),
},
cv.has_at_least_one_key(ATTR_DEVICE_ID, ATTR_ENTITY_ID),
get_nodes_from_service_data,
),
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_REFRESH_VALUE,
self.async_poll_value,
schema=vol.Schema(
vol.All(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(
const.ATTR_REFRESH_ALL_VALUES, default=False
): cv.boolean,
},
validate_entities,
)
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_SET_VALUE,
self.async_set_value,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(const.ATTR_COMMAND_CLASS): vol.Coerce(int),
vol.Required(const.ATTR_PROPERTY): vol.Any(
vol.Coerce(int), str
),
vol.Optional(const.ATTR_PROPERTY_KEY): vol.Any(
vol.Coerce(int), str
),
vol.Optional(const.ATTR_ENDPOINT): vol.Coerce(int),
vol.Required(const.ATTR_VALUE): VALUE_SCHEMA,
vol.Optional(const.ATTR_WAIT_FOR_RESULT): cv.boolean,
vol.Optional(const.ATTR_OPTIONS): {cv.string: VALUE_SCHEMA},
},
cv.has_at_least_one_key(ATTR_DEVICE_ID, ATTR_ENTITY_ID),
get_nodes_from_service_data,
),
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_MULTICAST_SET_VALUE,
self.async_multicast_set_value,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(const.ATTR_BROADCAST, default=False): cv.boolean,
vol.Required(const.ATTR_COMMAND_CLASS): vol.Coerce(int),
vol.Required(const.ATTR_PROPERTY): vol.Any(
vol.Coerce(int), str
),
vol.Optional(const.ATTR_PROPERTY_KEY): vol.Any(
vol.Coerce(int), str
),
vol.Optional(const.ATTR_ENDPOINT): vol.Coerce(int),
vol.Required(const.ATTR_VALUE): VALUE_SCHEMA,
vol.Optional(const.ATTR_OPTIONS): {cv.string: VALUE_SCHEMA},
},
vol.Any(
cv.has_at_least_one_key(ATTR_DEVICE_ID, ATTR_ENTITY_ID),
broadcast_command,
),
get_nodes_from_service_data,
validate_multicast_nodes,
),
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_PING,
self.async_ping,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
},
cv.has_at_least_one_key(ATTR_DEVICE_ID, ATTR_ENTITY_ID),
get_nodes_from_service_data,
),
),
)
async def async_set_config_parameter(self, service: ServiceCall) -> None:
"""Set a config value on a node."""
nodes = service.data[const.ATTR_NODES]
property_or_property_name = service.data[const.ATTR_CONFIG_PARAMETER]
property_key = service.data.get(const.ATTR_CONFIG_PARAMETER_BITMASK)
new_value = service.data[const.ATTR_CONFIG_VALUE]
for node in nodes:
zwave_value, cmd_status = await async_set_config_parameter(
node,
new_value,
property_or_property_name,
property_key=property_key,
)
if cmd_status == CommandStatus.ACCEPTED:
msg = "Set configuration parameter %s on Node %s with value %s"
else:
msg = (
"Added command to queue to set configuration parameter %s on Node "
"%s with value %s. Parameter will be set when the device wakes up"
)
_LOGGER.info(msg, zwave_value, node, new_value)
async def async_bulk_set_partial_config_parameters(
self, service: ServiceCall
) -> None:
"""Bulk set multiple partial config values on a node."""
nodes = service.data[const.ATTR_NODES]
property_ = service.data[const.ATTR_CONFIG_PARAMETER]
new_value = service.data[const.ATTR_CONFIG_VALUE]
for node in nodes:
cmd_status = await async_bulk_set_partial_config_parameters(
node,
property_,
new_value,
)
if cmd_status == CommandStatus.ACCEPTED:
msg = "Bulk set partials for configuration parameter %s on Node %s"
else:
msg = (
"Added command to queue to bulk set partials for configuration "
"parameter %s on Node %s"
)
_LOGGER.info(msg, property_, node)
async def async_poll_value(self, service: ServiceCall) -> None:
"""Poll value on a node."""
for entity_id in service.data[ATTR_ENTITY_ID]:
entry = self._ent_reg.async_get(entity_id)
assert entry # Schema validation would have failed if we can't do this
async_dispatcher_send(
self._hass,
f"{const.DOMAIN}_{entry.unique_id}_poll_value",
service.data[const.ATTR_REFRESH_ALL_VALUES],
)
async def async_set_value(self, service: ServiceCall) -> None:
"""Set a value on a node."""
nodes = service.data[const.ATTR_NODES]
command_class = service.data[const.ATTR_COMMAND_CLASS]
property_ = service.data[const.ATTR_PROPERTY]
property_key = service.data.get(const.ATTR_PROPERTY_KEY)
endpoint = service.data.get(const.ATTR_ENDPOINT)
new_value = service.data[const.ATTR_VALUE]
wait_for_result = service.data.get(const.ATTR_WAIT_FOR_RESULT)
options = service.data.get(const.ATTR_OPTIONS)
for node in nodes:
success = await node.async_set_value(
get_value_id(
node,
command_class,
property_,
endpoint=endpoint,
property_key=property_key,
),
new_value,
options=options,
wait_for_result=wait_for_result,
)
if success is False:
raise SetValueFailed(
"Unable to set value, refer to "
"https://zwave-js.github.io/node-zwave-js/#/api/node?id=setvalue "
"for possible reasons"
)
async def async_multicast_set_value(self, service: ServiceCall) -> None:
"""Set a value via multicast to multiple nodes."""
nodes = service.data[const.ATTR_NODES]
broadcast: bool = service.data[const.ATTR_BROADCAST]
options = service.data.get(const.ATTR_OPTIONS)
if not broadcast and len(nodes) == 1:
const.LOGGER.warning(
"Passing the zwave_js.multicast_set_value service call to the "
"zwave_js.set_value service since only one node was targeted"
)
await self.async_set_value(service)
return
value = {
"commandClass": service.data[const.ATTR_COMMAND_CLASS],
"property": service.data[const.ATTR_PROPERTY],
"propertyKey": service.data.get(const.ATTR_PROPERTY_KEY),
"endpoint": service.data.get(const.ATTR_ENDPOINT),
}
new_value = service.data[const.ATTR_VALUE]
# If there are no nodes, we can assume there is only one config entry due to
# schema validation and can use that to get the client, otherwise we can just
# get the client from the node.
client: ZwaveClient = None
first_node = next((node for node in nodes), None)
if first_node:
client = first_node.client
else:
entry_id = self._hass.config_entries.async_entries(const.DOMAIN)[0].entry_id
client = self._hass.data[const.DOMAIN][entry_id][const.DATA_CLIENT]
success = await async_multicast_set_value(
client=client,
new_value=new_value,
value_data={k: v for k, v in value.items() if v is not None},
nodes=None if broadcast else list(nodes),
options=options,
)
if success is False:
raise SetValueFailed("Unable to set value via multicast")
async def async_ping(self, service: ServiceCall) -> None:
"""Ping node(s)."""
nodes: set[ZwaveNode] = service.data[const.ATTR_NODES]
await asyncio.gather(*(node.async_ping() for node in nodes))
| 39.407002
| 92
| 0.547282
|
479b7d66368e3fa1e8bafa81200b18f8abcf4beb
| 3,716
|
py
|
Python
|
muver/wrappers/samtools.py
|
lavenderca/muver
|
074c7b158610e4cedf99b20806721afbf69a21e1
|
[
"MIT"
] | null | null | null |
muver/wrappers/samtools.py
|
lavenderca/muver
|
074c7b158610e4cedf99b20806721afbf69a21e1
|
[
"MIT"
] | null | null | null |
muver/wrappers/samtools.py
|
lavenderca/muver
|
074c7b158610e4cedf99b20806721afbf69a21e1
|
[
"MIT"
] | 1
|
2019-07-22T07:31:03.000Z
|
2019-07-22T07:31:03.000Z
|
import os
import subprocess
from __init__ import PATHS, quiet_call
def mapq_filter(in_sam, out_sam, q=20):
'''
Filter reads by MAPQ value using samtools view.
'''
quiet_call([
PATHS['samtools'], 'view',
'-Sh',
'-q', str(q),
in_sam,
], stdout=out_sam)
def index_bam(bam_fn):
'''
Run samtools index.
'''
quiet_call([
PATHS['samtools'], 'index',
bam_fn,
])
def merge_bams(bam_list, out_bam):
'''
Run samtools merge.
bam_list -- list of BAM file names to be merged
'''
quiet_call([
PATHS['samtools'], 'merge', '-f',
out_bam,
] + bam_list)
index_bam(out_bam)
def run_mpileup(bam_file, ref_fn, output_file):
'''
Run samtools mpileup, writing to an output TXT file.
'''
quiet_call([
PATHS['samtools'], 'mpileup',
'-q', '5',
'-Q', '10',
'-B',
'-d', '100000',
'-f', ref_fn,
bam_file,
], stdout=output_file)
def mpileup_iter(bam_file, ref_fn):
'''
Run samtools mpileup, returning an iterable.
'''
proc = subprocess.Popen([
PATHS['samtools'], 'mpileup',
'-q', '5',
'-Q', '10',
'-B',
'-d', '100000',
'-f', ref_fn,
bam_file,
], stdout=subprocess.PIPE, stderr=open(os.devnull, 'w'))
return iter(proc.stdout.readline, '')
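# Hedged usage sketch (file names are placeholders): iterate over pileup lines
# lazily and pull out the first four whitespace-delimited fields, mirroring how
# get_mpileup_depths consumes this iterator further below.
def _example_mpileup_iter(bam_file='sample.bam', ref_fn='genome.fa'):
    for line in mpileup_iter(bam_file, ref_fn):
        chromosome, position, reference_base, coverage = line.strip().split()[:4]
        yield chromosome, int(position), reference_base, int(coverage)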
def faidx_index(ref_fn):
'''
    Run samtools faidx, creating an index if none exists.
'''
if not os.path.exists(ref_fn + '.fai'):
quiet_call([
PATHS['samtools'], 'faidx',
ref_fn,
])
def view_bam(input_bam):
'''
Run samtools view, returning an iterable.
'''
proc = subprocess.Popen([
PATHS['samtools'], 'view',
input_bam,
], stdout=subprocess.PIPE, stderr=open(os.devnull, 'w'))
return iter(proc.stdout.readline, '')
def get_mpileup_depths(input_bam, ref_fn, output_bedgraph):
'''
Run samtools mpileup, writing depths to an output bedGraph file.
'''
def print_line(chromosome, start, end, value, OUT):
if value: # Only prints non-zero values
OUT.write(
'{}\t{}\t{}\t{}\n'.format(
chromosome,
str(start - 1),
str(end),
str(value),
)
)
last_pos = None
last_val = None
last_chr = None
start = None
end = None
with open(output_bedgraph, 'w') as OUT:
for line in mpileup_iter(input_bam, ref_fn):
line_split = line.strip().split()
chromosome, position, reference_base, coverage = line_split[:4]
position = int(position)
coverage = int(coverage)
if int(coverage) > 0:
bases = line_split[4]
else:
bases = ''
i = 0
while i < len(bases):
if bases[i] == '^':
i += 1
elif bases[i] == '*':
coverage += 1
i += 1
if (chromosome == last_chr and coverage == last_val and \
position == last_pos + 1):
end = position
else:
print_line(last_chr, start, end, last_val, OUT)
start = position
end = position
last_pos = position
last_val = coverage
last_chr = chromosome
print_line(last_chr, start, end, last_val, OUT)
| 24.773333
| 76
| 0.483315
|
c9b16daa0da67497eb1e1e0a2845776b815dfe41
| 2,813
|
py
|
Python
|
pipscoin/consensus/pot_iterations.py
|
Pipscoin-Network/pipscoin-blockchain
|
f400d26956881eb319786230506bb441f76f64d9
|
[
"Apache-2.0"
] | 8
|
2021-08-29T15:13:45.000Z
|
2022-03-30T17:23:04.000Z
|
pipscoin/consensus/pot_iterations.py
|
Pipscoin-Network/pipscoin-blockchain
|
f400d26956881eb319786230506bb441f76f64d9
|
[
"Apache-2.0"
] | 28
|
2021-08-29T02:08:07.000Z
|
2022-03-24T23:32:00.000Z
|
pipscoin/consensus/pot_iterations.py
|
Pipscoin-Network/pipscoin-blockchain
|
f400d26956881eb319786230506bb441f76f64d9
|
[
"Apache-2.0"
] | 4
|
2021-08-29T12:59:05.000Z
|
2022-03-15T08:38:29.000Z
|
from pipscoin.consensus.constants import ConsensusConstants
from pipscoin.consensus.pos_quality import _expected_plot_size
from pipscoin.types.blockchain_format.sized_bytes import bytes32
from pipscoin.util.hash import std_hash
from pipscoin.util.ints import uint8, uint64, uint128
def is_overflow_block(constants: ConsensusConstants, signage_point_index: uint8) -> bool:
if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
raise ValueError("SP index too high")
return signage_point_index >= constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA
def calculate_sp_interval_iters(constants: ConsensusConstants, sub_slot_iters: uint64) -> uint64:
assert sub_slot_iters % constants.NUM_SPS_SUB_SLOT == 0
return uint64(sub_slot_iters // constants.NUM_SPS_SUB_SLOT)
def calculate_sp_iters(constants: ConsensusConstants, sub_slot_iters: uint64, signage_point_index: uint8) -> uint64:
if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
raise ValueError("SP index too high")
return uint64(calculate_sp_interval_iters(constants, sub_slot_iters) * signage_point_index)
def calculate_ip_iters(
constants: ConsensusConstants,
sub_slot_iters: uint64,
signage_point_index: uint8,
required_iters: uint64,
) -> uint64:
# Note that the SSI is for the block passed in, which might be in the previous epoch
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
sp_interval_iters: uint64 = calculate_sp_interval_iters(constants, sub_slot_iters)
if sp_iters % sp_interval_iters != 0 or sp_iters >= sub_slot_iters:
raise ValueError(f"Invalid sp iters {sp_iters} for this ssi {sub_slot_iters}")
if required_iters >= sp_interval_iters or required_iters == 0:
raise ValueError(
f"Required iters {required_iters} is not below the sp interval iters {sp_interval_iters} "
f"{sub_slot_iters} or not >0."
)
return uint64((sp_iters + constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters) % sub_slot_iters)
def calculate_iterations_quality(
difficulty_constant_factor: uint128,
quality_string: bytes32,
size: int,
difficulty: uint64,
cc_sp_output_hash: bytes32,
) -> uint64:
"""
    Calculates the number of iterations from the quality. This is derived as the difficulty times the constant factor
times a random number between 0 and 1 (based on quality string), divided by plot size.
"""
sp_quality_string: bytes32 = std_hash(quality_string + cc_sp_output_hash)
iters = uint64(
int(difficulty)
* int(difficulty_constant_factor)
* int.from_bytes(sp_quality_string, "big", signed=False)
// (int(pow(2, 256)) * int(_expected_plot_size(size)))
)
return max(iters, uint64(1))
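A small worked example for the helpers above. The constants object is a stand-in (real code would use the network's ConsensusConstants) and all numeric values are made up, chosen only so that the validity checks pass.

# Illustrative only: SimpleNamespace stands in for ConsensusConstants, and the
# numbers below are assumptions, not real network parameters.
from types import SimpleNamespace

from pipscoin.consensus.pot_iterations import (
    calculate_ip_iters, calculate_sp_iters, is_overflow_block)
from pipscoin.util.ints import uint8, uint64

constants = SimpleNamespace(NUM_SPS_SUB_SLOT=64, NUM_SP_INTERVALS_EXTRA=3)

sub_slot_iters = uint64(128_000_000)   # hypothetical sub-slot size
sp_index = uint8(10)
required_iters = uint64(1_000_000)     # must be non-zero and below the 2_000_000 SP interval

print(is_overflow_block(constants, sp_index))                     # False (10 < 64 - 3)
print(calculate_sp_iters(constants, sub_slot_iters, sp_index))    # 20_000_000
print(calculate_ip_iters(constants, sub_slot_iters, sp_index, required_iters))  # 27_000_000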
| 42.621212
| 118
| 0.754355
|
0fbf8c8d325cd77864fc2a10479993ab93d7da5b
| 685
|
py
|
Python
|
app/core/migrations/0002_tag.py
|
eferroni/recipe-app-api
|
2a242c156a97cc0d838f5afb1cc051dea7e84fc6
|
[
"MIT"
] | null | null | null |
app/core/migrations/0002_tag.py
|
eferroni/recipe-app-api
|
2a242c156a97cc0d838f5afb1cc051dea7e84fc6
|
[
"MIT"
] | null | null | null |
app/core/migrations/0002_tag.py
|
eferroni/recipe-app-api
|
2a242c156a97cc0d838f5afb1cc051dea7e84fc6
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-10-14 23:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.541667
| 118
| 0.617518
|
6fc824b5c8812c901c9105fbfdee5596fddac519
| 264
|
py
|
Python
|
support_files/scraping/entries/proj_1935/proj_1935/items.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
support_files/scraping/entries/proj_1935/proj_1935/items.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
support_files/scraping/entries/proj_1935/proj_1935/items.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class Proj1935Item(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| 20.307692
| 53
| 0.715909
|
e6569812de2b898cbf7a065417413e956ac342e6
| 629
|
py
|
Python
|
Apps/Ayncio.py
|
srp98/Python-Stuff
|
fade8934718e01a3d30cf9db93515b8f02a20b18
|
[
"MIT"
] | null | null | null |
Apps/Ayncio.py
|
srp98/Python-Stuff
|
fade8934718e01a3d30cf9db93515b8f02a20b18
|
[
"MIT"
] | null | null | null |
Apps/Ayncio.py
|
srp98/Python-Stuff
|
fade8934718e01a3d30cf9db93515b8f02a20b18
|
[
"MIT"
] | 1
|
2019-10-31T03:16:04.000Z
|
2019-10-31T03:16:04.000Z
|
from time import sleep
import asyncio
# Standard (Synchronous) Python
def hello():
print('Hello')
sleep(3)
print('World')
if __name__ == '__main__':
hello()
# Asyncio Example1
loop = asyncio.get_event_loop()
@asyncio.coroutine
def hello2():
print('Hello')
yield from asyncio.sleep(3)
print('World!')
if __name__ == '__main__':
loop.run_until_complete(hello2())
# Asyncio Example2
loop = asyncio.get_event_loop()
# A native coroutine needs no decorator; applying asyncio.coroutine here is
# redundant (and the decorator is deprecated).
async def hello2():
print('Hello')
await asyncio.sleep(3)
print('World!')
if __name__ == '__main__':
loop.run_until_complete(hello2())
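On current Python (3.7+) the same example needs neither the deprecated decorator nor manual event-loop management; a minimal equivalent:

# Modern equivalent of the examples above: asyncio.run() creates and closes the
# event loop, and a native coroutine needs no decorator.
import asyncio

async def hello3():
    print('Hello')
    await asyncio.sleep(3)
    print('World!')

if __name__ == '__main__':
    asyncio.run(hello3())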
| 14.627907
| 37
| 0.669316
|
cf81062ee4bd673b8f8ce35fef75cb54a1b7df6f
| 6,697
|
py
|
Python
|
PyObjCTest/test_nsatstypesetter.py
|
Khan/pyobjc-framework-Cocoa
|
f8b015ea2a72d8d78be6084fb12925c4785b8f1f
|
[
"MIT"
] | 132
|
2015-01-01T10:02:42.000Z
|
2022-03-09T12:51:01.000Z
|
mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsatstypesetter.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 6
|
2015-01-06T08:23:19.000Z
|
2019-03-14T12:22:06.000Z
|
mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsatstypesetter.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 27
|
2015-02-23T11:51:43.000Z
|
2022-03-07T02:34:18.000Z
|
from PyObjCTools.TestSupport import *
from AppKit import *
class TestNSATSTypesetterHelper (NSATSTypesetter):
def willSetLineFragmentRect_forGlyphRange_usedRect_baselineOffset_(
self, lineRect, glyphRange, usedRect, offset):
return None
def shouldBreakLineByWordBeforeCharacterAtIndex_(self, v):
return None
def shouldBreakLineByHyphenatingBeforeCharacterAtIndex_(self, v):
return True
def hyphenationFactorForGlyphAtIndex_(self, v):
return None
def hyphenCharacterForGlyphAtIndex_(self, v):
return None
def boundingBoxForControlGlyphAtIndex_forTextContainer_proposedLineFragment_glyphPosition_characterIndex_(self, v, v2, v3, v4, v5):
return None
def characterRangeForGlyphRange_actualGlyphRange_(self, v1, v2):
pass
def glyphRangeForCharacterRange_actualCharacterRange_(self, v1, v2):
pass
def getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_(self, v1, v2, v3, v4, v5):
pass
def setLineFragmentRect_forGlyphRange_usedRect_baselineOffset_(self, v1, v2, v3, v4):
pass
def substituteGlyphsInRange_withGlyphs_(self, v1, v2):
pass
def insertGlyph_atGlyphIndex_characterIndex_(self, v1, v2, v3):
pass
def deleteGlyphsInRange_(self, v1):
pass
def setNotShownAttribute_forGlyphRange_(self, v1, v2):
pass
def setLocation_withAdvancements_forStartOfGlyphRange_(self, v1, v2, v3):
pass
def setAttachmentSize_forGlyphRange_(self, v1, v2):
pass
def setBidiLevels_forGlyphRange_(self, v1, v2):
pass
class TestNSATSTypesetter (TestCase):
def testByRefArguments(self):
self.assertArgIsOut(NSATSTypesetter.lineFragmentRectForProposedRect_remainingRect_, 1)
self.assertArgIsInOut(NSATSTypesetter.layoutParagraphAtPoint_, 0)
self.assertArgIsOut(NSATSTypesetter.getLineFragmentRect_usedRect_forParagraphSeparatorGlyphRange_atProposedOrigin_, 0)
self.assertArgIsOut(NSATSTypesetter.getLineFragmentRect_usedRect_forParagraphSeparatorGlyphRange_atProposedOrigin_, 1)
o = TestNSATSTypesetterHelper.alloc().init()
m = o.willSetLineFragmentRect_forGlyphRange_usedRect_baselineOffset_.__metadata__()
self.assertStartswith(m['arguments'][2]['type'], b'N^{')
self.assertStartswith(m['arguments'][4]['type'], b'N^{')
self.assertStartswith(m['arguments'][5]['type'], b'N^' + objc._C_CGFloat)
m = o.shouldBreakLineByWordBeforeCharacterAtIndex_.__metadata__()
self.assertEqual(m['retval']['type'], objc._C_NSBOOL)
self.assertEqual(m['arguments'][2]['type'], objc._C_NSUInteger)
m = o.shouldBreakLineByHyphenatingBeforeCharacterAtIndex_.__metadata__()
self.assertEqual(m['retval']['type'], objc._C_NSBOOL)
self.assertEqual(m['arguments'][2]['type'], objc._C_NSUInteger)
m = o.hyphenationFactorForGlyphAtIndex_.__metadata__()
self.assertEqual(m['retval']['type'], objc._C_FLT)
self.assertEqual(m['arguments'][2]['type'], objc._C_NSUInteger)
#m = o.hyphenCharacterForGlyphAtIndex_.__metadata__()
#self.assertEqual(m['retval']['type'], objc._C_UINT)
#self.assertEqual(m['arguments'][2]['type'], objc._C_NSUInteger)
#m = o.boundingBoxForControlGlyphAtIndex_forTextContainer_proposedLineFragment_glyphPosition_characterIndex_.__metadata__()
#self.assertEqual(m['retval']['type'], NSRect.__typestr__)
#self.assertEqual(m['arguments'][2]['type'], objc._C_NSUInteger)
#self.assertEqual(m['arguments'][3]['type'], objc._C_ID)
#self.assertStartswith(m['arguments'][4]['type'], '{')
#self.assertStartswith(m['arguments'][5]['type'], '{')
#self.assertStartswith(m['arguments'][6]['type'], objc._C_NSUInteger)
#m = o.characterRangeForGlyphRange_actualGlyphRange_.__metadata__()
#self.assertStartswith(m['retval']['type'], '{')
#self.assertStartswith(m['arguments'][2]['type'], '{')
#self.assertStartswith(m['arguments'][3]['type'], 'o^{')
m = o.glyphRangeForCharacterRange_actualCharacterRange_.__metadata__()
self.assertStartswith(m['retval']['type'], b'{')
self.assertStartswith(m['arguments'][2]['type'], b'{')
self.assertStartswith(m['arguments'][3]['type'], b'o^{')
#m = o.getGlyphsInRange_glyphs_characterIndexes_glyphInscriptions_elasticBits_.__metadata__()
#self.assertEqual(m['retval']['type'], objc._C_NSUInteger)
#self.assertEqual(m['arguments'][2]['type'], NSRange.__typestr__)
#self.assertEqual(m['arguments'][3]['type'], 'o^S')
#self.assertEqual(m['arguments'][3]['c_array_length_in_arg'], 2)
#self.assertEqual(m['arguments'][4]['type'], objc._C_OUT + objc._C_PTR + objc._C_NSUInteger)
#self.assertEqual(m['arguments'][4]['c_array_length_in_arg'], 2)
#m = o.setLineFragmentRect_forGlyphRange_usedRect_baselineOffset_.__metadata__()
#m = o.substituteGlyphsInRange_withGlyphs_.__metadata__()
#m = o.insertGlyph_atGlyphIndex_characterIndex_.__metadata__()
#m = o.deleteGlyphsInRange_.__metadata__()
#m = o.setNotShownAttribute_forGlyphRange_.__metadata__()
#m = o.setLocation_withAdvancements_forStartOfGlyphRange_.__metadata__()
#m = o.setAttachmentSize_forGlyphRange_.__metadata__()
#m = o.setBidiLevels_forGlyphRange_.__metadata__()
self.assertResultIsBOOL(NSATSTypesetter.usesFontLeading)
self.assertArgIsBOOL(NSATSTypesetter.setUsesFontLeading_, 0)
self.assertResultIsBOOL(NSATSTypesetter.bidiProcessingEnabled)
self.assertArgIsBOOL(NSATSTypesetter.setBidiProcessingEnabled_, 0)
self.assertArgIsBOOL(NSATSTypesetter.setHardInvalidation_forGlyphRange_, 0)
self.assertArgIsBOOL(NSATSTypesetter.setNotShownAttribute_forGlyphRange_, 0)
self.assertArgIsBOOL(NSATSTypesetter.setDrawsOutsideLineFragment_forGlyphRange_, 0)
self.assertArgIsIn(NSATSTypesetter.setLocation_withAdvancements_forStartOfGlyphRange_, 1)
self.assertArgSizeInArg(NSATSTypesetter.setLocation_withAdvancements_forStartOfGlyphRange_, 1, 2)
self.assertArgSizeInArg(NSATSTypesetter.setBidiLevels_forGlyphRange_, 0, 1)
self.assertArgSizeInArg(NSATSTypesetter.setBidiLevels_forGlyphRange_, 0, 1)
def testSubclassProtocols(self):
self.assertResultIsBOOL(TestNSATSTypesetterHelper.shouldBreakLineByWordBeforeCharacterAtIndex_)
self.assertResultIsBOOL(TestNSATSTypesetterHelper.shouldBreakLineByHyphenatingBeforeCharacterAtIndex_)
if __name__ == "__main__":
main()
| 44.946309
| 135
| 0.727639
|
a1ee0b53b8703a6f38710ca4c419090132cf1f93
| 16,504
|
py
|
Python
|
qcodes/instrument_drivers/oxford/triton.py
|
riju-pal/QCoDeS_riju
|
816e76809160e9af457f6ef6d4aca1b0dd5eea82
|
[
"MIT"
] | 223
|
2016-10-29T15:00:24.000Z
|
2022-03-20T06:53:34.000Z
|
qcodes/instrument_drivers/oxford/triton.py
|
M1racleShih/Qcodes
|
c03029a6968e16379155aadc8b083a02e01876a6
|
[
"MIT"
] | 3,406
|
2016-10-25T10:44:50.000Z
|
2022-03-31T09:47:35.000Z
|
qcodes/instrument_drivers/oxford/triton.py
|
nikhartman/Qcodes
|
042c5e25ab9e40b20c316b4055c4842844834d1e
|
[
"MIT"
] | 263
|
2016-10-25T11:35:36.000Z
|
2022-03-31T08:53:20.000Z
|
import configparser
import re
from functools import partial
import logging
from traceback import format_exc
from typing import Optional, Any, Union, List, Dict
from qcodes import IPInstrument
from qcodes.utils.validators import Enum, Ints
from time import sleep
class Triton(IPInstrument):
r"""
Triton Driver
Args:
        tmpfile: Optional path to an exported Windows registry file from the
            registry path
            `[HKEY_CURRENT_USER\Software\Oxford Instruments\Triton System Control\Thermometry]`,
            used to extract the available temperature channels.
Status: beta-version.
TODO:
fetch registry directly from fridge-computer
"""
def __init__(
self,
name: str,
address: Optional[str] = None,
port: Optional[int] = None,
terminator: str = '\r\n',
tmpfile: Optional[str] = None,
timeout: float = 20,
**kwargs: Any):
super().__init__(name, address=address, port=port,
terminator=terminator, timeout=timeout, **kwargs)
self._heater_range_auto = False
self._heater_range_temp = [0.03, 0.1, 0.3, 1, 12, 40]
self._heater_range_curr = [0.316, 1, 3.16, 10, 31.6, 100]
self._control_channel = 5
self.add_parameter(name='time',
label='System Time',
get_cmd='READ:SYS:TIME',
get_parser=self._parse_time)
self.add_parameter(name='action',
label='Current action',
get_cmd='READ:SYS:DR:ACTN',
get_parser=self._parse_action)
self.add_parameter(name='status',
label='Status',
get_cmd='READ:SYS:DR:STATUS',
get_parser=self._parse_status)
self.add_parameter(name='pid_control_channel',
label='PID control channel',
get_cmd=self._get_control_channel,
set_cmd=self._set_control_channel,
vals=Ints(1, 16))
self.add_parameter(name='pid_mode',
label='PID Mode',
get_cmd=partial(self._get_control_param, 'MODE'),
set_cmd=partial(self._set_control_param, 'MODE'),
val_mapping={'on': 'ON', 'off': 'OFF'})
self.add_parameter(name='pid_ramp',
label='PID ramp enabled',
get_cmd=partial(self._get_control_param,
'RAMP:ENAB'),
set_cmd=partial(self._set_control_param,
'RAMP:ENAB'),
val_mapping={'on': 'ON', 'off': 'OFF'})
self.add_parameter(name='pid_setpoint',
label='PID temperature setpoint',
unit='K',
get_cmd=partial(self._get_control_param, 'TSET'),
set_cmd=partial(self._set_control_param, 'TSET'))
self.add_parameter(name='pid_rate',
label='PID ramp rate',
unit='K/min',
get_cmd=partial(self._get_control_param,
'RAMP:RATE'),
set_cmd=partial(self._set_control_param,
'RAMP:RATE'))
self.add_parameter(name='pid_range',
label='PID heater range',
# TODO: The units in the software are mA, how to
# do this correctly?
unit='mA',
get_cmd=partial(self._get_control_param, 'RANGE'),
set_cmd=partial(self._set_control_param, 'RANGE'),
vals=Enum(*self._heater_range_curr))
self.add_parameter(name='magnet_status',
label='Magnet status',
unit='',
get_cmd=partial(self._get_control_B_param, 'ACTN'))
self.add_parameter(name='magnet_sweeprate',
label='Magnet sweep rate',
unit='T/min',
get_cmd=partial(
self._get_control_B_param, 'RVST:RATE'),
set_cmd=partial(self._set_control_magnet_sweeprate_param))
self.add_parameter(name='magnet_sweeprate_insta',
label='Instantaneous magnet sweep rate',
unit='T/min',
get_cmd=partial(self._get_control_B_param, 'RFST'))
self.add_parameter(name='B',
label='Magnetic field',
unit='T',
get_cmd=partial(self._get_control_B_param, 'VECT'))
self.add_parameter(name='Bx',
label='Magnetic field x-component',
unit='T',
get_cmd=partial(
self._get_control_Bcomp_param, 'VECTBx'),
set_cmd=partial(self._set_control_Bx_param))
self.add_parameter(name='By',
label='Magnetic field y-component',
unit='T',
get_cmd=partial(
self._get_control_Bcomp_param, 'VECTBy'),
set_cmd=partial(self._set_control_By_param))
self.add_parameter(name='Bz',
label='Magnetic field z-component',
unit='T',
get_cmd=partial(
self._get_control_Bcomp_param, 'VECTBz'),
set_cmd=partial(self._set_control_Bz_param))
self.add_parameter(name='magnet_sweep_time',
label='Magnet sweep time',
unit='T/min',
get_cmd=partial(self._get_control_B_param, 'RVST:TIME'))
self.chan_alias: Dict[str, str] = {}
self.chan_temp_names: Dict[str, Dict[str, Optional[str]]] = {}
if tmpfile is not None:
self._get_temp_channel_names(tmpfile)
self._get_temp_channels()
self._get_pressure_channels()
try:
self._get_named_channels()
except:
logging.warning('Ignored an error in _get_named_channels\n' +
format_exc())
self.connect_message()
def set_B(self, x: float, y: float, z: float, s: float) -> None:
if 0 < s <= 0.2:
self.write('SET:SYS:VRM:COO:CART:RVST:MODE:RATE:RATE:' + str(s) +
':VSET:[' + str(x) + ' ' + str(y) + ' ' + str(z) + ']\r\n')
self.write('SET:SYS:VRM:ACTN:RTOS\r\n')
t_wait = self.magnet_sweep_time() * 60 + 10
print('Please wait ' + str(t_wait) +
' seconds for the field sweep...')
sleep(t_wait)
else:
print('Warning: set magnet sweep rate in range (0 , 0.2] T/min')
def _get_control_B_param(
self,
param: str
) -> Optional[Union[float, str, List[float]]]:
cmd = f'READ:SYS:VRM:{param}'
return self._get_response_value(self.ask(cmd))
def _get_control_Bcomp_param(
self,
param: str
) -> Optional[Union[float, str, List[float]]]:
cmd = f'READ:SYS:VRM:{param}'
return self._get_response_value(self.ask(cmd[:-2]) + cmd[-2:])
def _get_response(self, msg: str) -> str:
return msg.split(':')[-1]
def _get_response_value(
self,
msg: str
) -> Optional[Union[float, str, List[float]]]:
msg = self._get_response(msg)
if msg.endswith('NOT_FOUND'):
return None
elif msg.endswith('IDLE'):
return 'IDLE'
elif msg.endswith('RTOS'):
return 'RTOS'
elif msg.endswith('Bx'):
return float(re.findall(r"[-+]?\d*\.\d+|\d+", msg)[0])
elif msg.endswith('By'):
return float(re.findall(r"[-+]?\d*\.\d+|\d+", msg)[1])
elif msg.endswith('Bz'):
return float(re.findall(r"[-+]?\d*\.\d+|\d+", msg)[2])
elif len(re.findall(r"[-+]?\d*\.\d+|\d+", msg)) > 1:
return [float(re.findall(r"[-+]?\d*\.\d+|\d+", msg)[0]), float(re.findall(r"[-+]?\d*\.\d+|\d+", msg)[1]), float(re.findall(r"[-+]?\d*\.\d+|\d+", msg)[2])]
try:
return float(re.findall(r"[-+]?\d*\.\d+|\d+", msg)[0])
except Exception:
return msg
def get_idn(self) -> Dict[str, Optional[str]]:
""" Return the Instrument Identifier Message """
idstr = self.ask('*IDN?')
idparts = [p.strip() for p in idstr.split(':', 4)][1:]
return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))
def _get_control_channel(self, force_get: bool = False) -> int:
# verify current channel
if self._control_channel and not force_get:
tempval = self.ask(
f'READ:DEV:T{self._control_channel}:TEMP:LOOP:MODE')
if not tempval.endswith('NOT_FOUND'):
return self._control_channel
# either _control_channel is not set or wrong
for i in range(1, 17):
tempval = self.ask(f'READ:DEV:T{i}:TEMP:LOOP:MODE')
if not tempval.endswith('NOT_FOUND'):
self._control_channel = i
break
return self._control_channel
def _set_control_channel(self, channel: int) -> None:
self._control_channel = channel
self.write('SET:DEV:T{}:TEMP:LOOP:HTR:H1'.format(
self._get_control_channel()))
def _get_control_param(
self,
param: str
) -> Optional[Union[float, str, List[float]]]:
chan = self._get_control_channel()
cmd = f'READ:DEV:T{chan}:TEMP:LOOP:{param}'
return self._get_response_value(self.ask(cmd))
def _set_control_param(self, param: str, value: float) -> None:
chan = self._get_control_channel()
cmd = f'SET:DEV:T{chan}:TEMP:LOOP:{param}:{value}'
self.write(cmd)
def _set_control_magnet_sweeprate_param(self, s: float) -> None:
if 0 < s <= 0.2:
x = round(self.Bx(), 4)
y = round(self.By(), 4)
z = round(self.Bz(), 4)
self.write('SET:SYS:VRM:COO:CART:RVST:MODE:RATE:RATE:' + str(s) +
':VSET:[' + str(x) + ' ' + str(y) + ' ' + str(z) + ']\r\n')
else:
print(
'Warning: set sweeprate in range (0 , 0.2] T/min, not setting sweeprate')
def _set_control_Bx_param(self, x: float) -> None:
s = self.magnet_sweeprate()
y = round(self.By(), 4)
z = round(self.Bz(), 4)
self.write('SET:SYS:VRM:COO:CART:RVST:MODE:RATE:RATE:' + str(s) +
':VSET:[' + str(x) + ' ' + str(y) + ' ' + str(z) + ']\r\n')
self.write('SET:SYS:VRM:ACTN:RTOS\r\n')
        # just to give a time estimate, +10s for overhead
t_wait = self.magnet_sweep_time() * 60 + 10
print('Please wait ' + str(t_wait) + ' seconds for the field sweep...')
while self.magnet_status() != 'IDLE':
pass
def _set_control_By_param(self, y: float) -> None:
s = self.magnet_sweeprate()
x = round(self.Bx(), 4)
z = round(self.Bz(), 4)
self.write('SET:SYS:VRM:COO:CART:RVST:MODE:RATE:RATE:' + str(s) +
':VSET:[' + str(x) + ' ' + str(y) + ' ' + str(z) + ']\r\n')
self.write('SET:SYS:VRM:ACTN:RTOS\r\n')
        # just to give a time estimate, +10s for overhead
t_wait = self.magnet_sweep_time() * 60 + 10
print('Please wait ' + str(t_wait) + ' seconds for the field sweep...')
while self.magnet_status() != 'IDLE':
pass
def _set_control_Bz_param(self, z: float) -> None:
s = self.magnet_sweeprate()
x = round(self.Bx(), 4)
y = round(self.By(), 4)
self.write('SET:SYS:VRM:COO:CART:RVST:MODE:RATE:RATE:' + str(s) +
':VSET:[' + str(x) + ' ' + str(y) + ' ' + str(z) + ']\r\n')
self.write('SET:SYS:VRM:ACTN:RTOS\r\n')
        # just to give a time estimate, +10s for overhead
t_wait = self.magnet_sweep_time() * 60 + 10
print('Please wait ' + str(t_wait) + ' seconds for the field sweep...')
while self.magnet_status() != 'IDLE':
pass
def _get_named_channels(self) -> None:
allchans_str = self.ask('READ:SYS:DR:CHAN')
allchans = allchans_str.replace('STAT:SYS:DR:CHAN:', '', 1).split(':')
for ch in allchans:
msg = 'READ:SYS:DR:CHAN:%s' % ch
rep = self.ask(msg)
if 'INVALID' not in rep and 'NONE' not in rep:
alias, chan = rep.split(':')[-2:]
self.chan_alias[alias] = chan
self.add_parameter(name=alias,
unit='K',
get_cmd='READ:DEV:%s:TEMP:SIG:TEMP' % chan,
get_parser=self._parse_temp)
def _get_pressure_channels(self) -> None:
chan_pressure_list = []
for i in range(1, 7):
chan = 'P%d' % i
chan_pressure_list.append(chan)
self.add_parameter(name=chan,
unit='bar',
get_cmd='READ:DEV:%s:PRES:SIG:PRES' % chan,
get_parser=self._parse_pres)
self.chan_pressure = set(chan_pressure_list)
def _get_temp_channel_names(self, file: str) -> None:
config = configparser.ConfigParser()
with open(file, encoding='utf16') as f:
next(f)
config.read_file(f)
for section in config.sections():
options = config.options(section)
namestr = '"m_lpszname"'
if namestr in options:
chan_number = int(section.split('\\')[-1].split('[')[-1]) + 1
                # the names used in the registry file are base 0, but the API and the GUI
                # use base-one names, so add one
chan = 'T' + str(chan_number)
name = config.get(section, '"m_lpszname"').strip("\"")
self.chan_temp_names[chan] = {'name': name, 'value': None}
def _get_temp_channels(self) -> None:
chan_temps_list = []
for i in range(1, 17):
chan = 'T%d' % i
chan_temps_list.append(chan)
self.add_parameter(name=chan,
unit='K',
get_cmd='READ:DEV:%s:TEMP:SIG:TEMP' % chan,
get_parser=self._parse_temp)
self.chan_temps = set(chan_temps_list)
def _parse_action(self, msg: str) -> str:
""" Parse message and return action as a string
Args:
msg: message string
Returns
action: string describing the action
"""
action = msg[17:]
if action == 'PCL':
action = 'Precooling'
elif action == 'EPCL':
action = 'Empty precool loop'
elif action == 'COND':
action = 'Condensing'
elif action == 'NONE':
if self.MC.get() < 2:
action = 'Circulating'
else:
action = 'Idle'
elif action == 'COLL':
action = 'Collecting mixture'
else:
action = 'Unknown'
return action
def _parse_status(self, msg: str) -> str:
return msg[19:]
def _parse_time(self, msg: str) -> str:
return msg[14:]
def _parse_temp(self, msg: str) -> Optional[float]:
if 'NOT_FOUND' in msg:
return None
return float(msg.split('SIG:TEMP:')[-1].strip('K'))
def _parse_pres(self, msg: str) -> Optional[float]:
if 'NOT_FOUND' in msg:
return None
return float(msg.split('SIG:PRES:')[-1].strip('mB')) * 1e3
def _recv(self) -> str:
return super()._recv().rstrip()
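A hypothetical session with the driver above. The station name, IP address, port and setpoints are placeholders; which named channels exist depends on the fridge and its registry file.

# Hypothetical usage of the Triton driver above; connection details and values
# are placeholders for a real fridge.
from qcodes.instrument_drivers.oxford.triton import Triton

fridge = Triton('triton1', address='192.168.0.2', port=33576, timeout=20)

print(fridge.status())           # overall system status string
print(fridge.T5())               # temperature of channel T5, in K

fridge.pid_control_channel(5)    # regulate on channel T5
fridge.pid_setpoint(0.05)        # 50 mK setpoint
fridge.pid_mode('on')

fridge.close()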
| 40.253659
| 166
| 0.503454
|
d7d5bd0adfc158fb4c0e5fb4aef4582a281d1d79
| 1,774
|
py
|
Python
|
cognitive_services/speech_to_text/google_speech_to_text.py
|
franklinlindemberg/cognitive-services
|
043c6a625cd66a9351ae1366e05b1fef2907cd08
|
[
"MIT"
] | null | null | null |
cognitive_services/speech_to_text/google_speech_to_text.py
|
franklinlindemberg/cognitive-services
|
043c6a625cd66a9351ae1366e05b1fef2907cd08
|
[
"MIT"
] | null | null | null |
cognitive_services/speech_to_text/google_speech_to_text.py
|
franklinlindemberg/cognitive-services
|
043c6a625cd66a9351ae1366e05b1fef2907cd08
|
[
"MIT"
] | null | null | null |
import base64
import json
#from googleapiclient.discovery import build
from .speech_to_text import SpeechToText
from .speech_to_text_exception import SpeechToTextException
class GoogleSpeechToText(SpeechToText):
def __init__(self, credentials):
api_key = credentials.get('api_key')
if not api_key:
raise SpeechToTextException(code=400, message='Missing parameters: api_key')
#self.speechToText = build('speech', 'v1', developerKey=api_key)
def recognize(self, audio, language):
errors = []
if not language:
errors.append("language")
if not audio:
errors.append("audio")
if len(errors) > 0:
raise SpeechToTextException(code=400, message='Missing parameters: {0}'.format(', '.join(errors)))
if language == 'en':
language_code = 'en-US'
elif language == 'pt-br':
language_code = 'pt-BR'
else:
raise SpeechToTextException(code=400, message='Invalid language value')
body = {
'audio': {
'content': base64.b64encode(audio.read()).decode('utf-8')
},
'config': {
'languageCode': language_code
}
}
# try:
# return self.speechToText.speech().recognize(
# body=body
# ).execute()
# except Exception as ex:
# error = json.loads(ex.content).get('error')
# if error:
# raise SpeechToTextException(code=error['code'], message=error['message'])
# else:
# raise SpeechToTextException(code=500, message='Unknown error')
def recognize_from_url(self, audio_url, language):
pass
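A hypothetical call pattern for the wrapper above. The API key, audio file and import paths are placeholders, and recognize() only returns results once the commented-out googleapiclient calls are re-enabled.

# Hypothetical usage sketch; credentials and file names are placeholders.
from cognitive_services.speech_to_text.google_speech_to_text import GoogleSpeechToText
from cognitive_services.speech_to_text.speech_to_text_exception import SpeechToTextException

stt = GoogleSpeechToText({'api_key': 'YOUR_API_KEY'})   # placeholder credential

try:
    with open('sample.wav', 'rb') as audio:
        result = stt.recognize(audio, 'en')
    print(result)
except SpeechToTextException as ex:
    print('speech-to-text failed:', ex)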
| 30.586207
| 110
| 0.57779
|
e05659474c38b80529682a75f506e7e4ae9ef5c3
| 1,818
|
py
|
Python
|
tests/helper.py
|
Docheinstein/lzw3
|
39098f01b66f8860390123e2662ef0662667748d
|
[
"MIT"
] | 1
|
2020-06-11T10:16:55.000Z
|
2020-06-11T10:16:55.000Z
|
tests/helper.py
|
Docheinstein/LZW3
|
39098f01b66f8860390123e2662ef0662667748d
|
[
"MIT"
] | 2
|
2019-03-05T22:51:19.000Z
|
2021-01-27T22:45:44.000Z
|
tests/helper.py
|
Docheinstein/LZW3
|
39098f01b66f8860390123e2662ef0662667748d
|
[
"MIT"
] | null | null | null |
import os
import shutil
from typing import List
from unittest import TestCase
from lzw3.commons.constants import LZWConstants
from lzw3.commons.log import Logger
from lzw3.commons.utils import read_binary_file
from lzw3.compressor import LZWCompressor
from lzw3.decompressor import LZWDecompressor
def remove_folder(path: str):
if os.path.exists(path):
shutil.rmtree(path)
def create_folder(path: str):
if not os.path.exists(path):
os.makedirs(path)
class LZWTestHelper(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.verbose = False
Logger.enable_logger(False)
def _test_files(self, files: List[str]):
for file in files:
uncompressed_file_path = file
compressed_file_path = file + LZWConstants.COMPRESSED_FILE_EXTENSION
decompressed_file_path = compressed_file_path + ".after"
file_content = read_binary_file(uncompressed_file_path)
self.__print("Compressing '" + uncompressed_file_path + "'")
LZWCompressor().compress(
uncompressed_file_path,
compressed_file_path
)
self.__print("Decompressing '" + compressed_file_path + "'")
LZWDecompressor().decompress(
compressed_file_path,
decompressed_file_path
)
file_content_after = read_binary_file(decompressed_file_path)
self.assertEqual(file_content, file_content_after,
"Content after compression and decompression of file '"
+ file + "' is not the same!")
self.__print("")
def __print(self, *args, **kwargs):
if self.verbose:
print(*args, **kwargs)
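A concrete test case built on the helper above might look like the sketch below; the sample file paths are placeholders for whatever fixtures the test suite ships.

# Hypothetical round-trip test using LZWTestHelper; the resource paths are
# placeholders and would need to exist for the test to pass.
from tests.helper import LZWTestHelper

class LZWRoundTripTest(LZWTestHelper):
    RES_DIR = 'tests/res'   # assumed location of sample inputs

    def test_text_round_trip(self):
        self._test_files([
            self.RES_DIR + '/lorem.txt',
            self.RES_DIR + '/numbers.csv',
        ])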
| 30.813559
| 84
| 0.636964
|
27796fdc630f4e1dd508a2fbbeb3141aab1361df
| 1,189
|
py
|
Python
|
dl/data/txtrecog/datasets/base.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 2
|
2021-02-06T22:40:13.000Z
|
2021-03-26T09:15:34.000Z
|
dl/data/txtrecog/datasets/base.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 8
|
2020-07-11T07:10:51.000Z
|
2022-03-12T00:39:03.000Z
|
dl/data/txtrecog/datasets/base.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 2
|
2021-03-26T09:19:42.000Z
|
2021-07-27T02:38:09.000Z
|
import string
from ...objrecog.datasets.base import ObjectRecognitionDatasetBase
Alphabet_labels = list(string.ascii_lowercase)
Alphabet_with_upper_labels = Alphabet_labels + list(string.ascii_uppercase)
Alphabet_numbers = len(Alphabet_labels)
Alphabet_with_upper_numbers = len(Alphabet_with_upper_labels)
Number_labels = [str(i) for i in range(10)]
Number_numbers = len(Number_labels)
Alphanumeric_labels = Alphabet_labels + Number_labels
Alphanumeric_numbers = Alphabet_numbers + Number_numbers
Alphanumeric_with_upper_labels = Alphabet_with_upper_labels + Number_labels
Alphanumeric_with_upper_numbers = Alphabet_with_upper_numbers + Number_numbers
Alphanumeric_with_blank_labels = ['-'] + Alphabet_labels + Number_labels
Alphanumeric_with_blank_numbers = 1 + Alphabet_numbers + Number_numbers
Alphanumeric_with_upper_and_blank_labels = ['-'] + Alphabet_with_upper_labels + Number_labels
Alphanumeric_with_upper_and_blank_numbers = 1 + Alphabet_with_upper_numbers + Number_numbers
class TextRecognitionDatasetBase(ObjectRecognitionDatasetBase):
def __getitem__(self, index):
img, targets = self.get_imgtarget(index)
texts = targets[0]
return img, texts
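The label lists above are typically consumed by CTC-style recognizers as a character-to-index vocabulary; the sketch below shows that assumed usage and is not part of the module itself.

# Assumed usage of the label lists above: a char-to-index vocabulary where
# index 0 is the blank symbol '-'.
from dl.data.txtrecog.datasets.base import (
    Alphanumeric_with_blank_labels, Alphanumeric_with_blank_numbers)

char2index = {c: i for i, c in enumerate(Alphanumeric_with_blank_labels)}

def encode(text):
    """Map a lower-case alphanumeric string to label indices, skipping unknowns."""
    return [char2index[c] for c in text if c in char2index]

assert Alphanumeric_with_blank_numbers == len(Alphanumeric_with_blank_labels)
print(encode('abc123'))   # [1, 2, 3, 28, 29, 30]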
| 44.037037
| 93
| 0.830109
|
6a2e8c7a0db65f1823e061925af11794f91469d5
| 8,199
|
py
|
Python
|
LogSystem_JE/venv/Lib/site-packages/pygments/lexers/grammar_notation.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
LogSystem_JE/venv/Lib/site-packages/pygments/lexers/grammar_notation.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
LogSystem_JE/venv/Lib/site-packages/pygments/lexers/grammar_notation.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pygments.lexers.grammar_notation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for grammar notations like BNF.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, this, using, words
from pygments.token import Comment, Keyword, Literal, Name, Number, \
Operator, Punctuation, String, Text
__all__ = ['BnfLexer', 'AbnfLexer', 'JsgfLexer', 'PegLexer']
class BnfLexer(RegexLexer):
"""
This lexer is for grammar notations which are similar to
original BNF.
    In order to maximize the number of grammar notations this lexer can target,
    the following design decisions were made:
* We don't distinguish `Terminal Symbol`.
    * We do assume that a `NonTerminal Symbol` is always enclosed
      in angle brackets.
* We do assume that `NonTerminal Symbol` may include
any printable characters except arrow brackets and ASCII 0x20.
This assumption is for `RBNF <http://www.rfc-base.org/txt/rfc-5511.txt>`_.
    * We do assume that the target notation doesn't support comments.
* We don't distinguish any operators and punctuation except
`::=`.
    Though these design decisions may result in rather minimal highlighting
    and you might be disappointed, we consider them a reasonable trade-off.
.. versionadded:: 2.1
"""
name = 'BNF'
aliases = ['bnf']
filenames = ['*.bnf']
mimetypes = ['text/x-bnf']
tokens = {
'root': [
(r'(<)([ -;=?-~]+)(>)',
bygroups(Punctuation, Name.Class, Punctuation)),
            # the only operator
(r'::=', Operator),
# fallback
(r'[^<>:]+', Text), # for performance
(r'.', Text),
],
}
class AbnfLexer(RegexLexer):
"""
Lexer for `IETF 7405 ABNF
<http://www.ietf.org/rfc/rfc7405.txt>`_
(Updates `5234 <http://www.ietf.org/rfc/rfc5234.txt>`_)
grammars.
.. versionadded:: 2.1
"""
name = 'ABNF'
aliases = ['abnf']
filenames = ['*.abnf']
mimetypes = ['text/x-abnf']
_core_rules = (
'ALPHA', 'BIT', 'CHAR', 'CR', 'CRLF', 'CTL', 'DIGIT',
'DQUOTE', 'HEXDIG', 'HTAB', 'LF', 'LWSP', 'OCTET',
'SP', 'VCHAR', 'WSP')
tokens = {
'root': [
# comment
(r';.*$', Comment.Single),
# quoted
            # a double quote cannot appear inside a quoted string; it must be written as '%x22'.
(r'(%[si])?"[^"]*"', Literal),
            # binary (rarely seen in practice)
(r'%b[01]+\-[01]+\b', Literal), # range
(r'%b[01]+(\.[01]+)*\b', Literal), # concat
# decimal
(r'%d[0-9]+\-[0-9]+\b', Literal), # range
(r'%d[0-9]+(\.[0-9]+)*\b', Literal), # concat
# hexadecimal
(r'%x[0-9a-fA-F]+\-[0-9a-fA-F]+\b', Literal), # range
(r'%x[0-9a-fA-F]+(\.[0-9a-fA-F]+)*\b', Literal), # concat
# repetition (<a>*<b>element) including nRule
(r'\b[0-9]+\*[0-9]+', Operator),
(r'\b[0-9]+\*', Operator),
(r'\b[0-9]+', Operator),
(r'\*', Operator),
            # Strictly speaking, these are not keywords but
            # the so-called `Core Rules'.
(words(_core_rules, suffix=r'\b'), Keyword),
# nonterminals (ALPHA *(ALPHA / DIGIT / "-"))
(r'[a-zA-Z][a-zA-Z0-9-]+\b', Name.Class),
# operators
(r'(=/|=|/)', Operator),
# punctuation
(r'[\[\]()]', Punctuation),
# fallback
(r'\s+', Text),
(r'.', Text),
],
}
class JsgfLexer(RegexLexer):
"""
For `JSpeech Grammar Format <https://www.w3.org/TR/jsgf/>`_
grammars.
.. versionadded:: 2.2
"""
name = 'JSGF'
aliases = ['jsgf']
filenames = ['*.jsgf']
mimetypes = ['application/jsgf', 'application/x-jsgf', 'text/jsgf']
flags = re.MULTILINE | re.UNICODE
tokens = {
'root': [
include('comments'),
include('non-comments'),
],
'comments': [
(r'/\*\*(?!/)', Comment.Multiline, 'documentation comment'),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*', Comment.Single),
],
'non-comments': [
(r'\A#JSGF[^;]*', Comment.Preproc),
(r'\s+', Text),
(r';', Punctuation),
(r'[=|()\[\]*+]', Operator),
(r'/[^/]+/', Number.Float),
(r'"', String.Double, 'string'),
(r'\{', String.Other, 'tag'),
(words(('import', 'public'), suffix=r'\b'), Keyword.Reserved),
(r'grammar\b', Keyword.Reserved, 'grammar name'),
(r'(<)(NULL|VOID)(>)',
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r'<', Punctuation, 'rulename'),
(r'\w+|[^\s;=|()\[\]*+/"{<\w]+', Text),
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\.', String.Escape),
(r'[^\\"]+', String.Double),
],
'tag': [
(r'\}', String.Other, '#pop'),
(r'\\.', String.Escape),
(r'[^\\}]+', String.Other),
],
'grammar name': [
(r';', Punctuation, '#pop'),
(r'\s+', Text),
(r'\.', Punctuation),
(r'[^;\s.]+', Name.Namespace),
],
'rulename': [
(r'>', Punctuation, '#pop'),
(r'\*', Punctuation),
(r'\s+', Text),
(r'([^.>]+)(\s*)(\.)', bygroups(Name.Namespace, Text, Punctuation)),
(r'[^.>]+', Name.Constant),
],
'documentation comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'(^\s*\*?\s*)(@(?:example|see)\s+)'
r'([\w\W]*?(?=(?:^\s*\*?\s*@|\*/)))',
bygroups(Comment.Multiline, Comment.Special,
using(this, state='example'))),
(r'(^\s*\*?\s*)(@\S*)',
bygroups(Comment.Multiline, Comment.Special)),
(r'[^*\n@]+|\w|\W', Comment.Multiline),
],
'example': [
(r'\n\s*\*', Comment.Multiline),
include('non-comments'),
(r'.', Comment.Multiline),
],
}
class PegLexer(RegexLexer):
"""
This lexer is for `Parsing Expression Grammars
<https://bford.info/pub/lang/peg.pdf>`_ (PEG).
Various implementations of PEG have made different decisions
regarding the syntax, so let's try to be accommodating:
* `<-`, `←`, `:`, and `=` are all accepted as rule operators.
* Both `|` and `/` are choice operators.
* `^`, `↑`, and `~` are cut operators.
* A single `a-z` character immediately before a string, or
multiple `a-z` characters following a string, are part of the
string (e.g., `r"..."` or `"..."ilmsuxa`).
.. versionadded:: 2.6
"""
name = 'PEG'
aliases = ['peg']
filenames = ['*.peg']
mimetypes = ['text/x-peg']
tokens = {
'root': [
# Comments
(r'#.*', Comment.Single),
# All operators
(r'<-|[←:=/|&!?*+^↑~]', Operator),
# Other punctuation
(r'[()]', Punctuation),
# Keywords
(r'\.', Keyword),
# Character classes
(r'(\[)([^\]]*(?:\\.[^\]\\]*)*)(\])',
bygroups(Punctuation, String, Punctuation)),
# Single and double quoted strings (with optional modifiers)
(r'[a-z]?"[^"\\]*(?:\\.[^"\\]*)*"[a-z]*', String.Double),
(r"[a-z]?'[^'\\]*(?:\\.[^'\\]*)*'[a-z]*", String.Single),
# Nonterminals are not whitespace, operators, or punctuation
(r'[^\s<←:=/|&!?*+\^↑~()\[\]"\'#]+', Name.Class),
# Fallback
(r'.', Text),
],
}
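A quick way to exercise the lexers above is to tokenise a tiny grammar and inspect the token stream; the sample rules below are made up.

# Sanity check for the lexers above: print the token stream for small samples.
from pygments.lexers.grammar_notation import AbnfLexer, BnfLexer

bnf_sample = "<expr> ::= <term> | <expr> + <term>"
abnf_sample = "name = 1*ALPHA ; a run of letters\n"

for lexer, sample in ((BnfLexer(), bnf_sample), (AbnfLexer(), abnf_sample)):
    for token_type, value in lexer.get_tokens(sample):
        print(lexer.name, token_type, repr(value))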
| 30.254613
| 81
| 0.450055
|
90a0282f1f88ea48a054807762b01f8954bef1fa
| 11,801
|
py
|
Python
|
lib/python3.7/site-packages/elasticsearch_dsl/faceted_search.py
|
nguyentranhoan/uit-mobile
|
8546312b01373d94cf00c64f7eacb769e0f4ccce
|
[
"BSD-3-Clause"
] | 2
|
2020-04-12T02:40:34.000Z
|
2021-08-10T17:59:55.000Z
|
lib/python3.7/site-packages/elasticsearch_dsl/faceted_search.py
|
nguyentranhoan/uit-mobile
|
8546312b01373d94cf00c64f7eacb769e0f4ccce
|
[
"BSD-3-Clause"
] | 7
|
2020-06-06T01:06:19.000Z
|
2022-02-10T11:15:14.000Z
|
lib/python3.7/site-packages/elasticsearch_dsl/faceted_search.py
|
nguyentranhoan/uit-mobile
|
8546312b01373d94cf00c64f7eacb769e0f4ccce
|
[
"BSD-3-Clause"
] | 1
|
2020-07-27T05:10:30.000Z
|
2020-07-27T05:10:30.000Z
|
from datetime import timedelta, datetime
from six import iteritems, itervalues
from .search import Search
from .aggs import A
from .utils import AttrDict
from .response import Response
from .query import Terms, Nested, Range, MatchAll
__all__ = [
'FacetedSearch', 'HistogramFacet', 'TermsFacet', 'DateHistogramFacet', 'RangeFacet',
'NestedFacet',
]
class Facet(object):
"""
    A facet on a faceted search. Wraps an aggregation and provides functionality
to create a filter for selected values and return a list of facet values
from the result of the aggregation.
"""
agg_type = None
def __init__(self, metric=None, metric_sort='desc', **kwargs):
self.filter_values = ()
self._params = kwargs
self._metric = metric
if metric and metric_sort:
self._params['order'] = {'metric': metric_sort}
def get_aggregation(self):
"""
Return the aggregation object.
"""
agg = A(self.agg_type, **self._params)
if self._metric:
agg.metric('metric', self._metric)
return agg
def add_filter(self, filter_values):
"""
Construct a filter.
"""
if not filter_values:
return
f = self.get_value_filter(filter_values[0])
for v in filter_values[1:]:
f |= self.get_value_filter(v)
return f
def get_value_filter(self, filter_value):
"""
Construct a filter for an individual value
"""
pass
def is_filtered(self, key, filter_values):
"""
        Return whether a filter is active for the given key.
"""
return key in filter_values
def get_value(self, bucket):
"""
        Return a value representing a bucket; by default this is the bucket's key.
"""
return bucket['key']
def get_metric(self, bucket):
"""
Return a metric, by default doc_count for a bucket.
"""
if self._metric:
return bucket['metric']['value']
return bucket['doc_count']
def get_values(self, data, filter_values):
"""
Turn the raw bucket data into a list of tuples containing the key,
number of documents and a flag indicating whether this value has been
selected or not.
"""
out = []
for bucket in data.buckets:
key = self.get_value(bucket)
out.append((
key,
self.get_metric(bucket),
self.is_filtered(key, filter_values)
))
return out
class TermsFacet(Facet):
agg_type = 'terms'
def add_filter(self, filter_values):
""" Create a terms filter instead of bool containing term filters. """
if filter_values:
return Terms(_expand__to_dot=False, **{self._params['field']: filter_values})
class RangeFacet(Facet):
agg_type = 'range'
def _range_to_dict(self, range):
key, range = range
out = {'key': key}
if range[0] is not None:
out['from'] = range[0]
if range[1] is not None:
out['to'] = range[1]
return out
def __init__(self, ranges, **kwargs):
super(RangeFacet, self).__init__(**kwargs)
self._params['ranges'] = list(map(self._range_to_dict, ranges))
self._params['keyed'] = False
self._ranges = dict(ranges)
def get_value_filter(self, filter_value):
f, t = self._ranges[filter_value]
limits = {}
if f is not None:
limits['gte'] = f
if t is not None:
limits['lt'] = t
return Range(_expand__to_dot=False, **{
self._params['field']: limits
})
class HistogramFacet(Facet):
agg_type = 'histogram'
def get_value_filter(self, filter_value):
return Range(_expand__to_dot=False, **{
self._params['field']: {
'gte': filter_value,
'lt': filter_value + self._params['interval']
}
})
class DateHistogramFacet(Facet):
agg_type = 'date_histogram'
DATE_INTERVALS = {
'month': lambda d: (d+timedelta(days=32)).replace(day=1),
'week': lambda d: d+timedelta(days=7),
'day': lambda d: d+timedelta(days=1),
'hour': lambda d: d+timedelta(hours=1),
}
def __init__(self, **kwargs):
kwargs.setdefault("min_doc_count", 0)
super(DateHistogramFacet, self).__init__(**kwargs)
def get_value(self, bucket):
if not isinstance(bucket['key'], datetime):
# Elasticsearch returns key=None instead of 0 for date 1970-01-01,
# so we need to set key to 0 to avoid TypeError exception
if bucket['key'] is None:
bucket['key'] = 0
# Preserve milliseconds in the datetime
return datetime.utcfromtimestamp(int(bucket['key']) / 1000.0)
else:
return bucket['key']
def get_value_filter(self, filter_value):
return Range(_expand__to_dot=False, **{
self._params['field']: {
'gte': filter_value,
'lt': self.DATE_INTERVALS[self._params['interval']](filter_value)
}
})
class NestedFacet(Facet):
agg_type = 'nested'
def __init__(self, path, nested_facet):
self._path = path
self._inner = nested_facet
super(NestedFacet, self).__init__(path=path, aggs={'inner': nested_facet.get_aggregation()})
def get_values(self, data, filter_values):
return self._inner.get_values(data.inner, filter_values)
def add_filter(self, filter_values):
inner_q = self._inner.add_filter(filter_values)
if inner_q:
return Nested(path=self._path, query=inner_q)
class FacetedResponse(Response):
@property
def query_string(self):
return self._faceted_search._query
@property
def facets(self):
if not hasattr(self, '_facets'):
super(AttrDict, self).__setattr__('_facets', AttrDict({}))
for name, facet in iteritems(self._faceted_search.facets):
self._facets[name] = facet.get_values(
getattr(getattr(self.aggregations, '_filter_' + name), name),
self._faceted_search.filter_values.get(name, ())
)
return self._facets
class FacetedSearch(object):
"""
Abstraction for creating faceted navigation searches that takes care of
composing the queries, aggregations and filters as needed as well as
presenting the results in an easy-to-consume fashion::
class BlogSearch(FacetedSearch):
index = 'blogs'
doc_types = [Blog, Post]
fields = ['title^5', 'category', 'description', 'body']
facets = {
'type': TermsFacet(field='_type'),
'category': TermsFacet(field='category'),
'weekly_posts': DateHistogramFacet(field='published_from', interval='week')
}
def search(self):
' Override search to add your own filters '
s = super(BlogSearch, self).search()
return s.filter('term', published=True)
# when using:
blog_search = BlogSearch("web framework", filters={"category": "python"})
# supports pagination
blog_search[10:20]
response = blog_search.execute()
# easy access to aggregation results:
for category, hit_count, is_selected in response.facets.category:
print(
"Category %s has %d hits%s." % (
category,
hit_count,
' and is chosen' if is_selected else ''
)
)
"""
index = None
doc_types = None
fields = None
facets = {}
using = 'default'
def __init__(self, query=None, filters={}, sort=()):
"""
:arg query: the text to search for
:arg filters: facet values to filter
:arg sort: sort information to be passed to :class:`~elasticsearch_dsl.Search`
"""
self._query = query
self._filters = {}
self._sort = sort
self.filter_values = {}
for name, value in iteritems(filters):
self.add_filter(name, value)
self._s = self.build_search()
def count(self):
return self._s.count()
def __getitem__(self, k):
self._s = self._s[k]
return self
def __iter__(self):
return iter(self._s)
def add_filter(self, name, filter_values):
"""
Add a filter for a facet.
"""
# normalize the value into a list
if not isinstance(filter_values, (tuple, list)):
if filter_values is None:
return
filter_values = [filter_values, ]
# remember the filter values for use in FacetedResponse
self.filter_values[name] = filter_values
# get the filter from the facet
f = self.facets[name].add_filter(filter_values)
if f is None:
return
self._filters[name] = f
def search(self):
"""
Returns the base Search object to which the facets are added.
You can customize the query by overriding this method and returning a
modified search object.
"""
s = Search(doc_type=self.doc_types, index=self.index, using=self.using)
return s.response_class(FacetedResponse)
def query(self, search, query):
"""
Add query part to ``search``.
Override this if you wish to customize the query used.
"""
if query:
if self.fields:
return search.query('multi_match', fields=self.fields, query=query)
else:
return search.query('multi_match', query=query)
return search
def aggregate(self, search):
"""
Add aggregations representing the facets selected, including potential
filters.
"""
for f, facet in iteritems(self.facets):
agg = facet.get_aggregation()
agg_filter = MatchAll()
for field, filter in iteritems(self._filters):
if f == field:
continue
agg_filter &= filter
search.aggs.bucket(
'_filter_' + f,
'filter',
filter=agg_filter
).bucket(f, agg)
def filter(self, search):
"""
Add a ``post_filter`` to the search request narrowing the results based
on the facet filters.
"""
if not self._filters:
return search
post_filter = MatchAll()
for f in itervalues(self._filters):
post_filter &= f
return search.post_filter(post_filter)
def highlight(self, search):
"""
Add highlighting for all the fields
"""
return search.highlight(*(f if '^' not in f else f.split('^', 1)[0]
for f in self.fields))
def sort(self, search):
"""
Add sorting information to the request.
"""
if self._sort:
search = search.sort(*self._sort)
return search
def build_search(self):
"""
Construct the ``Search`` object.
"""
s = self.search()
s = self.query(s, self._query)
s = self.filter(s)
if self.fields:
s = self.highlight(s)
s = self.sort(s)
self.aggregate(s)
return s
def execute(self):
"""
Execute the search and return the response.
"""
r = self._s.execute()
r._faceted_search = self
return r
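Complementing the docstring example above, the sketch below combines terms, range and date-histogram facets. The index name and field names are hypothetical, and executing it assumes a reachable Elasticsearch cluster with a default connection configured.

# Hypothetical faceted search combining several facet types; index and field
# names are assumptions.
from elasticsearch_dsl.faceted_search import (
    DateHistogramFacet, FacetedSearch, RangeFacet, TermsFacet)

class CommentSearch(FacetedSearch):
    index = 'comments'
    fields = ['author', 'body']
    facets = {
        'author': TermsFacet(field='author.keyword'),
        'length': RangeFacet(field='word_count',
                             ranges=[('short', (None, 50)),
                                     ('medium', (50, 200)),
                                     ('long', (200, None))]),
        'posted': DateHistogramFacet(field='created_at', interval='week'),
    }

response = CommentSearch('elasticsearch', filters={'length': 'short'}).execute()
for key, count, selected in response.facets.length:
    print(key, count, '(selected)' if selected else '')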
| 30.104592
| 100
| 0.569189
|
7b7f9625c782cf82d7d634b47b483de41c126d44
| 2,325
|
py
|
Python
|
lino_book/projects/eric/tests/test_ddh.py
|
khchine5/book
|
b6272d33d49d12335d25cf0a2660f7996680b1d1
|
[
"BSD-2-Clause"
] | 1
|
2018-01-12T14:09:58.000Z
|
2018-01-12T14:09:58.000Z
|
lino_book/projects/eric/tests/test_ddh.py
|
khchine5/book
|
b6272d33d49d12335d25cf0a2660f7996680b1d1
|
[
"BSD-2-Clause"
] | 4
|
2018-02-06T19:53:10.000Z
|
2019-08-01T21:47:44.000Z
|
lino_book/projects/eric/tests/test_ddh.py
|
khchine5/book
|
b6272d33d49d12335d25cf0a2660f7996680b1d1
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2016-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""Runs some tests about the disable-delete handler and cascading deletes.
You can run only these tests by issuing::
$ go team
$ python manage.py test tests.test_ddh
Or::
$ go book
$ python setup.py test -s tests.ProjectsTests.test_ddh
"""
from __future__ import unicode_literals
from __future__ import print_function
from django.core.exceptions import ValidationError
from lino.utils.djangotest import RemoteAuthTestCase
from lino.api import rt
from lino.utils.instantiator import create_row as create
class DDHTests(RemoteAuthTestCase):
maxDiff = None
def test01(self):
from lino.modlib.users.choicelists import UserTypes
Ticket = rt.models.tickets.Ticket
Project = rt.models.tickets.Project
User = rt.models.users.User
Star = rt.models.votes.Vote
# ContentType = rt.models.contenttypes.ContentType
# ct_Ticket = ContentType.objects.get_for_model(Ticket)
create(Project, name='project')
robin = create(User, username='robin',
first_name="Robin",
user_type=UserTypes.admin,
language="en")
def createit():
return create(Ticket, summary="Test", user=robin)
#
# If there are no vetos, user can ask to delete it
#
obj = createit()
obj.delete()
obj = createit()
if False:
try:
robin.delete()
self.fail("Expected veto")
except Warning as e:
self.assertEqual(
str(e), "Cannot delete User Robin "
"because 1 Tickets refer to it.")
create(Star, votable=obj, user=robin)
try:
robin.delete()
self.fail("Expected veto")
except Warning as e:
self.assertEqual(
str(e), "Cannot delete User Robin "
"because 1 Tickets refer to it.")
self.assertEqual(Star.objects.count(), 1)
self.assertEqual(Ticket.objects.count(), 1)
obj.delete()
self.assertEqual(Star.objects.count(), 0)
self.assertEqual(Ticket.objects.count(), 0)
| 27.678571
| 74
| 0.597419
|
f3f5bf66cb91b820816d306a005dd2226274b05c
| 5,730
|
py
|
Python
|
pypbbot/affairs/registrar.py
|
PHIKN1GHT/pypbbot_archived
|
8ab70830509c43b0babc53c9972d0a73481bdaa2
|
[
"MIT"
] | null | null | null |
pypbbot/affairs/registrar.py
|
PHIKN1GHT/pypbbot_archived
|
8ab70830509c43b0babc53c9972d0a73481bdaa2
|
[
"MIT"
] | null | null | null |
pypbbot/affairs/registrar.py
|
PHIKN1GHT/pypbbot_archived
|
8ab70830509c43b0babc53c9972d0a73481bdaa2
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from .filters import _on_unloading
from .filters import _on_loading
from pypbbot.protocol import GroupMessageEvent, GroupRecallNoticeEvent
from pypbbot.protocol import PrivateMessageEvent, FriendRecallNoticeEvent
from .filters import _unfilterable
from .filters import _on_starts_with_filter
import typing
from typing import Callable, Coroutine, Any, List
if typing.TYPE_CHECKING:
from typing import Optional, Callable, Coroutine, Any
from pypbbot.affairs import ChatAffair, Filter, HandlerDecorator, BaseAffair
import functools
from pypbbot.logging import logger
from pypbbot.plugin import _register
from pypbbot.affairs.filters import partial_filter
from pypbbot.affairs import HandlerPriority, ChatAffair
from pypbbot.utils import asyncify
import inspect
__all__ = ['useFilter', 'unfilterable', 'onPrivateMessage', 'onGroupMessage',
'onStartsWith', 'onEndsWith', 'onLoading', 'onUnloading', 'onMessage']
def useFilter(ftr: Filter, priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
    '''A decorator to register an affair handler for a specific affair filter.
Args:
ftr: the filter function.
priority: the priority of the handler
'''
try:
getattr(ftr, '__name__')
except AttributeError:
logger.error(
'Unnamed filter funcion detected. You SHOULD NOT use lambda expression.')
setattr(ftr, '__name__', '[UNKNOWN]')
'''
try:
getattr(useFilter, ftr.__name__)
except AttributeError:
setattr(useFilter, ftr.__name__, ftr)
'''
def decorator(func: Callable[[BaseAffair], Coroutine[Any, Any, None]]) -> Callable[[BaseAffair], Coroutine[Any, Any, None]]:
# DO NOT USE LAMBDA EXPRESSION
if not inspect.iscoroutinefunction(func):
func = asyncify(func)
logger.warning(
"Function {} has been asyncified for being registered.".format(func.__name__))
_register(ftr.__name__, ftr, func, priority)
@functools.wraps(func)
def wrapper(affair: BaseAffair) -> Coroutine[Any, Any, None]:
return func(affair)
return wrapper
return decorator
def unfilterable(priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
return useFilter(_unfilterable, priority)
def onPrivateMessage(priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
def _private_message_filter(_: BaseAffair) -> bool:
return isinstance(_.event, PrivateMessageEvent)
return useFilter(_private_message_filter, priority)
def onGroupMessage(priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
def _group_message_filter(_: BaseAffair) -> bool:
return isinstance(_.event, GroupMessageEvent)
return useFilter(_group_message_filter, priority)
def onStartsWith(prefix: str, priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
_filter = partial_filter(_on_starts_with_filter, (prefix, ))
return useFilter(_filter, priority)
def onEndsWith(suffix: str, priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
def _on_ends_with_filter(_: BaseAffair) -> bool:
return isinstance(_, ChatAffair) and _.event.raw_message.endswith(suffix)
return useFilter(_on_ends_with_filter, priority)
def onLoading(priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
return useFilter(_on_loading, priority)
def onGroupRecall(group_id: Optional[int] = None, priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
def _filter(_: BaseAffair) -> bool:
return isinstance(_.event, GroupRecallNoticeEvent) if group_id is None else (isinstance(_.event, GroupRecallNoticeEvent) and _.event.group_id == group_id)
return useFilter(_filter, priority)
def onFriendRecall(friend_id: Optional[int] = None, priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
def _filter(_: BaseAffair) -> bool:
return isinstance(_.event, FriendRecallNoticeEvent) if friend_id is None else (isinstance(_.event, GroupRecallNoticeEvent) and _.event.user_id == friend_id)
return useFilter(_filter, priority)
def onUnloading(priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
return useFilter(_on_unloading, priority)
def onMessage(priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
def _message_filter(_: BaseAffair) -> bool:
return isinstance(_, ChatAffair)
return useFilter(_message_filter, priority)
def onAllSatisfied(filter_helpers: List[Filter, Any], priority: HandlerPriority = HandlerPriority.NORMAL) -> HandlerDecorator:
    # each entry in filter_helpers should be a tuple of (filter, *filter_args)
global useFilter
_useFilter: Callable[[Filter, HandlerPriority],
HandlerDecorator] = useFilter
ftrs: List[Filter] = []
def getAllNamedFilters(ftr: Filter, *args: Any) -> None:
try:
getattr(ftr, '__name__')
except AttributeError:
setattr(ftr, '__name__', '[UNKNOWN]')
ftrs.append(ftr)
    useFilter = getAllNamedFilters  # plain assignment: an annotated name cannot be declared global
for fhelper in filter_helpers:
ftr, params = fhelper[0], fhelper[1:]
ftr(*params)
    useFilter = _useFilter  # restore the real decorator
def _on_all_satisfied_(affair: BaseAffair) -> bool:
result = True
for ftr in ftrs:
if not result:
                return False
result = result and ftr(affair)
return result
_on_all_satisfied_.__name__ = "_on_all_satisfied:" + \
"&".join([ftr.__name__ for ftr in ftrs])
return useFilter(_on_all_satisfied_, priority)
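A hypothetical plugin module wired up with the decorators above. It assumes the pypbbot plugin loader imports this file; the command prefix is made up and actual reply mechanics are omitted.

# Hypothetical handlers registered via the decorators above.
from pypbbot.affairs.registrar import onLoading, onStartsWith

@onLoading()
async def announce(affair):
    print('plugin loaded')

@onStartsWith('!echo')
async def echo(affair):
    # raw_message is available on chat events, as used by the filters above.
    text = affair.event.raw_message[len('!echo'):].strip()
    print('would reply with:', text)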
| 39.246575
| 164
| 0.721466
|
ad9995867b19dbc32bd926b4565581dfd86840d3
| 8,729
|
py
|
Python
|
tests/train_config_test.py
|
FranciscoShi/piepline
|
6105788339fc18bab39ea07625b5fd26ad687254
|
[
"MIT"
] | 5
|
2020-06-24T15:25:26.000Z
|
2021-07-01T08:16:51.000Z
|
tests/train_config_test.py
|
FranciscoShi/piepline
|
6105788339fc18bab39ea07625b5fd26ad687254
|
[
"MIT"
] | 21
|
2020-05-20T13:50:06.000Z
|
2020-07-26T09:56:19.000Z
|
tests/train_config_test.py
|
FranciscoShi/piepline
|
6105788339fc18bab39ea07625b5fd26ad687254
|
[
"MIT"
] | 2
|
2021-06-17T12:57:37.000Z
|
2021-07-16T01:55:24.000Z
|
import unittest
import numpy as np
import torch
from torch.nn import functional as F
from torch import Tensor
from piepline.train import Trainer
from piepline.data_producer import DataProducer
from piepline.train_config.metrics import MetricsGroup, AbstractMetric
from piepline.train_config.train_config import BaseTrainConfig
from piepline.train_config.stages import TrainStage
from piepline.utils.fsm import FileStructManager
from piepline.train_config.metrics_processor import MetricsProcessor
from tests.common import UseFileStructure
from tests.data_processor_test import SimpleModel, SimpleLoss
from tests.data_producer_test import TestDataProducer
__all__ = ['TrainConfigTest']
class SimpleMetric(AbstractMetric):
def __init__(self):
super().__init__('SimpleMetric')
@staticmethod
def calc(output: Tensor, target: Tensor) -> np.ndarray or float:
return F.pairwise_distance(output, target, p=2).numpy()
class FakeMetricsProcessor(MetricsProcessor):
def __init__(self):
super().__init__()
self.call_num = 0
def calc_metrics(self, output, target):
self.call_num += 1
class TrainConfigTest(UseFileStructure):
def test_metric(self):
metric = SimpleMetric()
for i in range(10):
output, target = torch.rand(1, 3), torch.rand(1, 3)
res = metric.calc(output, target)[0]
self.assertAlmostEqual(res, np.linalg.norm(output.numpy() - target.numpy()), delta=1e-5)
vals = metric.get_values()
self.assertEqual(vals.size, 0)
values = []
for i in range(10):
output, target = torch.rand(1, 3), torch.rand(1, 3)
metric._calc(output, target)
values.append(np.linalg.norm(output.numpy() - target.numpy()))
vals = metric.get_values()
self.assertEqual(vals.size, len(values))
for v1, v2 in zip(values, vals):
self.assertAlmostEqual(v1, v2, delta=1e-5)
metric.reset()
self.assertEqual(metric.get_values().size, 0)
self.assertEqual(metric.name(), "SimpleMetric")
def test_metrics_group_nested(self):
metrics_group_lv1 = MetricsGroup('lvl')
metrics_group_lv2 = MetricsGroup('lv2')
metrics_group_lv1.add(metrics_group_lv2)
self.assertTrue(metrics_group_lv1.have_groups())
self.assertRaises(MetricsGroup.MGException, lambda: metrics_group_lv2.add(MetricsGroup('lv3')))
metrics_group_lv1 = MetricsGroup('lvl')
metrics_group_lv2 = MetricsGroup('lv2')
        metrics_group_lv3 = MetricsGroup('lv3')
metrics_group_lv2.add(metrics_group_lv3)
self.assertRaises(MetricsGroup.MGException, lambda: metrics_group_lv1.add(metrics_group_lv2))
def test_metrics_group_calculation(self):
metrics_group_lv1 = MetricsGroup('lvl').add(SimpleMetric())
metrics_group_lv2 = MetricsGroup('lv2').add(SimpleMetric())
metrics_group_lv1.add(metrics_group_lv2)
values = []
for i in range(10):
output, target = torch.rand(1, 3), torch.rand(1, 3)
metrics_group_lv1.calc(output, target)
values.append(np.linalg.norm(output.numpy() - target.numpy()))
for metrics_group in [metrics_group_lv1, metrics_group_lv2]:
for m in metrics_group.metrics():
for v1, v2 in zip(values, m.get_values()):
self.assertAlmostEqual(v1, v2, delta=1e-5)
metrics_group_lv1.reset()
self.assertEqual(metrics_group_lv1.metrics()[0].get_values().size, 0)
self.assertEqual(metrics_group_lv2.metrics()[0].get_values().size, 0)
    def test_metrics_processor_calculation(self):
metrics_group_lv11 = MetricsGroup('lvl').add(SimpleMetric())
metrics_group_lv21 = MetricsGroup('lv2').add(SimpleMetric())
metrics_group_lv11.add(metrics_group_lv21)
metrics_processor = MetricsProcessor()
metrics_group_lv12 = MetricsGroup('lvl').add(SimpleMetric())
metrics_group_lv22 = MetricsGroup('lv2').add(SimpleMetric())
metrics_group_lv12.add(metrics_group_lv22)
metrics_processor.add_metrics_group(metrics_group_lv11)
metrics_processor.add_metrics_group(metrics_group_lv12)
m1, m2 = SimpleMetric(), SimpleMetric()
metrics_processor.add_metric(m1)
metrics_processor.add_metric(m2)
values = []
for i in range(10):
output, target = torch.rand(1, 3), torch.rand(1, 3)
metrics_processor.calc_metrics(output, target)
values.append(np.linalg.norm(output.numpy() - target.numpy()))
for metrics_group in [metrics_group_lv11, metrics_group_lv21, metrics_group_lv12, metrics_group_lv22]:
for m in metrics_group.metrics():
for v1, v2 in zip(values, m.get_values()):
self.assertAlmostEqual(v1, v2, delta=1e-5)
for m in [m1, m2]:
for v1, v2 in zip(values, m.get_values()):
self.assertAlmostEqual(v1, v2, delta=1e-5)
metrics_processor.reset_metrics()
self.assertEqual(metrics_group_lv11.metrics()[0].get_values().size, 0)
self.assertEqual(metrics_group_lv21.metrics()[0].get_values().size, 0)
self.assertEqual(metrics_group_lv12.metrics()[0].get_values().size, 0)
self.assertEqual(metrics_group_lv22.metrics()[0].get_values().size, 0)
self.assertEqual(m1.get_values().size, 0)
self.assertEqual(m2.get_values().size, 0)
def test_metrics_and_groups_collection(self):
m1 = SimpleMetric()
name = 'lv1'
metrics_group_lv1 = MetricsGroup(name)
self.assertEqual(metrics_group_lv1.metrics(), [])
metrics_group_lv1.add(m1)
self.assertEqual(metrics_group_lv1.groups(), [])
self.assertEqual(metrics_group_lv1.metrics(), [m1])
metrics_group_lv2 = MetricsGroup('lv2').add(SimpleMetric())
metrics_group_lv1.add(metrics_group_lv2)
self.assertEqual(metrics_group_lv1.groups(), [metrics_group_lv2])
self.assertEqual(metrics_group_lv1.metrics(), [m1])
metrics_group_lv22 = MetricsGroup('lv2').add(SimpleMetric())
metrics_group_lv1.add(metrics_group_lv22)
self.assertEqual(metrics_group_lv1.groups(), [metrics_group_lv2, metrics_group_lv22])
self.assertEqual(metrics_group_lv1.metrics(), [m1])
self.assertEqual(metrics_group_lv1.name(), name)
def test_train_stage(self):
data_producer = DataProducer([{'data': torch.rand(1, 3), 'target': torch.rand(1)} for _ in list(range(20))])
metrics_processor = FakeMetricsProcessor()
train_stage = TrainStage(data_producer).enable_hard_negative_mining(0.1)
metrics_processor.subscribe_to_stage(train_stage)
fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)
model = SimpleModel()
Trainer(BaseTrainConfig(model, [train_stage], SimpleLoss(), torch.optim.SGD(model.parameters(), lr=1)), fsm) \
.set_epoch_num(1).train()
self.assertEqual(metrics_processor.call_num, len(data_producer))
def test_hard_negatives_mining(self):
with self.assertRaises(ValueError):
stage = TrainStage(None).enable_hard_negative_mining(0)
with self.assertRaises(ValueError):
stage = TrainStage(None).enable_hard_negative_mining(1)
with self.assertRaises(ValueError):
stage = TrainStage(None).enable_hard_negative_mining(-1)
with self.assertRaises(ValueError):
stage = TrainStage(None).enable_hard_negative_mining(1.1)
dp = TestDataProducer([{'data': torch.Tensor([i]), 'target': torch.rand(1)} for i in list(range(20))]).pass_indices(True)
stage = TrainStage(dp).enable_hard_negative_mining(0.1)
losses = np.random.rand(20)
samples = []
def on_batch(batch, data_processor):
samples.append(batch)
stage.hnm._losses = np.array([0])
stage.hnm._process_batch = on_batch
stage.hnm.exec(None, losses, [[str(i)] for i in range(20)])
self.assertEqual(len(samples), 2)
losses = [float(v) for v in losses]
idxs = [int(s['data']) for s in samples]
max_losses = [losses[i] for i in idxs]
idxs.sort(reverse=True)
for i in idxs:
del losses[i]
for l in losses:
self.assertLess(l, min(max_losses))
stage.on_epoch_end()
self.assertIsNone(stage.hnm._losses)
stage.disable_hard_negative_mining()
self.assertIsNone(stage.hnm)
for data in dp:
self.assertIn('data_idx', data)
if __name__ == '__main__':
unittest.main()
| 39.677273
| 129
| 0.671784
|
dedcdd33423f2dadae2329e75f772230b6704a5b
| 1,323
|
py
|
Python
|
waffler.py
|
thfm/waffler
|
275e430114e1ab011b71de682aada26653ad1144
|
[
"MIT"
] | null | null | null |
waffler.py
|
thfm/waffler
|
275e430114e1ab011b71de682aada26653ad1144
|
[
"MIT"
] | null | null | null |
waffler.py
|
thfm/waffler
|
275e430114e1ab011b71de682aada26653ad1144
|
[
"MIT"
] | null | null | null |
import click
from nltk.corpus import wordnet
# Gets the unique synonyms of a word
def __get_synonyms(word: str):
synonyms = []
for synset in wordnet.synsets(word):
name = synset.lemmas()[0].name()
# Checks that the synonym is not the original word
if name != word:
synonyms.append(name)
# Removes underscores from those synonyms with multiple words
synonyms = [s.replace("_", " ") for s in synonyms]
synonyms = list(set(synonyms)) # Removes duplicate synonyms
return synonyms
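# Illustrative, hypothetical example -- the actual output depends on the
# installed WordNet corpus: __get_synonyms("happy") might return
# ["felicitous", "glad"].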
# Replaces each 'significant' word of an essay with
# a more complicated counterpart
@click.command()
@click.argument("source")
@click.option("-f", "--file", "is_file", is_flag=True,
help="Source the essay content from a file.")
def waffle(source: str, is_file: bool):
essay = open(source).read() if is_file else source
waffled = ""
for word in essay.split(" "):
synonyms = __get_synonyms(word)
# If the word is not 'significant'
# i.e. it is either short or has no synonyms...
if len(word) <= 3 or len(synonyms) == 0:
# ... then the original word is used
waffled += word + " "
else:
waffled += synonyms[0] + " "
click.echo(waffled)
if __name__ == "__main__":
waffle()
| 30.767442
| 65
| 0.621315
|
d5365fdfcc91500ebf4cd1600f83d374287dfb97
| 2,484
|
py
|
Python
|
src/aiy/vision/models/face_detection.py
|
sonjaq/aiyprojects-raspbian
|
101403c1b80433f80aad483d7f4d1ad757112cd9
|
[
"Apache-2.0"
] | null | null | null |
src/aiy/vision/models/face_detection.py
|
sonjaq/aiyprojects-raspbian
|
101403c1b80433f80aad483d7f4d1ad757112cd9
|
[
"Apache-2.0"
] | null | null | null |
src/aiy/vision/models/face_detection.py
|
sonjaq/aiyprojects-raspbian
|
101403c1b80433f80aad483d7f4d1ad757112cd9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Face Detection."""
from __future__ import division
from aiy.vision.inference import ModelDescriptor
from aiy.vision.models import utils
_COMPUTE_GRAPH_NAME = 'face_detection.binaryproto'
def _reshape(array, width):
assert len(array) % width == 0
height = len(array) // width
return [array[i * width:(i + 1) * width] for i in range(height)]
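# Illustrative example (not part of the original source):
#   _reshape([x0, y0, w0, h0, x1, y1, w1, h1], 4) -> [[x0, y0, w0, h0], [x1, y1, w1, h1]]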
class Face(object):
"""Face detection result."""
def __init__(self, bounding_box, face_score, joy_score):
"""Creates a new Face instance.
Args:
bounding_box: (x, y, width, height).
face_score: float, face confidence score.
joy_score: float, face joy score.
"""
self.bounding_box = bounding_box
self.face_score = face_score
self.joy_score = joy_score
def __str__(self):
return 'face_score=%f, joy_score=%f, bbox=%s' % (self.face_score,
self.joy_score,
str(self.bounding_box))
def model():
  # Face detection model has special implementation in VisionBonnet firmware.
  # input_shape, input_normalizer, and compute_graph params have no effect.
return ModelDescriptor(
name='FaceDetection',
input_shape=(1, 0, 0, 3),
input_normalizer=(0, 0),
compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME))
def get_faces(result):
"""Retunrs list of Face objects decoded from the inference result."""
assert len(result.tensors) == 3
# TODO(dkovalev): check tensor shapes
bboxes = _reshape(result.tensors['bounding_boxes'].data, 4)
face_scores = result.tensors['face_scores'].data
joy_scores = result.tensors['joy_scores'].data
assert len(bboxes) == len(joy_scores)
assert len(bboxes) == len(face_scores)
return [
Face(tuple(bbox), face_score, joy_score)
for bbox, face_score, joy_score in zip(bboxes, face_scores, joy_scores)
]
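# Minimal usage sketch (hedged: assumes the aiy.vision.inference.ImageInference
# API and an `image` object from the caller; not part of this module):
#   from aiy.vision.inference import ImageInference
#   with ImageInference(model()) as inference:
#       for face in get_faces(inference.run(image)):
#           print(face)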
| 33.567568
| 77
| 0.692432
|
b6e37dc494ca34bc4a7c9eefb1c74b6545eadddf
| 8,192
|
py
|
Python
|
grr/server/grr_response_server/output_plugins/sqlite_plugin.py
|
nkrios/grr
|
399e078ed522bf0555a2666fb086aa7809d54971
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/output_plugins/sqlite_plugin.py
|
nkrios/grr
|
399e078ed522bf0555a2666fb086aa7809d54971
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/output_plugins/sqlite_plugin.py
|
nkrios/grr
|
399e078ed522bf0555a2666fb086aa7809d54971
|
[
"Apache-2.0"
] | 1
|
2020-07-09T01:08:48.000Z
|
2020-07-09T01:08:48.000Z
|
#!/usr/bin/env python
"""Plugin that exports results as SQLite db scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import io
import os
import zipfile
from future.utils import iteritems
from future.utils import iterkeys
from future.utils import itervalues
import sqlite3
import yaml
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import collection
from grr_response_server import instant_output_plugin
class Rdf2SqliteAdapter(object):
"""An adapter for converting RDF values to a SQLite-friendly form."""
class Converter(object):
def __init__(self, sqlite_type, convert_fn):
self.sqlite_type = sqlite_type
self.convert_fn = convert_fn
# PySQLite prefers dealing with unicode objects.
DEFAULT_CONVERTER = Converter("TEXT", utils.SmartUnicode)
INT_CONVERTER = Converter("INTEGER", int)
# Converters for fields that have a semantic type annotation in their
# protobuf definition.
SEMANTIC_CONVERTERS = {
rdfvalue.RDFInteger:
INT_CONVERTER,
rdfvalue.RDFBool:
INT_CONVERTER, # Sqlite does not have a bool type.
rdfvalue.RDFDatetime:
Converter("INTEGER", lambda x: x.AsMicrosecondsSinceEpoch()),
rdfvalue.RDFDatetimeSeconds:
Converter("INTEGER", lambda x: x.AsSecondsSinceEpoch() * 1000000),
rdfvalue.Duration:
Converter("INTEGER", lambda x: x.microseconds),
}
# Converters for fields that do not have a semantic type annotation in their
# protobuf definition.
NON_SEMANTIC_CONVERTERS = {
rdf_structs.ProtoUnsignedInteger: INT_CONVERTER,
rdf_structs.ProtoSignedInteger: INT_CONVERTER,
rdf_structs.ProtoFixed32: INT_CONVERTER,
rdf_structs.ProtoFixed64: INT_CONVERTER,
rdf_structs.ProtoFloat: Converter("REAL", float),
rdf_structs.ProtoDouble: Converter("REAL", float),
rdf_structs.ProtoBoolean: INT_CONVERTER,
}
@staticmethod
def GetConverter(type_info):
if type_info.__class__ is rdf_structs.ProtoRDFValue:
return Rdf2SqliteAdapter.SEMANTIC_CONVERTERS.get(
type_info.type, Rdf2SqliteAdapter.DEFAULT_CONVERTER)
else:
return Rdf2SqliteAdapter.NON_SEMANTIC_CONVERTERS.get(
type_info.__class__, Rdf2SqliteAdapter.DEFAULT_CONVERTER)
class SqliteInstantOutputPlugin(
instant_output_plugin.InstantOutputPluginWithExportConversion):
"""Instant output plugin that converts results into SQLite db commands."""
plugin_name = "sqlite-zip"
friendly_name = "SQLite scripts (zipped)"
description = "Output ZIP archive containing SQLite scripts."
output_file_extension = ".zip"
ROW_BATCH = 100
def __init__(self, *args, **kwargs):
super(SqliteInstantOutputPlugin, self).__init__(*args, **kwargs)
self.archive_generator = None # Created in Start()
self.export_counts = {}
@property
def path_prefix(self):
prefix, _ = os.path.splitext(self.output_file_name)
return prefix
def Start(self):
self.archive_generator = utils.StreamingZipGenerator(
compression=zipfile.ZIP_DEFLATED)
self.export_counts = {}
return []
def ProcessSingleTypeExportedValues(self, original_value_type,
exported_values):
first_value = next(exported_values, None)
if not first_value:
return
if not isinstance(first_value, rdf_structs.RDFProtoStruct):
raise ValueError("The SQLite plugin only supports export-protos")
yield self.archive_generator.WriteFileHeader(
"%s/%s_from_%s.sql" % (self.path_prefix, first_value.__class__.__name__,
original_value_type.__name__))
table_name = "%s.from_%s" % (first_value.__class__.__name__,
original_value_type.__name__)
schema = self._GetSqliteSchema(first_value.__class__)
# We will buffer the sql statements into an in-memory sql database before
# dumping them to the zip archive. We rely on the PySQLite library for
# string escaping.
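    # Rough shape of the script this generator emits (table and column names
    # below are only hypothetical examples):
    #   BEGIN TRANSACTION;
    #   CREATE TABLE "ExportedFile.from_StatEntry" (
    #     "st_size" INTEGER,
    #     "pathspec.path" TEXT
    #   );
    #   INSERT INTO "ExportedFile.from_StatEntry" VALUES (1024, '/tmp/foo');
    #   COMMIT;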
db_connection = sqlite3.connect(":memory:")
db_cursor = db_connection.cursor()
yield self.archive_generator.WriteFileChunk(
"BEGIN TRANSACTION;\n".encode("utf-8"))
with db_connection:
buf = io.StringIO()
buf.write(u"CREATE TABLE \"%s\" (\n " % table_name)
column_types = [(k, v.sqlite_type) for k, v in iteritems(schema)]
buf.write(u",\n ".join([u"\"%s\" %s" % (k, v) for k, v in column_types]))
buf.write(u"\n);")
db_cursor.execute(buf.getvalue())
chunk = (buf.getvalue() + "\n").encode("utf-8")
yield self.archive_generator.WriteFileChunk(chunk)
self._InsertValueIntoDb(table_name, schema, first_value, db_cursor)
for sql in self._FlushAllRows(db_connection, table_name):
yield sql
counter = 1
for batch in collection.Batch(exported_values, self.ROW_BATCH):
counter += len(batch)
with db_connection:
for value in batch:
self._InsertValueIntoDb(table_name, schema, value, db_cursor)
for sql in self._FlushAllRows(db_connection, table_name):
yield sql
db_connection.close()
yield self.archive_generator.WriteFileChunk("COMMIT;\n".encode("utf-8"))
yield self.archive_generator.WriteFileFooter()
counts_for_original_type = self.export_counts.setdefault(
original_value_type.__name__, dict())
counts_for_original_type[first_value.__class__.__name__] = counter
def _GetSqliteSchema(self, proto_struct_class, prefix=""):
"""Returns a mapping of SQLite column names to Converter objects."""
schema = collections.OrderedDict()
for type_info in proto_struct_class.type_infos:
if type_info.__class__ is rdf_structs.ProtoEmbedded:
schema.update(
self._GetSqliteSchema(
type_info.type, prefix="%s%s." % (prefix, type_info.name)))
else:
field_name = utils.SmartStr(prefix + type_info.name)
schema[field_name] = Rdf2SqliteAdapter.GetConverter(type_info)
return schema
def _InsertValueIntoDb(self, table_name, schema, value, db_cursor):
sql_dict = self._ConvertToCanonicalSqlDict(schema, value.ToPrimitiveDict())
buf = io.StringIO()
buf.write(u"INSERT INTO \"%s\" (\n " % table_name)
buf.write(u",\n ".join(["\"%s\"" % k for k in iterkeys(sql_dict)]))
buf.write(u"\n)")
buf.write(u"VALUES (%s);" % u",".join([u"?"] * len(sql_dict)))
db_cursor.execute(buf.getvalue(), list(itervalues(sql_dict)))
def _ConvertToCanonicalSqlDict(self, schema, raw_dict, prefix=""):
"""Converts a dict of RDF values into a SQL-ready form."""
flattened_dict = {}
for k, v in iteritems(raw_dict):
if isinstance(v, dict):
flattened_dict.update(
self._ConvertToCanonicalSqlDict(
schema, v, prefix="%s%s." % (prefix, k)))
else:
field_name = prefix + k
flattened_dict[field_name] = schema[field_name].convert_fn(v)
return flattened_dict
def _FlushAllRows(self, db_connection, table_name):
"""Copies rows from the given db into the output file then deletes them."""
for sql in db_connection.iterdump():
if (sql.startswith("CREATE TABLE") or
sql.startswith("BEGIN TRANSACTION") or sql.startswith("COMMIT")):
# These statements only need to be written once.
continue
# The archive generator expects strings (not Unicode objects returned by
# the pysqlite library).
yield self.archive_generator.WriteFileChunk((sql + "\n").encode("utf-8"))
with db_connection:
db_connection.cursor().execute("DELETE FROM \"%s\";" % table_name)
def Finish(self):
manifest = {"export_stats": self.export_counts}
header = self.path_prefix + "/MANIFEST"
yield self.archive_generator.WriteFileHeader(header.encode("utf-8"))
yield self.archive_generator.WriteFileChunk(yaml.safe_dump(manifest))
yield self.archive_generator.WriteFileFooter()
yield self.archive_generator.Close()
| 37.925926
| 80
| 0.705444
|
314e81dba312d97e64e18cb88e377e92cac7104f
| 7,222
|
py
|
Python
|
pypureclient/flasharray/FA_2_9/models/host.py
|
ashahid-ps/py-pure-client
|
2e3565d37b2a41db69308769f6f485d08a7c46c3
|
[
"BSD-2-Clause"
] | null | null | null |
pypureclient/flasharray/FA_2_9/models/host.py
|
ashahid-ps/py-pure-client
|
2e3565d37b2a41db69308769f6f485d08a7c46c3
|
[
"BSD-2-Clause"
] | null | null | null |
pypureclient/flasharray/FA_2_9/models/host.py
|
ashahid-ps/py-pure-client
|
2e3565d37b2a41db69308769f6f485d08a7c46c3
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_9 import models
class Host(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'chap': 'Chap',
'connection_count': 'int',
'host_group': 'ReferenceNoId',
'iqns': 'list[str]',
'nqns': 'list[str]',
'personality': 'str',
'port_connectivity': 'HostPortConnectivity',
'preferred_arrays': 'list[Reference]',
'space': 'Space',
'wwns': 'list[str]',
'is_local': 'bool'
}
attribute_map = {
'name': 'name',
'chap': 'chap',
'connection_count': 'connection_count',
'host_group': 'host_group',
'iqns': 'iqns',
'nqns': 'nqns',
'personality': 'personality',
'port_connectivity': 'port_connectivity',
'preferred_arrays': 'preferred_arrays',
'space': 'space',
'wwns': 'wwns',
'is_local': 'is_local'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
chap=None, # type: models.Chap
connection_count=None, # type: int
host_group=None, # type: models.ReferenceNoId
iqns=None, # type: List[str]
nqns=None, # type: List[str]
personality=None, # type: str
port_connectivity=None, # type: models.HostPortConnectivity
preferred_arrays=None, # type: List[models.Reference]
space=None, # type: models.Space
wwns=None, # type: List[str]
is_local=None, # type: bool
):
"""
Keyword args:
name (str): A user-specified name. The name must be locally unique and can be changed.
chap (Chap)
connection_count (int): The number of volumes connected to the specified host.
host_group (ReferenceNoId): The host group to which the host should be associated.
iqns (list[str]): The iSCSI qualified name (IQN) associated with the host.
nqns (list[str]): The NVMe Qualified Name (NQN) associated with the host.
personality (str): Determines how the system tunes the array to ensure that it works optimally with the host. Set `personality` to the name of the host operating system or virtual memory system. Valid values are `aix`, `esxi`, `hitachi-vsp`, `hpux`, `oracle-vm-server`, `solaris`, and `vms`. If your system is not listed as one of the valid host personalities, do not set the option. By default, the personality is not set.
port_connectivity (HostPortConnectivity)
preferred_arrays (list[Reference]): For synchronous replication configurations, sets a host's preferred array to specify which array exposes active/optimized paths to that host. Enter multiple preferred arrays in comma-separated format. If a preferred array is set for a host, then the other arrays in the same pod will expose active/non-optimized paths to that host. If the host is in a host group, `preferred_arrays` cannot be set because host groups have their own preferred arrays. On a preferred array of a certain host, all the paths on all the ports (for both the primary and secondary controllers) are set up as A/O (active/optimized) paths, while on a non-preferred array, all the paths are A/N (Active/Non-optimized) paths.
space (Space): Displays provisioned size and physical storage consumption information for the sum of all volumes connected to the specified host.
wwns (list[str]): The Fibre Channel World Wide Name (WWN) associated with the host.
is_local (bool): -> If set to `true`, the location reference is to the local array. If set to `false`, the location reference is to a remote location, such as a remote array or offload target.
"""
if name is not None:
self.name = name
if chap is not None:
self.chap = chap
if connection_count is not None:
self.connection_count = connection_count
if host_group is not None:
self.host_group = host_group
if iqns is not None:
self.iqns = iqns
if nqns is not None:
self.nqns = nqns
if personality is not None:
self.personality = personality
if port_connectivity is not None:
self.port_connectivity = port_connectivity
if preferred_arrays is not None:
self.preferred_arrays = preferred_arrays
if space is not None:
self.space = space
if wwns is not None:
self.wwns = wwns
if is_local is not None:
self.is_local = is_local
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Host`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Host, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Host):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
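# Minimal usage sketch (illustrative values only, not taken from the API reference):
#   host = Host(name='host01', iqns=['iqn.1998-01.com.example:host01'])
#   host.to_dict()  # -> {'name': 'host01', 'iqns': ['iqn.1998-01.com.example:host01']}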
| 40.573034
| 745
| 0.599695
|
e0fd5c31b56dcbfc8fedda3e8dabf70a298414e5
| 5,026
|
py
|
Python
|
mycroft/audio/speech.py
|
fortwally/mycroft-core
|
a8f7aee57294078dff39b824e6300001374f0b98
|
[
"Apache-2.0"
] | null | null | null |
mycroft/audio/speech.py
|
fortwally/mycroft-core
|
a8f7aee57294078dff39b824e6300001374f0b98
|
[
"Apache-2.0"
] | null | null | null |
mycroft/audio/speech.py
|
fortwally/mycroft-core
|
a8f7aee57294078dff39b824e6300001374f0b98
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import time
from threading import Lock
from mycroft.configuration import Configuration
from mycroft.metrics import report_timing, Stopwatch
from mycroft.tts import TTSFactory
from mycroft.util import create_signal, check_for_signal
from mycroft.util.log import LOG
from mycroft.messagebus.message import Message
ws = None # TODO:18.02 - Rename to "messagebus"
config = None
tts = None
tts_hash = None
lock = Lock()
_last_stop_signal = 0
def _start_listener(message):
"""
Force Mycroft to start listening (as if 'Hey Mycroft' was spoken)
"""
create_signal('startListening')
def handle_speak(event):
"""
Handle "speak" message
"""
config = Configuration.get()
Configuration.init(ws)
global _last_stop_signal
# Get conversation ID
if event.context and 'ident' in event.context:
ident = event.context['ident']
else:
ident = 'unknown'
with lock:
stopwatch = Stopwatch()
stopwatch.start()
utterance = event.data['utterance']
if event.data.get('expect_response', False):
# When expect_response is requested, the listener will be restarted
# at the end of the next bit of spoken audio.
ws.once('recognizer_loop:audio_output_end', _start_listener)
# This is a bit of a hack for Picroft. The analog audio on a Pi blocks
# for 30 seconds fairly often, so we don't want to break on periods
# (decreasing the chance of encountering the block). But we will
        # keep the split for non-Picroft installs since it gives user feedback
# faster on longer phrases.
#
# TODO: Remove or make an option? This is really a hack, anyway,
# so we likely will want to get rid of this when not running on Mimic
if (config.get('enclosure', {}).get('platform') != "picroft" and
len(re.findall('<[^>]*>', utterance)) == 0):
start = time.time()
chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s',
utterance)
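            # Illustrative behaviour of the split above (example sentence is
            # hypothetical): "It is 3 p.m. now. Shall we proceed?"
            # -> ["It is 3 p.m. now.", "Shall we proceed?"]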
for chunk in chunks:
try:
mute_and_speak(chunk, ident)
except KeyboardInterrupt:
raise
except Exception:
LOG.error('Error in mute_and_speak', exc_info=True)
if (_last_stop_signal > start or
check_for_signal('buttonPress')):
break
else:
mute_and_speak(utterance, ident)
stopwatch.stop()
report_timing(ident, 'speech', stopwatch, {'utterance': utterance,
'tts': tts.__class__.__name__})
def mute_and_speak(utterance, ident):
"""
Mute mic and start speaking the utterance using selected tts backend.
Args:
utterance: The sentence to be spoken
ident: Ident tying the utterance to the source query
"""
global tts_hash
# update TTS object if configuration has changed
if tts_hash != hash(str(config.get('tts', ''))):
global tts
# Stop tts playback thread
tts.playback.stop()
tts.playback.join()
# Create new tts instance
tts = TTSFactory.create()
tts.init(ws)
tts_hash = hash(str(config.get('tts', '')))
LOG.info("Speak: " + utterance)
try:
tts.execute(utterance, ident)
except Exception as e:
LOG.error('TTS execution failed ({})'.format(repr(e)))
def handle_stop(event):
"""
handle stop message
"""
global _last_stop_signal
if check_for_signal("isSpeaking", -1):
_last_stop_signal = time.time()
tts.playback.clear_queue()
tts.playback.clear_visimes()
ws.emit(Message("mycroft.stop.handled", {"by": "TTS"}))
def init(websocket):
"""
Start speech related handlers
"""
global ws
global tts
global tts_hash
global config
ws = websocket
Configuration.init(ws)
config = Configuration.get()
ws.on('mycroft.stop', handle_stop)
ws.on('mycroft.audio.speech.stop', handle_stop)
ws.on('speak', handle_speak)
ws.on('mycroft.mic.listen', _start_listener)
tts = TTSFactory.create()
tts.init(ws)
    tts_hash = hash(str(config.get('tts', '')))
def shutdown():
if tts:
tts.playback.stop()
tts.playback.join()
| 30.834356
| 79
| 0.619976
|
ffb6837d939721d44b5c6ad7e9c6ac62769246e9
| 4,046
|
py
|
Python
|
plugins/earthAnimation.py
|
AY-ME/botShell
|
16f8a7364255cefa76844d38b3df3278a47988d3
|
[
"MIT"
] | 24
|
2020-07-22T00:14:25.000Z
|
2021-09-24T12:30:05.000Z
|
plugins/earthAnimation.py
|
AY-ME/botShell
|
16f8a7364255cefa76844d38b3df3278a47988d3
|
[
"MIT"
] | null | null | null |
plugins/earthAnimation.py
|
AY-ME/botShell
|
16f8a7364255cefa76844d38b3df3278a47988d3
|
[
"MIT"
] | 19
|
2020-07-17T21:01:31.000Z
|
2022-02-17T11:07:43.000Z
|
import asyncio
from utilities import utilities
async def run(message, matches, chat_id, step, crons=None):
response = []
if not (message.out):
message = await message.reply(matches)
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌖▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌍▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌒▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️🌖▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌏▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️🌒▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️🌖▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌎▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️🌒▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️🌖▪️\r\n▪️▪️▪️🌍▪️▪️▪️\r\n▪️🌒▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️🌒▪️🌎▪️🌖▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️🌒▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌏▪️▪️▪️\r\n▪️▪️▪️▪️▪️🌖▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️🌒▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌍▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️🌖▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️🌒▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌏▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️🌖▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌒▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌎▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌖▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️🌒▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌍▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️🌖▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️🌒▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌎▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️🌖▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️🌒▪️\r\n▪️▪️▪️🌏▪️▪️▪️\r\n▪️🌖▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️🌖▪️🌍▪️🌒▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️🌖▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌎▪️▪️▪️\r\n▪️▪️▪️▪️▪️🌒▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️🌖▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌏▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️🌒▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️🌖▪️▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌎▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️▪️🌒▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
msg = "▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌖▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌍▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️\r\n▪️▪️▪️🌒▪️▪️▪️\r\n▪️▪️▪️▪️▪️▪️▪️"
response.append(message.edit(msg))
response.append(asyncio.sleep(0.5))
return response
plugin = {
"name": "Earth Animation",
"desc": "Earth with moon and sun turn around it.",
"usage": ["[!/#]earth Earth with moon and sun turn around it."],
"run": run,
"sudo": True,
"patterns": ["^[!/#]earth$"],
}
| 56.194444
| 131
| 0.367771
|
6544c583f715d0f828fc7b0a022bb630f6a84000
| 6,793
|
py
|
Python
|
trimage/ui.py
|
kalmi/Trimage
|
269e052ba407c16ab32953276c776d543930b917
|
[
"MIT"
] | 434
|
2015-01-04T02:19:36.000Z
|
2022-03-29T20:26:28.000Z
|
trimage/ui.py
|
kalmi/Trimage
|
269e052ba407c16ab32953276c776d543930b917
|
[
"MIT"
] | 61
|
2015-03-02T15:06:11.000Z
|
2022-01-30T02:53:03.000Z
|
trimage/ui.py
|
kalmi/Trimage
|
269e052ba407c16ab32953276c776d543930b917
|
[
"MIT"
] | 61
|
2015-01-14T16:45:36.000Z
|
2021-12-04T01:03:24.000Z
|
#!/usr/bin/env python3
from os import path
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class TrimageTableView(QTableView):
drop_event_signal = pyqtSignal(list)
"""Init the table drop event."""
def __init__(self, parent=None):
super(TrimageTableView, self).__init__(parent)
self.setAcceptDrops(True)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
event.accept()
def dropEvent(self, event):
event.accept()
filelist = []
for url in event.mimeData().urls():
filelist.append(url.toLocalFile())
self.drop_event_signal.emit(filelist)
class Ui_trimage():
def get_image(self, image):
"""Get the correct link to the images used in the UI."""
imagelink = path.join(path.dirname(path.dirname(path.realpath(__file__))), "trimage/" + image)
return imagelink
def setupUi(self, trimage):
"""Setup the entire UI."""
trimage.setObjectName("trimage")
trimage.resize(600, 170)
trimageIcon = QIcon(self.get_image("pixmaps/trimage-icon.png"))
trimage.setWindowIcon(trimageIcon)
self.centralwidget = QWidget(trimage)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QGridLayout(self.centralwidget)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.widget = QWidget(self.centralwidget)
self.widget.setEnabled(True)
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(
self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName("widget")
self.verticalLayout = QVBoxLayout(self.widget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.frame = QFrame(self.widget)
self.frame.setObjectName("frame")
self.verticalLayout_2 = QVBoxLayout(self.frame)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setContentsMargins(10, 10, 10, 10)
self.horizontalLayout.setObjectName("horizontalLayout")
self.addfiles = QPushButton(self.frame)
font = QFont()
font.setPointSize(9)
self.addfiles.setFont(font)
self.addfiles.setCursor(Qt.PointingHandCursor)
icon = QIcon()
icon.addPixmap(QPixmap(self.get_image("pixmaps/list-add.png")), QIcon.Normal, QIcon.Off)
self.addfiles.setIcon(icon)
self.addfiles.setObjectName("addfiles")
self.addfiles.setAcceptDrops(True)
self.horizontalLayout.addWidget(self.addfiles)
self.label = QLabel(self.frame)
font = QFont()
font.setPointSize(8)
self.label.setFont(font)
self.label.setFrameShadow(QFrame.Plain)
self.label.setContentsMargins(1, 1, 1, 1)
self.label.setIndent(10)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
spacerItem = QSpacerItem(498, 20, QSizePolicy.Expanding,
QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.recompress = QPushButton(self.frame)
font = QFont()
font.setPointSize(9)
self.recompress.setFont(font)
self.recompress.setCursor(Qt.PointingHandCursor)
icon1 = QIcon()
icon1.addPixmap(QPixmap(self.get_image("pixmaps/view-refresh.png")), QIcon.Normal, QIcon.Off)
self.recompress.setIcon(icon1)
self.recompress.setCheckable(False)
self.recompress.setObjectName("recompress")
self.horizontalLayout.addWidget(self.recompress)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.processedfiles = TrimageTableView(self.frame)
self.processedfiles.setEnabled(True)
self.processedfiles.setFrameShape(QFrame.NoFrame)
self.processedfiles.setFrameShadow(QFrame.Plain)
self.processedfiles.setLineWidth(0)
self.processedfiles.setMidLineWidth(0)
self.processedfiles.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.processedfiles.setTabKeyNavigation(True)
self.processedfiles.setAlternatingRowColors(True)
self.processedfiles.setTextElideMode(Qt.ElideRight)
self.processedfiles.setShowGrid(True)
self.processedfiles.setGridStyle(Qt.NoPen)
self.processedfiles.setSortingEnabled(False)
self.processedfiles.setObjectName("processedfiles")
self.processedfiles.resizeColumnsToContents()
self.processedfiles.setSelectionMode(QAbstractItemView.NoSelection)
self.verticalLayout_2.addWidget(self.processedfiles)
self.verticalLayout.addWidget(self.frame)
self.gridLayout_2.addWidget(self.widget, 0, 0, 1, 1)
trimage.setCentralWidget(self.centralwidget)
self.retranslateUi(trimage)
QMetaObject.connectSlotsByName(trimage)
def retranslateUi(self, trimage):
"""Fill in the texts for all UI elements."""
trimage.setWindowTitle(QApplication.translate("trimage",
"Trimage image compressor", None))
self.addfiles.setToolTip(QApplication.translate("trimage",
"Add file to the compression list", None))
self.addfiles.setText(QApplication.translate("trimage",
"&Add and compress", None))
self.addfiles.setShortcut(QApplication.translate("trimage",
"Alt+A", None))
self.label.setText(QApplication.translate("trimage",
"Drag and drop images onto the table", None))
self.recompress.setToolTip(QApplication.translate("trimage",
"Recompress all images", None))
self.recompress.setText(QApplication.translate("trimage",
"&Recompress", None))
self.recompress.setShortcut(QApplication.translate("trimage",
"Alt+R", None))
self.processedfiles.setToolTip(QApplication.translate("trimage",
"Drag files in here", None))
self.processedfiles.setWhatsThis(QApplication.translate("trimage",
"Drag files in here", None))
| 39.265896
| 102
| 0.676579
|
e34bebe87d37fb2894c2ee6058b6dcf970136e1a
| 7,768
|
py
|
Python
|
from_cpython/Lib/test/test_imaplib.py
|
jmgc/pyston
|
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
from_cpython/Lib/test/test_imaplib.py
|
jmgc/pyston
|
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
from_cpython/Lib/test/test_imaplib.py
|
jmgc/pyston
|
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
from test import test_support as support
# If we end up with a significant number of tests that don't require
# threading, this test module should be split. Right now we skip
# them all if we don't have threading.
threading = support.import_module('threading')
from contextlib import contextmanager
import imaplib
import os.path
import SocketServer
import time
# Pyston change: changed to absolute import
from test.test_support import reap_threads, verbose, transient_internet
import unittest
try:
import ssl
except ImportError:
ssl = None
CERTFILE = None
class TestImaplib(unittest.TestCase):
def test_that_Time2Internaldate_returns_a_result(self):
# We can check only that it successfully produces a result,
# not the correctness of the result itself, since the result
# depends on the timezone the machine is in.
timevalues = [2000000000, 2000000000.0, time.localtime(2000000000),
'"18-May-2033 05:33:20 +0200"']
for t in timevalues:
imaplib.Time2Internaldate(t)
if ssl:
class SecureTCPServer(SocketServer.TCPServer):
def get_request(self):
newsocket, fromaddr = self.socket.accept()
connstream = ssl.wrap_socket(newsocket,
server_side=True,
certfile=CERTFILE)
return connstream, fromaddr
IMAP4_SSL = imaplib.IMAP4_SSL
else:
class SecureTCPServer:
pass
IMAP4_SSL = None
class SimpleIMAPHandler(SocketServer.StreamRequestHandler):
timeout = 1
def _send(self, message):
if verbose: print "SENT:", message.strip()
self.wfile.write(message)
def handle(self):
# Send a welcome message.
self._send('* OK IMAP4rev1\r\n')
while 1:
# Gather up input until we receive a line terminator or we timeout.
# Accumulate read(1) because it's simpler to handle the differences
# between naked sockets and SSL sockets.
line = ''
while 1:
try:
part = self.rfile.read(1)
if part == '':
# Naked sockets return empty strings..
return
line += part
except IOError:
# ..but SSLSockets raise exceptions.
return
if line.endswith('\r\n'):
break
if verbose: print 'GOT:', line.strip()
splitline = line.split()
tag = splitline[0]
cmd = splitline[1]
args = splitline[2:]
if hasattr(self, 'cmd_%s' % (cmd,)):
getattr(self, 'cmd_%s' % (cmd,))(tag, args)
else:
self._send('%s BAD %s unknown\r\n' % (tag, cmd))
def cmd_CAPABILITY(self, tag, args):
self._send('* CAPABILITY IMAP4rev1\r\n')
self._send('%s OK CAPABILITY completed\r\n' % (tag,))
class BaseThreadedNetworkedTests(unittest.TestCase):
def make_server(self, addr, hdlr):
class MyServer(self.server_class):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
if verbose: print "creating server"
server = MyServer(addr, hdlr)
self.assertEqual(server.server_address, server.socket.getsockname())
if verbose:
print "server created"
print "ADDR =", addr
print "CLASS =", self.server_class
print "HDLR =", server.RequestHandlerClass
t = threading.Thread(
name='%s serving' % self.server_class,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print "server running"
return server, t
def reap_server(self, server, thread):
if verbose: print "waiting for server"
server.shutdown()
thread.join()
if verbose: print "done"
@contextmanager
def reaped_server(self, hdlr):
server, thread = self.make_server((support.HOST, 0), hdlr)
try:
yield server
finally:
self.reap_server(server, thread)
@reap_threads
def test_connect(self):
with self.reaped_server(SimpleIMAPHandler) as server:
client = self.imap_class(*server.server_address)
client.shutdown()
@reap_threads
def test_issue5949(self):
class EOFHandler(SocketServer.StreamRequestHandler):
def handle(self):
# EOF without sending a complete welcome message.
self.wfile.write('* OK')
with self.reaped_server(EOFHandler) as server:
self.assertRaises(imaplib.IMAP4.abort,
self.imap_class, *server.server_address)
def test_linetoolong(self):
class TooLongHandler(SimpleIMAPHandler):
def handle(self):
# Send a very long response line
self.wfile.write('* OK ' + imaplib._MAXLINE*'x' + '\r\n')
with self.reaped_server(TooLongHandler) as server:
self.assertRaises(imaplib.IMAP4.error,
self.imap_class, *server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
server_class = SocketServer.TCPServer
imap_class = imaplib.IMAP4
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
server_class = SecureTCPServer
imap_class = IMAP4_SSL
def test_linetoolong(self):
raise unittest.SkipTest("test is not reliable on 2.7; see issue 20118")
class RemoteIMAPTest(unittest.TestCase):
host = 'cyrus.andrew.cmu.edu'
port = 143
username = 'anonymous'
password = 'pass'
imap_class = imaplib.IMAP4
def setUp(self):
with transient_internet(self.host):
self.server = self.imap_class(self.host, self.port)
def tearDown(self):
if self.server is not None:
self.server.logout()
def test_logincapa(self):
self.assertTrue('LOGINDISABLED' in self.server.capabilities)
def test_anonlogin(self):
self.assertTrue('AUTH=ANONYMOUS' in self.server.capabilities)
rs = self.server.login(self.username, self.password)
self.assertEqual(rs[0], 'OK')
def test_logout(self):
rs = self.server.logout()
self.server = None
self.assertEqual(rs[0], 'BYE')
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_SSLTest(RemoteIMAPTest):
port = 993
imap_class = IMAP4_SSL
def test_logincapa(self):
self.assertFalse('LOGINDISABLED' in self.server.capabilities)
self.assertTrue('AUTH=PLAIN' in self.server.capabilities)
def test_main():
tests = [TestImaplib]
if support.is_resource_enabled('network'):
if ssl:
global CERTFILE
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
if not os.path.exists(CERTFILE):
raise support.TestFailed("Can't read certificate files!")
tests.extend([
ThreadedNetworkedTests, ThreadedNetworkedTestsSSL,
RemoteIMAPTest, RemoteIMAP_SSLTest,
])
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
| 30.582677
| 79
| 0.608136
|
9c95c84897795c56e0bae2591c7b8f5502e229dc
| 2,442
|
py
|
Python
|
svg/tests.py
|
alexanderisora/django-inline-svg
|
33a86b8f3e4f60067bec8bb8d68827e1cde4097f
|
[
"MIT"
] | 76
|
2016-03-24T14:04:07.000Z
|
2021-11-08T09:04:05.000Z
|
svg/tests.py
|
alexanderisora/django-inline-svg
|
33a86b8f3e4f60067bec8bb8d68827e1cde4097f
|
[
"MIT"
] | 7
|
2016-03-24T14:52:35.000Z
|
2020-10-12T11:27:56.000Z
|
svg/tests.py
|
alexanderisora/django-inline-svg
|
33a86b8f3e4f60067bec8bb8d68827e1cde4097f
|
[
"MIT"
] | 17
|
2016-03-31T15:43:46.000Z
|
2021-11-14T06:30:21.000Z
|
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template import Context, Template
from django.test.testcases import SimpleTestCase
from svg.exceptions import SVGNotFound
class SVGTemplateTagTest(SimpleTestCase):
def test_should_render_svg(self):
svg_file = open(os.path.join(settings.BASE_DIR,
'static',
'svg',
'django.svg')).read()
template = Template("{% load svg %}{% svg 'django' %}")
self.assertEqual(svg_file, template.render(Context()))
def test_when_given_invalid_file_it_should_fail_silently(self):
template = Template("{% load svg %}{% svg 'thisdoesntexist' %}")
self.assertEqual('', template.render(Context()))
def test_when_debug_it_should_raise_an_error(self):
template = Template("{% load svg %}{% svg 'thisdoesntexist' %}")
with self.settings(DEBUG=True):
with self.assertRaises(SVGNotFound):
template.render(Context())
def test_should_load_svg_from_custom_directory(self):
with self.settings(SVG_DIRS=[os.path.join(settings.BASE_DIR,
'static', 'custom-dir')]):
svg_file = open(os.path.join(settings.BASE_DIR,
'static',
'custom-dir',
'other.svg')).read()
template = Template("{% load svg %}{% svg 'other' %}")
self.assertEqual(svg_file, template.render(Context()))
def test_when_given_invalid_file_and_using_custom_directory_it_should_fail(self): # noqa
with self.settings(SVG_DIRS=[os.path.join(settings.BASE_DIR,
'static', 'custom-dir')]):
template = Template("{% load svg %}{% svg 'nonexistent' %}")
self.assertEqual('', template.render(Context()))
def test_when_SVG_DIRS_isnt_a_list_it_should_raise_an_error(self):
with self.settings(SVG_DIRS=os.path.join(settings.BASE_DIR,
'static', 'custom-dir')):
template = Template("{% load svg %}{% svg 'other' %}")
with self.assertRaises(ImproperlyConfigured):
template.render(Context())
| 42.103448
| 93
| 0.572072
|
274de8df36fe128e7e8a5719260c80ccdf75929d
| 44
|
py
|
Python
|
guillotina_volto/fields/__init__.py
|
enfold/guillotina-volto
|
d38ee300470c813c99341eaeb2ba8a2b5fb7d778
|
[
"BSD-2-Clause"
] | 1
|
2020-11-14T13:09:49.000Z
|
2020-11-14T13:09:49.000Z
|
guillotina_volto/fields/__init__.py
|
enfold/guillotina-volto
|
d38ee300470c813c99341eaeb2ba8a2b5fb7d778
|
[
"BSD-2-Clause"
] | 4
|
2021-05-14T20:21:03.000Z
|
2021-11-18T01:27:04.000Z
|
guillotina_volto/fields/__init__.py
|
enfold/guillotina-volto
|
d38ee300470c813c99341eaeb2ba8a2b5fb7d778
|
[
"BSD-2-Clause"
] | null | null | null |
from .richtext import RichTextField # noqa
| 22
| 43
| 0.795455
|
b936f18101a54142c16cecc67745efd3a5d6cced
| 8,389
|
py
|
Python
|
dump/main.py
|
liuh-80/sonic-utilities
|
3d3c89bd75e3c70881c64e2a59043177c56111b4
|
[
"Apache-2.0"
] | null | null | null |
dump/main.py
|
liuh-80/sonic-utilities
|
3d3c89bd75e3c70881c64e2a59043177c56111b4
|
[
"Apache-2.0"
] | null | null | null |
dump/main.py
|
liuh-80/sonic-utilities
|
3d3c89bd75e3c70881c64e2a59043177c56111b4
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import json
import re
import click
from tabulate import tabulate
from sonic_py_common import multi_asic
from utilities_common.constants import DEFAULT_NAMESPACE
from swsscommon.swsscommon import ConfigDBConnector
from dump.match_infra import RedisSource, JsonSource, ConnectionPool
from dump import plugins
# Autocompletion Helper
def get_available_modules(ctx, args, incomplete):
return [k for k in plugins.dump_modules.keys() if incomplete in k]
# Display Modules Callback
def show_modules(ctx, param, value):
if not value or ctx.resilient_parsing:
return
header = ["Module", "Identifier"]
display = []
for mod in plugins.dump_modules:
display.append((mod, plugins.dump_modules[mod].ARG_NAME))
click.echo(tabulate(display, header))
ctx.exit()
@click.group()
def dump():
pass
@dump.command()
@click.pass_context
@click.argument('module', required=True, type=str, autocompletion=get_available_modules)
@click.argument('identifier', required=True, type=str)
@click.option('--show', '-s', is_flag=True, default=False, expose_value=False,
callback=show_modules, help='Display Modules Available', is_eager=True)
@click.option('--db', '-d', multiple=True,
help='Only dump from these Databases or the CONFIG_FILE')
@click.option('--table', '-t', is_flag=True, default=False,
help='Print in tabular format', show_default=True)
@click.option('--key-map', '-k', is_flag=True, default=False, show_default=True,
help="Only fetch the keys matched, don't extract field-value dumps")
@click.option('--verbose', '-v', is_flag=True, default=False, show_default=True,
help="Prints any intermediate output to stdout useful for dev & troubleshooting")
@click.option('--namespace', '-n', default=DEFAULT_NAMESPACE, type=str,
show_default=True, help='Dump the redis-state for this namespace.')
def state(ctx, module, identifier, db, table, key_map, verbose, namespace):
"""
Dump the current state of the identifier for the specified module from Redis DB or CONFIG_FILE
"""
if not multi_asic.is_multi_asic() and namespace != DEFAULT_NAMESPACE:
click.echo("Namespace option is not valid for a single-ASIC device")
ctx.exit()
if multi_asic.is_multi_asic() and (namespace != DEFAULT_NAMESPACE and namespace not in multi_asic.get_namespace_list()):
click.echo("Namespace option is not valid. Choose one of {}".format(multi_asic.get_namespace_list()))
ctx.exit()
if module not in plugins.dump_modules:
click.echo("No Matching Plugin has been Implemented")
ctx.exit()
if verbose:
os.environ["VERBOSE"] = "1"
else:
os.environ["VERBOSE"] = "0"
ctx.module = module
obj = plugins.dump_modules[module]()
if identifier == "all":
ids = obj.get_all_args(namespace)
else:
ids = identifier.split(",")
params = {}
collected_info = {}
params['namespace'] = namespace
for arg in ids:
params[plugins.dump_modules[module].ARG_NAME] = arg
try:
collected_info[arg] = obj.execute(params)
except ValueError as err:
click.fail(f"Failed to execute plugin: {err}")
if len(db) > 0:
collected_info = filter_out_dbs(db, collected_info)
vidtorid = extract_rid(collected_info, namespace)
if not key_map:
collected_info = populate_fv(collected_info, module, namespace)
for id in vidtorid.keys():
collected_info[id]["ASIC_DB"]["vidtorid"] = vidtorid[id]
print_dump(collected_info, table, module, identifier, key_map)
return
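# Illustrative CLI invocation (module name and identifier are hypothetical; the
# available modules are listed by `dump state --show`):
#   dump state port Ethernet0 --db APPL_DB --table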
def extract_rid(info, ns):
r = RedisSource(ConnectionPool())
r.connect("ASIC_DB", ns)
vidtorid = {}
vid_cache = {} # Cache Entries to reduce number of Redis Calls
for arg in info.keys():
mp = get_v_r_map(r, info[arg], vid_cache)
if mp:
vidtorid[arg] = mp
return vidtorid
def get_v_r_map(r, single_dict, vid_cache):
v_r_map = {}
asic_obj_ptrn = "ASIC_STATE:.*:oid:0x\w{1,14}"
if "ASIC_DB" in single_dict and "keys" in single_dict["ASIC_DB"]:
for redis_key in single_dict["ASIC_DB"]["keys"]:
if re.match(asic_obj_ptrn, redis_key):
matches = re.findall(r"oid:0x\w{1,14}", redis_key)
if matches:
vid = matches[0]
if vid in vid_cache:
rid = vid_cache[vid]
else:
rid = r.hget("ASIC_DB", "VIDTORID", vid)
vid_cache[vid] = rid
v_r_map[vid] = rid if rid else "Real ID Not Found"
return v_r_map
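# Illustrative shape of the returned VID-to-RID mapping (OIDs are made up):
#   {"oid:0x1000000000001": "oid:0x1690000000001"}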
# Filter dbs which are not required
def filter_out_dbs(db_list, collected_info):
args_ = list(collected_info.keys())
for arg in args_:
dbs = list(collected_info[arg].keys())
for db in dbs:
if db not in db_list:
del collected_info[arg][db]
return collected_info
def populate_fv(info, module, namespace):
all_dbs = set()
for id in info.keys():
for db_name in info[id].keys():
all_dbs.add(db_name)
db_cfg_file = JsonSource()
db_conn = ConnectionPool().initialize_connector(namespace)
for db_name in all_dbs:
if db_name == "CONFIG_FILE":
db_cfg_file.connect(plugins.dump_modules[module].CONFIG_FILE, namespace)
else:
db_conn.connect(db_name)
final_info = {}
for id in info.keys():
final_info[id] = {}
for db_name in info[id].keys():
final_info[id][db_name] = {}
final_info[id][db_name]["keys"] = []
final_info[id][db_name]["tables_not_found"] = info[id][db_name]["tables_not_found"]
for key in info[id][db_name]["keys"]:
if db_name == "CONFIG_FILE":
fv = db_cfg_file.get(db_name, key)
else:
fv = db_conn.get_all(db_name, key)
final_info[id][db_name]["keys"].append({key: fv})
return final_info
def get_dict_str(key_obj):
conn = ConfigDBConnector()
table = []
key_obj = conn.raw_to_typed(key_obj)
for field, value in key_obj.items():
if isinstance(value, list):
value = "\n".join(value)
table.append((field, value))
return tabulate(table, headers=["field", "value"], tablefmt="psql")
# print dump
def print_dump(collected_info, table, module, identifier, key_map):
if not table:
click.echo(json.dumps(collected_info, indent=4))
return
top_header = [plugins.dump_modules[module].ARG_NAME, "DB_NAME", "DUMP"]
final_collection = []
for ids in collected_info.keys():
for db in collected_info[ids].keys():
total_info = ""
if collected_info[ids][db]["tables_not_found"]:
tabulate_fmt = []
for tab in collected_info[ids][db]["tables_not_found"]:
tabulate_fmt.append([tab])
total_info += tabulate(tabulate_fmt, ["Tables Not Found"], tablefmt="grid")
total_info += "\n"
if not key_map:
values = []
hdrs = ["Keys", "field-value pairs"]
for key_obj in collected_info[ids][db]["keys"]:
if isinstance(key_obj, dict) and key_obj:
key = list(key_obj.keys())[0]
values.append([key, get_dict_str(key_obj[key])])
total_info += str(tabulate(values, hdrs, tablefmt="grid"))
else:
temp = []
for key_ in collected_info[ids][db]["keys"]:
temp.append([key_])
total_info += str(tabulate(temp, headers=["Keys Collected"], tablefmt="grid"))
total_info += "\n"
if "vidtorid" in collected_info[ids][db]:
temp = []
for pair in collected_info[ids][db]["vidtorid"].items():
temp.append(list(pair))
total_info += str(tabulate(temp, headers=["vid", "rid"], tablefmt="grid"))
final_collection.append([ids, db, total_info])
click.echo(tabulate(final_collection, top_header, tablefmt="grid"))
return
if __name__ == '__main__':
dump()
| 35.247899 | 124 | 0.614138 |
e75bf47239910c9b8d43faf733006d1125b9929f | 104,501 | py | Python |
__main__.py | labscript-suite-temp-archive/lyse-fork--cavitylab-lyse--forked-from--labscript_suite-lyse | b77583aad39800f9e22e638db7e878160e0b43ff | ["BSD-2-Clause"] | null | null | null |
__main__.py | labscript-suite-temp-archive/lyse-fork--cavitylab-lyse--forked-from--labscript_suite-lyse | b77583aad39800f9e22e638db7e878160e0b43ff | ["BSD-2-Clause"] | null | null | null |
__main__.py | labscript-suite-temp-archive/lyse-fork--cavitylab-lyse--forked-from--labscript_suite-lyse | b77583aad39800f9e22e638db7e878160e0b43ff | ["BSD-2-Clause"] | null | null | null |
from __future__ import division, unicode_literals, print_function, absolute_import # Ease the transition to Python 3
# stdlib imports
import os
import sys
import socket
import logging
import threading
import signal
import subprocess
import time
import traceback
import pprint
import ast
# Turn on our error catching for all subsequent imports
import labscript_utils.excepthook
# 3rd party imports:
import numpy as np
import labscript_utils.h5_lock
import h5py
import pandas
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Requires labscript_utils > 2.1.0')
check_version('qtutils', '2.1.0', '3.0.0')
import zprocess.locking
from zprocess import ZMQServer
from labscript_utils.labconfig import LabConfig, config_prefix
from labscript_utils.setup_logging import setup_logging
from labscript_utils.qtwidgets.headerview_with_widgets import HorizontalHeaderViewWithWidgets
import labscript_utils.shared_drive as shared_drive
from lyse.dataframe_utilities import (concat_with_padding,
get_dataframe_from_shot,
replace_with_padding)
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils import inmain_decorator, inmain, UiLoader, DisconnectContextManager
from qtutils.outputbox import OutputBox
from qtutils.auto_scroll_to_end import set_auto_scroll_to_end
import qtutils.icons
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
# Set working directory to lyse folder, resolving symlinks
lyse_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(lyse_dir)
# Set a meaningful name for zprocess.locking's client id:
zprocess.locking.set_client_process_name('lyse')
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.abspath('lyse.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))
relaunch_display_name = app_descriptions['lyse']
set_appusermodel(window_id, appids['lyse'], icon_path, relaunch_command, relaunch_display_name)
@inmain_decorator()
def error_dialog(message):
QtWidgets.QMessageBox.warning(app.ui, 'lyse', message)
@inmain_decorator()
def question_dialog(message):
reply = QtWidgets.QMessageBox.question(app.ui, 'lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
return (reply == QtWidgets.QMessageBox.Yes)
def scientific_notation(x, sigfigs=4, mode='eng'):
"""Returns a unicode string of the float f in scientific notation"""
times = u'\u00d7'
thinspace = u'\u2009'
hairspace = u'\u200a'
sups = {u'-': u'\u207b',
u'0': u'\u2070',
u'1': u'\xb9',
u'2': u'\xb2',
u'3': u'\xb3',
u'4': u'\u2074',
u'5': u'\u2075',
u'6': u'\u2076',
u'7': u'\u2077',
u'8': u'\u2078',
u'9': u'\u2079'}
prefixes = {
-24: u"y",
-21: u"z",
-18: u"a",
-15: u"f",
-12: u"p",
-9: u"n",
-6: u"\u03bc",
-3: u"m",
0: u"",
3: u"k",
6: u"M",
9: u"G",
12: u"T",
15: u"P",
18: u"E",
21: u"Z",
24: u"Y"
}
if not isinstance(x, float):
raise TypeError('x must be a floating point number')
if np.isnan(x) or np.isinf(x):
return str(x)
if x != 0:
exponent = int(np.floor(np.log10(np.abs(x))))
# Only multiples of 10^3
exponent = int(np.floor(exponent / 3) * 3)
else:
exponent = 0
significand = x / 10 ** exponent
pre_decimal, post_decimal = divmod(significand, 1)
digits = sigfigs - len(str(int(pre_decimal)))
significand = round(significand, digits)
result = str(significand)
if exponent:
if mode == 'exponential':
superscript = ''.join(sups.get(char, char) for char in str(exponent))
result += thinspace + times + thinspace + '10' + superscript
elif mode == 'eng':
try:
# If our number has an SI prefix then use it
prefix = prefixes[exponent]
result += hairspace + prefix
except KeyError:
# Otherwise display in scientific notation
superscript = ''.join(sups.get(char, char) for char in str(exponent))
result += thinspace + times + thinspace + '10' + superscript
return result
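# Quick, self-contained sanity checks for scientific_notation(); not part of the
# original module. The exact return strings contain thin/hair space characters,
# so only the visible digits and SI prefixes are noted in the comments, and the
# input values are arbitrary examples.
def _demo_scientific_notation():
    assert scientific_notation(3.0) == '3.0'               # exponent 0: no suffix
    print(scientific_notation(2500000.0))                  # roughly '2.5 M' (eng mode)
    print(scientific_notation(0.001))                      # roughly '1.0 m'
    print(scientific_notation(0.001, mode='exponential'))  # roughly '1.0 × 10⁻³'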
def get_screen_geometry():
"""Return the a list of the geometries of each screen: each a tuple of
left, top, width and height"""
geoms = []
desktop = qapplication.desktop()
for i in range(desktop.screenCount()):
sg = desktop.screenGeometry(i)
geoms.append((sg.left(), sg.top(), sg.width(), sg.height()))
return geoms
class WebServer(ZMQServer):
def handler(self, request_data):
logger.info('WebServer request: %s' % str(request_data))
if request_data == 'hello':
return 'hello'
elif request_data == 'get dataframe':
# convert_objects() picks fixed datatypes for columns that are
# compatible with fixed datatypes, dramatically speeding up
# pickling. But we don't impose fixed datatypes earlier than now
# because the user is free to use mixed datatypes in a column, and
# we won't want to prevent values of a different type being added
# in the future. All kwargs False because we don't want to coerce
# strings to numbers or anything - just choose the correct
# datatype for columns that are already a single datatype:
return app.filebox.shots_model.dataframe.convert_objects(
convert_dates=False, convert_numeric=False, convert_timedeltas=False)
elif isinstance(request_data, dict):
if 'filepath' in request_data:
h5_filepath = shared_drive.path_to_local(request_data['filepath'])
if isinstance(h5_filepath, bytes):
h5_filepath = h5_filepath.decode('utf8')
if not isinstance(h5_filepath, str):
raise AssertionError(str(type(h5_filepath)) + ' is not str or bytes')
app.filebox.incoming_queue.put(h5_filepath)
return 'added successfully'
return ("error: operation not supported. Recognised requests are:\n "
"'get dataframe'\n 'hello'\n {'filepath': <some_h5_filepath>}")
class LyseMainWindow(QtWidgets.QMainWindow):
# A signal to show that the window is shown and painted.
firstPaint = Signal()
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def __init__(self, *args, **kwargs):
QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
self._previously_painted = False
def closeEvent(self, event):
if app.on_close_event():
return QtWidgets.QMainWindow.closeEvent(self, event)
else:
event.ignore()
def event(self, event):
result = QtWidgets.QMainWindow.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def paintEvent(self, event):
result = QtWidgets.QMainWindow.paintEvent(self, event)
if not self._previously_painted:
self._previously_painted = True
self.firstPaint.emit()
return result
class AnalysisRoutine(object):
def __init__(self, filepath, model, output_box_port, checked=QtCore.Qt.Checked):
self.filepath = filepath
self.shortname = os.path.basename(self.filepath)
self.model = model
self.output_box_port = output_box_port
self.COL_ACTIVE = RoutineBox.COL_ACTIVE
self.COL_STATUS = RoutineBox.COL_STATUS
self.COL_NAME = RoutineBox.COL_NAME
self.ROLE_FULLPATH = RoutineBox.ROLE_FULLPATH
self.error = False
self.done = False
self.to_worker, self.from_worker, self.worker = self.start_worker()
# Make a row to put into the model:
active_item = QtGui.QStandardItem()
active_item.setCheckable(True)
active_item.setCheckState(checked)
info_item = QtGui.QStandardItem()
name_item = QtGui.QStandardItem(self.shortname)
name_item.setToolTip(self.filepath)
name_item.setData(self.filepath, self.ROLE_FULLPATH)
self.model.appendRow([active_item, info_item, name_item])
self.exiting = False
def start_worker(self):
# Start a worker process for this analysis routine:
child_handles = zprocess.subprocess_with_queues('analysis_subprocess.py', self.output_box_port)
to_worker, from_worker, worker = child_handles
# Tell the worker what script it will be executing:
to_worker.put(self.filepath)
return to_worker, from_worker, worker
def do_analysis(self, filepath):
self.to_worker.put(['analyse', filepath])
signal, data = self.from_worker.get()
if signal == 'error':
return False, data
elif signal == 'done':
return True, data
else:
raise ValueError('invalid signal %s'%str(signal))
@inmain_decorator()
def set_status(self, status):
index = self.get_row_index()
if index is None:
# Welp, we've just been deleted. Nothing to do here.
return
status_item = self.model.item(index, self.COL_STATUS)
if status == 'done':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
self.done = True
self.error = False
elif status == 'working':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/hourglass'))
self.done = False
self.error = False
elif status == 'error':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/exclamation'))
self.error = True
self.done = False
elif status == 'clear':
status_item.setData(None, QtCore.Qt.DecorationRole)
self.done = False
self.error = False
else:
raise ValueError(status)
@inmain_decorator()
def enabled(self):
index = self.get_row_index()
if index is None:
# Welp, we've just been deleted.
return False
enabled_item = self.model.item(index, self.COL_ACTIVE)
return (enabled_item.checkState() == QtCore.Qt.Checked)
def get_row_index(self):
"""Returns the row index for this routine's row in the model"""
for row in range(self.model.rowCount()):
name_item = self.model.item(row, self.COL_NAME)
fullpath = name_item.data(self.ROLE_FULLPATH)
if fullpath == self.filepath:
return row
def restart(self):
# TODO set status to 'restarting' or an icon or something, and gray out the item?
self.end_child(restart=True)
def remove(self):
"""End the child process and remove from the treeview"""
self.end_child()
index = self.get_row_index()
if index is None:
# Already gone
return
self.model.removeRow(index)
def end_child(self, restart=False):
self.to_worker.put(['quit',None])
timeout_time = time.time() + 2
self.exiting = True
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(self.worker, timeout_time, kill=False, restart=restart))
def check_child_exited(self, worker, timeout_time, kill=False, restart=False):
worker.poll()
if worker.returncode is None and time.time() < timeout_time:
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(worker, timeout_time, kill, restart))
return
elif worker.returncode is None:
if not kill:
worker.terminate()
app.output_box.output('%s worker not responding.\n'%self.shortname)
timeout_time = time.time() + 2
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(worker, timeout_time, kill=True, restart=restart))
return
else:
worker.kill()
app.output_box.output('%s worker killed\n'%self.shortname, red=True)
elif kill:
app.output_box.output('%s worker terminated\n'%self.shortname, red=True)
else:
app.output_box.output('%s worker exited cleanly\n'%self.shortname)
# if analysis was running notify analysisloop that analysis has failed
self.from_worker.put(('error', {}))
if restart:
self.to_worker, self.from_worker, self.worker = self.start_worker()
app.output_box.output('%s worker restarted\n'%self.shortname)
self.exiting = False
class TreeView(QtWidgets.QTreeView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTreeView that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click."""
def __init__(self, *args):
QtWidgets.QTreeView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
def mousePressEvent(self, event):
result = QtWidgets.QTreeView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTreeView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTreeView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTreeView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
class RoutineBox(object):
COL_ACTIVE = 0
COL_STATUS = 1
COL_NAME = 2
ROLE_FULLPATH = QtCore.Qt.UserRole + 1
# This data (stored in the name item) does not necessarily match
# the position in the model. It will be set just
# prior to sort() being called with this role as the sort data.
# This is how we will reorder the model's rows instead of
# using remove/insert.
ROLE_SORTINDEX = QtCore.Qt.UserRole + 2
def __init__(self, container, exp_config, filebox, from_filebox, to_filebox, output_box_port, multishot=False):
self.multishot = multishot
self.filebox = filebox
self.exp_config = exp_config
self.from_filebox = from_filebox
self.to_filebox = to_filebox
self.output_box_port = output_box_port
self.logger = logging.getLogger('lyse.RoutineBox.%s'%('multishot' if multishot else 'singleshot'))
loader = UiLoader()
loader.registerCustomWidget(TreeView)
self.ui = loader.load('routinebox.ui')
container.addWidget(self.ui)
if multishot:
self.ui.groupBox.setTitle('Multishot routines')
else:
self.ui.groupBox.setTitle('Singleshot routines')
self.model = UneditableModel()
self.header = HorizontalHeaderViewWithWidgets(self.model)
self.ui.treeView.setHeader(self.header)
self.ui.treeView.setModel(self.model)
active_item = QtGui.QStandardItem()
active_item.setToolTip('Whether the analysis routine should run')
status_item = QtGui.QStandardItem()
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
status_item.setToolTip('The status of this analysis routine\'s execution')
name_item = QtGui.QStandardItem('name')
name_item.setToolTip('The name of the python script for the analysis routine')
self.select_all_checkbox = QtWidgets.QCheckBox()
self.select_all_checkbox.setToolTip('whether the analysis routine should run')
self.header.setWidget(self.COL_ACTIVE, self.select_all_checkbox)
self.header.setStretchLastSection(True)
self.select_all_checkbox.setTristate(False)
self.model.setHorizontalHeaderItem(self.COL_ACTIVE, active_item)
self.model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
self.model.setHorizontalHeaderItem(self.COL_NAME, name_item)
self.model.setSortRole(self.ROLE_SORTINDEX)
self.ui.treeView.resizeColumnToContents(self.COL_ACTIVE)
self.ui.treeView.resizeColumnToContents(self.COL_STATUS)
self.ui.treeView.setColumnWidth(self.COL_NAME, 200)
self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_set_selected_active = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'set selected routines active', self.ui)
self.action_set_selected_inactive = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'set selected routines inactive', self.ui)
self.action_restart_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/arrow-circle'), 'restart worker process for selected routines', self.ui)
self.action_remove_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected routines', self.ui)
self.last_opened_routine_folder = self.exp_config.get('paths', 'analysislib')
self.routines = []
self.connect_signals()
self.analysis = threading.Thread(target = self.analysis_loop)
self.analysis.daemon = True
self.analysis.start()
def connect_signals(self):
self.ui.toolButton_add_routines.clicked.connect(self.on_add_routines_clicked)
self.ui.toolButton_remove_routines.clicked.connect(self.on_remove_selection)
self.model.itemChanged.connect(self.on_model_item_changed)
self.ui.treeView.doubleLeftClicked.connect(self.on_treeview_double_left_clicked)
# A context manager with which we can temporarily disconnect the above connection.
self.model_item_changed_disconnected = DisconnectContextManager(
self.model.itemChanged, self.on_model_item_changed)
self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
self.action_set_selected_active.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
self.action_set_selected_inactive.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
self.action_restart_selected.triggered.connect(self.on_restart_selected_triggered)
self.action_remove_selected.triggered.connect(self.on_remove_selection)
self.ui.toolButton_move_to_top.clicked.connect(self.on_move_to_top_clicked)
self.ui.toolButton_move_up.clicked.connect(self.on_move_up_clicked)
self.ui.toolButton_move_down.clicked.connect(self.on_move_down_clicked)
self.ui.toolButton_move_to_bottom.clicked.connect(self.on_move_to_bottom_clicked)
def on_add_routines_clicked(self):
routine_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
'Select analysis routines',
self.last_opened_routine_folder,
"Python scripts (*.py)")
if type(routine_files) is tuple:
routine_files, _ = routine_files
if not routine_files:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
routine_files = [os.path.abspath(routine_file) for routine_file in routine_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_routine_folder = os.path.dirname(routine_files[0])
self.add_routines([(routine_file, QtCore.Qt.Checked) for routine_file in routine_files])
def add_routines(self, routine_files, clear_existing=False):
"""Add routines to the routine box, where routine_files is a list of
tuples containing the filepath and whether the routine is enabled or
not when it is added. if clear_existing == True, then any existing
analysis routines will be cleared before the new ones are added."""
if clear_existing:
for routine in self.routines[:]:
routine.remove()
self.routines.remove(routine)
# Queue the files to be opened:
for filepath, checked in routine_files:
if filepath in [routine.filepath for routine in self.routines]:
app.output_box.output('Warning: Ignoring duplicate analysis routine %s\n'%filepath, red=True)
continue
routine = AnalysisRoutine(filepath, self.model, self.output_box_port, checked)
self.routines.append(routine)
self.update_select_all_checkstate()
def on_treeview_double_left_clicked(self, index):
# If double clicking on the name item, open
# the routine in the specified text editor:
if index.column() != self.COL_NAME:
return
name_item = self.model.item(index.row(), self.COL_NAME)
routine_filepath = name_item.data(self.ROLE_FULLPATH)
# get path to text editor
editor_path = self.exp_config.get('programs', 'text_editor')
editor_args = self.exp_config.get('programs', 'text_editor_arguments')
# Get the current labscript file:
if not editor_path:
error_dialog("No editor specified in the labconfig.")
if '{file}' in editor_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
editor_args = [arg if arg != '{file}' else routine_filepath for arg in editor_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
editor_args = [routine_filepath] + editor_args.split()
try:
subprocess.Popen([editor_path] + editor_args)
except Exception as e:
error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def on_remove_selection(self):
self.remove_selection()
def remove_selection(self, confirm=True):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
if not selected_rows:
return
if confirm and not question_dialog("Remove %d routines?" % len(selected_rows)):
return
name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
for routine in self.routines[:]:
if routine.filepath in filepaths:
routine.remove()
self.routines.remove(routine)
self.update_select_all_checkstate()
def on_model_item_changed(self, item):
if item.column() == self.COL_ACTIVE:
self.update_select_all_checkstate()
def on_select_all_state_changed(self, state):
with self.select_all_checkbox_state_changed_disconnected:
# Do not allow a switch *to* a partially checked state:
self.select_all_checkbox.setTristate(False)
state = self.select_all_checkbox.checkState()
with self.model_item_changed_disconnected:
for row in range(self.model.rowCount()):
active_item = self.model.item(row, self.COL_ACTIVE)
active_item.setCheckState(state)
def on_treeView_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui.treeView)
menu.addAction(self.action_set_selected_active)
menu.addAction(self.action_set_selected_inactive)
menu.addAction(self.action_restart_selected)
menu.addAction(self.action_remove_selected)
menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, active):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
for row in selected_rows:
active_item = self.model.item(row, self.COL_ACTIVE)
active_item.setCheckState(active)
self.update_select_all_checkstate()
def on_move_to_top_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
i_selected = 0
i_unselected = len(selected_rows)
order = []
for i in range(n):
if i in selected_rows:
order.append(i_selected)
i_selected += 1
else:
order.append(i_unselected)
i_unselected += 1
self.reorder(order)
def on_move_up_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
order = []
last_unselected_index = None
for i in range(n):
if i in selected_rows:
if last_unselected_index is None:
order.append(i)
else:
order.append(i - 1)
order[last_unselected_index] += 1
else:
last_unselected_index = i
order.append(i)
self.reorder(order)
def on_move_down_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
order = []
last_unselected_index = None
for i in reversed(range(n)):
if i in selected_rows:
if last_unselected_index is None:
order.insert(0, i)
else:
order.insert(0, i + 1)
order[last_unselected_index - n] -= 1
else:
last_unselected_index = i
order.insert(0, i)
self.reorder(order)
def on_move_to_bottom_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
i_selected = n - len(selected_rows)
i_unselected = 0
order = []
for i in range(n):
if i in selected_rows:
order.append(i_selected)
i_selected += 1
else:
order.append(i_unselected)
i_unselected += 1
self.reorder(order)
def on_restart_selected_triggered(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
for routine in self.routines:
if routine.filepath in filepaths:
routine.restart()
self.update_select_all_checkstate()
def analysis_loop(self):
while True:
filepath = self.from_filebox.get()
if self.multishot:
assert filepath is None
# TODO: get the filepath of the output h5 file:
# filepath = self.filechooserentry.get_text()
self.logger.info('got a file to process: %s'%filepath)
self.do_analysis(filepath)
def todo(self):
"""How many analysis routines are not done?"""
return len([r for r in self.routines if r.enabled() and not r.done])
def do_analysis(self, filepath):
"""Run all analysis routines once on the given filepath,
which is a shot file if we are a singleshot routine box"""
for routine in self.routines:
routine.set_status('clear')
remaining = self.todo()
error = False
updated_data = {}
while remaining:
self.logger.debug('%d routines left to do'%remaining)
for routine in self.routines:
if routine.enabled() and not routine.done:
break
else:
routine = None
if routine is not None:
self.logger.info('running analysis routine %s'%routine.shortname)
routine.set_status('working')
success, updated_data = routine.do_analysis(filepath)
if success:
routine.set_status('done')
self.logger.debug('success')
else:
routine.set_status('error')
self.logger.debug('failure')
error = True
break
# Race conditions here, but it's only for reporting percent done
# so it doesn't matter if it's wrong briefly:
remaining = self.todo()
total = len([r for r in self.routines if r.enabled()])
done = total - remaining
try:
status_percent = 100*float(done)/(remaining + done)
except ZeroDivisionError:
# All routines got deleted mid-analysis, we're done here:
status_percent = 100.0
self.to_filebox.put(['progress', status_percent, updated_data])
if error:
self.to_filebox.put(['error', None, updated_data])
else:
self.to_filebox.put(['done', 100.0, {}])
self.logger.debug('completed analysis of %s'%filepath)
def reorder(self, order):
assert len(order) == len(set(order)), 'ordering contains non-unique elements'
# Apply the reordering to the liststore:
for old_index, new_index in enumerate(order):
name_item = self.model.item(old_index, self.COL_NAME)
name_item.setData(new_index, self.ROLE_SORTINDEX)
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
# Apply new order to our list of routines too:
self.routines = [self.routines[order.index(i)] for i in range(len(order))]
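# Illustrative example (values made up): with three routines and only row 2
# selected, on_move_to_top_clicked() builds order = [1, 2, 0], i.e. "row i
# moves to position order[i]". reorder() then sorts the model by
# ROLE_SORTINDEX and rebuilds self.routines as [old_row_2, old_row_0, old_row_1].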
def update_select_all_checkstate(self):
with self.select_all_checkbox_state_changed_disconnected:
all_states = []
for row in range(self.model.rowCount()):
active_item = self.model.item(row, self.COL_ACTIVE)
all_states.append(active_item.checkState())
if all(state == QtCore.Qt.Checked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
else:
self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
# TESTING ONLY REMOVE IN PRODUCTION
def queue_dummy_routines(self):
folder = os.path.abspath('test_routines')
for filepath in ['hello.py', 'test.py']:
routine = AnalysisRoutine(os.path.join(folder, filepath), self.model, self.output_box_port)
self.routines.append(routine)
self.update_select_all_checkstate()
class EditColumnsDialog(QtWidgets.QDialog):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
close_signal = Signal()
def __init__(self):
QtWidgets.QDialog.__init__(self, None, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)
def event(self, event):
result = QtWidgets.QDialog.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def closeEvent(self, event):
self.close_signal.emit()
event.ignore()
class EditColumns(object):
ROLE_SORT_DATA = QtCore.Qt.UserRole + 1
COL_VISIBLE = 0
COL_NAME = 1
def __init__(self, filebox, column_names, columns_visible):
self.filebox = filebox
self.column_names = column_names.copy()
self.columns_visible = columns_visible.copy()
self.old_columns_visible = columns_visible.copy()
loader = UiLoader()
self.ui = loader.load('edit_columns.ui', EditColumnsDialog())
self.model = UneditableModel()
self.header = HorizontalHeaderViewWithWidgets(self.model)
self.select_all_checkbox = QtWidgets.QCheckBox()
self.select_all_checkbox.setTristate(False)
self.ui.treeView.setHeader(self.header)
self.proxy_model = QtCore.QSortFilterProxyModel()
self.proxy_model.setSourceModel(self.model)
self.proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.proxy_model.setFilterKeyColumn(self.COL_NAME)
self.ui.treeView.setSortingEnabled(True)
self.header.setStretchLastSection(True)
self.proxy_model.setSortRole(self.ROLE_SORT_DATA)
self.ui.treeView.setModel(self.proxy_model)
self.ui.setWindowModality(QtCore.Qt.ApplicationModal)
self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_set_selected_visible = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Show selected columns', self.ui)
self.action_set_selected_hidden = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Hide selected columns', self.ui)
self.connect_signals()
self.populate_model(column_names, self.columns_visible)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.ui.close_signal.connect(self.close)
self.ui.lineEdit_filter.textEdited.connect(self.on_filter_text_edited)
self.ui.pushButton_make_it_so.clicked.connect(self.make_it_so)
self.ui.pushButton_cancel.clicked.connect(self.cancel)
self.model.itemChanged.connect(self.on_model_item_changed)
# A context manager with which we can temporarily disconnect the above connection.
self.model_item_changed_disconnected = DisconnectContextManager(
self.model.itemChanged, self.on_model_item_changed)
self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
self.action_set_selected_visible.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
self.action_set_selected_hidden.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
def populate_model(self, column_names, columns_visible):
self.model.clear()
self.model.setHorizontalHeaderLabels(['', 'Name'])
self.header.setWidget(self.COL_VISIBLE, self.select_all_checkbox)
self.ui.treeView.resizeColumnToContents(self.COL_VISIBLE)
# Which indices in self.columns_visible the row numbers correspond to
self.column_indices = {}
# Remove our special columns from the dict of column names by keeping only tuples:
column_names = {i: name for i, name in column_names.items() if isinstance(name, tuple)}
# Sort the column names as comma separated values, converting to lower case:
sortkey = lambda item: ', '.join(item[1]).lower().strip(', ')
for column_index, name in sorted(column_names.items(), key=sortkey):
visible = columns_visible[column_index]
visible_item = QtGui.QStandardItem()
visible_item.setCheckable(True)
if visible:
visible_item.setCheckState(QtCore.Qt.Checked)
visible_item.setData(QtCore.Qt.Checked, self.ROLE_SORT_DATA)
else:
visible_item.setCheckState(QtCore.Qt.Unchecked)
visible_item.setData(QtCore.Qt.Unchecked, self.ROLE_SORT_DATA)
name_as_string = ', '.join(name).strip(', ')
name_item = QtGui.QStandardItem(name_as_string)
name_item.setData(sortkey((column_index, name)), self.ROLE_SORT_DATA)
self.model.appendRow([visible_item, name_item])
self.column_indices[self.model.rowCount() - 1] = column_index
self.ui.treeView.resizeColumnToContents(self.COL_NAME)
self.update_select_all_checkstate()
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
def on_treeView_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_set_selected_visible)
menu.addAction(self.action_set_selected_hidden)
menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, visible):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(self.proxy_model.mapToSource(index).row() for index in selected_indexes)
for row in selected_rows:
visible_item = self.model.item(row, self.COL_VISIBLE)
self.update_visible_state(visible_item, visible)
self.update_select_all_checkstate()
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def on_filter_text_edited(self, text):
self.proxy_model.setFilterWildcard(text)
def on_select_all_state_changed(self, state):
with self.select_all_checkbox_state_changed_disconnected:
# Do not allow a switch *to* a partially checked state:
self.select_all_checkbox.setTristate(False)
state = self.select_all_checkbox.checkState()
for row in range(self.model.rowCount()):
visible_item = self.model.item(row, self.COL_VISIBLE)
self.update_visible_state(visible_item, state)
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def update_visible_state(self, item, state):
assert item.column() == self.COL_VISIBLE, "unexpected column"
row = item.row()
with self.model_item_changed_disconnected:
item.setCheckState(state)
item.setData(state, self.ROLE_SORT_DATA)
if state == QtCore.Qt.Checked:
self.columns_visible[self.column_indices[row]] = True
else:
self.columns_visible[self.column_indices[row]] = False
def update_select_all_checkstate(self):
with self.select_all_checkbox_state_changed_disconnected:
all_states = []
for row in range(self.model.rowCount()):
visible_item = self.model.item(row, self.COL_VISIBLE)
all_states.append(visible_item.checkState())
if all(state == QtCore.Qt.Checked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
else:
self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
def on_model_item_changed(self, item):
state = item.checkState()
self.update_visible_state(item, state)
self.update_select_all_checkstate()
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def do_sort(self):
header = self.ui.treeView.header()
sort_column = header.sortIndicatorSection()
sort_order = header.sortIndicatorOrder()
self.ui.treeView.sortByColumn(sort_column, sort_order)
def update_columns(self, column_names, columns_visible):
# Index/name mapping may have changed. Get a mapping by *name* of
# which columns were previously visible, so we can update our by-index
# mapping in a moment:
old_columns_visible_by_name = {}
for old_column_number, visible in self.old_columns_visible.items():
column_name = self.column_names[old_column_number]
old_columns_visible_by_name[column_name] = visible
self.columns_visible = columns_visible.copy()
self.column_names = column_names.copy()
# Update the by-index mapping of which columns were visible before editing:
self.old_columns_visible = {}
for index, name in self.column_names.items():
try:
self.old_columns_visible[index] = old_columns_visible_by_name[name]
except KeyError:
# A new column. If editing is cancelled, any new columns
# should be set to visible:
self.old_columns_visible[index] = True
self.populate_model(column_names, self.columns_visible)
def show(self):
self.old_columns_visible = self.columns_visible.copy()
self.ui.show()
def close(self):
self.columns_visible = self.old_columns_visible.copy()
self.filebox.set_columns_visible(self.columns_visible)
self.populate_model(self.column_names, self.columns_visible)
self.ui.hide()
def cancel(self):
self.ui.close()
def make_it_so(self):
self.ui.hide()
class ItemDelegate(QtWidgets.QStyledItemDelegate):
"""An item delegate with a fixed height and a progress bar in one column"""
EXTRA_ROW_HEIGHT = 2
def __init__(self, view, model, col_status, role_status_percent):
self.view = view
self.model = model
self.COL_STATUS = col_status
self.ROLE_STATUS_PERCENT = role_status_percent
QtWidgets.QStyledItemDelegate.__init__(self)
def sizeHint(self, *args):
fontmetrics = QtGui.QFontMetrics(self.view.font())
text_height = fontmetrics.height()
row_height = text_height + self.EXTRA_ROW_HEIGHT
size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
return QtCore.QSize(size.width(), row_height)
def paint(self, painter, option, index):
if index.column() == self.COL_STATUS:
status_percent = self.model.data(index, self.ROLE_STATUS_PERCENT)
if status_percent == 100:
# Render as a normal item - this shows whatever icon is set instead of a progress bar.
return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
else:
# Method of rendering a progress bar into the view copied from
# Qt's 'network-torrent' example:
# http://qt-project.org/doc/qt-4.8/network-torrent-torrentclient-cpp.html
# Set up a QStyleOptionProgressBar to precisely mimic the
# environment of a progress bar.
progress_bar_option = QtWidgets.QStyleOptionProgressBar()
progress_bar_option.state = QtWidgets.QStyle.State_Enabled
progress_bar_option.direction = qapplication.layoutDirection()
progress_bar_option.rect = option.rect
progress_bar_option.fontMetrics = qapplication.fontMetrics()
progress_bar_option.minimum = 0
progress_bar_option.maximum = 100
progress_bar_option.textAlignment = QtCore.Qt.AlignCenter
progress_bar_option.textVisible = True
# Set the progress and text values of the style option.
progress_bar_option.progress = status_percent
progress_bar_option.text = '%d%%' % status_percent
# Draw the progress bar onto the view.
qapplication.style().drawControl(QtWidgets.QStyle.CE_ProgressBar, progress_bar_option, painter)
else:
return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
class UneditableModel(QtGui.QStandardItemModel):
def flags(self, index):
"""Return flags as normal except that the ItemIsEditable
flag is always False"""
result = QtGui.QStandardItemModel.flags(self, index)
return result & ~QtCore.Qt.ItemIsEditable
class TableView(QtWidgets.QTableView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTableView that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click. Multiple inheritance of QObjects is not possible, so we
are forced to duplicate code instead of sharing code with the extremely
similar TreeView class in this module"""
def __init__(self, *args):
QtWidgets.QTableView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
def mousePressEvent(self, event):
result = QtWidgets.QTableView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTableView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTableView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTableView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
class DataFrameModel(QtCore.QObject):
COL_STATUS = 0
COL_FILEPATH = 1
ROLE_STATUS_PERCENT = QtCore.Qt.UserRole + 1
ROLE_DELETED_OFF_DISK = QtCore.Qt.UserRole + 2
columns_changed = Signal()
def __init__(self, view, exp_config):
QtCore.QObject.__init__(self)
self._view = view
self.exp_config = exp_config
self._model = UneditableModel()
self.row_number_by_filepath = {}
self._previous_n_digits = 0
headerview_style = """
QHeaderView {
font-size: 8pt;
color: black;
}
QHeaderView::section{
font-size: 8pt;
color: black;
}
"""
self._header = HorizontalHeaderViewWithWidgets(self._model)
self._vertheader = QtWidgets.QHeaderView(QtCore.Qt.Vertical)
self._vertheader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
self._vertheader.setStyleSheet(headerview_style)
self._header.setStyleSheet(headerview_style)
self._vertheader.setHighlightSections(True)
self._vertheader.setSectionsClickable(True)
self._view.setModel(self._model)
self._view.setHorizontalHeader(self._header)
self._view.setVerticalHeader(self._vertheader)
self._delegate = ItemDelegate(self._view, self._model, self.COL_STATUS, self.ROLE_STATUS_PERCENT)
self._view.setItemDelegate(self._delegate)
self._view.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
self._view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Check if integer indexing is to be used
try:
self.integer_indexing = self.exp_config.getboolean('lyse', 'integer_indexing')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.integer_indexing = False
# This dataframe will contain all the scalar data
# from the shot files that are currently open:
index = pandas.MultiIndex.from_tuples([('filepath', '')])
self.dataframe = pandas.DataFrame({'filepath': []}, columns=index)
# How many levels the dataframe's multiindex has:
self.nlevels = self.dataframe.columns.nlevels
status_item = QtGui.QStandardItem()
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
status_item.setToolTip('status/progress of single-shot analysis')
self._model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
filepath_item = QtGui.QStandardItem('filepath')
filepath_item.setToolTip('filepath')
self._model.setHorizontalHeaderItem(self.COL_FILEPATH, filepath_item)
self._view.setColumnWidth(self.COL_STATUS, 70)
self._view.setColumnWidth(self.COL_FILEPATH, 100)
# Column indices to names and vice versa for fast lookup:
self.column_indices = {'__status': self.COL_STATUS, ('filepath', ''): self.COL_FILEPATH}
self.column_names = {self.COL_STATUS: '__status', self.COL_FILEPATH: ('filepath', '')}
self.columns_visible = {self.COL_STATUS: True, self.COL_FILEPATH: True}
# Whether or not a deleted column was visible at the time it was deleted (by name):
self.deleted_columns_visible = {}
# Make the actions for the context menu:
self.action_remove_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected shots', self._view)
self.connect_signals()
def connect_signals(self):
self._view.customContextMenuRequested.connect(self.on_view_context_menu_requested)
self.action_remove_selected.triggered.connect(self.on_remove_selection)
def on_remove_selection(self):
self.remove_selection()
def remove_selection(self, confirm=True):
selection_model = self._view.selectionModel()
selected_indexes = selection_model.selectedRows()
selected_name_items = [self._model.itemFromIndex(index) for index in selected_indexes]
if not selected_name_items:
return
if confirm and not question_dialog("Remove %d shots?" % len(selected_name_items)):
return
# Remove from DataFrame first:
self.dataframe = self.dataframe.drop(index.row() for index in selected_indexes)
self.dataframe.index = pandas.Index(range(len(self.dataframe)))
# Delete one at a time from Qt model:
for name_item in selected_name_items:
row = name_item.row()
self._model.removeRow(row)
self.renumber_rows()
def mark_selection_not_done(self):
selected_indexes = self._view.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
for row in selected_rows:
status_item = self._model.item(row, self.COL_STATUS)
if status_item.data(self.ROLE_DELETED_OFF_DISK):
# If the shot was previously not readable on disk, check to
# see if it's readable now. It may have been undeleted or
# perhaps it being unreadable before was due to a network
# glitch or similar.
filepath = self._model.item(row, self.COL_FILEPATH).text()
if not os.path.exists(filepath):
continue
# Shot file is accessible again:
status_item.setData(False, self.ROLE_DELETED_OFF_DISK)
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
status_item.setToolTip(None)
status_item.setData(0, self.ROLE_STATUS_PERCENT)
def on_view_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self._view)
menu.addAction(self.action_remove_selected)
menu.exec_(QtGui.QCursor.pos())
def on_double_click(self, index):
filepath_item = self._model.item(index.row(), self.COL_FILEPATH)
shot_filepath = filepath_item.text()
# get path to text editor
viewer_path = self.exp_config.get('programs', 'hdf5_viewer')
viewer_args = self.exp_config.get('programs', 'hdf5_viewer_arguments')
# Get the current labscript file:
if not viewer_path:
error_dialog("No hdf5 viewer specified in the labconfig.")
if '{file}' in viewer_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
viewer_args = [arg if arg != '{file}' else shot_filepath for arg in viewer_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
viewer_args = [shot_filepath] + viewer_args.split()
try:
subprocess.Popen([viewer_path] + viewer_args)
except Exception as e:
error_dialog("Unable to launch hdf5 viewer specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def set_columns_visible(self, columns_visible):
self.columns_visible = columns_visible
for column_index, visible in columns_visible.items():
self._view.setColumnHidden(column_index, not visible)
def update_column_levels(self):
"""Pads the keys and values of our lists of column names so that
they still match those in the dataframe after the number of
levels in its multiindex has increased"""
extra_levels = self.dataframe.columns.nlevels - self.nlevels
if extra_levels > 0:
self.nlevels = self.dataframe.columns.nlevels
column_indices = {}
column_names = {}
for column_name in self.column_indices:
if not isinstance(column_name, tuple):
# It's one of our special columns
new_column_name = column_name
else:
new_column_name = column_name + ('',) * extra_levels
column_index = self.column_indices[column_name]
column_indices[new_column_name] = column_index
column_names[column_index] = new_column_name
self.column_indices = column_indices
self.column_names = column_names
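# Illustrative example: if the dataframe's column index grows from 2 to 3
# levels, a stored column name like ('side_effects', 'x') is padded to
# ('side_effects', 'x', '') so it still matches the dataframe, while special
# names such as '__status' are left untouched.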
@inmain_decorator()
def mark_as_deleted_off_disk(self, filepath):
# Confirm the shot hasn't been removed from lyse (we are in the main
# thread so there is no race condition in checking first)
if not filepath in self.dataframe['filepath'].values:
# Shot has been removed from FileBox, nothing to do here:
return
row_number = self.row_number_by_filepath[filepath]
status_item = self._model.item(row_number, self.COL_STATUS)
already_marked_as_deleted = status_item.data(self.ROLE_DELETED_OFF_DISK)
if already_marked_as_deleted:
return
# Icon only displays if percent completion is 100. This is also
# important so that the shot is not picked up as analysis
# incomplete and analysis re-attempted on it.
status_item.setData(True, self.ROLE_DELETED_OFF_DISK)
status_item.setData(100, self.ROLE_STATUS_PERCENT)
status_item.setToolTip("Shot has been deleted off disk or is unreadable")
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/drive--minus'))
app.output_box.output('Warning: Shot deleted from disk or no longer readable %s\n' % filepath, red=True)
@inmain_decorator()
def update_row(self, filepath, dataframe_already_updated=False, status_percent=None, new_row_data=None, updated_row_data=None):
""""Updates a row in the dataframe and Qt model
to the data in the HDF5 file for that shot. Also sets the percent done, if specified"""
# To speed things up block signals to the model during update
self._model.blockSignals(True)
# Update the row in the dataframe first:
if (new_row_data is None) == (updated_row_data is None) and not dataframe_already_updated:
raise ValueError('Exactly one of new_row_data or updated_row_data must be provided')
try:
row_number = self.row_number_by_filepath[filepath]
except KeyError:
# Row has been deleted, nothing to do here:
return
filepath_colname = ('filepath',) + ('',) * (self.nlevels - 1)
assert filepath == self.dataframe.get_value(row_number, filepath_colname)
if updated_row_data is not None and not dataframe_already_updated:
for group, name in updated_row_data:
column_name = (group, name) + ('',) * (self.nlevels - 2)
value = updated_row_data[group, name]
try:
self.dataframe.set_value(row_number, column_name, value)
except ValueError:
# did the column not already exist when we tried to set an iterable?
if not column_name in self.dataframe.columns:
# create it with a non-iterable and then overwrite with the iterable value:
self.dataframe.set_value(row_number, column_name, None)
else:
# Incompatible datatype - convert the datatype of the column to
# 'object'
self.dataframe[column_name] = self.dataframe[column_name].astype('object')
# Now that the column exists and has dtype object, we can set the value:
self.dataframe.set_value(row_number, column_name, value)
dataframe_already_updated = True
if not dataframe_already_updated:
if new_row_data is None:
raise ValueError("If dataframe_already_updated is False, then new_row_data, as returned "
"by dataframe_utils.get_dataframe_from_shot(filepath) must be provided.")
self.dataframe = replace_with_padding(self.dataframe, new_row_data, row_number)
self.update_column_levels()
# Check and create necessary new columns in the Qt model:
new_column_names = set(self.dataframe.columns) - set(self.column_names.values())
new_columns_start = self._model.columnCount()
self._model.insertColumns(new_columns_start, len(new_column_names))
for i, column_name in enumerate(sorted(new_column_names)):
# Set the header label of the new column:
column_number = new_columns_start + i
self.column_names[column_number] = column_name
self.column_indices[column_name] = column_number
if column_name in self.deleted_columns_visible:
# Restore the former visibility of this column if we've
# seen one with its name before:
visible = self.deleted_columns_visible[column_name]
self.columns_visible[column_number] = visible
self._view.setColumnHidden(column_number, not visible)
else:
# new columns are visible by default:
self.columns_visible[column_number] = True
column_name_as_string = '\n'.join(column_name).strip()
header_item = QtGui.QStandardItem(column_name_as_string)
header_item.setToolTip(column_name_as_string)
self._model.setHorizontalHeaderItem(column_number, header_item)
if new_column_names:
# Update the visibility state of new columns, in case some new columns are hidden:
self.set_columns_visible(self.columns_visible)
# Check and remove any no-longer-needed columns in the Qt model:
defunct_column_names = (set(self.column_names.values()) - set(self.dataframe.columns)
- {self.column_names[self.COL_STATUS], self.column_names[self.COL_FILEPATH]})
defunct_column_indices = [self.column_indices[column_name] for column_name in defunct_column_names]
for column_number in sorted(defunct_column_indices, reverse=True):
# Remove columns from the Qt model. In reverse order so that
# removals do not change the position of columns yet to be
# removed.
self._model.removeColumn(column_number)
# Save whether or not the column was visible when it was
# removed (so that if it is re-added the visibility will be retained):
self.deleted_columns_visible[self.column_names[column_number]] = self.columns_visible[column_number]
del self.column_names[column_number]
del self.columns_visible[column_number]
if defunct_column_indices:
# Renumber the keys of self.columns_visible and self.column_names to reflect deletions:
self.column_names = {newindex: name for newindex, (oldindex, name) in enumerate(sorted(self.column_names.items()))}
self.columns_visible = {newindex: visible for newindex, (oldindex, visible) in enumerate(sorted(self.columns_visible.items()))}
# Update the inverse mapping of self.column_names:
self.column_indices = {name: index for index, name in self.column_names.items()}
# Update the data in the Qt model:
dataframe_row = self.dataframe.iloc[row_number].to_dict()
for column_number, column_name in self.column_names.items():
if not isinstance(column_name, tuple):
# One of our special columns, does not correspond to a column in the dataframe:
continue
if updated_row_data is not None and column_name not in updated_row_data:
continue
value = dataframe_row[column_name]
if isinstance(value, float):
value_str = scientific_notation(value)
else:
value_str = str(value)
lines = value_str.splitlines()
if len(lines) > 1:
short_value_str = lines[0] + ' ...'
else:
short_value_str = value_str
item = self._model.item(row_number, column_number)
if item is None:
# This is the first time we've written a value to this part of the model:
item = QtGui.QStandardItem(short_value_str)
item.setData(QtCore.Qt.AlignCenter, QtCore.Qt.TextAlignmentRole)
self._model.setItem(row_number, column_number, item)
else:
item.setText(short_value_str)
item.setToolTip(repr(value))
for i, column_name in enumerate(sorted(new_column_names)):
# Resize any new columns to fit contents:
column_number = new_columns_start + i
self._view.resizeColumnToContents(column_number)
if status_percent is not None:
status_item = self._model.item(row_number, self.COL_STATUS)
status_item.setData(status_percent, self.ROLE_STATUS_PERCENT)
if new_column_names or defunct_column_names:
self.columns_changed.emit()
# unblock signals to the model and tell it to update
self._model.blockSignals(False)
self._model.layoutChanged.emit()
def new_row(self, filepath):
status_item = QtGui.QStandardItem()
status_item.setData(0, self.ROLE_STATUS_PERCENT)
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
name_item = QtGui.QStandardItem(filepath)
return [status_item, name_item]
def renumber_rows(self, add_from=0):
"""Add/update row indices - the rows are numbered in simple sequential
order for easy comparison with the dataframe. add_from allows you to
only add numbers for new rows from the given index as a performance
optimisation, though if the number of digits changes, all rows will
still be renumbered. add_from should not be used if rows have been
deleted."""
n_digits = len(str(self._model.rowCount()))
if n_digits != self._previous_n_digits:
# All labels must be updated:
add_from = 0
self._previous_n_digits = n_digits
if add_from == 0:
self.row_number_by_filepath = {}
for row_number in range(add_from, self._model.rowCount()):
vertical_header_item = self._model.verticalHeaderItem(row_number)
row_number_str = str(row_number).rjust(n_digits)
vert_header_text = '{}. |'.format(row_number_str)
filepath_item = self._model.item(row_number, self.COL_FILEPATH)
filepath = filepath_item.text()
self.row_number_by_filepath[filepath] = row_number
if self.integer_indexing:
header_cols = ['sequence_index', 'run number', 'run repeat']
header_strings = []
for col in header_cols:
try:
val = self.dataframe[col].values[row_number]
header_strings.append(' {:04d}'.format(val))
except (KeyError, ValueError):
header_strings.append('----')
vert_header_text += ' |'.join(header_strings)
else:
basename = os.path.splitext(os.path.basename(filepath))[0]
vert_header_text += ' ' + basename
vertical_header_item.setText(vert_header_text)
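# Illustrative examples of the vertical header text built above (values made
# up): with integer indexing enabled, row 7 of a shot with sequence_index 3,
# run number 2 and run repeat 0 reads roughly " 7. | 0003 | 0002 | 0000"; with
# it disabled, the shot file's basename is shown instead, e.g.
# " 7. | 2021_05_18_my_experiment_002".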
@inmain_decorator()
def add_files(self, filepaths, new_row_data):
"""Add files to the dataframe model. New_row_data should be a
dataframe containing the new rows."""
to_add = []
# Check for duplicates:
for filepath in filepaths:
if filepath in self.row_number_by_filepath or filepath in to_add:
app.output_box.output('Warning: Ignoring duplicate shot %s\n' % filepath, red=True)
if new_row_data is not None:
df_row_index = np.where(new_row_data['filepath'].values == filepath)
new_row_data = new_row_data.drop(df_row_index[0])
new_row_data.index = pandas.Index(range(len(new_row_data)))
else:
to_add.append(filepath)
assert len(new_row_data) == len(to_add)
if to_add:
# Update the dataframe:
self.dataframe = concat_with_padding(self.dataframe, new_row_data)
self.update_column_levels()
app.filebox.set_add_shots_progress(None, None, "updating filebox")
for filepath in to_add:
# Add the new rows to the Qt model:
self._model.appendRow(self.new_row(filepath))
vert_header_item = QtGui.QStandardItem('...loading...')
self._model.setVerticalHeaderItem(self._model.rowCount() - 1, vert_header_item)
self._view.resizeRowToContents(self._model.rowCount() - 1)
self.renumber_rows(add_from=self._model.rowCount()-len(to_add))
# Update the Qt model:
for filepath in to_add:
self.update_row(filepath, dataframe_already_updated=True)
@inmain_decorator()
def get_first_incomplete(self):
"""Returns the filepath of the first shot in the model that has not
been analysed"""
for row in range(self._model.rowCount()):
status_item = self._model.item(row, self.COL_STATUS)
if status_item.data(self.ROLE_STATUS_PERCENT) != 100:
filepath_item = self._model.item(row, self.COL_FILEPATH)
return filepath_item.text()
class FileBox(object):
def __init__(self, container, exp_config, to_singleshot, from_singleshot, to_multishot, from_multishot):
self.exp_config = exp_config
self.to_singleshot = to_singleshot
self.to_multishot = to_multishot
self.from_singleshot = from_singleshot
self.from_multishot = from_multishot
self.logger = logging.getLogger('lyse.FileBox')
self.logger.info('starting')
loader = UiLoader()
loader.registerCustomWidget(TableView)
self.ui = loader.load('filebox.ui')
self.ui.progressBar_add_shots.hide()
container.addWidget(self.ui)
self.shots_model = DataFrameModel(self.ui.tableView, self.exp_config)
set_auto_scroll_to_end(self.ui.tableView.verticalScrollBar())
self.edit_columns_dialog = EditColumns(self, self.shots_model.column_names, self.shots_model.columns_visible)
self.last_opened_shots_folder = self.exp_config.get('paths', 'experiment_shot_storage')
self.connect_signals()
self.analysis_paused = False
self.multishot_required = False
# An Event to let the analysis thread know to check for shots that
# need analysing, rather than using a time.sleep:
self.analysis_pending = threading.Event()
# The folder that the 'add shots' dialog will open to:
self.current_folder = self.exp_config.get('paths', 'experiment_shot_storage')
# A queue for storing incoming files from the ZMQ server so
# the server can keep receiving files even if analysis is slow
# or paused:
self.incoming_queue = queue.Queue()
# Start the thread to handle incoming files, and store them in
# a buffer if processing is paused:
self.incoming = threading.Thread(target=self.incoming_buffer_loop)
self.incoming.daemon = True
self.incoming.start()
self.analysis = threading.Thread(target = self.analysis_loop)
self.analysis.daemon = True
self.analysis.start()
def connect_signals(self):
self.ui.pushButton_edit_columns.clicked.connect(self.on_edit_columns_clicked)
self.shots_model.columns_changed.connect(self.on_columns_changed)
self.ui.toolButton_add_shots.clicked.connect(self.on_add_shot_files_clicked)
self.ui.toolButton_remove_shots.clicked.connect(self.shots_model.on_remove_selection)
self.ui.tableView.doubleLeftClicked.connect(self.shots_model.on_double_click)
self.ui.pushButton_analysis_running.toggled.connect(self.on_analysis_running_toggled)
self.ui.pushButton_mark_as_not_done.clicked.connect(self.on_mark_selection_not_done_clicked)
self.ui.pushButton_run_multishot_analysis.clicked.connect(self.on_run_multishot_analysis_clicked)
def on_edit_columns_clicked(self):
self.edit_columns_dialog.show()
def on_columns_changed(self):
column_names = self.shots_model.column_names
columns_visible = self.shots_model.columns_visible
self.edit_columns_dialog.update_columns(column_names, columns_visible)
def on_add_shot_files_clicked(self):
shot_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
'Select shot files',
self.last_opened_shots_folder,
"HDF5 files (*.h5)")
if type(shot_files) is tuple:
shot_files, _ = shot_files
if not shot_files:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
shot_files = [os.path.abspath(shot_file) for shot_file in shot_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_shots_folder = os.path.dirname(shot_files[0])
# Queue the files to be opened:
for filepath in shot_files:
self.incoming_queue.put(filepath)
def on_analysis_running_toggled(self, pressed):
if pressed:
self.analysis_paused = True
self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
self.ui.pushButton_analysis_running.setText('Analysis paused')
else:
self.analysis_paused = False
self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
self.ui.pushButton_analysis_running.setText('Analysis running')
self.analysis_pending.set()
def on_mark_selection_not_done_clicked(self):
self.shots_model.mark_selection_not_done()
# Let the analysis loop know to look for these shots:
self.analysis_pending.set()
def on_run_multishot_analysis_clicked(self):
self.multishot_required = True
self.analysis_pending.set()
def set_columns_visible(self, columns_visible):
self.shots_model.set_columns_visible(columns_visible)
@inmain_decorator()
def set_add_shots_progress(self, completed, total, message):
self.ui.progressBar_add_shots.setFormat("Adding shots: [{}] %v/%m (%p%)".format(message))
if completed == total and message is None:
self.ui.progressBar_add_shots.hide()
else:
if total is not None:
self.ui.progressBar_add_shots.setMaximum(total)
if completed is not None:
self.ui.progressBar_add_shots.setValue(completed)
if self.ui.progressBar_add_shots.isHidden():
self.ui.progressBar_add_shots.show()
if completed is None and total is None and message is not None:
# Ensure a repaint when only the message changes:
self.ui.progressBar_add_shots.repaint()
def incoming_buffer_loop(self):
"""We use a queue as a buffer for incoming shots. We don't want to hang and not
respond to a client submitting shots, so we just let shots pile up here until we can get to them.
The downside to this is that we can't return errors to the client if the shot cannot be added,
but the suggested workflow is to handle errors here anyway. A client running shots shouldn't stop
the experiment on account of errors from the analysis stage, so what's the point of passing errors to it?
We'll just raise errors here and the user can decide what to do with them."""
logger = logging.getLogger('lyse.FileBox.incoming')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
n_shots_added = 0
while True:
try:
filepaths = []
filepath = self.incoming_queue.get()
filepaths.append(filepath)
if self.incoming_queue.qsize() == 0:
# Wait momentarily in case more arrive so we can batch process them:
time.sleep(0.1)
# Batch process to decrease number of dataframe concatenations:
batch_size = len(self.shots_model.dataframe) // 3 + 1
while True:
try:
filepath = self.incoming_queue.get(False)
except queue.Empty:
break
else:
filepaths.append(filepath)
if len(filepaths) >= batch_size:
break
logger.info('adding:\n%s' % '\n'.join(filepaths))
if n_shots_added == 0:
total_shots = self.incoming_queue.qsize() + len(filepaths)
self.set_add_shots_progress(1, total_shots, "reading shot files")
# Remove duplicates from the list (preserving order) in case the
# client sent the same filepath multiple times:
filepaths = sorted(set(filepaths), key=filepaths.index) # Inefficient but readable
# We open the HDF5 files here outside the GUI thread so as not to hang the GUI:
dataframes = []
indices_of_files_not_found = []
for i, filepath in enumerate(filepaths):
try:
dataframe = get_dataframe_from_shot(filepath)
dataframes.append(dataframe)
except IOError:
app.output_box.output('Warning: Ignoring shot file not found or not readable %s\n' % filepath, red=True)
indices_of_files_not_found.append(i)
n_shots_added += 1
shots_remaining = self.incoming_queue.qsize()
total_shots = n_shots_added + shots_remaining + len(filepaths) - (i + 1)
self.set_add_shots_progress(n_shots_added, total_shots, "reading shot files")
self.set_add_shots_progress(n_shots_added, total_shots, "concatenating dataframes")
if dataframes:
new_row_data = concat_with_padding(*dataframes)
else:
new_row_data = None
# Do not add the shots that were not found on disk. Reverse
# loop so that removing an item doesn't change the indices of
# subsequent removals:
for i in reversed(indices_of_files_not_found):
del filepaths[i]
if filepaths:
self.shots_model.add_files(filepaths, new_row_data)
# Let the analysis loop know to look for new shots:
self.analysis_pending.set()
if shots_remaining == 0:
self.set_add_shots_progress(n_shots_added, total_shots, None)
n_shots_added = 0 # reset our counter for the next batch
except Exception:
# Keep this incoming loop running at all costs, but make the
# otherwise uncaught exception visible to the user:
zprocess.raise_exception_in_thread(sys.exc_info())
def analysis_loop(self):
logger = logging.getLogger('lyse.FileBox.analysis_loop')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
while True:
try:
self.analysis_pending.wait()
self.analysis_pending.clear()
at_least_one_shot_analysed = False
while True:
if not self.analysis_paused:
# Find the first shot that has not finished being analysed:
filepath = self.shots_model.get_first_incomplete()
if filepath is not None:
logger.info('analysing: %s'%filepath)
self.do_singleshot_analysis(filepath)
at_least_one_shot_analysed = True
if filepath is None and at_least_one_shot_analysed:
self.multishot_required = True
if filepath is None:
break
if self.multishot_required:
logger.info('doing multishot analysis')
self.do_multishot_analysis()
else:
logger.info('analysis is paused')
break
if self.multishot_required:
logger.info('doing multishot analysis')
self.do_multishot_analysis()
except Exception:
etype, value, tb = sys.exc_info()
orig_exception = ''.join(traceback.format_exception_only(etype, value))
message = ('Analysis loop encountered unexpected exception. ' +
'This is a bug and should be reported. The analysis ' +
'loop is continuing, but lyse may be in an inconsistent state. '
'Restart lyse, or continue at your own risk. '
'Original exception was:\n\n' + orig_exception)
# Raise the exception in a thread so we can keep running
zprocess.raise_exception_in_thread((RuntimeError, RuntimeError(message), tb))
self.pause_analysis()
@inmain_decorator()
def pause_analysis(self):
# This automatically triggers the slot that sets self.analysis_paused
self.ui.pushButton_analysis_running.setChecked(True)
def do_singleshot_analysis(self, filepath):
# Check the shot file exists before sending it to the singleshot
# routinebox. This does not guarantee it won't have been deleted by
# the time the routinebox starts running analysis on it, but by
# detecting it now we can most of the time avoid the user code
# coughing exceptions due to the file not existing. Which would also
# not be a problem, but this way we avoid polluting the outputbox with
# more errors than necessary.
if not os.path.exists(filepath):
self.shots_model.mark_as_deleted_off_disk(filepath)
return
self.to_singleshot.put(filepath)
while True:
signal, status_percent, updated_data = self.from_singleshot.get()
for file in updated_data:
# Update the data for all the rows with new data:
self.shots_model.update_row(file, updated_row_data=updated_data[file])
# Update the status percent for the row on which analysis is actually running:
self.shots_model.update_row(filepath, status_percent=status_percent, dataframe_already_updated=True)
if signal == 'done':
return
if signal == 'error':
if not os.path.exists(filepath):
# Do not pause if the file has been deleted. An error is
# no surprise there:
self.shots_model.mark_as_deleted_off_disk(filepath)
else:
self.pause_analysis()
return
if signal == 'progress':
continue
raise ValueError('invalid signal %s' % str(signal))
def do_multishot_analysis(self):
self.to_multishot.put(None)
while True:
signal, _, updated_data = self.from_multishot.get()
for file in updated_data:
self.shots_model.update_row(file, updated_row_data=updated_data[file])
if signal == 'done':
self.multishot_required = False
return
elif signal == 'error':
self.pause_analysis()
return
class Lyse(object):
def __init__(self):
loader = UiLoader()
self.ui = loader.load('main.ui', LyseMainWindow())
self.connect_signals()
self.setup_config()
self.port = int(self.exp_config.get('ports', 'lyse'))
# The singleshot routinebox will be connected to the filebox
# by queues:
to_singleshot = queue.Queue()
from_singleshot = queue.Queue()
# So will the multishot routinebox:
to_multishot = queue.Queue()
from_multishot = queue.Queue()
self.output_box = OutputBox(self.ui.verticalLayout_output_box)
self.singleshot_routinebox = RoutineBox(self.ui.verticalLayout_singleshot_routinebox, self.exp_config,
self, to_singleshot, from_singleshot, self.output_box.port)
self.multishot_routinebox = RoutineBox(self.ui.verticalLayout_multishot_routinebox, self.exp_config,
self, to_multishot, from_multishot, self.output_box.port, multishot=True)
self.filebox = FileBox(self.ui.verticalLayout_filebox, self.exp_config,
to_singleshot, from_singleshot, to_multishot, from_multishot)
self.last_save_config_file = None
self.last_save_data = None
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.resize(1600, 900)
# Set the splitters to appropriate fractions of their maximum size:
self.ui.splitter_horizontal.setSizes([1000, 600])
self.ui.splitter_vertical.setSizes([300, 600])
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('lyse', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Defer this until 50ms after the window has shown,
# so that the GUI pops up faster in the meantime
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
self.ui.show()
# self.ui.showMaximized()
def on_close_event(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
if self.only_window_geometry_is_different(save_data, self.last_save_data):
self.save_configuration(self.last_save_config_file)
return True
message = ('Current configuration (which scripts are loaded and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Quit lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return False
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
return True
def on_save_configuration_triggered(self):
if self.last_save_config_file is None:
self.on_save_configuration_as_triggered()
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
else:
self.save_configuration(self.last_save_config_file)
def on_revert_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
elif reply == QtWidgets.QMessageBox.Yes:
self.load_configuration(self.last_save_config_file)
else:
error_dialog('no changes to revert')
def on_save_configuration_as_triggered(self):
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
try:
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
except LabConfig.NoOptionError:
self.exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
if not os.path.exists(default_path):
os.makedirs(default_path)
default = os.path.join(default_path, 'lyse.ini')
save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Select file to save current lyse configuration',
default,
"config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if not save_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
save_file = os.path.abspath(save_file)
self.save_configuration(save_file)
def only_window_geometry_is_different(self, current_data, old_data):
ui_keys = ['window_size', 'window_pos', 'splitter', 'splitter_vertical', 'splitter_horizontal']
compare = [current_data[key] == old_data[key] for key in current_data.keys() if key not in ui_keys]
return all(compare)
def get_save_data(self):
save_data = {}
box = self.singleshot_routinebox
save_data['SingleShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastSingleShotFolder'] = box.last_opened_routine_folder
box = self.multishot_routinebox
save_data['MultiShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastMultiShotFolder'] = box.last_opened_routine_folder
save_data['LastFileBoxFolder'] = self.filebox.last_opened_shots_folder
save_data['analysis_paused'] = self.filebox.analysis_paused
window_size = self.ui.size()
save_data['window_size'] = (window_size.width(), window_size.height())
window_pos = self.ui.pos()
save_data['window_pos'] = (window_pos.x(), window_pos.y())
save_data['screen_geometry'] = get_screen_geometry()
save_data['splitter'] = self.ui.splitter.sizes()
save_data['splitter_vertical'] = self.ui.splitter_vertical.sizes()
save_data['splitter_horizontal'] = self.ui.splitter_horizontal.sizes()
return save_data
def save_configuration(self, save_file):
lyse_config = LabConfig(save_file)
save_data = self.get_save_data()
self.last_save_config_file = save_file
self.last_save_data = save_data
for key, value in save_data.items():
lyse_config.set('lyse_state', key, pprint.pformat(value))
def on_load_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'lyse.ini')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select lyse configuration file to load',
default,
"config files (*.ini)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
self.load_configuration(file)
def load_configuration(self, filename):
self.last_save_config_file = filename
self.ui.actionSave_configuration.setText('Save configuration %s' % filename)
lyse_config = LabConfig(filename)
try:
self.singleshot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'SingleShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.singleshot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastSingleShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'MultiShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastMultiShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.filebox.last_opened_shots_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastFileBoxFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
if ast.literal_eval(lyse_config.get('lyse_state', 'analysis_paused')):
self.filebox.pause_analysis()
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
screen_geometry = ast.literal_eval(lyse_config.get('lyse_state', 'screen_geometry'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
else:
# Only restore the window size and position, and splitter
# positions if the screen is the same size/same number of monitors
# etc. This prevents the window moving off the screen if say, the
# position was saved when 2 monitors were plugged in but there is
# only one now, and the splitters may not make sense in light of a
# different window size, so better to fall back to defaults:
current_screen_geometry = get_screen_geometry()
if current_screen_geometry == screen_geometry:
try:
self.ui.resize(*ast.literal_eval(lyse_config.get('lyse_state', 'window_size')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.move(*ast.literal_eval(lyse_config.get('lyse_state', 'window_pos')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_vertical.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_vertical')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_horizontal.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_horizontal')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
# Set as self.last_save_data:
save_data = self.get_save_data()
self.last_save_data = save_data
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
def setup_config(self):
required_config_params = {"DEFAULT": ["experiment_name"],
"programs": ["text_editor",
"text_editor_arguments",
"hdf5_viewer",
"hdf5_viewer_arguments"],
"paths": ["shared_drive",
"experiment_shot_storage",
"analysislib"],
"ports": ["lyse"]
}
self.exp_config = LabConfig(required_params=required_config_params)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
# Keyboard shortcuts:
QtWidgets.QShortcut('Del', self.ui, lambda: self.delete_items(True))
QtWidgets.QShortcut('Shift+Del', self.ui, lambda: self.delete_items(False))
def delete_items(self, confirm):
"""Delete items from whichever box has focus, with optional confirmation
dialog"""
if self.filebox.ui.tableView.hasFocus():
self.filebox.shots_model.remove_selection(confirm)
if self.singleshot_routinebox.ui.treeView.hasFocus():
self.singleshot_routinebox.remove_selection(confirm)
if self.multishot_routinebox.ui.treeView.hasFocus():
self.multishot_routinebox.remove_selection(confirm)
if __name__ == "__main__":
logger = setup_logging('lyse')
labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
qapplication = QtWidgets.QApplication(sys.argv)
qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
app = Lyse()
# Start the web server:
server = WebServer(app.port)
# Let the interpreter run every 500ms so it sees Ctrl-C interrupts:
timer = QtCore.QTimer()
timer.start(500)
timer.timeout.connect(lambda: None) # Let the interpreter run each 500 ms.
# Upon seeing a ctrl-c interrupt, quit the event loop
signal.signal(signal.SIGINT, lambda *args: qapplication.exit())
qapplication.exec_()
server.shutdown()
avg_line_length: 47.264134 | max_line_length: 160 | alphanum_fraction: 0.620855
hexsha: 75b24703b4c357ed6d98a2a1b2b1ffea20540ce5 | size: 4720 | ext: py | lang: Python
max_stars_repo: Lib/idlelib/textview.py | Psycojoker/cpython | 0afada163c7ef25c3a9d46ed445481fb69f2ecaf | ["PSF-2.0"] | stars: 2 (2019-07-05T09:19:52.000Z to 2019-12-18T10:31:38.000Z)
max_issues_repo: Lib/idlelib/textview.py | Psycojoker/cpython | 0afada163c7ef25c3a9d46ed445481fb69f2ecaf | ["PSF-2.0"] | issues: 1 (2020-07-28T16:54:08.000Z to 2020-07-28T16:54:08.000Z)
max_forks_repo: Lib/idlelib/textview.py | izbyshev/cpython | ef8861c112ed1dac9351958c121bc24ca4ecdb08 | ["PSF-2.0"] | forks: 1 (2019-09-29T03:20:42.000Z to 2019-09-29T03:20:42.000Z)
"""Simple text browser for IDLE
"""
from tkinter import Toplevel, Text
from tkinter.ttk import Frame, Scrollbar, Button
from tkinter.messagebox import showerror
class TextFrame(Frame):
"Display text with scrollbar."
def __init__(self, parent, rawtext):
"""Create a frame for Textview.
parent - parent widget for this frame
rawtext - text to display
"""
super().__init__(parent)
self['relief'] = 'sunken'
self['height'] = 700
# TODO: get fg/bg from theme.
self.bg = '#ffffff'
self.fg = '#000000'
self.text = text = Text(self, wrap='word', highlightthickness=0,
fg=self.fg, bg=self.bg)
self.scroll = scroll = Scrollbar(self, orient='vertical',
takefocus=False, command=text.yview)
text['yscrollcommand'] = scroll.set
text.insert(0.0, rawtext)
text['state'] = 'disabled'
text.focus_set()
scroll.pack(side='right', fill='y')
text.pack(side='left', expand=True, fill='both')
class ViewFrame(Frame):
"Display TextFrame and Close button."
def __init__(self, parent, text):
super().__init__(parent)
self.parent = parent
self.bind('<Return>', self.ok)
self.bind('<Escape>', self.ok)
self.textframe = TextFrame(self, text)
self.button_ok = button_ok = Button(
self, text='Close', command=self.ok, takefocus=False)
self.textframe.pack(side='top', expand=True, fill='both')
button_ok.pack(side='bottom')
def ok(self, event=None):
"""Dismiss text viewer dialog."""
self.parent.destroy()
class ViewWindow(Toplevel):
"A simple text viewer dialog for IDLE."
def __init__(self, parent, title, text, modal=True,
*, _htest=False, _utest=False):
"""Show the given text in a scrollable window with a 'close' button.
If modal is left True, users cannot interact with other windows
until the textview window is closed.
parent - parent of this dialog
title - string which is title of popup dialog
text - text to display in dialog
_htest - bool; change box location when running htest.
_utest - bool; don't wait_window when running unittest.
"""
super().__init__(parent)
self['borderwidth'] = 5
# Place dialog below parent if running htest.
x = parent.winfo_rootx() + 10
y = parent.winfo_rooty() + (10 if not _htest else 100)
self.geometry(f'=750x500+{x}+{y}')
self.title(title)
self.viewframe = ViewFrame(self, text)
self.protocol("WM_DELETE_WINDOW", self.ok)
self.button_ok = button_ok = Button(self, text='Close',
command=self.ok, takefocus=False)
self.viewframe.pack(side='top', expand=True, fill='both')
self.is_modal = modal
if self.is_modal:
self.transient(parent)
self.grab_set()
if not _utest:
self.wait_window()
def ok(self, event=None):
"""Dismiss text viewer dialog."""
if self.is_modal:
self.grab_release()
self.destroy()
def view_text(parent, title, text, modal=True, _utest=False):
"""Create text viewer for given text.
parent - parent of this dialog
title - string which is the title of popup dialog
text - text to display in this dialog
modal - controls if users can interact with other windows while this
dialog is displayed
_utest - bool; controls wait_window on unittest
"""
return ViewWindow(parent, title, text, modal, _utest=_utest)
def view_file(parent, title, filename, encoding, modal=True, _utest=False):
"""Create text viewer for text in filename.
Return error message if file cannot be read. Otherwise calls view_text
with contents of the file.
"""
try:
with open(filename, 'r', encoding=encoding) as file:
contents = file.read()
except OSError:
showerror(title='File Load Error',
message=f'Unable to load file {filename!r} .',
parent=parent)
except UnicodeDecodeError as err:
showerror(title='Unicode Decode Error',
message=str(err),
parent=parent)
else:
return view_text(parent, title, contents, modal, _utest=_utest)
return None
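# Added usage sketch (not part of idlelib): view_text() needs an existing Tk
# parent widget. The helper name and strings below are illustrative only.
def _example_view_text(parent):
    """Open a non-blocking viewer on an existing Tk parent (illustration only)."""
    # modal=False makes view_text() return the ViewWindow immediately instead
    # of waiting for the user to close it.
    return view_text(parent, 'Example viewer', 'Hello from the text viewer',
                     modal=False)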
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_textview', verbosity=2, exit=False)
from idlelib.idle_test.htest import run
run(ViewWindow)
avg_line_length: 33.475177 | max_line_length: 77 | alphanum_fraction: 0.606992
hexsha: e7c1141467900fd41463e4cc0a1159678f80104b | size: 4314 | ext: py | lang: Python
max_stars_repo: examples/mark_bad_channels.py | ethanknights/mne-bids | cfb2ee9c7ddad6e3590427d65844de9f47e66d2d | ["BSD-3-Clause"] | stars: null
max_issues_repo: examples/mark_bad_channels.py | ethanknights/mne-bids | cfb2ee9c7ddad6e3590427d65844de9f47e66d2d | ["BSD-3-Clause"] | issues: null
max_forks_repo: examples/mark_bad_channels.py | ethanknights/mne-bids | cfb2ee9c7ddad6e3590427d65844de9f47e66d2d | ["BSD-3-Clause"] | forks: null
"""
===============================================
03. Changing which channels are marked as "bad"
===============================================
You can use MNE-BIDS to mark MEG or (i)EEG recording channels as "bad", for
example if the connected sensor produced mostly noise – or no signal at
all.
Similarly, you can declare channels as "good", should you discover they were
incorrectly marked as bad.
"""
# Authors: Richard Höchenberger <richard.hoechenberger@gmail.com>
# License: BSD (3-clause)
###############################################################################
# We will demonstrate how to mark individual channels as bad on the MNE
# "sample" dataset. After that, we will mark channels as good again.
#
# Let's start by importing the required modules and functions, reading the
# "sample" data, and writing it in the BIDS format.
import os.path as op
import mne
from mne_bids import BIDSPath, write_raw_bids, read_raw_bids, mark_bad_channels
data_path = mne.datasets.sample.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
bids_root = op.join(data_path, '..', 'MNE-sample-data-bids')
bids_path = BIDSPath(subject='01', session='01', task='audiovisual', run='01',
root=bids_root)
raw = mne.io.read_raw_fif(raw_fname, verbose=False)
raw.info['line_freq'] = 60 # Specify power line frequency as required by BIDS.
write_raw_bids(raw, bids_path=bids_path, overwrite=True, verbose=False)
###############################################################################
# Read the (now BIDS-formatted) data and print a list of channels currently
# marked as bad.
raw = read_raw_bids(bids_path=bids_path, verbose=False)
print(f'The following channels are currently marked as bad:\n'
f' {", ".join(raw.info["bads"])}\n')
###############################################################################
# So currently, two channels are marked as bad: ``EEG 053`` and ``MEG 2443``.
# Let's assume that through visual data inspection, we found that two more
# MEG channels are problematic, and we would like to mark them as bad as well.
# To do that, we simply add them to a list, which we then pass to
# :func:`mne_bids.mark_bad_channels`:
bads = ['MEG 0112', 'MEG 0131']
mark_bad_channels(ch_names=bads, bids_path=bids_path, verbose=False)
###############################################################################
# That's it! Let's verify the result.
raw = read_raw_bids(bids_path=bids_path, verbose=False)
print(f'After marking MEG 0112 and MEG 0131 as bad, the following channels '
f'are now marked as bad:\n {", ".join(raw.info["bads"])}\n')
###############################################################################
# As you can see, a total of **four** channels are now marked as bad: the ones
# that were already bad when we started – ``EEG 053`` and ``MEG 2443`` – and
# the two channels we passed to :func:`mne_bids.mark_bad_channels` –
# ``MEG 0112`` and ``MEG 0131``. This shows that marking bad channels via
# :func:`mne_bids.mark_bad_channels`, by default, is an **additive** procedure,
# which allows you to mark additional channels as bad while retaining the
# information about all channels that had *previously* been marked as bad.
#
# If you instead would like to **replace** the collection of bad channels
# entirely, pass the argument ``overwrite=True``:
bads = ['MEG 0112', 'MEG 0131']
mark_bad_channels(ch_names=bads, bids_path=bids_path, overwrite=True,
verbose=False)
raw = read_raw_bids(bids_path=bids_path, verbose=False)
print(f'After marking MEG 0112 and MEG 0131 as bad and passing '
f'`overwrite=True`, the following channels '
f'are now marked as bad:\n {", ".join(raw.info["bads"])}\n')
###############################################################################
# Lastly, if you're looking for a way to mark all channels as good, simply
# pass an empty list as ``ch_names``, combined with ``overwrite=True``:
bads = []
mark_bad_channels(ch_names=bads, bids_path=bids_path, overwrite=True,
verbose=False)
raw = read_raw_bids(bids_path=bids_path, verbose=False)
print(f'After passing `ch_names=[]` and `overwrite=True`, the following '
f'channels are now marked as bad:\n {", ".join(raw.info["bads"])}\n')
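###############################################################################
# Added illustrative sketch (not part of the original example): the "bad"
# status written by :func:`mne_bids.mark_bad_channels` lives in the BIDS
# ``channels.tsv`` sidecar, so it can also be inspected directly. The path
# handling below is an assumption based on standard :class:`mne_bids.BIDSPath`
# usage.
import pandas as pd
channels_path = bids_path.copy().update(suffix='channels', extension='.tsv')
channels = pd.read_csv(channels_path.fpath, sep='\t')
print(channels[['name', 'status']].head())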
avg_line_length: 45.410526 | max_line_length: 79 | alphanum_fraction: 0.624942
hexsha: dc041b92b14ffd46fe64e3bd6646d0af18096a4a | size: 5113 | ext: py | lang: Python
max_stars_repo: tensorflow/python/keras/applications/applications_load_weight_test.py | ashutom/tensorflow-upstream | c16069c19de9e286dd664abb78d0ea421e9f32d4 | ["Apache-2.0"] | stars: 10 (2021-05-25T17:43:04.000Z to 2022-03-08T10:46:09.000Z)
max_issues_repo: tensorflow/python/keras/applications/applications_load_weight_test.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | ["Apache-2.0"] | issues: 1056 (2019-12-15T01:20:31.000Z to 2022-02-10T02:06:28.000Z)
max_forks_repo: tensorflow/python/keras/applications/applications_load_weight_test.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | ["Apache-2.0"] | forks: 6 (2016-09-07T04:00:15.000Z to 2022-01-12T01:47:38.000Z)
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Keras applications."""
from absl import flags
from absl.testing import parameterized
import numpy as np
from tensorflow.python.keras.applications import densenet
from tensorflow.python.keras.applications import efficientnet
from tensorflow.python.keras.applications import inception_resnet_v2
from tensorflow.python.keras.applications import inception_v3
from tensorflow.python.keras.applications import mobilenet
from tensorflow.python.keras.applications import mobilenet_v2
from tensorflow.python.keras.applications import mobilenet_v3
from tensorflow.python.keras.applications import nasnet
from tensorflow.python.keras.applications import resnet
from tensorflow.python.keras.applications import resnet_v2
from tensorflow.python.keras.applications import vgg16
from tensorflow.python.keras.applications import vgg19
from tensorflow.python.keras.applications import xception
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.platform import test
ARG_TO_MODEL = {
'resnet': (resnet, [resnet.ResNet50, resnet.ResNet101, resnet.ResNet152]),
'resnet_v2': (resnet_v2, [resnet_v2.ResNet50V2, resnet_v2.ResNet101V2,
resnet_v2.ResNet152V2]),
'vgg16': (vgg16, [vgg16.VGG16]),
'vgg19': (vgg19, [vgg19.VGG19]),
'xception': (xception, [xception.Xception]),
'inception_v3': (inception_v3, [inception_v3.InceptionV3]),
'inception_resnet_v2': (inception_resnet_v2,
[inception_resnet_v2.InceptionResNetV2]),
'mobilenet': (mobilenet, [mobilenet.MobileNet]),
'mobilenet_v2': (mobilenet_v2, [mobilenet_v2.MobileNetV2]),
'mobilenet_v3_small': (mobilenet_v3, [mobilenet_v3.MobileNetV3Small]),
'mobilenet_v3_large': (mobilenet_v3, [mobilenet_v3.MobileNetV3Large]),
'densenet': (densenet, [densenet.DenseNet121,
densenet.DenseNet169, densenet.DenseNet201]),
'nasnet_mobile': (nasnet, [nasnet.NASNetMobile]),
'nasnet_large': (nasnet, [nasnet.NASNetLarge]),
'efficientnet': (efficientnet,
[efficientnet.EfficientNetB0, efficientnet.EfficientNetB1,
efficientnet.EfficientNetB2, efficientnet.EfficientNetB3,
efficientnet.EfficientNetB4, efficientnet.EfficientNetB5,
efficientnet.EfficientNetB6, efficientnet.EfficientNetB7])
}
TEST_IMAGE_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/tests/elephant.jpg')
_IMAGENET_CLASSES = 1000
# Add a flag to define which application module file is tested.
# This is set as an 'arg' in the build target to guarantee that
# it only triggers the tests of the application models in the module
# if that module file has been modified.
FLAGS = flags.FLAGS
flags.DEFINE_string('module', None,
'Application module used in this test.')
def _get_elephant(target_size):
# For models that don't include a Flatten step,
# the default is to accept variable-size inputs
# even when loading ImageNet weights (since it is possible).
# In this case, default to 299x299.
if target_size[0] is None:
target_size = (299, 299)
test_image = data_utils.get_file('elephant.jpg', TEST_IMAGE_PATH)
img = image.load_img(test_image, target_size=tuple(target_size))
x = image.img_to_array(img)
return np.expand_dims(x, axis=0)
class ApplicationsLoadWeightTest(test.TestCase, parameterized.TestCase):
def assertShapeEqual(self, shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
'Shapes are different rank: %s vs %s' % (shape1, shape2))
if shape1 != shape2:
raise AssertionError('Shapes differ: %s vs %s' % (shape1, shape2))
def test_application_pretrained_weights_loading(self):
app_module = ARG_TO_MODEL[FLAGS.module][0]
apps = ARG_TO_MODEL[FLAGS.module][1]
for app in apps:
model = app(weights='imagenet')
self.assertShapeEqual(model.output_shape, (None, _IMAGENET_CLASSES))
x = _get_elephant(model.input_shape[1:3])
x = app_module.preprocess_input(x)
preds = model.predict(x)
names = [p[1] for p in app_module.decode_predictions(preds)[0]]
# Test correct label is in top 3 (weak correctness test).
self.assertIn('African_elephant', names[:3])
if __name__ == '__main__':
test.main()
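# Added usage note (an assumption about invocation, not taken from the build
# files): since `module` is an ordinary absl flag, a single application family
# can be exercised directly from the command line, e.g.
#
#   python applications_load_weight_test.py --module=vgg16
#
# which loads the ImageNet weights for every model under the 'vgg16' key of
# ARG_TO_MODEL and runs the elephant-image prediction check.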
avg_line_length: 44.46087 | max_line_length: 80 | alphanum_fraction: 0.722472
hexsha: 8311b3bf8b6afe53e1ec69281af28e83e6c0641f | size: 3182 | ext: py | lang: Python
max_stars_repo: pyt_regression1.py | mjbhobe/dl-pytorch | c1c443a0540b2678c38db364b56dfa1d9a1d1ae2 | ["MIT"] | stars: 5 (2020-01-09T08:58:32.000Z to 2021-12-26T09:06:35.000Z)
max_issues_repo: pyt_regression1.py | mjbhobe/dl-pytorch | c1c443a0540b2678c38db364b56dfa1d9a1d1ae2 | ["MIT"] | issues: 1 (2020-01-09T09:00:13.000Z to 2020-01-09T09:00:13.000Z)
max_forks_repo: pyt_regression1.py | mjbhobe/dl-pytorch | c1c443a0540b2678c38db364b56dfa1d9a1d1ae2 | ["MIT"] | forks: 2 (2020-01-09T08:58:40.000Z to 2020-10-25T03:49:55.000Z)
"""
pyt_regression1.py: figure out the regression function between X & y
@author: Manish Bhobe
My experiments with Python, Machine Learning & Deep Learning.
This code is meant for education purposes only & is not intended for commercial/production use!
Use at your own risk!! I am not responsible if your CPU or GPU gets fried :D
"""
import warnings
warnings.filterwarnings('ignore')
import random
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import r2_score
# tweaks for libraries
np.set_printoptions(precision=6, linewidth=1024, suppress=True)
plt.style.use('seaborn')
sns.set_style('darkgrid')
sns.set_context('notebook', font_scale=1.10)
# Pytorch imports
import torch
print('Using Pytorch version: ', torch.__version__)
import torch.nn as nn
from torch import optim
# My helper functions for training/evaluating etc.
import pytorch_toolkit as pytk
seed = 123
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# ---------------------------------------------------------------------------
# Example:1 - with synthesized data
# ---------------------------------------------------------------------------
def get_data():
""" generate simple arrays """
""" NOTE: relationship is y = 2 * x - 1"""
X = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
X = X.reshape(-1, 1)
y = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)
y = y.reshape(-1, 1)
return (X, y)
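# Added sanity-check sketch (not part of the original script): the synthetic
# arrays really do satisfy y = 2 * x - 1, which is the line the single linear
# unit below is expected to recover.
def _check_linear_relationship():
    X_chk, y_chk = get_data()
    assert np.allclose(y_chk, 2.0 * X_chk - 1.0)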
# our regression model
class Net(pytk.PytkModule):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(1, 1)
def forward(self, inp):
out = self.fc1(inp)
return out
def main():
# generate data (exact linear relationship, no noise added)
X, y = get_data()
print(f"X.shape: {X.shape} - y.shape: {y.shape}")
# build our network
net = Net()
print('Before training: ')
print(' Weight: %.3f bias: %.3f' %
(net.fc1.weight.item(), net.fc1.bias.item()))
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.001)
net.compile(loss=criterion, optimizer=optimizer,
metrics=['mse', 'rmse', 'mae', 'r2_score'])
print(net)
# train on the data
hist = net.fit(X, y, epochs=5000, report_interval=100)
pytk.show_plots(hist, metric='r2_score', plot_title="Performance Metrics")
# print the results
print('After training: ')
W, b = net.fc1.weight.item(), net.fc1.bias.item()
print(f"After training -> Weight: {W:.3f} - bias: {b:.3f}")
# get predictions (need to pass Tensors!)
y_pred = net.predict(X)
# what is my r2_score?
print('R2 score (sklearn): %.3f' % r2_score(y, y_pred))
print('R2 score (pytk): %.3f' % pytk.r2_score(
torch.Tensor(y_pred), torch.Tensor(y)))
# display plot
plt.figure(figsize=(8, 6))
plt.scatter(X, y, s=40, c='steelblue')
plt.plot(X, y_pred, lw=2, color='firebrick')
plt.title('Predicted Line -> $y = %.3f * X + %.3f$' % (W, b))
plt.show()
if __name__ == '__main__':
main()
# Results:
# True relationship (target values):
# W: 2.0, b: -1.0
# After training (5000 epochs)
# W: 1.997, b: -0.991
# R2 score: 0.765
avg_line_length: 27.669565 | max_line_length: 95 | alphanum_fraction: 0.612194
hexsha: 6c3495ad5441f0303c1fb34a15cfbab9765561a0 | size: 857 | ext: py | lang: Python
max_stars_repo: src/examples/C4-DDQNvsMinimax.py | kirarpit/connect4 | ed5f2b9d4cca26e5230a124a4e3f9efe3efaf229 | ["MIT"] | stars: 41 (2018-07-14T10:05:04.000Z to 2022-03-14T20:41:21.000Z)
max_issues_repo: src/examples/C4-DDQNvsMinimax.py | kirarpit/connect4 | ed5f2b9d4cca26e5230a124a4e3f9efe3efaf229 | ["MIT"] | issues: 14 (2019-07-22T23:28:46.000Z to 2022-03-11T23:28:30.000Z)
max_forks_repo: src/examples/C4-DDQNvsMinimax.py | kirarpit/connect4 | ed5f2b9d4cca26e5230a124a4e3f9efe3efaf229 | ["MIT"] | forks: 3 (2019-12-05T16:12:15.000Z to 2021-04-06T01:37:48.000Z)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 16 20:26:39 2018
@author: Arpit
"""
from games.c4Game import C4Game
from environment import Environment
from players.minimaxC4Player import MinimaxC4Player
from players.qPlayer import QPlayer
from brains.qBrain import QBrain
isConv = False
layers = [
{'filters':16, 'kernel_size': (2,2), 'size':24}
, {'filters':16, 'kernel_size': (2,2), 'size':24}
]
game = C4Game(4, 5, isConv=isConv)
brain = QBrain('c4DDQN', game, layers=layers, load_weights=False, plotModel=True)
tBrain = QBrain('c4DDQN', game, layers=layers)
player_config = {"mem_size":25000, "brain":brain, "tBrain":tBrain,
"batch_size":64, "gamma":0.90, "n_step":11}
p1 = QPlayer(1, game, **player_config)
p2 = MinimaxC4Player(2, game, epsilon=0)
env = Environment(game, p1, p2)
env.run()
avg_line_length: 27.645161 | max_line_length: 81 | alphanum_fraction: 0.681447
hexsha: 6955ec3a4a71b815b05675087b5589be36cf5bce | size: 13105 | ext: py | lang: Python
max_stars_repo: tests/test_deadfixtures.py | Ivan-Feofanov/pytest-deadfixtures | d715a36d3a529403c85b1bd09c12c0b9ebe672c2 | ["MIT"] | stars: null
max_issues_repo: tests/test_deadfixtures.py | Ivan-Feofanov/pytest-deadfixtures | d715a36d3a529403c85b1bd09c12c0b9ebe672c2 | ["MIT"] | issues: null
max_forks_repo: tests/test_deadfixtures.py | Ivan-Feofanov/pytest-deadfixtures | d715a36d3a529403c85b1bd09c12c0b9ebe672c2 | ["MIT"] | forks: null
import pytest
from pytest_deadfixtures import (
DUPLICATE_FIXTURES_HEADLINE,
EXIT_CODE_ERROR,
EXIT_CODE_SUCCESS,
UNUSED_FIXTURES_FOUND_HEADLINE,
)
def test_error_exit_code_on_dead_fixtures_found(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def some_fixture():
return 1
"""
)
result = pytester.runpytest("--dead-fixtures")
assert result.ret == EXIT_CODE_ERROR
def test_success_exit_code_on_dead_fixtures_found(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def some_fixture():
return 1
def test_simple(some_fixture):
assert 1 == some_fixture
"""
)
result = pytester.runpytest("--dead-fixtures")
assert result.ret == EXIT_CODE_SUCCESS
def test_dont_list_autouse_fixture(pytester, message_template):
pytester.makepyfile(
"""
import pytest
@pytest.fixture(autouse=True)
def autouse_fixture():
return 1
def test_simple():
assert 1 == 1
"""
)
result = pytester.runpytest("--dead-fixtures")
message = message_template.format("autouse_fixture", "test_dont_list_autouse_fixture")
assert message not in result.stdout.str()
def test_dont_list_same_file_fixture(pytester, message_template):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def same_file_fixture():
return 1
def test_simple(same_file_fixture):
assert 1 == same_file_fixture
"""
)
result = pytester.runpytest("--dead-fixtures")
message = message_template.format(
"same_file_fixture", "test_dont_list_same_file_fixture"
)
assert message not in result.stdout.str()
def test_list_same_file_unused_fixture(pytester, message_template):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def same_file_fixture():
return 1
def test_simple():
assert 1 == 1
"""
)
result = pytester.runpytest("--dead-fixtures")
message = message_template.format(
"same_file_fixture", "test_list_same_file_unused_fixture"
)
output = result.stdout.str()
assert message in output
assert UNUSED_FIXTURES_FOUND_HEADLINE.format(count=1) in output
def test_list_same_file_multiple_unused_fixture(pytester, message_template):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def same_file_fixture():
return 1
@pytest.fixture()
def plus_same_file_fixture():
return 2
def test_simple():
assert 1 == 1
"""
)
result = pytester.runpytest("--dead-fixtures")
first = message_template.format(
"same_file_fixture", "test_list_same_file_multiple_unused_fixture"
)
second = message_template.format(
"plus_same_file_fixture", "test_list_same_file_multiple_unused_fixture"
)
output = result.stdout.str()
assert first in output
assert second in output
assert output.index(first) < output.index(second)
assert UNUSED_FIXTURES_FOUND_HEADLINE.format(count=2) in output
def test_dont_list_conftest_fixture(pytester, message_template):
pytester.makepyfile(
conftest="""
import pytest
@pytest.fixture()
def conftest_fixture():
return 1
"""
)
pytester.makepyfile(
"""
import pytest
def test_conftest_fixture(conftest_fixture):
assert 1 == conftest_fixture
"""
)
result = pytester.runpytest("--dead-fixtures")
message = message_template.format("conftest_fixture", "conftest")
assert message not in result.stdout.str()
def test_list_conftest_unused_fixture(pytester, message_template):
pytester.makepyfile(
conftest="""
import pytest
@pytest.fixture()
def conftest_fixture():
return 1
"""
)
pytester.makepyfile(
"""
import pytest
def test_conftest_fixture():
assert 1 == 1
"""
)
result = pytester.runpytest("--dead-fixtures")
message = message_template.format("conftest_fixture", "conftest")
assert message in result.stdout.str()
def test_list_conftest_multiple_unused_fixture(pytester, message_template):
pytester.makepyfile(
conftest="""
import pytest
@pytest.fixture()
def conftest_fixture():
return 1
@pytest.fixture()
def plus_conftest_fixture():
return 2
"""
)
pytester.makepyfile(
"""
import pytest
def test_conftest_fixture():
assert 1 == 1
"""
)
result = pytester.runpytest("--dead-fixtures")
first = message_template.format("conftest_fixture", "conftest")
second = message_template.format("plus_conftest_fixture", "conftest")
output = result.stdout.str()
assert first in output
assert second in output
assert output.index(first) < output.index(second)
def test_dont_list_decorator_usefixtures(pytester, message_template):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def decorator_usefixtures():
return 1
@pytest.mark.usefixtures('decorator_usefixtures')
def test_decorator_usefixtures():
assert 1 == decorator_usefixtures
"""
)
result = pytester.runpytest("--dead-fixtures")
message = message_template.format(
"decorator_usefixtures", "test_dont_list_decorator_usefixtures"
)
assert message not in result.stdout.str()
def test_write_docs_when_verbose(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def some_fixture():
'''Blabla fixture docs'''
return 1
def test_simple():
assert 1 == 1
"""
)
result = pytester.runpytest("--dead-fixtures", "-v")
assert "Blabla fixture docs" in result.stdout.str()
def test_repeated_fixtures_not_found(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def some_fixture():
return 1
def test_simple(some_fixture):
assert 1 == some_fixture
"""
)
result = pytester.runpytest("--dup-fixtures")
assert DUPLICATE_FIXTURES_HEADLINE not in result.stdout.str()
def test_repeated_fixtures_found(pytester):
pytester.makepyfile(
"""
import pytest
class SomeClass:
a = 1
def spam(self):
return 'and eggs'
@pytest.fixture()
def someclass_fixture():
return SomeClass()
@pytest.fixture()
def someclass_samefixture():
return SomeClass()
def test_simple(someclass_fixture):
assert 1 == 1
def test_simple_again(someclass_samefixture):
assert 2 == 2
"""
)
result = pytester.runpytest("--dup-fixtures")
assert DUPLICATE_FIXTURES_HEADLINE in result.stdout.str()
assert "someclass_samefixture" in result.stdout.str()
@pytest.mark.parametrize("directory", ("site-packages", "dist-packages", "<string>"))
def test_should_not_list_fixtures_from_unrelated_directories(
pytester, message_template, directory
):
pytester.tmpdir = pytester.mkdir(directory)
pytester.makepyfile(
conftest="""
import pytest
@pytest.fixture()
def conftest_fixture():
return 1
"""
)
pytester.makepyfile(
"""
import pytest
def test_conftest_fixture():
assert 1 == 1
"""
)
result = pytester.runpytest("--dead-fixtures")
message = message_template.format("conftest_fixture", "{}/conftest".format(directory))
assert message not in result.stdout.str()
def test_dont_list_fixture_used_after_test_which_does_not_use_fixtures(
pytester, message_template
):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def same_file_fixture():
return 1
def test_no_fixture_used():
assert True
def test_simple(same_file_fixture):
assert 1 == same_file_fixture
"""
)
result = pytester.runpytest("--dead-fixtures")
message = message_template.format(
"same_file_fixture",
"test_dont_list_fixture_used_after_test_which_does_not_use_fixtures",
)
assert message not in result.stdout.str()
def test_doctest_should_not_result_in_false_positive(pytester, message_template):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def same_file_fixture():
return 1
def something():
''' a doctest in a docstring
>>> something()
42
'''
return 42
def test_simple(same_file_fixture):
assert 1 == same_file_fixture
"""
)
result = pytester.runpytest("--dead-fixtures", "--doctest-modules")
message = message_template.format(
"same_file_fixture", "test_doctest_should_not_result_in_false_positive"
)
assert message not in result.stdout.str()
def test_dont_list_fixture_used_by_another_fixture(pytester, message_template):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def some_fixture():
return 1
@pytest.fixture()
def a_derived_fixture(some_fixture):
return some_fixture + 1
def test_something(a_derived_fixture):
assert a_derived_fixture == 2
"""
)
result = pytester.runpytest("--dead-fixtures")
for fixture_name in ["some_fixture", "a_derived_fixture"]:
message = message_template.format(
fixture_name,
"test_dont_list_fixture_used_by_another_fixture",
)
assert message not in result.stdout.str()
def test_list_derived_fixtures_if_not_used_by_tests(pytester, message_template):
pytester.makepyfile(
"""
import pytest
@pytest.fixture()
def some_fixture():
return 1
@pytest.fixture()
def a_derived_fixture(some_fixture):
return some_fixture + 1
def test_something():
assert True
"""
)
result = pytester.runpytest("--dead-fixtures")
# although some_fixture is used by a_derived_fixture, since neither are used by a test case,
# they should be reported.
for fixture_name in ["some_fixture", "a_derived_fixture"]:
message = message_template.format(
fixture_name,
"test_list_derived_fixtures_if_not_used_by_tests",
)
assert message in result.stdout.str()
def test_imported_fixtures(pytester):
pytester.makepyfile(
conftest="""
import pytest
pytest_plugins = [
'more_fixtures',
]
@pytest.fixture
def some_common_fixture():
return 'ok'
"""
)
pytester.makepyfile(
more_fixtures="""
import pytest
@pytest.fixture
def some_fixture():
return 1
@pytest.fixture
def a_derived_fixture(some_fixture):
return some_fixture + 1
@pytest.fixture
def some_unused_fixture():
return 'nope'
"""
)
pytester.makepyfile(
"""
import pytest
def test_some_common_thing(some_common_fixture):
assert True
def test_some_derived_thing(a_derived_fixture):
assert True
"""
)
result = pytester.runpytest("--dead-fixtures")
for fixture_name in ["some_fixture", "a_derived_fixture", "some_common_fixture"]:
assert fixture_name not in result.stdout.str()
assert "some_unused_fixture" in result.stdout.str()
@pytest.mark.xfail(reason="https://github.com/jllorencetti/pytest-deadfixtures/issues/28")
def test_parameterized_fixture(pytester):
pytester.makepyfile(
conftest="""
import pytest
@pytest.fixture
def some_common_fixture():
return 1
"""
)
pytester.makepyfile(
"""
import pytest
@pytest.fixture(params=['some_common_fixture'])
def another_fixture(request):
fixture_value = request.getfixturevalue(request.param)
return fixture_value + 1
def test_a_thing(another_fixture):
assert another_fixture == 2
"""
)
result = pytester.runpytest("--dead-fixtures")
# Currently these cases are recognized as a false positive, whereas they shouldn't be.
# Due to the dynamic lookup of the fixture, this is going to be hard to recognize.
assert "some_common_fixture" not in result.stdout.str()
avg_line_length: 22.287415 | max_line_length: 96 | alphanum_fraction: 0.619535
hexsha: e556b03c3d549dbf7d6d7d6793621d4106f01f18 | size: 1542 | ext: py | lang: Python
max_stars_repo: libs/layers/roi_align_tf/roi_align_tf.py | FullStackD3vs/Detectron-PYTORCH | b42c78b393098c8b678bb21bd4a48cc41028141b | ["Apache-2.0"] | stars: 37 (2018-07-25T16:30:47.000Z to 2022-03-31T00:44:32.000Z)
max_issues_repo: libs/layers/roi_align_tf/roi_align_tf.py | FullStackD3vs/Detectron-PYTORCH | b42c78b393098c8b678bb21bd4a48cc41028141b | ["Apache-2.0"] | issues: 2 (2018-08-06T06:25:37.000Z to 2019-04-30T03:41:04.000Z)
max_forks_repo: libs/layers/roi_align_tf/roi_align_tf.py | FullStackD3vs/Detectron-PYTORCH | b42c78b393098c8b678bb21bd4a48cc41028141b | ["Apache-2.0"] | forks: 14 (2019-01-15T08:42:19.000Z to 2021-12-25T22:35:29.000Z)
import torch
from torch import nn
from crop_and_resize import CropAndResizeFunction
class RoIAlign(nn.Module):
def __init__(self, crop_height, crop_width, extrapolation_value=0):
super(RoIAlign, self).__init__()
self.crop_height = crop_height
self.crop_width = crop_width
self.extrapolation_value = extrapolation_value
def forward(self, featuremap, rois, spatial_scale):
"""
RoIAlign based on crop_and_resize.
:param featuremap: NxCxHxW
:param rois: Mx5 float box with (id, x1, y1, x2, y2) **without normalization**
:param spatial_scale: a float, indicating the size ratio w.r.t. the original image
:return: MxCxoHxoW
"""
boxes = rois[:, 1:5].contiguous()
box_ind = rois[:, 0].int().contiguous()
boxes = float(spatial_scale) * boxes
x1, y1, x2, y2 = torch.split(boxes, 1, dim=1)
spacing_w = (x2 - x1) / float(self.crop_width)
spacing_h = (y2 - y1) / float(self.crop_height)
image_height, image_width = featuremap.size()[2:4]
nx0 = (x1 + spacing_w / 2 - 0.5) / float(image_width - 1)
ny0 = (y1 + spacing_h / 2 - 0.5) / float(image_height - 1)
nw = spacing_w * float(self.crop_width - 1) / float(image_width - 1)
nh = spacing_h * float(self.crop_height - 1) / float(image_height - 1)
boxes = torch.cat((ny0, nx0, ny0 + nh, nx0 + nw), 1)
return CropAndResizeFunction(self.crop_height, self.crop_width, 0)(featuremap, boxes, box_ind)
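# Added worked example (an illustrative sketch; it assumes the crop_and_resize
# extension is built, and the sizes below are made up): with spatial_scale=0.25
# a RoI of (x1, y1, x2, y2) = (16, 16, 48, 48) in image coordinates becomes the
# feature-map box (4, 4, 12, 12). For a 7x7 crop, spacing_w = (12 - 4) / 7 and
# nx0 = (4 + spacing_w / 2 - 0.5) / (W - 1) with W the feature-map width, which
# is exactly what forward() feeds to CropAndResizeFunction.
if __name__ == '__main__':
    featuremap = torch.zeros(1, 256, 64, 64)
    rois = torch.tensor([[0.0, 16.0, 16.0, 48.0, 48.0]])  # (id, x1, y1, x2, y2)
    align = RoIAlign(crop_height=7, crop_width=7)
    output = align(featuremap, rois, spatial_scale=0.25)
    print(output.shape)  # expected: torch.Size([1, 256, 7, 7])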
avg_line_length: 35.860465 | max_line_length: 102 | alphanum_fraction: 0.633593
hexsha: 200e73150881631947dbb2ae6b98ceac2c0768be | size: 2371 | ext: py | lang: Python
max_stars_repo: tests/__init__.py | chronitis/ipyrmd | 1e10d5de5b4df281a906159d51b3e95f6972e7d9 | ["MIT"] | stars: 61 (2015-05-27T19:59:38.000Z to 2021-05-05T17:19:29.000Z)
max_issues_repo: tests/__init__.py | chronitis/ipyrmd | 1e10d5de5b4df281a906159d51b3e95f6972e7d9 | ["MIT"] | issues: 7 (2016-02-21T10:54:29.000Z to 2019-11-15T05:14:33.000Z)
max_forks_repo: tests/__init__.py | chronitis/ipyrmd | 1e10d5de5b4df281a906159d51b3e95f6972e7d9 | ["MIT"] | forks: 5 (2016-02-19T05:07:36.000Z to 2020-11-27T10:56:52.000Z)
# run the test cases with `python3 -m unittest` from the project root
import unittest
import nbformat
import tempfile
import ipyrmd
NBFORMAT_VERSION = 4
# generic test classes which start from either an ipynb or rmd source
# file, write it to a tempfile, then convert it back and forth, after
# which the outputs can be compared
class IpynbTest(unittest.TestCase):
default_metadata = {
"language_info": {
"name": "R",
}
}
cells = []
metadata = None
use_rmd = True
def setUp(self):
if self.metadata is None:
metadata = self.default_metadata
else:
metadata = self.metadata
self.orig = nbformat.from_dict({
"nbformat": NBFORMAT_VERSION,
"nbformat_minor": 0,
"metadata": metadata,
"cells": self.cells
})
with tempfile.TemporaryDirectory() as d:
ipynb0_name = d + "/0"
rmd_name = d + "/1"
ipynb1_name = d + "/2"
with open(ipynb0_name, "w") as f:
nbformat.write(self.orig, f)
if self.use_rmd:
ipyrmd.ipynb_to_rmd(ipynb0_name, rmd_name)
ipyrmd.rmd_to_ipynb(rmd_name, ipynb1_name)
else:
ipyrmd.ipynb_to_spin(ipynb0_name, rmd_name)
ipyrmd.spin_to_ipynb(rmd_name, ipynb1_name)
with open(rmd_name) as f:
self.rmd = f.read()
with open(ipynb1_name) as f:
self.roundtrip = nbformat.read(f, NBFORMAT_VERSION)
class RmdTest(unittest.TestCase):
source = ""
use_rmd = True
def setUp(self):
with tempfile.TemporaryDirectory() as d:
rmd0_name = d + "/0"
ipynb_name = d + "/1"
rmd1_name = d + "/2"
with open(rmd0_name, "w") as f:
f.write(self.source)
if self.use_rmd:
ipyrmd.rmd_to_ipynb(rmd0_name, ipynb_name)
ipyrmd.ipynb_to_rmd(ipynb_name, rmd1_name)
else:
ipyrmd.spin_to_ipynb(rmd0_name, ipynb_name)
ipyrmd.ipynb_to_spin(ipynb_name, rmd1_name)
with open(ipynb_name) as f:
self.ipynb = nbformat.read(f, NBFORMAT_VERSION)
with open(rmd1_name) as f:
self.roundtrip = f.read()
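# Illustrative concrete test case (an assumption about how these helpers are used
# elsewhere in the suite, not part of the original file): round-trip a tiny R Markdown
# chunk and check that the code survives both conversions.
class ExampleRmdRoundtrip(RmdTest):
    source = "Some prose.\n\n```{r}\nx <- 1\n```\n"
    def test_code_survives_roundtrip(self):
        self.assertTrue(any("x <- 1" in cell.source for cell in self.ipynb.cells))
        self.assertIn("x <- 1", self.roundtrip)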
| 29.271605
| 69
| 0.561367
|
84c65a1038e0fd531fb0c8046ef636479e36737a
| 8,863
|
py
|
Python
|
backtraderbd/strategies/base.py
|
rochi88/backtraderbd
|
dddc7694bda77b0dd91a2cd87d190bcb4004f99f
|
[
"MIT"
] | 2
|
2020-04-09T06:35:02.000Z
|
2021-05-11T22:11:58.000Z
|
backtraderbd/strategies/base.py
|
rochi88/backtraderbd
|
dddc7694bda77b0dd91a2cd87d190bcb4004f99f
|
[
"MIT"
] | null | null | null |
backtraderbd/strategies/base.py
|
rochi88/backtraderbd
|
dddc7694bda77b0dd91a2cd87d190bcb4004f99f
|
[
"MIT"
] | 5
|
2020-04-10T14:45:58.000Z
|
2021-09-06T12:29:38.000Z
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
import os
import sys
import datetime as dt
import math
import pandas as pd
import backtrader as bt
import backtraderbd.data.bdshare as bds
import backtraderbd.strategies.utils as bsu
from backtraderbd.settings import settings as conf
from backtraderbd.libs.log import get_logger
from backtraderbd.libs.models import get_or_create_library
logger = get_logger(__name__)
class BaseStrategy(bt.Strategy):
"""
Base Strategy template for all strategies to be added
"""
def __init__(self):
# Global variables
self.init_cash = conf.DEFAULT_CASH
self.buy_prop = conf.BUY_PROP
self.sell_prop = conf.SELL_PROP
self.execution_type = conf.EXECUTION_TYPE
self.periodic_logging = conf.PERIODIC_LOGGING
self.transaction_logging = conf.TRANSACTION_LOGGING
print("===Global level arguments===")
print("init_cash : {}".format(self.init_cash))
print("buy_prop : {}".format(self.buy_prop))
print("sell_prop : {}".format(self.sell_prop))
self.dataclose = self.datas[0].close # Keep a reference to the "close" line in the data[0] dataseries
self.dataopen = self.datas[0].open
self.order = None # To keep track of pending orders
self.buyprice = None
self.buycomm = None
self.len_data = len(list(self.datas[0])) # Number of ticks in the input data
def buy_signal(self):
return True
def sell_signal(self):
return True
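    # buy_signal / sell_signal are intended to be overridden by concrete strategies;
    # the base class defaults to True, so this template trades on every bar.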
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
if self.transaction_logging:
bsu.Utils.log(
"BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f"
% (
order.executed.price,
order.executed.value,
order.executed.comm,
)
)
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
if self.transaction_logging:
bsu.Utils.log(
"SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f"
% (
order.executed.price,
order.executed.value,
order.executed.comm,
)
)
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
if self.transaction_logging:
if not self.periodic_logging:
bsu.Utils.log("Cash %s Value %s" % (self.cash, self.value))
bsu.Utils.log("Order Canceled/Margin/Rejected")
bsu.Utils.log("Canceled: {}".format(order.status == order.Canceled))
bsu.Utils.log("Margin: {}".format(order.status == order.Margin))
bsu.Utils.log("Rejected: {}".format(order.status == order.Rejected))
# Write down: no pending order
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
if self.transaction_logging:
bsu.Utils.log(
"OPERATION PROFIT, GROSS %.2f, NET %.2f"
% (trade.pnl, trade.pnlcomm)
)
def notify_cashvalue(self, cash, value):
# Update cash and value every period
if self.periodic_logging:
bsu.Utils.log("Cash %s Value %s" % (cash, value))
self.cash = cash
self.value = value
def next(self):
# Simply log the closing price of the series from the reference
if self.periodic_logging:
bsu.Utils.log("Close, %.2f" % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Skip the last observation since purchases are based on next day closing prices (no value for the last observation)
if len(self) + 1 >= self.len_data:
return
if self.periodic_logging:
bsu.Utils.log("CURRENT POSITION SIZE: {}".format(self.position.size))
# Only buy if there is enough cash for at least one stock
if self.cash >= self.dataclose[0]:
if self.buy_signal():
if self.transaction_logging:
bsu.Utils.log("BUY CREATE, %.2f" % self.dataclose[0])
# Take a 10% long position every time there is a buy signal (or whatever is afforded by the current cash position)
# "size" refers to the number of stocks to purchase
# Afforded size is based on closing price for the current trading day
# Margin is required for buy commission
# Add allowance to commission per transaction (avoid margin)
afforded_size = int(
self.cash
/ (
self.dataclose[0]
* (1 + conf.COMMISSION_PER_TRANSACTION + 0.001)
)
)
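# Worked example (illustrative numbers only): with cash=10000, close=50 and a 0.5% commission,
# afforded_size = int(10000 / (50 * 1.006)) = 198 shares.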
buy_prop_size = int(afforded_size * self.buy_prop)
# Buy based on the closing price of the next closing day
if self.execution_type == "close":
final_size = min(buy_prop_size, afforded_size)
if self.transaction_logging:
bsu.Utils.log("Cash: {}".format(self.cash))
bsu.Utils.log("Price: {}".format(self.dataclose[0]))
bsu.Utils.log("Buy prop size: {}".format(buy_prop_size))
bsu.Utils.log("Afforded size: {}".format(afforded_size))
bsu.Utils.log("Final size: {}".format(final_size))
# Explicitly setting exectype=bt.Order.Close will make the next day's closing the reference price
self.order = self.buy(size=final_size)
# Buy based on the opening price of the next closing day (only works if "open" data exists in the dataset)
else:
# Margin is required for buy commission
afforded_size = int(
self.cash
/ (
self.dataopen[1]
* (1 + conf.COMMISSION_PER_TRANSACTION + 0.001)
)
)
final_size = min(buy_prop_size, afforded_size)
if self.transaction_logging:
bsu.Utils.log("Buy prop size: {}".format(buy_prop_size))
bsu.Utils.log("Afforded size: {}".format(afforded_size))
bsu.Utils.log("Final size: {}".format(final_size))
self.order = self.buy(size=final_size)
# Only sell if you hold at least one unit of the stock (and sell only that stock, so no short selling)
stock_value = self.value - self.cash
if stock_value > 0:
if self.sell_signal():
if self.transaction_logging:
bsu.Utils.log("SELL CREATE, %.2f" % self.dataclose[1])
# Sell a 5% position (or whatever is afforded by the current stock holding)
# "size" refers to the number of shares to sell
if self.execution_type == "close":
if conf.SELL_PROP == 1:
self.order = self.sell(
size=self.position.size, exectype=bt.Order.Close
)
else:
# Sell based on the closing price of the next closing day
self.order = self.sell(
size=int(
(stock_value / (self.dataclose[1]))
* self.sell_prop
),
exectype=bt.Order.Close,
)
else:
# Sell based on the opening price of the next closing day (only works if "open" data exists in the dataset)
self.order = self.sell(
size=int(
(self.init_cash / self.dataopen[1])
* self.sell_prop
)
)
| 43.876238
| 126
| 0.528941
|
721abc0034e36d68f1f56ead0395216990166e67
| 827
|
py
|
Python
|
cart_venv/Lib/site-packages/tensorflow_core/python/keras/api/_v2/keras/applications/resnet50/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 2
|
2019-08-04T20:28:14.000Z
|
2019-10-27T23:26:42.000Z
|
cart_venv/Lib/site-packages/tensorflow_core/python/keras/api/_v2/keras/applications/resnet50/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | null | null | null |
cart_venv/Lib/site-packages/tensorflow_core/python/keras/api/_v2/keras/applications/resnet50/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 1
|
2020-11-04T03:16:29.000Z
|
2020-11-04T03:16:29.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.keras.applications.resnet50 namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.applications.resnet import decode_predictions
from tensorflow.python.keras.applications.resnet import preprocess_input
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
_sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
_sys.modules[__name__], "keras.applications.resnet50", public_apis=None, deprecation=False,
has_lite=False)
| 37.590909
| 97
| 0.823458
|
84cf0bfa34bf909c7efc30c05fd287e0debdaf8a
| 1,845
|
py
|
Python
|
openstack/network/v2/auto_allocated_topology.py
|
teresa-ho/stx-openstacksdk
|
7d723da3ffe9861e6e9abcaeadc1991689f782c5
|
[
"Apache-2.0"
] | null | null | null |
openstack/network/v2/auto_allocated_topology.py
|
teresa-ho/stx-openstacksdk
|
7d723da3ffe9861e6e9abcaeadc1991689f782c5
|
[
"Apache-2.0"
] | null | null | null |
openstack/network/v2/auto_allocated_topology.py
|
teresa-ho/stx-openstacksdk
|
7d723da3ffe9861e6e9abcaeadc1991689f782c5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack import resource2 as resource
class AutoAllocatedTopology(resource.Resource):
resource_name = 'auto_allocated_topology'
resource_key = 'auto_allocated_topology'
base_path = '/auto-allocated-topology'
service = network_service.NetworkService()
# Capabilities
allow_create = False
allow_get = True
allow_update = False
allow_delete = True
allow_list = False
# NOTE: this resource doesn't support list or query
# Properties
#: Project ID
#: If project is not specified the topology will be created
#: for project user is authenticated against.
#: Will return in error if resources have not been configured correctly
#: To use this feature auto-allocated-topology, subnet_allocation,
#: external-net and router extensions must be enabled and set up.
project_id = resource.Body('tenant_id')
class ValidateTopology(AutoAllocatedTopology):
base_path = '/auto-allocated-topology/%(project)s?fields=dry-run'
#: Validate requirements before running (Does not return topology)
#: Will return "Deployment error:" if the resources required have not
#: been correctly set up.
dry_run = resource.Body('dry_run')
project = resource.URI('project')
| 36.9
| 75
| 0.742005
|
4853d673869c55d62771f7cb403c5002b3c4ebb1
| 329
|
py
|
Python
|
news/migrations/0006_remove_facebookpost_slug.py
|
kermox/schronisko-krakow
|
8f8c546894e4b683ce463debad27db72ef820f90
|
[
"MIT"
] | 1
|
2020-11-17T18:50:44.000Z
|
2020-11-17T18:50:44.000Z
|
news/migrations/0006_remove_facebookpost_slug.py
|
kermox/schronisko-krakow
|
8f8c546894e4b683ce463debad27db72ef820f90
|
[
"MIT"
] | 9
|
2020-10-23T18:42:45.000Z
|
2022-03-12T00:39:57.000Z
|
news/migrations/0006_remove_facebookpost_slug.py
|
kermox/schronisko-krakow
|
8f8c546894e4b683ce463debad27db72ef820f90
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-08-14 18:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0005_auto_20200814_1650'),
]
operations = [
migrations.RemoveField(
model_name='facebookpost',
name='slug',
),
]
| 18.277778
| 47
| 0.592705
|
7369c41f92bcb12c2440e8751bb10845fa18ce66
| 1,077
|
py
|
Python
|
setup.py
|
Azmirol/click-man
|
827a2e2fd00363e5116a4d3fedd19ca617ba599a
|
[
"MIT"
] | 105
|
2017-01-04T17:41:20.000Z
|
2022-03-31T13:00:54.000Z
|
setup.py
|
Azmirol/click-man
|
827a2e2fd00363e5116a4d3fedd19ca617ba599a
|
[
"MIT"
] | 33
|
2017-03-06T16:02:13.000Z
|
2022-01-30T09:58:38.000Z
|
setup.py
|
Azmirol/click-man
|
827a2e2fd00363e5116a4d3fedd19ca617ba599a
|
[
"MIT"
] | 31
|
2016-12-16T17:20:45.000Z
|
2022-03-26T09:54:27.000Z
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='click-man',
version='0.4.2',
url='https://github.com/click-contrib/click-man',
license='MIT',
description='Generate man pages for click based CLI applications',
long_description=read('README.md'),
long_description_content_type='text/markdown',
author='Timo Furrer',
author_email='tuxtimo@gmail.com',
install_requires=[
'click',
'setuptools',
],
packages=find_packages(exclude=('tests', )),
entry_points={
'console_scripts': [
'click-man = click_man.__main__:cli',
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Documentation',
],
)
| 27.615385
| 70
| 0.617456
|
1dd161e7daf343c5d605369339b67d506bfd1bfe
| 2,304
|
py
|
Python
|
agent.py
|
tdelubac/A3C
|
ad970b24fe2f2a532ccb42cf7e034894ef9b2838
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
agent.py
|
tdelubac/A3C
|
ad970b24fe2f2a532ccb42cf7e034894ef9b2838
|
[
"BSD-3-Clause-Clear"
] | 15
|
2020-01-28T22:11:56.000Z
|
2022-03-11T23:16:57.000Z
|
agent.py
|
tdelubac/A3C
|
ad970b24fe2f2a532ccb42cf7e034894ef9b2838
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import numpy as np
import random
N_STEP_RETURN = 8
FRAME = 0
class Agent:
def __init__(self, brain, eps_start, eps_end, eps_steps):
self.brain = brain
self.eps_start = eps_start
self.eps_end = eps_end
self.eps_steps = eps_steps
self.memory = []
self.R = 0.
def epsilon(self):
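        # Linear decay from eps_start to eps_end over eps_steps frames, then held at eps_end.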
if FRAME > self.eps_steps:
return self.eps_end
else:
return self.eps_start - (self.eps_start - self.eps_end) / self.eps_steps * FRAME
def act(self,s,veto_a=None):
global FRAME; FRAME = FRAME+1
if random.random() < self.epsilon():
return random.randint(0, self.brain.n_actions-1)
else:
s = np.asarray([s])
p = self.brain.predict_p(s)
p = p[0]
if veto_a is not None:
p_veto_a = p[veto_a]
p[veto_a] = 0
for a in range(self.brain.n_actions):
if a == veto_a: continue
p[a]+= p_veto_a / (self.brain.n_actions-1) # divide p_veto_a among other actions
return np.random.choice(range(self.brain.n_actions), p=p)
def train(self,s,a,r,s_,done,total_r):
def get_sample(memory, n):
s, a, _, _, _, _ = memory[0]
_, _, _, s_, _, _ = memory[n-1]
return s, a, self.R, s_, done, total_r
a_cats = np.zeros(self.brain.n_actions) # turn action into one-hot representation
a_cats[a] = 1
self.memory.append( (s, a_cats, r, s_, done, total_r) )
self.R = ( self.R + r * self.brain.gamma**N_STEP_RETURN ) / self.brain.gamma
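        # Rolling n-step return (comment added for clarity): dividing by gamma shifts the discount
        # window one step, so after N_STEP_RETURN updates R = r_t + gamma*r_{t+1} + ... + gamma^(N-1)*r_{t+N-1}
        # for the oldest stored transition.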
if done is True:
while len(self.memory) > 0:
n = len(self.memory)
s, a, r, s_, done, total_r = get_sample(self.memory, n)
self.brain.train_push(s, a, r, s_, done, total_r)
self.R = ( self.R - self.memory[0][2] ) / self.brain.gamma
self.memory.pop(0)
self.R = 0
if len(self.memory) >= N_STEP_RETURN:
s, a, r, s_, done, total_r = get_sample(self.memory, N_STEP_RETURN)
self.brain.train_push(s, a, r, s_, done, total_r)
self.R = self.R - self.memory[0][2]
self.memory.pop(0)
| 32
| 100
| 0.532118
|
87a5a81b75124cd10e8e438185edb32579fa0b90
| 12,869
|
py
|
Python
|
tests/test_keep_largest_connected_component.py
|
wizofe/MONAI
|
d6327a765cf23dafd680b2a4b472edf9639e1087
|
[
"Apache-2.0"
] | null | null | null |
tests/test_keep_largest_connected_component.py
|
wizofe/MONAI
|
d6327a765cf23dafd680b2a4b472edf9639e1087
|
[
"Apache-2.0"
] | null | null | null |
tests/test_keep_largest_connected_component.py
|
wizofe/MONAI
|
d6327a765cf23dafd680b2a4b472edf9639e1087
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
import torch.nn.functional as F
from parameterized import parameterized
from monai.transforms import KeepLargestConnectedComponent
from monai.transforms.utils_pytorch_numpy_unification import moveaxis
from monai.utils.type_conversion import convert_to_dst_type
from tests.utils import TEST_NDARRAYS, assert_allclose
def to_onehot(x):
out = moveaxis(F.one_hot(torch.as_tensor(x).long())[0], -1, 0)
out, *_ = convert_to_dst_type(out, x)
return out
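# e.g. an input of shape (1, H, W) containing labels {0, 1, 2} becomes a (3, H, W) one-hot volume.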
grid_1 = [[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]
grid_2 = [[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [1, 0, 1, 1, 2], [1, 0, 1, 2, 2], [0, 0, 0, 0, 1]]]
grid_3 = [
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
]
grid_4 = [
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
]
grid_5 = [[[0, 0, 1, 0, 0], [0, 1, 1, 1, 1], [1, 1, 1, 0, 0], [1, 1, 0, 1, 0], [1, 1, 0, 0, 1]]]
TESTS = []
for p in TEST_NDARRAYS:
TESTS.append(
[
"value_1",
{"independent": False, "applied_labels": 1, "is_onehot": False},
p(grid_1),
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]),
]
)
TESTS.append(
[
"value_2",
{"independent": False, "applied_labels": [2], "is_onehot": False},
p(grid_1),
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 0]]]),
]
)
TESTS.append(
[
"independent_value_1_2",
{"independent": True, "applied_labels": [1, 2], "is_onehot": False},
p(grid_1),
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 1, 0], [2, 2, 0, 0, 0]]]),
]
)
TESTS.append(
[
"dependent_value_1_2",
{"independent": False, "applied_labels": [1, 2], "is_onehot": False},
p(grid_1),
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]),
]
)
TESTS.append(
[
"value_1",
{"independent": True, "applied_labels": [1], "is_onehot": False},
p(grid_2),
torch.tensor([[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 0]]]),
]
)
TESTS.append(
[
"independent_value_1_2",
{"independent": True, "applied_labels": [1, 2], "is_onehot": False},
p(grid_2),
torch.tensor([[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 0]]]),
]
)
TESTS.append(
[
"dependent_value_1_2",
{"independent": False, "applied_labels": [1, 2], "is_onehot": False},
p(grid_2),
torch.tensor([[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 1]]]),
]
)
TESTS.append(
[
"value_1_connect_1",
{"independent": False, "applied_labels": [1], "connectivity": 1, "is_onehot": False},
p(grid_1),
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 0, 0], [2, 2, 0, 0, 2]]]),
]
)
TESTS.append(
[
"independent_value_1_2_connect_1",
{"independent": True, "applied_labels": [1, 2], "connectivity": 1, "is_onehot": False},
p(grid_1),
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 0, 0], [2, 2, 0, 0, 0]]]),
]
)
TESTS.append(
[
"onehot_none_dependent_value_1_2_connect_1",
{"independent": False, "applied_labels": [1, 2], "connectivity": 1},
p(grid_1),
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 0, 0], [2, 2, 0, 0, 0]]]),
]
)
TESTS.append(
[
"onehot_independent_batch_2_apply_label_1_connect_1",
{"independent": True, "applied_labels": [1], "connectivity": 1, "is_onehot": True},
p(grid_3),
torch.tensor(
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
]
),
]
)
TESTS.append(
[
"onehot_independent_batch_2_apply_label_1_connect_2",
{"independent": True, "applied_labels": [1], "connectivity": 2, "is_onehot": True},
p(grid_3),
torch.tensor(
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
]
),
]
)
TESTS.append(
[
"onehot_independent_batch_2_apply_label_1_2_connect_2",
{"independent": True, "applied_labels": [1, 2], "connectivity": 2, "is_onehot": True},
p(grid_3),
torch.tensor(
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
],
]
),
]
)
TESTS.append(
[
"onehot_dependent_batch_2_apply_label_1_2_connect_2",
{"independent": False, "applied_labels": [1, 2], "connectivity": 2, "is_onehot": True},
p(grid_4),
torch.tensor(
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
]
),
]
)
TESTS.append(
[
"onehot_none_dependent_batch_2_apply_label_1_2_connect_1",
{"independent": False, "applied_labels": [1, 2], "connectivity": 1},
p(grid_4),
torch.tensor(
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
]
),
]
)
TESTS.append(
[
"all_non_zero_labels",
{"independent": True},
p(grid_1),
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 1, 0], [2, 2, 0, 0, 0]]]),
]
)
class TestKeepLargestConnectedComponent(unittest.TestCase):
@parameterized.expand(TESTS)
def test_correct_results(self, _, args, input_image, expected):
converter = KeepLargestConnectedComponent(**args)
result = converter(input_image)
assert_allclose(result, expected, type_test=False)
if "is_onehot" in args:
args["is_onehot"] = not args["is_onehot"]
# if not onehotted, onehot it and make sure result stays the same
if input_image.shape[0] == 1:
img = to_onehot(input_image)
result2 = KeepLargestConnectedComponent(**args)(img)
result2 = result2.argmax(0)[None]
assert_allclose(result, result2)
# if onehotted, un-onehot and check result stays the same
else:
img = input_image.argmax(0)[None]
result2 = KeepLargestConnectedComponent(**args)(img)
assert_allclose(result.argmax(0)[None], result2)
if __name__ == "__main__":
unittest.main()
| 34.781081
| 114
| 0.361178
|
e8d7f46cf22ba1a00b9bbf2ecdd52f1c8627c537
| 2,519
|
py
|
Python
|
delta/data/utils/vocabulary.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 1,442
|
2019-07-09T07:34:28.000Z
|
2020-11-15T09:52:09.000Z
|
delta/data/utils/vocabulary.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 93
|
2019-07-22T09:20:20.000Z
|
2020-11-13T01:59:30.000Z
|
delta/data/utils/vocabulary.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 296
|
2019-07-09T07:35:28.000Z
|
2020-11-16T02:27:51.000Z
|
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Going to be deprecated"""
import copy
import collections
# pylint: disable=too-many-instance-attributes
class Vocabulary:
''' vocabulary '''
def __init__(self, use_default_dict):
self._padding_token = "<pad>"
self._unknown_token = "<unk>"
self._start_of_sentence = "<sos>"
self._end_of_sentence = "<eos>"
self._s_token = "<s>"
self._slash_s_token = "</s>"
self._default_dict = {
self._padding_token: 0,
self._s_token: 1,
self._slash_s_token: 2,
self._unknown_token: 3,
self._start_of_sentence: 4,
self._end_of_sentence: 5
}
self.use_default_dict = use_default_dict
if self.use_default_dict:
self._mapping = copy.deepcopy(self._default_dict)
else:
self._mapping = {}
self._freq = collections.defaultdict(int)
def __getitem__(self, key):
return self._mapping[key]
def add(self, word):
''' update vocab statis'''
if word not in self._mapping:
self._mapping[word] = len(self._mapping)
self._freq[word] += 1
def trim(self, min_frequency):
''' trim word freq less than min_frequency'''
# sort by frequency
self._freq = sorted(self._freq.items(), key=lambda x: x[1], reverse=True)
if self.use_default_dict:
self._mapping = copy.deepcopy(self._default_dict)
idx = len(self._default_dict)
else:
self._mapping = {}
idx = 0
for word, count in self._freq:
if count < min_frequency:
break
if word in self._mapping:
continue
self._mapping[word] = idx
idx += 1
self._freq = dict(self._freq[:idx - 1])
@property
def freq(self):
'''read-only accessor for _freq'''
return self._freq
@property
def mapping(self):
''' read-only accessor for _mapping'''
return self._mapping
| 28.954023
| 80
| 0.647082
|
846a3c36e833a2c562ed026b2479289a6a4b56ba
| 1,534
|
py
|
Python
|
attendees/occasions/apps.py
|
xjlin0/attendees32
|
25913c75ea8d916dcb065a23f2fa68bea558f77c
|
[
"MIT"
] | null | null | null |
attendees/occasions/apps.py
|
xjlin0/attendees32
|
25913c75ea8d916dcb065a23f2fa68bea558f77c
|
[
"MIT"
] | 5
|
2022-01-21T03:26:40.000Z
|
2022-02-04T17:32:16.000Z
|
attendees/occasions/apps.py
|
xjlin0/attendees32
|
25913c75ea8d916dcb065a23f2fa68bea558f77c
|
[
"MIT"
] | null | null | null |
import pghistory
from django.apps import AppConfig
from django.apps import apps as django_apps
class OccasionsConfig(AppConfig):
name = "attendees.occasions"
def ready(self):
schedule_calendar_model = django_apps.get_model("schedule.Calendar", require_ready=False)
schedule_calendarrelation_model = django_apps.get_model("schedule.CalendarRelation", require_ready=False)
schedule_event_model = django_apps.get_model("schedule.Event", require_ready=False)
schedule_eventrelation_model = django_apps.get_model("schedule.EventRelation", require_ready=False)
pghistory.track(
pghistory.Snapshot('calendar.snapshot'),
related_name='history',
model_name='CalendarHistory',
app_label='occasions'
)(schedule_calendar_model)
pghistory.track(
pghistory.Snapshot('calendarrelation.snapshot'),
related_name='history',
model_name='CalendarRelationHistory',
app_label='occasions',
)(schedule_calendarrelation_model)
pghistory.track(
pghistory.Snapshot('event.snapshot'),
related_name='history',
model_name='EventHistory',
app_label='occasions'
)(schedule_event_model)
pghistory.track(
pghistory.Snapshot('eventrelation.snapshot'),
related_name='history',
model_name='EventRelationHistory',
app_label='occasions',
)(schedule_eventrelation_model)
| 36.52381
| 113
| 0.671447
|
111d0777b494bcd3d5c49aca04ebd89162294ffb
| 444
|
py
|
Python
|
model/InvertedIndexM.py
|
philgookang/Simulated_InvertedIndex_TFIDF_PageRank
|
612452f50913275b818942f82da098b78956ff3a
|
[
"MIT"
] | null | null | null |
model/InvertedIndexM.py
|
philgookang/Simulated_InvertedIndex_TFIDF_PageRank
|
612452f50913275b818942f82da098b78956ff3a
|
[
"MIT"
] | null | null | null |
model/InvertedIndexM.py
|
philgookang/Simulated_InvertedIndex_TFIDF_PageRank
|
612452f50913275b818942f82da098b78956ff3a
|
[
"MIT"
] | null | null | null |
from system import *
class InvertedIndexM:
def __init__(self, dicts = {}):
self.postman = Database.init()
for key,val in dicts.items():
setattr(self, str(key), val)
def createmany(self, lst):
query = '''
INSERT INTO `inverted_index`
( `term`, `id`, `term_encod`)
VALUES
( %s, %s, %s )
'''
self.postman.executemany(query, lst)
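# lst is expected to be an iterable of (term, id, term_encod) tuples,
# e.g. [("apple", 42, "6170706c65")]; executemany inserts them as a batch.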
| 26.117647
| 45
| 0.502252
|
b2a832951132ceefa41e093eaa65f36461d93686
| 19,299
|
py
|
Python
|
code/ARAX/ARAXQuery/ARAX_ranker.py
|
RichardBruskiewich/RTX
|
ce126fdc5df6b5b13cc3ac2857ffee23954a0a7f
|
[
"MIT"
] | null | null | null |
code/ARAX/ARAXQuery/ARAX_ranker.py
|
RichardBruskiewich/RTX
|
ce126fdc5df6b5b13cc3ac2857ffee23954a0a7f
|
[
"MIT"
] | null | null | null |
code/ARAX/ARAXQuery/ARAX_ranker.py
|
RichardBruskiewich/RTX
|
ce126fdc5df6b5b13cc3ac2857ffee23954a0a7f
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
import sys
def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs)
import os
import json
import ast
import re
from datetime import datetime
import numpy as np
from response import Response
from query_graph_info import QueryGraphInfo
from knowledge_graph_info import KnowledgeGraphInfo
from ARAX_messenger import ARAXMessenger
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../")
from RTXConfiguration import RTXConfiguration
class ARAXRanker:
# #### Constructor
def __init__(self):
self.response = None
self.message = None
self.parameters = None
# #### ############################################################################################
# #### For each result[], aggregate all available confidence metrics and other scores to compute a final score
def aggregate_scores(self, message, response=None):
# #### Set up the response object if one is not already available
if response is None:
if self.response is None:
response = Response()
else:
response = self.response
else:
self.response = response
self.message = message
# #### Compute some basic information about the query_graph
query_graph_info = QueryGraphInfo()
result = query_graph_info.assess(message)
#response.merge(result)
#if result.status != 'OK':
# print(response.show(level=Response.DEBUG))
# return response
# DMK FIXME: This need to be refactored so that:
# 1. The attribute names are dynamically mapped to functions that handle their weightings (for ease of renaming attribute names)
# 2. Weighting of individual attributes (eg. "probability" should be trusted MUCH less than "probability_treats")
# 3. Auto-handling of normalizing scores to be in [0,1] (eg. observed_expected ratio \in (-inf, inf) while probability \in (0,1))
# 4. Auto-thresholding of values (eg. if chi_square <0.05, penalize the most, if probability_treats < 0.8, penalize the most, etc.)
# 5. Allow for ranked answers (eg. observed_expected can have a single, huge value, skewing the rest of them)
# #### Iterate through all the edges in the knowledge graph to:
# #### 1) Create a dict of all edges by id
# #### 2) Collect some min,max stats for edge_attributes that we may need later
kg_edges = {}
score_stats = {}
for edge in message.knowledge_graph.edges:
kg_edges[edge.id] = edge
if edge.edge_attributes is not None:
for edge_attribute in edge.edge_attributes:
# FIXME: DMK: We should probably have some some way to dynamically get the attribute names since they appear to be constantly changing
# DMK: Crazy idea: have the individual ARAXi commands pass along their attribute names along with what they think of is a good way to handle them
# DMK: eg. "higher is better" or "my range of [0, inf]" or "my value is a probability", etc.
for attribute_name in [ 'probability', 'normalized_google_distance', 'jaccard_index',
'probability_treats', 'paired_concept_frequency',
'observed_expected_ratio', 'chi_square']:
if edge_attribute.name == attribute_name:
if attribute_name not in score_stats:
score_stats[attribute_name] = {'minimum': None, 'maximum': None} # FIXME: doesn't handle the case when all values are inf or NaN
value = float(edge_attribute.value)
# TODO: don't set to max here, since returning inf for some edge attributes means "I have no data"
#if np.isinf(value):
# value = 9999
# initialize if not None already
if not np.isinf(value) and not np.isinf(-value) and not np.isnan(value): # Ignore inf, -inf, and nan
if not score_stats[attribute_name]['minimum']:
score_stats[attribute_name]['minimum'] = value
if not score_stats[attribute_name]['maximum']:
score_stats[attribute_name]['maximum'] = value
if value > score_stats[attribute_name]['maximum']: # DMK FIXME: expected type 'float', got 'None' instead
score_stats[attribute_name]['maximum'] = value
if value < score_stats[attribute_name]['minimum']: # DMK FIXME: expected type 'float', got 'None' instead
score_stats[attribute_name]['minimum'] = value
response.info(f"Summary of available edge metrics: {score_stats}")
# #### Loop through the results[] in order to compute aggregated scores
i_result = 0
for result in message.results:
#response.debug(f"Metrics for result {i_result} {result.essence}: ")
# #### Begin with a default score of 1.0 for everything
score = 1.0
# #### There are often many edges associated with a result[]. Some are great, some are terrible.
# #### For now, the score will be based on the best one. Maybe combining probabilities in quadrature would be better
best_probability = 0.0 # TODO: What's this? the best probability of what?
eps = np.finfo(float).eps # epsilon to avoid division by 0
penalize_factor = 0.7 # multiplicative factor to penalize by if the KS/KP return NaN or Inf indicating they haven't seen it before
# #### Loop through each edge in the result
for edge in result.edge_bindings:
kg_edge_id = edge.kg_id
# #### Set up a string buffer to keep some debugging information that could be printed
buf = ''
# #### If the edge has a confidence value, then multiply that into the final score
if kg_edges[kg_edge_id].confidence is not None:
buf += f" confidence={kg_edges[kg_edge_id].confidence}"
score *= float(kg_edges[kg_edge_id].confidence)
# #### If the edge has attributes, loop through those looking for scores that we know how to handle
if kg_edges[kg_edge_id].edge_attributes is not None:
for edge_attribute in kg_edges[kg_edge_id].edge_attributes:
# FIXME: These are chemical_substance->protein binding probabilities, may not want be treating them like this....
#### EWD: Vlado has suggested that any of these links with chemical_substance->protein binding probabilities are
#### EWD: mostly junk. very low probability of being correct. His opinion seemed to be that they shouldn't be in the KG
#### EWD: If we keep them, maybe their probabilities should be knocked down even further, in half, in quarter..
# DMK: I agree: hence why I said we should probably not be treating them like this (and not trusting them a lot)
# #### If the edge_attribute is named 'probability', then for now use it to record the best probability only
if edge_attribute.name == 'probability':
value = float(edge_attribute.value)
buf += f" probability={edge_attribute.value}"
if value > best_probability:
best_probability = value
# #### If the edge_attribute is named 'probability_drug_treats', then for now we won't do anything
# #### because this value also seems to be copied into the edge confidence field, so is already
# #### taken into account
#if edge_attribute.name == 'probability_drug_treats': # this is already put in confidence
# buf += f" probability_drug_treats={edge_attribute.value}"
# score *= value
# DMK FIXME: Do we actually have 'probability_drug_treats' attributes?, the probability_drug_treats is *not* put in the confidence see: confidence = None in `predict_drug_treats_disease.py`
# DMK: also note the edge type is: edge_type = "probably_treats"
# If the edge_attribute is named 'probability_treats', use the value more or less as a probability
#### EWD says: but note that when I last worked on this, the probability_treats was repeated in an edge attribute
#### EWD says: as well as in the edge confidence score, so I commented out this section (see immediately above) DMK (same re: comment above :) )
#### EWD says: so that it wouldn't be counted twice. But that may have changed in the mean time.
if edge_attribute.name == "probability_treats":
prob_treats = float(edge_attribute.value)
# Don't treat as a good prediction if the ML model returns a low value
if prob_treats < penalize_factor:
factor = penalize_factor
else:
factor = prob_treats
score *= factor # already a number between 0 and 1, so just multiply
# #### If the edge_attribute is named 'ngd', then use some hocus pocus to convert to a confidence
if edge_attribute.name == 'normalized_google_distance':
ngd = float(edge_attribute.value)
# If the distance is infinite, then set it to 10, a very large number in this context
if np.isinf(ngd):
ngd = 10.0
buf += f" ngd={ngd}"
# #### Apply a somewhat arbitrary transformation such that:
# #### NGD = 0.3 leads to a factor of 1.0. That's *really* close
# #### NGD = 0.5 leads to a factor of 0.88. That still a close NGD
# #### NGD = 0.7 leads to a factor of 0.76. Same ballpark
# #### NGD = 0.9 this is pretty far away. Still the factor is 0.64. Distantly related
# #### NGD = 1.0 is very far. Still, factor is 0.58. Grade inflation is rampant.
factor = 1 - ( ngd - 0.3) * 0.6
# Apply limits of 1.0 and 0.01 to the linear fudge
if factor < 0.01:
factor = 0.01
if factor > 1:
factor = 1.0
buf += f" ngd_factor={factor}"
score *= factor
# #### If the edge_attribute is named 'jaccard_index', then use some hocus pocus to convert to a confidence
if edge_attribute.name == 'jaccard_index':
jaccard = float(edge_attribute.value)
# If the jaccard index is infinite, set to some arbitrarily bad score
if np.isinf(jaccard):
jaccard = 0.01
# #### Set the confidence factor so that the best value of all results here becomes 0.95
# #### Why not 1.0? Seems like in scenarios where we're computing a Jaccard index, nothing is really certain
factor = jaccard / score_stats['jaccard_index']['maximum'] * 0.95
buf += f" jaccard={jaccard}, factor={factor}"
score *= factor
# If the edge_attribute is named 'paired_concept_frequency', then ...
if edge_attribute.name == "paired_concept_frequency":
paired_concept_freq = float(edge_attribute.value)
if np.isinf(paired_concept_freq) or np.isnan(paired_concept_freq):
factor = penalize_factor
else:
try:
factor = paired_concept_freq / score_stats['paired_concept_frequency']['maximum']
except:
factor = paired_concept_freq / (score_stats['paired_concept_frequency']['maximum'] + eps)
score *= factor
buf += f" paired_concept_frequency={paired_concept_freq}, factor={factor}"
# If the edge_attribute is named 'observed_expected_ratio', then ...
if edge_attribute.name == 'observed_expected_ratio':
obs_exp_ratio = float(edge_attribute.value)
if np.isinf(obs_exp_ratio) or np.isnan(obs_exp_ratio):
    factor = penalize_factor  # Penalize for missing info
else:
    # Would love to throw this into a sigmoid like function customized by the max value observed
    # for now, just throw into a sigmoid and see what happens
    factor = 1 / float(1 + np.exp(-4 * obs_exp_ratio))
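# Worked values of the sigmoid mapping, for reference:
# obs_exp_ratio = 0 -> factor 0.5; 0.5 -> ~0.88; 1.0 -> ~0.98; -1.0 -> ~0.02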
score *= factor
buf += f" observed_expected_ratio={obs_exp_ratio}, factor={factor}"
# If the edge_attribute is named 'chi_square', then compute a factor based on the chisq and the max chisq
if edge_attribute.name == 'chi_square':
chi_square = float(edge_attribute.value)
if np.isinf(chi_square) or np.isnan(chi_square):
factor = penalize_factor
else:
try:
factor = 1 - (chi_square / score_stats['chi_square']['maximum']) # lower is better
except:
factor = 1 - (chi_square / (score_stats['chi_square']['maximum'] + eps)) # lower is better
score *= factor
buf += f" chi_square={chi_square}, factor={factor}"
# #### When debugging, log the edge_id and the accumulated information in the buffer
#response.debug(f" - {kg_edge_id} {buf}")
# #### If there was a best_probability recorded, then multiply into the running score
#### EWD: This was commented out by DMK? I don't know why. I think it should be here FIXME
#if best_probability > 0.0:
# score *= best_probability
# DMK: for some reason, this was causing my scores to be ridiculously low, so I commented it out and confidences went up "quite a bit"
# #### Make all scores at least 0.01. This is all way low anyway, but let's not have anything that rounds to zero
# #### This is a little bad in that 0.005 becomes better than 0.011, but this is all way low, so who cares
if score < 0.01:
score += 0.01
#### Round to reasonable precision. Keep only 3 digits after the decimal
score = int(score * 1000 + 0.5) / 1000.0
#response.debug(f" ---> final score={score}")
result.confidence = score
result.row_data = [ score, result.essence, result.essence_type ]
i_result += 1
#### Add table columns name
message.table_column_names = [ 'confidence', 'essence', 'essence_type' ]
#### Re-sort the final results
message.results.sort(key=lambda result: result.confidence, reverse=True)
# #### ############################################################################################
# #### For each result[], aggregate all available confidence metrics and other scores to compute a final score
def sort_results_by_confidence(self, message, response=None):
# #### Set up the response object if one is not already available
if response is None:
if self.response is None:
response = Response()
else:
response = self.response
else:
self.response = response
self.message = message
response.info("Re-sorting results by overal confidence metrics")
#### Dead-simple sort, probably not very robust
message.results.sort(key=lambda result: result.confidence, reverse=True)
# #### ############################################################################################
# #### For each result[], create a simple tabular entry of the essence values and confidence
def create_tabular_results(self, message, response=None):
# #### Set up the response object if one is not already available
if response is None:
if self.response is None:
response = Response()
else:
response = self.response
else:
self.response = response
self.message = message
response.info(f"Add simple tabular results to the Message")
# #### Loop through the results[] adding row_data for that result
for result in message.results:
# #### For now, just the confidence, essence, and essence_type
result.row_data = [ result.confidence, result.essence, result.essence_type ]
#### Add table columns name
message.table_column_names = [ 'confidence', 'essence', 'essence_type' ]
##########################################################################################
def main():
#### Create a response object
response = Response()
ranker = ARAXRanker()
#### Get a Message to work on
messenger = ARAXMessenger()
print("INFO: Fetching message to work on from arax.rtx.ai",flush=True)
message = messenger.fetch_message('https://arax.rtx.ai/api/rtx/v1/message/2614')
if message is None:
print("ERROR: Unable to fetch message")
return
ranker.aggregate_scores(message,response=response)
#### Show the final result
print(response.show(level=Response.DEBUG))
print("Results:")
for result in message.results:
confidence = result.confidence
if confidence is None:
confidence = 0.0
print(" -" + '{:6.3f}'.format(confidence) + f"\t{result.essence}")
#print(json.dumps(ast.literal_eval(repr(message)),sort_keys=True,indent=2))
if __name__ == "__main__": main()
| 57.097633
| 213
| 0.550184
|
ddd74919e972c376750a315ccb824efc7f3187e5
| 1,795
|
py
|
Python
|
account_handler.py
|
Costa-SM/itabot_ferias
|
e3706f1b09d28a78e079e6d9ccdef4f00d41d047
|
[
"MIT"
] | null | null | null |
account_handler.py
|
Costa-SM/itabot_ferias
|
e3706f1b09d28a78e079e6d9ccdef4f00d41d047
|
[
"MIT"
] | null | null | null |
account_handler.py
|
Costa-SM/itabot_ferias
|
e3706f1b09d28a78e079e6d9ccdef4f00d41d047
|
[
"MIT"
] | null | null | null |
import tweepy
class Account(object):
"""
Represents the account that will be used with the Twitter API.
"""
def __init__(self, consumerkey, bearer, accesskey):
"""
Prepares the account for use with the Twitter API via Tweepy.
:param consumerkey: Tuple containing the Consumer Key and Consumer Key Secret.
:type consumerkey: tuple
:param bearer: String containing the bearer for the account.
:type bearer: str
:param accesskey: Tuple containing the Access Token and Access Token Secret.
:type accesskey: tuple
"""
self.consumer_key = consumerkey
self.bearer = bearer
self.access_key = accesskey
self.auth = tweepy.OAuthHandler(self.consumer_key[0], self.consumer_key[1])
self.auth.set_access_token(self.access_key[0], self.access_key[1])
self.api = tweepy.API(self.auth)
def get_homepage_tweets(self):
"""
Gets the last 20 tweets presented in the account's main timeline (i.e. homepage).
:rtype: list of status objects
"""
return self.api.home_timeline()
def get_last_tweet_date(self):
"""
Gets the creation date of the account's most recent tweet.
:rtype: datetime object
"""
return self.api.user_timeline()[0].created_at
def get_last_tweet_text(self):
"""
Gets the text of the account's most recent tweet.
:rtype: str
"""
return self.api.user_timeline()[0].text
def tweet(self, tweetedText):
"""
Tweets the specified string.
:param: tweetedText: String that will be tweeted by the account.
:type: tweetedText: str
:rtype: none
"""
self.api.update_status(tweetedText)
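# Illustrative usage (credentials below are placeholders, not real values):
# bot = Account(("CONSUMER_KEY", "CONSUMER_SECRET"), "BEARER_TOKEN", ("ACCESS_TOKEN", "ACCESS_SECRET"))
# bot.tweet("Hello from the bot!")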
| 30.423729
| 89
| 0.616156
|
f5b95333edf3bf2f8fd3e4885a5661403d64ebf7
| 1,008
|
py
|
Python
|
isi_sdk_8_2_1/test/test_certificate_server_certificate.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_1/test/test_certificate_server_certificate.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_1/test/test_certificate_server_certificate.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.certificate_server_certificate import CertificateServerCertificate # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestCertificateServerCertificate(unittest.TestCase):
"""CertificateServerCertificate unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCertificateServerCertificate(self):
"""Test CertificateServerCertificate"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_1.models.certificate_server_certificate.CertificateServerCertificate() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.585366
| 114
| 0.736111
|
5cb1369bc0ebec5d6fb5fd46376e3b66e29e1cb8
| 1,132
|
py
|
Python
|
data_stats.py
|
raphaelmenges/PierreKrapf-Bachelorthesis
|
9d0b92ffb9d862032e0531e9a210964bef6d2ea0
|
[
"MIT"
] | null | null | null |
data_stats.py
|
raphaelmenges/PierreKrapf-Bachelorthesis
|
9d0b92ffb9d862032e0531e9a210964bef6d2ea0
|
[
"MIT"
] | null | null | null |
data_stats.py
|
raphaelmenges/PierreKrapf-Bachelorthesis
|
9d0b92ffb9d862032e0531e9a210964bef6d2ea0
|
[
"MIT"
] | null | null | null |
import os
import os.path as path
import numpy as np
import re
from prettytable import PrettyTable
from functools import reduce
# TODO: Turn this into arg
folder_path = path.join("G:", "BA", "Data", "Out")
all_files = os.listdir(folder_path)
all_files_iter = iter(all_files)
counts = {}
for file_name in all_files_iter:
cl = re.search("^\d+-\d+-(\w+).png$", file_name)
if cl == None:
continue
# print("filename: %s" % file_name)
# print("match: %s" % cl[1])
label = cl[1]
if label in counts:
counts[label] = counts[label] + 1
else:
counts[label] = 1
arr = []
for (key, count) in counts.items():
arr.append((key, count))
sorted_arr = sorted(arr, key=lambda x: x[1])
sorted_arr.reverse()
number_of_images = reduce((lambda acc, el: acc + el[1]), sorted_arr, 0)
print(f"Number of images: {number_of_images}")
# for (label, count) in sorted_arr:
# print(f"{label}: {count}")
tb = PrettyTable(["Nr", "Klasse", "Anzahl", "Anteil"])
for (nr, (label, count)) in enumerate(sorted_arr):
tb.add_row([nr+1, label, count, round(count / number_of_images*100, 2)])
print(tb)
| 24.608696
| 76
| 0.64576
|
a379d2f292b4582d908295d1891c35c0dc9b8a49
| 2,711
|
py
|
Python
|
asyncfile/usefuls.py
|
try-fail1/asyncfile
|
bef47653308a2f0ca271391e0d07a151695de757
|
[
"MIT"
] | 2
|
2020-06-07T04:32:07.000Z
|
2020-08-16T06:12:33.000Z
|
asyncfile/usefuls.py
|
try-fail1/asyncfile
|
bef47653308a2f0ca271391e0d07a151695de757
|
[
"MIT"
] | null | null | null |
asyncfile/usefuls.py
|
try-fail1/asyncfile
|
bef47653308a2f0ca271391e0d07a151695de757
|
[
"MIT"
] | null | null | null |
from asyncio import AbstractEventLoop
from typing import (
Optional, Iterable,
AsyncIterator, Callable
)
from collections.abc import Coroutine
from functools import wraps
from .threads import threadwork
Loop = Optional[AbstractEventLoop]
def add_async_methods(names: Iterable):
def the_class(cls):
for i in names:
setattr(cls, i, set_async(i))
return cls
return the_class
def add_properties(names: Iterable):
def classy(cls):
for i in names:
setattr(cls, i, set_property(i))
return cls
return classy
def set_async(method_name: str) -> Callable:
async def inner(self, *args, **kwargs):
method_impl = getattr(self._hidden, method_name)
return await threadwork(*args, func=method_impl, loop=self._loop, **kwargs)
return inner
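# Used together with add_async_methods above: e.g. add_async_methods(("read", "write")) applied to a
# wrapper class generates async read/write methods that run the hidden object's methods via threadwork.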
def set_property(property_value: str) -> property:
def getit(self):
return getattr(self._hidden, property_value)
def setit(self, value):
setattr(self._hidden, property_value, value)
def delit(self):
    delattr(self._hidden, property_value)
return property(getit, setit, delit)
class AsyncMixin:
def __aiter__(self):
return self
async def __anext__(self):
f = await self.readline()
if not f:
raise StopAsyncIteration
return f
async def __aenter__(self):
return self
async def __aexit__(self, *exc) -> None:
await self.close()
def generate_repr(cls):
cls.__repr__ = lambda self: f"<{self.__class__.__name__}: {self.name!r}>"
return cls
class AwaitedForYou(Coroutine):
__slots__ = ('coro', 'ret')
def __init__(self, coro):
self.coro = coro
self.ret = None
def send(self, val):
return self.coro.send(val)
def throw(self, typ, val=None, tb=None):
return self.coro.throw(typ, val, tb)
def close(self) -> None:
return self.coro.close()
def __await__(self):
return self.coro.__await__()
async def __aenter__(self):
self.ret = await self.coro
return self.ret
async def __aexit__(self, *exc) -> None:
await self.ret.close()
self.ret = None
def __aiter__(self) -> AsyncIterator:
async def asyncgen():
f = await self.coro
async for i in f:
yield i
return asyncgen()
# Ultimately, implementing `__anext__` is not reasonably achievable here
def make_async(func):
@wraps(func)
def different(*args, **kwargs):
return AwaitedForYou(func(*args, **kwargs))
return different
| 25.101852
| 83
| 0.617484
|
46c1c9796617997daac4f76506c0ae5ef786b798
| 7,070
|
py
|
Python
|
multi_script_editor/widgets/themeEditor.py
|
paulwinex/pw_multiScriptEditor
|
e447e99f87cb07e238baf693b7e124e50efdbc51
|
[
"MIT"
] | 142
|
2015-03-21T12:56:21.000Z
|
2022-02-08T04:42:46.000Z
|
multi_script_editor/widgets/themeEditor.py
|
paulwinex/pw_multiScriptEditor
|
e447e99f87cb07e238baf693b7e124e50efdbc51
|
[
"MIT"
] | 7
|
2016-02-16T05:44:57.000Z
|
2022-01-07T06:05:35.000Z
|
multi_script_editor/widgets/themeEditor.py
|
paulwinex/pw_multiScriptEditor
|
e447e99f87cb07e238baf693b7e124e50efdbc51
|
[
"MIT"
] | 43
|
2015-04-16T06:14:54.000Z
|
2021-11-01T05:07:18.000Z
|
try:
from PySide.QtCore import *
from PySide.QtGui import *
qt = 1
except ImportError:
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
qt = 2
import themeEditor_UIs as ui
import settingsManager
import os
from .pythonSyntax import design
from .pythonSyntax import syntaxHighLighter
from . import inputWidget
import icons_rcs
class themeEditorClass(QDialog, ui.Ui_themeEditor):
def __init__(self, parent = None, desk=None):
super(themeEditorClass, self).__init__(parent)
self.setupUi(self)
self.preview_twd = inputWidget.inputClass(self, desk)
self.preview_ly.addWidget(self.preview_twd)
self.preview_twd.setPlainText(defaultText)
self.splitter.setSizes([200,300])
self.s = settingsManager.scriptEditorClass()
self.colors_lwd.itemDoubleClicked.connect(self.getNewColor)
self.save_btn.clicked.connect(self.saveTheme)
self.del_btn.clicked.connect(self.deleteTheme)
self.themeList_cbb.currentIndexChanged.connect(self.updateColors)
self.apply_btn.clicked.connect(self.apply)
self.apply_btn.setText('Close')
self.textSize_spb.valueChanged.connect(self.updateExample)
self.fillUI()
self.updateUI()
self.updateColors()
self.preview_twd.completer.updateCompleteList()
self.namespace={}
def fillUI(self, restore=None):
if restore is None:
restore = self.themeList_cbb.currentText()
settings = self.s.readSettings()
self.themeList_cbb.clear()
self.themeList_cbb.addItem('default')
if settings.get('colors'):
for x in settings.get('colors'):
self.themeList_cbb.addItem(x)
if not restore:
restore = settings.get('theme')
if restore:
index = self.themeList_cbb.findText(restore)
self.themeList_cbb.setCurrentIndex(index)
self.updateExample()
def updateColors(self):
curTheme = self.themeList_cbb.currentText()
if curTheme == 'default':
self.del_btn.setEnabled(0)
colors = design.defaultColors
else:
self.del_btn.setEnabled(1)
settings = self.s.readSettings()
allThemes = settings.get('colors')
if allThemes and curTheme in allThemes:
colors = allThemes.get(curTheme)
for k, v in design.getColors().items():
if k not in colors:
colors[k] = v
else:
colors = design.getColors()
self.colors_lwd.clear()
for x in sorted(colors.keys()):
if x == 'textsize':
self.textSize_spb.setValue(colors['textsize'])
else:
item = QListWidgetItem(x)
pix = QPixmap(QSize(16,16))
pix.fill(QColor(*colors[x]))
item.setIcon(QIcon(pix))
item.setData(32, colors[x])
self.colors_lwd.addItem(item)
self.updateExample()
def updateExample(self):
colors = self.getCurrentColors()
self.preview_twd.applyPreviewStyle(colors)
def getCurrentColors(self):
colors = {}
for i in range(self.colors_lwd.count()):
item = self.colors_lwd.item(i)
colors[item.text()] = item.data(32)
colors['textsize'] = self.textSize_spb.value()
return colors
def getNewColor(self):
items = self.colors_lwd.selectedItems()
if items:
item = items[0]
init = QColor(*item.data(32))
color = QColorDialog.getColor(init ,self)
if color.isValid():
newColor = (color.red(), color.green(), color.blue())
item.setData(32, newColor)
pix = QPixmap(QSize(16,16))
pix.fill(QColor(*newColor))
item.setIcon(QIcon(pix))
self.updateExample()
def saveTheme(self):
text = self.themeList_cbb.currentText() or 'NewTheme'
name = QInputDialog.getText(self, 'Theme name', 'Enter Theme name', QLineEdit.Normal, text)
if name[1]:
name = name[0]
if name == 'default':
name = 'Not default'
settings = self.s.readSettings()
if 'colors' in settings:
if name in settings['colors']:
if not self.yes_no_question('Replace existing theme?'):
return
colors = self.getCurrentColors()
if 'colors' in settings:
settings['colors'][name] = colors
else:
settings['colors'] = {name: colors}
self.s.writeSettings(settings)
self.fillUI(name)
# self.updateUI()
def deleteTheme(self):
text = self.themeList_cbb.currentText()
if text:
if self.yes_no_question('Remove current theme?'):
name = self.themeList_cbb.currentText()
settings = self.s.readSettings()
if 'colors' in settings:
if name in settings['colors']:
del settings['colors'][name]
self.s.writeSettings(settings)
self.fillUI(False)
self.updateUI()
def updateUI(self):
if not self.themeList_cbb.count():
self.apply_btn.setEnabled(0)
else:
self.apply_btn.setEnabled(1)
def apply(self):
name = self.themeList_cbb.currentText()
if name:
settings = self.s.readSettings()
settings['theme'] = name
self.s.writeSettings(settings)
self.accept()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
event.ignore()
else:
super(themeEditorClass, self).keyPressEvent(event)
def current(self):
pass
# print self.colors_lwd.selectedItems()[0].data(32)
def yes_no_question(self, question):
msg_box = QMessageBox(self)
msg_box.setText(question)
yes_button = msg_box.addButton("Yes", QMessageBox.YesRole)
no_button = msg_box.addButton("No", QMessageBox.NoRole)
msg_box.exec_()
return msg_box.clickedButton() == yes_button
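# The snippet below is display-only sample text for the preview pane so the
# selected theme can be judged against real-looking code; it is fed to
# setPlainText() and never executed, so the mixed Python 2/3 syntax is harmless
# demo content for the highlighter.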
defaultText = r'''@decorator(param=1)
def f(x):
""" Syntax Highlighting Demo
@param x Parameter"""
s = ("Test", 2+3, {'a': 'b'}, x) # Comment
print s[0].lower()
class Foo:
def __init__(self):
string = 'newline'
self.makeSense(whatever=1)
def makeSense(self, whatever):
self.sense = whatever
x = len('abc')
print(f.__doc__)
'''
if __name__ == '__main__':
app = QApplication([])
w = themeEditorClass()
w.show()
qss = os.path.join(os.path.dirname(os.path.dirname(__file__)),'style', 'style.css')
if os.path.exists(qss):
w.setStyleSheet(open(qss).read())
app.exec_()
| 32.883721
| 99
| 0.578218
|
465d8a3695d2f9c6321c7f2d70b8bb84b9adee5e
| 461
|
py
|
Python
|
aliyun_exporter/test_utils.py
|
BoringCat/aliyun-exporter
|
3c9af5e6ee42c9b3876ce312a3c39db046032740
|
[
"Apache-2.0"
] | 1
|
2021-12-30T11:42:45.000Z
|
2021-12-30T11:42:45.000Z
|
aliyun_exporter/test_utils.py
|
BoringCat/aliyun-exporter
|
3c9af5e6ee42c9b3876ce312a3c39db046032740
|
[
"Apache-2.0"
] | null | null | null |
aliyun_exporter/test_utils.py
|
BoringCat/aliyun-exporter
|
3c9af5e6ee42c9b3876ce312a3c39db046032740
|
[
"Apache-2.0"
] | 1
|
2021-12-30T11:42:48.000Z
|
2021-12-30T11:42:48.000Z
|
from .utils import format_metric, format_period
def test_format_metric():
assert format_metric("") == ""
assert format_metric("a.b.c") == "a_b_c"
assert format_metric("aBcD") == "aBcD"
assert format_metric(".a.b.c.") == "_a_b_c_"
def test_format_period():
assert format_period("") == ""
assert format_period("3000") == "3000"
assert format_period("5,10,25,50,100,300") == "5"
assert format_period("300_00,500_00") == "300_00"
| 30.733333
| 53
| 0.652928
|
4a6d47a6cf2732946639dc449c00d301268f58c9
| 19,321
|
py
|
Python
|
Step_3_train_fast_RCNN.py
|
sumedhasingla/ExplainingBBSmoothly
|
0873d7a3b1fa0e9d967d01647e0fa5fe136a7604
|
[
"MIT"
] | null | null | null |
Step_3_train_fast_RCNN.py
|
sumedhasingla/ExplainingBBSmoothly
|
0873d7a3b1fa0e9d967d01647e0fa5fe136a7604
|
[
"MIT"
] | null | null | null |
Step_3_train_fast_RCNN.py
|
sumedhasingla/ExplainingBBSmoothly
|
0873d7a3b1fa0e9d967d01647e0fa5fe136a7604
|
[
"MIT"
] | null | null | null |
import sys
import os
import tensorflow as tf
import numpy as np
import scipy.io as sio
#from PIL import Image
from Fast_RCNN.rpn_proposal.utils import read_batch, generate_anchors, pre_process_xml
from Fast_RCNN.rpn_proposal.vggnet import vgg_16
from Fast_RCNN.rpn_proposal.ops import smooth_l1, offset2bbox
from Fast_RCNN.rpn_proposal.networks import rpn
import pdb
from Fast_RCNN.fast_rcnn.networks import network
from Fast_RCNN.fast_rcnn.ops import smooth_l1, xywh2x1y1x2y2
from Fast_RCNN.fast_rcnn.utils import read_batch as read_batch_rcnn
from utils import load_images_and_labels
import yaml
import random
import warnings
import time
import argparse
warnings.filterwarnings("ignore", category=DeprecationWarning)
tf.set_random_seed(0)
np.random.seed(0)
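# Editorial overview of Train() below (derived from the step markers in the code):
# step 1 trains the RPN on top of a pretrained VGG-16,
# step 2 runs NMS over the RPN outputs and dumps region proposals to a .mat file,
# step 3 rebuilds the graph and trains the Fast R-CNN head on those proposals,
# step 4 fine-tunes only the RPN variables starting from the step-3 weights,
# step 5 regenerates proposals with the fine-tuned RPN, and
# step 6 fine-tunes only the fc / classification / regression heads,
# i.e. the classic alternating-optimization schedule for Faster R-CNN.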
def Train():
parser = argparse.ArgumentParser()
parser.add_argument(
'--config', '-c', default='configs/Step_3_MIMIC_Object_Detector_256_Costophrenic_Recess.yaml')
parser.add_argument(
'--main_dir', '-m', default='/ocean/projects/asc170022p/singla/ExplainingBBSmoothly')
args = parser.parse_args()
main_dir = args.main_dir
# ============= Load config =============
config_path = os.path.join(main_dir, args.config)
config = yaml.safe_load(open(config_path))
print(config)
# ============= Experiment Folder=============
assets_dir = os.path.join(main_dir, config['log_dir'], config['name'])
log_dir = os.path.join(assets_dir, 'log')
ckpt_dir = os.path.join(assets_dir, 'ckpt_dir')
sample_dir = os.path.join(assets_dir, 'sample')
test_dir = os.path.join(assets_dir, 'test')
# make directory if not exist
os.makedirs(log_dir, exist_ok=True)
os.makedirs(ckpt_dir, exist_ok=True)
os.makedirs(sample_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
# ============= Experiment Parameters =============
name = config['name']
image_dir = config['image_dir']
BATCHSIZE = config['BATCHSIZE']
channels = config['num_channel']
IMG_H = config['IMG_H']
IMG_W = config['IMG_W']
MINIBATCH = config['MINIBATCH']
NUMS_PROPOSAL = config['NUMS_PROPOSAL']
NMS_THRESHOLD = config['NMS_THRESHOLD']
XML_PATH = config['XML_PATH']
IMG_PATH = config['IMG_PATH']
CLASSES = config['CLASSES']
CROP = config['CROP']
SUFFIX = config['SUFFIX']
starting_step = config['starting_step']
# ============= Data =============
xml_files = pre_process_xml(XML_PATH, CLASSES)
print("The classification CLASSES are: ")
print(CLASSES)
print('The size of the training set: ', len(xml_files))
fp = open(os.path.join(log_dir, 'setting.txt'), 'w')
fp.write('config_file:'+str(config_path)+'\n')
fp.close()
# ============= Model =============
with tf.device('/gpu:0'):
imgs = tf.placeholder(tf.float32, [BATCHSIZE, IMG_H, IMG_W, 1])
bbox_indxs = tf.placeholder(tf.int32, [BATCHSIZE, MINIBATCH])
masks = tf.placeholder(tf.int32, [BATCHSIZE, MINIBATCH])
target_bboxes = tf.placeholder(tf.float32, [BATCHSIZE, MINIBATCH, 4])
learning_rate = tf.placeholder(tf.float32)
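# Shared VGG-16 backbone + RPN head. The RPN loss below is softmax cross-entropy
# over the sampled anchors plus smooth-L1 box regression, both normalized by the
# number of positive anchors, with an extra 5e-4 * L2 penalty on all trainable
# variables.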
vgg_logits = vgg_16(imgs)
cls, reg = rpn(vgg_logits)
cls_logits = tf.concat([tf.nn.embedding_lookup(cls[i], bbox_indxs[i])[tf.newaxis] for i in range(BATCHSIZE)], axis=0)
reg_logits = tf.concat([tf.nn.embedding_lookup(reg[i], bbox_indxs[i])[tf.newaxis] for i in range(BATCHSIZE)], axis=0)
one_hot = tf.one_hot(masks, 2)
pos_nums = tf.reduce_sum(tf.cast(masks, dtype=tf.float32))
loss_cls = tf.reduce_sum(-tf.log(tf.reduce_sum(tf.nn.softmax(cls_logits) * one_hot, axis=-1) + 1e-10)) / pos_nums
loss_reg = tf.reduce_sum(tf.reduce_sum(smooth_l1(reg_logits, target_bboxes), axis=-1) * tf.cast(masks, dtype=tf.float32)) / pos_nums
regular = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
total_loss = loss_cls + loss_reg + regular * 0.0005
with tf.variable_scope("Opt"):
Opt = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(total_loss)
# ============= Session =============
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# ============= Load VGG =============
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="vgg_16"))
saver.restore(sess, os.path.join(main_dir, config['vgg_checkpoint']))
print("VGG16 checkpoint restored", config['vgg_checkpoint'])
saver = tf.train.Saver()
# ============= Generate Initial Anchors =============
anchors = generate_anchors(IMG_H, IMG_W)
if starting_step == 1:
print("******************** STEP-1 *********************************************")
# ============= Round-1 RPN =============
for i in range(2000):
BATCH_IMGS, BATCH_IDXS, TARGET_BBOXS, MASKS = read_batch(anchors, XML_PATH, IMG_PATH, CLASSES, BATCHSIZE, IMG_H, IMG_W, channels, MINIBATCH, CROP, SUFFIX) # (2, 256, 256, 1), (2, 64), (2, 64, 4), (2, 64)
sess.run(Opt, feed_dict={imgs: BATCH_IMGS, bbox_indxs: BATCH_IDXS, masks: MASKS, target_bboxes: TARGET_BBOXS, learning_rate: 0.001})
if i % 100 == 0:
[LOSS_CLS, LOSS_REG, TOTAL_LOSS] = sess.run([loss_cls, loss_reg, total_loss],
feed_dict={imgs: BATCH_IMGS, bbox_indxs: BATCH_IDXS, masks: MASKS, target_bboxes: TARGET_BBOXS})
print("Iteration: %d, total_loss: %f, loss_cls: %f, loss_reg: %f" % (i, TOTAL_LOSS, LOSS_CLS, LOSS_REG))
if i % 1000 == 0:
saver.save(sess, os.path.join(ckpt_dir,"model_rpn_"+name+".ckpt"))
saver.save(sess, os.path.join(ckpt_dir,"model_rpn_"+name+".ckpt"))
starting_step = 2
# ============= Update Proposal =============
if starting_step == 2:
print("******************** STEP-2 *********************************************")
cls, reg = cls[0], reg[0]
scores = tf.nn.softmax(cls)[:, 1]
anchors = tf.constant(anchors, dtype=tf.float32)
# pdb.set_trace()  # debug breakpoint disabled so step 2 can run unattended
normal_bbox, reverse_bbox = offset2bbox(reg, anchors)
nms_idxs = tf.image.non_max_suppression(reverse_bbox, scores, max_output_size=2000, iou_threshold=NMS_THRESHOLD)
bboxes = tf.nn.embedding_lookup(normal_bbox, nms_idxs)[:NUMS_PROPOSAL]
saver.restore(sess, os.path.join(ckpt_dir,"model_rpn_"+name+".ckpt"))
print("RPN checkpoint restored!!!", os.path.join(ckpt_dir,"model_rpn_"+name+".ckpt"))
proposal_data = {}
for idx, filename in enumerate(xml_files):
img_names = np.asarray([IMG_PATH + filename[:-4] + SUFFIX])
img = load_images_and_labels(img_names,
image_dir='',
input_size=IMG_W,
crop_size=-1,
num_channel=1)
print(img.shape)
#img = np.array(Image.open(IMG_PATH + filename[:-3] + "jpg").resize([IMG_W, IMG_H]))
#img = np.expand_dims(img, axis=-1)
# pdb.set_trace()  # debug breakpoint disabled so step 2 can run unattended
BBOX = sess.run(bboxes, feed_dict={imgs: img})
# pdb.set_trace()  # debug breakpoint disabled so step 2 can run unattended
x, y = (BBOX[:, 0:1] + BBOX[:, 2:3]) / 2, (BBOX[:, 1:2] + BBOX[:, 3:4]) / 2
w, h = BBOX[:, 2:3] - BBOX[:, 0:1], BBOX[:, 3:4] - BBOX[:, 1:2]
BBOX = np.concatenate((x, y, w, h), axis=-1)
proposal_data[filename] = BBOX
print("Total: %d, Current: %d"%(len(xml_files), idx))
sio.savemat(os.path.join(ckpt_dir,"proposal_step2_"+name+".mat"), proposal_data)
print("Proposal Data Saved!!!", os.path.join(ckpt_dir,"proposal_step2_"+name+".mat"))
starting_step = 3
if starting_step == 3:
print("******************** STEP-3 *********************************************")
proposals = sio.loadmat(os.path.join(ckpt_dir,"proposal_step2_"+name+".mat"))
tf.reset_default_graph()
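# Step 3 rebuilds the graph (the placeholder shapes change) and trains the
# Fast R-CNN classification/regression head on the fixed proposals saved in
# step 2, again starting from the pretrained VGG-16 weights.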
imgs = tf.placeholder(tf.float32, [BATCHSIZE, IMG_H, IMG_W, 1])
batch_proposal = tf.placeholder(tf.float32, [BATCHSIZE * MINIBATCH, 4])
target_bboxes = tf.placeholder(tf.float32, [BATCHSIZE * MINIBATCH, 4])
target_bboxes_idx = tf.placeholder(tf.int32, [BATCHSIZE * MINIBATCH])#for roi pooling
target_classes = tf.placeholder(tf.int32, [BATCHSIZE * MINIBATCH])
masks = tf.placeholder(tf.float32, [BATCHSIZE * MINIBATCH])
learning_rate = tf.placeholder(tf.float32)
batch_proposal_ = xywh2x1y1x2y2(batch_proposal, IMG_H, IMG_W)#for roi pooling
cls, reg = network(imgs, batch_proposal_, target_bboxes_idx, CLASSES)
print(cls)
one_hot = tf.one_hot(target_classes, len(CLASSES) + 1)
pos_nums = tf.reduce_sum(tf.cast(masks, dtype=tf.float32))
loss_cls = tf.reduce_sum(-tf.log(tf.reduce_sum(tf.nn.softmax(cls) * one_hot, axis=-1) + 1e-10)) / pos_nums
loss_reg = tf.reduce_sum(tf.reduce_sum(smooth_l1(reg, target_bboxes), axis=-1) * masks) / pos_nums
regular = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
total_loss = loss_cls + loss_reg + regular * 0.0005
with tf.variable_scope("Opt"):
Opt = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(total_loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="vgg_16"))
saver.restore(sess, os.path.join(main_dir, config['vgg_checkpoint']))
print("VGG16 checkpoint restored", config['vgg_checkpoint'])
saver = tf.train.Saver()
for i in range(2000):
BATCH_IMGS, BATCH_PROPOSAL, TARGET_BBOXES, TARGET_BBOXES_IDX, TARGET_CLASSES, MASKS = read_batch_rcnn(proposals, CLASSES, xml_files, XML_PATH, IMG_PATH, BATCHSIZE, MINIBATCH, IMG_H, IMG_W, CROP, SUFFIX)
_ = sess.run([Opt], feed_dict={imgs: BATCH_IMGS, batch_proposal: BATCH_PROPOSAL, masks: MASKS, target_bboxes: TARGET_BBOXES, target_bboxes_idx: TARGET_BBOXES_IDX,target_classes: TARGET_CLASSES, learning_rate: 0.001})
if i % 100 == 0:
[LOSS_CLS, LOSS_REG, TOTAL_LOSS] = sess.run([loss_cls, loss_reg, total_loss], feed_dict={imgs: BATCH_IMGS, batch_proposal: BATCH_PROPOSAL, masks: MASKS, target_bboxes: TARGET_BBOXES, target_bboxes_idx: TARGET_BBOXES_IDX,target_classes: TARGET_CLASSES, learning_rate: 0.001})
print("Iteration: %d, total_loss: %f, loss_cls: %f, loss_reg: %f" % (i, TOTAL_LOSS, LOSS_CLS, LOSS_REG))
#pdb.set_trace()
if i % 1000 == 0:
saver.save(sess, os.path.join(ckpt_dir,"model_frcnn_"+name+".ckpt"))
saver.save(sess, os.path.join(ckpt_dir,"model_frcnn_"+name+".ckpt"))
starting_step = 4
if starting_step == 4:
print("******************** STEP-4 *********************************************")
tf.reset_default_graph()
imgs = tf.placeholder(tf.float32, [None, IMG_H, IMG_W, 1])
bbox_indxs = tf.placeholder(tf.int32, [BATCHSIZE, MINIBATCH])
masks = tf.placeholder(tf.int32, [BATCHSIZE, MINIBATCH])
target_bboxes = tf.placeholder(tf.float32, [BATCHSIZE, MINIBATCH, 4])
learning_rate = tf.placeholder(tf.float32)
vgg_logits = vgg_16(imgs)
cls, reg = rpn(vgg_logits)
cls_logits = tf.concat([tf.nn.embedding_lookup(cls[i], bbox_indxs[i])[tf.newaxis] for i in range(BATCHSIZE)], axis=0)
reg_logits = tf.concat([tf.nn.embedding_lookup(reg[i], bbox_indxs[i])[tf.newaxis] for i in range(BATCHSIZE)], axis=0)
one_hot = tf.one_hot(masks, 2)
pos_nums = tf.reduce_sum(tf.cast(masks, dtype=tf.float32))
loss_cls = tf.reduce_sum(-tf.log(tf.reduce_sum(tf.nn.softmax(cls_logits) * one_hot, axis=-1) + 1e-10)) / pos_nums
loss_reg = tf.reduce_sum(tf.reduce_sum(smooth_l1(reg_logits, target_bboxes), axis=-1) * tf.cast(masks, dtype=tf.float32)) / pos_nums
regular = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
total_loss = loss_cls + loss_reg + regular * 0.0005
trainable_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="rpn")
with tf.variable_scope("Opt"):
Opt = tf.train.MomentumOptimizer(learning_rate, momentum= 0.9).minimize(total_loss, var_list=trainable_var)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="vgg_16"))
saver.restore(sess, os.path.join(ckpt_dir,"model_frcnn_"+name+".ckpt"))
print("step 4 load: ", os.path.join(ckpt_dir,"model_frcnn_"+name+".ckpt"))
saver = tf.train.Saver()
anchors = generate_anchors(IMG_H, IMG_W)
for i in range(2000):
s = time.time()
BATCH_IMGS, BATCH_IDXS, TARGET_BBOXS, MASKS = read_batch(anchors, XML_PATH, IMG_PATH, CLASSES, BATCHSIZE, IMG_H, IMG_W, channels, MINIBATCH, CROP, SUFFIX)
e = time.time()
read_time = e - s
s = time.time()
sess.run(Opt, feed_dict={imgs: BATCH_IMGS, bbox_indxs: BATCH_IDXS, masks: MASKS, target_bboxes: TARGET_BBOXS, learning_rate: 0.001})
e = time.time()
update_time = e - s
if i % 100 == 0:
[LOSS_CLS, LOSS_REG, TOTAL_LOSS] = sess.run([loss_cls, loss_reg, total_loss],
feed_dict={imgs: BATCH_IMGS, bbox_indxs: BATCH_IDXS, masks: MASKS, target_bboxes: TARGET_BBOXS})
print("Iteration: %d, total_loss: %f, loss_cls: %f, loss_reg: %f, read_time: %f, update_time: %f" % (i, TOTAL_LOSS, LOSS_CLS, LOSS_REG, read_time, update_time))
if i % 1000 == 0:
saver.save(sess, os.path.join(ckpt_dir,"model_rpn_step4_"+name+".ckpt"))
saver.save(sess, os.path.join(ckpt_dir,"model_rpn_step4_"+name+".ckpt"))
starting_step = 5
if starting_step == 5:
print("******************** STEP-5 *********************************************")
cls, reg = cls[0], reg[0]
scores = tf.nn.softmax(cls)[:, 1]
anchors = tf.constant(anchors, dtype=tf.float32)
normal_bbox, reverse_bbox = offset2bbox(reg, anchors)
nms_idxs = tf.image.non_max_suppression(reverse_bbox, scores, max_output_size=2000, iou_threshold=NMS_THRESHOLD)
bboxes = tf.nn.embedding_lookup(normal_bbox, nms_idxs)[:NUMS_PROPOSAL]
saver = tf.train.Saver()
saver.restore(sess, os.path.join(ckpt_dir,"model_rpn_step4_"+name+".ckpt"))
proposal_data = {}
for idx, filename in enumerate(xml_files):
img_names = np.asarray([IMG_PATH + filename[:-4] + SUFFIX])
img = load_images_and_labels(img_names,
image_dir='',
input_size=IMG_W,
crop_size=-1,
num_channel=1)
#img = np.array(Image.open(IMG_PATH + filename[:-3] + "jpg").resize([IMG_W, IMG_H]))
#img = np.expand_dims(img, axis=-1)
BBOX = sess.run(bboxes, feed_dict={imgs: img})
x, y = (BBOX[:, 0:1] + BBOX[:, 2:3]) / 2, (BBOX[:, 1:2] + BBOX[:, 3:4]) / 2
w, h = BBOX[:, 2:3] - BBOX[:, 0:1], BBOX[:, 3:4] - BBOX[:, 1:2]
BBOX = np.concatenate((x, y, w, h), axis=-1)
proposal_data[filename] = BBOX
print("Total: %d, Current: %d"%(len(xml_files), idx))
sio.savemat(os.path.join(ckpt_dir,"proposal_step5_"+name+".mat"), proposal_data)
print("Proposal Data Saved!!!", os.path.join(ckpt_dir,"proposal_step5_"+name+".mat"))
starting_step = 6
if starting_step == 6:
print("********************STEP-6*********************************************")
proposals = sio.loadmat(os.path.join(ckpt_dir,"proposal_step5_"+name+".mat"))
tf.reset_default_graph()
imgs = tf.placeholder(tf.float32, [BATCHSIZE, IMG_H, IMG_W, 1])
batch_proposal = tf.placeholder(tf.float32, [BATCHSIZE * MINIBATCH, 4])
target_bboxes = tf.placeholder(tf.float32, [BATCHSIZE * MINIBATCH, 4])
target_bboxes_idx = tf.placeholder(tf.int32, [BATCHSIZE * MINIBATCH])#for roi pooling
target_classes = tf.placeholder(tf.int32, [BATCHSIZE * MINIBATCH])
masks = tf.placeholder(tf.float32, [BATCHSIZE * MINIBATCH])
learning_rate = tf.placeholder(tf.float32)
batch_proposal_ = xywh2x1y1x2y2(batch_proposal, IMG_H, IMG_W)#for roi pooling
cls, reg = network(imgs, batch_proposal_, target_bboxes_idx, CLASSES)
one_hot = tf.one_hot(target_classes, len(CLASSES) + 1)
pos_nums = tf.reduce_sum(tf.cast(masks, dtype=tf.float32))
loss_cls = tf.reduce_sum(-tf.log(tf.reduce_sum(tf.nn.softmax(cls) * one_hot, axis=-1) + 1e-10)) / pos_nums
loss_reg = tf.reduce_sum(tf.reduce_sum(smooth_l1(reg, target_bboxes), axis=-1) * masks) / pos_nums
regular = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
total_loss = loss_cls + loss_reg + regular * 0.0005
trainable_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="vgg_16/fc") +\
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="classification") + \
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="regression")
with tf.variable_scope("Opt"):
Opt = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(total_loss, var_list=trainable_var)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, os.path.join(ckpt_dir,"model_frcnn_"+name+".ckpt"))
print("step 6 load: ", os.path.join(ckpt_dir,"model_frcnn_"+name+".ckpt"))
for i in range(2001):
BATCH_IMGS, BATCH_PROPOSAL, TARGET_BBOXES, TARGET_BBOXES_IDX, TARGET_CLASSES, MASKS = read_batch_rcnn(proposals, CLASSES, xml_files, XML_PATH, IMG_PATH, BATCHSIZE, MINIBATCH, IMG_H, IMG_W, CROP, SUFFIX)
sess.run(Opt, feed_dict={imgs: BATCH_IMGS, batch_proposal: BATCH_PROPOSAL, masks: MASKS, target_bboxes: TARGET_BBOXES, target_bboxes_idx: TARGET_BBOXES_IDX,target_classes: TARGET_CLASSES, learning_rate: 0.001})
if i % 100 == 0:
[LOSS_CLS, LOSS_REG, TOTAL_LOSS] = sess.run([loss_cls, loss_reg, total_loss], feed_dict={imgs: BATCH_IMGS, batch_proposal: BATCH_PROPOSAL, masks: MASKS,
target_bboxes: TARGET_BBOXES, target_bboxes_idx: TARGET_BBOXES_IDX,target_classes: TARGET_CLASSES, learning_rate: 0.001})
print("Iteration: %d, total_loss: %f, loss_cls: %f, loss_reg: %f" % (i, TOTAL_LOSS, LOSS_CLS, LOSS_REG))
if i % 1000 == 0:
saver.save(sess, os.path.join(ckpt_dir,"model_frcnn_step6_"+name+".ckpt"))
saver.save(sess, os.path.join(ckpt_dir,"model_frcnn_step6_"+name+".ckpt"))
if __name__ == "__main__":
Train()
| 56.659824
| 290
| 0.617152
|
0ce7e60f4fa3ffd949d39919bda549c80c6d17e1
| 21,135
|
py
|
Python
|
train.py
|
avasalya/yolact
|
14c9eceae0432219084f9157c5ea00a246ff1cc6
|
[
"MIT"
] | null | null | null |
train.py
|
avasalya/yolact
|
14c9eceae0432219084f9157c5ea00a246ff1cc6
|
[
"MIT"
] | null | null | null |
train.py
|
avasalya/yolact
|
14c9eceae0432219084f9157c5ea00a246ff1cc6
|
[
"MIT"
] | null | null | null |
from data import *
from utils.augmentations import SSDAugmentation, BaseTransform
from utils.functions import MovingAverage, SavePath
from utils.logger import Log
from utils import timer
from layers.modules import MultiBoxLoss
from yolact import Yolact
import os
import sys
import time
import math, random
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
import datetime
# Oof
import eval as eval_script
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Yolact Training Script')
parser.add_argument('--batch_size', default=8, type=int,
help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
help='Checkpoint state_dict file to resume training from. If this is "interrupt"'\
', the model will resume training from the interrupt file.')
parser.add_argument('--start_iter', default=-1, type=int,
help='Resume training at this iter. If this is -1, the iteration will be'\
'determined from the file name.')
parser.add_argument('--num_workers', default=4, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use CUDA to train model')
parser.add_argument('--lr', '--learning_rate', default=None, type=float,
help='Initial learning rate. Leave as None to read this from the config.')
parser.add_argument('--momentum', default=None, type=float,
help='Momentum for SGD. Leave as None to read this from the config.')
parser.add_argument('--decay', '--weight_decay', default=None, type=float,
help='Weight decay for SGD. Leave as None to read this from the config.')
parser.add_argument('--gamma', default=None, type=float,
help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')
parser.add_argument('--save_folder', default='weights/',
help='Directory for saving checkpoint models.')
parser.add_argument('--log_folder', default='logs/',
help='Directory for saving logs.')
parser.add_argument('--config', default=None,
help='The config object to use.')
parser.add_argument('--save_interval', default=10000, type=int,
help='The number of iterations between saving the model.')
parser.add_argument('--validation_size', default=5000, type=int,
help='The number of images to use for validation.')
parser.add_argument('--validation_epoch', default=2, type=int,
help='Output validation information every n iterations. If -1, do no validation.')
parser.add_argument('--keep_latest', dest='keep_latest', action='store_true',
help='Only keep the latest checkpoint instead of each one.')
parser.add_argument('--keep_latest_interval', default=100000, type=int,
help='When --keep_latest is on, don\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')
parser.add_argument('--dataset', default=None, type=str,
help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
parser.add_argument('--no_log', dest='log', action='store_false',
help='Don\'t log per iteration information into log_folder.')
parser.add_argument('--log_gpu', dest='log_gpu', action='store_true',
help='Include GPU information in the logs. Nvidia-smi tends to be slow, so set this with caution.')
parser.add_argument('--no_interrupt', dest='interrupt', action='store_false',
help='Don\'t save an interrupt when KeyboardInterrupt is caught.')
parser.add_argument('--batch_alloc', default=None, type=str,
help='If using multiple GPUS, you can set this to be a comma separated list detailing which GPUs should get what local batch size (It should add up to your total batch size).')
parser.add_argument('--no_autoscale', dest='autoscale', action='store_false',
help='YOLACT will automatically scale the lr and the number of iterations depending on the batch size. Set this if you want to disable that.')
parser.set_defaults(keep_latest=False, log=True, log_gpu=False, interrupt=True, autoscale=True)
args = parser.parse_args()
if args.config is not None:
set_cfg(args.config)
if args.dataset is not None:
set_dataset(args.dataset)
if args.autoscale and args.batch_size != 8:
factor = args.batch_size / 8
if __name__ == '__main__':
print('Scaling parameters by %.2f to account for a batch size of %d.' % (factor, args.batch_size))
cfg.lr *= factor
cfg.max_iter //= factor
cfg.lr_steps = [x // factor for x in cfg.lr_steps]
# Update training parameters from the config if necessary
def replace(name):
if getattr(args, name) is None: setattr(args, name, getattr(cfg, name))
replace('lr')
replace('decay')
replace('gamma')
replace('momentum')
# This is managed by set_lr
cur_lr = args.lr
if torch.cuda.device_count() == 0:
print('No GPUs detected. Exiting...')
exit(-1)
if args.batch_size // torch.cuda.device_count() < 6:
if __name__ == '__main__':
print('Per-GPU batch size is less than the recommended limit for batch norm. Disabling batch norm.')
cfg.freeze_bn = True
loss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S', 'I']
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't " +
"using CUDA.\nRun with --cuda for optimal training speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
class NetLoss(nn.Module):
"""
A wrapper for running the network and computing the loss
This is so we can more efficiently use DataParallel.
"""
def __init__(self, net:Yolact, criterion:MultiBoxLoss):
super().__init__()
self.net = net
self.criterion = criterion
def forward(self, images, targets, masks, num_crowds):
preds = self.net(images)
losses = self.criterion(self.net, preds, targets, masks, num_crowds)
return losses
class CustomDataParallel(nn.DataParallel):
"""
This is a custom version of DataParallel that works better with our training data.
It should also be faster than the general case.
"""
def scatter(self, inputs, kwargs, device_ids):
# More like scatter and data prep at the same time. The point is we prep the data in such a way
# that no scatter is necessary, and there's no need to shuffle stuff around different GPUs.
devices = ['cuda:' + str(x) for x in device_ids]
splits = prepare_data(inputs[0], devices, allocation=args.batch_alloc)
return [[split[device_idx] for split in splits] for device_idx in range(len(devices))], \
[kwargs] * len(devices)
def gather(self, outputs, output_device):
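# Stack each loss term across the GPU replicas so the caller can reduce with a
# single .mean() per key (see the training loop below).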
out = {}
for k in outputs[0]:
out[k] = torch.stack([output[k].to(output_device) for output in outputs])
return out
def train():
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
dataset = COCODetection(image_path=cfg.dataset.train_images,
info_file=cfg.dataset.train_info,
transform=SSDAugmentation(MEANS))
if args.validation_epoch > 0:
setup_eval()
val_dataset = COCODetection(image_path=cfg.dataset.valid_images,
info_file=cfg.dataset.valid_info,
transform=BaseTransform(MEANS))
# Parallel wraps the underlying module, but when saving and loading we don't want that
yolact_net = Yolact()
net = yolact_net
net.train()
if args.log:
log = Log(cfg.name, args.log_folder, dict(args._get_kwargs()),
overwrite=(args.resume is None), log_gpu_stats=args.log_gpu)
# I don't use the timer during training (I use a different timing method).
# Apparently there's a race condition with multiple GPUs, so disable it just to be safe.
timer.disable_all()
# Both of these can set args.resume to None, so do them before the check
if args.resume == 'interrupt':
args.resume = SavePath.get_interrupt(args.save_folder)
elif args.resume == 'latest':
args.resume = SavePath.get_latest(args.save_folder, cfg.name)
if args.resume is not None:
print('Resuming training, loading {}...'.format(args.resume))
yolact_net.load_weights(args.resume)
if args.start_iter == -1:
args.start_iter = SavePath.from_str(args.resume).iteration
else:
print('Initializing weights...')
yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.decay)
criterion = MultiBoxLoss(num_classes=cfg.num_classes,
pos_threshold=cfg.positive_iou_threshold,
neg_threshold=cfg.negative_iou_threshold,
negpos_ratio=cfg.ohem_negpos_ratio)
if args.batch_alloc is not None:
args.batch_alloc = [int(x) for x in args.batch_alloc.split(',')]
if sum(args.batch_alloc) != args.batch_size:
print('Error: Batch allocation (%s) does not sum to batch size (%s).' % (args.batch_alloc, args.batch_size))
exit(-1)
net = CustomDataParallel(NetLoss(net, criterion))
if args.cuda:
net = net.cuda()
# Initialize everything
if not cfg.freeze_bn: yolact_net.freeze_bn() # Freeze bn so we don't kill our means
yolact_net(torch.zeros(1, 3, cfg.max_size, cfg.max_size).cuda())
if not cfg.freeze_bn: yolact_net.freeze_bn(True)
# loss counters
loc_loss = 0
conf_loss = 0
iteration = max(args.start_iter, 0)
last_time = time.time()
epoch_size = len(dataset) // args.batch_size
num_epochs = math.ceil(cfg.max_iter / epoch_size)
# Which learning rate adjustment step are we on? lr' = lr * gamma ^ step_index
step_index = 0
data_loader = data.DataLoader(dataset, args.batch_size,
num_workers=args.num_workers,
shuffle=True, collate_fn=detection_collate,
pin_memory=True)
save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)
time_avg = MovingAverage()
global loss_types # Forms the print order
loss_avgs = { k: MovingAverage(100) for k in loss_types }
print('Begin training!')
print()
# try-except so you can use ctrl+c to save early and stop training
try:
for epoch in range(num_epochs):
# Resume from start_iter
if (epoch+1)*epoch_size < iteration:
continue
for datum in data_loader:
# Stop if we've reached an epoch if we're resuming from start_iter
if iteration == (epoch+1)*epoch_size:
break
# Stop at the configured number of iterations even if mid-epoch
if iteration == cfg.max_iter:
break
# Change a config setting if we've reached the specified iteration
changed = False
for change in cfg.delayed_settings:
if iteration >= change[0]:
changed = True
cfg.replace(change[1])
# Reset the loss averages because things might have changed
for avg in loss_avgs.values():
avg.reset()
# If a config setting was changed, remove it from the list so we don't keep checking
if changed:
cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]
# Warm up by linearly interpolating the learning rate from some smaller value
if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:
set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)
# Adjust the learning rate at the given iterations, but also if we resume from past that iteration
while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:
step_index += 1
set_lr(optimizer, args.lr * (args.gamma ** step_index))
# Zero the grad to get ready to compute gradients
optimizer.zero_grad()
# Forward Pass + Compute loss at the same time (see CustomDataParallel and NetLoss)
losses = net(datum)
losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel
loss = sum([losses[k] for k in losses])
# no_inf_mean removes some components from the loss, so make sure to backward through all of it
# all_loss = sum([v.mean() for v in losses.values()])
# Backprop
loss.backward() # Do this to free up vram even if loss is not finite
if torch.isfinite(loss).item():
optimizer.step()
# Add the loss to the moving average for bookkeeping
for k in losses:
loss_avgs[k].add(losses[k].item())
cur_time = time.time()
elapsed = cur_time - last_time
last_time = cur_time
# Exclude graph setup from the timing information
if iteration != args.start_iter:
time_avg.add(elapsed)
if iteration % 10 == 0:
eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]
total = sum([loss_avgs[k].get_avg() for k in losses])
loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])
print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')
% tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)
if args.log:
precision = 5
loss_info = {k: round(losses[k].item(), precision) for k in losses}
loss_info['T'] = round(loss.item(), precision)
if args.log_gpu:
log.log_gpu_stats = (iteration % 10 == 0) # nvidia-smi is sloooow
log.log('train', loss=loss_info, epoch=epoch, iter=iteration,
lr=round(cur_lr, 10), elapsed=elapsed)
log.log_gpu_stats = args.log_gpu
iteration += 1
if iteration % args.save_interval == 0 and iteration != args.start_iter:
if args.keep_latest:
latest = SavePath.get_latest(args.save_folder, cfg.name)
print('Saving state, iter:', iteration)
yolact_net.save_weights(save_path(epoch, iteration))
if args.keep_latest and latest is not None:
if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:
print('Deleting old save...')
os.remove(latest)
# This is done per epoch
if args.validation_epoch > 0:
if epoch % args.validation_epoch == 0 and epoch > 0:
compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)
# Compute validation mAP after training is finished
compute_validation_map(epoch, iteration, yolact_net, val_dataset, log if args.log else None)
except KeyboardInterrupt:
if args.interrupt:
print('Stopping early. Saving network...')
# Delete previous copy of the interrupted network so we don't spam the weights folder
SavePath.remove_interrupt(args.save_folder)
yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))
exit()
yolact_net.save_weights(save_path(epoch, iteration))
def set_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
global cur_lr
cur_lr = new_lr
def gradinator(x):
x.requires_grad = False
return x
def prepare_data(datum, devices:list=None, allocation:list=None):
with torch.no_grad():
if devices is None:
devices = ['cuda:0'] if args.cuda else ['cpu']
if allocation is None:
allocation = [args.batch_size // len(devices)] * (len(devices) - 1)
allocation.append(args.batch_size - sum(allocation)) # The rest might need more/less
images, (targets, masks, num_crowds) = datum
cur_idx = 0
for device, alloc in zip(devices, allocation):
for _ in range(alloc):
images[cur_idx] = gradinator(images[cur_idx].to(device))
targets[cur_idx] = gradinator(targets[cur_idx].to(device))
masks[cur_idx] = gradinator(masks[cur_idx].to(device))
cur_idx += 1
if cfg.preserve_aspect_ratio:
# Choose a random size from the batch
_, h, w = images[random.randint(0, len(images)-1)].size()
for idx, (image, target, mask, num_crowd) in enumerate(zip(images, targets, masks, num_crowds)):
images[idx], targets[idx], masks[idx], num_crowds[idx] \
= enforce_size(image, target, mask, num_crowd, w, h)
cur_idx = 0
split_images, split_targets, split_masks, split_numcrowds \
= [[None for alloc in allocation] for _ in range(4)]
for device_idx, alloc in enumerate(allocation):
split_images[device_idx] = torch.stack(images[cur_idx:cur_idx+alloc], dim=0)
split_targets[device_idx] = targets[cur_idx:cur_idx+alloc]
split_masks[device_idx] = masks[cur_idx:cur_idx+alloc]
split_numcrowds[device_idx] = num_crowds[cur_idx:cur_idx+alloc]
cur_idx += alloc
return split_images, split_targets, split_masks, split_numcrowds
def no_inf_mean(x:torch.Tensor):
"""
Computes the mean of a vector, throwing out all inf values.
If there are no non-inf values, this will return inf (i.e., just the normal mean).
"""
no_inf = [a for a in x if torch.isfinite(a)]
if len(no_inf) > 0:
return sum(no_inf) / len(no_inf)
else:
return x.mean()
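# Worked example (editorial): no_inf_mean(torch.tensor([1., 2., float('inf')]))
# drops the inf and returns 1.5, while an all-inf input falls back to x.mean().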
def compute_validation_loss(net, data_loader, criterion):
global loss_types
with torch.no_grad():
losses = {}
# Don't switch to eval mode because we want to get losses
iterations = 0
for datum in data_loader:
images, targets, masks, num_crowds = prepare_data(datum)
out = net(images)
wrapper = ScatterWrapper(targets, masks, num_crowds)
_losses = criterion(out, wrapper, wrapper.make_mask())
for k, v in _losses.items():
v = v.mean().item()
if k in losses:
losses[k] += v
else:
losses[k] = v
iterations += 1
if args.validation_size <= iterations * args.batch_size:
break
for k in losses:
losses[k] /= iterations
loss_labels = sum([[k, losses[k]] for k in loss_types if k in losses], [])
print(('Validation ||' + (' %s: %.3f |' * len(losses)) + ')') % tuple(loss_labels), flush=True)
def compute_validation_map(epoch, iteration, yolact_net, dataset, log:Log=None):
with torch.no_grad():
yolact_net.eval()
start = time.time()
print()
print("Computing validation mAP (this may take a while)...", flush=True)
val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)
end = time.time()
if log is not None:
log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)
yolact_net.train()
def setup_eval():
eval_script.parse_args(['--no_bar', '--max_images='+str(args.validation_size)])
if __name__ == '__main__':
train()
| 41.851485
| 196
| 0.617743
|
efc86815bc49d6cd0430569fd656deb7363871bc
| 2,075
|
py
|
Python
|
caffe2/python/operator_test/clip_tensor_op_test.py
|
MagiaSN/pytorch
|
7513455c743d3d644b45a804902c1a0d14b69f45
|
[
"Intel"
] | 206
|
2020-11-28T22:56:38.000Z
|
2022-03-27T02:33:04.000Z
|
caffe2/python/operator_test/clip_tensor_op_test.py
|
MagiaSN/pytorch
|
7513455c743d3d644b45a804902c1a0d14b69f45
|
[
"Intel"
] | 19
|
2020-12-09T23:13:14.000Z
|
2022-01-24T23:24:08.000Z
|
caffe2/python/operator_test/clip_tensor_op_test.py
|
MagiaSN/pytorch
|
7513455c743d3d644b45a804902c1a0d14b69f45
|
[
"Intel"
] | 28
|
2020-11-29T15:25:12.000Z
|
2022-01-20T02:16:27.000Z
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
class TestClipTensorByScalingOp(serial.SerializedTestCase):
@given(n=st.integers(5, 8), d=st.integers(2, 4),
threshold=st.floats(0.1, 10),
additional_threshold=st.floats(0.1, 10),
use_additional_threshold=st.booleans(),
inplace=st.booleans(),
**hu.gcs_cpu_only)
@settings(deadline=1000)
def test_clip_tensor_by_scaling(self, n, d, threshold, additional_threshold,
use_additional_threshold, inplace, gc, dc):
tensor = np.random.rand(n, d).astype(np.float32)
val = np.array(np.linalg.norm(tensor))
additional_threshold = np.array([additional_threshold]).astype(np.float32)
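# Reference semantics: when the L2 norm of the tensor exceeds the (optionally
# additionally-scaled) threshold, the whole tensor is shrunk by
# final_threshold / norm; otherwise it is returned unchanged.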
def clip_tensor_by_scaling_ref(tensor_data, val_data,
additional_threshold=None):
if additional_threshold is not None:
final_threshold = threshold * additional_threshold
else:
final_threshold = threshold
if val_data > final_threshold:
ratio = final_threshold / float(val_data)
tensor_data = tensor_data * ratio
return [tensor_data]
op = core.CreateOperator(
"ClipTensorByScaling",
["tensor", "val"] if not use_additional_threshold else (
["tensor", "val", "additional_threshold"]),
['Y'] if not inplace else ["tensor"],
threshold=threshold,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[tensor, val] if not use_additional_threshold else (
[tensor, val, additional_threshold]),
reference=clip_tensor_by_scaling_ref,
)
if __name__ == "__main__":
import unittest
unittest.main()
| 32.421875
| 82
| 0.618313
|
20632b76248c8f6f81969b3d7c386a4814a19b7d
| 133
|
py
|
Python
|
seq2video/__init__.py
|
marcocaggioni/seq2video
|
53de4e7b6ae90c4544da590575d559bbae82ccee
|
[
"MIT"
] | null | null | null |
seq2video/__init__.py
|
marcocaggioni/seq2video
|
53de4e7b6ae90c4544da590575d559bbae82ccee
|
[
"MIT"
] | null | null | null |
seq2video/__init__.py
|
marcocaggioni/seq2video
|
53de4e7b6ae90c4544da590575d559bbae82ccee
|
[
"MIT"
] | null | null | null |
"""Top-level package for Uptimer."""
__author__ = """Marco Caggioni"""
__email__ = "marco.caggioni@gmail.com"
__version__ = "0.1.0"
| 22.166667
| 38
| 0.691729
|
8a05fc1e44174ca88b9bfd39b61fbe604c16fefe
| 779
|
py
|
Python
|
src/modules/agents/rnn_agent.py
|
OscarPedaVendere/sc2MultiAgentES
|
54df34eb94f00294c2bcd1ac5a0c14386c0bbceb
|
[
"Apache-2.0"
] | null | null | null |
src/modules/agents/rnn_agent.py
|
OscarPedaVendere/sc2MultiAgentES
|
54df34eb94f00294c2bcd1ac5a0c14386c0bbceb
|
[
"Apache-2.0"
] | 4
|
2021-03-19T03:43:34.000Z
|
2022-01-13T01:39:12.000Z
|
src/modules/agents/rnn_agent.py
|
OscarPedaVendere/sc2MultiAgentES
|
54df34eb94f00294c2bcd1ac5a0c14386c0bbceb
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
class RNNAgent(nn.Module):
def __init__(self, input_shape, args):
super(RNNAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
def init_hidden(self):
# make hidden states on same device as model
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state, ep_batch):
x = F.relu(self.fc1(inputs))
h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, h_in)
q = self.fc2(h)
return q, h
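# Illustrative call pattern (editorial sketch; shapes are assumptions, not taken
# from this file):
#   agent = RNNAgent(input_shape, args)
#   h = agent.init_hidden().expand(batch_size * args.n_agents, -1)
#   q, h = agent(obs, h, ep_batch)  # obs: [batch_size * n_agents, input_shape]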
| 33.869565
| 71
| 0.655969
|
9bca5b5a57372c95bdff0bcae4a91ad755ffa764
| 3,806
|
py
|
Python
|
tests/test_config_lib_field.py
|
MacHu-GWU/configirl-project
|
77105874514c5520aa47aa25d89c28f51b152582
|
[
"MIT"
] | 1
|
2019-07-27T17:38:32.000Z
|
2019-07-27T17:38:32.000Z
|
tests/test_config_lib_field.py
|
MacHu-GWU/configirl-project
|
77105874514c5520aa47aa25d89c28f51b152582
|
[
"MIT"
] | null | null | null |
tests/test_config_lib_field.py
|
MacHu-GWU/configirl-project
|
77105874514c5520aa47aa25d89c28f51b152582
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Test field related options
"""
import pytest
from pytest import raises
from datetime import datetime
from configirl import ConfigClass, Constant, Derivable
from configirl import ValueNotSetError, DontDumpError, DerivableSetValueError
class Config(ConfigClass):
PROJECT_NAME = Constant()
STAGE = Constant()
PROJECT_NAME_SLUG = Derivable()
@PROJECT_NAME_SLUG.getter
def get_PROJECT_NAME_SLUG(self):
return self.PROJECT_NAME.get_value().replace("_", "-")
@PROJECT_NAME_SLUG.validator
def check_PROJECT_NAME_SLUG(self, value):
if "_" in value:
raise ValueError("you can't use `_` in slugifie name!")
PROJECT_NAME_SUPER_SLUG_CACHE_HIT = Constant(default=0)
PROJECT_NAME_SUPER_SLUG = Derivable(cache=True)
@PROJECT_NAME_SUPER_SLUG.getter
def get_PROJECT_NAME_SUPER_SLUG(self, sep):
self.PROJECT_NAME_SUPER_SLUG_CACHE_HIT.set_value(
self.PROJECT_NAME_SUPER_SLUG_CACHE_HIT.get_value() + 1
)
return self.PROJECT_NAME.get_value().replace("_", sep)
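# PROJECT_NAME_SUPER_SLUG is declared with cache=True, so its getter should run
# only once per argument set; the CACHE_HIT counter above exists purely so the
# tests below can assert that caching behaviour.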
ENVIRONMENT_NAME = Derivable()
@ENVIRONMENT_NAME.getter
def get_ENVIRONMENT_NAME(self):
return "{}-{}".format(
self.PROJECT_NAME_SLUG.get_value(),
self.STAGE.get_value(),
)
GIT_PASSWORD = Constant(printable=False)
CREATE_TIME = Constant(default=datetime.utcnow)
METADATA = Constant(dont_dump=True)
def test_value_not_set_error():
"""
Test that ValueNotSetError is correctly raised.
"""
conf = Config()
with raises(ValueNotSetError):
conf.PROJECT_NAME.get_value()
with raises(ValueNotSetError):
conf.PROJECT_NAME_SLUG.get_value()
# when trying to get a derivable value but the dependent constant value
# has not been set yet
try:
conf.PROJECT_NAME_SLUG.get_value()
except Exception as e:
assert "Config.PROJECT_NAME" in str(e)
assert "Config.PROJECT_NAME_SLUG" in str(e)
with raises(ValueNotSetError):
conf.PROJECT_NAME_SLUG.get_value()
try:
conf.ENVIRONMENT_NAME.get_value()
except Exception as e:
assert "PROJECT_NAME" in str(e)
assert "PROJECT_NAME_SLUG" in str(e)
assert "ENVIRONMENT_NAME" in str(e)
def test_default():
conf = Config()
assert isinstance(conf.CREATE_TIME.get_value(), datetime)
def test_dont_dump_error():
"""
Test that DontDumpError is correctly raised.
"""
conf = Config()
with raises(DontDumpError):
conf.METADATA.get_value(check_dont_dump=True)
def test_derivable_set_value_error():
"""
Test that DerivableSetValueError is correctly raised.
"""
with raises(DerivableSetValueError):
Config(ENVIRONMENT_NAME="my-project-dev")
conf = Config()
with raises(DerivableSetValueError):
conf.ENVIRONMENT_NAME.set_value("my-project-dev")
def test_get_value_from_env():
import os
os.environ["CONFIGIRL_PROJECT_NAME"] = "configirl"
conf = Config()
PREFIX = "CONFIGIRL_"
assert conf.PROJECT_NAME.get_value_from_env(PREFIX) == "configirl"
def test_get_value_for_lbd():
conf = Config(PROJECT_NAME="configirl")
PREFIX = "CONFIGIRL_"
assert conf.PROJECT_NAME.get_value_for_lbd(PREFIX) == "configirl"
def test_get_value_with_cache():
config = Config(PROJECT_NAME="my_project")
assert config.PROJECT_NAME_SUPER_SLUG.get_value(sep="--") == "my--project"
assert config.PROJECT_NAME_SUPER_SLUG_CACHE_HIT.get_value() == 1
assert config.PROJECT_NAME_SUPER_SLUG.get_value(sep="--") == "my--project"
assert config.PROJECT_NAME_SUPER_SLUG_CACHE_HIT.get_value() == 1
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| 26.615385
| 78
| 0.694693
|