Dataset schema (one record per source file; ⌀ marks nullable columns):
hexsha: string (len 40) | size: int64 (3–1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (len 3–972) | max_stars_repo_name: string (len 6–130) | max_stars_repo_head_hexsha: string (len 40–78) | max_stars_repo_licenses: list (len 1–10) | max_stars_count: int64 (1–191k) ⌀ | max_stars_repo_stars_event_min_datetime / _max_datetime: string (len 24) ⌀
max_issues_*: same per-repo fields as max_stars_* | max_issues_count: int64 (1–116k) ⌀ | max_issues_repo_issues_event_min_datetime / _max_datetime: string (len 24) ⌀
max_forks_*: same per-repo fields as max_stars_* | max_forks_count: int64 (1–105k) ⌀ | max_forks_repo_forks_event_min_datetime / _max_datetime: string (len 24) ⌀
content: string (len 3–1.03M) | avg_line_length: float64 (1.13–941k) | max_line_length: int64 (2–941k) | alphanum_fraction: float64 (0–1)

hexsha: 58004c27d165d6df826a554942ad5fe8b353a09a | size: 990 | ext: py | lang: Python
path: methods_configuration/function_configuration.py | repo: ekkrym/CovidTrendModel @ 26f49b0e3bf18ef2de5f9ec15f85e8148618c135 | licenses: ["MIT"]
stars: null | issues: null | forks: null

from autoregression import fit_forecasting_loglog, fit_forecasting_ar7_daily, fit_forecast_7ahead, fit_ar_7, fit_ar_log
from misc_methods import fit_spline_linear_extrapolation
from smoothing import simple_mirroring
from poly import poly_fit
from linearfit import linear_model_fit_predict
from poisson import poisson_fit
from benchmark import benchmark
from misc_methods import mean_const, linear
METHODS = {"poly_fit": poly_fit,
"benchmark": benchmark,
"poisson_fit": poisson_fit,
"fit_forecasting_loglog": fit_forecasting_loglog,
"fit_forecasting_ar7_daily": fit_forecasting_ar7_daily,
"fit_spline_linear_extrapolation": fit_spline_linear_extrapolation,
"fit_forecast_7ahead": fit_forecast_7ahead,
"fit_ar_7": fit_ar_7,
"fit_ar_log": fit_ar_log,
"mean_const": mean_const,
"linear": linear,
"linear_model_fit_predict": linear_model_fit_predict}

avg_line_length: 45 | max_line_length: 120 | alphanum_fraction: 0.735354

hexsha: 0bd216ea8924fb714f7118a26bfc04a4eb6e1a27 | size: 1,678 | ext: py | lang: Python
path: pygenomeworks/test/test_overlap_generator.py | repo: pb-cdunn/GenomeWorks @ 84f22f7e72c0fe8e5554d7ddfebf22c93ffb4610 | licenses: ["Apache-2.0"]
stars: 160 (2019-07-02T03:35:10.000Z – 2020-05-05T09:08:26.000Z) | issues: 253 (2019-07-02T13:08:28.000Z – 2020-05-07T18:47:08.000Z) | forks: 54 (2019-07-02T14:33:48.000Z – 2020-05-01T16:04:21.000Z)

#
# Copyright 2019-2020 NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from genomeworks.simulators import readsim
test_reads = [
((("read_0",
"AACGTCA",
100,
900),
("read_1",
"AACGTCA",
100,
900)), 1),
((("read_0",
"AACGTCA",
100,
900),
("read_1",
"AACGTCA",
1000,
9000)), 0),
((("read_1",
"AACGTCA",
100,
900),
("read_0",
"AACGTCA",
100,
900)), 1),
((("read_1",
"AACGTCA",
100,
900),
("read_0",
"AACGTCA",
100,
900)), 1),
((("read_1",
"AACGTCA",
100,
900),
("read_2",
"AACGTCA",
100,
900),
("read_3",
"AACGTCA",
100,
900)), 3),
]
@pytest.mark.cpu
@pytest.mark.parametrize("reads, expected_overlaps", test_reads)
def test_generates_overlaps(reads, expected_overlaps):
""" Test that the number of overlaps detected is correct"""
overlaps = readsim.generate_overlaps(reads, gzip_compressed=False)
assert(len(overlaps) == expected_overlaps)

avg_line_length: 22.078947 | max_line_length: 74 | alphanum_fraction: 0.581049

hexsha: 71cf70b25797c12f682b465a6d35b9c8b1dec4ee | size: 1,530 | ext: py | lang: Python
path: banco de dados/comparação.py | repo: RutyRibeiro/bancoDeDados @ 51d7789cd635b07417b2be56433d83837c32b66d | licenses: ["MIT"]
stars: null | issues: null | forks: null

import mysql.connector
import csv
lista = []
with open('escolas.csv', encoding='utf-8', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')
for row in spamreader:
dicionario={}
dicionario["grupo"]="A1_GRPECON = '{}'".format(row[0].strip())
dicionario["email"]=row[1].strip()
lista.append(dicionario)
print(lista)
config = {
'host': '',
'user': '',
'password': '',
'database': ''
}
def select():
try:
conn = mysql.connector.connect(**config)
print("Acesso ao banco de dados: Conexão Estabelecida ")
except mysql.connector.Error as err:
print(err)
else:
cursor = conn.cursor()
try:
for i, row in enumerate(lista):
buscaDados = """SELECT USR_VINCULO, USR_EMAIL FROM EasyBI.usuarios usr WHERE USR_CODCLI = 3284 AND USR_EMAIL= '%s'"""
email=row['email']
cursor.execute(buscaDados % (email))
resul = cursor.fetchall()
if (resul):
if (row['grupo']!=resul[0][0]):
print (f'\033[33m olhe : {resul[0][1]}\033[m')
else:
print(f'{email} não existe!')
except Exception as e:
print(e)
finally:
cursor.close()
conn.commit()
conn.close()
print("Fechamento do banco de dados: Com sucesso")
select()
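
# Interpolating the e-mail into the SQL string with % is injection-prone.
# A safer, equivalent call using mysql-connector's parameter binding would be:
#   cursor.execute(
#       "SELECT USR_VINCULO, USR_EMAIL FROM EasyBI.usuarios "
#       "WHERE USR_CODCLI = 3284 AND USR_EMAIL = %s", (email,))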

avg_line_length: 27.818182 | max_line_length: 133 | alphanum_fraction: 0.508497

hexsha: f3cff0c5990440235e8f6ce4e9fcdb51e657cc2b | size: 1,253 | ext: py | lang: Python
path: Advanced/pianoTilesBot/PianoTilesBot.py | repo: Sanjulata19/Hacktoberfest_2021-1 @ 720855c9e7e3d1ca04d409cc7defb29381e4a16a | licenses: ["Apache-2.0"]
stars: 1 (2021-10-31T14:33:09.000Z – 2021-10-31T14:33:09.000Z) | issues: 4 (2021-10-31T14:16:14.000Z – 2021-10-31T16:56:12.000Z) | forks: 19 (2021-10-30T06:23:49.000Z – 2021-10-31T14:51:04.000Z)

# install all dependency with pip install -r requirements.txt
import mss
import pyautogui as gui
import time
# globals
x1, x2, x3, x4 = 874, 977, 1078, 1151
y = 360 # constant for mss function
start_y = 500
y2 = y  # increase as the game speed increases
x_cords = [0, x2-x1, x3-x1, x4-x1]
score = 0
def screenShot(): # Screen Capture
with mss.mss() as sct:
gameBox = {"top": y, "left": x1, "width": x4-x1+1, "height": 2}
img = sct.grab(gameBox)
return img
# Starting the game
time.sleep(5)
gui.click(x1, start_y)
gui.click(x2, start_y)
gui.click(x3, start_y)
gui.click(x4, start_y)
while True: # Game loop
img = screenShot()
    # decrease or increase y2 values based on your processor speed
if score == 900:
y2 += 20
if score == 1000:
y2 += 30
if score == 1250:
y2 += 50
if score >= 1400 and score%100==0:
y2 += 20
for x in x_cords:
if img.pixel(x, 0)[0] < 50: # increase to 70 if misses blue tiles
gui.click(x+x1, y2+10)
score += 1
# Fail safe, terminates if mouse goes out of gameBox
mousePos = gui.position()[0]
if x1 > mousePos or x4 < mousePos:
print('Fuck! not again!')
exit(0)

avg_line_length: 25.571429 | max_line_length: 73 | alphanum_fraction: 0.593775

hexsha: c12ce8a6f195238c1f4e1b04a353480b2c433230 | size: 110 | ext: py | lang: Python
path: second.py | repo: ilsung98/coding-exam @ 55bc41cd3be3c07c39a26aa5ab229377936394dc | licenses: ["MIT"]
stars: null | issues: null | forks: null

num_list = list(map(int, input().split()))
high=max(num_list)
low=min(num_list)
print(low,sum(num_list),high)

avg_line_length: 22 | max_line_length: 42 | alphanum_fraction: 0.727273

hexsha: 333a67cbeb4fba8c24f2efb9b0145f4b2bf88118 | size: 6,327 | ext: py | lang: Python
path: src/pytestqt/plugin.py | repo: cafhach/pytest-qt @ 435efd2222c3454eaf05c2853d5efd01a83f84a1 | licenses: ["MIT"]
stars: 252 (2015-03-15T23:18:27.000Z – 2022-03-29T01:41:41.000Z) | issues: 318 (2015-03-25T04:05:01.000Z – 2022-03-11T23:01:06.000Z) | forks: 84 (2015-03-17T20:10:50.000Z – 2022-03-04T15:25:25.000Z)

import pytest
from pytestqt.exceptions import (
_is_exception_capture_enabled,
_QtExceptionCaptureManager,
)
from pytestqt.logging import QtLoggingPlugin, _QtMessageCapture
from pytestqt.qt_compat import qt_api
from pytestqt.qtbot import QtBot, _close_widgets
@pytest.fixture(scope="session")
def qapp_args():
"""
Fixture that provides QApplication arguments to use.
You can override this fixture to pass different arguments to
``QApplication``:
.. code-block:: python
@pytest.fixture(scope="session")
def qapp_args():
return ["--arg"]
"""
return []
@pytest.fixture(scope="session")
def qapp(qapp_args, pytestconfig):
"""
Fixture that instantiates the QApplication instance that will be used by
the tests.
You can use the ``qapp`` fixture in tests which require a ``QApplication``
to run, but where you don't need full ``qtbot`` functionality.
"""
app = qt_api.QtWidgets.QApplication.instance()
if app is None:
global _qapp_instance
_qapp_instance = qt_api.QtWidgets.QApplication(qapp_args)
name = pytestconfig.getini("qt_qapp_name")
_qapp_instance.setApplicationName(name)
return _qapp_instance
else:
return app # pragma: no cover
# holds a global QApplication instance created in the qapp fixture; keeping
# this reference alive avoids it being garbage collected too early
_qapp_instance = None
@pytest.fixture
def qtbot(qapp, request):
"""
    Fixture used to create a QtBot instance for use during testing.
Make sure to call addWidget for each top-level widget you create to ensure
that they are properly closed after the test ends.
"""
result = QtBot(request)
return result
@pytest.fixture
def qtlog(request):
"""Fixture that can access messages captured during testing"""
if hasattr(request._pyfuncitem, "qt_log_capture"):
return request._pyfuncitem.qt_log_capture
else:
return _QtMessageCapture([]) # pragma: no cover
@pytest.fixture
def qtmodeltester(request):
"""
Fixture used to create a ModelTester instance to test models.
"""
from pytestqt.modeltest import ModelTester
tester = ModelTester(request.config)
yield tester
tester._cleanup()
def pytest_addoption(parser):
parser.addini(
"qt_api", 'Qt api version to use: "pyside6" , "pyside2", "pyqt6", "pyqt5"'
)
parser.addini("qt_no_exception_capture", "disable automatic exception capture")
parser.addini(
"qt_default_raising",
"Default value for the raising parameter of qtbot.waitSignal/waitCallback",
)
parser.addini(
"qt_qapp_name", "The Qt application name to use", default="pytest-qt-qapp"
)
default_log_fail = QtLoggingPlugin.LOG_FAIL_OPTIONS[0]
parser.addini(
"qt_log_level_fail",
'log level in which tests can fail: {} (default: "{}")'.format(
QtLoggingPlugin.LOG_FAIL_OPTIONS, default_log_fail
),
default=default_log_fail,
)
parser.addini(
"qt_log_ignore",
"list of regexes for messages that should not cause a tests " "to fails",
type="linelist",
)
group = parser.getgroup("qt", "qt testing")
group.addoption(
"--no-qt-log",
dest="qt_log",
action="store_false",
default=True,
help="disable pytest-qt logging capture",
)
group.addoption(
"--qt-log-format",
dest="qt_log_format",
default=None,
help="defines how qt log messages are displayed.",
)
@pytest.mark.hookwrapper
@pytest.mark.tryfirst
def pytest_runtest_setup(item):
"""
    Hook called before test setup starts, to begin capturing exceptions
as early as possible.
"""
capture_enabled = _is_exception_capture_enabled(item)
if capture_enabled:
item.qt_exception_capture_manager = _QtExceptionCaptureManager()
item.qt_exception_capture_manager.start()
yield
_process_events()
if capture_enabled:
item.qt_exception_capture_manager.fail_if_exceptions_occurred("SETUP")
@pytest.mark.hookwrapper
@pytest.mark.tryfirst
def pytest_runtest_call(item):
yield
_process_events()
capture_enabled = _is_exception_capture_enabled(item)
if capture_enabled:
item.qt_exception_capture_manager.fail_if_exceptions_occurred("CALL")
@pytest.mark.hookwrapper
@pytest.mark.trylast
def pytest_runtest_teardown(item):
"""
    Hook called after each test teardown, to process any pending events and
    avoid leaking events into the next test. Also, if exceptions have
been captured during fixtures teardown, fail the test.
"""
_process_events()
_close_widgets(item)
_process_events()
yield
_process_events()
capture_enabled = _is_exception_capture_enabled(item)
if capture_enabled:
item.qt_exception_capture_manager.fail_if_exceptions_occurred("TEARDOWN")
item.qt_exception_capture_manager.finish()
def _process_events():
"""Calls app.processEvents() while taking care of capturing exceptions
or not based on the given item's configuration.
"""
app = qt_api.QtWidgets.QApplication.instance()
if app is not None:
app.processEvents()
def pytest_configure(config):
config.addinivalue_line(
"markers",
"qt_no_exception_capture: Disables pytest-qt's automatic exception "
"capture for just one test item.",
)
config.addinivalue_line(
"markers", "qt_log_level_fail: overrides qt_log_level_fail ini option."
)
config.addinivalue_line(
"markers", "qt_log_ignore: overrides qt_log_ignore ini option."
)
config.addinivalue_line("markers", "no_qt_log: Turn off Qt logging capture.")
if config.getoption("qt_log") and config.getoption("capture") != "no":
config.pluginmanager.register(QtLoggingPlugin(config), "_qt_logging")
qt_api.set_qt_api(config.getini("qt_api"))
def pytest_report_header():
from pytestqt.qt_compat import qt_api
v = qt_api.get_versions()
fields = [
f"{v.qt_api} {v.qt_api_version}",
"Qt runtime %s" % v.runtime,
"Qt compiled %s" % v.compiled,
]
version_line = " -- ".join(fields)
return [version_line]
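
# A minimal downstream test consuming the fixtures above; MyWidget and its
# attributes are placeholders, not part of this plugin:
#
#   def test_button_click(qtbot):
#       widget = MyWidget()
#       qtbot.addWidget(widget)  # ensure the widget is closed after the test
#       qtbot.mouseClick(widget.button, qt_api.QtCore.Qt.MouseButton.LeftButton)
#       assert widget.clicked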

avg_line_length: 29.156682 | max_line_length: 83 | alphanum_fraction: 0.691165

hexsha: 75227c5b9f54492db7e68b5ce6b021752e9e4a5a | size: 611 | ext: py | lang: Python
path: docs/src/db_mongodb.py | repo: nullhack/fastapi-users @ e850871e7935c460e9c169385dd7d42b45e0320c | licenses: ["MIT"]
stars: null | issues: null | forks: null

import motor.motor_asyncio
from fastapi import FastAPI
from fastapi_users import models
from fastapi_users.db import MongoDBUserDatabase
class User(models.BaseUser):
pass
class UserCreate(User, models.BaseUserCreate):
pass
class UserUpdate(User, models.BaseUserUpdate):
pass
class UserDB(User, models.BaseUserDB):
pass
DATABASE_URL = "mongodb://localhost:27017"
client = motor.motor_asyncio.AsyncIOMotorClient(
DATABASE_URL, uuidRepresentation="standard"
)
db = client["database_name"]
collection = db["users"]
app = FastAPI()
user_db = MongoDBUserDatabase(UserDB, collection)

avg_line_length: 17.457143 | max_line_length: 49 | alphanum_fraction: 0.772504

hexsha: 40d065fff07276c673745dfa8851569744730062 | size: 824 | ext: py | lang: Python
path: openpnm/algorithms/__init__.py | repo: xu-kai-xu/OpenPNM @ 61d5fc4729a0a29291cf6c53c07c4246e7a13714 | licenses: ["MIT"]
stars: 2 (2019-08-24T09:17:40.000Z – 2020-07-05T07:21:21.000Z) | issues: null | forks: null

r"""
Collection of pre-defined algorithms
====================================
The ``algorithms`` module contains classes for conducting transport
simulations on pore networks.
"""
from ._mixins import *
from ._generic_algorithm import *
from ._generic_transport import *
from ._reactive_transport import *
from ._transient_reactive_transport import *
from ._stokes_flow import *
from ._fickian_diffusion import *
from ._transient_fickian_diffusion import *
from ._advection_diffusion import *
from ._transient_advection_diffusion import *
from ._fourier_conduction import *
from ._ohmic_conduction import *
from ._ordinary_percolation import *
from ._invasion_percolation import *
from ._mixed_ip import *
from ._mixed_ip_coop import *
from ._ionic_conduction import *
from ._transient_ionic_conduction import *

avg_line_length: 22.888889 | max_line_length: 67 | alphanum_fraction: 0.774272

hexsha: 9dcee743dd6f265714bbbdd6e4422b0d3fcb8730 | size: 4,545 | ext: bzl | lang: Python
path: internal/pkg_web/pkg_web.bzl | repo: ankitects/rules_nodejs @ 9a50cb694e39fba8874f991675ae7ca585108df5 | licenses: ["Apache-2.0"]
stars: null | issues: 6 (2021-09-02T20:32:14.000Z – 2022-03-02T10:59:15.000Z) | forks: null

# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the pkg_web rule.
"""
load("//:providers.bzl", "NODE_CONTEXT_ATTRS", "NodeContextInfo")
_DOC = """Assembles a web application from source files."""
_ATTRS = dict(NODE_CONTEXT_ATTRS, **{
"additional_root_paths": attr.string_list(
doc = """Path prefixes to strip off all srcs, in addition to the current package. Longest wins.""",
),
"srcs": attr.label_list(
allow_files = True,
doc = """Files which should be copied into the package""",
),
"substitutions": attr.string_dict(
doc = """Key-value pairs which are replaced in all the files while building the package.
You can use values from the workspace status command using curly braces, for example
`{"0.0.0-PLACEHOLDER": "{STABLE_GIT_VERSION}"}`.
See the section on stamping in the README.""",
),
"_assembler": attr.label(
default = "@build_bazel_rules_nodejs//internal/pkg_web:assembler",
executable = True,
cfg = "host",
),
})
def move_files(output_name, substitutions, version_file, info_file, stamp, files, action_factory, var, assembler, root_paths):
"""Moves files into an output directory
Args:
output_name: The name of the output directory
substitutions: key/value pairs to replace
version_file: bazel-out/volatile-status.txt
info_file: bazel-out/stable-status.txt
stamp: whether the build is performed with --stamp
files: The files to move
action_factory: Bazel's actions module from ctx.actions - see https://docs.bazel.build/versions/master/skylark/lib/actions.html
var: environment variables
assembler: The assembler executable
root_paths: Path prefixes to strip off all srcs. Longest wins.
Returns:
The output directory tree-artifact
"""
www_dir = action_factory.declare_directory(output_name)
args = action_factory.args()
inputs = files[:]
args.add(www_dir.path)
if stamp:
args.add(version_file.path)
inputs.append(version_file)
args.add(info_file.path)
inputs.append(info_file)
else:
args.add_all(["", ""])
args.add(substitutions)
args.add_all(root_paths)
args.add("--assets")
args.add_all([f.path for f in files])
args.use_param_file("%s", use_always = True)
action_factory.run(
inputs = inputs,
outputs = [www_dir],
executable = assembler,
arguments = [args],
execution_requirements = {"local": "1"},
env = {"COMPILATION_MODE": var["COMPILATION_MODE"]},
)
return depset([www_dir])
def additional_root_paths(ctx):
return ctx.attr.additional_root_paths + [
# also add additional_root_paths variants from genfiles dir and bin dir
"/".join([ctx.genfiles_dir.path, p])
for p in ctx.attr.additional_root_paths
] + [
"/".join([ctx.bin_dir.path, p])
for p in ctx.attr.additional_root_paths
] + [
# package path is the root, including in bin/gen
ctx.label.package,
"/".join([ctx.bin_dir.path, ctx.label.package]),
"/".join([ctx.genfiles_dir.path, ctx.label.package]),
# bazel-bin/gen dirs to absolute paths
ctx.genfiles_dir.path,
ctx.bin_dir.path,
# package re-rooted subdirectory
"/".join([p for p in [ctx.bin_dir.path, ctx.label.package, "_" + ctx.label.name, ctx.label.package] if p]),
]
def _impl(ctx):
root_paths = additional_root_paths(ctx)
package_layout = move_files(
ctx.label.name,
ctx.attr.substitutions,
ctx.version_file,
ctx.info_file,
ctx.attr.node_context_data[NodeContextInfo].stamp,
ctx.files.srcs,
ctx.actions,
ctx.var,
ctx.executable._assembler,
root_paths,
)
return [
DefaultInfo(files = package_layout),
]
pkg_web = rule(
implementation = _impl,
attrs = _ATTRS,
doc = _DOC,
)
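
# A hypothetical BUILD usage of the rule defined above; the load label and
# target names are placeholders:
#
#   load("//internal/pkg_web:pkg_web.bzl", "pkg_web")
#
#   pkg_web(
#       name = "site",
#       srcs = [":bundle", "index.html"],
#       additional_root_paths = ["static"],
#       substitutions = {"0.0.0-PLACEHOLDER": "{STABLE_GIT_VERSION}"},
#   )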

avg_line_length: 33.91791 | max_line_length: 133 | alphanum_fraction: 0.663146

hexsha: 2c6d18cc4467f8a5079b79db81775e64851d230b | size: 199 | ext: py | lang: Python
path: ifitwala_ed/stock/doctype/price_list_country/price_list_country.py | repo: mohsinalimat/ifitwala_ed @ 8927695ed9dee36e56571c442ebbe6e6431c7d46 | licenses: ["MIT"]
stars: 13 (2020-09-02T10:27:57.000Z – 2022-03-11T15:28:46.000Z) | issues: 43 (2020-09-02T07:00:42.000Z – 2021-07-05T13:22:58.000Z) | forks: 6 (2020-10-19T01:02:18.000Z – 2022-03-11T15:28:47.000Z)

# Copyright (c) 2021, ifitwala and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class PriceListCountry(Document):
pass

avg_line_length: 22.111111 | max_line_length: 49 | alphanum_fraction: 0.798995

hexsha: 21f937569de3b71d27acdafc0c848194a9caaca7 | size: 403 | ext: py | lang: Python
path: usage/middleware.py | repo: jef79m/django-usage @ b793609dd1bb58e85ecf4e70b712b7aa852ba666 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null

from .models import PageHit
from django.contrib.auth import get_user
class UsageMiddleware(object):
def process_request(self, request):
        if request.user.is_authenticated():  # callable in Django < 1.10; a property in modern Django
PageHit.objects.create(
user=get_user(request),
requested_page=request.path_info,
user_agent=request.META['HTTP_USER_AGENT'])
else:
pass

avg_line_length: 28.785714 | max_line_length: 59 | alphanum_fraction: 0.632754

hexsha: c7615f64d597953206ec47837640a1d4a62f35eb | size: 1,691 | ext: py | lang: Python
path: ceilometer/agent/discovery/tenant.py | repo: Missxiaoguo/stx-ceilometer @ a226b47216e76ec209818b900253d3c1f1ffc3aa | licenses: ["Apache-2.0"]
stars: null | issues: 1 (2018-08-16T15:18:09.000Z – 2018-08-16T20:51:45.000Z) | forks: 3 (2018-08-15T14:35:23.000Z – 2019-01-11T15:57:02.000Z)

# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from ceilometer.agent import plugin_base as plugin
LOG = log.getLogger(__name__)
class TenantDiscovery(plugin.DiscoveryBase):
"""Discovery that supplies keystone tenants.
    This discovery should be used when the pollster's work can't be divided
    into pieces smaller than per-tenant. An example is the Swift
    pollster, which polls account details and does so per-project.
"""
def discover(self, manager, param=None):
domains = manager.keystone.domains.list()
LOG.debug('Found %s keystone domains', len(domains))
if domains:
tenants = []
for domain in domains:
domain_tenants = manager.keystone.projects.list(domain)
LOG.debug("Found %s tenants in domain %s", len(domain_tenants),
domain.name)
tenants = tenants + domain_tenants
else:
tenants = manager.keystone.projects.list()
LOG.debug("No domains - found %s tenants in default domain",
len(tenants))
return tenants or []

avg_line_length: 37.577778 | max_line_length: 79 | alphanum_fraction: 0.674749

hexsha: 5c709e6229d8d873549e5d12febdc6ae7665de36 | size: 628 | ext: py | lang: Python
path: OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/EXT/texture_mirror_clamp_to_edge.py | repo: JE-Chen/je_old_repo @ a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | licenses: ["MIT"]
stars: null | issues: null | forks: null

'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_EXT_texture_mirror_clamp_to_edge'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_EXT_texture_mirror_clamp_to_edge',error_checker=_errors._error_checker)
GL_MIRROR_CLAMP_TO_EDGE_EXT=_C('GL_MIRROR_CLAMP_TO_EDGE_EXT',0x8743)

avg_line_length: 39.25 | max_line_length: 136 | alphanum_fraction: 0.800955

hexsha: 06fb7423513e4f1479bde925b2dc6fc149745581 | size: 2,824 | ext: py | lang: Python
path: parlai/mturk/tasks/image_chat/igc_evals/task_configs/task_config_responses.py | repo: ricsinaruto/ParlAI @ 733b627ae456d6b11a2fc4624088a781bc6c1d03 | licenses: ["MIT"]
stars: 258 (2020-04-10T07:01:06.000Z – 2022-03-26T11:49:30.000Z) | issues: 33 (2020-04-10T04:28:51.000Z – 2022-03-31T02:52:02.000Z) | forks: 43 (2020-04-14T10:43:33.000Z – 2022-03-13T02:27:54.000Z)

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
task_config = {}
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Rate Quality of Responses to Questions in Context of Image'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = ('You will rate the quality of responses to '
                                  'questions in the context of a discussion about an image.')
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'image,rate'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
task_config['task_description'] = \
'''
<h2><b>Description</b></h2>
In this task, you will be shown 4 images. For each image, there will be
a contextual statement, a question regarding the image and in response to the statement,
and a selection of various responses to the question.
The goal of this task is to rate the quality of these responses.
<br>
<br>
<h4><b>STEP 1</b></h4> You will be shown an image, some textual context,
a question in response to the textual context,
and a set of candidate responses to the question.
<br>
<br>
E.g., you may be shown an image of a tree; some textual context, i.e.
"An amazing tree for climbing.";
a question, "Do you think you could really climb that tree?";
and, a set of candidate responses:
<br> 1. "Are you kidding? I could climb that tree in my sleep."
<br> 2. "Is it time for dinner yet?"
<br>
<h4><b>STEP 2</b></h4> You will rate each candidate response on a scale from 1 to 3,
where 3 is the <b>highest</b> quality and 1 is the <b>lowest</b> quality.
<br>
<br>
E.g. in the example above, you might give the first response a "3" rating
and the second response a "1" rating.
<br>
<br>
<h4><b>REWARD/BONUS</b></h4>
To complete this task, <b><span style="color:blue">you must rate the questions on
ALL 4 images.</span></b>
If you complete the task, you will receive $0.40.
<br>
<br>
<br>
<h4><b>CLOSE WINDOW/TIMEOUT/RETURN HIT</b></h4>
Once the task has started, close window/timeout or return HIT will result in
<b><span style="color:blue">HIT EXPIRED</span></b> to you and NO reward paid.
<br>
<br>
<br>
If you are ready, please click "Accept HIT" to start this task.
'''

avg_line_length: 37.157895 | max_line_length: 88 | alphanum_fraction: 0.734773

hexsha: 47ae2f1b73ae062e4dd7d024d1a9e91742574254 | size: 14,196 | ext: py | lang: Python
path: mlg1categories/views.py | repo: willianz7/mlg1noticias @ b8793dd00835c477363d22f1be3b0ab939e9833e | licenses: ["CNRI-Python-GPL-Compatible", "BSD-3-Clause"]
stars: null | issues: null | forks: null

from django.shortcuts import render
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
import io
from PIL import Image
#cd django-dev
#source my_env/bin/activate
#alias python=python3
#python manage.py runserver localhost:8000
#open Firefox and browse to localhost:8000/MachineLearning
class ControllerML():  # classification handled with scikit-learn
corpus = []
labels = []
    X = []  # receives the corpus
    Y = []  # receives the labels
def __init__(self):
self.Brasil = []
self.Mundo = []
self.Economia = []
self.CienciaSaude = []
self.Politica = []
self.Blog = []
self.Cultura = []
def adicionarNoticiaBrasil(self,n):
self.Brasil.append(n)
def adicionarNoticiaMundo(self,n) :
self.Mundo.append(n)
def adicionarNoticiaEconomia(self,n):
self.Economia.append(n)
def adicionarNoticiaCienciaSaude(self,n):
self.CienciaSaude.append(n)
def adicionarNoticiaPolitica(self,n) :
self.Politica.append(n)
def adicionarNoticiaBlog(self,n):
self.Blog.append(n)
def adicionarNoticiaCultura(self,n):
self.Cultura.append(n)
def criarCorpus(self):
#for noticia in self.Brasil:
# self.corpus.append(noticia)
#self.labels.append('Brasil')
#for noticia in self.Mundo:
# self.corpus.append(noticia)
#self.labels.append('Mundo')
#for noticia in self.Economia:
#self.corpus.append(noticia)
#self.labels.append('Economia')
for noticia in self.CienciaSaude:
self.corpus.append(noticia)
self.labels.append('Ciência e Saúde')
for noticia in self.Politica:
self.corpus.append(noticia)
self.labels.append('Politica')
#for noticia in self.Blog:
# self.corpus.append(noticia)
#self.labels.append('Blog')
#for noticia in self.Cultura:
# self.corpus.append(noticia)
#self.labels.append('Cultura')
self.X = self.corpus
self.Y = self.labels
return self.corpus
def evalueteModel(self, Y_test, Y_pred_rv):
from sklearn.metrics import confusion_matrix
import seaborn as sns
sns.set(font_scale=0.8)
cm = confusion_matrix(Y_test, Y_pred_rv)
sns.heatmap(cm, xticklabels=['predicted_CienciaSaude', 'predicted_Politica'], yticklabels=['actual_CienciaSaude', 'actual_Politica'],annot=True, fmt='d', annot_kws={'fontsize':20}, cmap="YlGnBu");
true_neg, false_pos = cm[0]
false_neg, true_pos = cm[1]
accuracy = round((true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg),3)
precision = round((true_pos) / (true_pos + false_pos),3)
recall = round((true_pos) / (true_pos + false_neg),3)
f1 = round(2 * (precision * recall) / (precision + recall),3)
#image_file =plt.savefig('heatmap.png', format= 'png')
plt.show()
#image_file = plt.savefig('fig.png', dpi=None, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format='png',transparent=True, bboxinches=None, padinches=0., frameon=None)
s = ''
s = s + 'Accuracy: {}'.format(accuracy)
s = s + 'Precision: {}'.format(precision)
s = s + 'Recall: {}'.format(recall)
s = s + 'F1 Score: {}'.format(f1)
return(s)
def NaiveBayesEvaluete(self, X_train_rv, X_test_rv, Y_train, Y_test, st):
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
# Train the model
nb.fit(X_train_rv, Y_train)
# Take the model that was trained on the X_train_cv data and apply it to the X_test_cv
#data
Y_pred_rv = nb.predict(X_test_rv)
plt.suptitle("Evaluete Model: " + st, color='m')
#print(Y_pred_cv_nb)
return self.evalueteModel(Y_test, Y_pred_rv)
def logistRegressionEvaluete(self, X_train_rv, X_test_rv,Y_train, Y_test, st):
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
# Train the model
lr.fit(X_train_rv, Y_train)
# Take the model that was trained on the X_train_cv data and apply it to the X_test_cv
#data
Y_pred_rv = lr.predict(X_test_rv)
#Input:
plt.suptitle("Evaluete Model: " + st, color='m')
#print(Y_pred_rv)
return self.evalueteModel(Y_test, Y_pred_rv)
    # rv = vector representation; SVM with kernel=linear
def SvmLinearSVCEvaluete(self, X_train_rv, X_test_rv,Y_train, Y_test, st):
from sklearn.svm import LinearSVC
sl = LinearSVC()
sl.fit(X_train_rv, Y_train)
Y_pred_rv = sl.predict(X_test_rv)
plt.suptitle("Evaluete Model: " + st, color='m')
#print(Y_pred_rv)
return self.evalueteModel(Y_test, Y_pred_rv)
def extrairTemaTopico(self,topico, v):
tema = ''
for indexword in topico:
tema = tema + v.get_feature_names()[indexword] + ' '
return tema
def rotular(self, vDescriptions, posNoticia):
from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import NMF
tfidf = TfidfVectorizer(max_features= 1000, max_df = 0.5, smooth_idf=True)
X = tfidf.fit_transform(vDescriptions)
nmf = NMF(n_components=10, random_state=42)
nmf.fit_transform(X[posNoticia])
#topicos = extrairTopicos(nmf)
#data.shape
#print(data[0])
#print('\nDocumento', i)
        sorted_0 = nmf.components_[0].argsort()  # first topic
        #sorted_1 = pca.components_[1].argsort()  # second topic
        #sorted_2 = pca.components_[2].argsort()  # third topic
sortedflip_0 = np.flip(sorted_0)[0:5]
#sortedflip_1 = np.flip(sorted_1)[0:5]
#sortedflip_2 = np.flip(sorted_2)[0:5]
#print('Tópico 1:')
return self.extrairTemaTopico(sortedflip_0, tfidf)
# print('Tópico 2:')
#extrairTemaTopico(sortedflip_1, cv)
#print('Tópico 3:')
#extrairTemaTopico(sortedflip_2, cv)
def treinarMachineLearning(self):
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from itertools import combinations
from sklearn.metrics.pairwise import cosine_similarity
# split the data into a training and test set
X_train, X_test, Y_train, Y_test = train_test_split(self.X, self.Y, test_size=0.3, random_state=42)
# test size = 30% of observations, which means training size = 70% of observations
# random state = 42, so we all get the same random train / test split
        #cv = CountVectorizer(stop_words='english')
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
X_train_cv = cv.fit_transform(X_train) # fit_transform learns the vocab and one-hot encodes
X_test_cv = cv.transform(X_test) # transform uses the same vocab and one-hot encodes
#Tfidf
from sklearn.feature_extraction.text import TfidfVectorizer
tf = TfidfVectorizer()
X_train_tf = tf.fit_transform(X_train) # fit_transform learns the vocab and one-hot encodes
X_test_tf = tf.transform(X_test) # transform uses the same vocab and one-hot encodes
# Use a logistic regression model CV
rl_cv = self.logistRegressionEvaluete(X_train_cv, X_test_cv,Y_train, Y_test, 'Logistic regression model CV')
# Use a logistic regression model tfidf
rl_tf = self.logistRegressionEvaluete(X_train_tf, X_test_tf,Y_train, Y_test, 'Logistic regression model tfidf')
# Use a Naive Bayes model CV
nb_cv = self.NaiveBayesEvaluete(X_train_cv, X_test_cv,Y_train, Y_test, 'Naive Bayes model CV')
# Use a Naive Bayes model tfidf
nb_tf = self.NaiveBayesEvaluete(X_train_tf, X_test_tf,Y_train, Y_test, 'Naive Bayes model tfidf')
# Use a SVM Linear SVC LINEAR model CV
svm_cv = self.SvmLinearSVCEvaluete(X_train_cv, X_test_cv,Y_train, Y_test, 'SVM Linear SVC LINEAR model CV')
# Use a SVM Linear SVC LINEAR model tfidf
svm_tf = self.SvmLinearSVCEvaluete(X_train_tf, X_test_tf,Y_train, Y_test, 'SVM Linear SVC LINEAR model tfidf')
return (rl_cv,rl_tf, nb_cv,nb_tf, svm_cv, svm_tf)
from django.http import HttpResponse
import re
import string
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from nltk.tag import pos_tag
#!pip install nltk
nltk.download('rslp')
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
from nltk.tokenize.treebank import TreebankWordDetokenizer
import json
from pyUFbr.baseuf import ufbr
#Categories: Brasil, Mundo, Economia, Ciência e Saúde, Política, Blog, Cultura
estados_do_brasil = ['Acre','Alagoas','Amapá','Amazonas','Bahia','Ceará','Distrito Federal','Espirito Santo','Goiás','Maranhão','Mato Grosso do Sul','Mato Grosso','Minas Gerais','Pará','Paraíba','Paraná','Pernambuco','Piauí','Rio de Janeiro','Rio Grande do Norte','Rio Grande do Sul','Rondônia','Roraima','Santa Catarina','São Paulo','Sergipe','Tocantins'];
cidades_sp = ufbr.list_cidades('SP')
Noticias = []
vDescriptions = []
def preparaTexto(text):
    if text is None: text = ''  # TODO: fetch the full text by opening the link
    clean_text = re.sub(r'\w*\d\w*', ' ', text)  # drop words containing digits
    clean_text = re.sub('[%s]' % re.escape(string.punctuation), ' ', clean_text.lower())  # strip punctuation and lowercase
    tokens = word_tokenize(clean_text)  # tokenize
    tokens_without_sw = [word for word in tokens if word not in stopwords.words('portuguese')]
    text_without_sw = TreebankWordDetokenizer().detokenize(tokens_without_sw)  # drop stop words
return(text_without_sw)
def aprendizagem_ns(ml,Noticias, vDescriptions):
i = 0
html = ''
for noticia in Noticias:
html = html + '<a href="'+ noticia[0] +'">' + noticia[1] + '</a>' + '(' + noticia[2].strip() + ')'+ '(' + ml.rotular(vDescriptions, i) + ')' +' <br>'
i+=1
return html
def carrega(noticias, ml):#, flag):
html = ''
pol = 0
cien = 0
for noticia in noticias:
if (noticia['link'] != None):
if noticia['categoria'] == None : noticia['categoria'] = 'Não Categorizada'
if noticia['title'] == None : noticia['title'] = 'Sem titulo'
texto = preparaTexto(noticia['description'])
if noticia['categoria'].strip().upper().find('MUNDO') != -1 : ml.adicionarNoticiaMundo(texto)
if noticia['categoria'].strip() in estados_do_brasil or noticia['categoria'].strip().upper() in cidades_sp :
noticia['categoria'] = 'Brasil'
ml.adicionarNoticiaBrasil(texto)
if noticia['categoria'].strip().find('Blog') != -1 :
noticia['categoria'] = 'Blog'
ml.adicionarNoticiaBlog(texto)
if noticia['categoria'].strip().upper().find('CORONAVÍRUS') != -1 or noticia['categoria'].strip().upper().find('VÍRUS') != -1 or noticia['categoria'].strip().upper().find('VACINA') != -1:
noticia['categoria'] = 'Ciência e Saúde'
ml.adicionarNoticiaCienciaSaude(texto)
cien += 1
Noticias.append([noticia['link'],noticia['title'],noticia['categoria'],texto])
vDescriptions.append(texto)
if noticia['categoria'].strip().upper().find('CONCURSO') != -1 or noticia['categoria'].strip().upper().find('EMPREGO') != -1 or noticia['categoria'].strip().upper().find('IMPOSTO') != -1 or noticia['categoria'].strip().upper().find('NEGÓCIO') != -1 or noticia['categoria'].strip().upper().find('FINANCEIRA') != -1 :
noticia['categoria'] = 'Economia'
ml.adicionarNoticiaEconomia(texto)
if noticia['categoria'].strip().find('Pop & Arte') != -1 or noticia['categoria'].strip().upper().find('MÚSICA') != -1 or noticia['categoria'].strip().upper().find('CINEMA') != -1 or noticia['categoria'].strip().upper().find('LIVES') != -1 or noticia['categoria'].strip().find('Agora é Assim?') != -1 or noticia['categoria'].strip().upper().find('CARNAVAL') != -1 :
noticia['categoria'] = 'Cultura'
ml.adicionarNoticiaCultura(texto)
if noticia['categoria'].strip().find('Política') != -1 :
ml.adicionarNoticiaPolitica(texto)
pol += 1
#if flag == 'NS':
Noticias.append([noticia['link'],noticia['title'],noticia['categoria'],texto])
vDescriptions.append(texto)
#if flag == 'S':
html = html + '<a href="'+ noticia['link'] +'">' + noticia['title'] + '</a>' + '(' + noticia['categoria'].strip() + ')' +' <br>'
#if flag == 'NS':
html = aprendizagem_ns(ml,Noticias, vDescriptions)
html = html + 'Noticias de Politica: ' + str(pol) + ' Noticias de Ciência e Saúde: ' + str(cien) + '\n<br>'
return html
def carregaS(request,noticias):
ml = ControllerML()
    html = carrega(noticias, ml)#, 'S')  # noticias: news items loaded from the json file
ml.criarCorpus()
rl_cv,rl_tf, nb_cv,nb_tf, svm_cv, svm_tf = ml.treinarMachineLearning()
dados1 = 'Regressão Log CV: '+rl_cv
dados2 = 'Regressão Log tfidf: '+rl_tf
dados3 = 'Naive Bayes com CV: '+nb_cv
dados4 = 'Naive Bayes com tfidf: '+nb_tf
dados5 = 'smv K=LINEAR com CV: '+svm_cv
dados6 = 'smv K=LINEAR com tfidf: '+svm_tf
html = html + dados1 + '<br>' + dados2 + '<br>' + dados3 + '<br>' + dados4 + '<br>' + dados5 + '<br>' + dados6 + '<br>'
return html
def carregaNS(request,noticias):
ml = ControllerML()
    html = carrega(noticias, ml)#, 'NS')  # noticias: news items loaded from the json file
return html
# Create your views here.
def index(request):
with open('noticias.json', 'r') as json_file:
noticias = json.load(json_file)
#html = '<a href="'+carregaNS(request,noticias) + '">' + 'Carregar Não Supervisionado' + '</a> <br>\n'
#html = html + '<a href="'+ carregaS(request,noticias) + '">' + 'Carregar Supervisionado' + '</a> <br>'
#Brasil, Mundo, Economia, CienciaSaude,Politica, Blog, Cultura
ml = ControllerML()
    html = carrega(noticias, ml)  # noticias: news items loaded from the json file
ml.criarCorpus()
rl_cv,rl_tf, nb_cv,nb_tf, svm_cv, svm_tf = ml.treinarMachineLearning()
#fig1 = rl_cv[0]
dados1 = 'Regressão Log CV: '+rl_cv
dados2 = 'Regressão Log tfidf: '+rl_tf
dados3 = 'Naive Bayes com CV: '+nb_cv
dados4 = 'Naive Bayes com tfidf: '+nb_tf
dados5 = 'smv K=LINEAR com CV: '+svm_cv
dados6 = 'smv K=LINEAR com tfidf: '+svm_tf
html = html + dados1 + '<br>' + dados2 + '<br>' + dados3 + '<br>' + dados4 + '<br>' + dados5 + '<br>' + dados6 + '<br>'
return HttpResponse(html)

avg_line_length: 42.25 | max_line_length: 373 | alphanum_fraction: 0.67998

hexsha: 4421abba8aec220a65f57b01107e36b13ad018f2 | size: 2,532 | ext: py | lang: Python
path: common/utils/requests.py | repo: shapeshift-legacy/watchtower @ c9cd5150f8549145f7de9b1ea820d548959350fe | licenses: ["MIT"]
stars: null | issues: null | forks: null

import logging
import urllib3
import certifi
import json
logger = logging.getLogger('watchtower.common.utils.requests')
class HTTPError(Exception):
"""An error status was returned from the http request"""
pass
class RequestsUtil:
def get_multiple(self, urls):
responses = []
for url in urls:
try:
response = http.get(url, retries=2)
responses.append(response)
            except Exception:  # skip URLs that fail; return whatever succeeded
                pass
return responses
def get_multiple_from_dictionary(self, dict):
for key in dict:
dict[key]['response'] = http.get(dict[key]['url'], retries=2)
return dict
class Http:
def __init__(self):
self.http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
def get(self, url, params={}, headers={}, retries=urllib3.Retry(3)):
return self._request('GET', url, params=params, headers=headers, retries=retries)
def post(self, url, params={}, headers={}, body=None, retries=urllib3.Retry(3)):
return self._request('POST', url, params=params, headers=headers, body=body, retries=retries)
def _request(self, method, url, params={}, headers={}, body=None, retries=urllib3.Retry(3)):
payload = body
if payload and not isinstance(payload, str):
payload = json.dumps(payload)
response = self.http.request(method, url, fields=params, body=payload, retries=retries, headers=headers)
content_type = response.headers.get('Content-Type')
# Check for http error
if response.status < 200 or response.status >= 400:
r_data = response.data.decode('utf-8').rstrip()
http_error_msg = ''
if 400 <= response.status < 500:
                logger.warning(u'%d Client Error: %s for url: %s' % (response.status, r_data, url))
http_error_msg = u'%d Client Error: %s' % (response.status, r_data)
elif 500 <= response.status < 600:
logger.error(u'%d Server Error: %s for url: %s' % (response.status, r_data, url))
http_error_msg = u'%d Server Error: %s' % (response.status, r_data)
if http_error_msg:
raise HTTPError(http_error_msg)
# Decode json response
if isinstance(content_type, str) and content_type.startswith('application/json'):
response.json_data = json.loads(response.data.decode('utf-8'))
return response
requests_util = RequestsUtil()
http = Http()
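
# A short usage sketch of the module-level singletons above; the URL is a
# placeholder, not part of this module:
if __name__ == '__main__':
    resp = http.get('https://example.com/api/status')
    # json_data is only attached when the server returns application/json
    print(resp.status, getattr(resp, 'json_data', None))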

avg_line_length: 36.171429 | max_line_length: 112 | alphanum_fraction: 0.621643

hexsha: 41414affbe31372bebb2d733d02827b3ea84635c | size: 2,091 | ext: py | lang: Python
path: elkserver/docker/redelk-base/redelkinstalldata/scripts/modules/msteams/module.py | repo: MikeKMiller/RedELK @ 669794e81cbbccee40dfa8371c1312d5ed8fd211 | licenses: ["BSD-3-Clause"]
stars: 1,863 (2018-10-05T04:29:59.000Z – 2022-03-31T23:55:51.000Z) | issues: 164 (2018-10-17T18:12:26.000Z – 2022-03-24T08:30:02.000Z) | forks: 332 (2018-10-05T15:30:33.000Z – 2022-03-30T13:21:37.000Z)

#!/usr/bin/python3
"""
Part of RedELK
This connector sends RedELK alerts via Microsoft Teams
Authors:
- Lorenzo Bernardi (@fastlorenzo)
"""
import logging
import pymsteams
import config
from modules.helpers import get_value, pprint
info = {
'version': 0.1,
'name': 'msteams connector',
'description': 'This connector sends RedELK alerts via Microsoft Teams',
'type': 'redelk_connector',
'submodule': 'msteams'
}
class Module(): # pylint: disable=too-few-public-methods
""" msteams connector module """
def __init__(self):
self.logger = logging.getLogger(info['submodule'])
def send_alarm(self, alarm):
""" Send the alarm notification """
tmsg = pymsteams.connectorcard(config.notifications['msteams']['webhook_url'])
description = alarm['info']['description']
if len(alarm['groupby']) > 0:
description += f'\n *Please note that the items below have been grouped by: {pprint(alarm["groupby"])}*'
tmsg.text(description)
tmsg.color('red')
try:
for hit in alarm['hits']['hits']:
tcs = pymsteams.cardsection()
tcs.disableMarkdown()
i = 0
title = hit['_id']
while i < len(alarm['groupby']):
val = get_value(f'_source.{alarm["groupby"][i]}', hit)
if i == 0:
title = val
else:
title = f'{title} / {val}'
i += 1
tcs.activityTitle(f'Alarm on item: {title}')
# tcs.activitySubtitle(alarm['info']['description'])
for field in alarm['fields']:
val = get_value(f'_source.{field}', hit)
tcs.addFact(field, pprint(val))
tmsg.addSection(tcs)
# pylint: disable=broad-except
except Exception as error:
self.logger.exception(error)
tmsg.title(f'Alarm from {alarm["info"]["name"]} [{alarm["hits"]["total"]} hits]')
tmsg.send()

avg_line_length: 33.190476 | max_line_length: 116 | alphanum_fraction: 0.550454

hexsha: c2b0468041d4fb0c9b739f521048cc076793d186 | size: 1,559 | ext: py | lang: Python
path: UnleashClient/periodic_tasks/send_metrics.py | repo: hellohaptik/unleash-client-python @ 8bd06318bcab0fc2ff0cd450d5a9416c228ccdc1 | licenses: ["MIT"]
stars: null | issues: 10 (2021-04-28T09:47:51.000Z – 2022-02-17T07:32:10.000Z) | forks: 1 (2022-02-15T19:21:29.000Z – 2022-02-15T19:21:29.000Z)

import redis
import pickle
from collections import ChainMap
from datetime import datetime, timezone
from UnleashClient.api import send_metrics
from UnleashClient.constants import METRIC_LAST_SENT_TIME
def aggregate_and_send_metrics(url: str,
app_name: str,
instance_id: str,
custom_headers: dict,
custom_options: dict,
features: dict,
ondisk_cache: redis.Redis
) -> None:
feature_stats_list = []
for feature_name in features.keys():
feature_stats = {
features[feature_name].name: {
"yes": features[feature_name].yes_count,
"no": features[feature_name].no_count
}
}
features[feature_name].reset_stats()
feature_stats_list.append(feature_stats)
metric_last_seen_time = pickle.loads(
ondisk_cache.get(
METRIC_LAST_SENT_TIME
)
)
metrics_request = {
"appName": app_name,
"instanceId": instance_id,
"bucket": {
"start": metric_last_seen_time.isoformat(),
"stop": datetime.now(timezone.utc).isoformat(),
"toggles": dict(ChainMap(*feature_stats_list))
}
}
send_metrics(url, metrics_request, custom_headers, custom_options)
ondisk_cache.set(
METRIC_LAST_SENT_TIME,
pickle.dumps(datetime.now(timezone.utc))
)
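
# This function is built to run on a schedule. A hypothetical wiring with
# APScheduler (an assumption; this file does not pin a scheduler) might look
# like:
#   from apscheduler.schedulers.background import BackgroundScheduler
#   scheduler = BackgroundScheduler()
#   scheduler.add_job(aggregate_and_send_metrics, 'interval', seconds=60,
#                     kwargs={...})  # url, app_name, instance_id, etc.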

avg_line_length: 30.568627 | max_line_length: 70 | alphanum_fraction: 0.56703

hexsha: 630901d09cc699aa8fe7e1647d81331dd6f0f673 | size: 322 | ext: py | lang: Python
path: test/test_onnx.py | repo: fangyu1006/Lightweight-Face-Detector-With-Landmarks @ e15e7f009d21e0b7098d75154bc0176c3cf44f71 | licenses: ["MIT"]
stars: 12 (2020-07-01T09:12:11.000Z – 2021-06-13T18:31:05.000Z) | issues: null | forks: 2 (2020-09-08T07:22:25.000Z – 2021-01-28T02:32:19.000Z)

import onnxruntime as nxrun
import numpy as np
ximg = np.random.rand(1,3,240,320).astype(np.float32)
sess = nxrun.InferenceSession("./converted_models/mobilenet/mobilenet_sim.onnx")
input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name
result = sess.run(None, {input_name: ximg})
print(result)

avg_line_length: 29.272727 | max_line_length: 80 | alphanum_fraction: 0.767081

hexsha: 776d99a65ec0c6ee5b270dc369c217ad8fab0640 | size: 1,731 | ext: py | lang: Python
path: test/test_board.py | repo: kirbysebastian/TicTacToe @ 192c4e98127eedcf42844a24cd5577e773b6eab8 | licenses: ["MIT"]
stars: 1 (2021-01-31T08:00:22.000Z – 2021-01-31T08:00:22.000Z) | issues: null | forks: null

import pytest
from tic_tac_toe.board import Board
def test_board_place_with_integer_pos():
board = Board()
out = board.place('*', 4)
assert out == False
def test_board_place_with_invalid_pos():
board = Board()
out = board.place('*', '0')
assert out == False
def test_board_place():
board = Board()
out = board.place('*', '5')
assert out == True
def test_board_is_full():
board = Board()
board.place('*', '1')
board.place('*', '2')
board.place('*', '3')
board.place('*', '4')
board.place('*', '5')
board.place('*', '6')
board.place('*', '7')
board.place('*', '8')
board.place('*', '9')
assert board.is_full() == True
def test_board_is_not_full():
board = Board()
board.place('*', '1')
board.place('*', '2')
board.place('*', '3')
board.place('*', '4')
board.place('*', '5')
board.place('*', '6')
board.place('*', '7')
board.place('*', '8')
assert board.is_full() == False
def test_board_place_correct_position():
first_board = Board()
second_board = Board()
first_board.place('*', '5')
second_board.place('*', '5')
assert str(first_board) == str(second_board)
def test_board_place_incorrect_position():
first_board = Board()
second_board = Board()
first_board.place('*', '3')
second_board.place('*', '2')
assert str(first_board) != str(second_board)
def test_board_is_space_available():
board = Board()
assert board.is_full() == False
for num in range(1, 10):
assert board.is_space_available(str(num)) == True
board.place('*', str(num))
assert board.is_space_available(str(num)) == False
assert board.is_full() == True

avg_line_length: 25.835821 | max_line_length: 58 | alphanum_fraction: 0.590988

hexsha: a53a6d8b85b3ce981d9dafeaacaf343245c82551 | size: 336 | ext: py | lang: Python
path: common_settings.py | repo: avinassh/Reddit-GoodReads-Bot @ 45d40920fa84d21f1b5f5893e9c8985c2495dfed | licenses: ["MIT"]
stars: 35 (2015-07-17T17:07:58.000Z – 2021-07-29T23:10:42.000Z) | issues: 3 (2016-01-03T05:21:42.000Z – 2017-12-06T10:07:24.000Z) | forks: 8 (2015-07-28T20:06:49.000Z – 2017-12-06T10:00:14.000Z)

# Common, non secret settings
# Used in both for Docker and non-Docker deployments
# Check `settings_sample.py` or `docker_settings.py`
supported_subreddits = 'india+indianbooks+52in52+indianreaders'
user_agent = ('Goodreads, v0.1. Gives info of the book whenever a goodreads '
              'link to a book is posted. (by /u/avinassh)')

avg_line_length: 42 | max_line_length: 74 | alphanum_fraction: 0.738095

hexsha: 0530b9058c8c780742ff39b99f743d03a3a86075 | size: 22,465 | ext: py | lang: Python
path: instruments.py | repo: rocketsaurus/pyemi @ 24d733b5a2ff475964a31ff87c7b112e9c9831ad | licenses: ["MIT"]
stars: null | issues: null | forks: null

import logging
from pathlib import Path
import time
import glob
import numpy as np
import pandas as pd
from ruamel.yaml import YAML
import visa
class BaseInstrument:
driver_folder = Path(__file__).parent.absolute() / Path('drivers')
def __init__(self, resource=None, driver=None, log_level=logging.CRITICAL, **kwargs):
if kwargs:
for key, value in kwargs.items():
self.interface = key.upper()
self.id = value
self.resource_string = f'{self.interface}::{self.id}::INSTR'
else:
self.resource_string = resource.upper()
FORMAT = '[%(levelname)s]%(asctime)s - %(message)s'
logging.basicConfig(level=log_level, format=FORMAT)
logging.info(f'Resource string: {self.resource_string}')
self.rm = visa.ResourceManager()
self.resource = self.rm.open_resource(self.resource_string)
if driver:
self.load_driver(driver)
def load_driver(self, driver_file):
yaml=YAML(typ='safe')
doc = self.driver_folder / Path(driver_file)
if doc.exists():
logging.info(f'Driver file: {doc}')
self.commands = yaml.load(doc)
if 'write_termination' in self.commands:
self.resource.write_termination = self.commands['write_termination']
if 'query_delay' in self.commands:
self.resource.query_delay = float(self.commands['query_delay'])
else:
logging.warning(f'Driver file does not exist: {doc}')
def list_available_drivers(self):
drivers = glob.glob(str(self.driver_folder / Path('*.yaml')))
return [Path(f).name for f in drivers]
def __str__(self):
'''Returns instrument ID'''
idn = self.resource.query('*IDN?')
idn = idn.split(',')
if len(idn) >= 2:
return idn[0] + ' ' + idn[1]
else:
return idn
def __repr__(self):
'''Returns instrument ID'''
return self.resource.query('*IDN?')
def reset(self):
'''Resets instrument'''
self.resource.write('*RST')
def opc(self):
'''Returns 1 when command is completed, 0 otherwise'''
return int(self.resource.query('*OPC?'))
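
# A hypothetical driver YAML for load_driver() above. Only 'write_termination'
# and 'query_delay' are read by BaseInstrument itself; the nested keys mirror
# how SpectrumAnalyzer indexes self.commands, but the SCPI strings are
# placeholders, not taken from any shipped driver:
#
#   write_termination: "\n"
#   query_delay: "0.1"
#   rbw:
#     get: "BAND?"
#     set: "BAND %s"
#   frequency:
#     start:
#       get: "FREQ:STAR?"
#       set: "FREQ:STAR %s"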
class SpectrumAnalyzer(BaseInstrument):
def __init__(self, **kwargs):
return super().__init__(**kwargs)
def ese(self, val):
'''Event status enable sets the bits of the event status registers'''
self.resource.write(f'*ESE {val}')
def esr(self):
'''Queries the contents of the event status register'''
self.resource.query('*ESR?')
@property
def rbw(self):
command = self.commands['rbw']['get']
return int(self.resource.query(command))
@rbw.setter
def rbw(self, val):
command = self.commands['rbw']['set'] % val
self.resource.write(command)
@property
def vbw(self):
command = self.commands['vbw']['get']
return int(self.resource.query(command))
@vbw.setter
def vbw(self, val):
command = self.commands['vbw']['set'] % val
self.resource.write(command)
@property
def amplitude_units(self):
command = self.commands['amplitude']['units']['get']
units = self.resource.query(command)
units = units.lower().strip('\n').replace('b', 'B').replace('v', 'V')
return units
@amplitude_units.setter
def amplitude_units(self, val):
command = self.commands['amplitude']['units']['set'] % val
self.resource.write(command)
@property
def start_frequency(self):
command = self.commands['frequency']['start']['get']
return int(self.resource.query(command))
@start_frequency.setter
def start_frequency(self, val):
command = self.commands['frequency']['start']['set'] % val
self.resource.write(command)
@property
def stop_frequency(self):
command = self.commands['frequency']['stop']['get']
return int(self.resource.query(command))
@stop_frequency.setter
def stop_frequency(self, val):
command = self.commands['frequency']['stop']['set'] % val
self.resource.write(command)
@property
def center_frequency(self):
command = self.commands['frequency']['center']['get']
return self.resource.query(command)
@center_frequency.setter
def center_frequency(self, val):
command = self.commands['frequency']['center']['set'] % val
self.resource.write(command)
@property
def span_frequency(self):
command = self.commands['frequency']['span']['get']
return self.resource.query(command)
@span_frequency.setter
def span_frequency(self, val):
command = self.commands['frequency']['span']['set'] % val
self.resource.write(command)
@property
def sweep_mode(self):
command = self.commands['sweep']['mode']['get']
return self.resource.query(command)
@sweep_mode.setter
def sweep_mode(self, val):
if val.lower() == 'continuous' or val.lower() == 'on' or val == 1:
command = self.commands['sweep']['mode']['continuous']['set']
else:
command = self.commands['sweep']['mode']['single']['set']
return self.resource.write(command)
@property
def sweep_points(self):
command = self.commands['sweep']['points']['get']
return self.resource.query(command)
@sweep_points.setter
def sweep_points(self, val):
command = self.commands['sweep']['points']['set'] % val
        self.resource.write(command)
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, val):
command = self.commands['mode']
if 'spectrum' in val.lower() or 'san' in val.lower() or 'analyzer' in val.lower():
command = command % 'SAN'
self._mode = 'SAN'
elif 'receiver' in val.lower() or 'emi' in val.lower():
command = command % 'REC'
self._mode = 'REC'
self.resource.write(command)
@property
def rf_input(self):
return self._rf_input
@rf_input.setter
def rf_input(self, val):
command = self.commands['input'] % val
self._rf_input = val
self.resource.write(command)
@property
def format(self):
command = self.commands['format']['get']
        return self.resource.query(command)
@format.setter
def format(self, val):
if type(val) is tuple:
command = self.commands['format']['binary'] % val
elif 'asc' in val.lower():
command = self.commands['format']['ascii']
self.resource.write(command)
@property
def display(self):
return self._display
@display.setter
def display(self, val):
command = self.commands['display']
        command = command.format(val)
self.resource.write(command)
def Trace(self, t):
return self._Trace(t, self)
class _Trace:
def __init__(self, t, sa):
self.sa = sa
self._trace = t
@property
def mode(self):
command = self.sa.commands['trace']['mode']['get'] % self._trace
return self.sa.resource.query(command)
@mode.setter
def mode(self, val):
command = self.sa.commands['trace']['mode']['set'] % (self._trace, val)
self.sa.resource.write(command)
@property
def detector(self):
command = self.sa.commands['trace']['detector']['get'] % self._trace
return self.sa.resource.query(command)
@detector.setter
def detector(self, val):
command = self.sa.commands['trace']['detector']['set'] % (self._trace, val)
self.sa.resource.write(command)
def dataframe(self, delay=None):
            '''Return a pandas DataFrame of Frequency (Hz) and Amplitude (in the analyzer's current units)'''
command = self.sa.commands['trace']['values'] % self._trace
pd.options.display.float_format = '{:.2f}'.format
if self.sa.interface.upper() == 'TCPIP':
self.sa.format = ('REAL', 32)
data = self.sa.resource.query_binary_values(command, delay=delay, data_points=self.sa.sweep_points)
elif self.sa.interface.upper() == 'GPIB':
if delay:
time.sleep(delay)
self.sa.format = 'ASCII'
data = self.sa.resource.query(command)
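                # Instrument-specific cleanup kept from the original code: the
                # two replace() calls below collapse newline artifacts in the
                # ASCII response before it is split into individual values.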
data = data.replace('\n', '001')
data = data.replace('001001', '001')
data = data.split(',')
frequency = np.linspace(self.sa.start_frequency, self.sa.stop_frequency, len(data))
units = self.sa.amplitude_units
df = pd.DataFrame(data={'Frequency (Hz)': frequency, f'Amplitude ({units})': data})
df[f'Amplitude ({units})'] = df[f'Amplitude ({units})'].astype(float)
return df
def Marker(self, m):
return self._Marker(m, self)
class _Marker:
def __init__(self, m, sa):
self.sa = sa
self._marker = m
self.state = 'ON'
@property
def state(self):
return self._state
@state.setter
def state(self, val):
command = self.sa.commands['marker']['state'] % (self._marker, val)
self._state = val
self.sa.resource.write(command)
@property
def frequency(self):
command = self.sa.commands['marker']['frequency']['get'] % self._marker
return self.sa.resource.query(command)
@frequency.setter
def frequency(self, val):
command = self.sa.commands['marker']['frequency']['set']
f, v = val
command = command % (self._marker, f, v)
self.sa.resource.write(command)
@property
def amplitude(self):
command = self.sa.commands['marker']['amplitude'] % self._marker
return float(self.sa.resource.query(command))
def goto_max(self):
'''Moves marker to maximum value'''
command = self.sa.commands['marker']['max'] % self._marker
self.sa.resource.write(command)
def goto_min(self):
'''Moves marker to minimum value'''
command = self.sa.commands['marker']['min'] % self._marker
self.sa.resource.write(command)
def center(self):
'''Centers the frequency span around the marker'''
command = self.sa.commands['marker']['center'] % self._marker
self.sa.resource.write(command)
class SignalGenerator(BaseInstrument):
def __init__(self, **kwargs):
return super().__init__(**kwargs)
@property
def discrete_frequency(self):
command = self.commands['frequency']['discrete']['get']
return self.resource.query(command)
@discrete_frequency.setter
def discrete_frequency(self, val):
command = self.commands['frequency']['discrete']['set'] % val
self.resource.write(command)
@property
def output(self):
command = self.commands['output']['get']
return self.resource.query(command)
@output.setter
def output(self, val):
command = self.commands['output']['set'] % val
self.resource.write(command)
@property
def level(self):
command = self.commands['level']['get']
return self.resource.query(command)
@level.setter
def level(self, val):
command = self.commands['level']['set'] % val
self.resource.write(command)
@property
def unit(self):
command = self.commands['unit']['get']
return self.resource.query(command)
@unit.setter
def unit(self, val):
command = self.commands['unit']['set'] % val
self.resource.write(command)
class ControllerBase(BaseInstrument):
def __init__(self, **kwargs):
return super().__init__(**kwargs)
@property
def position(self):
command = self.commands['position']['get']
return self.resource.query(command)
@position.setter
def position(self, val):
command = self.commands['position']['set'] % val
return self.resource.write(command)
@property
def acceleration(self):
command = self.commands['acceleration']['get']
return self.resource.query(command)
@acceleration.setter
def acceleration(self, val):
command = self.commands['acceleration']['set'] % val
return self.resource.write(command)
@property
def speed(self):
command = self.commands['speed']['get']
return self.resource.query(command)
@speed.setter
def speed(self, val):
command = self.commands['speed']['set'] % val
return self.resource.write(command)
@property
def cycle(self):
command = self.commands['cycle']['get']
return self.resource.query(command)
@cycle.setter
def cycle(self, val):
command = self.commands['cycle']['set'] % val
return self.resource.write(command)
@property
def error(self):
command = self.commands['error']['get']
return self.resource.query(command)
def start_scan(self):
'''Starts scanning from upper and lower limits based on # of cycles'''
command = self.commands['scan']['set']
self.resource.write(command)
def scan_progress(self):
'''Returns scan progress'''
command = self.commands['scan']['get']
self.resource.query(command)
class Tower(ControllerBase):
def __init__(self, **kwargs):
return super().__init__(**kwargs)
@property
def direction(self):
command = self.commands['direction']['get']
return self.resource.query(command)
@direction.setter
def direction(self, val):
        if val == -1 or (isinstance(val, str) and 'd' in val.lower()):
            command = self.commands['direction']['set']['down']
        elif val == 1 or (isinstance(val, str) and 'u' in val.lower()):
            command = self.commands['direction']['set']['up']
        elif val == 0 or (isinstance(val, str) and 's' in val.lower()):
            command = self.commands['direction']['set']['stop']
else:
logging.critical('Invalid direction, choose 1, 0, -1 or up, stop, down')
return
self.resource.write(command)
@property
def polarity(self):
command = self.commands['polarity']['get']
return self.resource.query(command)
@polarity.setter
def polarity(self, val):
if 'v' in val.lower():
command = self.commands['polarity']['set'] % 'V'
elif 'h' in val.lower():
command = self.commands['polarity']['set'] % 'H'
else:
logging.critical('Invalid polarity, choose V or H')
return
self.resource.write(command)
class Turntable(ControllerBase):
def __init__(self, **kwargs):
return super().__init__(**kwargs)
@property
def direction(self):
command = self.commands['direction']['get']
return self.resource.query(command)
@direction.setter
def direction(self, val):
        if val == -1 or (isinstance(val, str) and 'cc' in val.lower()):
            command = self.commands['direction']['set']['counterclockwise']
        elif val == 1 or (isinstance(val, str) and 'cw' in val.lower()):
            command = self.commands['direction']['set']['clockwise']
        elif val == 0 or (isinstance(val, str) and 's' in val.lower()):
            command = self.commands['direction']['set']['stop']
else:
logging.critical('Invalid direction, choose 1, 0, -1 or cw, stop, cc')
return
self.resource.write(command)
class DualController(BaseInstrument):
'''For devices with a single interface that control both antenna mast and turntable'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.device = 'tower'
def reset(self):
command = self.commands['reset'] % self._device
self.resource.write(command)
def opc(self):
command = self.commands['opc'] % self._device
opc = self.resource.query(command)
opc = opc.strip('OK')
opc = opc.strip('\n')
return int(opc)
@property
def device(self):
return self.readable_device
@device.setter
def device(self, val):
if 'tower' in val.lower() or 'mast' in val.lower() or 'ant' in val.lower():
self.readable_device = 'tower'
self._device = self.commands['device']['tower']
elif 'table' in val.lower():
self.readable_device = 'turntable'
self._device = self.commands['device']['turntable']
else:
logging.critical('Invalid device, choose tower or turntable')
raise ValueError
@property
def position(self):
command = self.commands['position']['get'] % self._device
return self.resource.query(command)
@position.setter
def position(self, val):
command = self.commands['position']['set'] % (self._device, val)
return self.resource.write(command)
@property
def acceleration(self):
command = self.commands['acceleration']['get'] % self._device
return self.resource.query(command)
@acceleration.setter
def acceleration(self, val):
command = self.commands['acceleration']['set'] % (self._device, val)
return self.resource.write(command)
@property
def speed(self):
command = self.commands['speed']['get'] % self._device
return self.resource.query(command)
@speed.setter
def speed(self, val):
command = self.commands['speed']['set'] % (self._device, val)
return self.resource.write(command)
@property
def cycle(self):
command = self.commands['cycle']['get'] % self._device
return self.resource.query(command)
@cycle.setter
def cycle(self, val):
command = self.commands['cycle']['set'] % (self._device, val)
return self.resource.write(command)
@property
def error(self):
command = self.commands['error']['get'] % self._device
return self.resource.query(command)
def start_scan(self):
'''Starts scanning from upper and lower limits based on # of cycles'''
command = self.commands['scan']['set'] % self._device
self.resource.write(command)
def scan_progress(self):
'''Returns scan progress'''
command = self.commands['scan']['get'] % self._device
self.resource.query(command)
@property
def direction(self):
command = self.commands['direction']['get']
return self.resource.query(command)
@direction.setter
def direction(self, val):
        if self.readable_device == 'tower':
            if val == -1 or (isinstance(val, str) and 'd' in val.lower()):
                command = self.commands['direction']['set']['down']
            elif val == 1 or (isinstance(val, str) and 'u' in val.lower()):
                command = self.commands['direction']['set']['up']
            elif val == 0 or (isinstance(val, str) and 's' in val.lower()):
                command = self.commands['direction']['set']['stop']
            else:
                logging.critical('Invalid direction, choose 1, 0, -1 or up, stop, down')
                raise ValueError
        elif self.readable_device == 'turntable':
            if val == -1 or (isinstance(val, str) and 'cc' in val.lower()):
                command = self.commands['direction']['set']['counterclockwise']
            elif val == 1 or (isinstance(val, str) and 'cw' in val.lower()):
                command = self.commands['direction']['set']['clockwise']
            elif val == 0 or (isinstance(val, str) and 's' in val.lower()):
                command = self.commands['direction']['set']['stop']
            else:
                logging.critical('Invalid direction, choose 1, 0, -1 or cw, stop, cc')
                raise ValueError
self.resource.write(command)
@property
def polarity(self):
command = self.commands['polarity']['get'] % self.commands['device']['tower']
return self.resource.query(command)
@polarity.setter
def polarity(self, val):
if 'v' in val.lower():
command = self.commands['polarity']['set'] % 'V'
elif 'h' in val.lower():
command = self.commands['polarity']['set'] % 'H'
else:
logging.critical('Invalid polarity, choose V or H')
return
self.resource.write(command)
if __name__ == "__main__":
'''
sa = SpectrumAnalyzer(gpib=20, driver='esw.yaml')
print(sa)
# Tower/Turntable example
controller = DualController(gpib=7, driver='emcenter.yaml', log_level=logging.DEBUG)
# Read device current position, default on object creation is tower
print(controller.position)
# Change device to turntable and set to 200
controller.device = 'turntable'
controller.position = 200
# Should return 200 or the angle of the turntable during its current rotation to 200
print(controller.position)
# Close connection
controller.resource.close()
# Signal generator example
sg = SignalGenerator(tcpip='10.0.0.11', driver='smw200a.yaml', log_level=logging.INFO)
# Reset instrument
sg.reset()
# Set fixed frequency
sg.discrete_frequency = (200, 'MHz')
print(sg.discrete_frequency)
# Turn rf output on
sg.output = 1
print(sg.output)
# Set output signal level
sg.level = -20
print(sg.level)
# Set output units dBm, dBuV etc.
sg.unit = 'dbuv'
print(sg.unit)
# Close connection
sg.resource.close()
# Spectrum analyzer example
sa = SpectrumAnalyzer(tcpip='10.0.0.10', driver='esw.yaml', log_level=logging.INFO)
# Reset instrument
sa.reset()
# Set to spectrum analyzer mode
sa.mode = 'SAN'
# Set start frequency to 1MHz
sa.start_frequency = (1, 'MHz')
start = sa.start_frequency
# Create trace object tied to trace 1 and read its contents as a pandas dataframe
t1 = sa.Trace(1)
print(t1.dataframe())
# Create a marker object tied to marker 1
m1 = sa.Marker(1)
# Move the marker to the peak value
m1.goto_max()
# Read out marker frequency and amplitude
amp = m1.amplitude
x = m1.frequency
print(amp, x)
# Close connection
sa.resource.close()
'''
| 31.865248
| 115
| 0.590608
|
c04354b8e88e69033d83d96abfc37f58e5bbf4c8
| 7,453
|
py
|
Python
|
adbutils/extra/minicap/__init__.py
|
hakaboom/adb-utils
|
deb6d62fc636be2969a641df613c1348ea889d6c
|
[
"Apache-2.0"
] | 24
|
2021-07-27T05:23:57.000Z
|
2022-03-31T17:54:43.000Z
|
adbutils/extra/minicap/__init__.py
|
yundouguai/adb-utils
|
1932f57cd45b14412b4894264f9aa237a09b3f32
|
[
"Apache-2.0"
] | 13
|
2021-08-11T09:50:14.000Z
|
2021-12-20T11:37:50.000Z
|
adbutils/extra/minicap/__init__.py
|
yundouguai/adb-utils
|
1932f57cd45b14412b4894264f9aa237a09b3f32
|
[
"Apache-2.0"
] | 12
|
2021-08-17T07:35:46.000Z
|
2022-02-07T02:36:11.000Z
|
# -*- coding: utf-8 -*-
import time
import threading
from loguru import logger
from adbutils.constant import (ANDROID_TMP_PATH, MNC_REMOTE_PATH, MNC_SO_REMOTE_PATH, MNC_CMD, MNC_CAP_LOCAL_PATH,
MNC_LOCAL_NAME, MNC_LOCAL_PATH, MNC_SO_LOCAL_PATH)
from adbutils.extra.minicap.exceptions import MinicapStartError, MinicapServerConnectError
from adbutils import ADBDevice
from adbutils._utils import NonBlockingStreamReader, reg_cleanup, SafeSocket
from adbutils._wraps import threadsafe_generator
from typing import Tuple
import struct
import subprocess
class Minicap(object):
RECVTIMEOUT = None
def __init__(self, device: ADBDevice, rotation_watcher=None):
"""
        Initialize minicap.
        Args:
            device: ADB device object
            rotation_watcher: callback source for screen-rotation updates
"""
self.device = device
        self.MNC_LOCAL_NAME = MNC_LOCAL_NAME.format(device_id=self.device.device_id)  # forward name for minicap on the device
        self.MNC_PORT = None  # local port used to forward minicap to the host
        self.quirk_flag = 0
        self.server_flag = False  # whether the minicap server is running
self.proc = None
self.nbsp = None
self._update_rotation_event = threading.Event()
if rotation_watcher:
rotation_watcher.reg_callback(lambda x: self.update_rotation(x * 90))
self._install_minicap()
def __str__(self):
return f"<minicap ({self.server_flag and 'Start' or 'Close'})> port:{self.MNC_PORT}" \
f"\tlocal_name:{self.MNC_LOCAL_NAME}"
def start_server(self) -> None:
"""
        Start the minicap server.
Raises:
MinicapStartError: minicap server start error
Returns:
None
"""
self._set_minicap_forward()
param = self._get_params()
proc = self.device.start_shell([MNC_CMD, f"-n '{self.MNC_LOCAL_NAME}'", '-P',
"%dx%d@%dx%d/%d 2>&1" % param])
nbsp = NonBlockingStreamReader(proc.stdout)
while True:
line = nbsp.readline(timeout=5)
if line is None:
raise MinicapStartError("minicap server setup timeout")
if b'have different types' in line:
raise MinicapStartError("minicap server setup error")
if b"Server start" in line:
logger.info('minicap server setup')
break
if proc.poll() is not None:
raise MinicapStartError('minicap server quit immediately')
reg_cleanup(proc.kill)
time.sleep(.5)
        # Keep handles so teardown() can actually stop the server process.
        self.proc = proc
        self.nbsp = nbsp
self.server_flag = True
def teardown(self) -> None:
"""
        Stop the minicap server.
Returns:
None
"""
logger.debug('minicap server teardown')
if self.proc:
self.proc.kill()
if self.nbsp:
self.nbsp.kill()
if self.MNC_PORT and self.device.get_forward_port(remote=self.MNC_LOCAL_NAME):
self.device.remove_forward(local=f'tcp:{self.MNC_PORT}')
self.server_flag = False
def _set_minicap_forward(self):
"""
        Set up the local forward port for minicap.
Returns:
None
"""
        # keep the port info after teardown so the next start can reuse it
remote = f'localabstract:{self.MNC_LOCAL_NAME}'
if port := self.device.get_forward_port(remote=remote, device_id=self.device.device_id):
self.MNC_PORT = port
return
self.MNC_PORT = self.MNC_PORT or self.device.get_available_forward_local()
self.device.forward(local=f'tcp:{self.MNC_PORT}', remote=remote)
def _install_minicap(self) -> None:
"""
check if minicap and minicap.so installed
Returns:
None
"""
if not self.device.check_file(ANDROID_TMP_PATH, 'minicap'):
self.device.push(local=MNC_LOCAL_PATH.format(abi_version=self.device.abi_version),
remote=MNC_REMOTE_PATH)
time.sleep(1)
self.device.shell(['chmod', '755', MNC_REMOTE_PATH])
if not self.device.check_file(ANDROID_TMP_PATH, 'minicap.so'):
self.device.push(local=MNC_SO_LOCAL_PATH.format(abi_version=self.device.abi_version,
sdk_version=self.device.sdk_version),
remote=MNC_SO_REMOTE_PATH)
time.sleep(1)
self.device.shell(['chmod', '755', MNC_SO_REMOTE_PATH])
def _get_params(self) -> Tuple[int, int, int, int, int]:
"""
        Get the display-resolution parameters for the minicap command.
        Returns:
            (real_width, real_height, virtual_width, virtual_height, rotation)
"""
display_info = self.device.displayInfo
real_width = display_info['width']
real_height = display_info['height']
real_rotation = display_info['rotation']
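        # Quirk bit 2 ("always upright" in the minicap docs) means the device
        # reports upright dimensions, so width/height are swapped when the
        # display is physically rotated 90/270 degrees.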
if self.quirk_flag & 2 and real_rotation in (90, 270):
params = real_height, real_width, real_height, real_width, 0
else:
params = real_width, real_height, real_width, real_height, real_rotation
return params
def update_rotation(self, rotation):
"""
        Update the screen rotation.
        Args:
            rotation: rotation angle in degrees
Returns:
None
"""
logger.debug("minicap update_rotation: {}", rotation)
self._update_rotation_event.set()
def get_frame(self):
"""
        Grab a screen frame.
        Returns:
            JPEG frame data
"""
if self._update_rotation_event.is_set():
logger.info('minicap update_rotation')
self.teardown()
self.start_server()
self._update_rotation_event.clear()
try:
return self._get_frame()
except (ConnectionRefusedError, OSError) as err:
self.teardown()
raise MinicapServerConnectError(f'{err}')
def _get_frame(self):
s = SafeSocket()
s.connect((self.device.host, self.MNC_PORT))
t = s.recv(24)
# minicap header
global_headers = struct.unpack("<2B5I2B", t)
# Global header binary format https://github.com/openstf/minicap#global-header-binary-format
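        # Field layout, per the linked docs: version (B), header size (B),
        # pid (I), real display width/height (I, I), virtual display
        # width/height (I, I), display orientation (B), quirk bitflags (B);
        # the [-2:] slice below picks out orientation and quirks.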
ori, self.quirk_flag = global_headers[-2:]
if self.quirk_flag & 2 and ori not in (0, 1, 2):
stopping = True
logger.error("quirk_flag found:{}, going to resetup", self.quirk_flag)
else:
stopping = False
if not stopping:
s.send(b"1")
if self.RECVTIMEOUT is not None:
header = s.recv_with_timeout(4, self.RECVTIMEOUT)
else:
header = s.recv(4)
if header is None:
logger.error("minicap header is None")
else:
frame_size = struct.unpack("<I", header)[0]
frame_data = s.recv(frame_size)
s.close()
return frame_data
logger.info('minicap get_frame ends')
s.close()
self.teardown()
if __name__ == '__main__':
import cv2
from baseImage import IMAGE
from adbutils import ADBDevice
from adbutils.extra.minicap import Minicap
device = ADBDevice(device_id='emulator-5554')
minicap = Minicap(device)
minicap.start_server()
while True:
if img := minicap.get_frame():
cv2.imshow('test', IMAGE(img).imread())
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
minicap.teardown()
| 31.447257
| 114
| 0.587012
|
94d506bdd976bd930b419b039ccab407e9d29abf
| 6,099
|
py
|
Python
|
catalyst/rl/offpolicy/algorithms/critic.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | 1
|
2019-11-26T06:41:33.000Z
|
2019-11-26T06:41:33.000Z
|
catalyst/rl/offpolicy/algorithms/critic.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | null | null | null |
catalyst/rl/offpolicy/algorithms/critic.py
|
andrey-avdeev/catalyst
|
fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3
|
[
"Apache-2.0"
] | 1
|
2021-12-20T07:32:25.000Z
|
2021-12-20T07:32:25.000Z
|
from typing import Dict, Union # isort:skip
import copy
from catalyst.rl import utils
from catalyst.rl.core import (
ActorSpec, AlgorithmSpec, CriticSpec, EnvironmentSpec
)
from catalyst.rl.registry import AGENTS
class OffpolicyCritic(AlgorithmSpec):
def __init__(
self,
critic: CriticSpec,
gamma: float,
n_step: int,
critic_loss_params: Dict = None,
critic_optimizer_params: Dict = None,
critic_scheduler_params: Dict = None,
critic_grad_clip_params: Dict = None,
critic_tau: float = 1.0,
**kwargs
):
self._device = utils.get_device()
self.critic = critic.to(self._device)
self.target_critic = copy.deepcopy(critic).to(self._device)
# preparation
critic_components = utils.get_trainer_components(
agent=self.critic,
loss_params=critic_loss_params,
optimizer_params=critic_optimizer_params,
scheduler_params=critic_scheduler_params,
grad_clip_params=critic_grad_clip_params
)
# criterion
self._critic_loss_params = critic_components["loss_params"]
self.critic_criterion = critic_components["criterion"]
# optimizer
self._critic_optimizer_params = critic_components["optimizer_params"]
self.critic_optimizer = critic_components["optimizer"]
# scheduler
self._critic_scheduler_params = critic_components["scheduler_params"]
self.critic_scheduler = critic_components["scheduler"]
# grad clipping
self._critic_grad_clip_params = critic_components["grad_clip_params"]
self.critic_grad_clip_fn = critic_components["grad_clip_fn"]
# other hyperparameters
self._n_step = n_step
self._gamma = gamma
self.critic_tau = critic_tau
# other init
self._init(**kwargs)
def _init(self, **kwargs):
assert len(kwargs) == 0
@property
def n_step(self) -> int:
return self._n_step
@property
def gamma(self) -> float:
return self._gamma
def pack_checkpoint(self, with_optimizer: bool = True):
checkpoint = {}
for key in ["critic"]:
checkpoint[f"{key}_state_dict"] = getattr(self, key).state_dict()
if with_optimizer:
for key2 in ["optimizer", "scheduler"]:
key2 = f"{key}_{key2}"
value2 = getattr(self, key2, None)
if value2 is not None:
checkpoint[f"{key2}_state_dict"] = value2.state_dict()
return checkpoint
def unpack_checkpoint(self, checkpoint, with_optimizer: bool = True):
for key in ["critic"]:
value_l = getattr(self, key, None)
if value_l is not None:
value_r = checkpoint[f"{key}_state_dict"]
value_l.load_state_dict(value_r)
if with_optimizer:
for key2 in ["optimizer", "scheduler"]:
key2 = f"{key}_{key2}"
value_l = getattr(self, key2, None)
if value_l is not None:
value_r = checkpoint[f"{key2}_state_dict"]
value_l.load_state_dict(value_r)
def critic_update(self, loss):
self.critic.zero_grad()
self.critic_optimizer.zero_grad()
loss.backward()
if self.critic_grad_clip_fn is not None:
self.critic_grad_clip_fn(self.critic.parameters())
self.critic_optimizer.step()
        if self.critic_scheduler is not None:
            self.critic_scheduler.step()
            return {"lr_critic": self.critic_scheduler.get_lr()[0]}
        return {}
def target_critic_update(self):
utils.soft_update(self.target_critic, self.critic, self.critic_tau)
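        # Assumed semantics of the utils.soft_update call above (Polyak
        # averaging, inferred from the name):
        #   target_param <- critic_tau * param + (1 - critic_tau) * target_param
        # so critic_tau=1.0 (the default here) is a hard copy of the critic.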
def update_step(self, value_loss, critic_update=True):
"""
Updates parameters of neural networks and returns learning metrics
Args:
value_loss:
critic_update:
Returns:
"""
raise NotImplementedError
def train(self, batch, actor_update=False, critic_update=True):
states_t, actions_t, rewards_t, states_tp1, done_t = \
batch["state"], batch["action"], batch["reward"], \
batch["next_state"], batch["done"]
states_t = utils.any2device(states_t, self._device)
actions_t = utils.any2device(actions_t,
self._device).unsqueeze(1).long()
rewards_t = utils.any2device(rewards_t, self._device).unsqueeze(1)
states_tp1 = utils.any2device(states_tp1, device=self._device)
done_t = utils.any2device(done_t, device=self._device).unsqueeze(1)
"""
states_t: [bs; history_len; observation_len]
actions_t: [bs; 1]
rewards_t: [bs; 1]
states_tp1: [bs; history_len; observation_len]
done_t: [bs; 1]
"""
value_loss = self._loss_fn(
states_t, actions_t, rewards_t, states_tp1, done_t
)
metrics = self.update_step(
value_loss=value_loss, critic_update=critic_update
)
return metrics
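    # _loss_fn is supplied by concrete subclasses; under a typical n-step
    # TD-learning assumption it would build a target
    #   y = rewards_t + gamma ** n_step * (1 - done_t) * Q_target(states_tp1)
    # and return critic_criterion(Q(states_t, actions_t), y).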
@classmethod
def prepare_for_trainer(
cls, env_spec: EnvironmentSpec, config: Dict
) -> "AlgorithmSpec":
config_ = config.copy()
agents_config = config_["agents"]
critic_params = agents_config["critic"]
critic = AGENTS.get_from_params(
**critic_params,
env_spec=env_spec,
)
algorithm = cls(
**config_["algorithm"],
critic=critic,
)
return algorithm
@classmethod
def prepare_for_sampler(
cls, env_spec: EnvironmentSpec, config: Dict
) -> Union[ActorSpec, CriticSpec]:
config_ = config.copy()
agents_config = config_["agents"]
critic_params = agents_config["critic"]
critic = AGENTS.get_from_params(
**critic_params,
env_spec=env_spec,
)
return critic
| 32.967568
| 78
| 0.606329
|
5fdf048733391e31248a0a83599c0cf14cfd3f2a
| 11,452
|
py
|
Python
|
tensorflow/python/eager/def_function_xla_jit_test.py
|
Mithilesh1609/tensorflow
|
63f70b5611d7f50512ea26295d26016c2704901b
|
[
"Apache-2.0"
] | 8
|
2020-07-29T18:50:45.000Z
|
2021-07-25T07:06:43.000Z
|
tensorflow/python/eager/def_function_xla_jit_test.py
|
3ecurityy/tensorflow
|
f8c0e68a8aa5d575a19129ec67c9ed6262652082
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:14:36.000Z
|
2022-02-10T05:39:47.000Z
|
tensorflow/python/eager/def_function_xla_jit_test.py
|
3ecurityy/tensorflow
|
f8c0e68a8aa5d575a19129ec67c9ed6262652082
|
[
"Apache-2.0"
] | 11
|
2020-05-31T13:14:56.000Z
|
2021-12-14T04:39:25.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class DefFunctionTest(test.TestCase):
def testAutoclusteringWithTfFunction(self):
@def_function.function(experimental_compile=False)
def outer(a, b, c):
return a * inner(b, c) + c
@def_function.function(experimental_compile=True)
def inner(b, c):
return b + c * b
i1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i2 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i3 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
with context.collect_graphs(optimized=True) as graphs:
outer(i1, i2, i3)
if test_util.is_xla_enabled():
self.assertIn('_XlaRun', [n.op for n in graphs[0].node])
else:
self.assertNotIn('_XlaRun', [n.op for n in graphs[0].node])
def testBasic(self):
def fn(x, a):
return x + a
func = def_function.function(fn, experimental_compile=False)
xla_func = def_function.function(fn, experimental_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))
if not test.is_built_with_rocm():
# XLA support is not yet enabled for TF ROCm
self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))
def testBasicInt32(self):
def fn(x, a):
return x + a
xla_func = def_function.function(fn, experimental_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3], dtype=dtypes.int32)
if not test.is_built_with_rocm():
# XLA support is not yet enabled for TF ROCm
self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))
def testDerivative(self):
if test.is_built_with_rocm():
return
def fn(x, a):
return 2 * x + a
xla_func = def_function.function(fn, experimental_compile=True)
with backprop.GradientTape() as tape:
inputs = constant_op.constant([1., 2., 2., 3., 3.])
tape.watch(inputs)
outputs = xla_func(inputs, 1)
self.assertAllClose([2, 2, 2, 2, 2], tape.gradient(outputs, inputs))
# pylint: disable=protected-access
(forward, backward) = xla_func.get_concrete_function(
inputs, 1)._delayed_rewrite_functions.forward_backward()
# Check that the must-compile attribute gets correctly propagated to the
# created derivatives.
self.assertTrue(backward.function_def.attr['_XlaMustCompile'])
self.assertTrue(forward.definition.attr['_XlaMustCompile'])
# Calling function with experimental_compile=True from
# experimental_compile=False should compile the inner func.
def testNestedCall(self):
def fn(x, a):
return x + a
xla_func = def_function.function(fn, experimental_compile=True)
def fn2(x, a):
return xla_func(x, a)
func = def_function.function(fn2, experimental_compile=False)
inputs = constant_op.constant([1, 2, 2, 3, 3])
if not test.is_built_with_rocm():
# XLA support is not yet enabled for TF ROCm
self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))
def testNestedCallUnsupportedOps(self):
def fn(x):
return array_ops.unique(x).y
xla_func = def_function.function(fn, experimental_compile=True)
def fn2(x):
return xla_func(x)
func = def_function.function(fn2, experimental_compile=False)
inputs = constant_op.constant([1, 2, 2, 3, 3])
if not test.is_built_with_rocm():
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'not compilable'):
func(inputs)
def testUnsupportedOps(self):
def fn(x):
return array_ops.unique(x).y # Unique is not supported by XLA
func = def_function.function(fn, experimental_compile=False)
xla_func = def_function.function(fn, experimental_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([1, 2, 3], func(inputs))
with self.assertRaisesRegexp(errors.InvalidArgumentError, 'not compilable'):
xla_func(inputs)
def testFunctionGradient(self):
v = resource_variable_ops.ResourceVariable(2.0)
def fn(x):
return v * x
func = def_function.function(fn, experimental_compile=False)
xla_func = def_function.function(fn, experimental_compile=True)
def run_and_check(test_func):
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = test_func(x)
dy = tape.gradient(y, v)
self.assertAllClose(6.0, y)
self.assertAllClose(3.0, dy)
run_and_check(func)
if not test.is_built_with_rocm():
# XLA support is not yet enabled for TF ROCm
run_and_check(xla_func)
def testControlFlow(self):
@def_function.function(experimental_compile=True)
def f(x):
assert control_flow_util.GraphOrParentsInXlaContext(
ops.get_default_graph())
x = ops.convert_to_tensor(x)
def body(i, a):
return i + 1, control_flow_ops.cond(i > 2, lambda: a + (x**2),
lambda: a + 3)
return control_flow_ops.while_loop(
lambda i, *_: i < 10,
body, (constant_op.constant(0), constant_op.constant(3.)),
maximum_iterations=10)[1]
@def_function.function(experimental_compile=True)
def g(x):
x = ops.convert_to_tensor(x)
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return y, tape.gradient(y, x)
self.assertAllClose(40.0, f(2.0))
self.assertAllClose([40.0, 28.0], g(2.0))
def testMethodCompilation(self):
if test.is_built_with_rocm():
return
class C(object):
@def_function.function(experimental_compile=True)
def f1(self, x, a):
return x + a
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
self.assertAllClose([2, 3, 3, 4, 4], c.f1(inputs, 1))
def testMethodCompilationUnsupportedFunc(self):
if test.is_built_with_rocm():
return
class C(object):
@def_function.function(experimental_compile=True)
def f1(self, x):
return array_ops.unique(x).y
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
with self.assertRaisesRegexp(errors.InvalidArgumentError, 'not compilable'):
c.f1(inputs)
def testMustBeConstantPropagation(self):
if test.is_built_with_rocm():
return
@def_function.function(experimental_compile=True)
def f():
return constant_op.constant([0, 2, 1], dtype=dtypes.int32)
@def_function.function(experimental_compile=True)
def g(a, b):
return array_ops.transpose(a, b)
@def_function.function
def z():
return g(array_ops.ones([3, 4, 3], dtype=dtypes.float32), f())
z()
def testArgMinMax(self):
@def_function.function(experimental_compile=True)
def argmax(x):
return math_ops.argmax(x)
@def_function.function(experimental_compile=True)
def argmin(x):
return math_ops.argmin(x)
self.assertAllClose(0, argmax(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmax(array_ops.ones([10])))
self.assertAllClose(0, argmin(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmin(array_ops.ones([10])))
def testErrorMessagePassingTensorArray(self):
@def_function.function(experimental_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=1, element_shape=[])
ta = ta.write(0, 2 * x)
y = ta.read(0)
return y
x = constant_op.constant(3.14)
with backprop.GradientTape() as tape:
tape.watch(x)
with self.assertRaisesRegexp(
errors.UnimplementedError,
'TensorList crossing the XLA/TF boundary'):
y = f(x)
tape.gradient(y, x)
def testTensorListConcatV2(self):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(experimental_compile=True)(f)
inputs = constant_op.constant([3.14, 2.68, 7.69])
self.assertAllClose([6.28, 5.36, 15.38, 9.42, 8.04, 23.07], f(inputs))
self.assertAllClose(compiled_f(inputs), f(inputs))
def testTensorListConcatV2Multidim(self):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3, 2])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(experimental_compile=True)(f)
inputs = constant_op.constant([[3.14, 21.1], [2.68, 22.2], [7.69, 23.3]])
self.assertAllClose(f(inputs), compiled_f(inputs))
def testTensorListConcatV2Scalars(self):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[1])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(experimental_compile=True)(f)
inputs = constant_op.constant([3.14])
self.assertAllClose(f(inputs), compiled_f(inputs))
def testTensorListConcatGrad(self):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return tape.gradient(y, x)
compiled_g = def_function.function(experimental_compile=True)(g)
self.assertAllClose([5.0, 5.0, 5.0], g())
self.assertAllClose(compiled_g(), g())
def testCumsum(self):
@def_function.function(experimental_compile=True)
def f(x):
return math_ops.cumsum(x)
f64_input = constant_op.constant([1.1, 2.2, 3.3], dtype=dtypes.float64)
self.assertAllClose([1.1, 3.3, 6.6], f(f64_input))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| 30.867925
| 80
| 0.668442
|
ec456a9461cfa53a2f0be8a212d2c2b9b280fe49
| 12,504
|
py
|
Python
|
projectinfo/test.py
|
lifecake/Project-build-info
|
6594c41933af3351d5ad62510cd84f004f675f9a
|
[
"Apache-2.0"
] | null | null | null |
projectinfo/test.py
|
lifecake/Project-build-info
|
6594c41933af3351d5ad62510cd84f004f675f9a
|
[
"Apache-2.0"
] | null | null | null |
projectinfo/test.py
|
lifecake/Project-build-info
|
6594c41933af3351d5ad62510cd84f004f675f9a
|
[
"Apache-2.0"
] | null | null | null |
import json
#
# {
# "applicationId":"com.neulion.f",
# "applicationName":"f",
# "applicationVersion":"8.0602",
# "librarys": [
# {
# "libraryName":"track-core",
# "libraryVersion":"6"
# },{
# "libraryName":"track-ga",
# "libraryVersion":"6"
# },{
# "libraryName":"player",
# "libraryVersion":"5.0.0"
# }
# ]
# }
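# Minimal, hypothetical example of the payload shape sketched above; a real
# handler would decode it with the json module imported at the top:
example_payload = json.loads(
    '{"applicationId": "com.example.app", "applicationName": "example",'
    ' "applicationVersion": "1.0",'
    ' "librarys": [{"libraryName": "track-core", "libraryVersion": "6"}]}'
)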
tempdata = [{"package":"com.neulion.firetv.ufc.android.amazon.dev","packageName":"ufc-tv","productFlavorName":"amazon","packageVersionCode":"103","packageVersionName":"8.0613","packageTargetSdk":"27","packageMiniSdk":"21","deepLinkScheme":"amazon_ufctv","packageMappingUrl":"","libraryCoordinateList":[{"group":"com.android.tools.lint","name":"lint-gradle","currentVersion":"26.1.2"},{"group":"org.jetbrains.kotlin","name":"kotlin-annotation-processing-gradle","currentVersion":"1.2.41"},{"group":"com.crashlytics.sdk.android","name":"crashlytics","currentVersion":"2.8.0"},{"group":"com.squareup.leakcanary","name":"leakcanary-android","currentVersion":"1.5.4"},{"group":"com.squareup.leakcanary","name":"leakcanary-android-no-op","currentVersion":"1.5.4"},{"group":"org.jetbrains.kotlin","name":"kotlin-stdlib-jdk8","currentVersion":"1.2.41"},{"group":"com.google.android.gms","name":"play-services-analytics","currentVersion":"11.8.0"},{"group":"com.android.databinding","name":"baseLibrary","currentVersion":"3.1.2"},{"group":"com.neulion.android.app","name":"core","currentVersion":"0.5.3-SNAPSHOT"},{"group":"com.neulion.android.iap","name":"iap-amazon","currentVersion":"2.1.0-SNAPSHOT"},{"group":"android.arch.lifecycle","name":"extensions","currentVersion":"1.1.1"},{"group":"com.android.support","name":"support-annotations","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"adapters","currentVersion":"3.1.2"},{"group":"com.android.support","name":"recyclerview-v7","currentVersion":"27.0.2"},{"group":"com.jakewharton","name":"butterknife-compiler","currentVersion":"8.8.1"},{"group":"com.neulion.android","name":"service-v5","currentVersion":"3.0.12"},{"group":"com.android.databinding","name":"library","currentVersion":"3.1.2"},{"group":"com.android.support","name":"appcompat-v7","currentVersion":"27.0.2"},{"group":"com.android.support","name":"leanback-v17","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"compiler","currentVersion":"3.1.2"},{"group":"com.jakewharton","name":"butterknife","currentVersion":"8.8.1"},{"group":"com.neulion.android.media","name":"NeuPlayer","currentVersion":"4.7.2-SNAPSHOT"},{"group":"com.android.support","name":"design","currentVersion":"27.0.2"},{"group":"com.android.support","name":"multidex-instrumentation","currentVersion":"1.0.2"},{"group":"com.android.support","name":"cardview-v7","currentVersion":"27.0.2"},{"group":"com.neulion.android.iap","name":"iap-google","currentVersion":"2.1.0-SNAPSHOT"},{"group":"com.neulion.android.tracking","name":"tracker-ga","currentVersion":"4.3.2"},{"group":"com.neulion.android","name":"uikit-fresco","currentVersion":"1.1.12-SNAPSHOT"},{"group":"com.neulion.android","name":"appengine","currentVersion":"2.4.0"},{"group":"com.android.support","name":"multidex","currentVersion":"1.0.2"},{"group":"com.android.support","name":"support-v4","currentVersion":"27.0.2"},{"group":"com.android.support.constraint","name":"constraint-layout","currentVersion":"1.1.0"},{"group":"uk.co.chrisjenx","name":"calligraphy","currentVersion":"2.3.0"},{"group":"com.neulion.android","name":"commonparser","currentVersion":"3.0.4"}]},{"package":"com.neulion.firetv.ufc.android.amazon.dev","packageName":"ufc-tv","productFlavorName":"google","packageVersionCode":"103","packageVersionName":"8.0613","packageTargetSdk":"27","packageMiniSdk":"21","deepLinkScheme":"google_ufctv","packageMappingUrl":"","libraryCoordinateList":[{"group":"com.android.tools.lint","name":"lint-gradle","currentVersion":"26.1.2"},{"group":"org.jetbrains.kotl
in","name":"kotlin-annotation-processing-gradle","currentVersion":"1.2.41"},{"group":"com.crashlytics.sdk.android","name":"crashlytics","currentVersion":"2.8.0"},{"group":"com.squareup.leakcanary","name":"leakcanary-android","currentVersion":"1.5.4"},{"group":"com.squareup.leakcanary","name":"leakcanary-android-no-op","currentVersion":"1.5.4"},{"group":"org.jetbrains.kotlin","name":"kotlin-stdlib-jdk8","currentVersion":"1.2.41"},{"group":"com.google.android.gms","name":"play-services-analytics","currentVersion":"11.8.0"},{"group":"com.android.databinding","name":"baseLibrary","currentVersion":"3.1.2"},{"group":"com.neulion.android.app","name":"core","currentVersion":"0.5.3-SNAPSHOT"},{"group":"com.neulion.android.iap","name":"iap-amazon","currentVersion":"2.1.0-SNAPSHOT"},{"group":"android.arch.lifecycle","name":"extensions","currentVersion":"1.1.1"},{"group":"com.android.support","name":"support-annotations","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"adapters","currentVersion":"3.1.2"},{"group":"com.android.support","name":"recyclerview-v7","currentVersion":"27.0.2"},{"group":"com.jakewharton","name":"butterknife-compiler","currentVersion":"8.8.1"},{"group":"com.neulion.android","name":"service-v5","currentVersion":"3.0.12"},{"group":"com.android.databinding","name":"library","currentVersion":"3.1.2"},{"group":"com.android.support","name":"appcompat-v7","currentVersion":"27.0.2"},{"group":"com.android.support","name":"leanback-v17","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"compiler","currentVersion":"3.1.2"},{"group":"com.jakewharton","name":"butterknife","currentVersion":"8.8.1"},{"group":"com.neulion.android.media","name":"NeuPlayer","currentVersion":"4.7.2-SNAPSHOT"},{"group":"com.android.support","name":"design","currentVersion":"27.0.2"},{"group":"com.android.support","name":"multidex-instrumentation","currentVersion":"1.0.2"},{"group":"com.android.support","name":"cardview-v7","currentVersion":"27.0.2"},{"group":"com.neulion.android.iap","name":"iap-google","currentVersion":"2.1.0-SNAPSHOT"},{"group":"com.neulion.android.tracking","name":"tracker-ga","currentVersion":"4.3.2"},{"group":"com.neulion.android","name":"uikit-fresco","currentVersion":"1.1.12-SNAPSHOT"},{"group":"com.neulion.android","name":"appengine","currentVersion":"2.4.0"},{"group":"com.android.support","name":"multidex","currentVersion":"1.0.2"},{"group":"com.android.support","name":"support-v4","currentVersion":"27.0.2"},{"group":"com.android.support.constraint","name":"constraint-layout","currentVersion":"1.1.0"},{"group":"uk.co.chrisjenx","name":"calligraphy","currentVersion":"2.3.0"},{"group":"com.neulion.android","name":"commonparser","currentVersion":"3.0.4"}]},{"package":"com.neulion.firetv.ufc.android.amazon.dev","packageName":"ufc-tv","productFlavorName":"prod","packageVersionCode":"103","packageVersionName":"8.0613","packageTargetSdk":"27","packageMiniSdk":"21","deepLinkScheme":"prod_ufctv","packageMappingUrl":"","libraryCoordinateList":[{"group":"com.android.tools.lint","name":"lint-gradle","currentVersion":"26.1.2"},{"group":"org.jetbrains.kotlin","name":"kotlin-annotation-processing-gradle","currentVersion":"1.2.41"},{"group":"com.crashlytics.sdk.android","name":"crashlytics","currentVersion":"2.8.0"},{"group":"com.squareup.leakcanary","name":"leakcanary-android","currentVersion":"1.5.4"},{"group":"com.squareup.leakcanary","name":"leakcanary-android-no-op","currentVersion":"1.5.4"},{"group":"org.jetbrains.kotlin","name":"kotlin-stdlib-jdk8","currentVe
rsion":"1.2.41"},{"group":"com.google.android.gms","name":"play-services-analytics","currentVersion":"11.8.0"},{"group":"com.android.databinding","name":"baseLibrary","currentVersion":"3.1.2"},{"group":"com.neulion.android.app","name":"core","currentVersion":"0.5.3-SNAPSHOT"},{"group":"com.neulion.android.iap","name":"iap-amazon","currentVersion":"2.1.0-SNAPSHOT"},{"group":"android.arch.lifecycle","name":"extensions","currentVersion":"1.1.1"},{"group":"com.android.support","name":"support-annotations","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"adapters","currentVersion":"3.1.2"},{"group":"com.android.support","name":"recyclerview-v7","currentVersion":"27.0.2"},{"group":"com.jakewharton","name":"butterknife-compiler","currentVersion":"8.8.1"},{"group":"com.neulion.android","name":"service-v5","currentVersion":"3.0.12"},{"group":"com.android.databinding","name":"library","currentVersion":"3.1.2"},{"group":"com.android.support","name":"appcompat-v7","currentVersion":"27.0.2"},{"group":"com.android.support","name":"leanback-v17","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"compiler","currentVersion":"3.1.2"},{"group":"com.jakewharton","name":"butterknife","currentVersion":"8.8.1"},{"group":"com.neulion.android.media","name":"NeuPlayer","currentVersion":"4.7.2-SNAPSHOT"},{"group":"com.android.support","name":"design","currentVersion":"27.0.2"},{"group":"com.android.support","name":"multidex-instrumentation","currentVersion":"1.0.2"},{"group":"com.android.support","name":"cardview-v7","currentVersion":"27.0.2"},{"group":"com.neulion.android.iap","name":"iap-google","currentVersion":"2.1.0-SNAPSHOT"},{"group":"com.neulion.android.tracking","name":"tracker-ga","currentVersion":"4.3.2"},{"group":"com.neulion.android","name":"uikit-fresco","currentVersion":"1.1.12-SNAPSHOT"},{"group":"com.neulion.android","name":"appengine","currentVersion":"2.4.0"},{"group":"com.android.support","name":"multidex","currentVersion":"1.0.2"},{"group":"com.android.support","name":"support-v4","currentVersion":"27.0.2"},{"group":"com.android.support.constraint","name":"constraint-layout","currentVersion":"1.1.0"},{"group":"uk.co.chrisjenx","name":"calligraphy","currentVersion":"2.3.0"},{"group":"com.neulion.android","name":"commonparser","currentVersion":"3.0.4"}]}]
print(type(tempdata))
print(len(tempdata))
for dict1 in tempdata:
    print(dict1)
    print(type(dict1))
    db = get_db()  # get_db() is assumed to be the surrounding Flask app's database helper
    error = None
    if dict1['package'] is None:
        error = 'Missing package'
    elif dict1['packageName'] is None:
        error = 'Missing packageName'
    elif dict1['packageVersionCode'] is None:
        error = 'Missing packageVersionCode'
    elif dict1['libraryCoordinateList'] is None:
        error = 'Missing Library info'
    # if already exist just update.
    elif db.execute(
            'SELECT package, packageVersionName, productFlavorName FROM Package WHERE package = ? and '
            'packageVersionName = ? and productFlavorName = ?',
            (dict1['package'], dict1['packageVersionName'], dict1['productFlavorName'])).fetchone() is not None:
        # print('Found')
        db.execute(
            'UPDATE Package SET packageName = ?, packageVersionCode = ?,productFlavorName = ?, packageTargetSdk = ?, '
            'packageMiniSdk = ?, packageMappingUrl = ?, deepLinkScheme = ? WHERE package = ? and '
            'packageVersionName = ?', (dict1['packageName'], dict1['packageVersionCode'], dict1['productFlavorName'],
                                       dict1['packageTargetSdk'], dict1['packageMiniSdk'],
                                       dict1['packageMappingUrl'], dict1['deepLinkScheme'], dict1['package'],
                                       dict1['packageVersionName'])
        )
        db.execute(
            'UPDATE Package SET date = datetime(\'now\', \'localtime\') WHERE package = ? and '
            'packageVersionName = ?', (dict1['package'], dict1['packageVersionName'])
        )
        db.commit()
        id = db.execute(
            'select id from PackageLibrary WHERE package = ? and packageVersionName = ? and productFlavorName = ?',
            (dict1['package'], dict1['packageVersionName'], dict1['productFlavorName']))
        pids = [dict(id=row[0]) for row in id.fetchall()]
        ids = []
        for item in pids:
            ids.append(item['id'])
        # print(ids)
        i = 0
        for dict2 in dict1['libraryCoordinateList']:
            db.execute(
                'UPDATE PackageLibrary SET package = ?, packageName = ?, productFlavorName = ?, packageVersionName = ?, '
                'libraryGroup = ?, libraryName = ?, libraryVersion = ? WHERE package = ? and '
                'packageVersionName = ? and id = ?', (dict1['package'], dict1['packageName'], dict1['productFlavorName'],
                                                      dict1['packageVersionName'], dict2['group'],
                                                      dict2['name'], dict2['currentVersion'],
                                                      dict1['package'], dict1['packageVersionName'],
                                                      ids[i])
            )
            db.commit()
            i = i + 1
        # print('Update')
        error = 'Project Info Updated'
    # print(error)
    # insert new data
| 142.090909
| 9,437
| 0.678183
|
9e5c52b6cf5d1d4f09346b9729877dfc22605e0d
| 922
|
py
|
Python
|
src/cpapi/_api/__init__.py
|
cpcgskill/cpapi
|
50ebd86637aec5aab47dff5561f1f1b54e967d64
|
[
"Apache-2.0"
] | 1
|
2021-09-11T08:06:59.000Z
|
2021-09-11T08:06:59.000Z
|
src/cpapi/_api/__init__.py
|
cpcgskill/cpapi
|
50ebd86637aec5aab47dff5561f1f1b54e967d64
|
[
"Apache-2.0"
] | null | null | null |
src/cpapi/_api/__init__.py
|
cpcgskill/cpapi
|
50ebd86637aec5aab47dff5561f1f1b54e967d64
|
[
"Apache-2.0"
] | null | null | null |
# -*-coding:utf-8 -*-
u"""
:Created: 2021/11/28 0:17
:Author: 苍之幻灵
:Homepage: https://cpcgskill.com
:QQ: 2921251087
:Afdian: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
import os
from . import _run_lib
from . import (OpenMaya,
OpenMayaAnim,
OpenMayaFX,
OpenMayaRender,
OpenMayaUI,
OpenMayaMPx)
if os.environ.get("CPAPI_DEBUG"):
from imp import reload
reload(_run_lib)
[reload(m) for m in (OpenMaya,
OpenMayaAnim,
OpenMayaFX,
OpenMayaRender,
OpenMayaUI,
OpenMayaMPx)]
__all__ = ["OpenMaya",
"OpenMayaAnim",
"OpenMayaFX",
"OpenMayaRender",
"OpenMayaUI",
"OpenMayaMPx", ]
| 24.263158
| 47
| 0.52603
|
6cc12c1f2dfd67def08699f3e820417935576ccb
| 342
|
py
|
Python
|
app/migrations/0024_remove_submittedplaylist_playlist.py
|
kindweb223/React-Django-M3U8
|
ee3edfabcf4e4423e96591df7cf42f8d89813292
|
[
"Apache-2.0"
] | 1
|
2020-12-28T07:54:05.000Z
|
2020-12-28T07:54:05.000Z
|
app/migrations/0024_remove_submittedplaylist_playlist.py
|
kindweb223/React-Django-M3U8
|
ee3edfabcf4e4423e96591df7cf42f8d89813292
|
[
"Apache-2.0"
] | 11
|
2020-06-05T17:35:01.000Z
|
2022-02-26T07:00:16.000Z
|
app/migrations/0024_remove_submittedplaylist_playlist.py
|
kindweb223/React-Django-M3U8
|
ee3edfabcf4e4423e96591df7cf42f8d89813292
|
[
"Apache-2.0"
] | 1
|
2021-11-02T11:35:45.000Z
|
2021-11-02T11:35:45.000Z
|
# Generated by Django 2.0.6 on 2018-06-10 13:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0023_remove_channel_playlist'),
]
operations = [
migrations.RemoveField(
model_name='submittedplaylist',
name='playlist',
),
]
| 19
| 48
| 0.608187
|
a1b794740acbca6a99e4484cd005846cd5cb76ca
| 1,093
|
py
|
Python
|
iotbx/regression/tst_reindex.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
iotbx/regression/tst_reindex.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
iotbx/regression/tst_reindex.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division, print_function
import libtbx.load_env
from libtbx.test_utils import approx_equal
from libtbx.utils import null_out
import os
def exercise():
hkl_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/reflection_files/1yjp.mtz",
test=os.path.isfile)
if (hkl_file is None):
print "Skipping"
return
from iotbx.command_line import reindex
from iotbx import file_reader
output_file = reindex.run(args=[hkl_file, "change_of_basis=c,b,a",
"output_file=tmp666.mtz"], out=null_out())
assert os.path.isfile(output_file)
arrays_in = file_reader.any_file(hkl_file).file_server.miller_arrays
arrays_out = file_reader.any_file(output_file).file_server.miller_arrays
assert (approx_equal(arrays_in[0].unit_cell().parameters(),
(21.937, 4.866, 23.477, 90.0, 107.08, 90.0), eps=0.001))
assert (approx_equal(arrays_out[0].unit_cell().parameters(),
(23.477, 4.866, 21.937, 90.0, 107.08, 90.0), eps=0.001))
assert (arrays_out[0].info().labels == ["FOBS_X","SIGFOBS_X"])
if (__name__ == "__main__"):
exercise()
print "OK"
| 35.258065
| 74
| 0.73559
|
19bb171a1474b58de771aad125a56f8f720398f4
| 2,605
|
py
|
Python
|
test/integration/ggrc/models/mixins/test_stateful.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-12T23:46:00.000Z
|
2019-01-12T23:46:00.000Z
|
test/integration/ggrc/models/mixins/test_stateful.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc/models/mixins/test_stateful.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration test for Stateful mixin."""
import ddt
from integration.ggrc import api_helper
from integration.ggrc.query_helper import WithQueryApi
from integration.ggrc import TestCase
from integration.ggrc.models import factories
from ggrc.models import get_model
from ggrc.models.mixins import synchronizable
@ddt.ddt
class TestStatefulMixin(WithQueryApi, TestCase):
"""Test cases for Stateful mixin."""
def setUp(self):
super(TestStatefulMixin, self).setUp()
self.client.get("/login")
self.api = api_helper.Api()
@ddt.data(
"AccessGroup",
"Audit",
"Control",
"DataAsset",
"Requirement",
"Facility",
"Issue",
"Market",
"Objective",
"OrgGroup",
"Product",
"Program",
"Project",
"Risk",
"Requirement",
"Threat",
"Vendor",
"ProductGroup",
"TechnologyEnvironment",
)
def test_update_status(self, model_name):
"""Test status updating for {0}."""
factory = factories.get_model_factory(model_name)
# pylint: disable=protected-access
if issubclass(factory._meta.model, synchronizable.Synchronizable):
self.api.login_as_external()
obj = factory()
object_name = obj._inflector.table_singular
for status in obj.VALID_STATES:
# Try to update status.
response = self.api.put(obj, {u"status": status})
self.assert200(response)
# Check that status has been updated.
response = self.api.get(get_model(model_name), obj.id)
self.assert200(response)
new_status = response.json.get(object_name, {}).get("status")
self.assertEqual(new_status, status)
@ddt.data(
"AccessGroup",
"Audit",
"Control",
"DataAsset",
"Requirement",
"Facility",
"Issue",
"Market",
"Objective",
"OrgGroup",
"Product",
"Program",
"Project",
"Risk",
"Requirement",
"Threat",
"Vendor",
"ProductGroup",
"TechnologyEnvironment",
)
def test_set_invalid_status(self, model_name):
"""Test returning 400 code for setting invalid status."""
factory = factories.get_model_factory(model_name)
# pylint: disable=protected-access
if issubclass(factory._meta.model, synchronizable.Synchronizable):
self.api.login_as_external()
obj = factory()
invalid_status = u"Invalid status."
response = self.api.put(obj, {u"status": invalid_status})
self.assert400(response)
| 26.05 | 78 | 0.651056 |
c885f0065000b73ac53a35b722ccb4a73be07dd2 | 460 | py | Python |
askbotopenmooc/wsgi.py | OpenMOOC/askbot-openmooc | 8534e435247b6217d5cbc62b9eac15c8f75eba62 | ["Apache-2.0"] | 3 | 2015-03-17T09:17:43.000Z | 2017-02-06T21:18:42.000Z |
askbotopenmooc/wsgi.py | OpenMOOC/askbot-openmooc | 8534e435247b6217d5cbc62b9eac15c8f75eba62 | ["Apache-2.0"] | null | null | null |
askbotopenmooc/wsgi.py | OpenMOOC/askbot-openmooc | 8534e435247b6217d5cbc62b9eac15c8f75eba62 | ["Apache-2.0"] | 2 | 2016-08-02T15:44:30.000Z | 2017-07-17T05:11:35.000Z |
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'askbotopenmooc.settings'
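# Prefer an explicit LOCAL_SETTINGS_PATH from the environment; fall back to
# the system-wide /etc/openmooc/askbot configuration directory.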
try:
localsettings = os.environ['LOCAL_SETTINGS_PATH']
sys.path.insert(0, localsettings)
except KeyError:
sys.path.insert(0, '/etc/openmooc/askbot')
sys.path.insert(0, os.getcwd())
def application(environ, start_response):
from django.core.wsgi import get_wsgi_application
django_app = get_wsgi_application()
return django_app(environ, start_response)
| 27.058824 | 64 | 0.752174 |
9053ec91cc1458d45f6bb059cae38cde098208b5 | 3,540 | py | Python |
plenum/test/view_change/test_catchup_to_next_view_during_view_change_by_primary.py | Toktar/indy-plenum | 2f1f838332b0506f8dd8837ac341cba0cd3f7ff4 | ["Apache-2.0"] | null | null | null |
plenum/test/view_change/test_catchup_to_next_view_during_view_change_by_primary.py | Toktar/indy-plenum | 2f1f838332b0506f8dd8837ac341cba0cd3f7ff4 | ["Apache-2.0"] | null | null | null |
plenum/test/view_change/test_catchup_to_next_view_during_view_change_by_primary.py | Toktar/indy-plenum | 2f1f838332b0506f8dd8837ac341cba0cd3f7ff4 | ["Apache-2.0"] | null | null | null |
import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.test.delayers import delay_for_view
from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check, waitForViewChange, view_change_timeout
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.stasher import delay_rules
from plenum.test.test_node import checkProtocolInstanceSetup, ensureElectionsDone
nodeCount = 7
VIEW_CHANGE_TIMEOUT = 5
@pytest.fixture(scope="module")
def tconf(tconf):
with view_change_timeout(tconf, VIEW_CHANGE_TIMEOUT):
yield tconf
def test_catchup_to_next_view_during_view_change_by_primary(txnPoolNodeSet, looper,
sdk_pool_handle, sdk_wallet_steward):
'''
1) Lagging node is a primary for view=1
2) All nodes except the lagging one start a view change (to view=1)
3) The nodes can not finish it on time since the Primary for view=1 is lagging
4) All nodes except the lagging one go to view=2 then
5) All nodes except the lagging one order txns on view=2
6) Lagging node gets InstanceChanges for view=1 => it changes to view=2, and catches up till txns from view=2
7) Lagging node gets InstanceChanges for view=2 => it changes to view=2
8) Make sure that the lagging node is up to date, and can participate in consensus
'''
lagging_node = txnPoolNodeSet[1]
other_nodes = list(set(txnPoolNodeSet) - {lagging_node})
initial_view_no = checkViewNoForNodes(txnPoolNodeSet)
initial_last_ordered = lagging_node.master_last_ordered_3PC
with delay_rules(lagging_node.nodeIbStasher, delay_for_view(viewNo=2)):
with delay_rules(lagging_node.nodeIbStasher, delay_for_view(viewNo=0), delay_for_view(viewNo=1)):
            # view change to viewNo=2, since the primary for viewNo=1 is the lagging node
for n in txnPoolNodeSet:
n.view_changer.on_master_degradation()
waitForViewChange(looper,
other_nodes,
expectedViewNo=initial_view_no + 2,
customTimeout=30)
checkProtocolInstanceSetup(looper=looper, nodes=other_nodes, instances=range(3))
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# order some txns
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_steward, 5)
assert initial_view_no == lagging_node.viewNo
assert initial_last_ordered == lagging_node.master_last_ordered_3PC
assert len(lagging_node.master_replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID]) > 0
# make sure that the first View Change happened on lagging node
waitForViewChange(looper, [lagging_node], expectedViewNo=initial_view_no + 1,
customTimeout=20)
assert initial_view_no + 1 == lagging_node.viewNo
# make sure that the second View Change happened on lagging node
waitForViewChange(looper, [lagging_node], expectedViewNo=initial_view_no + 2,
customTimeout=20)
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
# make sure that the pool is functional
sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle)
| 49.859155 | 117 | 0.716384 |
004554929c99fa509481a37e5de1141e0426118d | 290 | py | Python |
tag/player.py | kendase3/every | 83b543c54a2dd071f0a34f128c5baa20a7e58def | ["BSD-2-Clause"] | 1 | 2017-06-12T18:36:20.000Z | 2017-06-12T18:36:20.000Z |
tag/player.py | kendase3/every | 83b543c54a2dd071f0a34f128c5baa20a7e58def | ["BSD-2-Clause"] | null | null | null |
tag/player.py | kendase3/every | 83b543c54a2dd071f0a34f128c5baa20a7e58def | ["BSD-2-Clause"] | null | null | null |
# remember that ord('@') trick
class Player:
def __init__(self, id, color, isIt, x, y):
self.id = id
self.color = color
self.isIt = isIt
self.x = x
self.y = y
def __repr__(self):
return "hi!"
#return "number=%d" % self.number
def __str__(self):
return repr(self)
| 16.111111 | 43 | 0.617241 |
860fefe5014696d7380eb92e0af086e6a1d8085e | 272 | py | Python |
packages/task-scheduler/task_scheduler/models/pydantic_models.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | ["Apache-2.0"] | null | null | null |
packages/task-scheduler/task_scheduler/models/pydantic_models.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | ["Apache-2.0"] | 1 | 2020-12-01T20:25:32.000Z | 2020-12-01T20:25:32.000Z |
packages/task-scheduler/task_scheduler/models/pydantic_models.py | baviera08/romi-dashboard | ac3a15014ad3c3bdac523a6550934a06653cfba1 | ["Apache-2.0"] | null | null | null |
from tortoise.contrib.pydantic import pydantic_model_creator
from .tortoise_models import ScheduledTask, TaskRule
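# Derive Pydantic schemas from the Tortoise ORM models so they can be used
# for API (de)serialization.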
TaskRule_Pydantic = pydantic_model_creator(TaskRule, name="TaskRule")
ScheduledTask_Pydantic = pydantic_model_creator(ScheduledTask, name="ScheduledTask")
| 34 | 84 | 0.860294 |
88bb448b81e46bbc9a0dae96a3587159507bff06 | 1,724 | py | Python |
detect_scale.py | PBrockmann/StripesCounter | cd1d3daf8da335308659c24facaa373917e8942f | ["MIT"] | 2 | 2021-02-05T17:40:25.000Z | 2021-12-01T02:45:38.000Z |
detect_scale.py | PBrockmann/StripesCounter | cd1d3daf8da335308659c24facaa373917e8942f | ["MIT"] | 8 | 2021-04-06T19:47:41.000Z | 2021-05-17T15:08:21.000Z |
detect_scale.py | PBrockmann/StripesCounter | cd1d3daf8da335308659c24facaa373917e8942f | ["MIT"] | 1 | 2021-04-06T18:29:35.000Z | 2021-04-06T18:29:35.000Z |
#!/usr/bin/env python
#=================================================================
# Author: Patrick Brockmann CEA/DRF/LSCE - Feb 2021
#=================================================================
# Test detection of the scale
# Usage: ./detect_scale.py BEL17-2-2_1.35x_haut0001.png
#------------------------------------------------------------------
import sys, re, os
import numpy as np
import cv2
import pytesseract
#------------------------------------------------------------------
imageFileName = sys.argv[1]
print("File: ", imageFileName)
#------------------------------------------------------------------
img = cv2.imread(imageFileName)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
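# Binary mask of pixels with gray value exactly 0 (the scale bar and its
# label are assumed to be drawn in pure black).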
mask = cv2.inRange(gray, 0, 0)
print("Writing mask.png")
cv2.imwrite('mask.png', mask)
#------------------------------------------------------------------
try:
scaleDetected = pytesseract.image_to_string(mask)
matchObj = re.match(r'[^0-9]*([0-9]*)mm', scaleDetected.strip())
scaleValue = float(matchObj.group(1))
print("Detected scale value: ", scaleValue)
except Exception:
print("Detected scale value: not possible")
#------------------------------------------------------------------
try:
fld = cv2.ximgproc.createFastLineDetector()
lines = fld.detect(mask)
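    # Take the longest detected line segment as the scale bar.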
scaleLength = 0
for line in lines:
point1 = np.array((line[0][0],line[0][1]))
point2 = np.array((line[0][2],line[0][3]))
length = np.linalg.norm(point1 - point2)
        #print(point1, point2, length)
if (length > scaleLength):
scaleLength = int(length)
print("Detected scale length in pixel: ", scaleLength)
except Exception:
print("Detected scale length in pixel: not possible")
| 32.528302 | 68 | 0.486079 |
6d8e8c6647762a04790ce5563cba9ab7ff98c2d8 | 150 | py | Python |
python/json_/json_example.py | SnoopJeDi/playground | 73fab4a38ceeff3da23683e3dd1cb1b3a74cf4cf | ["MIT"] | null | null | null |
python/json_/json_example.py | SnoopJeDi/playground | 73fab4a38ceeff3da23683e3dd1cb1b3a74cf4cf | ["MIT"] | null | null | null |
python/json_/json_example.py | SnoopJeDi/playground | 73fab4a38ceeff3da23683e3dd1cb1b3a74cf4cf | ["MIT"] | null | null | null |
import json
from pprint import pprint
if __name__ == "__main__":
with open("data.json", "r") as f:
data = json.load(f)
pprint(data)
| 16.666667 | 37 | 0.62 |
b68ca9021b65b1f601a184f6b0131e7bc2195fe9 | 14,306 | py | Python |
tests/api/v3_1_1/test_sg_vn_mapping.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | ["MIT"] | 36 | 2021-05-18T16:24:19.000Z | 2022-03-05T13:44:41.000Z |
tests/api/v3_1_1/test_sg_vn_mapping.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | ["MIT"] | 15 | 2021-06-08T19:03:37.000Z | 2022-02-25T14:47:33.000Z |
tests/api/v3_1_1/test_sg_vn_mapping.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | ["MIT"] | 6 | 2021-06-10T09:32:01.000Z | 2022-01-12T08:34:39.000Z |
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI sg_vn_mapping API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.1', reason='version does not match')
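# The tests below follow a generated pattern: each API endpoint is exercised
# once with explicit example arguments and once with all-default (None)
# arguments, and schema/request errors are re-raised inside pytest.raises.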
def is_valid_get_sg_vn_mappings(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_e69e3338166d5c1887e5fa82efb72a11_v3_1_1').validate(obj.response)
return True
def get_sg_vn_mappings(api):
endpoint_result = api.sg_vn_mapping.get_sg_vn_mappings(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sort='string',
sort_by='string'
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_get_sg_vn_mappings(api, validator):
try:
assert is_valid_get_sg_vn_mappings(
validator,
get_sg_vn_mappings(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_sg_vn_mappings_default(api):
endpoint_result = api.sg_vn_mapping.get_sg_vn_mappings(
filter=None,
filter_type=None,
page=None,
size=None,
sort=None,
sort_by=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_get_sg_vn_mappings_default(api, validator):
try:
assert is_valid_get_sg_vn_mappings(
validator,
get_sg_vn_mappings_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_sg_vn_mapping(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_018b050fff6a5302ace3e16674c8b19a_v3_1_1').validate(obj.response)
return True
def create_sg_vn_mapping(api):
endpoint_result = api.sg_vn_mapping.create_sg_vn_mapping(
active_validation=False,
id='string',
last_update='string',
payload=None,
sg_name='string',
sgt_id='string',
vn_id='string',
vn_name='string'
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_create_sg_vn_mapping(api, validator):
try:
assert is_valid_create_sg_vn_mapping(
validator,
create_sg_vn_mapping(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_sg_vn_mapping_default(api):
endpoint_result = api.sg_vn_mapping.create_sg_vn_mapping(
active_validation=False,
id=None,
last_update=None,
payload=None,
sg_name=None,
sgt_id=None,
vn_id=None,
vn_name=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_create_sg_vn_mapping_default(api, validator):
try:
assert is_valid_create_sg_vn_mapping(
validator,
create_sg_vn_mapping_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_bulk_create_sg_vn_mappings(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_3e81b5f00f35577dbad11186f70f25be_v3_1_1').validate(obj.response)
return True
def bulk_create_sg_vn_mappings(api):
endpoint_result = api.sg_vn_mapping.bulk_create_sg_vn_mappings(
active_validation=False,
payload=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_bulk_create_sg_vn_mappings(api, validator):
try:
assert is_valid_bulk_create_sg_vn_mappings(
validator,
bulk_create_sg_vn_mappings(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def bulk_create_sg_vn_mappings_default(api):
endpoint_result = api.sg_vn_mapping.bulk_create_sg_vn_mappings(
active_validation=False,
payload=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_bulk_create_sg_vn_mappings_default(api, validator):
try:
assert is_valid_bulk_create_sg_vn_mappings(
validator,
bulk_create_sg_vn_mappings_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_bulk_delete_sg_vn_mappings(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_3c5cad090a875d9d8bd87e59654c9d75_v3_1_1').validate(obj.response)
return True
def bulk_delete_sg_vn_mappings(api):
endpoint_result = api.sg_vn_mapping.bulk_delete_sg_vn_mappings(
active_validation=False,
payload=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_bulk_delete_sg_vn_mappings(api, validator):
try:
assert is_valid_bulk_delete_sg_vn_mappings(
validator,
bulk_delete_sg_vn_mappings(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def bulk_delete_sg_vn_mappings_default(api):
endpoint_result = api.sg_vn_mapping.bulk_delete_sg_vn_mappings(
active_validation=False,
payload=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_bulk_delete_sg_vn_mappings_default(api, validator):
try:
assert is_valid_bulk_delete_sg_vn_mappings(
validator,
bulk_delete_sg_vn_mappings_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_bulk_update_sg_vn_mappings(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_80c9c798a8ce58b88b3231575f5b8c98_v3_1_1').validate(obj.response)
return True
def bulk_update_sg_vn_mappings(api):
endpoint_result = api.sg_vn_mapping.bulk_update_sg_vn_mappings(
active_validation=False,
payload=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_bulk_update_sg_vn_mappings(api, validator):
try:
assert is_valid_bulk_update_sg_vn_mappings(
validator,
bulk_update_sg_vn_mappings(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def bulk_update_sg_vn_mappings_default(api):
endpoint_result = api.sg_vn_mapping.bulk_update_sg_vn_mappings(
active_validation=False,
payload=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_bulk_update_sg_vn_mappings_default(api, validator):
try:
assert is_valid_bulk_update_sg_vn_mappings(
validator,
bulk_update_sg_vn_mappings_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_sg_vn_mapping_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_8fceb2944abb59e2a748b970ee79fbb7_v3_1_1').validate(obj.response)
return True
def get_sg_vn_mapping_by_id(api):
endpoint_result = api.sg_vn_mapping.get_sg_vn_mapping_by_id(
id='string'
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_get_sg_vn_mapping_by_id(api, validator):
try:
assert is_valid_get_sg_vn_mapping_by_id(
validator,
get_sg_vn_mapping_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_sg_vn_mapping_by_id_default(api):
endpoint_result = api.sg_vn_mapping.get_sg_vn_mapping_by_id(
id='string'
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_get_sg_vn_mapping_by_id_default(api, validator):
try:
assert is_valid_get_sg_vn_mapping_by_id(
validator,
get_sg_vn_mapping_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_sg_vn_mapping_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_147075a66f9651fca28e85b97cf1b968_v3_1_1').validate(obj.response)
return True
def update_sg_vn_mapping_by_id(api):
endpoint_result = api.sg_vn_mapping.update_sg_vn_mapping_by_id(
active_validation=False,
id='string',
last_update='string',
payload=None,
sg_name='string',
sgt_id='string',
vn_id='string',
vn_name='string'
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_update_sg_vn_mapping_by_id(api, validator):
try:
assert is_valid_update_sg_vn_mapping_by_id(
validator,
update_sg_vn_mapping_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_sg_vn_mapping_by_id_default(api):
endpoint_result = api.sg_vn_mapping.update_sg_vn_mapping_by_id(
active_validation=False,
id='string',
last_update=None,
payload=None,
sg_name=None,
sgt_id=None,
vn_id=None,
vn_name=None
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_update_sg_vn_mapping_by_id_default(api, validator):
try:
assert is_valid_update_sg_vn_mapping_by_id(
validator,
update_sg_vn_mapping_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_sg_vn_mapping_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_0718cb6b83a55dfb8f3536b43cfaf081_v3_1_1').validate(obj.response)
return True
def delete_sg_vn_mapping_by_id(api):
endpoint_result = api.sg_vn_mapping.delete_sg_vn_mapping_by_id(
id='string'
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_delete_sg_vn_mapping_by_id(api, validator):
try:
assert is_valid_delete_sg_vn_mapping_by_id(
validator,
delete_sg_vn_mapping_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_sg_vn_mapping_by_id_default(api):
endpoint_result = api.sg_vn_mapping.delete_sg_vn_mapping_by_id(
id='string'
)
return endpoint_result
@pytest.mark.sg_vn_mapping
def test_delete_sg_vn_mapping_by_id_default(api, validator):
try:
assert is_valid_delete_sg_vn_mapping_by_id(
validator,
delete_sg_vn_mapping_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
| 30.117895 | 109 | 0.707815 |
a719325853ab2aeab1138ecb6282d2dccae7971f | 1,171 | py | Python |
test/test_validation.py | rpgoldman/pySBOL3 | 0f1ea8d56466206d38561d0cca8207bda1bc96bc | ["MIT"] | 14 | 2020-09-14T20:28:08.000Z | 2022-01-23T13:04:31.000Z |
test/test_validation.py | rpgoldman/pySBOL3 | 0f1ea8d56466206d38561d0cca8207bda1bc96bc | ["MIT"] | 203 | 2020-05-13T16:15:21.000Z | 2022-03-24T17:40:09.000Z |
test/test_validation.py | rpgoldman/pySBOL3 | 0f1ea8d56466206d38561d0cca8207bda1bc96bc | ["MIT"] | 8 | 2020-07-29T16:37:19.000Z | 2022-03-23T12:22:55.000Z |
import unittest
import sbol3
class TestValidationReport(unittest.TestCase):
def test_boolean(self):
# False if no errors or warnings
report = sbol3.ValidationReport()
self.assertEqual(False, bool(report))
# True if any errors
report.addError(None, None, 'Fake error')
self.assertEqual(True, bool(report))
# True if any warnings
report = sbol3.ValidationReport()
report.addWarning(None, None, 'Fake warning')
self.assertEqual(True, bool(report))
# True if both errors and warnings
report = sbol3.ValidationReport()
report.addError(None, None, 'Fake error')
report.addWarning(None, None, 'Fake warning')
self.assertEqual(True, bool(report))
def test_length(self):
# Length should be the sum of errors and warnings
report = sbol3.ValidationReport()
self.assertEqual(0, len(report))
report.addError(None, None, 'Fake error')
self.assertEqual(1, len(report))
report.addWarning(None, None, 'Fake warning')
self.assertEqual(2, len(report))
if __name__ == '__main__':
unittest.main()
| 31.648649 | 57 | 0.643894 |
8d0a0b143a8fb9def345f5930ec29e2c2448ea79 | 24,363 | py | Python |
opentelemetry-exporter-gcp-monitoring/tests/test_cloud_monitoring.py | manojpandey/opentelemetry-operations-python | d4ca1f1ff7186bf76ea53e2ee3fdfb97f897e84b | ["Apache-2.0"] | null | null | null |
opentelemetry-exporter-gcp-monitoring/tests/test_cloud_monitoring.py | manojpandey/opentelemetry-operations-python | d4ca1f1ff7186bf76ea53e2ee3fdfb97f897e84b | ["Apache-2.0"] | null | null | null |
opentelemetry-exporter-gcp-monitoring/tests/test_cloud_monitoring.py | manojpandey/opentelemetry-operations-python | d4ca1f1ff7186bf76ea53e2ee3fdfb97f897e84b | ["Apache-2.0"] | null | null | null |
# pylint: disable=too-many-statements
# Copyright 2021 The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import OrderedDict
from typing import Optional
from unittest import mock
from google.api.label_pb2 import LabelDescriptor
from google.api.metric_pb2 import MetricDescriptor
from google.api.monitored_resource_pb2 import MonitoredResource
from google.cloud.monitoring_v3.proto.metric_pb2 import TimeSeries
from opentelemetry.exporter.cloud_monitoring import (
MAX_BATCH_WRITE,
NANOS_PER_SECOND,
UNIQUE_IDENTIFIER_KEY,
WRITE_INTERVAL,
CloudMonitoringMetricsExporter,
)
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import ExportRecord
from opentelemetry.sdk.metrics.export.aggregate import (
HistogramAggregator,
SumAggregator,
ValueObserverAggregator,
)
from opentelemetry.sdk.resources import Resource
class UnsupportedAggregator:
pass
class MockBatcher:
def __init__(self, stateful):
self.stateful = stateful
def mock_meter(stateful: Optional[bool] = None):
# create an autospec of Meter from an instance in order to capture instance
# variables (meter.processor)
meter = MeterProvider(stateful).get_meter(__name__)
meter_mock = mock.create_autospec(meter, spec_set=True)
meter_mock.processor.stateful = meter.processor.stateful
return meter_mock
class MockMetric:
def __init__(
self,
name="name",
description="description",
value_type=int,
meter=None,
stateful=True,
):
self.name = name
self.description = description
self.value_type = value_type
self.meter = meter or mock_meter(stateful)
# pylint: disable=protected-access
# pylint can't deal with ProtoBuf object members
# pylint: disable=no-member
class TestCloudMonitoringMetricsExporter(unittest.TestCase):
def setUp(self):
self.client_patcher = mock.patch(
"opentelemetry.exporter.cloud_monitoring.MetricServiceClient"
)
self.client_patcher.start()
def tearDown(self) -> None:
self.client_patcher.stop()
@classmethod
def setUpClass(cls):
cls.project_id = "PROJECT"
cls.project_name = "PROJECT_NAME"
def test_constructor_default(self):
exporter = CloudMonitoringMetricsExporter(self.project_id)
self.assertEqual(exporter.project_id, self.project_id)
def test_constructor_explicit(self):
client = mock.Mock()
exporter = CloudMonitoringMetricsExporter(
self.project_id, client=client
)
self.assertIs(exporter.client, client)
self.assertEqual(exporter.project_id, self.project_id)
def test_batch_write(self):
client = mock.Mock()
exporter = CloudMonitoringMetricsExporter(
project_id=self.project_id, client=client
)
exporter.project_name = self.project_name
exporter._batch_write(range(2 * MAX_BATCH_WRITE + 1))
client.create_time_series.assert_has_calls(
[
mock.call(self.project_name, range(MAX_BATCH_WRITE)),
mock.call(
self.project_name,
range(MAX_BATCH_WRITE, 2 * MAX_BATCH_WRITE),
),
mock.call(
self.project_name,
range(2 * MAX_BATCH_WRITE, 2 * MAX_BATCH_WRITE + 1),
),
]
)
exporter._batch_write(range(MAX_BATCH_WRITE))
client.create_time_series.assert_has_calls(
[mock.call(self.project_name, range(MAX_BATCH_WRITE))]
)
exporter._batch_write(range(MAX_BATCH_WRITE - 1))
client.create_time_series.assert_has_calls(
[mock.call(self.project_name, range(MAX_BATCH_WRITE - 1))]
)
def test_get_metric_descriptor(self):
client = mock.Mock()
exporter = CloudMonitoringMetricsExporter(
project_id=self.project_id, client=client
)
exporter.project_name = self.project_name
self.assertIsNone(
exporter._get_metric_descriptor(
ExportRecord(
MockMetric(),
(),
UnsupportedAggregator(),
Resource.get_empty(),
)
)
)
record = ExportRecord(
MockMetric(),
(("label1", "value1"),),
SumAggregator(),
Resource.get_empty(),
)
metric_descriptor = exporter._get_metric_descriptor(record)
client.create_metric_descriptor.assert_called_with(
self.project_name,
MetricDescriptor(
**{
"name": None,
"type": "custom.googleapis.com/OpenTelemetry/name",
"display_name": "name",
"description": "description",
"labels": [
LabelDescriptor(key="label1", value_type="STRING")
],
"metric_kind": "CUMULATIVE",
"value_type": "INT64",
}
),
)
# Getting a cached metric descriptor shouldn't use another call
cached_metric_descriptor = exporter._get_metric_descriptor(record)
self.assertEqual(client.create_metric_descriptor.call_count, 1)
self.assertEqual(metric_descriptor, cached_metric_descriptor)
# Drop labels with values that aren't string, int or bool
exporter._get_metric_descriptor(
ExportRecord(
MockMetric(name="name2", value_type=float),
(
("label1", "value1"),
("label2", dict()),
("label3", 3),
("label4", False),
),
SumAggregator(),
Resource.get_empty(),
)
)
client.create_metric_descriptor.assert_called_with(
self.project_name,
MetricDescriptor(
**{
"name": None,
"type": "custom.googleapis.com/OpenTelemetry/name2",
"display_name": "name2",
"description": "description",
"labels": [
LabelDescriptor(key="label1", value_type="STRING"),
LabelDescriptor(key="label3", value_type="INT64"),
LabelDescriptor(key="label4", value_type="BOOL"),
],
"metric_kind": "CUMULATIVE",
"value_type": "DOUBLE",
}
),
)
def test_get_value_observer_metric_descriptor(self):
client = mock.Mock()
exporter = CloudMonitoringMetricsExporter(
project_id=self.project_id, client=client
)
exporter.project_name = self.project_name
record = ExportRecord(
MockMetric(), (), ValueObserverAggregator(), Resource.get_empty(),
)
exporter._get_metric_descriptor(record)
client.create_metric_descriptor.assert_called_with(
self.project_name,
MetricDescriptor(
**{
"name": None,
"type": "custom.googleapis.com/OpenTelemetry/name",
"display_name": "name",
"description": "description",
"labels": [],
"metric_kind": "GAUGE",
"value_type": "INT64",
}
),
)
def test_export(self):
client = mock.Mock()
with mock.patch(
"opentelemetry.exporter.cloud_monitoring.time_ns",
lambda: NANOS_PER_SECOND,
):
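            # time_ns is patched to return NANOS_PER_SECOND, so the exporter's
            # start time is pinned at exactly 1 second.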
exporter = CloudMonitoringMetricsExporter(
project_id=self.project_id, client=client
)
exporter.project_name = self.project_name
exporter.export(
[
ExportRecord(
MockMetric(),
(("label1", "value1"),),
UnsupportedAggregator(),
Resource.get_empty(),
)
]
)
client.create_time_series.assert_not_called()
client.create_metric_descriptor.return_value = MetricDescriptor(
**{
"name": None,
"type": "custom.googleapis.com/OpenTelemetry/name",
"display_name": "name",
"description": "description",
"labels": [
LabelDescriptor(key="label1", value_type="STRING"),
LabelDescriptor(key="label2", value_type="INT64"),
],
"metric_kind": "CUMULATIVE",
"value_type": "DOUBLE",
}
)
resource = Resource(
attributes={
"cloud.account.id": 123,
"host.id": "host",
"cloud.zone": "US",
"cloud.provider": "gcp",
"extra_info": "extra",
"gcp.resource_type": "gce_instance",
"not_gcp_resource": "value",
}
)
sum_agg_one = SumAggregator()
sum_agg_one.checkpoint = 1
sum_agg_one.last_update_timestamp = (
WRITE_INTERVAL + 1
) * NANOS_PER_SECOND
exporter.export(
[
ExportRecord(
MockMetric(meter=mock_meter()),
(("label1", "value1"), ("label2", 1),),
sum_agg_one,
resource,
),
ExportRecord(
MockMetric(meter=mock_meter()),
(("label1", "value2"), ("label2", 2),),
sum_agg_one,
resource,
),
]
)
expected_resource = MonitoredResource(
type="gce_instance",
labels={"project_id": "123", "instance_id": "host", "zone": "US"},
)
series1 = TimeSeries(resource=expected_resource)
series1.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
series1.metric.type = "custom.googleapis.com/OpenTelemetry/name"
series1.metric.labels["label1"] = "value1"
series1.metric.labels["label2"] = "1"
point = series1.points.add()
point.value.int64_value = 1
point.interval.end_time.seconds = WRITE_INTERVAL + 1
point.interval.end_time.nanos = 0
point.interval.start_time.seconds = 1
point.interval.start_time.nanos = 0
series2 = TimeSeries(resource=expected_resource)
series2.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
series2.metric.type = "custom.googleapis.com/OpenTelemetry/name"
series2.metric.labels["label1"] = "value2"
series2.metric.labels["label2"] = "2"
point = series2.points.add()
point.value.int64_value = 1
point.interval.end_time.seconds = WRITE_INTERVAL + 1
point.interval.end_time.nanos = 0
point.interval.start_time.seconds = 1
point.interval.start_time.nanos = 0
client.create_time_series.assert_has_calls(
[mock.call(self.project_name, [series1, series2])]
)
# Attempting to export too soon after another export with the exact
# same labels leads to it being dropped
sum_agg_two = SumAggregator()
sum_agg_two.checkpoint = 1
sum_agg_two.last_update_timestamp = (
WRITE_INTERVAL + 2
) * NANOS_PER_SECOND
exporter.export(
[
ExportRecord(
MockMetric(),
(("label1", "value1"), ("label2", 1),),
sum_agg_two,
Resource.get_empty(),
),
ExportRecord(
MockMetric(),
(("label1", "value2"), ("label2", 2),),
sum_agg_two,
Resource.get_empty(),
),
]
)
self.assertEqual(client.create_time_series.call_count, 1)
# But exporting with different labels is fine
sum_agg_two.checkpoint = 2
exporter.export(
[
ExportRecord(
MockMetric(),
(("label1", "changed_label"), ("label2", 2),),
sum_agg_two,
Resource.get_empty(),
),
]
)
series3 = TimeSeries()
series3.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
series3.metric.type = "custom.googleapis.com/OpenTelemetry/name"
series3.metric.labels["label1"] = "changed_label"
series3.metric.labels["label2"] = "2"
point = series3.points.add()
point.value.int64_value = 2
point.interval.end_time.seconds = WRITE_INTERVAL + 2
point.interval.end_time.nanos = 0
point.interval.start_time.seconds = 1
point.interval.start_time.nanos = 0
client.create_time_series.assert_has_calls(
[
mock.call(self.project_name, [series1, series2]),
mock.call(self.project_name, [series3]),
]
)
def test_export_value_observer(self):
client = mock.Mock()
with mock.patch(
"opentelemetry.exporter.cloud_monitoring.time_ns",
lambda: NANOS_PER_SECOND,
):
exporter = CloudMonitoringMetricsExporter(
project_id=self.project_id, client=client
)
exporter.project_name = self.project_name
client.create_metric_descriptor.return_value = MetricDescriptor(
**{
"name": None,
"type": "custom.googleapis.com/OpenTelemetry/name",
"display_name": "name",
"description": "description",
"labels": [],
"metric_kind": "GAUGE",
"value_type": "INT64",
}
)
aggregator = ValueObserverAggregator()
aggregator.checkpoint = aggregator._TYPE(1, 2, 3, 4, 5)
aggregator.last_update_timestamp = (
WRITE_INTERVAL + 1
) * NANOS_PER_SECOND
exporter.export(
[
ExportRecord(
MockMetric(meter=mock_meter()),
(),
aggregator,
Resource.get_empty(),
)
]
)
series = TimeSeries()
series.metric_kind = MetricDescriptor.MetricKind.GAUGE
series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
point = series.points.add()
point.value.int64_value = 5
point.interval.end_time.seconds = WRITE_INTERVAL + 1
point.interval.end_time.nanos = 0
point.interval.start_time.seconds = WRITE_INTERVAL + 1
point.interval.start_time.nanos = 0
client.create_time_series.assert_has_calls(
[mock.call(self.project_name, [series])]
)
def test_export_histogram(self):
client = mock.Mock()
with mock.patch(
"opentelemetry.exporter.cloud_monitoring.time_ns",
lambda: NANOS_PER_SECOND,
):
exporter = CloudMonitoringMetricsExporter(
project_id=self.project_id, client=client
)
exporter.project_name = self.project_name
client.create_metric_descriptor.return_value = MetricDescriptor(
**{
"name": None,
"type": "custom.googleapis.com/OpenTelemetry/name",
"display_name": "name",
"description": "description",
"labels": [],
"metric_kind": "CUMULATIVE",
"value_type": "DISTRIBUTION",
}
)
aggregator = HistogramAggregator(config={"bounds": [2, 4, 6]})
aggregator.checkpoint = OrderedDict([(2, 1), (4, 2), (6, 4), (">", 3)])
aggregator.last_update_timestamp = (
WRITE_INTERVAL + 1
) * NANOS_PER_SECOND
exporter.export(
[
ExportRecord(
MockMetric(meter=mock_meter()),
(),
aggregator,
Resource.get_empty(),
)
]
)
series = TimeSeries()
series.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
point = {
"interval": {
"start_time": {"seconds": 1},
"end_time": {"seconds": 11},
},
"value": {
"distribution_value": {
"count": 10,
"bucket_options": {
"explicit_buckets": {"bounds": [2.0, 4.0, 6.0]}
},
"bucket_counts": [1, 2, 4, 3],
}
},
}
series.points.add(**point)
client.create_time_series.assert_has_calls(
[mock.call(self.project_name, [series])]
)
def test_stateless_times(self):
client = mock.Mock()
with mock.patch(
"opentelemetry.exporter.cloud_monitoring.time_ns",
lambda: NANOS_PER_SECOND,
):
exporter = CloudMonitoringMetricsExporter(
project_id=self.project_id, client=client,
)
client.create_metric_descriptor.return_value = MetricDescriptor(
**{
"name": None,
"type": "custom.googleapis.com/OpenTelemetry/name",
"display_name": "name",
"description": "description",
"labels": [
LabelDescriptor(
key=UNIQUE_IDENTIFIER_KEY, value_type="STRING"
),
],
"metric_kind": "CUMULATIVE",
"value_type": "DOUBLE",
}
)
agg = SumAggregator()
agg.checkpoint = 1
agg.last_update_timestamp = (WRITE_INTERVAL + 1) * NANOS_PER_SECOND
metric_record = ExportRecord(
MockMetric(stateful=False), (), agg, Resource.get_empty()
)
exporter.export([metric_record])
exports_1 = client.create_time_series.call_args_list[0]
# verify the first metric started at exporter start time
self.assertEqual(
exports_1[0][1][0].points[0].interval.start_time.seconds, 1
)
self.assertEqual(
exports_1[0][1][0].points[0].interval.start_time.nanos, 0
)
self.assertEqual(
exports_1[0][1][0].points[0].interval.end_time.seconds,
WRITE_INTERVAL + 1,
)
agg.last_update_timestamp = (WRITE_INTERVAL * 2 + 2) * NANOS_PER_SECOND
metric_record = ExportRecord(
MockMetric(stateful=False), (), agg, Resource.get_empty()
)
exporter.export([metric_record])
exports_2 = client.create_time_series.call_args_list[1]
# 1ms ahead of end time of last export
self.assertEqual(
exports_2[0][1][0].points[0].interval.start_time.seconds,
WRITE_INTERVAL + 1,
)
self.assertEqual(
exports_2[0][1][0].points[0].interval.start_time.nanos, 1e6
)
self.assertEqual(
exports_2[0][1][0].points[0].interval.end_time.seconds,
WRITE_INTERVAL * 2 + 2,
)
def test_unique_identifier(self):
client = mock.Mock()
exporter1 = CloudMonitoringMetricsExporter(
project_id=self.project_id,
client=client,
add_unique_identifier=True,
)
exporter2 = CloudMonitoringMetricsExporter(
project_id=self.project_id,
client=client,
add_unique_identifier=True,
)
exporter1.project_name = self.project_name
exporter2.project_name = self.project_name
client.create_metric_descriptor.return_value = MetricDescriptor(
**{
"name": None,
"type": "custom.googleapis.com/OpenTelemetry/name",
"display_name": "name",
"description": "description",
"labels": [
LabelDescriptor(
key=UNIQUE_IDENTIFIER_KEY, value_type="STRING"
),
],
"metric_kind": "CUMULATIVE",
"value_type": "DOUBLE",
}
)
sum_agg_one = SumAggregator()
sum_agg_one.update(1)
metric_record = ExportRecord(
MockMetric(), (), sum_agg_one, Resource.get_empty()
)
exporter1.export([metric_record])
exporter2.export([metric_record])
(
first_call,
second_call,
) = client.create_metric_descriptor.call_args_list
self.assertEqual(first_call[0][1].labels[0].key, UNIQUE_IDENTIFIER_KEY)
self.assertEqual(
second_call[0][1].labels[0].key, UNIQUE_IDENTIFIER_KEY
)
first_call, second_call = client.create_time_series.call_args_list
self.assertNotEqual(
first_call[0][1][0].metric.labels[UNIQUE_IDENTIFIER_KEY],
second_call[0][1][0].metric.labels[UNIQUE_IDENTIFIER_KEY],
)
def test_extract_resources(self):
exporter = CloudMonitoringMetricsExporter(project_id=self.project_id)
self.assertIsNone(
exporter._get_monitored_resource(Resource.get_empty())
)
resource = Resource(
attributes={
"cloud.account.id": 123,
"host.id": "host",
"cloud.zone": "US",
"cloud.provider": "gcp",
"extra_info": "extra",
"gcp.resource_type": "gce_instance",
"not_gcp_resource": "value",
}
)
expected_extract = MonitoredResource(
type="gce_instance",
labels={"project_id": "123", "instance_id": "host", "zone": "US"},
)
self.assertEqual(
exporter._get_monitored_resource(resource), expected_extract
)
resource = Resource(
attributes={
"cloud.account.id": "123",
"host.id": "host",
"extra_info": "extra",
"not_gcp_resource": "value",
"gcp.resource_type": "gce_instance",
"cloud.provider": "gcp",
}
)
# Should throw when passed a malformed GCP resource dict
self.assertRaises(KeyError, exporter._get_monitored_resource, resource)
resource = Resource(
attributes={
"cloud.account.id": "123",
"host.id": "host",
"extra_info": "extra",
"not_gcp_resource": "value",
"gcp.resource_type": "unsupported_gcp_resource",
"cloud.provider": "gcp",
}
)
self.assertIsNone(exporter._get_monitored_resource(resource))
resource = Resource(
attributes={
"cloud.account.id": "123",
"host.id": "host",
"extra_info": "extra",
"not_gcp_resource": "value",
"cloud.provider": "aws",
}
)
self.assertIsNone(exporter._get_monitored_resource(resource))
| 34.121849 | 79 | 0.547962 |
b2e17f23a4ec1b82369713ea51c54e2366ce05e0 | 3,103 | py | Python |
test/unit/applications/lang/java.py | devnexen/unit | d65a66f9d813294917822554311281c5e1a7126b | ["Apache-2.0"] | 1 | 2021-03-08T05:11:13.000Z | 2021-03-08T05:11:13.000Z |
test/unit/applications/lang/java.py | devnexen/unit | d65a66f9d813294917822554311281c5e1a7126b | ["Apache-2.0"] | null | null | null |
test/unit/applications/lang/java.py | devnexen/unit | d65a66f9d813294917822554311281c5e1a7126b | ["Apache-2.0"] | null | null | null |
import glob
import os
import shutil
import subprocess
import pytest
from unit.applications.proto import TestApplicationProto
from unit.option import option
class TestApplicationJava(TestApplicationProto):
application_type = "java"
def prepare_env(self, script):
app_path = option.temp_dir + '/java'
web_inf_path = app_path + '/WEB-INF/'
classes_path = web_inf_path + 'classes/'
script_path = option.test_dir + '/java/' + script + '/'
if not os.path.isdir(app_path):
os.makedirs(app_path)
src = []
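        # Collect .java sources for compilation; copy everything else into
        # the webapp directory.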
for f in os.listdir(script_path):
file_path = script_path + f
if f.endswith('.java'):
src.append(file_path)
continue
if f.startswith('.') or f == 'Makefile':
continue
if os.path.isdir(file_path):
if f == 'WEB-INF':
continue
shutil.copytree(file_path, app_path + '/' + f)
continue
if f == 'web.xml':
if not os.path.isdir(web_inf_path):
os.makedirs(web_inf_path)
shutil.copy2(file_path, web_inf_path)
else:
shutil.copy2(file_path, app_path)
if src:
if not os.path.isdir(classes_path):
os.makedirs(classes_path)
classpath = (
option.current_dir + '/build/tomcat-servlet-api-9.0.39.jar'
)
ws_jars = glob.glob(
option.current_dir + '/build/websocket-api-java-*.jar'
)
if not ws_jars:
pytest.fail('websocket api jar not found.')
javac = [
'javac',
'-target', '8', '-source', '8', '-nowarn',
'-encoding', 'utf-8',
'-d', classes_path,
'-classpath', classpath + ':' + ws_jars[0],
]
javac.extend(src)
if option.detailed:
print("\n$ " + " ".join(javac))
try:
process = subprocess.Popen(javac, stderr=subprocess.STDOUT)
process.communicate()
except KeyboardInterrupt:
raise
except:
pytest.fail('Can\'t run javac process.')
def load(self, script, **kwargs):
self.prepare_env(script)
self._load_conf(
{
"listeners": {"*:7080": {"pass": "applications/" + script}},
"applications": {
script: {
"unit_jars": option.current_dir + '/build',
"type": self.get_application_type(),
"processes": {"spare": 0},
"working_directory": option.test_dir
+ '/java/'
+ script
+ '/',
"webapp": option.temp_dir + '/java',
}
},
},
**kwargs
)
| 28.731481 | 76 | 0.461811 |
c1343b86c95800f1c2ed39765720eb12c94274c1 | 33,699 | py | Python |
JSSPproblem.py | zangzelin/HDNNMv2.0 | 6704119f9354986cf16dff267c754db0bf78d640 | ["MIT"] | 6 | 2019-03-12T05:35:17.000Z | 2020-02-13T18:27:13.000Z |
JSSPproblem.py | Chupeng24/HDNNMv2.0 | 6704119f9354986cf16dff267c754db0bf78d640 | ["MIT"] | null | null | null |
JSSPproblem.py | Chupeng24/HDNNMv2.0 | 6704119f9354986cf16dff267c754db0bf78d640 | ["MIT"] | 4 | 2019-03-12T05:36:06.000Z | 2021-03-09T13:41:00.000Z |
import copy
import random
import time
# from __future__ import print_function
from itertools import combinations, permutations
# import guchoose
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Import Python wrapper for or-tools constraint solver.
from ortools.constraint_solver import pywrapcp
import Subproblem
class Problem:
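    """A randomly generated job-shop scheduling (JSSP) instance, solvable
    with the OR-Tools constraint solver (SoluteWithBBM) or a genetic
    algorithm (SoluteWithGA)."""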
m = None # number of the machines
n = None # number of the jobs
solute = None
time_low = None
time_high = None
p = np.array([]) # the processing time of jobs
r = np.array([]) # the order limit
x = np.array([]) # the result position mat
h = np.array([]) # the start time of jobs
e = np.array([]) # the end time of jobs
f = np.array([])
best_x = np.array([])
opetimalx = None
number_of_1d_feature = 11
# p.sum()
def __init__(self, m, n, time_low, time_high):
self.m = m
self.n = n
self.time_high = time_high
self.time_low = time_low
self.solute = 0
a = list(range(self.time_low, self.time_high))
p = []
for k in range(self.n):
p.append(random.sample(a, self.m))
self.p = np.array(p)
a = list(range(self.m))
r = []
for k in range(self.n):
r.append(random.sample(a, self.m))
self.r = np.array(r)
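        # Canonicalize the instance: reorder jobs by ascending total
        # processing time, then relabel machines by ascending total load.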
sum_time_of_job = np.sum(self.p,axis=1)
for i in range(n):
for j in range(i+1, n):
if sum_time_of_job[i] > sum_time_of_job[j]:
a = np.copy(self.p[j,:])
self.p[j,:] = self.p[i,:]
self.p[i,:] = a
sum_time_of_job[i],sum_time_of_job[j] = sum_time_of_job[j],sum_time_of_job[i]
sum_time_of_mach = [[i,0] for i in range(m)]
for i in range(n):
for j in range(m):
sum_time_of_mach[self.r[i,j]][1] += self.p[i,j]
for i in range(m):
for j in range(i+1, m):
if sum_time_of_mach[i][1] > sum_time_of_mach[j][1] :
sum_time_of_mach[i], sum_time_of_mach[j] = sum_time_of_mach[j], sum_time_of_mach[i]
nr = np.zeros((n,m),dtype=int)-1
for i in range(m):
nr[self.r==i] =sum_time_of_mach[i][0]
sum_time_of_mach = [[i,0] for i in range(m)]
for i in range(n):
for j in range(m):
sum_time_of_mach[self.r[i,j]][1] += self.p[i,j]
self.r = nr
def Print_info(self):
machine_job_p = np.zeros((self.m, self.n))
machine_job_r = np.zeros((self.m, self.n))
for job in range(self.n):
for order in range(self.m):
machine = self.r[job, order]
machine_job_p[machine, job] = self.p[job, order]
machine_job_r[machine, job] = order
np.savetxt('p.csv', machine_job_p, delimiter=',')
np.savetxt('r.csv', machine_job_r, delimiter=',')
self.PlotResult()
def SaveProblemToFile(self, filepath, index, pool=0):
filename = '{}/jssp_problem_m={}_n={}_timehigh={}_timelow={}_pool={}.txt'.format(
filepath, self.m, self.n, self.time_high, self.time_low, pool)
f = open(filename, 'a')
# f.write(str(index))
# f.write('\nr=\n')
f.write(str(self.m)+'\n')
f.write(str(self.n)+'\n')
f.write(TranslateNpToStr(self.p))
f.write(TranslateNpToStr(self.r))
f.close()
def SavesolutionToFile(self, filepath, index, pool=0):
f = open('{}/jssp_problem_m={}_n={}_timehigh={}_timelow={}_pool={}.txt'.format(
filepath, self.m, self.n, self.time_high, self.time_low, pool), 'a')
# f.write(str(index)+'\n')
f.write(TranslateNpToStr(self.x))
f.write(TranslateNpToStr(self.h))
f.write(TranslateNpToStr(self.e))
f.close()
def SoluteWithBBM(self):
solver = pywrapcp.Solver('jobshop')
solver.TimeLimit(1)
all_machines = range(0, self.m)
all_jobs = range(0, self.n)
x = np.zeros((self.m, self.n, 2), dtype=int)
h = np.zeros((self.m, self.n), dtype=int)
e = np.zeros((self.m, self.n), dtype=int)
# processing_times = self.p.tolist()
# machines = self.r.tolist()
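        # The sum of all processing times is a trivial upper bound on the
        # makespan, used as the scheduling horizon.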
horizon = int(self.p.sum())
# Creates jobs.
all_tasks = {}
for i in all_jobs:
for j in range(self.m):
all_tasks[(i, j)] = solver.FixedDurationIntervalVar(
0, horizon, int(self.p[i, j]), False, 'Job_%i_%i' % (i, j))
# Creates sequence variables and add disjunctive constraints.
all_sequences = []
all_machines_jobs = []
for i in all_machines:
machines_jobs = []
for j in all_jobs:
for k in range(self.m):
if self.r[j, k] == i:
machines_jobs.append(all_tasks[(j, k)])
disj = solver.DisjunctiveConstraint(
machines_jobs, 'machine %i' % i)
all_sequences.append(disj.SequenceVar())
solver.Add(disj)
        # Add conjunctive constraints.
for i in all_jobs:
for j in range(0, self.m - 1):
solver.Add(
all_tasks[(i, j + 1)].StartsAfterEnd(all_tasks[(i, j)]))
# Set the objective.
obj_var = solver.Max([all_tasks[(i, self.m-1)].EndExpr()
for i in all_jobs])
objective_monitor = solver.Minimize(obj_var, 1)
# Create search phases.
sequence_phase = solver.Phase([all_sequences[i] for i in all_machines],
solver.SEQUENCE_DEFAULT)
vars_phase = solver.Phase([obj_var],
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
main_phase = solver.Compose([sequence_phase, vars_phase])
# Create the solution collector.
collector = solver.LastSolutionCollector()
# Add the interesting variables to the SolutionCollector.
collector.Add(all_sequences)
collector.AddObjective(obj_var)
for i in all_machines:
sequence = all_sequences[i]
sequence_count = sequence.Size()
for j in range(0, sequence_count):
t = sequence.Interval(j)
collector.Add(t.StartExpr().Var())
collector.Add(t.EndExpr().Var())
# Solve the problem.
disp_col_width = 10
if solver.Solve(main_phase, [objective_monitor, collector]):
# print("\nOptimal Schedule Length:", collector.ObjectiveValue(0), "\n")
sol_line = ""
sol_line_tasks = ""
# print("Optimal Schedule", "\n")
for i in all_machines:
seq = all_sequences[i]
sol_line += "Machine " + str(i) + ": "
sol_line_tasks += "Machine " + str(i) + ": "
sequence = collector.ForwardSequence(0, seq)
seq_size = len(sequence)
for j in range(0, seq_size):
t = seq.Interval(sequence[j])
# Add spaces to output to align columns.
sol_line_tasks += t.Name() + " " * (disp_col_width - len(t.Name()))
x[i, j, 0] = int(t.Name().split('_')[1])
x[i, j, 1] = int(t.Name().split('_')[2])
for j in range(0, seq_size):
t = seq.Interval(sequence[j])
sol_tmp = "[" + \
str(collector.Value(0, t.StartExpr().Var())) + ","
sol_tmp += str(collector.Value(0,
t.EndExpr().Var())) + "] "
# Add spaces to output to align columns.
sol_line += sol_tmp + " " * (disp_col_width - len(sol_tmp))
h[i, j] = collector.Value(0, t.StartExpr().Var())
e[i, j] = collector.Value(0, t.EndExpr().Var())
sol_line += "\n"
sol_line_tasks += "\n"
self.x = x
self.h = h
self.e = e
self.best_x = x
def SoluteWithGA(self):
pt_tmp = self.p
ms_tmp = self.r + 1
dfshape = pt_tmp.shape
num_mc = dfshape[1] # number of machines
num_job = dfshape[0] # number of jobs
num_gene = num_mc*num_job # number of genes in a chromosome
pt = pt_tmp
ms = ms_tmp
population_size = 30
crossover_rate = 0.8
mutation_rate = 0.2
mutation_selection_rate = 0.2
num_mutation_jobs = round(num_gene*mutation_selection_rate)
num_iteration = 2000
start_time = time.time()
Tbest = 999999999999999
best_list, best_obj = [], []
population_list = []
makespan_record = []
for i in range(population_size):
# generate a random permutation of 0 to num_job*num_mc-1
nxm_random_num = list(np.random.permutation(num_gene))
# add to the population_list
population_list.append(nxm_random_num)
for j in range(num_gene):
# convert to job number format, every job appears m times
population_list[i][j] = population_list[i][j] % num_job
for n in range(num_iteration):
Tbest_now = 99999999999
'''-------- two point crossover --------'''
parent_list = copy.deepcopy(population_list)
offspring_list = copy.deepcopy(population_list)
# generate a random sequence to select the parent chromosome to crossover
S = list(np.random.permutation(population_size))
for m in range(int(population_size/2)):
crossover_prob = np.random.rand()
if crossover_rate >= crossover_prob:
parent_1 = population_list[S[2*m]][:]
parent_2 = population_list[S[2*m+1]][:]
child_1 = parent_1[:]
child_2 = parent_2[:]
cutpoint = list(np.random.choice(
num_gene, 2, replace=False))
cutpoint.sort()
child_1[cutpoint[0]:cutpoint[1]
] = parent_2[cutpoint[0]:cutpoint[1]]
child_2[cutpoint[0]:cutpoint[1]
] = parent_1[cutpoint[0]:cutpoint[1]]
offspring_list[S[2*m]] = child_1[:]
offspring_list[S[2*m+1]] = child_2[:]
'''----------repairment-------------'''
for m in range(population_size):
job_count = {}
                # 'larger' records jobs that appear in the chromosome more than num_mc times; 'less' records those appearing fewer times.
larger, less = [], []
for i in range(num_job):
if i in offspring_list[m]:
count = offspring_list[m].count(i)
pos = offspring_list[m].index(i)
# store the above two values to the job_count dictionary
job_count[i] = [count, pos]
else:
count = 0
job_count[i] = [count, 0]
if count > num_mc:
larger.append(i)
elif count < num_mc:
less.append(i)
for k in range(len(larger)):
chg_job = larger[k]
while job_count[chg_job][0] > num_mc:
for d in range(len(less)):
if job_count[less[d]][0] < num_mc:
offspring_list[m][job_count[chg_job]
[1]] = less[d]
job_count[chg_job][1] = offspring_list[m].index(
chg_job)
job_count[chg_job][0] = job_count[chg_job][0]-1
job_count[less[d]][0] = job_count[less[d]][0]+1
if job_count[chg_job][0] == num_mc:
break
            '''--------mutation--------'''
for m in range(len(offspring_list)):
mutation_prob = np.random.rand()
if mutation_rate >= mutation_prob:
                    # choose the positions to mutate
m_chg = list(np.random.choice(
num_gene, num_mutation_jobs, replace=False))
# save the value which is on the first mutation position
t_value_last = offspring_list[m][m_chg[0]]
for i in range(num_mutation_jobs-1):
# displacement
offspring_list[m][m_chg[i]
] = offspring_list[m][m_chg[i+1]]
# move the value of the first mutation position to the last mutation position
offspring_list[m][m_chg[num_mutation_jobs-1]
] = t_value_last
'''--------fitness value(calculate makespan)-------------'''
total_chromosome = copy.deepcopy(
parent_list)+copy.deepcopy(offspring_list) # parent and offspring chromosomes combination
chrom_fitness, chrom_fit = [], []
total_fitness = 0
for m in range(population_size*2): # for every gene line
j_keys = [j for j in range(num_job)]
key_count = {key: 0 for key in j_keys}
j_count = {key: 0 for key in j_keys}
m_keys = [j+1 for j in range(num_mc)]
m_count = {key: 0 for key in m_keys}
for i in total_chromosome[m]:
gen_t = int(pt[i][key_count[i]])
gen_m = int(ms[i][key_count[i]])
j_count[i] = j_count[i]+gen_t
m_count[gen_m] = m_count[gen_m]+gen_t
if m_count[gen_m] < j_count[i]:
m_count[gen_m] = j_count[i]
elif m_count[gen_m] > j_count[i]:
j_count[i] = m_count[gen_m]
key_count[i] = key_count[i]+1
makespan = max(j_count.values())
chrom_fitness.append(1/makespan)
chrom_fit.append(makespan)
total_fitness = total_fitness+chrom_fitness[m]
'''----------selection(roulette wheel approach)----------'''
pk, qk = [], []
for i in range(population_size*2):
pk.append(chrom_fitness[i]/total_fitness)
for i in range(population_size*2):
cumulative = 0
for j in range(0, i+1):
cumulative = cumulative+pk[j]
qk.append(cumulative)
selection_rand = [np.random.rand() for i in range(population_size)]
for i in range(population_size):
if selection_rand[i] <= qk[0]:
population_list[i] = copy.deepcopy(total_chromosome[0])
else:
for j in range(0, population_size*2-1):
if selection_rand[i] > qk[j] and selection_rand[i] <= qk[j+1]:
population_list[i] = copy.deepcopy(
total_chromosome[j+1])
break
'''----------comparison----------'''
for i in range(population_size*2):
if chrom_fit[i] < Tbest_now:
Tbest_now = chrom_fit[i]
sequence_now = copy.deepcopy(total_chromosome[i])
if Tbest_now <= Tbest:
Tbest = Tbest_now
sequence_best = copy.deepcopy(sequence_now)
makespan_record.append(Tbest)
'''----------result----------'''
# print("optimal sequence", sequence_best)
# print("optimal value:%f" % Tbest)
# print('the elapsed time:%s' % (time.time() - start_time))
import pandas as pd
import datetime
x = np.zeros((self.m, self.n, 2), dtype=int)
h = np.zeros((self.m, self.n), dtype=int)
e = np.zeros((self.m, self.n), dtype=int)
m_keys = [j+1 for j in range(num_mc)]
j_keys = [j for j in range(num_job)]
key_count = {key: 0 for key in j_keys}
j_count = {key: 0 for key in j_keys}
m_count = {key: 0 for key in m_keys}
j_record = {}
for i in sequence_best:
gen_t = int(pt[i][key_count[i]]) # time
gen_m = int(ms[i][key_count[i]]) # order
j_count[i] = j_count[i]+gen_t # time of job
m_count[gen_m] = m_count[gen_m]+gen_t # time of machine
if m_count[gen_m] < j_count[i]:
m_count[gen_m] = j_count[i]
elif m_count[gen_m] > j_count[i]:
j_count[i] = m_count[gen_m]
            # record the start and end time of this operation
start_time = int(j_count[i]-pt[i][int(key_count[i])])
end_time = int(j_count[i])
j_record[(i, gen_m)] = [start_time, end_time,key_count[i]]
key_count[i] = key_count[i]+1
df = []
for m in m_keys:
for j in j_keys:
list_of_start = [j_record[(q, m)][0] for q in j_keys]
list_of_start.sort()
order = list_of_start.index(j_record[(j, m)][0])
h[m-1, order] = j_record[(j, m)][0]
e[m-1, order] = j_record[(j, m)][1]
x[m-1, order, 0] = j
x[m-1, order, 1] = j_record[(j, m)][2]
df.append(dict(Task='Machine %s' % (m), Start='2018-07-14 %s' % (str(j_record[(
j, m)][0])), Finish='2018-07-14 %s' % (str(j_record[(j, m)][1])), Resource='Job %s' % (j+1)))
self.h = h
self.e = e
self.x = x
self.best_x = x
# self.PlotResult()
# plt.show()
def PlotResult(self, num=0):
colorbox = ['yellow', 'whitesmoke', 'lightyellow',
'khaki', 'silver', 'pink', 'lightgreen', 'orange', 'grey', 'r', 'brown']
for i in range(100):
colorArr = ['1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
color = ""
for i in range(6):
color += colorArr[random.randint(0, 14)]
colorbox.append("#"+color)
zzl = plt.figure(figsize=(12, 4))
for i in range(self.m):
# number_of_mashine:
for j in range(self.n):
# number_of_job:
                # read the schedule data for this operation
                mPoint1 = self.h[i, j]  # start time of the operation
                mPoint2 = self.e[i, j]  # end time of the operation
                mText = i + 1.5  # machine index, used as the bar height
                PlotRec(mPoint1, mPoint2, mText)  # draw the bar outline (start, end, height)
                Word = str(self.x[i, j, 0]+1) + '.' + str(self.x[i, j, 1]+1)  # job.procedure label
                # hold on
                # fill the bar with the job's color
x1 = mPoint1
y1 = mText-0.8
x2 = mPoint2
y2 = mText-0.8
x3 = mPoint2
y3 = mText
x4 = mPoint1
y4 = mText
plt.fill([x1, x2, x3, x4], [y1, y2, y3, y4],
color=colorbox[self.x[i, j, 0]])
plt.text(0.5*mPoint1+0.5*mPoint2-3, mText-0.5, Word)
plt.xlabel('Time')
plt.ylabel('Machine')
plt.tight_layout()
plt.savefig('out.png', dpi=400)
def SoluteWithBBMAndSaveToFile(self, filepath, index, pool=0):
self.SoluteWithBBM()
self.SaveProblemToFile(filepath, index, pool)
self.SavesolutionToFile(filepath, index, pool)
def SoluteWithGaAndSaveToFile(self, filepath, index, pool=0):
self.SoluteWithGA()
self.SaveProblemToFile(filepath, index, pool)
self.SavesolutionToFile(filepath, index, pool)
def LoadProblemWithSolution(self, filepath, index, pool=0):
        with open('data/jssp_problem_'+filepath, 'r') as f:
            data = f.readlines()
        data = data[index * 7:index * 7 + 7]
self.m = int(data[0])
self.n = int(data[1])
# print(data[2])
self.p = np.fromstring(
data[2][:-1], dtype=int, sep=',').reshape((self.n, self.m))
self.r = np.fromstring(
data[3][:-1], dtype=int, sep=',').reshape((self.n, self.m))
self.x = np.fromstring(
data[4][:-1], dtype=int, sep=',').reshape((self.m, self.n, 2))
self.h = np.fromstring(
data[5][:-1], dtype=int, sep=',').reshape((self.m, self.n))
self.e = np.fromstring(
data[6][:-1], dtype=int, sep=',').reshape((self.m, self.n))
    def CalculateSimilarityDegree(self, x=None):
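        """Return the fraction of (machine, order) slots on which x matches the stored best solution."""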
if x is not None:
self.x = x
right = 0
for i in range(self.m):
for j in range(self.n):
if self.x[i,j,0] == self.best_x[i,j,0] and self.x[i,j,1] == self.best_x[i,j,1]:
right += 1
return right/self.m/self.n
def LoadProblemWithoutSolution(self, filepath, index, pool=0):
        with open('data/jssp_problem_'+filepath, 'r') as f:
            data = f.readlines()
        data = data[index * 7:index * 7 + 7]
self.m = int(data[0])
self.n = int(data[1])
# print(data[2])
self.p = np.fromstring(
data[2][:-1], dtype=int, sep=',').reshape((self.n, self.m))
self.r = np.fromstring(
data[3][:-1], dtype=int, sep=',').reshape((self.n, self.m))
# self.x = np.fromstring(
# data[4][:-1], dtype=int, sep=',').reshape((self.m, self.n, 2))
# self.h = np.fromstring(
# data[5][:-1], dtype=int, sep=',').reshape((self.m, self.n))
# self.e = np.fromstring(
# data[6][:-1], dtype=int, sep=',').reshape((self.m, self.n))
def subproblem(self):
sub_list = []
for job in range(self.n):
for procedure in range(self.m):
sub_p = Subproblem.Subproblem(self, job, procedure)
sub_list.append(sub_p)
return sub_list
def GetFeaturesInTest1D(self):
F = []
sub_list = self.subproblem()
for sub in sub_list:
F.append(sub.GetFeatures1D())
F = np.array(F)
return F
def Getlables(self):
L = []
sub_list = self.subproblem()
for sub in sub_list:
L.append(sub.label)
L = np.array(L)
return L
def GetFeaturesInTest1D2D(self):
len_feature_1d = self.number_of_1d_feature
F1d = []
F2d1 = []
F2d2 = []
F2d3 = []
F2d4 = []
F2d5 = []
F2d6 = []
sub_list = self.subproblem()
for sub in sub_list:
F1d.append(sub.GetFeatures1D())
features_2d = sub.GetFeatures2D()
F2d1.append(features_2d[0])
F2d2.append(features_2d[1])
F2d3.append(features_2d[2])
F2d4.append(features_2d[3])
F2d5.append(features_2d[4])
F2d6.append(features_2d[5])
F1d = np.array(F1d).reshape(
(-1, len_feature_1d, 1))
F2d1 = np.array(F2d1).reshape(
(-1, self.n**2, self.n**2, 1))
F2d2 = np.array(F2d2).reshape(
(-1, self.n**2, self.n, 1))
F2d3 = np.array(F2d3).reshape(
(-1, self.n**2, len_feature_1d, 1))
F2d4 = np.array(F2d4).reshape(
(-1, self.n, self.n, 1))
F2d5 = np.array(F2d5).reshape(
(-1, self.n, len_feature_1d, 1))
F2d6 = np.array(F2d6).reshape(
(-1, len_feature_1d, len_feature_1d, 1))
tf = [F1d.reshape((-1, len_feature_1d, 1)), F2d1,
F2d2, F2d3, F2d4, F2d5, F2d6]
return tf
def GetIndexMatrix(self):
index = np.zeros((self.n, self.m))
sub = self.subproblem()
for s in sub:
index[s.job_id, s.machine_id] = s.SearchInX()
return index
def SchedulingSequenceGenerationMethod(self, output):
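        """Greedily decode a model's scores into a schedule.

        `output` is assumed to be an (m*n) x n score matrix; each step picks the
        feasible (job, procedure, machine, order) tuple with the highest score.
        """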
np.savetxt('output.csv', output,fmt="%.2f",delimiter=',')
for i in range(self.m * self.n):
output[i,:] = output[i,:]/output[i,:].sum()
h = np.zeros((self.m, self.n))
e = np.zeros((self.m, self.n))
x = np.zeros((self.m, self.n,2))
procedure_job = [0] * self.n
order_machine = [0] * self.m
for i in range(self.m*self.n):
possible_probability = []
            for job in range(self.n):
                procedure = procedure_job[job]
                machine = self.r[job, min(self.m - 1, procedure)]
                order = order_machine[machine]
                # the original code hardcoded 5 here; using self.m / self.n keeps
                # the method valid for problem sizes other than 5x5
                if procedure < self.m and order < self.n:
                    possible_probability.append([job, procedure, machine, order, output[job*self.m+procedure][order]])
                else:
                    machine = -1
                    possible_probability.append([job, procedure, machine, order, 0])
possible_probability = sorted(possible_probability, key=lambda x:x[4] )
bestjob,bestproce,bestmachine,bestorder = possible_probability[-1][:4]
x[bestmachine,bestorder][:] = [bestjob,bestproce]
procedure_job[bestjob]+=1
order_machine[bestmachine] += 1
self.x = x
def GurobiModelingmethod(self, output):
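        """Intended to assemble the schedule via a Gurobi-based chooser (guchoose);
        the chooser call below is commented out, so `x` is undefined and this
        method is incomplete as it stands.
        """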
np.savetxt('output.csv', output,fmt="%.2f",delimiter=',')
lables = self.Getlables()
R = self.r.reshape(self.m*self.n)
# x ,p = guchoose.main(output,R,lables,self.m,self.n)
self.x = x
h = np.zeros((self.m, self.n))
e = np.zeros((self.m, self.n))
for order in range(self.n):
timeline_machine = np.zeros((self.m), dtype=int)
timeline_jobs = np.zeros((self.n), dtype=int)
index_in_machine = np.zeros((self.m), dtype=int)
job_finsh = np.zeros((self.n), dtype=int)
for i in range(self.m*self.n):
mask = np.zeros((self.m), dtype=int)
for ma in range(self.m):
job, order = x[ma, min(self.n-1, index_in_machine[ma]), :]
# if job_finsh[job] == order:
mask[ma] = timeline_machine[ma]
# else:
# mask[ma] = 10000
earlyestmachine = np.argmin(mask)
while index_in_machine[earlyestmachine] == self.n:
timeline_machine[earlyestmachine] = 100000
earlyestmachine = np.argmin(timeline_machine)
# while can_do_in_machine[earlyestmachine]
job, order = x[earlyestmachine,
index_in_machine[earlyestmachine], :]
time_s = max(
timeline_machine[earlyestmachine], timeline_jobs[job])
time_e = time_s + self.p[job, order]
timeline_machine[earlyestmachine] = time_e
timeline_jobs[job] = time_e
h[earlyestmachine, index_in_machine[earlyestmachine]] = time_s
e[earlyestmachine, index_in_machine[earlyestmachine]] = time_e
index_in_machine[earlyestmachine] += 1
job_finsh[job] += 1
self.e = e
self.h = h
def PriorityQueuingMethod(self, output):
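        """Sort each machine's operations by the model's output score, then
        simulate the resulting priority order to obtain start/end times."""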
np.savetxt('output.csv', output,fmt="%.2f",delimiter=',')
lables = self.Getlables()
R = self.r.reshape(self.m*self.n)
x = [[] for j in range(self.m) ]
for i in range(self.m*self.n):
machine = R[i]
x[machine].append([i//self.m,i%self.m,output[i]])
for m in range(self.m):
x[m].sort(key = lambda x:x[2])
xx = np.zeros((self.m,self.n,2),dtype=int)
for i in range(self.m):
for j in range(self.n):
xx[i,j,0] = x[i][j][0]
xx[i,j,1] = x[i][j][1]
x =xx
self.x = xx
h = np.zeros((self.m, self.n))
e = np.zeros((self.m, self.n))
        # note: this outer loop re-runs the identical simulation on every pass,
        # because `order` is immediately reassigned inside the loop body
        for order in range(self.n):
timeline_machine = np.zeros((self.m), dtype=int)
timeline_jobs = np.zeros((self.n), dtype=int)
index_in_machine = np.zeros((self.m), dtype=int)
job_finsh = np.zeros((self.n), dtype=int)
for i in range(self.m*self.n):
mask = np.zeros((self.m), dtype=int)
for ma in range(self.m):
job, order = x[ma, min(self.n-1, index_in_machine[ma]), :]
if job_finsh[job] == order:
mask[ma] = timeline_machine[ma]
else:
mask[ma] = 10000
earlyestmachine = np.argmin(mask)
while index_in_machine[earlyestmachine] == self.n:
timeline_machine[earlyestmachine] = 100000
earlyestmachine = np.argmin(timeline_machine)
# while can_do_in_machine[earlyestmachine]
job, order = x[earlyestmachine,
index_in_machine[earlyestmachine], :]
time_s = max(
timeline_machine[earlyestmachine], timeline_jobs[job])
time_e = time_s + self.p[job, order]
timeline_machine[earlyestmachine] = time_e
timeline_jobs[job] = time_e
h[earlyestmachine, index_in_machine[earlyestmachine]] = time_s
e[earlyestmachine, index_in_machine[earlyestmachine]] = time_e
index_in_machine[earlyestmachine] += 1
job_finsh[job] += 1
self.e = e
self.h = h
# def SchedulingSequenceGenerationMethod(self, output):
# np.savetxt('output.csv', output, delimiter=',')
# x = np.zeros((self.m, self.n, 2), dtype=int)
# for machine in range(self.m):
# M = []
# index = np.argwhere(self.r == machine)
# M = [output[job*self.m + pro] for job, pro in index]
# M = np.array(M)
# for i in range(self.n):
# max_in_M = M.max()
# a, order = np.argwhere(M == max_in_M)[0]
# x[machine, order, :] = index[a]
# M[a, :] = 0
# M[:, order] = 0
# self.x = x
# # x = self.x
# h = np.zeros((self.m, self.n))
# e = np.zeros((self.m, self.n))
# for order in range(self.n):
# timeline_machine = np.zeros((self.m), dtype=int)
# timeline_jobs = np.zeros((self.n), dtype=int)
# index_in_machine = np.zeros((self.m), dtype=int)
# job_finsh = np.zeros((self.n), dtype=int)
# for i in range(self.m*self.n):
# mask = np.zeros((self.m), dtype=int)
# for ma in range(self.m):
# job, order = x[ma, min(self.n-1, index_in_machine[ma]), :]
# # if job_finsh[job] == order:
# mask[ma] = timeline_machine[ma]
# # else:
# # mask[ma] = 10000
# earlyestmachine = np.argmin(mask)
# while index_in_machine[earlyestmachine] == self.n:
# timeline_machine[earlyestmachine] = 100000
# earlyestmachine = np.argmin(timeline_machine)
# # while can_do_in_machine[earlyestmachine]
# job, order = x[earlyestmachine,
# index_in_machine[earlyestmachine], :]
# time_s = max(
# timeline_machine[earlyestmachine], timeline_jobs[job])
# time_e = time_s + self.p[job, order]
# timeline_machine[earlyestmachine] = time_e
# timeline_jobs[job] = time_e
# h[earlyestmachine, index_in_machine[earlyestmachine]] = time_s
# e[earlyestmachine, index_in_machine[earlyestmachine]] = time_e
# index_in_machine[earlyestmachine] += 1
# job_finsh[job] += 1
# self.e = e
# self.h = h
# # print('ddd')
def GetMakespan(self):
return self.e.max()
def TranslateNpToStr(m):
a = m.reshape((-1))
a = list(a)
s = ''.join(['{},'.format(round(o,2)) for o in a]) + '\n'
return s
def PlotRec(mPoint1, mPoint2, mText):
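    """Draw the outline of one Gantt-chart bar from mPoint1 to mPoint2 at height mText."""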
vPoint = np.zeros((4, 2))
vPoint[0, :] = [mPoint1, mText-0.8]
vPoint[1, :] = [mPoint2, mText-0.8]
vPoint[2, :] = [mPoint1, mText]
vPoint[3, :] = [mPoint2, mText]
plt.plot([vPoint[0, 0], vPoint[1, 0]], [vPoint[0, 1], vPoint[1, 1]], 'k')
# hold on
plt.plot([vPoint[0, 0], vPoint[2, 0]], [vPoint[0, 1], vPoint[2, 1]], 'k')
plt.plot([vPoint[1, 0], vPoint[3, 0]], [vPoint[1, 1], vPoint[3, 1]], 'k')
plt.plot([vPoint[2, 0], vPoint[3, 0]], [vPoint[2, 1], vPoint[3, 1]], 'k')
if __name__ == "__main__":
prob = Problem(2, 8, 10, 30)
prob.SoluteWithBBMAndSaveToFile('data', 0)
prob.Print_info()
sub_list = prob.subproblem()
sub_list[0].Show2DFeatures()
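    # further usage sketch: solve the same instance with the GA as well and
    # report the resulting makespan (this can be slow for larger instances)
    prob.SoluteWithGA()
    print("GA makespan:", prob.GetMakespan())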
# ===== file: python/akg/utils/kernel_exec.py (repo: nn4ip/akg, license: Apache-2.0) =====
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""util"""
import sys
import gc
import inspect
import datetime
import os
import uuid
import logging
import time
import random
import subprocess
import re
import tvm
from timeit import default_timer as timer
from threading import Thread
from functools import reduce
import numpy as np
from enum import IntEnum
import ctypes
import akg
import akg.tvm
from akg.tvm import autotvm
from akg.tvm import rpc
from akg.tvm import _api_internal
from akg.build_module import help_tiling_level
from akg.utils import result_analysis as ra_util
from akg.utils import format_transform as ft_util
from akg.utils import custom_tiling as ct_util
from akg.utils import validation_check as vc_util
from akg.utils.dsl_create import TensorUtils
from akg.backend.parsing_profiling_data import HWTSLogParser
from akg.backend.parsing_profiling_data import validate_and_normalize_path
sh = logging.StreamHandler(sys.stdout)
logging.getLogger().addHandler(sh)
logging.getLogger().setLevel(logging.INFO)
rpc_machine = {}
rpc_lb = {}
PERFORMANCE_TEST_FILE = "PERFORMANCE_TEST_FILE"
BINDS = "binds"
CUDA = "cuda"
CCE = "cce"
RANDOM_SEED_NUM = 20
PROF_ERROR_CODE = 9999999999
WGT_WIDTH = 16
INP_WIDTH = 16
OUT_WIDTH = 16
BLOCK_IN = 16
BLOCK_OUT = 16
BLOCK_REDUCE = 16
INP_ELEM_BYTES = (BLOCK_IN * BLOCK_REDUCE * INP_WIDTH // 8)
WGT_ELEM_BYTES = (BLOCK_OUT * BLOCK_REDUCE * WGT_WIDTH // 8)
OUT_ELEM_BYTES = (BLOCK_IN * BLOCK_OUT * OUT_WIDTH // 8)
GLB_ELEM_BYTES = (16 * OUT_WIDTH // 8)
class ReturnType(IntEnum):
"""Return Type IntEnum"""
DEFAULT = 0
FEAT = 1
MOD = 2
MOD_AND_FEAT = 3
def debug_mode(debug_flag):
"""
    Pass to enable debug mode.
    Args:
        debug_flag (int): The debug flag to be passed.
Returns:
list of function, the pass to set to build_config(add_lower_pass=tpu.debug_mode(mode)).
"""
# the number in pass_list such as 0,1,2,3 represents the order of the pass called
pass_list = []
if debug_flag == 1:
from akg.tvm import ir_pass
pass_list.append((0, ir_pass.inject_dma_intrin))
return pass_list
def func_time_required(func_name):
"""Checking the Time Required for Function Running."""
def wrapper(*args, **kwargs):
t0 = time.time()
result = func_name(*args, **kwargs)
t1 = time.time()
logging.info("func_time_required func:%s, running:%lf seconds", func_name.__name__, (t1 - t0))
return result
return wrapper
def create_code(kernel_name, code_path=None, code=None, code_type=CCE):
"""
Create cce or cuda file.
Args:
kernel_name: file name.
code_path: file path.
code: code.
code_type: code type.
"""
if code_type == CCE:
postfix = ".cce"
elif code_type == CUDA:
postfix = ".cu"
    else:
        logging.info("the target code type %s is not supported.", code_type)
        return
if not code_path:
code_path = "./"
if code_type == CCE and len(code_path) > 4 and code_path[-4:].lower() == postfix:
real_path = code_path
elif code_type == CUDA and len(code_path) > 3 and code_path[-3:].lower() == postfix:
real_path = code_path
else:
if code_path[-1] == r"/":
real_path = code_path + kernel_name + postfix
else:
real_path = code_path + r"/" + kernel_name + postfix
dir_path = r"/".join(real_path.split(r"/")[:-1])
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
with open(real_path, 'wt') as ss:
ss.write(code)
def gen_name_kernel(kernel, dtype, shapes):
"""generate kernel name."""
def _flat_array(srclist, dstlist):
for i in srclist:
if isinstance(i, (list, tuple)):
_flat_array(i, dstlist)
else:
dstlist.append(i)
res = ''
flat = []
_flat_array(shapes, flat)
for s in flat:
res = "%s%s'_'" % (res, s)
res = "%s_%s%s" % (kernel, res, dtype)
return res
def load_rpc_server_info(mode):
"""
load rpc server host and port info.
Args:
mode (str): string of runtime choose, can set ca aic and rpc.
"""
env_dic = os.environ
if env_dic.get('RPC_HOST') and env_dic.get('RPC_PORT'):
return None
if mode == 'rpc_cloud':
logging.error("runtime_mode=rpc_cloud must set 1980 host ip and port!")
raise Exception("ERROR:runtime_mode=rpc_cloud must set 1980 host ip and port!")
rpc_server_info_config = env_dic.get('RPC_SERVER_INFO_FILE')
if not rpc_server_info_config:
logging.error("runtime_mode=rpc must set RPC_SERVER_INFO_FILE for rpc server info config")
raise Exception("ERROR:runtime_mode=rpc must set RPC_SERVER_INFO_FILE for rpc server info config")
# load rpc server host and port info from local file.
import json
with open(rpc_server_info_config, 'r') as f:
info = json.load(f)
for i in info:
rpc_machine[i] = info[i]
rpc_lb[i] = 0.0
return None
def dispatch(rank=0):
"""Function for lock waiting dispatch handle version 1."""
def _sort_by_value(d):
items = list(d.items())
random.shuffle(items)
items.sort(key=lambda x: x[1])
return [item[0] for item in items]
for k, v in rpc_lb.items():
logging.info("######rpc_lb[%s]=%f", rpc_machine[k][0], v)
lb_list = _sort_by_value(rpc_lb)
if len(lb_list) > rank:
return lb_list[rank]
return lb_list[len(lb_list) - 1]
def commit(remote, weight):
rpc_lb[remote] = weight
@func_time_required
def mod_launch_rpc_worker(mod, args, outputs, host, port, tuning=False):
"""internal RPC worker, should be called by mod_launch_rpc_thread."""
logging.info("%s:====start connect to rpc ip: %s, rpc port: %d ",
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), host, port)
remote = rpc.connect(host, port, session_timeout=300)
logging.info("%s:====connect to rpc ip: %s, rpc port: %d finished ",
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), host, port)
uuid_str = uuid.uuid4().hex
temp_file_name = "stackvm_%s.o" % uuid_str
mod.save(temp_file_name)
remote.upload(temp_file_name)
remote_mod = remote.load_module(temp_file_name)
ctx = remote.cce()
arg_list = []
for a in args:
arg_list.append(akg.tvm.nd.array(a, ctx))
start_time = timer()
remote_mod(*arg_list)
ctx.sync()
if os.path.exists(temp_file_name):
os.remove(temp_file_name)
out_list = []
for i in outputs:
out = arg_list[len(arg_list) + i if i < 0 else i].asnumpy()
out_list.append(out)
    # this time measurement is not accurate yet, to be improved
t = timer() - start_time
if not tuning:
return out_list[0] if len(out_list) == 1 else tuple(out_list)
stat_info = {"run_time": t}
return out_list[0] if len(out_list) == 1 else tuple(out_list), stat_info
def mod_launch_rpc_thread(mode, mod, args, outputs, results, need_retry, retry, tuning=False):
"""internal RPC thread, should be called by mod_launch_rpc_multithread."""
remoteevb = '0'
host = None
port = None
env_dic = os.environ
if env_dic.get('RPC_HOST') and env_dic.get('RPC_PORT'):
host = env_dic.get('RPC_HOST')
port = int(env_dic.get('RPC_PORT'))
else:
if mode == 'rpc_cloud':
logging.error("runtime_mode=rpc_cloud must set 1980 host ip and port!")
raise Exception("ERROR:runtime_mode=rpc_cloud must set 1980 host ip and port!")
remoteevb = dispatch(retry)
host = rpc_machine[remoteevb][0]
port = rpc_machine[remoteevb][1]
start_time = timer()
end_time = 0.0
logging.debug("rpc ip: %s, rpc port: %d", host, port)
try:
out_list = mod_launch_rpc_worker(mod, args, outputs, host, port, tuning=tuning)
end_time = timer()
t = end_time - start_time
if not env_dic.get('RPC_HOST'):
commit(remoteevb, 20 if t > 20 else t)
logging.info("===this round host is %s time is %f", host, (end_time - start_time))
results[retry] = out_list
except RuntimeError:
need_retry[retry] = True
end_time = timer()
logging.error("===Failed! this round host is %s time is %f", host, (end_time - start_time))
if not env_dic.get('RPC_HOST'):
commit(remoteevb, end_time - start_time + 20 * (retry + 1))
logging.error("rpc retry error: %d %s", retry, sys.exc_info())
def mod_launch_rpc(mode, mod, args, outputs, tuning=False):
"""
launch rpc or rpc_cloud module with retry.
Note:
To minimize waiting time of struggler RPC servers, we wait for a short timeout and spawn
a new thread after the timeout.
In normal case, RPC would complete before the short timeout, so, only one thread will be created.
When the RPC server is slow, we create multiple threads that run concurrently.
We wait for the first thread that successfully completes its work and return the result.
If a thread fails (an exception is raised), we spawn a new thread to retry.
Newly spawned threads will use different RPC servers.
We bound the maximum number of threads, i.e. maximum number of retries.
"""
max_num_threads = 5
import operator
arg_filter = filter(lambda x: isinstance(x, np.ndarray), args)
arg_tensor = list(arg_filter)
tensor_size = reduce(operator.add, [reduce(operator.mul, arg.shape) for arg in arg_tensor])
expected_upload_speed = 5e6
expected_upload_time = int(tensor_size / expected_upload_speed)
timeout_before_spawning_new_thread = 200 + expected_upload_time
poll_interval = 1
thread_timeout = 400 + expected_upload_time * 3
load_rpc_server_info(mode)
threads = [None] * max_num_threads
results = [None] * max_num_threads
need_retry = [None] * max_num_threads
retried = [False] * max_num_threads
for thread_index in range(max_num_threads):
if thread_index > 0:
logging.error("Thread %d run for %d seconds, spawn a new thread to retry",
(thread_index - 1), timeout_before_spawning_new_thread)
threads[thread_index] = Thread(target=mod_launch_rpc_thread,
args=(mode, mod, args, outputs, results, need_retry, thread_index, tuning))
# daemonize the thread to prevent long running threads from hanging the whole process
threads[thread_index].daemon = True
threads[thread_index].start()
poll_count = timeout_before_spawning_new_thread // poll_interval
while poll_count > 0:
poll_count -= 1
# wait for the newly created thread, because it is most likely to complete first
threads[thread_index].join(poll_interval)
for poll_index in range(thread_index + 1):
if not threads[poll_index].is_alive() and not need_retry[poll_index]:
return results[poll_index]
if need_retry[poll_index] and not retried[poll_index]:
logging.error("Thread %d exit with error, spawn a new thread immediately", poll_index)
poll_count = 0
retried[poll_index] = True
logging.error("All %d threads are created, poll the threads until the first one exits normally, \
or all threads exit abnormally or timeout", max_num_threads)
poll_count = thread_timeout // poll_interval
for _ in range(poll_count):
threads[max_num_threads - 1].join(poll_interval)
exit_thread_count = 0
for poll_index in range(max_num_threads):
if not threads[poll_index].is_alive() and not need_retry[poll_index]:
return results[poll_index]
if not threads[poll_index].is_alive():
exit_thread_count += 1
if exit_thread_count == max_num_threads:
logging.error("All %d threads exit abnormally", max_num_threads)
return None
logging.error("All %d threads timeout", max_num_threads)
return None
def profiling_mode_run(mod, args, outputs, tuning, device_id):
"""
Function for collecting cycle data from device.
Args:
mod: CCE Module.
        args: list or tuple of numpy arrays.
        outputs: list or tuple of output argument indices.
        tuning: whether running in tuning mode.
        device_id: id of the device to run on.
"""
ctx = akg.tvm.ndarray.cce(device_id)
tvm.get_global_func("ascend_start_profiling")(device_id)
arg_list = []
for a in args:
arg_list.append(akg.tvm.nd.array(a, ctx))
time_before_launch = time.time()
mod(*arg_list)
ctx.sync()
tvm.get_global_func("ascend_stop_profiling")()
out_list = []
cycle = profiling_analyse(device_id, time_before_launch)
for i in outputs:
out = arg_list[len(arg_list) + i if i < 0 else i].asnumpy()
out_list.append(out)
logging.info('=====parsing cycles==============================')
if cycle != PROF_ERROR_CODE:
logging.info(cycle)
else:
logging.error("OOPS, can't correctly parsing cycles!")
TestUtils.record_cycle(cycle)
logging.info('=====parsing cycles==============================')
output_data = out_list[0] if len(out_list) == 1 else tuple(out_list)
if tuning:
return output_data, {'run_time': cycle}
return output_data
def profiling_analyse(device_id, time_before_launch):
"""analyse profiling."""
def exec_cmds_with_pipe(cmd_list):
cmd_num = len(cmd_list)
if cmd_num <= 1:
raise RuntimeError("length of cmd_list should be greater than 1.")
ps = []
for i, cmd in enumerate(cmd_list):
if i == 0:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
else:
p = subprocess.Popen(cmd, stdin=ps[-1].stdout, stdout=subprocess.PIPE)
ps.append(p)
for p in ps:
p.wait()
return ps[-1].communicate()
if not isinstance(device_id, int):
raise TypeError("device_id must be an integer.")
try:
public_path = os.getenv('PROFILING_DIR')
if public_path is None:
raise RuntimeError("Environment PROFILING_DIR not set!")
public_path = validate_and_normalize_path(public_path)
cmd_list = [
["find", public_path, "-iname", "*.log.%d" % device_id, "-printf", "'%T+\t%p\n'"],
["grep", "JOB"],
["sort", "-r"],
["head", "-n10"],
["awk", "{print $2}"],
["head", "-n1"],
]
for _ in range(200):
p = exec_cmds_with_pipe(cmd_list)
if p[0].decode('utf8').strip() == '':
time.sleep(1)
else:
break
try:
job_file = p[0].decode('utf8').strip().split('/')[-2]
except BaseException:
logging.warning("failed to decode profiling result")
return None
logging.debug("job file is: %s", job_file)
file_abs_path = public_path + "/" + job_file
file_create_time = os.path.getctime(file_abs_path)
if file_create_time < time_before_launch:
raise RuntimeError("The JOB file is too old")
count = 0
while count < 5:
try:
hwtslog_parser = HWTSLogParser(file_abs_path)
return hwtslog_parser.execute()
except BaseException:
time.sleep(1)
count += 1
except SyntaxError as e:
logging.error(e)
return PROF_ERROR_CODE
def array_as_continue(arr):
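    """Return arr as a C-contiguous numpy array; the runtime launch path requires contiguous buffers."""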
assert isinstance(arr, np.ndarray)
arr = np.ascontiguousarray(arr, dtype=arr.dtype)
assert arr.flags['C_CONTIGUOUS']
return arr
def get_launch_args(arg_list, outputs):
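    """Flatten args for the runtime launcher: each numpy array expands to a
    (data_ptr, nbytes, is_output) triple, while scalar args pass through unchanged."""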
launch_args = []
outputs = set(outputs)
for i in range(len(arg_list)):
arg = arg_list[i]
if isinstance(arg, np.ndarray):
data = arg.ctypes.data_as(ctypes.c_void_p)
nbytes = arg.size * arg.dtype.itemsize
is_output = 1 if i in outputs else 0
launch_args.append(data)
launch_args.append(nbytes)
launch_args.append(is_output)
else:
launch_args.append(arg)
return launch_args
def mod_launch_air(mod, args, outputs, device_id):
"""launch mod on kc_air."""
ctx = akg.tvm.ndarray.cce(device_id)
arg_list = []
for a in args:
if isinstance(a, np.ndarray):
arg_list.append(akg.tvm.nd.array(a, ctx))
elif isinstance(a, (list, tuple)):
for aa in a:
if isinstance(aa, np.ndarray):
arg_list.append(akg.tvm.nd.array(aa, ctx))
else:
arg_list.append(aa)
else:
arg_list.append(a)
for retry in range(3):
need_retry = False
try:
mod(*arg_list)
ctx.sync()
out_list = []
if not need_retry:
for i in outputs:
out = arg_list[len(arg_list) + i if i < 0 else i].asnumpy()
out_list.append(out)
return out_list[0] if len(out_list) == 1 else tuple(out_list)
except RuntimeError:
need_retry = True
logging.error("kc_air retry error: %d %s", retry, sys.exc_info())
logging.error("kc_air runtime error, please check!")
return None
def ascend_run(kernel_name, args, outputs, device_id):
"""launch mod on ascend."""
# Currently akg runs through this function in CCE RT mode
arg_list = []
for a in args:
if isinstance(a, np.ndarray):
arg_list.append(array_as_continue(a))
elif isinstance(a, (list, tuple)):
for aa in a:
if isinstance(aa, np.ndarray):
arg_list.append(array_as_continue(a))
else:
arg_list.append(aa)
else:
arg_list.append(a)
outputs = [len(arg_list) + i if i < 0 else i for i in outputs]
launch_args_list = get_launch_args(arg_list, outputs)
tvm.get_global_func("ascend_run")(kernel_name, device_id, *launch_args_list)
out_list = []
for i in outputs:
out = arg_list[i]
out_list.append(out)
return out_list[0] if len(out_list) == 1 else tuple(out_list)
def get_kernel_name(code):
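    """Extract the kernel name from generated source code by locating the `_kernel` suffix."""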
kernel_name_end_pos = code.find("_kernel")
kernel_name_start_pos = code[:kernel_name_end_pos].rfind(" ") + 1
kernel_name = code[kernel_name_start_pos:kernel_name_end_pos]
if not kernel_name:
raise ValueError("fail to get kernel_name")
return kernel_name
@func_time_required
def mod_launch(mod, args, outputs=(-1,), tuning=False, device_id=-1, expect=None, repeat_time=400):
"""
unified run CCE kernel api.
Args:
        mod: the built module to launch.
        args (Union[list, tuple]): list or tuple of numpy arrays.
        outputs (Union[list, tuple]): list or tuple of output argument indices.
        tuning (bool): whether running in tuning mode.
        device_id: id of the device to run on.
        expect: returned as-is when mode is "compile_cloud" or "compile_mini".
        repeat_time: number of repetitions used for GPU profiling.
Returns:
output numpy array, or tuple of numpy array if multi-output.
"""
gc.collect()
if device_id == -1:
device_id = int(os.environ.get("DEVICE_ID", 0))
module = mod.imported_modules[0]
if module.type_key == CUDA:
ctx = akg.tvm.context(CUDA, device_id)
mod_args = [akg.tvm.nd.array(a, ctx) for a in args]
mod(*mod_args)
out_list = [mod_args[len(args) + i if i < 0 else i].asnumpy() for i in outputs]
if not tuning:
return out_list[0] if len(out_list) == 1 else tuple(out_list)
else:
cycles = get_gpu_cycles(mod, *mod_args, device_id=device_id, repeat_time=repeat_time)
return out_list[0] if len(out_list) == 1 else tuple(out_list), {'run_time': cycles}
stat_info = {}
profiling_mode = get_profiling_mode()
if profiling_mode:
return profiling_mode_run(mod, args, outputs, tuning, device_id)
mode = get_runtime_mode()
if mode == 'aic':
output = aic_model.launch(mod, args, outputs)
if not tuning:
return output
ra_util.get_ticks(stat_info)
return output, stat_info
if mode == 'aic_cloud':
output = aic_model.launch(mod, args, outputs, spec=aic_model.Spec.CLOUD)
if not tuning:
return output
ra_util.get_ticks(stat_info)
return output, stat_info
if mode in ('rpc', 'rpc_cloud'):
return mod_launch_rpc(mode, mod, args, outputs, tuning)
# The air_cloud is the current default mode and needs to be modified in the future
if mode == 'air_cloud':
kernel_name = get_kernel_name(module.get_source())
return ascend_run(kernel_name, args, outputs, device_id)
if mode in ('ca', 'air', 'air_cloud'):
return mod_launch_air(mod, args, outputs, device_id)
if mode in ("compile_cloud", "compile_mini"):
return expect
if mode in ("csim", "ccesim", "cdiff"):
from akg.backend.csim import csim_launch
return csim_launch(args, outputs)
if mode == "cpu":
tvm_array = []
ctx = akg.tvm.context("llvm", 0)
for _, args_val in enumerate(args):
tvm_temp = akg.tvm.nd.array(args_val, ctx)
tvm_array.append(tvm_temp)
mod(*tvm_array)
return tvm_array[-1].asnumpy()
raise ValueError("mode must be aic, rpc, aic_cloud, ca, compile_cloud, compile_mini, cpu, csim, ccesim or cdiff")
def gen_kernel_name(input_shapes, input_types, op_attrs=None, kernel_name=""):
"""generate kernel name."""
dir_max_length = 250
shape_info = ''
for _, (shape, dtype) in enumerate(zip(input_shapes, input_types)):
if isinstance(shape, (list, tuple)) and shape and isinstance(shape[0], (list, tuple)):
for _, tmp_shape in enumerate(shape):
vc_util.check_shape(tmp_shape)
tmp_shape = list(tmp_shape)
str_tmp_shape = [str(tmp) for tmp in tmp_shape]
shape_info = "%s_%s_%s" % (shape_info, dtype, '_'.join(str_tmp_shape))
elif isinstance(shape, akg.tvm.tensor.Tensor):
for tmp_shape in shape.shape:
if isinstance(tmp_shape, akg.tvm.expr.Var):
str_shape = tmp_shape.name
else:
str_shape = str(tmp_shape)
shape_info = "%s_%s_%s" % (shape_info, dtype, '_'.join(str_shape))
else:
vc_util.check_shape(shape)
if isinstance(shape, akg.tvm.expr.Var):
shape = [shape]
shape = list(shape)
str_shape = [str(i) for i in shape]
shape_info = "%s_%s_%s" % (shape_info, dtype, '_'.join(str_shape))
if op_attrs is not None:
for tmp in op_attrs:
if isinstance(tmp, (list, tuple)):
for ele in tmp:
if isinstance(ele, (list, tuple)):
str_tmp = [str(i) for i in ele]
shape_info = shape_info + '_' + '_'.join(str_tmp)
else:
shape_info = shape_info + '_' + str(ele)
elif isinstance(tmp, (int, float)):
shape_info = shape_info + '_' + str(tmp)
elif isinstance(tmp, (str)):
shape_info = shape_info + '_' + tmp
elif isinstance(tmp, (np.ndarray)):
shape = list(tmp.shape)
str_shape = [str(i) for i in shape]
shape_info = shape_info + '_' + '_'.join(str_shape)
kernel_name = kernel_name + shape_info
kernel_name = re.sub(r'[^0-9a-zA-Z]+', '_', kernel_name)
if len(kernel_name) > dir_max_length:
logging.info("Dir name %s exceed maximal length, use first %d char as dir name.", kernel_name, dir_max_length)
kernel_name = kernel_name[:dir_max_length]
return kernel_name
@func_time_required
def op_build_test(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
attrs=None, log_cce=False, dump_ir=True, dump_code=True,
polyhedral=True, tuning=False):
"""
Return module from op_build with given inputs, distinguish tuning mode.
Args:
op_func (function returning an op or (op, [op_vars])): The op build function
input_shapes(iterable of iterable of int): the dim sizes for input for op
input_types (iterable of iterable of str): the dtypes for each input
op_attrs (list or tuple): extra attributes for the op.
kernel_name (str): name of op.
attrs (dict): tiling parameter.
log_cce (bool): False by default.
dump_ir (bool): True by default.
        dump_code (bool): True by default.
polyhedral (bool): True by default.
tuning (bool): False by default.
Return:
module.
"""
    if not (isinstance(attrs, dict) and 'tuning' in attrs):
        kernel_name = gen_kernel_name(input_shapes, input_types, op_attrs, kernel_name)
logging.debug('kernel_name---------- %s', str(kernel_name))
mod = op_build(op_func, input_shapes, input_types, op_attrs, kernel_name,
attrs, log_cce, dump_ir, dump_code,
polyhedral, tuning)
return mod
def recursive_copy(obj):
"""
Copy a container object recursively
Args:
obj (list, tuple, dict or object): input container object.
Return:
copied object.
"""
if isinstance(obj, list):
return [recursive_copy(it) for it in obj]
if isinstance(obj, tuple):
return tuple([recursive_copy(it) for it in obj])
if isinstance(obj, dict):
copy_obj = dict()
for key in obj:
copy_obj[key] = recursive_copy(obj[key])
return copy_obj
return obj
def gen_inputs_and_shape_params(input_shapes, input_types, inputs, shape_params):
"""
Generate akg.tvm.placeholder as inputs for op with given input_shapes and input_types
Args:
input_shapes(iterable of iterable of int): the dim sizes for input for op.
input_types (iterable of iterable of str): the dtypes for each input.
inputs (list): None by default.
shape_params (list): None by default.
"""
for i, (shape, dtype) in enumerate(zip(input_shapes, input_types)):
if isinstance(shape, (list, tuple)) and shape and isinstance(shape[0], (list, tuple)):
tmp_input = []
for j, tmp_shape in enumerate(shape):
tmp_input.append(akg.tvm.placeholder(tmp_shape, dtype, "input_%d_%d" % (i + 1, j + 1)))
for tmp in tmp_shape:
if isinstance(tmp, akg.tvm.expr.Var):
shape_params.append(tmp)
inputs.append(tmp_input)
elif isinstance(shape, (list, tuple)) and shape and isinstance(shape[0], akg.tvm.expr.Var):
inputs.append(akg.tvm.placeholder(shape, dtype, "input_%d" % (i + 1)))
for tmp_shape in shape:
if isinstance(tmp_shape, akg.tvm.expr.Var):
shape_params.append(tmp_shape)
elif isinstance(shape, akg.tvm.tensor.Tensor):
inputs.append(shape)
for tmp_shape in shape.shape:
shape_params.append(tmp_shape)
else:
inputs.append(akg.tvm.placeholder(shape, dtype, "input_%d" % (i + 1)))
def gen_attrs_params(op_attrs, attrs_params):
"""
Parsing attrs given by op_attrs.
Args:
op_attrs (list or tuple): extra attributes for the op.
attrs_params (list): None by default.
"""
for tmp_attr in op_attrs:
if isinstance(tmp_attr, (list, tuple)) and tmp_attr and isinstance(tmp_attr[0], akg.tvm.expr.Var):
for attr_param in tmp_attr:
if isinstance(attr_param, akg.tvm.expr.Var):
attrs_params.append(attr_param)
elif isinstance(tmp_attr, akg.tvm.expr.Var):
attrs_params.append(tmp_attr)
def get_dim_from_func_map(attrs, op_func, args, input_shapes, input_types, op_attrs):
"""
Get tiling parameter from map defined in op_func.
Args:
attrs (dict): tiling parameter.
op_func (function returning an op or (op, [op_vars])): The op build function.
args (list): input tensors and attributes(if exists) of op_func.
input_shapes (iterable of iterable of int): the dim sizes for input for op.
input_types (iterable of iterable of str): the dtypes for each input.
op_attrs (list or tuple): extra attributes for the op.
"""
if attrs is None or 'dim' not in attrs or not attrs['dim']:
dim_info = ""
if attrs is None:
attrs = dict()
if op_func.__name__ in ct_util.set_dim_func_map.keys():
value = ct_util.set_dim_func_map[op_func.__name__]
if inspect.isfunction(value):
dim_info = value(*args)
elif isinstance(value, dict):
key = []
key.append(ft_util.convert_to_list(input_shapes))
key.append(ft_util.convert_to_list(input_types))
if op_attrs is not None:
key.append(op_attrs)
key = str(tuple(key))
if key in value.keys():
dim_info = ct_util.set_dims(value[key])
else:
raise RuntimeError("Registered set_dim_map is invalid. Must be a function or a dict!")
if isinstance(dim_info, (list, tuple)):
dim_info = dim_info[0]
attrs['dim'] = dim_info
return attrs
def parsing_output(output, attrs, compute_func, sch_tmpl, gpu_binds):
"""
Parsing the outputs of op.
Args:
output (iterable of iterable of akg.tvm.tensor): the outputs of op.
attrs (dict): tiling parameter.
compute_func (function): None by default, func for doing compute_inline or other.
sch_tmpl (dict): None by default.
gpu_binds (dict): None by default.
"""
if isinstance(output, (list, tuple)):
from inspect import isfunction
new_outputs = []
for elem in output:
if isfunction(elem):
compute_func = elem
elif isinstance(elem, dict):
for key, value in elem.items():
if key not in attrs or not attrs[key]:
attrs[key] = value
elif isinstance(elem, (list, tuple)):
new_outputs += elem
else:
new_outputs.append(elem)
output = new_outputs
elif isinstance(output, dict):
sch_tmpl = output
output = sch_tmpl['output']
gpu_binds = sch_tmpl['binds']
return output, compute_func, sch_tmpl, gpu_binds
def gen_op_var(inputs, output, op_var):
"""
Combine inputs and outputs about the op.
Args:
inputs(list): the inputs of op.
output(list): the outputs of op.
op_var (list): inputs and outputs for the op.
"""
for xx in inputs:
if isinstance(xx, list):
for x in xx:
op_var.append(x)
else:
op_var.append(xx)
if isinstance(output, (list, tuple)):
op_var = op_var + [i for i in output if TensorUtils.is_output_value(i)]
else:
if TensorUtils.is_output_value(output):
op_var = op_var + [output]
return op_var
def gen_shape_var(attrs_params, shape_params, shape_var):
"""
Combine shape of inputs and extra attributes about the op.
Args:
attrs_params(list): shape of inputs for the op
shape_params(list): extra attributes for the op
shape_var (list): shape of inputs and extra attributes for the op.
"""
if attrs_params:
for i in attrs_params:
if i not in shape_var:
shape_var.append(i)
for i in shape_params:
if i not in shape_var:
shape_var.append(i)
def gen_spaces_dim_key(op_func, args, s, op_var, kernel_name, attrs, polyhedral, tuning, target):
"""
Generate tiling parameter.
Args:
op_func (function returning an op or (op, [op_vars])): The op build function.
args (Union[list, tuple]): list or tuple of numpy array.
s (dict): schedule of op.
op_var (list): the akg.tvm.tensor of inputs and outputs for op.
kernel_name (str): name of op.
attrs (dict): tiling parameter.
polyhedral (bool): True by default.
tuning (bool): False by default.
Return:
tiling parameter.
"""
set_dim_key = ""
if op_func.__name__ in ct_util.set_dim_func_map.keys():
func_ = ct_util.set_dim_func_map[op_func.__name__]
if inspect.isfunction(func_):
set_dim_key = func_(*args)[1]
elif op_func.__name__ in ct_util.gen_key_func_map.keys():
func_ = ct_util.gen_key_func_map[op_func.__name__]
if inspect.isfunction(func_):
set_dim_key = func_(*args)
with akg.build_config(dump_pass_ir=True):
spaces = akg.lower(s, op_var, name=kernel_name, attrs=attrs, polyhedral=polyhedral, tuning=tuning,
target=target)
if set_dim_key == "":
set_dim_key = str(args)
return spaces, set_dim_key
def create_gpu_mod(sch_tmpl, s, op_func, op_var, shape_var, kernel_name, attrs, polyhedral, binds, dump_ir, dump_code,
tuning):
"""
Return module for op of gpu.
Args:
sch_tmpl (dict): schedule of op and the others.
s (dict): schedule of op.
op_func (function returning an op or (op, [op_vars])): The op build function.
op_var (list): the akg.tvm.tensor of inputs and outputs for op.
shape_var (list): shape of inputs and extra attributes for the op.
kernel_name (str): name of op.
attrs (dict): tiling parameter.
polyhedral (bool): True by default.
binds (dict): BINDS
dump_ir (bool): True by default.
dump_code (bool): False by default.
tuning (bool): False by default.
Return:
module.
"""
if sch_tmpl is not None or (attrs and attrs.get("target", "cce") == "cuda"):
if kernel_name == "":
kernel_name = op_func.__name__ if sch_tmpl is None else sch_tmpl['op_name']
target = CUDA
if sch_tmpl is not None:
if sch_tmpl['target'] != CUDA:
raise ValueError("Only support cuda as target when using schedule template.")
with akg.tvm.target.cuda() as target:
if not tuning:
s = sch_tmpl['schedule'](sch_tmpl['output'])
with akg.tvm.build_config(dump_pass_ir=dump_ir):
mod = akg.build(s, op_var, "cuda", shape_var, name=kernel_name, attrs=attrs,
polyhedral=False, binds=binds)
else:
@autotvm.template
def _autotune_template():
s = sch_tmpl['schedule'](sch_tmpl['output'])
return (s, op_var)
# create autotune task
task = autotvm.task.create(_autotune_template,
args=list(),
target='cuda')
print("task config: ", task.config_space)
# set measure_option
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=5, min_repeat_ms=150, timeout=4)
)
# Begin tuning, log records to file `kernel_name.log`
tuner = autotvm.tuner.RandomTuner(task)
if not os.path.exists(kernel_name + '.log'):
tuner.tune(n_trial=len(task.config_space),
measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(kernel_name + '.log')])
# query best config
dispatch_context = autotvm.apply_history_best(kernel_name + '.log')
best_config = dispatch_context.query(task.target, task.workload)
print("\nBest config is:")
print(best_config)
# apply best config
with autotvm.apply_history_best(kernel_name + '.log'):
s, op_var = _autotune_template()
mod = akg.build(s, op_var, "cuda", shape_var, name=kernel_name, attrs=attrs,
polyhedral=False, binds=binds)
else:
with akg.build_config(dump_pass_ir=dump_ir):
mod = akg.build(s, op_var, target, shape_var, name=kernel_name, attrs=attrs, polyhedral=polyhedral,
binds=binds)
if dump_code:
source_code = mod.imported_modules[0].get_source()
create_code(kernel_name, "./", source_code, CUDA)
return mod
def op_build(op_func, input_shapes, input_types, op_attrs=None, kernel_name="",
attrs=None, log_cce=False, dump_ir=True, dump_code=True,
polyhedral=True, tuning=False, ret_mode=ReturnType.MOD):
"""
Return module built from op_func with given inputs.
Args:
op_func (function returning an op or (op, [op_vars])): The op build function.
input_shapes(iterable of iterable of int): the dim sizes for input for op.
input_types (iterable of iterable of str): the dtypes for each input.
op_attrs (list or tuple): extra attributes for the op.
kernel_name (str): name of op.
attrs (dict): tiling parameter.
log_cce (bool): False by default.
dump_ir (bool): True by default.
        dump_code (bool): True by default.
polyhedral (bool): True by default.
tuning (bool): False by default.
Return:
module.
"""
inputs = []
shape_params = [] # save all the shape params for dynamic_shape cases
gen_inputs_and_shape_params(input_shapes, input_types, inputs, shape_params)
attrs_params = []
if op_attrs is not None:
args = inputs + op_attrs
gen_attrs_params(op_attrs, attrs_params)
else:
args = inputs
# backup inputs because the tensor names may be updated inside op_func
inputs_backup = recursive_copy(inputs)
output = op_func(*args)
# restore inputs to make sure that tensor names are not changed by op_func
inputs = inputs_backup
# set dim
attrs = get_dim_from_func_map(attrs, op_func, args, input_shapes, input_types, op_attrs)
compute_func = None # func which is defined in dsl for doing compute_inline or other
sch_tmpl = None
gpu_binds = None
output, compute_func, sch_tmpl, gpu_binds = parsing_output(output, attrs, compute_func, sch_tmpl, gpu_binds)
op_var = []
op_var = gen_op_var(inputs, output, op_var)
shape_var = []
gen_shape_var(attrs_params, shape_params, shape_var)
if sch_tmpl is not None:
return create_gpu_mod(sch_tmpl, None, op_func, op_var, shape_var, kernel_name, attrs, polyhedral, gpu_binds,
dump_ir, dump_code, tuning)
if isinstance(output, (list, tuple)):
tmp = []
for x in list(output):
if isinstance(x, tuple):
tmp.append(x[0].op)
else:
tmp.append(x.op)
s = akg.tvm.create_schedule(tmp)
else:
s = akg.tvm.create_schedule(output.op)
if compute_func is not None:
compute_func(s)
polyhedral = False
target = CCE
if attrs and attrs.get("target", "cce") == CUDA:
target = CUDA
level = attrs.get("help_tiling") if attrs and "help_tiling" in attrs else None
if tuning or (level is not None and level > help_tiling_level['None']):
return gen_spaces_dim_key(op_func, args, s, op_var, kernel_name, attrs, polyhedral, tuning, target)
mode = get_runtime_mode()
if mode == "cpu":
mod = akg.tvm.build(s, op_var, "llvm")
if not os.path.isdir("./cpu/ir/"):
os.makedirs("./cpu/ir/")
with os.fdopen(os.open("./cpu/ir/" + kernel_name + ".cc", os.O_WRONLY | os.O_CREAT, 0o400), 'w') as irf:
            irf.write(str(akg.tvm.lower(s, op_var, shape_var, simple_mode=True)))
return mod
binds = None if not attrs else attrs.pop(BINDS, None)
if ret_mode in [ReturnType.FEAT, ReturnType.MOD_AND_FEAT]:
if binds is None:
from akg.tvm import build_module
binds, _ = build_module.get_binds(op_var)
cfg = _api_internal._GetCurrentBuildConfig()
stmt, args = _api_internal._Lower(s, op_var, shape_params, kernel_name,
binds, attrs, False, True, False, target,
cfg, True)
from akg.utils.auto_tuning import get_features_from_stmts
feature = get_features_from_stmts(stmts=[stmt], binds=[binds], n_skip_cache=0)[0]
if ret_mode == ReturnType.FEAT:
return feature
mod = _api_internal._BuildStmtToModule(stmt, kernel_name, cfg, args, target)
return mod, feature
if target == CUDA:
return create_gpu_mod(None, s, op_func, op_var, shape_var, kernel_name, attrs, polyhedral, binds, dump_ir,
dump_code, tuning)
target = CCE
with akg.build_config(dump_pass_ir=dump_ir):
mod = akg.build(s, op_var, target, shape_var, name=kernel_name, attrs=attrs, polyhedral=polyhedral, binds=binds)
source_code = mod.imported_modules[0].get_source()
if log_cce:
logging.debug("#################cce code####################")
logging.debug(source_code)
if dump_code:
create_code(kernel_name, "./", source_code, target)
return mod
def get_runtime_mode():
"""get runtime mode."""
env_dic = os.environ
if not env_dic.get('RUNTIME_MODE'):
mode = 'rpc_cloud'
else:
mode = env_dic.get('RUNTIME_MODE')
return mode
def get_profiling_mode():
"""get profiling mode."""
env_dic = os.environ
if env_dic.get('PROFILING_MODE') and env_dic.get('PROFILING_MODE').lower() == "true":
return True
return False
def product_is_mini():
"""check whether in mini environment."""
mode = get_runtime_mode()
if mode in ('rpc', 'air', 'aic', 'compile_mini'):
return True
return False
def get_available_devices_num():
"""get available devives num."""
env_dic = os.environ
try:
        return int(env_dic.get('DEVICE_TOTAL_NUM')) if env_dic.get('DEVICE_TOTAL_NUM') else 1
    except ValueError as e:
logging.error(e)
return 1
def get_device_id():
"""get device id."""
env_dic = os.environ
try:
        return int(env_dic.get('DEVICE_ID')) if env_dic.get('DEVICE_ID') else 0
    except ValueError as e:
logging.error(e)
return 0
def get_gpu_cycles(mod, *mod_args, device_id=0, repeat_time=400):
"""get gpu profiling cycles."""
from akg.utils.result_analysis import gpu_profiling
tcost = gpu_profiling(mod, *mod_args, repeat_time=repeat_time, device_id=device_id)
return tcost
class TestUtils:
"""Class for getting cycle and core num."""
@staticmethod
def record_cycle(cycle):
if os.environ.get(PERFORMANCE_TEST_FILE):
result_file = os.environ.get(PERFORMANCE_TEST_FILE)
with open(result_file, "a+") as f:
f.write("{0}\n".format(cycle))
@staticmethod
def record_core(stmt):
"""Function for getting performance data from cores."""
def get_core_num():
core_num = 1
if hasattr(stmt, 'attr_key') and stmt.attr_key == 'thread_extent':
core_num = stmt.value
return core_num
if os.environ.get(PERFORMANCE_TEST_FILE):
result_file = os.environ.get(PERFORMANCE_TEST_FILE)
with open(result_file, "a+") as f:
f.write("{0}; ".format(get_core_num()))
# ===== file: fbpcs/pc_pre_validation/__init__.py (repo: joe1234wu/fbpcs, license: MIT) =====
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# ===== file: goXSS.py (repo: filipesam/goXSS, license: MIT) =====
import sys, random, string, requests, getopt, re
def usage():
print "goXSS.py -u <attackurl> -o <outputfile>"
#simple payloadgenerator
def generatePayload():
global varRandomString
varRandomString = "".join(random.sample(string.ascii_uppercase, 8))
print "-" * 65
print "Random string generated for XSS attack is:", varRandomString
"""
def breakChars():
global tryToBreak
tryToBreak = ['<','>','"','\\','/','(',')']
for character in tryToBreak:
print "Trying to break:", tryToBreak
"""
def attackUrl():
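    # append a random marker plus each breaker string to the target URL and
    # check whether the combination comes back reflected in the response body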
generatePayload()
tryToBreak = ['<','>','"','script','/','=',':','<script>',
'<img >','<svg/onload=alert(0)>','confirm']
for Char in tryToBreak:
global inject
inject = str(Char)
print "Trying... " + inject
#breakChars()
urlRequest = urlSet + "/" + varRandomString + inject
r = requests.get(url=urlRequest)
print "Url to attack is:", urlRequest
print r
print "-" * 65
#print 'Output file is =>', outFile
# comment or uncomment next line to debug
#print(r.content)
global response
response = r.content
searchReflection()
def searchReflection():
#searchPayload = varRandomString + tryToBreak
    searchPayload = varRandomString + inject
    # escape the payload so regex metacharacters such as ( ) / match literally
    if re.search(re.escape(searchPayload), response):
print "Found", searchPayload, "on Response !"
print "=" * 65
else:
print "REFLECTION NOT FOUND"
def main(argv):
    if len(argv) < 2:
        usage()
        sys.exit(2)
try:
opts, args = getopt.getopt(argv,"hu:o:",["urlSet=","outFile="])
except getopt.GetoptError:
print "goXSS.py -u <attackurl> -o <outputfile>"
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'goXSS.py -u <attackurl> -o <outputfile>'
sys.exit()
elif opt in ("-u", "--url"):
global urlSet
urlSet = arg
elif opt in ("-o", "--ofile"):
global outFile
outFile = arg
attackUrl()
if __name__ == "__main__":
main(sys.argv[1:])
##### TODO
# should also "fuzz" http methods
# try all methods, like requests.put, request.head, etc
#def encodeChars():
# should encode *breakChars in unicode, hex, url, double encode
# Search reflection function should be created in a manner
# to search for literal "chars" reflected (raw hex value)
# ===== file: models/match/multiview-simnet/model.py (repo: seiriosPlus/PaddleRec, license: Apache-2.0) =====
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle.fluid.layers.tensor as tensor
import paddle.fluid.layers.control_flow as cf
from paddlerec.core.utils import envs
from paddlerec.core.model import Model as ModelBase
class BowEncoder(object):
""" bow-encoder """
def __init__(self):
self.param_name = ""
def forward(self, emb):
return fluid.layers.sequence_pool(input=emb, pool_type='sum')
class CNNEncoder(object):
""" cnn-encoder"""
def __init__(self,
param_name="cnn",
win_size=3,
ksize=128,
act='tanh',
pool_type='max'):
self.param_name = param_name
self.win_size = win_size
self.ksize = ksize
self.act = act
self.pool_type = pool_type
def forward(self, emb):
return fluid.nets.sequence_conv_pool(
input=emb,
num_filters=self.ksize,
filter_size=self.win_size,
act=self.act,
pool_type=self.pool_type,
param_attr=self.param_name + ".param",
bias_attr=self.param_name + ".bias")
class GrnnEncoder(object):
""" grnn-encoder """
def __init__(self, param_name="grnn", hidden_size=128):
self.param_name = param_name
self.hidden_size = hidden_size
def forward(self, emb):
fc0 = fluid.layers.fc(input=emb,
size=self.hidden_size * 3,
param_attr=self.param_name + "_fc.w",
bias_attr=False)
gru_h = fluid.layers.dynamic_gru(
input=fc0,
size=self.hidden_size,
is_reverse=False,
param_attr=self.param_name + ".param",
bias_attr=self.param_name + ".bias")
return fluid.layers.sequence_pool(input=gru_h, pool_type='max')
class SimpleEncoderFactory(object):
def __init__(self):
pass
    def create(self, enc_type, enc_hid_size):
        """ create an encoder through create function """
if enc_type == "bow":
bow_encode = BowEncoder()
return bow_encode
elif enc_type == "cnn":
cnn_encode = CNNEncoder(ksize=enc_hid_size)
return cnn_encode
elif enc_type == "gru":
rnn_encode = GrnnEncoder(hidden_size=enc_hid_size)
return rnn_encode
class Model(ModelBase):
def __init__(self, config):
ModelBase.__init__(self, config)
self.init_config()
def init_config(self):
self._fetch_interval = 1
query_encoder = envs.get_global_env("hyper_parameters.query_encoder", None, self._namespace)
title_encoder = envs.get_global_env("hyper_parameters.title_encoder", None, self._namespace)
query_encode_dim = envs.get_global_env("hyper_parameters.query_encode_dim", None, self._namespace)
title_encode_dim = envs.get_global_env("hyper_parameters.title_encode_dim", None, self._namespace)
query_slots = envs.get_global_env("hyper_parameters.query_slots", None, self._namespace)
title_slots = envs.get_global_env("hyper_parameters.title_slots", None, self._namespace)
factory = SimpleEncoderFactory()
self.query_encoders = [
factory.create(query_encoder, query_encode_dim)
for i in range(query_slots)
]
self.title_encoders = [
factory.create(title_encoder, title_encode_dim)
for i in range(title_slots)
]
self.emb_size = envs.get_global_env("hyper_parameters.sparse_feature_dim", None, self._namespace)
self.emb_dim = envs.get_global_env("hyper_parameters.embedding_dim", None, self._namespace)
self.emb_shape = [self.emb_size, self.emb_dim]
self.hidden_size = envs.get_global_env("hyper_parameters.hidden_size", None, self._namespace)
self.margin = 0.1
def input(self, is_train=True):
self.q_slots = [
fluid.data(
name="%d" % i, shape=[None, 1], lod_level=1, dtype='int64')
for i in range(len(self.query_encoders))
]
self.pt_slots = [
fluid.data(
name="%d" % (i + len(self.query_encoders)), shape=[None, 1], lod_level=1, dtype='int64')
for i in range(len(self.title_encoders))
]
        if not is_train:
return self.q_slots + self.pt_slots
self.nt_slots = [
fluid.data(
name="%d" % (i + len(self.query_encoders) + len(self.title_encoders)), shape=[None, 1], lod_level=1,
dtype='int64')
for i in range(len(self.title_encoders))
]
return self.q_slots + self.pt_slots + self.nt_slots
def train_input(self):
res = self.input()
self._data_var = res
use_dataloader = envs.get_global_env("hyper_parameters.use_DataLoader", False, self._namespace)
if self._platform != "LINUX" or use_dataloader:
self._data_loader = fluid.io.DataLoader.from_generator(
feed_list=self._data_var, capacity=256, use_double_buffer=False, iterable=False)
def get_acc(self, x, y):
less = tensor.cast(cf.less_than(x, y), dtype='float32')
label_ones = fluid.layers.fill_constant_batch_size_like(
input=x, dtype='float32', shape=[-1, 1], value=1.0)
correct = fluid.layers.reduce_sum(less)
total = fluid.layers.reduce_sum(label_ones)
acc = fluid.layers.elementwise_div(correct, total)
return acc
def net(self):
q_embs = [
fluid.embedding(
input=query, size=self.emb_shape, param_attr="emb")
for query in self.q_slots
]
pt_embs = [
fluid.embedding(
input=title, size=self.emb_shape, param_attr="emb")
for title in self.pt_slots
]
nt_embs = [
fluid.embedding(
input=title, size=self.emb_shape, param_attr="emb")
for title in self.nt_slots
]
# encode each embedding field with encoder
q_encodes = [
self.query_encoders[i].forward(emb) for i, emb in enumerate(q_embs)
]
pt_encodes = [
self.title_encoders[i].forward(emb) for i, emb in enumerate(pt_embs)
]
nt_encodes = [
self.title_encoders[i].forward(emb) for i, emb in enumerate(nt_embs)
]
# concat multi view for query, pos_title, neg_title
q_concat = fluid.layers.concat(q_encodes)
pt_concat = fluid.layers.concat(pt_encodes)
nt_concat = fluid.layers.concat(nt_encodes)
# projection of hidden layer
q_hid = fluid.layers.fc(q_concat,
size=self.hidden_size,
param_attr='q_fc.w',
bias_attr='q_fc.b')
pt_hid = fluid.layers.fc(pt_concat,
size=self.hidden_size,
param_attr='t_fc.w',
bias_attr='t_fc.b')
nt_hid = fluid.layers.fc(nt_concat,
size=self.hidden_size,
param_attr='t_fc.w',
bias_attr='t_fc.b')
# cosine of hidden layers
cos_pos = fluid.layers.cos_sim(q_hid, pt_hid)
cos_neg = fluid.layers.cos_sim(q_hid, nt_hid)
# pairwise hinge_loss
loss_part1 = fluid.layers.elementwise_sub(
tensor.fill_constant_batch_size_like(
input=cos_pos,
shape=[-1, 1],
value=self.margin,
dtype='float32'),
cos_pos)
loss_part2 = fluid.layers.elementwise_add(loss_part1, cos_neg)
loss_part3 = fluid.layers.elementwise_max(
tensor.fill_constant_batch_size_like(
input=loss_part2, shape=[-1, 1], value=0.0, dtype='float32'),
loss_part2)
self.avg_cost = fluid.layers.mean(loss_part3)
self.acc = self.get_acc(cos_neg, cos_pos)
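        # A worked example of the pairwise hinge loss above (margin = 0.1):
        #   loss = max(0, margin - cos_pos + cos_neg)
        #   cos_pos = 0.8, cos_neg = 0.3 -> max(0, 0.1 - 0.8 + 0.3) = 0.0
        #   cos_pos = 0.4, cos_neg = 0.5 -> max(0, 0.1 - 0.4 + 0.5) = 0.2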
def avg_loss(self):
self._cost = self.avg_cost
def metrics(self):
self._metrics["loss"] = self.avg_cost
self._metrics["acc"] = self.acc
def train_net(self):
self.train_input()
self.net()
self.avg_loss()
self.metrics()
def optimizer(self):
learning_rate = envs.get_global_env("hyper_parameters.learning_rate", None, self._namespace)
optimizer = fluid.optimizer.Adam(learning_rate=learning_rate)
return optimizer
def infer_input(self):
res = self.input(is_train=False)
self._infer_data_var = res
self._infer_data_loader = fluid.io.DataLoader.from_generator(
feed_list=self._infer_data_var, capacity=64, use_double_buffer=False, iterable=False)
def infer_net(self):
self.infer_input()
# lookup embedding for each slot
q_embs = [
fluid.embedding(
input=query, size=self.emb_shape, param_attr="emb")
for query in self.q_slots
]
pt_embs = [
fluid.embedding(
input=title, size=self.emb_shape, param_attr="emb")
for title in self.pt_slots
]
# encode each embedding field with encoder
q_encodes = [
self.query_encoders[i].forward(emb) for i, emb in enumerate(q_embs)
]
pt_encodes = [
self.title_encoders[i].forward(emb) for i, emb in enumerate(pt_embs)
]
# concat multi view for query, pos_title, neg_title
q_concat = fluid.layers.concat(q_encodes)
pt_concat = fluid.layers.concat(pt_encodes)
# projection of hidden layer
q_hid = fluid.layers.fc(q_concat,
size=self.hidden_size,
param_attr='q_fc.w',
bias_attr='q_fc.b')
pt_hid = fluid.layers.fc(pt_concat,
size=self.hidden_size,
param_attr='t_fc.w',
bias_attr='t_fc.b')
# cosine of hidden layers
cos = fluid.layers.cos_sim(q_hid, pt_hid)
self._infer_results['query_pt_sim'] = cos
| 36.374172
| 116
| 0.593446
|
7f797c552beb18902c986182c04e291a8f147e2e
| 618
|
py
|
Python
|
pymanopt/tools/autodiff/_backend.py
|
calincru/pymanopt
|
3eb4696ea7fc62e89905409afadc3d905b36ed30
|
[
"BSD-3-Clause"
] | 21
|
2020-06-18T20:35:33.000Z
|
2022-03-29T16:46:08.000Z
|
pymanopt/tools/autodiff/_backend.py
|
leonbottou/pymanopt
|
7d8c46f4513c3746234ba804604694b11db62d0a
|
[
"BSD-3-Clause"
] | null | null | null |
pymanopt/tools/autodiff/_backend.py
|
leonbottou/pymanopt
|
7d8c46f4513c3746234ba804604694b11db62d0a
|
[
"BSD-3-Clause"
] | 3
|
2021-03-30T09:08:57.000Z
|
2022-03-24T07:53:21.000Z
|
from functools import wraps
def assert_backend_available(f):
@wraps(f)
def inner(backend, *args, **kwargs):
if not backend.is_available():
raise RuntimeError(
"Backend `{:s}` is not available".format(str(backend)))
return f(backend, *args, **kwargs)
return inner
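# A minimal usage sketch (hypothetical subclass), showing how the decorator
# guards backend methods:
#
#   class DummyBackend(Backend):
#       @assert_backend_available
#       def compile_function(self, objective, argument):
#           return objective
#
#   DummyBackend().compile_function(None, None)
#   # -> RuntimeError, since Backend.is_available() returns False by default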
class Backend(object):
def __str__(self):
return "<backend>"
def __id(self, objective, argument):
return objective
compile_function = compute_gradient = compute_hessian = __id
def __false(self):
return False
is_available = is_compatible = __false
| 22.888889
| 71
| 0.635922
|
607d202aba2d5ec60a2a22d9ae462f7a144a7a1d
| 544
|
py
|
Python
|
demo_numpy.py
|
gmunozhe/astr-119-session-4
|
3ee9f816587797b8a8580112b95f886e793791bd
|
[
"MIT"
] | null | null | null |
demo_numpy.py
|
gmunozhe/astr-119-session-4
|
3ee9f816587797b8a8580112b95f886e793791bd
|
[
"MIT"
] | null | null | null |
demo_numpy.py
|
gmunozhe/astr-119-session-4
|
3ee9f816587797b8a8580112b95f886e793791bd
|
[
"MIT"
] | null | null | null |
import numpy as np
x = 1.0 #define a float
y = 2.0 #define another float
#trig
print(np.sin(x)) #sin(x)
print(np.cos(x)) #cos(x)
print(np.tan(x)) #tan(x)
print(np.arcsin(x)) #arcsin(x)
print(np.arccos(x)) #arccos(x)
print(np.arctan(x)) #arctan(x)
print(np.arctan2(x,y)) #arctan(x/y)
print(np.rad2deg(x)) #convert rad to deg
#hyperbolic functions
print(np.sinh(x)) #sinh(x)
print(np.cosh(x)) #cosh(x)
print(np.tanh(x)) #tanh(x)
print(np.arcsinh(x)) #arcsinh(x)
print(np.arccosh(x)) #arccosh(x)
print(np.arctanh(x)) #arctanh(x)
| 24.727273
| 41
| 0.654412
|
5ace0dcec041557ef65f26f30f329a4ea04e3288
| 1,084
|
py
|
Python
|
dimsim/tests/test_dimsim.py
|
blowsfire/DimSim
|
d81a552f9b21a547c0b39ceffe0a596e289da9f4
|
[
"Apache-2.0"
] | 82
|
2018-11-07T08:03:50.000Z
|
2022-03-17T14:53:29.000Z
|
dimsim/tests/test_dimsim.py
|
blowsfire/DimSim
|
d81a552f9b21a547c0b39ceffe0a596e289da9f4
|
[
"Apache-2.0"
] | 11
|
2019-01-24T02:57:02.000Z
|
2021-03-05T06:11:10.000Z
|
dimsim/tests/test_dimsim.py
|
blowsfire/DimSim
|
d81a552f9b21a547c0b39ceffe0a596e289da9f4
|
[
"Apache-2.0"
] | 23
|
2018-12-10T10:19:10.000Z
|
2022-03-10T09:54:17.000Z
|
# -*- coding: utf-8 -*-
# Standard libs
import io
import os
# Dependencies
import pytest
# The module to test
from dimsim.core.model import get_distance, get_candidates
def test_distance_near():
dist = get_distance(u'大侠',u'大虾')
assert dist == 0.0002380952380952381
def test_distance_far():
dist = get_distance(u'大侠',u'大人')
assert dist == 25.001417183349876
def test_distance_pinyin():
dist = get_distance(['da4','xia2'],['da4','xia1'],pinyin=True)
assert dist == 0.0002380952380952381
def test_invalid_input():
pytest.raises(AssertionError, get_distance, u'大侠', u'大')
def test_get_candidates_simplified():
candidates = get_candidates(u'大侠', mode='simplified', theta=1)
for c in candidates:
assert c in [u'打下', u'大虾', u'大侠']
def test_get_candidates_traditional():
candidates = get_candidates(u'粉丝', mode='traditional', theta=1)
for c in candidates:
assert c in [u'門市', u'分時', u'焚屍', u'粉飾', u'粉絲']
if __name__ == '__main__':
pytest.main([__file__])
| 27.794872
| 71
| 0.644834
|
158181450ce624f5f615bc988aaa119ded2e7b43
| 4,605
|
py
|
Python
|
tenant_schemas/middleware.py
|
BillSchumacher/django-tenant-schemas
|
a4e3cdce3465fe7572f5cb8e84b7699386582c38
|
[
"MIT"
] | null | null | null |
tenant_schemas/middleware.py
|
BillSchumacher/django-tenant-schemas
|
a4e3cdce3465fe7572f5cb8e84b7699386582c38
|
[
"MIT"
] | 1
|
2022-02-19T22:48:12.000Z
|
2022-02-19T22:48:12.000Z
|
tenant_schemas/middleware.py
|
BillSchumacher/django-tenant-schemas
|
a4e3cdce3465fe7572f5cb8e84b7699386582c38
|
[
"MIT"
] | null | null | null |
import django
from django.conf import settings
from django.core.exceptions import DisallowedHost
from django.db import connection
from django.http import Http404
from tenant_schemas.utils import (
get_public_schema_name,
get_tenant_model,
remove_www,
)
"""
These middlewares should be placed at the very top of the middleware stack.
Selects the proper database schema using request information. Can fail in
various ways which is better than corrupting or revealing data.
Extend BaseTenantMiddleware for a custom tenant selection strategy,
such as inspecting the header, or extracting it from some OAuth token.
"""
class BaseTenantMiddleware(object):
TENANT_NOT_FOUND_EXCEPTION = Http404
"""
Subclass and override this to achieve desired behaviour. Given a
request, return the tenant to use. Tenant should be an instance
of TENANT_MODEL. We have three parameters for backwards compatibility
(the request would be enough).
"""
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
        # Select the tenant schema before the view (and later middleware)
        # are called; without this the middleware would never set the tenant.
        self.process_request(request)
        response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
    def get_tenant(self, model, hostname, request):
        """
        Subclass and override this to achieve desired behaviour. Given a
        request, return the tenant to use. Tenant should be an instance
        of TENANT_MODEL. We have three parameters for backwards compatibility
        (the request would be enough).
        """
        raise NotImplementedError
def hostname_from_request(self, request):
""" Extracts hostname from request. Used for custom requests filtering.
By default removes the request's port and common prefixes.
"""
return remove_www(request.get_host().split(":")[0]).lower()
def process_request(self, request):
# Connection needs first to be at the public schema, as this is where
# the tenant metadata is stored.
connection.set_schema_to_public()
hostname = self.hostname_from_request(request)
TenantModel = get_tenant_model()
try:
# get_tenant must be implemented by extending this class.
tenant = self.get_tenant(TenantModel, hostname, request)
assert isinstance(tenant, TenantModel)
except TenantModel.DoesNotExist:
raise self.TENANT_NOT_FOUND_EXCEPTION(
"No tenant for {!r}".format(request.get_host())
)
except AssertionError:
raise self.TENANT_NOT_FOUND_EXCEPTION(
"Invalid tenant {!r}".format(request.tenant)
)
request.tenant = tenant
connection.set_tenant(request.tenant)
# Do we have a public-specific urlconf?
if (
hasattr(settings, "PUBLIC_SCHEMA_URLCONF")
and request.tenant.schema_name == get_public_schema_name()
):
request.urlconf = settings.PUBLIC_SCHEMA_URLCONF
class TenantMiddleware(BaseTenantMiddleware):
"""
Selects the proper database schema using the request host. E.g. <my_tenant>.<my_domain>
"""
def get_tenant(self, model, hostname, request):
return model.objects.get(domain_url=hostname)
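# A minimal sketch of the header-based strategy mentioned in the module
# docstring; the header name is hypothetical:
#
#   class HeaderTenantMiddleware(BaseTenantMiddleware):
#       def get_tenant(self, model, hostname, request):
#           return model.objects.get(
#               schema_name=request.META["HTTP_X_TENANT"])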
class SuspiciousTenantMiddleware(TenantMiddleware):
"""
Extend the TenantMiddleware in scenario where you need to configure
``ALLOWED_HOSTS`` to allow ANY domain_url to be used because your tenants
can bring any custom domain with them, as opposed to all tenants being a
subdomain of a common base.
See https://github.com/bernardopires/django-tenant-schemas/pull/269 for
discussion on this middleware.
"""
TENANT_NOT_FOUND_EXCEPTION = DisallowedHost
class DefaultTenantMiddleware(SuspiciousTenantMiddleware):
"""
Extend the SuspiciousTenantMiddleware in scenario where you want to
configure a tenant to be served if the hostname does not match any of the
existing tenants.
Subclass and override DEFAULT_SCHEMA_NAME to use a schema other than the
public schema.
class MyTenantMiddleware(DefaultTenantMiddleware):
DEFAULT_SCHEMA_NAME = 'default'
"""
DEFAULT_SCHEMA_NAME = None
def get_tenant(self, model, hostname, request):
try:
return super(DefaultTenantMiddleware, self).get_tenant(
model, hostname, request
)
except model.DoesNotExist:
schema_name = self.DEFAULT_SCHEMA_NAME
if not schema_name:
schema_name = get_public_schema_name()
return model.objects.get(schema_name=schema_name)
| 33.613139
| 91
| 0.692725
|
b81abac88477bd0299011f5c1591650394a83ea8
| 1,185
|
py
|
Python
|
plugins/extract/detect/manual.py
|
RedPillGroup/RedPillFaceSwap
|
2313d4fe0e6e7b579ce450d556ad4fd6bd0cbed6
|
[
"MIT"
] | 1
|
2020-04-14T22:16:50.000Z
|
2020-04-14T22:16:50.000Z
|
plugins/extract/detect/manual.py
|
RedPillGroup/RedPillFaceSwap
|
2313d4fe0e6e7b579ce450d556ad4fd6bd0cbed6
|
[
"MIT"
] | null | null | null |
plugins/extract/detect/manual.py
|
RedPillGroup/RedPillFaceSwap
|
2313d4fe0e6e7b579ce450d556ad4fd6bd0cbed6
|
[
"MIT"
] | 2
|
2020-04-14T22:17:09.000Z
|
2020-10-30T03:01:13.000Z
|
#!/usr/bin/env python3
""" Manual face detection plugin """
from ._base import Detector, dlib
class Detect(Detector):
""" Manual Detector """
def __init__(self, **kwargs):
super().__init__(**kwargs)
def set_model_path(self):
""" No model required for Manual Detector """
return None
def initialize(self, *args, **kwargs):
""" Create the mtcnn detector """
print("Initializing Manual Detector...")
super().initialize(*args, **kwargs)
self.init.set()
print("Initialized Manual Detector.")
def detect_faces(self, *args, **kwargs):
""" Return the given bounding box in a dlib rectangle """
super().detect_faces(*args, **kwargs)
while True:
item = self.queues["in"].get()
if item == "EOF":
break
image, face = item
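            # face is assumed to arrive as [left, top, right, bottom] pixel
            # coordinates; dlib.rectangle takes them in that same order.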
bounding_box = [dlib.rectangle(int(face[0]), int(face[1]),
int(face[2]), int(face[3]))]
retval = {"image": image,
"detected_faces": bounding_box}
self.finalize(retval)
self.queues["out"].put("EOF")
| 30.384615
| 71
| 0.538397
|
a1cfce7bd9c169353199d420666a73fafeeb2112
| 1,511
|
py
|
Python
|
alipay/aop/api/domain/AlipayDataDataserviceAntdacEasyserviceQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayDataDataserviceAntdacEasyserviceQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayDataDataserviceAntdacEasyserviceQueryModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataDataserviceAntdacEasyserviceQueryModel(object):
def __init__(self):
self._method_id = None
self._parameter_json = None
@property
def method_id(self):
return self._method_id
@method_id.setter
def method_id(self, value):
self._method_id = value
@property
def parameter_json(self):
return self._parameter_json
@parameter_json.setter
def parameter_json(self, value):
self._parameter_json = value
def to_alipay_dict(self):
params = dict()
if self.method_id:
if hasattr(self.method_id, 'to_alipay_dict'):
params['method_id'] = self.method_id.to_alipay_dict()
else:
params['method_id'] = self.method_id
if self.parameter_json:
if hasattr(self.parameter_json, 'to_alipay_dict'):
params['parameter_json'] = self.parameter_json.to_alipay_dict()
else:
params['parameter_json'] = self.parameter_json
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataDataserviceAntdacEasyserviceQueryModel()
if 'method_id' in d:
o.method_id = d['method_id']
if 'parameter_json' in d:
o.parameter_json = d['parameter_json']
return o
| 26.982143
| 79
| 0.621443
|
68bcd8c281d7dc2f7bd7b869d529645ea319cddc
| 2,128
|
py
|
Python
|
utils/dpbench_datagen/kmeans/generate_data_random.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 8
|
2021-03-26T15:17:58.000Z
|
2022-01-21T21:56:19.000Z
|
utils/dpbench_datagen/kmeans/generate_data_random.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 22
|
2021-03-30T21:20:57.000Z
|
2022-02-22T13:42:17.000Z
|
utils/dpbench_datagen/kmeans/generate_data_random.py
|
geexie/dpbench
|
7d41409ded3c816f35003bc5aea071852bceb892
|
[
"BSD-2-Clause"
] | 7
|
2021-03-23T11:00:43.000Z
|
2022-02-02T12:28:55.000Z
|
import numpy as np
try:
import numpy.random_intel as rnd
except ImportError:
import numpy.random as rnd
# constants used for input data generation
SEED = 7777777
XL = 1.0
XH = 5.0
# write input data to a file in binary format
def __dump_binary__(X, arrayPclusters, arrayC, arrayCsum, arrayCnumpoint):
with open("X.bin", "w") as fd:
X.tofile(fd)
with open("arrayPclusters.bin", "w") as fd:
arrayPclusters.tofile(fd)
# with open('arrayC.bin', 'w') as fd:
# arrayC.tofile(fd)
# with open('arrayCsum.bin', 'w') as fd:
# arrayCsum.tofile(fd)
# with open('arrayCnumpoint.bin', 'w') as fd:
# arrayCnumpoint.tofile(fd)
# write input data to a file in text format
def __dump_text__(X, arrayPclusters, arrayC, arrayCsum, arrayCnumpoint):
with open("X.txt", "w") as fd:
X.tofile(fd, "\n", "%s")
with open("arrayPclusters.txt", "w") as fd:
arrayPclusters.tofile(fd, "\n", "%s")
# with open('arrayC.txt', 'w') as fd:
# arrayC.tofile(fd, '\n', '%s')
# with open('arrayCsum.txt', 'w') as fd:
# arrayCsum.tofile(fd, '\n', '%s')
# with open('arrayCnumpoint.txt', 'w') as fd:
# arrayCnumpoint.tofile(fd, '\n', '%s')
# call numpy.random.uniform to generate input data
def gen_rand_data(nopt, dims=2, NUMBER_OF_CENTROIDS=10, dtype=np.float64):
rnd.seed(SEED)
return (
rnd.uniform(XL, XH, (nopt, dims)).astype(dtype),
np.ones(nopt, dtype=np.int32),
np.ones((NUMBER_OF_CENTROIDS, 2), dtype=dtype),
np.ones((NUMBER_OF_CENTROIDS, 2), dtype=dtype),
np.ones(NUMBER_OF_CENTROIDS, dtype=np.int32),
)
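# A minimal usage sketch: 1000 random 2-D points in [XL, XH) plus the
# auxiliary arrays consumed by the benchmark:
#   X, pclusters, C, Csum, Cnum = gen_rand_data(1000)
#   # X.shape == (1000, 2); pclusters.shape == (1000,)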
# call numpy.random.uniform to generate input data and write the input as binary to a file
def gen_data_to_file(nopt, dims=2, NUMBER_OF_CENTROIDS=10, dtype=np.float64):
X, arrayPclusters, arrayC, arrayCsum, arrayCnumpoint = gen_rand_data(
nopt, dims, NUMBER_OF_CENTROIDS, dtype
)
__dump_binary__(X, arrayPclusters, arrayC, arrayCsum, arrayCnumpoint)
# __dump_text__(X,arrayPclusters,arrayC,arrayCsum,arrayCnumpoint) #for verification purpose only
| 31.294118
| 100
| 0.654135
|
241c12e14dbb5788f2ef3baaa2c49ec7834ffd7f
| 640
|
py
|
Python
|
backend/manage.py
|
crowdbotics-apps/solitary-scene-29128
|
2a516e2a2175f144c83f85110a3ebc7dbe364bd6
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
crowdbotics-apps/solitary-scene-29128
|
2a516e2a2175f144c83f85110a3ebc7dbe364bd6
|
[
"FTL",
"AML",
"RSA-MD"
] | 15
|
2021-07-24T22:01:20.000Z
|
2021-07-24T22:01:26.000Z
|
backend/manage.py
|
crowdbotics-apps/solitary-scene-29128
|
2a516e2a2175f144c83f85110a3ebc7dbe364bd6
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'solitary_scene_29128.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.090909
| 84
| 0.689063
|
dbcf4cbdaa6a29b778ce2262a0a2bb25fb13a7bc
| 24,816
|
py
|
Python
|
solaris/files/usr/local/bin/diskmap.py
|
rafie/zfs-on-solaris-setup
|
cd777d1420b3de0af4342dedd1038e9e96509b0a
|
[
"MIT"
] | null | null | null |
solaris/files/usr/local/bin/diskmap.py
|
rafie/zfs-on-solaris-setup
|
cd777d1420b3de0af4342dedd1038e9e96509b0a
|
[
"MIT"
] | null | null | null |
solaris/files/usr/local/bin/diskmap.py
|
rafie/zfs-on-solaris-setup
|
cd777d1420b3de0af4342dedd1038e9e96509b0a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###########################
# Please go to https://github.com/swacquie/DiskMap
###########################
VERSION="0.11b"
import subprocess, re, os, sys, readline, cmd, pickle, glob
from pprint import pformat, pprint
pj = os.path.join
from socket import gethostname
hostname = gethostname()
cachefile = "/tmp/pouet"
sas2ircu = "/usr/local/bin/sas2ircu"
prtconf = "/usr/sbin/prtconf"
zpool = "/usr/sbin/zpool"
smartctl = "/usr/sbin/smartctl"
mdb = "/usr/bin/mdb"
def run(cmd, args, tosend=""):
if not isinstance(args, list):
args = [ args ]
if not os.path.exists(cmd):
raise Exception("Executable %s not found, please provide absolute path"%cmd)
args = tuple([ str(i) for i in args ])
if tosend:
process = subprocess.Popen((cmd,) + args,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
return process.communicate(tosend)[0]
else:
return subprocess.Popen((cmd,) + args,
stdout=subprocess.PIPE).communicate()[0]
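# e.g. run(zpool, "status") returns the stdout of `/usr/sbin/zpool status`;
# passing tosend additionally feeds that string to the command's stdin.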
def revert(mydict):
return dict([ (v,k) for k,v in mydict.items()])
def cleandict(mydict, *toint):
result = {}
for k in mydict.keys():
if k in toint:
result[k] = long(mydict[k])
elif isinstance(mydict[k], str):
result[k] = mydict[k].strip()
else:
result[k] = mydict[k]
return result
def megabyze(i, fact=1000):
"""
Return the size in Kilo, Mega, Giga, Tera, Peta according to the input.
"""
i = float(i)
for unit in "", "K", "M", "G", "T", "P":
if i < 2000: break
i = i / fact
return "%.1f%s"%(i, unit)
class SesManager(cmd.Cmd):
def __init__(self, *l, **kv):
cmd.Cmd.__init__(self, *l, **kv)
self._enclosures = {}
self._controllers = {}
self._disks = {}
self.aliases = {}
self.prompt = "Diskmap - %s> "%hostname
@property
def disks(self):
return dict([ (k, v) for k, v in self._disks.items() if k.startswith("/dev/rdsk/") ])
@property
def enclosures(self):
return self._enclosures
@property
def controllers(self):
return self._controllers
def discover_controllers(self, fromstring=None):
""" Discover controller present in the computer """
if not fromstring:
fromstring = run(sas2ircu, "LIST")
tmp = re.findall("(\n +[0-9]+ +.*)", fromstring)
for ctrl in tmp:
ctrl = ctrl.strip()
m = re.match("(?P<id>[0-9]) +(?P<adaptertype>[^ ].*[^ ]) +(?P<vendorid>[^ ]+) +"
"(?P<deviceid>[^ ]+) +(?P<pciadress>[^ ]*:[^ ]*) +(?P<subsysvenid>[^ ]+) +"
"(?P<subsysdevid>[^ ]+) *", ctrl)
if m:
m = cleandict(m.groupdict(), "id")
self._controllers[m["id"]] = m
def discover_enclosures(self, ctrls = None):
""" Discover enclosure wired to controller. Ctrls = { 0: 'sas2ircu output', 1: 'sas2ircu output', ...}"""
if not ctrls:
tmp = {}
for ctrl in self.controllers.keys():
tmp[ctrl] = run(sas2ircu, [ctrl, "DISPLAY"])
ctrls = tmp
for ctrl, output in ctrls.items():
enclosures = {}
# Discover enclosures
for m in re.finditer("Enclosure# +: (?P<index>[^ ]+)\n +"
"Logical ID +: (?P<id>[^ ]+)\n +"
"Numslots +: (?P<numslot>[0-9]+)", output):
m = cleandict(m.groupdict(), "index", "numslot")
m["controller"] = ctrl
self._enclosures[m["id"].lower()] = m
enclosures[m["index"]] = m
# Discover Drives
for m in re.finditer("Device is a Hard disk\n +"
"Enclosure # +: (?P<enclosureindex>[^\n]*)\n +"
"Slot # +: (?P<slot>[^\n]*)\n +"
"(SAS Address +: (?P<sasaddress>[^\n]*)\n +)?"
"State +: (?P<state>[^\n]*)\n +"
"Size .in MB./.in sectors. +: (?P<sizemb>[^/]*)/(?P<sizesector>[^\n]*)\n +"
"Manufacturer +: (?P<manufacturer>[^\n]*)\n +"
"Model Number +: (?P<model>[^\n]*)\n +"
"Firmware Revision +: (?P<firmware>[^\n]*)\n +"
"Serial No +: (?P<serial>[^\n]*)\n +"
"(GUID +: (?P<guid>[^\n]*)\n +)?"
"Protocol +: (?P<protocol>[^\n]*)\n +"
"Drive Type +: (?P<drivetype>[^\n]*)\n"
, output):
m = cleandict(m.groupdict(), "enclosureindex", "slot", "sizemb", "sizesector")
m["enclosure"] = enclosures[m["enclosureindex"]]["id"]
m["controller"] = ctrl
self._disks[m["serial"]] = m
def discover_mapping(self, fromstring=None):
""" use prtconf to get real device name using disk serial """
if not fromstring:
fromstring = run(prtconf, "-v")
# Do some ugly magic to get what we want
# First, get one line per drive
tmp = fromstring.replace("\n", "").replace("disk, instance", "\n")
# Then match with regex
tmp = re.findall("name='inquiry-serial-no' type=string items=1 dev=none +value='([^']+)'"
".*?"
#"name='client-guid' type=string items=1 *value='([^']+)'"
#".*?"
"dev_link=(/dev/rdsk/c[^ ]*d0)s0", tmp)
# Capitalize serial an guid
for serial, device in tmp:
serial = serial.strip().upper()
# Sometimes serial returned by prtconf and by sas2ircu are different. Mangle them
if serial not in self._disks and serial.replace("-", "") in self._disks:
serial = serial.replace("-", "")
if serial in self._disks:
# Add device name to disks
self._disks[serial]["device"] = device
# Add a reverse lookup
self._disks[device] = self._disks[serial]
else:
print "Warning : Got the serial %s from prtconf, but can't find it in disk detected by sas2ircu (disk removed/not on backplane ?)"%serial
def discover_zpool(self, fromstring=None):
""" Try to locate disk in current zpool configuration"""
if not fromstring:
fromstring = run(zpool, "status")
pools = fromstring.split("pool:")
for pool in pools:
if not pool.strip(): continue
for m in re.finditer(" (?P<pool>[^\n]+)\n *" # We've splitted on pool:, so our first word is the pool name
"state: (?P<state>[^ ]+)\n *"
"(status: (?P<status>(.|\n)+)\n *)??"
"scan: (?P<scan>(.|\n)*)\n *"
"config: ?(?P<config>(.|\n)*)\n *"
"errors: (?P<errors>[^\n]*)"
,pool):
m = m.groupdict()
parent = "stripped"
for disk in re.finditer("(?P<indent>[ \t]+)(?P<name>[^ \t\n]+)( +(?P<state>[^ \t\n]+) +)?("
"(?P<read>[^ \t\n]+) +(?P<write>[^ \t\n]+) +"
"(?P<cksum>[^\n]+))?(?P<notes>[^\n]+)?\n", m["config"]):
disk = disk.groupdict()
if not disk["name"] or disk["name"] in ("NAME", m["pool"]):
continue
if disk["name"][-4:-2] == "d0":
disk["name"] = disk["name"][:-2]
if (disk["name"].startswith("mirror") or
disk["name"].startswith("log") or
disk["name"].startswith("raid") or
disk["name"].startswith("spare") or
disk["name"].startswith("cache")):
parent = disk["name"].strip()
continue
if "/dev/rdsk" not in disk["name"]:
disk["name"] = "/dev/rdsk/%s"%disk["name"]
if disk["name"] not in self._disks:
print "Warning : Got the disk %s from zpool status, but can't find it in disk detected by sas2ircu (disk removed ?)"%disk["name"]
continue
self._disks[disk["name"]]["zpool"] = self._disks[disk["name"]].get("zpool", {})
self._disks[disk["name"]]["zpool"][m["pool"]] = parent
def set_leds(self, disks, value=True):
if isinstance(disks, dict):
disks = disks.values()
progress = xrange(1,len(disks)+1, 1).__iter__()
value = "on" if value else "off"
for disk in disks:
print "\rTurning leds %s : %3d/%d"%(value, progress.next(),len(disks)),
run(sas2ircu, [disk["controller"], "LOCATE", "%(enclosureindex)s:%(slot)s"%disk, value])
print
def preloop(self):
try:
self.do_load()
except:
print "Loading of previous save failed, trying to discover"
self.do_discover()
self.do_save()
def emptyline(self):
self.do_help("")
def do_quit(self, line):
"Quit"
return True
do_EOF = do_quit
def do_discover(self, configdir=""):
"""Perform discovery on host to populate controller, enclosures and disks
Take an optionnal parameter which can be a directory containing files dumped
with confidump.
"""
self._enclosures = {}
self._controllers = {}
self._disks = {}
if configdir and os.path.isdir(configdir):
            # We want to load data from another box for testing purposes
# So we don't want to catch any exception
files = os.listdir(configdir)
for f in ("prtconf-v.txt", "sas2ircu-0-display.txt", "sas2ircu-list.txt", "zpool-status.txt"):
if f not in files:
print "Invalid confdir, lacking of %s"%f
return
self.discover_controllers(file(pj(configdir, "sas2ircu-list.txt")).read())
files = glob.glob(pj(configdir, "sas2ircu-*-display.txt"))
tmp = {}
for name in files:
ctrlid = long(os.path.basename(name).split("-")[1])
tmp[ctrlid] = file(name).read()
self.discover_enclosures(tmp)
self.discover_mapping(file(pj(configdir, "prtconf-v.txt")).read())
self.discover_zpool(file(pj(configdir, "zpool-status.txt")).read())
else:
for a in ( "discover_controllers", "discover_enclosures",
"discover_mapping", "discover_zpool" ):
try:
getattr(self, a)()
except Exception, e:
print "Got an error during %s discovery : %s"%(a,e)
print "Please run %s configdump and send the report to dev"%sys.argv[0]
self.do_save()
do_refresh = do_discover
def do_save(self, line=cachefile):
"""Save data to cache file. Use file %s if not specified"""%cachefile
if not line: line = cachefile # Cmd pass a empty string
pickle.dump((self.controllers, self.enclosures, self._disks, self.aliases), file(line, "w+"))
def do_load(self, line=cachefile):
"""Load data from cache file. Use file %s if not specified"""%cachefile
self._controllers, self._enclosures, self._disks, self.aliases = pickle.load(file(line))
def do_enclosures(self, line):
"""Display detected enclosures"""
pprint(self.enclosures)
def do_controllers(self, line):
"""Display detected controllers"""
pprint(self.controllers)
def do_disks(self, line):
"""Display detected disks. Use -v for verbose output"""
list = [ ("%1d:%.2d:%.2d"%(v["controller"], v["enclosureindex"], v["slot"]), v)
for k,v in self.disks.items() ]
list.sort()
if line == "-v":
pprint (list)
return
totalsize = 0
for path, disk in list:
disk["path"] = path
disk["device"] = disk["device"].replace("/dev/rdsk/", "")
disk["readablesize"] = megabyze(disk["sizemb"]*1024*1024)
disk["pzpool"] = " / ".join([ "%s: %s"%(k,v) for k,v in disk.get("zpool", {}).items() ])
totalsize += disk["sizemb"]*1024*1024
print "%(path)s %(device)23s %(model)16s %(readablesize)6s %(state)s %(pzpool)s"%disk
print "Drives : %s Total Capacity : %s"%(len(self.disks), megabyze(totalsize))
def smartctl(self, disks, action="status"):
""" Execute smartctl on listed drive. If no drive selected, run it on all available drive. """
params = [ "-s", "on", "-d", "sat" ]
if action == "status":
params += [ "-a" ]
elif action == "test":
params += [ "-t", "short" ]
result = []
progress = xrange(1,len(disks)+1, 1).__iter__()
for disk in disks:
print "\rExecuting smartcl on %s : %3d/%d"%(disk["device"].replace("/dev/rdsk/",""),
progress.next(),len(disks)),
smartparams = params + [ disk["device"]+"p0" ]
result.append(run(smartctl, smartparams))
print "Done"
return result
def do_smartcl_getstatus(self, line):
# FIXME : line parsing
if line:
            raise NotImplementedError
else:
disks = self.disks.values()
for (disk, smartoutput) in zip(disks, self.smartctl(disks)):
try:
self._disks[disk["device"]]["smartoutput"] = smartoutput
smartoutput = re.sub("\n[ \t]+", " ", smartoutput)
if "test failed" in smartoutput:
print " Disk %s fail his last test"%disk["device"].replace("/dev/rdsk/", "")
zob= re.findall("(Self-test execution status.*)", smartoutput)
except KeyError:
pass
def do_smartcl_runtest(self, line):
# FIXME : line parsing
if line:
            raise NotImplementedError
else:
disks = self.disks.values()
self.smartctl(disks, action="test")
def get_enclosure(self, line):
""" Try to find an enclosure """
aliases = revert(self.aliases)
if line in aliases:
line = aliases[line]
if line in self.enclosures:
return line
if line.lower() in self.enclosures:
return line.lower()
try:
c, e = line.split(":", 1)
c, e = long(c), long(e)
tmp = [ v["id"].lower() for v in self.enclosures.values()
if v["controller"] == c and v["index"] == e ]
if len(tmp) != 1: raise
return tmp[0]
except Exception, e:
#print e
return None
def get_disk(self, line):
for t in (line, "/dev/rdsk/%s"%line, line.upper(), line.lower()):
tmp = self._disks.get(t, None)
if tmp:
return [ tmp ]
# Try to locate by path
try:
# Check if first element of path is an enclosure
tmp = line.split(":",2)
if len(tmp) == 2:
e = self.get_enclosure(tmp[0])
if e:
return [ disk for disk in self.disks.values()
if disk["enclosure"] == e and disk["slot"] == long(tmp[1]) ]
else:
c, e, s = tmp
c, e, s = long(c), long(e), long(s)
return [ disk for disk in self.disks.values()
if disk["controller"] == c and disk["enclosureindex"] == e
and disk["slot"] == s ]
except Exception, e:
#print e
return None
def do_drawletter(self, line):
""" Print a char on a 4x6 enclosure """
line = line.strip()
if not line: return
letters = { "N": [ 0, 1, 2, 3, 4, 5, 9, 10, 13, 14, 18, 19, 20, 21, 22, 23 ],
"X": [ 0, 1, 4, 5, 8, 9, 14, 15, 18 , 19, 22, 23 ],
                    # FIXME Add the digits
}
letter, enclosure = line.split(" ",1)
e = self.get_enclosure(enclosure)
        if not e:
            print "Invalid enclosure %s"%enclosure
            return
        self.do_ledoff(e)
self.set_leds([ disk for disk in self.disks.values()
if disk["slot"] in letters[letter] and disk["enclosure"] == e ], True)
def do_configdump(self, path):
if not path:
path = pj(".", "configudump-%s"%hostname)
if not os.path.exists(path):
os.makedirs(path)
tmp = run(sas2ircu, "LIST")
self.discover_controllers(tmp)
file(pj(path, "sas2ircu-list.txt"), "w").write(tmp)
for ctrl in self.controllers:
file(pj(path, "sas2ircu-%s-display.txt"%ctrl), "w").write(
run(sas2ircu, [ctrl, "DISPLAY"]))
file(pj(path, "prtconf-v.txt"), "w").write(
run(prtconf, "-v"))
file(pj(path, "zpool-status.txt"), "w").write(
run(zpool, "status"))
print "Dumped all value to path %s"%path
def ledparse(self, value, line):
line = line.strip()
targets = []
if line == "all":
targets = self.disks
else:
# Try to see if it's an enclosure
target = self.get_enclosure(line)
if target:
targets = [ disk for disk in self.disks.values() if disk["enclosure"] == target ]
else:
# Try to see if it's a disk
targets = self.get_disk(line)
if targets:
self.set_leds(targets, value)
else:
print "Could not find what you're talking about"
def do_ledon(self, line):
""" Turn on locate led on parameters FIXME : syntax parameters"""
self.ledparse(True, line)
def complete_ledon(self, text, line, begidx, endidx):
candidates = [ "all", "ALL" ]
candidates.extend(self.aliases.values())
candidates.extend([ disk["device"].replace("/dev/rdsk/", "") for disk in self.disks.values() ])
candidates.extend([ disk["serial"] for disk in self.disks.values() ])
candidates.extend([ "%(controller)s:%(enclosureindex)s:%(slot)s"%disk for disk in self.disks.values() ])
candidates.extend([ "%(controller)s:%(index)s"%enclosure for enclosure in self.enclosures.values() ] )
candidates.sort()
return [ i for i in candidates if i.startswith(text) ]
complete_ledoff = complete_ledon
def do_ledoff(self, line):
""" Turn off locate led on parameters FIXME : syntax parameters"""
self.ledparse(False, line)
def do_alias(self, line):
"""
        Used to set a name on an enclosure.
Usage : alias enclosure name
alias -r name
alias -r enclosure
Without parameters : list current alias
"""
if not line:
pprint(self.aliases)
elif line.startswith("-r"):
junk, alias = line.split(" ",1)
alias = alias.strip()
if alias in self.aliases:
del self.aliases[alias]
else:
# We have to do a reverse lookup to find it !
tmp = revert(self.aliases)
if alias in tmp:
del self.aliases[tmp[alias]]
self.do_save()
elif " " in line:
target, alias = line.split(" ",1)
alias = alias.strip()
enclosure = self.get_enclosure(target.strip())
if not enclosure:
print "No such enclosure %s"%target.lower()
else:
self.aliases[enclosure] = alias
self.do_save()
def complete_alias(self, text, line, begidx, endidx):
if line.startswith("alias -r "):
return ([ i for i in self.aliases.keys() if i.startswith(text) ] +
[ i for i in self.aliases.values() if i.startswith(text) ])
if line.count(" ") == 1:
result = []
result.extend(self.enclosures.keys())
result.extend([ "%(controller)s:%(index)s"%e for e in self.enclosures.values() ])
return [ i for i in result if i.startswith(text) ]
def do_mangle(self, junk=""):
""" This function is automatically called when piping something to diskmap.
        It'll suffix all drive names with the enclosure name they are in (defined with an
alias) and the drive slot.
Try : iostat -x -e -n 1 | diskmap.py
"""
if sys.stdin.isatty():
print "This command is not intented to be executed in interactive mode"
return
replacelist = []
for enclosure, alias in self.aliases.items():
for disk in self.disks.values():
if disk["enclosure"] == enclosure:
tmp = disk["device"].replace("/dev/rdsk/", "")
replacelist.append((tmp, "%s/%s%02d"%(tmp, alias, disk["slot"])))
line = sys.stdin.readline()
while line:
for r, e in replacelist:
line = line.replace(r, e)
sys.stdout.write(line)
sys.stdout.flush()
line = sys.stdin.readline()
def do_sd_timeout(self, timeout=""):
"""
Get / Set sd timeout value
        When no parameter is present, display the current sd_io_time, and check that running
        drives use the same timing.
        This script will only change the value for running drives. If you want to apply the change
        permanently, put 'set sd:sd_io_time=5' in /etc/system
        Be aware that the script will change the default value of sd_io_time, and also change
        the current value for all drives in your system.
See : http://blogs.everycity.co.uk/alasdair/2011/05/adjusting-drive-timeouts-with-mdb-on-solaris-or-openindiana/
"""
if timeout:
try:
timeout = int(timeout)
except:
print "Invalid timeout specified"
return
# Displaying current timeout
tmp = run(mdb, "-k", tosend="sd_io_time::print\n")
globaltimeout = int(tmp.strip(), 16)
print "Current Global sd_io_time : %s"%globaltimeout
drivestimeout = run(mdb, "-k", tosend="::walk sd_state | ::grep '.!=0' | "
"::print -a struct sd_lun un_cmd_timeout\n")
values = [ int(i, 16) for i in re.findall("= (0x[0-9a-f]+)", drivestimeout) if i ]
print "Got %s values from sd disk driver, %s are not equal to system default"%(
len(values), len(values)-values.count(globaltimeout))
if timeout: # We want to set new timeout for drives
# Set global timeout
print "Setting global timeout ...",
run(mdb, "-kw", tosend="sd_io_time/W 0x%x\n"%timeout)
# Set timeout for every drive
for driveid in re.findall("(.+) un_cmd_timeout", drivestimeout):
print "\rSetting timeout for drive id %s ..."%driveid,
run(mdb, "-kw", tosend="%s/W 0x%x\n"%(driveid, timeout))
print "Done"
print "Don't forget add to your /etc/system 'set sd:sd_io_time=%s' so change persist accross reboot"%timeout
def __str__(self):
result = []
for i in ("controllers", "enclosures", "disks"):
result.append(i.capitalize())
result.append("="*80)
result.append(pformat(getattr(self,i)))
result.append("")
return "\n".join(result)
import unittest
class TestConfigs(unittest.TestCase):
pass
if __name__ == "__main__":
#if not os.path.isfile(sas2ircu):
# sys.exit("Error, cannot find sas2ircu (%s)"%sas2ircu)
sm = SesManager()
if len(sys.argv) > 1:
sm.preloop()
sm.onecmd(" ".join(sys.argv[1:]))
sm.postloop()
elif sys.stdin.isatty():
sm.cmdloop()
else:
sm.preloop()
sm.onecmd("mangle")
sm.postloop()
| 41.637584
| 153
| 0.507012
|
d3b6a51bcd5a4eadc3a1a9f78385e7817002eb6a
| 121,919
|
py
|
Python
|
tensorflow/python/framework/test_util.py
|
devsangwoo/tensor
|
066592c9f9cdf4acdd1b9b104766271133e9088e
|
[
"Apache-2.0"
] | 1
|
2020-01-12T14:38:34.000Z
|
2020-01-12T14:38:34.000Z
|
tensorflow/python/framework/test_util.py
|
devsangwoo/tensor
|
066592c9f9cdf4acdd1b9b104766271133e9088e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/framework/test_util.py
|
devsangwoo/tensor
|
066592c9f9cdf4acdd1b9b104766271133e9088e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import _pywrap_stacktrace_handler
from tensorflow.python import _pywrap_util_port
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except:
pass
def _get_object_count_by_type():
return collections.Counter([type(obj).__name__ for obj in gc.get_objects()])
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
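# e.g. NHWCToNCHW([32, 224, 224, 3]) == [32, 3, 224, 224]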
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
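# e.g. NHWCToNCHW_VECT_C([32, 224, 224, 8]) == [32, 2, 224, 224, 4]
# (the 8 channels are split into 2 groups of 4)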
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
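# A minimal usage sketch (hypothetical test method):
#
#   @skip_if(lambda: not gpu_device_name())
#   def testNeedsGpu(self):
#     ...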
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
Args:
func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from measurement.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
      # Run the test `warmup_iters` times as warmup, in an attempt to fill up
      # caches, which should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
      # Some objects are newly created by _get_object_count_by_type(). Call it
      # once first so that those objects are included in the baseline count.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
obj_count_by_type = _get_object_count_by_type()
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = _get_object_count_by_type() - obj_count_by_type
      # In some cases (specifically on macOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
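# Usage sketch (illustrative only; the test class and body are hypothetical):
#
#   class MyLeakTest(googletest.TestCase):
#
#     @assert_no_new_pyobjects_executing_eagerly(warmup_iters=3)
#     def test_no_leaked_objects(self):
#       x = [1.0] * 10  # Anything created here must be collectable by the
#       del x           # time the test returns, or the assertion fires.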
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
  def build_ref_graph(obj, graph, reprs, blacklist):
    """Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
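# A minimal sketch of what `_combine_named_parameters` produces (values are
# illustrative):
#
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
#   # -> [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#   #     OrderedDict([("mode", "eager"), ("use_gpu", True)])]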
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is required for named
  parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
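# A minimal sketch of the generated names (values are illustrative): each
# combination gains a "testcase_name" entry built from its keys and values,
# as required by `parameterized.named_parameters`:
#
#   generate_combinations_with_testcase_name(mode=["graph", "eager"])
#   # -> [OrderedDict([("mode", "graph"), ("testcase_name", "_test_mode_graph")]),
#   #     OrderedDict([("mode", "eager"), ("testcase_name", "_test_mode_eager")])]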
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically check
  the generated graph. Attempting to evaluate graph or function results via
  session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
      raise ValueError(
          "`build_as_function_and_v1_graph` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
        raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def eager_lazy_remote_copy_on_and_off(f):
"""Execute the test method w/o lazy tensor copy for function remote inputs."""
@parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
@functools.wraps(f)
def decorator(self, lazily_remote_copy, *args, **kwargs):
if lazily_remote_copy:
context.context().lazy_remote_inputs_copy = True
else:
context.context().lazy_remote_inputs_copy = False
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
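  """Wraps `f` in a py_func when called while a function is being built.

  Outside of function building, `f` is called directly. Inside a function,
  Tensor and Variable positional arguments are threaded through
  `script_ops.py_func` so that `f` (typically a numpy-based check) runs at
  graph execution time rather than trace time.
  """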
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
    raise ValueError("'reason' should be a string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
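# Usage sketch (illustrative only; names and the bug reference are
# hypothetical). `run_v1_only` accepts either a test method or a class:
#
#   @run_v1_only("Exercises v1-only graph collections")
#   class MyV1OnlyTest(googletest.TestCase):
#     ...
#
#   class MixedTest(googletest.TestCase):
#
#     @run_v1_only("b/000000: relies on v1 variable scopes")
#     def test_v1_behavior(self):
#       ...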
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
  This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
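# Usage sketch (illustrative only; the date and test body are hypothetical):
#
#   class MyCompatTest(googletest.TestCase):
#
#     @with_forward_compatibility_horizons(None, (2051, 1, 1))
#     def test_new_op_path(self):
#       ...  # Runs once with no horizon set, and once as of 2051-01-01.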
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
  Warning: if a non-GPU version of the package is installed, the function will
  also return False. Use `tf.test.is_built_with_cuda` to validate whether
  TensorFlow was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
  Note that the keyword arg name "cuda_only" is misleading: the routine will
  return true when a GPU device is available, irrespective of whether TF was
  built with CUDA or ROCm support. However, no changes are made here because
  ++ Changing the name "cuda_only" to something more generic would break
  backward compatibility
  ++ Adding an equivalent "rocm_only" would require the implementation to check
  the build type. This in turn would require doing the same for CUDA and thus
  potentially break backward compatibility
  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
  but would require most (if not all) callers to update the call to use
  "cuda_or_rocm_only" instead of "cuda_only"
Returns:
True if a GPU device of the requested kind is available.
"""
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
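# Usage sketch (illustrative only): gate a GPU-specific code path on a CUDA
# device with compute capability of at least 3.5.
#
#   if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5)):
#     ...  # Safe to assume a sufficiently capable CUDA device exists.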
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
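# Usage sketch for the device helpers above (illustrative only; `a` and `b`
# are assumed to be previously created Tensors):
#
#   with use_gpu():    # GPU if one is available, otherwise CPU.
#     y = math_ops.matmul(a, b)
#   with force_cpu():  # Always CPU, even when a GPU is present.
#     z = math_ops.matmul(a, b)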
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
  Since the feed_dict is empty when not using placeholders, we should be able
  to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
  def run(self, fetches, *args, **kwargs):
    """Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
      raise RuntimeError(
          "feed_dict is not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")
if args or kwargs:
      raise RuntimeError(
          "Optional args are not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
      # of tf.data tests hard to read, because OutOfRangeError is used to
      # signal completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_disable_autotune"
if original_xla_flags:
new_xla_flags += " " + original_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
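# Usage sketch (illustrative only; the test is hypothetical). Disabling
# autotune keeps cuDNN algorithm selection deterministic, so two runs of the
# same graph remain comparable:
#
#   class MyConvTest(googletest.TestCase):
#
#     @disable_cudnn_autotune
#     def test_conv_determinism(self):
#       ...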
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
    raise ValueError("'description' should be a string, got {}".format(
        type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tensorflow.TF_GetXlaConstantFoldingDisabled()
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
  The given decorator is expected to take some arguments and return a
  method-level decorator that is then applied to each test method to produce
  a decorated method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
  Returns: Function that will decorate the given class's test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
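# Usage sketch (illustrative only; it assumes the conditional-skip decorator
# factory above is named `skip_if`): apply a method-level decorator factory to
# every test method of a class in one step.
#
#   @for_all_test_methods(skip_if, lambda: not is_gpu_available())
#   class AllGpuTests(googletest.TestCase):
#     ...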
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
def no_xla_auto_jit_impl(func):
"""This test is not intended to be run with XLA auto jit enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Skip test if using XLA is forced.
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return no_xla_auto_jit_impl
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tensorflow.TF_SetXlaAutoJitMode("2")
pywrap_tensorflow.TF_SetXlaMinClusterSize(1)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(False)
pywrap_tensorflow.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
    # Reset the summary writer in case another test used set_as_default() with
    # its summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
    # Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
    able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
    then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
      self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
      This method returns True from just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
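  # Usage sketch (illustrative only; `worker` and `some_value` are
  # hypothetical). A checked thread must be started and joined before the
  # test tears down:
  #
  #   def worker():
  #     self.assertGreater(some_value, 0)
  #
  #   t = self.checkedThread(target=worker)
  #   t.start()
  #   t.join()  # Re-raises any failure in `worker` as a test failure.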
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
    Checks that |f1 - f2| <= err and asserts a test failure
    if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
=======
f1: a float value.
f2: a float value.
err: a float value.
"""
self.assertTrue(math.fabs(f1 - f2) < err)
def assertArrayNear(self, farray1, farray2, err):
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
      msg: Optional message to report on failure.
    """
    self.assertEqual(len(farray1), len(farray2), msg=msg)
    for f1, f2 in zip(farray1, farray2):
      self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
  @py_func_if_in_function
  def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
=======
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays have near values.
Args:
a: a numpy ndarray or anything can be converted to one.
b: a numpy ndarray or anything can be converted to one.
rtol: relative tolerance
atol: absolute tolerance
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
<<<<<<< HEAD
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that does not work, then
      # traverse through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested structure
        of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
    In particular, the tolerance is relaxed to 1e-3 if at least
    one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
# With Python 3, we need to make sure the dtype matches between a and b.
b = b.astype(a.dtype)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b, msg)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements")
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if
    the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
  def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
      msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
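# Example (sketch): `graph_def` would typically come from a tf.Graph via
# `sess.graph_def`; "MatMul_1" is a hypothetical node name.
#   node = get_node_def_from_graph("MatMul_1", sess.graph_def)
#   if node is not None:
#     print(node.op, node.input)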
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
| 35.20618
| 112
| 0.669797
|
ff86d75b923d4bd935e96a15be2d0bdef2854ae4
| 5,130
|
py
|
Python
|
src/tools/tools.py
|
Blackdevil132/machineLearning
|
de048bb1473994052f8ed1afb11a15b7833b506d
|
[
"MIT"
] | 1
|
2019-05-04T07:28:19.000Z
|
2019-05-04T07:28:19.000Z
|
src/tools/tools.py
|
Blackdevil132/machineLearning
|
de048bb1473994052f8ed1afb11a15b7833b506d
|
[
"MIT"
] | 3
|
2019-04-29T09:20:11.000Z
|
2019-04-29T09:23:22.000Z
|
src/tools/tools.py
|
Blackdevil132/machineLearning
|
de048bb1473994052f8ed1afb11a15b7833b506d
|
[
"MIT"
] | null | null | null |
import math
import sys
import time
def statusBar(iteration, total_episodes):
    bar_len = 60
    filled_len = int(round(bar_len * iteration / total_episodes))
    percents = 100 * iteration / total_episodes
    bar = '=' * filled_len + '-' * (bar_len - filled_len)
    # '\r' moves the cursor back to the start of the line so the bar is
    # redrawn in place; a trailing '\n' would print a new line per call.
    sys.stdout.write('\r[%s] %.1f%%' % (bar, percents))
    sys.stdout.flush()
def product(li):
prod = 1
for elem in li:
prod *= elem
return prod
def sieveOfEratosthenes(n):
# finds all primes < n
# list of numbers from 2 to n, excluding even numbers
primes = [2] + [i for i in range(3, n, 2)]
# start at index 1, index 0 is already dealt with
i = 1
while True:
try:
# find position of the square of number at index i
j = primes.index(primes[i] ** 2)
except ValueError:
# if square is > n, done
return primes
# remove all multiples of primes[i]
while j < len(primes):
if primes[j] % primes[i] == 0:
primes.pop(j)
else:
j += 1
i += 1
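# Example (sketch): sieveOfEratosthenes(20) returns [2, 3, 5, 7, 11, 13, 17, 19];
# the loop stops once the next prime's square (here 25) is no longer in the list.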
def getPrimes(n):
# returns primes < n
# reads primes from file if possible
with open("primes.txt", 'r') as file:
primes = file.read().split(',')
primes = list(map(int, primes))
if n is None:
return primes
elif n > primes[-1]:
primes = sieveOfAtkin(n)
file = open("primes.txt", 'w')
text = ','.join(map(str, primes))
file.write(text)
file.close()
return primes
else:
return list(filter(lambda x: x < n, primes))
def readPrimes(PATH, limit=None):
with open(PATH, 'r') as file:
diffs = file.read().split(',')
max_p = int(diffs.pop(0))
if limit is None:
limit = max_p
primes = [2, 3]
for d in diffs:
p = primes[-1]+2*int(d)
if p > limit:
break
primes.append(p)
return primes
def savePrimes(PATH, primes):
with open(PATH, 'w') as file:
file.write("%i" % primes[-1])
for i in range(2, len(primes)):
file.write(",%i" % ((primes[i] - primes[i-1])//2))
def fac(n):
    # return the factorial of n, computed recursively
    if n == 0:
        return 1
    return fac(n - 1) * n
def gcD(a, b):
# return greatest common divisor of a and b
if a == 0:
return abs(b)
if b == 0:
return abs(a)
while b != 0:
tmp = a % b
a = b
b = tmp
return abs(a)
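# Example (sketch): Euclid's algorithm in action.
#   gcD(48, 18) walks 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0 and returns 6.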
def trialDivision(n, partial=False):
# returns all prime factors of n
factors = []
primes = getPrimes(n)
if not primes:
return []
exp = 2
if partial:
exp = 3
i = 0
while primes[i]**exp <= n:
p = primes[i]
if n % p == 0:
n //= p
factors.append(p)
else:
i += 1
if n != 1:
factors.append(n)
return factors
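# Example (sketch, assuming getPrimes can load or build its prime cache):
#   trialDivision(84) returns [2, 2, 3, 7], since 84 = 2 * 2 * 3 * 7.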
class TrialDivision:
def __init__(self, limit=None, partial=False):
self.primes = readPrimes("primes.p", limit)
self.partial = partial
def exec(self, n):
factors = []
if not self.primes:
return []
exp = 2
if self.partial:
exp = 3
i = 0
while self.primes[i] ** exp <= n:
p = self.primes[i]
if n % p == 0:
n //= p
factors.append(p)
else:
i += 1
if n != 1:
factors.append(n)
return factors
def distinct(li):
# removes all duplicates from list li
return list(dict.fromkeys(li))
def timeit(func, args, loops=None):
    if loops is None:
        start = time.perf_counter()
        ret = func(*args)
        end = time.perf_counter()
        print("Execution Time for %s: %.3f s" % (func.__name__, (end - start)))
        return ret
    start = time.perf_counter()
    for _ in range(loops):
        func(*args)
    end = time.perf_counter()
    if loops > 1:
        print("%i Loops, Average Execution Time for %s: %.3f s" % (loops, func.__name__, (end - start) / loops))
    else:
        print("Execution Time for %s: %.3f s" % (func.__name__, (end - start) / loops))
    return (end - start) / loops
def sieveOfAtkin(n):
primes = [2, 3, 5]
sieve = [False] * (n+1)
root_n = int(math.sqrt(n))
x = 1
while x*x < n:
y = 1
while y*y < n:
n1 = 4*x**2 + y**2
n2 = 3*x**2 + y**2
n3 = 3*x**2 - y**2
if n1 <= n and n1 % 60 in (1, 13, 17, 29, 37, 41, 49, 53):
sieve[n1] ^= True
if n2 <= n and n2 % 60 in (7, 19, 31, 43):
sieve[n2] ^= True
if x > y and n3 <= n and n3 % 60 in (11, 23, 47, 59):
sieve[n3] ^= True
y += 1
x += 1
for x in range(5, n):
if sieve[x]:
primes.append(x)
if x < root_n:
for y in range(x**2, n+1, x**2):
sieve[y] = False
return primes
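# Example (sketch): sieveOfAtkin(30) returns [2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
# candidates are toggled via the three quadratic forms above, then squares of
# primes are sieved back out.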
| 20.52
| 108
| 0.488499
|
c5a808700fab519b28fa025dbd65b6e1d6270430
| 1,390
|
py
|
Python
|
src/osipidce/qt_gui/madym_gui_processor.py
|
michaelberks/OSIPI-DCE-DSC-toolbox
|
60690aafab06e253601588396c2cdf683f705b82
|
[
"Apache-2.0"
] | 1
|
2021-10-04T15:43:15.000Z
|
2021-10-04T15:43:15.000Z
|
src/osipidce/qt_gui/madym_gui_processor.py
|
michaelberks/OSIPI-DCE-DSC-toolbox
|
60690aafab06e253601588396c2cdf683f705b82
|
[
"Apache-2.0"
] | null | null | null |
src/osipidce/qt_gui/madym_gui_processor.py
|
michaelberks/OSIPI-DCE-DSC-toolbox
|
60690aafab06e253601588396c2cdf683f705b82
|
[
"Apache-2.0"
] | null | null | null |
/*!
* @file madym_gui_processor.h
 * @brief Class for GUI processing tasks that runs in a separate thread from the main GUI
* @details Allows processing to proceed without blocking the main GUI. Not currently used.
* @author MA Berks (c) Copyright QBI Lab, University of Manchester 2020
*/
#ifndef MADYM_GUI_PROCESSOR
#define MADYM_GUI_PROCESSOR
#include <QObject>
#include <mdm_RunTools.h>
//!Class for GUI processing tasks that runs in a separate thread from the main GUI
class madym_gui_processor : public QObject
{
Q_OBJECT
// INTERFACE
  //!Enum defining the type of tool to process
public:
enum RunType {
T1, //!< T1 mapping
AIF, //!< AIF detection
DCE //!< DCE tracer-kinetic model fitting
};
madym_gui_processor();
  //! Reference to the run tools object, used to set its options
  /*!
  \return reference to the run tools object
  */
mdm_RunTools& madym_exe();
//! Set a new run tools object of the required sub-type
/*!
\param type of run tool required: T1, AIF or DCE
*/
void set_madym_exe(RunType type);
signals:
//! QT signal sent when processing finished
/*!
*/
void processing_finished(int);
public slots:
//! QT slot to do some processing
/*!
*/
void start_processing();
// IMPLEMENTATION
private: // Methods
private: // Variables
//Run tools object, dynamically allocated in factory method
std::unique_ptr<mdm_RunTools> madym_exe_;
};
#endif //MADYM_GUI_PROCESSOR
| 19.857143
| 91
| 0.708633
|
606f684844e0cb224be1c6694d8da7084b42c693
| 24,012
|
py
|
Python
|
readml/icecream/icecream.py
|
soumayaihihi/readml
|
bfda11cccf8ffe2a29a98589696769f843304309
|
[
"Apache-2.0"
] | null | null | null |
readml/icecream/icecream.py
|
soumayaihihi/readml
|
bfda11cccf8ffe2a29a98589696769f843304309
|
[
"Apache-2.0"
] | null | null | null |
readml/icecream/icecream.py
|
soumayaihihi/readml
|
bfda11cccf8ffe2a29a98589696769f843304309
|
[
"Apache-2.0"
] | null | null | null |
"""
FBDTools library - icecream package
This module contains the main classes that prepare data and draw PD/ICE/ALE plots.
"""
import os
import warnings
from typing import Any, Dict, List, Optional, Sized, Union
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly import offline
from .check_utils import check_input_in_list, conduct_full_check
from .compute_utils import (
aggregate_series,
compute_ale_agg_results,
compute_ice_model_predictions,
compute_ice_model_results_2D,
compute_model_ale_results_2D,
guess_model_predict_function,
pivot_dataframe,
sample_kmeans,
sample_quantiles,
)
from .config import options
from .discretizer import FeatureDiscretizer
from .plot_utils import (
plotly_ice_box,
plotly_ice_lines,
plotly_partial_dependency,
plotly_partial_dependency_2d_hist,
plotly_partial_dependency_2d_scatter,
)
class IceCream(object):
"""
Class that generates and contains predictions and aggregations used to
draw PDPlots, ICE plots and ALE plots.
Parameters
----------
data : pd.DataFrame
Dataframe of model inputs
feature_names : List[str]
List of names of columns from data to discretize and analyse
bins : Union[Dict[str, Union[int, Sized, None]], int] = {}
Bin definitions for features:
- None -> all bin definitions are guessed by module
- integer -> value is used as number of bins for all features
- dict -> keys are features names, values are:
- integers for number of bins (0 for categorical features)
- None to let the module decide
- Sized to define specific bins
Empty dict by default (which means all bins will be guessed)
use_ale : bool, optional
If True, computes ALE: Accumulated Local Effects.
Can only be used for numerical features. (the Default is False)
model : scikit-learn model, optional
Model to compute predictions using provided data,
`model.predict(data)` must work
predictions : Optional[Sized]
Series containing predictions for rows in data,
used if no model can be given
targets : Optional[Sized]
Series containing targets for rows in data
aggfunc : str, optional
        Aggregation function used to aggregate targets and predictions (the default is "mean")
use_classif_proba : bool, optional
If True, use prediction probability as model output,
only used if model is a classifier. (the default is True)
clip_quantile : float, optional
Quantile to clip the feature values for continuous features,
set to 0 to disable clipping (the default is 0.0)
quantile_based : bool, optional
Option to use a quantile-based discretization function for
continuous features (instead of a linear discretization), (the default is False)
Attributes
----------
features : List[FeatureDiscretizer]
Discretized representations of the studied features
predictions : Dict[str, pd.DataFrame]
Dictionary of predictions, keys are feature names, values are
dataframes of predictions for each bin
agg_predictions : Dict[str, pd.Series]
Dictionary of aggregated predictions values, keys are feature names
agg_targets : Dict[str, pd.Series]
Dictionary of aggregated target values, keys are feature names
    samples : Dict[str, pd.DataFrame]
Dict of dataframes of computed predictions samples/clusters
if ice line plot is drawn. Not filled until `draw` method is called.
"""
def __init__(
self,
data: pd.DataFrame,
feature_names: List[str],
bins: Union[Dict[str, Union[int, Sized, None]], int] = {},
use_ale: bool = False,
model: Optional[Any] = None,
predictions: Optional[Sized] = None,
targets: Optional[Sized] = None,
aggfunc: str = "mean",
use_classif_proba: bool = True,
clip_quantile: float = 0.0,
quantile_based: bool = False,
class_name=None,
) -> None:
"""
        Creates class instance with attributes, then runs results computation.
"""
conduct_full_check(
data,
feature_names,
bins,
model,
predictions,
targets,
aggfunc,
["mean", "median"],
use_classif_proba,
clip_quantile,
use_ale,
)
# transform bins if a scalar was given
if isinstance(bins, (int, np.integer)):
bins = {name: bins for name in feature_names}
self.features = [
FeatureDiscretizer(
data[name], bins.get(name), clip_quantile, quantile_based
)
for name in feature_names
]
self.predictions = dict() # type: Dict[str, Any]
self.agg_predictions = dict() # type: Dict[str, Any]
self.agg_targets = dict() # type: Dict[str, Any]
self.samples = dict() # type: Dict[str, Any]
self._aggfunc = aggfunc
self.use_ale = use_ale
self.class_name = class_name
self._run(data, model, predictions, targets, use_classif_proba, use_ale)
def __repr__(self) -> str:
return "{}: features ({})".format(
self.__class__.__name__,
", ".join([feature.name for feature in self.features]),
)
def _run(
self,
data: pd.DataFrame,
model: Any,
predictions: Optional[Sized],
targets: Optional[Sized],
use_classif_proba: bool,
use_ale: bool,
) -> None:
"""
        Run all operations to compute data used in PD plots, ICE plots and ALE plots.
Operations are run only if model/targets/predictions are given.
"""
if predictions is not None:
predictions = pd.Series(predictions)
self.agg_predictions = {
feature.name: aggregate_series(feature, predictions, self._aggfunc)
for feature in self.features
}
if model is not None:
predict_function = guess_model_predict_function(
model, use_classif_proba, self.class_name
)
if use_ale:
for feature in self.features:
self.agg_predictions[feature.name] = compute_ale_agg_results(
data, feature, predict_function
)
else:
self.predictions = {
feature.name: compute_ice_model_predictions(
data, feature, predict_function
)
for feature in self.features
}
self.agg_predictions = {
name: self.predictions[name].agg(self._aggfunc, axis=0)
for name in self.predictions
}
if targets is not None:
targets = pd.Series(targets)
self.agg_targets = {
feature.name: aggregate_series(feature, targets, self._aggfunc)
for feature in self.features
}
def draw(
self,
kind: str = "pdp",
show: bool = True,
save_path: Optional[str] = None,
ice_nb_lines: int = 15,
ice_clustering_method: str = "quantiles",
) -> Dict[str, go.FigureWidget]:
"""
Builds plots, optionally shows them in current notebook and save them in HTML format.
Parameters
----------
kind : str, optional
Kind of plot to draw, possibilities are:
- "pdp": draws a Partial Dependency Plot
- "box": draws a box plot of predictions for each bin of features
- "ice": draws a Individual Conditional Expectation plot
- "ale": draws an Accumulated Local Effects plot
(the default is "pdp")
show : bool, optional
Option to show the plots in notebook (the default is True)
save_path : Optional[str]
Path to directory to save the plots,
directory is created if it does not exist
ice_nb_lines : int, optional
Number of lines to draw if kind="ice" (the default is 15)
ice_clustering_method : str, optional
Sampling or clustering method to compute the best lines to draw if kind="ice",
available methods:
- "kmeans": automatic clustering using KMeans to get representative lines
- "quantiles": division of predictions in quantiles to get lines
- "random": random selection of rows among predictions
(the default is "quantiles")
Returns
-------
figures : Dict[str, go.FigureWidget]
Dictionary of generated plots,
keys are feature names, values are Plotly objects
"""
check_input_in_list(kind, ["pdp", "box", "ice", "ale"])
check_input_in_list(ice_clustering_method, ["kmeans", "quantiles", "random"])
# specific arguments for ice plot method
if kind == "ice":
figures = self._ice_lines_plot(
nb_lines=ice_nb_lines, clustering_method=ice_clustering_method
)
elif kind == "box":
figures = self._ice_box_plot()
elif kind == "pdp" or kind == "ale":
figures = self._line_plot()
if save_path is not None:
save_dir = os.path.abspath(save_path)
os.makedirs(save_dir, exist_ok=True)
for name, figure in figures.items():
filename = os.path.join(save_dir, "{}_{}.html".format(kind, name))
offline.plot(figure, filename=filename, auto_open=False)
if show:
# for loop on features list for sorted show
for feature in self.features:
if feature.name in figures:
offline.iplot(figures[feature.name])
return figures
def _line_plot(self) -> Dict[str, go.FigureWidget]:
"""
Returns a dict of N Plotly line plots for the N features contained in instance.
Returns
-------
figures : Dict[str, go.FigureWidget]
Dictionary of generated plots,
keys are feature names, values are Plotly objects
"""
if not self.predictions and self.agg_predictions and not self.use_ale:
warnings.warn(
"No model was provided, shown predictions were aggregated"
" and thus do not explain the model that produced them"
)
figures = dict()
for feature in self.features:
name = feature.name
figures[name] = plotly_partial_dependency(
feature,
self.agg_predictions.get(name),
self.agg_targets.get(name),
self._aggfunc,
self.use_ale,
)
return figures
def _ice_box_plot(self) -> Dict[str, go.FigureWidget]:
"""
Returns a dict of N Plotly ICE Box plots for the N features in instance.
Returns
-------
figures : Dict[str, go.FigureWidget]
Dictionary of generated plots,
keys are feature names, values are Plotly objects
"""
if not self.predictions and self.agg_predictions:
warnings.warn(
"No model was provided, predictions cannot be shown on the ICE box plot"
)
figures = dict()
for feature in self.features:
name = feature.name
figures[name] = plotly_ice_box(
feature,
self.predictions.get(name),
self.agg_targets.get(name),
self._aggfunc,
)
return figures
def _ice_lines_plot(
self, nb_lines: int, clustering_method: str
) -> Dict[str, go.FigureWidget]:
"""
Returns a dict of N Plotly ICE plots for the N features in instance.
Returns
-------
figures : Dict[str, go.FigureWidget]
Dictionary of generated plots,
keys are feature names, values are Plotly objects
"""
if not self.predictions and self.agg_predictions:
warnings.warn(
"No model was provided, predictions cannot be shown on the ICE plot"
)
if nb_lines < 2:
raise ValueError(
"Number of lines for ICE plot must be greater than 1"
", use PDP to show a 1 line aggregation plot"
)
figures = dict()
for feature in self.features:
name = feature.name
predictions = self.predictions.get(name)
if predictions is not None:
assert (
len(predictions) >= nb_lines
), "Number of lines must be inferior or equal to length of dataset"
if clustering_method == "random":
samples = predictions.sample(n=nb_lines)
counts = np.full(nb_lines, len(predictions) / nb_lines)
names = ["" for _ in samples]
colors = [options.predictions_color for _ in samples]
elif clustering_method == "kmeans":
samples, counts, names, colors = sample_kmeans(
predictions, nb_lines
)
elif clustering_method == "quantiles":
samples, counts, names, colors = sample_quantiles(
predictions, nb_lines
)
figures[name] = plotly_ice_lines(
feature,
samples,
counts,
names,
colors,
self.agg_targets.get(name),
self._aggfunc,
)
self.samples[name] = samples
return figures
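# Example usage (sketch): `clf` is a hypothetical fitted scikit-learn model and
# `df` a DataFrame of its inputs containing an "age" column.
#   ice = IceCream(data=df, feature_names=["age"], model=clf)
#   figures = ice.draw(kind="pdp", show=False, save_path="plots")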
class IceCream2D(object):
"""
Class that generates and contains predictions and aggregations used to
draw 2D interaction plots (partial dependencies or ALE heatmaps).
Parameters
----------
data : pd.DataFrame
Dataframe of model inputs
feature_x : str
Name of column from data to discretize for x axis
feature_y : str
Name of column from data to discretize for y axis
bins_x : Optional[Union[int, Sized]]
Bin definition for feature_x
- None -> bin definition is guessed by module
- integer -> value is used as number of bins for all features
- Sized -> define specific bins
bins_y : Optional[Union[int, Sized]]
Bin definition for feature_y
- None -> bin definition is guessed by module
- integer -> value is used as number of bins for all features
- Sized -> define specific bins
use_ale : bool, optional
If True, computes ALE: Accumulated Local Effects.
Can only be used for numerical features. (the default is False)
model : scikit-learn model, optional
Model to compute predictions using provided data,
`model.predict(data)` must work
predictions : Optional[Sized]
Series containing predictions for rows in data,
used if no model can be given
targets : Optional[Sized]
Series containing targets for rows in data
aggfunc : str, optional
        Aggregation function used to aggregate targets and predictions
(the default is "mean")
use_classif_proba : bool
If True, use prediction probability as model output,
only used if model is a classifier
(the default is True)
clip_quantile : float, optional
Quantile to clip the feature values for continuous features,
set to 0 to disable clipping
(the default is 0.0)
quantile_based : bool, optional
Option to use a quantile-based discretization function for
continuous features (instead of a linear discretization).
(the default is False)
Attributes
----------
feature_x : FeatureDiscretizer
Discretized representation of feature for x axis
feature_y : FeatureDiscretizer
Discretized representation of feature for y axis
counts : List[pd.DataFrame]
List of counts of number of rows for each square of heatmap
agg_predictions : pd.DataFrame
Dataframe of aggregated predictions for each bin of features x and y
agg_targets : pd.DataFrame
Dataframe of aggregated targets for each bin of features x and y
"""
def __init__(
self,
data: pd.DataFrame,
feature_x: str,
feature_y: str,
bins_x: Optional[Union[int, Sized]],
bins_y: Optional[Union[int, Sized]],
use_ale: bool = False,
model: Optional[Any] = None,
predictions: Optional[Sized] = None,
targets: Optional[Sized] = None,
aggfunc: str = "mean",
use_classif_proba: bool = True,
clip_quantile: float = 0.0,
quantile_based: bool = False,
) -> None:
"""
        Creates class instance with attributes, then runs results computation.
"""
conduct_full_check(
data,
[feature_x, feature_y],
{"x": bins_x, "y": bins_y},
model,
predictions,
targets,
aggfunc,
["mean", "median"],
use_classif_proba,
clip_quantile,
use_ale,
)
self.feature_x = FeatureDiscretizer(
data[feature_x], bins_x, clip_quantile, quantile_based
)
self.feature_y = FeatureDiscretizer(
data[feature_y], bins_y, clip_quantile, quantile_based
)
self.counts = None
self.use_ale = use_ale
self.agg_predictions = None
self.agg_targets = None
self._aggfunc = aggfunc
self._run(data, model, predictions, targets, use_classif_proba, use_ale)
def __repr__(self) -> str:
return "{}: feature_x ({}), feature_y ({})".format(
self.__class__.__name__, self.feature_x.name, self.feature_y.name
)
def _run(
self,
data: pd.DataFrame,
model: Any,
predictions: Optional[Sized],
targets: Optional[Sized],
use_classif_proba: bool,
use_ale: bool,
) -> None:
"""
Run all operations to compute data used in heatmaps.
"""
# fake data for count pivot table
data_temp = data.assign(values=0)
self.counts = pivot_dataframe(
data_temp, "values", self.feature_x, self.feature_y, "count", fill_value=0
)
if predictions is not None:
predictions = pd.Series(predictions)
data_temp = data.assign(values=predictions)
self.agg_predictions = pivot_dataframe(
data_temp, "values", self.feature_x, self.feature_y, self._aggfunc
)
if model is not None:
predict_function = guess_model_predict_function(model, use_classif_proba)
if use_ale:
self.agg_predictions = compute_model_ale_results_2D(
data, self.feature_x, self.feature_y, predict_function
)
else:
self.agg_predictions = compute_ice_model_results_2D(
data,
self.feature_x,
self.feature_y,
predict_function,
self._aggfunc,
)
if targets is not None:
targets = pd.Series(targets)
data_temp = data.assign(values=targets)
self.agg_targets = pivot_dataframe(
data_temp, "values", self.feature_x, self.feature_y, self._aggfunc
)
def draw(
self, kind: str = "hist", show: bool = True, save_path: Optional[str] = None
) -> List[go.FigureWidget]:
"""
Builds plots, optionally shows them in current notebook and save them in HTML format.
Parameters
----------
show : bool, optional
Option to show the plots in notebook
(the default is True)
save_path : Optional[str]
Path to directory to save the plots,
directory is created if it does not exist
(the default is None)
kind : str
Kind of plot to draw, possibilities are:
- "hist": histograms for feature values, heatmap for predictions and targets
- "scatter": scatter for feature values, heatmap for predictions and targets
(the default is "hist")
Returns
-------
figures : List[go.FigureWidget]
List of generated plots,
1 plot for predictions if model or predictions were given
1 plot for targets if they were given
"""
check_input_in_list(kind, ["hist", "scatter"])
if kind == "hist":
figures = self._pd_hist_plot()
elif kind == "scatter":
figures = self._pd_scatter_plot()
if save_path is not None:
save_dir = os.path.abspath(save_path)
os.makedirs(save_dir, exist_ok=True)
for i, figure in enumerate(figures):
filename = os.path.join(save_dir, "heatmap_{}.html".format(i))
offline.plot(figure, filename=filename, auto_open=False)
if show:
# for loop on features list for sorted show
for figure in figures:
offline.iplot(figure)
return figures
def _pd_hist_plot(self) -> List[go.FigureWidget]:
"""
Returns a list of Plotly PDP 2D plots.
Returns
-------
        figures : List[go.FigureWidget]
List of generated plots,
1 plot for predictions if model or predictions were given
1 plot for targets if they were given
"""
figures = [] # type: List[go.FigureWidget]
if self.agg_predictions is not None:
figures.append(
plotly_partial_dependency_2d_hist(
self.feature_x,
self.feature_y,
self.counts,
self.agg_predictions,
"predictions",
)
)
if self.agg_targets is not None:
figures.append(
plotly_partial_dependency_2d_hist(
self.feature_x,
self.feature_y,
self.counts,
self.agg_targets,
"targets",
)
)
return figures
def _pd_scatter_plot(self) -> List[go.FigureWidget]:
"""
Returns a list of Plotly PDP 2D plots.
Returns
-------
        figures : List[go.FigureWidget]
List of generated plots,
1 plot for predictions if model or predictions were given
1 plot for targets if they were given
"""
figures = [] # type: List[go.FigureWidget]
if self.agg_predictions is not None:
figures.append(
plotly_partial_dependency_2d_scatter(
self.feature_x,
self.feature_y,
self.counts,
self.agg_predictions,
"predictions",
)
)
if self.agg_targets is not None:
figures.append(
plotly_partial_dependency_2d_scatter(
self.feature_x,
self.feature_y,
self.counts,
self.agg_targets,
"targets",
)
)
return figures
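# Example usage (sketch): same hypothetical `clf` and `df` as above, with two
# numerical columns crossed into a 10x10 heatmap.
#   ice2d = IceCream2D(data=df, feature_x="age", feature_y="income",
#                      bins_x=10, bins_y=10, model=clf)
#   heatmaps = ice2d.draw(kind="hist", show=False)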
| 36.162651
| 93
| 0.576837
|
a9f92d9cc611578dc7c9fd5df3952959267a1634
| 1,451
|
py
|
Python
|
RiotConsts.py
|
nattokun/leagueOfRate
|
4d5bd2e4e8256495eefce99b1e7c15589ee796cc
|
[
"MIT"
] | null | null | null |
RiotConsts.py
|
nattokun/leagueOfRate
|
4d5bd2e4e8256495eefce99b1e7c15589ee796cc
|
[
"MIT"
] | null | null | null |
RiotConsts.py
|
nattokun/leagueOfRate
|
4d5bd2e4e8256495eefce99b1e7c15589ee796cc
|
[
"MIT"
] | null | null | null |
URL = {
'base' : 'https://global.api.pvp.net/api/{url}',
'summoner_by_name' : 'lol/na/v{version}/summoner/by-name/{names}?champData=stats',
'summoner_summonerId_rune' : 'lol/na/v{version}/summoner/{ids}/runes',
'summoner_summonerId_masteries' : 'lol/na/v{version}/summoner/{ids}/masteries',
'champions' : 'lol/static-data/na/v{version}/champion',
'champion_by_id': 'lol/static-data/na/v{version}/champion/{ids}',
'get_champion_stats' : 'lol/static-data/na/v1.2/champion/{ids}?champData=stats',
'get_champion_info' : 'lol/static-data/na/v1.2/champion/{ids}?champData=info',
'get_champion_spells' : 'lol/static-data/na/v1.2/champion/{ids}?champData=spells',
'get_rune_by_id' : 'lol/static-data/na/v{version}/rune/{ids}',
'get_rune_stats_by_id' : 'lol/static-data/na/v{version}/rune/{ids}?runeData=all'
}
API_VERSIONS = {
'champion': 1.2,
'current-game': 1.0,
'featured-games': 1.0,
'game': 1.3,
'league': 2.5,
'lol-static-data': 1.2,
'lol-status': 1.0,
'match': 2.2,
'matchhistory': 2.2,
'matchlist': 2.2,
'stats': 1.3,
'summoner': 1.4,
'team': 2.4
}
REGIONS = {
'GLOBAL' : 'global',
'BRAZIL' : 'br',
'EUROPE_NORDIC_EAST' : 'eune',
'EUROPE_WEST' : 'euw',
'KOREA' : 'kr',
'LATIN_AMERICA_NORTH' : 'lan',
'LATIN_AMERICA_SOUTH' : 'las',
'NORTH_AMERICA' : 'na',
'OCEANIA' : 'oce',
'RUSSIA' : 'ru',
'TURKEY' : 'tr'
}
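# Hedged example (added; not part of the original constants module): how these
# templates are typically expanded by callers. The summoner name used here is
# just a placeholder.
def _example_full_url():
    endpoint = URL['summoner_by_name'].format(
        version=API_VERSIONS['summoner'], names='placeholder-name')
    return URL['base'].format(url=endpoint)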
| 32.977273
| 86
| 0.610613
|
6b377e365f6f8a257180be569b6960fd4c0f8887
| 4,478
|
py
|
Python
|
experiments/gradcam_utils.py
|
anonymous-user-256/mlrc-cgn
|
64f43fcb89b3a13c0ae46db4f19060d9f204a6b1
|
[
"MIT"
] | null | null | null |
experiments/gradcam_utils.py
|
anonymous-user-256/mlrc-cgn
|
64f43fcb89b3a13c0ae46db4f19060d9f204a6b1
|
[
"MIT"
] | null | null | null |
experiments/gradcam_utils.py
|
anonymous-user-256/mlrc-cgn
|
64f43fcb89b3a13c0ae46db4f19060d9f204a6b1
|
[
"MIT"
] | null | null | null |
"""Helper functions for GradCAM analysis.
Reference: https://linuxtut.com/en/082f71b96b9aca0d5df5/
"""
import os
import sys
import argparse
import json
import numpy as np
import pandas as pd
import torch
from torchvision import transforms
import torchvision.datasets as tv_datasets
from torchvision import transforms as tv_transforms
from torchvision.utils import save_image, make_grid
from PIL import Image
from tqdm import tqdm
from gradcam.utils import visualize_cam
from gradcam import GradCAM, GradCAMpp
import matplotlib.pyplot as plt
import seaborn as sns
from torchmetrics import JaccardIndex
import warnings
warnings.filterwarnings("ignore")
from experiment_utils import set_env, REPO_PATH, seed_everything
set_env()
from cgn_framework.mnists.models.classifier import CNN
from cgn_framework.mnists.train_cgn import save
from cgn_framework.mnists.dataloader import get_tensor_dataloaders, TENSOR_DATASETS
def compute_iou_between_gt_and_gradcam_for_mnist(
dataset, weight_path, seed=0, disable_tqdm=False, debug=False, return_samples=False,
):
"""
Compute IoU between ground truth and GradCAM for MNIST.
Args:
dataset (str): dataset name
weight_path (str): absolute path to the checkpoint file
seed (int): random seed
disable_tqdm (bool): whether to disable tqdm progress bar
debug (bool): whether to print debug messages
return_samples (bool): whether to return samples of the gradcam
"""
seed_everything(seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# load model and its weights from a checkpoint
model = CNN()
# load checkpoint
print("Loading model weights from checkpoint: {}".format(weight_path))
ckpt = torch.load(weight_path, map_location='cpu')
model.load_state_dict(ckpt)
model = model.eval()
model = model.to(device)
# load the test dataloader to evaluate GradCAM on
print("Loading dataset: {}".format(dataset))
dl_train, dl_test = get_tensor_dataloaders(dataset, 64)
ds_test = dl_test.dataset
# load original MNIST to obtain binary maps
T = tv_transforms.Compose([
tv_transforms.Resize((32, 32), Image.NEAREST),
tv_transforms.ToTensor(),
])
original_data_root = os.path.join(REPO_PATH, "cgn_framework/mnists/data/MNIST/")
original = tv_datasets.MNIST(
root=original_data_root, download=True, train=False, transform=T,
)
# define the target layer to be used
target_layer = model.model[8]
gradcam = GradCAM(model, target_layer)
# for metric computation
jaccard = JaccardIndex(num_classes=2)
jaccard = jaccard.to(device)
# apply GradCAM on the test set
num_samples = len(ds_test) if not debug else 10
np.random.seed(0)
sample_indices = np.random.choice(num_samples, 10, replace=False)
iterator = tqdm(
range(num_samples),
colour="red",
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}',
desc="Computing GradCAM",
disable=disable_tqdm,
)
samples = []
iou_values = []
labels = []
show = False
for i in iterator:
# get image
image, label = ds_test[i]
image = image.to(device)
# get gradcam mask
gc_mask, _ = gradcam(image.unsqueeze(0))
heatmap, result = visualize_cam(gc_mask, image)
gc_mask = gc_mask.squeeze(0).to(device)
        # get corresponding GT mask from the original dataset
gt_mask = original[i][0]
gt_mask = gt_mask.to(device)
gt_mask_binary = gt_mask > 0.5
iou = jaccard(gc_mask, gt_mask_binary)
iou_values.append(iou.cpu().item())
labels.append(label)
if return_samples and i in sample_indices:
samples.append([image.data.cpu(), heatmap, result])
if show:
grid = make_grid([gt_mask, gc_mask], padding=0)
plt.title(f"IoU: {iou:.4f}", fontsize=18)
plt.imshow(grid.permute((1, 2, 0)))
plt.show()
df = pd.DataFrame(None, columns=["iou", "label"])
df["iou"] = iou_values
df["label"] = torch.stack(labels).numpy()
class_wise_results = dict(df.groupby("label")["iou"].mean())
class_wise_results["overall_mean"] = np.mean(iou_values)
class_wise_results["overall_std"] = np.std(iou_values)
if return_samples:
return class_wise_results, sample_indices, samples
else:
return class_wise_results
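if __name__ == "__main__":
    # Hedged usage sketch (added): the dataset name and checkpoint path below
    # are placeholders -- substitute a real TENSOR_DATASETS entry and a trained
    # classifier checkpoint before running.
    results = compute_iou_between_gt_and_gradcam_for_mnist(
        dataset="colored_MNIST",
        weight_path="experiments/weights/classifier.pth",
        seed=0,
        debug=True,
    )
    print(results)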
| 31.535211
| 92
| 0.685574
|
2d104e6da45af53cfc81f93a054461cd8717aa0c
| 5,078
|
py
|
Python
|
src/metropolisGA.py
|
lbenning/Knapsack
|
1b06409bafc04210837b984fb638804794faada6
|
[
"MIT"
] | 4
|
2015-07-26T19:59:25.000Z
|
2018-12-13T15:39:44.000Z
|
src/metropolisGA.py
|
lbenning/Knapsack
|
1b06409bafc04210837b984fb638804794faada6
|
[
"MIT"
] | null | null | null |
src/metropolisGA.py
|
lbenning/Knapsack
|
1b06409bafc04210837b984fb638804794faada6
|
[
"MIT"
] | null | null | null |
import random
import numpy
from tools import *
import math
# Computes the Hamming distance between 2 bitstrings,
# which is simply the number of places where the bitstrings
# differ in value
def hammingDistance(x,y):
dist = 0
for z in range(len(x)):
if (not x[z] == y[z]):
dist += 1
return dist
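# Illustrative helper (added; the original code inlines this rule below): the
# Metropolis criterion accepts an equal-or-better child unconditionally and a
# worse child with probability exp(-(parent - child) / temp), so high
# temperatures accept almost anything and cooling makes the search greedier.
def metropolisAccept(parentFitness, childFitness, temp):
    return (childFitness >= parentFitness or
            random.uniform(0, 1) <= math.exp(-(parentFitness - childFitness) / temp))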
# Genetic algorithm simulation - uses the Metropolis algorithm
# to maintain diversity
def geneticMetropolis(popSize,values,weights,limit,epsilon,exactSoln):
# Initialization
population = []
for x in range(popSize):
population.append(generatePack(values,weights,limit))
# Track number of evaluations of fitness function
fitCtr = 0
# Mutation rate that will be degraded
mut = degradeMutation(fitCtr)
# Top individual's fitness
topFitness = -1
# Pairing indices for tournament
pairing = numpy.arange(0,popSize)
# Initial temperature
temp = 1000
# Cooling factor - multiplies temp. by this factor
coolFactor = 0.95
while(True):
# Run a tournament
numpy.random.shuffle(pairing)
curr = 0
while (curr+1 < len(population)):
parent1 = population[pairing[curr]]
parent2 = population[pairing[curr+1]]
child1 = reproduce(parent1,parent2,weights,limit,mut)
child2 = reproduce(parent2,parent1,weights,limit,mut)
# Compute fitnesses of parents and children
fitnesses = (fitness(parent1,values),fitness(parent2,values),fitness(child1,values),
fitness(child2,values))
# Update the top fitness if new best achieved
topFitness = max(topFitness,fitnesses[0],fitnesses[1],
fitnesses[2],fitnesses[3])
# Termination condition
if (topFitness*(1+epsilon) >= exactSoln):
return topFitness
fitCtr += 4
# Replace according to Metropolis algorithm
if (hammingDistance(parent1,child1)+hammingDistance(parent2,child2) >
hammingDistance(parent1,child2)+hammingDistance(parent2,child1)):
if (fitnesses[2] >= fitnesses[0] or random.uniform(0,1)
<= math.exp(-(fitnesses[0]-fitnesses[2])/temp)):
population[pairing[curr]] = child1
if (fitnesses[3] >= fitnesses[1] or random.uniform(0,1)
<= math.exp(-(fitnesses[1]-fitnesses[3])/temp)):
population[pairing[curr+1]] = child2
else:
if (fitnesses[3] >= fitnesses[0] or random.uniform(0,1)
<= math.exp(-(fitnesses[0]-fitnesses[3])/temp)):
population[pairing[curr]] = child2
if (fitnesses[2] >= fitnesses[1] or random.uniform(0,1)
<= math.exp(-(fitnesses[1]-fitnesses[2])/temp)):
population[pairing[curr+1]] = child1
curr += 2
# Update the mutation rate
mut = degradeMutation(fitCtr)
# Update the temperature
temp *= coolFactor
# Genetic algorithm simulation - uses the Metropolis algorithm to maintain
# diversity, recording the best fitness at the given evaluation intervals
def recordMetropolis(popSize,values,weights,limit,intervals,bound):
# Initialization
population = []
for x in range(popSize):
population.append(generatePack(values,weights,limit))
# Track number of evaluations of fitness function
fitCtr = 0
# Mutation rate that will be degraded
mut = degradeMutation(fitCtr)
# Top individual's fitness
topFitness = -1
# Pairing indices for tournament
pairing = numpy.arange(0,popSize)
# Initial temperature
temp = 1000
# Cooling factor - multiplies temp. by this factor
coolFactor = 0.95
# Record fitnesses
recFitnesses = []
while(True):
# Run a tournament
numpy.random.shuffle(pairing)
curr = 0
while (curr+1 < len(population)):
parent1 = population[pairing[curr]]
parent2 = population[pairing[curr+1]]
child1 = reproduce(parent1,parent2,weights,limit,mut)
child2 = reproduce(parent2,parent1,weights,limit,mut)
# Compute fitnesses of parents and children
fitnesses = (fitness(parent1,values),fitness(parent2,values),fitness(child1,values),
fitness(child2,values))
# Update the top fitness if new best achieved
topFitness = max(topFitness,fitnesses[0],fitnesses[1],
fitnesses[2],fitnesses[3])
# Replace according to Metropolis algorithm
if (hammingDistance(parent1,child1)+hammingDistance(parent2,child2) >
hammingDistance(parent1,child2)+hammingDistance(parent2,child1)):
if (fitnesses[2] >= fitnesses[0] or random.uniform(0,1)
<= math.exp(-(fitnesses[0]-fitnesses[2])/temp)):
population[pairing[curr]] = child1
if (fitnesses[3] >= fitnesses[1] or random.uniform(0,1)
<= math.exp(-(fitnesses[1]-fitnesses[3])/temp)):
population[pairing[curr+1]] = child2
else:
if (fitnesses[3] >= fitnesses[0] or random.uniform(0,1)
<= math.exp(-(fitnesses[0]-fitnesses[3])/temp)):
population[pairing[curr]] = child2
if (fitnesses[2] >= fitnesses[1] or random.uniform(0,1)
<= math.exp(-(fitnesses[1]-fitnesses[2])/temp)):
population[pairing[curr+1]] = child1
if (fitCtr+1 in intervals or fitCtr+2 in intervals or fitCtr+3 in intervals or
fitCtr+4 in intervals):
recFitnesses.append(topFitness)
curr += 2
fitCtr += 4
# Termination condition
if (fitCtr >= bound):
return recFitnesses
# Update the mutation rate
mut = degradeMutation(fitCtr)
# Update the temperature
temp *= coolFactor
| 31.540373
| 87
| 0.707562
|
cb9a97aa13bd8cca4f6601674cd746fd949a882a
| 676
|
py
|
Python
|
app/post/admin.py
|
HSAkash/Questionnaire-Docker
|
ee7f45ddc04e9ce72156a888255711df6a34720b
|
[
"MIT"
] | 1
|
2021-12-26T06:18:48.000Z
|
2021-12-26T06:18:48.000Z
|
app/post/admin.py
|
HSAkash/Questionnaire_api
|
95a0e4e7bc680fe27dd312dad0e2fd70ceafb8b4
|
[
"MIT"
] | null | null | null |
app/post/admin.py
|
HSAkash/Questionnaire_api
|
95a0e4e7bc680fe27dd312dad0e2fd70ceafb8b4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from post import models
class QuestionAdmin(admin.ModelAdmin):
list_display = ('title', 'user', 'created_at', 'published_date')
list_filter = ('published_date',)
search_fields = ('title', 'user')
date_hierarchy = 'published_date'
ordering = ['-published_date']
filter_horizontal = ()
fieldsets = ()
list_per_page = 10
class AnswerAdmin(admin.ModelAdmin):
list_display = ('question', 'user')
search_fields = ('question', 'user')
filter_horizontal = ()
fieldsets = ()
list_per_page = 10
admin.site.register(models.Question, QuestionAdmin)
admin.site.register(models.Answer, AnswerAdmin)
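# Hedged note (added): Django's admin search against a ForeignKey field such
# as 'user' usually needs a related lookup (e.g. 'user__email'); the right
# field depends on the user model, which is outside this file.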
| 26
| 68
| 0.693787
|
3519a70791b2636cbd96069edb8169eb4b81d7b9
| 8,530
|
py
|
Python
|
scripts/fixup_dlp_v2_keywords.py
|
renovate-bot/python-dlp
|
f329cf691bc8fa49b70ea5a443b5dc36c3b59eb9
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
scripts/fixup_dlp_v2_keywords.py
|
renovate-bot/python-dlp
|
f329cf691bc8fa49b70ea5a443b5dc36c3b59eb9
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
scripts/fixup_dlp_v2_keywords.py
|
renovate-bot/python-dlp
|
f329cf691bc8fa49b70ea5a443b5dc36c3b59eb9
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
class dlpCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'activate_job_trigger': ('name', ),
'cancel_dlp_job': ('name', ),
'create_deidentify_template': ('parent', 'deidentify_template', 'template_id', 'location_id', ),
'create_dlp_job': ('parent', 'inspect_job', 'risk_job', 'job_id', 'location_id', ),
'create_inspect_template': ('parent', 'inspect_template', 'template_id', 'location_id', ),
'create_job_trigger': ('parent', 'job_trigger', 'trigger_id', 'location_id', ),
'create_stored_info_type': ('parent', 'config', 'stored_info_type_id', 'location_id', ),
'deidentify_content': ('parent', 'deidentify_config', 'inspect_config', 'item', 'inspect_template_name', 'deidentify_template_name', 'location_id', ),
'delete_deidentify_template': ('name', ),
'delete_dlp_job': ('name', ),
'delete_inspect_template': ('name', ),
'delete_job_trigger': ('name', ),
'delete_stored_info_type': ('name', ),
'finish_dlp_job': ('name', ),
'get_deidentify_template': ('name', ),
'get_dlp_job': ('name', ),
'get_inspect_template': ('name', ),
'get_job_trigger': ('name', ),
'get_stored_info_type': ('name', ),
'hybrid_inspect_dlp_job': ('name', 'hybrid_item', ),
'hybrid_inspect_job_trigger': ('name', 'hybrid_item', ),
'inspect_content': ('parent', 'inspect_config', 'item', 'inspect_template_name', 'location_id', ),
'list_deidentify_templates': ('parent', 'page_token', 'page_size', 'order_by', 'location_id', ),
'list_dlp_jobs': ('parent', 'filter', 'page_size', 'page_token', 'type_', 'order_by', 'location_id', ),
'list_info_types': ('parent', 'language_code', 'filter', 'location_id', ),
'list_inspect_templates': ('parent', 'page_token', 'page_size', 'order_by', 'location_id', ),
'list_job_triggers': ('parent', 'page_token', 'page_size', 'order_by', 'filter', 'location_id', ),
'list_stored_info_types': ('parent', 'page_token', 'page_size', 'order_by', 'location_id', ),
'redact_image': ('parent', 'location_id', 'inspect_config', 'image_redaction_configs', 'include_findings', 'byte_item', ),
'reidentify_content': ('parent', 'reidentify_config', 'inspect_config', 'item', 'inspect_template_name', 'reidentify_template_name', 'location_id', ),
'update_deidentify_template': ('name', 'deidentify_template', 'update_mask', ),
'update_inspect_template': ('name', 'inspect_template', 'update_mask', ),
'update_job_trigger': ('name', 'job_trigger', 'update_mask', ),
'update_stored_info_type': ('name', 'config', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=dlpCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the dlp client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level when converting positional
parameters in client method calls to keyword-based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
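# Hedged usage sketch (added; paths are placeholders):
#   python fixup_dlp_v2_keywords.py -d ./my_client_code -o ./my_client_code_fixed
# turns a positional call such as client.inspect_content(parent, config, item)
# into client.inspect_content(request={'parent': parent,
# 'inspect_config': config, 'item': item}).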
| 40.619048
| 158
| 0.630832
|
c9f7aa6c5fb3a86ffed726042e9a048ca9704c49
| 12,963
|
py
|
Python
|
bento/commands/tests/test_configure.py
|
cournape/Bento
|
37de23d784407a7c98a4a15770ffc570d5f32d70
|
[
"BSD-3-Clause"
] | 55
|
2015-01-20T21:12:52.000Z
|
2021-11-23T12:29:32.000Z
|
bento/commands/tests/test_configure.py
|
esc/Bento
|
5e13318c0a74e956f9e80fa7617fb31ffc356088
|
[
"BSD-3-Clause"
] | 6
|
2015-01-16T07:01:29.000Z
|
2021-08-19T20:00:17.000Z
|
bento/commands/tests/test_configure.py
|
esc/Bento
|
5e13318c0a74e956f9e80fa7617fb31ffc356088
|
[
"BSD-3-Clause"
] | 6
|
2015-08-12T18:11:47.000Z
|
2019-01-05T08:36:05.000Z
|
import os
import sys
import shutil
import tempfile
import posixpath
import mock
from bento.compat.api.moves \
import \
unittest
from bento.core.options \
import \
PackageOptions
from bento.core \
import \
PackageDescription
from bento.core.node \
import \
create_root_with_source_tree
from bento.commands.wrapper_utils \
import \
run_command_in_context
from bento.commands.tests.utils \
import \
prepare_configure
from bento.backends.yaku_backend \
import \
ConfigureYakuContext
from bento.commands.configure \
import \
_compute_scheme, set_scheme_unix, set_scheme_win32
BENTO_INFO = """\
Name: Sphinx
Version: 0.6.3
Summary: Python documentation generator
Url: http://sphinx.pocoo.org/
DownloadUrl: http://pypi.python.org/pypi/Sphinx
Description: Some long description.
Author: Georg Brandl
AuthorEmail: georg@python.org
Maintainer: Georg Brandl
MaintainerEmail: georg@python.org
License: BSD
"""
PY_VERSION_SHORT = ".".join(str(_) for _ in sys.version_info[:2])
PY_VERSION_SHORT_NO_DOT = "".join(str(_) for _ in sys.version_info[:2])
class TestConfigureCommand(unittest.TestCase):
def setUp(self):
self.d = tempfile.mkdtemp()
self.root = create_root_with_source_tree(self.d, os.path.join(self.d, "build"))
self.old_dir = os.getcwd()
os.chdir(self.d)
def tearDown(self):
os.chdir(self.old_dir)
shutil.rmtree(self.d)
def test_simple(self):
root = self.root
run_node = root.find_node(self.d)
conf, configure = prepare_configure(run_node, BENTO_INFO, ConfigureYakuContext)
run_command_in_context(conf, configure)
def test_flags(self):
bento_info = """\
Name: foo
Flag: floupi
Description: some floupi flag
Default: true
"""
run_node = self.root.find_node(self.d)
conf, configure = prepare_configure(run_node, bento_info, ConfigureYakuContext, ["--floupi=false"])
run_command_in_context(conf, configure)
UNIX_REFERENCE = {
'destdir': "/",
'prefix': None,
'eprefix': None,
'bindir': '$eprefix/bin',
'sbindir': '$eprefix/sbin',
'libexecdir': '$eprefix/libexec',
'sysconfdir': '$prefix/etc',
'sharedstatedir': '$prefix/com',
'localstatedir': '$prefix/var',
'libdir': '$eprefix/lib',
'includedir': '$prefix/include',
'datarootdir': '$prefix/share',
'datadir': '$datarootdir',
'mandir': '$datarootdir/man',
'infodir': '$datarootdir/info',
'localedir': '$datarootdir/locale',
'docdir': '$datarootdir/doc/$pkgname',
'htmldir': '$docdir',
'dvidir': '$docdir',
'psdir': '$docdir',
'pdfdir': '$docdir',
'sitedir': '$libdir/python$py_version_short/site-packages',
'pkgdatadir': '$datadir/$pkgname'
}
WIN32_REFERENCE = {
'destdir': "C:\\",
'prefix': None,
'eprefix': r'$prefix',
'bindir': r'$eprefix\Scripts',
'sbindir': r'$eprefix\Scripts',
'libexecdir': r'$eprefix\Scripts',
'sysconfdir': r'$prefix\etc',
'sharedstatedir': r'$prefix\com',
'localstatedir': r'$prefix\var',
'libdir': r'$eprefix\lib',
'includedir': r'$prefix\include',
'datarootdir': r'$prefix\share',
'datadir': r'$datarootdir',
'mandir': r'$datarootdir\man',
'infodir': r'$datarootdir\info',
'localedir': r'$datarootdir\locale',
'docdir': r'$datarootdir\doc\$pkgname',
'htmldir': r'$docdir',
'dvidir': r'$docdir',
'psdir': r'$docdir',
'pdfdir': r'$docdir',
'sitedir': r'$prefix\Lib\site-packages',
'pkgdatadir': r'$datadir\$pkgname'
}
MOCK_DEBIAN_SCHEME = {
'purelib': '$base/local/lib/python$py_version_short/dist-packages',
'headers': '$base/local/include/python$py_version_short/$dist_name',
'scripts': '$base/local/bin',
}
class TestUnixScheme(unittest.TestCase):
def setUp(self):
super(TestUnixScheme, self).setUp()
options = mock.Mock()
options.eprefix = None
options.prefix = None
self.options = options
def _compute_scheme(self, bento_info, options):
package_options = PackageOptions.from_string(bento_info)
pkg = PackageDescription.from_string(bento_info)
scheme = _compute_scheme(package_options)
set_scheme_unix(scheme, options, pkg)
return scheme
@mock.patch("sys.platform", "linux2")
@mock.patch("bento.commands.configure.virtualenv_prefix", lambda: None)
@mock.patch("bento.core.platforms.sysconfig.bento.utils.path.find_root", lambda ignored: r"/")
@mock.patch("distutils.command.install.INSTALL_SCHEMES", {"unix_prefix": UNIX_REFERENCE})
def test_scheme_default(self):
bento_info = """\
Name: foo
"""
self.options.prefix = self.eprefix = None
scheme = self._compute_scheme(bento_info, self.options)
prefix = scheme.pop("prefix")
eprefix = scheme.pop("eprefix")
py_version_short = scheme.pop("py_version_short")
pkgname = scheme.pop("pkgname")
self.assertEqual(prefix, "/usr/local")
self.assertEqual(eprefix, "/usr/local")
self.assertEqual(pkgname, "foo")
self.assertEqual(py_version_short, PY_VERSION_SHORT)
# Check that other values in scheme have not been modified
for k, v in scheme.items():
self.assertEqual(UNIX_REFERENCE[k], v, "discrepency for path %s: %s vs %s" % (k, UNIX_REFERENCE[k], v))
@mock.patch("sys.platform", "darwin")
@mock.patch("bento.core.platforms.sysconfig.bento.utils.path.find_root", lambda ignored: r"/")
@mock.patch("bento.commands.configure.op.normpath", posixpath.normpath)
@mock.patch("bento.commands.configure.virtualenv_prefix", lambda: None)
@mock.patch("sys.prefix", "/Library/Frameworks/Python.framework/Versions/2.8")
@mock.patch("sys.exec_prefix", "/Exec/Library/Frameworks/Python.framework/Versions/2.8")
def test_scheme_default_darwin(self):
bento_info = """\
Name: foo
"""
self.options.prefix = self.eprefix = None
scheme = self._compute_scheme(bento_info, self.options)
prefix = scheme.pop("prefix")
eprefix = scheme.pop("eprefix")
py_version_short = scheme.pop("py_version_short")
pkgname = scheme.pop("pkgname")
self.assertEqual(prefix, sys.prefix)
self.assertEqual(eprefix, sys.exec_prefix)
self.assertEqual(pkgname, "foo")
self.assertEqual(py_version_short, PY_VERSION_SHORT)
# Check that other values in scheme have not been modified
for k, v in scheme.items():
self.assertEqual(UNIX_REFERENCE[k], v)
@mock.patch("sys.platform", "linux2")
@mock.patch("bento.core.platforms.sysconfig.bento.utils.path.find_root", lambda ignored: r"/")
def test_scheme_with_prefix(self):
bento_info = """\
Name: foo
"""
self.options.prefix = "/home/guido"
scheme = self._compute_scheme(bento_info, self.options)
prefix = scheme.pop("prefix")
eprefix = scheme.pop("eprefix")
py_version_short = scheme.pop("py_version_short")
pkgname = scheme.pop("pkgname")
self.assertEqual(prefix, "/home/guido")
self.assertEqual(eprefix, "/home/guido")
self.assertEqual(pkgname, "foo")
self.assertEqual(py_version_short, PY_VERSION_SHORT)
# Check that other values in scheme have not been modified
for k, v in scheme.items():
self.assertEqual(UNIX_REFERENCE[k], v)
self.options.eprefix = "/home/exec/guido"
scheme = self._compute_scheme(bento_info, self.options)
prefix = scheme.pop("prefix")
eprefix = scheme.pop("eprefix")
py_version_short = scheme.pop("py_version_short")
pkgname = scheme.pop("pkgname")
self.assertEqual(prefix, "/home/guido")
self.assertEqual(eprefix, "/home/exec/guido")
self.assertEqual(pkgname, "foo")
self.assertEqual(py_version_short, PY_VERSION_SHORT)
# Check that other values in scheme have not been modified
for k, v in scheme.items():
self.assertEqual(UNIX_REFERENCE[k], v)
@mock.patch("sys.platform", "linux2")
def test_scheme_with_eprefix_fail(self):
bento_info = """\
Name: foo
"""
self.options.eprefix = "/home/guido"
self.assertRaises(NotImplementedError, lambda: self._compute_scheme(bento_info, self.options))
@mock.patch("sys.platform", "linux2")
@mock.patch("bento.commands.configure.virtualenv_prefix", lambda: None)
@mock.patch("bento.core.platforms.sysconfig.bento.utils.path.find_root", lambda ignored: r"/")
@mock.patch("distutils.command.install.INSTALL_SCHEMES", {"unix_local": MOCK_DEBIAN_SCHEME}, create=True)
def test_scheme_debian(self):
bento_info = """\
Name: foo
"""
scheme = self._compute_scheme(bento_info, self.options)
prefix = scheme.pop("prefix")
eprefix = scheme.pop("eprefix")
sitedir = scheme.pop("sitedir")
includedir = scheme.pop("includedir")
self.assertEqual(prefix, "/usr/local")
self.assertEqual(eprefix, "/usr/local")
self.assertEqual(sitedir, "/usr/local/lib/python%s/dist-packages" % PY_VERSION_SHORT)
self.assertEqual(includedir, "/usr/local/include/python%s/foo" % PY_VERSION_SHORT)
scheme.pop("py_version_short")
scheme.pop("pkgname")
# Check that other values in scheme have not been modified
for k, v in scheme.items():
self.assertEqual(UNIX_REFERENCE[k], v)
@mock.patch("sys.platform", "linux2")
@mock.patch("bento.core.platforms.sysconfig.bento.utils.path.find_root", lambda ignored: r"/")
@mock.patch("bento.commands.configure.virtualenv_prefix", lambda: "/home/guido/.env")
def test_scheme_venv(self):
bento_info = """\
Name: foo
"""
scheme = self._compute_scheme(bento_info, self.options)
prefix = scheme.pop("prefix")
eprefix = scheme.pop("eprefix")
self.assertEqual(prefix, "/home/guido/.env")
self.assertEqual(eprefix, "/home/guido/.env")
scheme.pop("py_version_short")
scheme.pop("pkgname")
# Check that other values in scheme have not been modified
for k, v in scheme.items():
self.assertEqual(UNIX_REFERENCE[k], v)
class TestWin32Scheme(unittest.TestCase):
def setUp(self):
super(TestWin32Scheme, self).setUp()
options = mock.Mock()
options.eprefix = None
options.prefix = None
self.options = options
def _compute_scheme(self, bento_info, options):
package_options = PackageOptions.from_string(bento_info)
pkg = PackageDescription.from_string(bento_info)
scheme = _compute_scheme(package_options)
set_scheme_win32(scheme, options, pkg)
return scheme
@mock.patch("sys.platform", "win32")
@mock.patch("bento.core.platforms.sysconfig.bento.utils.path.find_root", lambda ignored: "C:\\")
@mock.patch("sys.prefix", r"C:\Python%s" % PY_VERSION_SHORT_NO_DOT)
@mock.patch("sys.exec_prefix", r"C:\Python%s" % PY_VERSION_SHORT_NO_DOT)
def test_scheme_default(self):
bento_info = """\
Name: foo
"""
self.options.prefix = self.eprefix = None
scheme = self._compute_scheme(bento_info, self.options)
prefix = scheme.pop("prefix")
eprefix = scheme.pop("eprefix")
py_version_short = scheme.pop("py_version_short")
pkgname = scheme.pop("pkgname")
self.assertEqual(prefix, sys.prefix)
self.assertEqual(eprefix, sys.exec_prefix)
self.assertEqual(pkgname, "foo")
self.assertEqual(py_version_short, PY_VERSION_SHORT)
# Check that other values in scheme have not been modified
for k, v in scheme.items():
            self.assertEqual(WIN32_REFERENCE[k], v, "discrepancy for path %s: %s vs %s" % (k, WIN32_REFERENCE[k], v))
@mock.patch("sys.platform", "win32")
@mock.patch("bento.core.platforms.sysconfig.bento.utils.path.find_root", lambda ignored: "C:\\")
def test_scheme_prefix(self):
bento_info = """\
Name: foo
"""
self.options.prefix = r"C:\foo"
self.eprefix = None
scheme = self._compute_scheme(bento_info, self.options)
prefix = scheme.pop("prefix")
eprefix = scheme.pop("eprefix")
scheme.pop("py_version_short")
scheme.pop("pkgname")
self.assertEqual(prefix, r"C:\foo")
self.assertEqual(eprefix, r"C:\foo")
# Check that other values in scheme have not been modified
for k, v in scheme.items():
            self.assertEqual(WIN32_REFERENCE[k], v, "discrepancy for path %s: %s vs %s" % (k, WIN32_REFERENCE[k], v))
| 34.846774
| 117
| 0.64553
|
5dd6f8ed6415b20b8c562de190ffa03177cd0fab
| 3,432
|
py
|
Python
|
dace/codegen/prettycode.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | 1
|
2021-07-26T07:58:06.000Z
|
2021-07-26T07:58:06.000Z
|
dace/codegen/prettycode.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | null | null | null |
dace/codegen/prettycode.py
|
xiacijie/dace
|
2d942440b1d7b139ba112434bfa78f754e10bfe5
|
[
"BSD-3-Clause"
] | 1
|
2021-03-04T13:01:48.000Z
|
2021-03-04T13:01:48.000Z
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Code I/O stream that automates indentation and mapping of code to SDFG
nodes. """
from six import StringIO
from dace.config import Config
class CodeIOStream(StringIO):
""" Code I/O stream that automates indentation and mapping of code to SDFG
nodes. """
def __init__(self, base_indentation=0):
super(CodeIOStream, self).__init__()
self._indent = 0
self._spaces = int(Config.get('compiler', 'indentation_spaces'))
def write(self, contents, sdfg=None, state_id=None, node_id=None):
# Delete single trailing newline, as this will be implicitly inserted
# anyway
if contents:
if contents[-1] == "\n":
lines = contents[:-1].split("\n")
else:
lines = contents.split('\n')
else:
lines = contents
# If SDFG/state/node location is given, annotate this line
if sdfg is not None:
location_identifier = ' ////__DACE:%d' % sdfg.sdfg_id
if state_id is not None:
location_identifier += ':' + str(state_id)
if node_id is not None:
if not isinstance(node_id, list):
node_id = [node_id]
for i, nid in enumerate(node_id):
if not isinstance(nid, int):
node_id[i] = sdfg.nodes()[state_id].node_id(nid)
location_identifier += ':' + ','.join(
[str(nid) for nid in node_id])
else:
location_identifier = ''
# Write each line separately
for line in lines:
opening_braces = line.count('{')
closing_braces = line.count('}')
# Count closing braces before opening ones (e.g., for "} else {")
first_opening_brace = line.find('{')
initial_closing_braces = 0
if first_opening_brace > 0:
initial_closing_braces = line[:first_opening_brace].count('}')
closing_braces -= initial_closing_braces
brace_balance = opening_braces - closing_braces
# Write line and then change indentation
if initial_closing_braces > 0:
self._indent -= initial_closing_braces
if brace_balance < 0:
self._indent += brace_balance
codeline = self._indent * self._spaces * ' ' + line.strip()
# Location identifier is written at character 81 and on, find out
# how many spaces we need to add for that
loc_spaces = max(80 - len(codeline), 2)
if location_identifier != '':
super(CodeIOStream, self).write(codeline + loc_spaces * ' ' +
location_identifier + '\n')
else: # avoid ending spaces (useful for OpenCL and multiline macros)
super(CodeIOStream, self).write(codeline + '\n')
if brace_balance > 0:
self._indent += brace_balance
# If indentation failed, warn user
if self._indent < -1:
super(CodeIOStream, self).write(
'///WARNING: Indentation failure! This probably ' +
'indicates an error in the SDFG.\n')
self._indent = 0
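if __name__ == "__main__":
    # Hedged demo (added): indentation follows the brace balance automatically.
    # Assumes a working DaCe configuration, since __init__ reads
    # Config.get('compiler', 'indentation_spaces').
    stream = CodeIOStream()
    stream.write("int main() {")
    stream.write("return 0;")
    stream.write("}")
    print(stream.getvalue())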
| 40.857143
| 81
| 0.550991
|
2a88d970abb658d6fe65c9795e36dc4c9fb1bfc4
| 2,329
|
py
|
Python
|
legal_ner/utils/prepare_pretraining_data.py
|
openlegaldata/legal-ner
|
3a9e8301b05f97ab159e24a3b3c7670b63ccb665
|
[
"MIT"
] | 22
|
2019-02-12T09:50:40.000Z
|
2021-07-05T22:00:00.000Z
|
legal_ner/utils/prepare_pretraining_data.py
|
vishenka-git/legal-ner
|
3a9e8301b05f97ab159e24a3b3c7670b63ccb665
|
[
"MIT"
] | null | null | null |
legal_ner/utils/prepare_pretraining_data.py
|
vishenka-git/legal-ner
|
3a9e8301b05f97ab159e24a3b3c7670b63ccb665
|
[
"MIT"
] | 5
|
2019-02-08T04:34:33.000Z
|
2020-05-24T01:13:34.000Z
|
import re
from pathlib import Path
import plac
import srsly
from legal_ner.preprocessing import HtmlConcealer
def split_paragraphs(string):
paragraph_delimiter = re.compile(r'\n')
paragraphs = []
while True:
m = re.search(paragraph_delimiter, string)
if m is None:
paragraphs += [string]
break
paragraphs += [string[:m.start(0)]]
string = string[m.end(0):]
return paragraphs
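# Note (added): with the plain '\n' delimiter above this loop is equivalent to
# string.split('\n'); the explicit regex form just makes it easy to swap in a
# richer paragraph delimiter later.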
@plac.annotations(
input=('Path to json dump of oldp cases', 'option', 'i', Path),
output=('Path to jsonl file with raw texts', 'option', 'o', Path)
)
def main(input: Path, output: Path):
"""A script to prepare unsupervised pretraining data for spacy. The created data is intended to be used with
the commands https://spacy.io/api/cli#pretrain and consecutively https://spacy.io/api/cli#train"""
if not output.parent.exists():
output.parent.mkdir(parents=True, exist_ok=True)
with input.open(mode='r') as in_f:
with output.open(mode='w') as out_f:
print("Preparing training data...")
for line in in_f:
json = srsly.json_loads(line)
concealer = HtmlConcealer(json['content'])
# the order is crucial
concealer.replace_html_special_ents()
concealer.remove_pattern(r'<br.*>', replace_with='\n') # html linebreaks
concealer.remove_pattern(r'<[^>]+>') # html tags
concealer.remove_pattern(r'\xa0+|\t| {2,}', replace_with=' ') # white space
concealer.remove_enumeration_numbers()
concealer.remove_pattern(r'(^ +)|( +$)', flags=re.MULTILINE) # leading or trailing whitespace
                # whitespace-separated letters used for headlines, e.g. "T e n o r"
                concealer.remove_pattern(r'^[A-ZÄÜÖ]( [a-zäüö]){4,}( :)?$', flags=re.MULTILINE)
concealer.remove_pattern(r'\n{2,}', replace_with='\n') # duplicate newlines
concealer.remove_pattern(r'(^\n)|(\n$)') # newline at start or end
paragraphs = split_paragraphs(concealer.get_content())
out_f.writelines(['{"text": "' + p + '"}\n' for p in paragraphs if len(p) > 10])
print("...finished!")
if __name__ == '__main__':
plac.call(main)
| 40.859649
| 112
| 0.603693
|
750cda8952c7af399a41c5f53b354b95b48f5c8a
| 2,367
|
py
|
Python
|
models/tohinz_models.py
|
chunchentu/EvadeML-Zoo
|
dd8e85b9424871496a4dfb62f603f97e733f2433
|
[
"MIT"
] | 164
|
2017-08-18T16:24:44.000Z
|
2022-03-18T19:32:17.000Z
|
models/tohinz_models.py
|
chunchentu/EvadeML-Zoo
|
dd8e85b9424871496a4dfb62f603f97e733f2433
|
[
"MIT"
] | 14
|
2018-12-19T20:58:54.000Z
|
2022-03-13T14:34:41.000Z
|
models/tohinz_models.py
|
chunchentu/EvadeML-Zoo
|
dd8e85b9424871496a4dfb62f603f97e733f2433
|
[
"MIT"
] | 74
|
2017-11-06T15:35:19.000Z
|
2022-02-09T05:53:09.000Z
|
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Lambda
from keras.layers import MaxPooling2D, Conv2D
from keras.layers.normalization import BatchNormalization
def tohinz_svhn_model(logits=False, input_range_type=2, pre_filter=lambda x:x):
input_shape=(32, 32, 3)
nb_filters = 32
nb_denses = [512,10]
return tohinz_model(input_shape, nb_filters, nb_denses, logits, input_range_type, pre_filter)
def tohinz_model(input_shape, nb_filters, nb_denses, logits, input_range_type, pre_filter):
model = Sequential()
if input_range_type == 1:
# The input data range is [0, 1].
# Convert to [-0.5,0.5] by x-0.5.
scaler = lambda x: x-0.5
elif input_range_type == 2:
# The input data range is [-0.5, 0.5].
# Don't need to do scaling for carlini models, as it is the input range by default.
scaler = lambda x: x
    elif input_range_type == 3:
        # The input data range is [-1, 1]. Convert to [-0.5,0.5] by x/2.
        scaler = lambda x: x/2
    else:
        # Guard against falling through with `scaler` undefined (fix added).
        raise ValueError("Unsupported input_range_type: %s" % input_range_type)
model.add(Lambda(scaler, input_shape=input_shape))
model.add(Lambda(pre_filter, output_shape=input_shape))
model.add(Conv2D(nb_filters, kernel_size=3, input_shape=input_shape, padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(nb_filters, 3, padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
model.add(Conv2D(nb_filters*2, 3))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(nb_filters*2, 3, padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
model.add(Conv2D(nb_filters*4, 3))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(nb_filters*4, 3, padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(nb_denses[0], activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(nb_denses[1]))
if not logits:
model.add(Activation('softmax'))
return model
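if __name__ == "__main__":
    # Hedged smoke test (added): build the SVHN variant with logits enabled
    # and print the layer summary. Requires a working Keras install.
    model = tohinz_svhn_model(logits=True)
    model.summary()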
| 33.814286
| 97
| 0.682298
|
1ae64f81cf524efc20569591f33ed71e16eb46a8
| 906
|
py
|
Python
|
.history/extract-table-camelot_20220617141938.py
|
tjxj/pdf2docx
|
3338a95f1a971de8caf0369fa3ce794d2d6d57cd
|
[
"Apache-2.0"
] | null | null | null |
.history/extract-table-camelot_20220617141938.py
|
tjxj/pdf2docx
|
3338a95f1a971de8caf0369fa3ce794d2d6d57cd
|
[
"Apache-2.0"
] | null | null | null |
.history/extract-table-camelot_20220617141938.py
|
tjxj/pdf2docx
|
3338a95f1a971de8caf0369fa3ce794d2d6d57cd
|
[
"Apache-2.0"
] | null | null | null |
from os import set_inheritable
import camelot
import pandas as pd
tables = camelot.read_pdf('/Users/huhaiyang/Archives/2020.pdf',pages='1-3')
#tables.export('foo22.csv') # json, excel, html, markdown, sqlite
# tables[1]
# tables[1].parsing_report
# {
# 'accuracy': 99.02,
# 'whitespace': 12.24,
# 'order': 1,
# 'page': 2
# }
# tables[1].to_csv('foo.csv') # to_json, to_excel, to_html, to_markdown, to_sqlite
# #tables[0].df # get a pandas DataFrame!
result = pd.ExcelWriter('result.xlsx')
# for table in tables:
# table = table.df
# for i in range(0,len(tables)):
# sheetname = str(i)
# table.to_excel(result, sheetname, index=False)  # write each table to a separate sheet of the same workbook
for i in range(0,len(tables)):
table = tables[i].df
sheetname = str(i)
    table.to_excel(result, sheetname, index=False)  # write each table to a separate sheet of the same workbook
result.save()
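# Hedged note (added): newer pandas releases deprecate ExcelWriter.save() in
# favour of close() or a `with pd.ExcelWriter(...) as writer:` block.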
# print(tables[1].df.info())
# print(len(tables))
| 28.3125
| 82
| 0.661148
|
88a2a6e176f47e4a63a4beaeba9b75bd30c71ff4
| 1,368
|
py
|
Python
|
eha_jsonpath/__init__.py
|
hughpyle/jsonpath-extensions
|
0b053ac61d85d836fd3240a25b18a436a438bc15
|
[
"Apache-2.0"
] | 1
|
2021-07-12T05:12:20.000Z
|
2021-07-12T05:12:20.000Z
|
eha_jsonpath/__init__.py
|
hughpyle/jsonpath-extensions
|
0b053ac61d85d836fd3240a25b18a436a438bc15
|
[
"Apache-2.0"
] | 2
|
2019-05-16T17:31:50.000Z
|
2019-05-20T15:44:23.000Z
|
eha_jsonpath/__init__.py
|
hughpyle/jsonpath-extensions
|
0b053ac61d85d836fd3240a25b18a436a438bc15
|
[
"Apache-2.0"
] | 1
|
2020-09-16T12:19:47.000Z
|
2020-09-16T12:19:47.000Z
|
from jsonpath_ng.ext.parser import ExtentedJsonPathParser
from . import ext_functions as fn
# You must use this parser instead of the normal or ext one.
class JsonPathParser(ExtentedJsonPathParser):
"Aether extension of jsonpath_ng extensions..."
# register all exposed functions in this method
def p_jsonpath_named_operator(self, p):
"jsonpath : NAMED_OPERATOR"
if p[1].startswith("splitlist("):
p[0] = fn.SplitList(p[1])
elif p[1].startswith("cast("):
p[0] = fn.Cast(p[1])
elif p[1].startswith("match("):
p[0] = fn.Match(p[1])
elif p[1].startswith("notmatch("):
p[0] = fn.NotMatch(p[1])
elif p[1].startswith("epoch("):
p[0] = fn.ParseEpochDatetime(p[1])
elif p[1].startswith("datetime("):
p[0] = fn.ParseDatetime(p[1])
elif p[1].startswith("hash("):
p[0] = fn.Hash(p[1])
elif p[1].startswith("valuereplace("):
p[0] = fn.ValueReplace(p[1])
elif p[1].startswith("template("):
p[0] = fn.Template(p[1])
elif p[1].startswith("dictionaryreplace("):
p[0] = fn.DictionaryReplace(p[1])
else:
super(JsonPathParser, self).p_jsonpath_named_operator(p)
def parse(path, debug=False):
return JsonPathParser(debug=debug).parse(path)
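# Hedged usage sketch (added; the operator syntax is illustrative -- the exact
# argument format of each named operator is defined in ext_functions, which is
# outside this file):
#   expr = parse('$.payload.`cast(int)`')
#   values = [m.value for m in expr.find({'payload': '42'})]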
| 36
| 68
| 0.590643
|
42de41a08eb9bb12f0987ac03e4da0e50b97b75a
| 7,244
|
py
|
Python
|
pyNetCDF/concatNetcdfFiles.py
|
mjsauvinen/P4US
|
ba7bbc77a6e482f612ba5aa5f021a41fcbb23345
|
[
"MIT"
] | 4
|
2017-06-10T13:34:29.000Z
|
2021-10-08T14:33:43.000Z
|
pyNetCDF/concatNetcdfFiles.py
|
mjsauvinen/P4US
|
ba7bbc77a6e482f612ba5aa5f021a41fcbb23345
|
[
"MIT"
] | 8
|
2018-07-10T12:00:49.000Z
|
2021-09-16T13:58:59.000Z
|
pyNetCDF/concatNetcdfFiles.py
|
mjsauvinen/P4US
|
ba7bbc77a6e482f612ba5aa5f021a41fcbb23345
|
[
"MIT"
] | 6
|
2019-05-03T07:29:12.000Z
|
2022-01-21T03:10:27.000Z
|
#!/usr/bin/env python3
from netcdfTools import *
import sys
import argparse
import numpy as np
'''
Description: Concatenate a set of overlapping 3D NetCDF files onto a single global grid.
Author: Mikko Auvinen
Finnish Meteorological Institute
'''
#==========================================================#
def idBounds( cmin, cmaxL, cminL, dc, nc ):
ic1 = np.round((cminL-cmin)/dc , decimals=0).astype(int)
ic2 = np.round((cmaxL-cmin)/dc , decimals=0).astype(int)+1
ic2 = np.minimum( nc, ic2 )
return ic1, ic2
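# Worked illustration (added): with a global origin cmin=0, spacing dc=2 and a
# local block spanning [4, 10], idBounds(0, 10, 4, 2, nc=8) returns (2, 6):
# start index 4/2 = 2 and end index 10/2 + 1 = 6, clipped to nc.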
#==========================================================#
#==========================================================#
parser = argparse.ArgumentParser(prog='concat3dNetCDF.py')
parser.add_argument("-f", "--filenames",type=str, nargs='+', default=None, \
help="Name of the input NETCDF file.")
parser.add_argument("-fo", "--fileout",type=str, default="CONCAT.nc",\
help="Name of the output NETCDF file.")
parser.add_argument("-sn", "--scalars",type=str, nargs='+', default=None,\
help="(Optional) Scalars to be included.")
parser.add_argument("-so", "--scalarsOnly", action="store_true", default=False,\
help="Only scalars, skip wind vector components.")
parser.add_argument("-nt", "--ntimeskip", type=int, default=0,\
help="Skip <nt> number of time steps.")
args = parser.parse_args()
#==========================================================#
# Initial renaming operations and variable declarations
filenames = args.filenames
fileout = args.fileout
scalars = args.scalars
ntskip = args.ntimeskip
scalarsOnly = args.scalarsOnly
parameter = True; variable = False
dtol = 1E-5 # distance tolerance
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Open output NetCDF file
dso = netcdfOutputDataset( fileout )
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
if(scalarsOnly):
vstr = scalars[0]
else:
vstr = 'u'
if( filenames is not None ):
Nf = len(filenames)
else:
sys.exit(' No input NETCDF files given. Exiting ...')
dsL = list()
xL = list()
yL = list()
zL = list()
timeL = list()
uL = list()
vL = list()
wL = list()
sL = list()
tv = None
xMaxL = np.zeros(Nf); yMaxL = np.zeros(Nf); zMaxL = np.zeros(Nf)
xMinL = np.zeros(Nf); yMinL = np.zeros(Nf); zMinL = np.zeros(Nf)
xMaxL[:] = yMaxL[:] = zMaxL[:] = -9999.
xMinL[:] = yMinL[:] = zMinL[:] = 9999.
dxo = dyo = dzo = None
nto = None
for n,fn in enumerate(filenames):
ds, varList, paramList = netcdfDataset(fn)
dsL.append( ds )
time, time_dims = read1DVariableFromDataset('time',vstr, ds, ntskip, 0, 1 ) # All values.
print(' time dim = {} '.format(time_dims))
if( nto is None ):
nto = len(time)
else:
print(' Checking time array dimensions ...')
if( nto != len(time) ): sys.exit(' Time dimensions do not match ')
x, x_dims = read1DVariableFromDataset( 'x',vstr, ds, 0, 0, 1 )
y, y_dims = read1DVariableFromDataset( 'y',vstr, ds, 0, 0, 1 )
z, z_dims = read1DVariableFromDataset( 'z',vstr, ds, 0, 0, 1 )
  # TODO: a check that each coordinate array is uniformly spaced could be added here
dx=np.mean(x[1:]-x[:-1]); dy=np.mean(y[1:]-y[:-1]); dz=np.mean(z[1:]-z[:-1])
print('dx={:.3f}; dy={:.3f}; dz={:.3f}'.format(dx,dy,dz))
if( dxo is None ):
dxo = dx; dyo = dy; dzo = dz
else:
print(' Checking that resolutions match ...')
if( np.abs(dx-dxo)>dtol ): sys.exit(' dx do not match ')
if( np.abs(dy-dyo)>dtol ): sys.exit(' dy do not match ')
if( np.abs(dz-dzo)>dtol ): sys.exit(' dz do not match ')
# Store coord arrays and record min and max values
timeL.append(time)
xL.append(x); xMaxL[n]=np.max(x); xMinL[n]=np.min(x)
yL.append(y); yMaxL[n]=np.max(y); yMinL[n]=np.min(y)
zL.append(z); zMaxL[n]=np.max(z); zMinL[n]=np.min(z)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Global coordinate bounds
xMax = np.max(xMaxL); xMin = np.min(xMinL)
yMax = np.max(yMaxL); yMin = np.min(yMinL)
zMax = np.max(zMaxL); zMin = np.min(zMinL)
# Create global coords
xc = np.arange( xMin, xMax+dx, dx )
yc = np.arange( yMin, yMax+dy, dy )
zc = np.arange( zMin, zMax+dz, dz )
nt = len(time)
nx = len(xc); ny = len(yc); nz = len(zc)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
tv = createNetcdfVariable( dso, time,'time', nt ,'s','f4',('time',), parameter )
xv = createNetcdfVariable( dso, xc , 'x' , nx , 'm', 'f4', ('x',) , parameter )
yv = createNetcdfVariable( dso, yc , 'y' , ny , 'm', 'f4', ('y',) , parameter )
zv = createNetcdfVariable( dso, zc , 'z' , nz , 'm', 'f4', ('z',) , parameter )
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
if( not scalarsOnly ):
print('Concatenating u arrays ...')
uc = np.zeros( (nt, nz, ny, nx) ); print(' u concat shape = {} '.format((nt, nz, ny, nx)))
for n in range(Nf):
u, u_dims = read3DVariableFromDataset( 'u', dsL[n], ntskip, 0, 0, 1 ) # All values.
print(' u dims = {} '.format(u_dims))
i1, i2 = idBounds(xMin, xMaxL[n], xMinL[n], dx, nx )
print(' i1, i2 = {}, {} '.format(i1,i2))
j1, j2 = idBounds(yMin, yMaxL[n], yMinL[n], dy, ny )
print(' j1, j2 = {}, {} '.format(j1,j2))
k1, k2 = idBounds(zMin, zMaxL[n], zMinL[n], dz, nz )
print(' k1, k2 = {}, {} '.format(k1,k2))
uc[:,k1:k2, j1:j2, i1:i2] = u[:,:,:,:]
u = None
uv = createNetcdfVariable(dso,uc,'u', nt, 'm/s', 'f4',('time','z','y','x',), variable)
uc = None
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
print('Concatenating v arrays ...')
vc = np.zeros( (nt, nz, ny, nx) )
for n in range(Nf):
v, v_dims = read3DVariableFromDataset( 'v', dsL[n], ntskip, 0, 0, 1 ) # All values.
i1, i2 = idBounds( xMin, xMaxL[n], xMinL[n], dx, nx )
j1, j2 = idBounds( yMin, yMaxL[n], yMinL[n], dy, ny )
k1, k2 = idBounds( zMin, zMaxL[n], zMinL[n], dz, nz )
vc[:,k1:k2, j1:j2, i1:i2] = v[:,:,:,:]
v = None
vv = createNetcdfVariable(dso,vc,'v', nt, 'm/s', 'f4',('time','z','y','x',), variable)
vc = None
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
print('Concatenating w arrays ...')
wc = np.zeros( (nt, nz, ny, nx) )
for n in range(Nf):
w, w_dims = read3DVariableFromDataset( 'w', dsL[n], ntskip, 0, 0, 1 ) # All values.
i1, i2 = idBounds( xMin, xMaxL[n], xMinL[n], dx, nx )
j1, j2 = idBounds( yMin, yMaxL[n], yMinL[n], dy, ny )
k1, k2 = idBounds( zMin, zMaxL[n], zMinL[n], dz, nz )
wc[:,k1:k2, j1:j2, i1:i2] = w[:,:,:,:]
w = None
wv = createNetcdfVariable(dso,wc,'w', nt, 'm/s', 'f4',('time','z','y','x',), variable)
wc = None
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
if( scalars ):
sv = list()
for sn in scalars:
print('Concatenating {} arrays ...'.format(sn))
sc = np.zeros( (nt, nz, ny, nx) )
for n in range(Nf):
s, s_dims = read3DVariableFromDataset( sn, dsL[n], ntskip, 0, 0, 1 ) # All values.
i1, i2 = idBounds( xMin, xMaxL[n], xMinL[n], dx, nx )
j1, j2 = idBounds( yMin, yMaxL[n], yMinL[n], dy, ny )
k1, k2 = idBounds( zMin, zMaxL[n], zMinL[n], dz, nz )
sc[:,k1:k2, j1:j2, i1:i2] = s[:,:,:,:]
s = None
sv.append(createNetcdfVariable(dso,sc,sn,nt,'-','f4',('time','z','y','x',),variable))
sc = None
# - - - - Done , finalize the output - - - - - - - - - -
netcdfWriteAndClose( dso )
| 34.331754
| 92
| 0.53037
|
c3eb0dc1e8912dc690efbd1f04f7f94a7981ce25
| 3,041
|
py
|
Python
|
buildings/tests/gui/test_setup_edit_geometry_production.py
|
strk/nz-buildings
|
8dc8ee19d322837380bb4f016b01eccee2c1bd0a
|
[
"PostgreSQL",
"CC-BY-4.0"
] | 2
|
2020-02-21T00:46:31.000Z
|
2020-08-17T14:22:19.000Z
|
buildings/tests/gui/test_setup_edit_geometry_production.py
|
strk/nz-buildings
|
8dc8ee19d322837380bb4f016b01eccee2c1bd0a
|
[
"PostgreSQL",
"CC-BY-4.0"
] | 243
|
2018-12-16T22:01:54.000Z
|
2022-01-10T20:09:24.000Z
|
buildings/tests/gui/test_setup_edit_geometry_production.py
|
strk/nz-buildings
|
8dc8ee19d322837380bb4f016b01eccee2c1bd0a
|
[
"PostgreSQL",
"CC-BY-4.0"
] | 1
|
2020-03-24T10:35:43.000Z
|
2020-03-24T10:35:43.000Z
|
# -*- coding: utf-8 -*-
"""
################################################################################
#
# Copyright 2018 Crown copyright (c)
# Land Information New Zealand and the New Zealand Government.
# All rights reserved
#
# This program is released under the terms of the 3 clause BSD license. See the
# LICENSE file for more information.
#
################################################################################
Tests: Edit Production Outline GUI setup confirms default settings
***************************************************************************/
"""
import unittest
from qgis.PyQt.QtCore import Qt
from qgis.core import QgsProject
from qgis.utils import plugins, iface
class SetUpEditProduction(unittest.TestCase):
"""
Test Edit Outline GUI initial
setup confirm default settings
"""
def setUp(self):
"""Runs before each test."""
self.building_plugin = plugins.get("buildings")
self.building_plugin.main_toolbar.actions()[0].trigger()
self.dockwidget = self.building_plugin.dockwidget
sub_menu = self.dockwidget.lst_sub_menu
sub_menu.setCurrentItem(sub_menu.findItems("Edit Outlines", Qt.MatchExactly)[0])
self.production_frame = self.dockwidget.current_frame
self.edit_dialog = self.production_frame.edit_dialog
for action in iface.building_toolbar.actions():
if action.text() == "Edit Geometry":
action.trigger()
def tearDown(self):
"""Runs after each test."""
self.production_frame.btn_exit.click()
def test_production_gui_set_up(self):
""" Initial set up of the frame """
self.assertTrue(self.edit_dialog.isVisible())
self.assertFalse(self.edit_dialog.layout_status.isVisible())
self.assertTrue(self.edit_dialog.layout_capture_method.isVisible())
self.assertFalse(self.edit_dialog.layout_lifecycle_stage.isVisible())
self.assertFalse(self.edit_dialog.layout_general_info.isVisible())
self.assertFalse(self.edit_dialog.layout_end_lifespan.isVisible())
self.assertFalse(self.edit_dialog.btn_edit_save.isEnabled())
self.assertFalse(self.edit_dialog.btn_edit_reset.isEnabled())
self.assertFalse(self.edit_dialog.cmb_capture_method.isEnabled())
def test_layer_registry(self):
""" Layer registry has the correct components """
layer_bool = False
edit_bool = False
root = QgsProject.instance().layerTreeRoot()
group = root.findGroup("Building Tool Layers")
layers = group.findLayers()
names = [layer.layer().name() for layer in layers]
if "building_outlines" in names and "historic_outlines" in names:
layer_bool = True
for layer in layers:
if (
layer.layer().name() == "building_outlines"
and layer.layer().isEditable()
):
edit_bool = True
self.assertTrue(layer_bool)
self.assertTrue(edit_bool)
| 38.0125
| 88
| 0.62315
|
9e9b6d7f50ee4888913900942398cba4d6f17430
| 8,000
|
py
|
Python
|
scripts/kconfig/kconfig.py
|
jaruiz/zephyr
|
2a103ea67493ebcae5c5d7720f6e1a38a05faff1
|
[
"Apache-2.0"
] | 2
|
2018-12-17T21:00:20.000Z
|
2019-02-03T09:47:38.000Z
|
scripts/kconfig/kconfig.py
|
jaruiz/zephyr
|
2a103ea67493ebcae5c5d7720f6e1a38a05faff1
|
[
"Apache-2.0"
] | 6
|
2019-01-09T08:50:20.000Z
|
2019-07-29T09:47:59.000Z
|
scripts/kconfig/kconfig.py
|
jaruiz/zephyr
|
2a103ea67493ebcae5c5d7720f6e1a38a05faff1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Modified from: https://github.com/ulfalizer/Kconfiglib/blob/master/examples/merge_config.py
import argparse
import os
import sys
import textwrap
from kconfiglib import Kconfig, Symbol, BOOL, STRING, TRISTATE, TRI_TO_STR
# Warnings that won't be turned into errors (but that will still be printed),
# identified by a substring of the warning. The warning texts from Kconfiglib
# are guaranteed to not change.
WARNING_WHITELIST = (
# Warning generated when a symbol with unsatisfied dependencies is being
# selected. These should be investigated, but whitelist them for now.
"y-selected",
)
def fatal(warning):
# Returns True if 'warning' is not whitelisted and should be turned into an
# error
for wl_warning in WARNING_WHITELIST:
if wl_warning in warning:
return False
# Only allow enabled (printed) warnings to be fatal
return enabled(warning)
def enabled(warning):
# Returns True if 'warning' should be printed
# Some prj.conf files seem to deliberately override settings from the board
# configuration (e.g. samples/bluetooth/hci_usb/prj.conf, with GPIO=y).
# Disable the warning about a symbol being assigned more than once.
return "set more than once" not in warning
def main():
parse_args()
print("Parsing Kconfig tree in {}".format(args.kconfig_root))
kconf = Kconfig(args.kconfig_root, warn_to_stderr=False)
# Enable warnings for assignments to undefined symbols
kconf.enable_undef_warnings()
for i, config in enumerate(args.conf_fragments):
print(("Loading {} as base" if i == 0 else "Merging {}")
.format(config))
# replace=False creates a merged configuration
kconf.load_config(config, replace=False)
# Print warnings for symbols whose actual value doesn't match the assigned
# value
for sym in kconf.unique_defined_syms:
# Was the symbol assigned to? Choice symbols are checked separately.
if sym.user_value is not None and not sym.choice:
verify_assigned_sym_value(sym)
# Print warnings for choices whose actual selection doesn't match the user
# selection
for choice in kconf.unique_choices:
if choice.user_selection:
verify_assigned_choice_value(choice)
# Hack: Force all symbols to be evaluated, to catch warnings generated
# during evaluation. Wait till the end to write the actual output files, so
# that we don't generate any output if there are warnings-turned-errors.
#
# Kconfiglib caches calculated symbol values internally, so this is still
# fast.
kconf.write_config(os.devnull)
# We could roll this into the loop below, but it's nice to always print all
# warnings, even if one of them turns out to be fatal
for warning in kconf.warnings:
if enabled(warning):
print("\n" + warning, file=sys.stderr)
    # Turn all warnings except for explicitly whitelisted ones into errors. In
# particular, this will turn assignments to undefined Kconfig variables
# into errors.
#
# A warning is generated by this script whenever a symbol gets a different
# value than the one it was assigned. Keep that one as just a warning for
# now as well.
for warning in kconf.warnings:
if fatal(warning):
sys.exit("\n" + textwrap.fill(
"Error: Aborting due to non-whitelisted Kconfig "
"warning '{}'.\nNote: If this warning doesn't point "
"to an actual problem, you can add it to the "
"whitelist at the top of {}."
.format(warning, sys.argv[0]),
100) + "\n")
# Write the merged configuration and the C header
kconf.write_config(args.dotconfig)
kconf.write_autoconf(args.autoconf)
# Message printed when a promptless symbol is assigned (and doesn't get the
# assigned value)
PROMPTLESS_HINT = """
This symbol has no prompt, meaning assignments in configuration files have no
effect on it. It can only be set indirectly, via Kconfig defaults (e.g. in a
Kconfig.defconfig file) or through being 'select'ed or 'imply'd (note: try to
avoid Kconfig 'select's except for trivial promptless "helper" symbols without
dependencies, as it ignores dependencies and forces symbols on)."""
# Message about where to look up symbol information
SYM_INFO_HINT = """
You can check symbol information (including dependencies) in the 'menuconfig'
interface (see the Application Development Primer section of the manual), or in
the Kconfig reference at
http://docs.zephyrproject.org/latest/reference/kconfig/CONFIG_{}.html (which is
updated regularly from the master branch). See the 'Setting configuration
values' section of the Board Porting Guide as well."""
PROMPTLESS_HINT_EXTRA = """
It covers Kconfig.defconfig files."""
def verify_assigned_sym_value(sym):
# Verifies that the value assigned to 'sym' "took" (matches the value the
# symbol actually got), printing a warning otherwise
# Tristate values are represented as 0, 1, 2. Having them as
# "n", "m", "y" is more convenient here, so convert.
if sym.type in (BOOL, TRISTATE):
user_value = TRI_TO_STR[sym.user_value]
else:
user_value = sym.user_value
if user_value != sym.str_value:
msg = "warning: {} was assigned the value '{}' but got the " \
"value '{}'." \
.format(name_and_loc(sym), user_value, sym.str_value)
        if promptless(sym):
            msg += PROMPTLESS_HINT
msg += SYM_INFO_HINT.format(sym.name)
        if promptless(sym):
            msg += PROMPTLESS_HINT_EXTRA
# Use a large fill() width to try to avoid linebreaks in the symbol
# reference link
print("\n" + textwrap.fill(msg, 100), file=sys.stderr)
def verify_assigned_choice_value(choice):
# Verifies that the choice symbol that was selected (by setting it to y)
# ended up as the selection, printing a warning otherwise.
#
# We check choice symbols separately to avoid warnings when two different
# choice symbols within the same choice are set to y. This might happen if
# a choice selection from a board defconfig is overriden in a prj.conf, for
# example. The last choice symbol set to y becomes the selection (and all
# other choice symbols get the value n).
#
# Without special-casing choices, we'd detect that the first symbol set to
# y ended up as n, and print a spurious warning.
if choice.user_selection is not choice.selection:
msg = "warning: the choice symbol {} was selected (set =y), but {} " \
"ended up as the choice selection. {}" \
.format(name_and_loc(choice.user_selection),
name_and_loc(choice.selection) if choice.selection
else "no symbol",
SYM_INFO_HINT.format(choice.user_selection.name))
print("\n" + textwrap.fill(msg, 100), file=sys.stderr)
def name_and_loc(sym):
# Helper for printing the name and Kconfig file location(s) for a symbol
if not sym.nodes:
return sym.name + " (undefined)"
return "{} (defined at {})".format(
sym.name,
", ".join("{}:{}".format(node.filename, node.linenr)
for node in sym.nodes))
def promptless(sym):
# Returns True if 'sym' has no prompt. Since the symbol might be defined in
# multiple locations, we need to check all locations.
return not any(node.prompt for node in sym.nodes)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("kconfig_root")
parser.add_argument("dotconfig")
parser.add_argument("autoconf")
parser.add_argument("conf_fragments", metavar='conf', type=str, nargs='+')
args = parser.parse_args()
if __name__ == "__main__":
main()
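# Example invocation (editor's sketch; the file names are hypothetical):
#
#   python3 kconfig.py Kconfig build/.config build/autoconf.h \
#       boards/foo_defconfig prj.conf
#
# Fragments are merged left to right: the first is loaded as the base and each
# later file is merged on top via load_config(..., replace=False).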
| 37.914692
| 93
| 0.685875
|
0488b8c23e44049164a74d7969dd9273cd547974
| 23,338
|
py
|
Python
|
src/skmultiflow/evaluation/evaluate_prequential_delayed.py
|
denisesato/scikit-multiflow
|
3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95
|
[
"BSD-3-Clause"
] | 663
|
2017-11-16T15:48:45.000Z
|
2022-03-28T07:38:17.000Z
|
src/skmultiflow/evaluation/evaluate_prequential_delayed.py
|
denisesato/scikit-multiflow
|
3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95
|
[
"BSD-3-Clause"
] | 293
|
2017-12-16T12:33:49.000Z
|
2022-02-22T03:34:25.000Z
|
src/skmultiflow/evaluation/evaluate_prequential_delayed.py
|
denisesato/scikit-multiflow
|
3eb4c7262bb60d7e3f65c0d3395e4572d9a8cb95
|
[
"BSD-3-Clause"
] | 201
|
2017-11-30T15:52:30.000Z
|
2022-03-25T21:46:55.000Z
|
import os
import warnings
import re
import numpy as np
from timeit import default_timer as timer
from numpy import unique
from skmultiflow.evaluation.base_evaluator import StreamEvaluator
from skmultiflow.utils import constants
from skmultiflow.data import TimeManager
class EvaluatePrequentialDelayed(StreamEvaluator):
""" The prequential evaluation delayed method.
    The prequential evaluation delayed is designed specifically for stream
    settings, in the sense that each sample serves two purposes, and that
    samples are analysed sequentially, in order of arrival, and are used to
    update the model only when their labels are available, given their
    timestamps (arrival and available times).
    This method consists of using each sample to test the model, which means
    to make a prediction, and then the same sample is used to train the model
    (partial fit) once its label becomes available after a certain delay.
    This way the model is always tested on samples that it hasn't seen yet and
    updated on samples whose labels are available.
Parameters
----------
n_wait: int (Default: 200)
The number of samples to process between each test. Also defines when
to update the plot if ``show_plot=True``. Note that setting ``n_wait``
too small can significantly slow the evaluation process.
max_samples: int (Default: 100000)
The maximum number of samples to process during the evaluation.
batch_size: int (Default: 1)
The number of samples to pass at a time to the model(s).
pretrain_size: int (Default: 200)
The number of samples to use to train the model before starting the
evaluation. Used to enforce a 'warm' start.
max_time: float (Default: float("inf"))
The maximum duration of the simulation (in seconds).
metrics: list, optional (Default: ['accuracy', 'kappa'])
| The list of metrics to track during the evaluation. Also defines the
metrics that will be displayed in plots and/or logged into the output
file. Valid options are:
| **Classification**
| 'accuracy'
| 'kappa'
| 'kappa_t'
| 'kappa_m'
| 'true_vs_predicted'
| 'precision'
| 'recall'
| 'f1'
| 'gmean'
| **Multi-target Classification**
| 'hamming_score'
| 'hamming_loss'
| 'exact_match'
| 'j_index'
| **Regression**
| 'mean_square_error'
| 'mean_absolute_error'
| 'true_vs_predicted'
| **Multi-target Regression**
| 'average_mean_squared_error'
| 'average_mean_absolute_error'
| 'average_root_mean_square_error'
| **General purpose** (no plot generated)
| 'running_time'
| 'model_size'
output_file: string, optional (Default: None)
File name to save the summary of the evaluation.
show_plot: bool (Default: False)
If True, a plot will show the progress of the evaluation.
Warning: Plotting can slow down the evaluation process.
restart_stream: bool, optional (default: True)
If True, the stream is restarted once the evaluation is complete.
data_points_for_classification: bool(Default: False)
If True, the visualization used is a cloud of data points (only works
for classification) and default performance metrics are ignored. If
specific metrics are required, then they *must* be explicitly set
using the ``metrics`` attribute.
Notes
-----
1. This evaluator can process a single learner to track its performance;
or multiple learners at a time, to compare different models on the same
stream.
2. The metric 'true_vs_predicted' is intended to be informative only. It
corresponds to evaluations at a specific moment which might not
represent the actual learner performance across all instances.
    3. The metrics `running_time` and `model_size` are not plotted when the
`show_plot` option is set. Only their current value is displayed at the
bottom of the figure. However, their values over the evaluation are
written into the resulting csv file if the `output_file` option is set.
Examples
--------
>>> # The first example demonstrates how to evaluate one model
>>> import numpy as np
>>> import pandas as pd
>>> from skmultiflow.data import TemporalDataStream
>>> from skmultiflow.trees import HoeffdingTreeClassifier
>>> from skmultiflow.evaluation import EvaluatePrequentialDelayed
>>>
>>> # Columns used to get the data, label and time from iris_timestamp dataset
>>> DATA_COLUMNS = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
>>> LABEL_COLUMN = "label"
>>> TIME_COLUMN = "timestamp"
>>>
>>> # Read a csv with stream data
    >>> data = pd.read_csv("https://raw.githubusercontent.com/scikit-multiflow/streaming-datasets/"
    ...                    "master/iris_timestamp.csv")
>>> # Convert time column to datetime
>>> data[TIME_COLUMN] = pd.to_datetime(data[TIME_COLUMN])
>>> # Sort data by time
>>> data = data.sort_values(by=TIME_COLUMN)
>>> # Get X, y and time
>>> X = data[DATA_COLUMNS].values
>>> y = data[LABEL_COLUMN].values
>>> time = data[TIME_COLUMN].values
>>>
>>>
>>> # Set a delay of 1 day
>>> delay_time = np.timedelta64(1, "D")
>>> # Set the stream
>>> stream = TemporalDataStream(X, y, time, sample_delay=delay_time, ordered=False)
>>>
>>> # Set the model
>>> ht = HoeffdingTreeClassifier()
>>>
>>> # Set the evaluator
>>>
    >>> evaluator = EvaluatePrequentialDelayed(batch_size=1,
    ...                                        pretrain_size=X.shape[0]//2,
    ...                                        max_samples=X.shape[0],
    ...                                        output_file='results_delay.csv',
    ...                                        metrics=['accuracy', 'recall', 'precision', 'f1', 'kappa'])
>>>
>>> # Run evaluation
>>> evaluator.evaluate(stream=stream, model=ht, model_names=['HT'])
>>> # The second example demonstrates how to compare two models
>>> import numpy as np
>>> import pandas as pd
>>> from skmultiflow.data import TemporalDataStream
>>> from skmultiflow.trees import HoeffdingTreeClassifier
>>> from skmultiflow.bayes import NaiveBayes
>>> from skmultiflow.evaluation import EvaluatePrequentialDelayed
>>>
>>> # Columns used to get the data, label and time from iris_timestamp dataset
>>> DATA_COLUMNS = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
>>> LABEL_COLUMN = "label"
>>> TIME_COLUMN = "timestamp"
>>>
>>> # Read a csv with stream data
>>> data = pd.read_csv("../data/datasets/iris_timestamp.csv")
>>> # Convert time column to datetime
>>> data[TIME_COLUMN] = pd.to_datetime(data[TIME_COLUMN])
>>> # Sort data by time
>>> data = data.sort_values(by=TIME_COLUMN)
>>> # Get X, y and time
>>> X = data[DATA_COLUMNS].values
>>> y = data[LABEL_COLUMN].values
>>> time = data[TIME_COLUMN].values
>>>
>>>
>>> # Set a delay of 30 minutes
>>> delay_time = np.timedelta64(30, "m")
>>> # Set the stream
>>> stream = TemporalDataStream(X, y, time, sample_delay=delay_time, ordered=False)
>>>
>>> # Set the models
>>> ht = HoeffdingTreeClassifier()
>>> nb = NaiveBayes()
>>>
    >>> evaluator = EvaluatePrequentialDelayed(batch_size=1,
    ...                                        pretrain_size=X.shape[0]//2,
    ...                                        max_samples=X.shape[0],
    ...                                        output_file='results_delay.csv',
    ...                                        metrics=['accuracy', 'recall', 'precision', 'f1', 'kappa'])
>>>
>>> # Run evaluation
>>> evaluator.evaluate(stream=stream, model=[ht, nb], model_names=['HT', 'NB'])
"""
def __init__(self,
n_wait=200,
max_samples=100000,
batch_size=1,
pretrain_size=200,
max_time=float("inf"),
metrics=None,
output_file=None,
show_plot=False,
restart_stream=True,
data_points_for_classification=False):
super().__init__()
self._method = 'prequential'
self.n_wait = n_wait
self.max_samples = max_samples
self.pretrain_size = pretrain_size
self.batch_size = batch_size
self.max_time = max_time
self.output_file = output_file
self.show_plot = show_plot
self.data_points_for_classification = data_points_for_classification
if not self.data_points_for_classification:
if metrics is None:
self.metrics = [constants.ACCURACY, constants.KAPPA]
else:
if isinstance(metrics, list):
self.metrics = metrics
else:
raise TypeError("Attribute 'metrics' must be 'None' or 'list', passed {}".
format(type(metrics)))
else:
if metrics is None:
self.metrics = [constants.DATA_POINTS]
else:
if isinstance(metrics, list):
self.metrics = metrics
self.metrics.append(constants.DATA_POINTS)
else:
raise TypeError("Attribute 'metrics' must be 'None' or 'list', passed {}".
format(type(metrics)))
self.restart_stream = restart_stream
self.n_sliding = n_wait
warnings.filterwarnings("ignore", ".*invalid value encountered in true_divide.*")
warnings.filterwarnings("ignore", ".*Passing 1d.*")
def evaluate(self, stream, model, model_names=None):
""" Evaluates a model or set of models on samples from a stream.
Parameters
----------
stream: Stream
The stream from which to draw the samples.
model: skmultiflow.core.BaseSKMObject or sklearn.base.BaseEstimator or list
The model or list of models to evaluate.
model_names: list, optional (Default=None)
A list with the names of the models.
Returns
-------
StreamModel or list
The trained model(s).
"""
self._init_evaluation(model=model, stream=stream, model_names=model_names)
if self._check_configuration():
self._reset_globals()
# Initialize metrics and outputs (plots, log files, ...)
self._init_metrics()
self._init_plot()
self._init_file()
self.model = self._train_and_test
if self.show_plot:
self.visualizer.hold()
return self.model
def _update_classifiers(self, X, y, sample_weight):
# check if there are samples to update
if len(X) > 0:
# Train
if self.first_run:
for i in range(self.n_models):
if self._task_type != constants.REGRESSION and \
self._task_type != constants.MULTI_TARGET_REGRESSION:
# Accounts for the moment of training beginning
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X=X, y=y, classes=self.stream.target_values,
sample_weight=sample_weight)
# Accounts the ending of training
self.running_time_measurements[i].compute_training_time_end()
else:
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X=X, y=y, sample_weight=sample_weight)
self.running_time_measurements[i].compute_training_time_end()
# Update total running time
self.running_time_measurements[i].update_time_measurements(self.batch_size)
self.first_run = False
else:
for i in range(self.n_models):
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X, y, sample_weight=sample_weight)
self.running_time_measurements[i].compute_training_time_end()
self.running_time_measurements[i].update_time_measurements(self.batch_size)
self.global_sample_count += len(X) # self.batch_size
def _update_metrics_delayed(self, y_true_delayed, y_pred_delayed):
# update metrics if y_pred_delayed has items
if len(y_pred_delayed) > 0:
for j in range(self.n_models):
for i in range(len(y_pred_delayed[0])):
self.mean_eval_measurements[j].add_result(y_true_delayed[i],
y_pred_delayed[j][i])
self.current_eval_measurements[j].add_result(y_true_delayed[i],
y_pred_delayed[j][i])
self._check_progress(self.actual_max_samples)
if ((self.global_sample_count % self.n_wait) == 0 or
(self.global_sample_count >= self.max_samples) or
(self.global_sample_count / self.n_wait > self.update_count + 1)):
if y_pred_delayed is not None:
self._update_metrics()
self.update_count += 1
    def _transform_model_predictions(self, predictions):
        # Transpose model-major predictions (n_models, n_samples) into
        # sample-major order (n_samples, n_models).
out = []
if len(predictions) > 0:
for j in range(predictions.shape[1]):
out.append(predictions[:, j])
out = np.asarray(out)
return out
    def _transform_predictions_model(self, predictions):
        # Inverse of _transform_model_predictions: transpose sample-major
        # predictions back to model-major order, as nested lists.
out = []
if len(predictions) > 0:
for j in range(predictions.shape[1]):
k = []
for i in range(predictions.shape[0]):
k.append(predictions[i, j])
out.append(k)
return out
def _predict_samples(self, X):
if X is not None:
# Test
prediction = [[] for _ in range(self.n_models)]
for i in range(self.n_models):
try:
# Testing time
self.running_time_measurements[i].compute_testing_time_begin()
prediction[i].extend(self.model[i].predict(X))
self.running_time_measurements[i].compute_testing_time_end()
except TypeError:
raise TypeError("Unexpected prediction value from {}"
.format(type(self.model[i]).__name__))
# adapt prediction matrix to sample-model instead of model-sample by transposing it
y_pred = np.asarray(prediction)
# transform
y_pred_T = self._transform_model_predictions(y_pred)
return y_pred_T
@property
def _train_and_test(self):
""" Method to control the prequential evaluation.
Returns
-------
BaseSKMObject extension or list of BaseClassifier extensions
The trained classifiers.
"""
self._start_time = timer()
self._end_time = timer()
print('Prequential Evaluation Delayed')
print('Evaluating {} target(s).'.format(self.stream.n_targets))
self.actual_max_samples = self.stream.n_remaining_samples()
if self.actual_max_samples == -1 or self.actual_max_samples > self.max_samples:
self.actual_max_samples = self.max_samples
self.first_run = True
if self.pretrain_size > 0:
print('Pre-training on {} sample(s).'.format(self.pretrain_size))
# get current batch
X, y_true, arrival_time, available_time, sample_weight = self.stream.\
next_sample(self.pretrain_size)
for i in range(self.n_models):
if self._task_type == constants.CLASSIFICATION:
# Training time computation
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X=X, y=y_true,
classes=self.stream.target_values,
sample_weight=sample_weight)
self.running_time_measurements[i].compute_training_time_end()
elif self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X=X, y=y_true,
classes=unique(self.stream.target_values),
sample_weight=sample_weight)
self.running_time_measurements[i].compute_training_time_end()
else:
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X=X, y=y_true, sample_weight=sample_weight)
self.running_time_measurements[i].compute_training_time_end()
self.running_time_measurements[i].update_time_measurements(self.pretrain_size)
self.global_sample_count += self.pretrain_size
self.first_run = False
# initialize time_manager with last timestamp available
self.time_manager = TimeManager(arrival_time[-1])
self.update_count = 0
print('Evaluating...')
while ((self.global_sample_count < self.actual_max_samples) & (
self._end_time - self._start_time < self.max_time)
& (self.stream.has_more_samples())):
try:
# get current batch
X, y_true, arrival_time, available_time, sample_weight = self.stream.\
next_sample(self.batch_size)
# update current timestamp
self.time_manager.update_timestamp(arrival_time[-1])
# get delayed samples to update model before predicting a new batch
X_delayed, y_true_delayed, y_pred_delayed = self.time_manager.\
get_available_samples()
# transpose prediction matrix to model-sample again
y_pred_delayed = self._transform_predictions_model(y_pred_delayed)
self._update_metrics_delayed(y_true_delayed=y_true_delayed,
y_pred_delayed=y_pred_delayed)
# before getting new samples, update classifiers with samples
# that are already available
self._update_classifiers(X=X_delayed, y=y_true_delayed,
sample_weight=sample_weight)
# predict samples and get predictions
y_pred = self._predict_samples(X)
# add current samples to delayed queue
self.time_manager.update_queue(X=X, y_true=y_true, y_pred=y_pred,
sample_weight=sample_weight,
arrival_time=arrival_time,
available_time=available_time)
self._end_time = timer()
except BaseException as exc:
print(exc)
                if isinstance(exc, KeyboardInterrupt):
self._update_metrics()
break
# evaluate remaining samples in the delayed_queue
# iterate over delay_queue while it has samples according to batch_size
while self.time_manager.has_more_samples():
# get current samples to process
X_delayed, y_true_delayed, y_pred_delayed, sample_weight = self.time_manager.\
next_sample(self.batch_size)
# transpose prediction matrix to model-sample again
y_pred_delayed = self._transform_predictions_model(y_pred_delayed)
# update metrics
self._update_metrics_delayed(y_true_delayed, y_pred_delayed)
# update classifier with these samples for output models
self._update_classifiers(X_delayed, y_true_delayed, sample_weight)
self._end_time = timer()
# Flush file buffer, in case it contains data
self._flush_file_buffer()
if len(set(self.metrics).difference({constants.DATA_POINTS})) > 0:
self.evaluation_summary()
else:
print('Done')
if self.restart_stream:
self.stream.restart()
return self.model
def partial_fit(self, X, y, classes=None, sample_weight=None):
""" Partially fit all the models on the given data.
Parameters
----------
X: numpy.ndarray of shape (n_samples, n_features)
The data upon which the estimators will be trained.
        y: numpy.ndarray of shape (n_samples,)
The classification labels / target values for all samples in X.
classes: list, optional (default=None)
Stores all the classes that may be encountered during the
classification task. Not used for regressors.
sample_weight: numpy.ndarray, optional (default=None)
Samples weight. If not provided, uniform weights are assumed.
Returns
-------
        EvaluatePrequentialDelayed
self
"""
if self.model is not None:
for i in range(self.n_models):
if self._task_type == constants.CLASSIFICATION or \
self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
self.model[i].partial_fit(X=X, y=y,
classes=classes,
sample_weight=sample_weight)
else:
self.model[i].partial_fit(X=X, y=y,
sample_weight=sample_weight)
return self
else:
return self
def predict(self, X):
""" Predicts with the estimator(s) being evaluated.
Parameters
----------
X: numpy.ndarray of shape (n_samples, n_features)
All the samples we want to predict the label for.
Returns
-------
list of numpy.ndarray
Model(s) predictions
"""
predictions = None
if self.model is not None:
predictions = []
for i in range(self.n_models):
predictions.append(self.model[i].predict(X))
return predictions
def get_info(self):
info = self.__repr__()
if self.output_file is not None:
_, filename = os.path.split(self.output_file)
info = re.sub(r"output_file=(.\S+),", "output_file='{}',".format(filename), info)
return info
| 41.306195
| 99
| 0.587197
|
dfea85318cf11eb86eda0983e81edecc22d889bf
| 1,116
|
py
|
Python
|
setup.py
|
epfl-lts2/spikexplore
|
05c5ff1aa1cca3f77126c0de9a1b6b9360813afd
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
epfl-lts2/spikexplore
|
05c5ff1aa1cca3f77126c0de9a1b6b9360813afd
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
epfl-lts2/spikexplore
|
05c5ff1aa1cca3f77126c0de9a1b6b9360813afd
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
setup(
name='spikexplore',
version='0.0.11',
description='Graph exploration using inhomogeneous filtered diffusion',
url='https://github.com/epfl-lts2/spikexplore',
author='Nicolas Aspert, Benjamin Ricaud',
author_email='nicolas.aspert@epfl.ch, benjamin.ricaud@epfl.ch',
license='Apache license',
packages=['spikexplore', 'spikexplore.backends'],
scripts=[],
install_requires=['pandas',
'numpy',
'networkx',
'tqdm',
'twython',
'wikipedia-api',
'python-louvain',
'TwitterAPI'
],
python_requires='>=3.6',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.6'
],
)
| 33.818182
| 75
| 0.539427
|
4772f678a77321acbb602c7452561773bd46074a
| 1,763
|
py
|
Python
|
videointelligence/google/cloud/videointelligence_v1beta2/types.py
|
paoloburelli/google-cloud-python
|
e54b4318c2c8d9fc4ef0d78aa2a1eea874ace1cd
|
[
"Apache-2.0"
] | 1
|
2018-06-29T17:53:28.000Z
|
2018-06-29T17:53:28.000Z
|
videointelligence/google/cloud/videointelligence_v1beta2/types.py
|
paoloburelli/google-cloud-python
|
e54b4318c2c8d9fc4ef0d78aa2a1eea874ace1cd
|
[
"Apache-2.0"
] | null | null | null |
videointelligence/google/cloud/videointelligence_v1beta2/types.py
|
paoloburelli/google-cloud-python
|
e54b4318c2c8d9fc4ef0d78aa2a1eea874ace1cd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api import http_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
from google.rpc import status_pb2
from google.api_core.protobuf_helpers import get_messages
from google.cloud.videointelligence_v1beta2.proto import video_intelligence_pb2
_shared_modules = [
http_pb2,
operations_pb2,
any_pb2,
descriptor_pb2,
duration_pb2,
empty_pb2,
timestamp_pb2,
status_pb2,
]
_local_modules = [
video_intelligence_pb2,
]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.videointelligence_v1beta2.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
| 29.383333
| 79
| 0.766307
|
7d63b9efe5660d4792bf3580558dc444a5f48b8b
| 2,601
|
py
|
Python
|
snr_mix.py
|
aws-samples/asteroid-on-sagemaker
|
4613b10b8f579029038cc768cef620d25a09478d
|
[
"MIT-0"
] | null | null | null |
snr_mix.py
|
aws-samples/asteroid-on-sagemaker
|
4613b10b8f579029038cc768cef620d25a09478d
|
[
"MIT-0"
] | null | null | null |
snr_mix.py
|
aws-samples/asteroid-on-sagemaker
|
4613b10b8f579029038cc768cef620d25a09478d
|
[
"MIT-0"
] | null | null | null |
import pathlib
import numpy as np
# import soundfile as sf
import librosa  # used below by librosa.load
EPS = np.finfo(float).eps
np.random.seed(0)
def is_clipped(audio, clipping_threshold=0.99):
return any(abs(audio) > clipping_threshold)
def normalize(audio, target_level=-25):
'''Normalize the signal to the target level'''
rms = (audio ** 2).mean() ** 0.5
scalar = 10 ** (target_level / 20) / (rms+EPS)
audio = audio * scalar
return audio
def snr_mixer(clean, noise, snr, target_level=-25, clipping_threshold=0.99):
'''Function to mix clean speech and noise at various SNR levels'''
if len(clean) > len(noise):
noise = np.append(noise, np.zeros(len(clean)-len(noise)))
else:
clean = np.append(clean, np.zeros(len(noise)-len(clean)))
# Normalizing to -25 dB FS
clean = clean/(max(abs(clean))+EPS)
clean = normalize(clean, target_level)
rmsclean = (clean**2).mean()**0.5
noise = noise/(max(abs(noise))+EPS)
noise = normalize(noise, target_level)
rmsnoise = (noise**2).mean()**0.5
# Set the noise level for a given SNR
noisescalar = rmsclean / (10**(snr/20)) / (rmsnoise+EPS)
noisenewlevel = noise * noisescalar
# Mix noise and clean speech
noisyspeech = clean + noisenewlevel
# Randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value
# There is a chance of clipping that might happen with very less probability, which is not a major issue.
noisy_rms_level = np.random.randint(-35, -15)
rmsnoisy = (noisyspeech**2).mean()**0.5
scalarnoisy = 10 ** (noisy_rms_level / 20) / (rmsnoisy+EPS)
noisyspeech = noisyspeech * scalarnoisy
clean = clean * scalarnoisy
noisenewlevel = noisenewlevel * scalarnoisy
# Final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
if is_clipped(noisyspeech):
noisyspeech_maxamplevel = max(abs(noisyspeech))/(clipping_threshold-EPS)
noisyspeech = noisyspeech/noisyspeech_maxamplevel
clean = clean/noisyspeech_maxamplevel
noisenewlevel = noisenewlevel/noisyspeech_maxamplevel
noisy_rms_level = int(20*np.log10(scalarnoisy/noisyspeech_maxamplevel*(rmsnoisy+EPS)))
return clean, noisenewlevel, noisyspeech, noisy_rms_level
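# Minimal usage sketch (editor's addition; the signals are synthetic placeholders):
#
#   sr = 16000
#   t = np.arange(sr) / sr
#   clean = np.sin(2 * np.pi * 440 * t)   # 1 s, 440 Hz tone
#   noise = np.random.randn(sr)           # white noise
#   c, n, noisy, level = snr_mixer(clean, noise, snr=5)
#
# 'noisy' is clean + scaled noise at 5 dB SNR, renormalized to a random RMS
# level in [-35, -15] dBFS and rescaled if clipping would occur.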
cleanpath = pathlib.Path('clean').glob('*wav')
noisepath = pathlib.Path('noise').glob('*wav')
for clean, noise in zip(cleanpath, noisepath):
clean_wav,_ = librosa.load(clean, sr =16000)
noise_wav,_ = librosa.load(noise, sr = 16000)
    clean_wav[::2]  # NOTE: bare slice has no effect; a downsampling step was likely intended
print(clean, noise)
| 36.633803
| 114
| 0.688966
|
2cf2ec5b11427f2abe6dd5ce17368fa46901ed28
| 690
|
py
|
Python
|
defichain/ocean/__init__.py
|
eric-volz/defichainLibrary
|
458a8155bd595bf0fdf026651d95a5fe78dafc9c
|
[
"MIT"
] | 1
|
2022-03-29T15:15:17.000Z
|
2022-03-29T15:15:17.000Z
|
defichain/ocean/__init__.py
|
eric-volz/defichainPythonLibrary
|
b01dfddbb5e16d5b3dcda8e4014bcea8d2a1832b
|
[
"MIT"
] | null | null | null |
defichain/ocean/__init__.py
|
eric-volz/defichainPythonLibrary
|
b01dfddbb5e16d5b3dcda8e4014bcea8d2a1832b
|
[
"MIT"
] | 1
|
2022-03-24T12:25:44.000Z
|
2022-03-24T12:25:44.000Z
|
# Just to be present: Ocean
from defichain.ocean.modules.address import Address
from defichain.ocean.modules.blocks import Blocks
from defichain.ocean.modules.fee import Fee
from defichain.ocean.modules.loan import Loan
from defichain.ocean.modules.masternodes import Masternodes
from defichain.ocean.modules.oracles import Oracles
from defichain.ocean.modules.poolpairs import Poolpairs
from defichain.ocean.modules.prices import Prices
from defichain.ocean.modules.rawTx import RawTx
from defichain.ocean.modules.rpc import Rpc
from defichain.ocean.modules.stats import Stats
from defichain.ocean.modules.tokens import Tokens
from defichain.ocean.modules.transactions import Transactions
| 46
| 61
| 0.856522
|
89fa38879eebea779b1cb808e15de39e9be4db5c
| 12,341
|
py
|
Python
|
ReTreeingFuncs.py
|
JudoWill/ResearchNotebooks
|
35796f7ef07361eb2926c8770e623f4e9d48ab96
|
[
"MIT"
] | 1
|
2019-02-03T03:45:29.000Z
|
2019-02-03T03:45:29.000Z
|
ReTreeingFuncs.py
|
JudoWill/ResearchNotebooks
|
35796f7ef07361eb2926c8770e623f4e9d48ab96
|
[
"MIT"
] | null | null | null |
ReTreeingFuncs.py
|
JudoWill/ResearchNotebooks
|
35796f7ef07361eb2926c8770e623f4e9d48ab96
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from pandas import DataFrame, Series, merge, read_csv, MultiIndex, Index, concat
from subprocess import check_call
from tempfile import NamedTemporaryFile as NTF
import os, os.path
import numpy as np
from scipy.stats import ttest_ind
from itertools import groupby,combinations, islice
from operator import itemgetter
from Bio import Phylo
import networkx
import sys
import pickle
from random import shuffle
import csv, shlex, shutil
os.chdir('/home/will/HIVTropism//')
sys.path.append('/home/will/HIVReportGen/AnalysisCode/')
sys.path.append('/home/will/PySeqUtils/')
# <codecell>
from SeqProcessTools import read_pat_seq_data, load_training_seq_data, align_seq_data_frame
from GeneralSeqTools import fasta_reader, WebPSSM_V3_fasta, yield_chunks
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import glob
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from itertools import chain, product
# <codecell>
with open('trop_dict.pkl') as handle:
trop_dict = pickle.load(handle)
with open('wanted_data.pkl') as handle:
wanted_data = pickle.load(handle)
trans_dict = wanted_data['Name'].to_dict()
ntrop_dict = dict((trans_dict[key], val) for key, val in trop_dict.items())
trop_dict = ntrop_dict
wanted_data = wanted_data.set_index('Name')
# <codecell>
wanted_data['Tropism'][wanted_data['gp120-seq-align'].notnull()].value_counts()
# <codecell>
from GeneralSeqTools import fasta_writer
fourkb_cols = ['gp120-seq-align', 'Nef-seq-align', 'Vpr-seq-align',
'Tat-1-seq-align', 'Tat-2-seq-align', 'LTR-seq-align']
four = wanted_data[fourkb_cols].dropna()
wseqs = set()
with open('/home/will/Dropbox/HIVseqs/BensTropismLabels.csv') as handle:
for row in csv.DictReader(handle, delimiter=','):
wseqs.add(row['Patient ID'])
for col in four.columns:
found = set()
prot = col.rsplit('-', 2)[0]
fname = 'AlignForBenj/fourKB_%s.fasta' % prot
with open(fname, 'w') as handle:
for seq, name in zip(four[col], four.index):
if name in wseqs and name not in found:
fasta_writer(handle, [(name+'-'+trop_dict[name], ''.join(seq))])
found.add(name)
print prot, len(found)
# <codecell>
foukb_lanl = ['AB078005', 'AB221126', 'AB253432', 'AB286955',
'AB287365', 'AB287367', 'AB287368', 'AB287369',
'AB480695', 'AB485642', 'AB565479', 'AB565496',
'AB565497', 'AB565499', 'AB565500', 'AB565502',
'AB604946', 'AB604948', 'AB604950', 'AB604951',
'AB641836', 'AF003887', 'AF003888', 'AF004394',
'AF042100', 'AF042101', 'AF538302', 'AF538303',
'AF538307', 'AJ271445', 'AY173953', 'AY352275',
'AY835748', 'AY835754', 'AY835759', 'AY835762',
'AY835766', 'AY835769', 'AY835770', 'AY835774',
'AY835777', 'AY835779', 'AY970950', 'DQ007901',
'DQ007902', 'DQ007903', 'DQ295192', 'DQ295193',
'DQ295194', 'DQ295195', 'DQ358809', 'DQ837381',
'DQ990880', 'EF057102', 'EF363123', 'EF363124',
'EF363126', 'EF363127', 'GU647196', 'GU733713',
'JN944928', 'JN944936', 'JN944939', 'JN944940',
'JN944942', 'JN944943', 'JN944944', 'JN944946',
'JN944948', 'JQ316126', 'JQ316128', 'JQ316131',
'JQ316132', 'JQ316134', 'JQ316135', 'JQ341411',
'JQ429433', 'M17449', 'U34604']
# <codecell>
from collections import Counter
trops = []
for p in wanted_data['gp120-seq-align'].dropna().index:
trops.append(trop_dict.get(p, None))
Counter(trops)
# <codecell>
wseqs = set(wanted_data['gp120'].dropna().index)
cols = ['gp120-seq-align', 'Nef-seq-align', 'Vpr-seq-align',
'Tat-1-seq-align', 'Tat-2-seq-align', 'LTR-seq-align']
for col in cols:
found = set()
prot = col.rsplit('-', 2)[0]
fname = 'AlignForBenj/has_env_%s.fasta' % prot
df = wanted_data[col].dropna()
with open(fname, 'w') as handle:
for seq, name in zip(df, df.index):
if name in wseqs and name not in found:
fasta_writer(handle, [(name+'-'+trop_dict[name], ''.join(seq))])
found.add(name)
# <codecell>
def yield_regions(trop_dict):
regions = ['LTR-seq-align',
'gp41-seq-align',
'gp120-seq-align',
'Nef-seq-align',
'Vpr-seq-align',
'Tat-1-seq-align',
'Tat-2-seq-align',
]
tail_cols = ['gp120', 'gp41', 'Nef', 'Vpr',
'Tat-1', 'Tat-2', 'LTR']
fourkb_cols = ['gp120', 'Nef', 'Vpr',
'Tat-1', 'Tat-2', 'LTR']
groups = [('fourkb', wanted_data[fourkb_cols].dropna().index),
('full_env', wanted_data[['gp120', 'gp41']].dropna().index),
('full_tail', wanted_data[tail_cols].dropna().index),
]
subs = ['SubB']
win_sizes = [5, 10, 15, 20, 30, 35]
for region, (gname, ind), sub in product(regions, groups, subs):
prot = region.split('-')[0]
gwanted = wanted_data.ix[ind]
mask = gwanted['Sub'] == sub
seq_ser = gwanted[mask][region].dropna()
print prot, gname, sub, len(seq_ser)
seqs = [(name, ''.join(list(seq))) for name, seq in zip(seq_ser.index, seq_ser.values)]
seq_len = len(seqs[0][1])
for win, start in product(win_sizes, range(seq_len)):
stop = start+win
if stop < seq_len:
nseqs = [(name, seq[start:stop]) for name, seq in seqs]
yield gname, sub, prot, start, win, nseqs, trop_dict
# <codecell>
import dendropy
# <codecell>
from Bio.Alphabet import generic_dna, generic_protein
import TreeingTools
def calculate_region(arg):
gname, sub, prot, start, win, nseqs, trop_dict = arg
treename = 'quicktrees/%s-%s-%s-%i-%i.tree' % (gname, sub, prot, start, win)
matfname = 'quicktrees/%s-%s-%s-%i-%i.pkl' % (gname, sub, prot, start, win)
if os.path.exists(treename):
#benj_res = 'Already Processed'
#return gname, sub, prot, win, start, benj_res
with open(matfname) as handle:
dmat = pickle.load(handle)
with open(treename) as handle:
tree = dendropy.Tree.get_from_stream(handle, 'newick')
else:
is_aa = prot != 'LTR'
alphabet = generic_protein if is_aa else generic_dna
try:
tree, dmat = TreeingTools.phylip_tree_collapse_unique(nseqs, alphabet=alphabet)
except ValueError:
benj_res = 'Too few unique sequences to process'
return gname, sub, prot, win, start, benj_res
except:
benj_res = 'uncaught exception in dist-mat'
return gname, sub, prot, win, start, benj_res
print 'writing'
with open(matfname, 'w') as handle:
pickle.dump(dmat, handle)
with open(treename, 'w') as handle:
tree.write_to_stream(handle, 'newick')
try:
benj_res = TreeingTools.check_distance_pvals(dmat, trop_dict, nreps = 50)
except AssertionError:
benj_res = 'too few groups'
return gname, sub, prot, win, start, benj_res
except:
benj_res = 'uncaught exception'
return gname, sub, prot, win, start, benj_res
try:
out = TreeingTools.evaluate_association_index(tree, trop_dict)
benj_res['AI'], benj_res['AI-pval'], benj_res['AI-null'] = out
except:
benj_res['AI'], benj_res['AI-pval'], benj_res['AI-null'] = ('error', 'error', 'error')
return gname, sub, prot, win, start, benj_res
# <codecell>
def quick_yield_regions(trop_dict):
nseqs = wanted_data.ix[foukb_lanl]['gp120-seq-align']
aseqs = wanted_data['gp120-seq-align'].dropna()
regions = [('C1', 0, 101),
('V1', 101, 127),
('V2', 127, 166),
('C2', 166, 266),
('V3', 266, 301),
('C3', 301, 355),
('V4', 355, 388),
('C4', 388, 430),
('V5', 430, 439),
('C5', 439, 462)]
for name, start, stop in regions:
seqs = [(n, ''.join(s[start:stop])) for n, s in zip(nseqs.index, nseqs.values)]
yield '44kb', 'gp120', name, start, stop, seqs, trop_dict
seqs = [(n, ''.join(s[start:stop])) for n, s in zip(aseqs.index, aseqs.values)]
yield 'All', 'gp120', name, start, stop, seqs, trop_dict
# <codecell>
from itertools import groupby, imap
from types import StringType
benj_fields = ['GroupName',
'Subtype',
'Prot',
'Start',
'WinSize',
'Group2Mean',
'Group2Std',
'Group2Name',
'Group1Mean',
'Group1Std',
'RawPval',
'AdjPval',
'Group1Name',
'AI',
'AI-pval',
'AI-null']
fname = 'gp120_new_BenjRes.tsv'
handle = open(fname, 'w')
benj_writer = csv.DictWriter(handle, benj_fields, delimiter = '\t')
benj_writer.writeheader()
results = imap(calculate_region, quick_yield_regions(trop_dict))
for gname, sub, prot, win, start, benj_res in results:
#print prot, start, win
tdict = {
'Prot':prot,
'Start':start,
'WinSize':win,
'GroupName':gname,
'Subtype':sub,
}
if type(benj_res) is StringType:
if (benj_res == 'Already Processed') or benj_res.startswith('Too few unique sequences'):
continue
print benj_res, prot, start, win
else:
benj_res.update(tdict)
benj_writer.writerow(benj_res)
handle.close()
# <codecell>
from itertools import groupby, imap
from operator import itemgetter
from concurrent.futures import ThreadPoolExecutor
benj_fields = ['GroupName',
'Subtype',
'Prot',
'Start',
'WinSize',
'Group2Mean',
'Group2Std',
'Group2Name',
'Group1Mean',
'Group1Std',
'RawPval',
'AdjPval',
'Group1Name',
'AI',
'AI-pval',
'AI-null']
fname = 'more_phylip_BenjRes.tsv'
benj_writer = csv.DictWriter(open(fname, 'w'), benj_fields, delimiter = '\t')
benj_writer.writeheader()
multi = True
print 'Starting multiprocessing!'
if multi:
pool = ProcessPoolExecutor(max_workers = 30)
results = pool.map(calculate_region, yield_regions(trop_dict))
else:
results = imap(calculate_region, islice(yield_regions(trop_dict), 0,35))
for gname, sub, prot, win, start, benj_res in results:
#print prot, start, win
tdict = {
'Prot':prot,
'Start':start,
'WinSize':win,
'GroupName':gname,
'Subtype':sub,
}
if type(benj_res) is StringType:
if (benj_res == 'Already Processed') or benj_res.startswith('Too few unique sequences'):
continue
print benj_res, prot, start, win
else:
benj_res.update(tdict)
benj_writer.writerow(benj_res)
if multi:
pool.shutdown()
# <codecell>
# <codecell>
#with open('allgp120.fasta', 'w') as handle:
tres = []
for key, row in wanted_data[['gp120-seq-align', 'Tropism']].dropna().iterrows():
oname = key+'-'+row['Tropism']
tres.append((oname, ''.join(row['gp120-seq-align'])))
# <codecell>
tree, dmat = TreeingTools.phylip_tree_collapse_unique(tres, alphabet=generic_protein)
# <codecell>
with open('gp120tree.nexus', 'w') as handle:
tree.write_to_stream(handle, 'nexus')
# <codecell>
import networkx
with open('gp120tree.dot') as handle:
new_tree = networkx.read_dot(handle)
# <codecell>
pos = networkx.spring_layout(new_tree, dim=100)
#networkx.draw_spring(new_tree,
# with_labels = False,
# dim = 10)
# <codecell>
pos.items()[-10:]
# <codecell>
| 30.775561
| 96
| 0.578235
|
b57bd152c99c2f1dc2584c2e2e6268b58b86bb27
| 7,937
|
py
|
Python
|
src/vector_quantizer_ema.py
|
VictorZuanazzi/VQ-VAE-Images
|
cf39dccbc1cd233426fd0145ce5945574ec431d1
|
[
"MIT"
] | 2
|
2019-11-23T11:46:31.000Z
|
2020-02-13T15:19:55.000Z
|
src/vector_quantizer_ema.py
|
VictorZuanazzi/VQ-VAE-Images
|
cf39dccbc1cd233426fd0145ce5945574ec431d1
|
[
"MIT"
] | null | null | null |
src/vector_quantizer_ema.py
|
VictorZuanazzi/VQ-VAE-Images
|
cf39dccbc1cd233426fd0145ce5945574ec431d1
|
[
"MIT"
] | null | null | null |
#####################################################################################
# MIT License #
# #
# Copyright (C) 2019 Charly Lamothe #
# Copyright (C) 2018 Zalando Research #
# #
# This file is part of VQ-VAE-images. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
import torch
import torch.nn as nn
class VectorQuantizerEMA(nn.Module):
"""
    Inspired by the Sonnet implementation of VQ-VAE (https://arxiv.org/abs/1711.00937)
    at https://github.com/deepmind/sonnet/blob/master/sonnet/python/modules/nets/vqvae.py and
    by the PyTorch implementation from zalandoresearch at https://github.com/zalandoresearch/pytorch-vq-vae/blob/master/vq-vae.ipynb.
Implements a slightly modified version of the algorithm presented in
'Neural Discrete Representation Learning' by van den Oord et al.
https://arxiv.org/abs/1711.00937
The difference between VectorQuantizerEMA and VectorQuantizer is that
this module uses exponential moving averages to update the embedding vectors
instead of an auxiliary loss. This has the advantage that the embedding
updates are independent of the choice of optimizer (SGD, RMSProp, Adam, K-Fac,
...) used for the encoder, decoder and other parts of the architecture. For
most experiments the EMA version trains faster than the non-EMA version.
Input any tensor to be quantized. Last dimension will be used as space in
which to quantize. All other dimensions will be flattened and will be seen
as different examples to quantize.
The output tensor will have the same shape as the input.
For example a tensor with shape [16, 32, 32, 64] will be reshaped into
[16384, 64] and all 16384 vectors (each of 64 dimensions) will be quantized
independently.
Args:
embedding_dim: integer representing the dimensionality of the tensors in the
quantized space. Inputs to the modules must be in this format as well.
num_embeddings: integer, the number of vectors in the quantized space.
commitment_cost: scalar which controls the weighting of the loss terms (see
equation 4 in the paper).
decay: float, decay for the moving averages.
epsilon: small float constant to avoid numerical instability.
"""
def __init__(self, device, num_embeddings, embedding_dim, commitment_cost, decay, epsilon=1e-5):
super(VectorQuantizerEMA, self).__init__()
self._device = device
self._embedding_dim = embedding_dim
self._num_embeddings = num_embeddings
self._embedding = nn.Embedding(self._num_embeddings, self._embedding_dim)
self._embedding.weight.data.normal_()
self._commitment_cost = commitment_cost
self.register_buffer('_ema_cluster_size', torch.zeros(num_embeddings))
self._ema_w = nn.Parameter(torch.Tensor(num_embeddings, self._embedding_dim))
self._ema_w.data.normal_()
self._decay = decay
self._epsilon = epsilon
def forward(self, inputs):
"""
Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to embedding_dim. All other
leading dimensions will be flattened and treated as a large batch.
Returns:
dict containing the following keys and values:
quantize: Tensor containing the quantized version of the input.
loss: Tensor containing the loss to optimize.
perplexity: Tensor containing the perplexity of the encodings.
encodings: Tensor containing the discrete encodings, ie which element
of the quantized space each input element was mapped to.
encoding_indices: Tensor containing the discrete encoding indices, ie
which element of the quantized space each input element was mapped to.
"""
# Convert inputs from BCHW -> BHWC
inputs = inputs.permute(0, 2, 3, 1).contiguous()
input_shape = inputs.shape
# Flatten input
flat_input = inputs.view(-1, self._embedding_dim)
# Calculate distances
distances = (torch.sum(flat_input ** 2, dim=1, keepdim=True)
+ torch.sum(self._embedding.weight ** 2, dim=1)
- 2 * torch.matmul(flat_input, self._embedding.weight.t()))
# Encoding
encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1)
encodings = torch.zeros(encoding_indices.shape[0], self._num_embeddings).to(self._device)
encodings.scatter_(1, encoding_indices, 1)
# Use EMA to update the embedding vectors
if self.training:
self._ema_cluster_size = self._ema_cluster_size * self._decay + \
(1 - self._decay) * torch.sum(encodings, 0)
n = torch.sum(self._ema_cluster_size.data)
self._ema_cluster_size = (
(self._ema_cluster_size + self._epsilon)
/ (n + self._num_embeddings * self._epsilon) * n
)
dw = torch.matmul(encodings.t(), flat_input)
self._ema_w = nn.Parameter(self._ema_w * self._decay + (1 - self._decay) * dw)
self._embedding.weight = nn.Parameter(self._ema_w / self._ema_cluster_size.unsqueeze(1))
# Quantize and unflatten
quantized = torch.matmul(encodings, self._embedding.weight).view(input_shape)
# Loss
e_latent_loss = torch.mean((quantized.detach() - inputs) ** 2)
loss = self._commitment_cost * e_latent_loss
quantized = inputs + (quantized - inputs).detach()
avg_probs = torch.mean(encodings, dim=0)
perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
# Convert quantized from BHWC -> BCHW
return loss, quantized.permute(0, 3, 1, 2).contiguous(), perplexity, encodings
@property
def embedding(self):
return self._embedding
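# Minimal usage sketch (editor's addition; shapes and hyperparameters are
# illustrative only):
#
#   device = torch.device('cpu')
#   vq = VectorQuantizerEMA(device, num_embeddings=512, embedding_dim=64,
#                           commitment_cost=0.25, decay=0.99)
#   x = torch.randn(16, 64, 32, 32)       # BCHW, C must equal embedding_dim
#   loss, quantized, perplexity, encodings = vq(x)
#   assert quantized.shape == x.shape     # output keeps the input shape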
| 52.217105
| 132
| 0.597077
|
1a908899a742f564a44112682fa2b97caa2491e6
| 6,797
|
py
|
Python
|
pytorch_lightning/loggers/mlflow.py
|
Lucien-cs/pytorch-lightning
|
bf8a395a4004fa7f2ea804b6de8cececc609f9aa
|
[
"Apache-2.0"
] | 1
|
2021-04-06T08:47:01.000Z
|
2021-04-06T08:47:01.000Z
|
pytorch_lightning/loggers/mlflow.py
|
Lucien-cs/pytorch-lightning
|
bf8a395a4004fa7f2ea804b6de8cececc609f9aa
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/loggers/mlflow.py
|
Lucien-cs/pytorch-lightning
|
bf8a395a4004fa7f2ea804b6de8cececc609f9aa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MLflow Logger
-------------
"""
import re
import warnings
from argparse import Namespace
from time import time
from typing import Any, Dict, Optional, Union
try:
import mlflow
from mlflow.tracking import MlflowClient
except ModuleNotFoundError: # pragma: no-cover
mlflow = None
MlflowClient = None
from pytorch_lightning import _logger as log
from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
from pytorch_lightning.utilities import rank_zero_only
LOCAL_FILE_URI_PREFIX = "file:"
class MLFlowLogger(LightningLoggerBase):
"""
Log using `MLflow <https://mlflow.org>`_.
Install it with pip:
.. code-block:: bash
pip install mlflow
.. code-block:: python
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import MLFlowLogger
mlf_logger = MLFlowLogger(
experiment_name="default",
tracking_uri="file:./ml-runs"
)
trainer = Trainer(logger=mlf_logger)
Use the logger anywhere in your :class:`~pytorch_lightning.core.lightning.LightningModule` as follows:
.. code-block:: python
from pytorch_lightning import LightningModule
class LitModel(LightningModule):
def training_step(self, batch, batch_idx):
# example
self.logger.experiment.whatever_ml_flow_supports(...)
def any_lightning_module_function_or_hook(self):
self.logger.experiment.whatever_ml_flow_supports(...)
Args:
experiment_name: The name of the experiment
tracking_uri: Address of local or remote tracking server.
If not provided, defaults to `file:<save_dir>`.
tags: A dictionary tags for the experiment.
save_dir: A path to a local directory where the MLflow runs get saved.
Defaults to `./mlflow` if `tracking_uri` is not provided.
Has no effect if `tracking_uri` is provided.
prefix: A string to put at the beginning of metric keys.
"""
LOGGER_JOIN_CHAR = '-'
def __init__(
self,
experiment_name: str = 'default',
tracking_uri: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
save_dir: Optional[str] = './mlruns',
prefix: str = '',
):
if mlflow is None:
raise ImportError('You want to use `mlflow` logger which is not installed yet,'
' install it with `pip install mlflow`.')
super().__init__()
if not tracking_uri:
tracking_uri = f'{LOCAL_FILE_URI_PREFIX}{save_dir}'
self._experiment_name = experiment_name
self._experiment_id = None
self._tracking_uri = tracking_uri
self._run_id = None
self.tags = tags
self._prefix = prefix
self._mlflow_client = MlflowClient(tracking_uri)
@property
@rank_zero_experiment
def experiment(self) -> MlflowClient:
r"""
Actual MLflow object. To use MLflow features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
self.logger.experiment.some_mlflow_function()
"""
if self._experiment_id is None:
expt = self._mlflow_client.get_experiment_by_name(self._experiment_name)
if expt is not None:
self._experiment_id = expt.experiment_id
else:
log.warning(f'Experiment with name {self._experiment_name} not found. Creating it.')
self._experiment_id = self._mlflow_client.create_experiment(name=self._experiment_name)
if self._run_id is None:
run = self._mlflow_client.create_run(experiment_id=self._experiment_id, tags=self.tags)
self._run_id = run.info.run_id
return self._mlflow_client
@property
def run_id(self):
# create the experiment if it does not exist to get the run id
_ = self.experiment
return self._run_id
@property
def experiment_id(self):
# create the experiment if it does not exist to get the experiment id
_ = self.experiment
return self._experiment_id
@rank_zero_only
def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
params = self._convert_params(params)
params = self._flatten_dict(params)
for k, v in params.items():
self.experiment.log_param(self.run_id, k, v)
@rank_zero_only
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'
metrics = self._add_prefix(metrics)
timestamp_ms = int(time() * 1000)
for k, v in metrics.items():
if isinstance(v, str):
log.warning(f'Discarding metric with string value {k}={v}.')
continue
new_k = re.sub("[^a-zA-Z0-9_/. -]+", "", k)
if k != new_k:
warnings.warn(("MLFlow only allows '_', '/', '.' and ' ' special characters in metric name.\n",
f"Replacing {k} with {new_k}."))
k = new_k
self.experiment.log_metric(self.run_id, k, v, timestamp_ms, step)
@rank_zero_only
def finalize(self, status: str = 'FINISHED') -> None:
super().finalize(status)
status = 'FINISHED' if status == 'success' else status
if self.experiment.get_run(self.run_id):
self.experiment.set_terminated(self.run_id, status)
@property
def save_dir(self) -> Optional[str]:
"""
The root file directory in which MLflow experiments are saved.
Return:
Local path to the root experiment directory if the tracking uri is local.
Otherwhise returns `None`.
"""
if self._tracking_uri.startswith(LOCAL_FILE_URI_PREFIX):
            # note: str.lstrip strips a character set, not a prefix, so slice instead
            return self._tracking_uri[len(LOCAL_FILE_URI_PREFIX):]
@property
def name(self) -> str:
return self.experiment_id
@property
def version(self) -> str:
return self.run_id
| 33.985
| 111
| 0.643666
|
f71a062d2b5783e4fd92b44153a453460f29e699
| 53,902
|
py
|
Python
|
Lib/http/client.py
|
treebee/cpython
|
e152169da95b52fa41931572bc90857253c4a5dd
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2019-05-29T18:22:03.000Z
|
2019-05-29T18:22:03.000Z
|
Lib/http/client.py
|
treebee/cpython
|
e152169da95b52fa41931572bc90857253c4a5dd
|
[
"CNRI-Python-GPL-Compatible"
] | 4
|
2022-03-30T01:50:22.000Z
|
2022-03-30T01:50:28.000Z
|
Lib/http/client.py
|
treebee/cpython
|
e152169da95b52fa41931572bc90857253c4a5dd
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
r"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|\_____________________________
| | getresponse() raises
| response = getresponse() | ConnectionError
v v
Unread-response Idle
[Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
import email.parser
import email.message
import http
import io
import re
import socket
import collections.abc
from urllib.parse import urlsplit
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "LineTooLong", "RemoteDisconnected", "error",
"responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# hack to maintain backwards compatibility
globals().update(http.HTTPStatus.__members__)
# another hack to maintain backwards compatibility
# Mapping status codes to official W3C names
responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()}
# maximal line length when calling readline().
_MAXLINE = 65536
_MAXHEADERS = 100
# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
#
# VCHAR = %x21-7E
# obs-text = %x80-FF
# header-field = field-name ":" OWS field-value OWS
# field-name = token
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text
#
# obs-fold = CRLF 1*( SP / HTAB )
# ; obsolete line folding
# ; see Section 3.2.4
# token = 1*tchar
#
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
#
# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
# the patterns for both name and value are more lenient than RFC
# definitions to allow for backwards compatibility
_is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*').fullmatch
_is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search
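# Hedged examples of what these lenient patterns accept and reject
# (illustrative only, not part of the original module):
#
#     _is_legal_header_name(b"X-Custom")     # match: no leading ':' or whitespace
#     _is_legal_header_name(b"Bad:Name")     # None:  ':' inside the field-name
#     _is_illegal_header_value(b"a\r\nb")    # match: bare CRLF (header injection)
#     _is_illegal_header_value(b"a\r\n\tb")  # None:  obs-fold continuation is legal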
# These characters are not allowed within HTTP URL paths.
# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
# Prevents CVE-2019-9740. Includes control characters such as \r\n.
# We don't restrict chars above \x7f as putrequest() limits us to ASCII.
_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]')
# Arguably only these _should_ be allowed:
# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
# We are more lenient for assumed real world compatibility purposes.
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
def _encode(data, name='data'):
"""Call data.encode("latin-1") but show a better error message."""
try:
return data.encode("latin-1")
except UnicodeEncodeError as err:
raise UnicodeEncodeError(
err.encoding,
err.object,
err.start,
err.end,
"%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
"if you want to send it encoded in UTF-8." %
(name.title(), data[err.start:err.end], name)) from None
class HTTPMessage(email.message.Message):
# XXX The only usage of this method is in
# http.server.CGIHTTPRequestHandler. Maybe move the code there so
# that it doesn't need to be part of the public API. The API has
# never been defined so this could cause backwards compatibility
# issues.
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.
"""
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.keys():
if line[:n].lower() == name:
hit = 1
elif not line[:1].isspace():
hit = 0
if hit:
lst.append(line)
return lst
def parse_headers(fp, _class=HTTPMessage):
"""Parses only RFC2822 headers from a file pointer.
email Parser wants to see strings rather than bytes.
But a TextIOWrapper around self.rfile would buffer too many bytes
from the stream, bytes which we later need to read as bytes.
So we read the correct bytes here, as bytes, for email Parser
to parse.
"""
headers = []
while True:
line = fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
headers.append(line)
if len(headers) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if line in (b'\r\n', b'\n', b''):
break
hstring = b''.join(headers).decode('iso-8859-1')
return email.parser.Parser(_class=_class).parsestr(hstring)
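# A small sketch of driving parse_headers() directly (illustrative only):
#
#     import io
#     msg = parse_headers(io.BytesIO(b"Host: example.com\r\nX-N: 1\r\n\r\n"))
#     msg["Host"]    # -> 'example.com'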
class HTTPResponse(io.BufferedIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, method=None, url=None):
# If the response includes a content-length header, we need to
# make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. This will
# happen if a self.fp.read() is done (without a size) whether
# self.fp is buffered or not. So, no self.fp.read() by
# clients unless they know what they are doing.
self.fp = sock.makefile("rb")
self.debuglevel = debuglevel
self._method = method
# The HTTPResponse object is returned via urllib. The clients
# of http and urllib expect different attributes for the
# headers. headers is used here and supports urllib. msg is
# provided as a backwards compatibility layer for http
# clients.
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
if len(line) > _MAXLINE:
raise LineTooLong("status line")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise RemoteDisconnected("Remote end closed connection without"
" response")
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail.
version = ""
if not version.startswith("HTTP/"):
self._close_conn()
raise BadStatusLine(line)
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.headers is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.code = self.status = status
self.reason = reason.strip()
if version in ("HTTP/1.0", "HTTP/0.9"):
# Some servers might still return "0.9", treat it as 1.0 anyway
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
else:
raise UnknownProtocol(version)
self.headers = self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr, val in self.headers.items():
print("header:", hdr + ":", val)
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = True
self.chunk_left = None
else:
self.chunked = False
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.headers.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
def close(self):
try:
super().close() # set "closed" flag
finally:
if self.fp:
self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
def flush(self):
super().flush()
if self.fp:
self.fp.flush()
def readable(self):
"""Always returns True"""
return True
# End of "raw stream" methods
def isclosed(self):
"""True if the connection is closed."""
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return b""
if self._method == "HEAD":
self._close_conn()
return b""
if amt is not None:
# Amount is given, implement using readinto
b = bytearray(amt)
n = self.readinto(b)
return memoryview(b)[:n].tobytes()
else:
# Amount is not given (unbounded read) so we must check self.length
# and self.chunked
if self.chunked:
return self._readall_chunked()
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self._close_conn()
raise
self.length = 0
self._close_conn() # we read everything
return s
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b and return the number
of bytes read.
"""
if self.fp is None:
return 0
if self._method == "HEAD":
self._close_conn()
return 0
if self.chunked:
return self._readinto_chunked(b)
if self.length is not None:
if len(b) > self.length:
# clip the read to the "end of response"
b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
n = self.fp.readinto(b)
if not n and b:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self._close_conn()
elif self.length is not None:
self.length -= n
if not self.length:
self._close_conn()
return n
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
def _get_chunk_left(self):
# return self.chunk_left, reading a new chunk if necessary.
# chunk_left == 0: at the end of the current chunk, need to close it
# chunk_left == None: No current chunk, should read next.
# This function returns non-zero or None if the last chunk has
# been read.
chunk_left = self.chunk_left
if not chunk_left: # Can be 0 or None
if chunk_left is not None:
# We are at the end of chunk, discard chunk end
self._safe_read(2) # toss the CRLF at the end of the chunk
try:
chunk_left = self._read_next_chunk_size()
except ValueError:
raise IncompleteRead(b'')
if chunk_left == 0:
# last chunk: 1*("0") [ chunk-extension ] CRLF
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
chunk_left = None
self.chunk_left = chunk_left
return chunk_left
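# For reference, the chunked wire format this reader walks (a hedged sketch,
# not part of the original module):
#
#     b"4\r\nWiki\r\n"     # chunk-size (hex) CRLF chunk-data CRLF
#     b"5\r\npedia\r\n"
#     b"0\r\n\r\n"         # last chunk, then (empty) trailer and final CRLF
#
# _get_chunk_left() yields 4, then 5, then None once the zero-size chunk and
# trailer have been consumed and the connection-"file" has been closed.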
def _readall_chunked(self):
assert self.chunked != _UNKNOWN
value = []
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
break
value.append(self._safe_read(chunk_left))
self.chunk_left = 0
return b''.join(value)
except IncompleteRead:
raise IncompleteRead(b''.join(value))
def _readinto_chunked(self, b):
assert self.chunked != _UNKNOWN
total_bytes = 0
mvb = memoryview(b)
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
return total_bytes
if len(mvb) <= chunk_left:
n = self._safe_readinto(mvb)
self.chunk_left = chunk_left - n
return total_bytes + n
temp_mvb = mvb[:chunk_left]
n = self._safe_readinto(temp_mvb)
mvb = mvb[n:]
total_bytes += n
self.chunk_left = 0
except IncompleteRead:
raise IncompleteRead(bytes(b[0:total_bytes]))
def _safe_read(self, amt):
"""Read the number of bytes requested.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
data = self.fp.read(amt)
if len(data) < amt:
raise IncompleteRead(data, amt-len(data))
return data
def _safe_readinto(self, b):
"""Same as _safe_read, but for reading into a buffer."""
amt = len(b)
n = self.fp.readinto(b)
if n < amt:
raise IncompleteRead(bytes(b[:n]), amt-n)
return n
def read1(self, n=-1):
"""Read with at most one underlying system call. If at least one
byte is buffered, return that instead.
"""
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._read1_chunked(n)
if self.length is not None and (n < 0 or n > self.length):
n = self.length
result = self.fp.read1(n)
if not result and n:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def peek(self, n=-1):
# Having this enables IOBase.readline() to read more than one
# byte at a time
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._peek_chunked(n)
return self.fp.peek(n)
def readline(self, limit=-1):
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
# Fallback to IOBase readline which uses peek() and read()
return super().readline(limit)
if self.length is not None and (limit < 0 or limit > self.length):
limit = self.length
result = self.fp.readline(limit)
if not result and limit:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def _read1_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
chunk_left = self._get_chunk_left()
if chunk_left is None or n == 0:
return b''
if not (0 <= n <= chunk_left):
n = chunk_left # if n is negative or larger than chunk_left
read = self.fp.read1(n)
self.chunk_left -= len(read)
if not read:
raise IncompleteRead(b"")
return read
def _peek_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
try:
chunk_left = self._get_chunk_left()
except IncompleteRead:
return b'' # peek doesn't worry about protocol
if chunk_left is None:
return b'' # eof
# peek is allowed to return more than requested. Just request the
# entire chunk, and truncate what we get.
return self.fp.peek(chunk_left)[:chunk_left]
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
'''Returns the value of the header matching *name*.
If there are multiple matching headers, the values are
combined into a single string separated by commas and spaces.
If no matching header is found, returns *default* or None if
the *default* is not specified.
If the headers are unknown, raises http.client.ResponseNotReady.
'''
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
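# For example, two "Set-Cookie: a=1" / "Set-Cookie: b=2" response headers give
# getheader("Set-Cookie") == "a=1, b=2", while getheaders() below keeps them
# as separate (name, value) tuples.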
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.headers is None:
raise ResponseNotReady()
return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
def __iter__(self):
return self
# For compatibility with old-style urllib responses.
def info(self):
'''Returns an instance of the class mimetools.Message containing
meta-information associated with the URL.
When the method is HTTP, these headers are those returned by
the server at the head of the retrieved HTML page (including
Content-Length and Content-Type).
When the method is FTP, a Content-Length header will be
present if (as is now usual) the server passed back a file
length in response to the FTP retrieval request. A
Content-Type header will be present if the MIME type can be
guessed.
When the method is local-file, returned headers will include
a Date representing the file's last-modified time, a
Content-Length giving file size, and a Content-Type
containing a guess at the file's type. See also the
description of the mimetools module.
'''
return self.headers
def geturl(self):
'''Return the real URL of the page.
In some cases, the HTTP server redirects a client to another
URL. The urlopen() function handles this transparently, but in
some cases the caller needs to know which URL the client was
redirected to. The geturl() method can be used to get at this
redirected URL.
'''
return self.url
def getcode(self):
'''Return the HTTP status code that was sent with the response,
or None if the URL is not an HTTP URL.
'''
return self.status
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
@staticmethod
def _is_textIO(stream):
"""Test whether a file-like object is a text or a binary stream.
"""
return isinstance(stream, io.TextIOBase)
@staticmethod
def _get_content_length(body, method):
"""Get the content-length based on the body.
If the body is None, we set Content-Length: 0 for methods that expect
a body (RFC 7230, Section 3.3.2). We also set the Content-Length for
any method if the body is a str or bytes-like object and not a file.
"""
if body is None:
# do an explicit check for not None here to distinguish
# between unset and set but empty
if method.upper() in _METHODS_EXPECTING_BODY:
return 0
else:
return None
if hasattr(body, 'read'):
# file-like object.
return None
try:
# does it implement the buffer protocol (bytes, bytearray, array)?
mv = memoryview(body)
return mv.nbytes
except TypeError:
pass
if isinstance(body, str):
return len(body)
return None
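# Hedged illustrations of the rules above (not part of the original module):
#
#     HTTPConnection._get_content_length(None, "POST")    # -> 0 (body expected)
#     HTTPConnection._get_content_length(None, "GET")     # -> None
#     HTTPConnection._get_content_length(b"abc", "PUT")   # -> 3 (buffer protocol)
#     HTTPConnection._get_content_length("abc", "PUT")    # -> 3 (str length)
#     # file-like bodies return None and are later sent chunked:
#     HTTPConnection._get_content_length(open("f.bin", "rb"), "PUT")  # -> None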
def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, blocksize=8192):
self.timeout = timeout
self.source_address = source_address
self.blocksize = blocksize
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
(self.host, self.port) = self._get_hostport(host, port)
# This is stored as an instance variable to allow unit
# tests to replace it with a suitable mockup
self._create_connection = socket.create_connection
def set_tunnel(self, host, port=None, headers=None):
"""Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP CONNECT tunneling, the host passed to the
constructor is used as a proxy server that relays all communication to
the endpoint passed to `set_tunnel`. This is done by sending an HTTP
CONNECT request to the proxy server when the connection is established.
This method must be called before the HTTP connection has been
established.
The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request.
"""
if self.sock:
raise RuntimeError("Can't set up tunnel for established connection")
self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
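# A minimal tunnelling sketch (hedged; host names are placeholders):
#
#     conn = HTTPSConnection("proxy.example", 3128)  # TCP connection to proxy
#     conn.set_tunnel("www.example.com", 443)        # CONNECT target
#     conn.request("GET", "/")                       # sent through the tunnel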
def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return (host, port)
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
self._tunnel_port)
connect_bytes = connect_str.encode("ascii")
self.send(connect_bytes)
for header, value in self._tunnel_headers.items():
header_str = "%s: %s\r\n" % (header, value)
header_bytes = header_str.encode("latin-1")
self.send(header_bytes)
self.send(b'\r\n')
response = self.response_class(self.sock, method=self._method)
(version, code, message) = response._read_status()
if code != http.HTTPStatus.OK:
self.close()
raise OSError("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending a trailer
break
if line in (b'\r\n', b'\n', b''):
break
if self.debuglevel > 0:
print('header:', line.decode())
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = self._create_connection(
(self.host,self.port), self.timeout, self.source_address)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
self.__state = _CS_IDLE
try:
sock = self.sock
if sock:
self.sock = None
sock.close() # close it manually... there may be other refs
finally:
response = self.__response
if response:
self.__response = None
response.close()
def send(self, data):
"""Send `data' to the server.
``data`` can be a string object, a bytes object, an array object, a
file-like object that supports a .read() method, or an iterable object.
"""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print("send:", repr(data))
if hasattr(data, "read") :
if self.debuglevel > 0:
print("sendIng a read()able")
encode = self._is_textIO(data)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
while 1:
datablock = data.read(self.blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
self.sock.sendall(datablock)
return
try:
self.sock.sendall(data)
except TypeError:
if isinstance(data, collections.abc.Iterable):
for d in data:
self.sock.sendall(d)
else:
raise TypeError("data should be a bytes-like object "
"or an iterable, got %r" % type(data))
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _read_readable(self, readable):
if self.debuglevel > 0:
print("sendIng a read()able")
encode = self._is_textIO(readable)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
while True:
datablock = readable.read(self.blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
yield datablock
def _send_output(self, message_body=None, encode_chunked=False):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend((b"", b""))
msg = b"\r\n".join(self._buffer)
del self._buffer[:]
self.send(msg)
if message_body is not None:
# create a consistent interface to message_body
if hasattr(message_body, 'read'):
# Let file-like take precedence over byte-like. This
# is needed to allow the current position of mmap'ed
# files to be taken into account.
chunks = self._read_readable(message_body)
else:
try:
# this is solely to check to see if message_body
# implements the buffer API. it /would/ be easier
# to capture if PyObject_CheckBuffer was exposed
# to Python.
memoryview(message_body)
except TypeError:
try:
chunks = iter(message_body)
except TypeError:
raise TypeError("message_body should be a bytes-like "
"object or an iterable, got %r"
% type(message_body))
else:
# the object implements the buffer interface and
# can be passed directly into socket methods
chunks = (message_body,)
for chunk in chunks:
if not chunk:
if self.debuglevel > 0:
print('Zero length chunk ignored')
continue
if encode_chunked and self._http_vsn == 11:
# chunked encoding
chunk = f'{len(chunk):X}\r\n'.encode('ascii') + chunk \
+ b'\r\n'
self.send(chunk)
if encode_chunked and self._http_vsn == 11:
# end chunked transfer
self.send(b'0\r\n\r\n')
def putrequest(self, method, url, skip_host=False,
skip_accept_encoding=False):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest(self.__state)
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
# Prevent CVE-2019-9740.
if match := _contains_disallowed_url_pchar_re.search(url):
raise InvalidURL(f"URL can't contain control characters. {url!r} "
f"(found at least {match.group()!r})")
request = '%s %s %s' % (method, url, self._http_vsn_str)
# Non-ASCII characters should have been eliminated earlier
self._output(request.encode('ascii'))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
# If we need a non-standard port, include it in the
# header. If the request is going through a proxy,
# use the host of the actual URL, not the host of the
# proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
# As per RFC 2732, IPv6 address should be wrapped with []
# when used as Host header
if host.find(':') >= 0:
host_enc = b'[' + host_enc + b']'
if port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
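# One consequence of the state rules above (hedged sketch): starting a second
# request before the first response is fetched raises CannotSendRequest.
#
#     conn.putrequest("GET", "/a"); conn.endheaders()
#     conn.putrequest("GET", "/b")   # raises CannotSendRequest('Request-sent')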
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
if hasattr(header, 'encode'):
header = header.encode('ascii')
if not _is_legal_header_name(header):
raise ValueError('Invalid header name %r' % (header,))
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
values[i] = one_value.encode('latin-1')
elif isinstance(one_value, int):
values[i] = str(one_value).encode('ascii')
if _is_illegal_header_value(values[i]):
raise ValueError('Invalid header value %r' % (values[i],))
value = b'\r\n\t'.join(values)
header = header + b': ' + value
self._output(header)
def endheaders(self, message_body=None, *, encode_chunked=False):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional message_body
argument can be used to pass a message body associated with the
request.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body, encode_chunked=encode_chunked)
def request(self, method, url, body=None, headers={}, *,
encode_chunked=False):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers, encode_chunked)
def _send_request(self, method, url, body, headers, encode_chunked):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = frozenset(k.lower() for k in headers)
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
# chunked encoding will happen if HTTP/1.1 is used and either
# the caller passes encode_chunked=True or the following
# conditions hold:
# 1. content-length has not been explicitly set
# 2. the body is a file or iterable, but not a str or bytes-like
# 3. Transfer-Encoding has NOT been explicitly set by the caller
if 'content-length' not in header_names:
# only chunk body if not explicitly set for backwards
# compatibility, assuming the client code is already handling the
# chunking
if 'transfer-encoding' not in header_names:
# if content-length cannot be automatically determined, fall
# back to chunked encoding
encode_chunked = False
content_length = self._get_content_length(body, method)
if content_length is None:
if body is not None:
if self.debuglevel > 0:
print('Unable to determine size of %r' % body)
encode_chunked = True
self.putheader('Transfer-Encoding', 'chunked')
else:
self.putheader('Content-Length', str(content_length))
else:
encode_chunked = False
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
# RFC 2616 Section 3.7.1 says that text default has a
# default charset of iso-8859-1.
body = _encode(body, 'body')
self.endheaders(body, encode_chunked=encode_chunked)
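# Hedged illustration of the chunking rules above ("/upload" is a placeholder):
# a file body with no explicit Content-Length goes out chunked, while a
# bytes-like body gets an automatic Content-Length header.
#
#     conn.request("POST", "/upload", body=open("data.bin", "rb"))  # chunked
#     conn.request("POST", "/upload", body=b"payload")  # Content-Length: 7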
def getresponse(self):
"""Get the response from the server.
If the HTTPConnection is in the correct state, returns an
instance of HTTPResponse or of whatever object is returned by
the response_class variable.
If a request has not been sent or if a previous response has
not been handled, ResponseNotReady is raised. If the HTTP
response indicates that the connection should be closed, then
it will be closed before the response is returned. When the
connection is closed, the underlying socket is closed.
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady(self.__state)
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
method=self._method)
else:
response = self.response_class(self.sock, method=self._method)
try:
try:
response.begin()
except ConnectionError:
self.close()
raise
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
except:
response.close()
raise
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
# XXX Should key_file and cert_file be deprecated in favour of context?
def __init__(self, host, port=None, key_file=None, cert_file=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, *, context=None,
check_hostname=None, blocksize=8192):
super(HTTPSConnection, self).__init__(host, port, timeout,
source_address,
blocksize=blocksize)
if (key_file is not None or cert_file is not None or
check_hostname is not None):
import warnings
warnings.warn("key_file, cert_file and check_hostname are "
"deprecated, use a custom context instead.",
DeprecationWarning, 2)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
context = ssl._create_default_https_context()
will_verify = context.verify_mode != ssl.CERT_NONE
if check_hostname is None:
check_hostname = context.check_hostname
if check_hostname and not will_verify:
raise ValueError("check_hostname needs a SSL context with "
"either CERT_OPTIONAL or CERT_REQUIRED")
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
if check_hostname is not None:
self._context.check_hostname = check_hostname
def connect(self):
"Connect to a host on a given (SSL) port."
super().connect()
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
__all__.append("HTTPSConnection")
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return '%s(%i bytes read%s)' % (self.__class__.__name__,
len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
class RemoteDisconnected(ConnectionResetError, BadStatusLine):
def __init__(self, *pos, **kw):
BadStatusLine.__init__(self, "")
ConnectionResetError.__init__(self, *pos, **kw)
# for backwards compatibility
error = HTTPException
| 37.020604
| 82
| 0.571352
|
f031e0720b0d2b724ee611831958d96caa96ff5a
| 251
|
py
|
Python
|
manage.py
|
alexfalcucc/wyd-kravov-raffle
|
75b39fde050166a3883dd83dbb7046c0b8339eb3
|
[
"MIT"
] | null | null | null |
manage.py
|
alexfalcucc/wyd-kravov-raffle
|
75b39fde050166a3883dd83dbb7046c0b8339eb3
|
[
"MIT"
] | null | null | null |
manage.py
|
alexfalcucc/wyd-kravov-raffle
|
75b39fde050166a3883dd83dbb7046c0b8339eb3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jmj_2016.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.818182
| 72
| 0.772908
|
e55eacb5ca0e7abf77d90693ac6740788b8279eb
| 720
|
py
|
Python
|
paddlevideo/modeling/framework/__init__.py
|
shippingwang/PaddleVideo
|
48e6bb5f67ad44f7ef3c5cd683e8e7b8c50f0918
|
[
"Apache-2.0"
] | 1
|
2021-01-13T09:14:35.000Z
|
2021-01-13T09:14:35.000Z
|
paddlevideo/modeling/framework/__init__.py
|
shippingwang/PaddleVideo
|
48e6bb5f67ad44f7ef3c5cd683e8e7b8c50f0918
|
[
"Apache-2.0"
] | null | null | null |
paddlevideo/modeling/framework/__init__.py
|
shippingwang/PaddleVideo
|
48e6bb5f67ad44f7ef3c5cd683e8e7b8c50f0918
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .recognizers import BaseRecognizer, Recognizer2D
__all__ = ['BaseRecognizer',
'Recognizer2D']
| 40
| 74
| 0.758333
|
c3052d4811568fb451986374c6ed89ddce5fa927
| 7,306
|
py
|
Python
|
doc/conf.py
|
Andrea-MariaDB-2/treeio
|
f50ab9bae93f7a0a062b5316485a7bbeb4b6ac4e
|
[
"MIT"
] | 242
|
2015-01-01T15:08:23.000Z
|
2022-01-19T21:14:24.000Z
|
doc/conf.py
|
J4CODE/treeio
|
bae3115f4015aad2cbc5ab45572232ceec990495
|
[
"MIT"
] | 52
|
2015-01-05T09:13:17.000Z
|
2018-12-26T14:52:43.000Z
|
doc/conf.py
|
J4CODE/treeio
|
bae3115f4015aad2cbc5ab45572232ceec990495
|
[
"MIT"
] | 99
|
2015-01-09T23:28:14.000Z
|
2021-12-30T09:19:51.000Z
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
# -*- coding: utf-8 -*-
#
# Hardtree documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 25 16:15:27 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
import treeio_project.settings
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "treeio_project.settings")
django.setup()
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hardtree'
copyright = u'2010, Tree.io Limited'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Hardtreedoc'
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Hardtree.tex', u'Hardtree Documentation',
u'Tree.io Limited', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'treeio', u'Hardtree Documentation',
[u'Tree.io Limited'], 1)
]
| 32.04386
| 80
| 0.722557
|
edcecde4be995e447dd2bd5858cd4169ff024caa
| 2,068
|
py
|
Python
|
msbapi/api.py
|
samuelrince/msbapi
|
31dfb3780dfd562be65c27ecc35f07373dbf1251
|
[
"MIT"
] | null | null | null |
msbapi/api.py
|
samuelrince/msbapi
|
31dfb3780dfd562be65c27ecc35f07373dbf1251
|
[
"MIT"
] | null | null | null |
msbapi/api.py
|
samuelrince/msbapi
|
31dfb3780dfd562be65c27ecc35f07373dbf1251
|
[
"MIT"
] | null | null | null |
import os
import requests
from enum import Enum
from typing import Optional
from pydantic import BaseModel
from msbapi.command import add_torrent_file, add_torrent_magnet
class TorrentTypes(str, Enum):
default = 'default'
movie = 'movie'
series = 'series'
music = 'music'
class TorrentRequest(BaseModel):
url: str
type: Optional[TorrentTypes] = None
class Config:
use_enum_values = True
class TorrentResponse(BaseModel):
success: bool
def download_path(torrent_type: Optional[str] = None) -> str:
if torrent_type == TorrentTypes.movie:
return os.path.join(os.getenv('BASE_PATH'), 'Plex', 'Films')
elif torrent_type == TorrentTypes.series:
return os.path.join(os.getenv('BASE_PATH'), 'Plex', 'Series')
elif torrent_type == TorrentTypes.music:
return os.path.join(os.getenv('BASE_PATH'), 'music')
else:
# TorrentTypes.default, None and unrecognized values all fall back to BASE_PATH
return os.getenv('BASE_PATH')
def download_torrent_file(url: str) -> Optional[bytes]:
response = requests.get(url)
if response.ok:
return response.content
return None  # make the "download failed" path explicit
def rt_response_success(response: requests.Response) -> bool:
if response.ok:
return response.json() == {'result': 'Success'}
return False
def add_new_torrent(torrent: TorrentRequest) -> TorrentResponse:
dl_path = download_path(torrent.type)
# .torrent file to download and send
if torrent.url.startswith('http'):
file = download_torrent_file(torrent.url)
if file:
rutorrent_response = add_torrent_file(torrent_file=file, path=dl_path)
if rt_response_success(rutorrent_response):
return TorrentResponse(success=True)
# magnet link to send
elif torrent.url.startswith('magnet'):
rutorrent_response = add_torrent_magnet(torrent_magnet=torrent.url, path=dl_path)
if rt_response_success(rutorrent_response):
return TorrentResponse(success=True)
return TorrentResponse(success=False)
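# A hedged usage sketch (assumes BASE_PATH is set in the environment and that
# the ruTorrent endpoint used by msbapi.command is reachable):
#
#     req = TorrentRequest(url="magnet:?xt=urn:btih:...", type=TorrentTypes.movie)
#     resp = add_new_torrent(req)    # -> TorrentResponse(success=True/False)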
| 28.328767
| 89
| 0.694874
|
aa2e6b3d5ecfdd80762535350b7fd9bd2734ecd7
| 14,013
|
py
|
Python
|
VMBackup/main/fsfreezer.py
|
abhishek-goyal1/azure-linux-extensions
|
8737310b9697879240a7cb1f332d75b0862f36ff
|
[
"Apache-2.0"
] | null | null | null |
VMBackup/main/fsfreezer.py
|
abhishek-goyal1/azure-linux-extensions
|
8737310b9697879240a7cb1f332d75b0862f36ff
|
[
"Apache-2.0"
] | null | null | null |
VMBackup/main/fsfreezer.py
|
abhishek-goyal1/azure-linux-extensions
|
8737310b9697879240a7cb1f332d75b0862f36ff
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from mounts import Mounts
import datetime
import threading
import os
import time
import sys
import signal
import traceback
import fcntl
from common import CommonVariables
from Utils.ResourceDiskUtil import ResourceDiskUtil
def thread_for_binary(self,args):
self.logger.log("Thread for binary is called",True)
time.sleep(3)
self.logger.log("Waited in thread for 3 seconds",True)
self.logger.log("****** 1. Starting Freeze Binary ",True)
self.child = subprocess.Popen(args,stdout=subprocess.PIPE)
self.logger.log("Binary subprocess Created",True)
class FreezeError(object):
def __init__(self):
self.errorcode = None
self.fstype = None
self.path = None
def __str__(self):
return "errorcode:" + str(self.errorcode) + " fstype:" + str(self.fstype) + " path" + str(self.path)
class FreezeResult(object):
def __init__(self):
self.errors = []
def __str__(self):
error_str = ""
for error in self.errors:
error_str += str(error) + "\n"
return error_str
class FreezeHandler(object):
def __init__(self,logger,hutil):
# sig_handle valid values (0: nothing done, 1: froze successfully, 2: freeze failed)
self.sig_handle = 0
self.child= None
self.logger=logger
self.hutil = hutil
def sigusr1_handler(self,signal,frame):
self.logger.log('frozen', False)
self.logger.log("****** 4. Freeze Completed (Signal=1 received)",False)
self.sig_handle=1
def sigchld_handler(self,signal,frame):
self.logger.log('some child process terminated')
if(self.child is not None and self.child.poll() is not None):
self.logger.log("binary child terminated",True)
self.logger.log("****** 9. Binary Process completed (Signal=2 received)",True)
self.sig_handle=2
def reset_signals(self):
self.sig_handle = 0
self.child= None
def startproc(self,args):
binary_thread = threading.Thread(target=thread_for_binary, args=[self, args])
binary_thread.start()
SafeFreezeWaitInSecondsDefault = 66
proc_sleep_time = self.hutil.get_intvalue_from_configfile('SafeFreezeWaitInSeconds',SafeFreezeWaitInSecondsDefault)
for i in range(0,(int(proc_sleep_time/2))):
if(self.sig_handle==0):
self.logger.log("inside while with sig_handle "+str(self.sig_handle))
time.sleep(2)
else:
break
self.logger.log("Binary output for signal handled: "+str(self.sig_handle))
return self.sig_handle
def signal_receiver(self):
signal.signal(signal.SIGUSR1,self.sigusr1_handler)
signal.signal(signal.SIGCHLD,self.sigchld_handler)
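# A hedged sketch of the handshake implemented above (argument values are
# hypothetical): the safefreeze binary runs on a worker thread while the
# parent polls sig_handle every 2 seconds, until SIGUSR1 (freeze succeeded),
# SIGCHLD (binary exited early) or the configured timeout breaks the wait.
#
#     handler = FreezeHandler(logger, hutil)
#     handler.signal_receiver()            # install SIGUSR1/SIGCHLD handlers
#     state = handler.startproc([freezebin, "60", "/data"])
#     # state == 1: frozen; state == 2: binary exited; state == 0: timed out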
class FsFreezer:
def __init__(self, patching, logger, hutil):
"""
"""
self.patching = patching
self.logger = logger
self.hutil = hutil
try:
self.mounts = Mounts(patching = self.patching, logger = self.logger)
except Exception as e:
errMsg='Failed to retrieve mount points, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg,True,'Warning')
self.logger.log(str(e), True)
self.mounts = None
self.frozen_items = set()
self.unfrozen_items = set()
self.freeze_handler = FreezeHandler(self.logger, self.hutil)
self.mount_open_failed = False
resource_disk = ResourceDiskUtil(patching = patching, logger = logger)
self.resource_disk_mount_point = resource_disk.get_resource_disk_mount_point()
self.skip_freeze = True
self.isAquireLockSucceeded = True
self.getLockRetry = 0
self.maxGetLockRetry = 5
self.safeFreezelockFile = None
def should_skip(self, mount):
if(self.resource_disk_mount_point is not None and mount.mount_point == self.resource_disk_mount_point):
return True
elif((mount.fstype == 'ext3' or mount.fstype == 'ext4' or mount.fstype == 'xfs' or mount.fstype == 'btrfs') and mount.type != 'loop' ):
return False
else:
return True
def freeze_safe(self,timeout):
self.root_seen = False
error_msg=''
timedout = False
self.skip_freeze = True
mounts_to_skip = None
mounts_list_to_skip = []  # pre-initialize so a config failure below cannot cause a NameError later
try:
mounts_to_skip = self.hutil.get_strvalue_from_configfile('MountsToSkip','')
self.logger.log("skipped mount :" + str(mounts_to_skip), True)
mounts_list_to_skip = mounts_to_skip.split(',')
except Exception as e:
errMsg='Failed to read from config, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg,True,'Warning')
try:
freeze_result = FreezeResult()
freezebin=os.path.join(os.getcwd(),os.path.dirname(__file__),"safefreeze/bin/safefreeze")
args=[freezebin,str(timeout)]
no_mount_found = True
for mount in self.mounts.mounts:
self.logger.log("fsfreeze mount :" + str(mount.mount_point), True)
if(mount.mount_point == '/'):
self.root_seen = True
self.root_mount = mount
elif(mount.mount_point not in mounts_list_to_skip and not self.should_skip(mount)):
if(self.skip_freeze == True):
self.skip_freeze = False
args.append(str(mount.mount_point))
if(self.root_seen and not self.should_skip(self.root_mount)):
if(self.skip_freeze == True):
self.skip_freeze = False
args.append('/')
self.logger.log("skip freeze is : " + str(self.skip_freeze), True)
if(self.skip_freeze == True):
return freeze_result,timedout
self.logger.log("arg : " + str(args),True)
self.freeze_handler.reset_signals()
self.freeze_handler.signal_receiver()
self.logger.log("proceeded for accepting signals", True)
            if(mounts_to_skip == '/'): # keep logging local to avoid an out-of-memory issue
self.logger.enforce_local_flag(True)
else:
self.logger.enforce_local_flag(False)
start_time = datetime.datetime.utcnow()
while self.getLockRetry < self.maxGetLockRetry:
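                # Serialize freezes across processes: take an exclusive fcntl
                # lock on a well-known file so two extension instances cannot
                # freeze the same mounts at the same time.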
try:
if not os.path.isdir('/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock'):
os.mkdir('/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock')
self.safeFreezelockFile = open("/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile","w")
self.logger.log("/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile file opened Sucessfully",True)
try:
fcntl.lockf(self.safeFreezelockFile, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.logger.log("Aquiring lock succeeded",True)
self.isAquireLockSucceeded = True
break
except Exception as ex:
self.safeFreezelockFile.close()
raise ex
except Exception as e:
self.logger.log("Failed to open file or aquire lock: %s, stack trace: %s" % (str(e), traceback.format_exc()),True)
self.isAquireLockSucceeded = False
self.getLockRetry= self.getLockRetry + 1
time.sleep(1)
if(self.getLockRetry == self.maxGetLockRetry - 1):
time.sleep(30)
self.logger.log("Retry to aquire lock count: "+ str(self.getLockRetry),True)
end_time = datetime.datetime.utcnow()
self.logger.log("Wait time to aquire lock "+ str(end_time - start_time),True)
sig_handle = None
if (self.isAquireLockSucceeded == True):
sig_handle=self.freeze_handler.startproc(args)
self.logger.log("freeze_safe after returning from startproc : sig_handle="+str(sig_handle))
if(sig_handle != 1):
if (self.freeze_handler.child is not None):
self.log_binary_output()
if (sig_handle == 0):
timedout = True
error_msg="freeze timed-out"
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
elif (self.mount_open_failed == True):
error_msg=CommonVariables.unable_to_open_err_string
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
elif (self.isAquireLockSucceeded == False):
error_msg="Mount Points already freezed by some other processor"
freeze_result.errors.append(error_msg)
self.logger.log(error_msg,True,'Error')
else:
error_msg="freeze failed for some mount"
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
except Exception as e:
self.logger.enforce_local_flag(True)
error_msg='freeze failed for some mount with exception, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
return freeze_result,timedout
def releaseFileLock(self):
if (self.isAquireLockSucceeded == True):
try:
fcntl.lockf(self.safeFreezelockFile, fcntl.LOCK_UN)
self.safeFreezelockFile.close()
except Exception as e:
self.logger.log("Failed to unlock: %s, stack trace: %s" % (str(e), traceback.format_exc()),True)
try:
os.remove("/etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile")
except Exception as e:
self.logger.log("Failed to delete /etc/azure/MicrosoftRecoverySvcsSafeFreezeLock/SafeFreezeLockFile file: %s, stack trace: %s" % (str(e), traceback.format_exc()),True)
def thaw_safe(self):
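        # Request a thaw by sending SIGUSR1 to the safefreeze child, wait up
        # to 30 seconds for it to exit, then interpret its return code; the
        # inter-process lock is always released in the finally block.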
thaw_result = None
unable_to_sleep = False
try:
thaw_result = FreezeResult()
if(self.skip_freeze == True):
return thaw_result, unable_to_sleep
if(self.freeze_handler.child is None):
self.logger.log("child already completed", True)
self.logger.log("****** 7. Error - Binary Process Already Completed", True)
error_msg = 'snapshot result inconsistent'
thaw_result.errors.append(error_msg)
elif(self.freeze_handler.child.poll() is None):
self.logger.log("child process still running")
self.logger.log("****** 7. Sending Thaw Signal to Binary")
self.freeze_handler.child.send_signal(signal.SIGUSR1)
for i in range(0,30):
if(self.freeze_handler.child.poll() is None):
self.logger.log("child still running sigusr1 sent")
time.sleep(1)
else:
break
self.logger.enforce_local_flag(True)
self.log_binary_output()
if(self.freeze_handler.child.returncode!=0):
error_msg = 'snapshot result inconsistent as child returns with failure'
thaw_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
else:
self.logger.log("Binary output after process end when no thaw sent: ", True)
if(self.freeze_handler.child.returncode==2):
error_msg = 'Unable to execute sleep'
thaw_result.errors.append(error_msg)
unable_to_sleep = True
else:
error_msg = 'snapshot result inconsistent'
thaw_result.errors.append(error_msg)
self.logger.enforce_local_flag(True)
self.log_binary_output()
self.logger.log(error_msg, True, 'Error')
self.logger.enforce_local_flag(True)
finally:
self.releaseFileLock()
return thaw_result, unable_to_sleep
def log_binary_output(self):
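        # Drain the child's stdout until EOF, logging every line and flagging
        # any "Failed to open:" message so freeze_safe can surface it.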
self.logger.log("============== Binary output traces start ================= ", True)
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line, encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if("Failed to open:" in line):
self.mount_open_failed = True
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
self.logger.log("============== Binary output traces end ================= ", True)
| 44.485714
| 179
| 0.596161
|
8ffec7f2be53a7b2d40e3e8c480a12532d6a7974
| 10,073
|
py
|
Python
|
phpmyadmin/doc/conf.py
|
worttreffer/real1
|
424e896322e74179cd9f3a2689c44df6ac02bb80
|
[
"MIT"
] | null | null | null |
phpmyadmin/doc/conf.py
|
worttreffer/real1
|
424e896322e74179cd9f3a2689c44df6ac02bb80
|
[
"MIT"
] | 1
|
2021-05-10T11:02:02.000Z
|
2021-05-10T11:02:02.000Z
|
phpmyadmin/doc/conf.py
|
worttreffer/real1
|
424e896322e74179cd9f3a2689c44df6ac02bb80
|
[
"MIT"
] | 1
|
2019-09-05T09:39:18.000Z
|
2019-09-05T09:39:18.000Z
|
# -*- coding: utf-8 -*-
#
# phpMyAdmin documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 26 14:04:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['configext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phpMyAdmin'
copyright = u'2012 - 2018, The phpMyAdmin devel team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.1.0-dev'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpMyAdmindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'phpMyAdmin.tex', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpmyadmin', u'phpMyAdmin Documentation',
[u'The phpMyAdmin devel team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'phpMyAdmin', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'phpMyAdmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpMyAdmin'
epub_author = u'The phpMyAdmin devel team'
epub_publisher = u'The phpMyAdmin devel team'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Highlight PHP without starting <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
# Number of retries and timeout for linkcheck
linkcheck_retries = 10
linkcheck_timeout = 10
linkcheck_anchors = False
linkcheck_ignore = [
# Site is often down
r'https://software.opensuse.org/package/.*',
r'https://pecl.php.net/.*',
# 403 Client Error: Forbidden
r'https://authy.com/.*',
# 500 Server Error: Internal Server Error
r'http://www.scriptalicious.com/.*',
]
| 31.576803
| 82
| 0.718952
|
2f4f7cd7ffa25422c458f7b92062cf771c95af4b
| 1,609
|
py
|
Python
|
stubs.min/System/__init___parts/MarshalByRefObject.py
|
denfromufa/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2017-07-07T11:15:45.000Z
|
2017-07-07T11:15:45.000Z
|
stubs.min/System/__init___parts/MarshalByRefObject.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/System/__init___parts/MarshalByRefObject.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class MarshalByRefObject(object):
""" Enables access to objects across application domain boundaries in applications that support remoting. """
def CreateObjRef(self,requestedType):
"""
CreateObjRef(self: MarshalByRefObject,requestedType: Type) -> ObjRef
Creates an object that contains all the relevant information required to
generate a proxy used to communicate with a remote object.
requestedType: The System.Type of the object that the new System.Runtime.Remoting.ObjRef will
reference.
Returns: Information required to generate a proxy.
"""
pass
def GetLifetimeService(self):
"""
GetLifetimeService(self: MarshalByRefObject) -> object
Retrieves the current lifetime service object that controls the lifetime policy
for this instance.
Returns: An object of type System.Runtime.Remoting.Lifetime.ILease used to control the
lifetime policy for this instance.
"""
pass
def InitializeLifetimeService(self):
"""
InitializeLifetimeService(self: MarshalByRefObject) -> object
Obtains a lifetime service object to control the lifetime policy for this
instance.
Returns: An object of type System.Runtime.Remoting.Lifetime.ILease used to control the
lifetime policy for this instance. This is the current lifetime service object
    for this instance if one exists; otherwise, a new lifetime service object
initialized to the value of the
System.Runtime.Remoting.Lifetime.LifetimeServices.LeaseManagerPollTime
property.
"""
pass
| 37.418605
| 111
| 0.724052
|
b9dbf62f9c3d8025199b3066cf74a32ffbf437fb
| 156
|
py
|
Python
|
hw/andrei_bondar/test_bondar.py
|
alexander-sidorov/qap-05
|
6db7c0a1eeadd15f7d3f826e7f0ac4be3949ec8c
|
[
"MIT"
] | 9
|
2021-12-10T21:30:07.000Z
|
2022-02-25T21:32:34.000Z
|
hw/andrei_bondar/test_bondar.py
|
alexander-sidorov/qap-05
|
6db7c0a1eeadd15f7d3f826e7f0ac4be3949ec8c
|
[
"MIT"
] | 22
|
2021-12-11T08:46:58.000Z
|
2022-02-02T15:56:37.000Z
|
hw/andrei_bondar/test_bondar.py
|
alexander-sidorov/qap-05
|
6db7c0a1eeadd15f7d3f826e7f0ac4be3949ec8c
|
[
"MIT"
] | 8
|
2021-12-11T09:15:45.000Z
|
2022-02-02T08:09:09.000Z
|
def test_example() -> None:
assert True, "not True"
assert 1 + 1 == 2
assert 4 / 2 == 2
assert 2 * 2 == 4
assert "ab" + "bc" == "abbc"
| 19.5
| 32
| 0.49359
|
ef9c889ac241646a46b25de312e4e463611176e7
| 4,420
|
py
|
Python
|
src/OTLMOW/PostenMapping/Model/Post060344470.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/PostenMapping/Model/Post060344470.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/PostenMapping/Model/Post060344470.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060344470(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.44470',
beschrijving='Gestaalstraalde waterdoorlatende betonstraatstenen, wit met kleurondersteunende granulaten volgens 6-3.5, met drainageopeningen, 100 mm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating.aard',
dotnotatie='aard',
defaultWaarde='met-drainageopeningen',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.44470')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotatie='laagRol',
defaultWaarde='straatlaag',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.44470')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating.type',
dotnotatie='type',
defaultWaarde='witte-met-kleurondersteunende-granulaten',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.44470')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating.afwerking',
dotnotatie='afwerking',
defaultWaarde='gestaaldstraald',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.44470')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotatie='dikte',
defaultWaarde='10',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.44470')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#WaterdoorlatendeBestrating',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotatie='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.44470')])
| 48.571429
| 163
| 0.578054
|
ed045ae4cb6ee0e2f44f269d3dca1166314ca141
| 465
|
py
|
Python
|
_pytest/test_everything.py
|
tzvetkoff/wee-slack
|
da7a9643b5d2512b749e711d5af12259f90517b7
|
[
"MIT"
] | null | null | null |
_pytest/test_everything.py
|
tzvetkoff/wee-slack
|
da7a9643b5d2512b749e711d5af12259f90517b7
|
[
"MIT"
] | null | null | null |
_pytest/test_everything.py
|
tzvetkoff/wee-slack
|
da7a9643b5d2512b749e711d5af12259f90517b7
|
[
"MIT"
] | 1
|
2021-07-05T08:57:42.000Z
|
2021-07-05T08:57:42.000Z
|
from __future__ import print_function, unicode_literals
import glob
import json
def test_everything(realish_eventrouter, team):
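    # Replay every recorded websocket payload through the event router, then
    # check that the expected number of queued items remains afterwards.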
datafiles = glob.glob("_pytest/data/websocket/*.json")
for fname in sorted(datafiles):
data = json.loads(open(fname, 'r').read())
team.ws.add(data)
realish_eventrouter.receive_ws_callback(team.team_hash, None)
realish_eventrouter.handle_next()
assert len(realish_eventrouter.queue) == 14
| 27.352941
| 69
| 0.724731
|
ca312ec06796627aebcdf95772d6011616f907c8
| 13,909
|
py
|
Python
|
aiobotocore/endpoint.py
|
fraglab/aiobotocore
|
dec5a6324d150e8d7eff648b511c0227f30485cc
|
[
"Apache-2.0"
] | 2
|
2020-08-28T12:11:59.000Z
|
2020-09-29T14:34:15.000Z
|
aiobotocore/endpoint.py
|
fraglab/aiobotocore
|
dec5a6324d150e8d7eff648b511c0227f30485cc
|
[
"Apache-2.0"
] | 4
|
2020-08-28T11:51:20.000Z
|
2020-11-28T13:54:15.000Z
|
aiobotocore/endpoint.py
|
admdev8/aiobotocore
|
bddbeeec248ed25ae514c222f012e778fbd4655c
|
[
"Apache-2.0"
] | null | null | null |
import aiohttp
import asyncio
import io
import ssl
import aiohttp.http_exceptions
from aiohttp.client import URL
from botocore.endpoint import EndpointCreator, Endpoint, DEFAULT_TIMEOUT, \
MAX_POOL_CONNECTIONS, logger, history_recorder, create_request_object
from botocore.exceptions import ConnectionClosedError
from botocore.hooks import first_non_none_response
from botocore.utils import is_valid_endpoint_url
from multidict import MultiDict
from urllib.parse import urlparse
from urllib3.response import HTTPHeaderDict
from aiobotocore.response import StreamingBody
from aiobotocore._endpoint_helpers import _text, _IOBaseWrapper, \
ClientResponseProxy
async def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
# botocore converts keys to str, so make sure that they are in
# the expected case. See detailed discussion here:
# https://github.com/aio-libs/aiobotocore/pull/116
# aiohttp's CIMultiDict camel cases the headers :(
'headers': HTTPHeaderDict(
{k.decode('utf-8').lower(): v.decode('utf-8')
for k, v in http_response.raw_headers}),
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = await http_response.read()
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = await http_response.read()
return response_dict
class AioEndpoint(Endpoint):
def __init__(self, *args, proxies=None, **kwargs):
super().__init__(*args, **kwargs)
self.proxies = proxies or {}
async def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
await self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
async def _send_request(self, request_dict, operation_model):
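        # Flow: build a signed request, send it, and keep retrying while the
        # 'needs-retry' event handlers ask for it; every retry rewinds the
        # body stream and produces a freshly signed request.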
attempts = 1
request = await self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = await self._get_response(
request, operation_model, context)
while await self._needs_retry(attempts, operation_model,
request_dict, success_response,
exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = await self.create_request(
request_dict, operation_model)
success_response, exception = await self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
async def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = await self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = await convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
await self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
async def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (
service_id, operation_model.name)
responses = await self._event_emitter.emit(event_name,
request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = await self._send(request)
except aiohttp.ClientConnectionError as e:
e.request = request # botocore expects the request property
return None, e
except aiohttp.http_exceptions.BadStatusLine:
better_exception = ConnectionClosedError(
endpoint_url=request.url, request=request)
return None, better_exception
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return None, e
# This returns the http_response and the parsed_data.
response_dict = await convert_to_response_dict(http_response,
operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
# NOTE: The only line changed here changing time.sleep to asyncio.sleep
async def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = await self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
# for the specified number of times.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
await asyncio.sleep(handler_response)
return True
async def _send(self, request):
# Note: When using aiobotocore with dynamodb, requests fail on crc32
# checksum computation as soon as the response data reaches ~5KB.
# When AWS response is gzip compressed:
# 1. aiohttp is automatically decompressing the data
# (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
# 2. botocore computes crc32 on the uncompressed data bytes and fails
# cause crc32 has been computed on the compressed data
# The following line forces aws not to use gzip compression,
# if there is a way to configure aiohttp not to perform decompression,
# we can remove the following line and take advantage of
# aws gzip compression.
# https://github.com/boto/botocore/issues/1255
url = request.url
headers = request.headers
data = request.body
headers['Accept-Encoding'] = 'identity'
headers_ = MultiDict(
(z[0], _text(z[1], encoding='utf-8')) for z in headers.items())
# botocore does this during the request so we do this here as well
# TODO: this should be part of the ClientSession, perhaps make wrapper
proxy = self.proxies.get(urlparse(url.lower()).scheme)
if isinstance(data, io.IOBase):
data = _IOBaseWrapper(data)
url = URL(url, encoded=True)
resp = await self.http_session.request(
request.method, url=url, headers=headers_, data=data, proxy=proxy)
# If we're not streaming, read the content so we can retry any timeout
# errors, see:
# https://github.com/boto/botocore/blob/develop/botocore/vendored/requests/sessions.py#L604
if not request.stream_output:
await resp.read()
return resp
class AioEndpointCreator(EndpointCreator):
# TODO: handle socket_options
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=aiohttp.ClientSession,
proxies=None,
socket_options=None,
client_cert=None,
connector_args=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
if isinstance(timeout, (list, tuple)):
conn_timeout, read_timeout = timeout
else:
conn_timeout = read_timeout = timeout
if connector_args is None:
# AWS has a 20 second idle timeout:
# https://forums.aws.amazon.com/message.jspa?messageID=215367
# aiohttp default timeout is 30s so set something reasonable here
connector_args = dict(keepalive_timeout=12)
timeout = aiohttp.ClientTimeout(
sock_connect=conn_timeout,
sock_read=read_timeout
)
ssl_context = None
if client_cert:
if isinstance(client_cert, str):
key_file = None
cert_file = client_cert
elif isinstance(client_cert, tuple):
cert_file, key_file = client_cert
else:
assert False
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(cert_file, key_file)
connector = aiohttp.TCPConnector(
limit=max_pool_connections,
verify_ssl=self._get_verify_value(verify),
ssl_context=ssl_context,
**connector_args)
aio_session = aiohttp.ClientSession(
connector=connector,
timeout=timeout,
skip_auto_headers={'CONTENT-TYPE'},
response_class=ClientResponseProxy,
auto_decompress=False)
return AioEndpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=aio_session,
proxies=proxies)
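# Hedged usage sketch (illustrative only, not aiobotocore's public API): real
# code obtains endpoints indirectly through an aiobotocore client, but the
# creator above could in principle be driven like this; `event_emitter` and
# `service_model` are assumed to come from a loaded botocore session.
#
#   creator = AioEndpointCreator(event_emitter)
#   endpoint = creator.create_endpoint(
#       service_model,
#       region_name='us-east-1',
#       endpoint_url='https://dynamodb.us-east-1.amazonaws.com')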
| 43.330218
| 99
| 0.637573
|
de00e5f6632b7561d3897ee36a4a17c3e3bbc959
| 1,850
|
py
|
Python
|
tests/nnapi/specs/Ex/transpose_conv_ex_float_4.mod.py
|
bogus-sudo/ONE-1
|
7052a817eff661ec2854ed2e7ee0de5e8ba82b55
|
[
"Apache-2.0"
] | 255
|
2020-05-22T07:45:29.000Z
|
2022-03-29T23:58:22.000Z
|
tests/nnapi/specs/Ex/transpose_conv_ex_float_4.mod.py
|
bogus-sudo/ONE-1
|
7052a817eff661ec2854ed2e7ee0de5e8ba82b55
|
[
"Apache-2.0"
] | 5,102
|
2020-05-22T07:48:33.000Z
|
2022-03-31T23:43:39.000Z
|
tests/nnapi/specs/Ex/transpose_conv_ex_float_4.mod.py
|
bogus-sudo/ONE-1
|
7052a817eff661ec2854ed2e7ee0de5e8ba82b55
|
[
"Apache-2.0"
] | 120
|
2020-05-22T07:51:08.000Z
|
2022-02-16T19:08:05.000Z
|
# model
batch = 1
in_chans = 1
out_chans = 1
in_rows = 4
in_cols = 4
out_rows = 8
out_cols = 8
ker_rows = 3
ker_cols = 3
stride = 2
# SAME padding: effective pad amounts are (left: 0, right: 1, top: 0, bottom: 1)
input_table = [x for x in range(batch * in_rows * in_cols * in_chans)]
kernel_table = [x for x in range(out_chans * ker_rows * ker_cols * in_chans)]
out_table = [0 for x in range(batch * out_rows * out_cols * out_chans)]
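# Reference computation: transposed convolution implemented by scattering.
# Each input element is multiplied by the full kernel and accumulated into the
# output window anchored at (row*stride, col*stride); writes falling outside
# the 8x8 output are dropped, which realizes the padding noted above.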
for i in range(batch):
for j in range(in_rows):
for k in range(in_cols):
for l in range(in_chans):
out_row_origin = j * stride
out_col_origin = k * stride
input_value = input_table[((i * in_rows + j) * in_cols + k) * in_chans + l]
for m in range(ker_rows):
for n in range(ker_cols):
for o in range(out_chans):
out_row = out_row_origin + m
out_col = out_col_origin + n
if (out_row < out_rows) and (out_col < out_cols) and (out_row >= 0) and (out_col >= 0):
kernel_value = kernel_table[((o * ker_rows + m) * ker_cols + n) * in_chans + l]
out_table[((i * out_rows + out_row) * out_cols + out_col) * out_chans + o] += (input_value * kernel_value)
model = Model()
i0 = Input("op_shape", "TENSOR_INT32", "{4}")
weights = Parameter("ker", "TENSOR_FLOAT32", "{1, 3, 3, 1}", kernel_table)
i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}" )
pad = Int32Scalar("pad_same", 1)
s_x = Int32Scalar("stride_x", 2)
s_y = Int32Scalar("stride_y", 2)
i2 = Output("op", "TENSOR_FLOAT32", "{1, 8, 8, 1}")
model = model.Operation("TRANSPOSE_CONV_EX", i0, weights, i1, pad, s_x, s_y).To(i2)
# Example 1. Input in operand 0,
input0 = {i0: # output shape
[1, 8, 8, 1],
i1: # input 0
input_table}
output0 = {i2: # output 0
out_table}
# Instantiate an example
Example((input0, output0))
| 33.636364
| 122
| 0.605946
|
1ae6e2d58397ea698b8c3ce58fd4be0a1ee56533
| 742
|
py
|
Python
|
src/articles/entrypoints/rest_api/views/authentication_view.py
|
Tserewara/dama-rowahutu
|
9f8d0ecfa8760cf25efb343472734042a4b2fa15
|
[
"MIT"
] | 2
|
2021-03-19T11:19:14.000Z
|
2021-05-04T19:02:34.000Z
|
src/articles/entrypoints/rest_api/views/authentication_view.py
|
Tserewara/dama-rowahutu
|
9f8d0ecfa8760cf25efb343472734042a4b2fa15
|
[
"MIT"
] | null | null | null |
src/articles/entrypoints/rest_api/views/authentication_view.py
|
Tserewara/dama-rowahutu
|
9f8d0ecfa8760cf25efb343472734042a4b2fa15
|
[
"MIT"
] | 1
|
2021-03-03T17:33:12.000Z
|
2021-03-03T17:33:12.000Z
|
from flask import request, jsonify, session
from flask.views import MethodView
from src.articles.domain.entities import exceptions
from src.articles.services import credential_service, unit_of_work
class AuthenticationAPI(MethodView):
@staticmethod
def post():
try:
username = credential_service.authenticate(
username=request.json['username'],
password=request.json['password'],
uow=unit_of_work.SqlAlchemyUnitOfWork()
)
session['username'] = username
            return jsonify({'message': 'Login successful!'}), 200
except exceptions.CredentialValueError as e:
return jsonify({'message': f'{str(e)}'}), 401
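# Hedged wiring sketch (assumed, not shown in this repo): a Flask MethodView
# is typically registered on the app or a blueprint roughly like this; the
# route and endpoint name below are illustrative only.
#
#   app.add_url_rule(
#       '/login', view_func=AuthenticationAPI.as_view('authentication'))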
| 26.5
| 67
| 0.648248
|
998082bccf447ca3705359362c8fcecc22faba19
| 1,487
|
py
|
Python
|
vendor/github.com/elastic/beats/metricbeat/module/mysql/test_mysql.py
|
maehue/mqttbeat
|
6c10f56096747732e40b70e298391eccd43d118b
|
[
"Apache-2.0"
] | 14
|
2019-05-07T16:12:03.000Z
|
2020-07-17T12:49:42.000Z
|
vendor/github.com/elastic/beats/metricbeat/module/mysql/test_mysql.py
|
maehue/mqttbeat
|
6c10f56096747732e40b70e298391eccd43d118b
|
[
"Apache-2.0"
] | 7
|
2019-05-14T13:46:01.000Z
|
2021-01-09T08:10:59.000Z
|
vendor/github.com/elastic/beats/metricbeat/module/mysql/test_mysql.py
|
maehue/mqttbeat
|
6c10f56096747732e40b70e298391eccd43d118b
|
[
"Apache-2.0"
] | 3
|
2019-08-19T12:22:02.000Z
|
2020-07-17T12:53:43.000Z
|
import os
import sys
import unittest
from nose.plugins.attrib import attr
sys.path.append(os.path.join(os.path.dirname(__file__), '../../tests/system'))
import metricbeat
MYSQL_FIELDS = metricbeat.COMMON_FIELDS + ["mysql"]
MYSQL_STATUS_FIELDS = ["clients", "cluster", "cpu", "keyspace", "memory",
"persistence", "replication", "server", "stats"]
@metricbeat.parameterized_with_supported_versions
class Test(metricbeat.BaseTest):
COMPOSE_SERVICES = ['mysql']
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
@attr('integration')
def test_status(self):
"""
MySQL module outputs an event.
"""
self.render_config_template(modules=[{
"name": "mysql",
"metricsets": ["status"],
"hosts": self.get_hosts(),
"period": "5s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
output = self.read_output_json()
self.assertEqual(len(output), 1)
evt = output[0]
self.assertItemsEqual(self.de_dot(MYSQL_FIELDS), evt.keys(), evt)
status = evt["mysql"]["status"]
assert status["connections"] > 0
assert status["opened_tables"] > 0
self.assert_fields_are_documented(evt)
def get_hosts(self):
return ['root:test@tcp({})/'.format(self.compose_host())]
| 29.156863
| 78
| 0.627438
|
d7083384b0883e4c2af5d6bb5e127cb32c8d23fb
| 8,395
|
py
|
Python
|
aws_deploy_package/twilio/rest/studio/v1/flow/execution/execution_context.py
|
anandhakrishnanh1998/Twilio-Chat-Bot
|
bb5cb02e363deb4c31a24cae6b0fd0b893ef2e20
|
[
"MIT"
] | 1
|
2020-01-18T08:06:57.000Z
|
2020-01-18T08:06:57.000Z
|
aws_deploy_package/twilio/rest/studio/v1/flow/execution/execution_context.py
|
anandhakrishnanh1998/Twilio-Chat-Bot
|
bb5cb02e363deb4c31a24cae6b0fd0b893ef2e20
|
[
"MIT"
] | 9
|
2019-12-05T00:49:12.000Z
|
2021-09-08T01:31:25.000Z
|
flask/lib/python3.6/site-packages/twilio/rest/studio/v1/flow/execution/execution_context.py
|
JOFLIX/grapevines
|
34576e01184570d79cc140b42ffb71d322132da6
|
[
"MIT",
"Unlicense"
] | null | null | null |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ExecutionContextList(ListResource):
""" """
def __init__(self, version, flow_sid, execution_sid):
"""
Initialize the ExecutionContextList
:param Version version: Version that contains the resource
:param flow_sid: Flow Sid.
:param execution_sid: Execution Sid.
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextList
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextList
"""
super(ExecutionContextList, self).__init__(version)
# Path Solution
self._solution = {'flow_sid': flow_sid, 'execution_sid': execution_sid, }
def get(self):
"""
Constructs a ExecutionContextContext
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
"""
return ExecutionContextContext(
self._version,
flow_sid=self._solution['flow_sid'],
execution_sid=self._solution['execution_sid'],
)
def __call__(self):
"""
Constructs a ExecutionContextContext
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
"""
return ExecutionContextContext(
self._version,
flow_sid=self._solution['flow_sid'],
execution_sid=self._solution['execution_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Studio.V1.ExecutionContextList>'
class ExecutionContextPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the ExecutionContextPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param flow_sid: Flow Sid.
:param execution_sid: Execution Sid.
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextPage
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextPage
"""
super(ExecutionContextPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ExecutionContextInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
"""
return ExecutionContextInstance(
self._version,
payload,
flow_sid=self._solution['flow_sid'],
execution_sid=self._solution['execution_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Studio.V1.ExecutionContextPage>'
class ExecutionContextContext(InstanceContext):
""" """
def __init__(self, version, flow_sid, execution_sid):
"""
Initialize the ExecutionContextContext
:param Version version: Version that contains the resource
:param flow_sid: Flow Sid.
:param execution_sid: Execution Sid.
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
"""
super(ExecutionContextContext, self).__init__(version)
# Path Solution
self._solution = {'flow_sid': flow_sid, 'execution_sid': execution_sid, }
self._uri = '/Flows/{flow_sid}/Executions/{execution_sid}/Context'.format(**self._solution)
def fetch(self):
"""
Fetch a ExecutionContextInstance
:returns: Fetched ExecutionContextInstance
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ExecutionContextInstance(
self._version,
payload,
flow_sid=self._solution['flow_sid'],
execution_sid=self._solution['execution_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Studio.V1.ExecutionContextContext {}>'.format(context)
class ExecutionContextInstance(InstanceResource):
""" """
def __init__(self, version, payload, flow_sid, execution_sid):
"""
Initialize the ExecutionContextInstance
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
"""
super(ExecutionContextInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'context': payload['context'],
'flow_sid': payload['flow_sid'],
'execution_sid': payload['execution_sid'],
'url': payload['url'],
}
# Context
self._context = None
self._solution = {'flow_sid': flow_sid, 'execution_sid': execution_sid, }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ExecutionContextContext for this ExecutionContextInstance
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
"""
if self._context is None:
self._context = ExecutionContextContext(
self._version,
flow_sid=self._solution['flow_sid'],
execution_sid=self._solution['execution_sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: Account Sid.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def context(self):
"""
:returns: Flow state.
:rtype: dict
"""
return self._properties['context']
@property
def flow_sid(self):
"""
:returns: Flow Sid.
:rtype: unicode
"""
return self._properties['flow_sid']
@property
def execution_sid(self):
"""
:returns: Execution Sid.
:rtype: unicode
"""
return self._properties['execution_sid']
@property
def url(self):
"""
:returns: The URL of this resource.
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a ExecutionContextInstance
:returns: Fetched ExecutionContextInstance
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Studio.V1.ExecutionContextInstance {}>'.format(context)
| 31.092593
| 99
| 0.633234
|
f6d66c63660a566fbf18b5956e31988ff80416e4
| 3,607
|
py
|
Python
|
detectron/datasets/dummy_datasets.py
|
gaohaidong/Detectron
|
cbf1b9177e3a1a3420e38c8c6e7a2e39aed604ef
|
[
"Apache-2.0"
] | null | null | null |
detectron/datasets/dummy_datasets.py
|
gaohaidong/Detectron
|
cbf1b9177e3a1a3420e38c8c6e7a2e39aed604ef
|
[
"Apache-2.0"
] | null | null | null |
detectron/datasets/dummy_datasets.py
|
gaohaidong/Detectron
|
cbf1b9177e3a1a3420e38c8c6e7a2e39aed604ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Provide stub objects that can act as stand-in "dummy" datasets for simple use
cases, like getting all classes in a dataset. This exists so that demos can be
run without requiring users to download/install datasets first.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from detectron.utils.collections import AttrDict
def get_coco_dataset():
"""A dummy COCO dataset that includes only the 'classes' field."""
ds = AttrDict()
classes = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
def get_traffic_dataset():
"""A dummy COCO dataset that includes only the 'classes' field."""
ds = AttrDict()
classes = [
'__background__', 'car'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
def get_cloth_dataset():
"""A dummy COCO dataset that includes only the 'classes' field."""
ds = AttrDict()
classes = [
'__background__', 'huibian','qianjie','bianzhadong','quewei','diaojing','cusha','xianyin','quejing','diaogong','zhixi','pobian','lengduan','cashang','bianquejing','maoban','wuzi','mingqianxian','houduan','bianzhenyan','gongsha','zhengneyin','cadong','jiandong','jiama','jingtiaohua','maodong','zhiru','youzi','camao','zhadong','tiaohua','diaowei','houbaoduan','xiuyin','bianquewei','erduo','jiedong','maoli','podong','huangzi','jinsha','zhashu','zhasha','bianbaiyin','jingcusha','weicusha'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
def get_steel_dataset():
ds = AttrDict()
classes = [
'__background__', 'steel'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
def get_hanzi_dataset():
ds = AttrDict()
classes = [
'__background__', 'hanzi'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
| 44.530864
| 497
| 0.641808
|
972a998e659d9d74dd29f62c346df8477136048c
| 4,195
|
py
|
Python
|
package_monitor/admin.py
|
yunojuno-archive/django-package-monitor
|
5e387c1274b707050dcb441dbfd5b6c0aa7c57dc
|
[
"MIT"
] | 4
|
2019-07-22T18:28:26.000Z
|
2020-08-03T15:06:33.000Z
|
package_monitor/admin.py
|
yunojuno-archive/django-package-monitor
|
5e387c1274b707050dcb441dbfd5b6c0aa7c57dc
|
[
"MIT"
] | 4
|
2020-09-12T12:15:02.000Z
|
2020-09-13T09:38:26.000Z
|
package_monitor/admin.py
|
yunojuno-archive/django-package-monitor
|
5e387c1274b707050dcb441dbfd5b6c0aa7c57dc
|
[
"MIT"
] | 2
|
2019-07-22T18:50:54.000Z
|
2020-06-30T03:19:03.000Z
|
import logging
from typing import Iterable, List, Optional, Tuple
from django.contrib import admin
from django.db.models import F
from django.db.models.query import QuerySet
from django.http.request import HttpRequest
from django.template.defaultfilters import truncatechars
from django.utils.safestring import mark_safe
from .models import PackageVersion
logger = logging.getLogger(__name__)
def html_list(data: Optional[Iterable]) -> str:
"""Convert dict into formatted HTML."""
if data is None:
return ""
def as_li(val: str) -> str:
return f"<li>{val}</li>"
items = [as_li(v) for v in data]
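    # NB: the joined markup is passed to mark_safe() unescaped; callers only
    # feed it version strings, never user-supplied input.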
return mark_safe("<ul>%s</ul>" % "".join(items))
def check_pypi(
modeladmin: admin.ModelAdmin, request: HttpRequest, queryset: QuerySet
) -> None:
"""Update latest package info from PyPI."""
for p in queryset:
if p.is_editable:
logger.debug("Ignoring version update '%s' is editable", p.package_name)
else:
p.update_from_pypi()
check_pypi.short_description = "Update selected packages from PyPI" # type: ignore
class UpdateAvailableListFilter(admin.SimpleListFilter):
"""Enable filtering by packages with an update available."""
title = "Update available"
parameter_name = "update"
def lookups(
self, request: HttpRequest, model_admin: admin.ModelAdmin
) -> List[Tuple[str, str]]:
return [
("1", "Yes"),
("0", "No"),
("-1", "Unknown"),
]
def queryset(self, request: HttpRequest, queryset: QuerySet) -> QuerySet:
"""Filter based on whether an update (of any sort) is available."""
if self.value() == "-1":
return queryset.filter(latest_version__isnull=True)
elif self.value() == "0":
return queryset.filter(
current_version__isnull=False,
latest_version__isnull=False,
latest_version=F("current_version"),
)
elif self.value() == "1":
return queryset.filter(
current_version__isnull=False, latest_version__isnull=False
).exclude(latest_version=F("current_version"))
else:
return queryset
class PackageVersionAdmin(admin.ModelAdmin):
actions = (check_pypi,)
change_list_template = "change_list.html"
list_display = (
"package_name",
"_updateable",
"current_version",
"next_version",
"latest_version",
"supports_py3",
"_licence",
"diff_status",
"checked_pypi_at",
)
list_filter = (
"diff_status",
"is_editable",
"is_parseable",
UpdateAvailableListFilter,
"supports_py3",
)
ordering = ["package_name"]
readonly_fields = (
"package_name",
"is_editable",
"is_parseable",
"current_version",
"next_version",
"latest_version",
"diff_status",
"checked_pypi_at",
"url",
"licence",
"raw",
"available_updates",
"python_support",
"supports_py3",
"django_support",
)
def _licence(self, obj: "PackageVersion") -> str:
"""Return truncated version of licence."""
return truncatechars(obj.licence, 20)
_licence.short_description = "PyPI licence" # type: ignore
def _updateable(self, obj: "PackageVersion") -> Optional[bool]:
"""Return True if there are available updates."""
if obj.latest_version is None or obj.is_editable:
return None
else:
return obj.latest_version != obj.current_version
_updateable.boolean = True # type: ignore
_updateable.short_description = "Update available" # type: ignore
def available_updates(self, obj: "PackageVersion") -> str:
"""Print out all versions ahead of the current one."""
from package_monitor import pypi
package = pypi.Package(obj.package_name)
versions = package.all_versions()
return html_list([v for v in versions if v > obj.current_version])
admin.site.register(PackageVersion, PackageVersionAdmin)
| 29.542254
| 84
| 0.622884
|
d8b1294c11b87a1305f6147267ed6ca9bbc4d2ab
| 3,260
|
py
|
Python
|
detecting_gender/predict.py
|
melvinwevers/detecting_faces-medium-types-gender
|
eac51acdcc19b073b6a3a46275e3d47abdbc3b6f
|
[
"MIT"
] | null | null | null |
detecting_gender/predict.py
|
melvinwevers/detecting_faces-medium-types-gender
|
eac51acdcc19b073b6a3a46275e3d47abdbc3b6f
|
[
"MIT"
] | null | null | null |
detecting_gender/predict.py
|
melvinwevers/detecting_faces-medium-types-gender
|
eac51acdcc19b073b6a3a46275e3d47abdbc3b6f
|
[
"MIT"
] | null | null | null |
from adabound import AdaBound
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import CustomObjectScope
import os.path
import tensorflow as tf
import keras.backend as K
import numpy as np
import argparse
from sklearn.metrics import classification_report
def make_generators(args):
testPath = os.path.join(args.dataset, "test/1990")
testAug = ImageDataGenerator(
rescale=1.0 / 255
)
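    # ImageNet per-channel means (RGB), set so the generator can subtract them
    # during standardization; note (assumption): some Keras versions apply
    # .mean only when featurewise_center is enabled.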
mean = np.array([123.68, 116.779, 103.939], dtype="float32")
testAug.mean = mean
# initialize the testing generator
testGen = testAug.flow_from_directory(
testPath,
class_mode="binary",
target_size=(args.image_size, args.image_size),
color_mode="rgb",
shuffle=False,
batch_size=args.batch_size)
print(testGen.class_indices.keys())
return testGen
def get_args():
parser = argparse.ArgumentParser(description="Fine-tuning vgg16 for gender estimation in historical adverts.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--batch_size", type=int, default=64,
help="batch size")
parser.add_argument("--n_epochs", type=int, default=50,
help="number of epochs")
parser.add_argument("--lr", type=float, default=1e-3,
help="initial learning rate")
parser.add_argument("--opt", type=str, default="adabound",
help="optimizer name; 'sgd' or 'adam'")
parser.add_argument("--patience", type=int, default=6,
help="Patience for callbacks")
parser.add_argument("--image_size", type=int, default=224,
help="Input size of images")
parser.add_argument("--eraser", action="store_true")
parser.add_argument("--dataset", type=str, default="../data/gender_3")
parser.add_argument("--aug", action="store_true",
help="use data augmentation if set true")
parser.add_argument("--output_path", type=str, default="checkpoints",
help="checkpoint dir")
parser.add_argument("--figures_path", type=str, default="figures",
help="path for figures")
parser.add_argument("--model_path", type=str, default="model.h5",
help="model dir")
args = parser.parse_args()
return args
def evaluate(args):
testGen = make_generators(args)
with CustomObjectScope({'AdaBound': AdaBound()}):
model = load_model('binary_model.h5')
# print(testGen.classes)
predIdxs = model.predict_generator(testGen, steps=(
testGen.samples // args.batch_size) + 1, verbose=1)
predIdxs = predIdxs > 0.5
# print(predIdxs)
print(classification_report(testGen.classes, predIdxs,
target_names=testGen.class_indices.keys()))
if __name__ == '__main__':
os.environ['KERAS_BACKEND'] = 'tensorflow'
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
args = get_args()
evaluate(args)
| 33.958333
| 114
| 0.644785
|
e2a9d373c8dd89e535f964917bfa7ffc8342adea
| 1,127
|
py
|
Python
|
main/migrations/0013_auto_20171117_0305.py
|
abhishek9991/Club-Website
|
cadd69fe46a94c26af2035ed6ca9a3a0e4743fc9
|
[
"MIT"
] | 1
|
2021-06-04T04:32:34.000Z
|
2021-06-04T04:32:34.000Z
|
main/migrations/0013_auto_20171117_0305.py
|
abhishek9991/Club-Website
|
cadd69fe46a94c26af2035ed6ca9a3a0e4743fc9
|
[
"MIT"
] | null | null | null |
main/migrations/0013_auto_20171117_0305.py
|
abhishek9991/Club-Website
|
cadd69fe46a94c26af2035ed6ca9a3a0e4743fc9
|
[
"MIT"
] | 1
|
2020-10-01T04:23:04.000Z
|
2020-10-01T04:23:04.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-11-16 21:35
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0012_auto_20171117_0226'),
]
operations = [
migrations.CreateModel(
name='feedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('eventid', models.IntegerField()),
('name', models.CharField(blank=True, max_length=100)),
('comment', models.TextField()),
('star', models.IntegerField(default=0)),
],
),
migrations.AlterField(
model_name='event',
name='end_time',
field=models.TimeField(default=datetime.time(3, 5, 21, 651681)),
),
migrations.AlterField(
model_name='event',
name='start_time',
field=models.TimeField(default=datetime.time(3, 5, 21, 651681)),
),
]
| 30.459459
| 114
| 0.566105
|